blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5dacc908061af45c9cdd386cefd44c6f23e9bea9 | 1bc2a635a93b5bc84606edf9ac2226851cac9e6d | /rolling-gui.py | 94053a32749bb8af8a2ce44486ab7997863fc25d | [
"MIT"
] | permissive | coolkat64/rolling | 819149cbb1e11a455b93a030477f9da91e2f93e4 | 4c3ee2401128e993a52ac9b52cdbd32e17728129 | refs/heads/master | 2022-11-29T00:35:14.058665 | 2020-07-31T20:37:15 | 2020-07-31T20:37:15 | 285,312,272 | 0 | 0 | MIT | 2020-08-05T14:25:48 | 2020-08-05T14:25:47 | null | UTF-8 | Python | false | false | 204 | py | # coding: utf-8
from sqlalchemy.sql import default_comparator
def main():
    """Entry point: import and launch the rolling GUI at call time."""
    # Imported lazily so merely importing this script has no side effects.
    from rolling.gui.run import main as gui_entry_point
    gui_entry_point()
if __name__ == "__main__":
    main()
| [
"sevajol.bastien@gmail.com"
] | sevajol.bastien@gmail.com |
8cc25453f6a74c516925cd6336d6cfc5fb51e5c1 | 515a97129ce1b2b8eecca4b2087fde8985b82d5b | /Code-Scraps/old_modules/SpiceFeeds/Advertisements.py | beda188c44933632d7b4f61293836275184facc7 | [] | no_license | SpiceBot/scraps | 3ad6e81ac75e2b6a684fea64eb7e75477b0f4f63 | 90125e1397b57ac87cae5f3e506363aa04ddffdc | refs/heads/master | 2020-05-02T21:51:01.297114 | 2019-03-28T15:38:28 | 2019-03-28T15:38:28 | 178,232,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,619 | py | #!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import sopel.module
import sys
import os
from random import randint
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(shareddir)
from BotShared import *
# author jimender2
# Seed advertisements: used to initialise the 'ads' sayings database the
# first time the module runs (see execute_main's 'initialise' call) and as
# the fallback pool for the interval advertiser below.
defaultoptions = [
    "DoubleD recommends these new drapes https://goo.gl/BMTMde", "Spiceduck for spicerex mascot 2k18", "Deathbybandaid is looking for developers for spicebot and spicethings",
    "Upgrade to premium to remove ads", "Selling panties cheap. Msg DoubleD for more info.", "On sale now: tears of an orphan child!", "One-way ticket to Hell just $199",
    "Get a free xboner here", "Extra, Extra, read all about it! A giant Beaver is attacking Canadian people!", "Want to make fast money? Sell Drugs", "Syrup",
    "I love Apple products .... In the trash", "Did you know that I am a female?", "Wanna be friends?", "New Features released every day", "I feel neglected. Use me more. Duel assault in me!"]
# Channels that must never receive interval advertisements, regardless of
# their per-channel 'modules_enabled' setting.
hardcoded_not_in_this_chan = ["#spiceworks", "##spicebottest", "#spicebottest"]
@sopel.module.commands('ads', 'advertisements', 'ad', 'advertisement', 'spam')
def mainfunction(bot, trigger):
    """Check to see if module is enabled."""
    enablestatus, triggerargsarray, botcom, instigator = spicebot_prerun(bot, trigger, 'ads')
    # BUG FIX: the original ran the command body only when the module was
    # NOT enabled (everything was indented under `if not enablestatus:`).
    # Use a guard clause so a disabled module bails out instead.
    if not enablestatus:
        return
    # IF "&&" is in the full input, it is treated as multiple commands, and is split
    commands_array = spicemanip(bot, triggerargsarray, "split_&&")
    if commands_array == []:
        commands_array = [[]]
    for command_split_partial in commands_array:
        triggerargsarray_part = spicemanip(bot, command_split_partial, 'create')
        execute_main(bot, trigger, triggerargsarray_part, botcom, instigator)
def execute_main(bot, trigger, triggerargsarray, botcom, instigator):
    """Look up (or lazily initialise) the ad database and say one ad.

    botcom and instigator are accepted for interface compatibility with the
    command dispatcher but are not used here.
    """
    storage_key = 'ads'
    # First positional argument selects the sayings sub-command; default 'get'.
    action = spicemanip(bot, triggerargsarray, 1) or 'get'
    # Seed the database on first use, unless the caller is adding entries.
    if not sayingscheck(bot, storage_key) and action != "add":
        sayingsmodule(bot, storage_key, defaultoptions, 'initialise')
    ad_text = sayingsmodule(bot, storage_key, triggerargsarray, action)
    osd(bot, trigger.sender, 'say', ["[Advertisement]", ad_text, "[Advertisement]"])
@sopel.module.interval(60)
def advertisement(bot):
    """Get and share random advert at random intervals.

    Polled every 60 seconds; actually fires only once the randomly chosen
    timeout (20 min - 2 h) since the last advert has elapsed.
    """
    now = time.time()
    last_timesince = time_since(bot, bot.nick, "ads_last_time") or 0
    next_timeout = get_database_value(bot, bot.nick, "ads_next_timeout") or 0
    if last_timesince <= next_timeout:
        return
    # set time to now
    set_database_value(bot, bot.nick, "ads_last_time", now)
    # how long until next event
    next_timeout = randint(1200, 7200)
    set_database_value(bot, bot.nick, "ads_next_timeout", next_timeout)
    # BUG FIX: the original referenced `databasekey`, which is local to
    # execute_main and undefined here (NameError); use the 'ads' key directly.
    message = sayingsmodule(bot, 'ads', defaultoptions, 'get') or "Spiceduck for Spiceworks mascot 2k18"
    for channel in bot.channels:
        if channel not in hardcoded_not_in_this_chan:
            channelmodulesarray = get_database_value(bot, channel, 'modules_enabled') or []
            if 'ads' in channelmodulesarray:
                osd(bot, channel, 'say', ["[Advertisement]", message, "[Advertisement]"])
# compare timestamps
def time_since(bot, nick, databasekey):
    """Return the absolute number of seconds since the stored timestamp.

    Falls back to epoch 0 when the key has never been written —
    get_database_value returns None then (other call sites guard with
    `or 0` / `or []`), and int(None) used to raise TypeError on first run.
    """
    now = time.time()
    last = get_database_value(bot, nick, databasekey) or 0
    return abs(now - int(last))
| [
"sam@deathbybandaid.net"
] | sam@deathbybandaid.net |
4d58aeda11c26fdd95e76fa20027c44e07eed599 | b13ca274b4463c9900840ee6516094b7509b6041 | /empower/apps/e2eqosmanager/algorithms/exponentialquantumadaptation.py | 1a72f07dda32cb9d4c2b1b07847109214c2d15c8 | [
"Apache-2.0"
] | permissive | imec-idlab/sdn_wifi_manager | 09d206f2f649aa715752d3c44e011d3f54faf592 | eda52649f855722fdec1d02e25a28c61a8fbda06 | refs/heads/master | 2021-06-23T08:03:22.482931 | 2020-12-03T11:30:10 | 2020-12-03T11:30:10 | 162,106,793 | 0 | 0 | Apache-2.0 | 2019-03-27T16:23:31 | 2018-12-17T09:33:47 | Python | UTF-8 | Python | false | false | 2,940 | py | #!/usr/bin/env python3
#
# Copyright (c) 2019 Pedro Heleno Isolani
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Adaptation states returned/held in `status`.
KEEP_CONFIGURATION = 0
EXPLOITATION = 1
EXPLORATION = 2
""" Simple Exponential Quantum Adaptation using different adaptation rates for Exploitation and Exploration """
class ExponentialQuantumAdaptation:
    """Exponentially adapts a scheduling quantum.

    Calls to exploit()/explore() accumulate votes; once a counter reaches
    its trigger, the next get_new_quantum() call shrinks (exploitation) or
    grows (exploration) the quantum by the corresponding rate, then resets
    the status to KEEP_CONFIGURATION.
    """
    def __init__(self,
                 exploration_rate=0.05, exploration_trigger=5,
                 exploitation_rate=0.20, exploitation_trigger=1,
                 min_quantum=200):
        self.exploration_rate = exploration_rate  # fraction by which BE quantum grows
        self.exploitation_rate = exploitation_rate  # fraction by which BE quantum shrinks
        self.exploration_trigger = exploration_trigger  # explore() votes needed to arm a grow
        self.exploitation_trigger = exploitation_trigger  # exploit() votes needed to arm a shrink
        self.exploration_counter = 0  # votes accumulated towards exploration
        self.exploitation_counter = 0  # votes accumulated towards exploitation
        self.min_quantum = min_quantum  # lower bound applied after shrinking
        self.status = KEEP_CONFIGURATION
    def exploit(self):
        """Register one exploitation vote; arm a shrink once the trigger is met."""
        self.exploitation_counter += 1
        if self.exploitation_counter >= self.exploitation_trigger:
            self.status = EXPLOITATION
            self.exploitation_counter = 0
    def explore(self):
        """Register one exploration vote; arm a grow once the trigger is met."""
        self.exploration_counter += 1
        if self.exploration_counter >= self.exploration_trigger:
            self.status = EXPLORATION
            self.exploration_counter = 0
    def get_new_quantum(self, old_quantum):
        """Return the adapted quantum (as int) and reset status.

        EXPLORATION grows old_quantum by exploration_rate; EXPLOITATION
        shrinks it by exploitation_rate but never below min_quantum;
        otherwise old_quantum is returned unchanged (truncated to int).
        """
        if self.status == EXPLORATION:
            new_quantum = int(old_quantum + (old_quantum * self.exploration_rate))
        elif self.status == EXPLOITATION:
            new_quantum = int(old_quantum - (old_quantum * self.exploitation_rate))
            if new_quantum < self.min_quantum:
                new_quantum = self.min_quantum
        else:
            new_quantum = int(old_quantum)
        self.status = KEEP_CONFIGURATION
        return new_quantum
    def __str__(self):
        # BUG FIX: the original ran "counter: N" straight into
        # "Exploration rate:" with no separator; add the missing space.
        return ("Exploitation rate: " + str(self.exploitation_rate) +
                " trigger: " + str(self.exploitation_trigger) +
                " counter: " + str(self.exploitation_counter) +
                " Exploration rate: " + str(self.exploration_rate) +
                " trigger: " + str(self.exploration_trigger) +
                " counter: " + str(self.exploration_counter))
| [
"pedroisolani@gmail.com"
] | pedroisolani@gmail.com |
5c462392b871c1e216a91cdc40e909732cc9f8cd | e238db1ae3e641d84af17e9cf6a881eb43b20039 | /a.py | 6f737a52fd3c1d8d2e2e590d51737e14a909dc53 | [] | no_license | RevathiRathi/Revat | 1aa478f51b147e7b044d7519f54938eda0149619 | 792757fb56243846e3049889bf502014c62d658e | refs/heads/master | 2020-04-15T05:01:36.873956 | 2019-07-18T13:40:34 | 2019-07-18T13:40:34 | 164,406,245 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | #revs
# Read one line and print its most frequent character
# (first occurrence wins ties), matching the original script's behavior.
s = input()
# Frequency of each position's character; O(n^2) counting kept for simplicity.
counts = [s.count(ch) for ch in s]
if counts:
    # PERF FIX: max() was recomputed on every loop iteration; hoist it.
    highest = max(counts)
    for ch, c in zip(s, counts):
        if c == highest:
            print(ch)
            break
| [
"noreply@github.com"
] | RevathiRathi.noreply@github.com |
74928235472f4f38d3c6323b463e1066f86919bf | fdb9bdc6c4ab2f14ba71e544493706d5e275899f | /fhir/resources/R4B/messageheader.py | 80532470000078ea827d5b65e46dee0791e92402 | [
"BSD-3-Clause"
] | permissive | nazrulworld/fhir.resources | 6ae8aea8180c611b0c5050759c6dcdf63e4cb061 | 1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3 | refs/heads/main | 2023-08-30T18:27:27.277249 | 2023-07-03T19:57:06 | 2023-07-03T19:57:06 | 165,297,877 | 256 | 83 | NOASSERTION | 2023-08-24T15:34:05 | 2019-01-11T19:26:41 | Python | UTF-8 | Python | false | false | 26,908 | py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MessageHeader
Release: R4B
Version: 4.3.0
Build ID: c475c22
Last updated: 2022-05-28T12:47:40.239+10:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class MessageHeader(domainresource.DomainResource):
    """Disclaimer: Any field name ends with ``__ext`` doesn't part of
    Resource StructureDefinition, instead used to enable Extensibility feature
    for FHIR Primitive Data Types.
    A resource that describes a message that is exchanged between systems.
    The header for a message exchange that is either requesting or responding
    to an action. The reference(s) that are the subject of the action as well
    as other information related to the action are typically transmitted in a
    bundle in which the MessageHeader resource instance is the first resource
    in the bundle.
    """
    # Pydantic constant discriminating this FHIR resource type.
    resource_type = Field("MessageHeader", const=True)
    author: fhirtypes.ReferenceType = Field(
        None,
        alias="author",
        title="The source of the decision",
        description=(
            "The logical author of the message - the person or device that decided "
            "the described event should happen. When there is more than one "
            "candidate, pick the most proximal to the MessageHeader. Can provide "
            "other authors in extensions."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Practitioner", "PractitionerRole"],
    )
    definition: fhirtypes.Canonical = Field(
        None,
        alias="definition",
        title="Link to the definition for this message",
        description="Permanent link to the MessageDefinition for this message.",
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["MessageDefinition"],
    )
    definition__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_definition", title="Extension field for ``definition``."
    )
    destination: typing.List[fhirtypes.MessageHeaderDestinationType] = Field(
        None,
        alias="destination",
        title="Message destination application(s)",
        description="The destination application which the message is intended for.",
        # if property is element of this resource.
        element_property=True,
    )
    enterer: fhirtypes.ReferenceType = Field(
        None,
        alias="enterer",
        title="The source of the data entry",
        description=(
            "The person or device that performed the data entry leading to this "
            "message. When there is more than one candidate, pick the most proximal"
            " to the message. Can provide other enterers in extensions."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Practitioner", "PractitionerRole"],
    )
    # event[x] choice of type: exactly one of eventCoding / eventUri must be
    # populated (enforced by validate_one_of_many_1485 below).
    eventCoding: fhirtypes.CodingType = Field(
        None,
        alias="eventCoding",
        title="Code for the event this message represents or link to event definition",
        description=(
            "Code that identifies the event this message represents and connects it"
            " with its definition. Events defined as part of the FHIR specification"
            ' have the system value "http://terminology.hl7.org/CodeSystem/message-'
            'events". Alternatively uri to the EventDefinition.'
        ),
        # if property is element of this resource.
        element_property=True,
        # Choice of Data Types. i.e event[x]
        one_of_many="event",
        one_of_many_required=True,
    )
    eventUri: fhirtypes.Uri = Field(
        None,
        alias="eventUri",
        title="Code for the event this message represents or link to event definition",
        description=(
            "Code that identifies the event this message represents and connects it"
            " with its definition. Events defined as part of the FHIR specification"
            ' have the system value "http://terminology.hl7.org/CodeSystem/message-'
            'events". Alternatively uri to the EventDefinition.'
        ),
        # if property is element of this resource.
        element_property=True,
        # Choice of Data Types. i.e event[x]
        one_of_many="event",
        one_of_many_required=True,
    )
    eventUri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_eventUri", title="Extension field for ``eventUri``."
    )
    focus: typing.List[fhirtypes.ReferenceType] = Field(
        None,
        alias="focus",
        title="The actual content of the message",
        description=(
            "The actual data of the message - a reference to the root/focus class "
            "of the event."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Resource"],
    )
    reason: fhirtypes.CodeableConceptType = Field(
        None,
        alias="reason",
        title="Cause of event",
        description=(
            "Coded indication of the cause for the event - indicates a reason for "
            "the occurrence of the event that is a focus of this message."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    response: fhirtypes.MessageHeaderResponseType = Field(
        None,
        alias="response",
        title="If this is a reply to prior message",
        description=(
            "Information about the message that this message is a response to. "
            "Only present if this message is a response."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    responsible: fhirtypes.ReferenceType = Field(
        None,
        alias="responsible",
        title="Final responsibility for event",
        description=(
            "The person or organization that accepts overall responsibility for the"
            " contents of the message. The implication is that the message event "
            "happened under the policies of the responsible party."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Practitioner", "PractitionerRole", "Organization"],
    )
    sender: fhirtypes.ReferenceType = Field(
        None,
        alias="sender",
        title="Real world sender of the message",
        description=(
            "Identifies the sending system to allow the use of a trust " "relationship."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Practitioner", "PractitionerRole", "Organization"],
    )
    # `...` (Ellipsis) marks `source` as the only required field of this model.
    source: fhirtypes.MessageHeaderSourceType = Field(
        ...,
        alias="source",
        title="Message source application",
        description="The source application from which this message originated.",
        # if property is element of this resource.
        element_property=True,
    )
    @classmethod
    def elements_sequence(cls):
        """returning all elements names from
        ``MessageHeader`` according specification,
        with preserving original sequence order.
        """
        return [
            "id",
            "meta",
            "implicitRules",
            "language",
            "text",
            "contained",
            "extension",
            "modifierExtension",
            "eventCoding",
            "eventUri",
            "destination",
            "sender",
            "enterer",
            "author",
            "source",
            "responsible",
            "reason",
            "response",
            "focus",
            "definition",
        ]
    # Enforces the FHIR "choice of type" rule for event[x]: at most one of the
    # listed fields may be set, and (since required) at least one must be.
    @root_validator(pre=True, allow_reuse=True)
    def validate_one_of_many_1485(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/formats.html#choice
        A few elements have a choice of more than one data type for their content.
        All such elements have a name that takes the form nnn[x].
        The "nnn" part of the name is constant, and the "[x]" is replaced with
        the title-cased name of the type that is actually used.
        The table view shows each of these names explicitly.
        Elements that have a choice of data type cannot repeat - they must have a
        maximum cardinality of 1. When constructing an instance of an element with a
        choice of types, the authoring system must create a single element with a
        data type chosen from among the list of permitted data types.
        """
        one_of_many_fields = {"event": ["eventCoding", "eventUri"]}
        for prefix, fields in one_of_many_fields.items():
            assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
            required = (
                cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
                is True
            )
            found = False
            for field in fields:
                if field in values and values[field] is not None:
                    if found is True:
                        raise ValueError(
                            "Any of one field value is expected from "
                            f"this list {fields}, but got multiple!"
                        )
                    else:
                        found = True
            if required is True and found is False:
                raise ValueError(f"Expect any of field value from this list {fields}.")
        return values
class MessageHeaderDestination(backboneelement.BackboneElement):
    """Disclaimer: Any field name ends with ``__ext`` doesn't part of
    Resource StructureDefinition, instead used to enable Extensibility feature
    for FHIR Primitive Data Types.
    Message destination application(s).
    The destination application which the message is intended for.
    """
    resource_type = Field("MessageHeaderDestination", const=True)
    # `endpoint` is a required primitive: element_required=True, checked by the
    # root validator below (may be absent only if its extension is supplied).
    endpoint: fhirtypes.Url = Field(
        None,
        alias="endpoint",
        title="Actual destination address or id",
        description="Indicates where the message should be routed to.",
        # if property is element of this resource.
        element_property=True,
        element_required=True,
    )
    endpoint__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_endpoint", title="Extension field for ``endpoint``."
    )
    name: fhirtypes.String = Field(
        None,
        alias="name",
        title="Name of system",
        description="Human-readable name for the target system.",
        # if property is element of this resource.
        element_property=True,
    )
    name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_name", title="Extension field for ``name``."
    )
    receiver: fhirtypes.ReferenceType = Field(
        None,
        alias="receiver",
        title='Intended "real-world" recipient for the data',
        description=(
            "Allows data conveyed by a message to be addressed to a particular "
            "person or department when routing to a specific application isn't "
            "sufficient."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Practitioner", "PractitionerRole", "Organization"],
    )
    target: fhirtypes.ReferenceType = Field(
        None,
        alias="target",
        title="Particular delivery destination within the destination",
        description=(
            "Identifies the target end system in situations where the initial "
            "message transmission is to an intermediary system."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Device"],
    )
    @classmethod
    def elements_sequence(cls):
        """returning all elements names from
        ``MessageHeaderDestination`` according specification,
        with preserving original sequence order.
        """
        return [
            "id",
            "extension",
            "modifierExtension",
            "name",
            "target",
            "endpoint",
            "receiver",
        ]
    # A required primitive may be omitted only when a data-absent-reason-style
    # extension is provided in its paired `__ext` field; otherwise this raises.
    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_2635(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        required_fields = [("endpoint", "endpoint__ext")]
        _missing = object()
        def _fallback():
            return ""
        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
class MessageHeaderResponse(backboneelement.BackboneElement):
    """Disclaimer: Any field name ends with ``__ext`` doesn't part of
    Resource StructureDefinition, instead used to enable Extensibility feature
    for FHIR Primitive Data Types.
    If this is a reply to prior message.
    Information about the message that this message is a response to. Only
    present if this message is a response.
    """
    resource_type = Field("MessageHeaderResponse", const=True)
    # `code` and `identifier` are required primitives; see the root validator
    # below for the extension-based escape hatch.
    code: fhirtypes.Code = Field(
        None,
        alias="code",
        title="ok | transient-error | fatal-error",
        description=(
            "Code that identifies the type of response to the message - whether it "
            "was successful or not, and whether it should be resent or not."
        ),
        # if property is element of this resource.
        element_property=True,
        element_required=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["ok", "transient-error", "fatal-error"],
    )
    code__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_code", title="Extension field for ``code``."
    )
    details: fhirtypes.ReferenceType = Field(
        None,
        alias="details",
        title="Specific list of hints/warnings/errors",
        description="Full details of any issues found in the message.",
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["OperationOutcome"],
    )
    identifier: fhirtypes.Id = Field(
        None,
        alias="identifier",
        title="Id of original message",
        description=(
            "The MessageHeader.id of the message to which this message is a "
            "response."
        ),
        # if property is element of this resource.
        element_property=True,
        element_required=True,
    )
    identifier__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_identifier", title="Extension field for ``identifier``."
    )
    @classmethod
    def elements_sequence(cls):
        """returning all elements names from
        ``MessageHeaderResponse`` according specification,
        with preserving original sequence order.
        """
        return ["id", "extension", "modifierExtension", "identifier", "code", "details"]
    # A required primitive may be omitted only when a data-absent-reason-style
    # extension is provided in its paired `__ext` field; otherwise this raises.
    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_2319(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        required_fields = [("code", "code__ext"), ("identifier", "identifier__ext")]
        _missing = object()
        def _fallback():
            return ""
        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
class MessageHeaderSource(backboneelement.BackboneElement):
    """Disclaimer: Any field name ends with ``__ext`` doesn't part of
    Resource StructureDefinition, instead used to enable Extensibility feature
    for FHIR Primitive Data Types.
    Message source application.
    The source application from which this message originated.
    """
    resource_type = Field("MessageHeaderSource", const=True)
    contact: fhirtypes.ContactPointType = Field(
        None,
        alias="contact",
        title="Human contact for problems",
        description=(
            "An e-mail, phone, website or other contact point to use to resolve "
            "issues with message communications."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    # `endpoint` is a required primitive: element_required=True, checked by the
    # root validator below (may be absent only if its extension is supplied).
    endpoint: fhirtypes.Url = Field(
        None,
        alias="endpoint",
        title="Actual message source address or id",
        description="Identifies the routing target to send acknowledgements to.",
        # if property is element of this resource.
        element_property=True,
        element_required=True,
    )
    endpoint__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_endpoint", title="Extension field for ``endpoint``."
    )
    name: fhirtypes.String = Field(
        None,
        alias="name",
        title="Name of system",
        description="Human-readable name for the source system.",
        # if property is element of this resource.
        element_property=True,
    )
    name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_name", title="Extension field for ``name``."
    )
    software: fhirtypes.String = Field(
        None,
        alias="software",
        title="Name of software running the system",
        description="May include configuration or other information useful in debugging.",
        # if property is element of this resource.
        element_property=True,
    )
    software__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_software", title="Extension field for ``software``."
    )
    version: fhirtypes.String = Field(
        None,
        alias="version",
        title="Version of software running",
        description=(
            "Can convey versions of multiple systems in situations where a message "
            "passes through multiple hands."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    version__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_version", title="Extension field for ``version``."
    )
    @classmethod
    def elements_sequence(cls):
        """returning all elements names from
        ``MessageHeaderSource`` according specification,
        with preserving original sequence order.
        """
        return [
            "id",
            "extension",
            "modifierExtension",
            "name",
            "software",
            "version",
            "contact",
            "endpoint",
        ]
    # A required primitive may be omitted only when a data-absent-reason-style
    # extension is provided in its paired `__ext` field; otherwise this raises.
    @root_validator(pre=True, allow_reuse=True)
    def validate_required_primitive_elements_2097(
        cls, values: typing.Dict[str, typing.Any]
    ) -> typing.Dict[str, typing.Any]:
        """https://www.hl7.org/fhir/extensibility.html#Special-Case
        In some cases, implementers might find that they do not have appropriate data for
        an element with minimum cardinality = 1. In this case, the element must be present,
        but unless the resource or a profile on it has made the actual value of the primitive
        data type mandatory, it is possible to provide an extension that explains why
        the primitive value is not present.
        """
        required_fields = [("endpoint", "endpoint__ext")]
        _missing = object()
        def _fallback():
            return ""
        errors: typing.List["ErrorWrapper"] = []
        for name, ext in required_fields:
            field = cls.__fields__[name]
            ext_field = cls.__fields__[ext]
            value = values.get(field.alias, _missing)
            if value not in (_missing, None):
                continue
            ext_value = values.get(ext_field.alias, _missing)
            missing_ext = True
            if ext_value not in (_missing, None):
                if isinstance(ext_value, dict):
                    missing_ext = len(ext_value.get("extension", [])) == 0
                elif (
                    getattr(ext_value.__class__, "get_resource_type", _fallback)()
                    == "FHIRPrimitiveExtension"
                ):
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
                else:
                    validate_pass = True
                    for validator in ext_field.type_.__get_validators__():
                        try:
                            ext_value = validator(v=ext_value)
                        except ValidationError as exc:
                            errors.append(ErrorWrapper(exc, loc=ext_field.alias))
                            validate_pass = False
                    if not validate_pass:
                        continue
                    if ext_value.extension and len(ext_value.extension) > 0:
                        missing_ext = False
            if missing_ext:
                if value is _missing:
                    errors.append(ErrorWrapper(MissingError(), loc=field.alias))
                else:
                    errors.append(
                        ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
                    )
        if len(errors) > 0:
            raise ValidationError(errors, cls)  # type: ignore
        return values
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
f0b562001a2fdb11fa8dc1bab18cd321327500b1 | 4546398a18590e4e182629fb55d185547dd6df0a | /2023/problems/guillaume/data/random_generator.py | 0b2cd7f91cca91dc4c3ec1c4b0aedb4eb8066aa8 | [] | no_license | ForritunarkeppniFramhaldsskolanna/Keppnir | 352341fa97c6349af65b513c03171f3e706f7db2 | 65c8eb5358d8a49f956edf76c2d47b9372accc3c | refs/heads/master | 2023-04-28T15:33:36.396225 | 2023-04-23T15:00:15 | 2023-04-23T15:00:15 | 78,303,702 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | #!/usr/bin/python3
import sys
import random
# CLI: <min_n> <max_n> <min_non_draws> <max_non_draws> <type> ... <seed>
random.seed(int(sys.argv[-1]))  # last argument seeds the RNG for reproducibility
# NOTE(review): eval() on CLI arguments is acceptable only because this is a
# trusted, judge-side test-case generator; never do this with untrusted input.
min_n = eval(sys.argv[1])
max_n = eval(sys.argv[2])
min_non_draws = eval(sys.argv[3])
max_non_draws = eval(sys.argv[4])
tp = sys.argv[5]  # generation strategy name, or a literal pattern
n = random.randint(min_n, max_n)  # total length of the generated string
# Clamp the non-draw bounds so they never exceed the string length.
min_non_draws = min((min_non_draws, n))
max_non_draws = min((max_non_draws, n))
def uniform_interleave(s, t):
i = 0
j = 0
res = []
while i+j < len(s)+len(t):
s_rem = len(s) - i
t_rem = len(t) - j
total_rem = s_rem + t_rem
if random.randint(1, total_rem) <= s_rem:
res.append(s[i])
i += 1
else:
res.append(t[j])
j += 1
return ''.join(res)
def get_uniform():
    """Build a random game string of length *n* (module global).

    Picks a number of decisive (non-draw) games within the configured
    bounds, splits them between "G" and "A" wins, and scatters the draws
    uniformly among them.
    """
    decisive = random.randint(min_non_draws, max_non_draws)
    g_wins = random.randint(0, decisive)
    mixed = uniform_interleave("A" * (decisive - g_wins), "G" * g_wins)
    return uniform_interleave("D" * (n - decisive), mixed)
def get_streak():
    """Build a random game string where all "G" wins precede all "A" wins
    (one streak each), with draws scattered uniformly in between.

    Uses the module globals *n*, *min_non_draws* and *max_non_draws*.
    """
    decisive = random.randint(min_non_draws, max_non_draws)
    g_wins = random.randint(0, decisive)
    ordered = ["G"] * g_wins + ["A"] * (decisive - g_wins)
    return uniform_interleave(ordered, "D" * (n - decisive))
d = []
# Dispatch on the requested test type; any other value of tp is treated as a
# literal pattern that is repeated/truncated to length n.
if tp == "uniform":
    d = get_uniform()
elif tp == "streak":
    d = get_streak()
else:
    d = ''.join([tp[i%len(tp)] for i in range(n)])
# Emit the test case: line 1 is the length, line 2 the game string.
print(len(d))
print(d)
| [
"bjarki.agust@gmail.com"
] | bjarki.agust@gmail.com |
214b28f3d904980ec408c9cf9c6bc8e5727d741a | 8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6 | /venv/Lib/site-packages/mypy/typeshed/stdlib/3/_importlib_modulespec.pyi | a3cf0197992e4fc82dffb75581e346cf586ef642 | [] | no_license | RodrigoNeto/cursopythonyt | fc064a2e6106324e22a23c54bdb9c31040ac9eb6 | 279dad531e21a9c7121b73d84fcbdd714f435e7e | refs/heads/master | 2023-07-03T00:54:09.795054 | 2021-08-13T12:42:24 | 2021-08-13T12:42:24 | 395,646,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | pyi | # ModuleSpec, ModuleType, Loader are part of a dependency cycle.
# They are officially defined/exported in other places:
#
# - ModuleType in types
# - Loader in importlib.abc
# - ModuleSpec in importlib.machinery (3.4 and later only)
#
# _Loader is the PEP-451-defined interface for a loader type/object.
from abc import ABCMeta
from typing import Any, Dict, List, Optional, Protocol
class _Loader(Protocol):
    # Structural ("duck-typed") loader interface per PEP 451: anything with a
    # load_module() method qualifies, without inheriting from Loader below.
    def load_module(self, fullname: str) -> ModuleType: ...
class ModuleSpec:
    # Import-system metadata for a module (PEP 451); officially exported as
    # importlib.machinery.ModuleSpec (see the header comment of this stub).
    def __init__(
        self,
        name: str,
        loader: Optional[Loader],
        *,
        origin: Optional[str] = ...,
        loader_state: Any = ...,
        is_package: Optional[bool] = ...,
    ) -> None: ...
    name: str
    loader: Optional[_Loader]
    origin: Optional[str]
    submodule_search_locations: Optional[List[str]]
    loader_state: Any
    cached: Optional[str]
    parent: Optional[str]
    has_location: bool
class ModuleType:
    # The type of all modules; officially exported as types.ModuleType
    # (see the header comment of this stub).
    __name__: str
    __file__: str
    __dict__: Dict[str, Any]
    __loader__: Optional[_Loader]
    __package__: Optional[str]
    __spec__: Optional[ModuleSpec]
    def __init__(self, name: str, doc: Optional[str] = ...) -> None: ...
class Loader(metaclass=ABCMeta):
    # Abstract loader base class; officially exported as importlib.abc.Loader.
    def load_module(self, fullname: str) -> ModuleType: ...
    def module_repr(self, module: ModuleType) -> str: ...
    def create_module(self, spec: ModuleSpec) -> Optional[ModuleType]: ...
    # Not defined on the actual class for backwards-compatibility reasons,
    # but expected in new code.
    def exec_module(self, module: ModuleType) -> None: ...
| [
"rodrigoneto.forseti@gmail.com"
] | rodrigoneto.forseti@gmail.com |
612589466e47db3874b810aa5c365e41273ef98f | 42efe06c233479b1882cc6e0e418c9ef6e0a1434 | /CodingTest_Study1/week06/ex1149.py | 8f2567cd9a2925229f49bd1b41e1bb474b224f59 | [] | no_license | KimTaesong/Algorithm | 146d53cb24b12355330b212edb87ec9d22123359 | 4c32275dd21fa692258e5348a02ce0c1c0b4ec91 | refs/heads/master | 2023-06-23T16:46:52.495443 | 2021-07-21T00:30:49 | 2021-07-21T00:30:49 | 194,765,200 | 8 | 2 | null | 2021-07-21T00:30:49 | 2019-07-02T01:20:10 | Python | UTF-8 | Python | false | false | 511 | py | n = int(input())
# BOJ 1149 "RGB street": paint n houses with 3 colors so that adjacent houses
# differ in color, minimising total cost.  costs[i][c] = cost of painting
# house i with color c (1-based; row 0 is a dummy).  n is read above.
costs = [[0, 0, 0]]
for _ in range(n):
    costs.append([int(tok) for tok in input().split()])
# best[i][c] = cheapest way to paint houses 1..i with house i in color c.
best = [[0] * 3 for _ in range(n + 1)]
for c in range(3):
    best[1][c] = costs[1][c]
for house in range(2, n + 1):
    for color in range(3):
        prev = [best[house - 1][c] for c in range(3) if c != color]
        best[house][color] = costs[house][color] + min(prev)
print(min(best[n]))
| [
"taesongweb@gmail.com"
] | taesongweb@gmail.com |
85c0b27292ad066cf2bf4e1ce48054ec9373c3aa | 236d1d029767e4aa342df6d9dc425cb5a5247707 | /wheelwhere_server/search/views.py | 86d4b42815c8bff955f457ee768c888676655473 | [] | no_license | KilJaeeun/wheelwhereserver | 2f3ed05cbea4d34bde7af52acebbff01645be052 | 9428876a12ea2a8480f2811de793132555282db9 | refs/heads/master | 2022-12-08T02:51:53.357391 | 2020-08-27T05:59:41 | 2020-08-27T05:59:41 | 285,256,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | from django.shortcuts import render
from django.db.models import Q
# Create your views here.
from facility.models import post
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.views import APIView
class SearchView(APIView):
    """Full-text search over ``post`` documents.

    GET /?word=<term> returns every post whose name, address or description
    contains <term> (case-insensitive), serialized as a flat dict per post.
    Open to anonymous users.
    """
    permission_classes = [permissions.AllowAny]

    def get(self, request):
        # 'word' is a required query parameter; a missing key raises KeyError
        # (HTTP 500), matching the original behavior.
        word = request.GET['word']
        post_list = post.objects.filter(
            Q(name__icontains=word) | Q(address__icontains=word) | Q(description__icontains=word)
        ).distinct()  # 중복을 제거한다. (remove duplicates)
        context = []
        for item in post_list:
            # Fix: the original assigned 'is_tuck' twice in a row; the
            # duplicate line was pure redundancy and is removed here.
            context.append({
                'id': item.id,
                'name': item.name,
                'is_toilet': item.is_toilet,
                'is_elibator': item.is_elibator,
                'is_parking': item.is_parking,
                'is_tuck': item.is_tuck,
                'is_helper': item.is_helper,
                'description': item.description,
                'latitude': item.latitude,
                'longitude': item.longitude,
                'star': item.star,
            })
        return Response({'msg': 'success', 'object_list': context})
"rha3122@naver.com"
] | rha3122@naver.com |
1ca24d79f42173ecdb2eae95b6d1da39aedafcb7 | a72724b201b24f287555a695840d662b13a8dee5 | /tv/models.py | 1bb02afc9510c8b7f12d828a6732c17baa19cf0d | [] | no_license | webmalc/fh-django | f2bdfe63e2322abaf3523aff5d259495d3b1e1d9 | 0d11ba23c8def6dd1b03d8362730f8665915bd34 | refs/heads/master | 2020-05-14T02:38:37.757625 | 2016-01-29T14:45:28 | 2016-01-29T14:45:28 | 37,921,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | from django.db import models
from fh.models import CommonInfo
from django.core.urlresolvers import reverse
class Channel(CommonInfo):
    """TV channel: a title, a category (mapped to a font-awesome icon class)
    and one or two code payloads.

    NOTE(review): the exact meaning of ``code``/``alternative_code`` (embed
    markup? stream id?) is not visible from this module — confirm before
    documenting further.
    """
    # Maps each category name to the font-awesome CSS class used as its icon.
    CATEGORIES = {
        'Kids': 'fa fa-child',
        'Educational': 'fa fa-flask',
        'Public': 'fa fa-group',
        'Music': 'fa fa-music',
        'Sport': 'fa fa-futbol-o',
        'Regional': 'fa fa-globe',
        'Entertainment': 'fa fa-star',
        'Man': 'fa fa-male',
        'Woman': 'fa fa-female',
        'Films': 'fa fa-film',
        'News': 'fa fa-newspaper-o',
        'Religion': 'fa fa-bolt',
        'Other': 'fa fa-tv'
    }
    title = models.CharField(max_length=255)
    # Choices are the category names themselves, sorted alphabetically.
    category = models.CharField(max_length=255, choices=[(i, i) for i in sorted(CATEGORIES.keys())])
    is_enabled = models.BooleanField(default=True, verbose_name='Is enabled?')
    code = models.TextField()
    alternative_code = models.TextField(null=True, blank=True)
    is_favorite = models.BooleanField(default=False, verbose_name='Is favorite?')
    def get_absolute_url(self):
        # Canonical detail URL for this channel (tv:channel_show route).
        return reverse('tv:channel_show', kwargs={'pk': self.pk})
    def __str__(self):
        return '%s' % self.title
    class Meta:
        # Default listing: grouped by category, alphabetical within it.
        ordering = ['category', 'title']
| [
"webmalc@gmail.com"
] | webmalc@gmail.com |
e7efc7032b31a23c99ddf62da954832182c4d66e | 32965ef1ebf4f7a274db451ce1745ba1cb217b96 | /dataapi_client/__init__.py | 08f2f614418be8e958499695c0bc954c65ed4c1e | [
"MIT"
] | permissive | jmnel/dataapi-client | 00cfc0025705b86ca0c9991938d85ead1bee9e27 | 808a1e20b5d9e21fb7348729442ff73d1fd7345d | refs/heads/master | 2023-01-04T10:42:46.961907 | 2020-09-30T04:50:27 | 2020-09-30T04:50:27 | 297,760,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | from .api import ApiConfig
from . import topk
| [
"jmnel92@gmail.com"
] | jmnel92@gmail.com |
c5f052cdc5e5b9335853d117d7cb4f7e9fc51bfd | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-kafka/huaweicloudsdkkafka/v2/model/update_topic_replica_response.py | b7c0fd01ef65b9330f0a828a975b3b977bb94d44 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 2,223 | py | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class UpdateTopicReplicaResponse(SdkResponse):
    """Response model for the Kafka "update topic replica" API call.

    Carries no payload fields of its own (``openapi_types`` is empty).
    NOTE(review): this looks like generator-emitted SDK boilerplate —
    hand-edits may be overwritten by the next regeneration.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
    }

    attribute_map = {
    }

    def __init__(self):
        """UpdateTopicReplicaResponse - a model defined in huaweicloud sdk"""
        
        super(UpdateTopicReplicaResponse, self).__init__()
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are themselves models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing raw values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateTopicReplicaResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
c5ff01e1f225a025e4de230512deabdb29b1f23d | 36e555fb543c3ee4e1adb2959e6017a44af06538 | /venv-creataku/bin/elbadmin | 7c1928a3b695b6a2f8fa08bffdbff8fc34971be1 | [] | no_license | sangyeob/creataku | 0d55d1f603b7988035d2d3eedd0e522a53beca0f | 05abe4b02723455b67a0b751711ed5873b14e643 | refs/heads/master | 2020-03-24T20:22:36.594111 | 2018-07-31T08:34:42 | 2018-07-31T08:34:42 | 142,974,886 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,742 | #!/Users/Sang/OneDrive/Developments/creataku/venv-creataku/bin/python
# Copyright (c) 2009 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
#
# Elastic Load Balancer Tool
#
VERSION = "0.2"
usage = """%prog [options] [command]
Commands:
list|ls List all Elastic Load Balancers
delete <name> Delete ELB <name>
get <name> Get all instances associated with <name>
create <name> Create an ELB; -z and -l are required
add <name> <instances> Add <instances> in ELB <name>
remove|rm <name> <instances> Remove <instances> from ELB <name>
reap <name> Remove terminated instances from ELB <name>
enable|en <name> <zone> Enable Zone <zone> for ELB <name>
disable <name> <zone> Disable Zone <zone> for ELB <name>
addl <name> Add listeners (specified by -l) to the ELB
<name>
rml <name> <port> Remove Listener(s) specified by the port on
the ELB <name>
"""
def find_elb(elb, name):
    """Return the single load balancer called *name*, or None (with a
    message printed) when it is missing or ambiguous."""
    try:
        elbs = elb.get_all_load_balancers(name)
    except boto.exception.BotoServerError as se:
        # A missing ELB surfaces as a server error, not an empty list.
        if se.code == 'LoadBalancerNotFound':
            elbs = []
        else:
            raise
    if len(elbs) < 1:
        print "No load balancer by the name of %s found" % name
        return None
    elif len(elbs) > 1:
        print "More than one elb matches %s?" % name
        return None

    # Should not happen
    # NOTE(review): this is a substring test ("in"), not equality — presumably
    # a defensive sanity check; confirm whether exact match was intended.
    if name not in elbs[0].name:
        print "No load balancer by the name of %s found" % name
        return None

    return elbs[0]
def list(elb):
"""List all ELBs"""
print "%-20s %s" % ("Name", "DNS Name")
print "-" * 80
for b in elb.get_all_load_balancers():
print "%-20s %s" % (b.name, b.dns_name)
def check_valid_region(conn, region):
if conn is None:
print 'Invalid region (%s)' % region
sys.exit(1)
def get(elb, name):
    """Get details about ELB <name>: listeners, zones and instance health,
    with each instance's Name tag resolved via an EC2 lookup."""
    b = find_elb(elb, name)
    if b:
        print "=" * 80
        print "Name: %s" % b.name
        print "DNS Name: %s" % b.dns_name
        if b.canonical_hosted_zone_name:
            chzn = b.canonical_hosted_zone_name
            print "Canonical hosted zone name: %s" % chzn
        if b.canonical_hosted_zone_name_id:
            chznid = b.canonical_hosted_zone_name_id
            print "Canonical hosted zone name id: %s" % chznid
        print

        print "Health Check: %s" % b.health_check
        print

        print "Listeners"
        print "---------"
        print "%-8s %-8s %s" % ("IN", "OUT", "PROTO")
        for l in b.listeners:
            print "%-8s %-8s %s" % (l[0], l[1], l[2])

        print

        print "  Zones  "
        print "---------"
        for z in b.availability_zones:
            print z

        print

        # Make map of all instance Id's to Name tags
        # NOTE: relies on the module-level `options` set in __main__.
        import boto
        from boto.compat.six import iteritems
        if not options.region:
            ec2 = boto.connect_ec2()
        else:
            ec2 = boto.ec2.connect_to_region(options.region)
            check_valid_region(ec2, options.region)

        instance_health = b.get_instance_health()
        instances = [state.instance_id for state in instance_health]

        names = dict((k,'') for k in instances)
        for i in ec2.get_only_instances():
            if i.id in instances:
                names[i.id] = i.tags.get('Name', '')

        # Column wide enough for the longest Name tag (minimum 4) plus padding.
        name_column_width = max([4] + [len(v) for k,v in iteritems(names)]) + 2

        print "Instances"
        print "---------"
        print "%-12s %-15s %-*s %s" % ("ID",
                                       "STATE",
                                       name_column_width, "NAME",
                                       "DESCRIPTION")
        for state in instance_health:
            print "%-12s %-15s %-*s %s" % (state.instance_id,
                                           state.state,
                                           name_column_width, names[state.instance_id],
                                           state.description)

        print
def create(elb, name, zones, listeners):
    """Create an ELB named <name> in *zones* with the given listeners.

    Each listener is "in,out,proto"; HTTPS listeners carry a fourth
    comma-separated field, the SSL certificate ARN.
    """
    parsed = []
    for spec in listeners:
        parts = spec.split(",")
        entry = (int(parts[0]), int(parts[1]), parts[2])
        if parts[2] == 'HTTPS':
            entry = entry + (parts[3],)
        parsed.append(entry)
    elb.create_load_balancer(name, zones, parsed)
    return get(elb, name)
def delete(elb, name):
"""Delete this ELB"""
b = find_elb(elb, name)
if b:
b.delete()
print "Load Balancer %s deleted" % name
def add_instances(elb, name, instances):
    """Register *instances* with ELB *name* and print its new state."""
    balancer = find_elb(elb, name)
    if not balancer:
        return
    balancer.register_instances(instances)
    return get(elb, name)
def remove_instances(elb, name, instances):
    """Deregister *instances* from ELB *name* and print its new state."""
    balancer = find_elb(elb, name)
    if not balancer:
        return
    balancer.deregister_instances(instances)
    return get(elb, name)
def reap_instances(elb, name):
    """Remove terminated instances from elb <name>.

    An instance is reaped only when the ELB reports it OutOfService with the
    exact description used for terminated instances.
    """
    b = find_elb(elb, name)
    if b:
        for state in b.get_instance_health():
            if (state.state == 'OutOfService' and
                state.description == 'Instance is in terminated state.'):
                b.deregister_instances([state.instance_id])
        return get(elb, name)
def enable_zone(elb, name, zone):
    """Enable availability *zone* on ELB *name* and print its new state."""
    balancer = find_elb(elb, name)
    if not balancer:
        return
    balancer.enable_zones([zone])
    return get(elb, name)
def disable_zone(elb, name, zone):
    """Disable availability *zone* on ELB *name* and print its new state."""
    balancer = find_elb(elb, name)
    if not balancer:
        return
    balancer.disable_zones([zone])
    return get(elb, name)
def add_listener(elb, name, listeners):
    """Add listeners to a given load balancer.

    Each listener is "in,out,proto" or, for HTTPS, "in,out,HTTPS,cert-arn" —
    the same format the create command accepts.  (Consistency fix: the
    original only built 3-tuples, so HTTPS listeners could never carry
    their certificate here even though create supported it.)
    """
    l_list = []
    for l in listeners:
        l = l.split(",")
        if l[2] == 'HTTPS':
            # boto accepts a 4-tuple with the SSL certificate id/ARN.
            l_list.append((int(l[0]), int(l[1]), l[2], l[3]))
        else:
            l_list.append((int(l[0]), int(l[1]), l[2]))
    b = find_elb(elb, name)
    if b:
        b.create_listeners(l_list)
    return get(elb, name)
def rm_listener(elb, name, ports):
    """Delete the listeners bound to *ports* on ELB *name*."""
    balancer = find_elb(elb, name)
    if balancer:
        balancer.delete_listeners(ports)
    return get(elb, name)
if __name__ == "__main__":
    # readline is optional; importing it just improves interactive input.
    try:
        import readline
    except ImportError:
        pass
    import boto
    import sys
    from optparse import OptionParser
    from boto.mashups.iobject import IObject
    parser = OptionParser(version=VERSION, usage=usage)
    parser.add_option("-z", "--zone",
                      help="Operate on zone",
                      action="append", default=[], dest="zones")
    parser.add_option("-l", "--listener",
                      help="Specify Listener in,out,proto",
                      action="append", default=[], dest="listeners")
    parser.add_option("-r", "--region",
                      help="Region to connect to",
                      action="store", dest="region")
    # `options` is read as a module-level global by get() below.
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        sys.exit(1)

    if not options.region:
        elb = boto.connect_elb()
    else:
        import boto.ec2.elb
        elb = boto.ec2.elb.connect_to_region(options.region)
        check_valid_region(elb, options.region)
        print "%s" % (elb.region.endpoint)

    # Dispatch the first positional argument to the matching command handler.
    command = args[0].lower()
    if command in ("ls", "list"):
        list(elb)
    elif command == "get":
        get(elb, args[1])
    elif command == "create":
        if not options.listeners:
            print "-l option required for command create"
            sys.exit(1)
        if not options.zones:
            print "-z option required for command create"
            sys.exit(1)
        create(elb, args[1], options.zones, options.listeners)
    elif command == "delete":
        delete(elb, args[1])
    elif command in ("add", "put"):
        add_instances(elb, args[1], args[2:])
    elif command in ("rm", "remove"):
        remove_instances(elb, args[1], args[2:])
    elif command == "reap":
        reap_instances(elb, args[1])
    elif command in ("en", "enable"):
        enable_zone(elb, args[1], args[2])
    elif command == "disable":
        disable_zone(elb, args[1], args[2])
    elif command == "addl":
        if not options.listeners:
            print "-l option required for command addl"
            sys.exit(1)
        add_listener(elb, args[1], options.listeners)
    elif command == "rml":
        if not args[2:]:
            print "port required"
            sys.exit(2)
        rm_listener(elb, args[1], args[2:])
| [
"sy0414@gmail.com"
] | sy0414@gmail.com | |
cbdfece7cf5dd2fac0ad8e0e0cb8964e9e079a55 | ddf3d1d82def56f4c176e4b07b8ee164ca7d8698 | /glue/core/tests/test_state_objects.py | 9fded0c4b63235316f80fca8f01df90374744687 | [
"BSD-3-Clause"
] | permissive | sergiopasra/glue | 1b604d32f4a5098278a44ca3bb9ada5b19a499ca | c25a217a122a11818382672c99cb21f57a30636f | refs/heads/master | 2021-01-18T14:18:47.103223 | 2018-06-21T10:58:48 | 2018-06-21T10:58:48 | 24,183,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,059 | py | from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import assert_allclose
from glue.external.echo import CallbackProperty, ListCallbackProperty
from glue.core import Data, DataCollection
from glue.utils import nanmedian
from .test_state import clone
from ..state_objects import (State, StateAttributeLimitsHelper,
StateAttributeSingleValueHelper,
StateAttributeHistogramHelper)
class SimpleTestState(State):
    # Minimal State subclass used by the serialization tests below:
    # two scalar callback properties plus two list properties.
    a = CallbackProperty()
    b = CallbackProperty()
    flat = ListCallbackProperty()
    nested = ListCallbackProperty()
def test_state_serialization():
    """Round-trip a state (including a nested sub-state inside a list
    property) through clone() and check every value survives."""
    state1 = SimpleTestState()
    state1.a = 2
    state1.b = 'hello'
    state1.flat = [1, 3, 4]
    sub_state = SimpleTestState()
    sub_state.a = 3
    sub_state.b = 'blah'
    sub_state.flat = [1, 2]
    sub_state.nested = []
    # Mixed list: plain values plus a State instance.
    state1.nested = [1, 3, sub_state]
    state2 = clone(state1)
    assert state2.a == 2
    assert state2.b == 'hello'
    assert state2.flat == [1, 3, 4]
    assert state2.nested[0:2] == [1, 3]
    assert state2.nested[2].a == 3
    assert state2.nested[2].b == 'blah'
    assert state2.nested[2].flat == [1, 2]
    assert state2.nested[2].nested == []
class TestStateAttributeLimitsHelper():
    """Tests for StateAttributeLimitsHelper: limits follow the selected
    attribute, react to percentile/log changes, and are cached per attribute."""
    def setup_method(self, method):
        # x spans [-100, 100]; y spans [2, 3] — distinct ranges so the tests
        # can tell which attribute the limits were computed from.
        self.data = Data(x=np.linspace(-100, 100, 10000),
                         y=np.linspace(2, 3, 10000), label='test_data')
        self.data_collection = DataCollection([self.data])
        class SimpleState(State):
            layer = CallbackProperty()
            comp = CallbackProperty()
            lower = CallbackProperty()
            upper = CallbackProperty()
            log = CallbackProperty(False)
            scale = CallbackProperty(100)
        self.state = SimpleState()
        self.helper = StateAttributeLimitsHelper(self.state, attribute='comp',
                                                 lower='lower', upper='upper',
                                                 percentile='scale', log='log')
        self.state.data = self.data
        self.state.comp = self.data.id['x']
        self.x_id = self.data.visible_components[0]
        self.y_id = self.data.visible_components[1]
    def test_minmax(self):
        assert self.helper.lower == -100
        assert self.helper.upper == +100
    def test_change_attribute(self):
        # Switching attribute recomputes the limits for the new component.
        self.helper.attribute = self.y_id
        assert self.helper.lower == 2
        assert self.helper.upper == 3
        self.helper.attribute = self.x_id
        assert self.helper.lower == -100
        assert self.helper.upper == +100
    def test_change_percentile(self):
        # Changing scale mode updates the limits
        self.helper.percentile = 99.5
        assert_allclose(self.helper.lower, -99.5)
        assert_allclose(self.helper.upper, +99.5)
        self.helper.percentile = 99
        assert_allclose(self.helper.lower, -99)
        assert_allclose(self.helper.upper, +99)
        self.helper.percentile = 90
        assert_allclose(self.helper.lower, -90)
        assert_allclose(self.helper.upper, +90)
        # When switching to custom, the last limits are retained
        self.helper.percentile = "Custom"
        assert_allclose(self.helper.lower, -90)
        assert_allclose(self.helper.upper, +90)
    def test_percentile_cached(self):
        # Make sure that if we change scale and change attribute, the scale
        # modes are cached on a per-attribute basis.
        self.helper.percentile = 99.5
        self.state.comp = self.y_id
        assert self.helper.percentile == 100
        self.helper.percentile = 99
        self.state.comp = self.x_id
        assert self.helper.percentile == 99.5
        self.state.comp = self.y_id
        assert self.helper.percentile == 99
    def test_flip_button(self):
        self.helper.flip_limits()
        assert self.helper.lower == +100
        assert self.helper.upper == -100
        # Make sure that values were re-cached when flipping
        self.state.comp = self.y_id
        assert self.helper.lower == 2
        assert self.helper.upper == 3
        self.state.comp = self.x_id
        assert self.helper.lower == +100
        assert self.helper.upper == -100
    def test_manual_edit(self):
        # Make sure that values are re-cached when edited manually
        self.helper.percentile = "Custom"
        self.state.lower = -122
        self.state.upper = 234
        self.helper.log = True
        assert self.helper.lower == -122
        assert self.helper.upper == 234
        assert self.helper.log
        self.state.comp = self.y_id
        assert self.helper.lower == 2
        assert self.helper.upper == 3
        assert not self.helper.log
        self.state.comp = self.x_id
        assert self.helper.lower == -122
        assert self.helper.upper == 234
        assert self.helper.log
class TestStateAttributeSingleValueHelper():
    """Tests for StateAttributeSingleValueHelper using nanmedian: the value
    tracks the selected attribute and manual edits are cached per attribute."""
    def setup_method(self, method):
        # median(x) = -35, median(y) = 2.5 — distinct, easy-to-check values.
        self.data = Data(x=np.linspace(-100, 30, 9999),
                         y=np.linspace(2, 3, 9999), label='test_data')
        self.data_collection = DataCollection([self.data])
        class SimpleState(State):
            layer = CallbackProperty()
            comp = CallbackProperty()
            val = CallbackProperty()
        self.state = SimpleState()
        self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp',
                                                      function=nanmedian, value='val')
        self.state.data = self.data
        self.state.comp = self.data.id['x']
        self.x_id = self.data.visible_components[0]
        self.y_id = self.data.visible_components[1]
    def test_value(self):
        assert self.helper.value == -35.
    def test_change_attribute(self):
        self.helper.attribute = self.y_id
        assert self.helper.value == 2.5
        self.helper.attribute = self.x_id
        assert self.helper.value == -35
    def test_manual_edit(self):
        # A hand-set value must survive switching away and back.
        self.state.val = 42.
        assert self.helper.value == 42
        self.state.comp = self.y_id
        assert self.helper.value == 2.5
        self.state.comp = self.x_id
        assert self.helper.value == 42
class TestStateAttributeHistogramHelper():
    """Tests for StateAttributeHistogramHelper: default bin counts for
    numerical vs categorical components, bin-count clamping, and per-attribute
    caching of manually edited ranges."""
    def setup_method(self, method):
        # x is numerical; y is categorical with 4 distinct values (a, d, e, f).
        self.data = Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1, 2.3],
                         y=['a', 'f', 'd', 'e', 'f', 'f', 'a'], label='test_data')
        self.data_collection = DataCollection([self.data])
        class SimpleState(State):
            layer = CallbackProperty()
            comp = CallbackProperty()
            x_min = CallbackProperty()
            x_max = CallbackProperty()
            n_bin = CallbackProperty()
        self.state = SimpleState()
        self.helper = StateAttributeHistogramHelper(self.state, attribute='comp',
                                                    lower='x_min', upper='x_max', n_bin='n_bin')
        self.state.data = self.data
    def test_default_numerical(self):
        self.state.comp = self.data.id['x']
        assert self.state.x_min == -3.2
        assert self.state.x_max == 7.2
        assert self.state.n_bin == 15
    def test_default_categorical(self):
        # Categorical: one bin per category, range padded by half a bin.
        self.state.comp = self.data.id['y']
        assert self.state.x_min == -0.5
        assert self.state.x_max == 3.5
        assert self.state.n_bin == 4
    def test_hitting_limits(self):
        # FIXME: here we modify the internal defaults rather than making a new
        # state helper, but this could be improved
        self.helper._default_n_bin = 4
        self.helper._max_n_bin = 3
        self.state.comp = self.data.id['x']
        assert self.state.x_min == -3.2
        assert self.state.x_max == 7.2
        assert self.state.n_bin == 4
        self.state.comp = self.data.id['y']
        assert self.state.x_min == -0.5
        assert self.state.x_max == 3.5
        assert self.state.n_bin == 3
    def test_caching(self):
        # Manually edited range/bins must be restored per attribute.
        self.state.comp = self.data.id['x']
        self.state.x_min = 2
        self.state.x_max = 7
        self.state.n_bin = 8
        self.state.comp = self.data.id['y']
        self.state.x_min = 1.5
        self.state.x_max = 3.5
        self.state.n_bin = 3
        self.state.comp = self.data.id['x']
        assert self.state.x_min == 2
        assert self.state.x_max == 7
        assert self.state.n_bin == 8
        self.state.comp = self.data.id['y']
        assert self.state.x_min == 1.5
        assert self.state.x_max == 3.5
        assert self.state.n_bin == 3
def test_histogram_helper_common_n_bin():
    """When common_n_bin is toggled on, numerical components share one bin
    count (categorical components keep their own); toggling it off restores
    per-attribute values."""
    data = Data(x=[-3.2, 4.3, 2.2],
                y=['a', 'f', 'd'],
                z=[1.1, 2.3, 1.2],
                label='test_data')
    class SimpleState(State):
        layer = CallbackProperty()
        comp = CallbackProperty()
        x_min = CallbackProperty()
        x_max = CallbackProperty()
        n_bin = CallbackProperty()
        common = CallbackProperty()
    state = SimpleState()
    helper = StateAttributeHistogramHelper(state, attribute='comp',
                                           lower='x_min', upper='x_max', n_bin='n_bin',
                                           common_n_bin='common')
    state.data = data
    # common is off: each numerical attribute keeps its own bin count.
    state.comp = data.id['x']
    state.n_bin = 9
    state.comp = data.id['y']
    assert state.n_bin == 3
    state.comp = data.id['z']
    assert state.n_bin == 15
    state.n_bin = 12
    # common on: the current bin count is shared by numerical attributes.
    state.common = True
    state.comp = data.id['x']
    assert state.n_bin == 12
    state.n_bin = 11
    state.comp = data.id['y']
    assert state.n_bin == 3
    state.comp = data.id['z']
    assert state.n_bin == 11
    # common off again: edits no longer propagate across attributes.
    state.common = False
    state.n_bin = 13
    state.comp = data.id['x']
    assert state.n_bin == 11
def test_histogram_helper_common_n_bin_active():
    # Make sure that common_n_bin works as expected if True from start
    data = Data(x=[-3.2, 4.3, 2.2],
                y=['a', 'f', 'd'],
                z=[1.1, 2.3, 1.2],
                label='test_data')
    class SimpleState(State):
        layer = CallbackProperty()
        comp = CallbackProperty()
        x_min = CallbackProperty()
        x_max = CallbackProperty()
        n_bin = CallbackProperty()
        common = CallbackProperty(True)
    state = SimpleState()
    helper = StateAttributeHistogramHelper(state, attribute='comp',
                                           lower='x_min', upper='x_max', n_bin='n_bin',
                                           common_n_bin='common')
    state.data = data
    # Shared from the start: the count set on x carries over to z.
    state.comp = data.id['x']
    state.n_bin = 9
    state.comp = data.id['z']
    assert state.n_bin == 9
    state.n_bin = 12
    state.common = True
    state.comp = data.id['x']
    assert state.n_bin == 12
    state.n_bin = 11
    # Categorical attributes are exempt from the shared bin count.
    state.comp = data.id['y']
    assert state.n_bin == 3
    state.comp = data.id['z']
    assert state.n_bin == 11
    state.common = False
    state.n_bin = 13
    state.comp = data.id['x']
    assert state.n_bin == 11
def test_limits_helper_initial_values():
    # Regression test for a bug that occurred if the limits cache was empty
    # but some attributes were set to values - in this case we don't want to
    # override the existing values.
    data = Data(x=np.linspace(-100, 100, 10000),
                y=np.linspace(2, 3, 10000), label='test_data')
    class SimpleState(State):
        layer = CallbackProperty()
        comp = CallbackProperty()
        lower = CallbackProperty()
        upper = CallbackProperty()
    state = SimpleState()
    # Limits are set BEFORE the helper is attached; they must be preserved.
    state.lower = 1
    state.upper = 2
    state.comp = data.id['x']
    helper = StateAttributeLimitsHelper(state, attribute='comp',
                                        lower='lower', upper='upper')
    assert helper.lower == 1
    assert helper.upper == 2
class DatetimeState(State):
    # Single-property state used to test serialization of numpy datetime64.
    a = CallbackProperty()
def test_state_serialization_datetime64():
    """np.datetime64 values must survive a clone() round-trip."""
    state1 = DatetimeState()
    state1.a = np.datetime64(100, 'D')
    state2 = clone(state1)
    assert state2.a == np.datetime64(100, 'D')
def test_nan_inf_minmax():
    """Limits must ignore NaN/inf; log mode additionally excludes
    non-positive values from the lower limit."""
    data = Data(x=[3, 1, -2, np.inf, np.nan], label='test_data')
    class SimpleState(State):
        layer = CallbackProperty()
        comp = CallbackProperty()
        lower = CallbackProperty()
        upper = CallbackProperty()
        percentile = CallbackProperty()
        log = CallbackProperty()
    state = SimpleState()
    helper = StateAttributeLimitsHelper(state, attribute='comp',  # noqa
                                        lower='lower', upper='upper',
                                        percentile='percentile', log='log')
    state.data = data
    state.comp = data.id['x']
    # inf and nan are excluded from the finite min/max.
    assert state.lower == -2
    assert state.upper == +3
    state.log = True
    # In log mode the smallest positive finite value becomes the lower limit.
    assert state.lower == +1
    assert state.upper == +3
    state.log = False
    state.percentile = 99
    assert_allclose(state.lower, -1.97)
    assert_allclose(state.upper, +2.98)
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
fcd45042b397c3fd5d3c2009a394807a3bdf8735 | 1b5f653955779f45e78ca6dda925518779d09e8f | /submissions/1029.py | 509d2f71c2c5d9cfde862b3fbe5d8ef3da0849b1 | [] | no_license | LeonardoSaid/uri-py-solutions | ad285f552934ead54ad2410e23113e84b0724f72 | 43c10c0e99e99d22b4b5ae2871e5d897f8823b42 | refs/heads/master | 2020-08-11T00:28:48.661578 | 2020-04-23T20:21:39 | 2020-04-23T20:21:39 | 214,453,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | #fast fib using memorization (https://stackoverflow.com/questions/18172257/efficient-calculation-of-fibonacci-series)
def fib(n, computed={0: 0, 1: 1}):
    """Memoized Fibonacci: fib(0) == 0, fib(1) == 1.

    The mutable default dict is intentional — it is the shared memo table
    that persists across calls, making repeated queries O(1).
    """
    if n in computed:
        return computed[n]
    computed[n] = fib(n - 1, computed) + fib(n - 2, computed)
    return computed[n]
def fib_numcalls(n):
    """Number of function calls a NAIVE recursive fib(n) would make.

    Closed form: calls(n) = 2*fib(n+1) - 2 (root call excluded).
    """
    return 2 * (fib(n + 1) - 1)
# URI/BEE 1029 driver: first line is the number of queries, then one n per
# line; print fib(n) and the naive-recursion call count for each.
for i in range(int(input())):
    x = int(input())
    print('fib(%d) = %d calls = %d' % (x, fib_numcalls(x), fib(x)))
| [
"noreply@github.com"
] | LeonardoSaid.noreply@github.com |
da897abaf6951a947d5ed46d46c2df569d6e8f84 | b21e073975c0f7a4f94c9f3523b8f5dcbf98a521 | /pt/105/python/main.py | 392d9a0de4cfd4ca9168a1beb26056c80b83cde8 | [
"MIT"
] | permissive | franciscogomes2020/exercises | 3ed6877f945463ed01c7fcd55271689171b0ad9d | 8b33c4b9349a9331e4002a8225adc2a482c70024 | refs/heads/master | 2023-07-04T15:54:38.919185 | 2021-08-19T20:03:54 | 2021-08-19T20:03:54 | 396,992,428 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | # Faça um programa que tenha uma função notas() que pode receber várias notas de alunos e vai retornar um dicionário com as seguintes informações:
# - Quantidade de notas
# - A maior nota
# - A menor nota
# - A média da turma
# - A situação (opcional)
# Adicione também as docstrings dessa função para consulta pelo desenvolvedor.
| [
"71292537+franciscogomes2020@users.noreply.github.com"
] | 71292537+franciscogomes2020@users.noreply.github.com |
5e126dbaf3ae10daba0e35b740c00f217bfdbb10 | d19cbf8a0483c17a9d9779535b99bd340c4c5712 | /application/modules/transfert/api_trans.py | 5c8549d57aed5f0a1f80b5b53d9fbad165cc2e66 | [] | no_license | wilrona/CarryUp | 1273eb7f0432f1cc33410853f4ebf3940c705b1c | 3d4f65f45bf7859d10f2938559447559f3a5fa0a | refs/heads/master | 2022-12-10T15:16:22.096920 | 2020-08-31T13:30:29 | 2020-08-31T13:30:29 | 291,722,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,050 | py | __author__ = 'User'
from ...modules import *
from ..bcmd.models_bcmd import Documents, LigneDoc
from ..article.models_item import Articles, Variantes
from ..magasin.models_mag import Magasins
from ..compte.models_compte import Comptes
prefix = Blueprint('api_transfert', __name__)
def make_public(data):
    """Serialize a document dict for the API: stringify 'id' (adding the
    detail-view URL) and translate the numeric 'etat' into a display name."""
    public = {}
    etat_labels = {2: 'Reception partielle', 3: 'Terminee', 4: 'Annulation'}
    for key in data:
        if key == 'id':
            public['id'] = str(data['id'])
            public['uri_view'] = url_for('transfert.view', data_id=data['id'], _external=True)
        else:
            public[key] = data[key]
        if key == 'etat':
            # Anything outside {2, 3, 4} is reported as pending.
            public['etat_name'] = etat_labels.get(public['etat'], 'En attente')
    return public
def make_public_variante(data):
new_task = {}
for field in data:
if field == 'id':
new_task['id'] = str(data['id'])
else:
new_task[field] = data[field]
if field == 'name':
new_task['name_variante'] = data['article_id'].name
if len(data['article_id'].variantes) > 1 :
new_task['name_variante'] += ' ('+new_task['name']+')'
return new_task
@prefix.route('/<objectid:compte_id>', methods=['GET'])
def index(compte_id):
sort = request.args.get('sort')
order = request.args.get('order')
q = str(request.args.get('q'))
try:
page = int(request.args.get('page', 1))
except ValueError:
page = 1
offset = 0
limit = 10
if request.args.get('per'):
limit = int(request.args.get('per'))
if page > 1:
offset = ((page - 1) * limit)
if q is not None:
datas = Documents.objects(Q(compte=compte_id) & Q(reference__icontains=q) & Q(type_transaction=1))
else:
datas = Documents.objects(Q(compte=compte_id) & Q(type_transaction=1))
datas = datas.skip(offset).limit(limit)
order_by = ''
if order == 'desc':
order_by += '-'
if sort is not None:
order_by += sort
datas.order_by(order_by)
count = Documents.objects(Q(compte=compte_id) & Q(type_transaction=1)).count() / limit
return jsonify({'data' : [make_public(data) for data in datas], 'total_page': count, 'order': order, 'sort': sort })
@prefix.route('/check/achat/', methods=['POST'])
def check():
article_id = request.json['id']
quantite = request.json['quantite']
magasin_id = request.args.get('magasin_origine')
info = {}
if article_id :
variante = Variantes.objects.get(id=article_id)
info['name'] = variante.article_id.name
if len(variante.article_id.variantes) > 1 :
info['name'] += ' ('+variante.name+')'
info['id'] = article_id
info['magasin'] = variante.MagVarianteID()
info['stock'] = variante.stock_magasin(magasin_id)
info['quantite'] = int(quantite)
return jsonify(info)
@prefix.route('/all/achat/<objectid:compte_id>/', methods=['GET'])
def allArticle(compte_id):
datas = []
magasin_origine_id = request.args.get('magasin_origine')
magasin_destina_id = request.args.get('magasin_destina')
compte = Comptes.objects.get(id=compte_id)
magasin_origine = None
if magasin_origine_id:
magasin_origine = Magasins.objects.get(id=magasin_origine_id)
magasin_destina = None
if magasin_destina_id:
magasin_destina = Magasins.objects.get(id=magasin_destina_id)
if magasin_destina and magasin_origine:
articles = Articles.objects(Q(compte=compte) & Q(type_article=0))
for article in articles:
for variante in article.variantes:
if magasin_destina in variante.MagVariante() and magasin_origine in variante.MagVariante():
datas.append(variante)
return jsonify({'data': [make_public_variante(data) for data in datas]})
@prefix.route('/ligne/<objectid:compte_id>/<objectid:item_id>', methods=['GET'])
@prefix.route('/ligne/<objectid:compte_id>/', methods=['GET'])
def ligne(compte_id, item_id=None):
data = []
currentSelect = []
if item_id :
docs = Documents.objects.get(id=item_id)
for item in docs.ligne_data:
magasin_id = request.args.get('magasin_origine')
matiere = {}
matiere['id'] = str(item.variante_id.id)
matiere['name'] = item.article_id.name
if len(item.article_id.variantes) > 1 :
matiere['name'] += ' ('+item.variante_id.name+')'
matiere['quantite'] = item.quantite
matiere['magasin'] = item.variante_id.MagVarianteID()
matiere['stock'] = item.old_stock
data.append(matiere)
currentSelect.append(str(item.variante_id.id))
return jsonify({'data': data, 'currentSelect': currentSelect})
| [
"wilrona@gmail.com"
] | wilrona@gmail.com |
b5df12202c5be1e05f8601f51ec94e42fcf45652 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_194/ch29_2019_03_11_18_45_05_167488.py | 4861c2cff3d2bee950f8c15d4e416cd85e3bd815 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | def calcula_aumento(x):
if x > 1250:
y == 1.10*x - x
elif x <= 1250 and x > 0:
y == 1.15*x - x
return y | [
"you@example.com"
] | you@example.com |
902c76802cddf915b918bec8411079d44a6b97fe | 23b5337bf410415b7b150e3ad60cafc1578a0441 | /05-Databases/03-Databases-in-Views/forms.py | 46d0f70c26385f98a9d58e484797ab8fb4d5fd4c | [] | no_license | VerdantFox/flask_course | b8de13ad312c14229f0c3bc2af70e8609a3b00fb | 47b167b54bc580734fa69fc1a2d7e724adfb9610 | refs/heads/master | 2021-09-10T05:01:47.385859 | 2020-02-24T21:07:05 | 2020-02-24T21:07:05 | 241,973,705 | 0 | 0 | null | 2021-09-08T01:40:59 | 2020-02-20T19:40:42 | Python | UTF-8 | Python | false | false | 324 | py | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField
class AddForm(FlaskForm):
name = StringField("Name of Puppy:")
submit = SubmitField("Add Puppy")
class DelForm(FlaskForm):
id = IntegerField("Id Number of Puppy to Remove:")
submit = SubmitField("Remove Puppy")
| [
"verdantfoxx@gmail.com"
] | verdantfoxx@gmail.com |
6dc8f8821937127ecb968d5c8e6366ad6ad177f2 | cd23b0457bc02a60b89f1f52783e56cc36d85b5e | /mutl_process/thread_context.py | 0943d30e34be0cd4bf6b3790559039473f0fc2f9 | [] | no_license | cluo/learingPython | 65c7068613e1a2ae0178e23770503043d9278c45 | 54609288e489047d4dd1dead5ac142f490905f0e | refs/heads/master | 2020-04-01T13:04:15.981758 | 2015-02-23T13:21:31 | 2015-02-23T13:21:31 | 28,440,969 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | import threading
import logging
logging.basicConfig(
level=logging.DEBUG,
format = '(%(threadName)-10s) %(message)s'
)
def worker_with(lock):
with lock:
logging.debug('Lock acquired via with')
def worker_no_with(lock):
lock.acquire()
try:
logging.debug('Lock acquired directly')
finally:
lock.release()
lock = threading.Lock()
w = threading.Thread(target=worker_with, args=(lock,))
nw = threading.Thread(target=worker_no_with, args=(lock,))
w.start()
nw.start() | [
"luosheng@meizu.com"
] | luosheng@meizu.com |
4bf2511aa7bcff09b14328f19d375bbb949a767e | 3235145c84c48535bbf27dabfb3faa7359ed6fef | /google-cloud-sdk/lib/surface/ml_engine/models/create.py | 31cf5ef3f4d2f8ba6cae149bbb0830eb0e2c4ab8 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | paceuniversity/CS3892017team1 | b69fb10f5194f09748cd5bca48901e9bd87a55dc | f8e82537c84cac148f577794d2299ea671b26bc2 | refs/heads/master | 2021-01-17T04:34:04.158071 | 2017-05-09T04:10:22 | 2017-05-09T04:10:22 | 82,976,622 | 2 | 8 | null | 2020-07-25T09:45:47 | 2017-02-23T22:13:04 | Python | UTF-8 | Python | false | false | 2,203 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ml-engine models create command."""
from googlecloudsdk.api_lib.ml import models
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml import flags
from googlecloudsdk.command_lib.ml import models_util
def _AddCreateArgs(parser):
"""Get arguments for the `ml-engine models create` command."""
flags.GetModelName().AddToParser(parser)
parser.add_argument(
'--regions',
metavar='REGION',
type=arg_parsers.ArgList(min_length=1),
help="""\
The Google Cloud region where the model will be deployed (currently only a
single region is supported).
Will soon be required, but defaults to 'us-central1' for now.
""")
parser.add_argument(
'--enable-logging',
action='store_true',
help=('If set, enables StackDriver Logging for online prediction.'))
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class CreateBeta(base.CreateCommand):
"""Create a new Cloud ML Engine model."""
@staticmethod
def Args(parser):
_AddCreateArgs(parser)
def Run(self, args):
models_util.Create(models.ModelsClient('v1beta1'), args.model,
regions=args.regions, enable_logging=args.enable_logging)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class CreateGa(base.CreateCommand):
"""Create a new Cloud ML Engine model."""
@staticmethod
def Args(parser):
_AddCreateArgs(parser)
def Run(self, args):
models_util.Create(models.ModelsClient('v1'), args.model,
regions=args.regions, enable_logging=args.enable_logging)
| [
"hanastanojkovic@gmail.com"
] | hanastanojkovic@gmail.com |
4dbadf4946762250b2114648b1717804055f5d63 | 032021f2604815f8a71bbfb55af41d7fc8cfab9c | /crawlib/tests/dummy_site_crawler/sql_backend/db.py | e5c00da079bfb355ff6967330726daba635f7215 | [
"MIT"
] | permissive | MacHu-GWU/crawlib-project | ddf5ad8f9971c87b51c618860be967e11f8f9700 | b2963b7f6a36ee7f1ef95a6bf9d8cb746d9da991 | refs/heads/master | 2020-04-18T03:11:12.219272 | 2019-12-31T03:34:10 | 2019-12-31T03:34:10 | 66,882,484 | 1 | 1 | MIT | 2019-12-31T03:34:11 | 2016-08-29T21:35:35 | Python | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy_mate import EngineCreator
from .config_init import config
engine = EngineCreator(
host=config.DB_HOST.get_value(),
port=config.DB_PORT.get_value(),
database=config.DB_DATABASE.get_value(),
username=config.DB_USERNAME.get_value(),
password=config.DB_PASSWORD.get_value(),
).create_postgresql_psycopg2()
Session = sessionmaker(bind=engine)
| [
"husanhe@gmail.com"
] | husanhe@gmail.com |
bb954328311c88612b3a3eab48ae0a1f1f911e36 | e0fa466605d4031260fb2401244ad2de2a07c393 | /ch16/tsUclnt.py | 1dd0749fd68163e671e4ef314af51b6ecde08014 | [] | no_license | saturnisbig/corepynotes | aef2f9ed8d387d51ccb64bb264891be6c8f909d7 | c6d883b57e5ffc485f997e87178243a8b71441c2 | refs/heads/master | 2020-04-29T10:36:15.608223 | 2019-03-17T08:19:39 | 2019-03-17T08:19:39 | 176,066,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
from socket import *
REMOTE_HOST = '66.112.209.81'
PORT = 21566
BUFSIZE = 1024
ADDR = (REMOTE_HOST, PORT)
udpCliSock = socket(AF_INET, SOCK_DGRAM)
while True:
data = raw_input('> ')
if not data:
break
udpCliSock.sendto(data, ADDR)
data, addr = udpCliSock.recvfrom(BUFSIZE)
if not data:
break
print data
udpCliSock.close()
| [
"i.kenting@gmail.com"
] | i.kenting@gmail.com |
faa137f479e796189e99c7f83a8f08674f538903 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/51/usersdata/75/20050/submittedfiles/listas.py | 7cec7cbdbc35e7ae3247f73e35ff47a5122308f5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # -*- coding: utf-8 -*-
from __future__ import division
def maior_grau (lista):
if i in range (0,len(lista)-1,1):
diferenca=lista[i]-lista[i+1]
if diferenca<0:
diferenca=diferenca*(-1)
if diferenca>diferenca:
return diferenca
n=int(input('Digite a quantidade de termos da lista:'))
while n<=2:
n=int(input('Digite a quantidade de termos da lista:'))
a=[]
for i in range (0,n,1):
a.append(input('Digite os elementos da lista:'))
print maior_grau(a)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
003c2447926534f7845b1498c135b13d428a1509 | 481a65ea079ca021e06f0fd4efa707ec91fa0131 | /setup.py | 02b0c9463f47bbe61561d30c4e464ba0ec6700cc | [] | no_license | maejie/scholarNetwork | 3c51576c7bfc2623b6ad4ad83f6add3dc9546c9c | 11ccd802f9825cfbcfad70b864ac28f2634df593 | refs/heads/master | 2020-02-26T13:32:28.348775 | 2015-05-14T03:43:11 | 2015-05-14T03:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='scholarNetwork',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0.2',
description='Coauthor-Network of your Google Scholar',
long_description=long_description,
# The project's main homepage.
url='https://github.com/chengjun/scholarNetwork',
# Author details
author='Cheng-Jun Wang & Lingfei Wu',
author_email='wangchj04@gmail.com; wwlf850927@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='Google Scholar',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['beautifulsoup4', 'networkx', 'matplotlib'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require = {
'dev': ['check-manifest'],
'test': ['coverage'],
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'sample=sample:main',
],
},
)
| [
"wangchj04@gmail.com"
] | wangchj04@gmail.com |
cf8c8154324e44e0388b650a8c2a31eb18d9c4c3 | e4d3d4d93b9d575a87126bd7d2fb20870d83684c | /manage.py | 5aff1a887b296848e41adbc73c42ef9c7d381455 | [] | no_license | ByAvatarOff/Tourizm-django-site | 8a02cb9d8185ab8ae77480a1c7c86a25d226fe0f | dd2066b379bbee3d6e2ce4d24c81556c85fc3d49 | refs/heads/master | 2022-12-21T23:37:37.276395 | 2020-10-01T13:00:21 | 2020-10-01T13:00:21 | 300,279,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Gosha.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"tsp7439@gmail.com"
] | tsp7439@gmail.com |
56f64bbca3a06b504a457f609ba0d51093b8b8fb | 8ed4bf9fbead471c9e5f88e4d18ac432ec3d628b | /hackerrank/algorithm/implementation/kangaroo.py | 1352c7d1ad39190658665eb1a53de08a7bdc6bfe | [] | no_license | hizbul25/programming_problem | 9bf26e49ed5bb8c9c829d00e765c9401222fb35c | 2acca363704b993ffe5f6c2b00f81a4f4eca7204 | refs/heads/master | 2021-01-10T22:28:26.105787 | 2018-01-21T16:45:45 | 2018-01-21T16:45:45 | 65,394,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import sys
x1, v1, x2, v2 = input().strip().split(' ')
x1, v1, x2, v2 = [int(x1), int(v1), int(x2), int(v2)]
if v1 > v2 and ((x2 - x1) % (v2 - v1) == 0):
print("YES")
else:
print("NO")
| [
"hizbul.ku@gmail.com"
] | hizbul.ku@gmail.com |
573a830546c745be9960037efcde0ebaa64e58a4 | f9a8aecd848bcc79052ca068cc73850a63e6dfcf | /inference/model/latent_features/joint/__init__.py | faa17fa207426457c747d56774c5c4ab99d36d90 | [
"MIT"
] | permissive | khoehlein/fV-SRN-Ensemble-Compression | 537981a1cd31565bb504b00ca730e8bf87e7e0ef | 2780b83d2594c1b38b57ab58087b46bee4b61e8b | refs/heads/master | 2023-04-17T09:42:48.037397 | 2022-09-07T08:55:01 | 2022-09-07T08:55:01 | 532,983,107 | 4 | 1 | null | 2022-09-06T14:39:26 | 2022-09-05T16:43:24 | Python | UTF-8 | Python | false | false | 81 | py | from .joint_latent_space import JointLatentFeatureVector, JointLatentFeatureGrid
| [
"kevin.hoehlein@tum.de"
] | kevin.hoehlein@tum.de |
232e12f3a83cc3ace60e4e2722e476b207515369 | 60e4baae4d6b323b3d3b656df3a7b0ea3ca40ef2 | /project/apps/community/forms.py | eaf8d51edeca2b91c5abcef583e9295120a73913 | [] | no_license | Burzhun/Big-django-project | a03a61a15ee75f49324ad7ea51372b6b013d1650 | 1a71f974b7b5399a45862711b5f858c0d4af50d2 | refs/heads/master | 2020-04-11T00:16:06.211039 | 2018-12-11T19:13:38 | 2018-12-11T19:13:38 | 161,381,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,514 | py | from django.contrib.auth import password_validation
from django.conf import settings
from django import forms
from .models import User
from .choices import COUNTRY_CHOICES
from django_resized.forms import ResizedImageField
from .fields import ReCaptchaField
from .widgets import ReCaptchaWidget
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': "Неверное подтверждение пароля",
'uf_rules_is_not_accepted': "Вы должны принять лицензионное соглашение",
}
password1 = forms.CharField(
label="Пароль",
min_length=6,
strip=False,
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label="Подтвердите пароль",
min_length=6,
widget=forms.PasswordInput,
strip=False,
help_text="Пароль должен быть не менее 6 символов",
)
email = forms.EmailField(
label="Email",
required=True,
)
first_name = forms.CharField(
label="Имя",
strip=False,
required=True,
)
last_name = forms.CharField(
label="Фамилия",
strip=False,
required=True,
)
uf_rules = forms.BooleanField(
label="Условия",
required=False
)
captcha = ReCaptchaField(widget=ReCaptchaWidget)
class Meta:
model = User
fields = ("username", "email", "password1", "password2", "first_name", "last_name")
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
self.instance.username = self.cleaned_data.get('username')
password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def clean_uf_rules(self):
uf_rules = self.cleaned_data.get("uf_rules")
if not uf_rules:
raise forms.ValidationError(
self.error_messages['uf_rules_is_not_accepted'],
code='uf_rules_is_not_accepted',
)
return uf_rules
def is_valid(self, *args, **kwargs):
return super(UserCreationForm, self).is_valid(*args, **kwargs)
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class ProfileEditForm(forms.ModelForm):
error_messages = {
'password_mismatch': "Неверное подтверждение пароля",
}
password1 = forms.CharField(
label="Новый пароль",
min_length=6,
required=False,
strip=False,
widget=forms.PasswordInput(attrs={'class': 'form-control'}),
)
password2 = forms.CharField(
label="Повтор пароля",
min_length=6,
widget=forms.PasswordInput(attrs={'class': 'form-control'}),
required=False,
strip=False,
help_text="Пароль должен быть не менее 6 символов",
)
email = forms.EmailField(
label="Email",
required=True,
widget=forms.EmailInput(attrs={'class': 'form-control'})
)
first_name = forms.CharField(
label="Имя",
strip=False,
required=True,
widget=forms.TextInput(attrs={'class': 'form-control'})
)
last_name = forms.CharField(
label="Фамилия",
strip=False,
required=True,
widget=forms.TextInput(attrs={'class': 'form-control'})
)
is_public_email = forms.BooleanField(
required=False,
widget=forms.CheckboxInput(),
label="Показывать другим пользователям"
)
is_public_b_date = forms.BooleanField(
required=False,
widget=forms.CheckboxInput(),
label="Показывать другим пользователям"
)
is_public_has_children = forms.BooleanField(
required=False,
widget=forms.CheckboxInput(),
label="Показывать другим пользователям"
)
has_children = forms.ChoiceField(
choices=User.HAS_CHILDREN_CHOICES,
required=True,
label='Есть ли дети',
widget=forms.Select(attrs={'class': 'form-control chosen-select '})
)
b_date = forms.DateField(
required=False,
input_formats=['%d.%m.%Y'],
widget=forms.DateInput(attrs={
'class': 'form-control datepicker ',
'data-date-format': "dd.mm.yyyy",
'data-date-end-date': "30.01.2005",
}),
label='Дата рождения'
)
gender = forms.ChoiceField(
label='Пол',
choices=User.GENDER_CHOICES,
required=True,
widget=forms.Select(attrs={'class': 'form-control chosen-select '})
)
country = forms.ChoiceField(
required=False,
choices=COUNTRY_CHOICES,
label='Страна',
widget=forms.Select(attrs={
'class': 'form-control chosen-select ',
'data-placeholder': "Выбери страну..."
})
)
city = forms.CharField(
label='Город',
required=False,
widget=forms.TextInput(attrs={'class': 'form-control'})
)
notes = forms.CharField(
required=False,
label='О себе',
widget=forms.Textarea(attrs={'class': 'form-control'})
)
class Meta:
model = User
fields = (
"username", "email", "password1", "password2", "first_name",
"last_name", "is_public_email", "is_public_b_date", "is_public_has_children_choices",
"b_date", "gender", "country", "city", "notes"
)
def __init__(self, *args, **kwargs):
super(ProfileEditForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)
return password2
def is_valid(self, *args, **kwargs):
return super(ProfileEditForm, self).is_valid(*args, **kwargs)
def save(self, commit=True):
user = super(ProfileEditForm, self).save(commit=False)
if self.cleaned_data["password1"]:
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class ImageUploadForm(forms.ModelForm):
avatar = forms.ImageField(required=True)
class Meta:
model = User
fields = ('avatar', ) | [
"burjunov@yandex.ru"
] | burjunov@yandex.ru |
2bb31beed6b284eb1d71957618d1eb8c31c56355 | 035c7cfbd62a9c06f8dbbb4a92607cf2b2570689 | /src/systemidentification/coefficient_plot.py | 9c94331149a3134488ea65b674b5ea5794be7296 | [
"BSD-3-Clause",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | NikhilPappu/dissertation | 60001328c619caf5f966368ea4a87f3086a29129 | 0a93ada92fa1a964c9de89bcdb558c82a9ef252b | refs/heads/master | 2020-03-24T00:50:05.493962 | 2017-02-07T02:01:17 | 2017-02-07T02:01:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | #!/usr/bin/env python
import sys
sys.path.append('..')
from load_paths import read
import os
import cPickle
from numpy import linspace, sqrt, zeros
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib import rcParams
from bicycleid import data, plot, model
import dtk.bicycle
params = {'axes.labelsize': 8,
'axes.titlesize': 10,
'text.fontsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True}
rcParams.update(params)
dat = data.ExperimentalData()
coefPlot = plot.CoefficientPlot()
subDef = {}
subDef['Rider'] = ['Charlie', 'Jason', 'Luke']
subDef['Environment'] = ['Treadmill', 'Pavilion']
subDef['Maneuver'] = ['Balance', 'Track Straight Line',
'Balance With Disturbance', 'Track Straight Line With Disturbance']
subDef['Speed'] = ['1.4', '2.0', '3.0', '4.0', '4.92', '5.8', '7.0', '9.0']
subDef['MeanFit'] = 0.0
subDef['Duration'] = 0.0
subDat = dat.subset(**subDef)
speedRange = linspace(0.0, 10.0, num=50)
models = {rider: model.Whipple(rider).matrices(speedRange) for rider in ['Charlie']}
coefPlot.update_graph(subDat, models)
# now add the arm model
m = loadmat('../../data/extensions/armsAB-Charlie.mat', squeeze_me=True) # this is charlie at 101 speeds
inputMats = zeros((101, 4, 1))
for i, B in enumerate(m['inputMatrices']):
inputMats[i] = B[:, 1].reshape(4, 1)
for lab, ax in coefPlot.axes.items():
row, col = int(lab[-2]), int(lab[-1])
if lab[0] == 'a':
ax.plot(m['speed'], m['stateMatrices'][:, row - 1, col - 1], 'r')
elif lab[0] == 'b':
ax.plot(m['speed'], inputMats[:, row - 1, col - 1], 'r')
# now add the model identified from the runs with Luke on the Pavilion floor
# with the canonical realization
with open(read('pathToIdMat')) as f:
idMat = cPickle.load(f)
M, C1, K0, K2, H = idMat['L-P']
speeds = linspace(0, 10, num=50)
As = zeros((len(speeds), 4, 4))
Bs = zeros((len(speeds), 4, 1))
for i, v in enumerate(speeds):
A, B = dtk.bicycle.benchmark_state_space(M, C1, K0, K2, v, 9.81)
As[i] = A
Bs[i] = B[:, 1].reshape(4, 1)
for lab, ax in coefPlot.axes.items():
row, col = int(lab[-2]), int(lab[-1])
if lab[0] == 'a':
ax.plot(speeds, As[:, row - 1, col - 1], 'orange')
elif lab[0] == 'b':
ax.plot(speeds, Bs[:, row - 1, col - 1], 'orange')
width = 6.0
coefPlot.title.set_fontsize(10.0)
coefPlot.figure.set_figwidth(width)
goldenRatio = (sqrt(5) - 1.0) / 2.0
coefPlot.figure.set_figheight(6.0 * goldenRatio)
coefPlot.figure.savefig('../../figures/systemidentification/coefficients.pdf')
# this gtk backend failed when I tried to savefig a png, so I do this
os.system('convert -density 200x200 ../../figures/systemidentification/coefficients.pdf ../../figures/systemidentification/coefficients.png')
| [
"moorepants@gmail.com"
] | moorepants@gmail.com |
2327963633982b1d2c2485fd724170e257c0d587 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/ad_group_type.py | 13af772070562ce5a6957bdb7c6b267f66fab80e | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'AdGroupTypeEnum',
},
)
class AdGroupTypeEnum(proto.Message):
r"""Defines types of an ad group, specific to a particular
campaign channel type. This type drives validations that
restrict which entities can be added to the ad group.
"""
class AdGroupType(proto.Enum):
r"""Enum listing the possible types of an ad group."""
UNSPECIFIED = 0
UNKNOWN = 1
SEARCH_STANDARD = 2
DISPLAY_STANDARD = 3
SHOPPING_PRODUCT_ADS = 4
HOTEL_ADS = 6
SHOPPING_SMART_ADS = 7
VIDEO_BUMPER = 8
VIDEO_TRUE_VIEW_IN_STREAM = 9
VIDEO_TRUE_VIEW_IN_DISPLAY = 10
VIDEO_NON_SKIPPABLE_IN_STREAM = 11
VIDEO_OUTSTREAM = 12
SEARCH_DYNAMIC_ADS = 13
SHOPPING_COMPARISON_LISTING_ADS = 14
PROMOTED_HOTEL_ADS = 15
VIDEO_RESPONSIVE = 16
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
ff30f41fe00254920a0ae30820c764f2d75508fc | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_8015.py | 7c024cd497964fc77ff02bd55b75202e8c8baea2 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | # Using a struct as a function argument with the python ctypes module
# WCHAR ReparseTarget[1];
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
ab3c6b1ef110de9dfe5e006d756c82f119ae6354 | 72b00923d4aa11891f4a3038324c8952572cc4b2 | /python/test/socket/socket_sockpair.py | d3c8cc0437efa47725e06fd162ddc41a9c0dc352 | [] | no_license | taowuwen/codec | 3698110a09a770407e8fb631e21d86ba5a885cd5 | d92933b07f21dae950160a91bb361fa187e26cd2 | refs/heads/master | 2022-03-17T07:43:55.574505 | 2022-03-10T05:20:44 | 2022-03-10T05:20:44 | 87,379,261 | 0 | 0 | null | 2019-03-25T15:40:27 | 2017-04-06T02:50:54 | C | UTF-8 | Python | false | false | 505 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import os
parent, child = socket.socketpair()
pid = os.fork()
if pid:
print('in parent, sending message')
child.close()
parent.sendall(b'ping')
response = parent.recv(1024)
print('response from child:', response)
parent.close()
else:
print('in child, waiting for message')
parent.close()
message = child.recv(1024)
print('message from parent:', message)
child.sendall(b'pong')
child.close()
| [
"taowuwen@gmail.com"
] | taowuwen@gmail.com |
a51792d4ad92eaaec45651912b1cd8c976ed9d40 | dafaa64cf49c76ff00ef86d77f162f98279c0bc6 | /chef/tests/test_data_bag.py | 5cc2ae3321f922e1cc7d6cbb924cfbe9e84a4896 | [] | no_license | SeanOC/pychef | 93c08992d4a85b3002348aa588cf5e460e69402a | bc1b39586f567a5539b92570c4d38ceb02b23b6e | refs/heads/master | 2021-01-21T01:16:02.079784 | 2011-01-30T16:57:37 | 2011-01-30T16:57:37 | 1,309,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | from chef import DataBag, DataBagItem, Search
from chef.exceptions import ChefError
from chef.tests import ChefTestCase
class DataBagTestCase(ChefTestCase):
    """Integration tests covering data bag listing, item access and search."""

    def test_list(self):
        all_bags = DataBag.list()
        self.assertIn('test_1', all_bags)
        self.assertIsInstance(all_bags['test_1'], DataBag)

    def test_keys(self):
        bag = DataBag('test_1')
        expected_items = ['item_1', 'item_2']
        # Both keys() and plain iteration must expose the same item names.
        self.assertItemsEqual(bag.keys(), expected_items)
        self.assertItemsEqual(iter(bag), expected_items)

    def test_item(self):
        entry = DataBag('test_1')['item_1']
        self.assertEqual(entry['test_attr'], 1)
        self.assertEqual(entry['other'], 'foo')

    def test_search_item(self):
        self.assertIn('test_1', Search.list())
        query = Search('test_1')
        self.assertIn('item_1', query)
        self.assertIn('item_2', query)
        self.assertEqual(query['item_1']['raw_data']['test_attr'], 1)
        # The .object attribute materialises the row as a DataBagItem.
        bag_item = query['item_1'].object
        self.assertIsInstance(bag_item, DataBagItem)
        self.assertEqual(bag_item['test_attr'], 1)
| [
"noah@coderanger.net"
] | noah@coderanger.net |
31ead3576d9ce5a12d1a4e90600d913aa66b6126 | 70cdf0741a22c678401a306229003bf036ffe5a6 | /ocbind/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/__init__.py | 4054eb3c475093190086733a41aabe692f6bccdb | [] | no_license | zsblevins/nanog81-hackathon | 5001e034339d6b0c6452ae2474f06916bcd715cf | 1b64fd207dd69837f947094fbd6d6c1cea3a1070 | refs/heads/main | 2023-03-03T09:39:28.460000 | 2021-02-15T13:41:38 | 2021-02-15T13:41:38 | 336,698,856 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,830 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
# NOTE: auto-generated pyangbind binding; regenerate from the YANG model
# rather than editing this class by hand.
class igp_ldp_sync(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp-ldp-sync. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: OSPFv2 parameters relating to LDP/IGP synchronization
  """
  # Fixed attribute set; the double-underscore entries are name-mangled
  # per class by Python, so each generated class keeps its own storage.
  __slots__ = ('_path_helper', '_extmethods', '__config','__state',)

  _yang_name = 'igp-ldp-sync'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # A single positional argument, if given, is another binding object
    # whose changed elements are copied into this instance.
    self._path_helper = False
    self._extmethods = False
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # When attached to a parent tree, build the path recursively;
    # otherwise return the absolute schema path of this container.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['network-instances', 'network-instance', 'protocols', 'protocol', 'ospfv2', 'areas', 'area', 'interfaces', 'interface', 'mpls', 'igp-ldp-sync']

  def _get_config(self):
    """
    Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/config (container)

    YANG Description: Configuration parameters relating to LDP/IG
    synchronization.
    """
    return self.__config

  def _set_config(self, v, load=False):
    """
    Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/config (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config() directly.

    YANG Description: Configuration parameters relating to LDP/IG
    synchronization.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """config must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__config = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_config(self):
    # Restore the default (empty) config container.
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  def _get_state(self):
    """
    Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/state (container)

    YANG Description: Operational state variables relating to LDP/IGP
    synchronization
    """
    return self.__state

  def _set_state(self, v, load=False):
    """
    Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/state (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_state() directly.

    YANG Description: Operational state variables relating to LDP/IGP
    synchronization
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """state must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__state = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_state(self):
    # Restore the default (empty) state container.
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  # Public accessors via the classic property machinery; __builtin__ is
  # aliased at module level so this works on both Python 2 and 3.
  config = __builtin__.property(_get_config, _set_config)
  state = __builtin__.property(_get_state, _set_state)

  _pyangbind_elements = OrderedDict([('config', config), ('state', state), ])
from . import config
from . import state
# NOTE: auto-generated pyangbind binding (l2 module variant); this
# redefinition of igp_ldp_sync is intentional pyangbind output.
class igp_ldp_sync(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp-ldp-sync. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: OSPFv2 parameters relating to LDP/IGP synchronization
  """
  # Fixed attribute set; the double-underscore entries are name-mangled
  # per class by Python, so each generated class keeps its own storage.
  __slots__ = ('_path_helper', '_extmethods', '__config','__state',)

  _yang_name = 'igp-ldp-sync'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # A single positional argument, if given, is another binding object
    # whose changed elements are copied into this instance.
    self._path_helper = False
    self._extmethods = False
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # When attached to a parent tree, build the path recursively;
    # otherwise return the absolute schema path of this container.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['network-instances', 'network-instance', 'protocols', 'protocol', 'ospfv2', 'areas', 'area', 'interfaces', 'interface', 'mpls', 'igp-ldp-sync']

  def _get_config(self):
    """
    Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/config (container)

    YANG Description: Configuration parameters relating to LDP/IG
    synchronization.
    """
    return self.__config

  def _set_config(self, v, load=False):
    """
    Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/config (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config() directly.

    YANG Description: Configuration parameters relating to LDP/IG
    synchronization.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """config must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__config = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_config(self):
    # Restore the default (empty) config container.
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  def _get_state(self):
    """
    Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/state (container)

    YANG Description: Operational state variables relating to LDP/IGP
    synchronization
    """
    return self.__state

  def _set_state(self, v, load=False):
    """
    Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/state (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_state() directly.

    YANG Description: Operational state variables relating to LDP/IGP
    synchronization
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """state must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__state = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_state(self):
    # Restore the default (empty) state container.
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  # Public accessors via the classic property machinery; __builtin__ is
  # aliased at module level so this works on both Python 2 and 3.
  config = __builtin__.property(_get_config, _set_config)
  state = __builtin__.property(_get_state, _set_state)

  _pyangbind_elements = OrderedDict([('config', config), ('state', state), ])
from . import config
from . import state
# NOTE: auto-generated pyangbind binding; this repeated definition of
# igp_ldp_sync is intentional pyangbind output for another YANG context.
class igp_ldp_sync(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp-ldp-sync. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: OSPFv2 parameters relating to LDP/IGP synchronization
  """
  # Fixed attribute set; the double-underscore entries are name-mangled
  # per class by Python, so each generated class keeps its own storage.
  __slots__ = ('_path_helper', '_extmethods', '__config','__state',)

  _yang_name = 'igp-ldp-sync'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # A single positional argument, if given, is another binding object
    # whose changed elements are copied into this instance.
    self._path_helper = False
    self._extmethods = False
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # When attached to a parent tree, build the path recursively;
    # otherwise return the absolute schema path of this container.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['network-instances', 'network-instance', 'protocols', 'protocol', 'ospfv2', 'areas', 'area', 'interfaces', 'interface', 'mpls', 'igp-ldp-sync']

  def _get_config(self):
    """
    Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/config (container)

    YANG Description: Configuration parameters relating to LDP/IG
    synchronization.
    """
    return self.__config

  def _set_config(self, v, load=False):
    """
    Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/config (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config() directly.

    YANG Description: Configuration parameters relating to LDP/IG
    synchronization.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """config must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__config = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_config(self):
    # Restore the default (empty) config container.
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  def _get_state(self):
    """
    Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/state (container)

    YANG Description: Operational state variables relating to LDP/IGP
    synchronization
    """
    return self.__state

  def _set_state(self, v, load=False):
    """
    Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/state (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_state() directly.

    YANG Description: Operational state variables relating to LDP/IGP
    synchronization
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """state must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__state = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_state(self):
    # Restore the default (empty) state container.
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  # Public accessors via the classic property machinery; __builtin__ is
  # aliased at module level so this works on both Python 2 and 3.
  config = __builtin__.property(_get_config, _set_config)
  state = __builtin__.property(_get_state, _set_state)

  _pyangbind_elements = OrderedDict([('config', config), ('state', state), ])
from . import config
from . import state
# NOTE: auto-generated pyangbind binding (l2 module variant); this
# redefinition of igp_ldp_sync is intentional pyangbind output.
class igp_ldp_sync(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp-ldp-sync. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: OSPFv2 parameters relating to LDP/IGP synchronization
  """
  # Fixed attribute set; the double-underscore entries are name-mangled
  # per class by Python, so each generated class keeps its own storage.
  __slots__ = ('_path_helper', '_extmethods', '__config','__state',)

  _yang_name = 'igp-ldp-sync'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # A single positional argument, if given, is another binding object
    # whose changed elements are copied into this instance.
    self._path_helper = False
    self._extmethods = False
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # When attached to a parent tree, build the path recursively;
    # otherwise return the absolute schema path of this container.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['network-instances', 'network-instance', 'protocols', 'protocol', 'ospfv2', 'areas', 'area', 'interfaces', 'interface', 'mpls', 'igp-ldp-sync']

  def _get_config(self):
    """
    Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/config (container)

    YANG Description: Configuration parameters relating to LDP/IG
    synchronization.
    """
    return self.__config

  def _set_config(self, v, load=False):
    """
    Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/config (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config() directly.

    YANG Description: Configuration parameters relating to LDP/IG
    synchronization.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """config must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__config = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_config(self):
    # Restore the default (empty) config container.
    self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  def _get_state(self):
    """
    Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/state (container)

    YANG Description: Operational state variables relating to LDP/IGP
    synchronization
    """
    return self.__state

  def _set_state(self, v, load=False):
    """
    Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/igp_ldp_sync/state (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_state() directly.

    YANG Description: Operational state variables relating to LDP/IGP
    synchronization
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """state must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
        })

    self.__state = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_state(self):
    # Restore the default (empty) state container.
    self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)

  # Public accessors via the classic property machinery; __builtin__ is
  # aliased at module level so this works on both Python 2 and 3.
  config = __builtin__.property(_get_config, _set_config)
  state = __builtin__.property(_get_state, _set_state)

  _pyangbind_elements = OrderedDict([('config', config), ('state', state), ])
| [
"zblevins@netflix.com"
] | zblevins@netflix.com |
08c01252c948b5f3627cadfc766dcc902d5bbeae | 85b102bc9c0dcc04dd469297b32bad9e38065e28 | /backend/auth_app/serializers.py | 44c75c927350cf2d7ac29e284add714b855212dc | [] | no_license | ahrisagree/AHRIS | 60fc58279bf594ba9830e21df25aa7c3c90e6bb9 | 73c480b3d44231acfcc43c0292e0b514654aeb27 | refs/heads/master | 2023-06-06T11:55:33.100575 | 2021-06-29T06:26:08 | 2021-06-29T06:26:08 | 354,016,384 | 0 | 0 | null | 2021-06-29T06:26:09 | 2021-04-02T12:43:21 | JavaScript | UTF-8 | Python | false | false | 2,782 | py | from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
from rest_auth.registration.serializers import RegisterSerializer as RestRegisterSerializer
from backend.utils import get_or_none
from .roles import roles
from .models import AppUser, Division
class UserSerializer(serializers.ModelSerializer):
    """Read-mostly representation of an AppUser; only username is writable."""

    class Meta:
        model = AppUser
        fields = ('pk', 'username', 'email', 'role', 'divisi', 'gaji')
        read_only_fields = ('email', 'role', 'divisi', 'gaji')
        # Expand related objects (divisi) one level deep on output.
        depth = 1
class DivisionSerializer(serializers.ModelSerializer):
    """Serializer for Division rows.

    `id` is declared explicitly (and required) so nested writes can
    reference an existing division; `nama_divisi` is optional so a bare
    id is enough to identify one.
    """
    id = serializers.IntegerField(required=True)
    nama_divisi = serializers.CharField(max_length=100, required=False)

    class Meta:
        model = Division
        fields = '__all__'
class UserListSerializer(serializers.ModelSerializer):
    """Listing representation of AppUser with divisions serialized inline."""
    divisi = DivisionSerializer(many=True)

    class Meta:
        model = AppUser
        fields = ('pk', 'username', 'email', 'role', 'divisi')
        depth = 1
class UserEditSerializer(serializers.ModelSerializer):
    """Serializer used to update a user's profile, including the nested
    many-to-many division membership."""
    divisi = DivisionSerializer(many=True)

    class Meta:
        model = AppUser
        fields = ('pk', 'username', 'email', 'role', 'divisi', 'gaji')
        read_only_fields = ('email',)
        depth = 1

    def validate_divisi(self, divisi):
        """Resolve the incoming division payloads to Division instances.

        Each entry is first looked up by its supplied attributes; when no
        match exists, a new Division is created from ``nama_divisi`` if
        given, otherwise the entry is silently skipped.
        """
        divisi_list = []
        for div in divisi:
            divisi_obj = get_or_none(Division, **div)
            if divisi_obj is None:
                nama = div.get('nama_divisi')
                if nama is None:
                    # Neither an existing division nor enough data to
                    # create one; drop the entry.
                    continue
                divisi_obj = Division.objects.create(nama_divisi=nama)
            divisi_list.append(divisi_obj)
        return divisi_list

    def update(self, instance, validated_data):
        # Replace the m2m division set first, then let ModelSerializer
        # handle the remaining scalar fields.
        divisi = validated_data.pop('divisi')
        instance.divisi.set(divisi)
        return super().update(instance, validated_data)
class RegisterSerializer(RestRegisterSerializer):
    """Registration serializer extending django-rest-auth signup with the
    application-specific role, division membership and salary fields."""
    role = serializers.CharField(max_length=20, required=True)
    divisi = DivisionSerializer(many=True)
    gaji = serializers.IntegerField()

    def validate_role(self, role):
        """Reject any role not declared in the application's role list."""
        if role not in roles:
            raise serializers.ValidationError(_("Invalid Role"))
        return role

    def validate_divisi(self, divisi):
        """Resolve the incoming division payloads to Division instances.

        Each entry is first looked up by its supplied attributes; when no
        match exists, a division is fetched-or-created from ``nama_divisi``
        if given, otherwise the entry is silently skipped.
        """
        divisi_list = []
        for div in divisi:
            divisi_obj = get_or_none(Division, **div)
            if divisi_obj is None:
                nama = div.get('nama_divisi')
                if nama is None:
                    continue
                divisi_obj = Division.objects.get_or_create(nama_divisi=nama)[0]
            divisi_list.append(divisi_obj)
        return divisi_list

    def custom_signup(self, request, user):
        """Persist the extra registration fields onto the new user."""
        user.role = self.validated_data.get('role', '')
        user.divisi.set(self.validated_data.get('divisi'))
        user.gaji = self.validated_data.get('gaji')
        user.save()
"leonardoeinstein2000@gmail.com"
] | leonardoeinstein2000@gmail.com |
b487cd0e45417baac21ff8cc5b41865294c11429 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/CISCO-SESS-BORDER-CTRLR-STATS-MIB.py | 7bb8ff71ed4c748fe583cd4d198ff53dfbaa86ca | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 38,853 | py | #
# PySNMP MIB module CISCO-SESS-BORDER-CTRLR-STATS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-SESS-BORDER-CTRLR-STATS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:55:14 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
csbCallStatsInstanceIndex, csbCallStatsServiceIndex, CiscoSbcPeriodicStatsInterval = mibBuilder.importSymbols("CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsInstanceIndex", "csbCallStatsServiceIndex", "CiscoSbcPeriodicStatsInterval")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, ModuleIdentity, iso, ObjectIdentity, Integer32, Gauge32, IpAddress, Bits, Counter32, Unsigned32, MibIdentifier, Counter64, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "ModuleIdentity", "iso", "ObjectIdentity", "Integer32", "Gauge32", "IpAddress", "Bits", "Counter32", "Unsigned32", "MibIdentifier", "Counter64", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity for CISCO-SESS-BORDER-CTRLR-STATS-MIB, rooted at
# 1.3.6.1.4.1.9.9.757 under ciscoMgmt. Descriptive texts are attached only
# when the builder was configured to load them (mibBuilder.loadTexts).
ciscoSbcStatsMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 757))
ciscoSbcStatsMIB.setRevisions(('2010-09-15 00:00',))
if mibBuilder.loadTexts: ciscoSbcStatsMIB.setLastUpdated('201009150000Z')
if mibBuilder.loadTexts: ciscoSbcStatsMIB.setOrganization('Cisco Systems, Inc.')
class CiscoSbcSIPMethod(TextualConvention, Integer32):
    """Textual convention: SIP request method as an enumerated integer.

    Values 1..14 map to the SIP methods listed in ``namedValues``
    (unknown, ack, bye, cancel, info, invite, message, notify, options,
    prack, refer, register, subscribe, update).
    """
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))
    namedValues = NamedValues(("unknown", 1), ("ack", 2), ("bye", 3), ("cancel", 4), ("info", 5), ("invite", 6), ("message", 7), ("notify", 8), ("options", 9), ("prack", 10), ("refer", 11), ("register", 12), ("subscribe", 13), ("update", 14))
class CiscoSbcRadiusClientType(TextualConvention, Integer32):
    """Textual convention: RADIUS client type.

    1 = authentication client, 2 = accounting client.
    """
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("authentication", 1), ("accounting", 2))
# Standard MIB subtrees: notifications (.0), objects (.1), conformance (.2).
ciscoSbcStatsMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 757, 0))
ciscoSbcStatsMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 757, 1))
ciscoSbcStatsMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 757, 2))
# --- csbRadiusStatsTable (1.3.6.1.4.1.9.9.757.1.1) ---------------------------
# Per-RADIUS-client message statistics. Rows are indexed by the SBC instance
# and service indices (imported from CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB)
# plus a local entry index. Most columns are read-only Counter64 values in
# units of packets; csbRadiusStatsPending is a Gauge32.
csbRadiusStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1), )
if mibBuilder.loadTexts: csbRadiusStatsTable.setStatus('current')
csbRadiusStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1), ).setIndexNames((0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsInstanceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsServiceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsEntIndex"))
if mibBuilder.loadTexts: csbRadiusStatsEntry.setStatus('current')
# Index column: no max-access is set, so it serves purely as the row index.
csbRadiusStatsEntIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: csbRadiusStatsEntIndex.setStatus('current')
csbRadiusStatsClientName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 2), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsClientName.setStatus('current')
csbRadiusStatsClientType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 3), CiscoSbcRadiusClientType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsClientType.setStatus('current')
csbRadiusStatsSrvrName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 4), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsSrvrName.setStatus('current')
# Access-phase counters (columns 5-9).
csbRadiusStatsAcsReqs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 5), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsAcsReqs.setStatus('current')
csbRadiusStatsAcsRtrns = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 6), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsAcsRtrns.setStatus('current')
csbRadiusStatsAcsAccpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsAcsAccpts.setStatus('current')
csbRadiusStatsAcsRejects = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 8), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsAcsRejects.setStatus('current')
csbRadiusStatsAcsChalls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 9), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsAcsChalls.setStatus('current')
# Accounting-phase counters (columns 10-12).
csbRadiusStatsActReqs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 10), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsActReqs.setStatus('current')
csbRadiusStatsActRetrans = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 11), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsActRetrans.setStatus('current')
csbRadiusStatsActRsps = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 12), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsActRsps.setStatus('current')
# Error/diagnostic counters (columns 13-18).
csbRadiusStatsMalformedRsps = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 13), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsMalformedRsps.setStatus('current')
csbRadiusStatsBadAuths = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 14), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsBadAuths.setStatus('current')
csbRadiusStatsPending = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 15), Gauge32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsPending.setStatus('current')
csbRadiusStatsTimeouts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 16), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsTimeouts.setStatus('current')
csbRadiusStatsUnknownType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 17), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsUnknownType.setStatus('current')
csbRadiusStatsDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 1, 1, 18), Counter64()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRadiusStatsDropped.setStatus('current')
# --- csbRfBillRealmStatsTable (1.3.6.1.4.1.9.9.757.1.2) ----------------------
# Per-billing-realm Rf accounting-request (ACR) statistics. Rows are indexed
# by SBC instance/service, a realm index (0..31) and the realm name. Columns
# count total / successful / failed ACRs for each ACR type
# (start, interim, stop, event), all read-only Unsigned32 in units of ACRs.
csbRfBillRealmStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2), )
if mibBuilder.loadTexts: csbRfBillRealmStatsTable.setStatus('current')
csbRfBillRealmStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1), ).setIndexNames((0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsInstanceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsServiceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsIndex"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsRealmName"))
if mibBuilder.loadTexts: csbRfBillRealmStatsEntry.setStatus('current')
# Index column, constrained to 0..31.
csbRfBillRealmStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 31)))
if mibBuilder.loadTexts: csbRfBillRealmStatsIndex.setStatus('current')
csbRfBillRealmStatsRealmName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 2), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsRealmName.setStatus('current')
csbRfBillRealmStatsTotalStartAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 3), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsTotalStartAcrs.setStatus('current')
csbRfBillRealmStatsTotalInterimAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 4), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsTotalInterimAcrs.setStatus('current')
csbRfBillRealmStatsTotalStopAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 5), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsTotalStopAcrs.setStatus('current')
csbRfBillRealmStatsTotalEventAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 6), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsTotalEventAcrs.setStatus('current')
csbRfBillRealmStatsSuccStartAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 7), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsSuccStartAcrs.setStatus('current')
csbRfBillRealmStatsSuccInterimAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 8), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsSuccInterimAcrs.setStatus('current')
csbRfBillRealmStatsSuccStopAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 9), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsSuccStopAcrs.setStatus('current')
csbRfBillRealmStatsSuccEventAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 10), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsSuccEventAcrs.setStatus('current')
csbRfBillRealmStatsFailStartAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 11), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsFailStartAcrs.setStatus('current')
csbRfBillRealmStatsFailInterimAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 12), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsFailInterimAcrs.setStatus('current')
csbRfBillRealmStatsFailStopAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 13), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsFailStopAcrs.setStatus('current')
csbRfBillRealmStatsFailEventAcrs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 2, 1, 14), Unsigned32()).setUnits('ACRs').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbRfBillRealmStatsFailEventAcrs.setStatus('current')
# --- csbSIPMthdCurrentStatsTable (1.3.6.1.4.1.9.9.757.1.3) -------------------
# Per-adjacency, per-SIP-method counters for the *current* statistics
# interval. Indexed by SBC instance/service, adjacency name, SIP method
# (CiscoSbcSIPMethod) and the periodic-stats interval. Columns count
# requests in/out and responses in/out split by 1xx..6xx class (Gauge32).
csbSIPMthdCurrentStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3), )
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsTable.setStatus('current')
csbSIPMthdCurrentStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1), ).setIndexNames((0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsInstanceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsServiceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsAdjName"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsMethod"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsInterval"))
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsEntry.setStatus('current')
csbSIPMthdCurrentStatsAdjName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 1), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsAdjName.setStatus('current')
# Index columns (method enum and interval) carry no max-access.
csbSIPMthdCurrentStatsMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 2), CiscoSbcSIPMethod())
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsMethod.setStatus('current')
csbSIPMthdCurrentStatsInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 3), CiscoSbcPeriodicStatsInterval())
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsInterval.setStatus('current')
csbSIPMthdCurrentStatsMethodName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 4), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsMethodName.setStatus('current')
csbSIPMthdCurrentStatsReqIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 5), Gauge32()).setUnits('requests').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsReqIn.setStatus('current')
csbSIPMthdCurrentStatsReqOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 6), Gauge32()).setUnits('requests').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsReqOut.setStatus('current')
# Response counters, one in/out pair per response class (1xx..6xx).
csbSIPMthdCurrentStatsResp1xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 7), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp1xxIn.setStatus('current')
csbSIPMthdCurrentStatsResp1xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 8), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp1xxOut.setStatus('current')
csbSIPMthdCurrentStatsResp2xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 9), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp2xxIn.setStatus('current')
csbSIPMthdCurrentStatsResp2xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 10), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp2xxOut.setStatus('current')
csbSIPMthdCurrentStatsResp3xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 11), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp3xxIn.setStatus('current')
csbSIPMthdCurrentStatsResp3xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 12), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp3xxOut.setStatus('current')
csbSIPMthdCurrentStatsResp4xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 13), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp4xxIn.setStatus('current')
csbSIPMthdCurrentStatsResp4xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 14), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp4xxOut.setStatus('current')
csbSIPMthdCurrentStatsResp5xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 15), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp5xxIn.setStatus('current')
csbSIPMthdCurrentStatsResp5xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 16), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp5xxOut.setStatus('current')
csbSIPMthdCurrentStatsResp6xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 17), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp6xxIn.setStatus('current')
csbSIPMthdCurrentStatsResp6xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 3, 1, 18), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdCurrentStatsResp6xxOut.setStatus('current')
# --- csbSIPMthdHistoryStatsTable (1.3.6.1.4.1.9.9.757.1.4) -------------------
# Historical counterpart of csbSIPMthdCurrentStatsTable: identical index
# and column layout, but for completed (past) statistics intervals.
csbSIPMthdHistoryStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4), )
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsTable.setStatus('current')
csbSIPMthdHistoryStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1), ).setIndexNames((0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsInstanceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsServiceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsAdjName"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsMethod"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsInterval"))
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsEntry.setStatus('current')
csbSIPMthdHistoryStatsAdjName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 1), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsAdjName.setStatus('current')
# Index columns (method enum and interval) carry no max-access.
csbSIPMthdHistoryStatsMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 2), CiscoSbcSIPMethod())
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsMethod.setStatus('current')
csbSIPMthdHistoryStatsInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 3), CiscoSbcPeriodicStatsInterval())
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsInterval.setStatus('current')
csbSIPMthdHistoryStatsMethodName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 4), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsMethodName.setStatus('current')
csbSIPMthdHistoryStatsReqIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 5), Gauge32()).setUnits('requests').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsReqIn.setStatus('current')
csbSIPMthdHistoryStatsReqOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 6), Gauge32()).setUnits('requests').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsReqOut.setStatus('current')
# Response counters, one in/out pair per response class (1xx..6xx).
csbSIPMthdHistoryStatsResp1xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 7), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp1xxIn.setStatus('current')
csbSIPMthdHistoryStatsResp1xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 8), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp1xxOut.setStatus('current')
csbSIPMthdHistoryStatsResp2xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 9), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp2xxIn.setStatus('current')
csbSIPMthdHistoryStatsResp2xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 10), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp2xxOut.setStatus('current')
csbSIPMthdHistoryStatsResp3xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 11), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp3xxIn.setStatus('current')
csbSIPMthdHistoryStatsResp3xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 12), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp3xxOut.setStatus('current')
csbSIPMthdHistoryStatsResp4xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 13), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp4xxIn.setStatus('current')
csbSIPMthdHistoryStatsResp4xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 14), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp4xxOut.setStatus('current')
csbSIPMthdHistoryStatsResp5xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 15), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp5xxIn.setStatus('current')
csbSIPMthdHistoryStatsResp5xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 16), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp5xxOut.setStatus('current')
csbSIPMthdHistoryStatsResp6xxIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 17), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp6xxIn.setStatus('current')
csbSIPMthdHistoryStatsResp6xxOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 4, 1, 18), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdHistoryStatsResp6xxOut.setStatus('current')
# --- csbSIPMthdRCCurrentStatsTable (1.3.6.1.4.1.9.9.757.1.5) -----------------
# Per-adjacency, per-SIP-method, per-response-code counters for the current
# interval. Adds a response-code index on top of the method/interval indices.
csbSIPMthdRCCurrentStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 5), )
if mibBuilder.loadTexts: csbSIPMthdRCCurrentStatsTable.setStatus('current')
csbSIPMthdRCCurrentStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 5, 1), ).setIndexNames((0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsInstanceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsServiceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCCurrentStatsAdjName"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCCurrentStatsMethod"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCCurrentStatsRespCode"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCCurrentStatsInterval"))
if mibBuilder.loadTexts: csbSIPMthdRCCurrentStatsEntry.setStatus('current')
csbSIPMthdRCCurrentStatsAdjName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 5, 1, 1), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdRCCurrentStatsAdjName.setStatus('current')
# Index columns (method, response code, interval) carry no max-access.
csbSIPMthdRCCurrentStatsMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 5, 1, 2), CiscoSbcSIPMethod())
if mibBuilder.loadTexts: csbSIPMthdRCCurrentStatsMethod.setStatus('current')
csbSIPMthdRCCurrentStatsRespCode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 5, 1, 3), Unsigned32())
if mibBuilder.loadTexts: csbSIPMthdRCCurrentStatsRespCode.setStatus('current')
csbSIPMthdRCCurrentStatsInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 5, 1, 4), CiscoSbcPeriodicStatsInterval())
if mibBuilder.loadTexts: csbSIPMthdRCCurrentStatsInterval.setStatus('current')
csbSIPMthdRCCurrentStatsMethodName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 5, 1, 5), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdRCCurrentStatsMethodName.setStatus('current')
csbSIPMthdRCCurrentStatsRespIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 5, 1, 6), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdRCCurrentStatsRespIn.setStatus('current')
csbSIPMthdRCCurrentStatsRespOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 5, 1, 7), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdRCCurrentStatsRespOut.setStatus('current')
# --- csbSIPMthdRCHistoryStatsTable (1.3.6.1.4.1.9.9.757.1.6) -----------------
# Historical counterpart of csbSIPMthdRCCurrentStatsTable. Note the column
# order differs from the current table: here MethodName is column 3 and
# RespCode/Interval are columns 4/5 (as generated from the original MIB).
csbSIPMthdRCHistoryStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 6), )
if mibBuilder.loadTexts: csbSIPMthdRCHistoryStatsTable.setStatus('current')
csbSIPMthdRCHistoryStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 6, 1), ).setIndexNames((0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsInstanceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-CALL-STATS-MIB", "csbCallStatsServiceIndex"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCHistoryStatsAdjName"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCHistoryStatsMethod"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCHistoryStatsRespCode"), (0, "CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCHistoryStatsInterval"))
if mibBuilder.loadTexts: csbSIPMthdRCHistoryStatsEntry.setStatus('current')
csbSIPMthdRCHistoryStatsAdjName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 6, 1, 1), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdRCHistoryStatsAdjName.setStatus('current')
csbSIPMthdRCHistoryStatsMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 6, 1, 2), CiscoSbcSIPMethod())
if mibBuilder.loadTexts: csbSIPMthdRCHistoryStatsMethod.setStatus('current')
csbSIPMthdRCHistoryStatsMethodName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 6, 1, 3), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdRCHistoryStatsMethodName.setStatus('current')
csbSIPMthdRCHistoryStatsRespCode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 6, 1, 4), Unsigned32())
if mibBuilder.loadTexts: csbSIPMthdRCHistoryStatsRespCode.setStatus('current')
csbSIPMthdRCHistoryStatsInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 6, 1, 5), CiscoSbcPeriodicStatsInterval())
if mibBuilder.loadTexts: csbSIPMthdRCHistoryStatsInterval.setStatus('current')
csbSIPMthdRCHistoryStatsRespIn = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 6, 1, 6), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdRCHistoryStatsRespIn.setStatus('current')
csbSIPMthdRCHistoryStatsRespOut = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 757, 1, 6, 1, 7), Gauge32()).setUnits('responses').setMaxAccess("readonly")
if mibBuilder.loadTexts: csbSIPMthdRCHistoryStatsRespOut.setStatus('current')
# --- Conformance section (1.3.6.1.4.1.9.9.757.2) -----------------------------
# One module-compliance statement plus one object group per stats table.
# The `if getattr(mibBuilder, 'version', ...) > (4, 4, 0)` guards are emitted
# by pysmi: setStatus on these objects only exists on newer pysnmp builds.
csbStatsMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 757, 2, 1))
csbStatsMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 757, 2, 2))
csbStatsMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 757, 2, 1, 1)).setObjects(("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsGroup"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsGroup"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsGroup"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsGroup"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCHistoryStatsGroup"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCCurrentStatsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    csbStatsMIBCompliance = csbStatsMIBCompliance.setStatus('current')
csbRadiusStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 757, 2, 2, 1)).setObjects(("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsClientName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsClientType"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsSrvrName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsAcsReqs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsAcsRtrns"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsAcsAccpts"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsAcsRejects"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsAcsChalls"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsActReqs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsActRetrans"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsActRsps"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsMalformedRsps"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsBadAuths"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsPending"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsTimeouts"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsUnknownType"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRadiusStatsDropped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    csbRadiusStatsGroup = csbRadiusStatsGroup.setStatus('current')
csbRfBillRealmStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 757, 2, 2, 2)).setObjects(("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsRealmName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsTotalStartAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsTotalInterimAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsTotalStopAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsTotalEventAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsSuccStartAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsSuccInterimAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsSuccStopAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsSuccEventAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsFailStartAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsFailInterimAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsFailStopAcrs"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbRfBillRealmStatsFailEventAcrs"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    csbRfBillRealmStatsGroup = csbRfBillRealmStatsGroup.setStatus('current')
csbSIPMthdCurrentStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 757, 2, 2, 3)).setObjects(("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsAdjName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsMethodName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsReqIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsReqOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp1xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp1xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp2xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp2xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp3xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp3xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp4xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp4xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp5xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp5xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp6xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdCurrentStatsResp6xxOut"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    csbSIPMthdCurrentStatsGroup = csbSIPMthdCurrentStatsGroup.setStatus('current')
csbSIPMthdHistoryStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 757, 2, 2, 4)).setObjects(("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsAdjName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsMethodName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsReqIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsReqOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp1xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp1xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp2xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp2xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp3xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp3xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp4xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp4xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp5xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp5xxOut"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp6xxIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdHistoryStatsResp6xxOut"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    csbSIPMthdHistoryStatsGroup = csbSIPMthdHistoryStatsGroup.setStatus('current')
csbSIPMthdRCCurrentStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 757, 2, 2, 5)).setObjects(("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCCurrentStatsAdjName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCCurrentStatsMethodName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCCurrentStatsRespIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCCurrentStatsRespOut"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    csbSIPMthdRCCurrentStatsGroup = csbSIPMthdRCCurrentStatsGroup.setStatus('current')
csbSIPMthdRCHistoryStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 757, 2, 2, 6)).setObjects(("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCHistoryStatsAdjName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCHistoryStatsMethodName"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCHistoryStatsRespIn"), ("CISCO-SESS-BORDER-CTRLR-STATS-MIB", "csbSIPMthdRCHistoryStatsRespOut"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    csbSIPMthdRCHistoryStatsGroup = csbSIPMthdRCHistoryStatsGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-SESS-BORDER-CTRLR-STATS-MIB", csbRfBillRealmStatsSuccStartAcrs=csbRfBillRealmStatsSuccStartAcrs, csbRadiusStatsMalformedRsps=csbRadiusStatsMalformedRsps, PYSNMP_MODULE_ID=ciscoSbcStatsMIB, csbSIPMthdCurrentStatsResp1xxIn=csbSIPMthdCurrentStatsResp1xxIn, csbSIPMthdCurrentStatsResp4xxIn=csbSIPMthdCurrentStatsResp4xxIn, csbSIPMthdRCCurrentStatsEntry=csbSIPMthdRCCurrentStatsEntry, csbRfBillRealmStatsTable=csbRfBillRealmStatsTable, csbSIPMthdCurrentStatsResp3xxIn=csbSIPMthdCurrentStatsResp3xxIn, csbSIPMthdRCCurrentStatsMethodName=csbSIPMthdRCCurrentStatsMethodName, csbSIPMthdHistoryStatsReqOut=csbSIPMthdHistoryStatsReqOut, csbSIPMthdRCCurrentStatsAdjName=csbSIPMthdRCCurrentStatsAdjName, csbRadiusStatsActRsps=csbRadiusStatsActRsps, csbSIPMthdCurrentStatsResp3xxOut=csbSIPMthdCurrentStatsResp3xxOut, csbSIPMthdRCCurrentStatsRespOut=csbSIPMthdRCCurrentStatsRespOut, csbRadiusStatsEntry=csbRadiusStatsEntry, csbRfBillRealmStatsGroup=csbRfBillRealmStatsGroup, csbSIPMthdHistoryStatsReqIn=csbSIPMthdHistoryStatsReqIn, csbRadiusStatsActRetrans=csbRadiusStatsActRetrans, csbRadiusStatsClientType=csbRadiusStatsClientType, csbRadiusStatsAcsChalls=csbRadiusStatsAcsChalls, ciscoSbcStatsMIBNotifs=ciscoSbcStatsMIBNotifs, csbSIPMthdRCCurrentStatsTable=csbSIPMthdRCCurrentStatsTable, csbStatsMIBGroups=csbStatsMIBGroups, csbRfBillRealmStatsTotalStopAcrs=csbRfBillRealmStatsTotalStopAcrs, csbSIPMthdCurrentStatsResp4xxOut=csbSIPMthdCurrentStatsResp4xxOut, csbSIPMthdHistoryStatsResp6xxOut=csbSIPMthdHistoryStatsResp6xxOut, csbSIPMthdHistoryStatsGroup=csbSIPMthdHistoryStatsGroup, csbSIPMthdCurrentStatsAdjName=csbSIPMthdCurrentStatsAdjName, csbRadiusStatsAcsRejects=csbRadiusStatsAcsRejects, csbSIPMthdHistoryStatsResp2xxIn=csbSIPMthdHistoryStatsResp2xxIn, csbSIPMthdHistoryStatsResp3xxIn=csbSIPMthdHistoryStatsResp3xxIn, csbRadiusStatsAcsReqs=csbRadiusStatsAcsReqs, csbRadiusStatsBadAuths=csbRadiusStatsBadAuths, csbStatsMIBCompliances=csbStatsMIBCompliances, 
csbSIPMthdHistoryStatsMethodName=csbSIPMthdHistoryStatsMethodName, csbSIPMthdRCHistoryStatsGroup=csbSIPMthdRCHistoryStatsGroup, csbSIPMthdCurrentStatsReqOut=csbSIPMthdCurrentStatsReqOut, csbSIPMthdRCHistoryStatsInterval=csbSIPMthdRCHistoryStatsInterval, csbRadiusStatsTimeouts=csbRadiusStatsTimeouts, CiscoSbcRadiusClientType=CiscoSbcRadiusClientType, csbSIPMthdRCHistoryStatsAdjName=csbSIPMthdRCHistoryStatsAdjName, csbSIPMthdRCHistoryStatsRespOut=csbSIPMthdRCHistoryStatsRespOut, csbSIPMthdCurrentStatsResp1xxOut=csbSIPMthdCurrentStatsResp1xxOut, csbSIPMthdCurrentStatsMethodName=csbSIPMthdCurrentStatsMethodName, csbRadiusStatsAcsAccpts=csbRadiusStatsAcsAccpts, CiscoSbcSIPMethod=CiscoSbcSIPMethod, csbRadiusStatsDropped=csbRadiusStatsDropped, csbRadiusStatsEntIndex=csbRadiusStatsEntIndex, csbRfBillRealmStatsSuccEventAcrs=csbRfBillRealmStatsSuccEventAcrs, csbRfBillRealmStatsSuccStopAcrs=csbRfBillRealmStatsSuccStopAcrs, csbSIPMthdHistoryStatsResp1xxIn=csbSIPMthdHistoryStatsResp1xxIn, csbRfBillRealmStatsFailStartAcrs=csbRfBillRealmStatsFailStartAcrs, csbSIPMthdRCHistoryStatsMethodName=csbSIPMthdRCHistoryStatsMethodName, csbSIPMthdRCCurrentStatsGroup=csbSIPMthdRCCurrentStatsGroup, csbSIPMthdRCHistoryStatsTable=csbSIPMthdRCHistoryStatsTable, csbSIPMthdRCCurrentStatsMethod=csbSIPMthdRCCurrentStatsMethod, csbSIPMthdHistoryStatsResp5xxIn=csbSIPMthdHistoryStatsResp5xxIn, csbSIPMthdHistoryStatsResp1xxOut=csbSIPMthdHistoryStatsResp1xxOut, csbRfBillRealmStatsRealmName=csbRfBillRealmStatsRealmName, ciscoSbcStatsMIBObjects=ciscoSbcStatsMIBObjects, csbRadiusStatsClientName=csbRadiusStatsClientName, csbSIPMthdHistoryStatsAdjName=csbSIPMthdHistoryStatsAdjName, csbSIPMthdRCHistoryStatsEntry=csbSIPMthdRCHistoryStatsEntry, csbRfBillRealmStatsTotalStartAcrs=csbRfBillRealmStatsTotalStartAcrs, csbSIPMthdHistoryStatsResp5xxOut=csbSIPMthdHistoryStatsResp5xxOut, csbSIPMthdHistoryStatsInterval=csbSIPMthdHistoryStatsInterval, csbSIPMthdHistoryStatsMethod=csbSIPMthdHistoryStatsMethod, 
csbSIPMthdCurrentStatsGroup=csbSIPMthdCurrentStatsGroup, csbSIPMthdCurrentStatsResp6xxIn=csbSIPMthdCurrentStatsResp6xxIn, csbRfBillRealmStatsFailInterimAcrs=csbRfBillRealmStatsFailInterimAcrs, csbSIPMthdRCCurrentStatsInterval=csbSIPMthdRCCurrentStatsInterval, csbSIPMthdHistoryStatsResp2xxOut=csbSIPMthdHistoryStatsResp2xxOut, csbSIPMthdCurrentStatsResp2xxOut=csbSIPMthdCurrentStatsResp2xxOut, csbRadiusStatsUnknownType=csbRadiusStatsUnknownType, csbRfBillRealmStatsTotalEventAcrs=csbRfBillRealmStatsTotalEventAcrs, csbSIPMthdHistoryStatsTable=csbSIPMthdHistoryStatsTable, csbSIPMthdCurrentStatsEntry=csbSIPMthdCurrentStatsEntry, csbSIPMthdCurrentStatsMethod=csbSIPMthdCurrentStatsMethod, ciscoSbcStatsMIBConform=ciscoSbcStatsMIBConform, csbSIPMthdRCCurrentStatsRespCode=csbSIPMthdRCCurrentStatsRespCode, csbSIPMthdCurrentStatsTable=csbSIPMthdCurrentStatsTable, csbRfBillRealmStatsSuccInterimAcrs=csbRfBillRealmStatsSuccInterimAcrs, csbRadiusStatsSrvrName=csbRadiusStatsSrvrName, csbRadiusStatsAcsRtrns=csbRadiusStatsAcsRtrns, csbRfBillRealmStatsFailEventAcrs=csbRfBillRealmStatsFailEventAcrs, csbRfBillRealmStatsEntry=csbRfBillRealmStatsEntry, csbRfBillRealmStatsFailStopAcrs=csbRfBillRealmStatsFailStopAcrs, csbStatsMIBCompliance=csbStatsMIBCompliance, csbSIPMthdHistoryStatsResp4xxIn=csbSIPMthdHistoryStatsResp4xxIn, csbSIPMthdCurrentStatsResp5xxOut=csbSIPMthdCurrentStatsResp5xxOut, csbRadiusStatsPending=csbRadiusStatsPending, csbSIPMthdCurrentStatsInterval=csbSIPMthdCurrentStatsInterval, csbSIPMthdRCHistoryStatsRespCode=csbSIPMthdRCHistoryStatsRespCode, csbRfBillRealmStatsIndex=csbRfBillRealmStatsIndex, csbRadiusStatsTable=csbRadiusStatsTable, ciscoSbcStatsMIB=ciscoSbcStatsMIB, csbSIPMthdHistoryStatsEntry=csbSIPMthdHistoryStatsEntry, csbRadiusStatsActReqs=csbRadiusStatsActReqs, csbSIPMthdRCCurrentStatsRespIn=csbSIPMthdRCCurrentStatsRespIn, csbSIPMthdHistoryStatsResp3xxOut=csbSIPMthdHistoryStatsResp3xxOut, csbSIPMthdCurrentStatsReqIn=csbSIPMthdCurrentStatsReqIn, 
csbSIPMthdRCHistoryStatsRespIn=csbSIPMthdRCHistoryStatsRespIn, csbSIPMthdRCHistoryStatsMethod=csbSIPMthdRCHistoryStatsMethod, csbSIPMthdCurrentStatsResp5xxIn=csbSIPMthdCurrentStatsResp5xxIn, csbSIPMthdCurrentStatsResp2xxIn=csbSIPMthdCurrentStatsResp2xxIn, csbSIPMthdCurrentStatsResp6xxOut=csbSIPMthdCurrentStatsResp6xxOut, csbSIPMthdHistoryStatsResp6xxIn=csbSIPMthdHistoryStatsResp6xxIn, csbRadiusStatsGroup=csbRadiusStatsGroup, csbRfBillRealmStatsTotalInterimAcrs=csbRfBillRealmStatsTotalInterimAcrs, csbSIPMthdHistoryStatsResp4xxOut=csbSIPMthdHistoryStatsResp4xxOut)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
2196245cde307d3cbc0fd7166ef4b6dbe19e8cbf | 5339440a3c9f1318f8feef223d88955617eb2b93 | /apps/its_quote_dash/views.py | c686b63a2d05576076586418e8165cbf85156c68 | [] | no_license | Komaldhall/Quodophile | 372c8872ac93aae23817e834557697882f4bcc64 | a14d230b993ef282fb63369f1994ff4896c0157c | refs/heads/master | 2020-04-16T09:29:55.882991 | 2019-01-13T08:44:39 | 2019-01-13T08:44:39 | 165,466,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,568 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import *
import datetime
import bcrypt
import re
# Loose sanity-check pattern for email addresses (not full RFC 5322 validation).
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
# WARNING: evaluated once at import time, so this date goes stale as soon as
# the server process lives past midnight; views should compute "today" per
# request instead of relying on this.
now = datetime.datetime.today().strftime("%Y-%m-%d")
def register(request):
    """Handle user signup.

    POST: validate the form, reject duplicate emails, hash the password with
    bcrypt and create the account; redirects to '/' on success, '/signup' on
    any validation error. GET: render the signup page.
    """
    if request.method != 'POST':
        return render(request, 'its_quote_dash/signup.html')

    form = request.POST
    errors = []
    if len(form['first_name']) < 2:
        errors.append('First name must be at least 2 characters.')
    if len(form['last_name']) < 2:
        errors.append('Last name must be at least 2 characters.')
    if len(form['password']) < 8:
        errors.append('Password must be at least 8 characters.')
    if not form['password'] == form['cpassword']:
        errors.append('Passwords do not match.')
    if not EMAIL_REGEX.match(form['email']):
        errors.append('Please provide a valid email')
    # Bug fix: compute "today" at request time. The module-level `now` is
    # frozen at import time, so after midnight it wrongly accepted/rejected
    # birthdays around the stale date.
    today = datetime.datetime.today().strftime("%Y-%m-%d")
    if form['birthday'] > today:
        errors.append("Birthday refers to a future date.")
    if errors:
        for e in errors:
            messages.error(request, e)
    else:
        try:
            User.objects.get(email=form['email'])
            messages.error(request, 'Email already exists. Please Login!')
        except User.DoesNotExist:
            hashed_pw = bcrypt.hashpw(form['password'].encode(), bcrypt.gensalt())
            User.objects.create(
                first_name=form['first_name'],
                last_name=form['last_name'],
                email=form['email'],
                password=hashed_pw.decode('utf-8'),
                birth=str(form['birthday']),
            )
            messages.success(request, "You successfully registered. Please login!")
            return redirect('/')
    return redirect('/signup')
def login(request):
    """Authenticate a user by email/password and start a session on success.

    GET renders the login page; POST either redirects to the dashboard or
    back to '/' with a flash message explaining the failure.
    """
    if request.method != 'POST':
        return render(request, 'its_quote_dash/index.html')

    form = request.POST
    if not EMAIL_REGEX.match(form['emaill']):
        messages.error(request, 'Please provide a valid email')
        return redirect('/')

    try:
        user = User.objects.get(email=form['emaill'])
    except User.DoesNotExist:
        messages.error(request, 'Email does not exists. Please signup.')
        return redirect('/')

    if bcrypt.checkpw(request.POST['passwordl'].encode(), user.password.encode()):
        request.session['user_id'] = user.id
        return redirect('/dashboard')

    messages.error(request, 'Password does not match.')
    return redirect('/')
def logout(request):
    """End the current session and send the visitor back to the login page."""
    if 'user_id' not in request.session:
        messages.error(request, 'You need to login.')
        return redirect('/')
    request.session.clear()
    return redirect('/')
def dashboard(request):
    """Render all quotes (newest first) plus the viewer's initials for the navbar."""
    if 'user_id' not in request.session:
        messages.error(request, 'You need to login.')
        return redirect('/')
    viewer = User.objects.get(id=request.session['user_id'])
    context = {
        'user': viewer,
        'quote': Quote.objects.all().order_by('-created_at'),
        # First letter of the first word of each name, shown as initials.
        'first': viewer.first_name.split()[0][0],
        'last': viewer.last_name.split()[0][0],
    }
    return render(request, 'its_quote_dash/dashboard.html', context)
def add(request):
    """Create a new quote from POST data, then re-render the new-quote form."""
    if 'user_id' not in request.session:
        messages.error(request, 'You need to login.')
        return redirect('/')
    if request.POST:
        form = request.POST
        problems = []
        if len(form['author']) < 3:
            problems.append('Author must be at least 3 characters.')
        if len(form['quote']) < 10:
            problems.append('Quote must be at least 10 characters.')
        if problems:
            for problem in problems:
                messages.error(request, problem)
        else:
            # NOTE(review): no redirect after a successful create, so a page
            # refresh re-submits the quote (post/redirect/get is not applied).
            Quote.objects.create(author=form['author'], quote=form['quote'],
                                 users_id=request.session['user_id'])
    viewer = User.objects.get(id=request.session['user_id'])
    context = {
        'first': viewer.first_name.split()[0][0],
        'last': viewer.last_name.split()[0][0],
    }
    return render(request, 'its_quote_dash/newquote.html', context)
def edit(request, user_id):
    """Display and process the account-edit form for ``user_id``.

    Fixes over the previous revision:
    * profile fields are now saved even when the email address is unchanged
      (previously every non-email change was silently discarded, because the
      save only happened inside the "email changed and unique" branch),
    * the "future birthday" check uses the request date instead of the date
      the module was first imported (module-level ``now`` is frozen).
    """
    if 'user_id' not in request.session:
        messages.error(request, 'You need to login.')
        return redirect('/')
    # NOTE(review): user_id comes from the URL, so any logged-in user can open
    # another user's edit page — confirm whether this is intended.
    if request.POST:
        form = request.POST
        errors = []
        if len(form['first_name']) < 2:
            errors.append('First name must be at least 2 characters.')
        if len(form['last_name']) < 2:
            errors.append('Last name must be at least 2 characters.')
        if not EMAIL_REGEX.match(form['email']):
            errors.append('Please provide a valid email')
        today = datetime.datetime.today().strftime("%Y-%m-%d")
        if form['birthday'] > today:
            errors.append("Your birthday refers to a future date. Please check!!")
        if errors:
            for e in errors:
                messages.error(request, e)
        else:
            user = User.objects.get(id=user_id)
            email_taken = (
                user.email != form['email']
                and User.objects.filter(email=form['email']).exists()
            )
            if email_taken:
                messages.error(request, 'This email already exists. Please change.')
            else:
                user.email = form['email']
                user.first_name = form['first_name']
                user.last_name = form['last_name']
                user.birth = form['birthday']
                user.save()
    user = User.objects.get(id=user_id)
    context = {
        'user': user,
        'first': user.first_name.split()[0][0],
        'last': user.last_name.split()[0][0],
    }
    return render(request, 'its_quote_dash/editaccount.html', context)
def show(request, user_id):
    """List every quote posted by ``user_id``, newest first."""
    if 'user_id' not in request.session:
        messages.error(request, 'You need to login.')
        return redirect('/')
    viewer = User.objects.get(id=request.session['user_id'])
    context = {
        'quote': Quote.objects.filter(users_id=user_id).order_by('-created_at'),
        'quser': User.objects.get(id=user_id),   # the profile being viewed
        'user': viewer,                          # the logged-in viewer
        'first': viewer.first_name.split()[0][0],
        'last': viewer.last_name.split()[0][0],
    }
    return render(request, 'its_quote_dash/userposts.html', context)
def delete(request, quote_id):
    """Delete a quote, but only when it belongs to the logged-in user.

    Security fix: previously any logged-in user could delete any quote
    simply by crafting the URL (insecure direct object reference).
    """
    if 'user_id' not in request.session:
        messages.error(request, 'You need to login.')
        return redirect('/')
    quote = Quote.objects.get(id=quote_id)
    if quote.users_id != request.session['user_id']:
        messages.error(request, 'You can only delete your own quotes.')
        return redirect('/dashboard')
    quote.delete()
    return redirect('/dashboard')
def like(request, user_id, quote_id):
    """Record that ``user_id`` liked ``quote_id`` (at most once per pair)
    and bump the quote's denormalized like counter."""
    if 'user_id' not in request.session:
        messages.error(request, 'You need to login.')
        return redirect('/')
    already_liked = Liked.objects.filter(
        users_liked_id=user_id, quotes_liked_id=quote_id).exists()
    if not already_liked:
        Liked.objects.create(users_liked_id=user_id, quotes_liked_id=quote_id)
        quote = Quote.objects.get(id=quote_id)
        quote.like = int(quote.like) + 1
        quote.save()
    return redirect('/dashboard')
| [
"komal.dhall7@gmail.com"
] | komal.dhall7@gmail.com |
cc70794577262d87f2821ca1dedabcafa9149ed5 | b09bb602f921064a00835038ce593ed237ae9b16 | /home/forms.py | ba0c456654920c24a92c72f6ce6894c646294b79 | [] | no_license | gauravsaxena1997/Karyavahi | 03f04febe4d726b348c38bc3b8e0e3e7e0113b68 | 6f240234579153aced358d225b45c81b1bc87888 | refs/heads/master | 2020-03-21T06:10:18.854809 | 2018-06-28T18:16:36 | 2018-06-28T18:16:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from django.contrib.auth.models import User
from django import forms
class userinput(forms.Form):
    """Single-field form used to collect a hashtag query from the user."""

    q = forms.CharField(required=True, max_length=25, label='Input #hashtag')
class UserForm(forms.ModelForm):
    """Signup form backed by Django's ``User`` model; the password input is masked."""

    password = forms.CharField(widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ['username', 'email', 'password']
| [
"gauravsaxena.cs@gmail.com"
] | gauravsaxena.cs@gmail.com |
965b1741916b9fc0c678a0f919e49d5749144d7e | 72dc7d124cdac8f2dcab3f72e95e9a646154a6a0 | /byceps/services/ticketing/ticket_user_management_service.py | e9bf08d17b9df30a3c46ca20e8084f70f9bad11c | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | m-ober/byceps | e6569802ee76e8d81b892f1f547881010359e416 | 4d0d43446f3f86a7888ed55395bc2aba58eb52d5 | refs/heads/master | 2020-11-30T23:31:33.944870 | 2020-02-12T23:53:55 | 2020-02-12T23:56:04 | 40,315,983 | 0 | 0 | null | 2015-08-06T16:41:36 | 2015-08-06T16:41:36 | null | UTF-8 | Python | false | false | 3,084 | py | """
byceps.services.ticketing.ticket_user_management_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from ...database import db
from ...typing import UserID
from ..user import service as user_service
from . import event_service
from .exceptions import (
TicketIsRevoked,
UserAccountSuspended,
UserAlreadyCheckedIn,
UserIdUnknown,
)
from . import ticket_service
from .transfer.models import TicketID
def appoint_user_manager(
    ticket_id: TicketID, manager_id: UserID, initiator_id: UserID
) -> None:
    """Make the given user the ticket's dedicated user manager.

    Raises ``TicketIsRevoked`` if the ticket has been revoked.
    """
    ticket = ticket_service.find_ticket(ticket_id)
    if ticket.revoked:
        raise TicketIsRevoked(f'Ticket {ticket_id} has been revoked.')

    ticket.user_managed_by_id = manager_id
    event_data = {
        'appointed_user_manager_id': str(manager_id),
        'initiator_id': str(initiator_id),
    }
    db.session.add(
        event_service.build_event('user-manager-appointed', ticket.id, event_data)
    )
    db.session.commit()
def withdraw_user_manager(ticket_id: TicketID, initiator_id: UserID) -> None:
    """Remove the ticket's custom user manager, if any.

    Raises ``TicketIsRevoked`` if the ticket has been revoked.
    """
    ticket = ticket_service.find_ticket(ticket_id)
    if ticket.revoked:
        raise TicketIsRevoked(f'Ticket {ticket_id} has been revoked.')

    ticket.user_managed_by_id = None
    db.session.add(
        event_service.build_event(
            'user-manager-withdrawn', ticket.id,
            {'initiator_id': str(initiator_id)},
        )
    )
    db.session.commit()
def appoint_user(
    ticket_id: TicketID, user_id: UserID, initiator_id: UserID
) -> None:
    """Assign the given user as the one using the ticket.

    Raises if the ticket is revoked, its user is already checked in,
    the user id is unknown, or the user account is suspended.
    """
    ticket = ticket_service.find_ticket(ticket_id)
    if ticket.revoked:
        raise TicketIsRevoked(f'Ticket {ticket_id} has been revoked.')
    if ticket.user_checked_in:
        raise UserAlreadyCheckedIn('Ticket user has already been checked in.')

    user = user_service.find_user(user_id)
    if user is None:
        raise UserIdUnknown(f"Unknown user ID '{user_id}'")
    if user.suspended:
        raise UserAccountSuspended(
            f'User account {user.screen_name} is suspended.'
        )

    ticket.used_by_id = user_id
    event_data = {
        'appointed_user_id': str(user_id),
        'initiator_id': str(initiator_id),
    }
    db.session.add(event_service.build_event('user-appointed', ticket.id, event_data))
    db.session.commit()
def withdraw_user(ticket_id: TicketID, initiator_id: UserID) -> None:
    """Detach the current user from the ticket.

    Raises if the ticket is revoked or its user is already checked in.
    """
    ticket = ticket_service.find_ticket(ticket_id)
    if ticket.revoked:
        raise TicketIsRevoked(f'Ticket {ticket_id} has been revoked.')
    if ticket.user_checked_in:
        raise UserAlreadyCheckedIn('Ticket user has already been checked in.')

    ticket.used_by_id = None
    db.session.add(
        event_service.build_event(
            'user-withdrawn', ticket.id, {'initiator_id': str(initiator_id)}
        )
    )
    db.session.commit()
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
bb0772a6c4a86996f2c626ca4c8586ad3b56e2e6 | 4946ea045878d30d8c68ac9b867e1b624608a0c7 | /users/migrations/0002_create_profile_existing_users.py | 0869a8e3da799fd2beb72ff0db752677b963f69b | [
"MIT"
] | permissive | jeanettemariemurphy/directory-cms | 6f4a7f5cdd24b891745d4f2dd5f8aa94d3b9b298 | 25c98d13e409c28998724d19fe1c352f7dc19f1d | refs/heads/master | 2020-06-13T12:25:52.083225 | 2019-06-27T10:19:11 | 2019-06-27T10:19:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-07 10:51
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import migrations

from users.models import APPROVED
def set_existing_users_as_approved(apps, schema_editor):
    """Give every active user an approved ``UserProfile``.

    Uses the historical model registry (``apps``) for the user model instead
    of the live ``get_user_model()``: the live model class may have fields or
    behavior that did not exist at this point in the migration history, which
    is the documented pitfall of using real models inside data migrations.
    Rows are inserted with a single ``bulk_create`` instead of one ``save()``
    per user.
    """
    User = apps.get_model(settings.AUTH_USER_MODEL)
    UserProfile = apps.get_model('users', 'UserProfile')
    UserProfile.objects.bulk_create(
        UserProfile(user_id=user.id, assignment_status=APPROVED)
        for user in User.objects.filter(is_active=True)
    )
class Migration(migrations.Migration):
    # Depends on the migration that introduced the UserProfile model.
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        # Pure data migration; reversing it is a no-op (created profiles are kept).
        migrations.RunPython(set_existing_users_as_approved,
                             reverse_code=migrations.RunPython.noop)
    ]
| [
"alessandro.denoia@digital.trade.gov.uk"
] | alessandro.denoia@digital.trade.gov.uk |
eba28d061f1ca38c49e2d42e932da20989443234 | 661721fa052febc3bb955a04888fdf770e1727b9 | /read_10_buf.py | 37a969d42aca7ea51ef7f5f4ee0fdeb8d32b4d36 | [] | no_license | sumitparw/leetcode_practise | 3f25366996702609e9644eff7b0e8f24de05a2e0 | c98b49c4a29b05ac21b6ae66b5d0e6b7f25a2c1a | refs/heads/master | 2020-08-12T12:24:25.092547 | 2020-02-01T19:36:53 | 2020-02-01T19:36:53 | 214,766,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | filename = "xyz.txt"
# Read the file in fixed-size 10-byte chunks; collect the chunks in a list
# and join once at the end. The previous ``buffer += c`` loop rebuilt the
# string on every iteration, which is quadratic in the number of chunks.
chunks = []
with open(filename) as f:
    while True:
        piece = f.read(10)
        if not piece:
            break
        chunks.append(piece)
buffer = ''.join(chunks)
print(buffer)
"46736751+sumitparw@users.noreply.github.com"
] | 46736751+sumitparw@users.noreply.github.com |
a730f5aec6b1037e07a94eedf86850ee5d5da30f | 01857ef455ea60eccaf03b5a9059ec83e9803c2e | /nicegui/tailwind_types/background_color.py | 501965091cc66d3eafcc5126673744ea5026a591 | [
"MIT"
] | permissive | zauberzeug/nicegui | f08312cc1f393deca79e0e84a2506d3a35efff16 | c61b1315f29d51e26cc1168207f5616b302f8df0 | refs/heads/main | 2023-08-18T18:09:30.937322 | 2023-08-18T15:04:00 | 2023-08-18T15:04:00 | 365,250,183 | 5,128 | 271 | MIT | 2023-09-14T01:50:56 | 2021-05-07T13:55:05 | Python | UTF-8 | Python | false | false | 4,213 | py | from typing import Literal
# Union of the allowed Tailwind CSS background-color names: the special
# keywords (inherit/current/transparent/black/white) plus every palette
# color in shades 50-950, in Tailwind's palette order.
# NOTE(review): this list appears to be auto-generated from the Tailwind
# palette — confirm before editing it by hand.
BackgroundColor = Literal[
    'inherit',
    'current',
    'transparent',
    'black',
    'white',
    'slate-50',
    'slate-100',
    'slate-200',
    'slate-300',
    'slate-400',
    'slate-500',
    'slate-600',
    'slate-700',
    'slate-800',
    'slate-900',
    'slate-950',
    'gray-50',
    'gray-100',
    'gray-200',
    'gray-300',
    'gray-400',
    'gray-500',
    'gray-600',
    'gray-700',
    'gray-800',
    'gray-900',
    'gray-950',
    'zinc-50',
    'zinc-100',
    'zinc-200',
    'zinc-300',
    'zinc-400',
    'zinc-500',
    'zinc-600',
    'zinc-700',
    'zinc-800',
    'zinc-900',
    'zinc-950',
    'neutral-50',
    'neutral-100',
    'neutral-200',
    'neutral-300',
    'neutral-400',
    'neutral-500',
    'neutral-600',
    'neutral-700',
    'neutral-800',
    'neutral-900',
    'neutral-950',
    'stone-50',
    'stone-100',
    'stone-200',
    'stone-300',
    'stone-400',
    'stone-500',
    'stone-600',
    'stone-700',
    'stone-800',
    'stone-900',
    'stone-950',
    'red-50',
    'red-100',
    'red-200',
    'red-300',
    'red-400',
    'red-500',
    'red-600',
    'red-700',
    'red-800',
    'red-900',
    'red-950',
    'orange-50',
    'orange-100',
    'orange-200',
    'orange-300',
    'orange-400',
    'orange-500',
    'orange-600',
    'orange-700',
    'orange-800',
    'orange-900',
    'orange-950',
    'amber-50',
    'amber-100',
    'amber-200',
    'amber-300',
    'amber-400',
    'amber-500',
    'amber-600',
    'amber-700',
    'amber-800',
    'amber-900',
    'amber-950',
    'yellow-50',
    'yellow-100',
    'yellow-200',
    'yellow-300',
    'yellow-400',
    'yellow-500',
    'yellow-600',
    'yellow-700',
    'yellow-800',
    'yellow-900',
    'yellow-950',
    'lime-50',
    'lime-100',
    'lime-200',
    'lime-300',
    'lime-400',
    'lime-500',
    'lime-600',
    'lime-700',
    'lime-800',
    'lime-900',
    'lime-950',
    'green-50',
    'green-100',
    'green-200',
    'green-300',
    'green-400',
    'green-500',
    'green-600',
    'green-700',
    'green-800',
    'green-900',
    'green-950',
    'emerald-50',
    'emerald-100',
    'emerald-200',
    'emerald-300',
    'emerald-400',
    'emerald-500',
    'emerald-600',
    'emerald-700',
    'emerald-800',
    'emerald-900',
    'emerald-950',
    'teal-50',
    'teal-100',
    'teal-200',
    'teal-300',
    'teal-400',
    'teal-500',
    'teal-600',
    'teal-700',
    'teal-800',
    'teal-900',
    'teal-950',
    'cyan-50',
    'cyan-100',
    'cyan-200',
    'cyan-300',
    'cyan-400',
    'cyan-500',
    'cyan-600',
    'cyan-700',
    'cyan-800',
    'cyan-900',
    'cyan-950',
    'sky-50',
    'sky-100',
    'sky-200',
    'sky-300',
    'sky-400',
    'sky-500',
    'sky-600',
    'sky-700',
    'sky-800',
    'sky-900',
    'sky-950',
    'blue-50',
    'blue-100',
    'blue-200',
    'blue-300',
    'blue-400',
    'blue-500',
    'blue-600',
    'blue-700',
    'blue-800',
    'blue-900',
    'blue-950',
    'indigo-50',
    'indigo-100',
    'indigo-200',
    'indigo-300',
    'indigo-400',
    'indigo-500',
    'indigo-600',
    'indigo-700',
    'indigo-800',
    'indigo-900',
    'indigo-950',
    'violet-50',
    'violet-100',
    'violet-200',
    'violet-300',
    'violet-400',
    'violet-500',
    'violet-600',
    'violet-700',
    'violet-800',
    'violet-900',
    'violet-950',
    'purple-50',
    'purple-100',
    'purple-200',
    'purple-300',
    'purple-400',
    'purple-500',
    'purple-600',
    'purple-700',
    'purple-800',
    'purple-900',
    'purple-950',
    'fuchsia-50',
    'fuchsia-100',
    'fuchsia-200',
    'fuchsia-300',
    'fuchsia-400',
    'fuchsia-500',
    'fuchsia-600',
    'fuchsia-700',
    'fuchsia-800',
    'fuchsia-900',
    'fuchsia-950',
    'pink-50',
    'pink-100',
    'pink-200',
    'pink-300',
    'pink-400',
    'pink-500',
    'pink-600',
    'pink-700',
    'pink-800',
    'pink-900',
    'pink-950',
    'rose-50',
    'rose-100',
    'rose-200',
    'rose-300',
    'rose-400',
    'rose-500',
    'rose-600',
    'rose-700',
    'rose-800',
    'rose-900',
    'rose-950',
]
| [
"falko@zauberzeug.com"
] | falko@zauberzeug.com |
8c5b8f0340d21b17ae0804ac332a38a50c15baa9 | a0556d5e8368bf171b9019aba03e65b06e8c12e4 | /secao12_modulos_e_pacotes/geek/university/geek3.py | 5c1e707e9535d9582c50e3c9ac27f866491b0e98 | [] | no_license | RianMarlon/Python-Geek-University | f1c9db588f23ce8e6699d1352ebfb3428e0ab1ec | 3be7ec5c35bf74b1b2152de63db95bee33ee1719 | refs/heads/master | 2020-06-25T04:41:52.911513 | 2020-02-01T16:18:35 | 2020-02-01T16:18:35 | 199,204,603 | 23 | 8 | null | null | null | null | UTF-8 | Python | false | false | 36 | py |
def funcao3():
    """Return the constant brand string used by this module."""
    brand = 'Geek'
    return brand
| [
"rianmarlon20@yahoo.com"
] | rianmarlon20@yahoo.com |
5f040b82054005a76c51c1006456b5a72c0e283a | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /repos/indico-master/indico/modules/events/editing/models/editable.py | d1328c69f73c9fb4e11e59e28f4f86e16a39990c | [
"MIT"
] | permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,774 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from sqlalchemy import orm
from sqlalchemy.event import listens_for
from sqlalchemy.orm import column_property
from sqlalchemy.sql import select
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.util.i18n import _
from indico.util.locators import locator_property
from indico.util.string import format_repr
from indico.util.struct.enum import RichIntEnum
from indico.web.flask.util import url_for
class EditableType(RichIntEnum):
    # Parallel lists indexed by the enum's integer value; index 0 is a
    # placeholder because member values start at 1.
    __titles__ = [None, _('Paper'), _('Slides'), _('Poster')]
    __editor_permissions__ = [None, 'paper_editing', 'slides_editing', 'poster_editing']
    paper = 1
    slides = 2
    poster = 3
    @property
    def editor_permission(self):
        """Name of the event permission required to edit editables of this type."""
        return self.__editor_permissions__[self]
class EditableState(RichIntEnum):
    # Parallel lists of display titles and CSS classes, indexed by the enum's
    # integer value; index 0 is a placeholder because member values start at 1.
    __titles__ = [None, _('New'), _('Ready for Review'), _('Needs Confirmation'), _('Needs Changes'),
                  _('Accepted'), _('Rejected')]
    __css_classes__ = [None, 'highlight', 'ready', 'warning', 'warning', 'success', 'error']
    new = 1
    ready_for_review = 2
    needs_submitter_confirmation = 3
    needs_submitter_changes = 4
    accepted = 5
    rejected = 6
class Editable(db.Model):
__tablename__ = 'editables'
__table_args__ = (db.UniqueConstraint('contribution_id', 'type'),
{'schema': 'event_editing'})
id = db.Column(
db.Integer,
primary_key=True
)
contribution_id = db.Column(
db.ForeignKey('events.contributions.id'),
index=True,
nullable=False
)
type = db.Column(
PyIntEnum(EditableType),
nullable=False
)
editor_id = db.Column(
db.ForeignKey('users.users.id'),
index=True,
nullable=True
)
published_revision_id = db.Column(
db.ForeignKey('event_editing.revisions.id'),
index=True,
nullable=True
)
contribution = db.relationship(
'Contribution',
lazy=True,
backref=db.backref(
'editables',
lazy=True,
)
)
editor = db.relationship(
'User',
lazy=True,
backref=db.backref(
'editor_for_editables',
lazy='dynamic'
)
)
published_revision = db.relationship(
'EditingRevision',
foreign_keys=published_revision_id,
lazy=True,
)
# relationship backrefs:
# - revisions (EditingRevision.editable)
def __repr__(self):
return format_repr(self, 'id', 'contribution_id', 'type')
@locator_property
def locator(self):
return dict(self.contribution.locator, type=self.type.name)
@property
def event(self):
return self.contribution.event
def _has_general_editor_permissions(self, user):
"""Whether the user has general editor permissions on the Editable.
This means that the user has editor permissions for the editable's type,
but does not need to be the assigned editor.
"""
# Editing (and event) managers always have editor-like access
return (
self.event.can_manage(user, permission='editing_manager') or
self.event.can_manage(user, permission=self.type.editor_permission)
)
def can_see_timeline(self, user):
"""Whether the user can see the editable's timeline.
This is pure read access, without any ability to make changes
or leave comments.
"""
# Anyone with editor access to the editable's type can see the timeline.
# Users associated with the editable's contribution can do so as well.
return (
self._has_general_editor_permissions(user) or
self.contribution.can_submit_proceedings(user) or
self.contribution.is_user_associated(user, check_abstract=True)
)
def can_perform_submitter_actions(self, user):
"""Whether the user can perform any submitter actions.
These are actions such as uploading a new revision after having
been asked to make changes or approving/rejecting changes made
by an editor.
"""
# If the user can't even see the timeline, we never allow any modifications
if not self.can_see_timeline(user):
return False
# Anyone who can submit new proceedings can also perform submitter actions,
# i.e. the abstract submitter and anyone with submission access to the contribution.
return self.contribution.can_submit_proceedings(user)
def can_perform_editor_actions(self, user):
"""Whether the user can perform any Editing actions.
These are actions usually made by the assigned Editor of the
editable, such as making changes, asking the user to make changes,
or approving/rejecting the editable.
"""
from indico.modules.events.editing.settings import editable_type_settings
# If the user can't even see the timeline, we never allow any modifications
if not self.can_see_timeline(user):
return False
# Editing/event managers can perform actions when they are the assigned editor
# even when editing is disabled in the settings
if self.editor == user and self.event.can_manage(user, permission='editing_manager'):
return True
# Editing needs to be enabled in the settings otherwise
if not editable_type_settings[self.type].get(self.event, 'editing_enabled'):
return False
# Editors need the permission on the editable type and also be the assigned editor
if self.editor == user and self.event.can_manage(user, permission=self.type.editor_permission):
return True
return False
def can_use_internal_comments(self, user):
"""Whether the user can create/see internal comments."""
return self._has_general_editor_permissions(user)
def can_comment(self, user):
"""Whether the user can comment on the editable."""
# We allow any user associated with the contribution to comment, even if they are
# not authorized to actually perform submitter actions.
return (self.event.can_manage(user, permission=self.type.editor_permission)
or self.event.can_manage(user, permission='editing_manager')
or self.contribution.is_user_associated(user, check_abstract=True))
def can_assign_self(self, user):
"""Whether the user can assign themselves on the editable."""
from indico.modules.events.editing.settings import editable_type_settings
type_settings = editable_type_settings[self.type]
if self.editor and (self.editor == user or not self.can_unassign(user)):
return False
return ((self.event.can_manage(user, permission=self.type.editor_permission)
and type_settings.get(self.event, 'editing_enabled')
and type_settings.get(self.event, 'self_assign_allowed'))
or self.event.can_manage(user, permission='editing_manager'))
def can_unassign(self, user):
"""Whether the user can unassign the editor of the editable."""
from indico.modules.events.editing.settings import editable_type_settings
type_settings = editable_type_settings[self.type]
return (self.event.can_manage(user, permission='editing_manager')
or (self.editor == user
and self.event.can_manage(user, permission=self.type.editor_permission)
and type_settings.get(self.event, 'editing_enabled')
and type_settings.get(self.event, 'self_assign_allowed')))
@property
def review_conditions_valid(self):
from indico.modules.events.editing.models.review_conditions import EditingReviewCondition
query = EditingReviewCondition.query.with_parent(self.event).filter_by(type=self.type)
review_conditions = [{ft.id for ft in cond.file_types} for cond in query]
file_types = {file.file_type_id for file in self.revisions[-1].files}
if not review_conditions:
return True
return any(file_types >= cond for cond in review_conditions)
    @property
    def editing_enabled(self):
        """Whether editing is enabled for this editable's type in its event."""
        from indico.modules.events.editing.settings import editable_type_settings
        return editable_type_settings[self.type].get(self.event, 'editing_enabled')
    @property
    def external_timeline_url(self):
        """Absolute (externally usable) URL of the editable's timeline page."""
        return url_for('event_editing.editable', self, _external=True)
    @property
    def timeline_url(self):
        """Relative URL of the editable's timeline page."""
        return url_for('event_editing.editable', self)
    def log(self, *args, **kwargs):
        """Log with prefilled metadata for the editable.

        Forwards to the event's log, injecting this editable's id into the
        log entry metadata so entries can be tied back to the editable.
        """
        self.event.log(*args, meta={'editable_id': self.id}, **kwargs)
@listens_for(orm.mapper, 'after_configured', once=True)
def _mappers_configured():
    """Attach computed column properties to ``Editable`` once all mappers exist."""
    from .revisions import EditingRevision, FinalRevisionState, InitialRevisionState

    # Editable.state -- the state of the editable itself
    # Derived from the editable's newest revision (picked by the correlated
    # subquery below): while the revision has no final state yet, its initial
    # state determines the editable's state.
    cases = db.cast(db.case({
        FinalRevisionState.none: db.case({
            InitialRevisionState.new: EditableState.new,
            InitialRevisionState.ready_for_review: EditableState.ready_for_review,
            InitialRevisionState.needs_submitter_confirmation: EditableState.needs_submitter_confirmation
        }, value=EditingRevision.initial_state),
        # the states resulting in None are always followed by another revision, so we don't ever
        # expect the latest revision of an editable to have such a state
        FinalRevisionState.replaced: None,
        FinalRevisionState.needs_submitter_confirmation: None,
        FinalRevisionState.needs_submitter_changes: EditableState.needs_submitter_changes,
        FinalRevisionState.accepted: EditableState.accepted,
        FinalRevisionState.rejected: EditableState.rejected,
    }, value=EditingRevision.final_state), PyIntEnum(EditableState))
    # newest revision only; correlate on the outer Editable row
    query = (select([cases])
             .where(EditingRevision.editable_id == Editable.id)
             .order_by(EditingRevision.created_dt.desc())
             .limit(1)
             .correlate_except(EditingRevision))
    Editable.state = column_property(query)

    # Editable.revision_count -- the number of revisions the editable has
    query = (select([db.func.count(EditingRevision.id)])
             .where(EditingRevision.editable_id == Editable.id)
             .correlate_except(EditingRevision))
    Editable.revision_count = column_property(query)
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
91a43d6b4ce8483705b6463ee0ce47c2bcb62dfd | ee6acbd5fcd0fcd16230e96a4a539de41a02c97e | /operators/kubefed/python/pulumi_pulumi_kubernetes_crds_operators_kubefed/scheduling/v1alpha1/_inputs.py | 151deeb1cd170868eccfa41ce88d7e5f05c7c50b | [
"Apache-2.0"
] | permissive | isabella232/pulumi-kubernetes-crds | 777e78137aaf6525a44b61a02dccf91bf0d87a14 | 372c4c0182f6b899af82d6edaad521aa14f22150 | refs/heads/master | 2023-03-15T04:29:16.039753 | 2020-12-30T19:35:54 | 2020-12-30T19:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,116 | py | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ReplicaSchedulingPreferenceSpecArgs',
]
@pulumi.input_type
class ReplicaSchedulingPreferenceSpecArgs:
    """Input arguments for a ReplicaSchedulingPreference spec.

    NOTE: this class was generated by crd2pulumi (see the file header); any
    manual change must stay in sync with the CRD schema.
    """

    def __init__(__self__, *,
                 target_kind: pulumi.Input[str],
                 total_replicas: pulumi.Input[int],
                 clusters: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 rebalance: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] target_kind: TODO (@irfanurrehman); upgrade this to label selector only if need be. The idea of this API is to have a a set of preferences which can be used for a target FederatedDeployment or FederatedReplicaset. Although the set of preferences in question can be applied to multiple target objects using label selectors, but there are no clear advantages of doing that as of now. To keep the implementation and usage simple, matching ns/name of RSP resource to the target resource is sufficient and only additional information needed in RSP resource is a target kind (FederatedDeployment or FederatedReplicaset).
        :param pulumi.Input[int] total_replicas: Total number of pods desired across federated clusters. Replicas specified in the spec for target deployment template or replicaset template will be discarded/overridden when scheduling preferences are specified.
        :param pulumi.Input[Mapping[str, Any]] clusters: A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in these clusters. "*" (if provided) applies to all clusters if an explicit mapping is not provided. If omitted, clusters without explicit preferences should not have any replicas scheduled.
        :param pulumi.Input[bool] rebalance: If set to true then already scheduled and running replicas may be moved to other clusters in order to match current state to the specified preferences. Otherwise, if set to false, up and running replicas will not be moved.
        """
        pulumi.set(__self__, "target_kind", target_kind)
        pulumi.set(__self__, "total_replicas", total_replicas)
        # optional fields are only set when provided so unset values stay
        # absent from the rendered resource
        if clusters is not None:
            pulumi.set(__self__, "clusters", clusters)
        if rebalance is not None:
            pulumi.set(__self__, "rebalance", rebalance)

    @property
    @pulumi.getter(name="targetKind")
    def target_kind(self) -> pulumi.Input[str]:
        """
        TODO (@irfanurrehman); upgrade this to label selector only if need be. The idea of this API is to have a a set of preferences which can be used for a target FederatedDeployment or FederatedReplicaset. Although the set of preferences in question can be applied to multiple target objects using label selectors, but there are no clear advantages of doing that as of now. To keep the implementation and usage simple, matching ns/name of RSP resource to the target resource is sufficient and only additional information needed in RSP resource is a target kind (FederatedDeployment or FederatedReplicaset).
        """
        return pulumi.get(self, "target_kind")

    @target_kind.setter
    def target_kind(self, value: pulumi.Input[str]):
        pulumi.set(self, "target_kind", value)

    @property
    @pulumi.getter(name="totalReplicas")
    def total_replicas(self) -> pulumi.Input[int]:
        """
        Total number of pods desired across federated clusters. Replicas specified in the spec for target deployment template or replicaset template will be discarded/overridden when scheduling preferences are specified.
        """
        return pulumi.get(self, "total_replicas")

    @total_replicas.setter
    def total_replicas(self, value: pulumi.Input[int]):
        pulumi.set(self, "total_replicas", value)

    @property
    @pulumi.getter
    def clusters(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A mapping between cluster names and preferences regarding a local workload object (dep, rs, .. ) in these clusters. "*" (if provided) applies to all clusters if an explicit mapping is not provided. If omitted, clusters without explicit preferences should not have any replicas scheduled.
        """
        return pulumi.get(self, "clusters")

    @clusters.setter
    def clusters(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "clusters", value)

    @property
    @pulumi.getter
    def rebalance(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true then already scheduled and running replicas may be moved to other clusters in order to match current state to the specified preferences. Otherwise, if set to false, up and running replicas will not be moved.
        """
        return pulumi.get(self, "rebalance")

    @rebalance.setter
    def rebalance(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "rebalance", value)
| [
"albertzhong0@gmail.com"
] | albertzhong0@gmail.com |
def square_root(a):
    """Return the square root of *a* via the Babylonian (Newton) method.

    Starts from x = 1 and repeatedly averages x with a/x.  The previous
    version looped until ``x * x == a`` held *exactly*, which never
    terminates for most non-perfect squares in floating point; the loop now
    also stops once the iteration reaches a floating-point fixed point (or a
    two-value cycle), i.e. no further precision can be gained.

    >>> square_root(9)
    3.0
    """
    if a < 0:
        # The iteration does not converge for negative inputs.
        raise ValueError("square root of a negative number is undefined")
    x = 1.0
    prev = None
    while x * x != a:
        nxt = square_root_update(x, a)
        if nxt == x or nxt == prev:
            # No further progress possible at float precision.
            break
        prev, x = x, nxt
    return x


def square_root_update(x, a):
    """One Newton step for sqrt: average the guess with a / guess."""
    return average(x, a/x)


def average(x, y):
    """Arithmetic mean of x and y."""
    return (x + y)/2
| [
"erickmiller@gmail.com"
] | erickmiller@gmail.com |
8f6cbe3b5a97cfbd4419a1be1795d359956892fa | 53eee7eb899cb518983008532257037fb89def13 | /51.n-queens.py | a39e73f83fcbef748541b1370f3597e7ac75d757 | [] | no_license | chenxu0602/LeetCode | 0deb3041a66cb15e12ed4585bbe0fefce5dc6b26 | 3dc5af2bc870fcc8f2142130fcd2b7cab8733151 | refs/heads/master | 2023-07-05T19:26:21.608123 | 2023-07-02T08:35:35 | 2023-07-02T08:35:35 | 233,351,978 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py | #
# @lc app=leetcode id=51 lang=python3
#
# [51] N-Queens
#
# https://leetcode.com/problems/n-queens/description/
#
# algorithms
# Hard (43.46%)
# Likes: 1437
# Dislikes: 63
# Total Accepted: 177.9K
# Total Submissions: 408.2K
# Testcase Example: '4'
#
# The n-queens puzzle is the problem of placing n queens on an n×n chessboard
# such that no two queens attack each other.
#
#
#
# Given an integer n, return all distinct solutions to the n-queens puzzle.
#
# Each solution contains a distinct board configuration of the n-queens'
# placement, where 'Q' and '.' both indicate a queen and an empty space
# respectively.
#
# Example:
#
#
# Input: 4
# Output: [
# [".Q..", // Solution 1
# "...Q",
# "Q...",
# "..Q."],
#
# ["..Q.", // Solution 2
# "Q...",
# "...Q",
# ".Q.."]
# ]
# Explanation: There exist two distinct solutions to the 4-queens puzzle as
# shown above.
#
#
#
# @lc code=start
class Solution:
    """Backtracking N-Queens solver.

    A partial placement is a list of column indices, one entry per filled
    row; the two diagonal lists record the row-col / row+col values that are
    already occupied, so every attack direction is a constant-time check.
    """

    def solveNQueens(self, n: int) -> List[List[str]]:
        boards = []

        def place(cols, diag_down, diag_up):
            row = len(cols)
            if row == n:
                boards.append(cols)
                return
            for col in range(n):
                # a square is safe iff its column and both diagonals are free
                if col in cols or row - col in diag_down or row + col in diag_up:
                    continue
                place(cols + [col], diag_down + [row - col], diag_up + [row + col])

        place([], [], [])
        # render each solution: one 'Q' per row at the recorded column
        return [['.' * col + 'Q' + '.' * (n - col - 1) for col in board]
                for board in boards]
# @lc code=end
| [
"chenxu@Chens-iMac.local"
] | chenxu@Chens-iMac.local |
8ece111453a17615bb8a45167b123bf01ff41168 | 4bfc3c184e736bb68dccbb6d5657f11c950df002 | /tests/operators/vector/test_scatter_nd_ad_001.py | 895444892735fed5ec3577c5ba74bc149e31e5f8 | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | laekov/akg | 159aa64ef6135222b5af784c408731275dfa9bdb | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | refs/heads/master | 2022-12-01T04:09:03.548063 | 2020-08-19T08:38:57 | 2020-08-19T08:41:28 | 288,678,192 | 0 | 0 | Apache-2.0 | 2020-08-19T08:41:30 | 2020-08-19T08:36:53 | Python | UTF-8 | Python | false | false | 2,249 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import datetime
import os
from base import TestBase
import pytest
from test_run.scatter_nd_ad_run import scatter_nd_ad_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
    """Registers the scatter_nd_ad operator cases with the shared TestBase runner."""

    def setup(self):
        """Build the test-case table that ``common_run`` consumes."""
        case_name = "test_akg_scatter_nd_ad_001"
        case_path = os.getcwd()

        # params init
        self.params_init(case_name, case_path)

        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        self.testarg = [
            ## testflag,opfuncname,testRunArgs, dimArgs
            ('scatter_nd_ad_001', scatter_nd_ad_run, ([4, 1], [4], [8], "int32", 'float16'), ((32, 1), (32, 1))),
            #('scatter_nd_ad_002', scatter_nd_ad_run, ([3, 1], [3, 8], [8, 8], "int32", 'float32'), ((64, 1), (64, 1))),
            #('scatter_nd_ad_003', scatter_nd_ad_run, ([2, 1], [2, 8, 8], [8, 8, 8], "int32", 'float32'), ((64, 1), (64, 1))),
        ]
        return

    @pytest.mark.level2
    @pytest.mark.env_onecard
    @pytest.mark.platform_x86_ascend_training
    def test_run(self):
        # Runs every entry registered in setup() through the common runner.
        self.common_run(self.testarg)

    def teardown(self):
        """
        clean environment
        :return:
        """
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
# if __name__ == "__main__":
# a = TestCase()
# a.setup()
# a.test_run()
# a.teardown()
| [
"ckey.chengbin@huawei.com"
] | ckey.chengbin@huawei.com |
55513fc2b5ef956726d964dc9117f62ffa961065 | bfe394e1b7d8a2ff34e37ae65df8cc52070c69d8 | /Source/External/DataProject/Data_PIREP.py | be0e8ee121eef155004d8c07ca2cb9b6d629a481 | [] | no_license | Jack-GVDL/PredictModel | bb32d37a5c18a656d5ebed36098ba3fac435fb96 | 20495072fb776c31c4bb5f2ddeecda1b43fcc52e | refs/heads/main | 2023-04-30T05:47:34.364328 | 2021-05-11T09:25:13 | 2021-05-11T09:25:13 | 366,314,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,396 | py | from typing import *
from ..DataChain import *
from .Data_Raw import DataKeyLib_Raw
class DataKey_PIREP(DataKey):
    """Key layout of a PIREP (pilot report) record.

    For every field the instance exposes both ``key_<field>`` (the raw
    DataKey taken from DataKeyLib_Raw) and ``<field>`` (the integer index
    assigned to that key), e.g. ``self.key_date`` / ``self.date``.
    """

    def __init__(self):
        super().__init__()

        self._name = "PIREP"

        # (field name, raw key) pairs; the registration order below defines
        # the index that getKeyIndex_Key() will later report for each key
        field_keys = (
            ("date", DataKeyLib_Raw.key_date),
            ("time", DataKeyLib_Raw.key_time),
            ("longitude", DataKeyLib_Raw.key_longitude),
            ("latitude", DataKeyLib_Raw.key_latitude),
            ("height", DataKeyLib_Raw.key_height),
            ("pirep_turbulence", DataKeyLib_Raw.key_pirep_turbulence),
            ("pirep_icing", DataKeyLib_Raw.key_pirep_icing),
        )

        # expose the raw key objects and register them, in order
        for field, key in field_keys:
            setattr(self, "key_" + field, key)
            self.addDataKey(key)

        # resolve and expose the index of every registered key
        for field, key in field_keys:
            setattr(self, field, self.getKeyIndex_Key(key))

    def __del__(self):
        return
class DataHandler_Text_PIREP(DataHandler):
    """Loads PIREP (pilot report) records from a plain-text file into a DataList.

    The record layout is described in the format note above
    ``_loadSingleData_``.  Dumping records back to text is not supported.
    """

    def __init__(self):
        super().__init__()

        # data
        # _data_key: fixed key layout for PIREP records
        # file_text: text-file backend the records are read from
        self._data_key = DataKey_PIREP()
        self.file_text = File_Text()

        # operation
        # ...

    def __del__(self):
        return

    # Property
    @property
    def data_key(self) -> DataKey:
        return self._data_key

    @data_key.setter
    def data_key(self, key: DataKey) -> DataKey:
        # The key layout is fixed for this handler and must not be replaced.
        raise RuntimeError

    # Operation
    # ...

    # Protected
    def _load_(self, data_list: DataList) -> bool:
        """Read the backing text file and append one record per line.

        Returns False when no file backend is set or loading it fails.
        """
        # check
        if self.file_text is None:
            return False

        # load from plain text
        if not self.file_text.load():
            return False

        # for each item (Data_PIREP)
        for item in self.file_text.data:
            self._loadSingleData_(item, data_list)
        return True

    def _dump_(self, data: DataList) -> bool:
        # Writing PIREP text files is intentionally unsupported.
        raise RuntimeError

    """
    Format (tab delimited):
    [0] Event date DDMMYYYY/HHMM (in UTC)
    [1] Turbulence intensity
    [2] Icing intensity (may be absent)
    [3] Flight level (in metres)
    [4] Latitude
    [5] Longitude
    """
    def _loadSingleData_(self, s: str, data_list: DataList) -> None:
        """Parse one whitespace-delimited PIREP line into *data_list*.

        Lines with fewer than 5 fields are skipped.  With exactly 5 fields
        the optional icing intensity is treated as absent (stored as 0) and
        every later field index shifts left by one.

        NOTE(review): the record is created in *data_list* before parsing
        finishes, so a line that fails a conversion below leaves a partially
        filled record behind -- confirm this is intended.
        """
        # setup - file data
        string_list: List[str] = s.split()
        offset_icing: int = 0

        if len(string_list) < 5:
            return

        # setup - data_key
        data_key: DataKey_PIREP = self._data_key

        # create object
        # TODO: assume: creation must be success
        data = data_list.createData()

        # icing may be absent
        try:
            if len(string_list) == 5:
                offset_icing = -1
                data[data_key.pirep_icing] = 0
            else:
                data[data_key.pirep_icing] = int(string_list[2])
        except ValueError:
            return

        # direct conversion
        try:
            data[data_key.date] = self._convertDate_(string_list[0])
            data[data_key.time] = self._convertTime_(string_list[0])
            data[data_key.pirep_turbulence] = int(string_list[1])
            data[data_key.latitude] = float(string_list[4 + offset_icing])
            data[data_key.longitude] = float(string_list[5 + offset_icing])
        except ValueError:
            return

        # convert height
        try:
            # from string to int
            height_list: List[str] = string_list[3 + offset_icing].split("-")

            # if the height is a range, get the mean
            if len(height_list) == 1:
                height = int(height_list[0])
            else:
                height = (int(height_list[0]) + int(height_list[1])) / 2

            # convert the value from metric to feet
            # 1 metre = 3.2808398950131 feet
            height = height * 3.2808398950131

            # set to data
            data[data_key.height] = int(height)
        except ValueError:
            return

    def _convertDate_(self, s: str) -> List[int]:
        """Convert the 'DDMMYYYY/...' prefix into [year, month, day]; zeros on failure."""
        try:
            temp = [int(s[4:8]), int(s[2:4]), int(s[0:2])]
        except ValueError:
            return [0, 0, 0]
        return temp

    def _convertTime_(self, s: str) -> List[int]:
        """Convert the '.../HHMM' suffix into [hour, minute, 0]; zeros on failure."""
        try:
            temp = [int(s[9:11]), int(s[11:13]), 0]
        except ValueError:
            return [0, 0, 0]
        return temp
| [
"33114105+Jack-GVDL@users.noreply.github.com"
] | 33114105+Jack-GVDL@users.noreply.github.com |
186471bfecb77345d925d923453b48eb33a11159 | 666a077ce8ba97d8d234bd18d5d02cdc8ccb11e0 | /src/opencmiss/neon/ui/delegates/spinboxdelegate.py | f6c2141213f50bd10b94a5d3a27028d88f73428e | [
"Apache-2.0"
] | permissive | rchristie/neon | 1c8a4c428991d4a6d872c00e3336b4fb1fa005f0 | 3d59f24f680cf981d7221375a34cdff8d73f82a2 | refs/heads/develop | 2021-01-21T07:45:01.603190 | 2016-08-01T03:46:23 | 2016-08-01T03:46:23 | 47,146,276 | 0 | 0 | null | 2015-11-30T21:09:46 | 2015-11-30T21:09:46 | null | UTF-8 | Python | false | false | 1,491 | py | '''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from PySide import QtCore, QtGui
class SpinBoxDelegate(QtGui.QStyledItemDelegate):
    """Item delegate that edits integer cells with a QSpinBox (range 3-9999)."""

    def __init__(self, parent=None):
        super(SpinBoxDelegate, self).__init__(parent)

    def createEditor(self, parent, option, index):
        """Create the frameless spin box used as the cell editor."""
        spin_box = QtGui.QSpinBox(parent)
        spin_box.setFrame(False)
        spin_box.setMinimum(3)
        spin_box.setMaximum(9999)
        spin_box.setValue(9)
        return spin_box

    def setEditorData(self, editor, index):
        """Copy the model's current value (if any) into the editor."""
        model_value = index.model().data(index, QtCore.Qt.EditRole)
        if model_value is not None:
            editor.setValue(int(model_value))

    def setModelData(self, editor, model, index):
        """Write the editor's value back into the model."""
        model.setData(index, editor.value(), QtCore.Qt.EditRole)

    def updateEditorGeometry(self, editor, option, index):
        """Make the editor fill the cell's rectangle."""
        editor.setGeometry(option.rect)
| [
"h.sorby@auckland.ac.nz"
] | h.sorby@auckland.ac.nz |
16d624c7e137c1f1d6d1854d6e049c5ddbc7b9b7 | 2d293a15dd64916f1f1e33e5a894d8ee96757072 | /questions/q26_rmDuplicFromSortedArray.py | 5cd4e53679c35656dc0e76ddbb61b3e529a22162 | [] | no_license | liang12k/leetcodeoj | 66238b76cf64ae0aeef7454fe2d449a781f461a7 | d432d1ac17e96f8a6f6a2d327020b58c46c34ecb | refs/heads/master | 2021-01-10T08:39:01.181646 | 2016-02-26T04:08:50 | 2016-02-26T04:08:50 | 36,776,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | '''
::KEY:: need to edit 'nums' list input!
Given a sorted array, remove the duplicates in place such that each element
appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
For example,
Given input array nums = [1,1,2],
Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively.
It doesn't matter what you leave beyond the new length.
'''
class Solution(object):
    # @param {integer[]} nums
    # @return {integer}
    def removeDuplicates(self, nums):
        """Remove duplicates from the sorted list in place; return the
        number of unique values.

        Fixes two defects of the previous version:
        * an empty input now returns the integer 0 (it used to return the
          list itself, breaking the declared integer return type);
        * the list is truncated with ``del`` so the caller's list really
          shrinks (``nums = nums[:counter]`` only rebound the local name
          and had no effect outside the function).
        """
        if not nums:
            return 0
        # 'write' is the next slot for a unique value; nums[0] always stays
        write = 1
        for read in range(1, len(nums)):
            if nums[read] != nums[read - 1]:
                nums[write] = nums[read]
                write += 1
        # drop the leftover tail in place
        del nums[write:]
        return write
if __name__ == "__main__":
    # Quick manual check.  The parenthesized print call runs on both
    # Python 2 and Python 3 (the bare `print expr` statement is py2-only).
    inp = [1, 1, 2]
    print(Solution().removeDuplicates(inp))
| [
"kenneth.liang12@gmail.com"
] | kenneth.liang12@gmail.com |
37baf800877b4e64c1ed930a78180a30033ad4d9 | 7730655277bcb97ce0f36e2a9cb022e0706c9c19 | /mltoolkit/mlmo/utils/helpers/computation.py | a9752cd94c1e5754e3e7113531587750cac06b5a | [
"MIT"
] | permissive | developerdrone/Copycat-abstractive-opinion-summarizer | 9de3ade697936934a0a9804bf35ddadaf55ce3f0 | 04fe5393a7bb6883516766b762f6a0c530e95375 | refs/heads/master | 2023-07-29T07:54:14.955146 | 2021-09-02T06:08:24 | 2021-09-02T06:08:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | import numpy as np
def cosine_sim(a, b):
    """Cosine similarity: dot(a, b) normalised by the product of both norms."""
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / norm_product
def score_candidates(target_reprs, cand_reprs, score_func):
    """
    Scores every target repr against every candidate repr using a provided
    score function.
    :return: scores: [target_count, candidate_count] float32 matrix.
    """
    n_targets = len(target_reprs)
    n_cands = len(cand_reprs)
    scores = np.empty((n_targets, n_cands), dtype='float32')
    for row in range(n_targets):
        for col in range(n_cands):
            scores[row, col] = score_func(target_reprs[row], cand_reprs[col])
    return scores
def select_cands_by_th(slot_scores, th=0.4):
    """
    Selects candidates(indxs) based on their avg. scores exceeding th.
    :param slot_scores: [source_slot_segms_count, cand_segms_count].
    :param th: threshold value.
    :return: tuple of index arrays (np.where output).
    """
    mean_scores = slot_scores.mean(axis=0)
    return np.where(mean_scores > th)
def select_cands_by_rank(slot_scores, top_n=1, order='descending'):
    """
    Selects a fixed number of unique candidates per source seq. by sorting their
    scores in a set order.
    :param slot_scores: [source_slot_segs_count, cand_seqs_count].
    :param top_n: candidates per source segments.
    :param order: descending or ascending.
    :return: sorted array of unique candidate indices.
    """
    assert order in ['descending', 'ascending']
    # argsort is ascending, so negate the scores for a descending ranking
    ranking_scores = -slot_scores if order == 'descending' else slot_scores
    per_slot_top = np.argsort(ranking_scores, axis=1)[:, :top_n]
    return np.unique(per_slot_top.reshape((-1,)))
| [
"bulletdll@gmail.com"
] | bulletdll@gmail.com |
cb1e3e04477b93b669491773c0448f16baa2508d | 460b4ec7a8e9332567bae637797c3cf29619d651 | /tfbrain/loss.py | 467da923941aeb65a45b3c83f617940b8c432dde | [] | no_license | agajews/tfbrain-v2 | e1a79d805007913afb91dd60c73bdf64beba4122 | 86f595bf61d41f70af2bcc6ac3cd6abd6aa0615f | refs/heads/master | 2021-01-13T09:10:02.249595 | 2016-09-26T13:04:02 | 2016-09-26T13:04:02 | 69,250,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | import tensorflow as tf
class Loss(object):
    """Base class for TF losses.

    Subclasses implement ``_build()``, which must set ``self.loss`` (a
    scalar TF tensor) from ``self.y_hat``, ``self.y`` and optionally
    ``self.mask``.
    """

    def __init__(self, **kwargs):
        pass

    def get_loss(self):
        """Return the TF loss tensor (only valid after build())."""
        return self.loss

    def build(self, net, y, mask=None):
        """net: model exposing get_output()/get_sess()/get_feed_dict()
        y: a TF placeholder representing the expected output
        mask: optional TF placeholder used to mask the model output"""
        self.net = net
        self.y_hat = net.get_output()
        self.y = y
        self.mask = mask
        self.sess = net.get_sess()
        self._build()

    def eval(self, xs, y, mask=None):
        """Run the session and return the loss value for the given batch."""
        feed_dict = self.net.get_feed_dict(xs)
        feed_dict.update({self.y: y})
        if mask is not None:
            # Bugfix: this used to pass the *set* {self.mask, mask}, which
            # dict.update cannot consume as key/value pairs and which
            # therefore raised instead of feeding the mask tensor.
            feed_dict.update({self.mask: mask})
        return self.sess.run(self.loss, feed_dict=feed_dict)

    def _build(self):
        '''Set self.loss from self.y_hat (model output tensor) and self.y
        (expected-output placeholder); must be overridden by subclasses.'''
        raise NotImplementedError()
class MSE(Loss):
    """Mean squared error loss.

    With a mask, ``y_hat * mask`` is summed over axis 1 first, so ``y`` is
    compared against the masked per-example sum (presumably selecting one
    output per row -- exact usage depends on the caller).
    """

    def _build(self):
        if self.mask is None:
            errors = self.y - self.y_hat
        else:
            # reduce over axis 1 so only the masked entries contribute
            errors = self.y - tf.reduce_sum(self.y_hat * self.mask,
                                            reduction_indices=1)
        self.loss = tf.reduce_mean(tf.square(errors))
class MSEDQN(Loss):
    """DQN-style clipped squared error (Huber loss with delta = 1).

    Errors are quadratic for |error| <= 1 and linear beyond, which bounds
    the gradient magnitude; the per-element losses are summed, not averaged.
    """

    def _build(self):
        if self.mask is None:
            errors = self.y - self.y_hat
        else:
            # mask selects the outputs (summed over axis 1) compared with y
            errors = self.y - tf.reduce_sum(self.y_hat * self.mask,
                                            reduction_indices=1)
        difference = tf.abs(errors)
        # |error| clipped to [0, 1] -> quadratic region
        quadratic_part = tf.clip_by_value(difference, 0.0, 1.0)
        # remainder above 1 -> linear region
        # NOTE(review): tf.sub is pre-1.0 TF API (tf.subtract nowadays)
        linear_part = tf.sub(difference, quadratic_part)
        errors = (0.5 * tf.square(quadratic_part)) + linear_part
        self.loss = tf.reduce_sum(errors)
class Crossentropy(Loss):
    """Categorical cross-entropy loss; masks are not supported.

    ``log_clip`` bounds the predictions into [log_clip, 1.0] before the log,
    avoiding log(0) = -inf.
    """

    def __init__(self, log_clip=1e-10, **kwargs):
        Loss.__init__(self, **kwargs)
        # lower clip bound applied to y_hat before taking the log
        self.log_clip = log_clip

    def _build(self):
        assert self.mask is None
        log_out = tf.log(tf.clip_by_value(self.y_hat, self.log_clip, 1.0))
        # per-example cross-entropy, then the batch mean
        errors = -tf.reduce_sum(self.y * log_out, reduction_indices=1)
        self.loss = tf.reduce_mean(errors)
| [
"agajews@gmail.com"
] | agajews@gmail.com |
0e3087a5b0ef8ba6523aa05734fbd7b086d58c40 | 90f8f9d7a854861d1aed8f10ee8c15c0b41235b5 | /aliyun-python-sdk-green/aliyunsdkgreen/request/v20180509/DeleteSimilarityLibraryRequest.py | 9224c04b650407a9fc43d68c0d1aa02d06f76952 | [
"Apache-2.0"
] | permissive | playbenz/aliyun-openapi-python-sdk | 3c0c5e60f49047a97003998634e68b279a294034 | c74f9a9c6ae751a49c2b19744cf2c8b47ddba47e | refs/heads/master | 2020-07-02T20:30:04.063558 | 2019-08-08T08:13:07 | 2019-08-08T08:13:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class DeleteSimilarityLibraryRequest(RoaRequest):
    """ROA request for the Green 2018-05-09 DeleteSimilarityLibrary API
    (POST /green/similarity/library/delete)."""

    def __init__(self):
        RoaRequest.__init__(self, 'Green', '2018-05-09', 'DeleteSimilarityLibrary','green')
        self.set_uri_pattern('/green/similarity/library/delete')
        self.set_method('POST')

    def get_ClientInfo(self):
        # ClientInfo is carried as a query parameter.
        return self.get_query_params().get('ClientInfo')
def set_ClientInfo(self,ClientInfo):
self.add_query_param('ClientInfo',ClientInfo) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
10d5a8b188c002f1aac4ded62f1e0871aafb0b34 | f4384a8cc6444ee6036e52be86be3b3a7e1a411f | /src/build/openai_/openai_examples_projects/cartpole_openai_ros_examples/catkin_generated/pkg.installspace.context.pc.py | fe88093a9a5b0d8ae481c91470d02c7655479c9f | [] | no_license | ava6969/BaxterDRL | ad1ba4693d86747c4bfe9f0f11ca8899f9ae16d4 | 1a38a2d4ff697d0b0312582fb944feae9a293b23 | refs/heads/master | 2023-01-23T13:53:09.791157 | 2020-11-16T02:11:24 | 2020-11-16T02:11:24 | 309,526,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
# NOTE: this file is auto-generated from catkin's pkg.context.pc.in template;
# the template substituted empty values for this package, so each conditional
# below takes its "else" branch and yields an empty list / string.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "cartpole_openai_ros_examples"
PROJECT_SPACE_DIR = "/usr/local"
PROJECT_VERSION = "0.0.0"
| [
"ava6969@rit.edu"
] | ava6969@rit.edu |
7b351dc5896f7459d7a71a80439a10c009299a66 | b80b5026d926acd9c5128c04e14f725301c7aa87 | /tests/test_dynamodb.py | 8324b3a016f4dc7da9dda7e0f34f06c48d54daf3 | [
"MIT"
] | permissive | sxencon/slam | 3d209ae87f0f6fc21fef5b0498a49641679994ed | e76153211a128e2600d4fb887126dfe2a0b10ea2 | refs/heads/master | 2021-01-23T13:10:13.300442 | 2017-05-24T06:39:31 | 2017-05-24T06:39:31 | 93,233,179 | 2 | 0 | null | 2017-06-03T07:59:43 | 2017-06-03T07:59:42 | null | UTF-8 | Python | false | false | 7,356 | py | from copy import deepcopy
import mock
import unittest
from slam.plugins import dynamodb
from .test_deploy import config as deploy_config
# Module-level fixture: extend the shared deploy config with a plugin config
# for two tables ('t1' and 't2') built through the plugin's init hook.
config = deepcopy(deploy_config)
config.update({'dynamodb_tables': dynamodb.init.func(config, 't1,t2')})
class DynamoDBTests(unittest.TestCase):
    def test_init(self):
        """init() creates a default table config for each name in the
        comma-separated list, tolerating stray whitespace."""
        plugin_config = dynamodb.init.func(config=deploy_config,
                                           dynamodb_tables='a,b ,c, d')
        for table in ['a', 'b', 'c', 'd']:
            self.assertIn(table, plugin_config)
            # defaults: single string hash key 'id', minimal throughput
            self.assertEqual(plugin_config[table], {
                'attributes': {'id': 'S'},
                'key': 'id',
                'read_throughput': 1,
                'write_throughput': 1
            })
    def test_policies(self):
        """An IAM policy is only generated when tables are configured, and it
        must cover every table in every stage."""
        # no tables configured -> no policies at all
        self.assertEqual(dynamodb._get_dynamodb_policies({}), [])
        policies = dynamodb._get_dynamodb_policies(config)
        self.assertEqual(len(policies), 1)
        statement = policies[0]['PolicyDocument']['Statement'][0]
        self.assertEqual(
            statement['Action'],
            ['dynamodb:DeleteItem',
             'dynamodb:GetItem',
             'dynamodb:PutItem',
             'dynamodb:Query',
             'dynamodb:Scan',
             'dynamodb:UpdateItem',
             'dynamodb:DescribeTable'])
        self.assertEqual(len(statement['Resource']), 6)  # 2 tables x 3 stages
        # each resource ARN references its stage-specific table via Fn::Join
        tables = [r['Fn::Join'][1][5]['Ref'] for r in statement['Resource']]
        self.assertEqual(set(tables), {'DevT1DynamoDBTable',
                                       'DevT2DynamoDBTable',
                                       'StagingT1DynamoDBTable',
                                       'StagingT2DynamoDBTable',
                                       'ProdT1DynamoDBTable',
                                       'ProdT2DynamoDBTable'})
    def test_key_schema(self):
        """A lone key becomes the HASH key; a pair adds a RANGE key."""
        self.assertEqual(dynamodb._get_dynamodb_key_schema('foo'),
                         [{'AttributeName': 'foo', 'KeyType': 'HASH'}])
        self.assertEqual(dynamodb._get_dynamodb_key_schema(['foo', 'bar']),
                         [{'AttributeName': 'foo', 'KeyType': 'HASH'},
                          {'AttributeName': 'bar', 'KeyType': 'RANGE'}])
    def test_index_projection(self):
        """Projection mapping: empty/None -> KEYS_ONLY, 'all' -> ALL,
        a list of attributes -> INCLUDE with exactly those attributes."""
        self.assertEqual(dynamodb._get_dynamodb_projection(None),
                         {'ProjectionType': 'KEYS_ONLY'})
        self.assertEqual(dynamodb._get_dynamodb_projection([]),
                         {'ProjectionType': 'KEYS_ONLY'})
        self.assertEqual(dynamodb._get_dynamodb_projection('all'),
                         {'ProjectionType': 'ALL'})
        self.assertEqual(dynamodb._get_dynamodb_projection(['foo', 'bar']),
                         {'ProjectionType': 'INCLUDE',
                          'NonKeyAttributes': ['foo', 'bar']})
    @mock.patch('slam.plugins.dynamodb._get_dynamodb_key_schema',
                return_value='key-schema')
    def test_table_schema(self, *args):
        """The generated table resource reflects the stage-qualified name,
        the declared attributes and the configured throughput; the key
        schema itself is delegated to a helper (mocked here)."""
        cfg = deepcopy(config)
        cfg['dynamodb_tables']['t1']['attributes'] = {'id': 'S', 'name': 'S',
                                                      'age': 'N'}
        cfg['dynamodb_tables']['t1']['read_throughput'] = 2
        cfg['dynamodb_tables']['t1']['write_throughput'] = 4
        table = dynamodb._get_table_resource(cfg, 'dev', 't1')
        self.assertEqual(table['Properties']['TableName'], 'dev.t1')
        self.assertEqual(len(table['Properties']['AttributeDefinitions']), 3)
        # every declared attribute must appear with its configured type
        for attr in table['Properties']['AttributeDefinitions']:
            self.assertIn(attr['AttributeName'],
                          cfg['dynamodb_tables']['t1']['attributes'])
            self.assertEqual(attr['AttributeType'],
                             cfg['dynamodb_tables']['t1']['attributes']
                             [attr['AttributeName']])
        self.assertEqual(table['Properties']['ProvisionedThroughput'],
                         {'ReadCapacityUnits': 2, 'WriteCapacityUnits': 4})
        self.assertEqual(table['Properties']['KeySchema'], 'key-schema')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_projection',
return_value='projection')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_key_schema',
return_value='key-schema')
def test_local_indexes(self, _get_dynamodb_key_schema,
_get_dynamodb_projection):
cfg = deepcopy(config)
cfg['dynamodb_tables']['t1']['attributes'] = {'id': 'S', 'name': 'S'}
cfg['dynamodb_tables']['t1']['local_secondary_indexes'] = {
'index1': {'key': 'foo', 'project': 'bar'},
'index2': {'key': 'foo2', 'project': 'bar2'}
}
table = dynamodb._get_table_resource(cfg, 'dev', 't1')
self.assertIn({
'IndexName': 'index1',
'KeySchema': 'key-schema',
'Projection': 'projection'
}, table['Properties']['LocalSecondaryIndexes'])
self.assertIn({
'IndexName': 'index2',
'KeySchema': 'key-schema',
'Projection': 'projection'
}, table['Properties']['LocalSecondaryIndexes'])
_get_dynamodb_key_schema.assert_any_call('foo')
_get_dynamodb_projection.assert_any_call('bar')
_get_dynamodb_projection.assert_any_call('bar2')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_projection',
return_value='projection')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_key_schema',
return_value='key-schema')
def test_global_indexes(self, _get_dynamodb_key_schema,
_get_dynamodb_projection):
cfg = deepcopy(config)
cfg['dynamodb_tables']['t1']['attributes'] = {'id': 'S', 'name': 'S'}
cfg['dynamodb_tables']['t1']['global_secondary_indexes'] = {
'index2': {'key': 'foo', 'project': 'bar', 'read_throughput': 2,
'write_throughput': 4}
}
table = dynamodb._get_table_resource(cfg, 'dev', 't1')
self.assertEqual(table['Properties']['GlobalSecondaryIndexes'], [{
'IndexName': 'index2',
'KeySchema': 'key-schema',
'Projection': 'projection',
'ProvisionedThroughput': {'ReadCapacityUnits': 2,
'WriteCapacityUnits': 4}
}])
_get_dynamodb_key_schema.assert_any_call('foo')
_get_dynamodb_projection.assert_called_once_with('bar')
@mock.patch('slam.plugins.dynamodb._get_dynamodb_policies',
return_value=['policies'])
@mock.patch('slam.plugins.dynamodb._get_table_resource',
return_value='resource')
def test_cfn_template(self, _get_table_resource, _get_dynamodb_policies):
tpl = dynamodb.cfn_template(config, {'Resources': {
'FunctionExecutionRole': {'Properties': {'Policies': ['foo']}}}})
self.assertEqual(tpl, {'Resources': {
'DevT1DynamoDBTable': 'resource',
'DevT2DynamoDBTable': 'resource',
'StagingT1DynamoDBTable': 'resource',
'StagingT2DynamoDBTable': 'resource',
'ProdT1DynamoDBTable': 'resource',
'ProdT2DynamoDBTable': 'resource',
'FunctionExecutionRole': {
'Properties': {'Policies': ['foo', 'policies']}}
}})
| [
"miguel.grinberg@gmail.com"
] | miguel.grinberg@gmail.com |
c88c7232837375bcd124d5f36ef61d3163ab3c82 | 576f7b951191d6095df8bc2691c8ad7045d55447 | /Django/banglai_django/mysite/polls/urls.py | 15e54bb54c302350a6a471eb238f97f49460855a | [] | no_license | golam-saroar/Python_Learning | f555368420ef65ceef9a80349b9c3bae2c6842b9 | c077a8c2e5738b47cb301f07806af5a4c6714dff | refs/heads/master | 2021-09-22T09:31:35.907800 | 2018-09-07T09:14:25 | 2018-09-07T09:14:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | from django.conf.urls import url
from polls import views
| [
"golamsaroar89@gmail.com"
] | golamsaroar89@gmail.com |
019b11de4d0556da2167bfff1479169d920b15fd | bd62843278ffc297ef8f6d75a931f1f4ca4caaa7 | /exercises/staircase_20180720.py | 9c1efaa04e361e3b7f18cf159a72fc529678d5b8 | [] | no_license | raysmith619/Introduction-To-Programming | d3bae042b4fc17bd56e8631a4d660233d8cd165b | bedc16eb5f6db0ad3b313355df6d51b5161c3835 | refs/heads/master | 2023-07-19T08:43:41.229893 | 2023-07-15T19:22:28 | 2023-07-15T19:22:28 | 132,622,195 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | #staircase.py 18Jul2018
"""
Write a function staircase(first_n, last_n)
"""
def staircase(first_n, last_n):
sum = 0
n = first_n
while n <= last_n:
sum = sum + n
n = n + 1
return sum
print("1,10 ==> ", staircase(1,10))
print("1,100 ==> ", staircase(1,100))
print("5,5 ==> ", staircase(5,5))
print("10,1 ==> ", staircase(10,1))
| [
"noreply@github.com"
] | raysmith619.noreply@github.com |
8b80a972bf22566cb02c31aa33521f89c7fc9fcd | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/cloud/retail/v2beta/retail-v2beta-py/google/cloud/retail_v2beta/types/purge_config.py | c9e1d7f387900608f425ea6351450c0f5396b4cc | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,516 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.cloud.retail.v2beta',
manifest={
'PurgeMetadata',
'PurgeUserEventsRequest',
'PurgeUserEventsResponse',
},
)
class PurgeMetadata(proto.Message):
r"""Metadata related to the progress of the Purge operation.
This will be returned by the
google.longrunning.Operation.metadata field.
"""
class PurgeUserEventsRequest(proto.Message):
r"""Request message for PurgeUserEvents method.
Attributes:
parent (str):
Required. The resource name of the catalog
under which the events are created. The format
is
"projects/${projectId}/locations/global/catalogs/${catalogId}".
filter (str):
Required. The filter string to specify the events to be
deleted with a length limit of 5,000 characters. Empty
string filter is not allowed. The eligible fields for
filtering are:
- ``eventType``: Double quoted
[UserEvent.event_type][google.cloud.retail.v2beta.UserEvent.event_type]
string.
- ``eventTime``: in ISO 8601 "zulu" format.
- ``visitorId``: Double quoted string. Specifying this will
delete all events associated with a visitor.
- ``userId``: Double quoted string. Specifying this will
delete all events associated with a user.
Examples:
- Deleting all events in a time range:
``eventTime > "2012-04-23T18:25:43.511Z" eventTime < "2012-04-23T18:30:43.511Z"``
- Deleting specific eventType in time range:
``eventTime > "2012-04-23T18:25:43.511Z" eventType = "detail-page-view"``
- Deleting all events for a specific visitor:
``visitorId = "visitor1024"``
The filtering fields are assumed to have an implicit AND.
force (bool):
Actually perform the purge. If ``force`` is set to false,
the method will return the expected purge count without
deleting any user events.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
filter = proto.Field(
proto.STRING,
number=2,
)
force = proto.Field(
proto.BOOL,
number=3,
)
class PurgeUserEventsResponse(proto.Message):
r"""Response of the PurgeUserEventsRequest. If the long running
operation is successfully done, then this message is returned by
the google.longrunning.Operations.response field.
Attributes:
purged_events_count (int):
The total count of events purged as a result
of the operation.
"""
purged_events_count = proto.Field(
proto.INT64,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
b06aa32802ade3e4e698ad2bc91fb6f89b5526cd | 537d28fb2142331e27c84ebf2c16bad77aceb24e | /keras/keras16_lstm_test.py | 501629772dd401f2859619456181d656aad3964b | [] | no_license | gema0000/bit2019 | c27c3cec8d8d3a0907ade41523ce1c5ee86337b6 | 2f44ad3956b387186935374d9a488ad40a13bcaf | refs/heads/master | 2020-07-03T05:19:41.051447 | 2019-10-26T23:56:25 | 2019-10-26T23:56:25 | 201,796,021 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | ## keras15_lstm 의 test값을
## 최대한 근접한 predict값을 만든다.
| [
"gema0000@naver.com"
] | gema0000@naver.com |
dc23c83e5ce4c01426137b74d439e7617662dd9f | 5cdcae079a912011d177457bb4da31f298d53354 | /lab4/env_makers.py | 09438dd0e0fab116c94706a8e04422194c5c38a1 | [
"MIT"
] | permissive | sobamchan/drl-camp | cb7afedea86ed0236d291cc187803d902439dc5a | d469cc9732d4c150e9625c710cbeeaeb016357b6 | refs/heads/master | 2022-03-03T23:19:23.480865 | 2019-11-10T11:30:12 | 2019-11-10T11:30:12 | 108,625,985 | 0 | 0 | null | 2019-11-10T11:30:13 | 2017-10-28T06:39:51 | Jupyter Notebook | UTF-8 | Python | false | false | 10,817 | py | """
This project was developed by Rocky Duan, Peter Chen, Pieter Abbeel for the Berkeley Deep RL Bootcamp, August 2017. Bootcamp website with slides and lecture videos: https://sites.google.com/view/deep-rl-bootcamp/.
Copyright 2017 Deep RL Bootcamp Organizers.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import numpy as np
import cv2
import gym
from collections import deque
from gym import spaces
import gym.envs.registration
from gym.envs.atari.atari_env import AtariEnv
import logger
import os
# import roboschool
import tempfile
import gym.wrappers.monitoring
# Silence the log messages
gym.envs.registration.logger.setLevel(logging.WARNING)
gym.wrappers.monitoring.logger.setLevel(logging.WARNING)
class EnvMaker(object):
def __init__(self, env_id):
self.env_id = env_id
def make(self):
env = gym.make(self.env_id)
if logger.get_dir() is not None:
monitor_dir = os.path.join(logger.get_dir(), "gym_monitor")
resume = True
force = False
else:
monitor_dir = "/tmp/gym-monitoring"
resume = False
force = True
env = gym.wrappers.Monitor(env, directory=monitor_dir, video_callable=False, force=force, resume=resume,
write_upon_reset=True)
if isinstance(env.unwrapped, AtariEnv):
if '-ram-' in self.env_id:
assert 'NoFrameskip' not in self.env_id
env = ScaledFloatFrame(env)
else:
env = ScaledFloatFrame(wrap_atari_pg(env))
return env
# Code below are adopted from OpenAI Baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers_deprecated.py
class NoopResetEnv(gym.Wrapper):
def __init__(self, env=None, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
super(NoopResetEnv, self).__init__(env)
self.noop_max = noop_max
self.override_num_noops = None
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def _reset(self):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset()
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = np.random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(0)
if done:
obs = self.env.reset()
return obs
class NormalizeActionWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
if isinstance(self.env.action_space, gym.spaces.Box):
self.action_space = gym.spaces.Box(
low=-1, high=1, shape=self.env.action_space.shape)
else:
self.action_space = self.env.action_space
def step(self, action):
if not isinstance(self.action_space, gym.spaces.Box):
return super().step(action)
# rescale action
low = self.env.action_space.low
high = self.env.action_space.high
action = np.asarray(action)
rescaled_action = (action + 1) / 2 * (high - low) + low
return super().step(rescaled_action)
class FireResetEnv(gym.Wrapper):
def __init__(self, env=None):
"""For environments where the user need to press FIRE for the game to start."""
super(FireResetEnv, self).__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def _reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env=None):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
super(EpisodicLifeEnv, self).__init__(env)
self.lives = 0
self.was_real_done = True
self.was_real_reset = False
def _step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert somtimes we stay in lives == 0 condtion for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def _reset(self):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
self.was_real_reset = True
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.was_real_reset = False
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env=None, skip=4):
"""Return only every `skip`-th frame"""
super(MaxAndSkipEnv, self).__init__(env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = deque(maxlen=2)
self._skip = skip
def _step(self, action):
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, info
def _reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class ProcessFrame84(gym.ObservationWrapper):
def __init__(self, env=None):
super(ProcessFrame84, self).__init__(env)
self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1))
def _observation(self, obs):
return ProcessFrame84.process(obs)
@staticmethod
def process(frame):
if frame.size == 210 * 160 * 3:
img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
elif frame.size == 250 * 160 * 3:
img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
else:
assert False, "Unknown resolution."
img = img[:, :, 0] * 0.299 + img[:, :, 1] * \
0.587 + img[:, :, 2] * 0.114
resized_screen = cv2.resize(
img, (84, 110), interpolation=cv2.INTER_AREA)
x_t = resized_screen[18:102, :]
x_t = np.reshape(x_t, [84, 84, 1])
return x_t.astype(np.uint8)
class ClippedRewardsWrapper(gym.RewardWrapper):
def _reward(self, reward):
"""Change all the positive rewards to 1, negative to -1 and keep zero."""
return np.sign(reward)
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not belive how complex the previous solution was."""
self._frames = frames
def __array__(self, dtype=None):
out = np.concatenate(self._frames, axis=2)
if dtype is not None:
out = out.astype(dtype)
return out
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0, high=255, shape=(shp[0], shp[1], shp[2] * k))
def _reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def _step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def _observation(self, obs):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(obs).astype(np.float32) / 255.0
def wrap_atari_pg(env):
"""Apply a common set of wrappers for Atari games."""
assert 'NoFrameskip' in env.spec.id
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = ProcessFrame84(env)
env = FrameStack(env, 4)
env = ClippedRewardsWrapper(env)
return env
| [
"oh.sore.sore.soutarou@gmail.com"
] | oh.sore.sore.soutarou@gmail.com |
673d7f0e04cc9e9c02c82b6768fae6be6935b77b | f0755c0ca52a0a278d75b76ee5d9b547d9668c0e | /atcoder.jp/abc045/abc045_a/Main.py | e44b5fc83bf98e6706bdb781e618d50a697f7393 | [] | no_license | nasama/procon | 7b70c9a67732d7d92775c40535fd54c0a5e91e25 | cd012065162650b8a5250a30a7acb1c853955b90 | refs/heads/master | 2022-07-28T12:37:21.113636 | 2020-05-19T14:11:30 | 2020-05-19T14:11:30 | 263,695,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | a, b, h = [int(input()) for _ in range(3)]
print((a+b)*h//2) | [
"g1620535@is.ocha.ac.jp"
] | g1620535@is.ocha.ac.jp |
aba27c928883760a8e56fefdbca4f15230131d0b | 443406462ab99481ed1ccfa1e1ed483a242fed78 | /test/test_val_match.py | 4ced5e55541ea7b820747d29197dc15902923a36 | [
"MIT"
] | permissive | RiftNemesis/Pyot | 4343334fd2e2cde29d78d9a0025eb0b4c177c323 | 7c34bbd4bcdad37ec512dcdaac43ae5f69f12975 | refs/heads/master | 2023-05-29T13:05:27.106176 | 2021-06-17T15:24:30 | 2021-06-17T15:24:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,371 | py | from datetime import datetime, timedelta
from pyot.utils import loop_run
from pyot.models import val, riot
async def async_match_history():
account = await riot.Account(name="Tuxedo", tag="AYAYA", region="AMERICAS").get(pipeline="val")
history = await val.MatchHistory(puuid=account.puuid, platform="NA").get()
for match in history:
assert isinstance(match, val.Match)
assert isinstance(match.id, str)
assert isinstance(match.creation, datetime)
assert isinstance(match.team_id, str)
async def async_match():
account = await riot.Account(name="Tuxedo", tag="AYAYA", region="AMERICAS").get(pipeline="val")
history = await val.MatchHistory(puuid=account.puuid, platform="NA").get()
match = await history[0].get()
info = match.info
players = match.players
teams = match.teams
results = match.round_results
assert isinstance(info.id, str)
assert isinstance(info.map_id, str)
assert isinstance(info.duration, timedelta)
assert isinstance(info.creation, datetime)
assert isinstance(info.provisioning_flow_id, str)
assert isinstance(info.is_completed, bool)
assert isinstance(info.custom_game_name, str)
assert isinstance(info.queue_id, str)
assert isinstance(info.game_mode, str)
assert isinstance(info.is_ranked, bool)
assert isinstance(info.season_id, str)
for i in players:
assert isinstance(i.puuid, str)
assert isinstance(i.team_id, str)
assert isinstance(i.party_id, str)
assert isinstance(i.character_id, str)
assert isinstance(i.competitive_tier, int)
assert isinstance(i.player_card, str)
assert isinstance(i.player_title, str)
stat = i.stats
assert isinstance(stat.score, int)
assert isinstance(stat.rounds_played, int)
assert isinstance(stat.kills, int)
assert isinstance(stat.deaths, int)
assert isinstance(stat.assists, int)
assert isinstance(stat.playtime, timedelta)
ability = stat.ability_casts
if ability is not None:
assert isinstance(ability.grenade_casts, int) if ability.grenade_casts is not None else True
assert isinstance(ability.ability1_casts, int) if ability.ability1_casts is not None else True
assert isinstance(ability.ability2_casts, int) if ability.ability2_casts is not None else True
assert isinstance(ability.ultimate_casts, int) if ability.ultimate_casts is not None else True
for i in teams:
assert isinstance(i.id, str)
assert isinstance(i.won, bool)
assert isinstance(i.rounds_played, int)
assert isinstance(i.rounds_won, int)
for i in results:
assert isinstance(i.round_num, int)
assert isinstance(i.round_result, str)
assert isinstance(i.round_ceremony, str)
assert isinstance(i.winning_team, str)
# assert isinstance(i.bomb_planter_puuid, str)
# assert isinstance(i.bomb_defuser_puuid, str)
assert isinstance(i.plant_round_time, timedelta)
assert isinstance(i.plant_site, str)
assert isinstance(i.defuse_round_time, timedelta)
assert isinstance(i.round_result_code, str)
l1 = i.defuse_location
l2 = i.plant_location
assert isinstance(l1.x, int)
assert isinstance(l1.y, int)
assert isinstance(l2.x, int)
assert isinstance(l2.y, int)
defuse = i.defuse_player_locations
plant = i.plant_player_locations
for pi in [defuse, plant]:
if pi is not None:
for p in pi:
assert isinstance(p.puuid, str)
assert isinstance(p.view_radians, float)
assert isinstance(p.location.x, int)
assert isinstance(p.location.y, int)
pss = i.player_stats
for ps in pss:
assert isinstance(ps.puuid, str)
assert isinstance(ps.score, int)
for k in ps.kills:
assert isinstance(k.game_time, timedelta)
assert isinstance(k.round_time, timedelta)
assert isinstance(k.killer_puuid, str)
assert isinstance(k.victim_puuid, str)
assert k.assistant_puuids is not None
assert isinstance(k.victim_location.x, int)
assert isinstance(k.victim_location.y, int)
for pl in k.player_locations:
assert isinstance(pl.puuid, str)
assert isinstance(pl.view_radians, float) or pl.view_radians == 0
assert isinstance(pl.location.x, int)
assert isinstance(pl.location.y, int)
fd = k.finishing_damage
assert isinstance(fd.damage_type, str)
assert isinstance(fd.damage_item, str)
assert isinstance(fd.is_secondary_fire_mode, bool)
for d in ps.damage:
assert isinstance(d.receiver, str)
assert isinstance(d.damage, int)
assert isinstance(d.legshots, int)
assert isinstance(d.bodyshots, int)
assert isinstance(d.headshots, int)
ec = ps.economy
assert isinstance(ec.loadout_value, int)
assert isinstance(ec.weapon, str)
assert isinstance(ec.armor, str)
assert isinstance(ec.remaining, int)
assert isinstance(ec.spent, int)
abi = ps.ability
assert isinstance(abi.grenade_effects, int) if abi.grenade_effects is not None else True
assert isinstance(abi.ability1_effects, int) if abi.ability1_effects is not None else True
assert isinstance(abi.ability2_effects, int) if abi.ability2_effects is not None else True
assert isinstance(abi.ultimate_effects, int) if abi.ultimate_effects is not None else True
async def async_recent():
recent = await val.RecentMatches(queue="competitive", platform="NA").get()
assert isinstance(recent.current_time, datetime)
for match in recent.matches:
assert isinstance(match, val.Match)
assert match.platform == recent.platform
def test_match_history():
loop_run(async_match_history())
def test_match():
loop_run(async_match())
def test_recent():
loop_run(async_recent())
| [
"paaksingtech@gmail.com"
] | paaksingtech@gmail.com |
f185a7c8f9a3cd84146c245ef42ae3dd162ea955 | 3f4f40fede22e93d64c5b7c687461d6fdcf11202 | /binder/jupyter_notebook_config.py | 2ee53857ae141ce9d98154c80e950799c26dc939 | [] | no_license | mamba-org/mamba-navigator | 2fb682354b9a318866de4c79ccd3e2dd3b859c14 | 6c7c8bda9897b08f63b41578c7cac30e60417255 | refs/heads/master | 2022-12-18T16:49:10.599377 | 2020-09-16T09:56:23 | 2020-09-16T09:56:23 | 263,335,713 | 4 | 1 | null | 2020-09-16T09:56:25 | 2020-05-12T12:53:10 | TypeScript | UTF-8 | Python | false | false | 595 | py | import sys
c.ServerProxy.servers = {
"mamba": {
"command": [
sys.executable,
"main.py",
"--no-browser",
'--port={port}',
"--ip=0.0.0.0",
"--NotebookApp.token=''",
"--NotebookApp.base_url={base_url}mamba",
"--NotebookApp.allow_remote_access=True",
],
"timeout": 120,
"absolute_url": True,
"launcher_entry": {
"enabled": True,
"icon_path": "/home/jovyan/style/mamba.svg",
"title": "Mamba Navigator",
},
},
}
| [
"jeremy.tuloup@gmail.com"
] | jeremy.tuloup@gmail.com |
9641ebd3537d79835e97f84a1d8756a5a9a55f1c | e6b1ad137a9bd3d39ae7c61cb5c7f7956ce095b9 | /bruteforce/sum_of_evens.py | 82a04c6dc86d97e9cdd498226df324d34a360b6e | [] | no_license | jadenpadua/Data-Structures-and-Algorithms | d9ba8ece779a2d564a7d66fcbacc9fb7fa1f7205 | 838c29112fec4beb9d9cc3f54db00492b4a480b0 | refs/heads/master | 2021-07-17T13:10:52.029327 | 2020-07-08T02:00:14 | 2020-07-08T02:00:14 | 186,896,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | Create a function that returns the sum of all even elements in a 2D matrix.
def sum_of_evens(lst):
sum = 0
for i in range (len(lst)):
for j in range (len(lst[i])):
if (lst[i][j]%2==0):
sum += lst[i][j]
return sum
| [
"noreply@github.com"
] | jadenpadua.noreply@github.com |
5b760d12cea3f25316b2a67a254ec6f4f3401c7c | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/encoding/configurations/audio/opus/customdata/customdata_api.py | a54550e92efdc897ff0f7a3687a09d7c143398eb | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 1,364 | py | # coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.custom_data import CustomData
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
class CustomdataApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(CustomdataApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def get(self, configuration_id, **kwargs):
# type: (string_types, dict) -> CustomData
"""Opus Codec Configuration Custom Data
:param configuration_id: Id of the codec configuration
:type configuration_id: string_types, required
:return: Opus codec configuration custom data
:rtype: CustomData
"""
return self.api_client.get(
'/encoding/configurations/audio/opus/{configuration_id}/customData',
path_params={'configuration_id': configuration_id},
type=CustomData,
**kwargs
)
| [
"openapi@bitmovin.com"
] | openapi@bitmovin.com |
2a8ac7c9d2d3078b808eb31de461bd7522a12385 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2374/58585/316440.py | cac23d042dd5f238056bfb6df4de059e5a5494be | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | a=int(input())
b=int(input())
c=input()
d=int(input())
e=input()
if a==2 and b==5 and c=='5 5 4 6 4':
print('4 4 5 5 6 ')
print('9 9 9 2 5 ')
elif a==2 and b==5 and c=='5 5 4 5 4' and d==5 and e=='9 5 2 2 5':
print('5 5 5 4 4 ')
print('2 2 5 5 9 ')
elif a==2 and b==5 and c=='5 5 4 5 4' and d==5 and e=='9 9 2 2 5':
print('5 5 5 4 4 ')
print('2 2 9 9 5 ')
else:
print('5 5 5 4 4 ')
print('9 9 9 2 5 ') | [
"1069583789@qq.com"
] | 1069583789@qq.com |
c4614e23b2b15bb0e0061871ae7895e8cfa3bc7e | 9d6817b67f7993b3a43319894ebd508b1fa92f9f | /python/WJ_MiNNLO_NNLOPSLike_withPhotos_cff.py | 096f9df590fd8bcf688719768b8893f11d1394a9 | [] | no_license | kdlong/WMassNanoGen | b7c5c12df52862d7dd9d9554d7654b9e5d701167 | d1e0c6db75f671eb593cf907307189cd95aa31f6 | refs/heads/master | 2023-06-27T07:21:53.971633 | 2023-06-19T13:32:41 | 2023-06-19T13:32:41 | 235,908,488 | 2 | 7 | null | 2023-06-12T09:10:10 | 2020-01-23T23:43:11 | Python | UTF-8 | Python | false | false | 3,194 | py | import FWCore.ParameterSet.Config as cms
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/afs/cern.ch/work/m/mseidel/public/MiNNLO-gridpacks/Wj_slc6_amd64_gcc700_CMSSW_10_2_16_WplusJToMuNu-nnlopslike-powheg-MiNNLO.tgz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
from Configuration.Generator.Pythia8PowhegEmissionVetoSettings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
generator = cms.EDFilter("Pythia8HadronizerFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
pythia8PSweightsSettingsBlock,
pythia8PowhegEmissionVetoSettingsBlock,
processParameters = cms.vstring(
'POWHEG:nFinal = 2', ## Number of final state particles
## (BEFORE THE DECAYS) in the LHE
## other than emitted extra parton
'ParticleDecays:allowPhotonRadiation = on',
'TimeShower:QEDshowerByL = off',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'pythia8PowhegEmissionVetoSettings',
'pythia8PSweightsSettings',
'processParameters')
),
ExternalDecays = cms.PSet(
Photospp = cms.untracked.PSet(
parameterSets = cms.vstring("setExponentiation", "setInfraredCutOff", "setMeCorrectionWtForW", "setMeCorrectionWtForZ", "setMomentumConservationThreshold", "setPairEmission", "setPhotonEmission", "setStopAtCriticalError", "suppressAll", "forceBremForDecay"),
setExponentiation = cms.bool(True),
setMeCorrectionWtForW = cms.bool(True),
setMeCorrectionWtForZ = cms.bool(True),
setInfraredCutOff = cms.double(0.00011),
setMomentumConservationThreshold = cms.double(0.1),
setPairEmission = cms.bool(True),
setPhotonEmission = cms.bool(True),
setStopAtCriticalError = cms.bool(False),
# Use Photos only for W/Z decays
suppressAll = cms.bool(True),
forceBremForDecay = cms.PSet(
parameterSets = cms.vstring("Z", "Wp", "Wm"),
Z = cms.vint32(0, 23),
Wp = cms.vint32(0, 24),
Wm = cms.vint32(0, -24),
),
),
parameterSets = cms.vstring("Photospp")
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"kdlong@hep.wisc.edu"
] | kdlong@hep.wisc.edu |
f16c43bebdb7978fe5f3a18d22a34eb59fe7c6d6 | 1e4eefff1c19ffb81016ce99f2284fb657293f65 | /special/src/paths.py | 67debb8325148760c72f41298431e8cf52493132 | [] | no_license | Solero93/bcn-algorithm-club-py | 5e1edf15f087e0edf2cf7ba0859fb5e4523525ad | 1edf407498756e7ba36534387bb4241b8b455c4f | refs/heads/master | 2020-03-28T09:06:30.328130 | 2019-03-25T10:38:48 | 2019-03-25T10:38:48 | 148,014,386 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | """
/**
* 5 points
*
*
* Given a binary tree and a sum, determine if the tree has a root-to-leaf path such that adding up all the values along the path equals the given sum.
*
*
* Note: A leaf is a node with no children.
*
* Example:
*
* Given the below binary tree and sum = 22,
*
* 5
* / \
* 4 8
* / / \
* 11 13 4
* / \ \
* 7 2 1
*
* return true, as there exist a root-to-leaf path 5->4->11->2 which sum is 22.
*/
"""
def has_path_sum(list_representation: list, sum_path: int) -> bool:
return True
| [
"solero93@gmail.com"
] | solero93@gmail.com |
9b6f6f24e900ff1e3448bfbaa608c7b3bf20aa6b | c03b6ed252fb3120b5972f39f980b717901a9cea | /app.py | a47506455a61a455b9320186cae0105530842bce | [] | no_license | vvalotto/autenticacion_flask | 69f79fd191921184c7a04c8db26a849edd047341 | 3b2db225b2445817a50429692b01e50d2a97a2d5 | refs/heads/master | 2020-05-18T07:16:36.140013 | 2019-04-30T17:42:35 | 2019-04-30T17:42:35 | 184,260,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | from flask import Flask
from flask import Flask, flash, redirect, render_template, request, session, abort
import os
from sqlalchemy.orm import sessionmaker
from table import *
engine = create_engine('sqlite:///tutorial.db', echo=True)
app = Flask(__name__)
@app.route('/')
def home():
if not session.get('logged_in'):
return render_template('login.html')
else:
return "Hola! <a href=/logout>Logout</a>"
@app.route('/login', methods=['POST'])
def do_admin_login():
POST_USERNAME = str(request.form['username'])
POST_PASSWORD = str(request.form['password'])
Session = sessionmaker(bind=engine)
s = Session()
query = s.query(User).filter(User.username.in_([POST_USERNAME]), User.password.in_([POST_PASSWORD]))
result = query.first()
if result:
session['logged_in'] = True
else:
flash('wrong password!')
return home()
@app.route("/logout")
def logout():
session['logged_in'] = False
return home()
if __name__ == "__main__":
app.secret_key = os.urandom(12)
app.run(debug=True, host='127.0.0.1', port=4000)
| [
"vvalotto@gmail.com"
] | vvalotto@gmail.com |
3d3127cc236021f4c7634594ae93a4e47381a056 | cdcd00bc69f4e37958ae1c0619f198a156a02e2e | /data_integration/parallel_tasks/sql.py | fd439deb4be2441f7bcf517d9dd71759b0334150 | [
"MIT"
] | permissive | willyhakim/data-integration | 57321727ec850f973ce24ae32092be25347331aa | 1f87ae2cef824d2347885dc64b11ddbdc95082eb | refs/heads/master | 2020-03-14T21:24:03.693810 | 2018-04-30T11:45:18 | 2018-04-30T11:45:18 | 131,795,287 | 1 | 0 | null | 2018-05-02T03:50:57 | 2018-05-02T03:50:57 | null | UTF-8 | Python | false | false | 3,344 | py | import inspect
import re
import typing
from data_integration import config, pipelines
from data_integration.commands import sql
from mara_page import _, html
class ParallelExecuteSQL(pipelines.ParallelTask, sql._SQLCommand):
def __init__(self, id: str, description: str, parameter_function: typing.Callable, parameter_placeholders: [str],
max_number_of_parallel_tasks: int = None, sql_statement: str = None, file_name: str = None,
commands_before: [pipelines.Command] = None, commands_after: [pipelines.Command] = None,
db_alias: str = None, echo_queries: bool = True, timezone: str = None,
replace: {str: str} = None) -> None:
if (not (sql_statement or file_name)) or (sql_statement and file_name):
raise ValueError('Please provide either sql_statement or file_name (but not both)')
pipelines.ParallelTask.__init__(self, id=id, description=description,
max_number_of_parallel_tasks=max_number_of_parallel_tasks,
commands_before=commands_before, commands_after=commands_after)
sql._SQLCommand.__init__(self, sql_statement, file_name, replace)
self.parameter_function = parameter_function
self.parameter_placeholders = parameter_placeholders
self._db_alias = db_alias
self.timezone = timezone
self.echo_queries = echo_queries
@property
def db_alias(self):
return self._db_alias or config.default_db_alias()
def add_parallel_tasks(self, sub_pipeline: 'pipelines.Pipeline') -> None:
parameters = self.parameter_function()
if not isinstance(parameters, list) or not all(isinstance(item, tuple) for item in parameters):
raise ValueError(f'parameter function should return a list of tuples, got "{repr(parameters)}"')
for parameter_tuple in parameters:
id = '-'.join([re.sub('[^0-9a-z\-_]+', '', str(x).lower().replace('-', '_')) for x in parameter_tuple])
replace = self.replace.copy()
for placeholder, param in zip(self.parameter_placeholders, parameter_tuple):
replace[placeholder] = param
sub_pipeline.add(pipelines.Task(
id=id, description=f'Execute SQL for parameters {repr(parameter_tuple)}',
commands=[
sql.ExecuteSQL(sql_file_name=self.sql_file_name, db_alias=self.db_alias,
echo_queries=self.echo_queries, timezone=self.timezone, replace=replace)
if self.sql_file_name else
sql.ExecuteSQL(sql_statement=self.sql_statement, db_alias=self.db_alias,
echo_queries=self.echo_queries, timezone=self.timezone, replace=replace)]))
def html_doc_items(self) -> [(str, str)]:
return [('db', _.tt[self.db_alias])] \
+ sql._SQLCommand.html_doc_items(self, self.db_alias) \
+ [('parameter function', html.highlight_syntax(inspect.getsource(self.parameter_function), 'python')),
('parameter placeholders', _.tt[repr(self.parameter_placeholders)]),
('echo queries', _.tt[str(self.echo_queries)]),
('timezone', _.tt[self.timezone or ''])]
| [
"martin.loetzsch@gmail.com"
] | martin.loetzsch@gmail.com |
307db3e1ccfca58bf0b28885aa59bcb0b3115648 | b38247a5d84d8b52ce8363f8dd81629cfbe17f65 | /reagent/mab/mab_algorithm.py | 07775d12175593e73e44add2ddc1055ad8299afa | [
"BSD-3-Clause"
] | permissive | facebookresearch/ReAgent | 7f2b82eaaf7a19e58cc50aacc307d7b001231440 | c5f1a8371a677b4f8fb0882b600bf331eba5259d | refs/heads/main | 2023-09-05T15:56:49.175072 | 2023-08-29T21:48:40 | 2023-08-29T21:48:40 | 98,565,575 | 1,480 | 290 | BSD-3-Clause | 2023-09-12T23:09:30 | 2017-07-27T17:53:21 | Python | UTF-8 | Python | false | false | 8,530 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from abc import ABC, abstractmethod
from typing import List, Optional, Tuple
import torch
from torch import Tensor
def get_arm_indices(
ids_of_all_arms: List[str], ids_of_arms_in_batch: List[str]
) -> List[int]:
arm_idxs = []
for i in ids_of_arms_in_batch:
try:
arm_idxs.append(ids_of_all_arms.index(i))
except ValueError:
raise ValueError(f"Unknown arm_id {i}. Known arm ids: {ids_of_all_arms}")
return arm_idxs
def place_values_at_indices(values: Tensor, idxs: List[int], total_len: int) -> Tensor:
"""
We place the values provided in `values` at indices provided in idxs. The values at indices
not included in `idxs` are filled with zeros.
TODO: maybe replace with sparse-to-dense tensor function?
Example:
place_values_at_indices(Tensor([4,5]), [2,0], 4) == Tensor([5, 0, 4, 0])
Args:
values (Tensor): The values
idxs (List[int]): The indices at which the values have to be placed
total_len (int): Length of the output tensor
Return:
The output tensor
"""
assert len(values) == len(idxs)
ret = torch.zeros(total_len)
ret[idxs] = values
return ret
def reindex_multiple_tensors(
all_ids: List[str],
batch_ids: Optional[List[str]],
value_tensors: Tuple[Tensor, ...],
) -> Tuple[Tensor, ...]:
"""
Each tensor from value_tensors is ordered by ids from batch_ids. In the output we
return these tensors reindexed by all_ids, filling in zeros for missing entries.
Args:
all_ids (List[str]): The IDs that specify how to order the elements in the output
batch_ids (Optional[List[str]]): The IDs that specify how the elements are ordered in the input
value_tensors (Tuple[Tensor]): A tuple of tensors with elements ordered by `batch_ids`
Return:
A Tuple of reindexed tensors
"""
if batch_ids is None or batch_ids == all_ids:
# the observations are for all arms are already in correct order
return value_tensors
else:
assert len(batch_ids) == len(
set(batch_ids)
) # make sure no duplicates in arm IDs
# get the indices of the arms
arm_idxs = get_arm_indices(all_ids, batch_ids)
# put elements from the batch in the positions specified by `arm_ids` (missing arms will be zero)
ret = []
for v in value_tensors:
ret.append(place_values_at_indices(v, arm_idxs, len(all_ids)))
return tuple(ret)
def randomized_argmax(x: torch.Tensor) -> int:
"""
Like argmax, but return a random (uniformly) index of the max element
This function makes sense only if there are ties for the max element
"""
if torch.isinf(x).any():
# if some scores are inf, return the index for one of the infs
best_indices = torch.nonzero(torch.isinf(x)).squeeze()
else:
max_value = torch.max(x)
best_indices = torch.nonzero(x == max_value).squeeze()
if best_indices.ndim == 0:
# if there is a single argmax
chosen_idx = int(best_indices)
else:
chosen_idx = int(
best_indices[
torch.multinomial(
1.0 / len(best_indices) * torch.ones(len(best_indices)), 1
)[0]
]
)
return chosen_idx
class MABAlgo(torch.nn.Module, ABC):
def __init__(
self,
randomize_ties: bool = True,
min_num_obs_per_arm: int = 1,
*,
n_arms: Optional[int] = None,
arm_ids: Optional[List[str]] = None,
) -> None:
super().__init__()
if n_arms is not None:
self.arm_ids = list(map(str, range(n_arms)))
self.n_arms = n_arms
if arm_ids is not None:
self.arm_ids = arm_ids
self.n_arms = len(arm_ids)
self.min_num_obs_per_arm = min_num_obs_per_arm
self.total_n_obs_all_arms = 0
self.total_n_obs_per_arm = torch.zeros(self.n_arms)
self.total_sum_reward_per_arm = torch.zeros(self.n_arms)
self.total_sum_reward_squared_per_arm = torch.zeros(self.n_arms)
self.randomize_ties = randomize_ties
def add_batch_observations(
self,
n_obs_per_arm: Tensor,
sum_reward_per_arm: Tensor,
sum_reward_squared_per_arm: Tensor,
arm_ids: Optional[List[str]] = None,
) -> None:
(
n_obs_per_arm,
sum_reward_per_arm,
sum_reward_squared_per_arm,
) = reindex_multiple_tensors(
all_ids=self.arm_ids,
batch_ids=arm_ids,
value_tensors=(
n_obs_per_arm,
sum_reward_per_arm,
sum_reward_squared_per_arm,
),
)
self.total_n_obs_per_arm += n_obs_per_arm
self.total_sum_reward_per_arm += sum_reward_per_arm
self.total_sum_reward_squared_per_arm += sum_reward_squared_per_arm
self.total_n_obs_all_arms += int(n_obs_per_arm.sum().item())
def add_single_observation(self, arm_id: str, reward: float) -> None:
"""
Add a single observation (arm played, reward) to the bandit
Args:
arm_id (int): Which arm was played
reward (float): Reward renerated by the arm
"""
assert arm_id in self.arm_ids
arm_idx = self.arm_ids.index(arm_id)
self.total_n_obs_per_arm[arm_idx] += 1
self.total_sum_reward_per_arm[arm_idx] += reward
self.total_sum_reward_squared_per_arm[arm_idx] += reward**2
self.total_n_obs_all_arms += 1
def get_action(self) -> str:
"""
Get the id of the action chosen by the MAB algorithm
Returns:
int: The integer ID of the chosen action
"""
scores = self() # calling forward() under the hood
if self.randomize_ties:
best_idx = randomized_argmax(scores)
else:
best_idx = torch.argmax(scores)
return self.arm_ids[best_idx]
def reset(self) -> None:
"""
Reset the MAB to the initial (empty) state.
"""
self.__init__(randomize_ties=self.randomize_ties, arm_ids=self.arm_ids)
@abstractmethod
def get_scores(self) -> Tensor:
pass
def forward(self):
# set `inf` scores for arms which don't have the minimum number of observations
return torch.where(
self.total_n_obs_per_arm >= self.min_num_obs_per_arm,
self.get_scores().float(),
torch.tensor(torch.inf, dtype=torch.float),
)
def get_avg_reward_values(self) -> Tensor:
return self.total_sum_reward_per_arm / self.total_n_obs_per_arm
@classmethod
def get_scores_from_batch(
cls,
n_obs_per_arm: Tensor,
sum_reward_per_arm: Tensor,
sum_reward_squared_per_arm: Tensor,
) -> Tensor:
"""
A utility method used to create the bandit, feed in a batch of observations and get the scores in one function call
Args:
n_obs_per_arm (Tensor): A tensor of counts of per-arm numbers of observations
sum_reward_per_arm (Tensor): A tensor of sums of rewards for each arm
sum_reward_squared_per_arm (Tensor): A tensor of sums of squared rewards for each arm
Returns:
Tensor: Array of per-arm scores
"""
n_arms = len(n_obs_per_arm)
b = cls(n_arms=n_arms) # pyre-ignore[45]
b.add_batch_observations(
n_obs_per_arm, sum_reward_per_arm, sum_reward_squared_per_arm
)
return b()
def __repr__(self) -> str:
t = ", ".join(
f"{v:.3f} ({int(n)})"
for v, n in zip(self.get_avg_reward_values(), self.total_n_obs_per_arm)
)
return f"{type(self).__name__}({self.n_arms} arms; {t}"
class RandomActionsAlgo(MABAlgo):
"""
A MAB algorithm which samples actions uniformly at random
"""
def get_scores(self) -> Tensor:
return torch.rand(self.n_arms)
class GreedyAlgo(MABAlgo):
"""
Greedy algorithm, which always chooses the best arm played so far
Arms that haven't been played yet are given priority by assigning inf score
Ties are resolved in favor of the arm with the smallest index.
"""
def get_scores(self) -> Tensor:
return self.get_avg_reward_values()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
0df74f09405291abe27edf78b7b4e06bb4ee8b14 | e942cafaf64f6354e1f9ebd4a84bcf236ad93004 | /yawast/_static_version.py | 2df3a4a6275a3ae95963ccbac54a9ee4d0e3f762 | [
"MIT"
] | permissive | Prodject/yawast | 9a441a0576012dc5f0664cd23cfa0a803fd7a477 | 044309709cf3782de75a35f77297f2d2850d8e1c | refs/heads/master | 2020-03-23T02:32:12.357082 | 2020-01-21T18:13:19 | 2020-01-21T18:13:19 | 140,978,938 | 0 | 0 | BSD-3-Clause | 2020-01-21T18:13:20 | 2018-07-14T21:23:05 | Ruby | UTF-8 | Python | false | false | 627 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2013 - 2020 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
# This file is part of 'miniver': https://github.com/jbweston/miniver
#
# This file will be overwritten by setup.py when a source or binary
# distribution is made. The magic value "__use_git__" is interpreted by
# version.py.
version = "__use_git__"
# These values are only set if the distribution was created with 'git archive'
refnames = "$Format:%D$"
git_hash = "$Format:%h$"
| [
"adam@adamcaudill.com"
] | adam@adamcaudill.com |
87a309cb82702de33083d597c32f97b9f550b950 | 6c816f19d7f4a3d89abbb00eeaf43dd818ecc34f | /apps/account/migrations/0055_auto_20210414_2208.py | 0a9881008c4515c36c8979af88d753d8ec9147de | [] | no_license | reo-dev/bolt | 29ee6aa7cfc96bd50fa7a7dae07fbaafc2125e54 | d1a7859dd1ebe2f5b0e6e295047b620f5afdb92e | refs/heads/master | 2023-07-13T04:05:57.856278 | 2021-08-27T09:07:03 | 2021-08-27T09:07:03 | 382,195,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | # Generated by Django 3.0.8 on 2021-04-14 13:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('category', '0011_auto_20210219_1901'),
('account', '0054_auto_20210413_1634'),
]
operations = [
migrations.RemoveField(
model_name='clientclass',
name='client',
),
migrations.RemoveField(
model_name='machine',
name='partner',
),
migrations.RemoveField(
model_name='partner_estimate',
name='partner',
),
migrations.RemoveField(
model_name='partnercategory',
name='category',
),
migrations.RemoveField(
model_name='partnercategory',
name='partner',
),
migrations.RemoveField(
model_name='process',
name='partner',
),
migrations.RemoveField(
model_name='structure',
name='partner',
),
migrations.RemoveField(
model_name='partner',
name='career',
),
migrations.RemoveField(
model_name='partner',
name='coin',
),
migrations.RemoveField(
model_name='partner',
name='employee',
),
migrations.RemoveField(
model_name='partner',
name='info_biz',
),
migrations.RemoveField(
model_name='partner',
name='region',
),
migrations.RemoveField(
model_name='partner',
name='revenue',
),
migrations.DeleteModel(
name='Certification',
),
migrations.DeleteModel(
name='Clientclass',
),
migrations.DeleteModel(
name='Machine',
),
migrations.DeleteModel(
name='Partner_Estimate',
),
migrations.DeleteModel(
name='PartnerCategory',
),
migrations.DeleteModel(
name='Process',
),
migrations.DeleteModel(
name='Structure',
),
]
| [
"75593016+reo-dev@users.noreply.github.com"
] | 75593016+reo-dev@users.noreply.github.com |
365ba771af380e6b95fbad2e044704d6ec5cc9fa | 0987f31e64bcacb41ba3a1e20054d7b8ac0d7346 | /practice/algorithm_datastructure_for_programming_contest/309_ALDS1_12_C_AtCoder.py | 98b22cbaed6fa9c73066ce9865cb9924191fd201 | [] | no_license | masakiaota/kyoupuro | 81ae52ab3014fb2b1e10472994afa4caa9ea463b | 74915a40ac157f89fe400e3f98e9bf3c10012cd7 | refs/heads/master | 2021-06-27T04:13:52.152582 | 2020-09-20T03:21:17 | 2020-09-20T03:21:17 | 147,049,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | # https://onlinejudge.u-aizu.ac.jp/courses/lesson/1/ALDS1/12/ALDS1_12_C
# これに関してもAtCoderではすでにある実装が使える https://note.nkmk.me/python-scipy-shortest-path/
# じゅっぴーさんの記事 https://juppy.hatenablog.com/entry/2019/06/04/scipy%E3%81%AEFloyd-Warshall%E3%81%A8Dijkstra%E3%81%AE%E3%81%99%E3%81%99%E3%82%81_Python_%E7%AB%B6%E6%8A%80%E3%83%97%E3%83%AD%E3%82%B0%E3%83%A9%E3%83%9F%E3%83%B3%E3%82%B0_Atcoder_1
# 1つ目の記事にあるようにdijkstraでなくshortest_path関数に引数を入れるのが実用的な使い方か
INF = 10**6
from scipy.sparse.csgraph import dijkstra
from scipy.sparse import csr_matrix, lil_matrix
# scipy.sparse.csgraphでは基本的に隣接行列の入力を想定している
# 機械学習ではcsrが基本的だがlil_matrixのほうがデータを打ち込むのが早いらしい
# load datas
N = int(input())
# adj_mat = csr_matrix((N, N))
adj_mat = lil_matrix((N, N))
# print(adj_mat.shape)
# print(adj_mat)
for _ in range(N):
tmp = list(map(int, input().split()))
if tmp[1] != 0:
node = tmp[0]
for i in range(2, 2 + tmp[1] * 2, 2):
adj_mat[node, tmp[i]] = tmp[i + 1]
D = dijkstra(adj_mat)[0]
# 行ごとにその行を始点としたときの各ノードへの最短経路が計算されるのでそれを取り出すだけ
for i in range(N):
print(i, int(D[i]))
| [
"aotamasakimail@gmail.com"
] | aotamasakimail@gmail.com |
554d1fdcac2ae51244747b651cfd9c9970d8637f | e6ef2915c35faa8d09e846708323b4b79786a5fe | /nobrainer/cli_click.py | 9d82f7e8bb816f394ee7ee8983f2047cbdb5e71d | [
"Apache-2.0"
] | permissive | yarikoptic/kwyk | 1bbe1ed795ffecac1157d67c5f3f40003dc5e379 | 7e2b2bdee58962f229befe9375fc5afeadd4aa3c | refs/heads/master | 2020-07-09T01:44:37.646045 | 2019-08-21T02:32:41 | 2019-08-21T02:32:41 | 203,839,875 | 0 | 0 | null | 2019-08-22T17:13:21 | 2019-08-22T17:13:20 | null | UTF-8 | Python | false | false | 4,726 | py | from pathlib import Path
import subprocess
import tempfile
import click
import nibabel as nib
import numpy as np
from nobrainer.io import read_volume
from nobrainer.predict import _get_predictor
from nobrainer.predict import predict_from_filepath
from nobrainer.volume import from_blocks
from nobrainer.volume import to_blocks
from nobrainer.volume import zscore
_here = Path(__file__).parent
_models = {
'bwn': _here.parent / 'saved_models' / 'all_50_wn' / '1555341859',
'bwn_multi': _here.parent / 'saved_models' / 'all_50_bwn_09_multi' / '1555963478',
'bvwn_multi_prior': _here.parent / 'saved_models' / 'all_50_bvwn_multi_prior' / '1556816070',
}
@click.command()
@click.argument('infile')
@click.argument('outprefix')
@click.option('-m', '--model', type=click.Choice(_models.keys()), default="bwn_multi", required=True, help='Model to use for prediction.')
@click.option('-n', '--n-samples', type=int, default=1, help='Number of samples to predict.')
@click.option('-b', '--batch-size', type=int, default=8, help='Batch size during prediction.')
@click.option('--save-variance', is_flag=True, help='Save volume with variance across `n-samples` predictions.')
@click.option('--save-entropy', is_flag=True, help='Save volume of entropy values.')
def predict(*, infile, outprefix, model, n_samples, batch_size, save_variance, save_entropy):
"""Predict labels from features using a trained model.
The predictions are saved to OUTPREFIX_* with the same extension as the input file.
If you encounter out-of-memory issues, use a lower batch size value.
"""
_orig_infile = infile
# Are there other neuroimaging file extensions with multiple periods?
if infile.lower().endswith('.nii.gz'):
outfile_ext = '.nii.gz'
else:
outfile_ext = Path(infile).suffix
outfile_stem = outprefix
outfile_means = "{}_means{}".format(outfile_stem, outfile_ext)
outfile_variance = "{}_variance{}".format(outfile_stem, outfile_ext)
outfile_entropy = "{}_entropy{}".format(outfile_stem, outfile_ext)
for ff in [outfile_means, outfile_variance, outfile_entropy]:
if Path(ff).exists():
raise FileExistsError("file exists: {}".format(ff))
required_shape = (256, 256, 256)
block_shape = (32, 32, 32)
img = nib.load(infile)
ndim = len(img.shape)
if ndim != 3:
raise ValueError("Input volume must have three dimensions but got {}.".format(ndim))
if img.shape != required_shape:
tmp = tempfile.NamedTemporaryFile(suffix='.nii.gz')
print("++ Conforming volume to 1mm^3 voxels and size 256x256x256.")
_conform(infile, tmp.name)
infile = tmp.name
else:
tmp = None
savedmodel_path = _models[model]
print("++ Running forward pass of model.")
predictor = _get_predictor(savedmodel_path)
outputs = predict_from_filepath(
infile,
predictor=predictor,
block_shape=block_shape,
return_variance=True,
return_entropy=True,
n_samples=n_samples,
batch_size=batch_size,
normalizer=zscore)
# Delete temporary file.
if tmp is not None:
tmp.close()
if n_samples > 1:
means, variance, entropy = outputs
else:
means, entropy = outputs
variance = None
outfile_means_orig = "{}_means_orig{}".format(outfile_stem, outfile_ext)
outfile_variance_orig = "{}_variance_orig{}".format(outfile_stem, outfile_ext)
outfile_entropy_orig = "{}_entropy_orig{}".format(outfile_stem, outfile_ext)
print("++ Saving results.")
data = np.round(means.get_fdata()).astype(np.uint8)
means = nib.Nifti1Image(data, header=means.header, affine=means.affine)
means.header.set_data_dtype(np.uint8)
nib.save(means, outfile_means)
_reslice(outfile_means, outfile_means_orig, _orig_infile, True)
if save_variance and variance is not None:
nib.save(variance, outfile_variance)
_reslice(outfile_variance, outfile_variance_orig, _orig_infile)
if save_entropy:
nib.save(entropy, outfile_entropy)
_reslice(outfile_entropy, outfile_entropy_orig, _orig_infile)
def _conform(input, output):
"""Conform volume using FreeSurfer."""
subprocess.run(['mri_convert', '--conform', input, output], check=True)
return output
def _reslice(input, output, reference, labels=False):
"""Conform volume using FreeSurfer."""
if labels:
subprocess.run(['mri_convert', '-rl', reference, '-rt', 'nearest', '-ns', '1',
input, output],
check=True)
else:
subprocess.run(['mri_convert', '-rl', reference, input, output], check=True)
return output
| [
"satra@mit.edu"
] | satra@mit.edu |
38b6bb516de9c2d2385e25f78767678751d1db75 | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/pandas/core/internals/concat.py | 665bce34f0e7273f1c6ba1fb6f5a04b20ef93095 | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,160 | py | # TODO: Needs a better name; too many modules are already called "concat"
from collections import defaultdict
import copy
import numpy as np
from pandas._libs import internals as libinternals, tslibs
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
_get_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float_dtype,
is_numeric_dtype,
is_sparse,
is_timedelta64_dtype,
)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape , save for item axis which will be separate
# for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(
blknos, mgr.nblocks, group=False
):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs)
and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
(
(
ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1
)
or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()
)
)
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
class JoinUnit:
def __init__(self, block, shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self):
return "{name}({block!r}, {indexers})".format(
name=self.__class__.__name__, block=self.block, indexers=self.indexers
)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return _get_dtype(maybe_promote(self.block.dtype, self.block.fill_value)[0])
@cache_readonly
def is_na(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
# Usually it's enough to check but a small fraction of values to see if
# a block is NOT null, chunks should help in such cases. 1000 value
# was chosen rather arbitrarily.
values = self.block.values
if self.block.is_categorical:
values_flat = values.categories
elif is_sparse(self.block.values.dtype):
return False
elif self.block.is_extension:
values_flat = values
else:
values_flat = values.ravel(order="K")
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isna(values_flat[i : i + chunk_len]).all():
return False
return True
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_na:
if getattr(self.block, "is_object", False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, "is_datetimetz", False) or is_datetime64tz_dtype(
empty_dtype
):
if self.block is None:
array = empty_dtype.construct_array_type()
return array(
np.full(self.shape[1], fill_value.value), dtype=empty_dtype
)
pass
elif getattr(self.block, "is_categorical", False):
pass
elif getattr(self.block, "is_extension", False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool and not self.block.is_categorical:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
elif self.block.is_extension:
values = self.block.values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax, fill_value=fill_value)
return values
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [
ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
for ju in join_units
]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
else:
concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_empty_dtype_and_na(join_units):
    """
    Return dtype and N/A values to use when concatenating specified units.

    Returned N/A value may be None which means there was no casting involved.

    Returns
    -------
    dtype
    na
    """
    if len(join_units) == 1:
        blk = join_units[0].block
        if blk is None:
            # A single fully-missing unit defaults to float64 / NaN.
            return np.float64, np.nan

    if is_uniform_reindex(join_units):
        # XXX: integrate property
        # All units share one extension dtype: keep it and its fill value.
        empty_dtype = join_units[0].block.dtype
        upcasted_na = join_units[0].block.fill_value
        return empty_dtype, upcasted_na

    has_none_blocks = False
    dtypes = [None] * len(join_units)
    for i, unit in enumerate(join_units):
        if unit.block is None:
            has_none_blocks = True
        else:
            dtypes[i] = unit.dtype

    # Bucket each dtype into a coarse "upcast class".  N/A-only units are
    # kept in a separate mapping so they cannot force an upcast unless
    # every unit is N/A (see the swap below).
    upcast_classes = defaultdict(list)
    null_upcast_classes = defaultdict(list)
    for dtype, unit in zip(dtypes, join_units):
        if dtype is None:
            continue

        if is_categorical_dtype(dtype):
            upcast_cls = "category"
        elif is_datetime64tz_dtype(dtype):
            upcast_cls = "datetimetz"
        elif issubclass(dtype.type, np.bool_):
            upcast_cls = "bool"
        elif issubclass(dtype.type, np.object_):
            upcast_cls = "object"
        elif is_datetime64_dtype(dtype):
            upcast_cls = "datetime"
        elif is_timedelta64_dtype(dtype):
            upcast_cls = "timedelta"
        elif is_sparse(dtype):
            upcast_cls = dtype.subtype.name
        elif is_extension_array_dtype(dtype):
            upcast_cls = "object"
        elif is_float_dtype(dtype) or is_numeric_dtype(dtype):
            upcast_cls = dtype.name
        else:
            upcast_cls = "float"

        # Null blocks should not influence upcast class selection, unless there
        # are only null blocks, when same upcasting rules must be applied to
        # null upcast classes.
        if unit.is_na:
            null_upcast_classes[upcast_cls].append(dtype)
        else:
            upcast_classes[upcast_cls].append(dtype)

    if not upcast_classes:
        upcast_classes = null_upcast_classes

    # create the result
    if "object" in upcast_classes:
        return np.dtype(np.object_), np.nan
    elif "bool" in upcast_classes:
        if has_none_blocks:
            # bool mixed with missing values must become object (there is
            # no bool NaN).
            return np.dtype(np.object_), np.nan
        else:
            return np.dtype(np.bool_), None
    elif "category" in upcast_classes:
        return np.dtype(np.object_), np.nan
    elif "datetimetz" in upcast_classes:
        # GH-25014. We use NaT instead of iNaT, since this eventually
        # ends up in DatetimeArray.take, which does not allow iNaT.
        dtype = upcast_classes["datetimetz"]
        return dtype[0], tslibs.NaT
    elif "datetime" in upcast_classes:
        return np.dtype("M8[ns]"), tslibs.iNaT
    elif "timedelta" in upcast_classes:
        return np.dtype("m8[ns]"), tslibs.iNaT
    else:  # pragma
        try:
            g = np.find_common_type(upcast_classes, [])
        except TypeError:
            # At least one is an ExtensionArray
            return np.dtype(np.object_), np.nan
        else:
            if is_float_dtype(g):
                return g, g.type(np.nan)
            elif is_numeric_dtype(g):
                if has_none_blocks:
                    # numeric + missing: upcast to float64 so NaN fits.
                    return np.float64, np.nan
                else:
                    return g, None

    msg = "invalid dtype determination in get_concat_dtype"
    raise AssertionError(msg)
def is_uniform_join_units(join_units):
    """
    Check if the join units consist of blocks of uniform type that can
    be concatenated using Block.concat_same_type instead of the generic
    concatenate_join_units (which uses `_concat._concat_compat`).
    """
    # Only take this fast path when there is actually something to
    # concatenate.
    if len(join_units) <= 1:
        return False

    first_block_type = type(join_units[0].block)
    for ju in join_units:
        # Every block must share one concrete Block subclass.
        if type(ju.block) is not first_block_type:
            return False
        # Units that would introduce missing values can trigger dtype
        # upcasts -- unless the block is an extension block.
        if ju.is_na and not ju.block.is_extension:
            return False
        # Units with indexers have mismatched dimensions.
        if ju.indexers:
            return False
    return True
def is_uniform_reindex(join_units):
    """True when every unit holds an extension block and all dtypes agree."""
    # TODO: should this be ju.block._can_hold_na?
    if not all(ju.block and ju.block.is_extension for ju in join_units):
        return False
    dtype_names = {ju.block.dtype.name for ju in join_units}
    return len(dtype_names) == 1
def trim_join_unit(join_unit, length):
    """
    Reduce join_unit's shape along item axis to length.

    Extra items that didn't fit are returned as a separate block.

    Note: mutates `join_unit` in place (its block / axis-0 indexer /
    shape are truncated); the returned JoinUnit holds the overflow.
    """
    if 0 not in join_unit.indexers:
        # No axis-0 indexer: split the underlying block itself.
        extra_indexers = join_unit.indexers

        if join_unit.block is None:
            extra_block = None
        else:
            extra_block = join_unit.block.getitem_block(slice(length, None))
            join_unit.block = join_unit.block.getitem_block(slice(length))
    else:
        # There is an axis-0 indexer: the block is shared, so split the
        # indexer instead (copy it so the overflow unit owns its own).
        extra_block = join_unit.block

        extra_indexers = copy.copy(join_unit.indexers)
        extra_indexers[0] = extra_indexers[0][length:]
        join_unit.indexers[0] = join_unit.indexers[0][:length]

    extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
    join_unit.shape = (length,) + join_unit.shape[1:]

    return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape)
def combine_concat_plans(plans, concat_axis):
    """
    Combine multiple concatenation plans into one.

    existing_plan is updated in-place.

    Yields (placement, [unit, ...]) pairs.  With a single plan the items
    pass through unchanged; along axis 0 the plans are chained with
    offset placements; otherwise the plans are advanced in lockstep,
    trimming units (via trim_join_unit) so every plan yields chunks of
    equal length.
    """
    if len(plans) == 1:
        for p in plans[0]:
            yield p[0], [p[1]]

    elif concat_axis == 0:
        offset = 0
        for plan in plans:
            last_plc = None
            for plc, unit in plan:
                # Shift each placement past the rows consumed by earlier
                # plans.
                yield plc.add(offset), [unit]
                last_plc = plc
            if last_plc is not None:
                offset += last_plc.as_slice.stop

    else:
        # One-element list acts as a mutable counter shared with the
        # closure below (pre-`nonlocal` idiom).
        num_ended = [0]

        def _next_or_none(seq):
            retval = next(seq, None)
            if retval is None:
                num_ended[0] += 1
            return retval

        plans = list(map(iter, plans))
        next_items = list(map(_next_or_none, plans))

        while num_ended[0] != len(next_items):
            if num_ended[0] > 0:
                # Some plans ran out before the others did.
                raise ValueError("Plan shapes are not aligned")

            placements, units = zip(*next_items)

            lengths = list(map(len, placements))
            min_len, max_len = min(lengths), max(lengths)

            if min_len == max_len:
                # All plans agree on this chunk's length: emit directly
                # and advance every plan.
                yield placements[0], units
                next_items[:] = map(_next_or_none, plans)
            else:
                # Emit a chunk of min_len; longer items are trimmed and
                # their remainder kept for the next round.
                yielded_placement = None
                yielded_units = [None] * len(next_items)
                for i, (plc, unit) in enumerate(next_items):
                    yielded_units[i] = unit
                    if len(plc) > min_len:
                        # trim_join_unit updates unit in place, so only
                        # placement needs to be sliced to skip min_len.
                        next_items[i] = (plc[min_len:], trim_join_unit(unit, min_len))
                    else:
                        yielded_placement = plc
                        next_items[i] = _next_or_none(plans[i])

                yield yielded_placement, yielded_units
| [
"msaineti@icloud.com"
] | msaineti@icloud.com |
300dff9d384f9c06e0d02144aa0010462b222687 | b4c6013f346e178222cc579ede4da019c7f8c221 | /src/main/python/doctest.py | 6b678a3dd4209afbafcef117928385a788855d32 | [
"BSD-3-Clause",
"OpenSSL",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"GPL-1.0-or-later",
"LicenseRef-scancode-unicode"
] | permissive | cafebabepy/cafebabepy | e69248c4f3d9bab00e93ee749d273bc2c9244f8d | 4ab0e67b8cd79f2ca7cab6281bc811d3b9bc69c1 | refs/heads/develop | 2022-12-09T21:14:56.651792 | 2019-07-01T09:05:23 | 2019-07-01T09:05:23 | 90,854,936 | 9 | 1 | BSD-3-Clause | 2018-01-02T02:13:51 | 2017-05-10T11:05:11 | Java | UTF-8 | Python | false | false | 104,531 | py | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
'FAIL_FAST',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 8. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import argparse
import difflib
import inspect
import linecache
import os
import pdb
import re
import sys
import traceback
import unittest
from io import StringIO
from collections import namedtuple
# Summary of a doctest run: counts of failed and attempted examples.
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
# Registry mapping option-flag names to their bit values.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """Return the flag bit for `name`, allocating the next free power of
    two the first time a name is seen."""
    if name not in OPTIONFLAGS_BY_NAME:
        OPTIONFLAGS_BY_NAME[name] = 1 << len(OPTIONFLAGS_BY_NAME)
    return OPTIONFLAGS_BY_NAME[name]
# Comparison flags: control how expected and actual output are compared.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

# Mask covering every comparison-related flag.
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    SKIP |
                    IGNORE_EXCEPTION_DETAIL)

# Reporting flags: control how failures are reported.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
FAIL_FAST = register_optionflag('FAIL_FAST')

# Mask covering every reporting-related flag.
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE |
                   FAIL_FAST)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Unittest Support
# 8. Debugging Support
# 9. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
    """
    Return the compiler flags for the __future__ features that have been
    imported into the namespace `globs` (OR of their compiler_flag bits).
    """
    result = 0
    for feature_name in __future__.all_feature_names:
        feature = getattr(__future__, feature_name)
        # Only count the name if it is bound to the genuine feature object.
        if globs.get(feature_name, None) is feature:
            result |= feature.compiler_flag
    return result
def _normalize_module(module, depth=2):
    """
    Return the module specified by `module`.  In particular:

      - a module object is returned unchanged;
      - a string is imported and the resulting module is returned;
      - None means "the calling module", resolved from the stack frame
        `depth` levels up.

    Anything else raises TypeError.
    """
    if inspect.ismodule(module):
        return module
    if isinstance(module, str):
        return __import__(module, globals(), locals(), ["*"])
    if module is None:
        # Walk up the stack to find the caller's module.
        caller_globals = sys._getframe(depth).f_globals
        return sys.modules[caller_globals['__name__']]
    raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative, encoding):
    """
    Read the doctest file `filename` and return ``(contents, filename)``.

    When `module_relative` is true, `filename` is resolved relative to
    `package`; if the package has a loader with get_data() (e.g. a
    zip-imported package), the data is fetched through it.
    """
    if module_relative:
        package = _normalize_module(package, 3)
        filename = _module_relative_path(package, filename)
        loader = getattr(package, '__loader__', None)
        if loader is not None and hasattr(loader, 'get_data'):
            contents = loader.get_data(filename).decode(encoding)
            # get_data() opens files as 'rb', so one must do the equivalent
            # conversion as universal newlines would do.
            return contents.replace(os.linesep, '\n'), filename
    with open(filename, encoding=encoding) as f:
        return f.read(), filename
def _indent(s, indent=4):
    """
    Prepend `indent` spaces to every non-blank line of `s` and return
    the result.
    """
    prefix = indent * ' '
    # (?m)^(?!$) matches the start of each line that is not empty.
    return re.sub('(?m)^(?!$)', prefix, s)
def _exception_traceback(exc_info):
    """
    Format the (type, value, traceback) triple `exc_info` -- as produced
    by sys.exc_info() -- into a traceback string.
    """
    buf = StringIO()
    etype, evalue, tb = exc_info
    traceback.print_exception(etype, evalue, tb, file=buf)
    return buf.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """StringIO variant used to capture example output.

    getvalue() guarantees a trailing newline on non-empty output, since
    expected doctest output has no way to express a missing final
    newline.
    """

    def getvalue(self):
        captured = StringIO.getvalue(self)
        # If anything at all was written, normalize to end with '\n'.
        if captured and not captured.endswith("\n"):
            captured += "\n"
        return captured

    def truncate(self, size=None):
        # Position at `size` first so StringIO.truncate cuts there.
        self.seek(size)
        StringIO.truncate(self)
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Worst-case linear-time matching of `got` against `want`, where
    '...' in `want` matches any substring of `got`.

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # Split into the literal pieces between the ellipses.
    pieces = want.split(ELLIPSIS_MARKER)
    assert len(pieces) >= 2

    lo, hi = 0, len(got)

    # A non-empty first piece must match exactly at the start.
    head = pieces[0]
    if head:
        if not got.startswith(head):
            return False
        lo = len(head)
        del pieces[0]

    # A non-empty last piece must match exactly at the end.
    tail = pieces[-1]
    if tail:
        if not got.endswith(tail):
            return False
        hi -= len(tail)
        del pieces[-1]

    if lo > hi:
        # The exact end matches together need more characters than `got`
        # has, as in _ellipsis_match('aa...aa', 'aaa').
        return False

    # For the remaining pieces only the leftmost non-overlapping match
    # is needed; if that greedy strategy fails, no assignment succeeds.
    for piece in pieces:
        # `piece` may be '' for consecutive ellipses, or an ellipsis at
        # either end of `want`.  Searching for '' succeeds without
        # advancing `lo`, which is fine.
        lo = got.find(piece, lo, hi)
        if lo < 0:
            return False
        lo += len(piece)
    return True
def _comment_line(line):
    "Return a commented form of the given line"
    stripped = line.rstrip()
    return '# ' + stripped if stripped else '#'
def _strip_exception_details(msg):
    """
    Reduce an exception message to the bare exception name, in support
    of IGNORE_EXCEPTION_DETAIL.

    Drops any dotted module path and any message text after a colon;
    a colon is assumed never to be part of a dotted name.  E.g.
    "foo.bar.MyError: la di da" -> "MyError"; "abc.def" -> "def".
    """
    start, end = 0, len(msg)
    # Only the first line can contain the exception name.
    newline_pos = msg.find("\n")
    if newline_pos >= 0:
        end = newline_pos
    # Cut at the first colon (if any): everything after is the message.
    colon_pos = msg.find(':', 0, end)
    if colon_pos >= 0:
        end = colon_pos
    # Keep only what follows the last dot: the bare exception name.
    dot_pos = msg.rfind('.', 0, end)
    if dot_pos >= 0:
        start = dot_pos + 1
    return msg[start: end]
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # `out` receives the debugger's own output (prompts, command
        # results); the examples' normal output is not touched.
        self.__out = out
        self.__debugger_used = False
        # do not play signal games in the pdb
        pdb.Pdb.__init__(self, stdout=out, nosigint=True)
        # still use input() to get user input
        self.use_rawinput = 1

    def set_trace(self, frame=None):
        self.__debugger_used = True
        if frame is None:
            # Default to the caller's frame, mirroring pdb.set_trace().
            frame = sys._getframe().f_back
        pdb.Pdb.set_trace(self, frame)

    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)

    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            # Always restore stdout, even when dispatch raises
            # (e.g. bdb.BdbQuit).
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, test_path):
    """
    Resolve `test_path` (a '/'-separated relative path) against the
    directory containing `module`, returning an OS-specific path.

    Raises TypeError when `module` is not a module, and ValueError when
    `test_path` is absolute or cannot be resolved.
    """
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if test_path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')

    # Convert the '/'-separated components to the native separator.
    test_path = os.path.join(*(test_path.split('/')))

    # Find the base directory for the path.
    if hasattr(module, '__file__'):
        # A normal module/package: use its containing directory.
        basedir = os.path.split(module.__file__)[0]
    elif module.__name__ == '__main__':
        # An interactive session: fall back to the script's directory,
        # or the current directory when there is no script path.
        if len(sys.argv) > 0 and sys.argv[0] != '':
            basedir = os.path.split(sys.argv[0])[0]
        else:
            basedir = os.curdir
    else:
        # Packages with __path__ but no __file__: search each entry.
        if hasattr(module, '__path__'):
            for directory in module.__path__:
                candidate = os.path.join(directory, test_path)
                if os.path.exists(candidate):
                    return candidate

        # A module w/o __file__ (this includes builtins)
        raise ValueError("Can't resolve paths relative to the module "
                         "%r (it has no __file__)"
                         % module.__name__)

    # Combine the base directory and the test path.
    return os.path.join(basedir, test_path)
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: a piece of source code together with its
    expected output.  Attributes:

      - source: One Python statement, always newline-terminated (the
        constructor appends the newline when missing).

      - want: The expected output from running the source (stdout text,
        or a traceback in case of exception); newline-terminated unless
        it is the empty string.

      - exc_msg: The exception message the example is expected to
        produce (compared against `traceback.format_exception_only()`),
        newline-terminated; or None when no exception is expected.

      - lineno: Zero-based line number of the example within the
        enclosing DocTest string.

      - indent: Number of space characters preceding the example's
        first prompt in the DocTest string.

      - options: Mapping of option flags to True/False, overriding the
        runner's default optionflags for this example only; flags not
        present keep their defaults.  Empty by default.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: source/want/exc_msg all end with a newline (where
        # non-empty / present at all).
        if not source.endswith('\n'):
            source += '\n'
        if want and not want.endswith('\n'):
            want += '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg += '\n'
        # Store properties.
        self.source = source
        self.want = want
        self.lineno = lineno
        self.indent = indent
        self.options = options if options is not None else {}
        self.exc_msg = exc_msg

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return (self.source == other.source
                and self.want == other.want
                and self.lineno == other.lineno
                and self.indent == other.indent
                and self.options == other.options
                and self.exc_msg == other.exc_msg)

    def __hash__(self):
        # `options` (a dict) is unhashable and therefore excluded; equal
        # examples still hash equal.
        return hash((self.source, self.want, self.lineno, self.indent,
                     self.exc_msg))
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Attributes:

      - examples: The list of Example objects.

      - globs: The namespace (aka globals) the examples run in.

      - name: A name identifying the DocTest (typically the name of the
        object whose docstring the examples came from).

      - filename: The file the DocTest was extracted from, or None if
        unknown.

      - lineno: Zero-based line number within `filename` where the
        DocTest begins, or None if unavailable.

      - docstring: The string the examples were extracted from, or None
        if unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  `globs` is
        copied so that running the test cannot mutate the caller's
        namespace.
        """
        assert not isinstance(examples, str), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        count = len(self.examples)
        if count == 0:
            examples = 'no examples'
        elif count == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % count
        return ('<%s %s from %s:%s (%s)>' %
                (self.__class__.__name__,
                 self.name, self.filename, self.lineno, examples))

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return (self.examples == other.examples
                and self.docstring == other.docstring
                and self.globs == other.globs
                and self.name == other.name
                and self.filename == other.filename
                and self.lineno == other.lineno)

    def __hash__(self):
        return hash((self.docstring, self.name, self.filename, self.lineno))

    # This lets us sort tests by name:
    def __lt__(self, other):
        if not isinstance(other, DocTest):
            return NotImplemented
        # id() is a final tie-breaker so the ordering is total.
        return ((self.name, self.filename, self.lineno, id(self))
                <
                (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.

    The main entry points are `parse` (alternating Examples and
    intervening text), `get_examples` (Examples only), and
    `get_doctest` (a complete DocTest object).
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)    # Not a blank line
                     (?![ ]*>>>)  # Not a line starting with PS1
                     .+$\n?       # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)

    # A regular expression for handling `want` strings that contain
    # expected exceptions.  It divides `want` into three pieces:
    #    - the traceback header line (`hdr`)
    #    - the traceback stack (`stack`)
    #    - the exception message (`msg`), as generated by
    #      traceback.format_exception_only()
    # `msg` may have multiple lines.  We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header.  Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            |   innermost\ last
            ) \) :
        )
        \s* $                # toss trailing whitespace on the header.
        (?P<stack> .*?)      # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)

    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match

    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])

        output = []
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                    lineno=lineno,
                                    indent=min_indent+len(m.group('indent')),
                                    options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output

    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.

        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)

    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects.  Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.

        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]

    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a pair `(source, want)`, where `source` is the matched
        example's source code (with prompts and indentation stripped);
        and `want` is the example's expected output (with indentation
        stripped).

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-character prompt
        # ('>>> ' or '... ').
        source = '\n'.join([sl[indent+4:] for sl in source_lines])

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])

        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg

    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)

    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                # Each directive must be '+FLAG' or '-FLAG' with a
                # registered flag name.
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options

    # This regular expression finds the indentation of every non-blank
    # line in a string.
    _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)

    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0

    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character.  If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            # The prompt occupies columns indent..indent+2 ('>>>'/'...');
            # column indent+3 must be a blank when anything follows it.
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))

    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects. Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """
    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.
        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest). The
        signature for this factory function should match the signature
        of the DocTest constructor.
        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.
        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.
        The optional parameter `module` is the module that contains
        the given object. If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module. The object's module is used:
            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.
        Contained objects whose module does not match `module` are ignored.
        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests: if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.
        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`). A new copy of the globals dictionary is created
        for each DocTest. If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise. If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))
        # Find the module that contains the given object (if obj is
        # a module, then module=obj.). Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)
        # Read the module's source code. This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj)
        except TypeError:
            source_lines = None
        else:
            if not file:
                # Check to see if it's one of our special internal "files"
                # (see __patched_linecache_getlines).
                file = inspect.getfile(obj)
                # Keep the name only if it has the synthetic shape
                # '<...]>' (first char '<', last two chars ']>').
                if not file[0]+file[-2:] == '<]>': file = None
            if file is None:
                source_lines = None
            else:
                if module is not None:
                    # Supply the module globals in case the module was
                    # originally loaded via a PEP 302 loader and
                    # file is not a valid filesystem path
                    source_lines = linecache.getlines(file, module.__dict__)
                else:
                    # No access to a loader, so assume it's a normal
                    # filesystem path
                    source_lines = linecache.getlines(file)
                if not source_lines:
                    source_lines = None
        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        if '__name__' not in globs:
            globs['__name__'] = '__main__' # provide a default module name
        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output. This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4. It was repaired in
        # 2.4.4 and 2.5.
        tests.sort()
        return tests
    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif inspect.isfunction(object):
            # NOTE(review): `inspect._builtin_code_type` is a private,
            # non-CPython-standard attribute (PyPy support) — confirm it
            # exists on the target interpreter.
            if isinstance(object.__code__, inspect._builtin_code_type):
                return True # XXX: A PyPy builtin - no way to tell
            return module.__dict__ is object.__globals__
        elif inspect.ismethoddescriptor(object):
            if hasattr(object, '__objclass__'):
                obj_mod = object.__objclass__.__module__
            elif hasattr(object, '__module__'):
                obj_mod = object.__module__
            else:
                return True # [XX] no easy way to tell otherwise
            return module.__name__ == obj_mod
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print('Finding tests in %s' % name)
        # If we've already processed this object, then ignore it.
        # (`seen` maps id(obj) -> 1 and guards against cycles.)
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isroutine(inspect.unwrap(val))
                     or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, str):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isroutine(val) or inspect.isclass(val) or
                        inspect.ismodule(val) or isinstance(val, str)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).__func__
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isroutine(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring. If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, str):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, str):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None
        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Point at the .py source rather than the compiled .pyc file.
            if filename[-4:] == ".pyc":
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring. Note:
        this method assumes that the object has a docstring.
        """
        lineno = None
        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0
        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break
        # Find the line number for functions & methods.
        # Unwrap to the underlying code object, which records its
        # first source line in co_firstlineno.
        if inspect.ismethod(obj): obj = obj.__func__
        if inspect.isfunction(obj): obj = obj.__code__
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            lineno = getattr(obj, 'co_firstlineno', None)-1
        # Find the line number where the docstring starts. Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno
        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case. It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.
    >>> tests = DocTestFinder().find(_TestClass)
    >>> runner = DocTestRunner(verbose=False)
    >>> tests.sort(key = lambda test: test.name)
    >>> for test in tests:
    ...     print(test.name, '->', runner.run(test))
    _TestClass -> TestResults(failed=0, attempted=2)
    _TestClass.__init__ -> TestResults(failed=0, attempted=2)
    _TestClass.get -> TestResults(failed=0, attempted=2)
    _TestClass.square -> TestResults(failed=0, attempted=1)
    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:
    >>> runner.summarize(verbose=1)
    4 items passed all tests:
       2 tests in _TestClass
       2 tests in _TestClass.__init__
       2 tests in _TestClass.get
       1 tests in _TestClass.square
    7 tests in 4 items.
    7 passed and 0 failed.
    Test passed.
    TestResults(failed=0, attempted=7)
    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:
    >>> runner.tries
    7
    >>> runner.failures
    0
    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`. This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information. If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.
    The test runner's display output can be controlled in two ways.
    First, an output function (`out) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed. It defaults to `sys.stdout.write`. If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70
    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.
        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.
        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.
        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures. See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (failures, tries) accumulated so far.
        self._name2ft = {}
        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()
    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////
    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example. (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')
    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully. (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")
    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))
    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
    def _failure_header(self, test, example):
        """
        Return a multi-line header for a failure report, identifying
        the file, line number, test name, and failed example's source.
        """
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)
    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////
    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`. Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`. `compileflags` is the set of compiler
        flags that should be used to execute examples. Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed. The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0
        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
        check = self._checker.check_output
        # Process each example.
        for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue
            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)
            # Run the example in the given context (globs), and record
            # any exception that gets raised. (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink! This is where the user's code gets run.
                exec(compile(example.source, filename, "single",
                             compileflags, 1), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            got = self._fakeout.getvalue() # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            # The example raised an exception: check if it was expected.
            else:
                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exception)
                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM
                # We expected an exception: see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    if check(_strip_exception_details(example.exc_msg),
                             _strip_exception_details(exc_msg),
                             self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exception)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)
            if failures and self.optionflags & FAIL_FAST:
                break
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags
        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return TestResults(failures, tries)
    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t
    # Matches the synthetic filenames produced by __run for compile(),
    # e.g. '<doctest some.test.name[3]>'.
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>.+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Serve the example's own source for our synthetic filenames;
        # delegate everything else to the saved real linecache.getlines.
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(keepends=True)
        else:
            return self.save_linecache_getlines(filename, module_globals)
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.
        The examples are run in the namespace `test.globs`. If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection. If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.
        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples. If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.
        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test
        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            encoding = save_stdout.encoding
            if encoding is None or encoding.lower() == 'utf-8':
                out = save_stdout.write
            else:
                # Use backslashreplace error handling on write
                def out(s):
                    s = str(s.encode(encoding, 'backslashreplace'), encoding)
                    save_stdout.write(s)
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_trace = sys.gettrace()
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        # Make sure sys.displayhook just prints the value to stdout
        save_displayhook = sys.displayhook
        sys.displayhook = sys.__displayhook__
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Undo every piece of global state patched above, in
            # reverse, whether or not __run raised.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            sys.settrace(save_trace)
            linecache.getlines = self.save_linecache_getlines
            sys.displayhook = save_displayhook
            if clear_globs:
                test.globs.clear()
                import builtins
                builtins._ = None
    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is. If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print(len(notests), "items had no tests:")
                notests.sort()
                for thing in notests:
                    print(" ", thing)
            if passed:
                print(len(passed), "items passed all tests:")
                passed.sort()
                for thing, count in passed:
                    print(" %3d tests in %s" % (count, thing))
        if failed:
            print(self.DIVIDER)
            print(len(failed), "items had failures:")
            failed.sort()
            for thing, (f, t) in failed:
                print(" %3d of %3d in %s" % (f, t, thing))
        if verbose:
            print(totalt, "tests in", len(self._name2ft), "items.")
            print(totalt - totalf, "passed and", totalf, "failed.")
        if totalf:
            print("***Test Failed***", totalf, "failures.")
        elif verbose:
            print("Test passed.")
        return TestResults(totalf, totalt)
    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        """
        Fold the per-test (failures, tries) counts accumulated by
        `other` (another DocTestRunner) into this runner's counts.
        """
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                # Don't print here by default, since doing
                # so breaks some of the buildbots
                #print("*** DocTestRunner.merge: '" + name + "' in both" \
                #    " testers; summing outcomes.")
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output. `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def _toAscii(self, s):
        """
        Convert string to hex-escaped ASCII string.
        """
        return str(s.encode('ASCII', 'backslashreplace'), "ASCII")
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`). These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible. See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # If `want` contains hex-escaped character such as "\u1234",
        # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]).
        # On the other hand, `got` could be another sequence of
        # characters such as [\u1234], so `want` and `got` should
        # be folded to hex-escaped ASCII string to compare.
        got = self._toAscii(got)
        want = self._toAscii(want)
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True
        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings. Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False
    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        """
        Return True if a difflib-based diff should be used to display
        the difference between `want` and `got`.
        """
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not. In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False
        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2
    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`). `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(keepends=True)
            got_lines = got.splitlines(keepends=True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised in debugging mode when a doctest example's actual output
    does not match its expected output.

    Instance variables:
      - test: the DocTest object being run
      - example: the Example object that failed
      - got: the actual output
    """

    def __init__(self, test, example, got):
        self.test, self.example, self.got = test, example, got

    def __str__(self):
        return str(self.test)
class UnexpectedException(Exception):
    """Raised in debugging mode when a doctest example raises an
    exception that was not part of its expected output.

    Instance variables:
      - test: the DocTest object being run
      - example: the Example object that failed
      - exc_info: the exception info of the unexpected exception
    """

    def __init__(self, test, example, exc_info):
        self.test, self.example, self.exc_info = test, example, exc_info

    def __str__(self):
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
    >>> runner = DebugRunner(verbose=False)
    >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    ...                                    {}, 'foo', 'foo.py', 0)
    >>> try:
    ...     runner.run(test)
    ... except UnexpectedException as f:
    ...     failure = f
    >>> failure.test is test
    True
    >>> failure.example.want
    '42\n'
    >>> exc_info = failure.exc_info
    >>> raise exc_info[1] # Already has the traceback
    Traceback (most recent call last):
    ...
    KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 1
    ...      >>> x
    ...      2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> try:
    ...     runner.run(test)
    ... except DocTestFailure as f:
    ...     failure = f
    DocTestFailure objects provide access to the test:
    >>> failure.test is test
    True
    As well as to the example:
    >>> failure.example.want
    '2\n'
    and the actual output:
    >>> failure.got
    '1\n'
    If a failure or error occurs, the globals are left intact:
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 1}
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      >>> raise KeyError
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    Traceback (most recent call last):
    ...
    doctest.UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 2}
    But the globals are cleared if there is no error:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    TestResults(failed=0, attempted=1)
    >>> test.globs
    {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Run with clear_globs=False so a raised DocTestFailure /
        # UnexpectedException leaves test.globs inspectable; the globals
        # are cleared here only when the run completed without raising.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately instead of printing a report.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Abort the run immediately instead of printing a report.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
# NOTE(review): presumably combined via DocTestRunner.merge — confirm
# against testmod's body.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, report=True,
       optionflags=0, extraglobs=None, raise_on_error=False,
       exclude_empty=False
    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.
    Also test examples reachable from dict m.__test__ if it exists and is
    not None. m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.
    Return (#failures, #tests).
    See help(doctest) for an overview.
    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__. A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used. This is new in 2.4.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. This is new in 2.3. Possible values (see the
    docs for details):
        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master
    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')
    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))
    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__
    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(exclude_empty=exclude_empty)
    # DebugRunner raises on the first problem instead of collecting reports.
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)
    if report:
        runner.summarize()
    # Fold results into the module-level "master" runner so repeated
    # testmod()/testfile() calls accumulate for master.summarize().
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return TestResults(runner.failures, runner.tries)
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Test examples in the given file. Return (#failures, #tests).
    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:
    - If "module_relative" is True (the default), then "filename"
      specifies a module-relative path. By default, this path is
      relative to the calling module's directory; but if the
      "package" argument is specified, then it is relative to that
      package. To ensure os-independence, "filename" should use
      "/" characters to separate path segments, and should not
      be an absolute path (i.e., it may not begin with "/").
    - If "module_relative" is False, then "filename" specifies an
      os-specific path. The path may be absolute or relative (to
      the current working directory).
    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.
    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename. If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames. It is an error to
    specify "package" if "module_relative" is False.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}. A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. Possible values (see the docs for details):
        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.
    Optional keyword arg "encoding" specifies an encoding that should
    be used to convert the file to unicode.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path
    text, filename = _load_testfile(filename, package, module_relative,
                                    encoding or "utf-8")
    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)
    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'
    # DebugRunner raises on the first problem instead of collecting reports.
    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    # Read the file, convert it to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)
    if report:
        runner.summarize()
    # Fold results into the module-level "master" runner (see testmod).
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return TestResults(runner.failures, runner.tries)
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """Run the doctest examples found in the docstring of object `f`.

    `globs` supplies the global namespace the examples execute in, and
    `name` is the label used in failure messages.  When `verbose` is
    true, output is produced even for passing examples.  `compileflags`
    gives the compiler flags for running the examples; if omitted, the
    future-import flags that apply to `globs` are used.  `optionflags`
    selects doctest checking/reporting options (see `testmod`).

    Unlike `testmod`, no summary is printed and nothing is returned.
    """
    # Only examine f itself -- do not recurse into contained objects.
    doctest_finder = DocTestFinder(verbose=verbose, recurse=False)
    doctest_runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    found_tests = doctest_finder.find(f, name, globs=globs)
    for doctest_obj in found_tests:
        doctest_runner.run(doctest_obj, compileflags=compileflags)
######################################################################
## 7. Unittest Support
######################################################################
# Default reporting flags that DocTestCase.runTest applies when a test's
# own optionflags contain no reporting bits; changed via
# set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> import doctest
    >>> old = doctest._unittest_reportflags
    >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
    ...                                  REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> doctest.set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags
    # Reject anything outside the REPORT_* bit mask -- checking flags such
    # as ELLIPSIS belong on individual tests, not on the global default.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    old = _unittest_reportflags
    _unittest_reportflags = flags
    return old
class DocTestCase(unittest.TestCase):
    """A unittest.TestCase adapter that runs a single DocTest object."""
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown
    def setUp(self):
        test = self._dt_test
        # User-supplied hook receives the DocTest (so it can seed test.globs).
        if self._dt_setUp is not None:
            self._dt_setUp(test)
    def tearDown(self):
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)
        # Drop the (possibly large) execution namespace once the test is done.
        test.globs.clear()
    def runTest(self):
        test = self._dt_test
        # Collect the runner's report in a StringIO; sys.stdout is saved and
        # restored so examples that rebind it cannot leak the change.
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)
        try:
            runner.DIVIDER = "-"*70
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old
        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))
    def format_failure(self, err):
        # Build a traceback-style header so IDEs/editors can jump to the test.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                '  File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )
    def debug(self):
        r"""Run the test case without results and without catching exceptions
        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging. The test code
        is run in such a way that errors are not caught. This way a
        caller can catch the errors and initiate post-mortem debugging.
        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
        exception:
        >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
        ...                {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except UnexpectedException as f:
        ...     failure = f
        The UnexpectedException contains the test, the example, and
        the original exception:
        >>> failure.test is test
        True
        >>> failure.example.want
        '42\n'
        >>> exc_info = failure.exc_info
        >>> raise exc_info[1] # Already has the traceback
        Traceback (most recent call last):
        ...
        KeyError
        If the output doesn't match, then a DocTestFailure is raised:
        >>> test = DocTestParser().get_doctest('''
        ... >>> x = 1
        ... >>> x
        ... 2
        ... ''', {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except DocTestFailure as f:
        ...     failure = f
        DocTestFailure objects provide access to the test:
        >>> failure.test is test
        True
        As well as to the example:
        >>> failure.example.want
        '2\n'
        and the actual output:
        >>> failure.got
        '1\n'
        """
        self.setUp()
        # DebugRunner raises instead of reporting, so errors propagate to us.
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test, clear_globs=False)
        self.tearDown()
    def id(self):
        return self._dt_test.name
    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return self._dt_test == other._dt_test and \
               self._dt_optionflags == other._dt_optionflags and \
               self._dt_setUp == other._dt_setUp and \
               self._dt_tearDown == other._dt_tearDown and \
               self._dt_checker == other._dt_checker
    def __hash__(self):
        # NOTE(review): _dt_test is excluded from the hash although __eq__
        # compares it -- equal cases still hash equal, so the hash contract
        # holds; confirm the omission is intentional.
        return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
                     self._dt_checker))
    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    __str__ = __repr__
    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
    """Placeholder case reported when a module's doctests cannot run
    (docstrings are stripped under python -O2 and above)."""
    def __init__(self, module):
        self.module = module
        # No DocTest to run -- pass None; setUp skips before runTest is hit.
        DocTestCase.__init__(self, None)
    def setUp(self):
        self.skipTest("DocTestSuite will not work with -O2 and above")
    def test_skip(self):
        pass
    def shortDescription(self):
        return "Skipping tests from %s" % self.module.__name__
    __str__ = shortDescription
class _DocTestSuite(unittest.TestSuite):
    # unittest.TestSuite frees tests after running them via
    # _removeTestAtIndex; override it as a no-op so the suite keeps its
    # tests and can be run (or debugged) more than once.
    def _removeTestAtIndex(self, index):
        pass
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.
    This converts each documentation string in a module that
    contains doctest tests to a unittest test case. If any of the
    tests in a doc string fail, then the test case fails. An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.
    The `module` argument provides the module to be tested. The argument
    can be either a module or a module name.
    If no argument is given, the calling module is used.
    A number of options may be provided as keyword arguments:
    setUp
      A set-up function. This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object. The setUp function can access the test globals as the
      globs attribute of the test passed.
    tearDown
      A tear-down function. This is called after running the
      tests in each file. The tearDown function will be passed a DocTest
      object. The tearDown function can access the test globals as the
      globs attribute of the test passed.
    globs
      A dictionary containing initial global variables for the tests.
    optionflags
       A set of doctest option flags expressed as an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()
    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if not tests and sys.flags.optimize >=2:
        # Skip doctests when running with -O2
        suite = _DocTestSuite()
        suite.addTest(SkipDocTestCase(module))
        return suite
    tests.sort()
    suite = _DocTestSuite()
    for test in tests:
        if len(test.examples) == 0:
            continue
        if not test.filename:
            filename = module.__file__
            # Point failure messages at the .py source, not the bytecode.
            if filename[-4:] == ".pyc":
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))
    return suite
class DocFileCase(DocTestCase):
    """DocTestCase variant for doctests loaded from a text file rather
    than a docstring; ids and failure headers use the file path."""
    def id(self):
        return '_'.join(self._dt_test.name.split('.'))
    def __repr__(self):
        return self._dt_test.filename
    __str__ = __repr__
    def format_failure(self, err):
        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
                % (self._dt_test.name, self._dt_test.filename, err)
                )
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    """Build a DocFileCase for the doctest file at `path`.

    See DocFileSuite for the meaning of the keyword arguments; extra
    `options` are forwarded to the DocFileCase constructor.
    """
    # NOTE(review): the default `parser` is a single shared module-level
    # instance; this assumes DocTestParser carries no per-call state.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path.
    doc, path = _load_testfile(path, package, module_relative,
                               encoding or "utf-8")
    if "__file__" not in globs:
        globs["__file__"] = path
    # Find the file and read it.
    name = os.path.basename(path)
    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.
    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".
    A number of options may be provided as keyword arguments:
    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths. By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package. To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").
      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths. These paths may be absolute
      or relative (to the current working directory).
    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames. It is an error to specify "package" if
      "module_relative" is False.
    setUp
      A set-up function. This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object. The setUp function can access the test globals as the
      globs attribute of the test passed.
    tearDown
      A tear-down function. This is called after running the
      tests in each file. The tearDown function will be passed a DocTest
      object. The tearDown function can access the test globals as the
      globs attribute of the test passed.
    globs
      A dictionary containing initial global variables for the tests.
    optionflags
      A set of doctest option flags expressed as an integer.
    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.
    encoding
      An encoding that will be used to convert the files to unicode.
    """
    suite = _DocTestSuite()
    # We do this here so that _normalize_module is called at the right
    # level. If it were called in DocFileTest, then this function
    # would be the caller and we might guess the package incorrectly.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))
    # One DocFileCase per path; all share the same keyword options.
    for path in paths:
        suite.addTest(DocFileTest(path, **kw))
    return suite
######################################################################
## 8. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script. Example input is
    converted to regular code. Example output and all other words
    are converted to comments:
    >>> text = '''
    ... Here are examples of simple math.
    ...
    ... Python has super accurate integer addition
    ...
    ... >>> 2 + 2
    ... 5
    ...
    ... And very friendly error messages:
    ...
    ... >>> 1/0
    ... To Infinity
    ... And
    ... Beyond
    ...
    ... You can use logic if you want:
    ...
    ... >>> if 0:
    ... ...    blah
    ... ...    blah
    ... ...
    ...
    ... Ho hum
    ... '''
    >>> print(script_from_examples(text))
    # Here are examples of simple math.
    #
    # Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    # And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    # You can use logic if you want:
    #
    if 0:
       blah
       blah
    #
    # Ho hum
    <BLANKLINE>
    """
    # Walk the parsed pieces: Example objects become code (+ "# Expected:"
    # comments for their output); interleaved prose becomes "#" comments.
    output = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Add the example's source code (strip trailing NL)
            output.append(piece.source[:-1])
            # Add the expected output:
            want = piece.want
            if want:
                output.append('# Expected:')
                output += ['## '+l for l in want.split('\n')[:-1]]
        else:
            # Add non-example text.
            output += [_comment_line(l)
                       for l in piece.split('\n')[:-1]]
    # Trim junk on both ends.
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)
    # Combine the output, and return it.
    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
    return '\n'.join(output) + '\n'
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.

    `module` is the module (or dotted module name) containing the test,
    and `name` is the full dotted name (within that module) of the object
    whose docstring holds the doctests.  Raises ValueError when no test
    with that name is found.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    # Convert the first (only) match's docstring into runnable source.
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug the doctest examples contained in the string `src`.

    The examples are turned into a script and run under pdb; `pm`
    selects post-mortem mode and `globs` supplies the namespace.
    """
    script = script_from_examples(src)
    debug_script(script, pm, globs)
def debug_script(src, pm=False, globs=None):
    """Debug a test script under pdb.  `src` is the script, as a string.

    When `pm` is true the script is executed normally and the debugger is
    entered post-mortem only if it raises; otherwise the whole script is
    run under debugger control from the first line.
    """
    import pdb
    # Work on a copy so the caller's namespace is never mutated.
    globs = dict(globs) if globs else {}
    if pm:
        try:
            exec(src, globs, globs)
        except:
            # Deliberately broad: any exception (incl. SystemExit) should
            # drop us into the post-mortem debugger.
            print(sys.exc_info()[1])
            debugger = pdb.Pdb(nosigint=True)
            debugger.reset()
            debugger.interaction(None, sys.exc_info()[2])
    else:
        pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs)
def debug(module, name, pm=False):
    """Debug a single doctest docstring under pdb.

    `module` is the module (or dotted module name) containing the test,
    and `name` is the dotted name of the object whose docstring holds the
    doctests.  `pm` selects post-mortem mode.
    """
    module = _normalize_module(module)
    script = testsource(module, name)
    # Run with the module's own namespace so the examples see its globals.
    debug_script(script, pm, module.__dict__)
######################################################################
## 9. Example Usage
######################################################################
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.
    Methods:
        square()
        get()
    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """
    def __init__(self, val):
        """val -> _TestClass object with associated value val.
        >>> t = _TestClass(123)
        >>> print(t.get())
        123
        """
        # The wrapped value; squared in place by square().
        self.val = val
    def square(self):
        """square() -> square TestClass's associated value
        >>> _TestClass(13).square().get()
        169
        """
        self.val = self.val ** 2
        # Return self so calls can be chained (see class docstring example).
        return self
    def get(self):
        """get() -> return TestClass's associated value.
        >>> x = _TestClass(-42)
        >>> print(x.get())
        -42
        """
        return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print('foo\n\nbar\n')
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print(list(range(1000))) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    """Command-line harness: run doctests in each file/module named on argv.
    Returns the intended exit status: 1 if any target had failures, else 0.
    """
    parser = argparse.ArgumentParser(description="doctest runner")
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='print very verbose output for all tests')
    parser.add_argument('-o', '--option', action='append',
                        choices=OPTIONFLAGS_BY_NAME.keys(), default=[],
                        help=('specify a doctest option flag to apply'
                              ' to the test run; may be specified more'
                              ' than once to apply multiple options'))
    parser.add_argument('-f', '--fail-fast', action='store_true',
                        help=('stop running tests after first failure (this'
                              ' is a shorthand for -o FAIL_FAST, and is'
                              ' in addition to any other -o options)'))
    parser.add_argument('file', nargs='+',
                        help='file containing the tests to run')
    args = parser.parse_args()
    testfiles = args.file
    # Verbose used to be handled by the "inspect argv" magic in DocTestRunner,
    # but since we are using argparse we are passing it manually now.
    verbose = args.verbose
    options = 0
    # OR together every -o flag name given on the command line.
    for option in args.option:
        options |= OPTIONFLAGS_BY_NAME[option]
    if args.fail_fast:
        options |= FAIL_FAST
    for filename in testfiles:
        if filename.endswith(".py"):
            # It is a module -- insert its dir into sys.path and try to
            # import it. If it is part of a package, that possibly
            # won't work because of package imports.
            dirname, filename = os.path.split(filename)
            sys.path.insert(0, dirname)
            m = __import__(filename[:-3])
            del sys.path[0]
            failures, _ = testmod(m, verbose=verbose, optionflags=options)
        else:
            failures, _ = testfile(filename, module_relative=False,
                                     verbose=verbose, optionflags=options)
        if failures:
            return 1
    return 0
# Allow running as a script: python doctest.py [-v] [-o FLAG] [-f] file ...
if __name__ == "__main__":
    sys.exit(_test())
| [
"zh1bvtan1@gmail.com"
] | zh1bvtan1@gmail.com |
8954a181b5e8d7b31145e5c139935b9780e4d1eb | a7f442bc306d1a8366a3e30db50af0c2c90e9091 | /blockchain-env/Lib/site-packages/Cryptodome/Util/number.pyi | f9864ae390b6a99941307c3bfa0524d8bd651e30 | [] | no_license | Patreva/Python-flask-react-blockchain | cbdce3e0f55d4ba68be6ecfba35620585894bbbc | 474a9795820d8a4b5a370d400d55b52580055a2e | refs/heads/main | 2023-03-29T01:18:53.985398 | 2021-04-06T08:01:24 | 2021-04-06T08:01:24 | 318,560,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | pyi | from typing import List, Optional, Callable
# Type stubs for Cryptodome.Util.number: signatures only, the `...` bodies
# carry no runtime behaviour.
def ceil_div(n: int, d: int) -> int: ...
# (fixed PEP 8 E211: no space between the function name and its parentheses)
def size(N: int) -> int: ...
def getRandomInteger(N: int, randfunc: Optional[Callable]=None) -> int: ...
def getRandomRange(a: int, b: int, randfunc: Optional[Callable]=None) -> int: ...
def getRandomNBitInteger(N: int, randfunc: Optional[Callable]=None) -> int: ...
def GCD(x: int,y: int) -> int: ...
def inverse(u: int, v: int) -> int: ...
def getPrime(N: int, randfunc: Optional[Callable]=None) -> int: ...
def getStrongPrime(N: int, e: Optional[int]=0, false_positive_prob: Optional[float]=1e-6, randfunc: Optional[Callable]=None) -> int: ...
def isPrime(N: int, false_positive_prob: Optional[float]=1e-6, randfunc: Optional[Callable]=None) -> bool: ...
def long_to_bytes(n: int, blocksize: Optional[int]=0) -> bytes: ...
def bytes_to_long(s: bytes) -> int: ...
# Legacy aliases kept for backward compatibility with old PyCrypto code.
def long2str(n: int, blocksize: Optional[int]=0) -> bytes: ...
def str2long(s: bytes) -> int: ...
sieve_base: List[int]
| [
"patrickwahome74@gmail.com"
] | patrickwahome74@gmail.com |
1e02a37c7f9aaf6c82b9607e145b16c47b81a547 | 5280cb50a8b61615a2b92474944c721b1f222aba | /main_projects/aspaceify_extents/scripts/test_aspace_split.py | dddc7a655aa138304b44e7ad6f462b6a471a3104 | [] | no_license | bentley-historical-library/bentley_code | 944125c58c0b9bceef5f424fd58a5282fea52a6f | 61bdfc5b12c088b605e25c9835bf50ab14cfbc14 | refs/heads/master | 2020-07-10T01:05:32.433263 | 2015-07-24T20:58:44 | 2015-07-24T20:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,868 | py | from __future__ import absolute_import
import unittest
from collections import namedtuple
from aspaceify_extents.scripts.make_aspace_extent_distinctions import split_into_aspace_components
class TestASpaceSplit(unittest.TestCase):
    """Unit tests for split_into_aspace_components: each case feeds a raw
    extent string and asserts the ArchivesSpace-style split into type,
    portion, container summary, dimensions, and physical facet."""
    def setUp(self):
        # Expected-result shape mirroring what the splitter returns.
        self.ASpaceExtent = namedtuple("ASpaceExtent", ["type_", "portion", "container_summary", "dimensions", "physfacet"])
    def check_output_equality(self, extent, type_="", container_summary="", dimensions="", physfacet="", portion="whole", multiple=False):
        # Shared helper: build the expected tuple and compare against the
        # splitter's output for the given raw extent string.
        target_namedtuple = self.ASpaceExtent(type_=type_, portion=portion, physfacet=physfacet, container_summary=container_summary, dimensions=dimensions)
        generated_namedtuple = split_into_aspace_components(extent, multiple)
        self.assertEqual(generated_namedtuple, target_namedtuple)
    def test_type_only(self):
        self.check_output_equality("5 volumes", type_="5 volumes")
    def test_paren_placed_in_container_summary(self):
        self.check_output_equality("5 linear feet (in 5 boxes)", type_="5 linear feet", container_summary="(in 5 boxes)")
    def test_in_section_placed_in_container_summary(self):
        self.check_output_equality("5 linear feet in 5 boxes", type_="5 linear feet", container_summary="(in 5 boxes)")
    def test_dimension_placed_in_dimensions(self):
        # (original text, expected dimensions) pairs covering the various
        # dimension notations found in legacy extent statements.
        dimension_examples = [
            ("p, 2x4in.", "2x4in."),
            ("p, 2x4 in.", "2x4 in."),
            ("p, 2x4-5x8 cm.", "2x4-5x8 cm."),
            ("p, 20 x 40 cm", "20 x 40 cm"),
            ("p, 3-1/2x5 to 4x6-inch", "3-1/2x5 to 4x6-inch"),
            ("p, 79.5 x 113.8 cm. (31 x 44-3/8 inches)", "79.5 x 113.8 cm.; 31 x 44-3/8 inches")
        ]
        for dimension_example in dimension_examples:
            original_text, dimension = dimension_example
            self.check_output_equality(original_text, type_="p", dimensions=dimension)
    def test_complex_dimensions(self):
        self.check_output_equality("228 3-1/2x5 to 4x6-inch, prints in 5 boxes",
                                   type_="228 prints", dimensions="3-1/2x5 to 4x6-inch", container_summary="(in 5 boxes)")
    def test_black_and_white_put_in_phys_facet(self):
        self.check_output_equality("48 black and white 8x10-inch prints", type_="48 prints", dimensions="8x10-inch", physfacet="black and white")
    def test_horrific_extent_1(self):
        self.check_output_equality("26 3-1/4x4-1/4-inch, color and black-and-white; Polaroid prints",
                                   type_="26 Polaroid prints",
                                   dimensions="3-1/4x4-1/4-inch",
                                   physfacet="color and black-and-white",
                                   portion="whole")
    def test_horrific_extent_2(self):
        self.check_output_equality("236 3-1/2x5-1/2 and 4x6-inch, color prints",
                                   type_="236 prints",
                                   dimensions="3-1/2x5-1/2 and 4x6-inch",
                                   physfacet="color",
                                   portion="whole")
    def test_in_edge_case_1(self):
        self.check_output_equality("14 folders; formerly in binders", type_="14 folders", container_summary="(formerly in binders)")
    def test_in_edge_case_2(self):
        self.check_output_equality("(in 4 boxes)", type_="", container_summary="(in 4 boxes)")
    def test_reel_special_cases(self):
        # Audio reels: speed (ips), diameter, and track info all belong in
        # the physical facet, not in type or dimensions.
        self.check_output_equality("5 inch reel, 3 3/4 ips", type_="reel", physfacet="5 inch; 3 3/4 ips")
        self.check_output_equality('7" reel, 3.75 ips.', type_="reel", physfacet='7"; 3.75 ips')
        self.check_output_equality('1 10 1/2" reel', type_="1 reel", physfacet='10 1/2"')
        self.check_output_equality("3/4-inch reel", type_="reel", physfacet="3/4-inch")
        self.check_output_equality("1 sound tape reel: 7 1/2 ips; 5 inches", type_="1 sound tape reel", physfacet="7 1/2 ips; 5 inches")
        self.check_output_equality("2 sound tape reels: 3 3/4 ips; 7 inches", type_="2 sound tape reels", physfacet="3 3/4 ips; 7 inches")
        self.check_output_equality("5 sound tape reels (dual track): 7 1/2 ips; 7 inches", type_="5 sound tape reels", physfacet="dual track; 7 1/2 ips; 7 inches")
        self.check_output_equality('2 tapes, 3-3/4 ips', type_="2 tapes", physfacet="3-3/4 ips")
        self.check_output_equality("147 sound tape reels : 3 3/4 - 7 1/2 ips ; 5-10 inches", type_="147 sound tape reels", physfacet="3 3/4 - 7 1/2 ips ; 5-10 inches")
    def test_rpm(self):
        self.check_output_equality("33 1/3 rpm Phonograph Records", type_="Phonograph Records", physfacet="33 1/3 rpm")
        self.check_output_equality("set of 4 records, 45 rpm,33 1/3 rpm", type_="set of 4 records", physfacet="45 rpm; 33 1/3 rpm")
    def test_time_dimensions(self):
        # Durations (mm:ss or "ca. N min.") are treated as dimensions.
        self.check_output_equality("50:59", type_="", dimensions="50:59")
        self.check_output_equality("2:18 min.", type_="", dimensions="2:18 min.")
        self.check_output_equality("ca. 15 min.", type_="", dimensions="ca. 15 min.")
        self.check_output_equality("1 sound tape reel (13:08)", type_="1 sound tape reel", dimensions="13:08")
        self.check_output_equality("1 sound tape reel (ca. 12 min.)", type_="1 sound tape reel", dimensions="ca. 12 min.")
        self.check_output_equality("1 sound tape reel: ca. 3 min.", type_="1 sound tape reel", dimensions="ca. 3 min.")
    def test_color_not_removed_when_part_of_other_words(self):
        self.check_output_equality("original drawings, pencil and colored pencil on tracing paper", type_="original drawings, pencil and colored pencil on tracing paper")
    def test_portion_assigns_part_correctly(self):
        # multiple=True signals this extent is one of several -> portion "part".
        self.check_output_equality("1 linear foot", type_="1 linear foot", portion="part", multiple=True)
if __name__ == "__main__":
unittest.main()
| [
"wboyle13@gmail.com"
] | wboyle13@gmail.com |
f81c8d924008de759cda6f47958157a9921dd4e6 | a5c4ea16042a8078e360c32636c00e3163ac99a8 | /PractitionerBundle/practice/chap11-deepergooglenet/pyimagesearch/nn/conv/minigooglenet.py | 04f4ebdcd102bf3c334524029ba9bb8cd187c6f9 | [] | no_license | lykhahaha/Mine | 3b74571b116f72ee17721038ca4c58796610cedd | 1439e7b161a7cd612b0d6fa4403b4c8c61648060 | refs/heads/master | 2020-07-15T05:16:13.808047 | 2019-06-01T07:30:01 | 2019-06-01T07:30:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,760 | py | from keras.layers import Conv2D, BatchNormalization, Dropout, MaxPooling2D, concatenate, Dense, AveragePooling2D, Flatten
from keras.models import Input, Model
from keras.regularizers import l2
from keras import backend as K
class MiniGoogLeNet:
    """Miniature GoogLeNet (Inception-style) CNN for Keras.

    Built from three reusable static blocks:
      - conv_module:       CONV => RELU => BN
      - inception_module:  parallel 1x1 and 3x3 conv branches, concatenated
      - downsample_module: strided 3x3 conv alongside 3x3 max pooling
    """

    @staticmethod
    def conv_module(x, K, k_x, k_y, stride, padding='same', chan_dim=-1, reg=None):
        """CONV => RELU => BN building block.

        # Arguments
            x: input tensor
            K: number of convolution filters
            k_x, k_y: kernel width and height
            stride: convolution strides
            padding: convolution padding mode
            chan_dim: channel axis for batch normalization
                (defaults to -1, i.e. channels-last; previously this
                parameter had no default while following a defaulted
                argument, which is a SyntaxError)
            reg: optional L2 regularization strength
        """
        # l2(None) raises inside Keras, so only build a regularizer
        # when a strength was actually supplied.
        x = Conv2D(K, (k_x, k_y), strides=stride, padding=padding, activation='relu',
                   kernel_regularizer=l2(reg) if reg is not None else None)(x)
        x = BatchNormalization(axis=chan_dim)(x)
        return x

    @staticmethod
    def inception_module(x, num_1x1, num_3x3, chan_dim, reg=None):
        """Two parallel conv branches (1x1 and 3x3) joined along the channel axis."""
        first = MiniGoogLeNet.conv_module(x, num_1x1, 1, 1, (1, 1), chan_dim=chan_dim, reg=reg)
        second = MiniGoogLeNet.conv_module(x, num_3x3, 3, 3, (1, 1), chan_dim=chan_dim, reg=reg)
        return concatenate([first, second], axis=chan_dim)

    @staticmethod
    def downsample_module(x, num_3x3, chan_dim, reg=None):
        """Halve spatial resolution: strided 3x3 conv alongside 3x3 max pooling."""
        # chan_dim must be passed by keyword here: a positional argument
        # cannot follow the padding='valid' keyword argument.
        first = MiniGoogLeNet.conv_module(x, num_3x3, 3, 3, (2, 2), padding='valid',
                                          chan_dim=chan_dim, reg=reg)
        second = MaxPooling2D((3, 3), strides=(2, 2))(x)
        return concatenate([first, second], axis=chan_dim)

    @staticmethod
    def build(width, height, depth, classes, reg=None):
        """Assemble and return the MiniGoogLeNet model.

        # Arguments
            width, height, depth: input image dimensions
            classes: number of output classes
            reg: optional L2 regularization strength applied to all
                convolution and dense layers
        """
        # define input shape and set channels last
        input_shape = (width, height, depth)
        chan_dim = -1

        # image_data_format is a function: it must be called, otherwise
        # the comparison is always False and the channels-first branch
        # can never be taken.
        if K.image_data_format() == 'channels_first':
            input_shape = (depth, width, height)
            chan_dim = 1

        # define input and first convolution
        inputs = Input(shape=input_shape)
        x = MiniGoogLeNet.conv_module(inputs, 96, 3, 3, (1, 1), chan_dim=chan_dim, reg=reg)

        # define inception - inception - downsample
        x = MiniGoogLeNet.inception_module(x, 32, 32, chan_dim, reg)
        x = MiniGoogLeNet.inception_module(x, 32, 48, chan_dim, reg)
        x = MiniGoogLeNet.downsample_module(x, 80, chan_dim, reg)

        # define inception - inception - inception - inception - downsample
        x = MiniGoogLeNet.inception_module(x, 112, 48, chan_dim, reg)
        x = MiniGoogLeNet.inception_module(x, 96, 64, chan_dim, reg)
        x = MiniGoogLeNet.inception_module(x, 80, 80, chan_dim, reg)
        x = MiniGoogLeNet.inception_module(x, 48, 96, chan_dim, reg)
        x = MiniGoogLeNet.downsample_module(x, 96, chan_dim, reg)

        # define inception - inception, then global average + dropout
        x = MiniGoogLeNet.inception_module(x, 176, 160, chan_dim, reg)
        x = MiniGoogLeNet.inception_module(x, 176, 160, chan_dim, reg)
        x = AveragePooling2D((7, 7))(x)
        x = Dropout(0.5)(x)

        # final classifier layers
        x = Flatten()(x)
        x = Dense(classes, activation='softmax',
                  kernel_regularizer=l2(reg) if reg is not None else None)(x)
return Model(inputs, x) | [
"ITITIU15033@student.hcmiu.edu.vn"
] | ITITIU15033@student.hcmiu.edu.vn |
e566df7e4fc327591338117e75ad0087079d75cc | 174a186359880f1f9999686fbda5b70f1bc3d28a | /prototypes/newmethod.py | 3b6ac95fbd9ea9a5291736420b4ed3d3c6d3540e | [] | no_license | SuguruChhaya/snake_game_tkinter | 1da8bde533865917aedea6d99ee919cc34e29996 | 4c1c4d6b309c196a1b13666320202b824746e95a | refs/heads/master | 2022-12-03T07:33:41.504751 | 2020-08-25T15:27:32 | 2020-08-25T15:27:32 | 287,422,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,449 | py | from tkinter import *
import random
class Snake():
def __init__(self):
self.root = Tk()
Snake.my_canvas = Canvas(self.root, height=375, width=375, bg='black')
Snake.my_canvas.grid(row=0, column=0)
# *I am going to make a grid system by lines
# *15 by 15 grid
for height in range(0, 375, 25):
Snake.my_canvas.create_line(0, height, 500, height, fill='white')
for width in range(0, 375, 25):
Snake.my_canvas.create_line(width, 0, width, 500, fill='white')
self.head = Snake.my_canvas.create_rectangle(
225, 225, 250, 250, fill='light green', tags='item')
self.snake_body = []
# *Trying to move this thing.
# https://stackoverflow.com/questions/15269682/python-tkinter-canvas-fail-to-bind-keyboard
self.up_bind = self.root.bind("<w>", lambda event: self.up(event))
self.down_bind = self.root.bind("<s>", lambda event: self.down(event))
self.left_bind = self.root.bind("<a>", lambda event: self.left(event))
self.right_bind = self.root.bind(
"<d>", lambda event: self.right(event))
# *Default movement is up
self.x = 0
self.y = -25
self.snake_direction = 'up'
self.last_direction = 'up'
# *I am going to store the turns in the form of a nested list.
# *The first four floats will be the coordinates where I have to turn.
# *The fifth string will be the direction I need to turn to.
self.turns = []
# *Just a variable to allow wall passing or no.
self.wall_death = False
# A great reference for controlling the speed of the moving boxes.
def movement(self):
# *Since this is the function that will constantly be running, I will want to add my checking functions here.
# ?So much to debug here
if self.snake_direction == self.last_direction:
# *I will first check whether there are any items in the self.turns list I have to catch up on.
if len(self.turns) == 0:
print(self.snake_body)
Snake.my_canvas.move(self.head, self.x, self.y)
for item in self.snake_body:
Snake.my_canvas.move(item, self.x, self.y)
# *I have an issue in which the snake head moves two
# *The case in which the head has turned, but the body hasn't kept up.
else:
print(self.turns)
# *I think I have to keep the head and all the already turned body parts separate in a list.
# *These parts have to be treated separately.
Snake.my_canvas.move(self.head, self.x, self.y)
self.not_moved = self.snake_body.copy()
for part in self.snake_body:
for i in range(len(self.turns)):
if part in self.turns[i][2]:
if self.turns[i][1] == 'up':
Snake.my_canvas.move(part, 0, -25)
elif self.turns[i][1] == 'down':
Snake.my_canvas.move(part, 0, 25)
elif self.turns[i][1] == 'left':
Snake.my_canvas.move(part, -25, 0)
elif self.turns[i][1] == 'right':
Snake.my_canvas.move(part, 25, 0)
# *The rest of the snake which doesn't turn yet
if Snake.my_canvas.coords(part) == self.turns[i][0]:
print(part)
print(self.turns[i][2])
self.turns[i][2].remove(part)
if len(self.turns[i][2]) == 0:
self.turns = self.turns[1:]
self.not_moved.remove(part)
break
for part in self.not_moved:
if self.snake_direction == 'up':
Snake.my_canvas.move(part, 0, -25)
elif self.snake_direction == 'down':
Snake.my_canvas.move(part, 0, 25)
elif self.snake_direction == 'left':
Snake.my_canvas.move(part, -25, 0)
elif self.snake_direction == 'right':
Snake.my_canvas.move(part, 25, 0)
else:
# *I will obviously have to get the coords of the head and check add the turning point
# *Temporary variable to store list
turn_add = []
turn_add.append(Snake.my_canvas.coords(self.head))
turn_add.append(self.last_direction)
#?I think since the snake body changes, the thing mutates in the whole nested list
#*Using a tuple solved the issue of it mutating
#!But since a tuple cannot remove objects, I need to find a different solution
self.snake_body_copy = self.snake_body.copy()
turn_add.append(self.snake_body_copy)
#?Due to the value corresponding to snake_body being changed, the snake_body itself changes too.
#*To prevent this, I should make a copy of the snake variable
#!I will have keep track of which body parts have already catched up and which didn't.
self.turns.append(turn_add)
print(self.turns)
Snake.my_canvas.move(self.head, self.x, self.y)
if len(self.snake_body_copy) == 0:
self.turns = self.turns[1:]
self.not_moved = self.snake_body.copy()
for part in self.snake_body:
for i in range(len(self.turns)):
if part in self.turns[i][2]:
if self.turns[i][1] == 'up':
Snake.my_canvas.move(part, 0, -25)
elif self.turns[i][1] == 'down':
Snake.my_canvas.move(part, 0, 25)
elif self.turns[i][1] == 'left':
Snake.my_canvas.move(part, -25, 0)
elif self.turns[i][1] == 'right':
Snake.my_canvas.move(part, 25, 0)
# *The rest of the snake which doesn't turn yet
if Snake.my_canvas.coords(part) == self.turns[i][0]:
self.turns[i][2].remove(part)
if len(self.turns[i][2]) == 0:
self.turns = self.turns[1:]
self.not_moved.remove(part)
break
# *I have to keep it outside of the for-loop so that it applies when the snake body length is 0
for part in self.not_moved:
if self.snake_direction == 'up':
Snake.my_canvas.move(part, 0, -25)
elif self.snake_direction == 'down':
Snake.my_canvas.move(part, 0, 25)
elif self.snake_direction == 'left':
Snake.my_canvas.move(part, -25, 0)
elif self.snake_direction == 'right':
Snake.my_canvas.move(part, 25, 0)
self.snake_x1 = Snake.my_canvas.coords(self.head)[0]
self.snake_y1 = Snake.my_canvas.coords(self.head)[1]
self.snake_x2 = Snake.my_canvas.coords(self.head)[2]
self.snake_y2 = Snake.my_canvas.coords(self.head)[3]
# *I should definitely make a variable to manage whether the after function will run next time or not.
self.after_var = True
if Snake.my_canvas.coords(self.head) == Snake.my_canvas.coords(Apple.apple_1):
self.add_length()
Snake.my_canvas.delete(Apple.apple_1)
Apple.apple_count -= 1
self.after_var = True
if Apple.apple_count == 0:
b.create_apple()
# *Check if there are constantly 1 or more apples
# *If hit the wall, freeze.
if self.after_var:
self.alive = Snake.my_canvas.after(1000, self.movement)
else:
Snake.my_canvas.after_cancel(self.alive)
# *Storing direction to see if there was a turn
self.last_direction = self.snake_direction
def add_length(self):
# *New rectangles will spawn based on which direction the snake was travelling last
#*If I am addin to a length larger than 1, the self.snake_x1 must be the coordinates of the last item in the snake body
if len(self.snake_body) > 0:
self.snake_x1 = Snake.my_canvas.coords(self.snake_body[-1])[0]
self.snake_y1= Snake.my_canvas.coords(self.snake_body[-1])[1]
self.snake_x2= Snake.my_canvas.coords(self.snake_body[-1])[2]
self.snake_y2= Snake.my_canvas.coords(self.snake_body[-1])[3]
if self.snake_direction == 'up':
self.add_length_x1 = self.snake_x1
# *Careful with the coordinate system
self.add_length_y1 = self.snake_y1 + 25
self.add_length_x2 = self.snake_x2
self.add_length_y2 = self.snake_y2 + 25
elif self.snake_direction == 'down':
self.add_length_x1 = self.snake_x1
self.add_length_y1 = self.snake_y1 + 25
self.add_length_x2 = self.snake_x2
self.add_length_y2 = self.snake_y2 + 25
elif self.snake_direction == 'left':
self.add_length_x1 = self.snake_x1 + 25
self.add_length_y1 = self.snake_y1
self.add_length_x2 = self.snake_x2 + 25
self.add_length_y2 = self.snake_y2
elif self.snake_direction == 'right':
self.add_length_x1 = self.snake_x1 - 25
self.add_length_y1 = self.snake_y1
self.add_length_x2 = self.snake_x2 - 25
self.add_length_y2 = self.snake_y2
# *As long as the rectange is in the list, I don't think I have to name it.
self.snake_body.append(Snake.my_canvas.create_rectangle(
self.add_length_x1, self.add_length_y1, self.add_length_x2, self.add_length_y2, fill='green'))
def up(self, event):
self.x = 0
self.y = -25
self.snake_direction = 'up'
def down(self, event):
self.x = 0
self.y = 25
self.snake_direction = 'down'
def left(self, event):
self.x = -25
self.y = 0
self.snake_direction = 'left'
def right(self, event):
self.x = 25
self.y = 0
self.snake_direction = 'right'
class Apple():

    # Number of apples currently on the board (shared, class-wide counter).
    apple_count = 0

    def __init__(self):
        # Every multiple of 25 inside the 375x375 board is a legal spawn
        # coordinate: one entry per 25-px grid cell.
        self.apple_coord_list = list(range(0, 375, 25))

        # Independent copies for the x and y axes so each can be
        # adjusted separately later without affecting the other.
        self.apple_x_list = self.apple_coord_list.copy()
        self.apple_y_list = self.apple_coord_list.copy()

    def create_apple(self):
        # Pick a random grid cell; the apple occupies one 25x25 square.
        self.apple_x1 = random.choice(self.apple_x_list)
        self.apple_x2 = self.apple_x1 + 25
        self.apple_y1 = random.choice(self.apple_y_list)
        self.apple_y2 = self.apple_y1 + 25

        # Draw the apple and record it at class level so the snake can
        # test head/apple collisions against Apple.apple_1.
        Apple.apple_1 = Snake.my_canvas.create_rectangle(
            self.apple_x1, self.apple_y1, self.apple_x2, self.apple_y2, fill='red')
        Apple.apple_count += 1
a = Snake()
b = Apple()
b.create_apple()
a.movement()
mainloop()
| [
"suguruchhaya@gmail.com"
] | suguruchhaya@gmail.com |
e5bf07685aa283a83e0748b3c2bb4bfe06af782e | 0e0cb55fb13e5ee6c60869a4da7812120817eb33 | /ruantong/概率分布/几何分布.py | c274c6b72d37a529e680cbb0eb23ecdd2a6fab97 | [] | no_license | xjwhhh/Probability-Theory | 7d2a89fdeb37b4112da5a52583df149ce119d389 | 1ce2e7843de9c5ed44c189992003f88c66abd13b | refs/heads/master | 2020-12-02T18:12:51.775550 | 2017-08-09T05:42:42 | 2017-08-09T05:42:42 | 96,496,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import geom as G

# Frozen geometric distribution with success probability p = 0.2
rv=G(0.2)#p=0.2
# Support points k = 1..10 (scipy's geom is defined for k >= 1)
x=np.arange(1,11,1)
# Probability mass P(X = k) at each support point
y=rv.pmf(x)
# Bar chart of the probability mass function
plt.bar(x,y,width=0.6,color='grey')
plt.show()
print(y) | [
"151250171@smail.nju.edu.cn"
] | 151250171@smail.nju.edu.cn |
df118c794a0cc62832e6333daf6389c9a655c01d | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/Traits/enthought/traits/ui/editors/dnd_editor.py | 22afe66e05115ed4899e7412692ac979d6dcade2 | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | #------------------------------------------------------------------------------
#
# Copyright (c) 2008, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 06/25/2006
#
#------------------------------------------------------------------------------
""" Defines the editor factory for a drag-and-drop editor. A drag-and-drop
editor represents its value as a simple image which, depending upon the
editor style, can be a drag source only, a drop target only, or both a
drag source and a drop target.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from __future__ import absolute_import
from ..ui_traits import Image
from ..editor_factory import EditorFactory
#-------------------------------------------------------------------------------
# 'ToolkitEditorFactory' class:
#-------------------------------------------------------------------------------
class ToolkitEditorFactory ( EditorFactory ):
""" Editor factory for drag-and-drop editors.
"""
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The image to use for the target:
image = Image
# The image to use when the target is disabled:
disabled_image = Image
# Define the DNDEditor class.
DNDEditor = ToolkitEditorFactory
# EOF #########################################################################
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
d0a119ae6529af7f07541b10b9c8ecb1ff6390ed | 26d802852cdfef2b57902a350b4fa17d5aa07f50 | /13_revp/solution3_revp.py | 018e307228530fa71eb403b8b57157332268547b | [
"MIT"
] | permissive | Vstrains/biofx_python | 7f5e27707f11e77f08d1516381a08a267fd81a1c | 7a2821dba36f1dae8404efbe35f44242833d6180 | refs/heads/main | 2023-03-12T09:51:57.766441 | 2021-02-25T03:22:15 | 2021-02-25T03:22:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | #!/usr/bin/env python3
""" Locating Restriction Sites """
import argparse
import sys
import operator
from typing import List, NamedTuple, TextIO
from Bio import SeqIO, Seq
from common import find_kmers
class Args(NamedTuple):
""" Command-line arguments """
file: TextIO
# --------------------------------------------------
def get_args() -> Args:
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description='Locating Restriction Sites',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
help='Input FASTA file',
metavar='FILE',
type=argparse.FileType('rt'))
args = parser.parse_args()
return Args(args.file)
# --------------------------------------------------
def main() -> None:
    """ Print positions/lengths of reverse palindromes in the first record """

    args = get_args()
    recs = SeqIO.parse(args.file, 'fasta')

    # next() with a default: a bare next(recs) would raise an uncaught
    # StopIteration on an empty FASTA file instead of reaching the
    # "no sequences" exit below.
    if (rec := next(recs, None)) is not None:
        # Rosalind's REVP asks for sites of length 4 through 12.
        for k in range(4, 13):
            for pos in revp(str(rec.seq), k):
                print(pos, k)
    else:
        sys.exit(f'"{args.file.name}" contains no sequences.')
# --------------------------------------------------
def revp(seq: str, k: int) -> List[int]:
    """ Return 1-based start positions of reverse palindromes of length k.

    A k-mer is a reverse palindrome when it equals its own reverse
    complement. Done directly with str.translate instead of building
    three parallel sequences and comparing with operator.eq, which
    avoids the Biopython round-trip for a simple string check.
    """
    # Complement table covers upper- and lower-case bases.
    comp = str.maketrans('ACGTacgt', 'TGCAtgca')
    positions = []
    for i in range(len(seq) - k + 1):
        kmer = seq[i:i + k]
        if kmer == kmer[::-1].translate(comp):
            positions.append(i + 1)
    return positions
# --------------------------------------------------
def test_revp() -> None:
    """ Test revp """

    # Reverse palindromes must have even length: an odd-length site would
    # need a center base equal to its own complement, which no DNA base
    # is — hence the empty results for k = 5 and k = 7.
    assert revp('CGCATGCATTGA', 4) == [3, 5]
    assert revp('CGCATGCATTGA', 5) == []
    assert revp('CGCATGCATTGA', 6) == [2, 4]
    assert revp('CGCATGCATTGA', 7) == []
    assert revp('CCCGCATGCATT', 4) == [5, 7]
    assert revp('CCCGCATGCATT', 5) == []
    assert revp('CCCGCATGCATT', 6) == [4, 6]
if __name__ == '__main__':
main()
| [
"kyclark@gmail.com"
] | kyclark@gmail.com |
64cb85a8ce03348ff0b349a19422abd9fe6515af | 1aa357433cad86c1c42eeaece7109094cdf790a6 | /12_extensions/ext/custom_ext_pygments.py | 134a984984bcae1b2676618d0b6ef706fa115976 | [
"MIT"
] | permissive | hooj0/jinja2-template-examples | 89ab4466d69ad2232922126a3f208f02b0ed3798 | 20481153a964d46e007e807fd2be9a0c42201dd0 | refs/heads/master | 2020-03-28T11:02:13.451556 | 2018-09-12T07:50:40 | 2018-09-12T07:50:40 | 148,171,173 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,559 | py | #!/usr/bin/env python3
# encoding: utf-8
# @author: hoojo
# @email: hoojo_@126.com
# @github: https://github.com/hooj0
# @create date: 2018-09-12
# @copyright by hoojo @2018
# @link http://www.bjhee.com/jinja2-extension.html
# @changelog user custom extension
# ===============================================================================
# 标题:利用 pygments 库开发 jinja2 template的扩展,做到代码高亮的效果
# ===============================================================================
# 使用:pip install pygments
# -------------------------------------------------------------------------------
# 描述:Pygments是Python提供语法高亮的工具,官网是pygments.org。
# http://pygments.org/docs/
# -------------------------------------------------------------------------------
from jinja2 import nodes
from jinja2.ext import Extension
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import guess_lexer, get_lexer_by_name
# -------------------------------------------------------------------------------
# 创建一个自定义扩展类,继承jinja2.ext.Extension
# -------------------------------------------------------------------------------
class PygmentsExtension(Extension):
    """Jinja2 extension that syntax-highlights {% code %} blocks via Pygments."""

    # Keywords handled by this extension: {% code %} statements in the
    # template are dispatched here.
    tags = set(['code'])

    def __init__(self, environment):
        # Initialize the parent class — required boilerplate.
        super(PygmentsExtension, self).__init__(environment)

        # Add attributes to the Jinja2 environment; in Flask this makes
        # them reachable as app.jinja_env.pygments etc.
        environment.extend(
            pygments=self,
            pygments_support=True
        )

    # Override jinja2.ext.Extension.parse.
    # This is the main handler for {% code %} statements in templates.
    def parse(self, parser):
        # Entering this function means a {% code %} tag was found.
        # Record the line number of the tag in the template file.
        lineno = next(parser.stream).lineno

        # Read the argument of the {% code %} statement; for example
        # {% code 'python' %} yields a jinja2.nodes.Const with value
        # 'python'.
        lang_type = parser.parse_expression()

        # Wrap the argument in a list (empty when no argument given).
        args = []
        if lang_type is not None:
            args.append(lang_type)

        # The code below would support a second, comma-separated
        # argument; it is not needed in this example.
        # It checks whether the current stream position is a comma and,
        # if so, reads one more argument; otherwise appends a Const(None).
        # if parser.stream.skip_if('comma'):
        #     args.append(parser.parse_expression())
        # else:
        #     args.append(nodes.Const(None))

        # Parse everything between the {% code %} tag and {% endcode %},
        # storing the result in body and advancing the stream past
        # {% endcode %}.
        body = parser.parse_statements(['name:endcode'], drop_needle=True)

        # Return a CallBlock node carrying the recorded line number.
        # It wraps a call to our "_pygmentize" method, two empty lists
        # (no block arguments/defaults), and the parsed body.
        return nodes.CallBlock(self.call_method('_pygmentize', args), [], [], body).set_lineno(lineno)

    # Internal helper containing the extension's main logic; the parse()
    # method above is mostly reusable boilerplate shared by extensions.
    def _pygmentize(self, lang_type, caller):
        # Initialize the HTML formatter (with a line-number table).
        formatter = HtmlFormatter(linenos='table')

        # Fetch the content between the {% code %} tags; caller()
        # corresponds to the body passed into CallBlock() above.
        content = caller()

        # Use lang_type from the template as the language to highlight;
        # when absent, let Pygments guess the language from the content.
        lexer = None
        if lang_type is None:
            lexer = guess_lexer(content)
        else:
            lexer = get_lexer_by_name(lang_type)
return highlight(content, lexer, formatter) | [
"hoojo@qq.com"
] | hoojo@qq.com |
adf58aff82e8028fa40481c8c506bcd1b433c7bb | 5cb7627fc47d57ba7c1fc402a3671c17625c8965 | /python/paddle_fl/mpc/examples/test_add.py | 05895b3a832912e4b5bfaa0c77eb5f68b1854ad7 | [] | no_license | NetHole/PaddleFL | c5951e32027dff714baead6e7a6f6135e1ca01a0 | e0c7192c90dda91f64167cf01e79628fd81dc981 | refs/heads/master | 2022-06-24T19:41:42.640865 | 2020-05-11T01:57:07 | 2020-05-11T01:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test mpc add op
"""
# set proper path for fluid_encrypted without install, should be first line
import env_set
import sys
import numpy as np
import paddle.fluid as fluid
import paddle_fl.mpc as pfl_mpc
role, server, port = env_set.TestOptions().values()
# call mpc add
pfl_mpc.init("aby3", int(role), "localhost", server, int(port))
data_1 = pfl_mpc.data(name='data_1', shape=[8], dtype='int64')
data_2 = pfl_mpc.data(name='data_2', shape=[8], dtype='int64')
d_1 = np.array(
[[0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]]).astype('int64')
d_2 = np.array(
[[7, 6, 5, 4, 3, 2, 1, 0], [7, 6, 5, 4, 3, 2, 1, 0]]).astype('int64')
out_add = data_1 + data_2
exe = fluid.Executor(place=fluid.CPUPlace())
out_add = exe.run(feed={
'data_1': d_1,
'data_2': d_2,
}, fetch_list=[out_add])
print(out_add)
| [
"jingqinghe@baidu.com"
] | jingqinghe@baidu.com |
72a6652e0e2791748a30e32624658798735448df | 0b9e588b3d6ddf95d87a0a0f02d10ef6efcccf51 | /eduapi/api/migrations/0053_auto_20151215_1221.py | 12e1e5e1a4431fdff8d44dbe908b16b8fc5b03a4 | [] | no_license | omni360/inspiration-edu-api | b5d07a7fe3a473689d5323e60e6f88dd3d6fb4cb | 6e1bbf8d895082d4c44af4ae35b9f5aa5cc9addc | refs/heads/master | 2022-01-22T23:30:09.879433 | 2016-04-28T02:02:46 | 2016-04-28T02:02:46 | 57,559,736 | 0 | 0 | null | 2022-01-06T22:24:03 | 2016-05-01T06:35:12 | Python | UTF-8 | Python | false | false | 5,507 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
class Migration(migrations.Migration):
dependencies = [
('api', '0052_merge'),
]
operations = [
migrations.RenameField(
model_name='project',
old_name='teacher_info',
new_name='teacher_additional_resources',
),
migrations.AddField(
model_name='project',
name='ccss',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=10, choices=[(b'RL', b'Reading Literature'), (b'RI', b'Reading Informational Text'), (b'RF', b'Reading Foundational Skills'), (b'W', b'Writing'), (b'SL', b'Speaking & Listening'), (b'L', b'Language'), (b'RST', b'Reading Science & Technical Subjects'), (b'WHST', b'Writing in History, Science, & Technical Subjects'), (b'CC', b'Counting and Cardinality'), (b'OA', b'Operations & Algebraic Thinking'), (b'NBT', b'Number & Operation in Base Ten'), (b'NF', b'Number & operations-Fractions'), (b'MD', b'Measurement and Data'), (b'G', b'Geometry'), (b'RP', b'Ratios and Proportional Relationships'), (b'NS', b'Number System'), (b'EE', b'Expressions and Equations'), (b'F', b'Functions'), (b'SP', b'Statistics and Probability'), (b'MP', b'Math Practices')]), blank=True),
),
migrations.AddField(
model_name='project',
name='four_cs_collaboration',
field=models.TextField(help_text=b'4 cs collaboration', max_length=250, null=True, blank=True),
),
migrations.AddField(
model_name='project',
name='four_cs_communication',
field=models.TextField(help_text=b'4 cs communication', max_length=250, null=True, blank=True),
),
migrations.AddField(
model_name='project',
name='four_cs_creativity',
field=models.TextField(help_text=b'4 cs creativity', max_length=250, null=True, blank=True),
),
migrations.AddField(
model_name='project',
name='four_cs_critical',
field=models.TextField(help_text=b'4 cs critical', max_length=250, null=True, blank=True),
),
migrations.AddField(
model_name='project',
name='grades_range',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=10, choices=[(b'K', b'K'), (b'1', b'1'), (b'2', b'2'), (b'3', b'3'), (b'4', b'4'), (b'5', b'5'), (b'6', b'6'), (b'7', b'7'), (b'8', b'8'), (b'9', b'9'), (b'10', b'10'), (b'11', b'11'), (b'12', b'12')]), blank=True),
),
migrations.AddField(
model_name='project',
name='learning_objectives',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=25), blank=True),
),
migrations.AddField(
model_name='project',
name='ngss',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=10, choices=[(b'PS1', b'Matter and Its Interactions'), (b'PS2', b'Motion and Stability: Forces and Interactions'), (b'PS3', b'Energy'), (b'PS4', b'Waves and Their Applications in Technologies for Information Transfer'), (b'LS1', b'From Molecules to Organisms: Structures and Processes'), (b'LS2', b'Ecosystems: Interactions, Energy, and Dynamics'), (b'LS3', b'Heredity: Inheritance and Variation of Traits'), (b'LS4', b'Biological Evolution: Unity and Diversity'), (b'ESS1', b"Earth's Place in the Universe"), (b'ESS2', b"Earth's Systems"), (b'ESS3', b'Earth and Human Activity'), (b'ETS1', b'Engineering Design'), (b'ETS2', b'Links Among Engineering, Technology, Science, and Society')]), blank=True),
),
migrations.AddField(
model_name='project',
name='prerequisites',
field=models.TextField(default=b'', max_length=1000, null=True, help_text=b'Course prerequisites', blank=True),
),
migrations.AddField(
model_name='project',
name='skills_acquired',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=25), blank=True),
),
migrations.AddField(
model_name='project',
name='subject',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=25, choices=[(b'art', b'Art'), (b'drama', b'Drama'), (b'geography', b'Geography'), (b'history', b'History'), (b'language art', b'Language Arts'), (b'math', b'Math'), (b'music', b'Music'), (b'science', b'Science'), (b'social studies', b'Social Studies'), (b'technology', b'Technology')]), blank=True),
),
migrations.AddField(
model_name='project',
name='teacher_tips',
field=models.TextField(default=b'', max_length=1000, null=True, help_text=b'Tips for teachers', blank=True),
),
migrations.AddField(
model_name='project',
name='technology',
field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=25, choices=[(b'3d printing', b'3D Printing'), (b'electronics', b'Electronics'), (b'3d design', b'3D Design')]), blank=True),
),
]
| [
"frida.cai@autodesk.com"
] | frida.cai@autodesk.com |
b57083b227bffa9c51f22350ea8dd8f0c59f70d7 | 11c00c704a3d1171d1bf4474be7ff1779a1dcb69 | /LSTM_Stateful/lstm_stateful.py | cdc73c77595b3b9e4ffd03df41d1fab30057cbf8 | [] | no_license | mikechen66/LSTM-TF2 | 89bd67ca481ded7264191e240e45f792cc5546c3 | 741250374509c332d9f4f5ddebcb1a966e268df0 | refs/heads/main | 2023-04-08T15:59:46.999735 | 2021-04-20T01:53:34 | 2021-04-20T01:53:34 | 305,043,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,545 | py | """
How to use a stateful LSTM model, stateful vs stateless LSTM performance comparison
[More documentation about the Keras LSTM model](/layers/recurrent/#lstm)
The models are trained on an input/output pair, where the input is a generated uniformly
distributed random sequence of length = `input_len`, and the output is a moving average
of the input with window length = `tsteps`. Both `input_len` and `tsteps` are defined in
the "editable parameters" section.
A larger `tsteps` value means that the LSTM will need more memory to figure out the
input-output relationship. This memory length is controlled by the `lahead` variable (more
details below).
The rest of the parameters are:
- `input_len`: the length of the generated input sequence
- `lahead`: the input sequence length that the LSTM is trained on for each output point
- `batch_size`, `epochs`: same parameters as in the `model.fit(...)` function
When `lahead > 1`, the model input is preprocessed to a "rolling window view" of the data,
with the window length = `lahead`. This is similar to sklearn's `view_as_windows` with
`window_shape` [being a single number.]
(http://scikit-image.org/docs/0.10.x/api/skimage.util.html#view-as-windows)
When `lahead < tsteps`, only the stateful LSTM converges because its statefulness allows
it to see beyond the capability that lahead gave it to fit the n-point average. The stateless
LSTM does not have this capability, and hence is limited by its `lahead` parameter, which is
not sufficient to see the n-point average.
When `lahead >= tsteps`, both the stateful and stateless LSTM converge.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense #, LSTM
from recurrent_v2 import LSTM
import tensorflow as tf
# Set up the GPU memory size to avoid the out-of-memory error
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only allocate 4GB of memory on the first GPU
try:
tf.config.experimental.set_virtual_device_configuration(
gpus[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
# ----------------------------------------------------------
# EDITABLE PARAMETERS
# Read the documentation in the script head for more details
# ----------------------------------------------------------
# length of input
input_len = 1000
# The window length of the moving average used to generate
# the output from the input in the input/output pair used
# to train the LSTM
# e.g. if tsteps=2 and input=[1, 2, 3, 4, 5],
# then output=[1.5, 2.5, 3.5, 4.5]
tsteps = 2
# The input sequence length that the LSTM is trained on for each output point
lahead = 1
# training parameters passed to "model.fit(...)"
batch_size = 1
epochs = 10
# ------------
# MAIN PROGRAM
# ------------
print("*" * 33)
if lahead >= tsteps:
print("STATELESS LSTM WILL ALSO CONVERGE")
else:
print("STATELESS LSTM WILL NOT CONVERGE")
print("*" * 33)
np.random.seed(1986)
print('Generating Data...')
def gen_uniform_amp(amp=1, xn=10000):
"""Generates uniform random data between
-amp and +amp
and of length xn
# Arguments
amp: maximum/minimum range of uniform data
xn: length of series
"""
data_input = np.random.uniform(-1 * amp, +1 * amp, xn)
data_input = pd.DataFrame(data_input)
return data_input
# Since the output is a moving average of the input,
# the first few points of output will be NaN
# and will be dropped from the generated data
# before training the LSTM.
# Also, when lahead > 1,
# the preprocessing step later of "rolling window view"
# will also cause some points to be lost.
# For aesthetic reasons,
# in order to maintain generated data length = input_len after pre-processing,
# add a few points to account for the values that will be lost.
to_drop = max(tsteps - 1, lahead - 1)
data_input = gen_uniform_amp(amp=0.1, xn=input_len + to_drop)
# set the target to be a N-point average of the input
expected_output = data_input.rolling(window=tsteps, center=False).mean()
# when lahead > 1, need to convert the input to "rolling window view"
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html
if lahead > 1:
data_input = np.repeat(data_input.values, repeats=lahead, axis=1)
data_input = pd.DataFrame(data_input)
for i, c in enumerate(data_input.columns):
data_input[c] = data_input[c].shift(i)
# drop the nan
expected_output = expected_output[to_drop:]
data_input = data_input[to_drop:]
print('Input shape:', data_input.shape)
print('Output shape:', expected_output.shape)
print('Input head: ')
print(data_input.head())
print('Output head: ')
print(expected_output.head())
print('Input tail: ')
print(data_input.tail())
print('Output tail: ')
print(expected_output.tail())
print('Plotting input and expected output')
plt.plot(data_input[0][:10], '.')
plt.plot(expected_output[0][:10], '-')
plt.legend(['Input', 'Expected output'])
plt.title('Input')
plt.show()
def create_model(stateful):
model = Sequential()
model.add(LSTM(20,
input_shape=(lahead, 1),
batch_size=batch_size,
stateful=stateful))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
return model
print('Creating Stateful Model...')
model_stateful = create_model(stateful=True)
# split train/test data
def split_data(x, y, ratio=0.8):
to_train = int(input_len * ratio)
# tweak to match with batch_size
to_train -= to_train % batch_size
x_train = x[:to_train]
y_train = y[:to_train]
x_test = x[to_train:]
y_test = y[to_train:]
# tweak to match with batch_size
to_drop = x.shape[0] % batch_size
if to_drop > 0:
x_test = x_test[:-1 * to_drop]
y_test = y_test[:-1 * to_drop]
# some reshaping
reshape_3 = lambda x: x.values.reshape((x.shape[0], x.shape[1], 1))
x_train = reshape_3(x_train)
x_test = reshape_3(x_test)
reshape_2 = lambda x: x.values.reshape((x.shape[0], 1))
y_train = reshape_2(y_train)
y_test = reshape_2(y_test)
return (x_train, y_train), (x_test, y_test)
(x_train, y_train), (x_test, y_test) = split_data(data_input, expected_output)
print('x_train.shape: ', x_train.shape)
print('y_train.shape: ', y_train.shape)
print('x_test.shape: ', x_test.shape)
print('y_test.shape: ', y_test.shape)
print('Training')
for i in range(epochs):
print('Epoch', i + 1, '/', epochs)
# Note that the last state for sample i in a batch will
# be used as initial state for sample i in the next batch.
# Thus we are simultaneously training on batch_size series with
# lower resolution than the original series contained in data_input.
# Each of these series are offset by one step and can be
# extracted with data_input[i::batch_size].
model_stateful.fit(x_train,
y_train,
batch_size=batch_size,
epochs=1,
verbose=1,
validation_data=(x_test, y_test),
shuffle=False)
model_stateful.reset_states()
print('Predicting')
predicted_stateful = model_stateful.predict(x_test, batch_size=batch_size)
print('Creating Stateless Model...')
model_stateless = create_model(stateful=False)
print('Training')
model_stateless.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
shuffle=False)
print('Predicting')
predicted_stateless = model_stateless.predict(x_test, batch_size=batch_size)
# ----------------------------
print('Plotting Results')
plt.subplot(3, 1, 1)
plt.plot(y_test)
plt.title('Expected')
plt.subplot(3, 1, 2)
# drop the first "tsteps-1" because it is not possible to predict them
# since the "previous" timesteps to use do not exist
plt.plot((y_test - predicted_stateful).flatten()[tsteps - 1:])
plt.title('Stateful: Expected - Predicted')
plt.subplot(3, 1, 3)
plt.plot((y_test - predicted_stateless).flatten())
plt.title('Stateless: Expected - Predicted')
plt.show() | [
"noreply@github.com"
] | mikechen66.noreply@github.com |
fcca02f2f1adbc8f766107ecdaa0c0ff86a0d061 | d4e573e8eae32db155fe5931b3e2dcd3aa48969b | /indigo/bin/rocon_uri | 5eb4af63dbf48f015786da051418f1767653e6b9 | [] | no_license | javierdiazp/myros | ee52b0a7c972d559a1a377f8de4eb37878b8a99b | 7571febdfa881872cae6378bf7266deca7901529 | refs/heads/master | 2022-11-09T09:24:47.708988 | 2016-11-10T16:56:28 | 2016-11-10T16:56:28 | 73,733,895 | 0 | 1 | null | 2022-10-25T05:16:35 | 2016-11-14T18:19:06 | C++ | UTF-8 | Python | false | false | 5,048 | #!/usr/bin/python
#
# License: BSD
# https://raw.github.com/robotics-in-concert/tools/license/LICENSE
#
##############################################################################
# Imports
##############################################################################
import argparse
import os
import copy
import re
import sys
import rospy
import rocon_uri
import rocon_console.console as console
##############################################################################
# Methods
##############################################################################
def usage():
usage = console.green + "\nUtility for introspecting on rocon uri strings.\n\n" + console.reset
usage += console.bold + "Commands:\n" + console.reset
usage += console.cyan + "\trocon_uri parse URI\t" + console.yellow + "parse and attempt to validate a rocon URI.\n" + console.reset
usage += console.cyan + "\trocon_uri fields\t" + console.yellow + "print a full list of permitted fields in a rocon uri string.\n" + console.reset
usage += console.cyan + "\trocon_uri rules\t\t" + console.yellow + "print a full list of the ebnf rules for a rocon uri string.\n" + console.reset
usage += "\n"
return usage
def _rocon_uri_cmd_parse(rocon_uri_string):
try:
uri = rocon_uri.RoconURI(rocon_uri_string)
print("\n\t" + console.bold + "'" + rocon_uri_string + "'" + console.reset + console.green + " is a valid rocon uri\n" + console.reset)
print("\t" + console.bold + "'Concert Name' "+ console.reset + console.green + ": %s" % uri.concert_name+ console.reset)
print("\t" + console.bold + "'Hardware Platform' "+ console.reset + console.green + ": %s" % uri.hardware_platform.string + console.reset)
print("\t" + console.bold + "'Name' "+ console.reset + console.green + ": %s" % uri.name.string + console.reset)
print("\t" + console.bold + "'Application Framework' "+ console.reset + console.green + ": %s" % uri.application_framework.string + console.reset)
print("\t" + console.bold + "'Operating System' "+ console.reset + console.green + ": %s" % uri.operating_system.string + console.reset)
if uri.rapp:
print("\t" + console.bold + "'Rapp' "+ console.reset + console.green + ": %s" % uri.rapp + console.reset)
except rocon_uri.RoconURIValueError as e:
print(console.bold + "\nError" + console.reset)
print(console.red + "\tFailed to parse " + console.cyan + rocon_uri_string + console.reset)
print(console.bold + "Reason" + console.reset)
print(console.red + "\t%s\n" % str(e) + console.reset)
def _rocon_uri_cmd_fields():
print("\nA rocon uri string typically takes the form:\n")
print(console.green + "\trocon://concert_name/hardware_platform/name/application_framework/operating_system#rocon_app\n" + console.reset)
print("where permitted values for each of the fields include:\n")
yaml_rules = rocon_uri.rules.load_rules_into_dictionary()
rule_sets = {}
for yaml_rule_set in yaml_rules: # each of hardware_platform, name, application_framework, os
rule_sets.update(yaml_rule_set)
for rule_set_name, rule_set in rule_sets.iteritems():
for name, group, elements in rocon_uri.rules.walk_yaml_rules(rule_set_name, rule_set):
split_name = name.split('/')
prefix = ''
for i in range(0, 2*(len(split_name)-1)):
prefix += ' '
print(prefix + console.cyan + "+ %s" % split_name[-1] + console.reset)
for element in elements:
print(prefix + console.yellow + " - " + element + console.reset)
print("\nYou can modify or extend the list of permitted fields with a pull request at \n")
print(console.green + "\thttps://github.com/robotics-in-concert/rocon_tools/blob/indigo/rocon_uri/src/rocon_uri/rules/rules.yaml\n" + console.reset)
def _rocon_uri_cmd_rules():
print("\nThe " + console.bold + "ebnf" + console.reset + " rules used to internally parse a rocon_uri string:\n" + console.reset)
rules = rocon_uri.rules.load_ebnf_rules()
for name, rules in rules.iteritems():
print(console.cyan + " " + name + console.reset)
for rule in rules:
print(console.green + " " + rule + console.reset)
##############################################################################
# Simple Printout of Rocon URI Rules
##############################################################################
if __name__ == '__main__':
# filter out remapping arguments in case we are being invoked via roslaunch
argv = rospy.myargv(sys.argv)
command = argv[1] if len(argv) > 1 else None
if command == 'parse':
if len(argv) < 3:
print("%s" % usage())
else:
_rocon_uri_cmd_parse(argv[2])
elif command == 'fields':
_rocon_uri_cmd_fields()
elif command == 'rules':
_rocon_uri_cmd_rules()
else:
print("%s" % usage())
| [
"javier.diaz.palacios@gmail.com"
] | javier.diaz.palacios@gmail.com | |
3c1f6f4f9674dcd0e8d8e3bc8a5cfece6c2d762c | 9b722ca41671eb2cea19bac5126d0920639261bd | /.history/app_20201126190857.py | 0337e34e3eeeed73fd0d77cb0603c145682dcff3 | [] | no_license | thawalk/db_flask_server | 7928fd481f99d30bdccc60d97f02db78324cfdbe | cd55f1c9bf84c734457ee02d9f64a6833e295fad | refs/heads/master | 2023-01-25T02:40:19.097457 | 2020-12-06T07:45:50 | 2020-12-06T07:45:50 | 314,229,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,163 | py | import json
import pymongo
from flask import Flask, jsonify, url_for, request, redirect,Response,Request
import pymongo
from bson.json_util import dumps
import mysql.connector
from werkzeug.serving import run_simple
import os
from dotenv import load_dotenv
import datetime
import time
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
load_dotenv()
print(os.getenv('mongo_url'))
test_collection='test_collection'
mongo = pymongo.MongoClient('mongodb://18.209.236.31:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=false')
metadata_db = pymongo.database.Database(mongo, 'test')
metadata_col = pymongo.collection.Collection(metadata_db, 'test_collection')
userlogging_db = pymongo.database.Database(mongo,'user_analytics')
userlogging_col = pymongo.collection.Collection(userlogging_db,'logging')
metadata_db = mysql.connector.connect(
host ='54.163.143.77',
user = 'root',
password = '',
database = 'reviews',
)
cur = metadata_db.cursor()
def user_logging(userid,timestamp,req,res):
return userlogging_col.insert({"id":userid,"timestamp":timestamp,"request":req,"response":res})
@app.route('/',methods=["GET"])
def api_root():
data = {
'message': 'Welcome to our website. Where reviews are our number one priority'
}
js = json.dumps(data)
response = Response(js, status=200, mimetype='application/json')
user_logging(123,datetime.datetime.now().isoformat(),"GET",200)
return response
@app.route('/categories', methods = ['GET']) #TODO: #returns list of categories
def get_categories():
categories = []
js = json.dumps(data)
response = Response(js, status=200, mimetype='application/json')
user_logging(123,datetime.datetime.now().isoformat(),"GET",200)
return response
@app.route('/search', methods=['GET']) #now it only searches for TITLE. the mongo metadata does not have author
def search_book():
try:
title = request.args.get("title")
result = metadata_col.find({"title":title}).limit(10) #{ $text: { $search: title } }
result_array = dumps(list(result))
response = Response(result_array, status=200, mimetype='application/json')
user_logging(123,datetime.datetime.now().isoformat(),"GET",200)
return response
except:
errMsg = "Please include title."
js = json.dumps(errMsg)
user_logging(123,datetime.datetime.now().isoformat(),"GET",400)
response = Response(js, status=400, mimetype='application/json')
return response
# @app.route('/review', methods=['POST'])
# def add_review():
# if not request.json or not request.json['asin'] or type(request.json['asin']) != str or not request.json['overall'] or not request.json['reviewText'] or type(request.json['reviewText']) != str or not request.json['reviewTime'] or type(request.json['reviewTime']) != str or not request.json['reviewerID'] or type(request.json['reviewerID']) != str or not request.json['reviewerName'] or type(request.json['reviewerName']) != str or not request.json['summary'] or type(request.json['summary']) != str or not request.json['unixReviewTime'] or type(request.json['unixReviewTime']) != int :
# return 'invalid request msg', 404
# txt = "INSERT INTO 'kindle_reviews' ('id', 'asin', 'overall', 'reviewText', 'reviewTime', 'reviewerID', 'reviewerName', 'summary', 'unixReviewTime') VALUES (%s)"
# values = (None, request.json['asin'], request.json['overall'], request.json['reviewText'], request.json['reviewTime'], request.json['reviewerID'], request.json['reviewerName'], request.json['summary'], request.json['unixReviewTime'])
# cur.execute(txt, values)
# return 'successfully uploaded new review', 200
@app.route('/addBook',methods= ['POST'])
def add_book():
# if not request.json or not request.json['asin'] or type(request.json['asin']) != str or not request.json['overall'] or not request.json['reviewText'] or type(request.json['reviewText']) != str or not request.json['reviewTime'] or type(request.json['reviewTime']) != str or not request.json['reviewerID'] or type(request.json['reviewerID']) != str or not request.json['reviewerName'] or type(request.json['reviewerName']) != str or not request.json['summary'] or type(request.json['summary']) != str or not request.json['unixReviewTime'] or type(request.json['unixReviewTime']) != int :
# return 'invalid request msg', 404
try:
data = request.json
title = data['title']
asin = data['asin']
description = data['description']
price = data['price']
categories = data['categories']
message = "Book added successfully"
metadata_col.insert({"title":title,"asin":asin,"description":description,"price":price,"categories":categories})
js = json.dumps(message)
response = Response(js, status=201, mimetype='application/json')
user_logging(123,datetime.datetime.now().isoformat(),"POST",201)
return response
except:
errMsg = "Please include title, asin, description, price and categories."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
user_logging(123,datetime.datetime.now().isoformat(),"POST",400)
return response
@app.route('/addReview',methods = ['POST']) #TODO: add review INTO sql part
def add_review():
try:
data = request.json
asin = data["asin"]
helpful = [0,0]
overall = data["overall"]
reviewText = data["reviewText"]
reviewTime = data["reviewTime"]
reviewerID = data["reviewerID"]
reviewerName = data["reviewerName"]
summary = data["summary"]
unixReviewTime = int(time.time())
mySQL_insert_query = f"""INSERT INTO reviews.kindle_reviews (asin, helpful, overall, reviewText, reviewTime, reviewerID, reviewerName, summary, unixReviewTime)
VALUES ("{asin}","{helpful}",{overall},"{reviewText}","{reviewTime}","{reviewerID}","{reviewerName}","{summary}","{unixReviewTime}");"""
cur.execute(mySQL_insert_query)
metadata_db.commit()
message = "Successfully uploaded review"
js = json.dumps(message)
response = Response(js, status=201, mimetype='application/json')
user_logging(123,datetime.datetime.now().isoformat(),"POST",201)
return response
except:
errMsg = "An error occurred. Please check if you have all fields."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
user_logging(123,datetime.datetime.now().isoformat(),"POST",400)
return response
@app.route('/sortByGenres', methods= ['GET']) #TODO: sort by genres from mongo metadata categories
def sort_by_genres():
pass
@app.route('/sortByRating' , methods = ['GET'])
def sort_by_ratings(): #sort by increasing ratings, decreasing rating
try:
rating_preference = request.args.get("rating_preference")
if(rating_preference == 'increasing'): #means rating 1 will come out first
mySQL_sort_query = """SELECT * FROM reviews.kindle_reviews ORDER BY overall ASC LIMIT 10;"""
else: #means rating 5 will come out first
mySQL_sort_query = """SELECT * FROM reviews.kindle_reviews ORDER BY overall DESC LIMIT 10;"""
cur.execute(mySQL_sort_query)
result_set = cur.fetchall()
r = [dict((cur.description[i][0], value) \
for i, value in enumerate(row)) for row in result_set]
js = json.dumps(r)
response = Response(js, status=200, mimetype='application/json')
user_logging(123,datetime.datetime.now().isoformat(),"GET",200)
return response
except:
errMsg = "An error occurred. Please check if you have all fields."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
user_logging(123,datetime.datetime.now().isoformat(),"GET",400)
return response
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000) #remember to change this part
# app.run(debug=True)
| [
"akmal_hakim_teo@hotmail.com"
] | akmal_hakim_teo@hotmail.com |
a8e2942c9f26c11e10d17a96fc317b0a47531ceb | 08e052c0c2ee4ad0cd4980fbc8a692c407118659 | /Ex. do Paca/Aula 6/P_6.8.py | 3efaea60835d67a96b62049edab2f317739b66ae | [] | no_license | miltonleal/MAC0110_Introduction_Computer_Science_IME_USP | 6fad182f7fbb3e83403080800074bf57456cb0b5 | b47936ce66e715dba79dff44779a750d795192a0 | refs/heads/master | 2023-03-04T11:06:48.092980 | 2021-02-17T14:47:46 | 2021-02-17T14:47:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | '''Dado um valor inteiro em reais (R$), determinar quantas notas de R$100, R$50, R$20, R$10, R$5, R$2 e R$1
são necessárias para compor esse valor. A solução procurada é aquela com o máximo de notas de cada tipo.'''
N = int(input("Digite o valor total em R$: "))
total_100 = N // 100
total_50 = N % 100 // 50
total_20 = N % 100 % 50 // 20
total_10 = N % 100 % 50 % 20 // 10
total_5 = N % 100 % 50 % 20 % 10 // 5
total_2 = 100 % 50 % 20 % 10 % 5 // 2
total_1 = 100 % 50 % 20 % 10 % 5 % 2 // 1
print ("Serão necessárias", total_100, "nota(s) de R$100", total_50, "nota(s) de R$50", total_20, "nota(s) de R$20", total_10, "nota(s) de R$10", total_5, "nota(s) de R$5", total_2, "nota(s) de R$2 e", total_1, "nota(s) de R$1") | [
"milton.leal@usp.br"
] | milton.leal@usp.br |
24c4ca54be9905e365f6ef7fb5630cad21cabfb9 | d1aa3a3dc4b05a82ccc6497a75d243c89ecf3c95 | /example-scripts/fitsextract.py | 0f8f9ce4c028d7366b3b399ffc04edefbf4dd60c | [
"MIT"
] | permissive | barentsen/dotastro-argparse-tutorial | d8926ddb1ed1eac943985dc414e40f09da3460a0 | d57a155af428ab1c0c2bf918f09f5381b09a3ad4 | refs/heads/master | 2020-12-24T06:36:19.129131 | 2016-06-20T13:17:23 | 2016-06-20T13:17:23 | 61,532,871 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | import argparse
from astropy.io import fits
def write_fits_extension(input_fn, extension, output_fn):
fits.open(input_fn)[extension].writeto(output_fn)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Writes a FITS extension to a new file')
parser.add_argument('filename', help='FITS filename')
parser.add_argument('extension', help='Extension number', type=int)
args = parser.parse_args()
output_fn = '{}-ext{}.fits'.format(args.filename, args.extension)
write_fits_extension(input_fn=args.filename,
extension=args.extension,
output_fn=output_fn)
| [
"geert@barentsen.be"
] | geert@barentsen.be |
7c9a871b78369da66f4ca93a6817a8b15af9723e | 67309cbca4ead3623c86647ac7bfaa067b029fdc | /BOJ/dp_greedy/12869.py | d8b6744243185aae3a4e6b6fb2eec5a373ed8816 | [] | no_license | Jeukoh/OJ | b6df132927ec15ab816fee8681952240b5a69e13 | 182b54554896d9860d5e5d09f8eccc07d99aa8e8 | refs/heads/master | 2023-08-30T19:18:10.812225 | 2021-10-15T09:57:14 | 2021-10-15T09:57:14 | 402,799,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | import math
N = int(input())
K = [0,0,0]
tmp = list(map(int,input().split()))
dp = [[[0]*61 for __ in range(61)] for _ in range(61)]
for _ in range(N):
K[_] = tmp[_]
def sol(N,K):
if N == 1:
return math.ceil(K[0]/9)
min_cnt = 500
def recur(s1,s2,s3,n):
nonlocal min_cnt
if s1 <= 0 and s2 <= 0 and s3 <= 0:
min_cnt = min(min_cnt, n)
return
s1, s2, s3 = map(lambda x: max(0,x), [s1,s2,s3])
if dp[s1][s2][s3] != 0 and dp[s1][s2][s3] <= n:
return
dp[s1][s2][s3] = n
recur(s1-9,s2-3,s3-1,n+1)
recur(s1 - 9, s2 - 1, s3 - 3, n + 1)
recur(s1 - 3, s2 - 9, s3 - 1, n + 1)
recur(s1 - 1, s2 - 9, s3 - 3, n + 1)
recur(s1 - 1, s2 - 3, s3 - 9, n + 1)
recur(s1 - 3, s2 - 1, s3 - 9, n + 1)
recur(*K,0)
return min_cnt
print(sol(N,K))
| [
"jeukoh@gmail.com"
] | jeukoh@gmail.com |
6ca34e5f4844fa6cb43b8dd01c856368d7d5d6f7 | c4939f03996ba18b678813ba7c65f519a6532051 | /home/migrations/0003_auto_20200608_1229.py | 0e33b523bd15faee5f21ad9b8f00623ee9b88ac0 | [] | no_license | crowdbotics-apps/tony-template-1-dev-5633 | 030f99cfe366bcbde9506b3679dd104d052f2bcf | e127168ad7a08be5805e9da73530f1ddd8c6ad0e | refs/heads/master | 2022-10-04T19:07:46.565100 | 2020-06-08T12:29:10 | 2020-06-08T12:29:10 | 270,660,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py | # Generated by Django 2.2.13 on 2020-06-08 12:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('home', '0002_load_initial_data'),
]
operations = [
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('test', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='Testing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('test', models.BigIntegerField()),
],
),
migrations.CreateModel(
name='Testtt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('testt', models.BinaryField()),
],
),
migrations.AddField(
model_name='customtext',
name='emp',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customtext_emp', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='customtext',
name='name',
field=models.BinaryField(blank=True, null=True),
),
migrations.AddField(
model_name='customtext',
name='subpage',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customtext_subpage', to='home.CustomText'),
),
migrations.AddField(
model_name='customtext',
name='test',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customtext_test', to=settings.AUTH_USER_MODEL),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
184cce916b7a1eff4b9fb73c933cb7878194682c | 726a548766a9db586806ef540dcf8ea4d0a82a60 | /Python3/examples/bin/loop_for.py | f097bfd41a67b0094776eed6b5cbc71de54d37ab | [] | no_license | ArseniD/learn_python | 6fd735a594ff83ea97888d6688e474e94182ea74 | d73fc790514f50a2f61c5cc198073299b0c71277 | refs/heads/master | 2022-05-28T04:53:54.603475 | 2019-08-27T10:15:29 | 2019-08-27T10:15:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | #!/usr/bin/env python3.6
colors = ['blue', 'green', 'red', 'purple']
for color in colors:
if color == 'blue':
continue
elif color == 'red':
break
print(color)
point = (2.1, 3.2, 7.6)
for value in point:
print(value)
ages = {'kevin': 59, 'bob': 40, 'kayla': 21}
for key in ages:
print(key)
for letter in "my_string":
print(letter)
list_of_points = [(1, 2), (2, 3), (3, 4)]
for x, y in list_of_points:
print(f"x: {x}, y: {y}")
for name, age in ages.items():
print(f"Person Named: {name}")
print(f"Age of: {age}")
| [
"arsenidudko@mail.ru"
] | arsenidudko@mail.ru |
39aadb1cb4d1a708dd82fba9aa3b7584f07754e0 | 122f9bf0d996c104f541453ab35c56f6ff3fc7cd | /z수업용문제/JunminLim/2445_별찍기8.py | ef4a526df4d72399eb817e24a5b6ee5a8395f8bd | [] | no_license | JannaKim/PS | 1302e9b6bc529d582ecc7d7fe4f249a52311ff30 | b9c3ce6a7a47afeaa0c62d952b5936d407da129b | refs/heads/master | 2023-08-10T17:49:00.925460 | 2021-09-13T02:21:34 | 2021-09-13T02:21:34 | 312,822,458 | 0 | 0 | null | 2021-04-23T15:31:11 | 2020-11-14T13:27:34 | Python | UTF-8 | Python | false | false | 410 | py | n=int(input())
manh=n-1
a=[]
m=2*n-1
p=2*n
paper=[[' ']*p for _ in range (m)]
'''
for i in range (len(paper)):
print(''.join(paper[i]))
'''
y1, x1 = n-1, 0
y2, x2= n-1, 2*n
for i in range (m):
for z in range (p):
if abs(x1-z)+abs(y1-i)<n:
paper[i][z]='*'
elif abs(x2-z)+abs(y2-i)<n+1:
paper[i][z]='*'
for i in range (len(paper)):
print(''.join(paper[i]))
| [
"baradamoh@gmail.com"
] | baradamoh@gmail.com |
d467c2620d84b57c8dbb8f3d38b8aa65aa49a062 | 3d88748960deb31c674525df2bd9d79ba1d2db1a | /pythonlib/bin/mcmcint | bfd0d1f647224043bd373dc5c1c16f030af8f49f | [
"BSD-2-Clause"
] | permissive | johnkerl/scripts-math | 1a0eb6ce86fd09d593c82540638252af5036c535 | cb29e52fec10dd00b33c3a697dec0267a87ab8bb | refs/heads/main | 2022-01-31T17:46:05.002494 | 2022-01-17T20:40:31 | 2022-01-17T20:40:31 | 13,338,494 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 410 | #!/usr/bin/python -Wall
from __future__ import division
import random
# ----------------------------------------------------------------
def f(x):
return 1 / (1 + x**2)
# ----------------------------------------------------------------
N = 100000
x0 = 1000.0
b = 10
x = x0
for i in range(0, N):
print x
y = random.normalvariate(x, b)
p = f(y) / f(x)
u = random.uniform(0.0, 1.0)
if (p > u):
x = y
| [
"kerl.john.r@gmail.com"
] | kerl.john.r@gmail.com | |
7867d807e90f24148853f20bc485ff8b66158e3a | 01f7ed47677805e3dcf39b75c657ebdfdf1e89a5 | /scripts/test_cartpole.py | f6802b2fe76a487f2d899151cb8276f40f5d3eb9 | [] | no_license | aoyan27/openai_gym_tutorial | 3d59a1080be8925cc7242128066dff4e4fcfb895 | 4015b4b9b3c2b38948909e4d20e37dca67e6ed19 | refs/heads/master | 2021-09-08T01:21:38.534371 | 2018-03-05T03:23:57 | 2018-03-05T03:23:57 | 98,200,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,702 | py | #!/usr/bin/env python
#coding:utf-8
#################################################
#
# Tutorial-1('Running an environment' section)
#
#################################################
# import gym
# env = gym.make('CartPole-v0')
# env.reset()
# for _ in range(1000):
# env.render()
# observation, reward, done, info = env.step(env.action_space.sample())
# print "observation : ", observation, " reward : ", reward, " done : ", done, " info : ", info
# """
# observation : 行動した後の状態(next_state)
# reward : 報酬
# done : 'env'において定義されたしきい値に応じて、そのエピソードを打ち切るためのフラグ
# info : 学習を行う際の詳細情報を記載できる(自分の自由に?公式に提供されている環境では何も入っていない)
# """
# # print "env.observation_space : ", env.observation_space
# # print "env.action_space. : ", env.observation_space
#################################################
#
# Tutorial-2('Observations' section)
#
#################################################
# import gym
# env = gym.make('CartPole-v0')
# for i_epidode in range(20):
# observation = env.reset()
# for t in range(100):
# env.render()
# print (observation)
# action = env.action_space.sample()
# observation, reward, done, info = env.step(action)
# if done:
# print ("Episode finished after {} timesteps.".format(t+1))
# break
#################################################
#
# Tutorial-3('Spaces' section)
#
#################################################
# import gym
# env = gym.make('CartPole-v0')
# print (env.action_space)
# print (env.observation_space)
# print (env.observation_space.high)
# print (env.observation_space.low)
# """
# spaceというパッケージがある、このパッケージにあるモジュールを使って、OpenAI Gymは状態や行動の定義を表している。
# Discrete : 0からn-1までの負の数を含まない範囲の値を表す。
# 使い方は、
# >> from gym import spaces
# >> a = spaces.Discrete(10) #0~9の範囲の値を考慮する
# >> a.sample() #0~10の中からランダムに値を選択する
# >> a.contains(5) #引数が設定した範囲内にあるかTrue or Falseで返してくる
# Boxes : n次元のboxをあつかう。
# 例えば、
# 1. Box(-1.0, 1.0, (3,4)) # low and high are scalars, and shape is provided
# 2. Box(np.array([-1.0,-2.0]), np.array([2.0,4.0])) # low and high are arrays of the same shape
# のような使い方においては、
# 1. 3x4次元の配列が確保され、それぞれの要素の最小値、最大値が第一引数、第二引数となっている
# 2. 2x2次元の配列が確保され、それぞれの要素の最大値、最小値は引数である配列の値となっている
# --->envでは、'observation_space'や'action_space'として利用されており、
# 状態や行動の型(何次元配列で定義されているか)や、その値の上限、下限値を知ることができる
# """
#################################################
#
# Tutorial-4('Environments' section)
#
#################################################
# import gym
# print (gym.envs.registry.all())
# """
# gym.envs.registry.all()で登録されている環境のリストを確認できる。
# 大元のgym/gym/envs/の中にある__init__.pyの中でgym.envsがimportされた時の初期化処理として、
# 'registration.py'におけるregistor関数を利用して、環境のidや報酬のしきい値、最大episode数、などを登録している。
# そうすると、'env = gym.make('自分で登録した環境の名前')'で環境を利用できるようになる。
# """
#################################################
#
# Tutorial-5(Recoding results'' section)
#
#################################################
import gym
from gym import wrappers
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, '/tmp/cartpole-experiment-1') #上の行の環境を定義している変数envをwrappr.Monitorクラスで上書きしているイメージ
for i_episode in range(20):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps.".format(t+1))
break
| [
"ce62001@meiji.ac.jp"
] | ce62001@meiji.ac.jp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.