blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M โ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 โ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 โ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
022bbfcae8b4a0b9b2dd34aae28a3016dde1563c | c1cd6a7a446934c428bc4fbf988f8d6680460488 | /dist/restclient.app/Contents/Resources/setuptools/ssl_support.py | 5820812d1c20dc5e820a4aef68f093c6091b3182 | [] | no_license | devvmh/restclient-py2app | ed016d1763ee99779388c8700dfb9c129cf8ce1a | 6826f6cb81c08a36b30878683a58e4f7a18f5041 | refs/heads/master | 2021-01-10T12:01:31.411373 | 2016-01-18T03:34:02 | 2016-01-18T03:34:02 | 49,850,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | /Users/devin/git/restclient/venv/lib/python2.7/site-packages/setuptools/ssl_support.py | [
"devin@callysto.com"
] | devin@callysto.com |
343405fb1a09d6270a46f3f31a045a175b213b87 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/1/test_20200606190302.py | 6ac7d1e8d3f7dff5414a65aed718dfe59f0e1101 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | import os
import json
# In-memory fallback value; replaced by the contents of save.json below.
people = ['rfdfdf']
with open(os.path.dirname(os.path.realpath(__file__)) + "/save.json", 'r') as file:
    people = json.load(file)
while True:
    # BUG FIX: the input used to be encoded to bytes (input().encode('ascii')),
    # so the str comparisons below could never match and msg.split(', ') raised
    # TypeError (bytes.split needs a bytes separator).  Keep the input as str.
    msg = input()
    if msg == "stop":
        break
    elif msg == "show all":
        print(people)
    else:
        try:
            name, game = msg.split(', ')
        except ValueError:
            # Malformed input: report and prompt again instead of recording an
            # empty {"name": "", "game": ""} entry like the old code did.
            print("Err, try again with 2 params separated by ', '")
            continue
        people.append({"name": name, "game": game})
with open(os.path.dirname(os.path.realpath(__file__)) + "/save.json", 'w') as file:
    json.dump(people, file)
# print("STOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOP")
# for name in people:
# if name != "JoJo":
# print(name) | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
fc1bb49e2a7c68d119589dcf2782a1b582fa4311 | ea544b339809095d2c383b542248f530990c31d5 | /env/lib/python3.6/site-packages/sphinx/domains/__init__.py | d84fe6bfc1ed3280ff67c8af161f31b3de379d74 | [
"BSD-3-Clause"
] | permissive | 724686158/NosqlEXP3 | 5fab1a9e131c6936b5b61e0f1c86eea2c889294a | e29f2807f075831377456b47cf8c9ce0c8d65c30 | refs/heads/master | 2020-04-09T01:40:54.370782 | 2019-01-25T13:04:04 | 2019-01-25T13:04:04 | 159,912,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,727 | py | # -*- coding: utf-8 -*-
"""
sphinx.domains
~~~~~~~~~~~~~~
Support for domains, which are groupings of description directives
and roles describing e.g. constructs of one programming language.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import copy
from six import iteritems
from sphinx.errors import SphinxError
from sphinx.locale import _
if False:
# For type annotation
from typing import Any, Callable, Dict, Iterable, List, Tuple, Type, Union # NOQA
from docutils import nodes # NOQA
from docutils.parsers.rst.states import Inliner # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.roles import XRefRole # NOQA
from sphinx.util.typing import RoleFunction # NOQA
class ObjType(object):
    """
    Describes one type of object that a domain can document.

    In the ``object_types`` attribute of :class:`Domain` subclasses, object
    type names are mapped to instances of this class.

    Constructor arguments:

    - *lname*: localized name of the type (do not include domain name)
    - *roles*: all the roles that can refer to an object of this type
    - *attrs*: object attributes -- currently only "searchprio" is known,
      which defines the object's priority in the full-text search index,
      see :meth:`Domain.get_objects()`.
    """
    # Default values for the recognized attributes.
    known_attrs = {
        'searchprio': 1,
    }

    def __init__(self, lname, *roles, **attrs):
        # type: (unicode, Any, Any) -> None
        self.lname = lname  # type: unicode
        self.roles = roles  # type: Tuple
        # Start from the defaults and let caller-supplied attributes win.
        merged = dict(self.known_attrs)
        merged.update(attrs)
        self.attrs = merged  # type: Dict
class Index(object):
    """
    Base class describing a domain-specific index.

    To add an index to a domain, subclass Index and override the three name
    attributes:

    * `name` is an identifier used for generating file names.
    * `localname` is the section title for the index.
    * `shortname` is a short name for the index, for use in the relation bar in
      HTML output. Can be empty to disable entries in the relation bar.

    and provide a :meth:`generate()` method. Then, add the index class to
    your domain's `indices` list. Extensions can add indices to existing
    domains using :meth:`~sphinx.application.Sphinx.add_index_to_domain()`.
    """
    name = None  # type: unicode
    localname = None  # type: unicode
    shortname = None  # type: unicode

    def __init__(self, domain):
        # type: (Domain) -> None
        # Subclasses must have filled in at least ``name`` and ``localname``.
        if None in (self.name, self.localname):
            raise SphinxError('Index subclass %s has no valid name or localname'
                              % self.__class__.__name__)
        self.domain = domain

    def generate(self, docnames=None):
        # type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA
        """Produce the entries of this index.

        If *docnames* is given, restrict the result to entries referring to
        these documents.

        Returns a ``(content, collapse)`` tuple, where *collapse* is a boolean
        that determines if sub-entries should start collapsed (for output
        formats that support collapsing sub-entries).

        *content* is a sequence of ``(letter, entries)`` tuples, where *letter*
        is the "heading" for the given *entries*, usually the starting letter,
        and each single entry is a sequence
        ``[name, subtype, docname, anchor, extra, qualifier, descr]``:

        - `name` -- the name of the index entry to be displayed
        - `subtype` -- 0: normal entry, 1: entry with sub-entries, 2: sub-entry
        - `docname` -- docname where the entry is located
        - `anchor` -- anchor for the entry within `docname`
        - `extra` -- extra info for the entry
        - `qualifier` -- qualifier for the description
        - `descr` -- description for the entry

        Qualifier and description are not rendered e.g. in LaTeX output.
        """
        raise NotImplementedError
class Domain(object):
    """
    A Domain is meant to be a group of "object" description directives for
    objects of a similar nature, and corresponding roles to create references to
    them. Examples would be Python modules, classes, functions etc., elements
    of a templating language, Sphinx roles and directives, etc.
    Each domain has a separate storage for information about existing objects
    and how to reference them in `self.data`, which must be a dictionary. It
    also must implement several functions that expose the object information in
    a uniform way to parts of Sphinx that allow the user to reference or search
    for objects in a domain-agnostic way.
    About `self.data`: since all object and cross-referencing information is
    stored on a BuildEnvironment instance, the `domain.data` object is also
    stored in the `env.domaindata` dict under the key `domain.name`. Before the
    build process starts, every active domain is instantiated and given the
    environment object; the `domaindata` dict must then either be nonexistent or
    a dictionary whose 'version' key is equal to the domain class'
    :attr:`data_version` attribute. Otherwise, `IOError` is raised and the
    pickled environment is discarded.
    """
    #: domain name: should be short, but unique
    name = ''
    #: domain label: longer, more descriptive (used in messages)
    label = ''
    #: type (usually directive) name -> ObjType instance
    object_types = {} # type: Dict[unicode, ObjType]
    #: directive name -> directive class
    directives = {} # type: Dict[unicode, Any]
    #: role name -> role callable
    roles = {} # type: Dict[unicode, Union[RoleFunction, XRefRole]]
    #: a list of Index subclasses
    indices = [] # type: List[Type[Index]]
    #: role name -> a warning message if reference is missing
    dangling_warnings = {} # type: Dict[unicode, unicode]
    #: node_class -> (enum_node_type, title_getter)
    enumerable_nodes = {} # type: Dict[nodes.Node, Tuple[unicode, Callable]]
    #: data value for a fresh environment
    initial_data = {} # type: Dict
    #: data value
    data = None # type: Dict
    #: data version, bump this when the format of `self.data` changes
    data_version = 0
    def __init__(self, env):
        # type: (BuildEnvironment) -> None
        self.env = env # type: BuildEnvironment
        # Per-instance caches for the role()/directive() adapter factories.
        self._role_cache = {} # type: Dict[unicode, Callable]
        self._directive_cache = {} # type: Dict[unicode, Callable]
        self._role2type = {} # type: Dict[unicode, List[unicode]]
        self._type2role = {} # type: Dict[unicode, unicode]
        # convert class variables to instance one (to enhance through API)
        self.object_types = dict(self.object_types)
        self.directives = dict(self.directives)
        self.roles = dict(self.roles)
        self.indices = list(self.indices)
        # Seed fresh domain data on first use; otherwise adopt the pickled
        # data and validate its version (see class docstring).
        if self.name not in env.domaindata:
            assert isinstance(self.initial_data, dict)
            new_data = copy.deepcopy(self.initial_data)
            new_data['version'] = self.data_version
            self.data = env.domaindata[self.name] = new_data
        else:
            self.data = env.domaindata[self.name]
            if self.data['version'] != self.data_version:
                raise IOError('data of %r domain out of date' % self.label)
        # Build the role-name <-> object-type lookup tables from object_types.
        for name, obj in iteritems(self.object_types):
            for rolename in obj.roles:
                self._role2type.setdefault(rolename, []).append(name)
            self._type2role[name] = obj.roles[0] if obj.roles else ''
        self.objtypes_for_role = self._role2type.get # type: Callable[[unicode], List[unicode]] # NOQA
        self.role_for_objtype = self._type2role.get # type: Callable[[unicode], unicode]
    def add_object_type(self, name, objtype):
        # type: (unicode, ObjType) -> None
        """Add an object type."""
        self.object_types[name] = objtype
        if objtype.roles:
            self._type2role[name] = objtype.roles[0]
        else:
            self._type2role[name] = ''
        for role in objtype.roles:
            self._role2type.setdefault(role, []).append(name)
    def role(self, name):
        # type: (unicode) -> Callable
        """Return a role adapter function that always gives the registered
        role its full name ('domain:name') as the first argument.
        """
        if name in self._role_cache:
            return self._role_cache[name]
        if name not in self.roles:
            return None
        fullname = '%s:%s' % (self.name, name)
        # The adapter injects the fully qualified role name before delegating
        # to the registered role callable.
        def role_adapter(typ, rawtext, text, lineno, inliner, options={}, content=[]):
            # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> nodes.Node # NOQA
            return self.roles[name](fullname, rawtext, text, lineno,
                                    inliner, options, content)
        self._role_cache[name] = role_adapter
        return role_adapter
    def directive(self, name):
        # type: (unicode) -> Callable
        """Return a directive adapter class that always gives the registered
        directive its full name ('domain:name') as ``self.name``.
        """
        if name in self._directive_cache:
            return self._directive_cache[name]
        if name not in self.directives:
            return None
        fullname = '%s:%s' % (self.name, name)
        BaseDirective = self.directives[name]
        # Subclass on the fly so the registered directive runs with its fully
        # qualified name.
        class DirectiveAdapter(BaseDirective): # type: ignore
            def run(self):
                # type: () -> List[nodes.Node]
                self.name = fullname
                return BaseDirective.run(self)
        self._directive_cache[name] = DirectiveAdapter
        return DirectiveAdapter
    # methods that should be overwritten
    def clear_doc(self, docname):
        # type: (unicode) -> None
        """Remove traces of a document in the domain-specific inventories."""
        pass
    def merge_domaindata(self, docnames, otherdata):
        # type: (List[unicode], Dict) -> None
        """Merge in data regarding *docnames* from a different domaindata
        inventory (coming from a subprocess in parallel builds).
        """
        raise NotImplementedError('merge_domaindata must be implemented in %s '
                                  'to be able to do parallel builds!' %
                                  self.__class__)
    def process_doc(self, env, docname, document):
        # type: (BuildEnvironment, unicode, nodes.Node) -> None
        """Process a document after it is read by the environment."""
        pass
    def check_consistency(self):
        # type: () -> None
        """Do consistency checks (**experimental**)."""
        pass
    def process_field_xref(self, pnode):
        # type: (nodes.Node) -> None
        """Process a pending xref created in a doc field.
        For project, attach information about the current scope.
        """
        pass
    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
        """Resolve the pending_xref *node* with the given *typ* and *target*.
        This method should return a new node, to replace the xref node,
        containing the *contnode* which is the markup content of the
        cross-reference.
        If no resolution can be found, None can be returned; the xref node will
        then given to the :event:`missing-reference` event, and if that yields no
        resolution, replaced by *contnode*.
        The method can also raise :exc:`sphinx.environment.NoUri` to suppress
        the :event:`missing-reference` event being emitted.
        """
        pass
    def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
        # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
        """Resolve the pending_xref *node* with the given *target*.
        The reference comes from an "any" or similar role, which means that we
        don't know the type. Otherwise, the arguments are the same as for
        :meth:`resolve_xref`.
        The method must return a list (potentially empty) of tuples
        ``('domain:role', newnode)``, where ``'domain:role'`` is the name of a
        role that could have created the same reference, e.g. ``'py:func'``.
        ``newnode`` is what :meth:`resolve_xref` would return.
        .. versionadded:: 1.3
        """
        raise NotImplementedError
    def get_objects(self):
        # type: () -> Iterable[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
        """Return an iterable of "object descriptions", which are tuples with
        five items:
        * `name` -- fully qualified name
        * `dispname` -- name to display when searching/linking
        * `type` -- object type, a key in ``self.object_types``
        * `docname` -- the document where it is to be found
        * `anchor` -- the anchor name for the object
        * `priority` -- how "important" the object is (determines placement
          in search results)
          - 1: default priority (placed before full-text matches)
          - 0: object is important (placed before default-priority objects)
          - 2: object is unimportant (placed after full-text matches)
          - -1: object should not show up in search at all
        """
        return []
    def get_type_name(self, type, primary=False):
        # type: (ObjType, bool) -> unicode
        """Return full name for given ObjType."""
        if primary:
            return type.lname
        return _('%s %s') % (self.label, type.lname)
    def get_enumerable_node_type(self, node):
        # type: (nodes.Node) -> unicode
        """Get type of enumerable nodes (experimental)."""
        enum_node_type, _ = self.enumerable_nodes.get(node.__class__, (None, None))
        return enum_node_type
    def get_full_qualified_name(self, node):
        # type: (nodes.Node) -> unicode
        """Return full qualified name for given node."""
        return None
| [
"solitarius.holic@gmail.com"
] | solitarius.holic@gmail.com |
1f97ec440ba6c6a279915b778e547ad93602ac7e | 2b3bbfc742ad6a2529f2906193c3c5263ebd5fac | /tools/crc/crc_init.py | 35cab0578bc55530478de02b4d2b2470886733f6 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | foxBMS/foxbms-2 | 35502ef8441dfc7374fd6c0839e7f5328a5bda8f | 9eb6d1c44e63e43e62bbf6983b2e618fb6ad02cc | refs/heads/master | 2023-05-22T05:30:25.862475 | 2023-02-23T15:03:35 | 2023-02-24T15:04:15 | 353,751,476 | 151 | 80 | NOASSERTION | 2023-09-01T09:59:30 | 2021-04-01T15:52:24 | C | UTF-8 | Python | false | false | 5,199 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 - 2023, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# - "This product uses parts of foxBMSยฎ"
# - "This product includes parts of foxBMSยฎ"
# - "This product is derived from foxBMSยฎ"
"""Generate CRC lookup tables for usage in foxBMS"""
import argparse
import logging
import sys
LINE_LENGTH = 120
def get_hex_rep(table):
    """Render every table entry as a zero-padded hex literal like '0x00FFu'.

    All entries are padded to the width of the largest value so the columns
    line up in the generated C source.
    """
    pad_width = len(hex(max(table)))
    result = []
    for value in table:
        literal = f"{value:#0{pad_width}X}u"
        # '#X' upper-cases the whole prefix ('0X...'); the C style guide wants
        # a lowercase '0x' prefix with uppercase digits.
        result.append(literal[:2].lower() + literal[2:])
    return result
def generate_c_table(table, crc_len):
    """Generate a CRC table as the foxBMS C style guide requires"""
    # Header comment, array declaration, and a first (indented) data line that
    # entries are appended to until it reaches LINE_LENGTH characters.
    lines = [
        f"/* precomputed CRC-{crc_len} Table */",
        f"static const unsigned int crc{crc_len}Table[{len(table)}] = {{",
        " ",
    ]
    # 'index' always points at the data line currently being filled.
    index = len(lines) - 1
    for i in get_hex_rep(table):
        # Append to the current line while the next entry still fits;
        # otherwise start a fresh data line.
        if len(lines[index] + f"{i},") < LINE_LENGTH + 1:
            lines[index] += f"{i}, "
        else:
            index += 1
            lines.append(f" {i}, ")
    lines.append("};")
    # NOTE(review): the generated C code is printed to stdout rather than
    # returned -- callers that need the text must capture stdout.
    print("\n".join(lines))
def precalculate_crc_table(polynomial, width):
    """Build the 256-entry CRC remainder lookup table for *polynomial*.

    Applies the classic bit-at-a-time CRC algorithm to every possible input
    byte.  NOTE(review): intermediate remainders are truncated to 16 bits
    (``& 0xFFFF``) regardless of *width*, so widths above 16 will not behave
    as expected -- confirm before generalizing.
    """
    top_bit = 1 << (width - 1)
    lookup = []
    for byte in range(256):
        # Align the input byte with the top of the CRC register.
        crc = byte << (width - 8)
        for _ in range(8):
            if crc & top_bit:
                crc = (crc << 1) ^ polynomial
            else:
                crc = crc << 1
            crc &= 0xFFFF
        lookup.append(crc)
    return lookup
def check_positive_integer(value):
    """argparse ``type`` hook: parse *value* as a strictly positive integer.

    Exits the program with an error message when the value is not an integer
    or not greater than zero.
    """
    try:
        number = int(value)
    except ValueError:
        sys.exit("Width must be an integer.")
    if number > 0:
        return number
    sys.exit("Width must be a positive integer.")
def check_hex(value):
    """argparse ``type`` hook: require a '0x'/'0X'-prefixed hex string.

    The string is returned unchanged; conversion to int happens later.
    """
    if value.lower().startswith("0x"):
        return value
    sys.exit("Polynomial must be provided as hex representation.")
def main():
    """Parse the CLI arguments and print the generated CRC lookup table as C code."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-v",
        "--verbosity",
        dest="verbosity",
        action="count",
        default=0,
        help="set verbosity level",
    )
    parser.add_argument(
        "-p",
        "--polynomial",
        dest="polynomial",
        action="store",
        type=check_hex,
        default="0xC599",
        help="CRC polynomial",
    )
    parser.add_argument(
        "-w",
        "--width",
        dest="width",
        action="store",
        type=check_positive_integer,
        default=15,
        help="CRC width",
    )
    args = parser.parse_args()
    # Map -v/-vv to increasing log detail; the default shows only errors.
    if args.verbosity == 1:
        logging.basicConfig(level=logging.INFO)
    elif args.verbosity > 1:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.ERROR)
    # A user-supplied polynomial was validated by check_hex; the default is a
    # hex string as well, so int(..., 16) is always safe here.
    polynomial = int(args.polynomial, 16)
    # FIX: args.width is already an int (check_positive_integer converts it,
    # and the default is an int literal) -- the old int(args.width) re-cast
    # was redundant.
    width = args.width
    # Use lazy %-style arguments so formatting only happens when DEBUG
    # logging is actually enabled (the old f-strings formatted eagerly).
    logging.debug("polynomial: %#0x", polynomial)
    logging.debug("width: %d", width)
    table = precalculate_crc_table(polynomial, width)
    logging.debug("C code:")
    generate_c_table(table, width)
if __name__ == "__main__":
main()
| [
"info@foxbms.org"
] | info@foxbms.org |
ff7aa44906bb0f492c773e8be7365b2fd77bfd3b | 7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a | /examples/adspygoogle/dfp/v201103/update_custom_targeting_values.py | d97cfc120d9432381cb5eec1c4073a5401b8ac01 | [
"Apache-2.0"
] | permissive | hockeyprincess/google-api-dfp-python | 534519695ffd26341204eedda7a8b50648f12ea9 | efa82a8d85cbdc90f030db9d168790c55bd8b12a | refs/heads/master | 2021-01-10T10:01:09.445419 | 2011-04-14T18:25:38 | 2011-04-14T18:25:38 | 52,676,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,585 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates the display name of each custom targeting value up to
the first 500. To determine which custom targeting keys exist, run
get_all_custom_targeting_keys_and_values.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# sandbox environment.
custom_targeting_service = client.GetCustomTargetingService(
    'https://sandbox.google.com', 'v201103')
# Placeholder: replace with a real custom targeting key id before running.
key_id = 'INSERT_CUSTOM_TARGETING_VALUE_ID_HERE'
# Bind variables for the PQL-style statement below (:keyId).
values = [{
    'key': 'keyId',
    'value': {
        'xsi_type': 'NumberValue',
        'value': key_id
    }
}]
filter_statement = {'query': 'WHERE customTargetingKeyId = :keyId LIMIT 500',
                    'values': values}
# Get custom targeting values by statement.
values = custom_targeting_service.GetCustomTargetingValuesByStatement(
    filter_statement)[0]['results']
# Update each local custom targeting value object by changing its display name.
# NOTE(review): values without a display name fall back to their name before
# the ' (Deprecated)' suffix is appended.
if values:
  for value in values:
    if not value['displayName']:
      value['displayName'] = value['name']
    value['displayName'] += ' (Deprecated)'
  values = custom_targeting_service.UpdateCustomTargetingValues(values)
# Display results.
  if values:
    for value in values:
      print ('Custom targeting value with id \'%s\', name \'%s\', and display '
             'name \'%s\' was updated.'
             % (value['id'], value['name'], value['displayName']))
  else:
    print 'No custom targeting values were updated.'
else:
  print 'No custom targeting values were found to update.'
| [
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] | api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138 |
55684fdb0228754c5b0c83f161cec75a2780a7c8 | d569476dd95496339c34b231621ff1f5dfd7fe49 | /PyTest/SteamSender/tests/PageObject/SteamActions.py | ef758dc5bfc9407871b3bb268f525393268eeabf | [] | no_license | monteua/Tests | 10f21f9bae027ce1763c73e2ea7edaf436140eae | 553e5f644466683046ea180422727ccb37967b98 | refs/heads/master | 2021-01-23T10:28:49.654273 | 2018-05-09T09:11:30 | 2018-05-09T09:11:30 | 93,061,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
class SteamHome(object):
    """Page object wrapping the Steam community login/trade flow.

    All methods drive the supplied Selenium WebDriver; none of them return
    values.
    """
    def __init__(self, driver):
        # Selenium WebDriver instance shared by all page actions.
        self.driver = driver
    def open_browser(self):
        """Navigate to the Steam community login page."""
        self.driver.get("https://steamcommunity.com/login/home/?goto=")
    def enter_credentials(self, login, password):
        """Fill in the login form and submit it via ENTER."""
        self.driver.find_element_by_id("steamAccountName").send_keys(login)
        self.driver.find_element_by_id("steamPassword").send_keys(password, Keys.ENTER)
    def pass_steam_guard(self):
        """Wait (up to 20 s) until the post-login activity feed is visible."""
        WebDriverWait(self.driver, 20).until(EC.visibility_of(self.driver.find_element_by_id("blotter_statuspost_textarea")))
    def open_trade_url(self):
        """Open a hard-coded trade offer URL, select the Steam (753) inventory,
        and scroll to the bottom.  NOTE(review): the sleeps are fixed waits for
        page JavaScript to settle -- fragile; confirm timings against the site.
        """
        self.driver.get("https://steamcommunity.com/tradeoffer/new/?partner=81735615&token=lhNyIIkQ")
        time.sleep(2)
        self.driver.execute_script("javascript: TradePageSelectInventory(UserYou, 753, 0);")
        time.sleep(2)
        self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(20)
    def log_off(self):
        """Log the current user out via the site's JavaScript Logout()."""
        self.driver.execute_script("javascript: Logout();")
"arximed.monte@gmail.com"
] | arximed.monte@gmail.com |
8e31ff5084ea5f8bf5777a184c508e97ccd22cac | 2da6133f3cd5c5fc19355292d60253b8c0dbcd49 | /.history/antz/models_20200403232602.py | e82e48acaad35fce9f5ead09f86c2ffb0b256759 | [] | no_license | mirfarzam/python-advance-jadi-maktabkhooneh | b24f5c03ab88e3b12c166a439b925af92f50de49 | d9bcecae73fd992f1290c6fd76761683bb512825 | refs/heads/master | 2021-05-25T21:33:37.782734 | 2020-04-07T22:39:28 | 2020-04-07T22:39:28 | 253,927,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from django.db import models
class CarBrand(models.Model):
    """A car manufacturer/brand.

    BUG FIX: the class was misspelled ``CardBrand`` although the models below
    reference ``CarBrand``, and ``models.charField`` is not a Django
    attribute -- the field class is ``CharField``.
    """
    name = models.CharField(max_length=128)
class CarModel(models.Model):
    """A specific car model belonging to one brand."""
    # BUG FIX: ``models.charField`` does not exist; Django's field class is
    # ``CharField``.
    name = models.CharField(max_length=128)
    brand = models.ForeignKey(CarBrand, on_delete=models.CASCADE)
class Car(models.Model):
    """A concrete car: name, brand, model, and price."""
    # BUG FIX: ``models.charField`` does not exist; the field class is
    # ``CharField``.
    name = models.CharField(max_length=255)
    brand = models.ForeignKey(CarBrand, on_delete=models.CASCADE)
    model = models.ForeignKey(CarModel, on_delete=models.CASCADE)
    # BUG FIX: the original ended with the truncated/invalid
    # ``models.Integer``; the Django field class is ``IntegerField``.
    price = models.IntegerField()
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
4105ca931275e881465db1fab1190e4b1ed38288 | 3e5150447a2c90c26354500f1df9660ef35c990b | /filesystem/delete.py | 9dc80d9ebca76b9e647f7e8d1bf7799d70ed8c7c | [] | no_license | kilirobbs/python-fiddle | 8d6417ebff9d6530e713b6724f8416da86c24c65 | 9c2f320bd2391433288cd4971c2993f1dd5ff464 | refs/heads/master | 2016-09-11T03:56:39.808358 | 2013-03-19T19:26:19 | 2013-03-19T19:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | from os import remove
from shutil import rmtree
#remove(filename)
# WARNING: rmtree deletes the directory tree recursively and irrevocably.
# NOTE(review): this hard-coded absolute path targets a repository's
# .git/hooks directory -- confirm that is intentional before running.
folder="/Users/nordmenss/git/GISTS/4050474/.git/hooks"
rmtree(folder)
"cancerhermit@gmail.com"
] | cancerhermit@gmail.com |
6387f2014defc3d4ce1a0a3164d1c6c284117320 | 81084f5947cf300cbbb51149bd1950e0c7b76504 | /Django_Logging/django_logging/manage.py | a673d976bddeb86dbf859cd278b072c1bb71c3d2 | [] | no_license | Gaurav41/Django-Handson | 8bb7acbb77a210cea107eea157761e22f4b5b222 | 80e72b4075c1583b3c9d14b2ba2d94508b5c760e | refs/heads/master | 2023-08-13T19:57:20.692439 | 2021-09-27T18:37:44 | 2021-09-27T18:37:44 | 403,650,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module unless the caller has
    # already chosen one via the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_logging.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a more actionable hint while preserving the cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"gauravpingale4@gmail.com"
] | gauravpingale4@gmail.com |
82e3ddab0c74beb14332a162cfc09b3f38772cca | d5c6af12520a0f125c3f12b5f4da8a47492b7dc0 | /mathematics/find_the_point.py | e028c70e5952ce5807f60ee56c50267d9cd9b977 | [] | no_license | LubosKolouch/hackerrank | 4e0a2a5ff1309152c6515732f408ee1434712bff | 21de03b0638277108c250c2971fbd3e5b69cf454 | refs/heads/master | 2022-12-24T06:42:13.411394 | 2020-10-04T14:52:04 | 2020-10-04T14:52:04 | 266,756,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | """ https://www.hackerrank.com/challenges/find-point/ """
def findPoint(px, py, qx, qy):
#
# Write your code here.
#
return 2*qx-px, 2*qy-py
| [
"lubos@kolouch.net"
] | lubos@kolouch.net |
24b1f3932e2f102dfb818030036b4b346ea0be8b | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/catapult/dashboard/dashboard/pinpoint/handlers/fifo_scheduler_test.py | 330d8312a3f254bc096aa52db2c76f7d022d6fb6 | [
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 12,657 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests the FIFO Scheduler Handler."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
import unittest
from dashboard.common import namespaced_stored_object
from dashboard.common import bot_configurations
from dashboard.pinpoint import test
from dashboard.pinpoint.models import job
from dashboard.pinpoint.models import scheduler
from dashboard.pinpoint.models.tasks import bisection_test_util
@mock.patch('dashboard.services.swarming.GetAliveBotsByDimensions',
mock.MagicMock(return_value=["a"]))
@mock.patch('dashboard.common.cloud_metric.PublishPinpointJobStatusMetric',
mock.MagicMock())
@mock.patch('dashboard.common.cloud_metric.PublishPinpointJobRunTimeMetric',
mock.MagicMock())
class FifoSchedulerTest(test.TestCase):
def setUp(self):
super().setUp()
namespaced_stored_object.Set(bot_configurations.BOT_CONFIGURATIONS_KEY,
{'mock': {}})
@mock.patch('dashboard.pinpoint.models.job.Job.Start')
def testSingleQueue(self, mock_job_start):
j = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j)
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertEqual(mock_job_start.call_count, 1)
# Ensure that the job is still running.
job_id, queue_status = scheduler.PickJobs('mock')[0]
self.assertEqual(job_id, j.job_id)
self.assertEqual(queue_status, 'Running')
# On the next poll, we need to ensure that an ongoing job doesn't get marked
# completed until it really is completed.
j.Start = mock.MagicMock()
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertFalse(j.Start.called)
job_id, queue_status = scheduler.PickJobs('mock')[0]
self.assertEqual(job_id, j.job_id)
self.assertEqual(queue_status, 'Running')
@mock.patch('dashboard.pinpoint.models.job.Job.Start')
def testJobCompletes(self, mock_job_start):
j = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j)
mock_job_start.side_effect = j._Complete
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertEqual(mock_job_start.call_count, 1)
job_id, _ = scheduler.PickJobs('mock')[0]
self.assertIsNone(job_id)
@mock.patch('dashboard.pinpoint.models.job.Job.Start')
def testJobFails(self, mock_job_start):
j = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j)
mock_job_start.side_effect = j.Fail
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertEqual(mock_job_start.call_count, 1)
job_id, _ = scheduler.PickJobs('mock')[0]
self.assertIsNone(job_id)
def testMultipleQueues(self):
jobs = []
total_jobs = 2
total_queues = 10
for configuration_id in range(total_queues):
for _ in range(total_jobs):
j = job.Job.New(
(), (),
arguments={'configuration': 'queue-{}'.format(configuration_id)},
comparison_mode='performance')
j.Start = mock.MagicMock(side_effect=j._Complete)
scheduler.Schedule(j)
jobs.append(j)
# We ensure that all jobs complete if we poll the fifo-scheduler.
for _ in range(0, total_jobs):
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
# Check for each job that Job.Start() was called.
for index, j in enumerate(jobs):
self.assertTrue(j.Start.Called,
'job at index {} was not run!'.format(index))
@mock.patch('dashboard.pinpoint.models.job.Job.Start')
def testQueueStatsUpdates(self, mock_job_start):
j = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j)
mock_job_start.side_effect = j._Complete
# Check that we can find the queued job.
stats = scheduler.QueueStats('mock')
self.assertEqual(stats['queued_jobs'], 1)
self.assertNotIn('running_jobs', stats)
self.assertEqual(len(stats['queue_time_samples']), 0)
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertEqual(mock_job_start.call_count, 1)
job_id, _ = scheduler.PickJobs('mock')[0]
self.assertIsNone(job_id)
# Check that point-in-time stats are zero, and that we have one sample.
stats = scheduler.QueueStats('mock')
self.assertNotIn('queued_jobs', stats)
self.assertNotIn('running_jobs', stats)
self.assertNotEqual(len(stats['queue_time_samples']), 0)
self.assertEqual(len(stats['queue_time_samples'][0]), 2)
def testJobStuckInRunning(self):
self.skipTest('Not implemented yet.')
@mock.patch('dashboard.pinpoint.models.job.Job.Start')
def testJobCancellationSucceedsOnRunningJob(self, mock_job_start):
j = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j)
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertEqual(mock_job_start.call_count, 1)
# Ensure that the job is still running.
job_id, queue_status = scheduler.PickJobs('mock')[0]
self.assertEqual(job_id, j.job_id)
self.assertEqual(queue_status, 'Running')
# We can cancel a running job.
self.assertTrue(scheduler.Cancel(j))
# Ensure that the job is still running.
job_id, queue_status = scheduler.PickJobs('mock')[0]
self.assertNotEqual(job_id, j.job_id)
self.assertNotEqual(queue_status, 'Running')
def testJobCancellationSucceedsOnQueuedJob(self):
j = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j)
j.Start = mock.MagicMock()
self.assertTrue(scheduler.Cancel(j))
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertFalse(j.Start.called)
def testJobSamplesCapped(self):
for _ in range(51):
j = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j)
j.Start = mock.MagicMock(side_effect=j._Complete)
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
stats = scheduler.QueueStats('mock')
self.assertLessEqual(len(stats.get('queue_time_samples')), 50)
@mock.patch('dashboard.pinpoint.models.job.Job.Start')
def testSchedulePriorityOrder(self, mock_job_start):
j0 = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j0)
j1 = job.Job.New((), (),
arguments={
'configuration': 'mock',
'priority': '100',
},
comparison_mode='performance',
priority=100)
scheduler.Schedule(j1)
j2 = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j2)
# The first time we call the scheduler, it must mark j0 completed.
mock_job_start.side_effect = j0._Complete
self.assertEqual(
scheduler.QueueStats('mock')['job_id_with_status'][0]['job_id'], '1')
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertEqual(mock_job_start.call_count, 1)
# Next time, j2 should be completed.
mock_job_start.side_effect = j2._Complete
self.assertEqual(
scheduler.QueueStats('mock')['job_id_with_status'][0]['job_id'], '3')
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertEqual(mock_job_start.call_count, 2)
# Then we should have j1 completed.
mock_job_start.side_effect = j1._Complete
self.assertEqual(
scheduler.QueueStats('mock')['job_id_with_status'][0]['job_id'], '2')
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertEqual(mock_job_start.call_count, 3)
# TODO(dberris): Need to mock *all* of the back-end services that the various
# "live" bisection operations will be looking into.
@unittest.skip("Delete it when removing execution engine")
@mock.patch('dashboard.services.swarming.GetAliveBotsByDimensions',
mock.MagicMock(return_value=["a"]))
class FifoSchedulerExecutionEngineTest(bisection_test_util.BisectionTestBase):
def setUp(self):
super().setUp()
namespaced_stored_object.Set(bot_configurations.BOT_CONFIGURATIONS_KEY,
{'mock': {}})
def testJobRunInExecutionEngine(self):
j = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance',
use_execution_engine=True)
self.PopulateSimpleBisectionGraph(j)
scheduler.Schedule(j)
j.Start = mock.MagicMock(side_effect=j._Complete)
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertTrue(j.Start.called)
job_id, _ = scheduler.PickJobs('mock')[0]
self.assertIsNone(job_id)
@mock.patch('dashboard.services.swarming.GetAliveBotsByDimensions',
mock.MagicMock(return_value=["a"]))
class FifoSchedulerCostModelTest(test.TestCase):
def setUp(self):
super().setUp()
# We're setting up a cost model where tryjobs cost half as much as a
# performance or functional bisection, and ensure that we're seeing the
# budget consumed appropriately in a scheduling iteration.
namespaced_stored_object.Set(
bot_configurations.BOT_CONFIGURATIONS_KEY, {
'mock': {
'scheduler': {
'cost': {
'try': 0.5,
'performance': 1.0,
'functional': 1.0
},
'budget': 2.0
}
}
})
@mock.patch('dashboard.pinpoint.models.job.Job.Start')
@mock.patch('dashboard.common.cloud_metric.PublishPinpointJobStatusMetric',
mock.MagicMock())
def testSchedule_AllBisections(self, mock_job_start):
def CreateAndSchedule():
j = job.Job.New((), (),
arguments={'configuration': 'mock'},
comparison_mode='performance')
scheduler.Schedule(j)
return j
jobs = [CreateAndSchedule() for _ in range(10)]
def CompleteJobAndPop():
jobs[0]._Complete()
jobs.pop(0)
mock_job_start.side_effect = CompleteJobAndPop
# We want to ensure that every iteration, we'll have two of the bisections
# started, given our cost model and budget.
for offset in range(1, 5):
response = self.testapp.get('/cron/fifo-scheduler')
self.assertEqual(response.status_code, 200)
self.ExecuteDeferredTasks('default')
self.assertEqual(job.Job.Start.call_count, offset * 2)
self.assertEqual(
scheduler.QueueStats('mock')['job_id_with_status'][0]['job_id'],
str(offset * 2 + 1))
self.assertEqual(
scheduler.QueueStats('mock')['queued_jobs'], 10 - offset * 2)
| [
"jengelh@inai.de"
] | jengelh@inai.de |
6fcd455d0ae546f2a8441ff6cdb63c295ed32199 | 399dae0b5ad9ca27cde175d25b5435958674eb50 | /System/Run PowerShell Script/run-powershell-script.py | 276874c0fa446e1832bd4e3f0f779886e8c565e6 | [] | no_license | kannanch/pythonscripts | 61e3ea9e8ebf6a6b0ec2a4a829664e4507b803ba | 843a522236f9c2cc2aadc68d504c71bb72600bd9 | refs/heads/master | 2020-06-12T11:18:00.404673 | 2019-06-28T11:24:37 | 2019-06-28T11:24:37 | 194,282,297 | 1 | 0 | null | 2019-06-28T13:55:56 | 2019-06-28T13:55:56 | null | UTF-8 | Python | false | false | 1,655 | py | ps_content=r'''
function Get-Uptime {
Param(
$ComputerName = $env:COMPUTERNAME)
if ($lastBootUpTime = (Get-WmiObject win32_operatingsystem -ComputerName $ComputerName| select @{LABEL='LastBootUpTime';EXPRESSION={$_.ConverttoDateTime($_.lastbootuptime)}}).LastBootUpTime) {
(Get-Date) - $lastBootUpTime
} else {
Write-Error "Unable to retrieve WMI Object win32_operatingsystem from $ComputerName"}}
Get-Uptime
'''
import os
def ecmd(command):
import ctypes
from subprocess import PIPE, Popen
class disable_file_system_redirection:
_disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
_revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
def __enter__(self):
self.old_value = ctypes.c_long()
self.success = self._disable(ctypes.byref(self.old_value))
def __exit__(self, type, value, traceback):
if self.success:
self._revert(self.old_value)
with disable_file_system_redirection():
obj = Popen(command, shell = True, stdout = PIPE, stderr = PIPE)
out, err = obj.communicate()
ret=obj.returncode
if ret==0:
if out:
return out.strip()
else:
return ret
else:
if err:
return err.strip()
else:
return ret
file_name='powershell_file.ps1'
file_path=os.path.join(os.environ['TEMP'], file_name)
with open(file_path, 'wb') as wr:
wr.write(ps_content)
ecmd('powershell "Set-ExecutionPolicy RemoteSigned"')
print ecmd('powershell "%s"'%file_path)
os.remove(file_path)
| [
"noreply@github.com"
] | kannanch.noreply@github.com |
efd3556e9673ddbe9bd70a4daf90badd68eecb71 | 3beaae9591735f31e754e1d325bdbf20a0070e12 | /solver.py | 362114125dd30c68aa47e524ba7c894880f77bab | [] | no_license | benediktwerner/CrossCellsSolver | 6a3b85ae4507804004ab10b7acf444b11811ec10 | 9946ccef221713c6a9af7d49c132c33b23f36cb3 | refs/heads/master | 2020-04-18T21:23:29.708100 | 2019-01-27T05:11:13 | 2019-01-27T05:11:13 | 167,764,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,950 | py | import time
import math
import cv2
import numpy as np
import mss
import pytesseract
import win32con
import win32api
import win32gui
from z3 import Bool, If, simplify, sat, Solver
BORDER_TOP = 100
BORDER_LEFT = 100
BORDER_BOTTOM = 100
BORDER_RIGHT = 100
CELL_SIZE = 35
class Rect:
def __init__(self, rect):
if isinstance(rect, Rect):
self.x = rect.x
self.y = rect.y
self.width = rect.width
self.height = rect.height
else:
self.x = rect[0]
self.y = rect[1]
self.width = rect[2]
self.height = rect[3]
self.x2 = self.x + self.width
self.y2 = self.y + self.height
self.center_x = self.x + self.width // 2
self.center_y = self.y + self.height // 2
@staticmethod
def from_corner_rect(rect):
x1, y1, x2, y2 = rect
return Rect((x1, y1, x2-x1, y2-y1))
def enlarge(self, border):
return Rect((self.x-border, self.y-border, self.width+border*2, self.height+border*2))
def to_rect(self):
return (self.x, self.y, self.width, self.height)
def to_slice(self):
return (slice(self.y, self.y2), slice(self.x, self.x2))
def dist(self, other):
return math.sqrt((self.center_x - other.center_x)**2 + (self.center_y - other.center_y)**2)
def contains(self, other):
return self.x < other.x and other.x2 < self.x2 and self.y < other.y and other.y2 < self.y2
class Label(Rect):
def __init__(self, rect):
super().__init__(rect)
self.connected = []
self.merged = False
self.text = None
def __merge(self, other):
self.x = min(self.x, other.x)
self.y = min(self.y, other.y)
self.x2 = max(self.x2, other.x2)
self.y2 = max(self.y2, other.y2)
def merge(self):
self.merged = True
for other in self.connected:
if other.merged:
continue
other.merge()
self.__merge(other)
self.width = self.x2 - self.x
self.height = self.y2 - self.y
self.center_x = self.x + self.width // 2
self.center_y = self.y + self.height // 2
class Cell(Rect):
def __init__(self, rect, text):
super().__init__(rect)
self.text = text
self.square = None
self.variable = Bool("{},{}".format(self.x, self.y))
class Constraint(Rect):
def __init__(self, rect, initial_cells=None):
super().__init__(rect)
self.cells = [] if initial_cells is None else initial_cells
self.text = None
def get_constraint(self):
if "[" in self.text:
return sum(If(cell.variable, 1, 0) for cell in self.cells) == int(self.text.strip("[]"))
term = 0
for cell in self.cells:
if cell.text[0] == "+":
term += If(cell.variable, int(cell.text[1:]), 0)
else:
term *= If(cell.variable, int(cell.text[1:]), 1)
return term == int(self.text)
class LineConstraint(Constraint):
def __init__(self, rect, cell=None):
super().__init__(rect, None if cell is None else [cell])
def sort_cells(self):
if max(abs(cell.center_x - self.x) for cell in self.cells) < CELL_SIZE:
self.cells.sort(key=lambda c: c.y, reverse=self.cells[0].y > self.y)
else:
self.cells.sort(key=lambda c: c.x, reverse=self.cells[0].x > self.x)
def find_bounding_rects(img, inside=False):
mode = cv2.RETR_LIST if inside else cv2.RETR_EXTERNAL
contours, _ = cv2.findContours(img, mode, cv2.CHAIN_APPROX_SIMPLE)
return [Rect(cv2.boundingRect(contour)) for contour in contours]
class CrossCellsSolver:
def __init__(self):
self.window_handle = win32gui.FindWindow(None, "CrossCells")
self.window_rect = Rect.from_corner_rect(win32gui.GetWindowRect(self.window_handle))
self.monitor = {
"left": self.window_rect.x + BORDER_LEFT,
"top": self.window_rect.y + BORDER_TOP,
"width": self.window_rect.width - BORDER_LEFT - BORDER_RIGHT,
"height": self.window_rect.height - BORDER_TOP - BORDER_BOTTOM
}
self.img = None
self.img_orig = None
self.cells = []
self.squares = []
self.lines = []
self.labels = []
def move_mouse(self, x=0, y=0):
win32api.SetCursorPos((
x + self.window_rect.x + BORDER_LEFT,
y + self.window_rect.y + BORDER_TOP
))
def click(self, x, y, right=False):
self.move_mouse(x, y)
time.sleep(0.01)
x = x + self.window_rect.x + BORDER_LEFT
y = y + self.window_rect.y + BORDER_TOP
btn_down = win32con.MOUSEEVENTF_RIGHTDOWN if right else win32con.MOUSEEVENTF_LEFTDOWN
btn_up = win32con.MOUSEEVENTF_RIGHTUP if right else win32con.MOUSEEVENTF_LEFTUP
win32api.mouse_event(btn_down, x, y, 0, 0)
time.sleep(0.01)
win32api.mouse_event(btn_up, x, y, 0, 0)
time.sleep(0.01)
def screenshot(self):
with mss.mss() as sct:
return np.array(sct.grab(self.monitor))
def detect_text(self, rect):
return pytesseract.image_to_string(self.img[rect.to_slice()], config="tesseract.conf")
def draw_line(self, start, end, color=(0, 0, 255)):
self.img_orig = cv2.line(self.img_orig, start, end, color)
def draw_rect(self, rect, color=(0, 0, 255)):
self.img_orig = cv2.rectangle(self.img_orig, rect.to_rect(), color)
def draw_text(self, text, rect, color=(0, 0, 255)):
self.img_orig = cv2.putText(self.img_orig, text, (rect.x, rect.y), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2)
def do_level(self):
self.detect_level()
self.solve_level()
def detect_level(self):
self.img = self.screenshot()
self.img_orig = self.img.copy()
self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
_, self.img = cv2.threshold(self.img, 110, 255, cv2.THRESH_BINARY_INV)
self.cells = []
self.squares = []
self.lines = []
self.labels = []
self.detect_objects()
self.process_labels()
self.process_cells()
self.process_lines()
self.process_squares()
if self.labels:
for label in self.labels:
self.draw_rect(label, (0, 0, 255))
def detect_objects(self):
for rect in find_bounding_rects(self.img):
if rect.width > CELL_SIZE and rect.height > CELL_SIZE:
text = self.detect_text(rect)
self.draw_text(text, rect)
self.cells.append(Cell(rect, text))
else:
label = Label(rect)
self.labels.append(label)
for other in self.labels:
if label.dist(other) < 30:
label.connected.append(other)
other.connected.append(label)
def process_labels(self):
labels_merged = []
for label in self.labels:
if label.merged:
continue
label.merge()
labels_merged.append(label)
label.text = self.detect_text(label.enlarge(10))
self.labels = labels_merged
def process_cells(self):
for cell in self.cells:
self.move_mouse(cell.center_x, cell.center_y)
time.sleep(0.5)
img2 = self.screenshot()
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
_, img2 = cv2.threshold(img2, 120, 255, cv2.THRESH_BINARY_INV)
diff = img2 - self.img
for cell2 in self.cells:
diff[cell2.enlarge(5).to_slice()] = 0
for label in self.labels:
diff[label.enlarge(3).to_slice()] = 0
_, diff = cv2.threshold(diff, 110, 255, cv2.THRESH_BINARY)
for rect in find_bounding_rects(diff, inside=True):
if rect.width > CELL_SIZE and rect.height > CELL_SIZE:
if cell.square is None:
self.add_square_constraint(rect)
elif abs(cell.center_x - rect.center_x) < 5 or abs(cell.center_y - rect.center_y) < 5:
self.add_line_constraint(rect, cell)
def add_square_constraint(self, rect):
square = Constraint(rect)
for cell in self.cells:
if square.contains(cell):
square.cells.append(cell)
cell.square = square
self.squares.append(square)
def add_line_constraint(self, rect, cell):
for line in self.lines:
if rect.dist(line) < 5:
line.cells.append(cell)
break
else:
line = LineConstraint(rect, cell)
self.lines.append(line)
def process_lines(self):
for line in self.lines:
line.sort_cells()
label = min(self.labels, key=line.dist)
line.text = label.text
self.labels.remove(label)
self.draw_line((line.center_x, line.center_y), (label.center_x, label.center_y), (255, 0, 0))
self.draw_rect(label, (255, 0, 0))
x = line.x
y = line.y
self.draw_text(line.text, line, (0, 0, 255))
for cell in line.cells:
self.draw_line((x, y), (cell.center_x, cell.center_y), (0, 0, 255))
x = cell.center_x
y = cell.center_y
def process_squares(self):
for square in self.squares:
for label in self.labels:
if square.contains(label):
square.text = label.text
self.labels.remove(label)
self.draw_line((square.x, square.y), (label.center_x, label.center_y), (0, 255, 0))
self.draw_rect(label, (0, 255, 0))
break
self.draw_text(square.text, square, (0, 255, 0))
for cell in square.cells:
self.draw_line((cell.x, cell.y), (cell.square.x, cell.square.y), (0, 255, 0))
def solve_level(self):
solver = Solver()
for line in self.lines:
solver.add(simplify(line.get_constraint()))
for square in self.squares:
solver.add(simplify(square.get_constraint()))
if solver.check() == sat:
model = solver.model()
for cell in self.cells:
self.click(cell.center_x, cell.center_y, model[cell.variable])
else:
print("Solver failed")
print(solver)
cv2.imshow("Detection", self.img_orig)
cv2.waitKey(0)
cv2.destroyAllWindows()
def main():
time.sleep(5)
solver = CrossCellsSolver()
for _ in range(5):
solver.do_level()
solver.move_mouse()
time.sleep(7)
if __name__ == "__main__":
main()
| [
"1benediktwerner@gmail.com"
] | 1benediktwerner@gmail.com |
d789aa6de3ba1cab2a8b9572ee4e4766af9fc98d | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-4311.py | a7166d25a431c34cd89dafbe2bec9519d9fd434c | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,755 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
    """Growable int vector (benchmark variant with x5 padded copies).

    Only ``items``/``size`` carry real state; the numbered twin fields and
    the extra parameters of the numbered methods are benchmark padding kept
    for interface compatibility.
    """
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    # BUG FIX: the original line read "size3: $ID = 0" — a leftover fuzzing
    # placeholder that is a syntax error; restored to the intended int.
    size3: int = 0
    size4: int = 0
    size5: int = 0

    def __init__(self: "Vector5"):
        self.items = [0]

    # Returns current capacity
    def capacity(self: "Vector5") -> int:
        return len(self.items)

    def capacity2(self: "Vector5") -> int:
        return len(self.items)

    def capacity3(self: "Vector5") -> int:
        return len(self.items)

    def capacity4(self: "Vector5") -> int:
        return len(self.items)

    def capacity5(self: "Vector5") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self: "Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity2(self: "Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity3(self: "Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity4(self: "Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    def increase_capacity5(self: "Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self: "Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    def append2(self: "Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    def append3(self: "Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    def append4(self: "Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    def append5(self: "Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self: "Vector5", new_items: [int]) -> object:
        item: int = 0
        for item in new_items:
            self.append(item)

    def append_all2(self: "Vector5", new_items: [int], new_items2: [int]) -> object:
        item: int = 0
        for item in new_items:
            self.append(item)

    def append_all3(self: "Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item: int = 0
        for item in new_items:
            self.append(item)

    def append_all4(self: "Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item: int = 0
        for item in new_items:
            self.append(item)

    def append_all5(self: "Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item: int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector
    def remove_at(self: "Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    def remove_at2(self: "Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    def remove_at3(self: "Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    def remove_at4(self: "Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    def remove_at5(self: "Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index
    def get(self: "Vector5", idx: int) -> int:
        return self.items[idx]

    def get2(self: "Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]

    def get3(self: "Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    def get4(self: "Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]

    def get5(self: "Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self: "Vector5") -> int:
        return self.size

    def length2(self: "Vector5") -> int:
        return self.size

    def length3(self: "Vector5") -> int:
        return self.size

    def length4(self: "Vector5") -> int:
        return self.size

    def length5(self: "Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
# (indentation restored; the dump had flattened every line to column 0,
# which is a syntax error).  Each DoublingVectorN carries N padded copies
# of the same field/method, as in the original benchmark.
class DoublingVector(Vector):
    doubling_limit: int = 1000

    # Overriding to do fewer resizes
    def increase_capacity(self: "DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()


class DoublingVector2(Vector):
    doubling_limit: int = 1000
    doubling_limit2: int = 1000

    def increase_capacity(self: "DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity2(self: "DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()


class DoublingVector3(Vector):
    doubling_limit: int = 1000
    doubling_limit2: int = 1000
    doubling_limit3: int = 1000

    def increase_capacity(self: "DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity2(self: "DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity3(self: "DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()


class DoublingVector4(Vector):
    doubling_limit: int = 1000
    doubling_limit2: int = 1000
    doubling_limit3: int = 1000
    doubling_limit4: int = 1000

    def increase_capacity(self: "DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity2(self: "DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity3(self: "DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity4(self: "DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()


class DoublingVector5(Vector):
    doubling_limit: int = 1000
    doubling_limit2: int = 1000
    doubling_limit3: int = 1000
    doubling_limit4: int = 1000
    doubling_limit5: int = 1000

    def increase_capacity(self: "DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity2(self: "DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity3(self: "DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity4(self: "DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()

    def increase_capacity5(self: "DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
# (indentation restored from the flattened dump; "Vector" annotations are
# string forward references so the defs evaluate regardless of definition
# order; the unused numbered locals of the padded variants were dropped.)
def vrange(i: int, j: int) -> "Vector":
    v: "Vector" = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v


def vrange2(i: int, j: int, i2: int, j2: int) -> "Vector":
    # Extra arguments are benchmark padding and ignored.
    v: "Vector" = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v


def vrange3(i: int, j: int, i2: int, j2: int, i3: int, j3: int) -> "Vector":
    v: "Vector" = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v


def vrange4(i: int, j: int, i2: int, j2: int, i3: int, j3: int, i4: int, j4: int) -> "Vector":
    v: "Vector" = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v


def vrange5(i: int, j: int, i2: int, j2: int, i3: int, j3: int, i4: int, j4: int, i5: int, j5: int) -> "Vector":
    v: "Vector" = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
# Removes every element of v that is divisible by an earlier surviving
# element, so a vector seeded with [2, n) keeps exactly the primes.
# (Indentation restored from the flattened dump; unused padded locals
# dropped; "Vector" annotations quoted as forward references.)
def sieve(v: "Vector") -> object:
    i: int = 0
    j: int = 0
    k: int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1


def sieve2(v: "Vector", v2: "Vector") -> object:
    # Extra vector arguments are benchmark padding and ignored.
    i: int = 0
    j: int = 0
    k: int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1


def sieve3(v: "Vector", v2: "Vector", v3: "Vector") -> object:
    i: int = 0
    j: int = 0
    k: int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1


def sieve4(v: "Vector", v2: "Vector", v3: "Vector", v4: "Vector") -> object:
    i: int = 0
    j: int = 0
    k: int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1


def sieve5(v: "Vector", v2: "Vector", v3: "Vector", v4: "Vector", v5: "Vector") -> object:
    i: int = 0
    j: int = 0
    k: int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# --- Benchmark driver (indentation restored from the flattened dump) ---
# Input parameter
n: int = 50
n2: int = 50
n3: int = 50
n4: int = 50
n5: int = 50
# Data
v: "Vector" = None
v2: "Vector" = None
v3: "Vector" = None
v4: "Vector" = None
v5: "Vector" = None
i: int = 0
i2: int = 0
i3: int = 0
i4: int = 0
i5: int = 0
# Crunch — only v is sieved; v2..v5 exist as benchmark padding.
# NOTE: all five use n (not n2..n5), as in the original.
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print the surviving (prime) values, one per line.
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
0964e9a5dbb4533d4abfd265dd6d8ba7144e2691 | 48323c491536a1190f6287161f61230eb0232dfe | /Leetcode/NextPermutation.py | abb8d7755bb53bab4b5faa5ac26972801f8ed397 | [] | no_license | cgxabc/Online-Judge-Programming-Exercise | d8249846eaf1a7f6f228aeae5adcee6d90dfcce6 | 724f514e7dc7774f2df5eecf90ef2a678b233a29 | refs/heads/master | 2021-04-28T13:27:42.239709 | 2018-02-19T18:51:05 | 2018-02-19T18:51:05 | 122,104,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 17 15:05:43 2017
@author: apple
"""
"""
Implement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.
If such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order).
The replacement must be in-place, do not allocate extra memory.
Here are some examples. Inputs are in the left-hand column and its corresponding outputs are in the right-hand column.
1,2,3 โ 1,3,2
3,2,1 โ 1,2,3
1,1,5 โ 1,5,1
"""
def nextPermutation(nums):
    """Rearrange *nums* in place into the next lexicographic permutation.

    If nums is already the highest permutation, it wraps around to the
    lowest (ascending) order, matching the problem statement.

    Fixes over the original:
    * ``range`` instead of Python-2-only ``xrange``.
    * The fully-descending branch now also returns ``nums`` — the original
      returned ``None`` on that path but ``nums`` on every other path.

    :param nums: list of ints, modified in place
    :return: the same list, for convenience
    """
    # k = last index with nums[k] < nums[k + 1] (pivot); -1 if descending.
    k = -1
    for i in range(len(nums) - 1):
        if nums[i] < nums[i + 1]:
            k = i
    if k == -1:
        # Already the last permutation: wrap to the first one.
        nums.reverse()
        return nums
    # l = last index after k whose value exceeds the pivot.
    l = 0
    for i in range(k + 1, len(nums)):
        if nums[i] > nums[k]:
            l = i
    nums[k], nums[l] = nums[l], nums[k]
    # Reverse the suffix after the pivot (it is descending after the swap).
    nums[k + 1:] = nums[:k:-1]
    return nums
print nextPermutation([1,3,4,19,100,67,21,5])
#[1,3,4,21,100,67,19,5]
#[1,3,4,21,5,19,67,100]
#[1, 3, 4, 21, 5, 19, 67, 100]
#def combinationSum(candidates,target):
#nums=[1,3,4,19,8,6,21,5]
#k,l=-1,0
#for i in xrange(len(nums)-1):
# if nums[i]<nums[i+1]:
# k=i
#for i in xrange(k+1, len(nums)):
# if nums[i]>nums[k]:
# l=i
#print k, l #5,6
#[1,3,4,19,8,21,6,5]
#[1,3,4,19,8,21,5,6]
| [
"noreply@github.com"
] | cgxabc.noreply@github.com |
f585be332ad48617e79e279f22f8d8d6ec1fe263 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/90ec77532048b6bf469559849d5ca80cd4e54f69action_controller.py | 7d45544d8058f62ec577713a798563d315ed7df2 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 2,067 | py | from Menu.menu import Menu
from Menu.text_menu_entry import TextMenuEntry
from Screen.Console.Menu.ActionMenu.action_menu_screen import ActionMenuScreen
from Screen.Console.Menu.ActionMenu.AttackMenu.attack_controller import AttackController
from Screen.Console.Menu.ActionMenu.SwitchMenu.switch_controller import SwitchController
from kao_console.ascii import ENDL, KAO_UP, KAO_DOWN, KAO_LEFT, KAO_RIGHT
from kao_gui.console.console_controller import ConsoleController
class ActionController(ConsoleController):
    """ Controller for selecting a Battle Action """
    # NOTE(review): leading indentation was lost in this dump; the structure
    # below is reconstructed and should be confirmed against upstream.

    def __init__(self, pokemon, battle):
        """ Builds the Action Controller """
        self.pokemon = pokemon
        self.battle = battle
        self.action = None
        entries = [TextMenuEntry("Fight", self.chooseAttack),
                   TextMenuEntry("Switch", self.switch),
                   TextMenuEntry("Item", None),
                   TextMenuEntry("Run", None)]
        self.menu = Menu(entries, columns=2)
        screen = ActionMenuScreen(self.menu, battle)
        # Key bindings: enter selects, arrows move the menu cursor.
        cmds = {ENDL: self.menu.enter,
                KAO_UP: self.menu.up,
                KAO_DOWN: self.menu.down,
                KAO_RIGHT: self.menu.right,
                KAO_LEFT: self.menu.left}
        ConsoleController.__init__(self, screen, commands=cmds)

    def chooseAttack(self, entry):
        """ Run the Attack Menu Controller """
        attackMenuController = AttackController(self.pokemon, self.battle.oppSide.pkmnInPlay, self.battle)
        self.runController(attackMenuController)

    def switch(self, entry):
        """ Run the Switch Menu Controller """
        switchMenuController = SwitchController(self.pokemon)
        self.runController(switchMenuController)

    def runController(self, controller):
        """ Runs the given controller """
        ConsoleController.runController(self, controller)
        if controller.action is not None:
            self.action = controller.action
            # NOTE(review): stopRunning() is restored inside the guard (stop
            # this menu only once a sub-controller chose an action) — confirm
            # placement against the original, whose indentation was lost.
            self.stopRunning()
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
02893c861cf938bbf3afe51a1e5d61e2b8f327bd | 6ae8717002f8fce4457cceb3375a114ddcb837df | /1-100/18. Four Sum.py | ce1a72a2e443da34b890db6074991804027e0403 | [] | no_license | SunnyMarkLiu/LeetCode | 31aea2954d5a84d11a1c4435f760c1d03c6c1243 | 852fad258f5070c7b93c35252f7404e85e709ea6 | refs/heads/master | 2020-05-30T07:17:33.992197 | 2018-03-29T03:57:51 | 2018-03-29T03:57:51 | 104,643,862 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | #!/home/sunnymarkliu/software/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
4 sum ้ฎ้ข่ฝฌๅไธบ 3 sum ้ฎ้ข
ๅๆถๆณจๆๆถ้ดๅคๆๅบฆ่่ฟ่ก็่พน็ๆฃๆฅ
@author: MarkLiu
@time : 17-10-31 ไธๅ8:05
"""
class Solution(object):
    def fourSum(self, nums, target):
        """
        Return all unique quadruplets [a, b, c, d] from nums with
        a + b + c + d == target.

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]

        Fixes over the original:
        * ``range`` instead of Python-2-only ``xrange``.
        * The caller's list is no longer mutated (original called nums.sort()).
        * Deduplication by skipping equal neighbours in the sorted array
          instead of the quadratic ``quad not in solutions`` membership test.
        """
        nums = sorted(nums)
        n = len(nums)
        solutions = []
        # range(n - 3) is empty for n < 4, so short inputs fall through to [].
        for i in range(n - 3):
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # skip duplicate first elements
            if 4 * nums[i] > target:
                break  # smallest achievable sum already exceeds target
            if nums[i] + 3 * nums[-1] < target:
                continue  # largest achievable sum still below target
            for j in range(i + 1, n - 2):
                if j > i + 1 and nums[j] == nums[j - 1]:
                    continue  # skip duplicate second elements
                lo, hi = j + 1, n - 1
                # Classic two-pointer scan for the remaining pair.
                while lo < hi:
                    s = nums[i] + nums[j] + nums[lo] + nums[hi]
                    if s < target:
                        lo += 1
                    elif s > target:
                        hi -= 1
                    else:
                        solutions.append([nums[i], nums[j], nums[lo], nums[hi]])
                        while lo < hi and nums[lo] == nums[lo + 1]:
                            lo += 1
                        while lo < hi and nums[hi] == nums[hi - 1]:
                            hi -= 1
                        lo += 1
                        hi -= 1
        return solutions
print Solution().fourSum([1, 0, -1, 0, -2, 2], 0)
| [
"SunnyMarkLiu101@gmail.com"
] | SunnyMarkLiu101@gmail.com |
5329ba8a2cc777d09994227fd8800e3ce9607b95 | ba62f1d4c2d4209cbbe12bbf94ac2b44e56646eb | /callbacks/sdnet_image_callback.py | 3c13bf2fba94ed5b3296807b9016f86938d7f48e | [] | no_license | falconjhc/APDNet-SourceCodes | 358f4387254aa30b04277910b67a31ae050dd1ce | d612177dd462910019f31f32f2ec81aa046a602c | refs/heads/master | 2022-12-05T01:28:49.500452 | 2020-08-27T10:11:11 | 2020-08-27T10:11:11 | 290,631,642 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,070 | py | import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy
from keras import Input, Model
#from scipy.misc import imsave
from imageio import imwrite as imsave # harric modified
from utils.image_utils import image_show, generate_mask_on_img # harric modified
import utils.data_utils
import utils.image_utils
from callbacks.image_callback import BaseSaveImage
from layers.rounding import Rounding
from utils import sdnet_utils
from utils.distributions import NormalDistribution
from utils.sdnet_utils import get_net
log = logging.getLogger('SDNetImageCallback')
class SDNetImageCallback(BaseSaveImage):
def __init__(self, conf, sdnet, data_gen_lb, mask_gen, img_channels, anato_mask_channels, patho_mask_channels):
'''
Callback that saves diagnostic images at the end of each SDNet training epoch.
:param conf: experiment configuration; conf.folder is the output root
:param sdnet: SDNet wrapper exposing Enc_Anatomy, Decoder, Segmentor, Enc_Modality, ...
:param data_gen_lb: python iterator of labelled batches with images and masks packed along channels
:param mask_gen: pair of iterators of extra (anatomy, pathology) masks, or None
:param img_channels: number of image channels in each packed batch
:param anato_mask_channels: number of anatomy-mask channels in each packed batch
:param patho_mask_channels: number of pathology-mask channels in each packed batch
'''
self.conf = conf
# NOTE(review): assumes BaseSaveImage.__init__ stores the model as
# self.model and the folder as self.folder (both used below) — confirm.
super(SDNetImageCallback, self).__init__(conf.folder, sdnet)
self._make_dirs(self.folder)
self.data_gen_lb = data_gen_lb
self.mask_gen = mask_gen
self.img_channels = img_channels
self.anato_mask_channels = anato_mask_channels
self.patho_mask_channels = patho_mask_channels
# Cache references to the SDNet sub-networks used by the plot helpers.
self.init_models()
def _make_dirs(self, folder):
self.lr_folder = folder + '/images_lr'
if not os.path.exists(self.lr_folder):
os.makedirs(self.lr_folder)
self.anato_segm_folder = folder + '/images_anato_segm'
if not os.path.exists(self.anato_segm_folder):
os.makedirs(self.anato_segm_folder)
self.patho_segm_folder = folder + '/images_patho_segm'
if not os.path.exists(self.patho_segm_folder):
os.makedirs(self.patho_segm_folder)
self.rec_folder = folder + '/images_rec'
if not os.path.exists(self.rec_folder):
os.makedirs(self.rec_folder)
self.reconstruct_discr_folder = folder + '/images_reconstruct_discr'
if not os.path.exists(self.reconstruct_discr_folder):
os.makedirs(self.reconstruct_discr_folder)
self.reconstruct_classifier_folder = folder + '/images_reconstruct_classifier'
if not os.path.exists(self.reconstruct_classifier_folder):
os.makedirs(self.reconstruct_classifier_folder)
self.interp_folder = folder + '/images_interp'
if not os.path.exists(self.interp_folder):
os.makedirs(self.interp_folder)
def init_models(self):
"""Cache SDNet sub-networks and build small helper models for plotting."""
self.enc_anatomy = self.model.Enc_Anatomy
self.reconstructor = self.model.Decoder
self.segmentor = self.model.Segmentor
self.discr_reconstruct_mask = self.model.D_Reconstruction
self.enc_modality = self.model.Enc_Modality
self.enc_pathology = self.model.Enc_Pathology
# Collect the per-channel z-mean / z-log-variance layer outputs of the
# modality encoder (layers named z_mean_i / z_log_var_i, 1-based) so the
# latent statistics can be predicted directly.
mean_list = []
var_list = []
for ii in range(self.conf.input_shape[-1]):
mean = get_net(self.enc_modality, 'z_mean_%d' % (ii+1))
var = get_net(self.enc_modality, 'z_log_var_%d' % (ii+1))
mean_list.append(mean.output)
var_list.append(var.output)
self.z_mean = Model(self.enc_modality.inputs, mean_list)
self.z_var = Model(self.enc_modality.inputs, var_list)
# Auxiliary model applying the Rounding layer to the anatomy encoding
# (used when conf.rounding == 'decoder' in plot_latent_representation).
inp = Input(self.conf.input_shape)
self.round_model = Model(inp, Rounding()(self.enc_anatomy(inp)))
def on_epoch_end(self, epoch=None, logs=None):
'''
At the end of each epoch, sample a labelled batch (and optional extra
masks) and save segmentation / reconstruction / latent-space figures.
:param epoch: current training epoch
:param logs: unused; kept for Keras callback API compatibility
'''
lb_image_mask_pack = next(self.data_gen_lb)
# we usually plot 4 image-rows.
# If we have less, it means we've reached the end of the data, so iterate from the beginning
if len(lb_image_mask_pack[0]) < 4:
lb_image_mask_pack = next(self.data_gen_lb)
# The generator packs image, anatomy-mask and pathology-mask channels
# along the last axis; split them back apart here.
ims = lb_image_mask_pack[:,:,:,0:self.img_channels]
anato_mks = lb_image_mask_pack[:,:,:,self.img_channels:self.img_channels+self.anato_mask_channels]
patho_mks = lb_image_mask_pack[:, :, :, self.img_channels + self.anato_mask_channels:]
lb_images = [ims, anato_mks, patho_mks]
anato_masks = None if self.mask_gen is None else next(self.mask_gen[0])
patho_masks = None if self.mask_gen is None else next(self.mask_gen[1])
# NOTE(review): this guard tests patho_masks but refreshes/crops
# anato_masks — possibly intended to test anato_masks; confirm upstream.
if patho_masks is not None:
if len(anato_masks) < 4:
anato_masks = next(self.mask_gen[0])
_, b = utils.data_utils.crop_same([anato_masks], [anato_masks], size=(lb_images[0].shape[1], lb_images[0].shape[2]))
anato_masks = b[0]
if patho_masks is not None:
if len(patho_masks) < 4:
patho_masks = next(self.mask_gen[1])
_, b = utils.data_utils.crop_same([patho_masks], [patho_masks], size=(lb_images[0].shape[1], lb_images[0].shape[2]))
patho_masks = b[0]
# Discriminator/classifier/interpolation plots are currently disabled.
# self.plot_anatomy_mask_discriminator_outputs(lb_images, [anato_masks, patho_masks], epoch)
# self.plot_pathology_mask_discriminator_outputs(lb_images, [anato_masks, patho_masks], epoch)
self.plot_anatomy_segmentations(lb_images, epoch)
self.plot_pathology_segmentations(lb_images, epoch)
self.plot_reconstructions(lb_images, epoch)
#self.plot_reconstruction_classifier_outputs(lb_images, [anato_masks, patho_masks], epoch)
#self.plot_reconstruction_discriminator_outputs(lb_images, [anato_masks, patho_masks], epoch)
self.plot_latent_representation(lb_images, epoch)
#self.plot_image_switch_lr(lb_images, epoch)
#self.plot_image_interpolation(lb_images, epoch)
def plot_latent_representation(self, lb_images, epoch):
"""
Save figures of the latent representations for the current batch:
the spatial anatomy factor S (one column per channel, plus the
pathology prediction), optionally its rounded version, histograms of
the modality factor Z against samples from N(0, 1), and running CSV
logs of the per-dimension Z statistics.
:param lb_images: list [images, anatomy_masks, pathology_masks] of 4-dim arrays
:param epoch : the epoch number (also selects which image channel to show)
"""
# combine labelled and unlabelled images and randomly sample 4 examples
images = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1]
patho_masks = lb_images[2]
# Cycle through the image channels across epochs.
current_select = epoch % images.shape[3]
x = images
# plot S
s = self.enc_anatomy.predict(x)
predicted_anatomy = self.segmentor.predict(s)
predicted_pathology_from_predicted_anatomy = self.enc_pathology.predict(np.concatenate([x,predicted_anatomy], axis=-1))
# Keep only the first channel of each pathology output head.
predicted_pathology_from_predicted_anatomy_list = []
for ii in range(len(predicted_pathology_from_predicted_anatomy)):
predicted_pathology_from_predicted_anatomy_list.append(predicted_pathology_from_predicted_anatomy[ii][:,:,:,0:1])
predicted_pathology_from_predicted_anatomy = np.concatenate(predicted_pathology_from_predicted_anatomy_list, axis=-1)
# One row per sample: input | S channels | predicted pathology channels.
rows = [np.concatenate([x[i, :, :, current_select]]
+ [s[i, :, :, s_chn] for s_chn in range(s.shape[-1])]
+ [predicted_pathology_from_predicted_anatomy[i,:,:,chn]
for chn in range(predicted_pathology_from_predicted_anatomy.shape[-1])],
axis=1)
for i in range(x.shape[0])]
im_plot = np.concatenate(rows, axis=0)
imsave(self.lr_folder + '/s_lr_epoch_%d.png' % epoch, im_plot)
# harric modified
plt.figure()
plt.imshow(im_plot, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.close()
# Optionally also plot the rounded anatomy factor.
if self.conf.rounding == 'decoder':
s = self.round_model.predict(x)
rows = [np.concatenate([x[i, :, :, 0]] + [s[i, :, :, s_chn] for s_chn in range(s.shape[-1])], axis=1)
for i in range(x.shape[0])]
im_plot = np.concatenate(rows, axis=0)
imsave(self.lr_folder + '/srnd_lr_epoch_%d.png' % epoch, im_plot)
# harric modified
plt.figure()
plt.imshow(im_plot, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.close()
# plot Z: histogram of each predicted latent dimension vs a N(0,1) sample.
z = self.enc_modality.predict([self.enc_anatomy.predict(images),
predicted_pathology_from_predicted_anatomy,
images])[0:self.conf.input_shape[-1]]
z = np.concatenate(z, axis=-1)
gaussian = NormalDistribution()
real_z = gaussian.sample(z.shape)
fig, axes = plt.subplots(nrows=z.shape[1], ncols=2, sharex=True, sharey=True, figsize=(10, 8))
axes[0, 0].set_title('Predicted Z')
axes[0, 1].set_title('Real Z')
for i in range(len(axes)):
axes[i, 0].hist(z[:, i], normed=True, bins=11, range=(-3, 3))
axes[i, 1].hist(real_z[:, i], normed=True, bins=11, range=(-3, 3))
axes[0, 0].plot(0, 0)
plt.savefig(self.lr_folder + '/z_lr_epoch_%d.png' % epoch)
plt.close()
# Append per-dimension Z statistics (variance of means, mean of
# exp(log-variances)) to running CSV logs.
means = self.z_mean.predict([self.enc_anatomy.predict(images),
predicted_pathology_from_predicted_anatomy,
images])
variances = self.z_var.predict([self.enc_anatomy.predict(images),
predicted_pathology_from_predicted_anatomy,
images])
means = np.concatenate(means,axis=-1)
variances = np.concatenate(variances, axis=-1)
means = np.var(means, axis=0)
variances = np.mean(np.exp(variances), axis=0)
with open(self.lr_folder + '/z_means.csv', 'a+') as f:
f.writelines(', '.join([str(means[i]) for i in range(means.shape[0])]) + '\n')
with open(self.lr_folder + '/z_vars.csv', 'a+') as f:
f.writelines(', '.join([str(variances[i]) for i in range(variances.shape[0])]) + '\n')
def plot_anatomy_segmentations(self, lb_images, epoch):
'''
Save one figure where each row shows an input image, the predicted
anatomy-segmentation channels, and the ground-truth anatomy masks
(zero-padded when the prediction has more channels than the labels).
:param lb_images: list [images, anatomy_masks, pathology_masks] of 4-dim arrays
:param epoch: the epoch number (also selects which image channel to show)
'''
imags = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1] # [el[1] for el in lb_images]
patho_masks = lb_images[2] # [el[1] for el in lb_images]
current_select = epoch % imags.shape[3]
x = imags
m_anato = anato_masks
m_patho = patho_masks
assert x.shape[:-1] == m_anato.shape[:-1] == m_patho.shape[:-1], \
'Incompatible shapes: %s vs %s vsv %s' % (str(x.shape), str(m_anato.shape), str(m_patho.shape))
# Predict anatomy segmentation from the anatomy encoding.
s = self.enc_anatomy.predict(x)
y = self.segmentor.predict(s)
rows = []
for i in range(x.shape[0]):
y_list = [y[i, :, :, chn] for chn in range(y.shape[-1])]
m_anato_list = [m_anato[i, :, :, chn] for chn in range(m_anato.shape[-1])]
# Pad ground truth with blank channels so both halves line up.
if m_anato.shape[-1] < y.shape[-1]:
m_anato_list += [np.zeros(shape=(m_anato.shape[1], m_anato.shape[2]))] * (y.shape[-1] - m_anato.shape[-1])
assert len(y_list) == len(m_anato_list), 'Incompatible sizes: %d vs %d' % (len(y_list), len(m_anato_list))
rows += [np.concatenate([x[i, :, :, current_select]] + y_list + m_anato_list, axis=1)]
im_plot = np.concatenate(rows, axis=0)
imsave(self.anato_segm_folder + '/segmentations_epoch_%d.png' % (epoch), im_plot)
def plot_pathology_segmentations(self, lb_images, epoch):
'''
Save one figure where each row shows an input image, the predicted
pathology-segmentation channels (computed from the image plus the
predicted anatomy), and the ground-truth pathology masks (zero-padded
when the prediction has more channels than the labels).
:param lb_images: list [images, anatomy_masks, pathology_masks] of 4-dim arrays
:param epoch: the epoch number (also selects which image channel to show)
'''
imags = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1] # [el[1] for el in lb_images]
patho_masks = lb_images[2] # [el[1] for el in lb_images]
current_select = epoch % imags.shape[3]
x = imags
m_anato = anato_masks
m_patho = patho_masks
assert x.shape[:-1] == m_anato.shape[:-1] == m_patho.shape[:-1], \
'Incompatible shapes: %s vs %s vsv %s' % (str(x.shape), str(m_anato.shape), str(m_patho.shape))
# Pathology is predicted from the image concatenated with the
# predicted anatomy segmentation.
s = self.enc_anatomy.predict(x)
predicted_anatomy = self.segmentor.predict(s)
y = self.enc_pathology.predict(np.concatenate([x, predicted_anatomy], axis=-1))
# Keep only the first channel of each pathology output head.
y_list =[]
for ii in range(m_patho.shape[-1]):
y_list.append(y[ii][:,:,:,0:1])
y = np.concatenate(y_list,axis=-1)
rows = []
for i in range(x.shape[0]):
y_list = [y[i, :, :, chn] for chn in range(y.shape[-1])]
m_patho_list = [m_patho[i, :, :, chn] for chn in range(m_patho.shape[-1])]
# Pad ground truth with blank channels so both halves line up.
if m_patho.shape[-1] < y.shape[-1]:
m_patho_list += [np.zeros(shape=(m_patho.shape[1], m_patho.shape[2]))] * (y.shape[-1] - m_patho.shape[-1])
assert len(y_list) == len(m_patho_list), 'Incompatible sizes: %d vs %d' % (len(y_list), len(m_patho_list))
rows += [np.concatenate([x[i, :, :, current_select]] + y_list + m_patho_list, axis=1)]
im_plot = np.concatenate(rows, axis=0)
imsave(self.patho_segm_folder + '/segmentations_epoch_%d.png' % (epoch), im_plot)
def plot_reconstructions(self, lb_images, epoch):
def _image_reconstruction(s,p,z_list):
rec_list = []
for current_z in z_list:
current_rec = self.reconstructor.predict([s,p,current_z])
rec_list.append(current_rec)
rec_image = np.concatenate(rec_list,axis=-1)
return rec_image
def _create_row(pathology_masks, z):
# y = self.reconstructor.predict([s, pathology_masks, z])
y = _image_reconstruction(s, pathology_masks, z)
zero_z_list = random_z_list = []
for ii in range(len(z)):
zero_z_list.append(np.zeros(z[0].shape))
random_z_list.append(gaussian.sample(z[0].shape))
y_s0 = _image_reconstruction(s, pathology_masks, zero_z_list)
all_bkg = np.concatenate([np.zeros(s.shape[:-1] + (s.shape[-1] - 1,)), np.ones(s.shape[:-1] + (1,))], axis=-1)
y_0z = _image_reconstruction(all_bkg, pathology_masks, z)
y_00 = _image_reconstruction(all_bkg, pathology_masks, zero_z_list)
y_random = _image_reconstruction(s, pathology_masks, random_z_list)
rows = [np.concatenate([_generate_mask_on_img(x[i, :, :, current_select], pathology_masks[i, :, :, :]),
_expand(x[i, :, :, current_select]),
_expand(y[i, :, :, current_select]),
_expand(y_random[i, :, :, current_select]),
_expand(y_s0[i, :, :, current_select])] +
[_expand(_image_reconstruction(self._get_s0chn(k, s), pathology_masks, z)[i, :, :, current_select]) for k in range(s.shape[-1] - 1)] +
[_expand(y_0z[i, :, :, current_select]),
_expand(y_00[i, :, :, current_select])], axis=1) for i in range(x.shape[0])]
return rows
def _expand(img):
return np.tile(np.expand_dims(img, axis=-1), [1,1,3])
def _generate_mask_on_img(img,mask):
img = np.expand_dims(img,axis=-1)
img_cpy = np.copy(img)
mask_pixel = np.copy(img)
mask = np.expand_dims(np.sum(mask,axis=-1), axis=-1)
mask_pixel[np.where(mask>=1)]=1
return np.concatenate([mask_pixel,img_cpy,img_cpy], axis=-1)
"""
Plot two images showing the combination of the spatial and modality LR to generate an image. The first
image uses the predicted S and Z and the second samples Z from a Gaussian.
:param lb_images: a list of 2 4-dim arrays of images + corresponding masks
:param ul_images: a list of 4-dim image arrays
:param epoch: the epoch number
"""
# combine labelled and unlabelled images and randomly sample 4 examples
images = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1]
patho_masks = lb_images[2]
pseudo_masks = np.zeros(shape=patho_masks.shape,dtype=patho_masks.dtype)
current_select = epoch % images.shape[3]
# if len(ul_images) > 0:
# images = np.concatenate([images, ul_images], axis=0)
# x = utils.data_utils.sample(images, nb_samples=4)
x = images
# S + Z -> Image
gaussian = NormalDistribution()
s = self.enc_anatomy.predict(x)
predicted_anatomy = self.segmentor.predict(s)
predicted_pathology = self.enc_pathology.predict(np.concatenate([x,predicted_anatomy], axis=-1))
predicted_pathology_list = []
for ii in range(len(predicted_pathology)):
predicted_pathology_list.append(predicted_pathology[ii][:,:,:,0:1])
predicted_pathology = np.concatenate(predicted_pathology_list, axis=-1)
z_with_real_pathology = self.enc_modality.predict([s, patho_masks, x])[0:self.conf.input_shape[-1]]
z_with_pseodu_health = self.enc_modality.predict([s, pseudo_masks, x])[0:self.conf.input_shape[-1]]
z_with_predicted_pathology = self.enc_modality.predict([s, predicted_pathology[:,:,:,0:patho_masks.shape[3]], x])[0:self.conf.input_shape[-1]]
rows_with_real_pathology = _create_row(pathology_masks=patho_masks, z=z_with_real_pathology)
rows_with_pseodu_health = _create_row(pathology_masks=pseudo_masks, z=z_with_pseodu_health)
rows_with_predicted_pathology = _create_row(pathology_masks=predicted_pathology, z=z_with_predicted_pathology)
header = utils.image_utils.makeTextHeaderImage(x.shape[2],
['pathology', 'X', 'rec(s,z)', 'rec(s,~z)', 'rec(s,0)'] +
['rec(s0_%d, z)' % k for k in range(s.shape[-1] - 1)] + [
'rec(0, z)', 'rec(0,0)'])
header = _expand(header)
im_plot_with_actual_pathology = np.concatenate([header] + rows_with_real_pathology, axis=0)
im_plot_with_pseudo_healthy = np.concatenate([header] + rows_with_pseodu_health, axis=0)
im_plot_with_predicted_pathology = np.concatenate([header] + rows_with_predicted_pathology, axis=0)
im_plot_actual_pathology = np.clip(im_plot_with_actual_pathology, -1, 1)
im_plot_pseudo_healthy = np.clip(im_plot_with_pseudo_healthy, -1, 1)
im_plot_predicted_pathology = np.clip(im_plot_with_predicted_pathology, -1, 1)
im = np.concatenate([im_plot_actual_pathology, im_plot_predicted_pathology,im_plot_pseudo_healthy], axis=0)
imsave(self.rec_folder + '/rec_epoch_%d.png' % epoch, im)
plt.figure()
plt.imshow(im, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.close()
def _get_s0chn(self, k, s):
s_res = s.copy()
chnk = s_res[..., k]
# move channel k 1s to the background
s_res[..., -1][chnk == 1] = 1
s_res[..., k] = 0
return s_res
def plot_reconstruction_classifier_outputs(self, lb_images, other_masks, epoch):
'''
Plot a histogram of predicted values by the discriminator
:param lb_images: a list of 2 4-dim arrays of images + corresponding masks
:param ul_images: a list of 4-dim image arrays
:param other_masks: a 4-dim array of masks with full anatomy: can be None
:param epoch: the epoch number
'''
imags = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1] # [el[1] for el in lb_images]
patho_masks = lb_images[2] # [el[1] for el in lb_images]
current_selected = epoch % imags.shape[3]
x = imags
m_anato = anato_masks
m_patho = patho_masks
m_pseudo_health = np.zeros(shape=m_patho.shape,dtype=m_patho.dtype)
print(m_anato.shape)
print(m_patho.shape)
print(self.discr_reconstruct_mask.input_shape[-1], self.model.Decoder.output_shape[-1])
s = self.enc_anatomy.predict(x)
predicted_anatomy = self.segmentor.predict(s)
predicted_pathology = self.enc_pathology.predict(np.concatenate([x, predicted_anatomy], axis=-1))
pred_z_actual_pathology, _ = self.enc_modality.predict([s, m_patho, x])
pred_z_pseudo_health, _ = self.enc_modality.predict([s, m_pseudo_health, x])
pred_z_predicted_pathology,_ = self.enc_modality.predict([s, predicted_pathology[:,:,:,
0:patho_masks.shape[3]], x])
pred_i_actual_pathology = self.reconstructor.predict([s, m_patho, pred_z_actual_pathology])
pred_i_pseudo_health = self.reconstructor.predict([s, m_pseudo_health, pred_z_pseudo_health])
pred_i_predicted_pathology = self.reconstructor.predict([s, predicted_pathology[:,:,:,
0:patho_masks.shape[3]],
pred_z_predicted_pathology])
plt.figure()
for i in range(x.shape[0]):
plt.subplot(x.shape[0], 2, 2 * i + 1)
m_allchn = np.concatenate([x[i, :, :, chn] for chn in range(x.shape[-1])], axis=1)
plt.imshow(m_allchn, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f / %.3f' % (self.discr_reconstruct_mask.predict(x[i:i + 1])[1][0][0],
self.discr_reconstruct_mask.predict(x[i:i + 1])[1][0][1]))
plt.subplot(x.shape[0], 2, 2 * i + 2)
pred_m_allchn = pred_i_actual_pathology[i:i + 1]
pred_m_allchn_img = np.concatenate([pred_m_allchn[0, :, :, chn] for chn in range(pred_m_allchn.shape[-1])],
axis=1)
plt.imshow(pred_m_allchn_img, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f / %.3f' % (self.discr_reconstruct_mask.predict(pred_m_allchn)[1][0][0],
self.discr_reconstruct_mask.predict(pred_m_allchn)[1][0][1]))
plt.tight_layout()
plt.savefig(self.reconstruct_classifier_folder +
'/classifier_reconstruction_epoch_%d_actual_pathology.png' % epoch)
plt.close()
plt.figure()
for i in range(x.shape[0]):
plt.subplot(x.shape[0], 2, 2 * i + 1)
m_allchn = np.concatenate([x[i, :, :, chn] for chn in range(x.shape[-1])], axis=1)
plt.imshow(m_allchn, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f / %.3f' % (self.discr_reconstruct_mask.predict(x[i:i + 1])[1][0][0],
self.discr_reconstruct_mask.predict(x[i:i + 1])[1][0][1]))
plt.subplot(x.shape[0], 2, 2 * i + 2)
pred_m_allchn = pred_i_pseudo_health[i:i + 1]
pred_m_allchn_img = np.concatenate([pred_m_allchn[0, :, :, chn] for chn in range(pred_m_allchn.shape[-1])],
axis=1)
plt.imshow(pred_m_allchn_img, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f / %.3f' % (self.discr_reconstruct_mask.predict(pred_m_allchn)[1][0][0],
self.discr_reconstruct_mask.predict(pred_m_allchn)[1][0][1]))
plt.tight_layout()
plt.savefig(self.reconstruct_classifier_folder + '/classifier_reconstruction_epoch_%d_pseudo_health.png' % epoch)
plt.close()
plt.figure()
for i in range(x.shape[0]):
plt.subplot(x.shape[0], 2, 2 * i + 1)
m_allchn = np.concatenate([x[i, :, :, chn] for chn in range(x.shape[-1])], axis=1)
plt.imshow(m_allchn, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f / %.3f' % (self.discr_reconstruct_mask.predict(x[i:i + 1])[1][0][0],
self.discr_reconstruct_mask.predict(x[i:i + 1])[1][0][1]))
plt.subplot(x.shape[0], 2, 2 * i + 2)
pred_m_allchn = pred_i_predicted_pathology[i:i + 1]
pred_m_allchn_img = np.concatenate([pred_m_allchn[0, :, :, chn] for chn in range(pred_m_allchn.shape[-1])],
axis=1)
plt.imshow(pred_m_allchn_img, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f / %.3f' % (self.discr_reconstruct_mask.predict(pred_m_allchn)[1][0][0],
self.discr_reconstruct_mask.predict(pred_m_allchn)[1][0][1]))
plt.tight_layout()
plt.savefig(
self.reconstruct_classifier_folder +
'/classifier_reconstruction_epoch_%d_predicted_pathology.png' % epoch)
plt.close()
def plot_reconstruction_discriminator_outputs(self, lb_images, other_masks, epoch):
'''
Plot a histogram of predicted values by the discriminator
:param lb_images: a list of 2 4-dim arrays of images + corresponding masks
:param ul_images: a list of 4-dim image arrays
:param other_masks: a 4-dim array of masks with full anatomy: can be None
:param epoch: the epoch number
'''
imags = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1] # [el[1] for el in lb_images]
patho_masks = lb_images[2] # [el[1] for el in lb_images]
current_selected = epoch % imags.shape[3]
x = imags
m_anato = anato_masks
m_patho = patho_masks
m_pseudo_health = np.zeros(shape=m_patho.shape,dtype=m_patho.dtype)
print(m_anato.shape)
print(m_patho.shape)
print(self.discr_reconstruct_mask.input_shape[-1], self.model.Decoder.output_shape[-1])
s = self.enc_anatomy.predict(x)
predicted_anatomy = self.segmentor.predict(s)
predicted_pathology = self.enc_pathology.predict(np.concatenate([x, predicted_anatomy], axis=-1))
pred_z_actual_pathology, _ = self.enc_modality.predict([s, m_patho, x])
pred_z_pseudo_health, _ = self.enc_modality.predict([s, m_pseudo_health, x])
pred_z_predicted_pathology, _ = self.enc_modality.predict([s,
predicted_pathology[:, :, :,
0:patho_masks.shape[3]], x])
pred_i_actual_pathology = self.reconstructor.predict([s, m_patho, pred_z_actual_pathology])
pred_i_pseudo_health = self.reconstructor.predict([s, m_pseudo_health, pred_z_pseudo_health])
pred_i_predicted_pathology = self.reconstructor.predict([s,
predicted_pathology[:, :, :,
0:patho_masks.shape[3]],
pred_z_predicted_pathology])
dm_input_fake_actual_pathology = pred_i_actual_pathology
dm_input_fake_pseudo_health = pred_i_pseudo_health
dm_input_fake_predicted_pathology = pred_i_predicted_pathology
dm_true = self.discr_reconstruct_mask.predict(x)[0].reshape(x.shape[0], -1).mean(axis=1)
dm_pred_actual_pathology = self.discr_reconstruct_mask.predict(dm_input_fake_actual_pathology)[0].\
reshape(pred_i_actual_pathology.shape[0], -1).mean(axis=1)
dm_pred_pseudo_health = self.discr_reconstruct_mask.predict(dm_input_fake_pseudo_health)[0].\
reshape(pred_i_pseudo_health.shape[0], -1).mean(axis=1)
dm_pred_predicted_pathology = self.discr_reconstruct_mask.predict(dm_input_fake_predicted_pathology)[0].\
reshape(dm_input_fake_predicted_pathology.shape[0], -1).mean(axis=1)
plt.figure()
plt.subplot(1, 1, 1)
plt.title('Reconstruction Discriminator with Actual Pathology')
plt.hist([dm_true, dm_pred_actual_pathology], stacked=True, normed=True)
plt.savefig(self.reconstruct_discr_folder +
'/discriminator_reconstruction_hist_epoch_%d_actual_pathology.png' % epoch)
plt.close()
plt.figure()
plt.subplot(1, 1, 1)
plt.title('Reconstruction Discriminator with Pseudo Health')
plt.hist([dm_true, dm_pred_pseudo_health], stacked=True, normed=True)
plt.savefig(self.reconstruct_discr_folder +
'/discriminator_reconstruction_hist_epoch_%d_pseudo_health.png' % epoch)
plt.close()
plt.figure()
plt.subplot(1, 1, 1)
plt.title('Reconstruction Discriminator with Pseudo Health')
plt.hist([dm_true, dm_pred_predicted_pathology], stacked=True, normed=True)
plt.savefig(
self.reconstruct_discr_folder +
'/discriminator_reconstruction_hist_epoch_%d_predicted_pathology.png' % epoch)
plt.close()
plt.figure()
for i in range(x.shape[0]):
plt.subplot(x.shape[0], 2, 2 * i + 1)
m_allchn = np.concatenate([x[i, :, :, chn] for chn in range(x.shape[-1])], axis=1)
plt.imshow(m_allchn, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_reconstruct_mask.predict(x[i:i + 1])[0].reshape(1, -1).mean(axis=1))
plt.subplot(x.shape[0], 2, 2 * i + 2)
pred_m_allchn = pred_i_actual_pathology[i:i + 1]
pred_m_allchn_img = np.concatenate([pred_m_allchn[0, :, :, chn]
for chn in range(pred_m_allchn.shape[-1])],
axis=1)
plt.imshow(pred_m_allchn_img, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_reconstruct_mask.predict(pred_m_allchn)[0].reshape(1, -1).mean(axis=1))
plt.tight_layout()
plt.savefig(self.reconstruct_discr_folder
+ '/discriminator_reconstruction_epoch_%d_actual_pathology.png' % epoch)
plt.close()
plt.figure()
for i in range(x.shape[0]):
plt.subplot(x.shape[0], 2, 2 * i + 1)
m_allchn = np.concatenate([x[i, :, :, chn] for chn in range(x.shape[-1])], axis=1)
plt.imshow(m_allchn, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_reconstruct_mask.predict(x[i:i + 1])[0].reshape(1, -1).mean(axis=1))
plt.subplot(x.shape[0], 2, 2 * i + 2)
pred_m_allchn = pred_i_pseudo_health[i:i + 1]
pred_m_allchn_img = np.concatenate([pred_m_allchn[0, :, :, chn]
for chn in range(pred_m_allchn.shape[-1])],
axis=1)
plt.imshow(pred_m_allchn_img, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_reconstruct_mask.predict(pred_m_allchn)[0].reshape(1, -1).mean(axis=1))
plt.tight_layout()
plt.savefig(self.reconstruct_discr_folder
+ '/discriminator_reconstruction_epoch_%d_pseudo_health.png' % epoch)
plt.close()
plt.figure()
for i in range(x.shape[0]):
plt.subplot(x.shape[0], 2, 2 * i + 1)
m_allchn = np.concatenate([x[i, :, :, chn] for chn in range(x.shape[-1])], axis=1)
plt.imshow(m_allchn, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_reconstruct_mask.predict(x[i:i + 1])[0].reshape(1, -1).mean(axis=1))
plt.subplot(x.shape[0], 2, 2 * i + 2)
pred_m_allchn = pred_i_predicted_pathology[i:i + 1]
pred_m_allchn_img = np.concatenate([pred_m_allchn[0, :, :, chn]
for chn in range(pred_m_allchn.shape[-1])],
axis=1)
plt.imshow(pred_m_allchn_img, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_reconstruct_mask.predict(pred_m_allchn)[0].reshape(1, -1).mean(axis=1))
plt.tight_layout()
plt.savefig(self.reconstruct_discr_folder
+ '/discriminator_reconstruction_epoch_%d_predicted_pathology.png' % epoch)
plt.close()
def plot_anatomy_mask_discriminator_outputs(self, lb_images, other_masks, epoch):
'''
Plot a histogram of predicted values by the discriminator
:param lb_images: a list of 2 4-dim arrays of images + corresponding masks
:param ul_images: a list of 4-dim image arrays
:param other_masks: a 4-dim array of masks with full anatomy: can be None
:param epoch: the epoch number
'''
imags = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1] # [el[1] for el in lb_images]
patho_masks = lb_images[2] # [el[1] for el in lb_images]
current_selected = epoch % imags.shape[3]
x = imags
m_anato = anato_masks
print(m_anato.shape)
print(self.discr_anato_mask.input_shape[-1], self.model.Decoder.output_shape[-1])
s = self.enc_anatomy.predict(x)
pred_m = self.segmentor.predict(s)
dm_input_fake = pred_m[:,:,:,:-1]
dm_true = self.discr_anato_mask.predict(m_anato).reshape(m_anato.shape[0], -1).mean(axis=1)
dm_pred = self.discr_anato_mask.predict(dm_input_fake).reshape(pred_m.shape[0], -1).mean(axis=1)
plt.figure()
plt.subplot(1, 1, 1)
plt.title('Anatomy Mask Discriminator')
plt.hist([dm_true, dm_pred], stacked=True, normed=True)
plt.savefig(self.anatomy_mask_discr_folder + '/discriminator_hist_epoch_%d.png' % epoch)
plt.close()
plt.figure()
for i in range(m_anato.shape[0]):
plt.subplot(4, 2, 2 * i + 1)
m_allchn = np.concatenate([m_anato[i, :, :, chn] for chn in range(m_anato.shape[-1])], axis=1)
plt.imshow(m_allchn, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_anato_mask.predict(m_anato[i:i + 1]).reshape(1, -1).mean(axis=1))
plt.subplot(4, 2, 2 * i + 2)
pred_m_allchn = pred_m[i:i + 1,:,:,:-1]
pred_m_allchn_img = np.concatenate([pred_m_allchn[0, :, :, chn] for chn in range(pred_m_allchn.shape[-1])],
axis=1)
plt.imshow(pred_m_allchn_img, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_anato_mask.predict(pred_m_allchn).reshape(1, -1).mean(axis=1))
plt.tight_layout()
plt.savefig(self.anatomy_mask_discr_folder + '/discriminator_mask_epoch_%d.png' % epoch)
plt.close()
def plot_pathology_mask_discriminator_outputs(self, lb_images, other_masks, epoch):
'''
Plot a histogram of predicted values by the discriminator
:param lb_images: a list of 2 4-dim arrays of images + corresponding masks
:param ul_images: a list of 4-dim image arrays
:param other_masks: a 4-dim array of masks with full anatomy: can be None
:param epoch: the epoch number
'''
imags = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1] # [el[1] for el in lb_images]
patho_masks = lb_images[2] # [el[1] for el in lb_images]
current_selected = epoch % imags.shape[3]
anato_background = np.ones(shape=anato_masks.shape[:-1]+(1,),dtype=anato_masks.dtype)
for ii in range(anato_masks.shape[-1]):
anato_background = anato_background - np.expand_dims(anato_masks[:,:,:,ii], axis=-1)
anato_masks = np.concatenate([anato_masks, anato_background], axis=-1)
x = imags
m_anato = anato_masks
m_patho = patho_masks
print(m_anato.shape)
print(self.discr_patho_mask.input_shape[-1], self.model.Decoder.output_shape[-1])
s = self.enc_anatomy.predict(x)
predicted_anatomy = self.segmentor.predict(s)
predicted_pathology_from_predicted_anatomy = self.enc_pathology.predict(np.concatenate([x,predicted_anatomy],axis=-1))
predicted_pathology_from_real_anatomy = self.enc_pathology.predict(np.concatenate([x,m_anato],axis=-1))
dm_true = self.discr_patho_mask.predict(m_patho).reshape(m_patho.shape[0], -1).mean(axis=1)
dm_input_fake = predicted_pathology_from_predicted_anatomy[:, :, :, :-1]
dm_pred = self.discr_patho_mask.predict(dm_input_fake).reshape(predicted_pathology_from_predicted_anatomy.shape[0], -1).mean(axis=1)
plt.figure()
plt.subplot(1, 1, 1)
plt.title('Pathology Mask Discriminator: From Predicted Anatomy')
plt.hist([dm_true, dm_pred], stacked=True, normed=True)
plt.savefig(self.pathology_mask_discr_folder
+ '/discriminator_hist_FromPredictedAnatomy_epoch_%d.png' % epoch)
plt.close()
plt.figure()
for i in range(m_patho.shape[0]):
plt.subplot(4, 2, 2 * i + 1)
m_allchn = np.concatenate([m_patho[i, :, :, chn] for chn in range(m_patho.shape[-1])], axis=1)
plt.imshow(m_allchn, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_patho_mask.predict(m_patho[i:i + 1]).reshape(1, -1).mean(axis=1))
plt.subplot(4, 2, 2 * i + 2)
pred_m_allchn = predicted_pathology_from_predicted_anatomy[i:i + 1,:,:,:-1]
pred_m_allchn_img = np.concatenate([pred_m_allchn[0, :, :, chn] for chn in range(pred_m_allchn.shape[-1])],
axis=1)
plt.imshow(pred_m_allchn_img, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_patho_mask.predict(pred_m_allchn).reshape(1, -1).mean(axis=1))
plt.tight_layout()
plt.savefig(self.anatomy_mask_discr_folder
+ '/discriminator_mask_FromPredictedAnatomy_epoch_%d.png' % epoch)
plt.close()
dm_input_fake = predicted_pathology_from_real_anatomy[:, :, :, :-1]
dm_pred = self.discr_patho_mask.predict(dm_input_fake).reshape(predicted_pathology_from_real_anatomy.shape[0], -1).mean(axis=1)
plt.figure()
plt.subplot(1, 1, 1)
plt.title('Pathology Mask Discriminator: From Real Anatomy')
plt.hist([dm_true, dm_pred], stacked=True, normed=True)
plt.savefig(self.pathology_mask_discr_folder
+ '/discriminator_hist_FromRealAnatomy_epoch_%d.png' % epoch)
plt.close()
plt.figure()
for i in range(m_patho.shape[0]):
plt.subplot(4, 2, 2 * i + 1)
m_allchn = np.concatenate([m_patho[i, :, :, chn] for chn in range(m_patho.shape[-1])], axis=1)
plt.imshow(m_allchn, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_patho_mask.predict(m_patho[i:i + 1]).reshape(1, -1).mean(axis=1))
plt.subplot(4, 2, 2 * i + 2)
pred_m_allchn = predicted_pathology_from_real_anatomy[i:i + 1, :, :, :-1]
pred_m_allchn_img = np.concatenate([pred_m_allchn[0, :, :, chn] for chn in range(pred_m_allchn.shape[-1])],
axis=1)
plt.imshow(pred_m_allchn_img, cmap='gray')
plt.xticks([])
plt.yticks([])
plt.title('Pred: %.3f' % self.discr_patho_mask.predict(pred_m_allchn).reshape(1, -1).mean(axis=1))
plt.tight_layout()
plt.savefig(self.anatomy_mask_discr_folder
+ '/discriminator_mask_FromRealAnatomy_epoch_%d.png' % epoch)
plt.close()
def plot_image_switch_lr(self, lb_images, epoch):
'''
Switch anatomy between two images and plot the synthetic result
:param lb_images: a list of 2 4-dim arrays of images + corresponding masks
:param ul_images: a list of 4-dim image arrays
:param epoch: the epoch number
'''
imags = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1]
patho_masks = lb_images[2]
pseudo_masks = np.zeros(shape=patho_masks.shape,dtype=patho_masks.dtype)
current_selected = epoch % imags.shape[3]
x = imags
s = self.enc_anatomy.predict(x)
predicted_anatomy = self.segmentor.predict(s)
predicted_pathology_from_predicted_anatomy = self.enc_pathology.\
predict(np.concatenate([x,predicted_anatomy],
axis=-1))
if anato_masks.shape[0]!=4 or patho_masks.shape[0]!=4:
return
rows = []
for i in range(0, imags.shape[0], 2):
x1 = x[i: i + 1]
x2 = x[i + 1: i + 2]
s1 = self.enc_anatomy.predict(x1)
z1, _ = self.enc_modality.predict([s1, predicted_pathology_from_predicted_anatomy[:,:,:,0:-1], x1])
s2 = self.enc_anatomy.predict(x2)
z2, _ = self.enc_modality.predict([s2, predicted_pathology_from_predicted_anatomy[:,:,:,0:-1], x2])
x11 = self.reconstructor.predict([s1, predicted_pathology_from_predicted_anatomy[:,:,:,0:-1], z1])
x12 = self.reconstructor.predict([s1, predicted_pathology_from_predicted_anatomy[:,:,:,0:-1],z2])
x21 = self.reconstructor.predict([s2, predicted_pathology_from_predicted_anatomy[:,:,:,0:-1],z1])
x22 = self.reconstructor.predict([s2, predicted_pathology_from_predicted_anatomy[:,:,:,0:-1],z2])
row = np.concatenate([x1[0, :, :, current_selected],
x11[0, :, :, current_selected],
x12[0, :, :, current_selected],
x21[0, :, :, current_selected],
x22[0, :, :, current_selected],
x2[0, :, :, current_selected]], axis=1)
rows.append(row)
header = utils.image_utils.makeTextHeaderImage(x.shape[2],
['X1', 'Rec(s1,z1)', 'Rec(s1,z2)', 'Rec(s2,z1)', 'Rec(s2,z2)',
'X2'])
image = np.concatenate([header] + rows, axis=0)
imsave(self.interp_folder + '/switch_lr_epoch_%d_actual_pathology.png' % (epoch), image)
rows = []
for i in range(0, imags.shape[0], 2):
x1 = x[i: i + 1]
x2 = x[i + 1: i + 2]
s1 = self.enc_anatomy.predict(x1)
z1, _ = self.enc_modality.predict([s1, pseudo_masks, x1])
s2 = self.enc_anatomy.predict(x2)
z2, _ = self.enc_modality.predict([s2, pseudo_masks, x2])
x11 = self.reconstructor.predict([s1, pseudo_masks, z1])
x12 = self.reconstructor.predict([s1, pseudo_masks, z2])
x21 = self.reconstructor.predict([s2, pseudo_masks, z1])
x22 = self.reconstructor.predict([s2, pseudo_masks, z2])
row = np.concatenate([x1[0, :, :, current_selected],
x11[0, :, :, current_selected],
x12[0, :, :, current_selected],
x21[0, :, :, current_selected],
x22[0, :, :, current_selected],
x2[0, :, :, current_selected]], axis=1)
rows.append(row)
header = utils.image_utils.makeTextHeaderImage(x.shape[2],
['X1', 'Rec(s1,z1)', 'Rec(s1,z2)', 'Rec(s2,z1)', 'Rec(s2,z2)',
'X2'])
image = np.concatenate([header] + rows, axis=0)
imsave(self.interp_folder + '/switch_lr_epoch_%d_pseudo_health.png' % (epoch), image)
def plot_image_interpolation(self, lb_images, epoch):
'''
Interpolate between two images and plot the transition in reconstructing the image.
:param lb_images: a list of 2 4-dim arrays of images + corresponding masks
:param ul_images: a list of 4-dim image arrays
:param epoch: the epoch number
'''
imags = lb_images[0] # [el[0] for el in lb_images]
anato_masks = lb_images[1]
patho_masks = lb_images[2]
pseudo_health_masks = np.zeros(shape=patho_masks.shape,dtype=patho_masks.dtype)
current_selected = epoch % imags.shape[3]
# if len(ul_images) > 0:
# imags = np.concatenate([imags, ul_images], axis=0)
# x = utils.data_utils.sample(imags, 4, seed=self.conf.seed)
x = imags
s = self.enc_anatomy.predict(x)
predicted_anatomy = self.segmentor.predict(s)
predicted_pathology_from_predicted_anatomy = self.enc_pathology.predict(np.concatenate([x,predicted_anatomy], axis=-1))
predicted_pathology_from_predicted_anatomy = predicted_pathology_from_predicted_anatomy[:,:,:,0:-1]
if anato_masks.shape[0] != 4 or patho_masks.shape[0] != 4:
return
for i in range(0, x.shape[0], 2):
x1 = x[i: i + 1]
s1 = self.enc_anatomy.predict(x1)
x2 = x[i + 1: i + 2]
s2 = self.enc_anatomy.predict(x2)
z1_actual_pathology = sdnet_utils.vae_sample([self.z_mean.
predict([s1,
predicted_pathology_from_predicted_anatomy, x1]),
self.z_var.
predict([s1,
predicted_pathology_from_predicted_anatomy, x1])])
z2_actual_pathology = sdnet_utils.vae_sample([self.z_mean.
predict([s2,
predicted_pathology_from_predicted_anatomy, x2]),
self.z_var.
predict([s2,
predicted_pathology_from_predicted_anatomy, x2])])
z1_pseudo_health = sdnet_utils.vae_sample([self.z_mean.predict([s1, pseudo_health_masks, x1]),
self.z_var.predict([s1, pseudo_health_masks, x1])])
z2_pseudo_health = sdnet_utils.vae_sample([self.z_mean.predict([s2, pseudo_health_masks, x2]),
self.z_var.predict([s2, pseudo_health_masks, x2])])
imsave(self.interp_folder + '/interpolation1_epoch_%d_actual_pathology.png' % epoch,
self._interpolate(s1, z1_actual_pathology, z2_actual_pathology, current_selected, patho_masks))
imsave(self.interp_folder + '/interpolation2_epoch_%d_actual_pathology.png' % epoch,
self._interpolate(s2, z2_actual_pathology, z1_actual_pathology, current_selected, patho_masks))
imsave(self.interp_folder + '/interpolation1_epoch_%d_pseudo_health.png' % epoch,
self._interpolate(s1, z1_pseudo_health, z2_pseudo_health, current_selected, pseudo_health_masks))
imsave(self.interp_folder + '/interpolation2_epoch_%d_pseudo_health.png' % epoch,
self._interpolate(s2, z2_pseudo_health, z1_pseudo_health, current_selected, pseudo_health_masks))
def _interpolate(self, s, z1, z2, current_selected, m):
row1, row2 = [], []
for w1, w2 in zip(np.arange(0, 1, 0.1), np.arange(1, 0, -0.1)):
sum = w1 * z1 + w2 * z2
rec = self.reconstructor.predict([s, m, sum])[0, :, :, current_selected]
if w1 < 0.5:
row1.append(rec)
else:
row2.append(rec)
return np.concatenate([np.concatenate(row1, axis=1), np.concatenate(row2, axis=1)], axis=0) | [
"falconjhc@gmail.com"
] | falconjhc@gmail.com |
c4b24cf35d4b377de870d3648ce56e6d70ebe71b | 79ad16a56df93085651886375920306e63121690 | /docs_src/tutorial/fastapi/limit_and_offset/tutorial001.py | 9bdf60446a6f961ddc89296af3342889726b0341 | [
"MIT"
] | permissive | macrosfirst/sqlmodel | 4286f72144afbf1476368e3fd0ca895852799046 | bda2e2818a3e7c2a18be4adf55bfea9bad83bfcc | refs/heads/main | 2023-08-14T02:09:27.072625 | 2021-09-29T13:31:54 | 2021-09-29T13:31:54 | 403,592,064 | 0 | 0 | MIT | 2021-09-29T13:31:55 | 2021-09-06T11:11:59 | Python | UTF-8 | Python | false | false | 1,599 | py | from typing import List, Optional
from fastapi import FastAPI, HTTPException, Query
from sqlmodel import Field, Session, SQLModel, create_engine, select
class HeroBase(SQLModel):
name: str
secret_name: str
age: Optional[int] = None
class Hero(HeroBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
class HeroCreate(HeroBase):
pass
class HeroRead(HeroBase):
id: int
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroRead)
def create_hero(hero: HeroCreate):
with Session(engine) as session:
db_hero = Hero.from_orm(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=List[HeroRead])
def read_heroes(offset: int = 0, limit: int = Query(default=100, lte=100)):
with Session(engine) as session:
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroRead)
def read_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
| [
"tiangolo@gmail.com"
] | tiangolo@gmail.com |
0396719e3079adfc788b7ebb764fa5b83d5d56c3 | b2ab2caae8d6a24dfb6e43852ed5fd416d912dad | /flask/day02flaskๆฒกๆapi/utils/ch_login.py | ec62393c203afbf7310991b009cf93b18b05e567 | [] | no_license | cheyunping77/learingnote | 781d55eb2e951049840e58ee41af3de8490fd37e | edba6c247eefe33829ba549068d67dcb288ea28b | refs/heads/master | 2023-08-18T12:57:45.624973 | 2023-07-24T06:47:11 | 2023-07-24T06:47:11 | 256,951,310 | 0 | 0 | null | 2020-04-19T08:35:17 | 2020-04-19T08:35:16 | null | UTF-8 | Python | false | false | 271 | py | from flask import url_for,redirect,session
def is_login(func):
def check_login():
user_session = session.get('user_id')
if user_session:
return func
else:
return redirect(url_for('user.login'))
return check_login | [
"380604322@qq.com"
] | 380604322@qq.com |
26fe6bcdf3b7a3144632734a6041c873371cfccb | c1db68ab2abc9c03a733e8de00eca134fe987a67 | /req2.py | 2fee3dd944dd30b7e0d8e08b7533c7b499cbfcb6 | [] | no_license | pooja-pichad/request | f64560c33d6b3f131ab25274f4c7ebc0c88c866e | bf9613b5b23f3df0a15e3367d3f100840ccef23f | refs/heads/main | 2023-08-21T09:35:50.905226 | 2021-10-14T09:06:28 | 2021-10-14T09:06:28 | 387,688,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,339 | py | import requests
import json
# calling a saral api
def saral():
saral_api = " http://saral.navgurukul.org/api/courses" #This link is a API and url
saral_url = requests.get(saral_api) #request is a server
# convert into a json
data = saral_url.json()
with open ("meraki_courses.json","w") as saral_data:
json.dump(data,saral_data,indent = 4)
# Here it was title welcome to navgurukul
print("")
print("* Welcome to navgurukul and Learn basic programming launguage *")
print("")
# And then find the cource name all.....
def parent():
serial_no = 0
for i in data["availableCourses"]:
print(serial_no+1 ,i["name"], i["id"])
serial_no=serial_no+1
parent()
print("")
user_input =int(input("Enter your courses number that you want to learn:- "))
parent_id=data["availableCourses"][user_input-1]["id"]
print(data["availableCourses"][user_input-1]["name"])
print("")
print("* Welcome to navgurukul and Learn basic programming launguage *")
print("")
# And then taking userinput in previous or next .... previous then it will be print all courses name next then it will be print parents...
user_input_1=input("if you want next or previous n/p: ")
if user_input_1=="p":
i=0
while i<len(data["availableCourses"]):
Courses = (data["availableCourses"][i]["name"])
print(i+1," ",Courses,data["availableCourses"][i]["id"])
i=i+1
user_input = int(input("Enter your courses number that you want to learn:-"))
print(data["availableCourses"][user_input-1]["name"])
# calling a parents api
parent_api = "http://saral.navgurukul.org/api/courses/"+str(data["availableCourses"][user_input-1]["id"])+"/exercises"
parent_url = requests.get(parent_api)
# parents api convert into a json
data_1 = parent_url.json()
# pusing a parents data into a json file
with open ("data.json","w") as child_data:
json.dump(data_1,child_data,indent=4)
def data():
serial_no_1=0
for i in data_1["data"]:
print(" ",serial_no_1+1,".",i["name"])
if len(i["childExercises"])>0:
s= 0
for j in i['childExercises']:
s = s+ 1
print( " ",s,j['name'])
else:
print(" 1",i["slug"])
serial_no_1+=1
data()
print("")
topic_no = int(input("Enter topic number that's you want to learn:- "))
serial_no_3= 0
my_list=[]
for l in data_1['data']:
serial_no_3+=1
if topic_no == serial_no_3:
user_input_3=input("Enter topic number that's you want to learn previous or next:- ")
if user_input_3=="p":
serial_no_1=0
for i in data_1["data"]:
print(" ",serial_no_1+1,".",i["name"])
if len(i["childExercises"])>0:
s= 0
for j in i['childExercises']:
s = s+ 1
print( " ",s,j['name'])
else:
print(" 1",i["slug"])
serial_no_1+=1
topic_no = int(input("Enter topic number that's you want to learn:- "))
m = 0
while m < len(data_1["data"][topic_no-1]["childExercises"]):
print(" ", m+1 ,data_1["data"][topic_no-1]["childExercises"][m]["name"])
slug = (data_1["data"][topic_no-1]["childExercises"][m]["slug"])
# calling a child exercise
child_exercises_url = ("http://saral.navgurukul.org/api/courses/" + str(parent_id) +"/exercise/getBySlug?slug=" + slug )
Data_3 = requests.get(child_exercises_url)
# converting data into a json file
convert_data = Data_3.json()
with open("content.json","w") as f:
json.dump(convert_data,f,indent=4)
my_list.append(convert_data["content"])
m = m + 1
# And then taking a user input in a choose the questions....
def Question():
questions_no = int(input("choose the specific questions no :- "))
question=questions_no-1
print(my_list[question])
while questions_no > 0 :
# Here a taking user input in a previous or next
next_question = input("do you next question or previous question n/p :- ")
if questions_no == len(my_list):
print("next page")
if next_question == "p" :
if questions_no == 1:
print("no more questions")
break
elif questions_no > 0:
questions_no = questions_no - 2
print(my_list[questions_no])
elif next_question == "n":
if questions_no < len(my_list):
index = questions_no + 1
print(my_list[index-1])
question = question + 1
questions_no = questions_no + 1
if question == (len(my_list)-1) :
print("next page")
break
Question()
saral() | [
"noreply@github.com"
] | pooja-pichad.noreply@github.com |
ccbc96dcb76deb5429a028de11d8bc19bc885cb9 | d9fb6c246965cbf290186268298859ddb913ee6e | /190930/test.py | 520808d2c39e67f84e60d7b5df8b7b7ab4bd31bd | [] | no_license | 91hongppie/algorithm | 1ca6d54de6eab252c708bf83835ace8a109d73fc | 4c2fa8178e0ef7afbf0b736387f05cbada72f95d | refs/heads/master | 2020-07-20T22:17:40.700366 | 2020-06-29T00:06:11 | 2020-06-29T00:06:11 | 206,717,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | import sys
sys.stdin = open('test.txt', 'r')
# def test(c, rc):
# global result, nums
# if rc not in result:
# result.append(rc)
# if c == nums:
# return
# test(c+1, rc+scores[c])
# test(c+1, rc)
N = int(input())
for tc in range(1, N+1):
nums = int(input())
scores = list(map(int, input().split()))
result = [0]*(sum(scores)+1)
result[0] = 1
u = 0
for i in range(len(scores)):
for j in range(sum(scores), -1, -1):
u += 1
if result[j]:
result[scores[i]+j] = 1
a = result.count(1)
print(u)
print('#{} {}'.format(tc, a))
| [
"91hongppie@gmail.com"
] | 91hongppie@gmail.com |
20245bfb78d653c8237477beb7e295e82abc8728 | cc1d44cf04e5b2b15bb296a434aad4ae4bcfc4be | /python3/sql/getloc.py | e7364928c50915bd728d4d97890c8e206995c6dd | [] | no_license | ericosur/ericosur-snippet | dda2200546b13fb9b84632d115a0f4ca5e3d5c47 | 0309eeb614612f9a35843e2f45f4080ae03eaa81 | refs/heads/main | 2023-08-08T04:54:05.907435 | 2023-07-25T06:04:01 | 2023-07-25T06:04:01 | 23,057,196 | 2 | 1 | null | 2022-08-31T09:55:19 | 2014-08-18T03:18:52 | Perl | UTF-8 | Python | false | false | 1,216 | py | #!/usr/bin/python3
# coding: utf-8
'''
query players.db to find location coordinates
'''
import sqlite3
import sys
try:
from hexdump import hexdump
except ImportError:
print("need install module: hexdump")
sys.exit(1)
class Solution():
''' sqlite and query '''
def __init__(self):
self.dbfile = 'players.db'
self.con = sqlite3.connect(self.dbfile)
@staticmethod
def show_blob(buffer):
''' show blob '''
print(type(buffer))
print(len(buffer))
hexdump(buffer)
def query_blob(self):
''' query blob '''
res = self.con.execute("SELECT data FROM localPlayers")
blob = res.fetchone()
self.show_blob(blob[0])
def query_xy(self):
''' query x, y location '''
res = self.con.execute("SELECT name,x,y FROM localPlayers")
(name, x, y) = res.fetchone()
print(f'player name: {name}')
x = int(x)
y = int(y)
print(f'https://map.projectzomboid.com/#{x}x{y}')
def run(self):
''' run '''
self.query_xy()
#self.query_blob()
def main():
''' main '''
sol = Solution()
sol.run()
if __name__ == '__main__':
main()
| [
"ericosur@gmail.com"
] | ericosur@gmail.com |
f67179075a44b0e73699d2357779d53d1c60decb | 868ac4e558cf5fe945e8b557564f34f79b3ad01e | /purity_fb/purity_fb_1dot10/models/directory_service_response.py | 2fc1bc7144e633dfc9badfed480f1a506388d3ff | [
"Apache-2.0"
] | permissive | mabdelhafez/purity_fb_python_client | f4253ce8497fb3cff648e0a0cd1e567f48129fa7 | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | refs/heads/master | 2022-04-20T09:24:22.031408 | 2020-04-20T22:11:32 | 2020-04-20T22:15:44 | 257,372,596 | 0 | 0 | NOASSERTION | 2020-04-20T18:40:24 | 2020-04-20T18:40:23 | null | UTF-8 | Python | false | false | 4,297 | py | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.10 Python SDK
Pure Storage FlashBlade REST 1.10 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.10
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class DirectoryServiceResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[DirectoryService]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
"""
DirectoryServiceResponse - a model defined in Swagger
"""
self._pagination_info = None
self._items = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""
Gets the pagination_info of this DirectoryServiceResponse.
pagination information, only available in GET requests
:return: The pagination_info of this DirectoryServiceResponse.
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""
Sets the pagination_info of this DirectoryServiceResponse.
pagination information, only available in GET requests
:param pagination_info: The pagination_info of this DirectoryServiceResponse.
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""
Gets the items of this DirectoryServiceResponse.
A list of directory service objects.
:return: The items of this DirectoryServiceResponse.
:rtype: list[DirectoryService]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this DirectoryServiceResponse.
A list of directory service objects.
:param items: The items of this DirectoryServiceResponse.
:type: list[DirectoryService]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, DirectoryServiceResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"mabdelhafez@purestorage.com"
] | mabdelhafez@purestorage.com |
0d2788ab9aafd86f7d209b60a0b3697107636ab2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/102/usersdata/212/49943/submittedfiles/av1_2.py | a70d5686bf818087b807f4b0822fd2efe3e0d999 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # -*- coding: utf-8 -*-
import math
n1=int(input('digite o primeiro valor da sequรชncia:'))
n2=int(input('digite o segundo valor da sequรชncia:'))
n3=int(input('digite o terceiro valor da sequรชncia:'))
n4=int(input('digite o quarto valor da sequรชncia:'))
if (n1==n3) or (n2==n4):
print('V')
else:
print('N') | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
7f8d73f6f3bba6793aeb102ca4f67544ac169569 | ec6cb8542c8ed962d24ca32fc1f060ef63fdfea7 | /็ฌฌไธ้ถๆฎต/่ฏพไธๅฎไพ็ปไน /13็ปงๆฟ_้ๆ_repr_/epidemic_information_system_v1.py | a8e759adedeb2f5fb91ff3d0795a5b9b4f5debde | [] | no_license | singerdo/songers | 27859a4ff704318d149b2aa6613add407d88bb5d | 9c5dcd80c6772272c933b06c156b33058cbd3ce4 | refs/heads/master | 2022-04-16T11:00:11.002138 | 2020-04-18T07:15:16 | 2020-04-18T07:15:16 | 256,686,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,688 | py | """
็ซๆ
ไฟกๆฏ็ณป็ป
้ๆฑ๏ผ
่พๅ
ฅ1,ๅพช็ฏ่ทๅ็ซๆ
ไฟกๆฏ.
็ปไน 1๏ผ
้ๆฑ๏ผ
่พๅ
ฅ2,ๆพ็คบๆๆ็ซๆ
ไฟกๆฏ.
ๆญฅ้ชค๏ผ
ๅจViewไธญๅคๆญๆฏๅฆ่พๅ
ฅ"2"
ๅจControllerไธญๅฎไน__list_epidemics็ๅช่ฏปๅฑๆง
ๅจViewไธญๆพ็คบไฟกๆฏ
"""
class EpidemicInformationModel:
"""
็ซๆ
ไฟกๆฏๆจกๅ
"""
def __init__(self, region="", confirmed=0, dead=0, cure=0, eid=0):
self.region = region
self.confirmed = confirmed
self.dead = dead
self.cure = cure
self.eid = eid
class EpidemicInformationView:
"""
็ซๆ
ไฟกๆฏ็้ข่งๅพ๏ผ่ด่ดฃๅค็็้ข้ป่พ
"""
def __init__(self):
self.__controller = EpidemicInformationController()
def __show_menu(self):
while True
try:
print("่พๅ
ฅ1้ฎ็ๅฝๅ
ฅ็ซๆ
ไฟกๆฏ")
break
except:
print("่พๅ
ฅๆ่ฏฏ๏ผ่ฏท้ๆฐ่พๅ
ฅ")
while True
try:
print("่พๅ
ฅ2้ฎ็ๆพ็คบ็ซๆ
ไฟกๆฏ")
break
except:
print("่พๅ
ฅๆ่ฏฏ๏ผ่ฏท้ๆฐ่พๅ
ฅ")
while True
try:
print("่พๅ
ฅ3้ฎ็ๆฅๆพ็ซๆ
ไฟกๆฏ")
break
except:
print("่พๅ
ฅๆ่ฏฏ๏ผ่ฏท้ๆฐ่พๅ
ฅ")
while True
try:
print("่พๅ
ฅ4้ฎ็ๅ ้ค็ซๆ
ไฟกๆฏ")
break
except:
print("่พๅ
ฅๆ่ฏฏ๏ผ่ฏท้ๆฐ่พๅ
ฅ")
def __select_menu(self):
item = input("่ฏท่พๅ
ฅ้้กน๏ผ")
if item == "1":
self.__input_epidemics()
elif item == "2":
self.__print_epidemics()
elif item == "3":
self.__select_epidemic()
elif item == "4":
self.__delete_epidemic()
def main(self):
while True:
self.__show_menu()
self.__select_menu()
def __input_epidemics(self):
while True:
region = input("่ฏท่พๅ
ฅๅฐๅบ,ๅฆ้้ๅบ่พๅ
ฅ็ฉบๅญ็ฌฆ๏ผ")
model = EpidemicInformationModel(region)
model.confirmed = int(input("่ฏท่พๅ
ฅ็กฎ่ฏไบบๆฐ๏ผ"))
model.dead = int(input("่ฏท่พๅ
ฅๆญปไบกไบบๆฐ๏ผ"))
model.cure = int(input("่ฏท่พๅ
ฅๆฒปๆไบบๆฐ๏ผ"))
# ๅญๅจๅฝๅๆฐๆฎ...
self.__controller.add_epidemic(model)
def __print_epidemics(self):
for info in self.__controller.list_epidemics:
self.__print_epidemic(info)
def __print_epidemic(self, info):
print("%s็็กฎ่ฏไบบๆฐ%d,ๆญปไบกไบบๆฐ%d,ๆฒปๆไบบๆฐ%d" % (info.region, info.confirmed, info.dead, info.cure))
def __select_epidemic(self):
region = input("่ฏท่พๅ
ฅๅฐๅบ๏ผ")
epidemic = self.__controller.get_epidemic_by_region(region)
if epidemic:
self.__print_epidemic(epidemic)
else:
print("ๆจ่พๅ
ฅ็ๅฐๅบๆฒกๆ็ซๆ
")
def __delete_epidemic(self):
eid = int(input("่ฏท่พๅ
ฅ้่ฆๅ ้ค็็ซๆ
ไฟกๆฏ็ผๅท๏ผ"))
if self.__controller.remove_epidemic_by_id(eid):
print("ๅ ้คๆๅ")
else:
print("ๅ ้คๅคฑ่ดฅ")
class EpidemicInformationController:
"""
็ซๆ
ไฟกๆฏ้ป่พๆงๅถๅจ๏ผ่ด่ดฃๅค็ไธๅก้ป่พ
"""
def __init__(self):
self.__list_epidemics = []
self.__eid_begin = 1000
@property
def list_epidemics(self):
return self.__list_epidemics
def add_epidemic(self, info):
"""
ๆทปๅ ็ซๆ
ไฟกๆฏ
:param info: ้่ฆๆทปๅ ็ไฟกๆฏ
"""
# ่ฎพ็ฝฎไฟกๆฏ็็ผๅท
info.eid = self.__eid_begin
self.__eid_begin += 1
# ๅญๅจๅ่กจ
self.__list_epidemics.append(info)
def get_epidemic_by_region(self, region):
"""
ๆ นๆฎๅฐๅบ่ทๅ็ซๆ
ไฟกๆฏ
:param region:
:return:
"""
for epidemic in self.__list_epidemics:
if epidemic.region == region:
return epidemic
def remove_epidemic_by_id(self, eid):
"""
ๆ นๆฎ็ผๅทๅ ้ค็ซๆ
ไฟกๆฏ
:param eid:
:return:ๆฏๅฆๅ ้คๆๅ
"""
for i in range(len(self.__list_epidemics)):
if self.__list_epidemics[i].eid == eid:
# ไฝฟ็จdelๅ ้ค,ๅ้ขๅฟ
้กป็ดขๅผๆ่
ๅ็ๅฎไฝ็ๅ่กจๅ
็ด
del self.__list_epidemics[i]
return True
return False
# ๅ
ฅๅฃ
view = EpidemicInformationView()
view.main()
#ๆฏไธช้ฝ่พๅ
ฅๅ | [
"569593546@qq.com"
] | 569593546@qq.com |
962ecb9cd49d0303277c9be28f8389ecfc558290 | 9f0a4262c4402201df1cdd5674a679543f4a50b5 | /studio_maya/resources/__init__.py | b007269247729602677317a32043f9797e916de2 | [] | no_license | subing85/subins-toolkits | 611b6b3b3012ccb023096f6e21d18d2bda5a534b | d02af1289ec3ee5bce6fa3d78c134a8847113aa6 | refs/heads/master | 2022-07-12T17:19:57.411454 | 2022-07-01T20:37:16 | 2022-07-01T20:37:16 | 168,826,548 | 11 | 2 | null | 2022-07-02T01:03:34 | 2019-02-02T11:51:25 | Mathematica | UTF-8 | Python | false | false | 2,342 | py | import os
import tempfile
import platform
CURRENT_PATH = os.path.dirname(__file__)
def getToolKit():
return "Studio Maya Interpreter", "smi", "0.0.1"
def getModuleName():
return "studio_maya"
def getIconPath():
return os.path.join(CURRENT_PATH, "icons")
def getLogo():
return os.path.join(CURRENT_PATH, "icons", "logo.png")
def getWorkspacePath():
if platform.system() == "Windows":
return os.path.join(
os.getenv("USERPROFILE"),
"Documents",
"studio_toolkits",
getModuleName(),
)
if platform.system() == "Linux":
return os.path.join(
os.getenv("HOME"),
"Documents",
"studio_toolkits",
getModuleName(),
)
def getPreferenceFile():
preference_path = os.path.join(getWorkspacePath(), "preference")
if not os.path.isdir(preference_path):
os.makedirs(preference_path)
return os.path.join(preference_path, "config.xml")
def getOperatingSystem():
return platform.system()
def getRootPath():
operating_system = getOperatingSystem()
if operating_system == "Windows":
return "C:/", "Autodesk/Maya", "mayapy.exe"
if operating_system == "Linux":
return "/", "autodesk/maya", "mayapy"
def getEditor():
operating_system = getOperatingSystem()
if operating_system == "Windows":
return "start wordpad"
if operating_system == "Linux":
return "kwrite"
def getFormats():
formats = {
"maya": [".ma", ".mb"],
"code": [".py", ".pyc", ".mel"],
}
return formats
def getImages():
images = {
".ma": "maya_ascii",
".mb": "maya_binary",
".py": "python",
".pyc": "Python_compile",
".mel": "mel",
}
return images
def getTempCodeFile():
return os.path.join(tempfile.gettempdir(), "studio_maya_temp.py")
def getInputPath():
path = os.path.join(CURRENT_PATH, "inputs").replace("\\", "/")
return path
def getScriptPath():
path = os.path.join(CURRENT_PATH, "scripts")
return path
def getToolKitLink():
return "https://www.subins-toolkits.com"
def getToolKitHelpLink():
return "https://www.subins-toolkits.com/studio-maya"
def getDownloadLink():
return "https://www.subins-toolkits.com/studio-maya"
| [
"subing85@gmail.com"
] | subing85@gmail.com |
eeab448cdc59c08d59f662483396beaf151639e5 | 9268f5f8ccbc91322eb12c5cc0be53e7678aeff7 | /docs/source/pgf2img.py | 0501d4a6cda356d0178a518747924bd1e7c344d8 | [
"BSD-3-Clause"
] | permissive | JupiterEthan/ted.python | 7f5e462a064b351d0520d73a3972be151979be23 | 1698a7f792db23123003ae4e2d39b4c18f25f347 | refs/heads/master | 2020-05-29T09:14:55.478502 | 2015-12-06T15:30:01 | 2015-12-06T15:30:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,884 | py | #!/usr/bin/env python
"""
Convert PGF files to an image file using pdflatex, pdfcrop, (from
texlive) and convert (from ImageMagick).
"""
import os
import sys
import tempfile
import time
import subprocess
def __find_exec(executable):
'''Try to find an executable in the system path.'''
if os.path.isfile(executable):
return executable
else:
paths = os.environ['PATH'].split(os.pathsep)
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
return f
return ''
def __check_for_exec(executable, msg):
'''Exit on error if the specified executable cannot be
found. Otherwise, return the path to the executable.'''
path = __find_exec(executable)
if path == '':
print msg
sys.exit()
else:
return path
def __run_cmd(cmd, msg, cwd=None, wait=30):
'''Run a system command; display an error message if it returns a
nonzero exit code or it stalls for more than the specified number
of seconds.'''
dev_null = open('/dev/null', 'w')
p = subprocess.Popen(cmd, stdout=dev_null, stderr=dev_null, shell=True, cwd=cwd)
tic = time.time()
while p.returncode == None and time.time() < tic+wait:
try:
p.poll()
except KeyboardInterrupt:
print 'manually killing command %s' % cmd
p.kill()
sys.exit(1)
if p.returncode == None:
print 'killing stalled command %s ' % cmd
p.kill()
if p.returncode < 0:
print msg
sys.exit(1)
# Check for required executables:
PDFLATEX = __check_for_exec('pdflatex', 'cannot find pdflatex')
PDFCROP = __check_for_exec('pdfcrop', 'cannot find pdfcrop')
CONVERT = __check_for_exec('convert', 'cannot find convert')
RM = __check_for_exec('rm', 'cannot find rm')
# Used to redirect program output to /dev/null:
redirect_output = ' 1>/dev/null 2>&1'
# Defaults:
default_template = """\\documentclass[10pt]{article}
\\usepackage{amsmath,amssymb,amsbsy,amsfonts,amsthm}
\\usepackage[landscape]{geometry}
\\usepackage{cmbright}
\\usepackage{tikz}
\\pagestyle{empty}
\\begin{document}
<>
\\end{document}
"""
default_density = 200
def pgf2img(input_filename, output_filename,
template=default_template, density=default_density):
"""Convert a PGF/TikZ file to an image file.
Parameters
----------
input_filename : str
Name of input PGF/TikZ file. The file must contain a
tikzpicture environment.
output_filename : str
Name of output file. The image format is determined
by the filename extension.
template : str
LaTeX template used to generate image.
density : int
Output image density (in DPI).
"""
# Open the input file:
try:
input_file = open(input_filename, 'r')
except IOError:
print 'error opening input file %s' % input_filename
sys.exit(1)
else:
input_data = ''.join(input_file.readlines())
# Combine the template and input file:
temp_data = template.replace('<>',input_data)
# Write the output to a temporary LaTeX file:
try:
temp_dirname = tempfile.mkdtemp()+os.sep
except IOError:
print 'error creating temporary directory %s' % temp_dirname
sys.exit(1)
else:
temp_latex_filename = temp_dirname + 'temp.tex'
try:
temp_latex_file = open(temp_latex_filename,'w')
except IOError:
print 'error opening temporary LaTeX file %s' % temp_latex_filename
sys.exit(1)
else:
temp_latex_file.writelines(temp_data.splitlines(True))
temp_latex_file.close()
# Process the temporary file with pdflatex:
__run_cmd(PDFLATEX + ' ' +
temp_latex_filename,
'error running pdflatex', temp_dirname)
# Crop the file with pdfcrop:
temp_latex_basename = os.path.splitext(temp_latex_filename)[0]
temp_pdf_filename = temp_latex_basename + '.pdf'
temp_pdf_cropped_filename = temp_latex_basename + '_cropped.pdf'
__run_cmd(PDFCROP + ' ' +
temp_pdf_filename + ' ' +
temp_pdf_cropped_filename,
'error running pdfcrop',temp_dirname)
# If the specified output file format is pdf, there is no need to run
# the generated file through convert:
output_ext = os.path.splitext(output_filename)[1]
if output_ext.lower() == '.pdf':
os.rename(temp_pdf_cropped_filename, output_filename)
else:
__run_cmd(CONVERT + ' -density ' + str(density) + ' ' +
temp_pdf_cropped_filename + ' ' +
output_filename,
'error running convert')
# Clean up the temporary work directory:
__run_cmd(RM + ' -rf ' + temp_dirname,
'error removing temporary directory %s' % temp_dirname)
| [
"lev@columbia.edu"
] | lev@columbia.edu |
ea095df30995166a7aff01937d139194c7dc74d5 | 60a48189cf63995416fe3a013ea8a3b83e0ad1fa | /bmw/wheelstand/admin.py | 3762869e62073ae59b86709c19e8331a65a303c1 | [] | no_license | wheelstand/bmw | d87550e7b16419ac5cd9c3bb73b8b22d28500d41 | ca60626033c1acb487cbafe31395ed5bc85dab0b | refs/heads/master | 2021-01-23T21:10:50.173495 | 2017-05-09T15:43:37 | 2017-05-09T15:43:37 | 90,672,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,804 | py | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import Models, Trim, ModelsShown, FeaturesAccessory, Exterior, Upholstery, Wheel, Package, Option, \
Location, TrimEnImage, TrimFrImage, Gallery, Interior, Performance, Colour, TestDrive, KeepingInTouch, StaticModel, \
StandAloneOption, OptionalEquipment
from django.forms import TextInput, Textarea
from django.db import models
# class GalleryInline(admin.TabularInline):
# model = Gallery
# extra = 3
# list_display = ('name_en', 'base_price', 'freight_DPI')
# verbose_name = "Image"
# verbose_name_plural = "Gallery"
# readonly_fields = ('image_thumb',)
class GalleryAdmin(admin.ModelAdmin):
model = Gallery
list_display = ('name', 'url', 'image_thumb')
# def get_model_perms(self, request):
# return {}
class ModelsAdmin(admin.ModelAdmin):
list_display = ('name_en', 'year', 'base_price', 'freight_DPI')
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr'), 'year', 'isBMWi'),
}),
(_('Prices'), {
'fields': ('base_price', 'colour',
'exterior', 'interior', 'trim_price', 'optional_equipement', 'wheels', 'freight_DPI', 'taxes'),
}),
)
class StaticModelENAdmin(admin.ModelAdmin):
list_display = ('name', 'url', 'link')
fieldsets = (
(_('Image'), {
'fields': ('name', 'url', 'link'),
}),
)
class StaticModelFRAdmin(admin.ModelAdmin):
list_display = ('name', 'url', 'link')
fieldsets = (
(_('Image'), {
'fields': ('name', 'url', 'link'),
}),
)
class StaticModelAdmin(admin.ModelAdmin):
list_display = ('name_en', 'url_en', 'name_fr', 'url_fr')
fieldsets = (
(_('Image'), {
'fields': ('name_en', 'url_en', 'name_fr', 'url_fr'),
}),
)
class TrimEnImageAdmin(admin.ModelAdmin):
list_display = ('url', 'link')
fieldsets = (
(_('Image'), {
'fields': ('name', 'url', 'link'),
}),
)
def get_model_perms(self, request):
return {}
class TrimFrImageAdmin(admin.ModelAdmin):
list_display = ('url', 'link')
fieldsets = (
(_('Image'), {
'fields': ('url', 'link'),
}),
)
def get_model_perms(self, request):
return {}
class FeaturesAccessoryInline(admin.TabularInline):
model = Trim.features_accessories.through
extra = 3
class PackagesInline(admin.TabularInline):
model = Trim.packages.through
extra = 3
class TrimAdmin(admin.ModelAdmin):
list_display = ('name_en', 'model')
filter_horizontal = ('upholsteries', 'gallery', 'colours')
# inlines = (FeaturesAccessoryInline,)
fieldsets = (
(_('General'), {
'fields': ('model', ('name_en', 'name_fr'), 'MSRP'),
}),
# (_('Logo'), {
# 'fields': ('logo', 'urllogo', 'link'),
# }),
(_('Performance'), {
'fields': ('power', 'torque', 'acceleration', 'car_range', 'range_value'),
}),
(_('Consumption'), {
'fields': ('top_speed', 'city', 'hwy', 'combined'),
}),
(_('Features & Accessories'), {
'fields': ('features_accessories',),
}),
(_('Colours'), {
'fields': ('colours',),
}),
(_('Upholstery'), {
'fields': ('upholsteries',),
}),
(_('Wheels'), {
'fields': ('wheels',),
}),
(_('Packages'), {
'fields': ('packages',),
}),
(_('Options'), {
'fields': (
('interior_en', 'interior_fr'), ('performance_en', 'performance_fr'), ('exterior_en', 'exterior_fr'))
}),
(_('Image'), {
'fields': ('gallery',),
}),
)
class ModelsShownAdmin(admin.ModelAdmin):
list_display = ('vehicle', 'price_override')
readonly_fields = ('image_thumb', 'image_thumb_fr')
filter_horizontal = ('optional_equipment', 'stand_alone_option', 'package')
fieldsets = (
(_('General'), {
'fields': ('vehicle',),
}),
# (_('Static Model Shown'), {
# 'fields': ('static_model_shown_en', 'static_model_shown_fr',),
# }),
(_('Image English'), {
'fields': ('url_en', 'link', 'image_thumb'),
}),
(_('Image French'), {
'fields': ('url_fr', 'link_fr', 'image_thumb'),
}),
(('Prices'), {
'fields': ('price_override', ('disclaimer_override_en', 'disclaimer_override_fr'), 'colour_en', 'colour_fr',
'exterior', 'interior', 'trim_price', 'wheels', 'taxes'),
}),
(('Locations'), {
'fields': ('location', 'optional_equipment', 'stand_alone_option', 'package'),
}),
(('Static Model'), {
'fields': ('static_model',),
}),
)
class FeaturesAccessoryAdmin(admin.ModelAdmin):
readonly_fields = ('image_thumb',)
list_display = ('name_en', 'image_thumb', 'trims')
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr',), ('description_en', 'description_fr',)),
}),
(_('Image'), {
'fields': ('url', 'link', 'image_thumb'),
}),
)
class ExteriorAdmin(admin.ModelAdmin):
list_display = ('name_en', 'trims')
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr',),),
}),
)
class UpholsteryAdmin(admin.ModelAdmin):
readonly_fields = ('image_thumb',)
list_display = ('name_en', 'trims')
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr',)),
}),
(_('Image'), {
'fields': ('url', 'link', 'image_thumb'),
}),
)
class WheelAdmin(admin.ModelAdmin):
readonly_fields = ('image_thumb',)
list_display = ('name_en', 'trims')
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr',)),
}),
(_('Image'), {
'fields': ('url', 'link', 'image_thumb'),
}),
)
class ModelInline(admin.TabularInline):
model = Package.package_model.through
extra = 1
verbose_name = "Model"
verbose_name_plural = 'Optional Equipment - Model'
class PackageAdmin(admin.ModelAdmin):
list_display = ('name_en', 'price_override', 'trims')
readonly_fields = ('image_thumb', 'trims')
inlines = (
ModelInline,
)
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr',), ('description_en', 'description_fr',), 'price_override'),
}),
(_('Image'), {
'fields': ('url', 'link', 'image_thumb'),
}),
(_('Details'), {
'fields': ('code', 'field_type'),
}),
)
class OptionAdmin(admin.ModelAdmin):
list_display = ('name_en', 'description_en', 'trims')
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr',), ('description_en', 'description_fr')),
}),
)
class LocationAdmin(admin.ModelAdmin):
list_display = ('name', 'language')
fieldsets = (
(_('General'), {
'fields': ('name', 'language', 'disclaimer_en', 'disclaimer_fr'),
}),
)
class InteriorAdmin(admin.ModelAdmin):
list_display = ('name_en', 'trims')
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr',),),
}),
)
class PerformanceAdmin(admin.ModelAdmin):
list_display = ('name_en', 'trims')
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr',),),
}),
)
class ColourAdmin(admin.ModelAdmin):
list_display = ('name_en', 'hexcode', 'trims')
fieldsets = (
(_('General'), {
'fields': (('name_en', 'name_fr',), 'hexcode'),
}),
)
class ModelsInline(admin.TabularInline):
model = StandAloneOption.stand_alone_model.through
extra = 1
verbose_name = "Model"
verbose_name_plural = 'Stand Alone Option - Model'
class StandAloneOptionAdmin(admin.ModelAdmin):
list_display = ('code', 'title_en', 'title_fr')
readonly_fields = ('stand_alone_model',)
inlines = (
ModelsInline,
)
fieldsets = (
(_('General'), {
'fields': ('code', 'title_en', 'title_fr', 'price'),
}),
)
class OptionalEquipmentInline(admin.TabularInline):
model = OptionalEquipment.optional_equipment_model.through
extra = 1
verbose_name = "Model"
verbose_name_plural = 'Optional Equipment - Model'
class OptionalEquipmentAdmin(admin.ModelAdmin):
list_display = ('code', 'title_en', 'title_fr')
readonly_fields = ('optional_equipment_model',)
inlines = (
OptionalEquipmentInline,
)
fieldsets = (
(_('General'), {
'fields': ('code', 'title_en', 'title_fr', 'price', 'field_type'),
}),
)
class TestDriveAdmin(admin.ModelAdmin):
list_display = ('first_name', 'last_name', 'contact_method', 'email', 'phone', 'status')
fieldsets = (
(_('Customer'), {
'fields': ('salutation', 'first_name', 'last_name', 'contact_method', 'email', 'phone'),
}),
(_('Dealer'), {
'fields': (
'purchase_intent', 'brochure', 'retailer_number', 'retailer_location', 'language', 'consent', 'consentB',
'vehicle', 'form_language', 'city', 'status'),
}),
)
class KeepingInTouchAdmin(admin.ModelAdmin):
list_display = ('first_name', 'last_name', 'email', 'preferredLanguage', 'consent', 'consentB', 'status')
fieldsets = (
(_('Customer'), {
'fields': (
'first_name', 'last_name', 'email', 'preferredLanguage', 'consent', 'consentB', 'form_language', 'city',
'status'),
}),
)
admin.site.register(Models, ModelsAdmin)
admin.site.register(TrimEnImage, TrimEnImageAdmin)
admin.site.register(TrimFrImage, TrimFrImageAdmin)
# admin.site.register(StaticModelEN, StaticModelENAdmin)
# admin.site.register(StaticModelFR, StaticModelENAdmin)
admin.site.register(StaticModel, StaticModelAdmin)
admin.site.register(Trim, TrimAdmin)
admin.site.register(ModelsShown, ModelsShownAdmin)
admin.site.register(FeaturesAccessory, FeaturesAccessoryAdmin)
# admin.site.register(Exterior, ExteriorAdmin)
admin.site.register(Upholstery, UpholsteryAdmin)
admin.site.register(Wheel, WheelAdmin)
admin.site.register(Package, PackageAdmin)
# admin.site.register(Option, OptionAdmin)
admin.site.register(Location, LocationAdmin)
admin.site.register(Gallery, GalleryAdmin)
# admin.site.register(Interior, InteriorAdmin)
# admin.site.register(Performance, PerformanceAdmin)
admin.site.register(Colour, ColourAdmin)
admin.site.register(TestDrive, TestDriveAdmin)
admin.site.register(KeepingInTouch, KeepingInTouchAdmin)
admin.site.register(StandAloneOption, StandAloneOptionAdmin)
admin.site.register(OptionalEquipment, OptionalEquipmentAdmin)
| [
"hypnopompicindex@gmail.com"
] | hypnopompicindex@gmail.com |
d0a8cb4d9fa41a0759b2ffea4039c1e1f5186a15 | 19603633d723d3b824ca9bce2994ce7e63dd1fc9 | /tests/integration/test_copy.py | 8073bc43754023f14388f1e5890dd8f5cb9091fb | [
"Apache-2.0"
] | permissive | TingDaoK/s3transfer | 710d761cc7406ff477291b45a21105f870f68813 | 95f34d02275d716addb6fe2f8aa5327ceff98e3d | refs/heads/develop | 2023-04-20T14:18:25.590390 | 2020-12-18T22:12:01 | 2020-12-18T22:12:01 | 319,548,025 | 0 | 1 | Apache-2.0 | 2021-05-31T23:10:42 | 2020-12-08T06:32:04 | Python | UTF-8 | Python | false | false | 2,986 | py | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import RecordingSubscriber
from tests.integration import BaseTransferManagerIntegTest
from s3transfer.manager import TransferConfig
class TestCopy(BaseTransferManagerIntegTest):
def setUp(self):
super(TestCopy, self).setUp()
self.multipart_threshold = 5 * 1024 * 1024
self.config = TransferConfig(
multipart_threshold=self.multipart_threshold)
def test_copy_below_threshold(self):
transfer_manager = self.create_transfer_manager(self.config)
key = '1mb.txt'
new_key = '1mb-copy.txt'
filename = self.files.create_file_with_size(
key, filesize=1024 * 1024)
self.upload_file(filename, key)
future = transfer_manager.copy(
copy_source={'Bucket': self.bucket_name, 'Key': key},
bucket=self.bucket_name,
key=new_key
)
future.result()
self.assertTrue(self.object_exists(new_key))
def test_copy_above_threshold(self):
transfer_manager = self.create_transfer_manager(self.config)
key = '20mb.txt'
new_key = '20mb-copy.txt'
filename = self.files.create_file_with_size(
key, filesize=20 * 1024 * 1024)
self.upload_file(filename, key)
future = transfer_manager.copy(
copy_source={'Bucket': self.bucket_name, 'Key': key},
bucket=self.bucket_name,
key=new_key
)
future.result()
self.assertTrue(self.object_exists(new_key))
def test_progress_subscribers_on_copy(self):
subscriber = RecordingSubscriber()
transfer_manager = self.create_transfer_manager(self.config)
key = '20mb.txt'
new_key = '20mb-copy.txt'
filename = self.files.create_file_with_size(
key, filesize=20 * 1024 * 1024)
self.upload_file(filename, key)
future = transfer_manager.copy(
copy_source={'Bucket': self.bucket_name, 'Key': key},
bucket=self.bucket_name,
key=new_key,
subscribers=[subscriber]
)
future.result()
# The callback should have been called enough times such that
# the total amount of bytes we've seen (via the "amount"
# arg to the callback function) should be the size
# of the file we uploaded.
self.assertEqual(subscriber.calculate_bytes_seen(), 20 * 1024 * 1024)
| [
"kyleknap@amazon.com"
] | kyleknap@amazon.com |
e7bc57d1a028c2d6796b49ebe2c1b947e021eb85 | 1ed536ef1527e6655217e731f622d643ece49c2b | /scripts/gpipe/pairs2gene_structure.py | 1b0134cfab7ba4d9bd647eed809068b2b6b31891 | [] | no_license | siping/cgat | de0f7af124eb38c72d7dece78fff83ff92ddbf96 | aa4cc85ffdc53998ea1a5ac5516df2d16c254d2e | refs/heads/master | 2021-01-22T13:03:18.060139 | 2013-10-07T15:53:55 | 2013-10-07T15:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,934 | py | ################################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#################################################################################
'''
gpipe/pairs2gene_structure.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python gpipe/pairs2gene_structure.py --help
Type::
python gpipe/pairs2gene_structure.py --help
for command line help.
Documentation
-------------
Code
----
'''
import os
import sys
import string
import re
import getopt
USAGE="""python %s [OPTIONS] < assignments > pairs
Version: $Id: gpipe/pairs2gene_structure.py 1799 2008-03-28 11:44:19Z andreas $
Take a list of orthologous transcripts and write out a list
of orthologous transcripts.
Options:
-h, --help print this message.
-v, --verbose loglevel.
-g, --genome-file= pattern for filenames with the genomic DNA (FASTA).
-c, --cds= filename with coding sequences
-f, --format= output format, valid options are:
paired_fasta: concatenated pairwise alignments in FASTA format
""" % sys.argv[0]
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.PredictionParser as PredictionParser
import alignlib
param_long_options=["verbose=", "help", "genome-file=", "format=",
"cds=", "version"]
param_short_options="v:hg:f:c:"
param_loglevel = 0
## pattern for genomes, %s is substituted for the sbjct_token
param_genome_file = "genome_%s.fasta"
## filename with cdss
param_filename_cdss = "cds.fasta"
## output format
param_format = "paired_fasta"
## prefix/suffix for output files
param_filename_suffix = ".fasta"
param_filename_prefix = ""
##------------------------------------------------------------
if __name__ == '__main__':
try:
optlist, args = getopt.getopt(sys.argv[1:],
param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o,a in optlist:
if o in ( "-v", "--verbose" ):
param_loglevel = int(a)
elif o in ( "-h", "--help" ):
print USAGE
sys.exit(0)
elif o in ( "--version", ):
print "version="
sys.exit(0)
elif o in ("-g", "--genome-file"):
param_genome_file = a
elif o in ("-c", "--cds"):
param_filename_cds = a
if len(args) > 0:
print USAGE, "no arguments required."
sys.exit(1)
print E.GetHeader()
print E.GetParams()
## reading CDS sequences
if param_filename_cds:
cds_sequences = Genomics.ReadPeptideSequences( open(param_filename_cds, "r") )
else:
cds_sequences = {}
if param_loglevel >= 1:
print "# read %i CDS sequences" % len(cds_sequences)
last_filename_genome = None
p = PredictionParser.PredictionParserEntry()
for line in sys.stdin:
if line[0] == "#": continue
if line[0] == '"': continue
p.Read(line)
## read genomic sequence
if "%s" in param_genome_file:
filename_genome = param_genome_file % p.mSbjctToken
else:
filename_genome = param_genome_file
if last_filename_genome != filename_genome:
if param_loglevel >= 2:
print "# reading genome %s" % filename_genome
forward_sequences, reverse_sequences = Genomics.ReadGenomicSequences( open(filename_genome, "r"))
last_filename_genome = filename_genome
if p.mSbjctStrand == "+":
genomic_sequence = forward_sequences[p.mSbjctToken]
else:
genomic_sequence = reverse_sequences[p.mSbjctToken]
try:
cds_fragment = cds_sequences[p.mQueryToken]
except KeyError:
print "# ERROR: cds not found: query %s." % p.mQueryToken
continue
genomic_fragment = genomic_sequence[p.mSbjctGenomeFrom:p.mSbjctGenomeTo]
if len(genomic_fragment) == 0:
raise "ERROR: empty fragment %s:%s for line" % (p.mSbjctGenomeFrom, p.mSbjctGenomeTo), line
map_query2sbjct, genomic_fragment = Genomics.Alignment2CDNA( p.mMapPeptide2Genome,
query_from = p.mQueryFrom - 1,
sbjct_from = 0,
genome = genomic_fragment )
## check for errors:
if map_query2sbjct.getRowTo() != p.mQueryTo * 3:
print str(p)
raise "# ERROR: boundary shift in query: %i %i" %( map_query2sbjct.getRowTo(), p.mQueryTo * 3 )
if map_query2sbjct.getColTo() > len(genomic_fragment):
print "# ERROR: length mismatch: genomic fragment (%i) shorter than last aligned residue (%i)" %\
(len(genomic_fragment), map_query2sbjct.getColTo())
print "#", line
print "# cds"
print "#", cds_fragment
print "# genomic"
print "#",genomic_fragment
continue
if map_query2sbjct.getRowTo() > len(cds_fragment):
print "# ERROR: length mismatch: cds fragment (%i) shorter than last aligned residue (%i)" %\
(len(cds_fragment), map_query2sbjct.getRowTo())
print "#", line
print "# cds"
print "#", cds_fragment
print "# genomic"
print "#",genomic_fragment
continue
cds_seq = alignlib.makeSequence( cds_fragment )
genomic_seq = alignlib.makeSequence( genomic_fragment )
data = map( lambda x: string.split(x, "\t"),
string.split( alignlib.writePairAlignment( cds_seq,
genomic_seq,
map_query2sbjct ), "\n" ))
row_ali, col_ali = Genomics.RemoveFrameShiftsFromAlignment(data[0][1], data[1][1])
row_ali = Genomics.MaskStopCodons( row_ali )
col_ali = Genomics.MaskStopCodons( col_ali )
if len(row_ali) != len(col_ali):
print "# ERROR: wrong alignment lengths."
sys.exit(1)
if len(row_ali) % 3 or len(col_ali) % 3:
print line
print row_ali
print col_ali
print len(row_ali), len(col_ali)
print " ERROR: non-codons in alignment."
sys.exit(1)
print ">%i\n%s" % (p.mPredictionId, row_ali)
print ">%s_vs_%s_%s_%i_%i\n%s" % \
(p.mQueryToken, p.mSbjctToken, p.mSbjctStrand, p.mSbjctGenomeFrom, p.mSbjctGenomeTo, col_ali)
print E.GetFooter()
| [
"andreas.heger@gmail.com"
] | andreas.heger@gmail.com |
bb1f91a8e88acabf8432b01b997fbc03d8e0aa9a | 7b065a6b01905a2da6ad2d00b6398aad150dc6c3 | /ๅบ็ก็ฅ่ฏ/4.ๆไปถๆไฝ/4.write()ๆนๆณ.py | 03c9d3080ae416f6c9431364f8d419f445021f9e | [] | no_license | ylwctyt/python3-1 | f4b0d8d6d0a7947170186b27bf51bc2f6e291ac7 | ca92e2dc9abc61265e48b7809cb12c3e572b5b6f | refs/heads/master | 2021-04-18T18:56:46.047193 | 2018-03-25T04:35:11 | 2018-03-25T04:35:11 | 126,699,773 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | import json
# f = open("test_write.txt","w",encoding="utf-8")
# f.write("HelloWorld\n")
# f.write("HelloWorld\n")
# f.close()
f = open("test_write.txt", "w", encoding="utf-8")
lines = ["1", "2", "3", "4", "5"]
# lines = [line+'\n' for line in lines]
f.writelines(lines) # ๅๅ
ฅไธไธชๅ่กจ
f.close()
# json ๅๅ
ฅๅฐๆไปถ
dict = {"key1": "val2", "key2": "val2", "key3": "val3"}
with open("test.txt", mode="a", encoding="utf8") as f:
for i in range(10):
f.write(json.dumps(dict, ensure_ascii=False))
f.write("\n")
| [
"359405466@qq.com"
] | 359405466@qq.com |
fc541b4fe07329e28fb81c87bf310cfde8ff531f | 8988a329c571cb04a5d97c691d0cd8bc4caf81d4 | /benchmarks/variables.py | 414b9a4cb3dafad00e29172ae8b4aee904f332d4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dwavesystems/dimod | 85329cbee86bdf5a73de05fa25884c877ea53002 | 8433f221a1e79101e1db0d80968ab5a2f59b865d | refs/heads/main | 2023-08-29T08:37:24.565927 | 2023-08-17T17:14:58 | 2023-08-17T17:14:58 | 100,658,303 | 118 | 93 | Apache-2.0 | 2023-09-13T18:15:37 | 2017-08-18T01:02:17 | Python | UTF-8 | Python | false | false | 1,568 | py | # Copyright 2022 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dimod.variables import Variables
class TimeConstuction:
num_variables = 1000
iterables = dict(range=range(num_variables),
strings=list(map(str, range(num_variables))),
integers=list(range(1000)),
empty=[],
none=None,
variables=Variables(range(1000)),
)
params = iterables.keys()
param_names = ['iterable']
def time_construction(self, key):
Variables(self.iterables[key])
class TimeIteration:
num_variables = 1000
variables = dict(string=Variables(map(str, range(num_variables))),
index=Variables(range(num_variables)),
integer=Variables(range(num_variables, 0, -1))
)
params = variables.keys()
param_names = ['labels']
def time_iteration(self, key):
for v in self.variables[key]:
pass
| [
"arcondello@gmail.com"
] | arcondello@gmail.com |
778faa010de5bb8612053f55b33329ac19019012 | cce8469586694aeea759a577c77bbac0652bec6f | /detectron2/config/config.py | c8270fd7ee447cd10497dc6253627ab0660c67c5 | [
"Apache-2.0"
] | permissive | veraposeidon/detectron2 | 9a4553289111bf6a83ecd3361eab836fb5ea076b | df2f2ab213e5c089ebc65b84786f766ba2b2b5d5 | refs/heads/master | 2020-09-26T09:05:06.044612 | 2019-12-30T09:38:29 | 2019-12-30T09:38:29 | 226,223,447 | 3 | 0 | Apache-2.0 | 2019-12-22T13:16:50 | 2019-12-06T01:47:30 | Jupyter Notebook | UTF-8 | Python | false | false | 3,560 | py | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from fvcore.common.config import CfgNode as _CfgNode
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
"""
# Note that the default value of allow_unsafe is changed to True
def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
loaded_cfg = _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
loaded_cfg = type(self)(loaded_cfg)
# defaults.py needs to import CfgNode
from .defaults import _C
latest_ver = _C.VERSION
assert (
latest_ver == self.VERSION
), "CfgNode.merge_from_file is only allowed on a config of latest version!"
logger = logging.getLogger(__name__)
loaded_ver = loaded_cfg.get("VERSION", None)
if loaded_ver is None:
from .compat import guess_version
loaded_ver = guess_version(loaded_cfg, cfg_filename)
assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
loaded_ver, self.VERSION
)
if loaded_ver == self.VERSION:
self.merge_from_other_cfg(loaded_cfg)
else:
# compat.py needs to import CfgNode
from .compat import upgrade_config, downgrade_config
logger.warning(
"Loading an old v{} config file '{}' by automatically upgrading to v{}. "
"See docs/CHANGELOG.md for instructions to update your files.".format(
loaded_ver, cfg_filename, self.VERSION
)
)
# To convert, first obtain a full config at an old version
old_self = downgrade_config(self, to_version=loaded_ver)
old_self.merge_from_other_cfg(loaded_cfg)
new_config = upgrade_config(old_self)
self.clear()
self.update(new_config)
def dump(self, *args, **kwargs):
"""
Returns:
str: a yaml string representation of the config
"""
# to make it show up in docs
return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
"""
Get a copy of the default config.
Returns:
a detectron2 CfgNode instance.
"""
from .defaults import _C
return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
"""
Let the global config point to the given cfg.
Assume that the given "cfg" has the key "KEY", after calling
`set_global_cfg(cfg)`, the key can be accessed by:
.. code-block:: python
from detectron2.config import global_cfg
print(global_cfg.KEY)
By using a hacky global config, you can access these configs anywhere,
without having to pass the config object or the values deep into the code.
This is a hacky feature introduced for quick prototyping / research exploration.
"""
global global_cfg
global_cfg.clear()
global_cfg.update(cfg)
| [
"veraposeidon@gmail.com"
] | veraposeidon@gmail.com |
7da94cb8e28b840f977542ad552b721f062f4c0f | e5ea9950d5f64b1e5ab7dad5ef63f5b443ca52ed | /API_Engine/models/fields.py | a904ded47eb68374eb66d8535c6abad620feb90d | [] | no_license | MediKnot/MediKnot-AI | f2e5c55d09a036580706470e6c1d8f6d7dc9635b | 97528506c25d3b31d404f2e181a39a887dbe2bb4 | refs/heads/main | 2023-06-03T13:33:46.483665 | 2021-06-24T14:15:21 | 2021-06-24T14:15:21 | 376,215,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | """MODELS - FIELDS
Definition of Fields used on model classes attributes.
We define them separately because the PersonUpdate and PersonCreate models need to re-define their attributes,
as they change from Optional to required.
Address could define its fields on the model itself, but we define them here for convenience
"""
# # Installed # #
from pydantic import Field
# # Package # #
from ..utils import get_time, get_uuid
__all__ = ("PersonFields", "AddressFields")
_string = dict(min_length=1)
"""Common attributes for all String fields"""
_unix_ts = dict(example=get_time())
"""Common attributes for all Unix timestamp fields"""
class PersonFields:
name = Field(
description="Full name of this person",
example="John Smith",
**_string
)
address = Field(
description="Address object where this person live"
)
address_update = Field(
description=f"{address.description}. When updating, the whole Address object is required, as it gets replaced"
)
birth = Field(
description="Date of birth, in format YYYY-MM-DD, or Unix timestamp",
example="1999-12-31"
)
age = Field(
description="Age of this person, if date of birth is specified",
example=20
)
person_id = Field(
description="Unique identifier of this person in the database",
example=get_uuid(),
min_length=36,
max_length=36
)
"""The person_id is the _id field of Mongo documents, and is set on PeopleRepository.create"""
created = Field(
alias="created",
description="When the person was registered (Unix timestamp)",
**_unix_ts
)
"""Created is set on PeopleRepository.create"""
updated = Field(
alias="updated",
description="When the person was updated for the last time (Unix timestamp)",
**_unix_ts
)
"""Created is set on PeopleRepository.update (and initially on create)"""
class AddressFields:
street = Field(
description="Main address line",
example="22nd Bunker Hill Avenue",
**_string
)
city = Field(
description="City",
example="Hamburg",
**_string
)
state = Field(
description="State, province and/or region",
example="Mordor",
**_string
)
zip_code = Field(
description="Postal/ZIP code",
example="19823",
**_string
)
| [
"ajinkyataranekar@gmail.com"
] | ajinkyataranekar@gmail.com |
a706d366c2b89cc2de1fa44bab761e41a23254c9 | 5f4f3ab6ece4eda1066bda2f80b9cf89a898f409 | /0x0B-python-input_output/10-class_to_json.py | 7e9784dda2a105257bb1f5945467692817f849a3 | [] | no_license | s0m35h1t/holbertonschool-higher_level_programming | 8af7f3bc54159efa05859f81ca3b9fb1739190e8 | f3b7ddeabf41b5cbc8460841c429b4b3bf254fea | refs/heads/master | 2020-09-28T20:32:23.955579 | 2020-05-14T20:22:50 | 2020-05-14T20:22:50 | 226,859,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | #!/usr/bin/python3
"""
get __dict__ class description
"""
def class_to_json(obj):
"""returns the dictionary description with simple data
structure (list, dictionary, string, integer and boolean)
for JSON serialization of an object
Args:
obj (Object): file name
Returns:
(Object) : dict
"""
return obj.__dict__
| [
"adib.grouz@gmail.com"
] | adib.grouz@gmail.com |
b669fc22e51ea7d2d9e31e0ef07ecec2abbde3ba | 931c17844683a4fbbefcf2bb2d5468d08cce7dbd | /03.Data_Science/Test.py | 2ff749cabbb0693acb9c49bb983bc82a8e8e96eb | [] | no_license | sungwooman91/python_code | d7c8046089bf492b70d21f4ee5a8676377e70592 | fdf20690d761b533efef2f247719f598c14f50c8 | refs/heads/master | 2020-03-26T05:14:27.104725 | 2018-08-13T07:46:11 | 2018-08-13T07:46:13 | 144,545,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | import csv # ์ธ๋ถ๋ชจ๋ ์ฌ์ฉ!!!
with open("Demographic_Statistics_By_Zip_Code.csv", newline="") as infile:
data = list(csv.reader(infile)) # ํ์ผ์ 'data'๋ผ๋ ๋ฆฌ์คํธ๋ก ์ถ๋ ฅ
#
# ## get_csv_rowInstance(row_index)
# ## COUNT FEMALE
# def get_csv_rowInstance(row_name): # data๋ฅผ
# find_row = data[0].index(row_name)
# row_instance = []
# for row in data[1:]:
# row_instance.append(int(row[find_row]))
#
# return row_instance
#
# print(get_csv_rowInstance("COUNT MALE"))
def get_csv_colInstance(primary_key) :
for col_instance in data[1:]:
if col_instance[0] == primary_key : return col_instance
else : continue
print(get_csv_colInstance(10002))ddsd | [
"tjddn636@naver.com"
] | tjddn636@naver.com |
576c3dddbc0a2fb79bb514302124bcd8b6350115 | 7624e7fca387651e278e1e9911b37c675e3a599c | /้ข่ฏ้ข&ๅๆ Offer/้ข่ฏ้ข 08.08. ๆ้ๅคๅญ็ฌฆไธฒ็ๆๅ็ปๅ.py | 0ec11ab8a9ef9a2062647d0c9544c4c35ca5027c | [] | no_license | homezzm/leetcode | 53269f1c94c040a41b03e4342d4c241e3f1102b5 | 63ac5a0921835b1e9d65f71e1346bbb7d66dad9b | refs/heads/master | 2023-03-03T09:28:16.974397 | 2021-02-15T03:21:17 | 2021-02-15T03:21:17 | 330,537,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | class Solution(object):
def permutation(self, S):
"""
https://leetcode-cn.com/problems/permutation-ii-lcci/
:type S: str
:rtype: List[str]
ๆ้ๅคๅญ็ฌฆไธฒ็ๆๅ็ปๅใ็ผๅไธ็งๆนๆณ๏ผ่ฎก็ฎๆๅญ็ฌฆไธฒ็ๆๆๆๅ็ปๅใ
็คบไพ1:่พๅ
ฅ๏ผS = "qqe" ่พๅบ๏ผ["eqq","qeq","qqe"]
"""
if not S or len(S) <= 1: return []
res, S, length = [], sorted(S), len(S) # ๆไธๅบ๏ผไฝฟ้ๅค็้ฝๅจไธ่ตท
def backtrack(used, paths):
if length == len(paths):
res.append(''.join(paths))
return
for i in range(length):
if used[i]:
continue # ๅทฒ็ป้ๆฉ่ฟ็ไธ้่ฆๅๆพ่ฟๅปไบ
if i > 0 and S[i] == S[i - 1] and not used[i - 1]:
continue # ๅฆๆๅฝๅ่็นไธไป็ๅไธไธช่็นไธๆ ท๏ผๅนถๅ
ถไป็ๅไธไธช่็นๅทฒ็ป่ขซ้ๅ่ฟไบ๏ผ้ฃๆไปฌไนๅฐฑไธ้่ฆไบใ
used[i] = True
paths.append(S[i])
backtrack(used, paths)
used[i] = False
paths.pop()
backtrack([False] * length, [])
return res
if __name__ == '__main__':
solution = Solution()
print(solution.permutation('qqe'))
| [
"homezzm@126.com"
] | homezzm@126.com |
5b9c41ad7bd7f537b5ec26e1e565ce2aa685a1a2 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/manjeetacrocs2/a.py | 430d5f4ef1493a5eda7c5d37ab4445304677ec9b | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 352 | py | t=int(input()); j=1;
for _ in range(t):
str1=input()
if int(str1)==0:
print("Case #{}: INSOMNIA".format(j))
j+=1
continue
s=set(); s|=set(list(str1))
sum1=int(str1);count=0
while len(s)!=10:
sum1+=int(str1); l3=list(str(sum1)); s|=set(l3)
print("Case #{}: {}".format(j,sum1))
j+=1 | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
edb372fc77442ea6e458189ac944b38057875627 | 928a04008a4258e5a351c7c2d68887bb45cbfed2 | /python/londiste/setup.py | fea3e154b92b74e62ae1d2061e3170ba81fb61c3 | [
"ISC"
] | permissive | ssinger/skytools-cvs | dc5682159eb852dc7e77b9592c0f9ffe7e6c260e | 7e4d24f2c0213d07bbf9e242badeeea2dc5ec1a6 | refs/heads/master | 2021-03-12T21:27:15.314521 | 2009-07-21T10:50:48 | 2009-07-21T10:50:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,301 | py | #! /usr/bin/env python
"""Londiste setup and sanity checker.
"""
import sys, os, skytools
import pgq.setadmin
__all__ = ['LondisteSetup']
class LondisteSetup(pgq.setadmin.SetAdmin):
initial_db_name = 'node_db'
extra_objs = [ skytools.DBSchema("londiste", sql_file="londiste.sql") ]
def __init__(self, args):
pgq.setadmin.SetAdmin.__init__(self, 'londiste', args)
self.set_name = self.cf.get("set_name")
def init_optparse(self, parser=None):
p = pgq.setadmin.SetAdmin.init_optparse(self, parser)
p.add_option("--expect-sync", action="store_true", dest="expect_sync",
help = "no copy needed", default=False)
p.add_option("--skip-truncate", action="store_true", dest="skip_truncate",
help = "dont delete old data", default=False)
p.add_option("--force", action="store_true",
help="force", default=False)
p.add_option("--all", action="store_true",
help="include all tables", default=False)
return p
def extra_init(self, node_type, node_db, provider_db):
if not provider_db:
return
pcurs = provider_db.cursor()
ncurs = node_db.cursor()
q = "select table_name from londiste.set_get_table_list(%s)"
pcurs.execute(q, [self.set_name])
for row in pcurs.fetchall():
tbl = row['table_name']
q = "select * from londiste.set_add_table(%s, %s)"
ncurs.execute(q, [self.set_name, tbl])
node_db.commit()
provider_db.commit()
def cmd_add(self, *args):
q = "select * from londiste.node_add_table(%s, %s)"
db = self.get_database('node_db')
self.exec_cmd_many(db, q, [self.set_name], args)
def cmd_remove(self, *args):
q = "select * from londiste.node_remove_table(%s, %s)"
db = self.get_database('node_db')
self.exec_cmd_many(db, q, [self.set_name], args)
def cmd_add_seq(self, *args):
q = "select * from londiste.node_add_seq(%s, %s)"
db = self.get_database('node_db')
self.exec_cmd_many(db, q, [self.set_name], args)
def cmd_remove_seq(self, *args):
q = "select * from londiste.node_remove_seq(%s, %s)"
db = self.get_database('node_db')
self.exec_cmd_many(db, q, [self.set_name], args)
def cmd_resync(self, *args):
q = "select * from londiste.node_resync_table(%s, %s)"
db = self.get_database('node_db')
self.exec_cmd_many(db, q, [self.set_name], args)
def cmd_tables(self):
q = "select table_name, merge_state from londiste.node_get_table_list(%s)"
db = self.get_database('node_db')
self.db_display_table(db, "Tables on node", q, [self.set_name])
def cmd_seqs(self):
q = "select seq_namefrom londiste.node_get_seq_list(%s)"
db = self.get_database('node_db')
self.db_display_table(db, "Sequences on node", q, [self.set_name])
def cmd_missing(self):
q = "select * from londiste.node_show_missing(%s)"
db = self.get_database('node_db')
self.db_display_table(db, "Missing objects on node", q, [self.set_name])
def cmd_check(self):
pass
def cmd_fkeys(self):
pass
def cmd_triggers(self):
pass
#
# Old commands
#
class LondisteSetup_tmp:
def find_missing_provider_tables(self, pattern='*'):
src_db = self.get_database('provider_db')
src_curs = src_db.cursor()
q = """select schemaname || '.' || tablename as full_name from pg_tables
where schemaname not in ('pgq', 'londiste', 'pg_catalog', 'information_schema')
and schemaname !~ 'pg_.*'
and (schemaname || '.' || tablename) ~ %s
except select table_name from londiste.provider_get_table_list(%s)"""
src_curs.execute(q, [glob2regex(pattern), self.pgq_queue_name])
rows = src_curs.fetchall()
src_db.commit()
list = []
for row in rows:
list.append(row[0])
return list
def admin(self):
cmd = self.args[2]
if cmd == "tables":
self.subscriber_show_tables()
elif cmd == "missing":
self.subscriber_missing_tables()
elif cmd == "add":
self.subscriber_add_tables(self.args[3:])
elif cmd == "remove":
self.subscriber_remove_tables(self.args[3:])
elif cmd == "resync":
self.subscriber_resync_tables(self.args[3:])
elif cmd == "register":
self.subscriber_register()
elif cmd == "unregister":
self.subscriber_unregister()
elif cmd == "install":
self.subscriber_install()
elif cmd == "check":
self.check_tables(self.get_provider_table_list())
elif cmd in ["fkeys", "triggers"]:
self.collect_meta(self.get_provider_table_list(), cmd, self.args[3:])
elif cmd == "seqs":
self.subscriber_list_seqs()
elif cmd == "add-seq":
self.subscriber_add_seq(self.args[3:])
elif cmd == "remove-seq":
self.subscriber_remove_seq(self.args[3:])
elif cmd == "restore-triggers":
self.restore_triggers(self.args[3], self.args[4:])
else:
self.log.error('bad subcommand: ' + cmd)
sys.exit(1)
def collect_meta(self, table_list, meta, args):
"""Display fkey/trigger info."""
if args == []:
args = ['pending', 'active']
field_map = {'triggers': ['table_name', 'trigger_name', 'trigger_def'],
'fkeys': ['from_table', 'to_table', 'fkey_name', 'fkey_def']}
query_map = {'pending': "select %s from londiste.subscriber_get_table_pending_%s(%%s)",
'active' : "select %s from londiste.find_table_%s(%%s)"}
table_list = self.clean_subscriber_tables(table_list)
if len(table_list) == 0:
self.log.info("No tables, no fkeys")
return
dst_db = self.get_database('subscriber_db')
dst_curs = dst_db.cursor()
for which in args:
union_list = []
fields = field_map[meta]
q = query_map[which] % (",".join(fields), meta)
for tbl in table_list:
union_list.append(q % skytools.quote_literal(tbl))
# use union as fkey may appear in duplicate
sql = " union ".join(union_list) + " order by 1"
desc = "%s %s" % (which, meta)
self.display_table(desc, dst_curs, fields, sql)
dst_db.commit()
def check_tables(self, table_list):
src_db = self.get_database('provider_db')
src_curs = src_db.cursor()
dst_db = self.get_database('subscriber_db')
dst_curs = dst_db.cursor()
failed = 0
for tbl in table_list:
self.log.info('Checking %s' % tbl)
if not skytools.exists_table(src_curs, tbl):
self.log.error('Table %s missing from provider side' % tbl)
failed += 1
elif not skytools.exists_table(dst_curs, tbl):
self.log.error('Table %s missing from subscriber side' % tbl)
failed += 1
else:
failed += self.check_table_columns(src_curs, dst_curs, tbl)
src_db.commit()
dst_db.commit()
return failed
def restore_triggers(self, tbl, triggers=None):
tbl = skytools.fq_name(tbl)
if tbl not in self.get_subscriber_table_list():
self.log.error("Table %s is not in the subscriber queue." % tbl)
sys.exit(1)
dst_db = self.get_database('subscriber_db')
dst_curs = dst_db.cursor()
if not triggers:
q = "select count(1) from londiste.subscriber_get_table_pending_triggers(%s)"
dst_curs.execute(q, [tbl])
if not dst_curs.fetchone()[0]:
self.log.info("No pending triggers found for %s." % tbl)
else:
q = "select londiste.subscriber_restore_all_table_triggers(%s)"
dst_curs.execute(q, [tbl])
else:
for trigger in triggers:
q = "select count(1) from londiste.find_table_triggers(%s) where trigger_name=%s"
dst_curs.execute(q, [tbl, trigger])
if dst_curs.fetchone()[0]:
self.log.info("Trigger %s on %s is already active." % (trigger, tbl))
continue
q = "select count(1) from londiste.subscriber_get_table_pending_triggers(%s) where trigger_name=%s"
dst_curs.execute(q, [tbl, trigger])
if not dst_curs.fetchone()[0]:
self.log.info("Trigger %s not found on %s" % (trigger, tbl))
continue
q = "select londiste.subscriber_restore_table_trigger(%s, %s)"
dst_curs.execute(q, [tbl, trigger])
dst_db.commit()
def check_table_columns(self, src_curs, dst_curs, tbl):
src_colrows = find_column_types(src_curs, tbl)
dst_colrows = find_column_types(dst_curs, tbl)
src_cols = make_type_string(src_colrows)
dst_cols = make_type_string(dst_colrows)
if src_cols.find('k') < 0:
self.log.error('provider table %s has no primary key (%s)' % (
tbl, src_cols))
return 1
if dst_cols.find('k') < 0:
self.log.error('subscriber table %s has no primary key (%s)' % (
tbl, dst_cols))
return 1
if src_cols != dst_cols:
self.log.warning('table %s structure is not same (%s/%s)'\
', trying to continue' % (tbl, src_cols, dst_cols))
err = 0
for row in src_colrows:
found = 0
for row2 in dst_colrows:
if row2['name'] == row['name']:
found = 1
break
if not found:
err = 1
self.log.error('%s: column %s on provider not on subscriber'
% (tbl, row['name']))
elif row['type'] != row2['type']:
err = 1
self.log.error('%s: pk different on column %s'
% (tbl, row['name']))
return err
def find_missing_subscriber_tables(self, pattern='*'):
src_db = self.get_database('subscriber_db')
src_curs = src_db.cursor()
q = """select schemaname || '.' || tablename as full_name from pg_tables
where schemaname not in ('pgq', 'londiste', 'pg_catalog', 'information_schema')
and schemaname !~ 'pg_.*'
and schemaname || '.' || tablename ~ %s
except select table_name from londiste.provider_get_table_list(%s)"""
src_curs.execute(q, [glob2regex(pattern), self.pgq_queue_name])
rows = src_curs.fetchall()
src_db.commit()
list = []
for row in rows:
list.append(row[0])
return list
| [
"markokr@gmail.com"
] | markokr@gmail.com |
7a0f902f4fd2b98ce87a8b8ed1548b76623a4291 | 23514a0e2baf6da053690dd511f1eef75a573e6b | /log-mining/com/haodou/log-mining/CollectionUtil.py | 4ebb82ebf1ce316a6ec6163875d3288a73b45c03 | [] | no_license | rainly/scripts-1 | b5f31880a1a917df23e4c110bb7661685851eff5 | 3ef01a58162b94fb36cdd38581c899d8a118eda0 | refs/heads/master | 2020-07-12T01:41:31.491344 | 2019-08-27T08:50:10 | 2019-08-27T08:50:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py |
def cut(rs,N):
return sorted(rs.items(), key=lambda d: d[1],reverse=True)[0:N]
import random
def test():
N=5
rs={}
for i in range(N*2):
rs[i]=int(3*random.random())
print rs
cut(rs,N)
print rs
if __name__=="__main__":
test()
def halfCut(ts,min=0,max=100):
n=len(ts)/2
if n < min:
n=min
if n > max:
n=max
if n <= 0:
return {}
#print n
ret = cut(ts,n)
#print ret
return ret
| [
"zhaoweiguo@vxiaoke360.com"
] | zhaoweiguo@vxiaoke360.com |
ad582a3d28ae7f94a8654318676c5e54db2755de | 17dca703eed28a859bba4984eba5b039b900e3d7 | /operaciones/views.py | dc3dc8fde486273a340e37d206351d0718decdb2 | [] | no_license | alexogch1/SistemaOperaciones | 1a34872daf0e151672edd202a5089ee754805203 | ac72f6e3284061e240aebec6a3300ff463a3544c | refs/heads/master | 2021-01-03T15:32:45.470642 | 2020-03-03T07:47:27 | 2020-03-03T07:47:27 | 240,133,319 | 0 | 1 | null | 2020-02-28T05:21:57 | 2020-02-12T23:02:36 | Python | UTF-8 | Python | false | false | 6,699 | py | from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views .generic.base import TemplateView
from django.http.response import HttpResponse
from openpyxl import Workbook
from openpyxl.styles import Alignment, Border,Font,PatternFill,Side
from django.views import generic
from django.urls import reverse_lazy
from dateutil.parser import parse
from .models import TipoCambio
from .form import TipoCambioForm
class TipoCambioView(LoginRequiredMixin, generic.ListView):
model = TipoCambio
template_name = "operaciones/tc_list.html"
context_object_name = "obj"
login_url = "base:login"
class TipoCambioNew(LoginRequiredMixin, generic.CreateView):
model=TipoCambio
template_name='operaciones/tc_form.html'
context_object_name='obj'
form_class=TipoCambioForm
success_url=reverse_lazy('operaciones:tc_list')
login_required = "base:login"
def form_valid(self,form):
form.instance.uc = self.request.user
return super().form_valid(form)
class TipoCambioEdit(LoginRequiredMixin, generic.UpdateView):
model=TipoCambio
template_name='operaciones/tc_form.html'
context_object_name='obj'
form_class=TipoCambioForm
success_url=reverse_lazy('operaciones:tc_list')
login_required = "base:login"
def form_valid(self,form):
form.instance.um = self.request.user.id
return super().form_valid(form)
class ReporteTC(TemplateView):
def get (self, request, *args, **kwargs):
print(request.GET.get('campo'))
fecha_sel = request.GET.get('campo')
fecha_sel_parse = parse(fecha_sel)
print('fecha ',fecha_sel_parse.date())
query = TipoCambio.objects.filter(fecha=fecha_sel)
wb = Workbook()
ws = wb.active
ws.tittle='TipoCambio'
#Establer el nombre del archivo
nombre_archivo = "ReporteTC.xlsx"
ws['B1'].alignment= Alignment(horizontal='left', vertical='center')
ws['B1'].border =Border(left=Side(border_style='thin'),right=Side(border_style='thin'),
top=Side(border_style='thin'), bottom=Side(border_style='thin'))
ws['B1'].fill = PatternFill(start_color='66FFCC', end_color='66FFCC', fill_type='solid')
ws['B1'].font = Font(name='calibri', size=12, bold=True)
ws['B1']='Mar Bran S.A. de C.V.'
ws.merge_cells('B1:F1')
ws['B2'].alignment= Alignment(horizontal='left', vertical='center')
ws['B2'].border =Border(left=Side(border_style='thin'),right=Side(border_style='thin'),
top=Side(border_style='thin'), bottom=Side(border_style='thin'))
ws['B2'].fill = PatternFill(start_color='66FFCC', end_color='66FFCC', fill_type='solid')
ws['B2'].font = Font(name='calibri', size=12, bold=True)
ws['B2']='Innovaciรณn, Mejora Continua y Six Sigma'
ws.merge_cells('B2:F2')
ws['B3'].alignment= Alignment(horizontal='left', vertical='center')
ws['B3'].border =Border(left=Side(border_style='thin'),right=Side(border_style='thin'),
top=Side(border_style='thin'), bottom=Side(border_style='thin'))
ws['B3'].fill = PatternFill(start_color='66FFCC', end_color='66FFCC', fill_type='solid')
ws['B3'].font = Font(name='calibri', size=12, bold=True)
ws['B3']='Tipo de Cambio'
ws.merge_cells('B3:F3')
ws.row_dimensions[1].height=20
ws.row_dimensions[2].height=20
ws.row_dimensions[3].height=20
ws.column_dimensions['B'].width=20
ws.column_dimensions['C'].width=20
ws.column_dimensions['D'].width=20
ws.column_dimensions['E'].width=20
ws['B6'].alignment= Alignment(horizontal='center', vertical='center')
ws['B6'].border =Border(left=Side(border_style='thin'),right=Side(border_style='thin'),
top=Side(border_style='thin'), bottom=Side(border_style='thin'))
ws['B6'].fill = PatternFill(start_color='66CFCC', end_color='66CFCC', fill_type='solid')
ws['B6'].font = Font(name='calibri', size=11, bold=True)
ws['B6']='Fecha'
ws['C6'].alignment= Alignment(horizontal='center', vertical='center')
ws['C6'].border =Border(left=Side(border_style='thin'),right=Side(border_style='thin'),
top=Side(border_style='thin'), bottom=Side(border_style='thin'))
ws['C6'].fill = PatternFill(start_color='66CFCC', end_color='66CFCC', fill_type='solid')
ws['C6'].font = Font(name='calibri', size=11, bold=True)
ws['C6']='Tipo de Cambio'
controlador = 7
for q in query:
ws.cell(row=controlador,column=2).alignment= Alignment(horizontal='center', vertical='center')
ws.cell(row=controlador,column=2).border =Border(left=Side(border_style='thin'),right=Side(border_style='thin'),
top=Side(border_style='thin'), bottom=Side(border_style='thin'))
ws.cell(row=controlador,column=2).fill = PatternFill(start_color='66CFCC', end_color='66CFCC', fill_type='solid')
ws.cell(row=controlador,column=2).font = Font(name='calibri', size=11, bold=True)
ws.cell(row=controlador,column=2).value=q.fecha
ws.cell(row=controlador,column=3).alignment= Alignment(horizontal='center', vertical='center')
ws.cell(row=controlador,column=3).border =Border(left=Side(border_style='thin'),right=Side(border_style='thin'),
top=Side(border_style='thin'), bottom=Side(border_style='thin'))
ws.cell(row=controlador,column=3).fill = PatternFill(start_color='66CFCC', end_color='66CFCC', fill_type='solid')
ws.cell(row=controlador,column=3).font = Font(name='calibri', size=11, bold=True)
ws.cell(row=controlador,column=3).value=q.tipo_cambio
#contador+=1
controlador +=1
#Definir el tipo de resupuesta a dar
response = HttpResponse(content_type='application/ms-excel')
contenido = "attachment; filename = {0}".format(nombre_archivo)
response["Content-Disposition"] = contenido
wb.save(response)
return response
#@login_required(login_url="/login/")
#@permission_required("catalogos.change_ingred",login_url="/login/")
def tc_inactivar(request,id):
tc = TipoCambio.objects.filter(pk=id).first()
if request.method=="POST":
if tc:
tc.estado = not tc.estado
tc.save()
return HttpResponse("OK")
return HttpResponse("FAIL")
return HttpResponse("FAIL") | [
"alexogch@hotmail.com"
] | alexogch@hotmail.com |
bbff0812d21cbd950f8dcd096fa53a300491a14b | 8a73f252c333d9be87ad3827f6880fb47b43625f | /tutorials/W1_ModelTypes/solutions/W1_Tutorial2_Solution_8a33b742.py | dc15afaa0b608af6694a1387b75654336eac098b | [] | no_license | tifainfaith/professional-workshop-3 | 640948d23c8207f891ff0257a38e4653af5b452b | 851077030cbb5a2f53520dbccb80e4459ae8bfc7 | refs/heads/master | 2023-08-20T00:50:11.303677 | 2021-10-27T00:46:34 | 2021-10-27T00:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | def lif_neuron(n_steps=1000, alpha=0.01, rate=10):
""" Simulate a linear integrate-and-fire neuron.
Args:
n_steps (int): The number of time steps to simulate the neuron's activity.
alpha (float): The input scaling factor
rate (int): The mean rate of incoming spikes
"""
# precompute Poisson samples for speed
exc = stats.poisson(rate).rvs(n_steps)
v = np.zeros(n_steps)
spike_times = []
for i in range(1, n_steps):
dv = alpha * exc[i]
v[i] = v[i-1] + dv
if v[i] > 1:
spike_times.append(i)
v[i] = 0
return v, spike_times
v, spike_times = lif_neuron()
plot_neuron_stats(v, spike_times) | [
"action@github.com"
] | action@github.com |
9b70171b34c9d395fbcb8b2bcd8eae663ee97237 | ca17bd80ac1d02c711423ac4093330172002a513 | /decorator/decorator.py | 786734854a22973c775aedfcfa7e0b0d85d65adb | [] | no_license | Omega094/lc_practice | 64046dea8bbdaee99d767b70002a2b5b56313112 | e61776bcfd5d93c663b247d71e00f1b298683714 | refs/heads/master | 2020-03-12T13:45:13.988645 | 2018-04-23T06:28:32 | 2018-04-23T06:28:32 | 130,649,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | def f1(arg):
print "f1"
rl = arg()
print rl, "This si rl"
return rl + "f1"
@f1
def f2(arg = ""):
print "f2"
return arg + "f2r"
print "start"
print f2
| [
"zhao_j1@denison.edu"
] | zhao_j1@denison.edu |
bb7f3095c4b812063e8c77e887d8afde9d682874 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02995/s824603391.py | 91c9d13ffd6406e8745ba0c78a8d2e5ef263052b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created: Jul, 13, 2020 04:11:55 by Nobody
# $Author$
# $Date$
# $URL$
__giturl__ = "$URL$"
import math
from sys import stdin
input = stdin.readline
def main():
A, B, C, D = list(map(int, input().split()))
# divisible by C
dc = B//C - (A-1)//C
# divisible by D
dd = B//D - (A-1)//D
lcm = C * D // math.gcd(C, D)
# divisible by (C and D)
dcd = B//lcm - (A-1)//lcm
# print(f'B-A+1: {B-A+1}')
# print(f'dc : {dc}')
# print(f'dd : {dd}')
# print(f'lcm : {lcm}')
# print(f'dcd : {dcd}')
print((B-A+1) - dc - dd + dcd)
if(__name__ == '__main__'):
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4252361cdb8d77952bfe9c73b5a3ef21eb5f86fc | a1b1e14dcaecbeee7a9ef7d0f3199f72e436dec9 | /migrations/0046_auto_20200731_1926.py | 3c15798c43e52f3aed7985402fd8d5f3be047b52 | [] | no_license | erllan/my-first-blog | ee99c3faad2de4039340b683143ada4c29310b31 | 0a1f37d9c95d70daaef945fbd950412281eb2cc4 | refs/heads/master | 2022-12-20T02:32:33.683460 | 2020-10-21T14:55:23 | 2020-10-21T14:55:23 | 262,575,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | # Generated by Django 3.0.2 on 2020-07-31 13:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('my_vk', '0045_auto_20200731_1855'),
]
operations = [
migrations.AlterField(
model_name='message',
name='from_user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='my_vk.User'),
),
migrations.AlterField(
model_name='message',
name='message_to',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='my_vk.User'),
),
]
| [
"erlan.kubanychbekov.000@gmail.com"
] | erlan.kubanychbekov.000@gmail.com |
fe6ca773d2c95c079f2abc0a0ea3f814374940ee | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2737/60734/246210.py | 03d3b05e18516224525a5ee93bd214fb817c7686 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | import re
lst = re.findall(r'\d+',input())
lst = list(map(int,lst))
#ๆๅคๅช่ฝๆไธคไธชไผๆฐ
m,n = 0,0
cm,cn = 0,0
for a in lst:
if a == m:
cm+=1
elif a == n:
cn+=1
elif cm == 0:
m = a
cm = 1
elif cn == 0:
n = a
cn = 1
else:#ไธคไธช่ฎกๆฐๅจ้ฝ่ฆๅไธ
cm-=1
cn-=1
#้ๆฐ้ๅ
cm, cn = 0,0
for a in lst:
if a == m:
cm+=1
elif a == n:
cn+=1
res = []
if cm>len(lst)/3:
res.append(m)
if cn>len(lst)/3:
res.append(n)
print(res) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
ee74fdda5a7f831d5569606cce09176bed506a90 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L60/60-36_wat_20Abox/set_1.py | 69c919776a7bd53e0fd516002c14349b8656a86c | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L60/wat_20Abox/ti_one-step/60_36/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_1.in'
temp_pbs = filesdir + 'temp_1.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_1.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_1.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
359fb0d94df61ec37a617dd620a2c50191bc432f | f14bf0274762591940a9f0382b6c9b99a42aedca | /WINDOW_openMDAO/src/api.py | 0684a81dd22f447d1d87f7e0deb1c424556f962e | [] | no_license | chunyuli/WINDOW_openMDAO | 5b610bcdac42fe45a69e2afcae74b92b3f27f092 | c9e39da2079d1a0b604fa9f4b9952dc663466871 | refs/heads/master | 2020-03-20T08:52:19.910162 | 2018-05-29T11:51:48 | 2018-05-29T11:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from AbsWakeModel.wake_linear_solver import WakeModel
from AbsWakeModel.AbstractWakeModel import DetermineIfInWake, WakeDeficit
from AbsAEP.farmpower_workflow import AEPWorkflow
from AbsTurbulence.abstract_wake_TI import AbstractWakeAddedTurbulence, DeficitMatrix, CtMatrix
from AbsWakeModel.AbsWakeMerge.abstract_wake_merging import AbstractWakeMerge
from AbsTurbulence.TI_workflow import TIWorkflow
from SiteConditionsPrep.depth_process import AbstractWaterDepth
from AbsElectricalCollection.abstract_collection_design import AbstractElectricDesign
from AbsSupportStructure.abstract_support_design import AbstractSupportStructureDesign, MaxTI
from AbsOandM.abstract_operations_maintenance import AbstractOandM
from AbsAEP.aep import AEP
from AbsTurbine.AbsTurbine import AbsTurbine
from Utils.util_components import NumberLayout, create_random_layout
from Utils.constraints import MinDistance, WithinBoundaries
from Utils.regular_parameterised import RegularLayout
from Utils.transform_quadrilateral import AreaMapping
from Utils.read_files import read_layout, read_windrose
from Utils.workflow_options import WorkflowOptions
| [
"s.sanchezperezmoreno@tudelft.nl"
] | s.sanchezperezmoreno@tudelft.nl |
136cc322199fffd4ac050c4614a69dd646546c5a | 1dae87abcaf49f1d995d03c0ce49fbb3b983d74a | /programs/test_uw_field_lock.prg.py | 36106b8820eb6a53475dbabb1e58b66c89dc35fc | [] | no_license | BEC-Trento/BEC1-data | 651cd8e5f15a7d9848f9921b352e0830c08f27dd | f849086891bc68ecf7447f62962f791496d01858 | refs/heads/master | 2023-03-10T19:19:54.833567 | 2023-03-03T22:59:01 | 2023-03-03T22:59:01 | 132,161,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,397 | py | prg_comment = ""
prg_version = "0.7"
def program(prg, cmd):
prg.add(10000, "Initialize 0 TTL and Synchronize.sub")
prg.add(50000, "DarkSpotMOT_19.sub")
prg.add(209000000, "Synchronize.sub", enable=False)
prg.add(209943111, "MOT lights Off TTL.sub")
prg.add(209947301, "Config Field OFF.sub")
prg.add(209949001, "Gray Molasses 2017")
prg.add(209949001, "Optical pumping", enable=False)
prg.add(209949011, "Scope 2 Trigger ON")
prg.add(209986000, "Scope 2 Trigger OFF")
prg.add(210000000, "Load_Quad")
prg.add(210010000, "Quad_RampUP")
prg.add(210060000, "Mirrors Imaging")
prg.add(215000000, "Ramp_bias_down.sub")
prg.add(216000000, "All AOM On.sub", functions=dict(time=lambda x: 10015.6899+cmd.get_var('QuadRampTime'), funct_enable=False))
prg.add(220000000, "Number_lock", enable=False)
prg.add(230000000, "Evaporation amp", 1000)
prg.add(230010000, "Evaporation ramp", start_t=0.0000, func_args="a=40e6, b=4e6, duration=10, tau=1", n_points=330, func="(b - a * exp(-duration / tau) + (a - b) * exp(-t / tau)) / (1 - exp(-duration / tau))", stop_t=5000.0000, functions=dict(func_args=lambda x: 'a={}, b={}, duration={}, tau={}'.format(cmd.get_var('evap1_fstart')*1e6, cmd.get_var('evap1_fend')*1e6, cmd.get_var('evap1_time')*1e-3, cmd.get_var('evap1_tau')), stop_t=lambda x: cmd.get_var('evap1_time')))
prg.add(230014000, "Quad_RampDOWN", functions=dict(time=lambda x: x + cmd.get_var('evap1_time')))
prg.add(230014000, "Evaporation amp", 1, functions=dict(time=lambda x: x +cmd.get_var('evap1_time') + 0.98))
prg.add(232014000, "Scope 1 Trigger Pulse", polarity=1, pulse_t=0.01000, functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')-0.034))
prg.add(232014000, "Setup_imaging", functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')-2.135))
prg.add(232014000, "Pulse uw", polarity=1, pulse_t=0.00200, functions=dict(pulse_t=lambda x: 1e-3 * cmd.get_var('marconi1_pulsetime'), time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')), enable=False)
prg.add(232014000, "BEC_imaging", functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')), enable=False)
prg.add(232014000, "BEC_imaging_field_lock", functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')))
prg.add(232114000, "Config Field OFF.sub", functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')))
return prg
def commands(cmd):
import numpy as np
iters = np.arange(-0.1001, -0.06, 0.002)
np.random.shuffle(iters)
j = 0
while(cmd.running):
print('\n-------o-------')
Bx_bottom = iters[j]
cmd.set_var('Bx_bottom', Bx_bottom)
print('\n')
print('Run #%d/%d, with variables:\nBx_bottom = %g\n'%(j+1, len(iters), Bx_bottom))
cmd._system.run_number = j
cmd.run(wait_end=True, add_time=100)
j += 1
if j == len(iters):
cmd._system.run_number = 0
cmd.stop()
return cmd
| [
"carmelo.mordini@unitn.it"
] | carmelo.mordini@unitn.it |
e96ab65f305d6d6b08752c3d9cdae3cc7c58b41a | 8b4246007428c5136d129d9fe4a754de0d11a12e | /chimp_mirna_CNV_TargetScan.py | c719d6de50e5c47bdfbfccab9bf600935b6a8c21 | [] | no_license | rjovelin/CNV_miRNAs_InvertVert | 4a1ba798f2fe7d44ecb7ca1d4acb67d0efda2a0e | 87494e561cc3052d94f2491d9dac2feaaaf78513 | refs/heads/master | 2016-09-13T20:18:14.151671 | 2016-05-16T02:53:39 | 2016-05-16T02:53:39 | 58,898,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,231 | py | from CNV_miRNAs_TargetScan import *
def chimp_UTR_sequence(UTR_file):
'''
(file, str) -> dict
Return a dictionnary with the UTR sequence of each transcript
'''
# open file for reading
utr = open(UTR_file, 'r')
header = utr.readline()
# create a dictionnary to store the urt sequence for each transcript
utr_sequences = {}
# read the file
for line in utr:
line = line.rstrip()
if line != '':
line = line.split()
transcript = line[0]
sequence = line[-1]
while '-' in sequence:
sequence = sequence.replace('-', '')
if '9598' in line:
utr_sequences[transcript] = sequence
utr.close()
return utr_sequences
def chimp_miRNA_family(miRNA_family_info):
'''
(file, str) -> dict
Return a dictionnary with seed as key and a list of same-family miRNAs as value
'''
family = {}
miRNA = open(miRNA_family_info, 'r')
header = miRNA.readline()
for line in miRNA:
line = line.rstrip()
if line != '':
line = line.split()
if line[2] == '9598':
if line[1] in family:
family[line[1]].append(line[3])
else:
family[line[1]] = [line[3]]
miRNA.close()
return family
def chimp_transcript_to_gene(ensemblToGeneName):
'''
(file) -> dict
Return a dictionnary with ensembl transcript ID as key and common gene ID as value
'''
# make a dictionnary with the ensembl transcript : gene pairs
transcripts_genes = {}
transcripts = open(ensemblToGeneName, 'r')
transcripts.readline()
for line in transcripts:
line = line.rstrip()
if line !='':
line = line.split()
transcripts_genes[line[0]] = line[1]
transcripts.close()
return transcripts_genes
def chimp_find_CNV_genes(CNV_positions_file, ensembl_transcript_coordinates, ensemblToGeneName):
'''
(file, file, file) -> file
Save to file the set of chimp CNV genes
'''
# make a dictionnary to store the coordinates of each CNV {CNV1:[chromo, start, end]}
cnv_coord = open(CNV_positions_file, 'r')
cnv_coord.readline()
cnv_coord.readline()
CNV_positions = {}
i = 0
for line in cnv_coord:
line = line.rstrip()
if line != '':
line = line.split()
if line[0].startswith('Chimp'):
CNV_positions[i] = ['chr' + line[1], int(line[2]), int(line[3])]
i += 1
# make a dictionnary with the ensembl transcript : gene pairs
transcripts_genes = chimp_transcript_to_gene(ensemblToGeneName)
# make a dictionnary to store the coordinates of the ensembl transcripts {TS1 : [chromo, start, end]}
transcripts_coord = open(ensembl_transcript_coordinates, 'r')
transcripts_coord.readline()
transcripts_positions = {}
for line in transcripts_coord:
line = line.rstrip()
if line !='':
line = line.split()
transcripts_positions[line[1]] = [line[2], int(line[4]), int(line[5])]
print(len(transcripts_positions))
# search for CNV transcripts using the transcript and CNV coordinates and store in a set
# report transcripts that overlap with a CNV even in the UTR or non-coding regions
done = 0
chimp_CNV_genes = set()
for transcript in transcripts_positions:
if done % 5 == 0:
print(done, len(chimp_CNV_genes), sep = '\t')
done += 1
if transcript in transcripts_genes:
if transcripts_genes[transcript] not in chimp_CNV_genes: # no need to search again if gene already CNV
for CNV in CNV_positions:
if transcripts_positions[transcript][0] == CNV_positions[CNV][0]:
ts_pos = set(range(transcripts_positions[transcript][1], transcripts_positions[transcript][2] + 1))
cnv_pos = set(range(CNV_positions[CNV][1], CNV_positions[CNV][2] + 1))
# if CNV and transcript coordinates overlap keep transcript
if len(ts_pos.intersection(cnv_pos)) != 0:
chimp_CNV_genes.add(transcripts_genes[transcript])
break # no need to search again if gene already CNV
cnv_coord.close()
transcripts_coord.close()
return chimp_CNV_genes
# declare valid_chimp_chromos as global variable
# valid chromosomes are the chromosomes in the ensemble_transcripts_coordinates
# file that are used to scan for CNV genes
valid_chimp_chromos = set('chr' + str(i) for i in range(1, 23))
def chimp_valid_genes(ensembl_transcript_coordinates, ensemblToGeneName):
'''
(file, file) -> set
Returns a set with all genes in chimp located on the same chromosomes scanned to find CNV genes
'''
# make a set of transcripts that are located on valid chromosomes
transcripts = set()
transcripts_coord = open(ensembl_transcript_coordinates, 'r')
transcripts_coord.readline()
for line in transcripts_coord:
line = line.rstrip()
if line !='':
line = line.split()
if line[2] in valid_chimp_chromos:
transcripts.add(line[1])
# make a dictionnary of transcripts : gene name pairs
transcripts_genes = chimp_transcript_to_gene(ensemblToGeneName)
# make a set of valid genes
valid_genes = set()
for ts_name in transcripts:
if ts_name in transcripts_genes:
valid_genes.add(transcripts_genes[ts_name])
transcripts_coord.close()
return valid_genes
def chimp_miRNA_regulation_TargetScan_sites(summary_counts, conservation_sites):
'''
(file, str) -> dict
Return a dictionnary with the number of miRNA regulators, miRNA binding sites and number of sites per miRNA for each target transcript
exctracted from the summary_count file. Use either only conserved sites or all sites.
'''
# open the summary_count file for reading
summary = open(summary_counts, 'r')
header = summary.readline()
# create a dictionnary to store the miRNA family and number of sites for each transcripts
# {transcript_1: [[family_1, N_sites_family_1], [family_2, N_sites_family_2]]}
regulated_transcripts = {}
# read the summary_count file
for line in summary:
line = line.rstrip()
if line != '':
line = line.split()
transcript = line[0]
family = line[2]
N_conserved_sites = int(line[4])
N_poorly_conserved_sites = int(line[8])
if line[3] == '9598':
if conservation_sites == 'all_sites':
if transcript in regulated_transcripts:
regulated_transcripts[transcript].append([family, (N_conserved_sites + N_poorly_conserved_sites)])
else:
regulated_transcripts[transcript] = [[family, (N_conserved_sites + N_poorly_conserved_sites)]]
elif conservation_sites == 'conserved_sites':
if N_conserved_sites > 0:
if transcript in regulated_transcripts:
regulated_transcripts[transcript].append([family, N_conserved_sites])
else:
regulated_transcripts[transcript] = [[family, N_conserved_sites]]
summary.close()
return regulated_transcripts
def human_transcripts_targets(human_CNV_miRNA_file):
'''
(file) -> (set, set)
Returns the set of human transcripts and the set of corresponding genes targeted by miRNAs from the file CNV_miRNA
'''
# create set of transcripts
transcripts = set()
genes = set()
# open the file for reading
human = open(human_CNV_miRNA_file, 'r')
human.readline()
for line in human:
line = line.rstrip()
if line !='':
line = line.split()
transcripts.add(line[1])
genes.add(line[0])
human.close()
return transcripts, genes
def chimp_make_miRNA_regulators_table(summary_counts, conservation_sites, miRNA_family_info, ensembl_transcript_coordinates,
ensemblToGeneName, CNV_genes_file, human_CNV_miRNA_file, outputfile):
'''
(file, str, file, file, file, file, file) -> file
For a single transcript / gene, write the number of miRNA regulators, number of miRNA binding sites (conserved or all),
the mean number of sites / mirna and whether the gene is in a CNV or not
'''
# get the gene ID for each target transcript
transcripts = transcript_gene_pairs(summary_counts)
# get the list of miRNAs for each family
family = chimp_miRNA_family(miRNA_family_info)
# get the human transcripts regulated by miRNAs
human_transcripts, human_genes = human_transcripts_targets(human_CNV_miRNA_file)
# get the CNV status
CNV_genes = set()
cnv_file = open(CNV_genes_file, 'r')
for line in cnv_file:
line = line.rstrip()
if line != '':
CNV_genes.add(line)
cnv_file.close()
# get the miRNA regulation
regulators = chimp_miRNA_regulation_TargetScan_sites(summary_counts, conservation_sites)
# create a dictionnary to store info about miRNA regultors for each gene
# using a single transcript per gene
# {gene_1: [[family_1, N_sites_family_1], [family_2, N_sites_family_2], transcript_ID, CNV_status]}
gene_regulated = {}
for transcript_ID in regulators:
gene = transcripts[transcript_ID]
if transcript_ID in human_transcripts: # use the same transcripts as in human for direct comparison
gene_regulated[gene] = list(regulators[transcript_ID]) # get the info for a single transcript
gene_regulated[gene].append(transcript_ID) # add the transcript ID
elif transcript_ID not in human_transcripts:
if gene not in gene_regulated and gene not in human_genes: # if gene in human_genes then skipped because another transcript should be used
gene_regulated[gene] = list(regulators[transcript_ID]) # get the info for a single transcript
gene_regulated[gene].append(transcript_ID) # add the transcript ID
# add the CNV status
for gene in gene_regulated:
if gene in CNV_genes:
gene_regulated[gene].append('CNV')
else:
gene_regulated[gene].append('non-CNV')
# get the set of valid genes
valid_genes = chimp_valid_genes(ensembl_transcript_coordinates, ensemblToGeneName)
# remove genes that are not located on the same chromosomes used to scan CNV genes
to_remove = []
for gene in gene_regulated:
if gene not in valid_genes:
to_remove.append(gene)
for gene in to_remove:
del gene_regulated[gene]
# write to file
newfile = open(outputfile, 'w')
newfile.write('gene' + '\t' + 'transcript' + '\t' + 'N_mirnas' + '\t' + 'N_sites' + '\t' + 'N_sites_per_miRNA' + '\t' + 'CNV' + '\n')
for gene in gene_regulated:
newfile.write(gene + '\t')
newfile.write(gene_regulated[gene][-2] + '\t')
N_mirnas = 0
N_sites = 0
for pair in gene_regulated[gene][:-2]:
N_mirnas += len(family[pair[0]])
N_sites += pair[1]
newfile.write(str(N_mirnas) + '\t' + str(N_sites) + '\t' + str(N_sites/ N_mirnas) + '\t' + gene_regulated[gene][-1] + '\n')
newfile.close()
def make_miRNA_regulators_table_conserved_families(focal_species, conservation_sites, human_CNV_miRNA_file, outputfile, summary_counts = 'Human_Summary_Counts.txt',
miRNA_family_info = 'Human_miR_Family_Info.txt', species = 'human',
CNV_file = 'GRCh37_hg19_variants_2013-05-31.txt', all_gene_file = 'Homo_sapiens.gene_info',
ensembl_transcript_coordinates = 'panTro2_ensGene', ensemblToGeneName = 'panTro2_ensemblToGeneName',
CNV_genes_file = 'Chimp_CNV_genes.txt'):
'''
(str, str, file, file, file, file, str, file, file, file, file, file)
For a single transcript / gene, write the number of miRNA regulators, number of miRNA binding sites (conserved or all),
the mean number of sites / mirna and whether the gene is in a CNV or not
Use only conserved miRNA families between human and chimp
'''
# get the gene ID for each target transcript
transcripts = transcript_gene_pairs(summary_counts)
# get the list of miRNAs for each family in human and in chimp
if focal_species == 'chimp':
family = chimp_miRNA_family(miRNA_family_info)
other_family = miRNA_family(miRNA_family_info, species)
elif focal_species == 'human':
family = miRNA_family(miRNA_family_info, species)
other_family = chimp_miRNA_family(miRNA_family_info)
# keep only conserved families between chimp and human
family_to_remove = []
for seed in family:
if seed not in other_family:
family_to_remove.append(seed)
for seed in family_to_remove:
del family[seed]
# get the CNV status and sort the valid genes
# get the miRNA regulators and binding sites
if focal_species == 'human':
CNV_genes = human_CNV_genes(CNV_file, all_gene_file)
valid_genes = sort_valid_human_genes(all_gene_file)
regulators = human_miRNA_regulation_TargetScan_sites(summary_counts, species, conservation_sites)
elif focal_species == 'chimp':
# get the CNV status
CNV_genes = set()
cnv_file = open(CNV_genes_file, 'r')
for line in cnv_file:
line = line.rstrip()
if line != '':
CNV_genes.add(line)
cnv_file.close()
valid_genes = chimp_valid_genes(ensembl_transcript_coordinates, ensemblToGeneName)
regulators = chimp_miRNA_regulation_TargetScan_sites(summary_counts, conservation_sites)
# get the human transcripts regulated by miRNAs
human_transcripts, human_genes = human_transcripts_targets(human_CNV_miRNA_file)
# create a dictionnary to store info about miRNA regultors for each gene
# using a single transcript per gene
# {gene_1: [[family_1, N_sites_family_1], [family_2, N_sites_family_2], transcript_ID, CNV_status]}
gene_regulated = {}
if focal_species == 'human':
for transcript_ID in regulators:
gene = transcripts[transcript_ID]
if gene not in gene_regulated:
gene_regulated[gene] = regulators[transcript_ID] # get the info for a single transcript
gene_regulated[gene].append(transcript_ID) # add the transcript ID
elif focal_species == 'chimp':
for transcript_ID in regulators:
gene = transcripts[transcript_ID]
if transcript_ID in human_transcripts: # use the same transcripts as in human for direct comparison
gene_regulated[gene] = list(regulators[transcript_ID]) # get the info for a single transcript
gene_regulated[gene].append(transcript_ID) # add the transcript ID
elif transcript_ID not in human_transcripts:
if gene not in gene_regulated and gene not in human_genes: # if gene in human_genes then skipped because another transcript should be used
gene_regulated[gene] = list(regulators[transcript_ID]) # get the info for a single transcript
gene_regulated[gene].append(transcript_ID) # add the transcript ID
# add the CNV status
for gene in gene_regulated:
if gene in CNV_genes:
gene_regulated[gene].append('CNV')
else:
gene_regulated[gene].append('non-CNV')
# remove non-valid target genes
to_remove = []
for gene in gene_regulated:
if gene not in valid_genes:
to_remove.append(gene)
for gene in to_remove:
del gene_regulated[gene]
# write to file
newfile = open(outputfile, 'w')
newfile.write('gene' + '\t' + 'transcript' + '\t' + 'N_mirnas' + '\t' + 'N_sites' + '\t' + 'N_sites_per_miRNA' + '\t' + 'CNV' + '\n')
for gene in gene_regulated:
N_mirnas = 0
N_sites = 0
for pair in gene_regulated[gene][:-2]:
if pair[0] in family:
N_mirnas += len(family[pair[0]])
N_sites += pair[1]
if N_mirnas != 0:
newfile.write(gene + '\t')
newfile.write(gene_regulated[gene][-2] + '\t')
newfile.write(str(N_mirnas) + '\t' + str(N_sites) + '\t' + str(N_sites/ N_mirnas) + '\t' + gene_regulated[gene][-1] + '\n')
newfile.close()
def get_CNV_genes_Perry_study(CNV_file, all_gene_file):
'''
(file, file) -> set
Returns a set of CNV genes from the Perry study
'''
# get valid human genes
valid_genes = sort_valid_human_genes(all_gene_file)
# make a set to store the CNV genes
# Note that the affected genes are sometimes line[-1] when no sample ID is not provided
# or line[-2] when sample ID is provided: take all and filter out the non valid genes
CNV = open(CNV_file, 'r')
CNV_genes = set()
header = CNV.readline()
for line in CNV:
line = line.rstrip()
if line != '':
line = line.split('\t')
if line[4] == 'CNV' and line[7] == '18775914':
if ',' in line[-2]:
genes = line[-2].split(',')
for gene in genes:
CNV_genes.add(gene)
elif ',' not in line[-2]:
CNV_genes.add(line[-2])
if ',' in line[-1]:
genes = line[-1].split(',')
for gene in genes:
CNV_genes.add(gene)
elif ',' not in line[-1]:
CNV_genes.add(line[-1])
# remove non valid genes
to_remove = []
for gene in CNV_genes:
if gene not in valid_genes:
to_remove.append(gene)
for gene in to_remove:
CNV_genes.discard(gene)
CNV.close()
return CNV_genes
def get_human_chimp_orthologs(human_CNV_miRNA_file, chimp_CNV_miRNA_file):
'''
(file, file) -> set
Returns a set of human and chimp orthologs regulated by miRNAs
'''
# make a set of orthologous genes
human_CNV_miRNA = open(human_CNV_miRNA_file, 'r')
human_genes = set()
for line in human_CNV_miRNA:
line = line.rstrip()
if line != '':
line = line.split()
human_genes.add(line[0])
human_CNV_miRNA.close()
chimp_CNV_miRNA = open(chimp_CNV_miRNA_file, 'r')
chimp_genes = set()
for line in chimp_CNV_miRNA:
line = line.rstrip()
if line != '':
line = line.split()
chimp_genes.add(line[0])
chimp_CNV_miRNA.close()
orthologs = human_genes.intersection(chimp_genes)
return orthologs
def get_human_chimp_miRNA_regulation(human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, perry_only):
'''
(file, file, file, str) -> tuple
Returns a 12-item tuple containing the lists of the number of miRNA regulators,
the number of miRNA binding sites, and the number of sites per miRN for CNV and non-CNV orthologous genes in human and chimp
Option to include all human CNV genes or only human CNV genes from the same study identifying chimp CNVs
The lists must be ordered to perform a paired test
'''
# make a set of orthologous genes
orthologs = get_human_chimp_orthologs(human_CNV_miRNA_file, chimp_CNV_miRNA_file)
# open files for reading
human_CNV_miRNA = open(human_CNV_miRNA_file, 'r')
chimp_CNV_miRNA = open(chimp_CNV_miRNA_file, 'r')
# make dictionnaries with gene as key and the list of mirna information as value
human_CNV_miRNA.readline()
chimp_CNV_miRNA.readline()
human_mirna_regulation = {}
for line in human_CNV_miRNA:
line = line.rstrip()
if line != '':
line = line.split()
human_mirna_regulation[line[0]] = [int(line[2]), int(line[3]), float(line[4]), line[-1]]
human_CNV_miRNA.close()
chimp_mirna_regulation = {}
for line in chimp_CNV_miRNA:
line = line.rstrip()
if line != '':
line = line.split()
chimp_mirna_regulation[line[0]] = [int(line[2]), int(line[3]), float(line[4]), line[-1]]
chimp_CNV_miRNA.close()
# remove genes that are not orthologs
human_to_remove = []
for gene in human_mirna_regulation:
if gene not in orthologs:
human_to_remove.append(gene)
for gene in human_to_remove:
del human_mirna_regulation[gene]
chimp_to_remove = []
for gene in chimp_mirna_regulation:
if gene not in orthologs:
chimp_to_remove.append(gene)
for gene in chimp_to_remove:
del chimp_mirna_regulation[gene]
# use only the human CNV genes from the same study identifying chimp CNV genes or use all human CNV genes
# if perry_only == yes : use only human and chimp CNV genes from the same study
if perry_only == 'yes':
perry_human_CNV_genes = get_CNV_genes_Perry_study(CNV_file, all_gene_file)
# change the CNV status of the human target genes
for gene in human_mirna_regulation:
if gene in perry_human_CNV_genes:
human_mirna_regulation[gene][-1] = 'CNV'
elif gene not in perry_human_CNV_genes:
human_mirna_regulation[gene][-1] = 'non-CNV'
# create ordered lists to store the values of the different metrics for CNV and non-CNV genes
human_CNV_mirnas = [] # store N mirnas for human CNV genes
human_CNV_sites = []
human_CNV_ratio = []
human_nonCNV_mirnas = [] # store N mirnas for human non-CNV genes
human_nonCNV_sites = []
human_nonCNV_ratio = []
chimp_nonCNV_hc_mirnas = [] # store N mirnas for chimp non-CNV genes with human CNV ortholog
chimp_nonCNV_hc_sites = []
chimp_nonCNV_hc_ratio = []
chimp_nonCNV_hn_mirnas = [] # store N mirnas for chimp non-CNV genes with human non-CNV ortholo
chimp_nonCNV_hn_sites = []
chimp_nonCNV_hn_ratio = []
# populate the lists
for gene in human_mirna_regulation:
if chimp_mirna_regulation[gene][-1] == 'non-CNV':
if human_mirna_regulation[gene][-1] == 'CNV':
human_CNV_mirnas.append(human_mirna_regulation[gene][0])
human_CNV_sites.append(human_mirna_regulation[gene][1])
human_CNV_ratio.append(human_mirna_regulation[gene][2])
chimp_nonCNV_hc_mirnas.append(chimp_mirna_regulation[gene][0])
chimp_nonCNV_hc_sites.append(chimp_mirna_regulation[gene][1])
chimp_nonCNV_hc_ratio.append(chimp_mirna_regulation[gene][2])
elif human_mirna_regulation[gene][-1] == 'non-CNV':
human_nonCNV_mirnas.append(human_mirna_regulation[gene][0])
human_nonCNV_sites.append(human_mirna_regulation[gene][1])
human_nonCNV_ratio.append(human_mirna_regulation[gene][2])
chimp_nonCNV_hn_mirnas.append(chimp_mirna_regulation[gene][0])
chimp_nonCNV_hn_sites.append(chimp_mirna_regulation[gene][1])
chimp_nonCNV_hn_ratio.append(chimp_mirna_regulation[gene][2])
return (human_CNV_mirnas, human_CNV_sites, human_CNV_ratio,
human_nonCNV_mirnas, human_nonCNV_sites, human_nonCNV_ratio,
chimp_nonCNV_hc_mirnas, chimp_nonCNV_hc_sites, chimp_nonCNV_hc_ratio,
chimp_nonCNV_hn_mirnas, chimp_nonCNV_hn_sites, chimp_nonCNV_hn_ratio)
def test_miRNA_regulation_CNV_non_CNV_genes(human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, paired, perry_only):
'''
(file, file, file, file, str, str) -> tuple
Performs a Wilcoxon signed-rank test or a Wilcoxon sum rank test between orthologous human CNV and chimp non-CNV genes for the number of
miRNAs, sites and the number of sites per miRNA. Performs a Wilcoxon signed-rank test or a Wilcoxon sum rank test between orthologs non-CNV genes as control
Return a tuple of 2-item tuple containing the z-value and the p-value
'''
# get the number of miRNAs, sites and sites per miRNA for human CNV genes and human and chimp non-CNV genes
mirna_regulation = get_human_chimp_miRNA_regulation(human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, perry_only)
human_CNV_mirnas = mirna_regulation[0]
human_CNV_sites = mirna_regulation[1]
human_CNV_ratio = mirna_regulation[2]
human_nonCNV_mirnas = mirna_regulation[3]
human_nonCNV_sites = mirna_regulation[4]
human_nonCNV_ratio = mirna_regulation[5]
chimp_nonCNV_hc_mirnas = mirna_regulation[6]
chimp_nonCNV_hc_sites = mirna_regulation[7]
chimp_nonCNV_hc_ratio = mirna_regulation[8]
chimp_nonCNV_hn_mirnas = mirna_regulation[9]
chimp_nonCNV_hn_sites = mirna_regulation[10]
chimp_nonCNV_hn_ratio = mirna_regulation[11]
# compute the Wilcoxon rank sum tests
from scipy import stats
if paired == 'yes':
diff_mirnas = stats.wilcoxon(human_CNV_mirnas, chimp_nonCNV_hc_mirnas)
diff_sites = stats.wilcoxon(human_CNV_sites, chimp_nonCNV_hc_sites)
diff_ratio = stats.wilcoxon(human_CNV_ratio, chimp_nonCNV_hc_ratio)
diff_control_mirnas = stats.wilcoxon(human_nonCNV_mirnas, chimp_nonCNV_hn_mirnas)
diff_control_sites = stats.wilcoxon(human_nonCNV_sites, chimp_nonCNV_hn_sites)
diff_control_ratio = stats.wilcoxon(human_nonCNV_ratio, chimp_nonCNV_hn_ratio)
elif paired == 'no':
diff_mirnas = stats.ranksums(human_CNV_mirnas, chimp_nonCNV_hc_mirnas)
diff_sites = stats.ranksums(human_CNV_sites, chimp_nonCNV_hc_sites)
diff_ratio = stats.ranksums(human_CNV_ratio, chimp_nonCNV_hc_ratio)
diff_control_mirnas = stats.ranksums(human_nonCNV_mirnas, chimp_nonCNV_hn_mirnas)
diff_control_sites = stats.ranksums(human_nonCNV_sites, chimp_nonCNV_hn_sites)
diff_control_ratio = stats.ranksums(human_nonCNV_ratio, chimp_nonCNV_hn_ratio)
return (diff_mirnas, diff_sites, diff_ratio,
diff_control_mirnas, diff_control_sites, diff_control_ratio)
def get_UTR_length_CNV_non_CNV_genes(UTR_file, human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, perry_only, species = 'human'):
'''
(file, file, file, file, file, str, str) -> tuple
Returns a 4-item tuple containing the lists of UTR length CNV and non-CNV orthologous genes in human and chimp
Option to include all human CNV genes or only human CNV genes from the same study identifying chimp CNVs
The lists must be ordered to perform a paired test
'''
# get the sequence of all UTRs
human_UTR = UTR_sequence(UTR_file, species)
chimp_UTR = chimp_UTR_sequence(UTR_file)
# make a set of orthologous genes
orthologs = get_human_chimp_orthologs(human_CNV_miRNA_file, chimp_CNV_miRNA_file)
# make a dictionnary for each gene in the CNV_miRNA file with a list containing the transcript name and the CNV status
human_target_genes = {}
human_cnv = open(human_CNV_miRNA_file, 'r')
human_cnv.readline()
for line in human_cnv:
line = line.rstrip()
if line != '':
line = line.split()
human_target_genes[line[0]] = [line[1], line[-1]]
human_cnv.close()
chimp_target_genes = {}
chimp_cnv = open(chimp_CNV_miRNA_file, 'r')
chimp_cnv.readline()
for line in chimp_cnv:
line = line.rstrip()
if line != '':
line = line.split()
chimp_target_genes[line[0]] = [line[1], line[-1]]
chimp_cnv.close()
# remove genes that are not orthologs
human_to_remove = []
for gene in human_target_genes:
if gene not in orthologs:
human_to_remove.append(gene)
for gene in human_to_remove:
del human_target_genes[gene]
chimp_to_remove = []
for gene in chimp_target_genes:
if gene not in orthologs:
chimp_to_remove.append(gene)
for gene in chimp_to_remove:
del chimp_target_genes[gene]
# use only the human CNV genes from the same study identifying chimp CNV genes or use all human CNV genes
# if perry_only == yes : use only human and chimp CNV genes from the same study
if perry_only == 'yes':
perry_human_CNV_genes = get_CNV_genes_Perry_study(CNV_file, all_gene_file)
# change the CNV status of the human target genes
for gene in human_target_genes:
if gene in perry_human_CNV_genes:
human_target_genes[gene][-1] = 'CNV'
elif gene not in perry_human_CNV_genes:
human_target_genes[gene][-1] = 'non-CNV'
# add the UTR length to the list of each human and chimp orthologous target gene
for gene in human_target_genes:
transcript = human_target_genes[gene][0]
human_target_genes[gene].insert(-1, len(human_UTR[transcript]))
for gene in chimp_target_genes:
transcript = chimp_target_genes[gene][0]
chimp_target_genes[gene].insert(-1, len(chimp_UTR[transcript]))
# create lists to the store the UTR length of CNV and non-CNV target genes
human_CNV_UTR = []
human_nonCNV_UTR = []
chimp_nonCNV_hc_UTR = []
chimp_nonCNV_hn_UTR = []
# partition the UTR length of target genes based on CNV status
# lists must be ordered for each orthologous gene to perform a paired test
for gene in human_target_genes:
if chimp_target_genes[gene][-1] == 'non-CNV':
if human_target_genes[gene][-1] == 'CNV':
human_CNV_UTR.append(human_target_genes[gene][1])
chimp_nonCNV_hc_UTR.append(chimp_target_genes[gene][1])
elif human_target_genes[gene][-1] == 'non-CNV':
human_nonCNV_UTR.append(human_target_genes[gene][1])
chimp_nonCNV_hn_UTR.append(chimp_target_genes[gene][1])
return (human_CNV_UTR, human_nonCNV_UTR, chimp_nonCNV_hc_UTR, chimp_nonCNV_hn_UTR)
def test_UTR_length_CNV_non_CNV_genes(UTR_file, human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, perry_only, paired, species = 'human'):
'''
(file, str, file, file, file, file, str) -> tuple
Performs a Wilcoxon paired test between orthologous human CNV and chimp non-CNV genes for UTR length.
Performs a Wilcoxon paired test between orthologs non-CNV genes as control
Return a tuple of 2-item tuple containing the z-value and the p-value
'''
# get the length of UTR for CNV and non-CNV target genes
target_UTR = get_UTR_length_CNV_non_CNV_genes(UTR_file, human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, perry_only, species = 'human')
human_CNV_UTR = target_UTR[0]
human_nonCNV_UTR = target_UTR[1]
chimp_nonCNV_hc_UTR = target_UTR[2]
chimp_nonCNV_hn_UTR = target_UTR[3]
# performs the Wilcoxon rank sum test
from scipy import stats
if paired == 'yes':
diff_UTR = stats.wilcoxon(human_CNV_UTR, chimp_nonCNV_hc_UTR)
diff_control_UTR = stats.wilcoxon(human_nonCNV_UTR, chimp_nonCNV_hn_UTR)
elif paired == 'no':
diff_UTR = stats.ranksums(human_CNV_UTR, chimp_nonCNV_hc_UTR)
diff_control_UTR = stats.ranksums(human_nonCNV_UTR, chimp_nonCNV_hn_UTR)
return diff_UTR, diff_control_UTR
def compute_mean_std_error(L):
'''
(list) -> tuple
Returns a tuple containing the mean and the standard error of a collection of values in the list L
Pre-condition: the values in L are floats and/or integers
'''
# verify the pre-condition
for item in L:
try:
item + 1
except:
print('values in L need to be intergers and/or floats')
import math
# compute the mean
total = 0
for item in L:
total += item
mean = total/ len(L)
# compute the stand error of the mean
total_diff = 0
for item in L:
total_diff += (item - mean)**2
std_dev = math.sqrt(total_diff / len(L))
std_error = std_dev / math.sqrt(len(L))
return (mean, std_error)
def print_results_human_chimp_tests(UTR_file, human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, perry_only, paired, species = 'human'):
'''
(file, str) -> None
Print the results of the Wilcoxon paired tests comparing miRNA regulation and UTR length between human and chimp orthologs
'''
# get the list of mirnas, sites and ratio for humand and chimp orthologs
mirna_regulation = get_human_chimp_miRNA_regulation(human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, perry_only)
# performs paired tests between humand and chimp orthologs for mirna regulation
test_regulation = test_miRNA_regulation_CNV_non_CNV_genes(human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, paired, perry_only)
# get the list of UTR length for humand and chimp orthologs
mirna_UTR = get_UTR_length_CNV_non_CNV_genes(UTR_file, human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, perry_only, species = 'human')
# performs paired tests between human and chimp orthologs for UTR length
test_UTR = test_UTR_length_CNV_non_CNV_genes(UTR_file, human_CNV_miRNA_file, chimp_CNV_miRNA_file, CNV_file, all_gene_file, perry_only, paired, species = 'human')
wilcoxon_regulation = ['mirnas_human_cnv_chimp_noncnv:', 'sites_human_cnv_chimp_noncnv:', 'ratio_human_cnv_chimp_noncnv:',
'mirnas_human_noncnv_chimp_noncnv:', 'sites_human_noncnv_chimp_noncnv:', 'ratio_human_noncnv_chimp_noncnv:']
if paired == 'yes':
print('Wilcoxon signed rank tests for differences between humand and chimp in miRNA regulation:')
elif paired == 'no':
print('Wilcoxon sum rank tests for differences between humand and chimp in miRNA regulation:')
for i in range(len(test_regulation)):
print(wilcoxon_regulation[i], 'z-score = ', test_regulation[i][0], 'p = ', test_regulation[i][1], sep = '\t')
print('\n')
print('N human CNV genes:' + '\t' + str(len(mirna_regulation[0])))
print('N human non-CNV genes:' + '\t' + str(len(mirna_regulation[3])))
print('\n')
print('\t' + 'mean' + '\t' + 'std_error')
headers = ['human_CNV_mirnas', 'human_CNV_sites', 'human_CNV_ratio',
'human_nonCNV_mirnas', 'human_nonCNV_sites', 'human_nonCNV_ratio',
'chimp_nonCNV_human_cnv_mirnas', 'chimp_nonCNV_human_cnv_sites', 'chimp_nonCNV_human_cnv_ratio',
'chimp_nonCNV_human_noncnv_mirnas', 'chimp_nonCNV_human_noncnv_sites', 'chimp_nonCNV_human_noncnv_ratio']
for i in range(len(mirna_regulation)):
mean_stderr = compute_mean_std_error(mirna_regulation[i])
print(headers[i], mean_stderr[0], mean_stderr[1], sep = '\t')
print('\n')
if paired == 'yes':
print('Wilcoxon signed rank tests for differences between humand and chimp in UTR length:')
elif paired == 'no':
print('Wilcoxon sum rank tests for differences between humand and chimp in UTR length:')
print('human CNV - chimp nonCNV:', 'z-score:', test_UTR[0][0], 'p', test_UTR[0][1], sep = '\t')
print('human nonCNV - chimp nonCNV:', 'z-score:', test_UTR[1][0], 'p', test_UTR[1][1], sep = '\t')
print('\n')
print('\t' + 'mean' + '\t' + 'std_error')
utr_header = ['human_CNV_UTR', 'human_nonCNV_UTR', 'chimp_nonCNV_human_cnv_UTR', 'chimp_nonCNV_human_noncnv_UTR']
for i in range(len(mirna_UTR)):
mean_stderr = compute_mean_std_error(mirna_UTR[i])
print(utr_header[i], mean_stderr[0], mean_stderr[1], sep = '\t')
| [
"richard.jovelin@oicr.on.ca"
] | richard.jovelin@oicr.on.ca |
64206f65fbc1b5c3f6709de7a7413042d681d67f | 3b6713413e079f3f93cbb59ded63c458f52d3dc2 | /bananas/__init__.py | 6613d0e976d007e91b6d34ab3fb6b6898054cc6c | [
"Apache-2.0"
] | permissive | bccp/bananaplots | 4ca2674e1b3be0edfcb2dcfab028b1b17791939c | dbfe107207e07351c7d7125430fde16fb2731cc2 | refs/heads/master | 2020-12-25T14:23:15.033909 | 2016-09-29T00:54:41 | 2016-09-29T00:54:41 | 66,505,317 | 1 | 1 | null | 2016-09-28T06:52:02 | 2016-08-24T22:50:22 | Python | UTF-8 | Python | false | false | 101 | py | from .bananas import *
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| [
"rainwoodman@gmail.com"
] | rainwoodman@gmail.com |
d82280a303a8a0d1b9724b5d51fd1c640ff72c40 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/notificationhubs/v20160301/namespace.py | 143d1314f881caa698c9c14288cdc5064181e7ee | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 10,738 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Namespace']
class Namespace(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
created_at: Optional[pulumi.Input[str]] = None,
critical: Optional[pulumi.Input[bool]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
namespace_type: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scale_unit: Optional[pulumi.Input[str]] = None,
service_bus_endpoint: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
status: Optional[pulumi.Input[str]] = None,
subscription_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Description of a Namespace resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] created_at: The time the namespace was created.
:param pulumi.Input[bool] critical: Whether or not the namespace is set as Critical.
:param pulumi.Input[bool] enabled: Whether or not the namespace is currently enabled.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] name: The name of the namespace.
:param pulumi.Input[str] namespace_name: The namespace name.
:param pulumi.Input[str] namespace_type: The namespace type.
:param pulumi.Input[str] provisioning_state: Provisioning state of the Namespace.
:param pulumi.Input[str] region: Specifies the targeted region in which the namespace should be created. It can be any of the following values: Australia East, Australia Southeast, Central US, East US, East US 2, West US, North Central US, South Central US, East Asia, Southeast Asia, Brazil South, Japan East, Japan West, North Europe, West Europe
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] scale_unit: ScaleUnit where the namespace gets created
:param pulumi.Input[str] service_bus_endpoint: Endpoint you can use to perform NotificationHub operations.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the created namespace
:param pulumi.Input[str] status: Status of the namespace. It can be any of these values:1 = Created/Active2 = Creating3 = Suspended4 = Deleting
:param pulumi.Input[str] subscription_id: The Id of the Azure subscription associated with the namespace.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['created_at'] = created_at
__props__['critical'] = critical
__props__['enabled'] = enabled
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
__props__['name'] = name
if namespace_name is None:
raise TypeError("Missing required property 'namespace_name'")
__props__['namespace_name'] = namespace_name
__props__['namespace_type'] = namespace_type
__props__['provisioning_state'] = provisioning_state
__props__['region'] = region
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['scale_unit'] = scale_unit
__props__['service_bus_endpoint'] = service_bus_endpoint
__props__['sku'] = sku
__props__['status'] = status
__props__['subscription_id'] = subscription_id
__props__['tags'] = tags
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:notificationhubs/latest:Namespace"), pulumi.Alias(type_="azure-nextgen:notificationhubs/v20140901:Namespace"), pulumi.Alias(type_="azure-nextgen:notificationhubs/v20170401:Namespace")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Namespace, __self__).__init__(
'azure-nextgen:notificationhubs/v20160301:Namespace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Namespace':
"""
Get an existing Namespace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Namespace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[Optional[str]]:
"""
The time the namespace was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def critical(self) -> pulumi.Output[Optional[bool]]:
"""
Whether or not the namespace is set as Critical.
"""
return pulumi.get(self, "critical")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Whether or not the namespace is currently enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namespaceType")
def namespace_type(self) -> pulumi.Output[Optional[str]]:
"""
The namespace type.
"""
return pulumi.get(self, "namespace_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Provisioning state of the Namespace.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def region(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the targeted region in which the namespace should be created. It can be any of the following values: Australia East, Australia Southeast, Central US, East US, East US 2, West US, North Central US, South Central US, East Asia, Southeast Asia, Brazil South, Japan East, Japan West, North Europe, West Europe
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="scaleUnit")
def scale_unit(self) -> pulumi.Output[Optional[str]]:
"""
ScaleUnit where the namespace gets created
"""
return pulumi.get(self, "scale_unit")
@property
@pulumi.getter(name="serviceBusEndpoint")
def service_bus_endpoint(self) -> pulumi.Output[Optional[str]]:
"""
Endpoint you can use to perform NotificationHub operations.
"""
return pulumi.get(self, "service_bus_endpoint")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sku of the created namespace
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
Status of the namespace. It can be any of these values:1 = Created/Active2 = Creating3 = Suspended4 = Deleting
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> pulumi.Output[Optional[str]]:
"""
The Id of the Azure subscription associated with the namespace.
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
51904ecbfe9cffd3ebd5c76f9e8576698bc9b7cd | fcecddb522cd6b775074ecd950aaddda9010ae57 | /Serenity/core/view_generators.py | 83e32ebfe5661f107d29a155e62250e9dfc250ea | [] | no_license | Firefly-Automation/Serenity | fa7fbe7800feed60a96a454e6fb520afa4c2030c | e343b31238af27255468a0773598130fd65d9793 | refs/heads/master | 2021-06-12T03:30:47.139101 | 2017-01-06T04:26:33 | 2017-01-06T04:26:33 | 66,432,533 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
# @Author: zpriddy
# @Date: 2016-08-23 22:07:06
# @Last Modified by: zpriddy
# @Last Modified time: 2016-08-23 22:20:07
## build array of all templates minified
## devices can have 'custom_view:true' and then 'custom_html:' as part of the device view
## proccess the data that comes in and generate the device_vew json object
## write the object to disk - Should only have to re-generate every few minutes | [
"me@zpriddy.com"
] | me@zpriddy.com |
4fd8f9a4b477f960834b1c2278051375e53aa7f4 | 23d8a521f1f2c15ec745d8a68f405be5c8ad58ba | /acme/tf/networks/legal_actions.py | 93b53ee3888e1df13c347028b542892cd543de54 | [
"Apache-2.0"
] | permissive | Idate96/acme | 108766d67d1c123f4f90045b3ad459e9f25a9cf1 | 722c33a3b8c779647314444531cb2282fab9246a | refs/heads/master | 2023-04-20T18:54:43.100914 | 2021-05-22T14:08:43 | 2021-05-22T14:08:43 | 368,240,977 | 0 | 0 | Apache-2.0 | 2021-05-17T15:48:11 | 2021-05-17T15:48:10 | null | UTF-8 | Python | false | false | 4,759 | py | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used for handling illegal actions."""
from typing import Any, Callable, Iterable, Union
# pytype: disable=import-error
from acme.wrappers import open_spiel_wrapper
# pytype: enable=import-error
import numpy as np
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class MaskedSequential(snt.Module):
"""Applies a legal actions mask to a linear chain of modules / callables.
It is assumed the trailing dimension of the final layer (representing
action values) is the same as the trailing dimension of legal_actions.
"""
def __init__(self,
layers: Iterable[Callable[..., Any]] = None,
name: str = 'MaskedSequential'):
super().__init__(name=name)
self._layers = list(layers) if layers is not None else []
self._illegal_action_penalty = -1e9
# Note: illegal_action_penalty cannot be -np.inf because trfl's qlearning
# ops utilize a batched_index function that returns NaN whenever -np.inf
# is present among action values.
def __call__(self, inputs: open_spiel_wrapper.OLT) -> tf.Tensor:
# Extract observation, legal actions, and terminal
outputs = inputs.observation
legal_actions = inputs.legal_actions
terminal = inputs.terminal
for mod in self._layers:
outputs = mod(outputs)
# Apply legal actions mask
outputs = tf.where(tf.equal(legal_actions, 1), outputs,
tf.fill(tf.shape(outputs), self._illegal_action_penalty))
# When computing the Q-learning target (r_t + d_t * max q_t) we need to
# ensure max q_t = 0 in terminal states.
outputs = tf.where(tf.equal(terminal, 1), tf.zeros_like(outputs), outputs)
return outputs
# FIXME: Add functionality to support decaying epsilon parameter.
# FIXME: This is a modified version of trfl's epsilon_greedy() which
# incorporates code from the bug fix described here
# https://github.com/deepmind/trfl/pull/28
class EpsilonGreedy(snt.Module):
  """Computes an epsilon-greedy distribution over actions.
  This policy does the following:
  - With probability 1 - epsilon, take the action corresponding to the highest
  action value, breaking ties uniformly at random.
  - With probability epsilon, take an action uniformly at random among the
  legal actions (those whose value exceeds the threshold).
  """
  def __init__(self,
               epsilon: Union[tf.Tensor, float],
               threshold: float,
               name: str = 'EpsilonGreedy'):
    """Initialize the policy.
    Args:
      epsilon: Exploratory param with value between 0 and 1.
      threshold: Action values must exceed this value to qualify as a legal
        action and possibly be selected by the policy.
      name: Name of the network.
    """
    super().__init__(name=name)
    # Kept as a non-trainable variable so the exploration rate can be
    # updated in place (e.g. a decay schedule) without rebuilding the module.
    self._epsilon = tf.Variable(epsilon, trainable=False)
    self._threshold = threshold
  def __call__(self, action_values: tf.Tensor) -> tfd.Categorical:
    """Build the epsilon-greedy policy for the given action values.
    Args:
      action_values: per-action value estimates; entries at or below the
        legality threshold are treated as illegal.
    Returns:
      policy: tfp.distributions.Categorical distribution representing the
        policy.
    """
    # 1.0/0.0 mask of the actions whose value exceeds the legality threshold.
    legal_actions_mask = tf.where(
        tf.math.less_equal(action_values, self._threshold),
        tf.fill(tf.shape(action_values), 0.),
        tf.fill(tf.shape(action_values), 1.))
    # Dithering action distribution.
    dither_probs = 1 / tf.reduce_sum(legal_actions_mask, axis=-1,
                                     keepdims=True) * legal_actions_mask
    # Illegal entries are sent to -inf so they can never be the maximum below.
    masked_action_values = tf.where(tf.equal(legal_actions_mask, 1),
                                    action_values,
                                    tf.fill(tf.shape(action_values), -np.inf))
    # Greedy action distribution, breaking ties uniformly at random.
    max_value = tf.reduce_max(masked_action_values, axis=-1, keepdims=True)
    greedy_probs = tf.cast(
        tf.equal(action_values * legal_actions_mask, max_value),
        action_values.dtype)
    greedy_probs /= tf.reduce_sum(greedy_probs, axis=-1, keepdims=True)
    # Epsilon-greedy action distribution.
    probs = self._epsilon * dither_probs + (1 - self._epsilon) * greedy_probs
    # Make the policy object.
    policy = tfd.Categorical(probs=probs)
    return policy
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
737970c9428aa9744556defe5a67e81041ca69f5 | 01c563cf0b063dfaba8b7daec7d247497acfb7ed | /vh-mail/api.py | 8aa1e8e9b29f95d9fa3f5e8bd5e9538b0fbf6fe5 | [] | no_license | sanyaade-iot/ajenti-v | 18f230fba800404370fdcf40324a6d90d87f2a0a | 6038a6d568d08953813a1753cda5bf3c7af19fb6 | refs/heads/master | 2021-01-15T19:59:09.424074 | 2014-07-10T16:42:47 | 2014-07-10T16:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,167 | py | import grp
import json
import os
import pwd
import shutil
import subprocess
import ajenti
from ajenti.api import *
from ajenti.plugins.services.api import ServiceMultiplexor
from ajenti.plugins.vh.api import Restartable
from ajenti.plugins.vh.processes import SupervisorRestartable
from ajenti.util import platform_select
import templates
class Config (object):
    """Top-level mail configuration, (de)serialized from a JSON-style dict.

    Holds the regular and forwarding mailbox lists plus MTA (Exim), DKIM
    and TLS settings.
    """

    def __init__(self, j):
        """Load the configuration from dict *j*; missing keys fall back
        to defaults."""
        self.mailboxes = [Mailbox(_) for _ in j.get('mailboxes', [])]
        self.forwarding_mailboxes = [
            ForwardingMailbox(_)
            for _ in j.get('forwarding_mailboxes', [])
        ]
        self.mailroot = j.get('mailroot', '/var/vmail')
        self.custom_mta_config = j.get('custom_mta_config', '')
        self.custom_mta_acl = j.get('custom_mta_acl', '')
        self.custom_mta_routers = j.get('custom_mta_routers', '')
        self.custom_mta_transports = j.get('custom_mta_transports', '')
        self.dkim_enable = j.get('dkim_enable', False)
        self.dkim_selector = j.get('dkim_selector', 'x')
        self.dkim_private_key = j.get('dkim_private_key', '')
        self.tls_enable = j.get('tls_enable', False)
        self.tls_certificate = j.get('tls_certificate', '')
        self.tls_privatekey = j.get('tls_privatekey', '')

    @staticmethod
    def create():
        """Factory: return a configuration populated entirely with defaults."""
        return Config({})

    def save(self):
        """Serialize the configuration back to a JSON-compatible dict.

        Bug fix: 'mailroot' is now persisted as well -- it was read by
        __init__ but never written back, so a customized mail root
        silently reverted to /var/vmail after a save/reload cycle.
        """
        return {
            'mailboxes': [_.save() for _ in self.mailboxes],
            'forwarding_mailboxes': [
                _.save()
                for _ in self.forwarding_mailboxes
            ],
            'mailroot': self.mailroot,
            'custom_mta_acl': self.custom_mta_acl,
            'custom_mta_routers': self.custom_mta_routers,
            'custom_mta_config': self.custom_mta_config,
            'custom_mta_transports': self.custom_mta_transports,
            'dkim_enable': self.dkim_enable,
            'dkim_selector': self.dkim_selector,
            'dkim_private_key': self.dkim_private_key,
            'tls_enable': self.tls_enable,
            'tls_certificate': self.tls_certificate,
            'tls_privatekey': self.tls_privatekey,
        }
class Mailbox (object):
    """A single virtual mailbox entry, (de)serialized from a JSON-style dict."""

    def __init__(self, j):
        """Load the mailbox fields from dict *j*, using defaults for
        anything missing."""
        read = j.get
        self.local = read('local', 'someone')
        self.domain = read('domain', 'example.com')
        self.password = read('password', 'example.com')
        self.owner = read('owner', 'root')

    @property
    def name(self):
        """Full address of the mailbox (local@domain)."""
        return self.local + '@' + self.domain

    @staticmethod
    def create():
        """Factory: return a mailbox populated entirely with defaults."""
        return Mailbox({})

    def save(self):
        """Serialize this mailbox back to a JSON-compatible dict."""
        result = {}
        for key in ('local', 'domain', 'password', 'owner'):
            result[key] = getattr(self, key)
        return result
class ForwardingMailbox (object):
    """A forwarding-only address: mail for local@domain is redirected to
    a list of ForwardingTarget addresses."""
    def __init__(self, j):
        # Destination addresses this mailbox forwards to.
        self.targets = [ForwardingTarget(_) for _ in j.get('targets', [])]
        self.local = j.get('local', 'someone')
        self.domain = j.get('domain', 'example.com')
        self.owner = j.get('owner', 'root')
    @property
    def name(self):
        """Full address of the forwarding mailbox (local@domain)."""
        return '%s@%s' % (self.local, self.domain)
    @staticmethod
    def create():
        """Factory: return a forwarding mailbox populated with defaults."""
        return ForwardingMailbox({})
    def save(self):
        """Serialize back to a JSON-compatible dict (targets included)."""
        return {
            'targets': [_.save() for _ in self.targets],
            'local': self.local,
            'domain': self.domain,
            'owner': self.owner,
        }
class ForwardingTarget (object):
    """Destination e-mail address of a forwarding mailbox."""

    def __init__(self, j):
        """Load the target address from dict *j* (placeholder when absent)."""
        self.email = j.get('email', 'someone@example.com')

    @staticmethod
    def create():
        """Factory: return a target with the default placeholder address."""
        return ForwardingTarget({})

    def save(self):
        """Serialize back to a JSON-compatible dict."""
        return dict(email=self.email)
@interface
class MailBackend (object):
    """Interface for mail server backends; implementations receive a
    Config via configure() and materialize it on the system."""
    pass
@plugin
class MailEximCourierBackend (MailBackend):
    """Mail backend built on Exim (SMTP/MTA) and Courier (IMAP + auth)."""
    def init(self):
        """Resolve distribution-specific config paths and the 'mail'
        user/group ids."""
        self.exim_cfg_path = platform_select(
            debian='/etc/exim4/exim4.conf',
            centos='/etc/exim/exim.conf',
            arch='/etc/mail/exim.conf',
        )
        # Courier expects these directories to exist before configuration.
        for d in ['/etc/courier', '/var/run/courier']:
            if not os.path.exists(d):
                os.makedirs(d)
        self.courier_authdaemonrc = platform_select(
            debian='/etc/courier/authdaemonrc',
            centos='/etc/authlib/authdaemonrc',
            arch='/etc/authlib/authdaemonrc',
        )
        self.courier_imaprc = platform_select(
            debian='/etc/courier/imapd',
            centos='/usr/lib/courier-imap/etc/imapd',
            arch='/etc/courier-imap/imapd',
        )
        self.courier_imapsrc = platform_select(
            debian='/etc/courier/imapd-ssl',
            centos='/usr/lib/courier-imap/etc/imapd-ssl',
            arch='/etc/courier-imap/imapd-ssl',
        )
        self.courier_userdb = platform_select(
            debian='/etc/courier/userdb',
            centos='/etc/authlib/userdb',
            arch='/etc/authlib/userdb',
        )
        self.courier_authsocket = platform_select(
            debian='/var/run/courier/authdaemon/socket',
            centos='/var/spool/authdaemon/socket',
            arch='/var/run/authdaemon/socket',
        )
        # Per-domain local-part lists and forwarding maps referenced by
        # the generated Exim configuration.
        self.maildomains = '/etc/exim.domains'
        self.mailforward = '/etc/exim.forward'
        self.mailuid = pwd.getpwnam('mail').pw_uid
        self.mailgid = grp.getgrnam('mail').gr_gid
    def configure(self, config):
        """Regenerate the Exim and Courier configuration from *config*
        and restart the affected services.

        Rewrites: the TLS PEM bundle, the main Exim config, Courier's
        authdaemon/IMAP/IMAPS configs, the per-domain local-part lists,
        the forwarding map and the Courier userdb.
        """
        try:
            mailname = open('/etc/mailname').read().strip()
        except:
            # NOTE(review): bare except also hides unexpected errors;
            # presumably only a missing /etc/mailname is intended here.
            mailname = 'localhost'
        # Collect the set of local domains; mailname and localhost are
        # always included.
        domains = [x.domain for x in config.mailboxes]
        domains += [x.domain for x in config.forwarding_mailboxes]
        domains = list(set(domains))
        if not mailname in domains:
            domains.append(mailname)
        if not 'localhost' in domains:
            domains.append('localhost')
        # Concatenate certificate + key into the single PEM bundle used
        # by the IMAPS configuration below.
        pem_path = os.path.join('/etc/courier/mail.pem')
        pem = ''
        if os.path.exists(config.tls_certificate):
            pem += open(config.tls_certificate).read()
        if os.path.exists(config.tls_privatekey):
            pem += open(config.tls_privatekey).read()
        with open(pem_path, 'w') as f:
            f.write(pem)
        # Render the main Exim configuration from its template.
        open(self.exim_cfg_path, 'w').write(templates.EXIM_CONFIG % {
            'local_domains': ' : '.join(domains),
            'mailname': mailname,
            'maildomains': self.maildomains,
            'mailforward': self.mailforward,
            'mailroot': config.mailroot,
            'custom_mta_acl': config.custom_mta_acl,
            'custom_mta_routers': config.custom_mta_routers,
            'custom_mta_config': config.custom_mta_config,
            'custom_mta_transports': config.custom_mta_transports,
            'dkim_enable': 'DKIM_ENABLE=1' if config.dkim_enable else '',
            'dkim_selector': config.dkim_selector,
            'dkim_private_key': config.dkim_private_key,
            'tls_enable': 'TLS_ENABLE=1' if config.tls_enable else '',
            'tls_certificate': config.tls_certificate,
            'tls_privatekey': config.tls_privatekey,
            'courier_authsocket': self.courier_authsocket,
        })
        os.chmod(self.exim_cfg_path, 0644)
        open(self.courier_authdaemonrc, 'w').write(templates.COURIER_AUTHRC % {
            'courier_authsocket': self.courier_authsocket,
        })
        open(self.courier_imaprc, 'w').write(templates.COURIER_IMAP % {
        })
        open(self.courier_imapsrc, 'w').write(templates.COURIER_IMAPS % {
            'tls_pem': pem_path,
        })
        # The auth socket's directory must be world-traversable so Exim
        # can reach the socket.
        socketdir = os.path.split(self.courier_authsocket)[0]
        if os.path.exists(socketdir):
            os.chmod(socketdir, 0755)
        # Domain entries ----------------------------
        # Rebuilt from scratch on every run: one file per domain listing
        # its local parts, plus a Maildir per mailbox.
        if os.path.exists(self.maildomains):
            shutil.rmtree(self.maildomains)
        os.makedirs(self.maildomains)
        os.chmod(self.maildomains, 0755)
        for mb in config.mailboxes:
            root = os.path.join(config.mailroot, mb.name)
            if not os.path.exists(root):
                os.makedirs(os.path.join(root, 'Maildir'))
            #os.chown(root, self.mailuid, self.mailgid)
            subprocess.call(['chown', '-R', 'mail:mail', root])
            with open(os.path.join(self.maildomains, mb.domain), 'a+') as f:
                f.write(mb.local + '\n')
            os.chmod(os.path.join(self.maildomains, mb.domain), 0755)
        # Forwarding entries ----------------------------
        # One file per forwarding address with a comma-separated target list.
        if os.path.exists(self.mailforward):
            shutil.rmtree(self.mailforward)
        os.makedirs(self.mailforward)
        os.chmod(self.mailforward, 0755)
        for mb in config.forwarding_mailboxes:
            fpath = os.path.join(
                self.mailforward,
                '%s@%s' % (mb.local, mb.domain)
            )
            with open(fpath, 'a+') as f:
                for target in mb.targets:
                    f.write(target.email + ',')
                # If a real mailbox with the same address exists, keep a
                # local copy too.
                if any(x.local == mb.local and x.domain == mb.domain for x in config.mailboxes):
                    f.write(mb.local + '@' + mb.domain)
            os.chmod(fpath, 0755)
        # UserDB ------------------------------------
        # Rebuild the Courier userdb: one entry per mailbox with uid/gid,
        # home/mail locations and an MD5 system password.
        if os.path.exists(self.courier_userdb):
            os.unlink(self.courier_userdb)
        for mb in config.mailboxes:
            root = os.path.join(config.mailroot, mb.name)
            subprocess.call([
                'userdb',
                mb.name,
                'set',
                'uid=%s' % self.mailuid,
                'gid=%s' % self.mailgid,
                'home=%s' % root,
                'mail=%s' % root,
            ])
            # userdbpw reads the password twice (entry + confirmation).
            udbpw = subprocess.Popen(
                ['userdbpw', '-md5'],
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE
            )
            o, e = udbpw.communicate(
                '%s\n%s\n' % (mb.password, mb.password)
            )
            md5pw = o
            udb = subprocess.Popen(
                ['userdb', mb.name, 'set', 'systempw'],
                stdin=subprocess.PIPE
            )
            udb.communicate(md5pw)
        subprocess.call(['makeuserdb'])
        EximRestartable.get().restart()
        CourierIMAPRestartable.get().restart()
        CourierAuthRestartable.get().restart()
@plugin
class MailManager (BasePlugin):
    """Owns the persisted mail configuration and drives the backend."""
    # Location of the persisted JSON configuration (contains passwords).
    config_path = '/etc/ajenti/mail.json'
    dkim_path = platform_select(
        debian='/etc/exim4/dkim/',
        centos='/etc/exim/dkim/',
    )
    tls_path = platform_select(
        debian='/etc/exim4/tls/',
        centos='/etc/exim/tls/',
    )
    def init(self):
        """Load the stored configuration, or start from defaults when no
        config file exists yet."""
        self.backend = MailBackend.get()
        if os.path.exists(self.config_path):
            self.is_configured = True
            self.config = Config(json.load(open(self.config_path)))
        else:
            self.is_configured = False
            self.config = Config.create()
    def get_usage(self, mb):
        """Return the disk usage of mailbox *mb* in bytes (via 'du -sb')."""
        return int(subprocess.check_output(
            ['du', '-sb', os.path.join(self.config.mailroot, mb.name)]
        ).split()[0])
    def save(self):
        """Persist the configuration (mode 0600 -- it holds passwords)
        and apply it through the backend."""
        j = json.dumps(self.config.save(), indent=4)
        with open(self.config_path, 'w') as f:
            f.write(j)
        os.chmod(self.config_path, 0600)
        self.is_configured = True
        self.backend.configure(self.config)
    def generate_tls_cert(self):
        """Generate a self-signed RSA certificate/key pair and enable TLS
        in the configuration (not yet saved)."""
        if not os.path.exists(self.tls_path):
            os.makedirs(self.tls_path)
        key_path = os.path.join(self.tls_path, 'mail.key')
        cert_path = os.path.join(self.tls_path, 'mail.crt')
        openssl = subprocess.Popen([
            'openssl', 'req', '-x509', '-newkey', 'rsa:1024',
            '-keyout', key_path, '-out', cert_path, '-days', '4096',
            '-nodes'
        ])
        # Intended to feed empty answers to openssl's subject prompts.
        # NOTE(review): without stdin=subprocess.PIPE this input is not
        # actually delivered to the child process -- verify the intended
        # behaviour.
        openssl.communicate('\n\n\n\n\n\n\n\n\n\n\n\n')
        self.config.tls_enable = True
        self.config.tls_certificate = cert_path
        self.config.tls_privatekey = key_path
    def generate_dkim_key(self):
        """Generate a 2048-bit RSA DKIM private key and enable DKIM in
        the configuration (not yet saved)."""
        if not os.path.exists(self.dkim_path):
            os.makedirs(self.dkim_path)
        privkey_path = os.path.join(self.dkim_path, 'private.key')
        subprocess.call([
            'openssl', 'genrsa', '-out', privkey_path, '2048'
        ])
        self.config.dkim_enable = True
        self.config.dkim_private_key = privkey_path
@plugin
class EximRestartable (Restartable):
    """Restarts the Exim MTA service (named 'exim4' on Debian)."""
    def restart(self):
        ServiceMultiplexor.get().get_one(platform_select(
            debian='exim4',
            default='exim',
        )).command('restart')
@plugin
class CourierIMAPRestartable (Restartable):
    """Restarts the Courier IMAP service(s); plain and SSL variants are
    separate services everywhere except CentOS."""
    def restart(self):
        ServiceMultiplexor.get().get_one(platform_select(
            debian='courier-imap',
            centos='courier-imap',
            default='courier-imapd',
        )).restart()
        if ajenti.platform != 'centos': # centos runs both
            ServiceMultiplexor.get().get_one(platform_select(
                debian='courier-imap-ssl',
                default='courier-imapd-ssl',
            )).restart()
@plugin
class CourierAuthRestartable (Restartable):
    """Restarts the Courier authentication daemon."""
    def restart(self):
        ServiceMultiplexor.get().get_one(platform_select(
            debian='courier-authdaemon',
            centos='courier-authlib',
        )).restart()
| [
"john.pankov@gmail.com"
] | john.pankov@gmail.com |
6672bc6d3850e044ea1f8fb79c6ff9cba031cfe5 | e68a40e90c782edae9d8f89b827038cdc69933c4 | /res/scripts/client/gui/scaleform/daapi/view/lobby/boosters/boosterspanelcomponent.py | 0caa7bfe82fda7414ba9a2de46bbce35ac7d8dd8 | [] | no_license | webiumsk/WOT-0.9.16 | 2486f8b632206b992232b59d1a50c770c137ad7d | 71813222818d33e73e414e66daa743bd7701492e | refs/heads/master | 2021-01-10T23:12:33.539240 | 2016-10-11T21:00:57 | 2016-10-11T21:00:57 | 70,634,922 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,857 | py | # 2016.10.11 22:11:02 Stลednรญ Evropa (letnรญ ฤas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/boosters/BoostersPanelComponent.py
from gui import game_control
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.ClientUpdateManager import g_clientUpdateManager
from gui.goodies.Booster import MAX_ACTIVE_BOOSTERS_COUNT
from gui.goodies import g_goodiesCache
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.Scaleform.genConsts.BOOSTER_CONSTANTS import BOOSTER_CONSTANTS
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.Scaleform.daapi.view.meta.SlotsPanelMeta import SlotsPanelMeta
from gui.shared.ItemsCache import g_itemsCache
from gui.shared.utils.requesters.ItemsRequester import REQ_CRITERIA
from gui.shared.utils.functions import makeTooltip
# Layout properties pushed to the Flash slots panel (see _buildList).
_GUI_SLOTS_PROPS = {'slotsCount': MAX_ACTIVE_BOOSTERS_COUNT,
 'slotWidth': 50,
 'paddings': 64,
 'groupPadding': 18,
 'ySlotPosition': 5,
 'offsetSlot': 13,
 'useOnlyLeftBtn': True}
# Sentinel ids stored in the slots map for slots without an active
# booster; 'addAvailable' marks an empty slot that could take one of the
# player's ready-to-activate boosters.
ADD_BOOSTER_ID = 'add'
_ADD_AVAILABLE_BOOSTER_ID = 'addAvailable'
_EMPTY_BOOSTER_ID = 'empty'
class BoostersPanelComponent(SlotsPanelMeta):
    """Lobby panel showing the player's booster slots.

    Active boosters are listed first (longest remaining usage time
    first); the remaining slots are rendered empty, optionally marked as
    able to accept one of the player's ready-to-activate boosters.
    """
    def __init__(self):
        super(BoostersPanelComponent, self).__init__()
        self._items = g_itemsCache.items
        # True = purely informational panel (no "add booster" affordance).
        self._isPanelInactive = True
        self._wasPopulated = False
        # Maps slot index -> booster id or one of the sentinel slot ids.
        self._slotsMap = {}
    def setSettings(self, isPanelInactive = True):
        """Switch the panel between inactive/interactive mode and rebuild
        it if it is already populated."""
        self._isPanelInactive = isPanelInactive
        if self._wasPopulated:
            self._buildList()
    def getBoosterSlotID(self, idx):
        """Return the booster/sentinel id stored for slot *idx* (or None)."""
        return self._slotsMap.get(int(idx), None)
    def getSlotTooltipBody(self, slotIdx):
        """Return the tooltip body (or tooltip type constant) for a slot."""
        boosterID = self._slotsMap.get(int(slotIdx), None)
        tooltip = ''
        if boosterID in (ADD_BOOSTER_ID, _ADD_AVAILABLE_BOOSTER_ID):
            # Empty slot: only advertise the boosters window when the
            # panel is interactive.
            if not self._isPanelInactive:
                body = TOOLTIPS.BOOSTERSPANEL_OPENBOOSTERSWINDOW_BODY
                tooltip = makeTooltip(None, body)
        else:
            tooltip = TOOLTIPS_CONSTANTS.BOOSTERS_BOOSTER_INFO
        return tooltip
    def _populate(self):
        super(BoostersPanelComponent, self)._populate()
        # Rebuild whenever the goodies cache or booster state changes.
        g_clientUpdateManager.addCallbacks({'goodies': self.__onUpdateGoodies})
        game_control.g_instance.boosters.onBoosterChangeNotify += self.__onUpdateGoodies
        self._buildList()
        self._wasPopulated = True
    def _dispose(self):
        # Drop references and unsubscribe from update notifications.
        self._items = None
        self._isPanelInactive = None
        self._wasPopulated = None
        self._slotsMap = None
        game_control.g_instance.boosters.onBoosterChangeNotify -= self.__onUpdateGoodies
        g_clientUpdateManager.removeObjectCallbacks(self)
        super(BoostersPanelComponent, self)._dispose()
        return
    def __getAvailableBoosters(self):
        """Return the boosters that are ready to be activated."""
        criteria = REQ_CRITERIA.BOOSTER.IS_READY_TO_ACTIVATE
        return g_goodiesCache.getBoosters(criteria=criteria)
    def _buildList(self):
        """Assemble the slot view objects and push them to the Flash UI."""
        result = []
        activeBoosters = g_goodiesCache.getBoosters(criteria=REQ_CRITERIA.BOOSTER.ACTIVE)
        # Longest remaining usage time first.
        activeBoostersList = sorted(activeBoosters.values(), key=lambda b: b.getUsageLeftTime(), reverse=True)
        availableBoostersCount = len(self.__getAvailableBoosters())
        activeBoostersCount = min(len(activeBoostersList), MAX_ACTIVE_BOOSTERS_COUNT)
        freeSlotsCount = MAX_ACTIVE_BOOSTERS_COUNT - min(activeBoostersCount, MAX_ACTIVE_BOOSTERS_COUNT)
        addBoostersSlotsCount = min(freeSlotsCount, availableBoostersCount)
        self._slotsMap = {}
        for idx in range(0, activeBoostersCount):
            booster = activeBoostersList[idx]
            self._slotsMap[idx] = booster.boosterID
            result.append(self.__makeBoosterVO(idx, booster))
        icon = ''
        if not self._isPanelInactive:
            icon = RES_ICONS.MAPS_ICONS_ARTEFACT_EMPTYORDER
        addAndActiveBoostersCount = activeBoostersCount + addBoostersSlotsCount
        # Fill the remaining slots with empty placeholders.
        for idx in range(activeBoostersCount, MAX_ACTIVE_BOOSTERS_COUNT):
            self._slotsMap[idx], slotLinkage = self.getEmptySlotParams(idx, addAndActiveBoostersCount)
            result.append(self.__makeEmptyBoosterVO(idx, slotLinkage, icon))
        self.as_setPanelPropsS(_GUI_SLOTS_PROPS)
        self.as_setSlotsS(result)
    def getEmptySlotParams(self, idx, addAndActiveBoostersCount):
        """Return (sentinel id, UI linkage) for the empty slot *idx*."""
        if idx < addAndActiveBoostersCount and not self._isPanelInactive:
            slotLinkage = BOOSTER_CONSTANTS.SLOT_ADD_UI
            emptyBoosterID = _ADD_AVAILABLE_BOOSTER_ID
        else:
            slotLinkage = BOOSTER_CONSTANTS.SLOT_UI
            emptyBoosterID = ADD_BOOSTER_ID
        return (emptyBoosterID, slotLinkage)
    def __makeBoosterVO(self, idx, booster):
        """Build the view object describing an active booster slot."""
        return {'boosterId': booster.boosterID,
         'id': str(idx),
         'icon': booster.icon,
         'inCooldown': booster.inCooldown,
         'cooldownPercent': booster.getCooldownAsPercent(),
         'leftTime': booster.getUsageLeftTime(),
         'leftTimeText': booster.getShortLeftTimeStr(),
         'showLeftTime': True,
         'isDischarging': True,
         'isInactive': self._isPanelInactive,
         'isEmpty': False,
         'qualityIconSrc': booster.getQualityIcon(),
         'slotLinkage': BOOSTER_CONSTANTS.SLOT_UI}
    def __makeEmptyBoosterVO(self, idx, slotLinkage, icon):
        """Build the view object describing an empty slot."""
        return {'id': str(idx),
         'isInactive': self._isPanelInactive,
         'isEmpty': True,
         'icon': icon,
         'slotLinkage': slotLinkage,
         'showLeftTime': False}
    def __onUpdateGoodies(self, *args):
        # Any goodies/booster change invalidates the whole panel.
        self._buildList()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\boosters\boosterspanelcomponent.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.10.11 22:11:02 Stลednรญ Evropa (letnรญ ฤas)
| [
"info@webium.sk"
] | info@webium.sk |
0667c0b03b59413ed9e1ceebc3ff08683d9d142b | b72c37e3ccda507b231649cddd5c7845c6c34ba1 | /PythonBasic/Day12/HomeWork_birth.py | e78f35a7fd539268e61aa6848d9f0f6fbea30bde | [] | no_license | ljrdemail/AID1810 | 51c61c255b5c5efc1dc642b46691a614daedd85e | b417bd831bc1550ab953ce7ca23f54e34b8b2692 | refs/heads/master | 2020-04-24T09:45:14.781612 | 2019-02-21T11:26:49 | 2019-02-21T11:26:49 | 171,866,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | import datetime
def liveday():
    """Prompt for a birth date (year-month-day, '-'-separated) and print
    the number of days lived plus the Chinese name of the birth weekday."""
    # Prompt text (Chinese): "enter your birth date, separated by '-'".
    dob = input("่ฏท่พๅ
ฅไฝ ๅบ็ๅนดๆๆฅ๏ผไปฅ-ๅ้๏ผ")
    # NOTE(review): no input validation -- a malformed date raises
    # ValueError/IndexError here.
    year = int((dob.split("-")[0]))
    month = int(dob.split("-")[1])
    day = int(dob.split("-")[2])
    d1 = datetime.datetime(year, month, day)
    d2= datetime.datetime.now()
    # Map datetime.weekday() (0 = Monday) to its Chinese name.
    w={0:"ๆๆไธ",1:"ๆๆไบ",2:"ๆๆไธ",3:"ๆๆๅ",4:"ๆๆไบ",5:"ๆๆๅ
ญ",6:"ๆๆๆฅ"}
    print("ไฝ ๆดปไบ",(d2-d1).days,"ๅคฉ")
    print("ไฝ ๅบ็้ฃๅคฉๆฏ:",w[d1.weekday()])
liveday()
| [
"root"
] | root |
c98c4a5b216fe8c82e6824648216dbc541cb869f | 165305e7d92075018b57f6288b84b4d5129be412 | /knit-graph.py | 9cae11d27be278f431448c7c8a588c5f698700a0 | [] | no_license | kshedstrom/plug-ins | 7c6e1358926f1d0cc062dc0d19e21f32d99b202c | 65aeb7c8b2c7f7c0a27fbf813863f10e6e2c00c9 | refs/heads/master | 2022-02-10T12:10:03.156258 | 2022-02-02T17:54:14 | 2022-02-02T17:54:14 | 7,652,530 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,811 | py | #!/usr/bin/env python
#
# Take an image and turn it into a knitting pattern: expand it by
# some (you pick) number and place black lines around each box.
# Chunks stolen from Akkana Peck's arclayer script.
import math
from gimpfu import *
from array import array
def python_knit_graph(timg, tdrawable, x_scale=14, y_scale=10):
    """Create a new image rendering *tdrawable* as a knitting chart.

    Each source pixel becomes an x_scale-by-y_scale cell; the black
    background left between cells forms the grid lines, with a heavier
    line every 10 cells.

    @param timg source image (standard plug-in argument, not used directly)
    @param tdrawable drawable whose pixels become chart cells
    @param x_scale width of one cell in pixels
    @param y_scale height of one cell in pixels
    """
    w = tdrawable.width
    h = tdrawable.height
    bpp = tdrawable.bpp
    # The extra 3 pixels leave room for the outer grid lines.
    width = w*x_scale + 3
    height = h*y_scale + 3
    img = gimp.Image(width, height, RGB)
    img.disable_undo()
    layer= gimp.Layer(img, "Graph", width, height, RGB_IMAGE,
                      100, NORMAL_MODE)
    img.add_layer(layer, 0)
    layers = img.layers
#    for l in layers:
#        print "Layer: Name=\"%s\" Width=%d Height=%d X=%d Y=%d\n"%(l.name, l.width, l.height, l.offsets[0], l.offsets[1])
    # initialize the regions and get their contents into arrays:
    srcRgn = tdrawable.get_pixel_rgn(0, 0, w, h, False, False)
    src_pixels = array("B", srcRgn[0:w, 0:h])
    dstRgn = layer.get_pixel_rgn(0, 0, width, height, True, True)
    p_size = len(dstRgn[0,0])
#    fg_colour = gimp.get_foreground()
    # Just let it fill with black
    dest_pixels = array("B", "\x00" * (width * height * p_size))
    for y in range(0, h):
        for x in range(0, w):
            src_pos = (x + w * y) * p_size
            newval = src_pixels[src_pos: src_pos + p_size]
            # Cell bounds in destination coordinates; shrink each cell by
            # 1-2 pixels so the black background shows through as the
            # grid, with a heavier line at every 10th cell boundary.
            x1 = x_scale*x
            x2 = x1 + x_scale
            y1 = y_scale*y
            y2 = y1 + y_scale
            if (x%10 == 9):
                x2 = x2 - 1
            if (y%10 == 9):
                y2 = y2 - 1
            if (x%10 == 0):
                x1 = x_scale*x + 2
            else:
                x1 = x_scale*x + 1
            if (y%10 == 0):
                y1 = y_scale*y + 2
            else:
                y1 = y_scale*y + 1
            # Paint the cell with the source pixel's value.
            for yy in range(y1, y2):
                for xx in range(x1, x2):
                    dest_pos = (xx + width * yy) * p_size
                    dest_pixels[dest_pos: dest_pos + p_size] = newval
    dstRgn[0:width, 0:height] = dest_pixels.tostring()
    layer.flush()
    layer.merge_shadow(True)
    layer.update(0, 0, width, height)
    img.enable_undo()
    gimp.Display(img)
    gimp.displays_flush()
#    drawable = pdb.gimp_image_get_active_layer(img)
#    pdb.gimp_file_save(img, drawable, file_name, file_name)
# Register the plug-in with GIMP's Python-Fu: metadata, menu location and
# the two integer parameters forwarded to python_knit_graph.
register(
    "python_knit_graph",
    "Stretch the specified image for use as a knitting pattern",
    "Stretch the specified image for use as a knitting pattern",
    "Kate Hedstrom",
    "Kate Hedstrom",
    "2013",
    "<Image>/Image/Knit_graph...",
    "*",
    [
        (PF_INT, "x_scale", "X scale", 14),
        (PF_INT, "y_scale", "Y scale", 10)
    ],
    [],
    python_knit_graph)
# Enter GIMP's plug-in main loop.
main()
| [
"kshedstrom@alaska.edu"
] | kshedstrom@alaska.edu |
5603b4614f0b5b7454891b5011ba3cb49b7f827e | c769dc9a7a4338f2eba6fe44d6de3eab968ff51f | /OA/OA/settings.py | 313328eb96393c078887de6b506a7a873aef643b | [] | no_license | simonzhao88/practice | 267f4836c3d4d19e8db973f13d561024613bb88c | d29db772793a8d01301bbcb457595d8bb9ea33bc | refs/heads/master | 2020-03-17T17:41:26.650235 | 2018-07-07T09:50:12 | 2018-07-07T09:50:12 | 133,798,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,734 | py | """
Django settings for OA project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed here -- move it to an environment
# variable before any real deployment.
SECRET_KEY = 'i)!2=k-$7@(luo)7bgk)_z)ldkcm^3z@ndccz@i)08j@i*1t5^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'hrs.apps.HrsConfig',  # local project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'OA.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'OA.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE(review): root/root credentials are development-only -- replace
# before deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'oa',
        'HOST': '127.0.0.1',
        'PORT': 3306,
        'USER': 'root',
        'PASSWORD': 'root'
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_URL = '/static/'
# Send log output to the console; level DEBUG gives the most detailed logs.
# DEBUG < INFO < WARNING < ERROR < CRITICAL
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
        },
    },
} | [
"642252108@qq.com"
] | 642252108@qq.com |
a9210d6d25b25c52a6e6e17e87b2ec6fa22734db | 986fc4298a3d728691951f77470beb5e92505425 | /icepack/mesh/stitch.py | a939ec8137ff08d6c2058f78e15ca17faafaba51 | [] | no_license | danshapero/icepack-py | 4eeee730056248cb1a33112bd07fea02b0e82194 | 24a13cba05dd5597fbec64f8bd2eb9580cc69fe7 | refs/heads/master | 2021-07-23T11:50:27.779303 | 2017-10-31T22:44:35 | 2017-10-31T22:44:35 | 105,718,249 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,654 | py |
import copy
import itertools
from numpy import ones, zeros, sqrt
from matplotlib.path import *
# --------------
def dist(x1, x2):
    """Return the Euclidean distance between 2-D points ``x1`` and ``x2``.

    Bug fix: the original called ``np.sqrt`` but the module only does
    ``from numpy import ones, zeros, sqrt`` -- the name ``np`` was never
    bound, so every call raised NameError.  Use the imported ``sqrt``.
    """
    return sqrt((x1[0] - x2[0])**2 + (x1[1] - x2[1])**2)
# -----------------------------------
class IsolatedSegmentError(Exception):
    """Raised by next_segment() when a segment has no successor within
    the given tolerance."""
    pass
# -----------------------------------
def next_segment(Xs, i, tol = 1.0e4):
    """
    Return the next segment in the input geometry
    Args:
        Xs: a list of coordinates of the lines of the input geometry
        i: an index of one line of the input geometry
        tol: criterion for whether another segment's endpoint is close enough
    Returns:
        j: the index of the segment after `i` in the input geometry, i.e. the
           segment whose start- or end-point is closest to the end-point of
           `i`. This could be `i` itself if it describes a closed loop. If the
           successor segment's order needs to be reversed, returns `-j`.
    Raises:
        IsolatedSegmentError on a segment with no successor within the given
        tolerance.
    """
    num_segments = len(Xs)
    Xi = Xs[i]
    # A segment whose endpoints coincide (within tol) is a closed loop
    # and is its own successor.
    if dist(Xi[0], Xi[-1]) < tol:
        return i
    for j in range(num_segments):
        if j != i:
            Xj = Xs[j]
            if dist(Xi[-1], Xj[0]) < tol:
                return j
            # Matching j's *end* point means j must be traversed in
            # reverse; signalled by a negative index.
            # NOTE(review): -j cannot encode "reverse segment 0" since
            # -0 == 0 -- callers would see no reversal flag in that case.
            if dist(Xi[-1], Xj[-1]) < tol:
                return -j
    raise IsolatedSegmentError()
# --------------------------------------
def segment_successors(Xs, tol = 1.0e4):
    """
    Return a new geometry identical to the input but with orientations flipped
    so that all segments lie end-to-end.
    Args:
        Xs: input geometry
        tol: tolerance for segment proximity
    Returns:
        Ys: input geometry, possibly with some segments in reverse order
        successors: successors[i] = the next segment after `i` in the PSLG
    """
    num_segments = len(Xs)
    # Deep copy so reversing segments below never mutates the caller's data.
    # NOTE(review): Ys[j].reverse() assumes each entry of Xs is a list --
    # confirm against callers.
    Ys = copy.deepcopy(Xs)
    segments = set(range(num_segments))
    successors = list(range(num_segments))
    while segments:
        # Walk one closed chain of segments starting from an arbitrary one.
        i0 = segments.pop()
        i = i0
        j = next_segment(Ys, i, tol)
        while j != i0:
            if j < 0:
                # Negative index from next_segment: the successor matched
                # end-to-end and must be flipped before chaining.
                j = -j
                Ys[j].reverse()
            segments.remove(j)
            successors[i] = j
            i = j
            j = next_segment(Ys, i, tol)
        successors[i] = i0
    return Ys, successors
# --------------------------------
def lines_to_paths(Xs, successors):
    """
    Return a list of closed matplotlib Path objects of the input geometry.

    Bug fix: the original body read ``X = X[i]`` before ``X`` was ever
    bound, raising NameError on the first iteration; the intended source
    is ``Xs[i]``.  A copy is taken so extending the chain does not
    mutate the caller's geometry.
    """
    segments = set(range(len(Xs)))
    Ps = []
    while segments:
        i0 = segments.pop()
        i = i0
        X = list(Xs[i])
        j = successors[i]
        while j != i0:
            segments.remove(j)
            X.extend(Xs[j])
            i = j
            j = successors[i]
        p = Path(X, closed = True)
        Ps.append(p)
    return Ps
# ---------------------------
def find_point_inside_path(p):
    """
    Return a point inside the path p.
    Triangle needs to have a point contained in any holes in the mesh.

    Bug fixes: the candidate point is now the true midpoint
    ``0.5 * (v_i + v_j)`` (the original computed ``0.5 * v_i + v_j``),
    and the starting offset uses ``len(p.vertices)`` with integer
    division -- ``len(p)`` is not defined for Path objects and ``/``
    would produce a float index under Python 3.
    """
    x = (0.0, 0.0)
    i, j = 0, len(p.vertices) // 2
    while not p.contains_point(x):
        j += 1
        x = (0.5 * (p.vertices[i, 0] + p.vertices[j, 0]),
             0.5 * (p.vertices[i, 1] + p.vertices[j, 1]))
    # NOTE(review): j can still run past the last vertex for degenerate
    # paths; an IndexError then signals that no interior point was found.
    return x
# --------------------------------
def identify_holes(Xs, successors):
    """
    Return one representative interior point for every hole, i.e. for
    every closed path contained inside another path of the geometry
    (Triangle needs one such point per hole).

    Bug fixes: the loop iterated over the undefined name ``ps`` instead
    of ``Ps``, and called the non-existent ``point_inside_path`` instead
    of ``find_point_inside_path``.
    """
    Ys = []
    Ps = lines_to_paths(Xs, successors)
    for p, q in itertools.combinations(Ps, 2):
        if p.contains_path(q):
            y = find_point_inside_path(q)
            Ys.append(y)
    return Ys
| [
"shapero.daniel@gmail.com"
] | shapero.daniel@gmail.com |
b49f6e0df50180b6d5413457a3482c2df62ffbe3 | 52a058db8d7c8ee5ef1b9523a2a32c6276def88f | /backend/the_muse_mentorship_31778/urls.py | d34e3d399f9eb9fec18d693fd595cc57cae86c0f | [] | no_license | crowdbotics-apps/the-muse-mentorship-31778 | 43f5484661a3ab8a699de758eea27193633d4301 | c12ea3282db9509e7e88c7e9ac0f5cff124a0703 | refs/heads/master | 2023-08-30T13:36:43.363016 | 2021-10-31T23:08:49 | 2021-10-31T23:08:49 | 423,280,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | py | """the_muse_mentorship_31778 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Admin site branding.
admin.site.site_header = "The Muse Mentorship"
admin.site.site_title = "The Muse Mentorship Admin Portal"
admin.site.index_title = "The Muse Mentorship Admin"
# swagger
# OpenAPI schema + Swagger UI, restricted to authenticated users.
api_info = openapi.Info(
    title="The Muse Mentorship API",
    default_version="v1",
    description="API documentation for The Muse Mentorship App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the SPA's index.html for the root URL and for any path not
# matched above (client-side routing catch-all).
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d577c4d03856c05f0a30f68c85439667e187a747 | 4c577d9ddf21d8aba5626343f91a4986266f01e2 | /eric6/.eric6/eric6plugins/vcsGit/GitFetchDialog.py | 6686d150f3e09781568a6197a7e8228c683b3132 | [] | no_license | metamarcdw/.dotfiles | 362199d415ebd7d09247ee0efbda03243aa22faa | 3df0c805225a8d4f2709565d7eda4e07a050c986 | refs/heads/master | 2020-12-30T15:29:25.769345 | 2017-12-22T05:44:01 | 2017-12-22T05:44:01 | 91,143,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,548 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 - 2017 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to enter data for a Fetch operation.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from .Ui_GitFetchDialog import Ui_GitFetchDialog
class GitFetchDialog(QDialog, Ui_GitFetchDialog):
"""
Class implementing a dialog to enter data for a Fetch operation.
"""
    def __init__(self, vcs, repodir, parent=None):
        """
        Constructor
        
        @param vcs reference to the git object
        @param repodir directory name of the local repository (string)
        @param parent reference to the parent widget (QWidget)
        """
        super(GitFetchDialog, self).__init__(parent)
        self.setupUi(self)
        self.__vcs = vcs
        self.__repodir = repodir
        # Sentinel combo box entries for "fetch from all remotes" and
        # "fetch from a manually entered URL".
        self.__all = self.tr("<All>")
        self.__custom = self.tr("<Custom>")
        # Map of configured remote name -> URL.
        remoteUrlsList = self.__vcs.gitGetRemoteUrlsList(self.__repodir)
        self.__repos = {name: url for name, url in remoteUrlsList}
        self.__okButton = self.buttonBox.button(QDialogButtonBox.Ok)
        self.remotesComboBox.addItems(sorted(self.__repos.keys()))
        self.remotesComboBox.addItem(self.__all)
        self.remotesComboBox.addItem(self.__custom)
        # Preselect "origin" when it exists, otherwise the first entry.
        index = self.remotesComboBox.findText("origin")
        if index == -1:
            index = 0
        self.remotesComboBox.setCurrentIndex(index)
        # Local branch selection starts disabled (empty entry first).
        localBranches = self.__vcs.gitGetBranchesList(
            self.__repodir, withMaster=True)
        self.localBranchComboBox.addItems([""] + sorted(localBranches))
        self.localBranchComboBox.setEnabled(False)
def __okButtonEnable(self):
"""
Private slot to set the enabled state of the OK button.
"""
self.__okButton.setEnabled(
self.remoteBranchesList.count() > 0 or
self.remotesComboBox.currentText() == self.__all
)
def __updateButtonEnable(self):
"""
Private slot to set the enabled state of the update button.
"""
remote = self.remotesComboBox.currentText()
enable = remote != self.__all
if remote == self.__custom:
enable = self.remoteEdit.text() != ""
self.updateButton.setEnabled(enable)
@pyqtSlot(str)
def on_remotesComboBox_currentTextChanged(self, txt):
"""
Private slot to handle changes of the selected repository.
@param txt current text of the combo box (string)
"""
self.remoteEdit.setReadOnly(txt != self.__custom)
self.remoteBranchesList.setEnabled(txt != self.__all)
self.remoteEdit.clear()
self.remoteBranchesList.clear()
self.__updateButtonEnable()
self.__okButtonEnable()
if txt not in [self.__all, self.__custom]:
remoteBranches = self.__vcs.gitGetRemoteBranchesList(
self.__repodir, txt)
self.remoteBranchesList.addItems(sorted(remoteBranches))
if txt in self.__repos:
self.remoteEdit.setText(self.__repos[txt])
@pyqtSlot(str)
def on_remoteEdit_textChanged(self, txt):
"""
Private slot to handle changes of the URL edit.
@param txt current text of the URL edit (string)
"""
self.__updateButtonEnable()
if self.remotesComboBox.currentText() == self.__custom and \
txt != "":
remoteBranches = self.__vcs.gitGetRemoteBranchesList(
self.__repodir, txt)
self.remoteBranchesList.clear()
self.remoteBranchesList.addItems(sorted(remoteBranches))
self.__okButtonEnable()
@pyqtSlot()
def on_remoteBranchesList_itemSelectionChanged(self):
"""
Private slot to handle a change of selected remote branches.
"""
singleSelection = len(self.remoteBranchesList.selectedItems()) == 1
self.localBranchComboBox.setEnabled(singleSelection)
if singleSelection:
txt = self.remoteBranchesList.selectedItems()[0].text()
else:
txt = ""
index = self.localBranchComboBox.findText(txt)
if index == -1:
self.localBranchComboBox.setEditText(txt)
else:
self.localBranchComboBox.setCurrentIndex(index)
@pyqtSlot()
def on_updateButton_clicked(self):
"""
Private slot to update the list of remote branches.
"""
remote = self.remotesComboBox.currentText()
if remote == self.__all:
# shouldn't happen
return
if remote == self.__custom:
remote = self.remoteEdit.text()
if remote == "":
# shouldn't happen either
return
remoteBranches = self.__vcs.gitGetRemoteBranchesList(
self.__repodir, remote)
self.remoteBranchesList.clear()
self.remoteBranchesList.addItems(sorted(remoteBranches))
self.__okButtonEnable()
def getData(self):
"""
Public method to get the entered data.
@return tuple of remote name, remote url (for custom remotes),
remote branches, local branch, a flag indicating to fetch from
all repositories, a flag indicating to remove obsolete tracking
references and a flag indicating to fetch tags as well
(string, string, list of strings, string, boolean, boolean,
boolean)
"""
remote = ""
url = ""
remoteBranches = []
allRepos = False
localBranch = ""
remoteRepo = self.remotesComboBox.currentText()
if remoteRepo == self.__all:
allRepos = True
else:
if remoteRepo == self.__custom:
url = self.remoteEdit.text()
else:
remote = remoteRepo
for itm in self.remoteBranchesList.selectedItems():
remoteBranches.append(itm.text())
if len(remoteBranches) == 1:
localBranch = self.localBranchComboBox.currentText()
return (remote, url, remoteBranches, localBranch, allRepos,
self.pruneCheckBox.isChecked(), self.tagsCheckBox.isChecked())
| [
"metamarcdw@gmail.com"
] | metamarcdw@gmail.com |
536ff8a0db556679da306bd709afddbc31ee7b11 | 03f9b8bdea312636afb4df3737b55cb0cc4b21ff | /EditDistance.py | 20fb3e9c2e621e3789092624f15fa5338bbd61f3 | [] | no_license | ellinx/LC-python | f29dd17bbe15407ba0d06ad68386efdc9a343b56 | 9190d3d178f1733aa226973757ee7e045b7bab00 | refs/heads/master | 2021-06-01T15:21:24.379811 | 2020-10-29T04:37:07 | 2020-10-29T04:37:07 | 132,704,788 | 1 | 1 | null | 2019-05-15T03:26:11 | 2018-05-09T05:13:26 | Python | UTF-8 | Python | false | false | 1,405 | py | """
Given two words word1 and word2, find the minimum number of operations required to convert word1 to word2.
You have the following 3 operations permitted on a word:
1. Insert a character
2. Delete a character
3. Replace a character
Example 1:
Input: word1 = "horse", word2 = "ros"
Output: 3
Explanation:
horse -> rorse (replace 'h' with 'r')
rorse -> rose (remove 'r')
rose -> ros (remove 'e')
Example 2:
Input: word1 = "intention", word2 = "execution"
Output: 5
Explanation:
intention -> inention (remove 't')
inention -> enention (replace 'i' with 'e')
enention -> exention (replace 'n' with 'x')
exention -> exection (replace 'n' with 'c')
exection -> execution (insert 'u')
"""
class Solution:
    def minDistance(self, word1, word2):
        """Return the minimum number of single-character edits
        (insertion, deletion, replacement) turning word1 into word2.

        Classic Levenshtein dynamic programme over a
        (len(word1)+1) x (len(word2)+1) table.

        :type word1: str
        :type word2: str
        :rtype: int
        """
        rows, cols = len(word1) + 1, len(word2) + 1
        # table[r][c] = edit distance between word1[:r] and word2[:c]
        table = [[0] * cols for _ in range(rows)]
        # Base cases: transforming to/from the empty prefix.
        for c in range(cols):
            table[0][c] = c          # build word2[:c] by c insertions
        for r in range(rows):
            table[r][0] = r          # erase word1[:r] by r deletions
        for r in range(1, rows):
            for c in range(1, cols):
                delete_or_insert = 1 + min(table[r - 1][c], table[r][c - 1])
                substitute = table[r - 1][c - 1]
                if word1[r - 1] != word2[c - 1]:
                    substitute += 1  # differing characters cost one edit
                table[r][c] = min(delete_or_insert, substitute)
        return table[rows - 1][cols - 1]
| [
"ellin.xll@gmail.com"
] | ellin.xll@gmail.com |
c14390c5349a7bb564d59f5828cdc6bbf09d9e06 | a29c7e363026111276e94b96d39b1b4ab48dbca8 | /sdk/test/test_overdraft_overdraft_fee_charge_cap.py | 2424dab7913426542a731b72bb7d31c75654266f | [
"MIT"
] | permissive | matteo-kalogirou/yapily-sdk-python | a56bf6f9b1b308efda38f081f6237ebd8c8f8ad5 | f10d2d14383f551eeb59aa893d328ffa5080da22 | refs/heads/master | 2022-12-16T22:24:18.026765 | 2020-09-18T13:59:26 | 2020-09-18T13:59:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 0.0.242
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import yapily
from yapily.models.overdraft_overdraft_fee_charge_cap import OverdraftOverdraftFeeChargeCap # noqa: E501
from yapily.rest import ApiException
class TestOverdraftOverdraftFeeChargeCap(unittest.TestCase):
    """OverdraftOverdraftFeeChargeCap unit test stubs.

    Auto-generated smoke test: it only checks that the model can be
    constructed with and without its optional parameters.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test OverdraftOverdraftFeeChargeCap

        include_option is a boolean, when False only required
        params are included, when True both required and
        optional params are included """
        # model = yapily.models.overdraft_overdraft_fee_charge_cap.OverdraftOverdraftFeeChargeCap()  # noqa: E501
        if include_optional :
            # Fully-populated instance with representative sample values.
            return OverdraftOverdraftFeeChargeCap(
                capping_period = 'Day',
                fee_cap_amount = '0',
                fee_cap_occurrence = 1.337,
                fee_type = [
                    'ArrangedOverdraft'
                    ],
                min_max_type = 'Minimum',
                notes = [
                    '0'
                    ],
                other_fee_type = [
                    yapily.models.overdraft_other_fee_type.OverdraftOtherFeeType(
                        code = '0',
                        description = '0',
                        name = '0', )
                    ],
                overdraft_control_indicator = True
            )
        else :
            # Minimal instance; the model has no required fields.
            return OverdraftOverdraftFeeChargeCap(
        )

    def testOverdraftOverdraftFeeChargeCap(self):
        """Test OverdraftOverdraftFeeChargeCap"""
        # Construction must not raise in either configuration.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)

if __name__ == '__main__':
    unittest.main()
| [
"systems@yapily.com"
] | systems@yapily.com |
52a0b781f3cb870d2d36c28681e81c31624301c8 | f907f8ce3b8c3b203e5bb9d3be012bea51efd85f | /two_anagrams.py | 376bf7604cff49323f1bec9a7788a0c8040f53a7 | [] | no_license | KohsukeKubota/Atcoder-practice | 3b4b986395551443f957d1818d6f9a0bf6132e90 | 52554a2649445c2760fc3982e722854fed5b8ab1 | refs/heads/master | 2020-08-26T15:17:29.344402 | 2019-10-26T11:14:24 | 2019-10-26T11:14:24 | 217,052,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | s = input()
t = input()
s = sorted(s)
t = sorted(t, reverse=True)
if s < t:
print('Yes')
else:
print('No')
| [
"kohsuke@KohsukeKubotas-MacBook-Air.local"
] | kohsuke@KohsukeKubotas-MacBook-Air.local |
9aa2e4fd411b594a84a9bace690226a5b19c56aa | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/aws-cli/2015/4/hbase.py | f977cddfdb23f0cfd53315822d2ec3d2283b1b3a | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 9,284 | py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.customizations.commands import BasicCommand
from awscli.customizations.emr import constants
from awscli.customizations.emr import emrutils
from awscli.customizations.emr import hbaseutils
from awscli.customizations.emr import helptext
from awscli.customizations.emr.command import Command
class RestoreFromHBaseBackup(Command):
    """EMR subcommand: submit a step that restores HBase from an S3 backup."""
    NAME = 'restore-from-hbase-backup'
    DESCRIPTION = ('Restores HBase from S3.')
    ARG_TABLE = [
        {'name': 'cluster-id', 'required': True,
         'help_text': helptext.CLUSTER_ID},
        {'name': 'dir', 'required': True,
         'help_text': helptext.HBASE_BACKUP_DIR},
        {'name': 'backup-version',
         'help_text': helptext.HBASE_BACKUP_VERSION}
    ]

    def _run_main_command(self, parsed_args, parsed_globals):
        """Build the restore step and submit it via AddJobFlowSteps.

        Returns 0 on success (CLI exit code convention).
        """
        steps = []
        # backup-version may be None; the helper handles that case.
        args = hbaseutils.build_hbase_restore_from_backup_args(
            parsed_args.dir, parsed_args.backup_version)
        step_config = emrutils.build_step(
            jar=constants.HBASE_JAR_PATH,
            name=constants.HBASE_RESTORE_STEP_NAME,
            action_on_failure=constants.CANCEL_AND_WAIT,
            args=args)
        steps.append(step_config)

        parameters = {'JobFlowId': parsed_args.cluster_id,
                      'Steps': steps}
        emrutils.call_and_display_response(self._session, 'AddJobFlowSteps',
                                           parameters, parsed_globals)
        return 0
class ScheduleHBaseBackup(Command):
    """EMR subcommand: submit a step scheduling periodic HBase backups.

    Supports full or incremental backups at a fixed interval, optionally
    consistent (write operations paused during the backup).
    """
    NAME = 'schedule-hbase-backup'
    DESCRIPTION = ('Adds a step to schedule automated HBase backup.')
    ARG_TABLE = [
        {'name': 'cluster-id', 'required': True,
         'help_text': helptext.CLUSTER_ID},
        {'name': 'type', 'required': True,
         'help_text': "<p>Backup type. You can specify 'incremental' or "
                      "'full'.</p>"},
        {'name': 'dir', 'required': True,
         'help_text': helptext.HBASE_BACKUP_DIR},
        {'name': 'interval', 'required': True,
         'help_text': '<p>The time between backups.</p>'},
        {'name': 'unit', 'required': True,
         'help_text': "<p>The time unit for backup's time-interval. "
                      "You can specify one of the following values:"
                      " 'minutes', 'hours', or 'days'.</p>"},
        {'name': 'start-time',
         'help_text': '<p>The time of the first backup in ISO format.</p>'
                      ' e.g. 2014-04-21T05:26:10Z. Default is now.'},
        {'name': 'consistent', 'action': 'store_true',
         'help_text': '<p>Performs a consistent backup.'
                      ' Pauses all write operations to the HBase cluster'
                      ' during the backup process.</p>'}
    ]
    EXAMPLES = BasicCommand.FROM_FILE('emr', 'schedule-hbase-backup.rst')

    def _run_main_command(self, parsed_args, parsed_globals):
        """Validate arguments, build the scheduling step and submit it
        via AddJobFlowSteps.  Returns 0 on success.
        """
        steps = []
        # Fail fast on invalid --type / --unit before calling the service.
        self._check_type(parsed_args.type)
        self._check_unit(parsed_args.unit)

        args = self._build_hbase_schedule_backup_args(parsed_args)
        step_config = emrutils.build_step(
            jar=constants.HBASE_JAR_PATH,
            name=constants.HBASE_SCHEDULE_BACKUP_STEP_NAME,
            action_on_failure=constants.CANCEL_AND_WAIT,
            args=args)
        steps.append(step_config)

        parameters = {'JobFlowId': parsed_args.cluster_id,
                      'Steps': steps}
        emrutils.call_and_display_response(self._session, 'AddJobFlowSteps',
                                           parameters, parsed_globals)
        return 0

    def _check_type(self, backup_type):
        """Raise ValueError unless the backup type is 'full' or
        'incremental'.  (Parameter renamed from ``type`` so the builtin
        is no longer shadowed.)
        """
        backup_type = backup_type.lower()
        if backup_type not in (constants.FULL, constants.INCREMENTAL):
            raise ValueError('aws: error: invalid type. type should be either '
                             + constants.FULL + ' or ' + constants.INCREMENTAL
                             + '.')

    def _check_unit(self, unit):
        """Raise ValueError unless the unit is 'minutes', 'hours' or 'days'."""
        unit = unit.lower()
        if unit not in (constants.MINUTES, constants.HOURS, constants.DAYS):
            raise ValueError('aws: error: invalid unit. unit should be one of'
                             ' the following values: ' + constants.MINUTES +
                             ', ' + constants.HOURS + ' or ' + constants.DAYS +
                             '.')

    def _build_hbase_schedule_backup_args(self, parsed_args):
        """Assemble the HBase jar argument list for the scheduling step."""
        args = [constants.HBASE_MAIN, constants.HBASE_SCHEDULED_BACKUP,
                constants.TRUE, constants.HBASE_BACKUP_DIR, parsed_args.dir]
        backup_type = parsed_args.type.lower()
        unit = parsed_args.unit.lower()

        if parsed_args.consistent is True:
            args.append(constants.HBASE_BACKUP_CONSISTENT)

        # Interval flag depends on whether the backup is full or incremental.
        if backup_type == constants.FULL:
            args.append(constants.HBASE_FULL_BACKUP_INTERVAL)
        else:
            args.append(constants.HBASE_INCREMENTAL_BACKUP_INTERVAL)
        args.append(parsed_args.interval)

        if backup_type == constants.FULL:
            args.append(constants.HBASE_FULL_BACKUP_INTERVAL_UNIT)
        else:
            args.append(constants.HBASE_INCREMENTAL_BACKUP_INTERVAL_UNIT)
        args.append(unit)

        args.append(constants.HBASE_BACKUP_STARTTIME)
        # Default the first backup to "now" when no start time was given.
        if parsed_args.start_time is not None:
            args.append(parsed_args.start_time)
        else:
            args.append(constants.NOW)
        return args
class CreateHBaseBackup(Command):
    """EMR subcommand: submit a step creating a one-off HBase backup in S3."""
    NAME = 'create-hbase-backup'
    DESCRIPTION = ('Creates a HBase backup in S3.')
    ARG_TABLE = [
        {'name': 'cluster-id', 'required': True,
         'help_text': helptext.CLUSTER_ID},
        {'name': 'dir', 'required': True,
         'help_text': helptext.HBASE_BACKUP_DIR},
        {'name': 'consistent', 'action': 'store_true',
         'help_text': '<p>Performs a consistent backup. Pauses all write'
                      ' operations to the HBase cluster during the backup'
                      ' process.</p>'}
    ]

    def _run_main_command(self, parsed_args, parsed_globals):
        """Build the backup step and submit it via AddJobFlowSteps.

        Returns 0 on success (CLI exit code convention).
        """
        steps = []
        args = self._build_hbase_backup_args(parsed_args)
        step_config = emrutils.build_step(
            jar=constants.HBASE_JAR_PATH,
            name=constants.HBASE_BACKUP_STEP_NAME,
            action_on_failure=constants.CANCEL_AND_WAIT,
            args=args)
        steps.append(step_config)

        parameters = {'JobFlowId': parsed_args.cluster_id,
                      'Steps': steps}
        emrutils.call_and_display_response(self._session, 'AddJobFlowSteps',
                                           parameters, parsed_globals)
        return 0

    def _build_hbase_backup_args(self, parsed_args):
        """Assemble the HBase jar argument list for a one-off backup."""
        args = [constants.HBASE_MAIN,
                constants.HBASE_BACKUP,
                constants.HBASE_BACKUP_DIR, parsed_args.dir]
        if parsed_args.consistent is True:
            args.append(constants.HBASE_BACKUP_CONSISTENT)
        return args
class DisableHBaseBackups(Command):
    """EMR subcommand: submit a step that disables scheduled HBase backups."""
    NAME = 'disable-hbase-backups'
    DESCRIPTION = ('Add a step to disable automated HBase backups.')
    ARG_TABLE = [
        {'name': 'cluster-id', 'required': True,
         'help_text': helptext.CLUSTER_ID},
        {'name': 'full', 'action': 'store_true',
         'help_text': 'Disables full backup.'},
        {'name': 'incremental', 'action': 'store_true',
         'help_text': 'Disables incremental backup.'}
    ]

    def _run_main_command(self, parsed_args, parsed_globals):
        """Build the disable step and submit it via AddJobFlowSteps.

        Returns 0 on success (CLI exit code convention).
        """
        steps = []
        args = self._build_hbase_disable_backups_args(parsed_args)
        # NOTE: positional build_step call here, unlike the keyword style
        # used by the sibling commands in this module.
        step_config = emrutils.build_step(
            constants.HBASE_JAR_PATH,
            constants.HBASE_SCHEDULE_BACKUP_STEP_NAME,
            constants.CANCEL_AND_WAIT,
            args)
        steps.append(step_config)

        parameters = {'JobFlowId': parsed_args.cluster_id,
                      'Steps': steps}
        emrutils.call_and_display_response(self._session, 'AddJobFlowSteps',
                                           parameters, parsed_globals)
        return 0

    def _build_hbase_disable_backups_args(self, parsed_args):
        """Assemble the argument list; raises ValueError when neither
        --full nor --incremental was requested.
        """
        args = [constants.HBASE_MAIN, constants.HBASE_SCHEDULED_BACKUP,
                constants.FALSE]
        if parsed_args.full is False and parsed_args.incremental is False:
            error_message = 'Should specify at least one of --' +\
                constants.FULL + ' and --' +\
                constants.INCREMENTAL + '.'
            raise ValueError(error_message)
        if parsed_args.full is True:
            args.append(constants.HBASE_DISABLE_FULL_BACKUP)
        if parsed_args.incremental is True:
            args.append(constants.HBASE_DISABLE_INCREMENTAL_BACKUP)

        return args
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
9e892d45dda82d76e0bb7c174261cd156399a9d1 | 59934e837a4e425bba4ce9bcb46940a00b68691c | /user_auth/views.py | f240bb27c98f015c23120e1c06e384e664960076 | [] | no_license | cjredmond/store_app | 309e8956785a7720b0578cf512c33cab7d2d03a9 | d285ddfcc2e2427baa1c30fc9504c4bbf0f387f6 | refs/heads/master | 2021-01-12T07:31:56.664103 | 2017-01-09T19:42:48 | 2017-01-09T19:42:48 | 76,973,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | from django.shortcuts import render
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic import TemplateView, ListView, DetailView
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse, reverse_lazy
from user_auth.models import Cart, Profile
from shipping.models import Shipment, OrderProduct
from products.models import CartProduct
class IndexView(TemplateView):
    """Landing page; renders the static ``index.html`` template."""
    template_name = "index.html"
class UserCreateView(CreateView):
    """Sign-up view backed by Django's stock ``UserCreationForm``."""
    model = User
    form_class = UserCreationForm

    def get_success_url(self):
        # After a successful registration, send the user to the login page.
        return reverse('login')
class CartDetailView(DetailView):
    """Read-only display of a single Cart (pk taken from the URL)."""
    model = Cart
class CartUpdateView(UpdateView):
    """Checkout hook: on POST, converts the Cart identified by the URL
    ``pk`` into a Shipment, copying each CartProduct into an
    OrderProduct, then empties the cart.
    """
    fields = []          # no editable model fields; the form is a POST trigger
    model = Cart
    success_url = '/'

    def form_valid(self, form, **kwargs):
        cart = Cart.objects.get(id=self.kwargs['pk'])
        # ``create`` persists the shipment immediately; no extra save needed.
        shipment = Shipment.objects.create(user=self.request.user)
        items = CartProduct.objects.filter(cart=cart)
        for product in items:
            # BUG FIX: the original used ``Product.models.get`` which raises
            # AttributeError at runtime; the model manager is ``objects``.
            OrderProduct.objects.create(
                name=product.name,
                price=product.price,
                description=product.description,
                shipment=shipment,
                copy_product=Product.objects.get(name=product.name),
            )
        # Empty the cart now that everything has been copied to the order.
        items.delete()
        return super().form_valid(form)
class ProfileDetailView(DetailView):
    """Read-only display of a single Profile (pk taken from the URL)."""
    model = Profile
class ProfileUpdateView(UpdateView):
    """Edit the address fields of a Profile; redirects back to the
    current user's profile page on success.
    """
    model = Profile
    fields = ('address_num', 'address_street', 'address_city', 'address_state')

    def get_success_url(self):
        # BUG FIX: the original passed ``args=str(id)``.  A string is an
        # iterable of characters, so any id >= 10 was unpacked into several
        # URL arguments and reverse() failed.  Wrap the id in a list.
        return reverse('profile_detail_view',
                       args=[self.request.user.profile.id])

    def form_valid(self, form):
        # No extra processing needed; defer to UpdateView (the original
        # built an unused unsaved instance here).
        return super().form_valid(form)
| [
"connor.redmond@gmail.com"
] | connor.redmond@gmail.com |
9f8e6550d80fe0672b5a2dc3e1f8e9427e24ba58 | 5df8b0f5bda2603cf5437c2bcf1e30b326ea791e | /geektrust/MakeSpace/Tests/TestBufferTime.py | 329061397f6a11f3f3cdff0af3e8272ae4a269ab | [] | no_license | RishabhKatiyar/CorePython | 4e9133ab3949815290157818b9be8ab943f9b802 | d4e80e26b4c439fa269351c674466a2918eaaf77 | refs/heads/main | 2023-07-19T14:03:38.662270 | 2021-09-05T09:09:51 | 2021-09-05T09:09:51 | 369,292,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import unittest
from src.Models.BufferTime import BufferTime
class TestQuery(unittest.TestCase):
def test_buffer_time(self):
bufferTime = BufferTime()
bufferTime.StartTime = "12:00"
bufferTime.EndTime = "13:00"
self.assertEqual(str(bufferTime), "12:00-13:00")
if __name__ == '__main__':
unittest.main() | [
"rishabh.katiyar@outlook.com"
] | rishabh.katiyar@outlook.com |
0243cb82e0007eaf0d6e9fbaf1c0b266247784ad | 09f0a01272042b6de7cb7af6d40f10dd6e10a574 | /compounds/script_new/prescription_13.py | bd9f51f91b260b8dbbc4ab23498987b051b8e19b | [] | no_license | jianping-grp/yatcm-1-02 | 4c010788989562365f78745a619fc66f5ef7554e | d03d599cc45fd757b19e11856bc203ee1c39fdf7 | refs/heads/master | 2021-09-06T11:39:22.829983 | 2018-02-06T05:28:54 | 2018-02-06T05:28:54 | 115,930,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,148 | py | import os
import xlrd
import logging
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'yatcm.settings')
import django
django.setup()
from compounds.models import *
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
primary_file = '/home/jianping/django_test/yatcm/compounds/data/prescription_primary_complete_last.xlsx'
vice_file = '/home/jianping/django_test/yatcm/compounds/data/vice_complete.xlsx'
logger = logging.getLogger('tax_logger')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('/home/jianping/django_test/logs/prescription.txt')
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
def primary_upload(row):
    """Create (or fetch) a Prescription from one row of the primary
    workbook and link every herb named in its ``zucheng`` column.

    ``row`` is a sequence of cell values in the workbook's fixed column
    order (chinese name, english name, pinyin, composition, usage, ...).
    """
    chinese_name = row[0].strip()
    english_name = row[1].strip()
    pinyin = row[2].strip()
    zucheng = row[3].strip()
    herb_list = row[3].strip().split()
    yongfa = row[4].strip()
    fangjie = row[5].strip()
    chinese_gongyong = row[6].strip()
    english_gongyong = row[7].strip()
    chinese_xiandai = row[8].strip()
    english_xiandai = row[9].strip()
    try:
        prescription, created = Prescription.objects.get_or_create(
            chinese_name=chinese_name,
            english_name=english_name,
            pinyin_name=pinyin,
            zucheng=zucheng,
            yongfa=yongfa,
            fangjie=fangjie,
            chinese_indiction=chinese_gongyong,
            english_indiction=english_gongyong,
            chinese_modern_application=chinese_xiandai,
            english_modern_application=english_xiandai
        )
    except Prescription.MultipleObjectsReturned:
        # BUG FIX: get_or_create never raises DoesNotExist (it creates the
        # row instead), so the original DoesNotExist handler was dead code.
        # It CAN raise MultipleObjectsReturned; in that case ``prescription``
        # is unbound, so the original crashed with NameError in the herb
        # loop below.  Log and bail out instead.
        logger.warning("{} return more than one objects".format(unicode(chinese_name)))
        return
    for herb_name in herb_list:
        # BUG FIX: QuerySet.filter never raises Herb.DoesNotExist; it
        # returns an empty queryset.  Check emptiness explicitly so the
        # "missing herb" log message can actually fire.
        herbs = Herb.objects.filter(Chinese_name=herb_name)
        if not herbs:
            logger.info("{} does not exist".format(herb_name))
        for herb in herbs:
            prescription.herbs.add(herb)
    prescription.save()
def vice_upload(row):
    """Create (or fetch) a "vice" Prescription from one workbook row,
    attach it to its main prescription and link its herbs.

    ``row`` columns: main prescription name, chinese name, pinyin,
    composition, usage.
    """
    main_prescription_name = row[0].strip()
    chinese_name = row[1].strip()
    pinyin_name = row[2].strip()
    zucheng = row[3].strip()
    herb_list = row[3].strip().split()
    yongfa = row[4].strip()
    try:
        prescription, created = Prescription.objects.get_or_create(
            chinese_name=chinese_name,
            pinyin_name=pinyin_name,
            zucheng=zucheng,
            yongfa=yongfa
        )
    except Prescription.MultipleObjectsReturned:
        # BUG FIX: get_or_create never raises DoesNotExist, so the original
        # DoesNotExist handler (which also mixed %-style and str.format in
        # its message) was dead code.  On MultipleObjectsReturned the
        # original left ``prescription`` unbound; bail out instead.
        logger.warning("{} return more than one objects".format(chinese_name))
        return
    # Link to the main prescription; a missing/ambiguous main record is
    # logged but does not stop the herb linking below.
    try:
        main_prescription = Prescription.objects.get(chinese_name=main_prescription_name)
    except Prescription.DoesNotExist:
        logger.warning("%s does not exist!" % main_prescription_name)
    except Prescription.MultipleObjectsReturned:
        logger.warning("%s return more than one objects" % main_prescription_name)
    else:
        prescription.main_prescription = main_prescription
        prescription.save()
    for herb_name in herb_list:
        # BUG FIX: QuerySet.filter never raises Herb.DoesNotExist; test
        # for an empty queryset explicitly.
        herbs = Herb.objects.filter(Chinese_name=herb_name)
        if not herbs:
            logger.info("{} does not exist".format(herb_name))
        for herb in herbs:
            prescription.herbs.add(herb)
    prescription.save()
if __name__ == '__main__':
    # Python 2 script (print statements).  Row 0 of each sheet is the
    # header and is skipped.
    primary_table = xlrd.open_workbook(primary_file).sheet_by_index(0)
    for row_number in range(1, primary_table.nrows):
        print row_number
        row = primary_table.row_values(row_number)
        primary_upload(row)
        # vice_upload(row)
    vice_table = xlrd.open_workbook(vice_file).sheet_by_index(0)
    for row_number in range(1, vice_table.nrows):
        print row_number
        row = vice_table.row_values(row_number)
        vice_upload(row)
"libaiqing11@163.com"
] | libaiqing11@163.com |
d3367cde2f6849fd46b362f07fe9839e088bf261 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/bps_cs22955-0024/sdB_BPS_CS22955-0024_coadd.py | 413af22bb38328ee2ff50f70e492e505c5504036 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | from gPhoton.gMap import gMap
def main():
    # Generate a 30 s cadence NUV count map / movie for target
    # sdB_BPS_CS22955-0024 over a 2' x 2' box centred on the listed
    # sky position, overwriting any existing output files.
    gMap(band="NUV", skypos=[305.959417,-25.141333], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_BPS_CS22955-0024/sdB_BPS_CS22955-0024_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_BPS_CS22955-0024/sdB_BPS_CS22955-0024_count_coadd.fits", overwrite=True, verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
a7a762a61b13efb880d4bd6c329f5fe38e10421f | da8730b3977d0b1e59e0b80d88cc871d92cd2700 | /nlp_txt/tfidf_test.py | 567643af0c55ace0ff786e4a687a64e2730c14bd | [] | no_license | legend1412/PythonDemo | 44b62f82a8826b5a50cf0a6506e6082d89d1ab7b | 3e6e4624801b9b9c272d0891f5675ec9466b4205 | refs/heads/master | 2020-12-28T01:27:39.942087 | 2020-10-12T02:33:29 | 2020-10-12T02:33:29 | 238,136,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | import os
import math
file_path = '../data/allfiles'
# ๅ ่ฝฝๅ็จ่ฏ่กจ
stop_set = set()
with open('stop_word.txt', 'r', encoding='utf-8') as f:
for word in f.readlines():
stop_set.add(word.strip())
doc_words = dict()
doc_num = 0
for filename in os.listdir(file_path): # ๅฝๅ็ฎๅฝไธ็ๆๆๆ็ซ ็ๅๅญ
# print(filename)
with open(file_path + '/' + filename, 'r', encoding='utf-8') as f:
# print(f.read())
word_freq = dict()
sum_cnt = 0 # ็ป่ฎกๅ ๆฏ็จ
max_tf = 0 # ไฝฟ็จๆๅคง่ฏ้ข็ๅ่ฏๅค็
for line in f.readlines():
words = line.strip().split(' ')
for word in words:
if len(word.strip()) < 1 or word in stop_set:
continue
if word_freq.get(word, -1) == -1:
word_freq[word] = 0
word_freq[word] += 1
sum_cnt += 1
if word_freq[word] > max_tf:
max_tf = word_freq[word]
# print(word_freq)
# ๅฐ่ฏ้ขๅค็ๆๅ ๆฏๅฝขๅผ
for word in word_freq.keys():
# word_freq[word] /= sum_cnt
word_freq[word] /= max_tf
# print(word_freq)
doc_words[filename] = word_freq
doc_num += 1
# print(doc_words)
# ็ป่ฎกๆฏไธช่ฏ็doc_frq (df)
doc_freq = dict()
for doc in doc_words.keys(): # ๆๆฌๅๅญ
for word in doc_words[doc].keys():
if doc_freq.get(word, -1) == -1:
doc_freq[word] = 0
doc_freq[word] += 1
# print(doc_num)
# print(doc_freq)
# ๅฅidfๅ
ฌๅผ
for word in doc_freq.keys():
doc_freq[word] = math.log(doc_num / float(doc_freq[word] + 1), 10)
# print(doc_freq)
# ๅๅ10ๅๅ10
# print(sorted(doc_freq.items(), key=lambda x: x[1], reverse=True)[:10])
# print(sorted(doc_freq.items(), key=lambda x: x[1], reverse=False)[:10])
# ๅฅๅ
ฌๅผtf*idf
for doc in doc_words.keys():
for word in doc_words[doc].keys():
doc_words[doc][word] *= doc_freq[word]
# ๆไธ็ฏๆ็ซ ็ๅ10ๅๅ10
# print(sorted(doc_words['3business.seg.cln.txt'].items(), key=lambda x: x[1], reverse=True)[:10])
# print(sorted(doc_words['3business.seg.cln.txt'].items(), key=lambda x: x[1], reverse=False)[:10])
| [
"zhaojianhao_1984@163.com"
] | zhaojianhao_1984@163.com |
e4244f1657cfe342166a84a3c031654b728f69f5 | 91a2ecfaf5dc6c917ec2fda31f56291103f68ceb | /tests/post_process/test_ctc_greedy_decoder.py | bc51da0de6d2dc6ebf4c6a36ba101ef8d5808b7c | [
"BSD-3-Clause"
] | permissive | MyrtleSoftware/myrtlespeech | 635d1d16d1bd60fb07a4d30edbf9acb61786c13f | 8522048fd37744ffa06827a0cbd202b839a15453 | refs/heads/master | 2021-07-16T14:55:00.479967 | 2020-03-20T14:33:15 | 2020-03-20T14:33:15 | 192,501,300 | 12 | 1 | NOASSERTION | 2020-03-20T14:33:17 | 2019-06-18T08:44:33 | Python | UTF-8 | Python | false | false | 5,299 | py | from typing import Tuple
import hypothesis.strategies as st
import pytest
import torch
from hypothesis import assume
from hypothesis import given
from myrtlespeech.post_process.ctc_greedy_decoder import CTCGreedyDecoder
from tests.data.test_alphabet import random_alphabet
# Fixtures and Strategies -----------------------------------------------------
@st.composite
def ctc_greedy_decoder_input_outputs(
    draw,
) -> st.SearchStrategy[
    Tuple[
        int,  # blank index
        Tuple[torch.Tensor, torch.Tensor],  # x, lengths
        Tuple[torch.Tensor, torch.Tensor],  # output, output_lengths
    ]
]:
    """Returns a SearchStrategy for (blank_index, input, expected output).

    Builds label sequences, then constructs a logits tensor whose argmax
    path is exactly those labels with blanks/repeats inserted, so a
    greedy CTC decode must recover the original sequences.
    """
    alphabet = draw(random_alphabet())
    assume(len(alphabet) > 1)  # must be at least blank and one other symbol
    blank_index = draw(st.integers(0, len(alphabet) - 1))
    # generate random batch of sentence (indices) excluding blank index
    batch_size = draw(st.integers(1, 8))
    non_blanks = alphabet.get_indices(list(alphabet))
    non_blanks.pop(blank_index)
    sentences = [
        draw(st.lists(st.sampled_from(non_blanks), min_size=1))
        for _ in range(batch_size)
    ]
    # for each sentence insert "blank" between duplicate symbols (so CTC
    # collapsing cannot merge them) and replicate some symbols (which CTC
    # collapsing must merge back)
    blank_sentences = []
    for sentence in sentences:
        blank_sentence = []
        prev = None
        for symbol_idx in sentence:
            if prev is not None and prev == symbol_idx:
                n_rep = draw(st.integers(1, 5))
                blank_sentence.extend([blank_index] * n_rep)
            n_rep = draw(st.integers(1, 5))
            blank_sentence.extend([symbol_idx] * n_rep)
            prev = symbol_idx
        blank_sentences.append(blank_sentence)
    # compute inputs: one (longest, alphabet) tensor per batch element
    longest = max([len(sentence) for sentence in blank_sentences])
    input_sentences = []  # list of input 2D tensors (longest, len(alphabet))
    input_lengths = []  # list of input lengths
    for sentence in blank_sentences:
        input_sentence = torch.empty((longest, len(alphabet))).normal_()
        # ensure desired symbol has greatest value at each time step by summing
        # up abs value of all symbols
        for seq_idx, sym_idx in enumerate(sentence):
            input_sentence[seq_idx, sym_idx] = (
                0.5 + input_sentence[seq_idx, :].abs().sum()
            )
        input_sentences.append(input_sentence)
        input_lengths.append(len(sentence))
    x = torch.stack(input_sentences, dim=1)  # (seq_len, batch, alphabet)
    # pick any integer dtype wide enough to represent the longest length
    supported_dtypes = [torch.int64]
    if longest <= 2 ** 31 - 1:
        supported_dtypes.append(torch.int32)
    if longest <= 2 ** 15 - 1:
        supported_dtypes.append(torch.int16)
    if longest <= 2 ** 8 - 1:
        supported_dtypes.append(torch.uint8)
    if longest <= 2 ** 7 - 1:
        supported_dtypes.append(torch.int8)
    lengths_dtype = draw(st.sampled_from(supported_dtypes))
    lengths = torch.tensor(input_lengths, dtype=lengths_dtype)
    return blank_index, (x, lengths), sentences
# Tests -----------------------------------------------------------------------
@given(input_output=ctc_greedy_decoder_input_outputs())
def test_ctc_greedy_decoder_correct_decode(input_output) -> None:
    """Greedy decode of the constructed logits recovers the label sequences."""
    blank_index, (x, lengths), exp_sentences = input_output
    ctc_decoder = CTCGreedyDecoder(blank_index)
    act_sentences = ctc_decoder(x, lengths)
    assert act_sentences == exp_sentences
@given(
    input_output=ctc_greedy_decoder_input_outputs(),
    dtype=st.sampled_from([torch.half, torch.float, torch.double]),
)
def test_ctc_greedy_decoder_raises_value_error_for_float_dtypes(
    input_output, dtype: torch.dtype
) -> None:
    """Ensures ValueError raised when lengths.dtype is float."""
    blank_index, (x, lengths), exp_sentences = input_output
    # Cast the (valid) integer lengths to a floating dtype to trigger it.
    lengths = lengths.to(dtype)
    ctc_decoder = CTCGreedyDecoder(blank_index)
    with pytest.raises(ValueError):
        ctc_decoder(x, lengths)
@given(x_batch_size=st.integers(1, 32), lengths_batch_size=st.integers(1, 32))
def test_ctc_greedy_decoder_raises_value_error_when_batch_x_lengths_differ(
    x_batch_size: int, lengths_batch_size: int
) -> None:
    """Ensures ValueError raised when batch size of x and lengths differs."""
    assume(x_batch_size != lengths_batch_size)
    ctc_decoder = CTCGreedyDecoder(0)
    # create input tensors, seq len and alphabet size fixed to 10 and 5
    x = torch.empty((10, x_batch_size, 5))
    lengths = torch.empty(lengths_batch_size, dtype=torch.int16)
    with pytest.raises(ValueError):
        ctc_decoder(x, lengths)
@given(data=st.data(), input_output=ctc_greedy_decoder_input_outputs())
def test_ctc_greedy_decoder_raises_value_error_lengths_values_greater_seq_len(
    data, input_output
) -> None:
    """Ensures ValueError when lengths entry is greater than seq len of x."""
    blank_index, (x, lengths), exp_sentences = input_output
    seq_len, batch, _ = x.size()
    ctc_decoder = CTCGreedyDecoder(blank_index)
    # Corrupt one random batch entry with a length beyond seq_len, while
    # staying representable in the lengths dtype.
    invalid_length = data.draw(st.integers(seq_len + 1, 3 * seq_len))
    assume(invalid_length <= torch.iinfo(lengths.dtype).max)
    invalid_idx = data.draw(st.integers(0, batch - 1))
    lengths[invalid_idx] = invalid_length
    with pytest.raises(ValueError):
        ctc_decoder(x, lengths)
| [
"sam@samgd.com"
] | sam@samgd.com |
fe6dd390f3317b44c302772859d4bfffd9a5d58f | f0f4eef5a57807960070a1989799def6deaf30ef | /bclearer_boson_1_2_source/b_code/substages/operations/b_evolve/runners/boson_1_2_add_composite_names_operation_substage_runner.py | eb1b64dfdeff8908e40bf4673a209e0227ec359e | [
"MIT"
] | permissive | boro-alpha/bclearer_boson_1_2 | 4f8dd72b79e0ccdd759271d79e3cbfa61a949ac6 | 571b2e1ca6dee93ccc5cb4e30abe2660f40c2ac0 | refs/heads/master | 2023-08-23T05:02:30.384138 | 2021-11-04T11:24:35 | 2021-11-04T11:24:35 | 424,218,650 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | from nf_common_source.code.services.reporting_service.reporters.log_with_datetime import log_message
from nf_ea_common_tools_source.b_code.services.general.nf_ea.com.nf_ea_com_universes import NfEaComUniverses
from nf_ea_common_tools_source.b_code.services.session.orchestrators.ea_tools_session_managers import \
EaToolsSessionManagers
from bclearer_boson_1_2_source.b_code.configurations.objects.bespoke_operation_configurations import \
BespokeOperationConfigurations
from bclearer_boson_1_2_source.b_code.substages.operations.b_evolve.coordinate_lines.composite_names.add_composite_names_orchestrator import \
orchestrate_add_composite_names
def run_boson_1_2_add_composite_names_operation_substage(
content_universe: NfEaComUniverses,
ea_tools_session_manager: EaToolsSessionManagers,
bespoke_operation_configuration: BespokeOperationConfigurations) \
-> NfEaComUniverses:
log_message(
message='CONTENT OPERATION: Add composite_names to universe - ' +
bespoke_operation_configuration.short_name + ' - started')
output_universe = \
orchestrate_add_composite_names(
content_universe=content_universe,
ea_tools_session_manager=ea_tools_session_manager,
short_name=bespoke_operation_configuration.short_name)
log_message(
message='CONTENT OPERATION: Add composite_names to universe - ' +
bespoke_operation_configuration.short_name + ' - finished')
return \
output_universe
| [
"xibertao@borogroup.co.uk"
] | xibertao@borogroup.co.uk |
177c6e9343406223b3c079323bd635e5317a4752 | bcc199a7e71b97af6fbfd916d5a0e537369c04d9 | /leetcode/solved/659_Split_Array_into_Consecutive_Subsequences/solution.py | 3a81e0212c1a8d1cd384c14ddac3a641b8af3e72 | [] | no_license | sungminoh/algorithms | 9c647e82472905a2c4e505c810b622b734d9d20d | 1389a009a02e90e8700a7a00e0b7f797c129cdf4 | refs/heads/master | 2023-05-01T23:12:53.372060 | 2023-04-24T06:34:12 | 2023-04-24T06:34:12 | 87,406,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,661 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright ยฉ 2020 sungminoh <smoh2044@gmail.com>
#
# Distributed under terms of the MIT license.
"""
You are given an integer array nums that is sorted in non-decreasing order.
Determine if it is possible to split nums into one or more subsequences such that both of the following conditions are true:
Each subsequence is a consecutive increasing sequence (i.e. each integer is exactly one more than the previous integer).
All subsequences have a length of 3 or more.
Return true if you can split nums according to the above conditions, or false otherwise.
A subsequence of an array is a new array that is formed from the original array by deleting some (can be none) of the elements without disturbing the relative positions of the remaining elements. (i.e., [1,3,5] is a subsequence of [1,2,3,4,5] while [1,3,2] is not).
Example 1:
Input: nums = [1,2,3,3,4,5]
Output: true
Explanation: nums can be split into the following subsequences:
[1,2,3,3,4,5] --> 1, 2, 3
[1,2,3,3,4,5] --> 3, 4, 5
Example 2:
Input: nums = [1,2,3,3,4,4,5,5]
Output: true
Explanation: nums can be split into the following subsequences:
[1,2,3,3,4,4,5,5] --> 1, 2, 3, 4, 5
[1,2,3,3,4,4,5,5] --> 3, 4, 5
Example 3:
Input: nums = [1,2,3,4,4,5]
Output: false
Explanation: It is impossible to split nums into consecutive increasing subsequences of length 3 or more.
Constraints:
1 <= nums.length <= 104
-1000 <= nums[i] <= 1000
nums is sorted in non-decreasing order.
"""
import sys
import itertools
from heapq import heappop
from heapq import heappush
from collections import Counter
from collections import defaultdict
from typing import Tuple
from typing import Dict
from typing import List
import pytest
class Solution:
def isPossible(self, nums: List[int]) -> bool:
"""08/07/2020 01:11"""
cnt = defaultdict(int)
for n in nums:
cnt[n] += 1
ends = defaultdict(int)
keys = list(cnt.keys())
for n in keys:
if cnt[n] == 0:
continue
removes = min(ends[n-1], cnt[n])
ends[n-1] -= removes
ends[n] += removes
cnt[n] -= removes
if cnt[n+1] >= cnt[n] and cnt[n+2] >= cnt[n]:
cnt[n+1] -= cnt[n]
cnt[n+2] -= cnt[n]
ends[n+2] += cnt[n]
else:
return False
return True
def isPossible(self, nums: List[int]) -> bool:
"""08/28/2022 14:32
Time Complexity: O(nlogn)
Space Complexity: O(n)
"""
ends_at: Dict[int, List[int]] = defaultdict(list)
for n in sorted(nums):
if not ends_at.get(n-1):
heappush(ends_at[n], 1)
else:
heappush(ends_at[n], heappop(ends_at[n-1]) + 1)
return not any(length < 3 for length in itertools.chain(*list(ends_at.values())))
def isPossible(self, nums: List[int]) -> bool:
"""08/28/2022 15:14
Time Complexity: O(n)
Space Complexity: O(n)
"""
length_so_far = [0, 0, 0] # [length=1, 2, >=3]
cnt = Counter(nums)
keys = list(cnt.keys())
for i, n in enumerate(keys):
l1, l2, l3 = length_so_far
if keys[i-1] == n-1:
if l1+l2 > cnt[n]:
return False
length_so_far = [
max(0, cnt[n] - (l1+l2+l3)),
l1,
l2 + min(l3, cnt[n]-(l1+l2))
]
else:
if l1+l2 > 0:
return False
length_so_far = [cnt[n], 0, 0]
return length_so_far[0] + length_so_far[1] == 0
@pytest.mark.parametrize('nums, expected', [
([1,2,3,3,4,5], True),
([1,2,3,3,4,4,5,5], True),
([1,2,3,4,4,5], False),
([1,2,3], True),
([1,3,3,4,4,7,8,8,9,10], False),
([4,5,6,6,7,8,9,10,10,11], False),
([9,10,11,12,13,14,29,30,31,32,33,34,35,36,37,38,39,40,41,41,42,42,43,44,45,46,47,47,48,48,49,49,50,50,51,51,51,52,52,52,53,53,53,54,54,54,55,55,55,56,56,56,57,57,57,58,58,58,59,59,59,59,60,60,60,60,61,61,61,61,62,62,62,62,63,63,63,63,64,64,64,64,65,65,65,65,66,66,66,66,67,67,67,67,68,68,68,68,69,69,69,69,70,70,70,70,71,71,71,71,72,72,72,72,73,73,73,73,74,74,74,74,75,75,75,75,76,76,76,76,76,77,77,77,77,77,78,78,78,78,78,79,79,79,79,80,80,80,80,81,81,81,81,82,82,82,82,83,83,83,83,84,84,84,84,85,85,85,85,85,86,86,86,86,86,86,87,87,87,87,87,87,88,88,88,88,88,88,89,89,89,89,89,89,90,90,90,90,90,90,91,91,91,91,91,91,92,92,92,92,92,92,93,93,93,93,93,93,94,94,94,94,94,94,95,95,95,95,95,95,96,96,96,96,96,96,96,97,97,97,97,97,97,97,98,98,98,98,98,98,98,99,99,99,99,99,99,99,100,100,100,100,100,100,100,101,101,101,101,101,101,101,102,102,102,102,102,102,102,103,103,103,103,103,103,103,104,104,104,104,104,104,104,105,105,105,105,105,105,106,106,106,106,106,106,107,107,107,107,107,107,108,108,108,108,108,108,109,109,109,109,109,109,110,110,110,110,110,111,111,111,111,111,112,112,112,112,112,113,113,113,113,113,114,114,114,114,114,115,115,115,115,115,116,116,116,116,116,117,117,117,117,118,118,118,118,119,119,119,119,120,120,120,120,121,121,121,122,122,122,123,123,123,124,124,124,125,125,125,126,126,126,127,127,127,128,128,128,129,129,129,130,130,130,131,131,132,132,133,133,134,134,135,135,136,136,137,137,138,138,139,139,140,140,141,141,142,142,143,143,144,144,145,145,146,146,147], True),
([10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,23,23,23,23,23,23,23,23,23,23,
23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36
,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,4
8,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,
61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74
,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,8
7,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,
100], True)
])
def test(nums, expected):
assert expected == Solution().isPossible(nums)
if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
| [
"smoh2044@gmail.com"
] | smoh2044@gmail.com |
a043804e7b66e1f74b60df789131b4a8007b392f | ceead28beb1ea6cb56a2bb4472bc1d2396b39e6f | /gen_basis_helpers/workflows/unit_tests/utest_surface_energies.py | 850a7385e490305b0b680a214039bfa158ae9a4b | [] | no_license | RFogarty1/plato_gen_basis_helpers | 9df975d4198bff7bef80316527a8086b6819d8ab | 8469a51c1580b923ca35a56811e92c065b424d68 | refs/heads/master | 2022-06-02T11:01:37.759276 | 2022-05-11T12:57:40 | 2022-05-11T12:57:40 | 192,934,403 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py |
import types
import unittest
import unittest.mock as mock
import gen_basis_helpers.workflows.surface_energies as tCode
class TestSurfaceEnergyWorkflow(unittest.TestCase):
def setUp(self):
self.bulkEnergy = 4
self.bulkNumbAtoms = 1
self.surfEnergy = 6
self.surfNumbAtoms = 2
self.surfaceArea = 3
self.surfCalcObj = mock.Mock()
self.bulkCalcObj = mock.Mock()
self.surfaceAreaFromUnitCell = mock.Mock()
self.createTestObjs()
def createTestObjs(self):
energiesObjBulk = types.SimpleNamespace( electronicTotalE=self.bulkEnergy )
energiesObjSurface = types.SimpleNamespace( electronicTotalE=self.surfEnergy )
self.bulkCalcObj.parsedFile.energies = energiesObjBulk
self.bulkCalcObj.parsedFile.numbAtoms = self.bulkNumbAtoms
self.surfCalcObj.parsedFile.energies = energiesObjSurface
self.surfCalcObj.parsedFile.numbAtoms = self.surfNumbAtoms
self.surfaceAreaFromUnitCell.side_effect = lambda x: self.surfaceArea
self.testObjA = tCode.SurfaceEnergyWorkflow(self.surfCalcObj, self.bulkCalcObj,
self.surfaceAreaFromUnitCell)
def testEnergyPerAtomBulk(self):
expValue = self.bulkEnergy / self.bulkNumbAtoms
actValue = self.testObjA._energyPerAtomBulk
self.assertEqual(expValue,actValue)
def testEnergyPerAtomSurface(self):
expValue = self.surfEnergy / self.surfNumbAtoms
actValue = self.testObjA._energyPerAtomSurface
self.assertEqual(expValue,actValue)
def testRunGivesExpectedVal(self):
self.testObjA.run()
expVal = -(1/3)
actVal = self.testObjA.output[0].surfaceEnergy
self.assertAlmostEqual(expVal,actVal)
def testRunGivesExpectedEPerAtomVals(self):
self.testObjA.run()
expSurfEPerAtom, expBulkEPerAtom = self.surfEnergy/self.surfNumbAtoms, self.bulkEnergy/self.bulkNumbAtoms
actSurfEPerAtom, actBulkEPerAtom = self.testObjA.output[0].surfEPerAtom, self.testObjA.output[0].bulkEPerAtom
self.assertAlmostEqual(expSurfEPerAtom, actSurfEPerAtom)
self.assertAlmostEqual(expBulkEPerAtom, actBulkEPerAtom)
| [
"richard.m.fogarty@gmail.com"
] | richard.m.fogarty@gmail.com |
4fa4c5f3db02a42dbc8eed96c34b70d36bbb1d69 | e532600dd9a7f4ad5cdb62134c4a6c670270b026 | /viewer/settings_nersc_dev.py | ebda2349228f803f210d7404cea09c3d963b2ff7 | [] | no_license | ziyaointl/decals-web | 5a94195528dd11bf9774f4ddf10076116ec80e14 | 8950ccb28d5ec51c6eda305b51ffbc484c1c8452 | refs/heads/master | 2020-04-26T17:27:07.555736 | 2019-07-13T04:17:24 | 2019-07-13T04:17:24 | 169,711,896 | 0 | 0 | null | 2019-02-08T09:34:06 | 2019-02-08T09:34:06 | null | UTF-8 | Python | false | false | 1,068 | py | from viewer.settings_common import *
#ENABLE_SQL = True
DEBUG_LOGGING = True
#DEBUG_LOGGING = False
USER_QUERY_DIR = '/tmp/viewer-dev-user'
#USER_CATALOG_DIR = USER_QUERY_DIR
READ_ONLY_BASEDIR = True
ROOT_URL = '/viewer-dev'
STATIC_URL_PATH = '/viewer-dev/static'
STATIC_URL = 'http://%s%s/' % (HOSTNAME, STATIC_URL_PATH)
TILE_URL = 'http://{s}.%s%s/{id}/{ver}/{z}/{x}/{y}.jpg' % (HOSTNAME, ROOT_URL)
STATIC_TILE_URL = 'http://{s}.%s%s/tiles/{id}/{ver}/{z}/{x}/{y}.jpg' % (HOSTNAME, STATIC_URL_PATH)
STATIC_TILE_URL_B = 'http://{s}.imagine.legacysurvey.org/static/tiles/{id}/{ver}/{z}/{x}/{y}.jpg'
SUBDOMAINS_B = SUBDOMAINS
# no CORS -- so don't use subdomains, or specify hostname (www.legacysurvey.org vs legacysurvey.org)
CAT_URL = '%s/{id}/{ver}/{z}/{x}/{y}.cat.json' % (ROOT_URL)
#ENABLE_SQL = True
#ENABLE_MZLS = True
ENABLE_DEV = True
ENABLE_DR2 = False
ENABLE_DECAPS = True
ENABLE_EBOSS = True
ENABLE_DR3 = False
ENABLE_DR4 = False
ENABLE_DR5 = True
ENABLE_PS1 = True
#ENABLE_DR6 = True
#ENABLE_DR7 = True
ENABLE_DR8 = True
ENABLE_DES_DR1 = True
| [
"dstndstn@gmail.com"
] | dstndstn@gmail.com |
22afc6b9df87ef1eba284da20a807366278c24d4 | f4653b4bd7528150a53c8f454658c00d7ea0b836 | /cbm/ipycbm/ipy_get/get_settings.py | c56764960ff66d729f1e47f8092cf69abe15c7e0 | [
"BSD-3-Clause"
] | permissive | mokasini/cbm | ccb09cb8ab96e6b06b0e13d86ff51124538706f6 | 33bd9c8a0d107f6cdc3343953ae9f7c9bd9272cd | refs/heads/main | 2023-02-24T04:44:07.744715 | 2021-02-01T12:29:38 | 2021-02-01T12:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,226 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (Text, VBox, HBox, Label, Password, RadioButtons,
Button, Layout, Box, Tab, Output, Dropdown,
FloatText, BoundedIntText, Combobox)
from cbm.utils import config, data_options
from cbm.ipycbm.utils import settings
from cbm.sources import database
def widget_box():
    """Build the top-level settings widget for the data-access panel.

    Shows a radio selector for the data source (JRC RESTful API or
    direct database/object-storage access), the configuration sub-panel
    that matches the current choice, and the general settings box.
    The selection is persisted to the config under ['set', 'data_source'].
    """
    current_source = int(config.get_value(['set', 'data_source']))

    source_selector = RadioButtons(
        options=[
            ("JRC RESTful API.", 0),
            ("Direct access to database and object storage.", 1)
        ],
        value=current_source,
        layout={'width': 'max-content'}
    )

    selector_row = Box([Label(value="Data sources:"), source_selector])

    info_api = Label("RESTful API Settings.")
    info_direct = Label("Direct access settings")

    # Container whose children are swapped to match the selected source.
    view_options = VBox([info_direct])
    if current_source == 0:
        view_options.children = [info_api, rest_api()]
    elif current_source == 1:
        view_options.children = [info_direct, direct()]

    def on_source_change(change):
        # Rebuild the sub-panel for the new selection and persist it.
        view_options.children = []
        if source_selector.value == 0:
            view_options.children = [info_api, rest_api()]
        elif source_selector.value == 1:
            view_options.children = [info_direct, direct()]
        config.update(['set', 'data_source'], str(source_selector.value))

    source_selector.observe(on_source_change, 'value')

    wbox_sources = VBox([selector_row, view_options],
                        layout=Layout(border='1px solid black'))

    info_general = Label(value="General settings:")

    return VBox([wbox_sources, info_general, settings.widget_box()])
def rest_api(mode=None):
    """Return a widget box for editing the RESTful API credentials.

    Fields are pre-filled from the configuration file and written back
    when the user presses 'Save'.  The stored password is kept unchanged
    when the password field is left blank.
    """
    conf = config.read()

    wt_url = Text(value=conf['api']['url'], placeholder='Add URL',
                  description='API URL:', disabled=False)
    wt_user = Text(value=conf['api']['user'], placeholder='Username',
                   description='API User:', disabled=False)
    wt_pass = Password(value=conf['api']['pass'], placeholder='******',
                       description='API Password:', disabled=False)
    wb_save = Button(description='Save', disabled=False, icon='save')

    progress = Output()

    def outlog(*text):
        # Print status messages into the output area below the form.
        with progress:
            print(*text)

    @wb_save.on_click
    def wb_save_on_click(b):
        config.update(['api', 'url'], str(wt_url.value))
        config.update(['api', 'user'], str(wt_user.value))
        # Only overwrite the saved password when a new one was typed.
        if wt_pass.value != '':
            config.update(['api', 'pass'], str(wt_pass.value))
        outlog("API information is updated")

    return VBox([wt_url, wt_user, wt_pass, wb_save, progress])
def direct():
    """Return a tabbed widget with the direct-access panels.

    Tab 0 holds the connection settings, tab 1 the dataset (db)
    configuration panel.
    """
    panels = [settings.direct_conn(), direct_settings()]
    titles = ['Connection', 'db Configuration']
    tabs = Tab(children=panels)
    for idx, title in enumerate(titles):
        tabs.set_title(idx, title)
    return tabs
def direct_settings():
    """Build the ipywidgets panel for managing dataset configurations.

    Lets the user pick the default dataset/year, refresh the lists from the
    stored config, and open a sub-panel (``dsc_config``) to edit, create or
    delete per-dataset, per-year table/column settings.

    Returns:
        VBox: the assembled widget box (label, selector row, editor area,
        progress output).
    """
    values = config.read()
    ds_def = values['set']['ds_conf']
    ds_dye = values['set']['ds_year']
    # Fall back to the first configured dataset if the stored default
    # no longer exists in the config file.
    if ds_def not in [d for d in values['ds_conf']]:
        ds_def = [d for d in values['ds_conf']][0]
    dsc = Dropdown(
        options=[d for d in values['ds_conf']],
        value=ds_def,
        description='Default:',
        disabled=False,
        layout=Layout(width='200px')
    )
    dsy = Dropdown(
        options=[int(y) for y in values['ds_conf'][dsc.value]['years']],
        value=int(ds_dye),
        description='Dataset year:',
        disabled=False,
        layout=Layout(width='180px')
    )
    btn_refresh = Button(
        layout=Layout(width='35px'),
        icon='fa-refresh')

    @btn_refresh.on_click
    def btn_refresh_on_click(b):
        # Re-read the config and resync both dropdowns with it.
        values = config.read()
        ds_c = values['set']['ds_conf']
        ds_y = values['set']['ds_year']
        dsc.options = [d for d in values['ds_conf']]
        dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
        dsc.value = ds_c
        dsy.value = int(ds_y)

    def on_dsc_change(change):
        # Persist the newly selected dataset and refresh its year list.
        config.update(['set', 'ds_conf'], dsc.value)
        values = config.read()
        ds_c = values['set']['ds_conf']
        dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
    dsc.observe(on_dsc_change, 'value')

    def on_dsy_change(change):
        # Persist the newly selected year.
        config.update(['set', 'ds_year'], str(dsy.value))
    dsy.observe(on_dsy_change, 'value')
    bt_set = Button(layout=Layout(width='40px'), icon='cogs',
                    tooltip="Configure this dataset")
    bt_new = Button(layout=Layout(width='40px'), icon='plus',
                    tooltip="Add new dataset configuration")
    bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt',
                    tooltip='Delete dataset configuration')
    bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt',
                    tooltip='Delete only the selected year.')
    dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
    progress = Output()

    def outlog(*text):
        # Print user-facing messages into the progress Output widget.
        with progress:
            print(*text)

    def dsc_config(dsc_value):
        """Build the editor sub-panel for the dataset named ``dsc_value``."""
        values = config.read()
        ds_db = Dropdown(
            options=["1"],
            value="1",
            description='Database:',
            disabled=False,
            layout=Layout(width='140px')
        )
        # Prefill the AOI code from the temp file if one was stored there;
        # otherwise reuse the currently selected dataset name.
        try:
            with open(f"{config.get_value(['paths','temp'])}tb_prefix", 'r') as f:
                code_value = f.read()
        except Exception:
            code_value = dsc_value
        ds_code = Combobox(
            value=code_value,
            placeholder='abc',
            options=[m for m in data_options.eu_ms()]+[''],
            description='AOI code:',
            ensure_option=False,
            disabled=False,
            layout=Layout(width='200px'),
            tooltip='Lowercase AOI code name for the dataset (5chr max).'
        )
        ds_year = BoundedIntText(
            value=int(dsy.value),
            min=1980,
            max=2100,
            step=1,
            description='Dataset year:',
            disabled=False,
            layout=Layout(width='180px')
        )
        ds_desc = Text(
            value=values['ds_conf'][dsc_value]['desc'],
            description='Description:',
            disabled=False
        )
        info_map_text = ["Set default map view options. ",
                         "You can get automatically the dataset ",
                         "center coordinates."]
        lat, lon = values['ds_conf'][dsc_value]['center'].split(",")
        map_cent_lat = FloatText(
            value=float(lat),
            description='Lat:',
            disabled=False,
            layout=Layout(width='160px')
        )
        map_cent_lon = FloatText(
            value=float(lon),
            description='Lon:',
            disabled=False,
            layout=Layout(width='160px')
        )
        map_zoom = BoundedIntText(
            value=values['ds_conf'][dsc_value]['zoom'],
            min=0,
            max=20,
            step=1,
            description='Zoom:',
            disabled=False,
            layout=Layout(width='140px')
        )
        bt_get_center = Button(
            layout=Layout(width='40px'),
            icon='bullseye',
            tooltip='Get center point from database.'
        )
        ds_box = HBox([ds_code, ds_year, ds_desc])
        map_box = HBox([Label("Map center: "), map_cent_lat,
                        map_cent_lon, bt_get_center, map_zoom])
        info_config = Label(
            """Change 'AOI code' value to create a new configuration set or
            leave the same 'AOI code' value to configure the selected one.""")
        db = int(values['ds_conf'][dsc_value]['db'])

        def get_tb_list():
            # List the tables of the configured database; empty list on failure.
            tbls = database.tables(db, None, False)
            if tbls is None:
                return []
            else:
                return tbls
        tb_dc = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['dias_catalog'],
                get_tb_list(), False),
            description='DIAS catalog:',
            disabled=False
        )
        tb_pr = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['parcels'],
                get_tb_list(), False),
            description='Parcels:',
            disabled=False
        )

        def get_pr_columns():
            # Columns of the selected parcels table; empty list on failure.
            try:
                colms = database.table_columns(tb_pr.value, 1, None)
                if colms is None:
                    return []
                else:
                    return colms
            except Exception:
                return []
        tc_id = Dropdown(
            options=get_pr_columns(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['columns']['parcels_id'],
                get_pr_columns(), False),
            description='Parcels ID:',
            disabled=False,
            layout=Layout(width='180px')
        )
        tc_cn = Dropdown(
            options=get_pr_columns(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['columns']['crop_names'],
                get_pr_columns(), False),
            description='Crop names:',
            disabled=False,
            layout=Layout(width='180px')
        )
        tc_cc = Dropdown(
            options=get_pr_columns(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['columns']['crop_codes'],
                get_pr_columns(), False),
            description='Crop codes:',
            disabled=False,
            layout=Layout(width='180px')
        )

        def on_tb_pr_change(change):
            # Changing the parcels table invalidates the column choices.
            tc_id.options = get_pr_columns()
            tc_cn.options = get_pr_columns()
            tc_cc.options = get_pr_columns()
        tb_pr.observe(on_tb_pr_change, 'value')
        parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
        tb_s2 = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['s2'],
                get_tb_list(), False),
            description='S2 signatures:',
            disabled=False
        )
        tb_bs = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['bs'],
                get_tb_list(), False),
            description='Backscattering:',
            disabled=False
        )
        tb_6c = Dropdown(
            options=get_tb_list(),
            value=config.autoselect(
                values['ds_conf'][dsc_value]['years'][
                    str(ds_year.value)]['tables']['c6'],
                get_tb_list(), False),
            description='6 day coherence:',
            disabled=False
        )
        wb_save = Button(
            description='Save',
            disabled=False,
            icon='save'
        )

        @bt_get_center.on_click
        def bt_get_center_on_click(b):
            # Query the parcels table centroid and use it as the map center.
            import json
            center_json = json.loads(
                database.getTableCentroid(tb_pr.value)['center'][0])
            map_cent_lat.value = round(center_json['coordinates'][1], 2)
            map_cent_lon.value = round(center_json['coordinates'][0], 2)
            map_zoom.value = 10

        @wb_save.on_click
        def wb_save_on_click(b):
            # Persist every widget value under ds_conf[<AOI code>]; a new
            # AOI code therefore creates a brand new configuration set.
            progress.clear_output()
            dscode = ds_code.value
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 'dias_catalog'], str(tb_dc.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 'parcels'], str(tb_pr.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'columns', 'parcels_id'], str(tc_id.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'columns', 'crop_names'], str(tc_cn.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'columns', 'crop_codes'], str(tc_cc.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 's2'], str(tb_s2.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 'bs'], str(tb_bs.value))
            config.update(['ds_conf', dscode, 'years', str(ds_year.value),
                           'tables', 'c6'], str(tb_6c.value))
            config.update(['ds_conf', dscode,
                           'db'], str(ds_db.value))
            config.update(['ds_conf', dscode,
                           'desc'], str(ds_desc.value))
            config.update(['ds_conf', dscode, 'center'],
                          f"{map_cent_lat.value},{map_cent_lon.value}")
            config.update(['ds_conf', dscode,
                           'zoom'], str(map_zoom.value))
            config.update(['set', 'ds_conf'], str(dscode))
            config.update(['set', 'ds_year'], str(ds_year.value))
            # Resync the selector dropdowns with the freshly saved config.
            values = config.read()
            ds_c = values['set']['ds_conf']
            ds_y = values['set']['ds_year']
            dsc.options = [d for d in values['ds_conf']]
            dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
            dsc.value = ds_c
            dsy.value = int(ds_y)
            outlog("The configurations are saved.")
        return VBox([info_config, ds_box, parcel_box,
                     tb_dc, tb_s2, tb_bs, tb_6c,
                     Label(''.join(info_map_text)), map_box, wb_save])

    dsc_new_box = HBox([])

    @bt_set.on_click
    def bt_set_on_click(b):
        # Toggle the editor panel for the currently selected dataset.
        if dsc_new_box.children == ():
            dsc_new_box.children = [dsc_config(dsc.value)]
            bt_set.icon = 'chevron-up'
        else:
            dsc_new_box.children = ()
            bt_set.icon = 'cogs'

    @bt_new.on_click
    def bt_new_on_click(b):
        # NOTE(review): identical to bt_set_on_click — presumably "new" was
        # meant to open a blank configuration instead; confirm intent.
        if dsc_new_box.children == ():
            dsc_new_box.children = [dsc_config(dsc.value)]
            bt_set.icon = 'chevron-up'
        else:
            dsc_new_box.children = ()
            bt_set.icon = 'cogs'

    @bt_rec.on_click
    def bt_rec_on_click(b):
        # Delete the selected dataset configuration, keeping at least one.
        progress.clear_output()
        if len(dsc.options) > 1:
            config.delete(['ds_conf', dsc.value])
            outlog(f"Dataset configuration '{dsc.value}' is deleted.")
            values = config.read()
            dsc.options = [d for d in values['ds_conf']]
        else:
            outlog("Can not remove last configuration.")

    @bt_rey.on_click
    def bt_rey_on_click(b):
        # Delete only the selected year of the dataset, keeping at least one.
        progress.clear_output()
        if len(dsy.options) > 1:
            config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
            outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
            values = config.read()
            dsy.options = [int(y) for y in values['ds_conf']
                           [str(dsc.value)]['years']]
        else:
            outlog("Can not remove last configuration.")
    wbox = VBox([Label("Datasets configurations."), dsc_box,
                 dsc_new_box, progress])
    return wbox
| [
"Konstantinos.ANASTASAKIS@ext.ec.europa.eu"
] | Konstantinos.ANASTASAKIS@ext.ec.europa.eu |
1945c8b7fc54ee79f504b75e8e089fb5ab7e8023 | 6faa21b2d8a7e55e64fe289a21e455d1b4718fbb | /app/alamat_app/urls.py | 756fa3973ae1e73aa1a4c1bac10f5218910202b6 | [
"MIT"
] | permissive | ganggas95/simdus_app | 62eae2e7a1e0e9b2250fbccd795de1f658db814b | 0c57e11c712912f61d29ca4b63dfa1fe38bb067c | refs/heads/master | 2020-03-27T12:42:48.506769 | 2018-08-29T10:05:28 | 2018-08-29T10:05:28 | 146,563,860 | 0 | 1 | MIT | 2018-08-29T10:05:28 | 2018-08-29T07:43:05 | CSS | UTF-8 | Python | false | false | 726 | py | from .blueprint import (
alamat_bp,
api_alamat_bp,
api_alamat)
from .views import (
AlamatView,
AddAlamatView,
EditAlamatView)
from .api import alamat_ns
# Register the admin HTML views on the alamat blueprint.
# List page for addresses.
alamat_bp.add_url_rule(
    '/admin/alamat',
    view_func=AlamatView.as_view(
        'alamat_view',
        'alamat.html'
    )
)
# Creation form (GET shows the form, POST submits it).
alamat_bp.add_url_rule(
    '/admin/alamat/add',
    view_func=AddAlamatView.as_view(
        'add_alamat_view',
        'tambah_alamat.html'
    ),
    methods=['GET', 'POST']
)
# Detail/edit form for a single address, selected by integer id.
alamat_bp.add_url_rule(
    '/admin/alamat/<int:id_alamat>/detail',
    view_func=EditAlamatView.as_view(
        'edit_alamat_view',
        'edit_alamat.html'
    ),
    methods=['GET', 'POST']
)
# Attach the REST namespace to the API object.
api_alamat.add_namespace(alamat_ns)
| [
"subhannizar25@gmail.com"
] | subhannizar25@gmail.com |
32e94eed43019733021268596b411e4d13419be2 | ebd2b04c9d55691e7208ff8d6816beb7dcae8f77 | /fin_app/tests.py | 594425d6e1c9efda02cee7397cb6138dd08e28a8 | [] | no_license | playmepe/djing2 | e35f11fdbef2b4f4bf2ef37d7f3e7b6dc9d5aba0 | 10379e974d969be94a40317e8121436f03f19ca2 | refs/heads/master | 2022-12-28T01:28:01.653972 | 2020-10-12T14:37:23 | 2020-10-12T14:37:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,121 | py | from hashlib import md5
# from django.test.utils import override_settings
from django.utils import timezone
from django.utils.html import escape
from rest_framework.test import APITestCase
from customers.models import Customer
from fin_app.models import PayAllTimeGateway
from profiles.models import UserProfile
def _make_sign(act: int, pay_account: str, serv_id: str, pay_id, secret: str):
md = md5()
s = "%d_%s_%s_%s_%s" % (act, pay_account, serv_id, pay_id, secret)
md.update(bytes(s, 'utf-8'))
return md.hexdigest()
# @override_settings(DEFAULT_TABLESPACE='ram')
class CustomAPITestCase(APITestCase):
    """Base DRF test case with shared fixtures for the payment-gateway tests.

    ``setUp`` creates an admin profile, one customer (``custo1``) with a
    negative balance, and one all-time payment gateway under the slug
    ``pay_gw_slug`` used in the tested URLs.
    """

    def get(self, *args, **kwargs):
        # Thin shortcut around the DRF test client's GET.
        return self.client.get(*args, **kwargs)

    def post(self, *args, **kwargs):
        # Thin shortcut around the DRF test client's POST.
        return self.client.post(*args, **kwargs)

    def setUp(self):
        self.admin = UserProfile.objects.create_superuser(
            username='admin',
            password='admin',
            telephone='+797812345678'
        )
        # customer for tests
        custo1 = Customer.objects.create_user(
            telephone='+79782345678',
            username='custo1',
            password='passw'
        )
        # Start the customer in debt so a payment produces a known balance.
        custo1.balance = -13.12
        custo1.fio = 'Test Name'
        custo1.save(update_fields=('balance', 'fio'))
        custo1.refresh_from_db()
        self.customer = custo1
        # Pay System
        pay_system = PayAllTimeGateway.objects.create(
            title='Test pay system',
            secret='secret',
            service_id='service_id',
            slug='pay_gw_slug'
        )
        pay_system.refresh_from_db()
        self.pay_system = pay_system
class AllPayTestCase(CustomAPITestCase):
    """Integration tests for the AllTime payment gateway endpoint.

    Exercises the three gateway actions: ACT=1 (account info), ACT=4 (make a
    payment) and ACT=7 (check a payment), comparing full XML responses.
    """

    # Timestamp format used in the gateway's XML responses.
    time_format = '%d.%m.%Y %H:%M'
    url = '/api/fin/pay_gw_slug/pay/'

    def test_user_pay_view_info(self):
        """ACT=1 must return the customer's balance, name and limits."""
        current_date = timezone.now().strftime(self.time_format)
        service_id = self.pay_system.service_id
        r = self.get(self.url, {
            'ACT': 1,
            'PAY_ACCOUNT': 'custo1',
            'SIGN': _make_sign(1, 'custo1', '', '', self.pay_system.secret)
        })
        o = ''.join((
            "<pay-response>",
            "<balance>-13.12</balance>",
            "<name>Test Name</name>",
            "<account>custo1</account>",
            "<service_id>%s</service_id>" % escape(service_id),
            "<min_amount>10.0</min_amount>",
            "<max_amount>5000</max_amount>",
            "<status_code>21</status_code>",
            "<time_stamp>%s</time_stamp>" % escape(current_date),
            "</pay-response>"
        ))
        self.assertXMLEqual(r.content.decode('utf8'), o)
        self.assertEqual(r.status_code, 200)

    def test_user_pay_pay(self):
        """ACT=4 must accept a payment and credit the customer's balance."""
        current_date = timezone.now().strftime(self.time_format)
        service_id = self.pay_system.service_id
        r = self.get(self.url, {
            'ACT': 4,
            'PAY_ACCOUNT': 'custo1',
            'PAY_AMOUNT': 18.21,
            'RECEIPT_NUM': 2126235,
            'SERVICE_ID': service_id,
            'PAY_ID': '840ab457-e7d1-4494-8197-9570da035170',
            'TRADE_POINT': 'term1',
            'SIGN': _make_sign(4, 'custo1', service_id,
                               '840ab457-e7d1-4494-8197-9570da035170', self.pay_system.secret)
        })
        xml = ''.join((
            "<pay-response>",
            "<pay_id>840ab457-e7d1-4494-8197-9570da035170</pay_id>",
            "<service_id>%s</service_id>" % escape(service_id),
            "<amount>18.21</amount>",
            "<status_code>22</status_code>",
            "<time_stamp>%s</time_stamp>" % escape(current_date),
            "</pay-response>"
        ))
        self.assertXMLEqual(r.content.decode('utf-8'), xml)
        self.assertEqual(r.status_code, 200)
        self.customer.refresh_from_db()
        # Initial balance -13.12 plus the 18.21 payment leaves 5.09.
        self.assertEqual(round(self.customer.balance, 2), 5.09)
        self.user_pay_check(current_date)

    def user_pay_check(self, test_pay_time):
        """ACT=7 must report the previously made payment as a transaction."""
        current_date = timezone.now().strftime(self.time_format)
        service_id = self.pay_system.service_id
        r = self.get(self.url, {
            'ACT': 7,
            'SERVICE_ID': service_id,
            'PAY_ID': '840ab457-e7d1-4494-8197-9570da035170',
            'SIGN': _make_sign(7, '', service_id,
                               '840ab457-e7d1-4494-8197-9570da035170', self.pay_system.secret)
        })
        xml = ''.join((
            "<pay-response>",
            "<status_code>11</status_code>",
            "<time_stamp>%s</time_stamp>" % escape(current_date),
            "<transaction>",
            "<pay_id>840ab457-e7d1-4494-8197-9570da035170</pay_id>",
            "<service_id>%s</service_id>" % escape(service_id),
            "<amount>18.21</amount>",
            "<status>111</status>",
            "<time_stamp>%s</time_stamp>" % escape(test_pay_time),
            "</transaction>"
            "</pay-response>"
        ))
        self.assertXMLEqual(r.content.decode(), xml)
        self.assertEqual(r.status_code, 200)
| [
"nerosketch@gmail.com"
] | nerosketch@gmail.com |
d88bae7107099972a776dde4b99a8db99694d21d | 4e4352a311dd3b4976e518af2eafdfb79253bced | /adanet/core/tpu_estimator.py | 8b0a5e793ab5bf99a2390ffcc906216fef2c0985 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | caihengyu520/adanet | 09a675dda0e966dfb98f219d01193579af7e3534 | 482fd0af4bfda221fc4449e5c40a796f21712df5 | refs/heads/master | 2020-07-31T11:14:08.253508 | 2019-09-23T20:39:00 | 2019-09-23T20:39:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,138 | py | """An AdaNet estimator implementation which can run on TPU.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import functools
from absl import logging
from adanet import tf_compat
from adanet.core.estimator import Estimator
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.util import function_utils
# pylint: enable=g-direct-tensorflow-import
# pylint: disable=g-classes-have-attributes
class TPUEstimator(Estimator, tf_compat.v1.estimator.tpu.TPUEstimator):
  """An :class:`adanet.Estimator` capable of training and evaluating on TPU.

  Unless :code:`use_tpu=False`, training will run on TPU. However, certain
  parts of the AdaNet training loop, such as report materialization and best
  candidate selection, will still occur on CPU. Furthermore, inference also
  occurs on CPU.

  TODO: Provide the missing functionality detailed below.
  N.B: Embeddings using the TPUEmbedding (i.e. :code:`embedding_config_spec`
  is provided) only support :code:`shared_embedding_columns` when running for
  multiple AdaNet iterations. Using regular :code:`embedding_columns` will
  cause iterations 2..n to fail because of mismatched embedding scopes.

  Args:
    head: See :class:`adanet.Estimator`.
    subnetwork_generator: See :class:`adanet.Estimator`.
    max_iteration_steps: See :class:`adanet.Estimator`.
    ensemblers: See :class:`adanet.Estimator`.
    ensemble_strategies: See :class:`adanet.Estimator`.
    evaluator: See :class:`adanet.Estimator`.
    report_materializer: See :class:`adanet.Estimator`.
    metric_fn: See :class:`adanet.Estimator`.
    force_grow: See :class:`adanet.Estimator`.
    replicate_ensemble_in_training: See :class:`adanet.Estimator`.
    adanet_loss_decay: See :class:`adanet.Estimator`.
    report_dir: See :class:`adanet.Estimator`.
    config: See :class:`adanet.Estimator`.
    use_tpu: Boolean to enable training on TPU. Defaults to :code:`True` and
      is only provided to allow debugging models on CPU/GPU. Use
      :class:`adanet.Estimator` instead if you do not plan to run on TPU.
    eval_on_tpu: Boolean to enable evaluating on TPU. Defaults to
      :code:`True`. Ignored if :code:`use_tpu=False`.
    train_batch_size: See :class:`tf.compat.v1.estimator.tpu.TPUEstimator`.
    eval_batch_size: See :class:`tf.compat.v1.estimator.tpu.TPUEstimator`.
    embedding_config_spec: See
      :class:`tf.compat.v1.estimator.tpu.TPUEstimator`.
    debug: See :class:`adanet.Estimator`.
    enable_ensemble_summaries: See :class:`adanet.Estimator`.
    enable_subnetwork_summaries: See :class:`adanet.Estimator`.
    export_subnetwork_logits: Whether to include subnetwork logits in exports.
    export_subnetwork_last_layer: Whether to include subnetwork last layer in
      exports.
    global_step_combiner_fn: See :class:`adanet.Estimator`.
    max_iterations: See :class:`adanet.Estimator`.
    replay_config: See :class:`adanet.Estimator`.
    **kwargs: Extra keyword args passed to the parent.
  """

  def __init__(self,
               head,
               subnetwork_generator,
               max_iteration_steps,
               ensemblers=None,
               ensemble_strategies=None,
               evaluator=None,
               report_materializer=None,
               metric_fn=None,
               force_grow=False,
               replicate_ensemble_in_training=False,
               adanet_loss_decay=.9,
               model_dir=None,
               report_dir=None,
               config=None,
               use_tpu=True,
               eval_on_tpu=True,
               train_batch_size=None,
               eval_batch_size=None,
               embedding_config_spec=None,
               debug=False,
               enable_ensemble_summaries=True,
               enable_subnetwork_summaries=True,
               export_subnetwork_logits=False,
               export_subnetwork_last_layer=True,
               global_step_combiner_fn=tf.math.reduce_mean,
               max_iterations=None,
               replay_config=None,
               **kwargs):
    self._use_tpu = use_tpu
    if not self._use_tpu:
      logging.warning(
          "This adanet.TPUEstimator is meant to be used for running on TPU. "
          "If you want to run on CPU/GPU, use adanet.Estimator instead.")
    # TPUEstimator modifies config under the hood. We keep track of it here so
    # we can use it during the bookkeeping phase and when predict() is called.
    # Evaluation on TPU is only meaningful when training on TPU.
    self._eval_on_tpu = eval_on_tpu if self._use_tpu else False
    self._original_config = config or tf_compat.v1.estimator.tpu.RunConfig()
    # Batch sizes default to 0 when not supplied (parent treats 0 as unset).
    self._train_batch_size = train_batch_size or 0
    self._eval_batch_size = eval_batch_size or train_batch_size or 0
    self._embedding_config_spec = embedding_config_spec
    super(TPUEstimator, self).__init__(
        head=head,
        subnetwork_generator=subnetwork_generator,
        max_iteration_steps=max_iteration_steps,
        ensemblers=ensemblers,
        ensemble_strategies=ensemble_strategies,
        evaluator=evaluator,
        report_materializer=report_materializer,
        metric_fn=metric_fn,
        force_grow=force_grow,
        replicate_ensemble_in_training=replicate_ensemble_in_training,
        adanet_loss_decay=adanet_loss_decay,
        model_dir=model_dir,
        report_dir=report_dir,
        config=self._original_config,
        use_tpu=self._use_tpu,
        eval_on_tpu=self._eval_on_tpu,
        export_to_tpu=False,
        train_batch_size=self._train_batch_size,
        eval_batch_size=self._eval_batch_size,
        embedding_config_spec=self._embedding_config_spec,
        debug=debug,
        enable_ensemble_summaries=enable_ensemble_summaries,
        enable_subnetwork_summaries=enable_subnetwork_summaries,
        export_subnetwork_logits=export_subnetwork_logits,
        export_subnetwork_last_layer=export_subnetwork_last_layer,
        global_step_combiner_fn=global_step_combiner_fn,
        max_iterations=max_iterations,
        replay_config=replay_config,
        **kwargs)

  # Yields predictions on CPU even when use_tpu=True.
  def predict(self,
              input_fn,
              predict_keys=None,
              hooks=None,
              checkpoint_path=None,
              yield_single_examples=True):
    """Runs inference on CPU by delegating to a fresh CPU-mode TPUEstimator."""
    logging.warning(
        "The adanet.TPUEstimator does not support predicting on TPU. "
        "Instead, all predictions are run on CPU.")
    # A new TPUEstimator is constructed on every call, sharing this
    # estimator's model_fn, model_dir and original RunConfig.
    tpu_estimator = tf_compat.v1.estimator.tpu.TPUEstimator(
        model_fn=self._adanet_model_fn,
        model_dir=self.model_dir,
        config=self._original_config,
        params=self.params,
        use_tpu=False,
        eval_on_tpu=False,
        embedding_config_spec=self._embedding_config_spec)
    return tpu_estimator.predict(
        input_fn,
        predict_keys=predict_keys,
        hooks=hooks,
        checkpoint_path=checkpoint_path,
        yield_single_examples=yield_single_examples)

  def _create_temp_run_config(self, temp_model_dir):
    """See the `Estimator` base class for details."""
    # Clone the original RunConfig, pointing model_dir at the temp directory.
    return tf_compat.v1.estimator.tpu.RunConfig(
        model_dir=temp_model_dir,
        tpu_config=self._original_config.tpu_config,
        evaluation_master=self._original_config.evaluation_master,
        master=self._original_config.master,
        cluster=self._original_config.cluster,
        tf_random_seed=self._original_config.tf_random_seed,
        session_config=self._original_config.session_config,
        protocol=self._original_config.protocol)

  def _create_temp_estimator(self, config, params):
    """See the `Estimator` base class for details."""
    temp_model_dir = config.model_dir
    return tf_compat.v1.estimator.tpu.TPUEstimator(
        model_fn=self._adanet_model_fn,
        params=params,
        config=config,
        model_dir=temp_model_dir,
        use_tpu=self._use_tpu,
        eval_on_tpu=self._eval_on_tpu,
        export_to_tpu=False,
        train_batch_size=self._train_batch_size,
        eval_batch_size=self._eval_batch_size,
        embedding_config_spec=self._embedding_config_spec)

  @contextlib.contextmanager
  def _call_input_fn_in_new_graph(self, input_fn, mode, config):
    """See the `Estimator` base class for details."""
    # Bind parameters to input_fn since the parent's input_fn is not expected
    # to have any arguments. Only the arguments the user's input_fn actually
    # declares are bound.
    input_fn_args = function_utils.fn_args(input_fn)
    kwargs = {}
    if "mode" in input_fn_args:
      kwargs["mode"] = mode
    if "params" in input_fn_args:
      kwargs["params"] = self.params
    if "config" in input_fn_args:
      kwargs["config"] = config
    input_fn = functools.partial(input_fn, **kwargs)
    with super(TPUEstimator,
               self)._call_input_fn_in_new_graph(input_fn, mode, config) as res:
      yield res

  def _create_estimator_spec(self, current_iteration, mode,
                             iteration_number_tensor, previous_iteration_vars,
                             is_growing_phase, evaluation_name):
    """See the `Estimator` base class for details."""
    # On CPU/GPU the plain EstimatorSpec from the base class suffices.
    if not self._use_tpu:
      return super(TPUEstimator, self)._create_estimator_spec(
          current_iteration, mode, iteration_number_tensor,
          previous_iteration_vars, is_growing_phase, evaluation_name)
    training = mode == tf.estimator.ModeKeys.TRAIN
    iteration_estimator_spec = current_iteration.estimator_spec
    training_hooks = self._training_hooks(current_iteration, training,
                                          iteration_number_tensor,
                                          previous_iteration_vars,
                                          is_growing_phase)
    if is_growing_phase:
      training_hooks = self._process_hooks_for_growing_phase(training_hooks)
    evaluation_hooks = self._evaluation_hooks(current_iteration, training,
                                              evaluation_name)
    return tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
        mode=mode,
        predictions=iteration_estimator_spec.predictions,
        loss=iteration_estimator_spec.loss,
        train_op=self._train_op(iteration_estimator_spec, is_growing_phase),
        # Summaries are written from the CPU host, not the TPU device.
        host_call=self._create_host_call(current_iteration, training),
        eval_metrics=iteration_estimator_spec.eval_metrics,
        export_outputs=iteration_estimator_spec.export_outputs,
        # Return a constant summary_op, otherwise `Estimator` creates summary
        # ops that do not work on TPU.
        scaffold_fn=lambda: tf.compat.v1.train.Scaffold(  # pylint: disable=g-long-lambda
            summary_op=tf.constant("")),
        training_hooks=training_hooks,
        evaluation_hooks=evaluation_hooks)

  def _training_hooks(self, current_iteration, training,
                      iteration_number_tensor, previous_iteration_vars,
                      is_growing_phase):
    """See the `Estimator` base class for details."""
    training_hooks = super(TPUEstimator,
                           self)._training_hooks(current_iteration, training,
                                                 iteration_number_tensor,
                                                 previous_iteration_vars,
                                                 is_growing_phase)
    if self._use_tpu:
      # Remove summary hooks on TPU since summaries are saved via host_call.
      training_hooks = [
          hook for hook in training_hooks
          if not isinstance(hook, tf.compat.v1.train.SummarySaverHook)
      ]
    return training_hooks

  def _create_host_call(self, current_iteration, training):
    """Construct a host_call writing scalar summaries.

    Args:
      current_iteration: The current `_Iteration`.
      training: Boolean indicating whether in training mode.

    Returns:
      (fn, args) Pair to be called by TPUEstimator as the host_call.
    """
    # Outside of training there is nothing to record.
    if not training:
      return lambda **kwargs: [tf.no_op()], {}
    # Collect and flatten summary functions and arguments. The tensors are
    # passed to the host by keyword ("summary_<i>_<j>"), and the matching
    # summary functions are kept in summary_fns keyed by iteration summary.
    summary_kwargs = collections.OrderedDict()
    gs_t = tf.reshape(tf.cast(tf.train.get_global_step(), dtype=tf.int32), [1])
    summary_kwargs["global_step"] = gs_t
    summary_fns = collections.defaultdict(list)
    for i, summary in enumerate(current_iteration.summaries):
      for j, (summary_fn, tensor) in enumerate(summary.summary_tuples()):
        summary_fns[i].append(summary_fn)
        summary_kwargs["summary_{}_{}".format(i, j)] = tensor

    def _host_call_fn(**kwargs):
      """Training host call.

      Creates summaries for training metrics.

      Args:
        **kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`.
          Must contain key "global_step" with value of current global_step
          Tensor.

      Returns:
        List of summary ops to run on the CPU host.
      """
      gs = tf.cast(kwargs.pop("global_step")[0], dtype=tf.int64)
      for i, summary in enumerate(current_iteration.summaries):
        with summary_ops_v2.create_file_writer(summary.logdir).as_default():
          with summary_ops_v2.record_summaries_every_n_global_steps(
              n=self.config.save_summary_steps, global_step=gs):
            for j, summary_fn in enumerate(summary_fns[i]):
              tensor = kwargs["summary_{}_{}".format(i, j)]
              summary_fn(tensor, step=gs)
        summary.clear_summary_tuples()
      return tf.compat.v1.summary.all_v2_summary_ops()

    return _host_call_fn, summary_kwargs
| [
"weill@google.com"
] | weill@google.com |
650ab3e0162c461a54a59ba252b1be8069adf69a | f4f2ef334b7ccf704a4fc9e034fb863370d3d63a | /demo/oop/mi/demo4.py | a809a72674572c87f659b4991896a18f4effd402 | [] | no_license | srikanthpragada/PYTHON_21_SEP_2020 | 012f27868dcbbeba9964fbc2f963198e9d3f626f | 9e28dfc1d35d710fb4f32b158e0e73861d5fc9ac | refs/heads/master | 2023-01-02T18:39:28.554862 | 2020-10-28T02:24:20 | 2020-10-28T02:24:20 | 298,142,893 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | class A:
def process(self):
print("Process in A")
# B inherits process() from A unchanged.
class B(A):
    pass


# C overrides process().
class C(A):
    def process(self):
        print("Process in C")


# Diamond inheritance: by C3 linearization, D.mro() is
# [D, B, C, A, object] — B is searched before C, and C before A.
class D(B, C):
    pass


# Neither D nor B defines process(), so the MRO resolves it to C.process
# and "Process in C" is printed (not A's version).
obj = D()
obj.process()
print(D.mro()) | [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
c23660db52fea446d25017ac197279c37ff7df94 | c4267e2e092ac0e3f53e82eef5a0bd5417222525 | /base/base_T5.py | b7209bde1bec3d59f9f7cefb2f28f8fe4a4fe002 | [] | no_license | chenyang1999/lanqiaocup_marcus | fde22466287e2bea02b7ea4292256b70fed1d33b | 6ad13f1a28ca6b650d9545b5148a450d1c9cd154 | refs/heads/master | 2023-01-07T06:47:11.316530 | 2020-11-14T08:21:42 | 2020-11-14T08:21:42 | 286,075,513 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | '''
Problem description

Given a year, determine whether it is a leap year.

A year is a leap year when either of the following conditions holds:
1. the year is a multiple of 4 but not a multiple of 100;
2. the year is a multiple of 400.

Every other year is not a leap year.

Input format
The input contains one integer y, the year in question.

Output format
Print one line: "yes" if the given year is a leap year, otherwise "no".
'''
def is_leap(year: int) -> bool:
    """Return True if *year* is a Gregorian leap year.

    A year is a leap year when it is divisible by 4 but not by 100,
    or when it is divisible by 400.
    """
    return (year % 4 == 0 and year % 100 != 0) or year % 400 == 0


if __name__ == "__main__":
    # Read the year from stdin and report 'yes' for leap years, 'no' otherwise.
    # The predicate is factored out so it can be imported and unit-tested
    # without blocking on input().
    n = int(input())
    print('yes' if is_leap(n) else 'no')
| [
"34475230+chenyang1999@users.noreply.github.com"
] | 34475230+chenyang1999@users.noreply.github.com |
3a4f7cd5e67246cbd53f57079413493158901450 | a4deea660ea0616f3b5ee0b8bded03373c5bbfa2 | /concrete_instances/register-variants/vpsrlw_ymm_ymm_xmm/instructions/vpsrlw_ymm_ymm_xmm/vpsrlw_ymm_ymm_xmm.gen.vex.py | ac220d900c443bc37197e364bd268625ee83393d | [] | no_license | Vsevolod-Livinskij/x86-64-instruction-summary | 4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd | c276edab1b19e3929efb3ebe7514489f66087764 | refs/heads/master | 2022-02-02T18:11:07.818345 | 2019-01-25T17:19:21 | 2019-01-25T17:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | import angr
# NOTE: Python 2 syntax (bare `print` statements); this script will not run
# under Python 3 as written.
proj = angr.Project('./instructions/vpsrlw_ymm_ymm_xmm/vpsrlw_ymm_ymm_xmm.o')
# Project metadata: guest architecture, entry-point address, and binary path.
print proj.arch
print proj.entry
print proj.filename
# Lift the basic block at the entry point to VEX IR.
irsb = proj.factory.block(proj.entry).vex
irsb.pp() | [
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
3666a7c9f29851dc656050c96150bae69ead44e4 | 82f998aec53e7bc49eb5aad4fdb18cbe72976b89 | /transformers/modeling_gpt2.py | 921e463a9e3ec440cd4035b66a5184959a65e501 | [] | no_license | MatNLP/SMedBERT | 6ab8d2749a8a26005eef36dc347f779c9e6a217b | 8dd549f902ca59ad2b84bf3b951213565fde4dc0 | refs/heads/main | 2023-09-02T03:22:13.298661 | 2021-11-17T05:44:50 | 2021-11-17T05:44:50 | 372,204,217 | 75 | 13 | null | null | null | null | UTF-8 | Python | false | false | 35,792 | py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from .configuration_gpt2 import GPT2Config
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
# Download URLs for the official pretrained GPT-2 checkpoints, keyed by
# model shortcut name (used by `from_pretrained`).
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
                                     "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin",
                                     "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin",
                                     "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-pytorch_model.bin",
                                     "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-pytorch_model.bin",}
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load TensorFlow 1.x GPT-2 checkpoint weights into a PyTorch model.

    Walks every variable in the checkpoint, maps its TF scope name onto the
    corresponding attribute path of `model`, and copies the array in place.

    Args:
        model: Target PyTorch GPT-2 model; its parameters are overwritten.
        config: Model configuration (not referenced here; kept for API
            symmetry with the other `load_tf_weights_in_*` loaders).
        gpt2_checkpoint_path: Path to the TensorFlow checkpoint.

    Returns:
        The same `model` instance with the weights loaded.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
                     "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())
    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        pointer = model
        # Follow each scope component down the module tree.
        for m_name in name:
            # Components like "h0" are split into ("h", "0"): attribute
            # name plus an index into a module list.
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            # TF naming -> PyTorch attribute: 'w'/'g' are weights, 'b' is
            # bias, 'wpe'/'wte' are the embedding tables.
            if l[0] == 'w' or l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'wpe' or l[0] == 'wte':
                pointer = getattr(pointer, l[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        # Attach both shapes to the error to ease mismatch debugging.
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation (as used by GPT-2).

    Computes 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
class Attention(nn.Module):
    """Multi-head causal self-attention block of GPT-2.

    A single fused Conv1D (`c_attn`) produces query/key/value; scores are
    masked with a lower-triangular buffer so each position attends only to
    itself and the past; merged heads are projected back with `c_proj`.
    Supports head pruning and cached key/value pairs (`layer_past`) for
    incremental decoding.
    """
    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        self.output_attentions = config.output_attentions
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        # Causal mask: ones on and below the diagonal, stored as a
        # (non-trainable) buffer of shape (1, 1, n_ctx, n_ctx).
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, nx)  # fused q/k/v projection
        self.c_proj = Conv1D(n_state, nx)      # output projection
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads and shrink c_attn/c_proj accordingly.

        `heads` are indexed with respect to the *current* (possibly already
        pruned) set of heads.
        """
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
        for head in heads:
            # Compute how many pruned heads are before the head and move the index accordingly
            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # c_attn holds q, k and v stacked along the output dim, so the kept
        # columns must be repeated at offsets 0, split_size and 2*split_size.
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, attention_mask=None, head_mask=None):
        # k was pre-transposed by split_heads(k=True), so no transpose here.
        w = torch.matmul(q, k)
        if self.scale:
            # Scaled dot-product attention: divide by sqrt(head dim).
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        # Slice the causal mask to the current query (nd) and key (ns)
        # lengths; the ns-nd offset aligns cached-decoding queries with
        # their absolute positions.
        b = self.bias[:, :, ns-nd:ns, :ns]
        # Push masked (future) positions to a large negative value before softmax.
        w = w * b - 1e4 * (1 - b)
        if attention_mask is not None:
            # Apply the attention mask
            w = w + attention_mask
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask
        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        # (batch, head, seq, head_features) -> (batch, seq, head*head_features)
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        # (batch, seq, n_state) -> per-head layout; keys get an extra
        # transpose so _attn can matmul(q, k) directly.
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        # Single projection, then split into q/k/v along the last dim.
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            # Prepend the cached keys/values from previous decoding steps.
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking
        attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
        a = attn_outputs[0]
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)
class MLP(nn.Module):
    """Position-wise feed-forward sub-layer of a GPT-2 block:
    Conv1D expansion -> gelu -> Conv1D projection -> residual dropout.
    """

    def __init__(self, n_state, config):
        # n_state is the inner (expanded) width; in GPT-2 it is 4 * n_embd,
        # i.e. n_state=3072 for the base model.
        super(MLP, self).__init__()
        embed_dim = config.n_embd
        self.c_fc = Conv1D(n_state, embed_dim)
        self.c_proj = Conv1D(embed_dim, n_state)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        hidden = self.c_fc(x)
        hidden = self.act(hidden)
        hidden = self.c_proj(hidden)
        return self.dropout(hidden)
class Block(nn.Module):
    """One pre-norm GPT-2 transformer layer:
    LayerNorm -> self-attention -> residual, then LayerNorm -> MLP -> residual.
    """

    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        embed_dim = config.n_embd
        self.ln_1 = nn.LayerNorm(embed_dim, eps=config.layer_norm_epsilon)
        self.attn = Attention(embed_dim, n_ctx, config, scale)
        self.ln_2 = nn.LayerNorm(embed_dim, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * embed_dim, config)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        attn_outputs = self.attn(
            self.ln_1(x),
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
        )
        # attn_outputs: [attention output, present, (attention weights)]
        # Residual connection around the attention sub-layer.
        x = x + attn_outputs[0]
        # Residual connection around the feed-forward sub-layer.
        x = x + self.mlp(self.ln_2(x))
        # [hidden_states, present, (attentions)]
        return [x] + attn_outputs[1:]
class GPT2PreTrainedModel(PreTrainedModel):
    """Abstract base class for GPT-2 models.

    Wires the GPT-2 configuration class, the pretrained-weight archive map
    and the TF-checkpoint loader into the generic ``PreTrainedModel``
    machinery, and defines the weight-initialization scheme shared by all
    GPT-2 heads.
    """
    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(GPT2PreTrainedModel, self).__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights of a single sub-module."""
        if isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version, which uses
            # truncated_normal (cf https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in
`Language Models are Unsupervised Multitask Learners`_
by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
corpus of ~40 GB of text data.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`Language Models are Unsupervised Multitask Learners`:
https://openai.com/blog/better-language-models/
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
GPT2_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**past**:
list of ``torch.FloatTensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings("The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
                      GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2Model(GPT2PreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
    """
    def __init__(self, config):
        super(GPT2Model, self).__init__(config)
        # Output flags copied from the config so forward() can consult them cheaply.
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.output_past = config.output_past
        # Token embeddings (wte) and learned absolute position embeddings (wpe).
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        # Stack of n_layer transformer blocks, followed by a final LayerNorm.
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.init_weights()

    def get_input_embeddings(self):
        """Return the token embedding module (used for weight tying/resizing)."""
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        """Replace the token embedding module."""
        self.wte = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None):
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            # Length of the cached sequence, read from the first layer's keys.
            past_length = past[0][0].size(-2)
        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            # Default positions continue after the cached prefix.
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        # Attention mask.
        if attention_mask is not None:
            attention_mask = attention_mask.view(-1, input_shape[-1])
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        if token_type_ids is not None:
            # Token-type ids are looked up in the *token* embedding table
            # (GPT-2 has no dedicated segment vocabulary, cf. inputs docstring).
            token_type_embeds = self.wte(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)
        presents = ()
        all_attentions = []
        all_hidden_states = ()
        # Run the transformer stack, threading each layer's cache through.
        for i, (block, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
            outputs = block(hidden_states,
                            layer_past=layer_past,
                            attention_mask=attention_mask,
                            head_mask=head_mask[i])
            hidden_states, present = outputs[:2]
            if self.output_past:
                presents = presents + (present,)
            if self.output_attentions:
                all_attentions.append(outputs[2])
        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_past:
            outputs = outputs + (presents,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, (presents), (all hidden_states), (attentions)
@add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        import torch
        from transformers import GPT2Tokenizer, GPT2LMHeadModel
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2LMHeadModel.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]
    """
    def __init__(self, config):
        super(GPT2LMHeadModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        # Bias-free projection from hidden states to vocabulary logits; its
        # weight is tied to the input embeddings (see the class docstring).
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.init_weights()

    def get_output_embeddings(self):
        """Expose the LM head so the base class can tie it to the input embeddings."""
        return self.lm_head

    def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None,
                labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               past=past,
                                               attention_mask=attention_mask,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               head_mask=head_mask,
                                               inputs_embeds=inputs_embeds)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens; labels of -1 are masked out of the loss
            # (cf. the class docstring).
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)
@add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""", GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    r"""
        **mc_token_ids**: (`optional`, default to index of the last token of the input) ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1[``.
        **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
        **mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **mc_loss**: (`optional`, returned when ``multiple_choice_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Multiple choice classification loss.
        **lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        import torch
        from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
        # Add a [CLS] to the vocabulary (we should train it also!)
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly token the last token of the vocabulary
        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        encoded_choices = [tokenizer.encode(s) for s in choices]
        cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
        input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
        mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1
        outputs = model(input_ids, mc_token_ids=mc_token_ids)
        lm_prediction_scores, mc_prediction_scores = outputs[:2]
    """
    def __init__(self, config):
        super(GPT2DoubleHeadsModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        # LM head (weight-tied to input embeddings, cf. class docstring) and
        # a summary-based multiple-choice classification head.
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)
        self.init_weights()

    def get_output_embeddings(self):
        """Expose the LM head so the base class can tie it to the input embeddings."""
        return self.lm_head

    def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None,
                mc_token_ids=None, lm_labels=None, mc_labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               past=past,
                                               attention_mask=attention_mask,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               head_mask=head_mask,
                                               inputs_embeds=inputs_embeds)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        # Classification logits are computed from the hidden state at the
        # position given by mc_token_ids for each choice.
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
                            mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            # Shift so that tokens < n predict n; -1 labels are ignored.
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)
| [
"czr@daddy.com"
] | czr@daddy.com |
5e67f69575c264e6b143cf012b66a30a33056bcd | 88ea7bf2bbc8ffba551e881df553ae5ceac70dd6 | /deblock/codes/data_scripts/encode_xiph_with_hm16.py | a01bd19f49c1de088810b964e2a7af18fac890c0 | [
"Apache-2.0"
] | permissive | zhouhuanxiang/repo-zhx | 2d1135bb2f925e051e1b0bcfc2ed53fb34ea51c5 | 76b577eea13130c60bf7bff8c486f51766128661 | refs/heads/main | 2023-06-10T02:56:17.978649 | 2021-06-29T02:35:57 | 2021-06-29T02:35:57 | 381,213,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,676 | py | # [1]How to read metainfo from y4m?
# https://github.com/vigata/y4mtools
# [2]How to convert y4m to yuv?
# https://stackoverflow.com/a/16859790/8335078
# [3]How to use HM & HEVC?
# https://blog.csdn.net/lin453701006/article/details/52775820
# [4]Xiph website
# https://media.xiph.org/video/derf/
import os
import re
import sys
import glob
import pprint
import json
# Clip groups from the Xiph "derf" collection, keyed by how they must be
# pre-processed before HM encoding (see convert_y4m2yuv below).
# Names containing "422" are presumably 4:2:2-chroma sources — TODO confirm
# against the .y4m headers.
y4m_422p_list = [
    'controlled_burn_1080p',
    'football_422_cif',
    'touchdown_pass_1080p',
    'rush_field_cuts_1080p'
]
# 720p clips (their conversion branch is currently commented out in
# convert_y4m2yuv).
y4m_720p_list = [
    '720p50_shields_ter',
    '720p5994_stockholm_ter'
]
# NTSC-rate 4SIF clips — the only group convert_y4m2yuv currently processes.
y4m_ntsc_list = [
    'mobile_calendar_422_4sif',
    'football_422_4sif'
]
def get_y4m_metainfo(yuv60_path, y4m_path):
    """Collect per-clip metadata for every clip listed in the index file.

    Each line of the tab-separated index looks like
    ``name\twidth\theight\tframes\t...`` (e.g. ``stefan_sif\t352\t240\t300\t1``).
    The width/height from the index are cross-checked against the ``W``/``H``
    fields of the clip's own y4m stream header; on mismatch a warning is
    printed and the header values win.

    Args:
        yuv60_path: path to the tab-separated clip index (e.g. YUV_60.txt).
        y4m_path: directory containing the ``<name>.y4m`` files.

    Returns:
        dict mapping clip name to ``{'height', 'width', 'length'}`` — all
        values kept as strings, as they are later formatted into HM configs.
    """
    # Compile once, outside the loop; raw strings avoid invalid-escape
    # warnings for "\d".
    width_pattern = re.compile(r"W(\d+)")
    height_pattern = re.compile(r"H(\d+)")
    y4m_metainfos = {}
    with open(yuv60_path) as fp:
        for line in fp:
            line = line.strip()
            if not line:
                # Tolerate blank lines (e.g. a trailing newline) in the index.
                continue
            print(line)
            words = line.split('\t')
            # The index refers to 2K names, but the stored source file for
            # those clips is the original 4K/10-bit y4m.
            y4m_name = words[0].replace('2048x1080_60fps', '4096x2160_60fps_10bit')
            width = words[1]
            height = words[2]
            length = words[3]
            # Check width and height directly from the y4m stream header.
            # Use a context manager so the file handle is not leaked.
            with open(os.path.join(y4m_path, y4m_name + '.y4m'), 'rb') as y4m_file:
                header = y4m_file.readline().decode("utf-8")
            print('metainfo', header.strip())
            width1 = width_pattern.findall(header)[0]
            height1 = height_pattern.findall(header)[0]
            if height != height1 or width != width1:
                print('##### warnning #####\n')  # (sic)
            y4m_metainfos[y4m_name] = {
                'height': height1,
                'width': width1,
                'length': length
            }
    return y4m_metainfos
def convert_y4m2yuv(y4m_path, y4m_metainfos):
    """Convert source .y4m clips to raw 4:2:0 .yuv via ffmpeg shell calls.

    Currently only processes the clips in `y4m_ntsc_list` (each is first
    converted to yuv420p, then unwrapped to raw .yuv, and the intermediate
    y4m is deleted).  The commented-out branches below document the
    previously used pipelines for the 422p/720p clips and for the
    4K/10-bit sources (10bit->8bit, 4K->2K downscale) — kept for reference.

    Args:
        y4m_path: directory holding the .y4m sources; .yuv files are written
            next to them.
        y4m_metainfos: clip-name -> metadata dict from get_y4m_metainfo()
            (only the keys are used here).
    """
    for y4m_name in y4m_metainfos.keys():
        if y4m_name in y4m_ntsc_list:
            y4m_420p_name = y4m_name + '_420p'
            # 422p -> 420p
            os.system('ffmpeg -i {}.y4m -vf format=yuv420p -y {}.y4m'.format(
                os.path.join(y4m_path, y4m_name),
                os.path.join(y4m_path, y4m_420p_name)
            ))
            # y4m -> yuv
            os.system('ffmpeg -i {}.y4m {}.yuv -y'.format(
                os.path.join(y4m_path, y4m_420p_name),
                os.path.join(y4m_path, y4m_name)
            ))
            # rm y4m_420p
            os.system('rm {}.y4m'.format(os.path.join(y4m_path, y4m_420p_name)))
        # elif y4m_name in y4m_422p_list or y4m_name in y4m_720p_list:
        #     y4m_420p_name = y4m_name + '_420p'
        #     # 422p -> 420p
        #     os.system('ffmpeg -i {}.y4m -vf format=yuv420p -y {}.y4m'.format(
        #         os.path.join(y4m_path, y4m_name),
        #         os.path.join(y4m_path, y4m_420p_name)
        #     ))
        #     # y4m -> yuv
        #     os.system('ffmpeg -i {}.y4m {}.yuv -y'.format(
        #         os.path.join(y4m_path, y4m_420p_name),
        #         os.path.join(y4m_path, y4m_name)
        #     ))
        #     # rm y4m_420p
        #     os.system('rm {}.y4m'.format(os.path.join(y4m_path, y4m_420p_name)))
        # elif y4m_name.find('4096x2160_60fps_10bit') > 0:
        #     y4m_8bit_4k_name = y4m_name.replace('10bit', '8bit')
        #     y4m_8bit_2k_name = y4m_8bit_4k_name.replace('4096x2160', '2048x1080')
        #     yuv_8bit_2k_name = y4m_8bit_2k_name
        #     print(y4m_8bit_4k_name+'\n'+y4m_8bit_2k_name+'\n'+yuv_8bit_2k_name+'\n\n')
        #     # 10bit -> 8bit
        #     os.system('ffmpeg -i {}.y4m -vf format=yuv420p -y {}.y4m'.format(
        #         os.path.join(y4m_path, y4m_name),
        #         os.path.join(y4m_path, y4m_8bit_4k_name)
        #     ))
        #     # 4k -> 2k
        #     os.system('ffmpeg -i {}.y4m -vf scale=2048:1080 -y {}.y4m'.format(
        #         os.path.join(y4m_path, y4m_8bit_4k_name),
        #         os.path.join(y4m_path, y4m_8bit_2k_name)
        #     ))
        #     # y4m -> yuv
        #     os.system('ffmpeg -i {}.y4m -y {}.yuv'.format(
        #         os.path.join(y4m_path, y4m_8bit_2k_name),
        #         os.path.join(y4m_path, yuv_8bit_2k_name)
        #     ))
        #     # remove y4m_8bit
        #     os.system('rm {}.y4m'.format(os.path.join(y4m_path, y4m_8bit_4k_name)))
        #     # remove y4m_8bit_2k
        #     os.system('rm {}.y4m'.format(os.path.join(y4m_path, y4m_8bit_2k_name)))
        # else:
        #     # y4m -> yuv
        #     os.system('ffmpeg -i {}.y4m {}.yuv -y'.format(
        #         os.path.join(y4m_path, y4m_name),
        #         os.path.join(y4m_path, y4m_name)
        #     ))
def encode_yuv(y4m_path, y4m_metainfos, result_path, hm_path, qp=32,
               base_cfg_path='/home/web_server/zhouhuanxiang/disk/HM-16.0/cfg/encoder_lowdelay_P_main.cfg',
               cfg_root='/home/web_server/zhouhuanxiang/disk/Xiph'):
    """Generate one HM config per clip and run the HM encoder on it.

    A per-clip config is assembled from a File-I/O header plus the body of
    the low-delay-P template (its lines 4-36 and 38-115, with the template's
    own QP line replaced by ``QP: <qp>``), written to
    ``<cfg_root>/xiph_cfg_<qp>/<clip>.cfg`` and passed to the encoder.

    NOTE(review): the ``count % 6 != 5`` skip looks like manual sharding —
    each of 6 parallel launches was presumably edited to a different offset
    (see the usage notes at the bottom of this file); confirm before reuse.

    Args:
        y4m_path: directory holding the raw ``<clip>.yuv`` inputs.
        y4m_metainfos: clip-name -> {'width','height','length'} (strings),
            as returned by get_y4m_metainfo().
        result_path: output directory for the encoded .bin and the
            reconstructed .yuv (created if missing).
        hm_path: path to the HM encoder binary (TAppEncoderStatic).
        qp: HEVC quantization parameter written into each generated config.
        base_cfg_path: path to HM's encoder_lowdelay_P_main.cfg template
            (must have at least 116 lines).
        cfg_root: directory under which the per-QP config folder is created.
    """
    # Read the template once; the context manager avoids leaking the handle.
    with open(base_cfg_path) as fp:
        lowdelay_cfg = fp.readlines()
    cfg_dir = os.path.join(cfg_root, 'xiph_cfg_' + str(qp))
    os.makedirs(cfg_dir, exist_ok=True)
    os.makedirs(result_path, exist_ok=True)
    print(y4m_metainfos)
    count = -1
    for y4m_name, metainfo in y4m_metainfos.items():
        count += 1
        # Shard: this invocation only handles every 6th clip (offset 5).
        if count % 6 != 5:
            continue
        if y4m_name.find('4096x2160_60fps_10bit') > 0:
            # 4K/10-bit sources were pre-converted to half-size 8-bit yuv.
            real_name = y4m_name.replace('4096x2160_60fps_10bit', '2048x1080_60fps_8bit')
            # Integer division: plain '/' would yield e.g. '2048.0', which
            # HM cannot parse as SourceWidth/SourceHeight.
            width = str(int(metainfo['width']) // 2)
            height = str(int(metainfo['height']) // 2)
        else:
            real_name = y4m_name
            width = metainfo['width']
            height = metainfo['height']
        print(real_name, width, height)
        print('***********\n')
        header = '#======== File I/O ===============\n'\
            'InputFile: {}\n'\
            'InputBitDepth: 8\n'\
            'InputChromaFormat: 420\n'\
            'FrameRate: 30\n'\
            'FrameSkip: 0\n'\
            'SourceWidth: {}\n'\
            'SourceHeight: {}\n'\
            'FramesToBeEncoded: {}\n\n'\
            'BitstreamFile: {}\n'\
            'ReconFile: {}\n\n'.format(
                os.path.join(y4m_path, real_name + '.yuv'),
                width,
                height,
                metainfo['length'],
                os.path.join(result_path, real_name + '.bin'),
                os.path.join(result_path, real_name + '.yuv')
            )
        # Template lines 4-36, our QP override (template line 37 is skipped),
        # then template lines 38-115.
        body = ''.join(lowdelay_cfg[4:37]) \
            + 'QP: {}\n'.format(qp) \
            + ''.join(lowdelay_cfg[38:116])
        cfg_file = os.path.join(cfg_dir, real_name + '.cfg')
        with open(cfg_file, 'w') as fp:
            fp.write(header)
            fp.write(body)
        os.system('{} -c {}'.format(hm_path, cfg_file))
def encode_yuv_parallel():
    # TODO: unfinished stub — the glob pattern is empty, so cfg_files is
    # always []. Presumably intended to collect the generated .cfg files and
    # launch encoder jobs in parallel; never completed.
    cfg_files = glob.glob('')
def main(argv):
yuv60_path = '/home/web_server/zhouhuanxiang/mmsr/codes/data_scripts/YUV_60.txt'
y4m_path = argv[1]
result_path = argv[2]
hm_path = argv[3]
y4m_metainfos = get_y4m_metainfo(yuv60_path, y4m_path)
with open('/home/web_server/zhouhuanxiang/mmsr/codes/data_scripts/y4m_metainfos.json', 'w') as fp:
json.dump(y4m_metainfos, fp, indent=4)
# convert_y4m2yuv(y4m_path, y4m_metainfos)
encode_yuv(y4m_path, y4m_metainfos, result_path, hm_path, qp=37)
if __name__ == '__main__':
main(sys.argv)
'''
python /home/web_server/zhouhuanxiang/mmsr/codes/data_scripts/encode_xiph_with_hm16.py \
/home/web_server/zhouhuanxiang/disk/Xiph/Xiph \
/home/web_server/zhouhuanxiang/disk/Xiph/Xiph_encoded \
/home/web_server/zhouhuanxiang/disk/HM-16.0/bin/TAppEncoderStatic
'''
'''
nohup python /home/web_server/zhouhuanxiang/mmsr/codes/data_scripts/encode_xiph_with_hm16.py \
/home/web_server/zhouhuanxiang/disk/Xiph/Xiph \
/home/web_server/zhouhuanxiang/disk/Xiph/Xiph_all_encoded \
/home/web_server/zhouhuanxiang/disk/HM-16.0/bin/TAppEncoderStatic > ~/zhouhuanxiang/encode 2>&1 &
'''
'''
nohup python /home/web_server/zhouhuanxiang/mmsr/codes/data_scripts/encode_xiph_with_hm16.py \
/home/web_server/zhouhuanxiang/disk/Xiph/Xiph \
/home/web_server/zhouhuanxiang/disk/Xiph/Xiph_all_encoded_37 \
/home/web_server/zhouhuanxiang/disk/HM-16.0/bin/TAppEncoderStatic > ~/zhouhuanxiang/encode5 2>&1 &
'''
| [
"zhouhx.cn@gmail.com"
] | zhouhx.cn@gmail.com |
574d9f240c46961ab64781b6206ab27569c919d4 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/sdl2/test/version_test.py | 4c4f54ca0aba4dd64e485bd5c91f9973da5dff93 | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 2,505 | py | import sys
import ctypes
import pytest
import sdl2
from sdl2 import dll, __version__, version_info
def test__version_tuple():
# Note that this is not public API.
assert dll._version_tuple_to_int((2, 0, 18)) == 2018
assert dll._version_tuple_to_int((2, 24, 1)) == 2241
# Micro version stops at 9 in this encoding
assert dll._version_tuple_to_int((2, 24, 15)) == 2249
assert dll._version_tuple_to_int((2, 99, 9)) == 2999
# Minor version stops at 99 in this encoding
assert dll._version_tuple_to_int((2, 103, 6)) == 2999
def test_SDL_version():
v = sdl2.SDL_version(0, 0, 0)
assert v.major == 0
assert v.minor == 0
assert v.patch == 0
def test_SDL_GetVersion():
v = sdl2.SDL_version()
sdl2.SDL_GetVersion(ctypes.byref(v))
assert type(v) == sdl2.SDL_version
assert v.major == 2
assert v.minor >= 0
assert v.patch >= 0
assert (v.major, v.minor, v.patch) >= (2, 0, 5)
assert (v.major, v.minor, v.patch) == dll.version_tuple
def test_SDL_VERSIONNUM():
assert sdl2.SDL_VERSIONNUM(1, 2, 3) == 1203
assert sdl2.SDL_VERSIONNUM(4, 5, 6) == 4506
assert sdl2.SDL_VERSIONNUM(2, 0, 0) == 2000
assert sdl2.SDL_VERSIONNUM(17, 42, 3) == 21203
# This is a bit weird now that SDL uses the minor version more often,
# but does sort in the correct order against all versions of SDL 2.
assert sdl2.SDL_VERSIONNUM(2, 23, 0) == 4300
# This is the highest possible SDL 2 version
assert sdl2.SDL_VERSIONNUM(2, 255, 99) == 27599
def test_SDL_VERSION_ATLEAST():
assert sdl2.SDL_VERSION_ATLEAST(1, 2, 3)
assert sdl2.SDL_VERSION_ATLEAST(2, 0, 0)
assert sdl2.SDL_VERSION_ATLEAST(2, 0, 1)
assert sdl2.SDL_VERSION_ATLEAST(
sdl2.SDL_MAJOR_VERSION, sdl2.SDL_MINOR_VERSION, sdl2.SDL_PATCHLEVEL
)
assert not sdl2.SDL_VERSION_ATLEAST(4, 0, 0)
def test_SDL_GetRevision():
rev = sdl2.SDL_GetRevision()
# If revision not empty string (e.g. Conda), test the prefix
if len(rev):
if dll.version_tuple >= (2, 0, 16):
if rev[0:4] not in (b"http", b"SDL-"):
pytest.xfail("no API guarantee about the format of this string")
else:
assert rev[0:3] == b"hg-"
def test_SDL_GetRevisionNumber():
if sys.platform in ("win32",) or dll.version_tuple >= (2, 0, 16):
# HG tip on Win32 does not set any revision number
assert sdl2.SDL_GetRevisionNumber() >= 0
else:
assert sdl2.SDL_GetRevisionNumber() >= 7000
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
1064dd2a532c28681368959e15db5a6a1789ba6d | 152f8c72bcb315bc0cf40ec389a97898cbc057c3 | /_sadm/listen/errors.py | bb22caffc4806e8930841d85c15e5e243312fe81 | [
"BSD-3-Clause"
] | permissive | jrmsdev/pysadm | 0a205cf7b4bf647461d480403051b5f88f82090b | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | refs/heads/master | 2022-06-11T07:33:53.847565 | 2019-11-13T04:25:00 | 2019-11-13T04:25:00 | 186,210,706 | 1 | 0 | BSD-3-Clause | 2022-06-03T22:45:36 | 2019-05-12T04:19:14 | Python | UTF-8 | Python | false | false | 1,470 | py | # Copyright (c) Jeremรญas Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
from bottle import response, HTTPError, request, HTTP_CODES
from _sadm import log
__all__ = ['init', 'error']
def _handler(code, error):
log.debug("handler %d" % code)
log.debug("%d - %s" % (error.status_code, error.status_line))
argsLen = len(error.args)
if argsLen >= 3:
log.error("%s %d - %s" % (request.remote_addr, code, error.args[2]))
if argsLen >= 4:
log.debug("%s" % error.args[3])
else:
log.error("%s %d - %s" % (request.remote_addr, code, request.path))
response.headers['Content-Type'] = 'text/plain; charset=UTF-8'
if code == 304:
# 304 response should not include body content
return ''
codeStatus = HTTP_CODES.get(code, None)
if codeStatus is not None:
return "%s\n" % codeStatus
return "ERROR %d\n" % code
_initDone = False
def init(wapp):
global _initDone
@wapp.error(304)
def error_304(error):
return _handler(304, error)
@wapp.error(400)
def error_400(error):
return _handler(400, error)
@wapp.error(403)
def error_403(error):
return _handler(403, error)
@wapp.error(404)
def error_404(error):
return _handler(404, error)
@wapp.error(405)
def error_405(error):
return _handler(405, error)
@wapp.error(500)
def error_500(error):
return _handler(500, error)
_initDone = True
def error(code, msg):
log.error("%s %d - %s" % (request.remote_addr, code, msg))
return HTTPError(
status = code,
body = msg,
)
| [
"jrmsdev@gmail.com"
] | jrmsdev@gmail.com |
d55baa8c7931039f27574f922f012fbfad8715c7 | ee10559195ec6e8e8c514ace5e131598a9ae29ae | /algo/algo_02/ํน๋ณํ์ ๋ ฌ.py | 85e3aae8e8c813cbafcb69669c7665333bd59b5d | [] | no_license | jun0811/TIL | 12e8f0ab0506ed6ba81e3c0b6f18c4d5743324d1 | 2bc6da0214ffad7989270b60d682d5060a37531f | refs/heads/master | 2023-04-03T09:56:30.434378 | 2020-11-12T08:55:32 | 2020-11-12T08:55:32 | 282,910,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | def select_max(arr, k):
for i in range(k):
max_n = i
for j in range(i+1, len(arr)):
if arr[max_n] < arr[j]:
max_n = j
arr[i], arr[max_n] = arr[max_n], arr[i]
return str(arr[k-1])
def select_min(arr, k):
for i in range(k):
min_n = i
for j in range(i+1, len(arr)):
if arr[min_n] > arr[j]:
min_n = j
arr[i], arr[min_n] = arr[min_n], arr[i]
return str(arr[k-1])
T = int(input())
for test_case in range(1,T+1):
N = int(input())
numbers = list(map(int,input().split()))
result = []
for i in range(1,6):
result.append(select_max(numbers,i))
result.append(select_min(numbers,i))
a= ' '.join(result)
print('#{} {}'.format(test_case, a))
| [
"nate199458@gmail.com"
] | nate199458@gmail.com |
5e88ad802cd4191e107338ee1973dcab8852c683 | 5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4 | /training/04_sorting/closest_numbers.py | a8911c2fa9d77f3da3ce44d5dceeaa0729b6d78a | [] | no_license | salvador-dali/algorithms_general | 04950bd823fc354adc58a4f23b7d2f3d39664798 | aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d | refs/heads/master | 2020-12-14T06:24:10.466601 | 2016-07-17T06:00:17 | 2016-07-17T06:00:17 | 47,397,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | # https://www.hackerrank.com/challenges/closest-numbers/
# Given a list of unsorted numbers, can you find the numbers that have the smallest absolute
# difference between them? If there are multiple pairs, find them all.
def closestNumbers(arr):
arr.sort()
smallestList, smallestDiff = [], 10**10
for i in range(len(arr) - 1):
diff = abs(arr[i] - arr[i + 1])
if diff == smallestDiff:
smallestList.extend([arr[i], arr[i + 1]])
if diff < smallestDiff:
smallestList = [arr[i], arr[i + 1]]
smallestDiff = diff
return smallestList
raw_input()
print ' '.join(map(str, closestNumbers(map(int, raw_input().split())))) | [
"dmytro@knowlabs.com"
] | dmytro@knowlabs.com |
7dda270cb89324484f1f2395537ee120b96934c0 | 63efeff58299f3ca66c7be0aa80d636ade379ebf | /March/firstNotRepeatingCharacters.py | 7d6c8eeff907be76a685eb78cdf248738008b223 | [] | no_license | gosch/Katas-in-python | 0eb6bafe2d6d42dac64c644c2fd48f90bdcef22b | f89ee2accdde75222fa1e4e0ca8b4f8e27b7b760 | refs/heads/master | 2021-07-24T23:50:26.268217 | 2020-04-14T23:53:15 | 2020-04-14T23:53:15 | 137,545,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | def firstNotRepeatingCharacter(s):
v = [0] * 26
for i in s:
v[ord(i) - ord('a')] += 1
for i in s:
if v[ord(i) - ord['a']] == 1:
return i
return '_'
print(firstNotRepeatingCharacter("abacabad")) | [
"francisco.gosch@ge.com"
] | francisco.gosch@ge.com |
544f39d0e4849b3724752b33837d4ade1f75bb68 | b5ce6908490cfb8e6a1e1cbe4745d675122ddce0 | /questions/compare-version-numbers/Solution.py | c09a63ebef803affca38aee90843be66dd2e765e | [
"MIT"
] | permissive | franklingu/leetcode-solutions | 8895910f13208e1d8e604100d84c2dd35684cde4 | 7ad7e5c1c040510b7b7bd225ed4297054464dbc6 | refs/heads/master | 2023-01-09T01:34:08.097518 | 2023-01-02T02:05:35 | 2023-01-02T02:05:35 | 43,345,677 | 155 | 66 | MIT | 2020-10-02T03:41:36 | 2015-09-29T04:54:38 | Python | UTF-8 | Python | false | false | 2,144 | py | """
Compare two version numbers version1 and version2.
If version1 > version2 return 1; if version1 < version2 return -1;otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three", it is the fifth second-level revision of the second first-level revision.
You may assume the default revision number for each level of a version number to be 0. For example, version number 3.4 has a revision number of 3 and 4 for its first and second level revision number. Its third and fourth level revision number are both 0.
Example 1:
Input: version1 = "0.1", version2 = "1.1"
Output: -1
Example 2:
Input: version1 = "1.0.1", version2 = "1"
Output: 1
Example 3:
Input: version1 = "7.5.2.4", version2 = "7.5.3"
Output: -1
Example 4:
Input: version1 = "1.01", version2 = "1.001"
Output: 0
Explanation: Ignoring leading zeroes, both โ01โ and โ001" represent the same number โ1โ
Example 5:
Input: version1 = "1.0", version2 = "1.0.0"
Output: 0
Explanation: The first version number does not have a third level revision number, which means its third level revision number is default to "0"
Note:
Version strings are composed of numeric strings separated by dots . and this numeric strings may have leading zeroes.
Version strings do not start or end with dots, and they will not be two consecutive dots.
"""
import itertools
class Solution:
def compareVersion(self, version1: str, version2: str) -> int:
v1 = version1.split('.')
v2 = version2.split('.')
for s1, s2 in itertools.zip_longest(v1, v2, fillvalue='0'):
if len(s1) > len(s2):
s2 = '0' * (len(s1) - len(s2)) + s2
elif len(s1) < len(s2):
s1 = '0' * (len(s2) - len(s1)) + s1
for c1, c2 in itertools.zip_longest(s1, s2):
if c1 > c2:
return 1
elif c1 < c2:
return -1
return 0
| [
"franklingujunchao@gmail.com"
] | franklingujunchao@gmail.com |
b883ed842f7690fde699ff443ca676adc01a44b0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_vinegary.py | 653de88336afb19a4ab2a15bab7561617533e496 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py |
#calss header
class _VINEGARY():
def __init__(self,):
self.name = "VINEGARY"
self.definitions = [u'tasting or smelling like vinegar: ', u'angry and unpleasant, or giving a lot of criticism: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
aeff02783c27d7d8d94f42c54b1ddcec5836af70 | 7619aed8a311e2832634379762c373886f4354fb | /trace_floodlight_firewall-MeshTopology4-steps200/replay_config.py | 87195057cf5fe11f0768a6301d89358f649e3825 | [] | no_license | jmiserez/sdnracer-traces | b60f8588277c4dc2dad9fe270c05418c47d229b3 | 8991eee19103c8ebffd6ffe15d88dd8c25e1aad5 | refs/heads/master | 2021-01-21T18:21:32.040221 | 2015-12-15T14:34:46 | 2015-12-15T14:34:46 | 39,391,225 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow.replayer import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='java -ea -Dlogback.configurationFile=./src/main/resources/logback-trace.xml -jar ./target/floodlight.jar -cf ./src/main/resources/trace_firewall.properties', label='c1', address='127.0.0.1', cwd='../floodlight')],
topology_class=MeshTopology,
topology_params="num_switches=4",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
ignore_interposition=False,
kill_controllers_on_exit=True)
control_flow = Replayer(simulation_config, "paper/trace_floodlight_firewall-MeshTopology4-steps200/events.trace",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
default_dp_permit=False,
pass_through_whitelisted_messages=False,
invariant_check_name='InvariantChecker.check_liveness',
bug_signature="")
| [
"jeremie@miserez.org"
] | jeremie@miserez.org |
549cc4a66e1b69d45d897dcd245f0cc77ec288e0 | 20b4be7df5efeb8019356659c5d054f29f450aa1 | /GUI/overview_window.py | bb05a82a2d5d38e0492b66b6c1b48e14bfd07791 | [
"Apache-2.0"
] | permissive | kumars99/TradzQAI | 75c4138e30796573d67a5f08d9674c1488feb8e4 | 1551321642b6749d9cf26caf2e822051a105b1a5 | refs/heads/master | 2020-03-29T20:14:45.562143 | 2018-09-25T16:07:21 | 2018-09-25T16:07:21 | 150,302,554 | 1 | 0 | null | 2018-09-25T17:17:54 | 2018-09-25T17:17:54 | null | UTF-8 | Python | false | false | 10,769 | py | from tools import *
import time
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import pandas as pd
import numpy as np
class OverviewWindow(QWidget):
def __init__(self, root, env):
super(QWidget, self).__init__(root)
self.ordr = pd.DataFrame(columns=['Orders'])
self.dailyp = 0
self.daily_reward = 0
self.tot_reward = 0
self.dday = 1
self.lt = 0
GB = QGridLayout(self)
GB.addWidget(self.Agent_Inventory_Init(env), 0, 2, 2, 1)
GB.addWidget(self.Agent_Orders_Init(), 0, 1)
GB.addWidget(self.Agent_Value_Init(env), 0, 0, 2, 1)
GB.addWidget(self.Data_Init(), 1, 1)
def Agent_env(self, env):
GBox = QGroupBox("Environnement")
VBox = QVBoxLayout()
self.lmode = QLabel('Mode : ' + env.mode)
self.lmodel = QLabel('Model : ' + env.dataDirectory)
self.lmax_order = QLabel('Max pos : ' + str(env.wallet.risk_managment['max_pos']))
self.lcontract_price = QLabel('Contract price : ' + str(env.contract_settings['contract_price']))
self.lpip_value = QLabel('Pip Value : ' + str(env.contract_settings['pip_value']))
self.lspread = QLabel('Spread : ' + str(env.contract_settings['spread']))
VBox.addWidget(self.lmode)
VBox.addWidget(self.lmodel)
VBox.addWidget(self.lmax_order)
VBox.addWidget(self.lcontract_price)
VBox.addWidget(self.lpip_value)
VBox.addWidget(self.lspread)
VBox.addStretch(1)
GBox.setLayout(VBox)
GBox.setFixedSize(220,180)
return GBox
def Agent_Inventory_Init(self, env):
GBox = QGroupBox("Agent inventory")
VBox = QVBoxLayout()
self.linventory = QLabel('Empty inventory')
self.linventory.setAlignment(Qt.AlignHCenter)
VBox.addWidget(self.linventory)
VBox.addWidget(self.Agent_Winrate())
#VBox.addStretch()
h = 245
if env._platform == 'win32':
w = 900
if env._platform == 'Linux':
w = 915
GBox.setLayout(VBox)
GBox.setFixedSize(h,w)
return GBox
def Agent_Orders_Init(self):
GBox = QGroupBox("Agent orders")
VBox = QVBoxLayout()
self.Llist = QListWidget()
self.l = []
for i in range(38):
if i < 1:
orderi = QLabel('No orders taken yet')
else:
orderi = QLabel()
orderi.setAlignment(Qt.AlignCenter)
VBox.addWidget(orderi)
self.l.append(orderi)
VBox.addStretch(1)
GBox.setLayout(VBox)
GBox.setFixedSize(1250,775)
return GBox
def Data_Init(self):
GBox = QGroupBox("Data")
VBox = QVBoxLayout()
self.lday = QLabel('Day : 0 / 0')
self.lperc = QLabel('0 %')
self.ldata = QLabel('Data : 0 / 0')
self.lep = QLabel('Episode : 0 / 0')
self.lperc.setAlignment(Qt.AlignCenter)
self.lday.setAlignment(Qt.AlignCenter)
self.ldata.setAlignment(Qt.AlignCenter)
self.lep.setAlignment(Qt.AlignCenter)
VBox.addWidget(self.lperc)
VBox.addWidget(self.lday)
VBox.addWidget(self.ldata)
VBox.addWidget(self.lep)
VBox.addStretch()
GBox.setLayout(VBox)
GBox.setFixedSize(1250,115)
GBox.setAlignment(Qt.AlignCenter)
return GBox
def Agent_Winrate(self):
GBox = QGroupBox("Winrate")
VBox = QVBoxLayout()
self.lwin = QLabel('Win : 0')
self.lloose = QLabel('Loose : 0')
self.ldraw = QLabel('Draw : 0')
self.ltoto = QLabel('Total : 0')
self.lwinrate = QLabel('Winrate : 0')
VBox.addWidget(self.lwin)
VBox.addWidget(self.lloose)
VBox.addWidget(self.ldraw)
VBox.addWidget(self.ltoto)
VBox.addWidget(self.lwinrate)
VBox.addStretch()
GBox.setLayout(VBox)
GBox.setFixedSize(220,155)
return GBox
def Agent_Value_Init(self, env):
GBox = QGroupBox("Agent value")
VBox = QVBoxLayout()
self.lact = QLabel('Action : None')
self.lact.setAlignment(Qt.AlignHCenter)
VBox.addWidget(self.lact)
VBox.addWidget(self.Agent_env(env))
VBox.addWidget(self.Agent_Wallet(env))
VBox.addWidget(self.Agent_Profit())
VBox.addWidget(self.Agent_Reward())
VBox.addWidget(self.Time_Init())
VBox.addStretch()
GBox.setLayout(VBox)
h = 245
if env._platform == 'win32':
w = 900
if env._platform == 'Linux':
w = 915
GBox.setFixedSize(h,w)
return GBox
def Agent_Wallet(self, env):
GBox = QGroupBox("Wallet")
VBox = QVBoxLayout()
self.lcap = QLabel('Capital : ' + formatPrice(env.wallet.settings['capital']))
self.lcgl = QLabel('Current G/L : ' + formatPrice(env.wallet.settings['GL_profit']))
self.lusable_margin = QLabel('Usable margin : ' + formatPrice(env.wallet.settings['usable_margin']))
self.lused_margin = QLabel('Used margin : ' + formatPrice(env.wallet.settings['used_margin']))
VBox.addWidget(self.lcap)
VBox.addWidget(self.lcgl)
VBox.addWidget(self.lusable_margin)
VBox.addWidget(self.lused_margin)
VBox.addStretch()
GBox.setLayout(VBox)
GBox.setFixedSize(220,130)
return GBox
def Agent_Profit(self):
GBox = QGroupBox("Profit")
VBox = QVBoxLayout()
self.lcurp = QLabel('Current : 0')
self.ldailyp = QLabel('Daily : 0')
self.ltotp = QLabel('Total : 0')
VBox.addWidget(self.lcurp)
VBox.addWidget(self.ldailyp)
VBox.addWidget(self.ltotp)
VBox.addStretch()
GBox.setLayout(VBox)
GBox.setFixedSize(220,100)
return GBox
def Agent_Reward(self):
GBox = QGroupBox("Reward")
VBox = QVBoxLayout()
self.lcurr = QLabel('Current : 0')
self.ldailyr = QLabel('Daily : 0')
self.ltotr = QLabel('Total : 0')
VBox.addWidget(self.lcurr)
VBox.addWidget(self.ldailyr)
VBox.addWidget(self.ltotr)
VBox.addStretch()
GBox.setLayout(VBox)
GBox.setFixedSize(220,100)
return GBox
def Time_Init(self):
GBox = QGroupBox("Time")
VBox = QVBoxLayout()
self.lstart_t = QLabel('Since start : ' + time.strftime("%H:%M:%S", time.gmtime(0)))
self.lloop_t = QLabel('Loop : 0 ms')
self.leta = QLabel('ETA : ' + time.strftime("%H:%M:%S", time.gmtime(0)))
VBox.addWidget(self.lstart_t)
VBox.addWidget(self.lloop_t)
VBox.addWidget(self.leta)
VBox.addStretch()
GBox.setLayout(VBox)
GBox.setFixedSize(220,100)
return GBox
def Update_Overview(self, env):
# Episode reset
if env.new_episode is True:
self.lt = 0
env.new_episode = False
#Agent Values
self.lact.setText('Action : ' + str(env.act))
#Inventory
if len(env.inventory.get_inventory()['Price']) < 1:
self.linventory.setText('Empty inventory')
else:
self.linventory.setText(str(env.inventory.get_inventory()))
#Orders Done
if env.mod_ordr is True:
for i in range(len(np.array(self.ordr))):
if "Profit : -" in str(np.array(self.ordr)[i]):
self.l[i].setStyleSheet("QLabel {color: red}")
elif float(str(np.array(self.ordr)[i]).split(" ")[10]) == 0:
self.l[i].setStyleSheet("QLabel {color: white}")
else:
self.l[i].setStyleSheet("QLabel {color: green}")
self.l[i].setText(str(np.array(self.ordr)[i]))
#Orders
self.lwin.setText("Win : " + str(env.trade['win']))
self.lloose.setText("Loose : " + str(env.trade['loss']))
self.ldraw.setText("Draw : " + str(env.trade['draw']))
self.ltoto.setText("Total : " + str (env.trade['loss'] + env.trade['win'] + env.trade['draw']))
if env.trade['loss'] == 0:
self.lwinrate.setText("Winrate : " + str(1))
else:
self.lwinrate.setText("Winrate : " + '{:.3f}'.format(env.trade['win'] / (env.trade['loss'] + env.trade['win'])))
#Data
self.lep.setText("Episode : " + str(env.current_step['episode']) + " / " + str(env.episode_count))
self.lday.setText("Day : " + str(env.date['day']) + " / " + str(env.date['total_day']))
self.ldata.setText("Current : " +str(env.current_step['step'])+ " / " +str(len(env.data) - 1))
self.lperc.setText('{:.2f}'.format(float((env.current_step['step'] * 100 ) / len(env.data - 1))) + " %")
#Wallet
self.lcap.setText('Capital : ' + formatPrice(env.wallet.settings['capital']))
self.lcgl.setText('Current G/L : ' + formatPrice(env.wallet.settings['GL_profit']))
self.lusable_margin.setText('Usable margin : ' + formatPrice(env.wallet.settings['usable_margin']))
self.lused_margin.setText('Used margin : ' + formatPrice(env.wallet.settings['used_margin']))
#Profit
self.lcurp.setText("Current : " + formatPrice(env.wallet.profit['current']))
self.ldailyp.setText("Daily : " + formatPrice(env.wallet.profit['daily']))
self.ltotp.setText("Total : " + formatPrice(env.wallet.profit['total']))
#Reward
self.lcurr.setText("Current : " + str(env.reward['current']))
self.ldailyr.setText("Daily : " + str(env.reward['daily']))
self.ltotr.setText("Total : " + str(env.reward['total']))
#Time
now = time.time() - env.start_t
self.lstart_t.setText("Since start : " + '{:3d}'.format(int(time.strftime("%d", time.gmtime(now))) - 1) + ":" + time.strftime("%H:%M:%S", time.gmtime(now)))
self.lloop_t.setText("Loop : " + str(round((env.loop_t * 100), 3)) + " ms")
if env.current_step['step'] > 0 :
self.lt += env.loop_t
self.leta.setText("ETA : " + '{:3d}'.format(int(time.strftime("%d", time.gmtime(((self.lt / env.current_step['step'] ) * ((len(env.data) - 1) - env.current_step['step']))))) - 1) + ":" + time.strftime("%H:%M:%S", time.gmtime((self.lt / env.current_step['step']) * ((len(env.data) - 1) - env.current_step['step']))))
else:
self.leta.setText("ETA : " + '{:3d}'.format(int(time.strftime("%d", time.gmtime((self.lt / 1 ) * (len(env.data) - 1)))) - 1) + ":" + time.strftime("%H:%M:%S", time.gmtime((self.lt / 1) * (len(env.data) - 1))))
| [
"awakeproduction@hotmail.fr"
] | awakeproduction@hotmail.fr |
e8f2ee54b4593c5f2cba14c51572ac76ab1bd325 | 8d2e5b5ea408579faa699c09bdbea39e864cdee1 | /ufora/networking/SocketStringChannel.py | 7dd8db8de7d30be32e2bfbb205bf76b4a306ae67 | [
"dtoa",
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | iantuioti/ufora | 2218ef4c7e33c171268ce11458e9335be7421943 | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | refs/heads/master | 2021-01-17T17:08:39.228987 | 2017-01-30T16:00:45 | 2017-01-30T16:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ufora.native.SocketStringChannel as SocketStringChannelNative
#note that when we create native Channel objects, we need to keep the python object alive
#indefinitely. Otherwise, if we lose the python socket, it will close the file descriptor.
#we can't use os.dup to duplicate the descriptors because it occasionally produces file descriptors
#that conflict with incoming sockets.
allSockets_ = []
def SocketStringChannel(callbackScheduler, socket):
"""Create a SocketStringChannel from a python socket object.
The resulting class is an instance of ufora.native.StringChannel.StringChannel. We keep the
python socket object alive. This prevents it from releasing the file descriptor on its own,
since the SocketStringChannel does that itself.
"""
allSockets_.append(socket)
return SocketStringChannelNative.SocketStringChannel(callbackScheduler, socket.fileno())
| [
"braxton.mckee@gmail.com"
] | braxton.mckee@gmail.com |
9e3346c416adf380dc6e422d8318352326c33b14 | 6deafbf6257a5c30f084c3678712235c2c31a686 | /Toolz/sqlmap/plugins/dbms/sybase/fingerprint.py | c88b22d045d9152ffd54bb1a076a59d3a2daa804 | [
"Unlicense",
"LicenseRef-scancode-generic-cla",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-other-permissive"
] | permissive | thezakman/CTF-Heaven | 53fcb4a72afa821ad05d8cc3b309fb388f958163 | 4b52a2178922f1502ab00fa8fc156d35e1dc653f | refs/heads/master | 2023-04-05T18:20:54.680378 | 2023-03-21T13:47:45 | 2023-03-21T13:47:45 | 167,290,879 | 182 | 24 | Unlicense | 2022-11-29T21:41:30 | 2019-01-24T02:44:24 | Python | UTF-8 | Python | false | false | 3,355 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.common import unArrayizeValue
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import OS
from lib.core.session import setDbms
from lib.core.settings import SYBASE_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
def __init__(self):
GenericFingerprint.__init__(self, DBMS.SYBASE)
def getFingerprint(self):
value = ""
wsOsFp = Format.getOs("web server", kb.headersFp)
if wsOsFp:
value += "%s\n" % wsOsFp
if kb.data.banner:
dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)
if dbmsOsFp:
value += "%s\n" % dbmsOsFp
value += "back-end DBMS: "
if not conf.extensiveFp:
value += DBMS.SYBASE
return value
actVer = Format.getDbms()
blank = " " * 15
value += "active fingerprint: %s" % actVer
if kb.bannerFp:
banVer = kb.bannerFp.get("dbmsVersion")
banVer = Format.getDbms([banVer])
value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)
htmlErrorFp = Format.getErrorParsedDBMSes()
if htmlErrorFp:
value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)
return value
def checkDbms(self):
if not conf.extensiveFp and Backend.isDbmsWithin(SYBASE_ALIASES):
setDbms("%s %s" % (DBMS.SYBASE, Backend.getVersion()))
self.getBanner()
Backend.setOs(OS.WINDOWS)
return True
infoMsg = "testing %s" % DBMS.SYBASE
logger.info(infoMsg)
if conf.direct:
result = True
else:
result = inject.checkBooleanExpression("@@transtate=@@transtate")
if result:
infoMsg = "confirming %s" % DBMS.SYBASE
logger.info(infoMsg)
result = inject.checkBooleanExpression("suser_id()=suser_id()")
if not result:
warnMsg = "the back-end DBMS is not %s" % DBMS.SYBASE
logger.warn(warnMsg)
return False
setDbms(DBMS.SYBASE)
self.getBanner()
if not conf.extensiveFp:
return True
infoMsg = "actively fingerprinting %s" % DBMS.SYBASE
logger.info(infoMsg)
result = unArrayizeValue(inject.getValue("SUBSTRING(@@VERSION,1,1)"))
if result and result.isdigit():
Backend.setVersion(str(result))
else:
for version in xrange(12, 16):
result = inject.checkBooleanExpression("PATINDEX('%%/%d[./]%%',@@VERSION)>0" % version)
if result:
Backend.setVersion(str(version))
break
return True
else:
warnMsg = "the back-end DBMS is not %s" % DBMS.SYBASE
logger.warn(warnMsg)
return False
| [
"thezakman@ctf-br.org"
] | thezakman@ctf-br.org |
f8bd0380e72c08810391f7b6c48e94849efcf7a1 | e06700779972be87d537d1fee69d1808317c4c65 | /posts/views.py | 506535f85514465bdc3a83139bd13b42184ff508 | [] | no_license | ajy720/django_Study | 5d73d14974ff8c538871dd0b5782d431baf6bdce | a7b7e7ad12239b9b296f8055c6d417e6835938f3 | refs/heads/master | 2022-06-05T13:21:01.479148 | 2020-05-04T05:37:17 | 2020-05-04T05:37:17 | 229,542,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,961 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from .models import Post
from .forms import PostForm
import pdb
def main(request):
context = {
"posts": Post.objects.all(), # ์ ๋ ฌ ๋ฐฉ๋ฒ 1. models.py์์ ํด์ฃผ๋ ๋ฐฉ๋ฒ
# "posts": Post.objects.order_by("-created_at"), # ์ ๋ ฌ ๋ฐฉ๋ฒ 2
}
return render(request, "posts/main.html", context)
def new(request):
context = {
"form": PostForm(), # PostForm ์์ ์ ๋ฌ
}
return render(request, "posts/new.html", context)
@require_POST # ๋ฐ์ฝ๋ ์ดํฐ(๋ฉ์๋๋ฅผ ๊พธ๋ฉฐ์ฃผ๋ ์ญํ )๋ก ์ ์ด์ POST ๋ฐฉ์์ ๊ฐ๋ง ๋ฐ๋๋ก ์ค์
def create(request):
form = PostForm(
request.POST, request.FILES or None # PostForm์ ๋ฃ์ด์ค ๋๋ FILES ์์ ๋ค์ด์๋ ์ด๋ฏธ์ง๋ ํจ๊ป
) # POSTFORM์ด๋ผ๋ ๋ชจ๋ธ์ ์ ๋ฌ๋ฐ์ ๊ฐ์ฒด ๋ฃ๊ณ ์์ฑ
if form.is_valid():
form.save()
# return redirect("main") # ์ฒซ๋ฒ์งธ ๋ฐฉ๋ฒ
return redirect(form.instance)
def show(request, post_id): # ๋ฐฉ๋ฒ 2. ์ฃผ์์์ ๊ฐ ์ ๋ฌ -> urls.py์์ post_id ์ ๋ฌํด์ค๊ฑฐ ๋ฐ๊ธฐ
# ๋ฐฉ๋ฒ 1
# post_id = request.GET.get("post_id") # ๋ฐ์ url์์ post_id๋ผ๋ ์ธ์ ๊ฐ ์ป๊ณ
# post = Post.objects.get(id=post_id) # Post ๊ฐ์ฒด๋ค ์ค์ ํด๋น post_id๋ฅผ id(Primary key)๋ก ๊ฐ๊ณ ์๋ ์น๊ตฌ๋ฅผ ์ฐพ์์
post = get_object_or_404(Post, id=post_id)
context = {
"post": post,
} # context์ ๋์
๋๋ฆฌ ํํ๋ก ๋ฃ์ด์ฃผ๊ณ
post.view_count += 1
post.save() # ๊ฐ์ฒด ์ ์ฅ
return render(request, "posts/show.html", context) # ํ
ํ๋ฆฟ์ ์ ๋ฌํ๋ฉด ํด๋น html(ํ
ํ๋ฆฟ) ์์์ ์ถ๋ ฅ
def edit(request, post_id):
post = get_object_or_404(Post, id=post_id)
context = {
"form": PostForm(instance=post), # ํด๋น id๋ฅผ ๊ฐ์ง๊ณ ์๋ ์๋ ๋ฐ์ดํฐ๋ฅผ ์ธ์คํด์ค์ ์ ์ฅ
"post": post, # update ํ ๋ ์ฐธ์กฐ์ฉ
}
return render(request, "posts/edit.html", context)
@require_POST
def update(request, post_id):
post = get_object_or_404(Post, id=post_id)
form = PostForm(
request.POST,
request.FILES or None, # PostForm์ ๋ฃ์ด์ค ๋๋ FILES ์์ ๋ค์ด์๋ ์ด๋ฏธ์ง๋ ํจ๊ป
instance=post,
) # instance ์์ฑ์ ๋ถ์ฌ ์ค์ผ๋ก์จ ์๋ก ์์ฑ์ด ์๋ ์๋ ์ธ์คํด์ค๋ฅผ ์์
if form.is_valid():
form.save()
# return redirect("posts:show", post_id) # ๊ฒ์๊ธ ํ๋ฉด์ผ๋ก ๋ฆฌ๋ค์ด๋ ํธ / ์๋ ๋ฐฉ๋ฒ
return redirect(
post
) # ๊ฒ์๊ธ ํ๋ฉด์ผ๋ก ๋ฆฌ๋ค์ด๋ ํธ / ์ฅ๊ณ ์ค๋ฐ ๋ฐฉ๋ฒ. ์? <- get_absolute_url์ด๋ ํจ์๊ฐ ์๋์ผ๋ก ๊ฐ์ฒด์ url์ ๋ฐํํด์ค.
@require_POST
def delete(request, post_id):
post = get_object_or_404(Post, id=post_id)
post.delete() # ์ญ์ ํ๋ ORM
return redirect("main")
| [
"ajy720@gmail.com"
] | ajy720@gmail.com |
f3c32a200cb11c07ce6f4e5211b35f50455899b3 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc046/B/4929082.py | 2f568f1b17a4e6afd368442ed6e63e3f3029f9e5 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | N, K =map(int, open(0).read().split())
a = K * ((K - 1) ** (N - 1))
print(a) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
52a07db43066c4044ac51b50f69717553d481755 | 315788ed9c3727acca394ad107b0a55285a7ddc4 | /listings/v4_tupel5.py | 67b4df36803ea7d2ff2283643e5057677979b704 | [] | no_license | n04hk/Python_Zusammenfassung | b118e967d5d5547ad3eb88f9570cb7c9de45d443 | 923fadb28ab4609450e532f08de41dc4bf4913d1 | refs/heads/master | 2020-04-24T20:28:43.656148 | 2019-04-28T13:52:31 | 2019-04-28T13:52:31 | 172,245,211 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | vorname, nachname = t
print(vorname) # Ausgabe: Peter
print(nachname) # Ausgabe: Mueller
| [
"37226768+n04hk@users.noreply.github.com"
] | 37226768+n04hk@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.