Dataset schema (one row per source file; ranges are min–max across the dataset, ⌀ = contains nulls):

| Column | Dtype | Range / classes |
|---|---|---|
| `blob_id` | string | length 40 |
| `directory_id` | string | length 40 |
| `path` | string | length 2–616 |
| `content_id` | string | length 40 |
| `detected_licenses` | list | 0–69 items |
| `license_type` | string | 2 classes |
| `repo_name` | string | length 5–118 |
| `snapshot_id` | string | length 40 |
| `revision_id` | string | length 40 |
| `branch_name` | string | length 4–63 |
| `visit_date` | timestamp[us] | |
| `revision_date` | timestamp[us] | |
| `committer_date` | timestamp[us] | |
| `github_id` | int64 | 2.91k–686M, ⌀ |
| `star_events_count` | int64 | 0–209k |
| `fork_events_count` | int64 | 0–110k |
| `gha_license_id` | string | 23 classes |
| `gha_event_created_at` | timestamp[us] | |
| `gha_created_at` | timestamp[us] | |
| `gha_language` | string | 213 classes |
| `src_encoding` | string | 30 classes |
| `language` | string | 1 class |
| `is_vendor` | bool | 2 classes |
| `is_generated` | bool | 2 classes |
| `length_bytes` | int64 | 2–10.3M |
| `extension` | string | 246 classes |
| `content` | string | length 2–10.3M |
| `authors` | list | 1 item |
| `author_id` | string | length 0–212 |
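Each row pairs repository metadata with the full text of one source file. As an illustrative sketch only (assuming the table is published in Hugging Face `datasets` format; the dataset path below is hypothetical, not the real source of this dump), a row with this schema can be streamed and inspected like so:

```python
# Hypothetical example: stream one row of a dataset with the schema above.
# "example/python-source-files" is a placeholder path, not the real dataset.
from datasets import load_dataset

ds = load_dataset("example/python-source-files", split="train", streaming=True)
row = next(iter(ds))
print(row["repo_name"], row["path"], row["length_bytes"])
print(row["content"][:200])  # first 200 characters of the source file
```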
**Record 1**

| Field | Value |
|---|---|
| `blob_id` | c57c61d86c5572b1948a7a0502578e0809ad170e |
| `directory_id` | e36472948f74fd5ed35fc64801a59db4efa27070 |
| `path` | `/part_1/04_6_test.py` |
| `content_id` | 174139579a5653b3edaa5a336c79a87ac1f5214f |
| `detected_licenses` | [] |
| `license_type` | no_license |
| `repo_name` | `anton1k/python_crash_course` |
| `snapshot_id` | 051aad7c5a043830d8cc9e5fd314f568bf0f4a53 |
| `revision_id` | 80f302074e5fef48fb40e72f7d79ab4b8658b38a |
| `branch_name` | `refs/heads/master` |
| `visit_date` | 2020-07-18T23:28:00.871466 |
| `revision_date` | 2019-09-04T14:06:12 |
| `committer_date` | 2019-09-04T14:06:12 |
| `github_id` | 206,333,934 |
| `star_events_count` | 0 |
| `fork_events_count` | 0 |
| `gha_license_id` | null |
| `gha_event_created_at` | null |
| `gha_created_at` | null |
| `gha_language` | null |
| `src_encoding` | UTF-8 |
| `language` | Python |
| `is_vendor` | false |
| `is_generated` | false |
| `length_bytes` | 75 |
| `extension` | `py` |
| `authors` | ["40913464+anton1k@users.noreply.github.com"] |
| `author_id` | 40913464+anton1k@users.noreply.github.com |

content:

```python
# range(1, 21, 2) yields the odd numbers from 1 to 19 (start 1, step 2)
square = list(range(1, 21, 2))
print(square)
for i in square:
    print(i)
```
**Record 2**

| Field | Value |
|---|---|
| `blob_id` | c269309d5a7e596f3b4d827d0729f1e6c2e2640b |
| `directory_id` | 5bcbf7fc2fd10bfd51df37aa806cc2305d0fe077 |
| `path` | `/mongokit_ng/schema_document.py` |
| `content_id` | 0a0063bf73fc7e9d786e72cf9a10b383720c63aa |
| `detected_licenses` | ["MIT"] |
| `license_type` | permissive |
| `repo_name` | `Windfarer/mongokit-ng` |
| `snapshot_id` | 29f9e10af48af77a44e829a2910c88f92af1f218 |
| `revision_id` | 52844c9242c4483694d4f3f65cf0d70a13df1c35 |
| `branch_name` | `refs/heads/master` |
| `visit_date` | 2023-08-20T02:39:45.240670 |
| `revision_date` | 2020-02-20T03:37:05 |
| `committer_date` | 2020-02-20T03:37:05 |
| `github_id` | 216,472,772 |
| `star_events_count` | 15 |
| `fork_events_count` | 3 |
| `gha_license_id` | MIT |
| `gha_event_created_at` | 2023-09-14T17:11:44 |
| `gha_created_at` | 2019-10-21T03:47:34 |
| `gha_language` | Python |
| `src_encoding` | UTF-8 |
| `language` | Python |
| `is_vendor` | false |
| `is_generated` | false |
| `length_bytes` | 42,541 |
| `extension` | `py` |
| `authors` | ["windfarer@gmail.com"] |
| `author_id` | windfarer@gmail.com |

content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bson
import datetime
import logging
from copy import deepcopy
log = logging.getLogger(__name__)
from .operators import SchemaOperator, IS
from .helpers import DotCollapsedDict
from .helpers import DotExpandedDict
from .helpers import i18nDotedDict
from .helpers import DotedDict
__all__ = [
'AuthorizedTypeError',
'BadKeyError',
'CustomType',
'DefaultFieldTypeError',
'DotCollapsedDict',
'DotedDict',
'DotExpandedDict',
'DuplicateDefaultValueError',
'DuplicateRequiredError',
'i18n',
'i18nError',
'ModifierOperatorError',
'RequireFieldError',
'SchemaDocument',
'SchemaDocumentError',
'SchemaProperties',
'SchemaTypeError',
'Set',
'StructureError',
'ValidationError',
]
class CustomType(object):
init_type = None
mongo_type = None
python_type = None
def __init__(self):
        if self.mongo_type is None:
            raise TypeError("`mongo_type` property must be specified in %s" %
                            self.__class__.__name__)
        if self.python_type is None:
            raise TypeError("`python_type` property must be specified in %s" %
                            self.__class__.__name__)
def to_bson(self, value):
"""convert type to a mongodb type"""
raise NotImplementedError
def to_python(self, value):
"""convert type to a mongodb type"""
raise NotImplementedError
def validate(self, value, path):
"""
        This method is optional. It adds a validation layer.
        It is called from Document.validate().
value: the value of the field
path: the field name (ie, 'foo' or 'foo.bar' if nested)
"""
pass
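# --- Illustrative sketch (editorial addition, not part of the original
# module): a minimal CustomType subclass making the to_bson/to_python
# contract concrete. It stores datetime.date (which BSON cannot encode)
# as datetime.datetime, and converts it back on read.
class Date(CustomType):
    mongo_type = datetime.datetime
    python_type = datetime.date

    def to_bson(self, value):
        if value is not None:
            return datetime.datetime(value.year, value.month, value.day)

    def to_python(self, value):
        if value is not None:
            return value.date()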
# fields which do not need to be declared in the structure
STRUCTURE_KEYWORDS = []
class SchemaDocumentError(Exception):
pass
class RequireFieldError(SchemaDocumentError):
pass
class StructureError(SchemaDocumentError):
pass
class BadKeyError(SchemaDocumentError):
pass
class AuthorizedTypeError(SchemaDocumentError):
pass
class ValidationError(SchemaDocumentError):
pass
class DuplicateRequiredError(SchemaDocumentError):
pass
class DuplicateDefaultValueError(SchemaDocumentError):
pass
class ModifierOperatorError(SchemaDocumentError):
pass
class SchemaTypeError(SchemaDocumentError):
pass
class DefaultFieldTypeError(SchemaDocumentError):
pass
class i18nError(SchemaDocumentError):
pass
class DeprecationError(Exception):
pass
class DuplicateI18nError(Exception):
pass
class SchemaProperties(type):
def __new__(mcs, name, bases, attrs):
attrs['_protected_field_names'] = set(
['_protected_field_names', '_namespaces', '_required_namespace'])
for base in bases:
parent = base.__mro__[0]
if not hasattr(parent, 'structure'):
continue
if parent.structure is not None:
#parent = parent()
if parent.structure:
if 'structure' not in attrs and parent.structure:
attrs['structure'] = parent.structure.copy()
else:
obj_structure = attrs.get('structure', {}).copy()
attrs['structure'] = parent.structure.copy()
attrs['structure'].update(obj_structure)
if parent.required_fields:
attrs['required_fields'] = list(set(
attrs.get('required_fields', [])+parent.required_fields))
if parent.default_values:
obj_default_values = attrs.get('default_values', {}).copy()
attrs['default_values'] = parent.default_values.copy()
attrs['default_values'].update(obj_default_values)
if parent.validators:
obj_validators = attrs.get('validators', {}).copy()
attrs['validators'] = parent.validators.copy()
attrs['validators'].update(obj_validators)
if parent.i18n:
attrs['i18n'] = list(set(
attrs.get('i18n', [])+parent.i18n))
if attrs.get('authorized_types'):
attrs['authorized_types'] = list(set(parent.authorized_types).union(set(attrs['authorized_types'])))
for mro in bases[0].__mro__:
attrs['_protected_field_names'] = attrs['_protected_field_names'].union(list(mro.__dict__))
attrs['_protected_field_names'] = list(attrs['_protected_field_names'])
if attrs.get('structure') and name not in \
["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
base = bases[0]
if not attrs.get('authorized_types'):
attrs['authorized_types'] = base.authorized_types
base._validate_structure(attrs['structure'], name, attrs.get('authorized_types'))
attrs['_namespaces'] = list(base._SchemaDocument__walk_dict(attrs['structure']))
if [1 for i in attrs['_namespaces'] if type(i) is type]:
raise DeprecationError("%s: types are not allowed as structure key anymore" % name)
mcs._validate_descriptors(attrs)
## building required fields namespace
attrs['_required_namespace'] = set([])
for rf in attrs.get('required_fields', []):
splited_rf = rf.split('.')
for index in range(len(splited_rf)):
attrs['_required_namespace'].add(".".join(splited_rf[:index+1]))
attrs['_collapsed_struct'] = DotCollapsedDict(attrs['structure'], remove_under_type=True)
elif attrs.get('structure') is not None and name not in \
["SchemaDocument", "Document", "VersionedDocument", "RevisionDocument"]:
attrs['_collapsed_struct'] = {}
attrs['_i18n_namespace'] = []
if attrs.get('i18n'):
attrs['_i18n_namespace'] = set(['.'.join(i.split('.')[:-1]) for i in attrs['i18n']])
return type.__new__(mcs, name, bases, attrs)
@classmethod
def _validate_descriptors(mcs, attrs):
# TODO i18n validator
for dv in attrs.get('default_values', {}):
if not dv in attrs['_namespaces']:
raise ValueError("Error in default_values: can't find %s in structure" % dv)
for required in attrs.get('required_fields', []):
if required not in attrs['_namespaces']:
raise ValueError("Error in required_fields: can't find %s in structure" % required)
for validator in attrs.get('validators', {}):
if validator not in attrs['_namespaces']:
raise ValueError("Error in validators: can't find %s in structure" % validator)
# required_field
if attrs.get('required_fields'):
if len(attrs['required_fields']) != len(set(attrs['required_fields'])):
raise DuplicateRequiredError("duplicate required_fields : %s" % attrs['required_fields'])
# i18n
if attrs.get('i18n'):
if len(attrs['i18n']) != len(set(attrs['i18n'])):
raise DuplicateI18nError("duplicated i18n : %s" % attrs['i18n'])
for _i18n in attrs['i18n']:
if _i18n not in attrs['_namespaces']:
raise ValueError("Error in i18n: can't find {} in structure".format(_i18n))
class SchemaDocument(dict, metaclass=SchemaProperties):
"""
    A SchemaDocument is a dictionary with a built-in structured schema.
    The validate method checks that the document matches the underlying
    structure. A structure must be specified in each SchemaDocument.
>>> class TestDoc(SchemaDocument):
... structure = {
... "foo":str,
... "bar":int,
... "nested":{
... "bla":float}}
`str`, `int`, `float` are python types listed in `mongokit.authorized_types`.
>>> doc = TestDoc()
>>> doc
{'foo': None, 'bar': None, 'nested': {'bla': None}}
A SchemaDocument works just like dict:
>>> doc['bar'] = 3
>>> doc['foo'] = "test"
    Fields can be marked as required with the required_fields attribute:
>>> TestDoc.required_fields = ['bar', 'nested.bla']
>>> doc = TestDoc()
>>> doc['bar'] = 2
Validation is made with the `validate()` method:
>>> doc.validate() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
RequireFieldError: nested.bla is required
    Default values can be set by using the default_values attribute:
>>> TestDoc.default_values = {"bar":3, "nested.bla":2.0}
>>> doc = TestDoc()
>>> doc
{'foo': None, 'bar': 3, 'nested': {'bla': 2.0}}
>>> doc.validate()
    Validators can be added in order to validate some values:
>>> TestDoc.validators = {"bar":lambda x: x>0, "nested.bla": lambda x: x<0}
>>> doc = TestDoc()
>>> doc['bar'] = 3
>>> doc['nested']['bla'] = 2.0
>>> doc.validate()
Traceback (most recent call last):
...
ValidationError: nested.bla does not pass the validator <lambda>
    If you want to use the dot notation (à la JSON), you must set the
`use_dot_notation` attribute to True:
>>> class TestDotNotation(SchemaDocument):
... structure = {
... "foo":{ "bar":str}
... }
... use_dot_notation=True
>>> doc = TestDotNotation()
>>> doc.foo.bar = u"bla"
>>> doc
{"foo":{"bar":u"bla}}
"""
structure = None
required_fields = []
default_values = {}
validators = {}
i18n = []
raise_validation_errors = True
skip_validation = False
# if you want to have all schemaless benefits (default False but should change)
# warning, if use_schemaless is True, Migration features can not be used.
use_schemaless = False
# If you want to use the dot notation, set this to True:
use_dot_notation = False
dot_notation_warning = False
authorized_types = [
type(None),
bool,
int,
float,
str,
list,
dict,
bytes,
datetime.datetime,
CustomType,
]
def __init__(self, doc=None, gen_skel=True, _gen_auth_types=True, _validate=True, lang='en', fallback_lang='en'):
"""
doc : a dictionary
        gen_skel : if True, automatically generate the skeleton of the doc
            filled with NoneType each time validate() is called. Note that
            if doc is not {}, gen_skel is always False. If gen_skel is False,
            default_values cannot be filled.
        gen_auth_types : if True, automatically generate the
            self.authorized_types attribute
"""
super(SchemaDocument, self).__init__()
if self.structure is None:
self.structure = {}
self._current_lang = lang
self._fallback_lang = fallback_lang
self.validation_errors = {}
# init
if doc:
for k, v in doc.items():
self[k] = v
gen_skel = False
if gen_skel:
self.generate_skeleton()
if self.default_values:
self._set_default_fields(self, self.structure)
else:
self._process_custom_type('python', self, self.structure)
if self.use_dot_notation:
self.__generate_doted_dict(self, self.structure)
if self.i18n:
self._make_i18n()
def generate_skeleton(self):
"""
validate and generate the skeleton of the document
from the structure (unknown values are set to None)
"""
self.__generate_skeleton(self, self.structure)
def validate(self):
"""
validate the document.
This method will verify if :
* the doc follow the structure,
* all required fields are filled
Additionally, this method will process all
validators.
"""
if self.validators:
self._process_validators(self, self.structure)
self._process_custom_type('bson', self, self.structure)
self._validate_doc(self, self.structure)
self._process_custom_type('python', self, self.structure)
if self.required_fields:
self._validate_required(self, self.structure)
def __setattr__(self, key, value):
if key not in self._protected_field_names and self.use_dot_notation and key in self:
if isinstance(self.structure[key], i18n):
self[key][self._current_lang] = value
else:
self[key] = value
else:
if self.dot_notation_warning and not key.startswith('_') and key not in \
['db', 'collection', 'versioning_collection', 'connection', 'fs']:
log.warning("dot notation: {} was not found in structure. Add it as attribute instead".format(key))
dict.__setattr__(self, key, value)
def __getattr__(self, key):
if key not in self._protected_field_names and self.use_dot_notation and key in self:
if isinstance(self[key], i18n):
if self._current_lang not in self[key]:
return self[key].get(self._fallback_lang)
return self[key][self._current_lang]
return self[key]
else:
return dict.__getattribute__(self, key)
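    # (Illustrative note: with use_dot_notation = True and 'title' declared as
    # an i18n field, `doc.title = u'hello'` goes through __setattr__ above and
    # stores doc['title'][doc._current_lang] = u'hello'; reading `doc.title`
    # falls back to `_fallback_lang` when the current language is missing.)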
#
# Public API end
#
@classmethod
def __walk_dict(cls, dic):
# thanks jean_b for the patch
for key, value in list(dic.items()):
if isinstance(value, dict) and len(value):
if type(key) is type:
yield '$%s' % key.__name__
else:
yield key
for child_key in cls.__walk_dict(value):
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
#if type(child_key) is type:
# new_child_key = "$%s" % child_key.__name__
#else:
if type(child_key) is not type:
new_child_key = child_key
yield '%s.%s' % (new_key, new_child_key)
elif type(key) is type:
yield '$%s' % key.__name__
# elif isinstance(value, list) and len(value):
# if isinstance(value[0], dict):
# for child_key in cls.__walk_dict(value[0]):
# #if type(key) is type:
# # new_key = "$%s" % key.__name__
# #else:
# if type(key) is not type:
# new_key = key
# #if type(child_key) is type:
# # new_child_key = "$%s" % child_key.__name__
# #else:
# if type(child_key) is not type:
# new_child_key = child_key
# yield '%s.%s' % (new_key, new_child_key)
# else:
# if type(key) is not type:
# yield key
# #else:
# # yield ""
else:
if type(key) is not type:
yield key
#else:
# yield ""
@classmethod
def _validate_structure(cls, structure, name, authorized_types):
"""
validate if all fields in self.structure are in authorized types.
"""
##############
def __validate_structure(struct, name, _authorized):
if type(struct) is type:
                if struct not in authorized_types:
                    raise StructureError("%s: %s is not an authorized type" % (name, struct))
elif isinstance(struct, dict):
for key in struct:
if isinstance(key, str):
if "." in key:
raise BadKeyError("%s: %s must not contain '.'" % (name, key))
if key.startswith('$'):
raise BadKeyError("%s: %s must not start with '$'" % (name, key))
elif type(key) is type:
if not key in authorized_types:
raise AuthorizedTypeError("%s: %s is not an authorized type" % (name, key))
else:
raise StructureError("%s: %s must be a str or a type" % (name, key))
if struct[key] is None:
pass
elif isinstance(struct[key], dict):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], list):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], tuple):
__validate_structure(struct[key], name, authorized_types)
elif isinstance(struct[key], CustomType):
__validate_structure(struct[key].mongo_type, name, authorized_types)
elif isinstance(struct[key], SchemaProperties):
pass
elif isinstance(struct[key], SchemaOperator):
__validate_structure(struct[key], name, authorized_types)
elif hasattr(struct[key], 'structure'):
__validate_structure(struct[key], name, authorized_types)
elif struct[key] not in authorized_types:
ok = False
for auth_type in authorized_types:
if struct[key] is None:
ok = True
else:
try:
if isinstance(struct[key], auth_type) or issubclass(struct[key], auth_type):
ok = True
except TypeError:
raise TypeError("%s: %s is not a type" % (name, struct[key]))
if not ok:
raise StructureError(
"%s: %s is not an authorized type" % (name, struct[key]))
elif isinstance(struct, list) or isinstance(struct, tuple):
for item in struct:
__validate_structure(item, name, authorized_types)
elif isinstance(struct, SchemaOperator):
if isinstance(struct, IS):
for operand in struct:
if type(operand) not in authorized_types:
raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
name, operand, struct, type(operand).__name__))
else:
for operand in struct:
if operand not in authorized_types:
raise StructureError("%s: %s in %s is not an authorized type (%s found)" % (
name, operand, struct, type(operand).__name__))
elif isinstance(struct, SchemaProperties):
pass
else:
ok = False
for auth_type in authorized_types:
if isinstance(struct, auth_type):
ok = True
if not ok:
raise StructureError("%s: %s is not an authorized_types" % (name, struct))
#################
if structure is None:
raise StructureError("%s.structure must not be None" % name)
if not isinstance(structure, dict):
raise StructureError("%s.structure must be a dict instance" % name)
__validate_structure(structure, name, authorized_types)
def _raise_exception(self, exception, field, message):
if self.raise_validation_errors:
raise exception(message)
else:
if not field in self.validation_errors:
self.validation_errors[field] = []
self.validation_errors[field].append(exception(message))
def _validate_doc(self, doc, struct, path=""):
"""
check if doc field types match the doc field structure
"""
if type(struct) is type or struct is None:
if struct is None:
if type(doc) not in self.authorized_types:
                    self._raise_exception(AuthorizedTypeError, type(doc).__name__,
                                          "%s is not an authorized type" % type(doc).__name__)
elif not isinstance(doc, struct) and doc is not None:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, struct.__name__, type(doc).__name__))
elif isinstance(struct, CustomType):
if not isinstance(doc, struct.mongo_type) and doc is not None:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, struct.mongo_type.__name__, type(doc).__name__))
struct.validate(doc, path=path)
elif isinstance(struct, SchemaOperator):
if not struct.validate(doc) and doc is not None:
if isinstance(struct, IS):
self._raise_exception(SchemaTypeError, path,
"%s must be in %s not %s" % (path, struct._operands, doc))
else:
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (path, struct, type(doc).__name__))
elif isinstance(struct, dict):
if not isinstance(doc, type(struct)):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of %s not %s" % (
path, type(struct).__name__, type(doc).__name__))
struct_length = len(struct) if not '_id' in struct else len(struct) - 1
if len(doc) != struct_length:
struct_doc_diff = list(set(struct).difference(set(doc)))
if struct_doc_diff:
for field in struct_doc_diff:
if (type(field) is not type) and (not self.use_schemaless):
self._raise_exception(StructureError, None,
"missed fields %s in %s" % (struct_doc_diff, type(doc).__name__))
else:
struct_struct_diff = list(set(doc).difference(set(struct)))
bad_fields = [s for s in struct_struct_diff if s not in STRUCTURE_KEYWORDS]
if bad_fields and not self.use_schemaless:
self._raise_exception(StructureError, None,
"unknown fields %s in %s" % (bad_fields, type(doc).__name__))
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
if new_key.split('.')[-1].startswith("$"):
for doc_key in doc:
if not isinstance(doc_key, key):
self._raise_exception(SchemaTypeError, path,
"key of %s must be an instance of %s not %s" % (
path, key.__name__, type(doc_key).__name__))
self._validate_doc(doc[doc_key], struct[key], new_path)
else:
if key in doc:
self._validate_doc(doc[key], struct[key], new_path)
elif isinstance(struct, list):
if not isinstance(doc, list) and not isinstance(doc, tuple):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of list not %s" % (path, type(doc).__name__))
if not len(struct):
struct = None
else:
struct = struct[0]
for obj in doc:
self._validate_doc(obj, struct, path)
elif isinstance(struct, tuple):
if not isinstance(doc, list) and not isinstance(doc, tuple):
self._raise_exception(SchemaTypeError, path,
"%s must be an instance of list not %s" % (
path, type(doc).__name__))
if len(doc) != len(struct):
self._raise_exception(SchemaTypeError, path, "%s must have %s items not %s" % (
path, len(struct), len(doc)))
for i in range(len(struct)):
self._validate_doc(doc[i], struct[i], path)
def _process_validators(self, doc, _struct, _path=""):
doted_doc = DotCollapsedDict(doc)
for key, validators in self.validators.items():
if key in doted_doc and doted_doc[key] is not None:
if not hasattr(validators, "__iter__"):
validators = [validators]
for validator in validators:
try:
if not validator(doted_doc[key]):
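                            # NB: the literal "%s" below is intentional; the
                            # except-clause fills it with the field name via
                            # `str(e) % key`.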
raise ValidationError("%s does not pass the validator " + validator.__name__)
except Exception as e:
self._raise_exception(ValidationError, key,
str(e) % key)
def _process_custom_type(self, target, doc, struct, path="", root_path=""):
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
        # if the value is a dict, we have another structure to process
        #
        #
        # it is neither a dict nor a list but a simple key:value
#
if isinstance(struct[key], CustomType):
if target == 'bson':
if key in doc:
if struct[key].python_type is not None:
if not isinstance(doc[key], struct[key].python_type) and doc[key] is not None:
self._raise_exception(SchemaTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key].python_type.__name__,
type(doc[key]).__name__))
doc[key] = struct[key].to_bson(doc[key])
else:
if key in doc:
doc[key] = struct[key].to_python(doc[key])
elif isinstance(struct[key], dict):
if doc: # we don't need to process an empty doc
if type(key) is type:
for doc_key in doc: # process type's key such {str:int}...
self._process_custom_type(target, doc[doc_key], struct[key], new_path, root_path)
else:
if key in doc: # we don't care about missing fields
self._process_custom_type(target, doc[key], struct[key], new_path, root_path)
#
# If the struct is a list, we have to validate all values into it
#
elif type(struct[key]) is list:
#
# check if the list must not be null
#
if struct[key]:
l_objs = []
if isinstance(struct[key][0], CustomType):
for obj in doc[key]:
if target == 'bson':
if struct[key][0].python_type is not None:
if not isinstance(obj, struct[key][0].python_type) and obj is not None:
self._raise_exception(SchemaTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key][0].python_type.__name__,
type(obj).__name__))
obj = struct[key][0].to_bson(obj)
else:
obj = struct[key][0].to_python(obj)
l_objs.append(obj)
doc[key] = l_objs
elif isinstance(struct[key][0], dict):
if doc.get(key):
for obj in doc[key]:
self._process_custom_type(target, obj, struct[key][0], new_path, root_path)
def _set_default_fields(self, doc, struct, path=""):
# TODO check this out, this method must be restructured
for key in struct:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
        # default_values:
        # if the value is None, check if a default value exists;
        # if one exists and it is a function, call it; otherwise
        # just use it
#
if type(key) is not type:
if doc[key] is None and new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if isinstance(struct[key], CustomType):
if not isinstance(new_value, struct[key].python_type):
self._raise_exception(DefaultFieldTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key].python_type.__name__,
type(new_value).__name__))
doc[key] = new_value
#
        # if the value is a dict, we have another structure to validate
#
if isinstance(struct[key], dict) and new_path not in self.i18n:
#
            # if the dict is still empty in the document, we build
            # it with None values
#
if len(struct[key]) and not [i for i in list(struct[key].keys()) if type(i) is type]:
self._set_default_fields(doc[key], struct[key], new_path)
else:
if new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
doc[key] = new_value
elif isinstance(struct[key], list):
if new_path in self.default_values:
for new_value in self.default_values[new_path]:
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if isinstance(struct[key][0], CustomType):
if not isinstance(new_value, struct[key][0].python_type):
self._raise_exception(DefaultFieldTypeError, new_path,
"%s must be an instance of %s not %s" % (
new_path, struct[key][0].python_type.__name__,
type(new_value).__name__))
doc[key].append(new_value)
else: # what else
if new_path in self.default_values:
new_value = self.default_values[new_path]
if callable(new_value):
new_value = new_value()
elif isinstance(new_value, dict):
new_value = deepcopy(new_value)
elif isinstance(new_value, list):
new_value = new_value[:]
if new_path in self.i18n:
doc[key] = i18n(
field_type=struct[key],
field_name=key
)
doc[key].update(new_value)
else:
doc[key] = new_value
def _validate_required(self, doc, _struct, _path="", _root_path=""):
doted_struct = DotCollapsedDict(self.structure)
doted_doc = DotCollapsedDict(doc, reference=doted_struct)
for req in self.required_fields:
if doted_doc.get(req) is None and doted_struct.get(req) is not dict:
if not isinstance(doted_struct.get(req), CustomType):
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif isinstance(doted_struct.get(req), CustomType) and doted_struct[req].mongo_type is not dict:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif doted_doc.get(req) == []:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
elif doted_doc.get(req) == {}:
self._raise_exception(RequireFieldError, req, "%s is required" % req)
def __generate_skeleton(self, doc, struct, path=""):
for key in struct:
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
#
        # Automatically generate the skeleton with NoneType
#
if type(key) is not type and key not in doc:
if isinstance(struct[key], dict):
if type(struct[key]) is dict and self.use_dot_notation:
if new_path in self._i18n_namespace:
doc[key] = i18nDotedDict(doc.get(key, {}), self)
else:
doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
else:
if callable(struct[key]):
doc[key] = struct[key]()
else:
doc[key] = type(struct[key])()
elif struct[key] is dict:
doc[key] = {}
elif isinstance(struct[key], list):
doc[key] = type(struct[key])()
elif isinstance(struct[key], CustomType):
if struct[key].init_type is not None:
doc[key] = struct[key].init_type()
else:
doc[key] = None
elif struct[key] is list:
doc[key] = []
elif isinstance(struct[key], tuple):
doc[key] = [None for _ in range(len(struct[key]))]
else:
doc[key] = None
#
        # if the value is a dict, we have another structure to validate
#
if isinstance(struct[key], dict) and type(key) is not type:
self.__generate_skeleton(doc[key], struct[key], new_path)
def __generate_doted_dict(self, doc, struct, path=""):
for key in struct:
#
        # Automatically generate the skeleton with NoneType
#
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
new_path = ".".join([path, new_key]).strip('.')
if type(key) is not type: # and key not in doc:
if isinstance(struct[key], dict):
if type(struct[key]) is dict:
if new_path in self._i18n_namespace:
doc[key] = i18nDotedDict(doc.get(key, {}), self)
else:
doc[key] = DotedDict(doc.get(key, {}), warning=self.dot_notation_warning)
#
        # if the value is a dict, we have another structure to validate
#
if isinstance(struct[key], dict) and type(key) is not type:
self.__generate_doted_dict(doc[key], struct[key], new_path)
def _make_i18n(self):
doted_dict = DotCollapsedDict(self.structure)
for field in self.i18n:
if field not in doted_dict:
self._raise_exception(ValidationError, field,
"%s not found in structure" % field)
if not isinstance(doted_dict[field], i18n):
doted_dict[field] = i18n(
field_type=doted_dict[field],
field_name=field
)
self.structure.update(DotExpandedDict(doted_dict))
def set_lang(self, lang):
self._current_lang = lang
def get_lang(self):
return self._current_lang
class i18n(dict, CustomType):
""" CustomType to deal with i18n """
mongo_type = list
def __init__(self, field_type=None, field_name=None):
super(i18n, self).__init__()
self.python_type = self.__class__
self._field_type = field_type
self._field_name = field_name
def __call__(self):
return i18n(self._field_type, self._field_name)
def to_bson(self, value):
if value is not None:
for l, v in value.items():
if isinstance(v, list) and isinstance(self._field_type, list):
for i in v:
if not isinstance(i, self._field_type[0]):
raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
self._field_name, l, self._field_type[0], type(i).__name__))
else:
if not isinstance(v, self._field_type):
raise SchemaTypeError("%s (%s) must be an instance of %s not %s" % (
self._field_name, l, self._field_type, type(v).__name__))
return [{'lang': l, 'value': v} for l, v in value.items()]
def to_python(self, value):
if value is not None:
i18n_dict = self.__class__(self._field_type)
for i in value:
i18n_dict[i['lang']] = i['value']
return i18n_dict
class Set(CustomType):
""" SET custom type to handle python set() type """
init_type = set
mongo_type = list
python_type = set
def __init__(self, structure_type=None):
super(Set, self).__init__()
self._structure_type = structure_type
def to_bson(self, value):
if value is not None:
return list(value)
def to_python(self, value):
if value is not None:
return set(value)
def validate(self, value, path):
if value is not None and self._structure_type is not None:
for val in value:
if not isinstance(val, self._structure_type):
raise ValueError('%s must be an instance of %s not %s' %
(path, self._structure_type.__name__, type(val).__name__))
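# --- Illustrative usage (editorial addition, not part of the original
# module): a minimal sketch of Set inside a document structure; the `Tagged`
# class is hypothetical.
class Tagged(SchemaDocument):
    structure = {'tags': Set(str)}

# doc = Tagged(); doc['tags'] = {'python', 'mongodb'}; doc.validate()
# Set.to_bson stores the python set as a list; Set.to_python restores the set.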
```
**Record 3**

| Field | Value |
|---|---|
| `blob_id` | ef9b7a69e3843ac4417cdf88c780113699fa503d |
| `directory_id` | e3278ff5f2d28cb6c07ee89ed75e189661f287aa |
| `path` | `/trains/urls.py` |
| `content_id` | 1c45f047095e93987125920c10a1f1fa4289c3b1 |
| `detected_licenses` | [] |
| `license_type` | no_license |
| `repo_name` | `ArturYumagulov/Travel_City` |
| `snapshot_id` | 3f7ba8581f81e71785e3ed76834ab948f1fbc2b3 |
| `revision_id` | 09dbc58d09925c7bac19b74df0223a1340b60830 |
| `branch_name` | `refs/heads/master` |
| `visit_date` | 2023-03-14T14:43:45.317546 |
| `revision_date` | 2021-03-06T22:05:28 |
| `committer_date` | 2021-03-06T22:05:28 |
| `github_id` | 279,313,673 |
| `star_events_count` | 0 |
| `fork_events_count` | 0 |
| `gha_license_id` | null |
| `gha_event_created_at` | null |
| `gha_created_at` | null |
| `gha_language` | null |
| `src_encoding` | UTF-8 |
| `language` | Python |
| `is_vendor` | false |
| `is_generated` | false |
| `length_bytes` | 179 |
| `extension` | `py` |
| `authors` | ["zico.13288@gmail.com"] |
| `author_id` | zico.13288@gmail.com |

content:

```python
from django.urls import path
from .views import home, TrainCreateViews
urlpatterns = [
path('add/', TrainCreateViews.as_view(), name='add'),
path('', home, name="home")
]
```
**Record 4**

| Field | Value |
|---|---|
| `blob_id` | a7f1a34e1dbca643640865dc62b696a4dbff6b39 |
| `directory_id` | 8e97978634fad317bf33090f17097a13c4fda856 |
| `path` | `/test_sdio/encode_image.py` |
| `content_id` | c616b0f8e802b3128408e9e05439d803e6d574ea |
| `detected_licenses` | [] |
| `license_type` | no_license |
| `repo_name` | `meesokim/bitbox` |
| `snapshot_id` | 31459009e2c8d54fa1538dab3c3d625920ce60d9 |
| `revision_id` | 40e1883f6eaa16eb22e28072479c08c423262a58 |
| `branch_name` | `refs/heads/master` |
| `visit_date` | 2021-01-22T13:30:54.894759 |
| `revision_date` | 2015-12-13T16:04:48 |
| `committer_date` | 2015-12-13T16:04:48 |
| `github_id` | 29,057,990 |
| `star_events_count` | 0 |
| `fork_events_count` | 0 |
| `gha_license_id` | null |
| `gha_event_created_at` | 2015-12-13T16:04:48 |
| `gha_created_at` | 2015-01-10T13:29:39 |
| `gha_language` | C |
| `src_encoding` | UTF-8 |
| `language` | Python |
| `is_vendor` | false |
| `is_generated` | false |
| `length_bytes` | 684 |
| `extension` | `py` |
| `authors` | ["makapuf2@gmail.com"] |
| `author_id` | makapuf2@gmail.com |

content:

```python
'very simple encoder for one raw image data to .bin file. expects small data <32k pixels'
import sys, struct
from PIL import Image
def reduce(c):
'R8G8B8A8 to A1R5G5B5'
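    # A1R5G5B5 layout: bit 15 = opaque flag, bits 14-10 red, 9-5 green,
    # 4-0 blue; each 8-bit channel keeps only its top 5 bits (>>3), and
    # pixels with alpha <= 127 encode as 0 (fully transparent).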
return (1<<15 | (c[0]>>3)<<10 | (c[1]>>3)<<5 | c[2]>>3) if c[3]>127 else 0
src = Image.open(sys.argv[1]).convert('RGBA')
print "dimensions : ",src.size
assert src.size[0]*src.size[1] < 32000, "image too big! (must fit in 64k)"
raw = [reduce(c) for c in src.getdata()]
outfile=open('image.bin','wb')
outfile.write(struct.pack('<H',0xb71b)) # B17B0x anagram :)
outfile.write(struct.pack('<2H',*src.size))
outfile.write(struct.pack('<%dH'%len(raw),*raw))
outfile.write(struct.pack('<H',0xb71b)) # end
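# Illustrative inverse (editorial sketch, not part of the original script)
# for reading image.bin back:
#
#     with open('image.bin', 'rb') as f:
#         assert struct.unpack('<H', f.read(2))[0] == 0xb71b  # magic
#         w, h = struct.unpack('<2H', f.read(4))
#         pixels = struct.unpack('<%dH' % (w * h), f.read(2 * w * h))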
```
**Record 5**

| Field | Value |
|---|---|
| `blob_id` | 283e4abc4a65e52a5d9b59d3a8238102b56d822c |
| `directory_id` | 0ac08e36e128573a612bacfc85380b3917fa0fe3 |
| `path` | `/18.py` |
| `content_id` | c9374871b9e3c9a97e498e80ae2d80395bd24801 |
| `detected_licenses` | [] |
| `license_type` | no_license |
| `repo_name` | `Henrique970/Monitoria` |
| `snapshot_id` | 9db148435b54e1175310c3bc0c134eb38e8a76be |
| `revision_id` | 361fa3da50b5710eb44959962197e434c302dc7a |
| `branch_name` | `refs/heads/master` |
| `visit_date` | 2020-07-15T00:42:32.563702 |
| `revision_date` | 2019-08-30T19:00:50 |
| `committer_date` | 2019-08-30T19:00:50 |
| `github_id` | 205,439,303 |
| `star_events_count` | 0 |
| `fork_events_count` | 0 |
| `gha_license_id` | null |
| `gha_event_created_at` | null |
| `gha_created_at` | null |
| `gha_language` | null |
| `src_encoding` | UTF-8 |
| `language` | Python |
| `is_vendor` | false |
| `is_generated` | false |
| `length_bytes` | 558 |
| `extension` | `py` |
| `authors` | ["henrydossantos1560@gmail.com"] |
| `author_id` | henrydossantos1560@gmail.com |

content:

```python
import random
nome = input('Enter your name: ')
escolha = input('Enter i for odd or p for even: ')
numero = int(input('Enter your number: '))
computador = random.randint(1, 10)
soma = numero + computador
print('You chose', numero, 'and the computer chose', computador)
print('The sum of the two is', soma)
if escolha == 'i':
    if soma % 2 == 1:
        print('You won,', nome, '!')
    else:
        print('The computer won!')
elif escolha == 'p':
    if soma % 2 == 0:
        print('You won,', nome, '!')
    else:
        print('The computer won!')
```
**Record 6**

| Field | Value |
|---|---|
| `blob_id` | 170a083c957c7be6132d27953ebb3e394bf8b3e5 |
| `directory_id` | 130a98632d2ab4c171503b79e455b7aa27a1dda4 |
| `path` | `/models/research/object_detection/inputs_test.py` |
| `content_id` | 10dd078873c538661d0f57fd9154cb10f2b0c150 |
| `detected_licenses` | ["Apache-2.0", "MIT"] |
| `license_type` | permissive |
| `repo_name` | `aboerzel/German_License_Plate_Recognition` |
| `snapshot_id` | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 |
| `revision_id` | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 |
| `branch_name` | `refs/heads/master` |
| `visit_date` | 2023-01-30T18:08:37.339542 |
| `revision_date` | 2023-01-07T07:41:36 |
| `committer_date` | 2023-01-07T07:41:36 |
| `github_id` | 245,586,430 |
| `star_events_count` | 34 |
| `fork_events_count` | 12 |
| `gha_license_id` | MIT |
| `gha_event_created_at` | 2023-01-07T07:41:37 |
| `gha_created_at` | 2020-03-07T07:16:51 |
| `gha_language` | Python |
| `src_encoding` | UTF-8 |
| `language` | Python |
| `is_vendor` | false |
| `is_generated` | false |
| `length_bytes` | 75,857 |
| `extension` | `py` |

content:

```python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.tflearn.inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import unittest
from absl import logging
from absl.testing import parameterized
import numpy as np
import six
import tensorflow.compat.v1 as tf
from object_detection import inputs
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.utils import config_util
from object_detection.utils import test_case
from object_detection.utils import test_utils
from object_detection.utils import tf_version
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top, g-importing-member
FLAGS = tf.flags.FLAGS
def _get_configs_for_model(model_name):
"""Returns configurations for model."""
fname = os.path.join(tf.resource_loader.get_data_files_path(),
'samples/configs/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/pet_label_map.pbtxt')
data_path = os.path.join(tf.resource_loader.get_data_files_path(),
'test_data/pets_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path
}
return config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def _get_configs_for_model_sequence_example(model_name, frame_index=-1):
"""Returns configurations for model."""
fname = os.path.join(tf.resource_loader.get_data_files_path(),
'test_data/' + model_name + '.config')
label_map_path = os.path.join(tf.resource_loader.get_data_files_path(),
'data/snapshot_serengeti_label_map.pbtxt')
data_path = os.path.join(
tf.resource_loader.get_data_files_path(),
'test_data/snapshot_serengeti_sequence_examples.record')
configs = config_util.get_configs_from_pipeline_file(fname)
override_dict = {
'train_input_path': data_path,
'eval_input_path': data_path,
'label_map_path': label_map_path,
'frame_index': frame_index
}
return config_util.merge_external_params_with_configs(
configs, kwargs_dict=override_dict)
def _make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = tf.data.make_initializable_iterator(dataset)
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only tests under TF2.X.')
class InputFnTest(test_case.TestCase, parameterized.TestCase):
def test_faster_rcnn_resnet50_train_input(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([1, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([1],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_faster_rcnn_resnet50_train_input_with_additional_channels(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
configs['train_input_config'].num_additional_channels = 2
configs['train_config'].retain_original_images = True
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([1, None, None, 5],
features[fields.InputDataFields.image].shape.as_list())
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([1],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[1, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_faster_rcnn_resnet50_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_context_rcnn_resnet50_train_input_with_sequence_example(
self, train_batch_size=8):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
train_config = configs['train_config']
train_config.batch_size = train_batch_size
train_input_fn = inputs.create_train_input_fn(
train_config, configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([train_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([train_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[train_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[train_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_context_rcnn_resnet50_eval_input_with_sequence_example(
self, eval_batch_size=8):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_context_rcnn_resnet50_eval_input_with_sequence_example_image_id_list(
self, eval_batch_size=8):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap')
model_config = configs['model']
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_config = configs['eval_input_configs'][0]
eval_input_config.load_context_image_ids = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config, eval_input_config, model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 640, 640, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_context_rcnn_resnet50_train_input_with_sequence_example_frame_index(
self, train_batch_size=8):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model_sequence_example(
'context_rcnn_camera_trap', frame_index=2)
model_config = configs['model']
train_config = configs['train_config']
train_config.batch_size = train_batch_size
train_input_fn = inputs.create_train_input_fn(
train_config, configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([train_batch_size, 640, 640, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([train_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[train_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[train_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[train_batch_size, 100, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_confidences].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_confidences].dtype)
def test_ssd_inceptionV2_train_input(self):
"""Tests the training input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
batch_size = configs['train_config'].batch_size
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = _make_initializable_iterator(train_input_fn()).get_next()
self.assertAllEqual([batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[batch_size],
labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.num_groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
@parameterized.parameters(
{'eval_batch_size': 1},
{'eval_batch_size': 8}
)
def test_ssd_inceptionV2_eval_input(self, eval_batch_size=1):
"""Tests the eval input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[
fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(
tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_ssd_inceptionV2_eval_input_with_additional_channels(
self, eval_batch_size=1):
"""Tests the eval input function for SSDInceptionV2 with additional channel.
Args:
eval_batch_size: Batch size for eval set.
"""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
configs['eval_input_configs'][0].num_additional_channels = 1
eval_config = configs['eval_config']
eval_config.batch_size = eval_batch_size
eval_config.retain_original_image_additional_channels = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config, configs['eval_input_configs'][0], model_config)
features, labels = _make_initializable_iterator(eval_input_fn()).get_next()
self.assertAllEqual([eval_batch_size, 300, 300, 4],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[eval_batch_size, 300, 300, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([eval_batch_size, 300, 300, 1], features[
fields.InputDataFields.image_additional_channels].shape.as_list())
self.assertEqual(
tf.uint8,
features[fields.InputDataFields.image_additional_channels].dtype)
self.assertAllEqual([eval_batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[eval_batch_size, 100, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[eval_batch_size, 100, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(tf.bool,
labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[eval_batch_size, 100],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_predict_input(self):
"""Tests the predict input function."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
self.assertEqual([1, 300, 300, 3], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_predict_input_with_additional_channels(self):
"""Tests the predict input function with additional channels."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['eval_input_configs'][0].num_additional_channels = 2
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'],
predict_input_config=configs['eval_input_configs'][0])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
# RGB + 2 additional channels = 5 channels.
self.assertEqual([1, 300, 300, 5], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_error_with_bad_train_config(self):
"""Tests that a TypeError is raised with improper train config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['eval_config'], # Expecting `TrainConfig`.
train_input_config=configs['train_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_input_config(self):
"""Tests that a TypeError is raised with improper train input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_model_config(self):
"""Tests that a TypeError is raised with improper train model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['train_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_eval_config(self):
"""Tests that a TypeError is raised with improper eval config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['train_config'], # Expecting `EvalConfig`.
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_input_config(self):
"""Tests that a TypeError is raised with improper eval input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_model_config(self):
"""Tests that a TypeError is raised with improper eval model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['eval_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
eval_input_fn()
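  # The next two tests exercise inputs._replace_empty_string_with_random_number,
  # which leaves non-empty source IDs untouched and substitutes a random
  # integer (encoded as a string) for empty ones.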
def test_output_equal_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
test_string = b'hello world'
feed_dict = {string_placeholder: test_string}
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
self.assertEqual(test_string, out_string)
def test_output_is_integer_in_replace_empty_string_with_random_number(self):
string_placeholder = tf.placeholder(tf.string, shape=[])
replaced_string = inputs._replace_empty_string_with_random_number(
string_placeholder)
empty_string = ''
feed_dict = {string_placeholder: empty_string}
with self.test_session() as sess:
out_string = sess.run(replaced_string, feed_dict=feed_dict)
is_integer = True
try:
# Test whether out_string is a string which represents an integer, the
# casting below will throw an error if out_string is not castable to int.
int(out_string)
except ValueError:
is_integer = False
self.assertTrue(is_integer)
def test_force_no_resize(self):
"""Tests the functionality of force_no_reisze option."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['eval_config'].force_no_resize = True
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_configs'][0],
model_config=configs['model']
)
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['model']
)
features_train, _ = _make_initializable_iterator(
train_input_fn()).get_next()
features_eval, _ = _make_initializable_iterator(
eval_input_fn()).get_next()
images_train, images_eval = features_train['image'], features_eval['image']
self.assertEqual([1, None, None, 3], images_eval.shape.as_list())
self.assertEqual([24, 300, 300, 3], images_train.shape.as_list())
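# Tests below cover inputs.augment_input_data, which applies the configured
# preprocessor ops (resize, box rescaling, etc.) to an input tensor dict.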
class DataAugmentationFnTest(test_case.TestCase):
def test_apply_image_and_box_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_boxes])
image, groundtruth_boxes = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]])
def test_apply_image_and_box_augmentation_with_scores(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1.0], np.float32)),
fields.InputDataFields.groundtruth_weights:
tf.constant(np.array([0.8], np.float32)),
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes],
augmented_tensor_dict[fields.InputDataFields.groundtruth_classes],
augmented_tensor_dict[fields.InputDataFields.groundtruth_weights])
(image, groundtruth_boxes,
groundtruth_classes, groundtruth_weights) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllClose(groundtruth_boxes, [[10, 10, 20, 20]])
    self.assertAllEqual(groundtruth_classes.shape, [1])
self.assertAllClose(groundtruth_weights, [0.8])
def test_include_masks_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
})
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.zeros([2, 10, 10], np.uint8))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_instance_masks])
image, masks = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllEqual(masks.shape, [2, 20, 20])
def test_include_keypoints_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
return (augmented_tensor_dict[fields.InputDataFields.image],
augmented_tensor_dict[fields.InputDataFields.groundtruth_boxes],
augmented_tensor_dict[fields.InputDataFields.
groundtruth_keypoints])
image, boxes, keypoints = self.execute_cpu(graph_fn, [])
self.assertAllEqual(image.shape, [20, 20, 3])
self.assertAllClose(boxes, [[10, 10, 20, 20]])
self.assertAllClose(keypoints, [[[10, 20], [10, 10]]])
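# Module-level fakes used by the transformation tests below: a model
# preprocessor that passes images through, an image resizer that returns its
# inputs unchanged, and a preprocessor that resizes (with padding) to 50x50.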
def _fake_model_preprocessor_fn(image):
return (image, tf.expand_dims(tf.shape(image)[1:], axis=0))
def _fake_image_resizer_fn(image, mask):
return (image, mask, tf.shape(image))
def _fake_resize50_preprocess_fn(image):
image = image[0]
image, shape = preprocessor.resize_to_range(
image, min_dimension=50, max_dimension=50, pad_to_max_dimension=True)
return tf.expand_dims(image, 0), tf.expand_dims(shape, axis=0)
class DataTransformationFnTest(test_case.TestCase, parameterized.TestCase):
def test_combine_additional_channels_if_present(self):
image = np.random.rand(4, 4, 3).astype(np.float32)
additional_channels = np.random.rand(4, 4, 2).astype(np.float32)
def graph_fn(image, additional_channels):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.image_additional_channels: additional_channels,
fields.InputDataFields.groundtruth_classes:
tf.constant([1, 1], tf.int32)
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=1)
out_tensors = input_transformation_fn(tensor_dict=tensor_dict)
return out_tensors[fields.InputDataFields.image]
out_image = self.execute_cpu(graph_fn, [image, additional_channels])
self.assertAllEqual(out_image.dtype, tf.float32)
self.assertAllEqual(out_image.shape, [4, 4, 5])
self.assertAllClose(out_image, np.concatenate((image, additional_channels),
axis=2))
def test_use_multiclass_scores_when_present(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image: tf.constant(np.random.rand(4, 4, 3).
astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.multiclass_scores:
tf.constant(np.array([0.2, 0.3, 0.5, 0.1, 0.6, 0.3], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3, use_multiclass_scores=True)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return transformed_inputs[fields.InputDataFields.groundtruth_classes]
groundtruth_classes = self.execute_cpu(graph_fn, [])
self.assertAllClose(
np.array([[0.2, 0.3, 0.5], [0.1, 0.6, 0.3]], np.float32),
groundtruth_classes)
@unittest.skipIf(tf_version.is_tf2(), ('Skipping due to different behaviour '
'in TF 2.X'))
def test_use_multiclass_scores_when_not_present(self):
def graph_fn():
zero_num_elements = tf.random.uniform([], minval=0, maxval=1,
dtype=tf.int32)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.multiclass_scores: tf.zeros(zero_num_elements),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32))
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3, use_multiclass_scores=True)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return transformed_inputs[fields.InputDataFields.groundtruth_classes]
groundtruth_classes = self.execute_cpu(graph_fn, [])
self.assertAllClose(
np.array([[0, 1, 0], [0, 0, 1]], np.float32),
groundtruth_classes)
@parameterized.parameters(
{'labeled_classes': [1, 2]},
{'labeled_classes': []},
{'labeled_classes': [1, -1, 2]} # -1 denotes an unrecognized class
)
def test_use_labeled_classes(self, labeled_classes):
def compute_fn(image, groundtruth_boxes, groundtruth_classes,
groundtruth_labeled_classes):
tensor_dict = {
fields.InputDataFields.image:
image,
fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
fields.InputDataFields.groundtruth_classes:
groundtruth_classes,
fields.InputDataFields.groundtruth_labeled_classes:
groundtruth_labeled_classes
}
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=3)
return input_transformation_fn(tensor_dict=tensor_dict)
image = np.random.rand(4, 4, 3).astype(np.float32)
groundtruth_boxes = np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)
groundtruth_classes = np.array([1, 2], np.int32)
groundtruth_labeled_classes = np.array(labeled_classes, np.int32)
transformed_inputs = self.execute_cpu(compute_fn, [
image, groundtruth_boxes, groundtruth_classes,
groundtruth_labeled_classes
])
if labeled_classes == [1, 2] or labeled_classes == [1, -1, 2]:
transformed_labeled_classes = [1, 1, 0]
elif not labeled_classes:
transformed_labeled_classes = [1, 1, 1]
else:
      # Fail the test outright on an unexpected parameter value.
      self.fail('Unexpected labeled_classes %r' % labeled_classes)
self.assertAllEqual(
np.array(transformed_labeled_classes, np.float32),
transformed_inputs[fields.InputDataFields.groundtruth_labeled_classes])
def test_returns_correct_class_label_encodings(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences])
(groundtruth_classes, groundtruth_confidences) = self.execute_cpu(graph_fn,
[])
self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]])
self.assertAllClose(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]])
def test_returns_correct_labels_with_unrecognized_class(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(
np.array([[0, 0, 1, 1], [.2, .2, 4, 4], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.groundtruth_area:
tf.constant(np.array([.5, .4, .3])),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, -1, 1], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(
np.array([[[.1, .1]], [[.2, .2]], [[.5, .5]]],
np.float32)),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.constant([[True, True], [False, False], [True, True]]),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(3, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_is_crowd:
tf.constant([False, True, False]),
fields.InputDataFields.groundtruth_difficult:
tf.constant(np.array([0, 0, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.num_groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_area],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences],
transformed_inputs[fields.InputDataFields.groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
transformed_inputs[fields.InputDataFields.
groundtruth_keypoint_visibilities],
transformed_inputs[fields.InputDataFields.
groundtruth_instance_masks],
transformed_inputs[fields.InputDataFields.groundtruth_is_crowd],
transformed_inputs[fields.InputDataFields.groundtruth_difficult])
(groundtruth_classes, num_groundtruth_boxes, groundtruth_area,
groundtruth_confidences, groundtruth_boxes, groundtruth_keypoints,
groundtruth_keypoint_visibilities, groundtruth_instance_masks,
groundtruth_is_crowd, groundtruth_difficult) = self.execute_cpu(graph_fn,
[])
self.assertAllClose(groundtruth_classes, [[0, 0, 1], [1, 0, 0]])
self.assertAllEqual(num_groundtruth_boxes, 2)
self.assertAllClose(groundtruth_area, [.5, .3])
self.assertAllEqual(groundtruth_confidences, [[0, 0, 1], [1, 0, 0]])
self.assertAllClose(groundtruth_boxes, [[0, 0, 1, 1], [.5, .5, 1, 1]])
self.assertAllClose(groundtruth_keypoints, [[[.1, .1]], [[.5, .5]]])
self.assertAllEqual(groundtruth_keypoint_visibilities,
[[True, True], [True, True]])
self.assertAllEqual(groundtruth_instance_masks.shape, [2, 4, 4])
self.assertAllEqual(groundtruth_is_crowd, [False, False])
self.assertAllEqual(groundtruth_difficult, [0, 1])
def test_returns_correct_merged_boxes(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
merge_multiple_boxes=True)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences],
transformed_inputs[fields.InputDataFields.num_groundtruth_boxes])
(groundtruth_boxes, groundtruth_classes, groundtruth_confidences,
num_groundtruth_boxes) = self.execute_cpu(graph_fn, [])
self.assertAllClose(
groundtruth_boxes,
[[.5, .5, 1., 1.]])
self.assertAllClose(
groundtruth_classes,
[[1, 0, 1]])
self.assertAllClose(
groundtruth_confidences,
[[1, 0, 1]])
self.assertAllClose(
num_groundtruth_boxes,
1)
def test_returns_correct_groundtruth_confidences_when_input_present(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.groundtruth_confidences:
tf.constant(np.array([1.0, -1.0], np.float32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_classes],
transformed_inputs[fields.InputDataFields.
groundtruth_confidences])
groundtruth_classes, groundtruth_confidences = self.execute_cpu(graph_fn,
[])
self.assertAllClose(
groundtruth_classes,
[[0, 0, 1], [1, 0, 0]])
self.assertAllClose(
groundtruth_confidences,
[[0, 0, 1], [-1, 0, 0]])
def test_returns_resized_masks(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(2, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32)),
fields.InputDataFields.original_image_spatial_shape:
tf.constant(np.array([4, 4], np.int32))
}
def fake_image_resizer_fn(image, masks=None):
resized_image = tf.image.resize_images(image, [8, 8])
results = [resized_image]
if masks is not None:
resized_masks = tf.transpose(
tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
[2, 0, 1])
results.append(resized_masks)
results.append(tf.shape(resized_image))
return results
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=fake_image_resizer_fn,
num_classes=num_classes,
retain_original_image=True)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.original_image],
transformed_inputs[fields.InputDataFields.
original_image_spatial_shape],
transformed_inputs[fields.InputDataFields.
groundtruth_instance_masks])
(original_image, original_image_shape,
groundtruth_instance_masks) = self.execute_cpu(graph_fn, [])
self.assertEqual(original_image.dtype, np.uint8)
self.assertAllEqual(original_image_shape, [4, 4])
self.assertAllEqual(original_image.shape, [8, 8, 3])
self.assertAllEqual(groundtruth_instance_masks.shape, [2, 8, 8])
def test_applies_model_preprocess_fn_to_image_tensor(self):
np_image = np.random.randint(256, size=(4, 4, 3))
def graph_fn(image):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def fake_model_preprocessor_fn(image):
return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.image],
transformed_inputs[fields.InputDataFields.true_image_shape])
image, true_image_shape = self.execute_cpu(graph_fn, [np_image])
self.assertAllClose(image, np_image / 255.)
self.assertAllClose(true_image_shape, [4, 4, 3])
def test_applies_data_augmentation_fn_to_tensor_dict(self):
np_image = np.random.randint(256, size=(4, 4, 3))
def graph_fn(image):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def add_one_data_augmentation_fn(tensor_dict):
return {key: value + 1 for key, value in tensor_dict.items()}
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_one_data_augmentation_fn)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.image],
transformed_inputs[fields.InputDataFields.groundtruth_classes])
image, groundtruth_classes = self.execute_cpu(graph_fn, [np_image])
self.assertAllEqual(image, np_image + 1)
self.assertAllEqual(
groundtruth_classes,
[[0, 0, 0, 1], [0, 1, 0, 0]])
def test_applies_data_augmentation_fn_before_model_preprocess_fn(self):
np_image = np.random.randint(256, size=(4, 4, 3))
def graph_fn(image):
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def mul_two_model_preprocessor_fn(image):
return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0))
def add_five_to_image_data_augmentation_fn(tensor_dict):
tensor_dict[fields.InputDataFields.image] += 5
return tensor_dict
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=mul_two_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_five_to_image_data_augmentation_fn)
transformed_inputs = input_transformation_fn(tensor_dict)
return transformed_inputs[fields.InputDataFields.image]
image = self.execute_cpu(graph_fn, [np_image])
self.assertAllEqual(image, (np_image + 5) * 2)
def test_resize_with_padding(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2]], [[0.3, 0.4]]]),
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,)
transformed_inputs = input_transformation_fn(tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_boxes],
transformed_inputs[fields.InputDataFields.groundtruth_keypoints])
groundtruth_boxes, groundtruth_keypoints = self.execute_cpu(graph_fn, [])
self.assertAllClose(
groundtruth_boxes,
[[.5, .25, 1., .5], [.0, .0, .5, .25]])
self.assertAllClose(
groundtruth_keypoints,
[[[.1, .1]], [[.3, .2]]])
def test_groundtruth_keypoint_weights(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2], [0.3, 0.4]],
[[0.5, 0.6], [0.7, 0.8]]]),
fields.InputDataFields.groundtruth_keypoint_visibilities:
tf.constant([[True, False], [True, True]]),
}
num_classes = 3
keypoint_type_weight = [1.0, 2.0]
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
keypoint_type_weight=keypoint_type_weight)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
transformed_inputs[fields.InputDataFields.
groundtruth_keypoint_weights])
groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu(
graph_fn, [])
self.assertAllClose(
groundtruth_keypoints,
[[[0.1, 0.1], [0.3, 0.2]],
[[0.5, 0.3], [0.7, 0.4]]])
self.assertAllClose(
groundtruth_keypoint_weights,
[[1.0, 0.0], [1.0, 2.0]])
def test_groundtruth_keypoint_weights_default(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant([[[0.1, 0.2], [0.3, 0.4]],
[[0.5, 0.6], [0.7, 0.8]]]),
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
return (transformed_inputs[fields.InputDataFields.groundtruth_keypoints],
transformed_inputs[fields.InputDataFields.
groundtruth_keypoint_weights])
groundtruth_keypoints, groundtruth_keypoint_weights = self.execute_cpu(
graph_fn, [])
self.assertAllClose(
groundtruth_keypoints,
[[[0.1, 0.1], [0.3, 0.2]],
[[0.5, 0.3], [0.7, 0.4]]])
self.assertAllClose(
groundtruth_keypoint_weights,
[[1.0, 1.0], [1.0, 1.0]])
def test_groundtruth_dense_pose(self):
def graph_fn():
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(100, 50, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.0, .0, .5, .5]],
np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([1, 2], np.int32)),
fields.InputDataFields.groundtruth_dp_num_points:
tf.constant([0, 2], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_part_ids:
tf.constant([[0, 0], [4, 23]], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_surface_coords:
tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]],
[[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]],
dtype=tf.float32),
}
num_classes = 1
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_resize50_preprocess_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
transformed_inputs = input_transformation_fn(tensor_dict=tensor_dict)
transformed_dp_num_points = transformed_inputs[
fields.InputDataFields.groundtruth_dp_num_points]
transformed_dp_part_ids = transformed_inputs[
fields.InputDataFields.groundtruth_dp_part_ids]
transformed_dp_surface_coords = transformed_inputs[
fields.InputDataFields.groundtruth_dp_surface_coords]
return (transformed_dp_num_points, transformed_dp_part_ids,
transformed_dp_surface_coords)
dp_num_points, dp_part_ids, dp_surface_coords = self.execute_cpu(
graph_fn, [])
self.assertAllEqual(dp_num_points, [0, 2])
self.assertAllEqual(dp_part_ids, [[0, 0], [4, 23]])
self.assertAllClose(
dp_surface_coords,
[[[0., 0., 0., 0.,], [0., 0., 0., 0.,]],
[[0.1, 0.1, 0.3, 0.4,], [0.6, 0.4, 0.6, 0.7,]]])
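# Tests below cover inputs.pad_input_data_to_static_shapes, which pads (or
# clips) dynamically shaped tensors to fixed shapes so that graphs can run in
# static-shape execution environments.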
class PadInputDataToStaticShapesFnTest(test_case.TestCase):
def test_pad_images_boxes_and_classes(self):
input_tensor_dict = {
fields.InputDataFields.image:
tf.random.uniform([3, 3, 3]),
fields.InputDataFields.groundtruth_boxes:
tf.random.uniform([2, 4]),
fields.InputDataFields.groundtruth_classes:
tf.random.uniform([2, 3], minval=0, maxval=2, dtype=tf.int32),
fields.InputDataFields.true_image_shape:
tf.constant([3, 3, 3]),
fields.InputDataFields.original_image_spatial_shape:
tf.constant([3, 3])
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.true_image_shape]
.shape.as_list(), [3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.original_image_spatial_shape]
.shape.as_list(), [2])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_boxes]
.shape.as_list(), [3, 4])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_classes]
.shape.as_list(), [3, 3])
def test_clip_boxes_and_classes(self):
def graph_fn():
input_tensor_dict = {
fields.InputDataFields.groundtruth_boxes:
tf.random.uniform([5, 4]),
fields.InputDataFields.groundtruth_classes:
tf.random.uniform([2, 3], maxval=10, dtype=tf.int32),
fields.InputDataFields.num_groundtruth_boxes:
tf.constant(5)
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
return (padded_tensor_dict[fields.InputDataFields.groundtruth_boxes],
padded_tensor_dict[fields.InputDataFields.groundtruth_classes],
padded_tensor_dict[fields.InputDataFields.num_groundtruth_boxes])
(groundtruth_boxes, groundtruth_classes,
num_groundtruth_boxes) = self.execute_cpu(graph_fn, [])
self.assertAllEqual(groundtruth_boxes.shape, [3, 4])
self.assertAllEqual(groundtruth_classes.shape, [3, 3])
self.assertEqual(num_groundtruth_boxes, 3)
def test_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(4, 3, 5),
fields.InputDataFields.image_additional_channels:
test_utils.image_with_dynamic_shape(4, 3, 2),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
    # pad_input_data_to_static_shapes assumes that image is already concatenated
# with additional channels.
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 5])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_images_and_additional_channels_errors(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(10, 10, 3),
fields.InputDataFields.image_additional_channels:
test_utils.image_with_dynamic_shape(10, 10, 2),
fields.InputDataFields.original_image:
test_utils.image_with_dynamic_shape(10, 10, 3),
}
with self.assertRaises(ValueError):
_ = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
def test_gray_images(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(4, 4, 1),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 1])
def test_gray_images_and_additional_channels(self):
input_tensor_dict = {
fields.InputDataFields.image:
test_utils.image_with_dynamic_shape(4, 4, 3),
fields.InputDataFields.image_additional_channels:
test_utils.image_with_dynamic_shape(4, 4, 2),
}
    # pad_input_data_to_static_shapes assumes that image is already concatenated
# with additional channels.
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image].shape.as_list(),
[5, 6, 3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.image_additional_channels]
.shape.as_list(), [5, 6, 2])
def test_keypoints(self):
keypoints = test_utils.keypoints_with_dynamic_shape(10, 16, 4)
visibilities = tf.cast(tf.random.uniform(tf.shape(keypoints)[:-1], minval=0,
maxval=2, dtype=tf.int32), tf.bool)
input_tensor_dict = {
fields.InputDataFields.groundtruth_keypoints:
test_utils.keypoints_with_dynamic_shape(10, 16, 4),
fields.InputDataFields.groundtruth_keypoint_visibilities:
visibilities
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_keypoints]
.shape.as_list(), [3, 16, 4])
self.assertAllEqual(
padded_tensor_dict[
fields.InputDataFields.groundtruth_keypoint_visibilities]
.shape.as_list(), [3, 16])
def test_dense_pose(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_dp_num_points:
tf.constant([0, 2], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_part_ids:
tf.constant([[0, 0], [4, 23]], dtype=tf.int32),
fields.InputDataFields.groundtruth_dp_surface_coords:
tf.constant([[[0., 0., 0., 0.,], [0., 0., 0., 0.,]],
[[0.1, 0.2, 0.3, 0.4,], [0.6, 0.8, 0.6, 0.7,]]],
dtype=tf.float32),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=1,
spatial_image_shape=[128, 128],
max_dp_points=200)
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_dp_num_points]
.shape.as_list(), [3])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_dp_part_ids]
.shape.as_list(), [3, 200])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_dp_surface_coords]
.shape.as_list(), [3, 200, 4])
def test_pad_input_data_to_static_shapes_for_trackid(self):
input_tensor_dict = {
fields.InputDataFields.groundtruth_track_ids:
tf.constant([0, 1], dtype=tf.int32),
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=1,
spatial_image_shape=[128, 128])
self.assertAllEqual(
padded_tensor_dict[fields.InputDataFields.groundtruth_track_ids]
.shape.as_list(), [3])
def test_context_features(self):
context_memory_size = 8
context_feature_length = 10
max_num_context_features = 20
def graph_fn():
input_tensor_dict = {
fields.InputDataFields.context_features:
tf.ones([context_memory_size, context_feature_length]),
fields.InputDataFields.context_feature_length:
tf.constant(context_feature_length)
}
padded_tensor_dict = inputs.pad_input_data_to_static_shapes(
tensor_dict=input_tensor_dict,
max_num_boxes=3,
num_classes=3,
spatial_image_shape=[5, 6],
max_num_context_features=max_num_context_features,
context_feature_length=context_feature_length)
self.assertAllEqual(
padded_tensor_dict[
fields.InputDataFields.context_features].shape.as_list(),
[max_num_context_features, context_feature_length])
return padded_tensor_dict[fields.InputDataFields.valid_context_size]
valid_context_size = self.execute_cpu(graph_fn, [])
self.assertEqual(valid_context_size, context_memory_size)
class NegativeSizeTest(test_case.TestCase):
"""Test for inputs and related funcitons."""
def test_negative_size_error(self):
"""Test that error is raised for negative size boxes."""
def graph_fn():
tensors = {
fields.InputDataFields.image: tf.zeros((128, 128, 3)),
fields.InputDataFields.groundtruth_classes:
tf.constant([1, 1], tf.int32),
fields.InputDataFields.groundtruth_boxes:
tf.constant([[0.5, 0.5, 0.4, 0.5]], tf.float32)
}
tensors = inputs.transform_input_data(
tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn,
num_classes=10)
return tensors[fields.InputDataFields.groundtruth_boxes]
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute_cpu(graph_fn, [])
def test_negative_size_no_assert(self):
"""Test that negative size boxes are filtered out without assert.
This test simulates the behaviour when we run on TPU and Assert ops are
not supported.
"""
tensors = {
fields.InputDataFields.image: tf.zeros((128, 128, 3)),
fields.InputDataFields.groundtruth_classes:
tf.constant([1, 1], tf.int32),
fields.InputDataFields.groundtruth_boxes:
tf.constant([[0.5, 0.5, 0.4, 0.5], [0.5, 0.5, 0.6, 0.6]],
tf.float32)
}
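    # Patch tf.Assert to a no-op to emulate TPU execution, where Assert ops
    # are unsupported (see the docstring above).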
with mock.patch.object(tf, 'Assert') as tf_assert:
tf_assert.return_value = tf.no_op()
tensors = inputs.transform_input_data(
tensors, _fake_model_preprocessor_fn, _fake_image_resizer_fn,
num_classes=10)
self.assertAllClose(tensors[fields.InputDataFields.groundtruth_boxes],
[[0.5, 0.5, 0.6, 0.6]])
if __name__ == '__main__':
tf.test.main()
|
[
"andreas.boerzel@gmx.de"
] |
andreas.boerzel@gmx.de
|
1c69ce72d7668c9f655b4077f3fe6fef28f0a157
|
eeaeb0f39262fa04233614ce377ac9dcd04f9b5e
|
/pyQt 5/my library pyqt5/QComboBox.py
|
a7d29c80b1d80e2e2fc041a97ce4e8fc09f32595
|
[] |
no_license
|
akashian4/python_example
|
70811300c97c3e9874b2c6f47a05569e7451e4df
|
64d57bfd9fe2d5ce71db3e1a03f0e6c4dfe3497f
|
refs/heads/main
| 2023-08-12T10:03:21.417469
| 2021-09-30T19:41:03
| 2021-09-30T19:41:03
| 412,203,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
###QComboBox
import sys
from PyQt5.QtWidgets import QWidget,QLabel,QApplication,QComboBox
class F(QWidget):
def __init__(self):
super().__init__()
self.setUI()
def setUI(self):
self.lbl = QLabel(" ", self)
self.lbl.resize(50,20)
self.lbl.move(50, 150)
self.setGeometry(300, 300, 300, 200)
self.setWindowTitle('faradars')
self.com1=QComboBox(self)
self.com1.addItem("python",self)
self.com1.addItem("java",self)
self.com1.addItem("C++",self)
self.com1.addItem("PyQt5",self)
self.com1.move(100,100)
self.com1.resize(200,20)
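        # 'activated' fires when the user picks an entry; the lambda discards
        # the emitted index and re-reads the selection inside combItem().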
        self.com1.activated.connect(lambda: self.combItem())
self.show()
def combItem(self):
text=self.com1.currentText()
index=self.com1.currentIndex()
print(text,' ',index)
self.lbl.setText(text)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = F()
sys.exit(app.exec_())
|
[
"akashian4@gmail.com"
] |
akashian4@gmail.com
|
24276040f76be3849eb58873430e762060fc75dd
|
1e28d01665dd609bd443d44d974e712e0e1c2535
|
/contact/forms.py
|
4a95f7e4edb08715bc23dc62fc1c9693db764e90
|
[] |
no_license
|
Code-Institute-Submissions/Time-To-Rent
|
44a4c4f918de786dfdd0426baf55669323a892f9
|
b77a0f914cb32fd51af369db2987c3b1c42c5065
|
refs/heads/master
| 2023-06-15T07:41:52.557881
| 2021-06-29T22:26:12
| 2021-06-29T22:26:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
from django import forms
from .models import Contact
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('full_name', 'email', 'contact_reason',
'message')
def __init__(self, *args, **kwargs):
"""
Add placeholders and classes, remove auto-generated
labels and set autofocus on first field
"""
super().__init__(*args, **kwargs)
placeholders = {
'full_name': 'Enter your Name',
            'email': 'Enter your Email',
'contact_reason': '',
'message': 'Let us know how we can help',
}
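        # 'contact_reason' gets an empty placeholder, presumably because it
        # renders as a dropdown rather than a text input.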
self.fields['contact_reason'].widget.attrs['autofocus'] = True
for field in self.fields:
            if self.fields[field].required:
                # Mark required fields with an asterisk in the placeholder.
                placeholder = f'{placeholders[field]} *'
            else:
                placeholder = placeholders[field]
            self.fields[field].widget.attrs['placeholder'] = placeholder
            self.fields[field].widget.attrs['class'] = 'm-0 form-contact'
            # Remove the auto-generated label, as the docstring describes.
            self.fields[field].label = False
|
[
"Marc.gulliver100@gmail.com"
] |
Marc.gulliver100@gmail.com
|
45d34cd5b697ed70c4cfa9dfffdfe76104672c8f
|
e6956b020916b7b120f289f4790b73f836e9a0aa
|
/leads/models.py
|
dd4cc43f0c8aef3c5200381eb5175ba22af15b46
|
[] |
no_license
|
wilsonmwiti/testyangu
|
4628afdd0ba533324aa642e597eeb5d60933e480
|
a6e2c5bebf7fce8bccdc9e413583095cda522c1a
|
refs/heads/main
| 2023-06-21T18:59:14.999930
| 2021-07-05T06:47:26
| 2021-07-05T06:47:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,297
|
py
|
from django.db import models
CONVERTED_CHOICES = [
    ('No', 'No'),
    ('Yes', 'Yes'),
    ('NA', 'Not applicable'),
]
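# Each pair is (value stored in the database, human-readable label).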
class ContactUs(models.Model):
time=models.DateTimeField(auto_now_add=True,verbose_name="Time")
name=models.CharField(max_length=50,verbose_name="Names")
email=models.EmailField(max_length=254,verbose_name="Email")
phone=models.CharField(max_length=50,verbose_name="Phone")
subject=models.CharField(max_length=70,verbose_name="Subject")
message=models.TextField(verbose_name="Message")
seen=models.BooleanField(default=False)
    responded=models.BooleanField(default=False,verbose_name="Contacted")
notes =models.TextField(blank=True,verbose_name="Notes")
    converted=models.CharField(max_length=50,choices=CONVERTED_CHOICES,default="No",verbose_name="Converted?")
served_by=models.CharField(blank=True,max_length=50)
class Meta(object):
db_table=u"Contact us"
class QuoteLeads(models.Model):
time=models.DateTimeField(auto_now_add=True,verbose_name="Time")
order_number=models.CharField(max_length=50,verbose_name="Order number")
name=models.CharField(max_length=100,verbose_name="Names As They Appear On ID")
dob=models.DateField(verbose_name="Parents DoB")
email=models.EmailField(max_length=254,verbose_name="Email")
phone=models.CharField(max_length=15,verbose_name="Phone number")
annual_fees=models.IntegerField(verbose_name="Annual school fees")
premium=models.IntegerField(verbose_name="premium")
tax=models.IntegerField(verbose_name="Tax",default=0)
total=models.IntegerField(verbose_name="Total",default=0)
    education_level=models.CharField(max_length=70,verbose_name="Child's Education level")
school_years=models.IntegerField(verbose_name="No of school years")
    sum_assured=models.IntegerField(verbose_name="Sum assured",blank=True)
contacted=models.BooleanField(default=False,verbose_name="Contacted")
converted=models.BooleanField(default=False,verbose_name="Converted")
served_by=models.CharField(blank=True,max_length=100)
notes=models.TextField(blank=True)
class Meta(object):
db_table=u"Quote Leads"
|
[
"Iamdeveloper254@gmail.com"
] |
Iamdeveloper254@gmail.com
|
c49836f2e56130e13c2dacd1c19ed24793523e5b
|
16f3cffc0e62d660103ada3185112caa9ee2ad4f
|
/marksheet.py
|
dabf959855ebef4b01e86ce099927e2117779242
|
[] |
no_license
|
hirak0373/AI-Practices
|
5601adcf91427cf57de4703b16b794e931c4b05b
|
4c23dc90c193f64f7511f1c1b9fc8cf79c776998
|
refs/heads/master
| 2020-06-25T10:03:15.001939
| 2019-07-28T11:25:42
| 2019-07-28T11:25:42
| 199,278,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
eng = input("Enter marks of English: ")
bio = input("Enter marks of Biology: ")
chem = input("Enter marks of Chemistry: ")
pak = input("Enter marks of Pakistan Studies: ")
sin = input("Enter marks of Sindhi: ")
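# The divisor 425 used below is the assumed maximum total for the five
# subjects (presumably 85 marks each; the original file does not say).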
obtain = int(eng) + int(bio) + int(chem) + int(pak) + int(sin)
print(obtain)
per1 = obtain / 425 * 100
print("your percentage is: " + str(per1))
if per1 >= 80:
    print("Grade: A+")
elif per1 >= 70:
    print("Grade: A")
elif per1 >= 60:
    print("Grade: B")
elif per1 >= 50:
    print("Grade: C")
elif per1 >= 40:
    print("Grade: D")
else:
    print("Grade: fail")
|
[
"Hirak0373"
] |
Hirak0373
|
5eefceddfd476526d32fbc4ea9938a8b6fcb3b42
|
a2f1179b0b096d2a4d6dfd2f98b9b7b1febe0cfd
|
/321810304056_Suppose passing marks of a subject is 35.Take input of marks from user and check whether it is greater than passing marks or not.py
|
c6ec71228a3825e0aaa6c9890caae47ae3d118e8
|
[] |
no_license
|
pooji12/Python
|
55d811a9e310d97bb30e1c0f5187970dd67cbb80
|
d72309086e7ead17c491c12a85374a5f6d514a42
|
refs/heads/master
| 2022-11-07T03:44:45.505998
| 2020-06-23T16:16:39
| 2020-06-23T16:16:39
| 273,281,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
# Suppose the passing mark for a subject is 35. Take marks as input from the
# user and check whether the student has reached the passing mark.
marks = float(input("Enter marks of the student: "))
if marks>=35:
print("Student has passed in the subject")
else:
print("Student has failed in the subject")
|
[
"noreply@github.com"
] |
pooji12.noreply@github.com
|
83547d23b166d298dba8225456e446df30293c67
|
c90962d0f3233205d4dc99391f6aab3402fa7ed8
|
/parPDE/__init__.py
|
4557dcb4c8a5af05fe9207a2e5b7ce6f3b66bbdf
|
[
"BSD-2-Clause"
] |
permissive
|
chrisjbillington/parpde
|
f97a1303c404ec83f22df15c6fb4e80485176150
|
4f882cbbb9ad6c57814e4422e9ba063fa27886a0
|
refs/heads/master
| 2022-11-10T16:18:31.489069
| 2019-01-22T21:01:25
| 2019-01-22T21:01:25
| 275,920,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
from .parPDE import *
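# __version__ is presumably generated at build/install time; fall back to
# None when it is absent (e.g. when running from a source checkout).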
try:
from __version__ import __version__
except ImportError:
__version__ = None
|
[
"chrisjbillington@gmail.com"
] |
chrisjbillington@gmail.com
|
70f6b0b090ff0d91affb41cb57adece59c466bc2
|
14f821fc6d2150fe8387f2e58753482cb01ff081
|
/Tests/test_ordoneaza_crescator.py
|
edf715ca43e67aedd1a0d17f7deb76aec9e040c6
|
[] |
no_license
|
AP-MI-2021/lab-567-raluca2002
|
d9798e2a16b14ab2ab718ac28d379c0488b8a7d5
|
27365dc8412dbc8c9c761433236e4fd80f487f65
|
refs/heads/main
| 2023-09-02T13:27:49.140431
| 2021-11-10T11:36:54
| 2021-11-10T11:36:54
| 420,812,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
from Domain.librarie import creeaza_carte, get_id
from Logic.ordodeaza_crescator import ordoneaza_crescator
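# Judging by the expected order asserted below, ordoneaza_crescator sorts the
# books ascending by price (12 < 20 < 23.02 < 60).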
def get_data():
return [
creeaza_carte(1, 'v1', 'gen1', 60, 'silver'),
creeaza_carte(2, 'v2', 'gen1', 20, 'none'),
creeaza_carte(3, 'v3', 'gen2', 12, 'gold'),
creeaza_carte(4, 'v5', 'gen3', 23.02, 'none'),
]
def test_ordoneaza_crescator():
carti = get_data()
carti = ordoneaza_crescator(carti, [], [])
assert len(carti) == 4
assert get_id(carti[0]) == 3
assert get_id(carti[1]) == 2
assert get_id(carti[2]) == 4
assert get_id(carti[3]) == 1
|
[
"raluca0602372yahoo.com"
] |
raluca0602372yahoo.com
|
7ee73bf343527b14d894d44745d77c7a4f338cbf
|
7ce6db9c613e783739c1af16e1b89a4b20531290
|
/weibo/spiders/weibocn.py
|
ba0392942a17eccbe874d887383c99846d27c367
|
[] |
no_license
|
Foxgeek36/WeiBo
|
d9ee855d6047d491d292a99319b63c8468a2ad92
|
386486d31f5fba895b99dc306b48c06a98b8ed8b
|
refs/heads/master
| 2020-07-26T09:43:30.597611
| 2019-09-15T16:18:00
| 2019-09-15T16:18:00
| 208,606,874
| 0
| 0
| null | 2019-09-30T15:57:31
| 2019-09-15T14:34:08
|
Python
|
UTF-8
|
Python
| false
| false
| 7,444
|
py
|
# coding=utf-8
import json
from scrapy import Request, Spider
from weibo.items import *
class WeiboSpider(Spider):
'''
    Crawl target: https://m.weibo.cn (using the mobile site as the entry point).
    - Example user profile page: https://m.weibo.cn/profile/2011075080
'''
name = 'weibocn'
allowed_domains = ['m.weibo.cn']
    # User info API reached from the profile page (the author notes it was
    # tested and works).
user_url = 'https://m.weibo.cn/api/container/getIndex?uid={uid}&type=uid&value={uid}&containerid=100505{uid}'
    # Follows list API
follow_url = 'https://m.weibo.cn/api/container/getIndex?containerid=231051_-_followers_-_{uid}&page={page}'
    # Fans list API
fan_url = 'https://m.weibo.cn/api/container/getIndex?containerid=231051_-_fans_-_{uid}&page={page}'
    # Weibo (posts) list API
weibo_url = 'https://m.weibo.cn/api/container/getIndex?uid={uid}&type=uid&page={page}&containerid=107603{uid}'
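    # The numeric containerid prefixes (100505, 231051, 107603) appear to
    # select the content type served by the m.weibo.cn API.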
    # Seed the crawl with a few high-profile ("big V") account IDs; the crawl
    # expands recursively from them to reach more weibo accounts.
start_users = ['3217179555', '1742566624', '2282991915', '1288739185', '3952070245', '5878659096']
def start_requests(self):
for uid in self.start_users:
            # Note: uid fills both placeholders in the URL template.
yield Request(self.user_url.format(uid=uid), callback=self.parse_user)
    def parse_user(self, response):
"""
解析用户信息
:param response: Response对象
"""
self.logger.debug(response)
result = json.loads(response.text)
if result.get('data').get('userInfo'):
            user_info = result.get('data').get('userInfo')  # the API fields have since changed
user_item = UserItem()
            # Mapping from item fields to attributes in the API response.
field_map = {
'id': 'id', 'name': 'screen_name', 'avatar': 'profile_image_url', 'cover': 'cover_image_phone',
'gender': 'gender', 'description': 'description', 'fans_count': 'followers_count',
'follows_count': 'follow_count', 'weibos_count': 'statuses_count', 'verified': 'verified',
'verified_reason': 'verified_reason', 'verified_type': 'verified_type'
}
for field, attr in field_map.items():
user_item[field] = user_info.get(attr)
# ----------------------------------
yield user_item
            # Follows
uid = user_info.get('id')
yield Request(self.follow_url.format(uid=uid, page=1), callback=self.parse_follows,
                          # meta carries extra state into the callback for later use
meta={'page': 1, 'uid': uid})
            # Fans
yield Request(self.fan_url.format(uid=uid, page=1), callback=self.parse_fans,
meta={'page': 1, 'uid': uid})
            # Weibo posts
yield Request(self.weibo_url.format(uid=uid, page=1), callback=self.parse_weibos,
meta={'page': 1, 'uid': uid})
    def parse_follows(self, response):
"""
        Parse the user's follows.
        :param response: Response object
"""
result = json.loads(response.text)
if result.get('ok') and result.get('data').get('cards') and \
len(result.get('data').get('cards')) and \
result.get('data').get('cards')[-1].get('card_group'):
            # Parse followed users
follows = result.get('data').get('cards')[-1].get('card_group')
for follow in follows:
if follow.get('user'):
uid = follow.get('user').get('id')
                    # Recurse into each followed user's profile
yield Request(self.user_url.format(uid=uid),
callback=self.parse_user)
uid = response.meta.get('uid')
            # Follows list
user_relation_item = UserRelationItem()
follows = [{'id': follow.get('user').get('id'), 'name': follow.get('user').get('screen_name')}
for follow in follows]
user_relation_item['id'] = uid
user_relation_item['follows'] = follows
user_relation_item['fans'] = []
yield user_relation_item
            # Next page of follows
page = response.meta.get('page') + 1
yield Request(self.follow_url.format(uid=uid, page=page),
callback=self.parse_follows,
meta={'page': page, 'uid': uid})
def parse_fans(self, response):
"""
        Parse the user's fans.
        :param response: Response object
"""
result = json.loads(response.text)
if result.get('ok') and result.get('data').get('cards') and \
len(result.get('data').get('cards')) and \
result.get('data').get('cards')[-1].get('card_group'):
            # Parse fan users
fans = result.get('data').get('cards')[-1].get('card_group')
for fan in fans:
if fan.get('user'):
uid = fan.get('user').get('id')
yield Request(self.user_url.format(uid=uid),
callback=self.parse_user)
uid = response.meta.get('uid')
            # Fans list
user_relation_item = UserRelationItem()
fans = [{'id': fan.get('user').get('id'), 'name': fan.get('user').get('screen_name')} for fan in
fans]
user_relation_item['id'] = uid
user_relation_item['fans'] = fans
user_relation_item['follows'] = []
yield user_relation_item
            # Next page of fans
page = response.meta.get('page') + 1
yield Request(self.fan_url.format(uid=uid, page=page),
callback=self.parse_fans,
meta={'page': page, 'uid': uid})
def parse_weibos(self, response):
"""
        Parse the user's weibo (post) list.
        :param response: Response object
"""
result = json.loads(response.text)
if result.get('ok') and result.get('data').get('cards'):
weibos = result.get('data').get('cards')
for weibo in weibos:
mblog = weibo.get('mblog')
if mblog:
weibo_item = WeiboItem()
field_map = {
'id': 'id', 'attitudes_count': 'attitudes_count', 'comments_count': 'comments_count',
'reposts_count': 'reposts_count', 'picture': 'original_pic', 'pictures': 'pics',
'created_at': 'created_at', 'source': 'source', 'text': 'text', 'raw_text': 'raw_text',
'thumbnail': 'thumbnail_pic',
}
for field, attr in field_map.items():
weibo_item[field] = mblog.get(attr)
weibo_item['user'] = response.meta.get('uid')
yield weibo_item
            # Next page of weibo posts
uid = response.meta.get('uid')
page = response.meta.get('page') + 1
yield Request(self.weibo_url.format(uid=uid, page=page),
callback=self.parse_weibos,
meta={'uid': uid, 'page': page})
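# Side note (not part of the original spider): the dict-driven field mapping
# used in parse_user above is a small reusable pattern -- keys are the item's
# fields, values are the source JSON keys. A minimal standalone sketch:
source = {'id': 1, 'screen_name': 'demo'}
field_map = {'id': 'id', 'name': 'screen_name'}
item = {field: source.get(attr) for field, attr in field_map.items()}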
|
[
"1002301246@qq.com"
] |
1002301246@qq.com
|
36297de68d4dda62481025cf1bbce659d0ce664f
|
3b89c0a97ac6b58b6923a213bc8471e11ad4fe69
|
/python/CodingExercises/CheckSecondStringFormedFirstString.py
|
82a8ded99bb4b608b37b268d47ca9e6f94271932
|
[] |
no_license
|
ksayee/programming_assignments
|
b187adca502ecf7ff7b51dc849d5d79ceb90d4a6
|
13bc1c44e1eef17fc36724f20b060c3339c280ea
|
refs/heads/master
| 2021-06-30T07:19:34.192277
| 2021-06-23T05:11:32
| 2021-06-23T05:11:32
| 50,700,556
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
'''
Check whether second string can be formed from characters of first string
Given two strings str1 and str2, check if str2 can be formed from str1
Example :
Input : str1 = geekforgeeks, str2 = geeks
Output : Yes
Here, string2 can be formed from string1.
Input : str1 = geekforgeeks, str2 = and
Output : No
Here string2 cannot be formed from string1.
Input : str1 = geekforgeeks, str2 = geeeek
Output : Yes
Here string2 can be formed from string1:
'geeeek' needs the letter 'e' 4 times, and
string1 contains 4 occurrences of 'e'.
'''
import collections
def CheckSecondStringFormedFirstString(str1,str2):
dict1=collections.Counter(str1)
dict2=collections.Counter(str2)
    for key,val in dict2.items():
        # require the full multiplicity of each character, not just one
        if dict1.get(key, 0) >= val:
            dict1[key] -= val
        else:
            return False
    return True
def main():
str1='geekforgeeks'
str2='geeks'
print(CheckSecondStringFormedFirstString(str1,str2))
str1 = 'geekforgeeks'
str2 = 'and'
print(CheckSecondStringFormedFirstString(str1, str2))
str1 = 'geekforgeeks'
str2 = 'geeeek'
print(CheckSecondStringFormedFirstString(str1, str2))
if __name__=='__main__':
main()
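# Side note (not part of the original solution): Counter subtraction drops
# non-positive counts, so the same multiset check collapses to one line.
def can_form(str1, str2):
    return not (collections.Counter(str2) - collections.Counter(str1))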
|
[
"kartiksayee@gmail.com"
] |
kartiksayee@gmail.com
|
ea2bb972d8ab0c6e4c705e1fe4e5253a81d3b550
|
6f9cb6a226001e1062a05101b2e271fbfb94191b
|
/sitemon/urls.py
|
ba4f9e4e95f7521bd845a0651d173caab5283f16
|
[] |
no_license
|
infoculture/ogd_sitemon
|
61109b9f357bbbe8618ff8497948c6ca3406f9f3
|
ba664e7741a3ef099f0bfc81cd3a7c462a1b18be
|
refs/heads/master
| 2021-01-23T07:59:48.640425
| 2013-01-11T15:47:11
| 2013-01-11T15:47:11
| 7,560,991
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
(r'', include('smon.urls')),
(r'^css/(?P<path>.*)$', 'django.views.static.serve',{'document_root': '/var/www/sitemon.opengovdata.ru/html/css'}),
(r'^gfx/(?P<path>.*)$', 'django.views.static.serve',{'document_root': '/var/www/sitemon.opengovdata.ru/html/gfx'}),
(r'^js/(?P<path>.*)$', 'django.views.static.serve',{'document_root': '/var/www/sitemon.opengovdata.ru/html/js'}),
(r'^media/(?P<path>.*)$', 'django.views.static.serve',{'document_root': '/var/www/sitemon.opengovdata.ru/html/media'}),
)
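# Side note (not part of the original, which targets Django <= 1.7): under a
# modern Django (assumption: >= 2.0) the same routes would be written with
# path()/re_path() and django.views.static.serve, e.g.:
#   from django.urls import include, path, re_path
#   from django.views.static import serve
#   urlpatterns = [
#       path('admin/', admin.site.urls),
#       path('', include('smon.urls')),
#       re_path(r'^css/(?P<path>.*)$', serve, {'document_root': '/var/www/sitemon.opengovdata.ru/html/css'}),
#   ]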
|
[
"ibegtin@gmail.com"
] |
ibegtin@gmail.com
|
25b4001a10ee2d51a890bc339744c20aa1981f7f
|
8aa283469088fd9b32d5be2069ab59a04dbcc3d4
|
/DSP/DSP_filters.py
|
b436d055bfecd606c10a4ecddc9d794d95d79203
|
[] |
no_license
|
Matt-McNichols/perl
|
bb49629fc2a31c709d167c320f642dc99a205470
|
fdeb71a85c1e2e0415c51b5b33f770f80fb4bdf7
|
refs/heads/master
| 2021-01-17T13:28:21.076463
| 2016-04-01T20:40:40
| 2016-04-01T20:40:40
| 39,844,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,528
|
py
|
import argparse
import math
import numpy as np
from scipy import signal
import scipy.io.wavfile
import matplotlib.pyplot as plt
# This function does a mirror extension of the signal x_array.
# It then calculates the output (y) after the input (x_array) passes
# through the filter (h_filter).
# After computing the output (y) it downsamples y by a factor of 2.
def filtdec(x_array, h_filter, downsample):  # note: 'downsample' is unused; the factor is fixed at 2
q = []
N = len(x_array)
#mirror the array
for i in range(N-1, -1, -1):
x_array.append(x_array[i])
#convolve the signal with filter
y = scipy.signal.convolve(x_array,h_filter, 'same')
#downsample the array
for i in range(0 , int(len(y)/2) ):
q.append(y[2*i])
return q
# This function first upsamples x_array by 2 (zero insertion) and then
# passes the upsampled array through the filter (h_filter).
# It returns the filtered signal. (The mirror extension mentioned in the
# original comment is not actually implemented here.)
def upfilt(x_array, h_filter, upsample):  # note: 'upsample' is unused; the factor is fixed at 2
q = []
for i in range(0, 2*len(x_array)):
if(i%2):
q.append(x_array[int(i/2)])
else:
q.append(0)
y = scipy.signal.convolve(q, h_filter, 'same')
return y
def IFT(length):
q = np.zeros(length)
increment = (2*math.pi)/length
for i in range(0,length):
v = ((-1/4)*(math.cos(2*i*increment))+((1/2)*math.cos(i*increment))+(3/4))
q[i] = v
return q
def main():
    fs, x = scipy.io.wavfile.read("beat.wav")
    h1 = [0,1,0]
    h2 = [1,0,1]
    # note: the wav data above is immediately overwritten by the test ramp below
    x = []
for i in range(0,10):
x.append(i)
x = filtdec(x,h1, 2)
print(x)
x = upfilt(x, h2, 2)
print(x)
print(IFT(10))
print('end')
if __name__ == "__main__":
main()
'''
# Parse command-line arguments
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument("--order", type=int, default=3, help="order of Bessel function")
parser.add_argument("--output", default="plot.png", help="output image file")
args = parser.parse_args()
# Compute maximum
f = lambda x: -special.jv(args.order, x)
sol = optimize.minimize(f, 1.0)
# Plot
x = np.linspace(0, 10, 5000)
#plt.plot(x, special.jv(args.order, x), '-', sol.x, -sol.fun, 'o')
# Produce output
#plt.savefig(args.output, dpi=96)
'''
|
[
"Matt-McNichols@users.noreply.github.com"
] |
Matt-McNichols@users.noreply.github.com
|
8796fb8ca4716d9c715a5a6a59a3963a5c1b6074
|
1838a53201aca2cf3f11e8b6e3615935de04b1c5
|
/13.py
|
3da47848ca20d11ab884ac1dbeb47a850b5d753a
|
[] |
no_license
|
antoprinciya/python-1
|
2f6df8ef32c2bda08bdc9d6897e1faff78eec3ba
|
e40759071dc209a5c8c56e77e67956176c78f214
|
refs/heads/master
| 2020-06-14T11:02:06.183604
| 2019-07-02T18:40:09
| 2019-07-02T18:40:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
n1 = int(input())
if n1 > 1:
    for i in range(2, n1):
        if (n1 % i) == 0:
            print("no")
            break
    else:
        print("yes")
else:
    print("no")
|
[
"noreply@github.com"
] |
antoprinciya.noreply@github.com
|
aaafe02c250a9c2699c0fd09dd69981d770de947
|
512a62900f8797f3e6f87850616548dff6b2bf25
|
/board.py
|
33d958d6fc02611642fa540eba7afcf94d4586a6
|
[] |
no_license
|
racocon/Eels-and-Escalators
|
4e108cb4311e707a5924a65c8690535479434ea5
|
4edca7ba22396ee7ba67a19b9ac1f3b3bee7a220
|
refs/heads/master
| 2020-03-24T03:49:30.205382
| 2018-07-26T11:57:07
| 2018-07-26T11:57:07
| 142,434,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
class Board:
#initialize the 10x10 board for eels and escalators
#board = [["P", "p", " ", " ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " ", " ", " ", " "],
# [" ", " ", " ", " ", " ", " ", " ", " ", " ", " "]]
board = [[100, 99, 98, 97, 96, 95, 94, 93, 92, 91],
[81, 82, 83, 84, 85, 86, 87, 88, 89, 90],
[80, 79, 78, 77, 76, 75, 74, 73, 72, 71],
[61, 62, 63, 64, 65, 66, 67, 68, 69, 70],
[60, 59, 58, 57, 56, 55, 54, 53, 52, 51],
[41, 42, 43, 44, 45, 46, 47, 48, 49, 50],
[40, 39, 38, 37, 36, 35, 34, 33, 32, 31],
[21, 22, 23, 24, 25, 26, 27, 28, 29, 30],
[20, 19, 18, 17, 16, 15, 14, 13, 12, 11],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
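    # note: the rows alternate direction (boustrophedon), so the squares
    # count 1..100 from the bottom-left corner, snakes-and-ladders style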
    #initialize the 2 players
    # note: 'global' statements at class scope have no effect; player1 and
    # player2 would still need to be created at module level elsewhere
    global player1
    global player2
|
[
"diyanah.nadzree@live.com"
] |
diyanah.nadzree@live.com
|
85df82d852eeb8e2b44964f94463348ae7366d32
|
7fad84a788a234fe80535f9bc75f70060a30c378
|
/Agent_Recommend_Git/Agent_Recommend_Git/forms.py
|
bbd31efbe32657769187b2591cea94e5bf3c78ee
|
[] |
no_license
|
KKK-cy/Agent_Recommend
|
1ea21cd647521561683f77bc421b9ba2eed4004d
|
de2b2d72b571577b15b0bacebb5ca6b12654f66a
|
refs/heads/master
| 2023-03-23T10:37:11.226124
| 2021-03-11T08:11:52
| 2021-03-11T08:11:52
| 261,702,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
# -*- coding:utf-8 -*-
# @Time : 2021/1/11 11:08
# @Author : KCY
# @File : forms.py
# @Software: PyCharm
from django import forms
# Login form
class UserForm(forms.Form):
username = forms.CharField(label="用户名", max_length=128,widget=forms.TextInput(attrs={'class': 'form-control'}))
password = forms.CharField(label="密码", max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
# Registration form
class RegisterForm(forms.Form):
gender = (
('male', "男"),
('female', "女"),
)
username = forms.CharField(label="用户名", max_length=128, widget=forms.TextInput(attrs={'class': 'form-control'}))
password1 = forms.CharField(label="密码", max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
password2 = forms.CharField(label="确认密码", max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
email = forms.EmailField(label="邮箱地址", widget=forms.EmailInput(attrs={'class': 'form-control'}))
sex = forms.ChoiceField(label='性别', choices=gender)
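# Side note (not part of the original module): nothing above checks that the
# two password fields match. A hedged sketch of the usual clean() hook:
class RegisterFormWithCheck(RegisterForm):
    def clean(self):
        cleaned = super().clean()
        if cleaned.get('password1') != cleaned.get('password2'):
            raise forms.ValidationError('Passwords do not match')
        return cleaned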
|
[
"chuiyang.kong@einyun.ltd"
] |
chuiyang.kong@einyun.ltd
|
95439e170fc5055a456e2743c221afc668438b7a
|
319ba101d095622a8f2f9870a5b496e042b915c1
|
/thd.py
|
d25e8753502805694a0293dce96879253574ccf9
|
[] |
no_license
|
andrewleu/inetaddr-and-nic
|
9edf6104a103ad3235a752fbaaf211f77a80d197
|
2baf4170765bb7942df16eccb8c47a53eeea7722
|
refs/heads/master
| 2021-08-08T12:12:35.128764
| 2020-06-11T03:44:10
| 2020-06-11T03:44:10
| 29,572,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
import threading
import time
class MyThread(threading.Thread):
    def run(self):
        global num
        time.sleep(1)
        if mutex.acquire(1):
            print(self.name + str(mutex))
            num = num + 1
            msg = self.name + ' set num to ' + str(num)
            print(msg)
            mutex.release()
            print(self.name + str(mutex))
            mutex.acquire(2)
            print(self.name + str(mutex))
            num = num + 1
            msg = self.name + ' set num to ' + str(num)
            print(msg)
            mutex.release()
            print(self.name + str(mutex))
num = 0
mutex = threading.Lock()
def test():
for i in range(5):
t = MyThread()
t.start()
if __name__ == '__main__':
test()
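# Side note (not part of the original script): the same critical section is
# usually written with the context-manager idiom, which releases the lock
# even if the body raises.
def bump_with_lock():
    global num
    with mutex:
        num = num + 1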
|
[
"andrewleu@hotmail.com"
] |
andrewleu@hotmail.com
|
d40ec1ea16fec299f7e84680371949b248b3a34d
|
2da8862795c1f89a119a6af312aadd61040517d7
|
/catkin_ws/build/joint_encoders/catkin_generated/pkg.installspace.context.pc.py
|
1bf4e5033dcf512d6d1c3a9f520d08c56678dc64
|
[] |
no_license
|
AlfredWilmot/Modular-2-DOF-Cable-Driven-Segment
|
7864086f58260fda0a5198325451776f296b4ab5
|
0723c00b5f1ad4d91d5f605379b1793eab815263
|
refs/heads/master
| 2021-10-27T08:58:26.233175
| 2021-10-22T09:02:35
| 2021-10-22T09:02:35
| 156,258,756
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "joint_encoders"
PROJECT_SPACE_DIR = "/home/alfie/Desktop/Modular-2-DOF-Cable-Driven-Segment/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"alfred.wilmot@students.plymouth.ac.uk"
] |
alfred.wilmot@students.plymouth.ac.uk
|
078f048db806a896fa9fedd85cd87ed800b5ba27
|
1be0621633e7c7b81e868774e6bdfb472e2746bc
|
/Elementary/even_last.py
|
74cabfdd62700ecdf0be2e3fccba92469c634b0a
|
[] |
no_license
|
junzhao680/PyCheckio
|
017cb4b69b692d46775d1d03dd9211fd73b52ca3
|
52f914b2b19da3ebd9b2ae2d0071a61fedba3219
|
refs/heads/master
| 2021-08-31T13:12:32.566903
| 2017-12-21T11:59:31
| 2017-12-21T11:59:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def checkio(array):
"""
    Sum the even-indexed elements and multiply by the last element.
"""
try:
return sum([array[i] for i in range(len(array)) if i % 2 == 0]) * array[-1]
    except IndexError:  # an empty array has no last element
return 0
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
assert checkio([0, 1, 2, 3, 4, 5]) == 30, "(0+2+4)*5=30"
assert checkio([1, 3, 5]) == 30, "(1+5)*5=30"
assert checkio([6]) == 36, "(6)*6=36"
assert checkio([]) == 0, "An empty array = 0"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
|
[
"huangqin611@gmail.com"
] |
huangqin611@gmail.com
|
309f2903d2fef0acae019ea359db53ecfbe15ff9
|
01c419db0c83e14cfe6b3f6dc82ed6889f64a9b4
|
/python练习/urllib01.py
|
def6fae233929383cf26461ec0794a6314ab4ffb
|
[] |
no_license
|
HuiMengYouXian/lianxi
|
0a6812ae16ef29dad4023124cf5de96e02357b84
|
5c12d7548c13251df48e0afb555dac61bfc2beb6
|
refs/heads/master
| 2020-03-31T04:25:22.952883
| 2018-10-09T14:08:01
| 2018-10-09T14:08:01
| 151,904,371
| 0
| 0
| null | 2018-10-07T04:38:44
| 2018-10-07T04:00:01
| null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib import request,parse
url="http://httpbin.org/post"
headers={
    # Spoof a browser User-Agent (note: this string actually mimics MSIE 5.5, not Firefox)
"User-Agent":'Mozilla/4.0(compatible:MSIE 5.5; Windows NT)',
"host":'httpbin.org'
}
dict={"name":'Chen'}
data=bytes(parse.urlencode(dict),encoding='utf-8')
req=request.Request(url=url,data=data,headers=headers,method='POST')
response=request.urlopen(req)
print(response.read().decode('utf-8'))
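# Side note (assumption: the third-party requests library is available; it is
# not used by the original script). The same POST, for comparison:
try:
    import requests
    resp2 = requests.post(url, data={'name': 'Chen'}, headers=headers)
    print(resp2.text)
except ImportError:
    pass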
|
[
"592883527@qq.com"
] |
592883527@qq.com
|
974df7b001cc9ce833b59d2918d6c8f9e3ae6c16
|
2fc69271a1e2502e0ec5c24deed634a769e3699d
|
/OOT-ASSIGNMENT 6-IT-62.py
|
fe165421f6073a4d6ad1d8fa897adacc0cf63b25
|
[] |
no_license
|
padmajabollu/Python-Operator-overloading
|
5a13c9eee57933d504abbb6673c3a03c40ed4e89
|
38b5c9e4d82533a4cfff793e1bef03a19d552898
|
refs/heads/master
| 2022-04-26T04:31:11.567138
| 2020-04-23T02:40:26
| 2020-04-23T02:40:26
| 258,075,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,474
|
py
|
class currency(object):
cv = {"Rs":1,"$":60,"#":90,"E":70}
def __init__(self, value, unit):
self.value = value
self.unit = unit
def __str__(self):
return str(self.value) + " " + self.unit
def __add__(self,other):
if isinstance(other, currency):
v = (self.value*currency.cv[self.unit] + other.value*currency.cv[other.unit])*1.0
return currency(v,"Rs")
else:
return other + self
def __radd__(self,other):
v = (self.value*currency.cv[self.unit]) + other
return currency(v, "Rs")
def __sub__(self,other):
if isinstance(other, currency):
v = (self.value*currency.cv[self.unit] - other.value*currency.cv[other.unit])
if v<0:
v=-(v)
return currency(v, "Rs")
else:
v = self.value - other
return currency(v, self.unit)
def __rsub__(self,other):
v = (self.value*currency.cv[self.unit]) - other
if v<0:
v=-(v)
return currency(v, "Rs")
def __mul__(self,other):
if isinstance(other, currency):
v = (self.value*currency.cv[self.unit] * other.value*currency.cv[other.unit])
return currency(v,"Rs")
else:
return other * self
def __rmul__(self,other):
v = (self.value*currency.cv[self.unit]) * other
return currency(v, "Rs")
while True:
try:
print()
print("***"*20)
print("\nEnter Value for Currency object 1 :")
value1=int(input())
print("\nEnter Unit for Currency object 1 :")
unit1=(input())
print("\nEnter Value for Currency object 2 :")
value2=int(input())
print("\nEnter Unit for Currency object 2 :")
unit2=(input())
break
except:
print("\nEnter Valid Data.........")
continue
c1=currency(value1,unit1)
c2=currency(value2,unit2)
while True:
print()
print("***"*20)
print("1.Addition of money")
print("2.Substraction of money")
print("3.Multiplication of money")
print("4.Exit\n")
ch=int(input())
if ch==1:
while True:
print()
print("***"*20)
print("1.Result want in Rs(Rupees)")
print("2.Result want in $(Doller)")
print("3.Result want in #(Pond)")
print("4.Result want in E(Euro)")
print("5.Exit\n")
ch1=int(input())
if ch1==1:
Result=c1+c2
print(c1," + ",c2," = ",Result)
elif ch1==2:
Result=c1+c2
Result.value=Result.value/currency.cv["$"]
Result.unit="$"
print(c1," + ",c2," = ",Result)
elif ch1==3:
Result=c1+c2
Result.value=Result.value/currency.cv["#"]
Result.unit="#"
print(c1," + ",c2," = ",Result)
elif ch1==4:
Result=c1+c2
Result.value=Result.value/currency.cv["E"]
Result.unit="E"
print(c1," + ",c2," = ",Result)
elif ch1==5:
break
else:
print("\nWrong Choice.......")
elif ch==2:
while True:
print()
print("***"*20)
print("1.Result want in Rs(Rupees)")
print("2.Result want in $(Doller)")
print("3.Result want in #(Pond)")
print("4.Result want in E(Euro)")
print("5.Exit\n")
ch1=int(input())
if ch1==1:
Result=c1-c2
print(c1," - ",c2," = ",Result)
elif ch1==2:
Result=c1-c2
Result.value=Result.value/currency.cv["$"]
Result.unit="$"
print(c1," - ",c2," = ",Result)
elif ch1==3:
Result=c1-c2
Result.value=Result.value/currency.cv["#"]
Result.unit="#"
print(c1," - ",c2," = ",Result)
elif ch1==4:
Result=c1-c2
Result.value=Result.value/currency.cv["E"]
Result.unit="E"
print(c1," - ",c2," = ",Result)
elif ch1==5:
break
else:
print("\nWrong Choice.......")
elif ch==3:
while True:
print()
print("***"*20)
print("1.Result want in Rs(Rupees)")
print("2.Result want in $(Doller)")
print("3.Result want in #(Pond)")
print("4.Result want in E(Euro)")
print("5.Exit\n")
ch1=int(input())
if ch1==1:
Result=c1*c2
print(c1," * ",c2," = ",Result)
elif ch1==2:
Result=c1*c2
Result.value=Result.value/currency.cv["$"]
Result.unit="$"
print(c1," * ",c2," = ",Result)
elif ch1==3:
Result=c1*c2
Result.value=Result.value/currency.cv["#"]
Result.unit="#"
print(c1," * ",c2," = ",Result)
elif ch1==4:
Result=c1*c2
Result.value=Result.value/currency.cv["E"]
Result.unit="E"
print(c1," * ",c2," = ",Result)
elif ch1==5:
break
else:
print("\nWrong Choice.......")
elif ch==4:
break
else:
print("\nWrong Choice.......")
Currency1=currency(6,"$")
Currency2=currency(50,"Rs")
Currency3=currency(3,"E")
Currency4=currency(100,"Rs")
Currency5=currency(5,"E")
Currency6=currency(1,"$")
Result=Currency1+Currency2
Result.value=Result.value/currency.cv["$"]
Result.unit="$"
print(Currency1," + ",Currency2," = ",Result)
Result= Currency3+Currency6-Currency4
Result.value=Result.value/currency.cv["E"]
Result.unit="E"
print(Currency3," + ",Currency6," - ", Currency4 ," = ",Result)
Result=20+Currency5
Result.value=Result.value/currency.cv["E"]
Result.unit="E"
print("20 + ",Currency5," = ",Result)
|
[
"noreply@github.com"
] |
padmajabollu.noreply@github.com
|
ca58b1ce2b21900200329d5dbd2507235c210435
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03853/s066137758.py
|
acb2ac42342d566e74d51b19e21c6c91f5ab7f87
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
num = input().split()
hei = int(num[0])
wei = int(num[1])
photo = []
for i in range(hei):
temp = input()
temp = list(temp)
photo.append(temp)
photo.append(temp)
for i in range(hei*2):
for j in range(wei):
print(photo[i][j],end="")
print("\n",end="")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6727bdc422920e3e15e9e2a1708eb0cfd0380f25
|
e7a7419bc957c6eece0f38b709a45bb187078089
|
/project1.py
|
00521cf0bff8e44d2b38a8bf7991bd69adb138a5
|
[] |
no_license
|
lucasfazzib/covid19_python_chart
|
2ba92bc5ed13c1c9c71c82add9461d5f4fff6bef
|
ede0f158cfdebfdddabfb84c6bf28b495545f318
|
refs/heads/main
| 2023-06-18T14:44:20.624509
| 2021-07-10T03:21:32
| 2021-07-10T03:21:32
| 384,601,324
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,732
|
py
|
from typing import final
import requests as r
import datetime as dt
import csv
from PIL import Image
from IPython.display import display
from urllib.parse import quote
url = 'https://api.covid19api.com/dayone/country/brazil'
resp = r.get(url)
#print(resp.status_code)
raw_data = resp.json()
#print(raw_data[0])
#{'ID': '5b679794-2952-4c4c-a873-af6ff457b0fd', 'Country': 'Brazil', 'CountryCode': 'BR', 'Province': '', 'City': '', 'CityCode': '', 'Lat': '-14.24', 'Lon': '-51.93', 'Confirmed': 1, 'Deaths': 0, 'Recovered': 0, 'Active': 1, 'Date': '2020-02-26T00:00:00Z'}
final_data = []
for obs in raw_data:
final_data.append([obs['Confirmed'], obs['Deaths'], obs['Recovered'], obs['Active'], obs['Date']])
final_data.insert(0, ['confirmados', 'obitos', 'recuperados', 'ativos', 'data'])
#print(final_data)
CONFIRMADOS = 0
OBITOS = 1
RECUPERADOS = 2
ATIVOS = 3
DATA = 4
for i in range(1, len(final_data)):
final_data[i][DATA] = final_data[i][DATA][:10]
#print(final_data)
#print(dt.time(12, 6, 21, 7), 'Hora:minuto:segundo.microsegundo')
#print('---------')
#print(dt.date(2020, 4, 25), 'Ano-mês-dia')
#print('---------')
#print(dt.datetime(2020, 4, 25, 12, 6, 21, 7), 'Ano-mês-dia Hora:minuto:segundo.microsegundo')
natal = dt.date(2020, 12, 25)
reveillon = dt.date(2011, 1, 1)
#print(reveillon - natal)
#print((reveillon - natal).days)
#print((reveillon - natal).seconds)
#print((reveillon - natal).microseconds)
with open('brasil-covid.csv', 'w') as file:
writer = csv.writer(file)
writer.writerows(final_data)
for i in range(1, len(final_data)):
final_data[i][DATA] = dt.datetime.strptime(final_data[i][DATA], '%Y-%m-%d')
#print(final_data)
def get_dataset(y, labels):
if type(y[0]) == list:
datasets = []
for i in range(len(y)):
datasets.append({
'label': labels[i],
'data' : y[i]
})
return datasets
else:
return [
{
'label': labels[0],
'data' : y
}
]
def set_title(title=''):
if title != '':
display = 'true'
else:
display = 'false'
return {
'title' : title,
'display': display
}
def create_chart(x, y, labels, kind='bar', title=''):
dataset = get_dataset(y, labels)
options = set_title(title)
chart = {
'type': kind,
'data': {
'labels': x,
'datasets' : dataset
},
'options' : options
}
return chart
def get_api_chart(chart):
url_base = 'https://quickchart.io/chart'
resp = r.get(f'{url_base}?c={str(chart)}')
return resp.content
def save_image(path, content):
with open(path, 'wb') as image:
image.write(content)
def display_image(path):
img_pil = Image.open(path)
display(img_pil)
y_data_1 = []
for obs in final_data[1::10]:
y_data_1.append(obs[CONFIRMADOS])
y_data_2 = []
for obs in final_data[1::10]:
y_data_2.append(obs[RECUPERADOS])
labels = ['Confirmados', 'Recuperados']
x = []
for obs in final_data[1::10]:
x.append(obs[DATA].strftime('%d/%m/%Y'))
chart = create_chart(x, [y_data_1, y_data_2], labels, title='Gráfico Confirmados vs Recuperados')
chart_content = get_api_chart(chart)
save_image('meu-grafico-covid.png', chart_content)
display_image('meu-grafico-covid.png')
def get_api_qrcode(link):
text = quote(link) #parsing the link to url
url_base = 'https://quickchart.io/qr'
resp = r.get(f'{url_base}?text={text}')
return resp.content
url_base = 'https://quickchart.io/chart'
link = f'{url_base}?c={str(chart)}'
save_image('qr-code.png', get_api_qrcode(link))
|
[
"lucasfazzi@hotmail.com"
] |
lucasfazzi@hotmail.com
|
363c4f8788d69ae5c719743e3bf95a12a1bf133b
|
cf53803d5389218a858e5168886f68ef17fff28d
|
/ssbccConfig.py
|
60f086aa770a62b17bc21f2726460e1c0e0b8adb
|
[] |
no_license
|
freecores/ssbcc
|
cfeeaae1e94711825c14750aab86ee5e3dc7ae84
|
fe7713b18f7a6328de2d0dad774b8887327785ab
|
refs/heads/master
| 2020-06-02T13:37:26.640555
| 2014-07-05T18:36:15
| 2014-07-05T18:36:15
| 21,918,103
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,531
|
py
|
################################################################################
#
# Copyright 2012-2013, Sinclair R.F., Inc.
#
# Utilities required by ssbcc
#
################################################################################
import math
import os
import re
import sys
from ssbccUtil import *
class SSBCCconfig():
"""
Container for ssbcc configuration commands, the associated parsing, and
program generation.
"""
def __init__(self):
"""
Initialize the empty dictionaries holding the processor configuration
parameters. Initialize the paths to search for peripherals.
"""
self.config = dict(); # various settings, etc.
self.constants = dict(); # CONSTANTs
self.functions = dict(); # list of functions to define
self.inports = list(); # INPORT definitions
self.ios = list(); # List of I/Os
self.outports = list(); # OUTPORT definitions (see AddOutport)
self.parameters = list(); # PARAMETERs and LOCALPARAMs
self.peripheral = list(); # PERIPHERALs
self.signals = list(); # internal signals
self.symbols = list(); # constant, I/O, inport, etc. names
# list of memories
self.memories = dict(name=list(), type=list(), maxLength=list());
# list of how the memories will be instantiated
self.config['combine'] = list();
# initial search path for .INCLUDE configuration commands
self.includepaths = list();
self.includepaths.append('.');
# initial search paths for peripherals
self.peripheralpaths = list();
self.peripheralpaths.append('.');
self.peripheralpaths.append('peripherals');
self.peripheralpaths.append(os.path.join(sys.path[0],'core/peripherals'));
def AddConstant(self,name,value,loc):
"""
Add the constant for the "CONSTANT" configuration command to the "constants"
dictionary.\n
name symbol for the constant
value value of the constant
loc file name and line number for error messages
"""
self.AddSymbol(name,loc);
if name in self.constants:
raise SSBCCException('CONSTANT "%s" already declared at %s' % (name,loc,));
self.constants[name] = value;
def AddIO(self,name,nBits,iotype,loc):
"""
Add an I/O signal to the processor interface to the system.\n
name name of the I/O signal
nBits number of bits in the I/O signal
iotype signal direction: "input", "output", or "inout"
"""
if iotype != 'comment':
self.AddSymbol(name,loc);
self.ios.append((name,nBits,iotype,));
def AddInport(self,port,loc):
"""
Add an INPORT symbol to the processor.\n
port name of the INPORT symbol
loc file name and line number for error messages
"""
name = port[0];
self.AddSymbol(name,loc);
self.inports.append(port);
def AddMemory(self,cmd,loc):
"""
Add a memory to the list of memories.\n
cmd 3-element list as follows:
[0] ==> type: "RAM" or "ROM"
[1] ==> memory name
[2] ==> memory length (must be a power of 2)
loc file name and line number for error messages
"""
self.memories['type'].append(cmd[0]);
self.memories['name'].append(cmd[1]);
maxLength = eval(cmd[2]);
if not IsPowerOf2(maxLength):
raise SSBCCException('Memory length must be a power of 2, not "%s", at %s' % (cmd[2],loc,));
self.memories['maxLength'].append(eval(cmd[2]));
def AddOutport(self,port,loc):
"""
Add an OUTPORT symbol to the processor.\n
port tuple as follows:
port[0] - name of the OUTPORT symbol
port[1] - True if the outport is a strobe-only outport, false
otherwise
port[2:] - zero or more tuples as follows:
(o_signal,width,type,[initialization],)
where
o_signal is the name of the output signal
width is the number of bits in the signal
type is 'data' or 'strobe'
initialization is an optional initial/reset value for the
output signal
loc file name and line number for error messages
"""
self.AddSymbol(port[0],loc);
self.outports.append(port);
def AddParameter(self,name,value,loc):
"""
Add a PARAMETER to the processor.\n
name name of the PARAMETER
value value of the PARAMETER
loc file name and line number for error messages
"""
if not re.match(r'[LG]_\w+$',name):
raise Exception('Program Bug -- bad parameter name at %s' % loc);
self.AddSymbol(name,loc);
self.parameters.append((name,value,));
def AddSignal(self,name,nBits,loc):
"""
Add a signal without an initial value to the processor.\n
name name of the signal
nBits number of bits in the signal
loc file name and line number for error messages
"""
self.AddSymbol(name,loc);
self.signals.append((name,nBits,));
def AddSignalWithInit(self,name,nBits,init,loc):
"""
Add a signal with an initial/reset value to the processor.\n
name name of the signal
nBits number of bits in the signal
init initial/reset value of the signal
loc file name and line number for error messages
"""
self.AddSymbol(name,loc);
self.signals.append((name,nBits,init,));
def AddSymbol(self,name,loc=None):
"""
Add the specified name to the list of symbols.\n
Note: This symbol has no associated functionality and is only used for
".ifdef" conditionals.
"""
if name in self.symbols:
      if loc is None:
        raise SSBCCException('Symbol "%s" already defined, no line number provided' % name);
else:
raise SSBCCException('Symbol "%s" already defined before %s' % (name,loc,));
self.symbols.append(name);
def AppendIncludePath(self,path):
"""
Add the specified path to the end of the paths to search for .INCLUDE
configuration commands.\n
path path to add to the list
"""
self.includepaths.insert(-1,path);
def CompleteCombines(self):
"""
Ensure all memories are assigned addresses.\n
This modifies config['combine'] to include singleton entries for any
memories not subject to the COMBINE configuration command. It then computes
    how the memories will be packed together as well as properties for the
packed memories. These properties are:
packing how the memories will be packed as per PackCombinedMemory
memName HDL name of the memory
memLength number of words in the memory
memWidth bit width of the memory words
"""
# Create singleton entries for memory types and memories that aren't already listed in 'combine'.
if not self.IsCombined('INSTRUCTION'):
self.config['combine'].append({'mems':['INSTRUCTION',], 'memArch':'sync'});
for memType in ('DATA_STACK','RETURN_STACK',):
if not self.IsCombined(memType):
self.config['combine'].append({'mems':[memType,], 'memArch':'LUT'});
for memName in self.memories['name']:
if not self.IsCombined(memName):
self.config['combine'].append({'mems':[memName,], 'memArch':'LUT'});
# Determine the HDL names for the memories.
nRAMROMs = 0;
for combined in self.config['combine']:
if combined['mems'][0] == 'INSTRUCTION':
combined['memName'] = 's_opcodeMemory';
elif combined['mems'][0] == 'DATA_STACK':
combined['memName'] = 's_data_stack';
elif combined['mems'][0] == 'RETURN_STACK':
combined['memName'] = 's_R_stack';
else:
nRAMROMs += 1;
if nRAMROMs > 0:
memNameFormat = 's_mem_%%0%dx' % ((CeilLog2(nRAMROMs)+3)/4);
ixRAMROM = 0;
for combined in self.config['combine']:
if 'memName' in combined:
continue;
if nRAMROMs == 1:
combined['memName'] = 's_mem';
else:
combined['memName'] = memNameFormat % ixRAMROM;
ixRAMROM += 1;
# Perform packing for all memories.
for combined in self.config['combine']:
self.PackCombinedMemory(combined);
def Exists(self,name):
"""
Return true if the requested attribute has been created in the ssbccConfig
object.
"""
return name in self.config;
def Get(self,name):
"""
Return the requested attribute from the ssbccConfig object.
"""
if not self.Exists(name):
raise Exception('Program Bug: "%s" not found in config' % name);
return self.config[name];
def GetMemoryByBank(self,ixBank):
"""
Return the parameters for a memory by its bank address.\n
ixBank index of the requested memory bank
"""
if not 'bank' in self.memories:
return None;
if ixBank not in self.memories['bank']:
return None;
ixMem = self.memories['bank'].index(ixBank);
return self.GetMemoryParameters(ixMem);
def GetMemoryByName(self,name):
"""
Return the parameters for a memory by its name.\n
name name of the requested memory
"""
if not name in self.memories['name']:
return None;
ixMem = self.memories['name'].index(name);
return self.GetMemoryParameters(ixMem);
def GetMemoryParameters(self,rawIndex):
"""
Return the parameters for a memory by its index in the list of memories.\n
rawIndex index within the list of memories
"""
if type(rawIndex) == str:
if not self.IsMemory(rawIndex):
raise Exception('Program Bug: reference to non-existent memory');
ix = self.memories['name'].index(rawIndex);
elif type(rawIndex) == int:
if (rawIndex < 0) or (rawIndex >= len(self.memories['name'])):
raise Exception('Program Bug: bad memory index %d' % rawIndex);
ix = rawIndex;
else:
raise Exception('Program Bug: unrecognized index type "%s"' % type(rawIndex));
outvalue = dict();
outvalue['index'] = ix;
for field in self.memories:
outvalue[field] = self.memories[field][ix];
return outvalue;
def GetPacking(self,name):
"""
Get the memory packing for the provided memory.
"""
for combined in self.config['combine']:
if name not in combined['mems']:
continue;
for port in combined['port']:
for packing in port['packing']:
if packing['name'] == name:
return (combined,port,packing,);
else:
raise Exception('Program Bug -- %s not found in combined memories' % name);
def GetParameterValue(self,name):
"""
Get the value associated with the named parameter.
"""
if name.find('[') != -1:
ix = name.index('[');
thisSlice = name[ix:];
name = name[:ix];
else:
thisSlice = '[0+:8]';
for ix in range(len(self.parameters)):
if self.parameters[ix][0] == name:
return ExtractBits(IntValue(self.parameters[ix][1]),thisSlice);
else:
raise Exception('Program Bug: Parameter "%s" not found' % name);
def InsertPeripheralPath(self,path):
"""
Add the specified path to the beginning of the paths to search for
peripherals.\n
path path to add to the list
"""
self.peripheralpaths.insert(-1,path);
def IsCombined(self,name):
"""
Indicate whether or not the specified memory type has already been listed
in a "COMBINE" configuration command. The memory type should be one of
DATA_STACK, INSTRUCTION, or RETURN_STACK.\n
name name of the specified memory type\n
"""
for combined in self.config['combine']:
if name in combined['mems']:
return True;
else:
return False;
def IsConstant(self,name):
"""
Indicate whether or not the specified symbol is a recognized constant.
"""
if re.match(r'C_\w+$',name) and name in self.constants:
return True;
else:
return False;
def IsMemory(self,name):
"""
Indicate whether or not the specified symbol is the name of a memory.
"""
return (name in self.memories['name']);
def IsParameter(self,name):
"""
Indicate whether or not the specified symbol is the name of a parameter.
"""
if re.match(r'[GL]_\w+$',name) and name in self.symbols:
return True;
else:
return False;
def IsRAM(self,name):
"""
Indicate whether or not the specified symbol is the name of a RAM.
"""
if name not in self.memories['name']:
return False;
ix = self.memories['name'].index(name);
return self.memories['type'][ix] == 'RAM';
def IsROM(self,name):
"""
    Indicate whether or not the specified symbol is the name of a ROM.
"""
if name not in self.memories['name']:
return False;
ix = self.memories['name'].index(name);
return self.memories['type'][ix] == 'ROM';
def IsStrobeOnlyOutport(self,outport):
"""
Indicate whether or not the specified outport symbol only has strobes
associated with it (i.e., it has no data signals).
"""
return outport[1];
def IsSymbol(self,name):
"""
Indicate whether or not the specified name is a symbol.
"""
return (name in self.symbols);
def MemoryNameLengthList(self):
"""
Return a list of tuples where each tuple is the name of a memory and its
length.
"""
outlist = list();
for ix in range(len(self.memories['name'])):
outlist.append((self.memories['name'][ix],self.memories['maxLength'][ix],));
return outlist;
def NInports(self):
"""
Return the number of INPORTS.
"""
return len(self.inports);
def NMemories(self):
"""
Return the number of memories.
"""
return len(self.memories['name']);
def NOutports(self):
"""
Return the number of OUTPORTS.
"""
return len(self.outports);
def OverrideParameter(self,name,value):
"""
Change the value of the specified parameter (based on the command line
argument instead of the architecture file).\n
name name of the parameter to change
value new value of the parameter
"""
for ix in range(len(self.parameters)):
if self.parameters[ix][0] == name:
break;
else:
raise SSBCCException('Command-line parameter or localparam "%s" not specified in the architecture file' % name);
self.parameters[ix] = (name,value,);
def PackCombinedMemory(self,combined):
"""
Utility function for CompleteCombines.\n
Determine packing strategy and resulting memory addresses and sizes. This
list has everything ssbccGenVerilog needs to construct the memory.\n
The dual port memories can be used to do the following:
1. pack a single memory, either single-port or dual-port
2. pack two single-port memories sequentially, i.e., one at the start of
the RAM and one toward the end of the RAM
3. pack one single-port memory at the start of the RAM and pack several
compatible single-port memories in parallel toward the end of the RAM.
Note: Compatible means that they have the same address.
4. pack several compatible dual-port memories in parallel.\n
These single-port or dual-port single or parallel packed memories are
described in the 'port' list in combined. Each entry in the port list has
several parameters described below and a 'packing' list that describes the
single or multiple memories attached to that port.\n
The parameters for each of port is as follows:
offset start address of the memory in the packing
nWords number of RAM words reserved for the memory
Note: This can be larger than the aggregate number of words
required by the memory in order to align the memories to
power-of-2 address alignments.
ratio number of base memory entries for the memory
Note: This must be a power of 2.\n
The contents of each entry in the packing are as follows:
-- the following are from the memory declaration
name memory name
length number of elements in the memory based on the declared memory
size
Note: This is based on the number of addresses required for
each memory entry (see ratio).
nbits width of the memory type
-- the following are derived for the packing
lane start bit
Note: This is required in particular when memories are stacked
in parallel.
nWords number of memory addresses allocated for the memory based on
the packing
Note: This will be larger than length when a small memory is
packed in parallel with a larger memory. I.e., when
ratio is not one.
ratio number of base memory entries required to extract a single word
for the memory type
Note: This allows return stack entries to occupy more than one
memory address when the return stack is combined with
other memory addresses.
Note: This must be a power of 2.\n
The following entries are also added to "combined":
nWords number of words in the memory
memWidth bit width of the memory words\n
Note: If memories are being combined with the instructions space, they are
always packed at the end of the instruction space, so the
instruction space allocation is not included in the packing.
"""
# Count how many memories of each type are being combined.
nSinglePort = 0;
nRAMs = 0;
nROMs = 0;
for memName in combined['mems']:
if memName in ('INSTRUCTION','DATA_STACK','RETURN_STACK',):
nSinglePort += 1;
elif self.IsROM(memName):
nROMs += 1;
else:
nRAMs += 1;
if nRAMs > 0:
nRAMs += nROMs;
nROMs = 0;
# Ensure the COMBINE configuration command is implementable in a dual-port RAM.
if nSinglePort > 0 and nRAMs > 0:
raise SSBCCException('Cannot combine RAMs with other memory types in COMBINE configuration command at %s' % combined['loc']);
if nSinglePort > 2 or (nSinglePort > 1 and nROMs > 0):
raise SSBCCException('Too many memory types in COMBINE configuration command at %s' % combined['loc']);
# Start splitting the listed memories into the one or two output lists and ensure that single-port memories are listed in the correct order.
mems = combined['mems'];
ixMem = 0;
split = list();
if 'INSTRUCTION' in mems:
if mems[0] != 'INSTRUCTION':
raise SSBCCException('INSTRUCTION must be the first memory listed in the COMBINE configuration command at %s' % combined['loc']);
split.append(['INSTRUCTION']);
ixMem += 1;
while len(mems[ixMem:]) > 0 and mems[ixMem] in ('DATA_STACK','RETURN_STACK',):
split.append([mems[ixMem]]);
ixMem += 1;
for memName in ('DATA_STACK','RETURN_STACK',):
if memName in mems[ixMem:]:
        raise SSBCCException('Single-port memory %s must be listed before ROMs in COMBINE configuration command at %s' % (memName,combined['loc'],));
if mems[ixMem:]:
split.append(mems[ixMem:]);
if not (1 <= len(split) <= 2):
raise Exception('Program Bug -- bad COMBINE configuration command not caught');
# Create the detailed packing information.
combined['port'] = list();
for thisSplit in split:
packing = list();
for memName in thisSplit:
if memName == 'INSTRUCTION':
packing.append({'name':memName, 'length':self.Get('nInstructions')['length'], 'nbits':9});
elif memName == 'DATA_STACK':
packing.append({'name':memName, 'length':self.Get('data_stack'), 'nbits':self.Get('data_width')});
elif memName == 'RETURN_STACK':
nbits = max(self.Get('data_width'),self.Get('nInstructions')['nbits']);
packing.append({'name':memName, 'length':self.Get('return_stack'), 'nbits':nbits});
else:
thisMemory = self.GetMemoryParameters(memName);
packing.append({'name':memName, 'length':CeilPow2(thisMemory['maxLength']), 'nbits':self.Get('data_width')});
combined['port'].append({ 'packing':packing });
# Calculate the width of the base memory.
# Note: This accommodates RETURN_STACK being an isolated memory.
memWidth = combined['port'][0]['packing'][0]['nbits'] if len(combined['port']) == 1 else None;
for port in combined['port']:
for packing in port['packing']:
tempMemWidth = packing['nbits'];
if tempMemWidth > self.Get('sram_width'):
tempMemWidth = self.Get('sram_width');
if not memWidth:
memWidth = tempMemWidth;
elif tempMemWidth > memWidth:
memWidth = tempMemWidth;
combined['memWidth'] = memWidth;
# Determine how the memories are packed.
# Note: "ratio" should be non-unity only for RETURN_STACK.
for port in combined['port']:
lane = 0;
for packing in port['packing']:
packing['lane'] = lane;
ratio = CeilPow2((packing['nbits']+memWidth-1)/memWidth);
packing['ratio'] = ratio;
packing['nWords'] = ratio * packing['length'];
lane += ratio;
    # Aggregate parameters for each memory port.
for port in combined['port']:
ratio = CeilPow2(sum(packing['ratio'] for packing in port['packing']));
maxLength = max(packing['length'] for packing in port['packing']);
port['ratio'] = ratio;
port['nWords'] = ratio * maxLength;
combined['port'][0]['offset'] = 0;
if len(combined['port']) > 1:
if combined['mems'][0] == 'INSTRUCTION':
nWordsTail = combined['port'][1]['nWords'];
port0 = combined['port'][0];
if port0['nWords'] <= nWordsTail:
raise SSBCCException('INSTRUCTION length too small for "COMBINE INSTRUCTION,..." at %s' % combined['loc']);
port0['nWords'] -= nWordsTail;
port0['packing'][0]['nWords'] -= nWordsTail;
port0['packing'][0]['length'] -= nWordsTail;
else:
maxNWords = max(port['nWords'] for port in combined['port']);
for port in combined['port']:
port['nWords'] = maxNWords;
combined['port'][1]['offset'] = combined['port'][0]['nWords'];
combined['nWords'] = sum(port['nWords'] for port in combined['port']);
def ProcessCombine(self,loc,line):
"""
Parse the "COMBINE" configuration command as follows:\n
Validate the arguments to the "COMBINE" configuration command and append
the list of combined memories and the associated arguments to "combine"
property.\n
The argument consists of one of the following:
INSTRUCTION,{DATA_STACK,RETURN_STACK,rom_list}
DATA_STACK
DATA_STACK,{RETURN_STACK,rom_list}
RETURN_STACK
RETURN_STACK,{DATA_STACK,rom_list}
mem_list
where rom_list is a comma separated list of one or more ROMs and mem_list is
a list of one or more RAMs or ROMs.
"""
# Perform some syntax checking and get the list of memories to combine.
cmd = re.findall(r'\s*COMBINE\s+(\S+)\s*$',line);
if not cmd:
raise SSBCCException('Malformed COMBINE configuration command on %s' % loc);
mems = re.split(r',',cmd[0]);
if (len(mems)==1) and ('INSTRUCTION' in mems):
raise SSBCCException('"COMBINE INSTRUCTION" doesn\'t make sense at %s' % loc);
if ('INSTRUCTION' in mems) and (mems[0] != 'INSTRUCTION'):
raise SSBCCException('"INSTRUCTION" must be listed first in COMBINE configuration command at %s' % loc);
recognized = ['INSTRUCTION','DATA_STACK','RETURN_STACK'] + self.memories['name'];
unrecognized = [memName for memName in mems if memName not in recognized];
if unrecognized:
raise SSBCCException('"%s" not recognized in COMBINE configuration command at %s' % (unrecognized[0],loc,));
alreadyUsed = [memName for memName in mems if self.IsCombined(memName)];
if alreadyUsed:
raise SSBCCException('"%s" already used in COMBINE configuration command before %s' % (alreadyUsed[0],loc,));
    repeated = [mems[ix] for ix in range(len(mems)-1) if mems[ix] in mems[ix+1:]];
if repeated:
raise SSBCCException('"%s" repeated in COMBINE configuration command on %s' % (repeated[0],loc,));
# Count the number of the different memory types being combined and validate the combination.
nSinglePort = sum([thisMemName in ('INSTRUCTION','DATA_STACK','RETURN_STACK',) for thisMemName in mems]);
nROM = len([thisMemName for thisMemName in mems if self.IsROM(thisMemName)]);
nRAM = len([thisMemName for thisMemName in mems if self.IsRAM(thisMemName)]);
if nRAM > 0:
nRAM += nROM;
nROM = 0;
if nROM > 0:
nSinglePort += 1;
nDualPort = 1 if nRAM > 0 else 0;
if nSinglePort + 2*nDualPort > 2:
raise SSBCCException('Too many ports required for COMBINE configuration command at %s' % loc);
# Append the listed memory types to the list of combined memories.
self.config['combine'].append({'mems':mems, 'memArch':'sync', 'loc':loc});
def ProcessInport(self,loc,line):
"""
Parse the "INPORT" configuration commands as follows:
The configuration command is well formatted.
The number of signals matches the corresponding list of signal declarations.
The port name starts with 'I_'.
The signal declarations are valid.
n-bit where n is an integer
set-reset
strobe
That no other signals are specified in conjunction with a "set-reset" signal.
      The total input data width does not exceed the maximum data width.\n
The input port is appended to the list of inputs as a tuple. The first
entry in the tuple is the port name. The subsequent entries are tuples
consisting of the following:
signal name
signal width
signal type
"""
cmd = re.findall(r'\s*INPORT\s+(\S+)\s+(\S+)\s+(I_\w+)\s*$',line);
if not cmd:
raise SSBCCException('Malformed INPORT statement at %s: "%s"' % (loc,line[:-1],));
modes = re.findall(r'([^,]+)',cmd[0][0]);
names = re.findall(r'([^,]+)',cmd[0][1]);
portName = cmd[0][2];
if len(modes) != len(names):
raise SSBCCException('Malformed INPORT configuration command -- number of options don\'t match on %s: "%s"' % (loc,line[:-1],));
# Append the input signal names, mode, and bit-width to the list of I/Os.
has__set_reset = False;
nBits = 0;
thisPort = (portName,);
for ix in range(len(names)):
if re.match(r'^\d+-bit$',modes[ix]):
thisNBits = int(modes[ix][0:-4]);
self.AddIO(names[ix],thisNBits,'input',loc);
thisPort += ((names[ix],thisNBits,'data',),);
nBits = nBits + thisNBits;
elif modes[ix] == 'set-reset':
has__set_reset = True;
self.AddIO(names[ix],1,'input',loc);
thisPort += ((names[ix],1,'set-reset',),);
self.AddSignal('s_SETRESET_%s' % names[ix],1,loc);
elif modes[ix] == 'strobe':
self.AddIO(names[ix],1,'output',loc);
thisPort += ((names[ix],1,'strobe',),);
else:
raise SSBCCException('Unrecognized INPORT signal type "%s"' % modes[ix]);
if has__set_reset and len(names) > 1:
raise SSBCCException('set-reset cannot be simultaneous with other signals in "%s"' % line[:-1]);
if nBits > self.Get('data_width'):
raise SSBCCException('Signal width too wide in "%s"' % line[:-1]);
self.AddInport(thisPort,loc);
def ProcessOutport(self,line,loc):
"""
Parse the "OUTPORT" configuration commands as follows:
The configuration command is well formatted.
The number of signals matches the corresponding list of signal declarations.
The port name starts with 'O_'.
The signal declarations are valid.
n-bit[=value]
strobe
      The total output data width does not exceed the maximum data width.\n
The output port is appended to the list of outports as a tuple. The first
entry in this tuple is the port name. The subsequent entries are tuples
consisting of the following:
signal name
signal width
signal type
initial value (optional)
"""
cmd = re.findall(r'^\s*OUTPORT\s+(\S+)\s+(\S+)\s+(O_\w+)\s*$',line);
if not cmd:
      raise SSBCCException('Malformed OUTPORT configuration command on %s: "%s"' % (loc,line[:-1],));
modes = re.findall(r'([^,]+)',cmd[0][0]);
names = re.findall(r'([^,]+)',cmd[0][1]);
portName = cmd[0][2];
if len(modes) != len(names):
raise SSBCCException('Malformed OUTPORT configuration command -- number of widths/types and signal names don\'t match on %s: "%s"' % (loc,line[:-1],));
# Append the input signal names, mode, and bit-width to the list of I/Os.
nBits = 0;
isStrobeOnly = True;
thisPort = tuple();
for ix in range(len(names)):
if re.match(r'\d+-bit',modes[ix]):
isStrobeOnly = False;
a = re.match(r'(\d+)-bit(=\S+)?$',modes[ix]);
if not a:
          raise SSBCCException('Malformed bitwidth/bitwidth=initialization on %s: "%s"' % (loc,modes[ix],));
thisNBits = int(a.group(1));
self.AddIO(names[ix],thisNBits,'output',loc);
if a.group(2):
thisPort += ((names[ix],thisNBits,'data',a.group(2)[1:],),);
else:
thisPort += ((names[ix],thisNBits,'data',),);
nBits = nBits + thisNBits;
self.config['haveBitOutportSignals'] = 'True';
elif modes[ix] == 'strobe':
self.AddIO(names[ix],1,'output',loc);
thisPort += ((names[ix],1,'strobe',),);
else:
raise SSBCCException('Unrecognized OUTPORT signal type on %s: "%s"' % (loc,modes[ix],));
if nBits > 8:
raise SSBCCException('Signal width too wide on %s: in "%s"' % (loc,line[:-1],));
self.AddOutport((portName,isStrobeOnly,)+thisPort,loc);
def ProcessPeripheral(self,loc,line):
"""
Process the "PERIPHERAL" configuration command as follows:
Validate the format of the configuration command.
Find the peripheral in the candidate list of paths for peripherals.
Execute the file declaring the peripheral.
Note: This is done since I couldn't find a way to "import" the
peripheral. Executing the peripheral makes its definition local
            to this invocation of the ProcessPeripheral function, but the
object subsequently created retains the required functionality
to instantiate the peripheral
Go through the parameters for the peripheral and do the following for each:
If the argument for the peripheral is the string "help", then print the
docstring for the peripheral and exit.
Append the parameter name and its argument to the list of parameters
(use "None" as the argument if no argument was provided).
Append the instantiated peripheral to the list of peripherals.
Note: The "exec" function dynamically executes the instruction to
instantiate the peripheral and append it to the list of
peripherals.
"""
    # Validate the format of the peripheral configuration command and the name of the peripheral.
cmd = re.findall(r'\s*PERIPHERAL\s+(\w+)\s*(.*)$',line);
if not cmd:
raise SSBCCException('Missing peripheral name in %s: %s' % (loc,line[:-1],));
peripheral = cmd[0][0];
# Find and execute the peripheral Python script.
# Note: Because "execfile" and "exec" method are used to load the
# peripheral python script, the __file__ object is set to be this
# file, not the peripheral source file.
for testPath in self.peripheralpaths:
fullperipheral = os.path.join(testPath,'%s.py' % peripheral);
if os.path.isfile(fullperipheral):
break;
else:
raise SSBCCException('Peripheral "%s" not found' % peripheral);
execfile(fullperipheral);
# Convert the space delimited parameters to a list of tuples.
param_list = list();
for param_string in re.findall(r'(\w+="[^"]*"|\w+=\S+|\w+)\s*',cmd[0][1]):
if param_string == "help":
exec('helpmsg = %s.__doc__' % peripheral);
if not helpmsg:
raise SSBCCException('No help for peripheral %s is provided' % fullperipheral);
print;
print 'Help message for peripheral: %s' % peripheral;
print 'Located at: %s' % fullperipheral;
print;
print helpmsg;
raise SSBCCException('Terminated by "help" for peripheral %s' % peripheral);
ix = param_string.find('=');
if param_string.find('="') > 0:
param_list.append((param_string[:ix],param_string[ix+2:-1],));
elif param_string.find('=') > 0:
param_list.append((param_string[:ix],param_string[ix+1:],));
else:
param_list.append((param_string,None));
# Add the peripheral to the micro controller configuration.
exec('self.peripheral.append(%s(fullperipheral,self,param_list,loc));' % peripheral);
def Set(self,name,value):
"""
Create or override the specified attribute in the ssbccConfig object.
"""
self.config[name] = value;
def SetMemoryBlock(self,name,value,errorInfo):
"""
Set an attribute in the ssbccConfig object for the specified memory with
the specified memory architecture.\n
"value" must be a string with the format "\d+" or "\d+*\d+" where "\d+" is
an integer. The first format specifies a single memory with the stated
    size and the size must be a power of two. The second format specifies
allocation of multiple memory blocks where the size is given by the first
integer and must be a power of 2 and the number of blocks is given by the
second integer and doesn't need to be a power of 2.
"""
findStar = value.find('*');
if findStar == -1:
blockSize = int(value);
nBlocks = 1;
else:
blockSize = int(value[0:findStar]);
nBlocks = int(value[findStar+1:]);
nbits_blockSize = int(round(math.log(blockSize,2)));
if blockSize != 2**nbits_blockSize:
raise SSBCCException('block size must be a power of 2 at %s: "%s"' % errorInfo);
nbits_nBlocks = CeilLog2(nBlocks);
self.Set(name, dict(
length=blockSize*nBlocks,
nbits=nbits_blockSize+nbits_nBlocks,
blockSize=blockSize,
nbits_blockSize=nbits_blockSize,
nBlocks=nBlocks,
nbits_nBlocks=nbits_nBlocks));
def SetMemoryParameters(self,memParam,values):
"""
Record the body of the specified memory based on the assembler output.
"""
index = memParam['index'];
for field in values:
if field not in self.memories:
self.memories[field] = list();
for ix in range(len(self.memories['name'])):
self.memories[field].append(None);
self.memories[field][index] = values[field];
def SignalLengthList(self):
"""
Generate a list of the I/O signals and their lengths.
"""
outlist = list();
for io in self.ios:
if io[2] == 'comment':
continue;
outlist.append((io[0],io[1],));
return outlist;
|
[
"sinclairrf@a4a847ef-c1ac-4a6e-99c4-be67e24668ce"
] |
sinclairrf@a4a847ef-c1ac-4a6e-99c4-be67e24668ce
|
c8f797faafcd39ea1bd701f5832affb0e3450ec8
|
694fc28bcd02568f027607d6a0553abf4bcd2fa4
|
/debian/crud_init/crud_init/urls.py
|
8298b43d353d24f085285578319f2f6673a1d68f
|
[] |
no_license
|
ajloinformatico/Django-Concesionario-Crud-APP
|
53e86aa4ae2f0f7ca1a111b38a6f6d7dd9806c78
|
cfe78ab5aab58267f8d153cb9c3e5a21f01703e5
|
refs/heads/main
| 2023-03-20T08:09:04.371828
| 2021-03-09T18:34:04
| 2021-03-09T18:34:04
| 318,833,092
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
"""crud_init URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from aplicaciones.concesionario.views import loging, register, crud, add_car, edit_car, delete_car
urlpatterns = [
path('admin/', admin.site.urls),
    path('', loging, name='index'),  # an empty pattern maps the site root (localhost:8000)
    path('register/', register, name='register'),  # registration
path('crud/', crud, name='crud'),
path('add_car/', add_car, name='add_car'),
path('edit_car/<int:id>/', edit_car, name='edit_car'),
path('delete_car/<int:id>/', delete_car, name='delete_car')
]
|
[
"ajloinformatico@gmail.com"
] |
ajloinformatico@gmail.com
|
7eb0a27e9a13e4e5c3b60d8db374343ffbb07b17
|
8b5990d3306d53ef953c1628deacdfe39115c118
|
/py_file.py
|
631b9201557777ed5aac024da582825a1d85f7e4
|
[] |
no_license
|
Parvathi1129/Test
|
974d498fa8080182c1b7184233d253295814bd3f
|
1a419663b74646bd0bf508e5f8341a3456db7571
|
refs/heads/master
| 2021-01-22T23:05:58.372334
| 2017-09-22T07:05:10
| 2017-09-22T07:05:10
| 92,800,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
num = 407
# take input from the user
# num = int(input("Enter a number: "))
# prime numbers are greater than 1
if num > 1:
# check for factors
for i in range(2,num):
if (num % i) == 0:
print(num,"is not a prime number")
print(i,"times",num//i,"is",num)
break
else:
print(num,"is a prime number")
# if input number is less than
# or equal to 1, it is not prime
else:
print(num,"is not a prime number")
|
[
"noreply@github.com"
] |
Parvathi1129.noreply@github.com
|
5c0333f99729273b3ade42126951336ff76847d1
|
f338c9b1a8b2b85dbb72705c55494d9253b3224e
|
/wavetable_gen_tools/miscfiltering/firplayground.py
|
ff352ec3c46ab3464bc99ff34dee1cd7aa70e0f8
|
[] |
no_license
|
starlingcode/viatools
|
f3ca2575b9bbd08e9a52027fefe55511540b7cb0
|
15868f5cc2104ba458defe9826fc190a782a56aa
|
refs/heads/master
| 2023-04-06T13:57:56.337714
| 2022-03-20T03:52:22
| 2022-03-20T03:52:22
| 125,312,110
| 0
| 0
| null | 2022-03-17T21:06:17
| 2018-03-15T04:27:30
|
Python
|
UTF-8
|
Python
| false
| false
| 704
|
py
|
lines = [line.rstrip('\n') for line in open('downloadedfiltercoeffs.txt')]
lines = [x.strip('\t') for x in lines]
lines = [x.strip('y[n] = (') for x in lines]
lines = [x.strip('+ (') for x in lines]
lines = [x.replace('*', '') for x in lines]
parsed_file = []
for line in lines:
if line != '':
parsed_file.append(line.split())
print(parsed_file)
coeffs = []
for line in parsed_file:
coeffs.append(int(65536*float(line[0])))
print(coeffs)
text_file = open('filtercoeffs.h', "w")
for i in range(0,11):
text_file.write("#define a" + str(i) + ' ' + str(coeffs[10 - i]) + '\n')
for i in range(0,10):
text_file.write("#define b" + str(i) + ' ' + str(coeffs[20 - i]) + '\n')
|
[
"liquidcitymotors@gmail.com"
] |
liquidcitymotors@gmail.com
|
0d8f76b499ac816e3bd0061d7450637456aaa4d7
|
1681332a25e5130517c403bb7a860ca30506d5ea
|
/res/dlworkshop/conv_test.py
|
abb17bf25cec4bb3835f22de94b69b03e0211a02
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
dgyrt/dgyrt.github.io
|
240550826aa031323db1f64b00b36db1ac3d65df
|
fac6c1a9d10d8e87bad6e80aa96027b84975ee1d
|
refs/heads/master
| 2020-05-21T12:23:00.437395
| 2017-01-31T14:05:39
| 2017-01-31T14:05:39
| 43,422,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,173
|
py
|
"""" convnet test """
import os;
import gzip;
import cPickle as pickle;
import numpy as np;
import theano;
import theano.tensor as T;
from theano.tensor.nnet import conv;
from theano.tensor.signal import downsample;
n_epochs=100;
batch_size=100;
def relu(x):
return x*(x>1e-13);
class ConvLayer(object):
def __init__(self, filter_size, num_filters, num_channels, fm_size, batch_size, **kwargs):
self.filter_size=filter_size;
self.num_filters=num_filters;
self.num_channels=num_channels;
self.fm_size=fm_size;
self.batch_size=batch_size;
super(ConvLayer, self).__init__(**kwargs);
self.initialize();
self.params=[self.filters, self.bias];
def initialize(self):
filter_shape=(self.num_filters, self.num_channels)+(self.filter_size);
self.filters=theano.shared(np.asarray(np.random.uniform(low=-0.0001,
high=0.0001,
size=filter_shape),
dtype="float32"),
borrow=True);
self.bias=theano.shared(np.asarray(np.zeros((self.num_filters, )),
dtype="float32"), borrow=True);
def apply_lin(self, X):
Y=conv.conv2d(input=X,
filters=self.filters,
image_shape=(self.batch_size, self.num_channels)+(self.fm_size),
filter_shape=(self.num_filters, self.num_channels)+(self.filter_size));
Y+=self.bias.dimshuffle('x', 0, 'x', 'x');
return Y;
class ReLUConvLayer(ConvLayer):
def __init__(self, **kwargs):
super(ReLUConvLayer, self).__init__(**kwargs);
def apply(self, X):
return relu(self.apply_lin(X));
class MaxPooling(object):
def __init__(self, pool_size):
self.pool_size=pool_size;
def apply(self, X):
return downsample.max_pool_2d(X, self.pool_size);
class Layer(object):
def __init__(self, in_dim, out_dim, W=None, b=None, **kwargs):
self.in_dim=in_dim;
self.out_dim=out_dim;
self.W=W;
self.b=b;
self.initialize();
super(Layer, self).__init__(**kwargs);
self.params=[self.W, self.b];
def initialize(self):
        if self.W is None:
self.W=theano.shared(np.asarray(np.random.uniform(low=-0.0001,
high=0.0001,
size=(self.in_dim, self.out_dim)),
dtype="float32"),
borrow=True);
        if self.b is None:
self.b=theano.shared(np.asarray(np.zeros((self.out_dim, )),
dtype="float32"), borrow=True);
def apply_lin(self, X):
return T.dot(X, self.W)+self.b;
class ReLULayer(Layer):
def __init__(self, **kwargs):
super(ReLULayer, self).__init__(**kwargs);
def apply(self, X):
return relu(self.apply_lin(X));
class TanhLayer(Layer):
def __init__(self, **kwargs):
super(TanhLayer, self).__init__(**kwargs);
def apply(self, X):
return T.tanh(self.apply_lin(X));
class SoftmaxLayer(Layer):
def __init__(self, **kwargs):
super(SoftmaxLayer, self).__init__(**kwargs);
def apply(self, X):
return T.nnet.softmax(self.apply_lin(X));
def predict(self, X_out):
return T.argmax(X_out, axis=1);
def error(self, X_out, Y):
return T.mean(T.neq(self.predict(X_out), Y));
# load dataset
def shared_dataset(data_xy):
data_x, data_y=data_xy;
shared_x=theano.shared(np.asarray(data_x, dtype="float32"),
borrow=True);
shared_y=theano.shared(np.asarray(data_y, dtype="float32"),
borrow=True);
return shared_x, T.cast(shared_y, "int32");
def load_mnist(dataset):
f=gzip.open(dataset, 'rb');
train_set, valid_set, test_set=pickle.load(f);
f.close();
train_set_x, train_set_y=shared_dataset(train_set);
valid_set_x, valid_set_y=shared_dataset(valid_set);
test_set_x, test_set_y=shared_dataset(test_set);
return [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)];
dataset=load_mnist("mnist.pkl.gz");
train_set_x, train_set_y=dataset[0];
valid_set_x, valid_set_y=dataset[1];
test_set_x, test_set_y=dataset[2];
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_valid_batches=valid_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;
print n_train_batches
print n_valid_batches
print n_test_batches
print "dataset loaded"
# build model
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();
images=X.reshape((batch_size, 1, 28, 28));
### configure some layers
### build some convlayers
layer_0=ReLUConvLayer(filter_size=(7,7), num_filters=10, num_channels=1,
fm_size=(28, 28), batch_size=batch_size);
pool_0=MaxPooling((2,2));
layer_1=ReLUConvLayer(filter_size=(4,4), num_filters=10, num_channels=10,
fm_size=(11,11), batch_size=batch_size);
pool_1=MaxPooling((2,2));
layer_2=ReLULayer(in_dim=160, out_dim=100);
layer_3=SoftmaxLayer(in_dim=100, out_dim=10);
### compile some model
out=pool_1.apply(layer_1.apply(pool_0.apply(layer_0.apply(images))))
out=out.flatten(ndim=2);
out=layer_3.apply(layer_2.apply(out));
cost=T.nnet.categorical_crossentropy(out, y).mean();
params=layer_0.params+layer_1.params+layer_2.params+layer_3.params;
#### calculate the updates of each params
gparams=T.grad(cost, params);
from collections import OrderedDict;
updates=OrderedDict();
for param, gparam in zip(params, gparams):
updates[param]=param-0.01*gparam;
train=theano.function(inputs=[idx],
outputs=cost,
updates=updates,
givens={X: train_set_x[idx*batch_size: (idx+1)*batch_size],
y: train_set_y[idx*batch_size: (idx+1)*batch_size]});
test=theano.function(inputs=[idx],
outputs=layer_3.error(out, y),
givens={X: test_set_x[idx*batch_size: (idx+1)*batch_size],
y: test_set_y[idx*batch_size: (idx+1)*batch_size]});
print "the model is built :)"
# train the model
test_record=np.zeros((n_epochs, 1));
epoch=0;
while (epoch<n_epochs):
epoch+=1;
for minibatch_index in xrange(n_train_batches):
mlp_train_cost=train(minibatch_index);
iteration=(epoch-1)*n_train_batches+minibatch_index;
if (iteration+1)%n_train_batches==0:
print "MLP model";
test_cost=[test(i) for i in xrange(n_test_batches)];
test_record[epoch-1]=np.mean(test_cost);
print " epoch %i, test error %f %%" % (epoch, test_record[epoch-1]*100.);
|
[
"duguyue100@gmail.com"
] |
duguyue100@gmail.com
|
721a102e40b391250ee3101e851acdd76b192386
|
34f29e764609930da0b3d3d7db18dc63ab1b4a97
|
/util/tasks/trainInvV2_2.py
|
d118d52a2810437ab0796e678089eb538f9bbefd
|
[] |
no_license
|
samhu1989/RAtlasNet
|
c77fe2a65fcbfb34bfdf78a5e1c7abdcea989341
|
0b2859a620dd15f66c4af1355eb79356ee335507
|
refs/heads/master
| 2020-04-15T01:01:33.874790
| 2019-05-30T15:00:13
| 2019-05-30T15:00:13
| 164,260,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,503
|
py
|
#
import os;
from .task import Task;
from ..ply import *;
from ..Lutils import *;
from ..utils import *;
from ..datasets import *;
import torch;
from torch.autograd import Variable;
from torch.utils.data import DataLoader;
import torch.nn as nn
import math;
import json;
sys.path.append("./ext/");
import dist_chamfer as ext;
distChamfer = ext.chamferDist();
def eval_ae(net,pts):
with torch.no_grad():
points = Variable(pts);
points = points.transpose(2,1).contiguous();
points = points.cuda();
out = net(points);
dist1, dist2 = distChamfer(points.transpose(2,1).contiguous(),out['y']);
cd = (torch.mean(dist1)) + (torch.mean(dist2))
inv_err = torch.mean(torch.sum((out['inv_x'] - out['grid_x'])**2,dim=2));
return cd.data.cpu().numpy(),inv_err.data.cpu().numpy();
def train_ae(net,optim,cd_meter,inv_meter,pts,opt):
optim.zero_grad();
points = Variable(pts,requires_grad=True);
points = points.transpose(2,1).contiguous();
points = points.cuda();
out = net(points);
dist1, dist2 = distChamfer(points.transpose(2,1).contiguous(),out['y']);
cd = (torch.mean(dist1)) + (torch.mean(dist2))
inv_err = torch.mean(torch.sum((out['inv_x'] - out['grid_x'])**2,dim=2));
cd_meter.update(cd.data.cpu().numpy());
inv_meter.update(inv_err.data.cpu().numpy())
inv_gt = out['invmap'](points);
dist1, dist2 = distChamfer(inv_gt,out['grid_x']);
inv_cd = (torch.mean(dist1)) + (torch.mean(dist2))
loss = inv_cd + opt['w']*inv_err + cd;
loss.backward();
optim.step();
return loss,cd,inv_err;
def eval_svr(net,pts,img):
with torch.no_grad():
img = Variable(img);
img = img.cuda();
points = Variable(pts);
points = points.cuda();
out = net(img);
dist1, dist2 = distChamfer(points,out['y']);
cd = (torch.mean(dist1)) + (torch.mean(dist2));
inv_err = torch.mean(torch.sum((out['inv_x'] - out['grid_x'])**2,dim=2));
return cd.data.cpu().numpy(),inv_err.data.cpu().numpy();
def train_svr(net,optim,cd_meter,inv_meter,pts,img,opt):
optim.zero_grad();
img = Variable(img,requires_grad=True);
img = img.cuda();
points = Variable(pts);
points = points.cuda();
out = net(img);
dist1, dist2 = distChamfer(points,out['y']);
cd = (torch.mean(dist1)) + (torch.mean(dist2));
inv_err = torch.mean(torch.sum((out['inv_x'] - out['grid_x'])**2,dim=2));
cd_meter.update(cd.data.cpu().numpy());
inv_meter.update(inv_err.data.cpu().numpy());
loss = cd + opt['w']*inv_err;
loss.backward();
optim.step();
return loss,cd,inv_err;
def write_log(logfile,val_cd,val_inv,dataset_test,train_cd=None,train_inv=None,epoch=None):
log_dict = {};
log_dict['val_cd'] = val_cd.avg;
log_dict['val_inv'] = val_inv.avg;
for item in dataset_test.cat:
print(item,dataset_test.perCatValueMeter[item].avg)
log_dict.update({item:dataset_test.perCatValueMeter[item].avg})
if train_cd is not None:
log_dict['train_cd'] = train_cd.avg;
if train_inv is not None:
log_dict['train_inv'] = train_inv.avg;
if epoch is not None:
log_dict['epoch'] = epoch;
logfile.write('json_stats: '+json.dumps(log_dict)+'\n');
return;
bestnum = 3;
best_cd = np.zeros(bestnum);
best_all = np.zeros(bestnum);
def save_model(logtxt,dirname,net,opt,vcd,vall):
global best_cd;
global best_all;
cdname = dirname+os.sep+opt['mode']+'gn'+str(opt['grid_num'])+'_cd';
allname = dirname+os.sep+opt['mode']+'gn'+str(opt['grid_num'])+'_all';
name = dirname+os.sep+opt['mode']+'gn'+str(opt['grid_num'])+'_current';
sdict = net.state_dict();
torch.save(sdict,name+'.pth');
if vcd < best_cd[-1]:
best_cd[-1] = vcd;
best_cd = np.sort(best_cd);
bidx = np.searchsorted(best_cd,vcd);
for idx in range(bestnum-2,bidx-1,-1):
if os.path.exists(cdname+'_%d'%idx+'.pth'):
if os.path.exists(cdname+'_%d'%(idx+1)+'.pth'):
os.remove(cdname+'_%d'%(idx+1)+'.pth');
print('rename '+cdname+'_%d'%(idx)+'.pth'+' '+cdname+'_%d'%(idx+1)+'.pth');
os.rename(cdname+'_%d'%(idx)+'.pth',cdname+'_%d'%(idx+1)+'.pth');
print('saving model at '+cdname+'_%d'%(bidx)+'.pth');
torch.save(sdict,cdname+'_%d'%(bidx)+'.pth');
logtxt.write('saving model at '+cdname+'_%d'%(bidx)+'.pth\n');
logtxt.write('best_cd:'+np.array2string(best_cd,precision=6,separator=',')+'\n');
if vall < best_all[-1]:
best_all[-1] = vall;
best_all = np.sort(best_all);
bidx = np.searchsorted(best_all,vall);
for idx in range(bestnum-2,bidx-1,-1):
if os.path.exists(allname+'_%d'%idx+'.pth'):
if os.path.exists(allname+'_%d'%(idx+1)+'.pth'):
os.remove(allname+'_%d'%(idx+1)+'.pth');
print('rename '+allname+'_%d'%(idx)+'.pth'+' '+allname+'_%d'%(idx+1)+'.pth');
os.rename(allname+'_%d'%(idx)+'.pth',allname+'_%d'%(idx+1)+'.pth');
        print('saving model at '+allname+'_%d'%(bidx)+'.pth');
        torch.save(sdict,allname+'_%d'%(bidx)+'.pth');
logtxt.write('saving model at '+allname+'_%d'%(bidx)+'.pth\n');
logtxt.write('best_all:'+np.array2string(best_all,precision=6,separator=',')+'\n');
def view_color(y,c=None):
if c is None:
c = colorcoord(y);
return pd.concat([pd.DataFrame(y),pd.DataFrame(c)],axis=1,ignore_index=True);
def view_ae(dirname,net,pts,index,cat,opt):
points = Variable(pts, volatile=True);
points = points.transpose(2,1).contiguous();
points = points.cuda();
grid = None;
fidx = None;
if opt.grid_dim == 3:
grid,Li,Lw,fidx = sphere_grid(points.size()[0],opt.pts_num,'cot');
elif opt.grid_dim == 2:
grid,Li,Lw,fidx = patch_grid(points.size()[0],opt.pts_num,opt.grid_num);
grid = Variable(grid,volatile=True);
grid = grid.cuda();
y,inv_err = net(points,grid);
y_inv = net.inv_y;
ply_path = dirname+os.sep+'ply';
if not os.path.exists(ply_path):
os.mkdir(ply_path);
T=np.dtype([("n",np.uint8),("i0",np.int32),('i1',np.int32),('i2',np.int32)]);
face = np.zeros(shape=[fidx.shape[0]],dtype=T);
for i in range(fidx.shape[0]):
face[i] = (3,fidx[i,0],fidx[i,1],fidx[i,2]);
y = y.cpu().data.numpy();
inv_y = net.inv_y.cpu().data.numpy();
grid = grid.transpose(2,1).contiguous().cpu().data.numpy();
c = colorcoord(grid[0,...])
write_ply(ply_path+os.sep+'%02d_%s_grid.ply'%(index,cat[0]),points = view_color(grid[0,...],c),faces=pd.DataFrame(face),color=True);
for i in range(y.shape[0]):
write_ply(ply_path+os.sep+'%02d_%02d_%s.ply'%(index,i,cat[0]),points = view_color(y[i,...],c),faces=pd.DataFrame(face),color=True);
write_ply(ply_path+os.sep+'%02d_%02d_%s_inv.ply'%(index,i,cat[0]),points = view_color(inv_y[i,...],c),faces=pd.DataFrame(face),color=True);
def view_svr(dirname,net,img,index,cat,opt):
img = Variable(img,volatile=True);
img = img.cuda();
grid = None;
fidx = None;
if opt.grid_dim == 3:
        grid,Li,Lw,fidx = sphere_grid(img.size()[0],opt.pts_num,'cot');
    elif opt.grid_dim == 2:
        grid,Li,Lw,fidx = patch_grid(img.size()[0],opt.pts_num,opt.grid_num);
grid = Variable(grid,volatile=True);
grid = grid.cuda();
y,inv_err = net(img,grid);
ply_path = dirname+os.sep+'ply';
if not os.path.exists(ply_path):
os.mkdir(ply_path);
T=np.dtype([("n",np.uint8),("i0",np.int32),('i1',np.int32),('i2',np.int32)]);
face = np.zeros(shape=[fidx.shape[0]],dtype=T);
for i in range(fidx.shape[0]):
face[i] = (3,fidx[i,0],fidx[i,1],fidx[i,2]);
y = y.cpu().data.numpy();
inv_y = net.inv_y.cpu().data.numpy();
grid = grid.transpose(2,1).contiguous().cpu().data.numpy();
c = colorcoord(grid[0,...])
write_ply(ply_path+os.sep+'%02d_%s_grid.ply'%(index,cat[0]),points = view_color(grid[0,...],c),faces=pd.DataFrame(face),color=True);
for i in range(y.shape[0]):
write_ply(ply_path+os.sep+'%02d_%02d_%s.ply'%(index,i,cat[0]),points = view_color(y[i,...],c),faces=pd.DataFrame(face),color=True);
write_ply(ply_path+os.sep+'%02d_%02d_%s_inv.ply'%(index,i,cat[0]),points = view_color(inv_y[i,...],c),faces=pd.DataFrame(face),color=True);
class RealTask(Task):
def __init__(self):
super(RealTask,self).__init__();
self.tskname = os.path.basename(__file__).split('.')[0];
def run(self,*args,**kwargs):
self.start();
self.step();
return;
def start(self):
if self.cnt > 0:
return;
self.SVR = (self.opt['mode']=='SVR');
self.train_data = ShapeNet(SVR=self.SVR,normal = False,class_choice = None,train=True);
self.train_load = DataLoader(self.train_data,batch_size=self.opt['batchSize'],shuffle=True, num_workers=int(self.opt['workers']));
self.valid_data = ShapeNet(SVR=self.SVR,normal = False,class_choice = None,train=False);
self.valid_load = DataLoader(self.valid_data,batch_size=self.opt['batchSize'],shuffle=False, num_workers=int(self.opt['workers']));
self.load_pretrain();
#
self.train_cd = AverageValueMeter();
self.train_inv = AverageValueMeter();
self.valid_cd = AverageValueMeter();
self.valid_inv = AverageValueMeter();
self.optim = optim.Adam(self.net.parameters(),lr=self.opt['lr'],weight_decay=self.opt['weight_decay']);
for group in self.optim.param_groups:
group.setdefault('initial_lr', group['lr']);
self.lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optim,40,eta_min=0,last_epoch=self.opt['last_epoch']);
#
self.train_loss_acc0 = 1e-9;
self.train_loss_accs = 0;
self.eval();
write_log(self.logtxt,self.valid_cd,self.valid_inv,self.valid_data,None,None,self.cnt);
best_all.fill(self.opt['w']*self.valid_inv.avg+self.valid_cd.avg);
best_cd.fill(self.valid_cd.avg)
def eval(self):
self.valid_cd.reset();
self.valid_inv.reset();
for item in self.valid_data.cat:
self.valid_data.perCatValueMeter[item].reset();
self.net.eval();
for i, data in enumerate(self.valid_load, 0):
img, points, cat, _, _ = data;
if self.SVR:
cd,inv = eval_svr(self.net,points,img);
else:
cd,inv = eval_ae(self.net,points);
self.valid_cd.update(cd);
self.valid_inv.update(inv);
self.valid_data.perCatValueMeter[cat[0]].update(cd);
print('[%d: %d/%d] val loss:%f ' %(self.cnt,i,len(self.valid_data)/self.opt['batchSize'],cd));
def train(self):
self.lr_scheduler.step();
self.net.train()
for i, data in enumerate(self.train_load, 0):
img, points, cat, _ , _= data;
if self.SVR:
loss,cd,inv_err = train_svr(self.net,self.optim,self.train_cd,self.train_inv,points,img,self.opt);
else:
loss,cd,inv_err = train_ae(self.net,self.optim,self.train_cd,self.train_inv,points,self.opt);
self.train_loss_accs = self.train_loss_accs * 0.99 + loss.data.cpu().numpy();
self.train_loss_acc0 = self.train_loss_acc0 * 0.99 + 1;
print('[%d: %d/%d] train loss:%f,%f,%f/%f' %(self.cnt+self.opt['last_epoch'],i,len(self.train_data)//self.opt['batchSize'],cd.data.cpu().numpy(),inv_err.data.cpu().numpy(),loss.data.cpu().numpy(),self.train_loss_accs/self.train_loss_acc0));
def load_pretrain(self):
if self.opt['model']!='':
partial_restore(self.net,self.opt['model']);
print("Previous weights loaded");
def step(self):
if self.cnt == 0:
return;
self.train();
self.eval();
write_log(self.logtxt,self.valid_cd,self.valid_inv,self.valid_data,self.train_cd,self.train_inv,self.cnt+self.opt['last_epoch']);
save_model(self.logtxt,self.tskdir,self.net,self.opt,self.valid_cd.avg,self.valid_cd.avg+self.opt['w']*self.valid_inv.avg);
def createOptim(self):
self.optim = optim.Adam(self.net.parameters(),lr = self.opt['lr'],weight_decay=self.opt['weight_decay']);
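# --- Added note (not part of the original file): the top-k checkpoint
# --- bookkeeping in save_model(), illustrated on plain numbers. The best_*
# --- arrays are kept sorted ascending; a better score replaces the worst entry,
# --- and np.searchsorted gives the rank used to rotate the saved .pth files:
# best = np.array([0.10, 0.20, 0.30]); new = 0.15
# if new < best[-1]:
#     best[-1] = new; best = np.sort(best)     # -> [0.10, 0.15, 0.20]
#     rank = np.searchsorted(best, new)        # -> 1, saved as *_1.pth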
|
[
"hsy19891228@yeah.net"
] |
hsy19891228@yeah.net
|
5eb6a7c2e27516ddc783d6c5f4528f7e5d755141
|
6dcf84980cc44c9cd0774ce95b248bfc0c81e2c0
|
/kNeighbors/handwritingClassifier.py
|
61f12e722691a6ff0a0137434968285707221033
|
[] |
no_license
|
wangdx2116/machineLearning
|
d3cd5a6e98b6938afe677789b629a56b68b78b6d
|
2360927a11c1e3420c0e6690ba360fe79dcfcb84
|
refs/heads/master
| 2020-05-18T18:23:07.370939
| 2018-02-10T07:37:10
| 2018-02-10T07:37:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,377
|
py
|
# -*- coding: utf-8 -*-
# k-nearest-neighbour implementation of handwritten digit recognition
# Author: dyxm
from numpy import *
from os import listdir
import operator
# Read an image file into a vector
def img2vector(filename):
    # create the vector
    returnVect = zeros((1, 1024))
    # open the data file and read it line by line
    fr = open(filename)
    for i in range(32):
        # read one line
        lineStr = fr.readline()
        # store the first 32 characters of the line in the vector as ints
        for j in range(32):
            returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
# kNN algorithm
def classify0(inX, dataSet, labels, k):
    # number of training samples
    dataSetSize = dataSet.shape[0]
    # matrix arithmetic: difference between the test vector and every sample
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    # square the differences and sum them per row
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)
    # take the square root to get the distance vector
    distances = sqDistances ** 0.5
    # sort by distance, ascending
    sortedDistIndicies = distances.argsort()
    classCount = {}
    # take the k nearest samples in turn
    for i in range(k):
        # record the class each sample belongs to
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # sort the classes by frequency, descending
    sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
    # return the most frequent class
    return sortedClassCount[0][0]
# Test the algorithm
def handwritingClassTest():
    # class labels of the training samples
    hwLabels = []
    # training sample file list
    trainingFileList = listdir('digits/trainingDigits')
    m = len(trainingFileList)
    # initialise the training data matrix (m x 1024)
    trainingMat = zeros((m, 1024))
    # read every training sample into the matrix
    for i in range(m):
        # extract the digit from the file name
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])
        hwLabels.append(classNumStr)
        # store the sample in the matrix
        trainingMat[i, :] = img2vector('digits/trainingDigits/%s' % fileNameStr)
    # iterate over the test data
    testFileList = listdir('digits/testDigits')
    # initialise the error count
    errorCount = 0.0
    mTest = len(testFileList)
    # test every data file in turn
    for i in range(mTest):
        # extract the digit from the file name
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumStr = int(fileStr.split('_')[0])
        # extract the data vector
        vectorUnderTest = img2vector('digits/testDigits/%s' % fileNameStr)
        # classify the data file
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        # print the kNN result and the true class
        print "the classifier came back with: %d, the real answer is: %d" % (classifierResult, classNumStr)
        # check whether the kNN result is correct
        if (classifierResult != classNumStr): errorCount += 1.0
    # print the error rate
    print "\nthe total number of errors is: %d" % errorCount
    print "\nthe total error rate is: %f" % (errorCount / float(mTest))
# run the test
handwritingClassTest()
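# --- Added sketch (not part of the original file): classify0 on a toy data set,
# --- independent of the digit files, to show the kNN mechanics in isolation.
# toy_data = array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
# toy_labels = ['A', 'A', 'B', 'B']
# print classify0([0.1, 0.1], toy_data, toy_labels, 3)   # -> 'B'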
|
[
"857230132@qq.com"
] |
857230132@qq.com
|
a0a3e8f4dab8d2d3cc6497f8b4e8c5507e50f494
|
9497432cd07d17be15853544197853d1ae7ae472
|
/encryption files/hashing/sha384hash.py
|
1880fd67f6fa014e3adfcf43b48c4f4a11238ba8
|
[] |
no_license
|
SeresAdrian/Crypto-Project
|
e99be9c2bf9155e1a54be4419d5626633fd2b333
|
4c2fd709f667bdfa71bc5fadd9b47a1c79f59c6a
|
refs/heads/master
| 2022-07-25T13:54:46.704949
| 2020-05-18T19:40:42
| 2020-05-18T19:40:42
| 265,021,044
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
#!/usr/bin/python
import hashlib
string=input("Please enter tha plaintext : ")
result = hashlib.sha384(string.encode())
print("The hexadecimal equivalent of hash is : ", result.hexdigest())
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
e04366763596d474dc1591978b472764c5ef6725
|
c878ac9d001f4a8b52caea4e6ac585cbcb762c89
|
/Scenes/PlayingGameScene.py
|
1e548676f5f6847b1d021bad2661fe7e9a2e0b8d
|
[] |
no_license
|
katieinder/MagnetGame
|
d278ee353aee8c3a5bb77e54cb1f3121a7f95e5c
|
2ee7a108d04ab3dc19903154e59402c3de1f35a6
|
refs/heads/master
| 2020-07-04T14:25:15.451688
| 2016-11-22T14:15:32
| 2016-11-22T14:15:32
| 74,153,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,502
|
py
|
import pygame
from Shared import *
from Scenes.Scene import Scene
import sys
class PlayingGameScene(Scene):
def __init__(self, game):
super(PlayingGameScene, self).__init__(game)
def render(self):
super(PlayingGameScene, self).render()
game = self.getGame()
        ball = game.getBalls()
        magnet = game.getMagnet()
        target = game.getTarget()
if ball.intersects(magnet):
ball.changeDirection(magnet)
ball.updatePosition()
game.screen.blit(ball.getSprite(), ball.getPosition())
magnet.setPosition((GameConstants.MagnetPosition[0], GameConstants.MagnetPosition[1]))
game.screen.blit(magnet.getSprite(), magnet.getPosition())
target.setPosition((GameConstants.TargetPosition[0], GameConstants.TargetPosition[1]))
game.screen.blit(target.getSprite(), target.getPosition())
def handleEvents(self, events):
super(PlayingGameScene, self).handleEvents(events)
for event in events:
            if event.type == pygame.QUIT:
                sys.exit()
if event.type == pygame.KEYDOWN:
for magnet in self.getGame().getMagnet():
positionx, positiony = GameObject.getPosition(magnet)[0], GameObject.getPosition(magnet)[1]
if event.key == pygame.K_d and positionx + GameConstants.MAGNET_SIZE[0] < GameConstants.SCREEN_SIZE[0]:
positionx += 6
else:
positionx = 0
if event.key == pygame.K_a and positionx > 0:
positionx -= 6
if event.key == pygame.K_s and positiony + GameConstants.MAGNET_SIZE[1] < GameConstants.SCREEN_SIZE[1]:
positiony += 6
else:
positiony = 0
if event.key == pygame.K_w and positiony > 0:
positiony -= 6
if event.type == pygame.KEYUP:
for magnet in self.getGame().getMagnet():
positionx, positiony = GameObject.getPosition(magnet)[0], GameObject.getPosition(magnet)[1]
if event.key == pygame.K_d:
positionx = 0
if event.key == pygame.K_a:
positionx = 0
if event.key == pygame.K_w:
positiony = 0
if event.key == pygame.K_s:
positiony = 0
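# --- Added sketch (not part of the original file): the key handling above
# --- resets a coordinate to 0 when the boundary test fails, which teleports the
# --- magnet. Clamping keeps it at the edge instead (names are illustrative):
# def clamp(value, lo, hi):
#     return max(lo, min(value, hi))
# positionx = clamp(positionx + dx, 0, GameConstants.SCREEN_SIZE[0] - GameConstants.MAGNET_SIZE[0])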
|
[
"Katie@Katies-MacBook-Air.local"
] |
Katie@Katies-MacBook-Air.local
|
9796214d25e80f9655fb1910bc028c1969ce3aca
|
1d8535658ed07fc88558c7d9bf3a01b709f189b1
|
/src/reversion/migrations/0001_initial.py
|
986fd81ac986f7c87b8babac57ae6a6c0bfa701a
|
[
"BSD-2-Clause"
] |
permissive
|
druids/django-reversion
|
ebedc4debe3ffc611f9e2bf72a04f388274502a0
|
d80a24b6a195c8a68bfc3100ba533419226fa18d
|
refs/heads/master
| 2020-12-25T08:50:58.658410
| 2018-06-10T20:19:42
| 2018-06-10T20:19:42
| 40,229,843
| 0
| 3
|
NOASSERTION
| 2020-04-09T13:16:57
| 2015-08-05T06:56:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,506
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Revision',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('manager_slug', models.CharField(default='default', max_length=200, db_index=True)),
('date_created', models.DateTimeField(auto_now_add=True, help_text='The date and time this revision was created.', verbose_name='date created', db_index=True)),
('comment', models.TextField(help_text='A text comment on this revision.', verbose_name='comment', blank=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, help_text='The user who created this revision.', null=True, verbose_name='user')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Version',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.TextField(help_text='Primary key of the model under version control.')),
('object_id_int', models.IntegerField(help_text="An indexed, integer version of the stored model's primary key, used for faster lookups.", null=True, db_index=True, blank=True)),
('format', models.CharField(help_text='The serialization format used by this model.', max_length=255)),
('serialized_data', models.TextField(help_text='The serialized form of this version of the model.')),
('object_repr', models.TextField(help_text='A string representation of the object.')),
('content_type', models.ForeignKey(help_text='Content type of the model under version control.', to='contenttypes.ContentType')),
('revision', models.ForeignKey(help_text='The revision that contains this version.', to='reversion.Revision')),
],
options={
},
bases=(models.Model,),
),
]
|
[
"dave@etianen.com"
] |
dave@etianen.com
|
562117f003163498fb1839859c84089b6e811c5b
|
65fa32a8ce8a7ea3e610e2f6210101912880af58
|
/tools/vmc_postproc/sfpnxphz.py
|
804fa8683c18ea3772da2e7090af54ef30d26c14
|
[
"MIT"
] |
permissive
|
bdallapi/gpvmc
|
9d09eb46f6e7093693e986ee48e45d7cc77faeff
|
19575b3e6fe0b27b464a092d92072e74b9ea59dd
|
refs/heads/master
| 2021-01-15T14:43:19.004456
| 2014-10-29T21:50:40
| 2014-10-29T21:50:40
| 25,629,938
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,373
|
py
|
#!/bin/env python
import numpy as np
from vmc_postproc import recspace
def delta(kx,ky,params):
return 0.5*(np.exp(1j*params['phi'])*np.cos(kx*2*np.pi)+\
np.exp(-1j*params['phi'])*np.cos(ky*2*np.pi))
def mfham(kx,ky,params):
H=np.zeros([4,4]+list(np.shape(kx)),dtype=complex)
dk=delta(kx,ky,params)
H[0,0,:]=params['field']
H[0,1,:]=-dk.conjugate()
H[0,2,:]=params['neel']
H[1,0,:]=-dk
H[1,1,:]=params['field']
H[1,3,:]=-params['neel']
H[2,0,:]=params['neel']
H[2,2,:]=-params['field']
H[2,3,:]=-dk.conjugate()
H[3,1,:]=-params['neel']
H[3,2,:]=-dk
H[3,3,:]=-params['field']
Tp=trans(kx,ky,params)
return np.einsum('ij...,jk...,lk...->il...',Tp,H,Tp)
def evals(kx,ky,params):
v=np.zeros([4]+list(np.shape(kx)))
v[0,:]=-np.sqrt(params['neel']**2+(params['field']-abs(delta(kx,ky,params)))**2)
v[1,:]=-np.sqrt(params['neel']**2+(params['field']+abs(delta(kx,ky,params)))**2)
v[2,:]=np.sqrt(params['neel']**2+(params['field']-abs(delta(kx,ky,params)))**2)
v[3,:]=np.sqrt(params['neel']**2+(params['field']+abs(delta(kx,ky,params)))**2)
return v
def evecs(kx,ky,params):
v=np.zeros([4,4]+list(np.shape(kx)),dtype=complex)
ev=evals(kx,ky,params)
dk=delta(kx,ky,params)
# band 1
v[0,0,:]=0.5*np.sqrt(1.0-params['neel']/evals(kx,ky,params)[0,:])
v[1,0,:]=0.5*np.sign(abs(dk)-params['field'])*np.exp(1j*np.angle(dk))*np.sqrt(1.0+params['neel']/evals(kx,ky,params)[0,:])
v[2,0,:]=0.5*np.sign(abs(dk)-params['field'])*np.sqrt(1.0+params['neel']/evals(kx,ky,params)[0,:])
v[3,0,:]=0.5*np.exp(1j*np.angle(dk))*np.sqrt(1.0-params['neel']/evals(kx,ky,params)[0,:])
# band 2
v[0,1,:]=0.5*np.sqrt(1.0-params['neel']/evals(kx,ky,params)[1,:])
v[1,1,:]=0.5*np.sign(abs(dk)+params['field'])*np.exp(1j*np.angle(dk))*np.sqrt(1.0+params['neel']/evals(kx,ky,params)[1,:])
v[2,1,:]=-0.5*np.sign(abs(dk)+params['field'])*np.sqrt(1.0+params['neel']/evals(kx,ky,params)[1,:])
v[3,1,:]=-0.5*np.exp(1j*np.angle(dk))*np.sqrt(1.0-params['neel']/evals(kx,ky,params)[1,:])
# band 3
v[0,2,:]=0.5*np.sqrt(1.0-params['neel']/evals(kx,ky,params)[2,:])
v[1,2,:]=-0.5*np.sign(abs(dk)-params['field'])*np.exp(1j*np.angle(dk))*np.sqrt(1.0+params['neel']/evals(kx,ky,params)[2,:])
v[2,2,:]=-0.5*np.sign(abs(dk)-params['field'])*np.sqrt(1.0+params['neel']/evals(kx,ky,params)[2,:])
v[3,2,:]=0.5*np.exp(1j*np.angle(dk))*np.sqrt(1.0-params['neel']/evals(kx,ky,params)[2,:])
# band 4
v[0,3,:]=0.5*np.sqrt(1.0-params['neel']/evals(kx,ky,params)[3,:])
v[1,3,:]=-0.5*np.sign(params['field']+abs(dk))*np.exp(1j*np.angle(dk))*np.sqrt(1.0+params['neel']/evals(kx,ky,params)[3,:])
v[2,3,:]=0.5*np.sign(params['field']+abs(dk))*np.sqrt(1.0+params['neel']/evals(kx,ky,params)[3,:])
v[3,3,:]=-0.5*np.exp(1j*np.angle(dk))*np.sqrt(1.0-params['neel']/evals(kx,ky,params)[3,:])
return v
#return np.einsum('ij...->ji...',v).conjugate()
def trans(kx,ky,params):
Tp=np.zeros([4,4]+list(np.shape(kx)))
Tp[0,0,:]=1
Tp[0,2,:]=-1
Tp[1,1,:]=1
Tp[1,3,:]=-1
Tp[2,0,:]=1
Tp[2,2,:]=1
Tp[3,1,:]=1
Tp[3,3,:]=1
return Tp/np.sqrt(2)
def spinops(params):
kx,ky=recspace.mbz(params)
kqx,kqy=recspace.mbzmod(kx-params['qx']/params['L'],ky-params['qy']/params['L'])
skq=2*np.array(recspace.inmbz(kx-params['qx']/params['L'],ky-params['qy']/params['L']),dtype=int)-1
evk=evecs(kx,ky,params)
evq=evecs(kqx,kqy,params)
nexc=2*params['L']**2
s=0
if (int(params['qx']),int(params['qy'])) in [(0,0),(int(params['L']/2),int(params['L']/2))]:
nexc+=1
s=1
so=[np.zeros((nexc),dtype=complex) for i in range(3)]
#Sqx
so[0][s::4]=0.5*(-evk[0,2,:].conjugate()*evq[0,0,:]+evk[2,2,:].conjugate()*evq[2,0,:]+\
skq*(-evk[1,2,:].conjugate()*evq[1,0,:]+evk[3,2,:].conjugate()*evq[3,0,:]))
so[0][s+1::4]=0.5*(-evk[0,3,:].conjugate()*evq[0,0,:]+evk[2,3,:].conjugate()*evq[2,0,:]+\
skq*(-evk[1,3,:].conjugate()*evq[1,0,:]+evk[3,3,:].conjugate()*evq[3,0,:]))
so[0][s+2::4]=0.5*(-evk[0,2,:].conjugate()*evq[0,1,:]+evk[2,2,:].conjugate()*evq[2,1,:]+\
skq*(-evk[1,2,:].conjugate()*evq[1,1,:]+evk[3,2,:].conjugate()*evq[3,1,:]))
so[0][s+3::4]=0.5*(-evk[0,3,:].conjugate()*evq[0,1,:]+evk[2,3,:].conjugate()*evq[2,1,:]+\
skq*(-evk[1,3,:].conjugate()*evq[1,1,:]+evk[3,3,:].conjugate()*evq[3,1,:]))
#Sqy
so[1][s::4]=0.5j*(-evk[0,2,:].conjugate()*evq[2,0,:]+evk[2,2,:].conjugate()*evq[0,0,:]+\
skq*(-evk[1,2,:].conjugate()*evq[3,0,:]+evk[3,2,:].conjugate()*evq[1,0,:]))
so[1][s+1::4]=0.5j*(-evk[0,3,:].conjugate()*evq[2,0,:]+evk[2,3,:].conjugate()*evq[0,0,:]+\
skq*(-evk[1,3,:].conjugate()*evq[3,0,:]+evk[3,3,:].conjugate()*evq[1,0,:]))
    so[1][s+2::4]=0.5j*(-evk[0,2,:].conjugate()*evq[2,1,:]+evk[2,2,:].conjugate()*evq[0,1,:]+\
skq*(-evk[1,2,:].conjugate()*evq[3,1,:]+evk[3,2,:].conjugate()*evq[1,1,:]))
so[1][s+3::4]=0.5j*(-evk[0,3,:].conjugate()*evq[2,1,:]+evk[2,3,:].conjugate()*evq[0,1,:]+\
skq*(-evk[1,3,:].conjugate()*evq[3,1,:]+evk[3,3,:].conjugate()*evq[1,1,:]))
#Sqz
so[2][s::4]=0.5*(evk[0,2,:].conjugate()*evq[2,0,:]+evk[2,2,:].conjugate()*evq[0,0,:]+\
skq*(evk[1,2,:].conjugate()*evq[3,0,:]+evk[3,2,:].conjugate()*evq[1,0,:]))
so[2][s+1::4]=0.5*(evk[0,3,:].conjugate()*evq[2,0,:]+evk[2,3,:].conjugate()*evq[0,0,:]+\
skq*(evk[1,3,:].conjugate()*evq[3,0,:]+evk[3,3,:].conjugate()*evq[1,0,:]))
so[2][s+2::4]=0.5*(evk[0,2,:].conjugate()*evq[2,1,:]+evk[2,2,:].conjugate()*evq[0,1,:]+\
skq*(evk[1,2,:].conjugate()*evq[3,1,:]+evk[3,2,:].conjugate()*evq[1,1,:]))
so[2][s+3::4]=0.5*(evk[0,3,:].conjugate()*evq[2,1,:]+evk[2,3,:].conjugate()*evq[0,1,:]+\
skq*(evk[1,3,:].conjugate()*evq[3,1,:]+evk[3,3,:].conjugate()*evq[1,1,:]))
if (params['qx'],params['qy']) in [(0,0),(int(params['L']/2),int(params['L']/2))]:
so[0][0]=0.5*np.sum(-evk[0,0,:].conjugate()*evq[0,0,:]+evk[2,0,:].conjugate()*evq[2,0,:]+\
skq*(-evk[1,0,:].conjugate()*evq[1,0,:]+evk[3,0,:].conjugate()*evq[3,0,:]))+\
0.5*np.sum(-evk[0,1,:].conjugate()*evq[0,1,:]+evk[2,1,:].conjugate()*evq[2,1,:]+\
skq*(-evk[1,1,:].conjugate()*evq[1,1,:]+evk[3,1,:].conjugate()*evq[3,1,:]))
so[1][0]=0.5j*np.sum(evk[0,0,:].conjugate()*evq[2,0,:]-evk[2,0,:].conjugate()*evq[0,0,:]+\
skq*(evk[1,0,:].conjugate()*evq[3,0,:]-evk[3,0,:].conjugate()*evq[1,0,:]))+\
0.5j*np.sum(evk[0,1,:].conjugate()*evq[2,1,:]-evk[2,1,:].conjugate()*evq[0,1,:]+\
skq*(evk[1,1,:].conjugate()*evq[3,1,:]-evk[3,1,:].conjugate()*evq[1,1,:]))
so[2][0]=0.5*np.sum(evk[0,0,:].conjugate()*evq[2,0,:]+evk[2,0,:].conjugate()*evq[0,0,:]+\
skq*(evk[1,0,:].conjugate()*evq[3,0,:]+evk[3,0,:].conjugate()*evq[1,0,:]))+\
0.5*np.sum(evk[0,1,:].conjugate()*evq[2,1,:]+evk[2,1,:].conjugate()*evq[0,1,:]+\
skq*(evk[1,1,:].conjugate()*evq[3,1,:]+evk[3,1,:].conjugate()*evq[1,1,:]))
return so
def refstate(params):
state=np.zeros(2*params['L']**2)
state[::4]=1
state[1::4]=1
return state
|
[
"Bastien Dalla Piazza"
] |
Bastien Dalla Piazza
|
d0693b0ad88f47db099867cf4761d478eef729b8
|
e8ac30578ae04854181cd5233db96aee57dffa15
|
/lib/python3.7/imp.py
|
93b04b1279fde0f9b70449c2d762314e37dbd6e3
|
[] |
no_license
|
hariknair77/fsf_2019_screening_task1
|
50d1840c9dcfd18ff9ecb5832dbea89632c9554a
|
473f137e9c080c4fc775e1d1163595231b760546
|
refs/heads/master
| 2020-04-25T19:48:11.554670
| 2019-03-10T06:34:30
| 2019-03-10T06:34:30
| 173,033,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
/home/hari/anaconda3/lib/python3.7/imp.py
|
[
"hariknair77@gmail.com"
] |
hariknair77@gmail.com
|
e973b5c48496e567d2e0fae3626b51dd134136a6
|
99441588c7d6159064d9ce2b94d3743a37f85d33
|
/pcl_test/cmake-build-debug/catkin_generated/generate_cached_setup.py
|
182642f40547103257c7ec487f901a6e624ac514
|
[] |
no_license
|
YZT1997/robolab_project
|
2786f8983c4b02040da316cdd2c8f9bb73e2dd4c
|
a7edb588d3145356566e9dcc37b03f7429bcb7d6
|
refs/heads/master
| 2023-09-02T21:28:01.280464
| 2021-10-14T02:06:35
| 2021-10-14T02:06:35
| 369,128,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,340
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/yangzt/sda_ws/devel;/home/yangzt/catkin_ws/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/yangzt/catkin_ws/devel/env.sh')
output_filename = '/home/yangzt/catkin_ws/src/pcl_test/cmake-build-debug/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"yangzt_0943@163.com"
] |
yangzt_0943@163.com
|
290c90e1ec3e9aea7039b80484a81718c05d1dfb
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_shelled.py
|
92293753951a72a46ead1e9e801bf3e2ad1a351b
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from xai.brain.wordbase.nouns._shell import _SHELL
# class header
class _SHELLED(_SHELL, ):
def __init__(self,):
_SHELL.__init__(self)
self.name = "SHELLED"
self.specie = 'nouns'
self.basic = "shell"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ac0068272e44b2c8be212ec72274b2cd3cc76d3a
|
fb7d753aba613b1b84d06ad17d82722988b960ea
|
/App/migrations/0003_mustbuy.py
|
e3746268efb9349aa1c530937338c0e2f1fbd160
|
[] |
no_license
|
Bestlzh/AXF-first-django-
|
8c5da15c3160f8ab15b21fa872782bf1c4d7ad6b
|
452a66daf964db34026487279c5dbd37c31df576
|
refs/heads/master
| 2021-09-01T02:56:51.514477
| 2017-12-24T12:15:53
| 2017-12-24T12:15:53
| 115,260,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-27 09:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0002_nav'),
]
operations = [
migrations.CreateModel(
name='MustBuy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.CharField(max_length=200)),
('name', models.CharField(max_length=20)),
('trackid', models.CharField(max_length=20)),
],
options={
'db_table': 'axf_mustbuy',
},
),
]
|
[
"lizihao0913@163.com"
] |
lizihao0913@163.com
|
a0ef7b70a8cf37b54a0e2e4f78c34348ea61b0a5
|
d067509968cd8f9b1e29ec5ba69abbee56757ec6
|
/headTracking/headTracking.py
|
c0cfce895fa2344951dce9adf8d26dddc91b0c3a
|
[
"MIT"
] |
permissive
|
erizalmh/humanoid_challenge
|
de5edab24b7db54f9ba0c4f9c4b4cbbaf337ac19
|
fc0f0049ae75b1be6bb59b100de900dc3d7a510a
|
refs/heads/master
| 2022-04-09T18:07:14.139305
| 2018-05-07T06:20:09
| 2018-05-07T06:20:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,207
|
py
|
#!/usr/bin/env python
# import the necessary packages
import numpy as np
import imutils
import cv2
import serial
import math
connected = True
camera = cv2.VideoCapture(1)
ser = serial.Serial() # create a serial port object
ser.baudrate = 9600 # baud rate, in bits/second
ser.port = "/dev/ttyACM0" # this is whatever port your are using
ser.timeout = 3.0
ser.open()
# define the lower and upper boundaries of the "blue"
# ball in the HSV color space, then initialize the
# list of tracked points
blueLower = (75, 90, 90)
blueUpper = (130, 255, 255)
# keep looping
while True:
# receive openCM feedback
n = ser.inWaiting()
    feedback = ser.read(n)  # avoid shadowing the builtin 'str'
    if feedback:
        print feedback
# grab the current frame
(grabbed, frame) = camera.read()
# resize the frame, blur it, and convert it to the HSV
# color space
frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)  # note: hsv below is still computed from the unblurred frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# construct a mask for the color "blue", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
mask = cv2.inRange(hsv, blueLower, blueUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask
c = max(cnts, key=cv2.contourArea)
# ( center (x,y), (width, height), angle of rotation )
rect = cv2.minAreaRect(c)
rect = ((rect[0][0], rect[0][1]), (rect[1][0], rect[1][1]), rect[2])
angle = rect[2]
#print angle
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(frame, [box], 0, (255, 0, 0), 2)
## put text on handle and judge the handle open or not throught angle
leftTop = box[0]
if -85 < angle < -60:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, 'open handle', (leftTop[0], leftTop[1]), font, 1, (255, 0, 255), 4)
else:
font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, 'closed handle', (leftTop[0],leftTop[1]), font, 1, (255, 0, 255),4)
        # tracking handle
xb = rect[0][0] / 600.0 * 255
xb = abs(255-xb)
# send ASCII values to openCM
xb = int(round(xb))
if len(cnts) == 0:
ser.write(0)
else:
ser.write(chr(xb))
# print xb
    # if no handle is detected
if len(cnts) == 0:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, 'no handle found', (250, 300), font, 1, (255, 0, 255), 4)
# show the frame to our screen
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the 'q' key is pressed, stop the loop
if key == ord("q"):
break
while not connected:
serin = ser.read()
connected = True
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
ser.close()
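# --- Added sketch (not part of the original file): the pixel-to-serial mapping
# --- used in the loop above, isolated into a function. A 0..600 px x-coordinate
# --- is scaled to 0..255 and mirrored before being sent as one byte.
def x_to_servo_byte(x_px, frame_width=600.0):
    scaled = x_px / frame_width * 255
    return int(round(abs(255 - scaled)))
# x_to_servo_byte(0) == 255, x_to_servo_byte(300) == 128, x_to_servo_byte(600) == 0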
|
[
"monsoon117@gmail.com"
] |
monsoon117@gmail.com
|
bbc96985d0b7d59a1ed8256547223d6d3942caae
|
e03c75eef2c8f177003dd0de10805199d17250c2
|
/VID21.py
|
2ccfadd0c7e801ab107872ba1d3e20b78ccf3435
|
[] |
no_license
|
arsh-seth/Tkinter
|
d8a97ac89f88d8009fc224cec4d2295d4cd35059
|
e4893f7b7aa4718dbb6ea13a3be67da8ee580e6e
|
refs/heads/master
| 2022-09-08T12:15:25.390700
| 2020-06-02T18:24:38
| 2020-06-02T18:24:38
| 265,328,374
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
from tkinter import *
def add():
global i
lbx.insert(ACTIVE, f"{i}")
i+=1
i = 0
root = Tk()
root.geometry("455x233")
root.title("Listbox tutorial")
lbx = Listbox(root)
lbx.pack()
lbx.insert(END, "First item of our listbox")
Button(root, text="Add Item", command=add).pack()
root.mainloop()
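# --- Added note (not part of the original file): Listbox.insert takes an index.
# --- END always appends, 0 always prepends, and ACTIVE (used in add() above)
# --- inserts at the item that currently has keyboard focus:
# lbx.insert(END, "appended")    # goes last
# lbx.insert(0, "prepended")     # goes first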
|
[
"noreply@github.com"
] |
arsh-seth.noreply@github.com
|
9b8ffd02c0680421820d9d17d7078ba7ee1365ba
|
ce8bb40bf2b688f19ab8bcc20cfd58994413bc0f
|
/ajax/ajax_mysite/app01/views.py
|
b372bd95be6a215aa5b89bd42d3acb0b23b5da03
|
[] |
no_license
|
Fover21/project1
|
457f452d7f6e7ecbfc81a18512377ebc5457f3f6
|
84d596caf5701d7d76eee8c50f61bcb6150c57f2
|
refs/heads/master
| 2020-03-24T20:01:51.506348
| 2018-12-26T06:07:45
| 2018-12-26T06:07:45
| 142,955,917
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
from django.shortcuts import render, HttpResponse, reverse, redirect
# Create your views here.
from django.views.decorators.csrf import csrf_exempt, csrf_protect
@csrf_exempt  # exempt this view from CSRF protection
def login(request):
return render(request, 'login.html')
def index(request):
i1, i2, i3 = '', '', ''
if request.method == 'POST':
i1 = int(request.POST.get('i1'))
i2 = int(request.POST.get('i2'))
i3 = i1 + i2
return render(request, 'index.html', {
'i1': i1,
'i2': i2,
'i3': i3,
})
# from django.views.decorators.csrf import ensure_csrf_cookie  # second way to configure this globally
# @csrf_exempt
def calc(request):
# csrfmiddlewaretoken = request.POST.get('csrfmiddlewaretoken')
# print(csrfmiddlewaretoken)
i1 = int(request.POST.get('i1'))
i2 = int(request.POST.get('i2'))
i3 = i1 + i2
print(request.POST)
return HttpResponse(i3)
# upload
def upload(request):
if request.method == "POST":
print("FILES", request.FILES)
file_obj = request.FILES.get("file")
with open(file_obj.name, "wb") as f:
for i in file_obj.chunks():
f.write(i)
return HttpResponse("success!")
# test
def tt(request):
if request.method == "POST":
ret = reverse('uu')
print(ret)
return redirect(ret)
return render(request, 'index.html')
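# --- Added sketch (not part of the original file): exercising calc() with
# --- Django's test client. The '/calc/' path is an assumption (the URLconf is
# --- not shown here); the test client skips CSRF checks by default.
# from django.test import Client
# resp = Client().post('/calc/', {'i1': 1, 'i2': 2})
# assert resp.content == b'3'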
|
[
"850781645@qq.com"
] |
850781645@qq.com
|
525faba85baf47e70bd840eb6b17b29331739083
|
0c41031269497790425702d4ad882423dc443a6a
|
/pandas14/pandas14_9.py
|
ad0ca612be2e850e77d6d818f876fb6c53ce6255
|
[] |
no_license
|
diegoami/datacamp-courses-PY
|
4c546e69241ca429adefdd459db92d617cfa0e9f
|
bab3082929fa6f1cf2fc2f2efb46d16374715b4b
|
refs/heads/master
| 2023-07-20T06:42:29.776349
| 2018-10-28T22:57:21
| 2018-10-28T22:57:21
| 92,448,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,599
|
py
|
import pandas as pd
# Load DataFrame from file_path: editions
medals = pd.read_csv('../data/medals_2.csv')
# Construct the pivot_table: medal_counts
medal_counts = medals.pivot_table(index='Edition',values='Athlete',columns='NOC',aggfunc='count')
# Load DataFrame from file_path: editions
editions = pd.read_csv('../data/editions.csv')
# Set Index of editions: totals
totals = editions.set_index('Edition')
# Reassign totals['Grand Total']: totals
totals = totals['Grand Total']
# Divide medal_counts by totals: fractions
fractions = medal_counts.divide( totals, axis = 'rows' )
# Apply the expanding mean: mean_fractions
mean_fractions = fractions.expanding().mean()
# Compute the percentage change: fractions_change
fractions_change = mean_fractions.pct_change().multiply(100)
# Reset the index of fractions_change: fractions_change
fractions_change = fractions_change.reset_index()
ioc_codes = pd.read_csv('../data/country_codes.csv')
# Extract the relevant columns: ioc_codes
ioc_codes = ioc_codes[['Country', 'NOC']]
# Left join editions and ioc_codes: hosts
hosts = pd.merge(editions,ioc_codes, how='left')
# Extract relevant columns and set index: hosts
hosts = hosts[['Edition','NOC']].set_index( 'Edition')
# Reshape fractions_change: reshaped
reshaped = pd.melt(fractions_change,id_vars='Edition', value_name='Change')
# Print reshaped.shape and fractions_change.shape
print(reshaped.shape, fractions_change.shape)
# Extract rows from reshaped where 'NOC' == 'CHN': chn
chn = reshaped.loc[reshaped['NOC'] == 'CHN']
# Print last 5 rows of chn with .tail()
print(chn.tail())
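# --- Added demo (not part of the original script): expanding().mean() followed
# --- by pct_change() on a tiny Series, to make the two transforms concrete.
s = pd.Series([1.0, 3.0, 5.0])
m = s.expanding().mean()               # 1.0, 2.0, 3.0 (running means)
print(m.pct_change().multiply(100))    # NaN, 100.0, 50.0 (percent changes)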
|
[
"diego.amicabile@gmail.com"
] |
diego.amicabile@gmail.com
|
8a76f7524a07a73f9f4012875041fd152bc95a5e
|
ccb062bea92c4f50ff6fca39af2df6499f87fa53
|
/build-debug/SConscript
|
59a52d8ac85c58e0776a32b50a39f11c076429b4
|
[
"MIT"
] |
permissive
|
mathbouchard/BaseApp
|
da20b609ef0733d50c110ff2408410a1fa1191c8
|
6daeb7c8eef3921c1c41d0870fad4901540222a6
|
refs/heads/master
| 2021-01-19T17:47:53.900489
| 2014-01-22T04:09:08
| 2014-01-22T04:09:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,583
|
# January 8 10:21 2014
# Copyright 2014 Mathieu Bouchard
# mathbouchard@gmail.com
from platform import system
from os import listdir
from collections import OrderedDict
is_debug = True;
suffix = ""
if(is_debug):
suffix = "d"
print("DEBUG")
else:
print("RELEASE")
print("System is "+ system());
def validateLibrary(libraryName):
""" Search in the libpath of the current platform if the library is present """
for folder in libpath[system()]:
for file in listdir(folder):
if (libraryName + "." in file):
return True
return False
cpppath = {
"Linux": ["/home/mbouchard/gitwork/BaseLib/src"], #[r"/path/1", r"/path/2"]
"Windows": [],
"Darwin": ["/Users/mathbouchard/gitwork/BaseLib/src"]
}
libpath = {
"Linux": ["/home/mbouchard/gitwork/BaseLib/lib"], #[r"/path/1", r"/path/2"]
"Windows": [],
"Darwin": ["/Users/mathbouchard/gitwork/BaseLib/lib"]
}
cppdefines = []
if (system() == "Windows"):
cppdefines.append("WIN64")
elif (system() == "Linux"):
cppdefines.append("Linux")
elif (system() == "Darwin"):
cppdefines.append("Darwin")
libs = []
# Validate that the libs are present
moduleLibs = { "BaseLib": { "Linux": ["BaseLibd"], "Windows": ["BaseLibd"], "Darwin": ["BaseLibd"]} }
# { "Module1": { "Linux": ["linuxlibname1"], "Windows": ["winlibname1"], "Darwin": ["osxlibname1"] }, "Module2": { "Linux": ["linuxlibname2"], "Windows": ["winlibname2"], """: ["osxlibname2"] } }
missingModules = []
for module in moduleLibs:
allModulePresents = True
for mod in moduleLibs[module][system()]:
        if (not validateLibrary(mod)):
            allModulePresents = False
if (allModulePresents):
libs += moduleLibs[module][system()];
cppdefines.append((module + "_MODULE").upper())
print("define : " + module + "_MODULE")
else:
missingModules.append(module)
libs = list(OrderedDict.fromkeys(libs))
# linux flags
# -g : debug info flag
# -O# : optimization flag
# -m64 : force 64 bits in the linker
# -Wall : enables all the warnings
# -fmessage-length : format the compiler message
# -std=c++11 : c++ version flag
cxxflags = {
"Linux": "-g -O0 -m64 -Wall -pthread -fmessage-length=0 -std=c++11",
"Windows": "/O0 /MD /EHsc",
"Darwin": "-g -O0 -m64 -Wall -pthread -fmessage-length=0 -std=c++11",
}
# Create an environmnet
env = Environment(TARGET_ARCH = "x86_64",
CPPPATH = cpppath[system()],
LIBPATH = libpath[system()],
LIBS = libs,
CXXFLAGS = cxxflags[system()],
CPPDEFINES = cppdefines
)
# Change the build folder output
#env.VariantDir("build-debug", "src", duplicate = 0)
files = Glob("*.cpp")
if (system() == "Windows"):
target = "BaseApp"+suffix
elif (system() == "Linux"):
target = "BaseApp"+suffix
elif (system() == "Darwin"):
target = "BaseApp"+suffix
# Remove the cpp files for the missing solvers
filesToRemove = []
if (not env.GetOption("clean")):
for file in files:
for module in missingModules:
if module in str(file):
filesToRemove.append(file)
for file in filesToRemove:
print(file)
files.remove(file)
# Build the library
env.Program(target = "../bin/" + target, source = files)
if (missingModules):
print("***************************************************************************")
print("****** The following modules will not compiled into the final application *****")
print(missingModules)
print("***************************************************************************")
|
[
"mathbouchard@gmail.com"
] |
mathbouchard@gmail.com
|
|
5bf43a26a8f376a2920ea2a5ed4a757eb726910b
|
5e8bda0dcaa424ccf2a7a0204b1becbd78f6f32b
|
/Unet.py
|
f4540fb26deb20a1d56457f881af994bd983782b
|
[] |
no_license
|
ggrego04/electron_microscopy_digitisation
|
e8b4407d5fccedebceb3e2f436d39928cd43d767
|
7c1a6524fb4dd2359aa3edb3644a9aeccdfe2464
|
refs/heads/main
| 2023-02-13T11:26:11.543353
| 2021-01-15T23:40:32
| 2021-01-15T23:40:32
| 330,020,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,562
|
py
|
# Imported Libraries
import tensorflow as tf
import numpy as np
from skimage.transform import resize
seed = 42
np.random.seed(seed)
# dimensions of the image
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3
# this function returns the segmented image
def prediction(image):
# load the pretrained model
model = tf.keras.models.load_model("model_for_segmentation.h5")
    # prepare the image for segmentation
x_test = np.zeros((1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
image = resize(image, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
x_test[0] = image
# predict the output
preds_test = model.predict(x_test, verbose=1)
preds_test = (preds_test > 0.5).astype(np.uint8)
# create the mask based on output
mask = preds_test[0]
for i in range(mask.shape[0]):
for j in range(mask.shape[1]):
if mask[i][j] == 1:
mask[i][j] = 255
else:
mask[i][j] = 0
return mask
# Build the model
inputs = tf.keras.layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = tf.keras.layers.Lambda(lambda x: x / 255)(inputs)
# Contraction path
c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(s)
c1 = tf.keras.layers.Dropout(0.1)(c1)
c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
p1 = tf.keras.layers.MaxPooling2D((2, 2))(c1)
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
c2 = tf.keras.layers.Dropout(0.1)(c2)
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
p2 = tf.keras.layers.MaxPooling2D((2, 2))(c2)
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
c3 = tf.keras.layers.Dropout(0.2)(c3)
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
p3 = tf.keras.layers.MaxPooling2D((2, 2))(c3)
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
c4 = tf.keras.layers.Dropout(0.2)(c4)
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
p4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(c4)
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
c5 = tf.keras.layers.Dropout(0.3)(c5)
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
# Expansive path
u6 = tf.keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = tf.keras.layers.concatenate([u6, c4])
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
c6 = tf.keras.layers.Dropout(0.2)(c6)
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
u7 = tf.keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
u7 = tf.keras.layers.concatenate([u7, c3])
c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
c7 = tf.keras.layers.Dropout(0.2)(c7)
c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
u8 = tf.keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
u8 = tf.keras.layers.concatenate([u8, c2])
c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
c8 = tf.keras.layers.Dropout(0.1)(c8)
c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
u9 = tf.keras.layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
u9 = tf.keras.layers.concatenate([u9, c1], axis=3)
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
c9 = tf.keras.layers.Dropout(0.1)(c9)
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
outputs = tf.keras.layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
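# --- Added sketch (not part of the original file): calling prediction() on a
# --- random RGB image. It assumes 'model_for_segmentation.h5' is present next
# --- to the script, since prediction() loads it from disk.
# rand_img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
# mask = prediction(rand_img)
# print(mask.shape)   # (128, 128, 1): one 0/255 mask at the network resolution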
|
[
"noreply@github.com"
] |
ggrego04.noreply@github.com
|
0df8d0fd941b9fda7e926758fb191fdf39c3a691
|
3d38c394ab9958c32e26f179ba7d045c55b162f0
|
/pamixer/classes/SubVolume.py
|
a40bbce897aa1f98f8c8ad408a290032a3c3345c
|
[] |
no_license
|
Valodim/pamixer
|
fb2d3e07de89c0b1088e7fc2dabd9823909aa94e
|
99cea2fe72620076e7f10d1f2735cf83cf5055c5
|
refs/heads/master
| 2021-01-01T19:02:20.901345
| 2012-02-12T20:29:40
| 2012-02-12T20:29:40
| 1,437,808
| 10
| 1
| null | 2012-07-19T11:25:58
| 2011-03-04T02:31:00
|
Python
|
UTF-8
|
Python
| false
| false
| 8,621
|
py
|
import curses
from ..pulse.PulseAudio import (
    PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
    PA_CHANNEL_POSITION_FRONT_CENTER, PA_CHANNEL_POSITION_REAR_CENTER,
    PA_CHANNEL_POSITION_REAR_LEFT, PA_CHANNEL_POSITION_REAR_RIGHT,
    PA_CHANNEL_POSITION_LFE, PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER,
    PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER, PA_CHANNEL_POSITION_SIDE_LEFT,
    PA_CHANNEL_POSITION_SIDE_RIGHT)
from ..pulse.ParCur import par
channel_names = { }
channel_names[PA_CHANNEL_POSITION_FRONT_LEFT] = 'front left';
channel_names[PA_CHANNEL_POSITION_FRONT_RIGHT] = 'front right';
channel_names[PA_CHANNEL_POSITION_FRONT_CENTER] = 'front center';
channel_names[PA_CHANNEL_POSITION_REAR_CENTER] = 'rear center';
channel_names[PA_CHANNEL_POSITION_REAR_LEFT] = 'rear left';
channel_names[PA_CHANNEL_POSITION_REAR_RIGHT] = 'rear right';
channel_names[PA_CHANNEL_POSITION_LFE] = 'sub';
channel_names[PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER] = 'front left';
channel_names[PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER] = 'front right';
channel_names[PA_CHANNEL_POSITION_SIDE_LEFT] = 'side left';
channel_names[PA_CHANNEL_POSITION_SIDE_RIGHT] = 'side right';
channel_picto = {
PA_CHANNEL_POSITION_FRONT_LEFT: [ 'FL', 0, 0 ],
PA_CHANNEL_POSITION_FRONT_RIGHT: [ 'FR', 0, 7 ],
PA_CHANNEL_POSITION_FRONT_CENTER: [ 'C', 1, 4 ],
PA_CHANNEL_POSITION_REAR_CENTER: [ 'c', 3, 4 ],
PA_CHANNEL_POSITION_REAR_LEFT: [ 'RL', 4, 0 ],
PA_CHANNEL_POSITION_REAR_RIGHT: [ 'RR', 4, 7 ],
PA_CHANNEL_POSITION_LFE: [ 'S', 2, 5 ],
PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER: [ 'CL', 1, 1 ],
PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER: [ 'CR', 1, 6 ],
PA_CHANNEL_POSITION_SIDE_LEFT: [ 'L', 2, 0 ],
PA_CHANNEL_POSITION_SIDE_RIGHT: [ 'R', 2, 7 ],
}
class SubVolume(object):
""" This is a superclass for anything that has a volume """
def __init__(self):
self.volume = [ ]
self.volume_db = [ ]
self.drawable_volume = False
self.wcontrols = None
self.win = None
self.cursor_volume = 0
def update(self, struct):
self.channels = struct.channel_map.channels
self.channel_map = [ ]
for i in range(0, struct.channel_map.channels):
self.channel_map.append(struct.channel_map.map[i])
self.volume = par.volume_to_linear(struct.volume)
self.volume_db = par.volume_to_dB(struct.volume)
def layout_volume(self, win):
""" fullscreen layout """
# just clean up?
if win is None:
self.drawable_volume = False
return
self.drawable_volume = True
self.win = win
self.wcontrols = win.derwin(4, 6)
def redraw_volume(self, recurse = False):
self.draw_controls_volume()
def draw_controls_volume(self):
# don't proceed if it's not even our turn to draw
if self.drawable_volume is False:
return
# if we aren't active, this needn't even be considered
self.cursorCheck_volume()
wcontrols = self.wcontrols
wcontrols.erase()
self.win.move(0, 3)
self.win.addstr(self.name)
self.draw_picto(wcontrols.derwin(30, 5), self.cursor_volume)
# draw volume gauge, just an average
for i in range(0, self.channels):
gauge = wcontrols.derwin(22, 4, 0, 3 + i*23)
self.draw_gauge(gauge, self.volume[i])
gauge.border()
wcontrols.move(24, i*23)
wcontrols.addstr(channel_names[self.channel_map[i]].center(12), curses.A_BOLD if self.cursor_volume == i else 0)
# text info, too
wcontrols.move(25, i*23)
if par.use_dezibel:
wcontrols.addstr(('{:+3.2f}'.format(self.volume_db[i]) + " dB").rjust(9))
else:
wcontrols.addstr(('{:3.2f}'.format(self.volume[i] * 100) + " %").rjust(9))
def cursorCheck_volume(self):
while self.cursor_volume >= self.channels:
self.cursor_volume -= 1
if self.cursor_volume < 0:
self.cursor_volume = 0
def key_event_volume(self, event):
# change focus
if event == ord('h') or event == ord('l'):
self.cursor_volume += -1 if event == ord('h') else +1
# cursorCheck_volume happens here, too!
self.draw_controls_volume()
return True
elif 'changeVolume' in self.__class__.__dict__ and event in [ ord('k'), ord('K'), ord('j'), ord('J') ]:
self.cursorCheck_volume()
self.changeVolume(event == ord('k') or event == ord('K'), event == ord('K') or event == ord('J'), [ self.cursor_volume ])
self.draw_controls_volume()
return True
elif 'setVolume' in self.__class__.__dict__:
if event == ord('n'):
self.cursorCheck_volume()
self.setVolume(1.0, False, [ self.cursor_volume ])
self.draw_controls_volume()
return True
elif event == ord('N'):
self.setVolume(1.0)
self.draw_controls_volume()
return True
elif event == ord('m'):
self.cursorCheck_volume()
self.setVolume(0.0, False, [ self.cursor_volume ])
self.draw_controls_volume()
return True
elif event == ord('M'):
self.setVolume(0.0)
self.draw_controls_volume()
return True
def draw_gauge(self, win, volume, width = 2, offset = 0):
for i in range(1, width+1):
barheight = min(22, int(volume * 18))
# lowest eight
if barheight > 0:
win.attron(curses.color_pair(3))
win.vline(21-min(8, barheight), offset +i, curses.ACS_BLOCK, min(8, barheight))
win.attroff(curses.color_pair(3))
# mid seven
if barheight > 8:
win.vline(13-min(7, barheight-8), offset +i, curses.ACS_BLOCK, min(7, barheight-8))
# top three
if barheight > 15:
win.attron(curses.color_pair(6))
win.vline(6-min(3, barheight-15), offset +i, curses.ACS_BLOCK, min(3, barheight-15))
win.attroff(curses.color_pair(6))
# over the top (clipping and stuff)
if barheight > 18:
win.attron(curses.color_pair(2))
win.vline(3-min(3, barheight-18), offset +i, curses.ACS_BLOCK, min(3, barheight-18))
win.attroff(curses.color_pair(2))
def draw_picto(self, win, cursor = False):
""" Draws a neat little pictogram of the speaker setup """
for i in range(0, self.channels):
picto = channel_picto[self.channel_map[i]]
win.move(picto[1], picto[2])
color = 0
if self.volume[i] == 0.0:
color = curses.color_pair(6)
elif self.volume[i] > 1.0:
color = curses.color_pair(2)
win.addstr(picto[0], color | (curses.A_BOLD if cursor is not False and (i == cursor) else 0))
def volume_uniform(self):
if self.channels == 0:
return True
for i in range(1, self.channels):
if self.volume[i] != self.volume[0]:
return False
return True
def getSetVolume(self, value, hard = False, channels = None):
volume = []
value = max(0.0, min(par.volume_max_hard if hard else par.volume_max_soft, value))
        # create a list of volumes, applying the new value to the selected channels
for i in range(0, len(self.volume)):
# apply new value?
if channels is None or i in channels:
volume.append(value)
else:
volume.append(self.volume[i])
return volume
def getChangeVolume(self, up, hard = False, channels = None):
volume = []
# create a list of volumes
for i in range(0, len(self.volume)):
# apply new value?
if channels is None or i in channels:
step = par.volume_step_hard if hard else par.volume_step
volume.append(max(0.0, min(par.volume_max_hard if hard else par.volume_max_soft, self.volume[i] + (step if up else -step))))
else:
volume.append(self.volume[i])
return volume
def still_exists(self):
""" Needs to be overridden! This returns false if the underlying
volume instance no longer exists. """
return False
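# Hedged subclass sketch (illustrative addition, not part of the original
# file): key_event_volume() only dispatches to changeVolume()/setVolume()
# when the concrete class defines them, so a minimal sink wrapper would look
# roughly like this, with the actual PulseAudio call being backend-specific:
#
# class SinkVolume(SubVolume):
#     name = 'sink'
#     def setVolume(self, value, hard = False, channels = None):
#         volume = self.getSetVolume(value, hard, channels)
#         # push `volume` to the PulseAudio sink here
#     def changeVolume(self, up, hard = False, channels = None):
#         volume = self.getChangeVolume(up, hard, channels)
#         # push `volume` to the PulseAudio sink here
#     def still_exists(self):
#         return True  # a real subclass would query the PulseAudio object list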
|
[
"valodim@mugenguild.com"
] |
valodim@mugenguild.com
|
fc10885cc1c93b0fef9785b5f6bc9a544e6d5749
|
b943f725f8c6b20c277eb7b77e67689173bc0d1a
|
/simplemooc/core/urls.py
|
06bf7fc6d8d6aa47edc54cbee4a80a33837c89a1
|
[] |
no_license
|
Akijunior/Eleanning
|
c4da62971735b5a6c18f0ee04758ac6db770c2a4
|
4b0e4c397b76a7839722b00f23ef3eb4f309d229
|
refs/heads/master
| 2020-08-19T05:14:57.581513
| 2019-10-21T15:33:36
| 2019-10-21T15:33:36
| 215,882,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
from django.urls import path
from simplemooc.core.views import home
urlpatterns = [
path('', home, name='home'),
]
|
[
"suitsu19@gmail.com"
] |
suitsu19@gmail.com
|
2a43d736e2b0bed80741d6dc401155c5fb685570
|
374aac5655cbdead72683a5e8b6e02126a024768
|
/tests/test_sqlalchemy.py
|
b05d87ce415b1c3218592d47c7af99354879f0b8
|
[
"MIT"
] |
permissive
|
naveenkumar-grofers/nplusone
|
0f51179a5a4aa717ea2b537bfa1a8e07af568ebb
|
2bcf727a73c05afa01a020993997a6a60778b872
|
refs/heads/master
| 2021-01-24T21:54:08.390445
| 2015-11-15T16:52:42
| 2015-11-15T16:52:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,041
|
py
|
# -*- coding: utf-8 -*-
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from nplusone.core import signals
import nplusone.ext.sqlalchemy # noqa
from tests import utils
from tests.utils import calls # noqa
pytest.yield_fixture(calls)
Base = declarative_base()
models = utils.make_models(Base)
@pytest.fixture()
def session():
engine = sa.create_engine('sqlite:///:memory:')
Session = sa.orm.sessionmaker(bind=engine)
Base.metadata.create_all(bind=engine)
return Session()
@pytest.fixture()
def objects(session):
hobby = models.Hobby()
address = models.Address()
user = models.User(addresses=[address], hobbies=[hobby])
session.add(user)
session.commit()
session.close()
class TestManyToOne:
def test_many_to_one(self, session, objects, calls):
user = session.query(models.User).first()
user.addresses
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.User, 'addresses')
assert 'user.addresses' in ''.join(call.frame[4])
def test_many_to_one_ignore(self, session, objects, calls):
user = session.query(models.User).first()
with signals.ignore(signals.lazy_load):
user.addresses
assert len(calls) == 0
def test_many_to_one_subquery(self, session, objects, calls):
user = session.query(
models.User
).options(
sa.orm.subqueryload('addresses')
).first()
user.addresses
assert len(calls) == 0
def test_many_to_one_joined(self, session, objects, calls):
user = session.query(models.User).options(sa.orm.joinedload('addresses')).first()
user.addresses
assert len(calls) == 0
def test_many_to_one_reverse(self, session, objects, calls):
address = session.query(models.Address).first()
address.user
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.Address, 'user')
assert 'address.user' in ''.join(call.frame[4])
def test_many_to_one_reverse_subquery(self, session, objects, calls):
address = session.query(
models.Address
).options(
sa.orm.subqueryload('user')
).first()
address.user
assert len(calls) == 0
def test_many_to_one_reverse_joined(self, session, objects, calls):
address = session.query(models.Address).options(sa.orm.joinedload('user')).first()
address.user
assert len(calls) == 0
class TestManyToMany:
def test_many_to_many(self, session, objects, calls):
user = session.query(models.User).first()
user.hobbies
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.User, 'hobbies')
assert 'user.hobbies' in ''.join(call.frame[4])
def test_many_to_many_subquery(self, session, objects, calls):
user = session.query(models.User).options(sa.orm.subqueryload('hobbies')).first()
user.hobbies
assert len(calls) == 0
def test_many_to_many_joined(self, session, objects, calls):
user = session.query(models.User).options(sa.orm.joinedload('hobbies')).first()
user.hobbies
assert len(calls) == 0
def test_many_to_many_reverse(self, session, objects, calls):
hobby = session.query(models.Hobby).first()
hobby.users
assert len(calls) == 1
call = calls[0]
assert call.objects == (models.Hobby, 'users')
assert 'hobby.users' in ''.join(call.frame[4])
def test_many_to_many_reverse_subquery(self, session, objects, calls):
hobby = session.query(models.Hobby).options(sa.orm.subqueryload('users')).first()
hobby.users
assert len(calls) == 0
def test_many_to_many_reverse_joined(self, session, objects, calls):
hobby = session.query(models.Hobby).options(sa.orm.joinedload('users')).first()
hobby.users
assert len(calls) == 0
|
[
"jm.carp@gmail.com"
] |
jm.carp@gmail.com
|
bb25994ee6bd92450fefb19af4aa9d2536f1f92b
|
eae7c90e108721099c90eafbea50425ffb6cca58
|
/survay_form/survay_form/wsgi.py
|
ee0cad20a2107f93de85c54ea14147c22b0f3e74
|
[] |
no_license
|
azadehboojari/Django_Simple
|
ff52b4a35e1d2f57d5dd930c0e684e1a9d222d35
|
670e0bccb95f5d0e92b06d55e81eabdfc2836724
|
refs/heads/master
| 2020-04-02T01:44:34.264239
| 2018-10-20T05:51:19
| 2018-10-20T05:51:19
| 153,872,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for survay_form project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "survay_form.settings")
application = get_wsgi_application()
|
[
"azadeh.bojari@yahoo.com"
] |
azadeh.bojari@yahoo.com
|
7c65cfcefe6d0773903720bf31e2214bfdae2623
|
8cb0405401b1615dfeab18afe4d0be2438037b07
|
/ff-13-2-clock/clock.py
|
f8aa84568cd740b4e69bb66e4d7ba2fb312f9c9c
|
[] |
no_license
|
kwaters/hacks
|
d6b089b00c6134a92630a1ffba71c4b3686d0420
|
52813a7bef310445b7781d2e3ce989245466d66d
|
refs/heads/master
| 2021-01-15T11:13:33.592050
| 2017-01-19T07:18:03
| 2017-01-19T07:18:03
| 1,401,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
#!/usr/bin/env python
"""Clock puzzle sover for FF XIII-2."""
import argparse
import pprint
import exactcover
def create_grid(clock):
"""Turn the clock into an exact cover problem."""
size = len(clock)
m = []
# Each of the two moves from each number
for i, v in enumerate(clock):
m.append([(0, i), (1, (i - v) % size)])
if v * 2 != size:
m.append([(0, i), (1, (i + v) % size)])
# The first and last numbers are special
for i in xrange(size):
m.append([(0, -1), (1, i)])
m.append([(0, i), (1, -1)])
return m
def print_solution(clock, covering):
"""Print the solution coresponding to a covering."""
mapping = {}
for source, dest in covering:
mapping[source[1]] = dest[1]
chain = []
i = mapping[-1]
while i != -1:
chain.append(i)
i = mapping[i]
# Not all coverings form a valid solution. Some will contain an
# unconnected cycle.
if len(chain) != len(clock):
return
print ' -> '.join('{}({})'.format(i, clock[i]) for i in chain)
def main():
args = argparse.ArgumentParser()
args.add_argument('clock', type=int, nargs='+')
opts = args.parse_args()
clock = opts.clock
m = create_grid(clock)
for cover in exactcover.Coverings(m):
print_solution(clock, cover)
if __name__ == '__main__':
main()
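# Hedged usage sketch (illustrative, values are hypothetical): the clock face
# is passed as positional integers, e.g.
#   ./clock.py 2 3 1 3 2 1
# and every valid solution is printed as an index chain of the form
#   start(value) -> next(value) -> ..., visiting each position exactly once.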
|
[
"kwwaters@gmail.com"
] |
kwwaters@gmail.com
|
56250a36155d7f49b172ec18f33a5f218419669f
|
4b7dce428c7bd29f3446725a502377ed24e2af7d
|
/Source Code/Relation.py
|
6105f6c700077469f523c254c466006c04de3cec
|
[] |
no_license
|
umairtehami/Twitter-Social-Capital
|
24b569a67cd71335ea60fabe98cd054023b1abc7
|
a09007472f4a6a6e179c02f3d9d30167b94dcb28
|
refs/heads/main
| 2023-08-23T21:04:46.965665
| 2021-10-27T15:42:20
| 2021-10-27T15:42:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
# Relation class saves information about the interactions between two profiles
class Relation:
    def __init__(self, source, target, weight = 1, label = "", type = "Directed"):
        self.source = source
        self.target = target
        self.weight = weight
self.label = label
self.type = type
# Upgrade relation weight
def upgrade_weight(self,weight):
self.weight = weight
# Get relation weight
def get_weight(self):
return int(self.weight)
# Print relation
def print_relation(self):
print(self.source, self.label, self.target, self.weight, self.type)
    # Return True if the relation already exists, False otherwise
def existing_relation(self,relations):
for rel in relations:
if str(rel.source) == str(self.source) and str(rel.target) == str(self.target):
return True
return False
    # Increase the weight by 1 if the relation already exists and return True, False otherwise
def existing_and_upgrade_relation(self,relations):
for rel in relations:
if str(rel.source) == str(self.source) and str(rel.target) == str(self.target):
weight = rel.get_weight() + 1
rel.upgrade_weight(weight)
return True
return False
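# Hedged usage sketch (illustrative addition, not part of the upstream file)
if __name__ == '__main__':
    relations = []
    rel = Relation('alice', 'bob', label='mentions')
    if not rel.existing_and_upgrade_relation(relations):
        relations.append(rel)  # first interaction: weight starts at 1
    # a second identical interaction bumps the weight to 2
    Relation('alice', 'bob', label='mentions').existing_and_upgrade_relation(relations)
    relations[0].print_relation()  # alice mentions bob 2 Directed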
|
[
"noreply@github.com"
] |
umairtehami.noreply@github.com
|
36fb968bc93923a42bc256ed24596a0c4cd64147
|
51418ae6005e41ae19b314ca0416331dfba4f21c
|
/tan/tanlibrary/src/clFFT-master/src/scripts/perf/fftPerformanceTesting.py
|
cb0602dc2cd990f53ca541ba6726fcbab7928b41
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
GPUOpen-LibrariesAndSDKs/TAN
|
ead68a3dd2d8e3a3678ada593ef44ce55ab7d54e
|
690ed6a92c594f4ba3a26d1c8b77dbff386c9b04
|
refs/heads/beta-cross-platform
| 2023-04-02T08:27:50.622740
| 2020-10-07T18:34:38
| 2020-10-07T18:34:38
| 65,836,265
| 141
| 32
|
MIT
| 2020-03-26T16:08:01
| 2016-08-16T16:33:14
|
C++
|
UTF-8
|
Python
| false
| false
| 11,307
|
py
|
# ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
import itertools
import re  # regex
import subprocess
import os
import sys
from datetime import datetime
# Common data and functions for the performance suite
tableHeader = 'lengthx,lengthy,lengthz,batch,device,inlay,outlay,place,precision,label,GFLOPS'
class TestCombination:
def __init__(self,
lengthx, lengthy, lengthz, batchsize,
device, inlayout, outlayout, placeness, precision,
label):
self.x = lengthx
self.y = lengthy
self.z = lengthz
self.batchsize = batchsize
self.device = device
self.inlayout = inlayout
self.outlayout = outlayout
self.placeness = placeness
self.precision = precision
self.label = label
def __str__(self):
return self.x + 'x' + self.y + 'x' + self.z + ':' + self.batchsize + ', ' + self.device + ', ' + self.inlayout + '/' + self.outlayout + ', ' + self.placeness + ', ' + self.precision + ' -- ' + self.label
class GraphPoint:
def __init__(self,
lengthx, lengthy, lengthz, batchsize,
precision, device, label,
gflops):
self.x = lengthx
self.y = lengthy
self.z = lengthz
self.batchsize = batchsize
self.device = device
self.label = label
self.precision = precision
self.gflops = gflops
self.problemsize = str(int(self.x) * int(self.y) * int(self.z) * int(self.batchsize))
def __str__(self):
# ALL members must be represented here (x, y, z, batch, device, label, etc)
return self.x + 'x' + self.y + 'x' + self.z + ':' + self.batchsize + ', ' + self.precision + ' precision, ' + self.device + ', -- ' + self.label + '; ' + self.gflops
class TableRow:
# parameters = class TestCombination instantiation
def __init__(self, parameters, gflops):
self.parameters = parameters
self.gflops = gflops
def __str__(self):
return self.parameters.__str__() + '; ' + self.gflops
def transformDimension(x,y,z):
if int(z) != 1:
return 3
elif int(y) != 1:
return 2
elif int(x) != 1:
return 1
def executable(library):
if type(library) != str:
print 'ERROR: expected library name to be a string'
quit()
if sys.platform != 'win32' and sys.platform != 'linux2':
print 'ERROR: unknown operating system'
quit()
if library == 'clFFT' or library == 'null':
if sys.platform == 'win32':
exe = 'clFFT-client.exe'
elif sys.platform == 'linux2':
exe = 'clFFT-client'
elif library == 'cuFFT':
if sys.platform == 'win32':
exe = 'cuFFT-client.exe'
elif sys.platform == 'linux2':
exe = 'cuFFT-client'
else:
print 'ERROR: unknown library -- cannot determine executable name'
quit()
return exe
def max_mem_available_in_bytes(exe, device):
arguments = [exe, '-i', device]
deviceInfo = subprocess.check_output(arguments, stderr=subprocess.STDOUT).split(os.linesep)
deviceInfo = itertools.ifilter( lambda x: x.count('MAX_MEM_ALLOC_SIZE'), deviceInfo)
deviceInfo = list(itertools.islice(deviceInfo, None))
maxMemoryAvailable = re.search('\d+', deviceInfo[0])
return int(maxMemoryAvailable.group(0))
def max_problem_size(exe, layout, precision, device):
if precision == 'single':
bytes_in_one_number = 4
elif precision == 'double':
bytes_in_one_number = 8
else:
print 'max_problem_size(): unknown precision'
quit()
max_problem_size = pow(2,25)
if layout == '5':
max_problem_size = pow(2,24) # TODO: Upper size limit for real transform
return max_problem_size
def maxBatchSize(lengthx, lengthy, lengthz, layout, precision, exe, device):
problemSize = int(lengthx) * int(lengthy) * int(lengthz)
maxBatchSize = max_problem_size(exe, layout, precision, device) / problemSize
return str(maxBatchSize)
def create_ini_file_if_requested(args):
if args.createIniFilename:
for x in vars(args):
if (type(getattr(args,x)) != file) and x.count('File') == 0:
args.createIniFilename.write('--' + x + os.linesep)
args.createIniFilename.write(str(getattr(args,x)) + os.linesep)
quit()
def load_ini_file_if_requested(args, parser):
if args.useIniFilename:
argument_list = args.useIniFilename.readlines()
argument_list = [x.strip() for x in argument_list]
args = parser.parse_args(argument_list)
return args
def is_numeric_type(x):
return type(x) == int or type(x) == long or type(x) == float
def split_up_comma_delimited_lists(args):
for x in vars(args):
attr = getattr(args, x)
if attr == None:
setattr(args, x, [None])
elif is_numeric_type(attr):
setattr(args, x, [attr])
elif type(attr) == str:
setattr(args, x, attr.split(','))
return args
class Range:
def __init__(self, ranges, defaultStep='+1'):
# we might be passed in a single value or a list of strings
# if we receive a single value, we want to feed it right back
if type(ranges) != list:
self.expanded = ranges
elif ranges[0] == None:
self.expanded = [None]
else:
self.expanded = []
for thisRange in ranges:
thisRange = str(thisRange)
if re.search('^\+\d+$', thisRange):
self.expanded = self.expanded + [thisRange]
elif thisRange == 'max':
self.expanded = self.expanded + ['max']
else:
#elif thisRange != 'max':
if thisRange.count(':'):
self._stepAmount = thisRange.split(':')[1]
else:
self._stepAmount = defaultStep
thisRange = thisRange.split(':')[0]
if self._stepAmount.count('x'):
self._stepper = '_mult'
else:
self._stepper = '_add'
self._stepAmount = self._stepAmount.lstrip('+x')
self._stepAmount = int(self._stepAmount)
if thisRange.count('-'):
self.begin = int(thisRange.split('-')[0])
self.end = int(thisRange.split('-')[1])
else:
self.begin = int(thisRange.split('-')[0])
self.end = int(thisRange.split('-')[0])
self.current = self.begin
if self.begin == 0 and self._stepper == '_mult':
self.expanded = self.expanded + [0]
else:
while self.current <= self.end:
self.expanded = self.expanded + [self.current]
self._step()
# now we want to uniquify and sort the expanded range
self.expanded = list(set(self.expanded))
self.expanded.sort()
# advance current value to next
def _step(self):
getattr(self, self._stepper)()
def _mult(self):
self.current = self.current * self._stepAmount
def _add(self):
self.current = self.current + self._stepAmount
def expand_range(a_range):
return Range(a_range).expanded
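# Hedged examples (illustrative, not part of the original file):
#   expand_range(['1-16:x2']) -> [1, 2, 4, 8, 16]   (multiplicative step)
#   expand_range(['1-4'])     -> [1, 2, 3, 4]       (default '+1' step)
# Literal '+N' strings and 'max' pass through unexpanded.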
def decode_parameter_problemsize(problemsize):
if not problemsize.count(None):
i = 0
while i < len(problemsize):
problemsize[i] = problemsize[i].split(':')
j = 0
while j < len(problemsize[i]):
problemsize[i][j] = problemsize[i][j].split('x')
j = j+1
i = i+1
return problemsize
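# Hedged example (illustrative, not part of the original file):
#   decode_parameter_problemsize(['16x16x1:32']) -> [[['16', '16', '1'], ['32']]]
# i.e. each problem size is split into a [lengths, batch] pair of string lists.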
def gemm_table_header():
return 'm,n,k,lda,ldb,ldc,alpha,beta,order,transa,transb,function,device,library,label,GFLOPS'
class GemmTestCombination:
def __init__(self,
sizem, sizen, sizek, lda, ldb, ldc,
alpha, beta, order, transa, transb,
function, device, library, label):
self.sizem = str(sizem)
self.sizen = str(sizen)
self.sizek = str(sizek)
self.lda = str(lda)
self.ldb = str(ldb)
self.ldc = str(ldc)
self.alpha = str(alpha)
self.beta = str(beta)
self.order = order
self.transa = transa
self.transb = transb
self.function = function
self.device = device
self.library = library
self.label = label
def __str__(self):
return self.sizem + 'x' + self.sizen + 'x' + self.sizek + ':' + self.lda + 'x' + self.ldb + 'x' + self.ldc + ', ' + self.device + ', ' + self.function + ', ' + self.library + ', alpha(' + self.alpha + '), beta(' + self.beta + '), order(' + self.order + '), transa(' + self.transa + '), transb(' + self.transb + ') -- ' + self.label
class GemmGraphPoint:
def __init__(self,
sizem, sizen, sizek,
lda, ldb, ldc,
device, order, transa, transb,
function, library, label,
gflops):
self.sizem = sizem
self.sizen = sizen
self.sizek = sizek
self.lda = lda
self.ldb = ldb
self.ldc = ldc
self.device = device
self.order = order
self.transa = transa
self.transb = transb
self.function = function
self.library = library
self.label = label
self.gflops = gflops
def __str__(self):
# ALL members must be represented here (x, y, z, batch, device, label, etc)
return self.sizem + 'x' + self.sizen + 'x' + self.sizek + ':' + self.device + ', ' + self.function + ', ' + self.library + ', order(' + self.order + '), transa(' + self.transa + '), transb(' + self.transb + ') -- ' + self.label + '; ' + self.gflops + ' gflops'
def open_file( filename ):
if type(filename) == list:
filename = filename[0]
if filename == None:
filename = 'results' + datetime.now().isoformat().replace(':','.') + '.txt'
else:
if os.path.isfile(filename):
oldname = filename
filename = filename + datetime.now().isoformat().replace(':','.')
message = 'A file with the name ' + oldname + ' already exists. Changing filename to ' + filename
print message
return open(filename, 'w')
|
[
"fang.he@amd.com"
] |
fang.he@amd.com
|
15d215b500c6d26dbd37bfda3a9d73e8979c26aa
|
01af3f8a79453482febefe64d356a616abc08c1e
|
/backend/config/settings/production/third_party.py
|
c58e3e64fb67c81f95dfd14e878c6d18778211f4
|
[] |
no_license
|
by-Exist/django-skeleton
|
0ea3dbc815cb8da8417ef0f64e304715b8e5b5dd
|
4848dd1074533b368015cdde943719114d001bcc
|
refs/heads/master
| 2023-06-12T12:52:09.216952
| 2021-07-12T08:48:09
| 2021-07-12T08:48:09
| 372,245,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
from .django import *
# Django Storage
# =============================================================================
STATICFILES_STORAGE = "config.storages.StaticStorage"
DEFAULT_FILE_STORAGE = "config.storages.MediaStorage"
AWS_S3_REGION_NAME = "ewr1" # TODO: region, endpoint url도 environment로 관리해야 하지 않을까?
AWS_S3_ENDPOINT_URL = f"https://{AWS_S3_REGION_NAME}.vultrobjects.com/"
AWS_ACCESS_KEY_ID = env.str("DJANGO_STORAGE_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = env.str("DJANGO_STORAGE_SECRET_ACCESS_KEY")
# Django REST Framework
# =============================================================================
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK[
"DEFAULT_VERSIONING_CLASS"
] = "rest_framework.versioning.URLPathVersioning"
# DRF Spectacular
# =============================================================================
# https://drf-spectacular.readthedocs.io/en/latest/settings.html
SPECTACULAR_SETTINGS["TITLE"] = "Backend Production API"
SPECTACULAR_SETTINGS["DESCRIPTION"] = "Backend Production api description..."
SPECTACULAR_SETTINGS["VERSION"] = "0.0.1"
# https://swagger.io/docs/open-source-tools/swagger-ui/usage/configuration/
SPECTACULAR_SETTINGS["SWAGGER_UI_SETTINGS"]["supportedSubmitMethods"] = []
# Django Cachalot
# =============================================================================
# https://django-cachalot.readthedocs.io/en/latest/quickstart.html#settings
INSTALLED_APPS += ["cachalot"]
CACHES["cachalot"] = env.cache("DJANGO_CACHALOT_CACHE_URL")
CACHALOT_CACHE = "cachalot"
CACHALOT_UNCACHABLE_TABLES = ["django_migrations"]
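# Hedged environment sketch (values are hypothetical): django-environ reads
# DJANGO_CACHALOT_CACHE_URL as a cache URL, e.g.
#   DJANGO_CACHALOT_CACHE_URL=rediscache://redis:6379/1
# alongside DJANGO_STORAGE_ACCESS_KEY_ID / DJANGO_STORAGE_SECRET_ACCESS_KEY
# for the S3-compatible object storage configured above.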
|
[
"bolk9652@naver.com"
] |
bolk9652@naver.com
|
bf8a6a3bbd710bdaa7611c6890907a61a0e9cce7
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_010/ch136_2020_04_01_12_09_01_220465.py
|
566b2a82eb758b3344edaf9b17037a14dee59e8d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,641
|
py
|
import random
dinheiro=10
dicas=True
jogo=True
chutes=True
dado1=random.randint (1,6)
dado2=random.randint (1,6)
dado3=random.randint (1,6)
soma=dado1+dado2+dado3
while dicas:
    print ("Hint phase")
    print ("You have {} coins.".format (dinheiro))
    if dinheiro==0:
        dicas=False
        chutes=False
        print ("You lost the game!")
    else:
        pergunta=str(input("Do you want a hint? "))
        if pergunta=="yes":
            dinheiro=dinheiro-1
            dica1=int(input("Enter the first number: "))
            dica2=int(input("Enter the second number: "))
            dica3=int(input("Enter the third number: "))
            if dica1==soma or dica2==soma or dica3==soma:
                print ("It is among the three")
            else:
                print ("It is not among the three")
        elif pergunta=="no":
            dicas=False
while chutes:
    print ("Guess phase")
    print ("You have {} coins.".format (dinheiro))
    if dinheiro==0:
        print ("You lost the game!")
        chutes=False
    else:
        chute=int(input("Guess a number: "))
        if chute==soma:
            dinheiro=dinheiro + 5*dinheiro
            print ("You got it right!")
            chutes=False
            print ("You won the game with {} coins.".format (dinheiro))
        else:
            print ("You got it wrong!")
            dinheiro=dinheiro-1
            if dinheiro==0:
                print ("You lost!")
                chutes=False
|
[
"you@example.com"
] |
you@example.com
|
ca2e11ed3a29496a59aceae5171f893f340a43d0
|
c0caed81b5b3e1498cbca4c1627513c456908e38
|
/src/python/bindings/app/pyrosetta_toolkit/pyrosetta_toolkit.py
|
b9934a7edda6e3f28079e9c1f622ad02ca1c8a1e
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
malaifa/source
|
5b34ac0a4e7777265b291fc824da8837ecc3ee84
|
fc0af245885de0fb82e0a1144422796a6674aeae
|
refs/heads/master
| 2021-01-19T22:10:22.942155
| 2017-04-19T14:13:07
| 2017-04-19T14:13:07
| 88,761,668
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,387
|
py
|
#!/usr/bin/env python
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
## @file /GUIs/pyrosetta_toolkit/pyrosetta_toolkit.py
## @brief Main window for the toolkit.
## @author Jared Adolf-Bryfogle (jadolfbr@gmail.com)
#Rosetta Imports
from rosetta import *
import functools
#Python Imports
from os import listdir
from os import getcwd
from shutil import copy
from os import remove
from os import environ
from os import path
from os import system
import glob
import signal
import sys
#Append Python Path
#p = os.path.split(os.path.abspath(__file__))[0]
#p2 = p.split("/"); p2.pop()
#sys.path.append("/".join(p2)); #Allows all Window_Modules to use Modules and the use of python GUIs from main GUI directory
#Tk Imports
from Tkinter import *
from Tkinter import Frame as FrameTk
import tkFileDialog
import tkMessageBox
import tkSimpleDialog
import tkFont
#Toolkit Imports
from app.pyrosetta_toolkit.modules import tools
from app.pyrosetta_toolkit.window_main.menu import *
from app.pyrosetta_toolkit.window_main import global_variables
from app.pyrosetta_toolkit.window_main.frames.InputFrame import InputFrame
from app.pyrosetta_toolkit.window_main.frames.OutputFrame import OutputFrame
from app.pyrosetta_toolkit.window_main.frames.QuickProtocolsFrame import QuickProtocolsFrame
from app.pyrosetta_toolkit.window_main.frames.SimpleAnalysisFrame import SimpleAnalysisFrame
from app.pyrosetta_toolkit.window_main.IO.GUIInput import GUIInput
from app.pyrosetta_toolkit.window_main.IO.GUIOutput import GUIOutput
from app.pyrosetta_toolkit.window_modules.pymol_integration.PyMOL import AdvancedPyMOL
from app.pyrosetta_toolkit.window_modules.scorefunction.ScoreFxnControl import ScoreFxnControl
from app.pyrosetta_toolkit.modules.Region import Region
class main_window:
def __init__(self):
"""
Initializes the main window.
Sets common global variables.
"""
self._tk_ = Tk()
self.pose = Pose()
self.native_pose = Pose()
self.current_directory = global_variables.current_directory = getcwd()
self.toolkit_home = self.location()[0]
self.DesignDic = dict()
### Init ###
self._initialize_GUI()
self._initialize_Frames()
### TextBox ###
self.textbox_frame = Frame(self._tk_, bd=3, relief=GROOVE)
outfont = tkFont.Font(family="Helvetica", size=11)
self.output_textbox= Text(self.textbox_frame,wrap="word", height=8,width=113,font = outfont)
self.output_scrollbar = Scrollbar(self.textbox_frame)
self.output_textbox.configure(yscrollcommand = self.output_scrollbar.set)
self.output_scrollbar.configure(command = self.output_textbox.yview)
#self.old_stdout = sys.stdout
#self.output_class.terminal_output.trace_variable('w', self.output_tracer)
#self.output_class.terminal_output.set(0)
self.input_class.options_manager.print_current_options()
print "\nRegion Selection Tips: No regions added = Full structure selected. \nAdding Regions: For N-terminus omit start; For C-terminus omit end; For whole Chain omit start + end"
print "For additional protocol options, please use the Option System Manager.\n"
print "Please see RosettaCommons for full documentation and references for all protocols and tools utilized in the GUI\n"
def quit(self):
self._tk_.destroy()
def _initialize_GUI(self):
"""
Creates object for the GUI
"""
#self.options_class = OptionSystemManager(global_variables.current_directory)Relocated to input_class
self.input_class = GUIInput(self)
self.output_class = GUIOutput(self)
####Sequence#####
self.residue_string = StringVar()
self.input_class.region_sequence.trace_variable('w', self.clear_num_string_on_new_input)
self.sequence_output = Entry(self._tk_, textvariable = self.input_class.region_sequence)
#self.sequence_output.bind('<FocusIn>', self.print_numbering)
self.sequence_output.bind('<ButtonRelease-1>', self.print_numbering)
self.sequence_output.bind('<KeyRelease>', self.print_numbering)
self.seq_scroll = Scrollbar(self._tk_, orient=HORIZONTAL, command=self.__scrollHandler)
self.num_label = Label(self._tk_, textvariable = self.residue_string, justify=CENTER)
####Sequence#####
self.score_class = ScoreFxnControl(); #Main Score Function Object. Holds Score. Controls switching scorefunctions, etc.
self.pymol_class = AdvancedPyMOL(self.pose); #PyMOL Object for advanced visualization.
def clear_num_string_on_new_input(self, name, index, mode):
self.residue_string.set("")
self.input_class.set_residue_of_interest("", "", "")
def print_numbering(self, event):
if not self.pose.total_residue():return
#print self.sequence_output.index(INSERT)
rosetta_num=0
pdb_num=""
if self.pose.total_residue()==len(self.input_class.region_sequence.get()):
rosetta_num = 1+self.sequence_output.index(INSERT)
try:
pdb_num = self.pose.pdb_info().pose2pdb(rosetta_num)
except PyRosettaException:
                #Catches the LAST index
return
#print self.num_string
else:
region = self.input_frame.return_region_from_entry()
rosetta_num = region.get_rosetta_start(self.pose)+self.sequence_output.index(INSERT)
try:
pdb_num = self.pose.pdb_info().pose2pdb(rosetta_num)
except PyRosettaException:
return
pdbSP = pdb_num.split()
self.input_class.set_residue_of_interest(pdbSP[0], pdbSP[1], repr(rosetta_num))
self.input_class.residue_string.set(pdb_num+' - '+repr(rosetta_num))
self.residue_string.set(pdb_num+' - '+repr(rosetta_num))
self.input_class.residue_rosetta_resnum.set(repr(rosetta_num))
if self.pymol_class.auto_send_residue_colors.get():
self.pymol_class.color_residue(int(rosetta_num))
#self.fullcontrol_class.shoInfo(pdbSP[0], pdbSP[1])
def __scrollHandler(self, *L):
"""
Handles scrolling of entry.
CODE: http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/entry-scrolling.html
"""
try:
op, howMany = L[0], L[1]
except IndexError:
return
if op =='scroll':
units = L[2]
self.sequence_output.xview_scroll(howMany, units)
elif op=='moveto':
self.sequence_output.xview_moveto(howMany)
def _initialize_Frames(self):
"""
Creates the Frame Objects that will go in the main window
"""
self.input_frame = InputFrame(self._tk_, self, self.input_class, bd=1, relief=SUNKEN)
self.output_frame = OutputFrame(self._tk_, self, self.output_class, bd=1, relief = SUNKEN)
self.protocol_frame = QuickProtocolsFrame(self._tk_, self, self.output_class, bd=1, relief=SUNKEN)
self.simple_analysis_frame = SimpleAnalysisFrame(self._tk_, self, bd=2, relief=SUNKEN)
self.menu_class = Menus(self._tk_, self)
def show_gui(self):
"""
Shows each piece of the main GUI.
Does not do anything with the Window Modules, just each individual main component of the main window.
        These inherit from the Frame class. See one of them for an example.
Window Modules should be initialized through the Menus class in /window_main/menu.py
"""
#6x4 Grid Pain in the ass. At some point, everything will move to Qt - Either in Python or C++
#Grid:
self.menu_class.setTk(); self.menu_class.shoTk()
self.input_frame.grid(row=1, column=0, rowspan=7, padx=15, pady=15);
self.output_frame.grid(row=0, column=1, rowspan=2, pady=3);
self.protocol_frame.grid(row=3, column=1, rowspan=4, padx=5)
self.simple_analysis_frame.grid(row=0, column=0, padx=5, pady=5)
### Text Output ###
self.num_label.grid(column=0, row=8, columnspan=2, pady=2, padx=2)
self.seq_scroll.grid(column=0, row=9, columnspan=3, sticky=W+E)
self.sequence_output.grid(column=0, row=10, columnspan=3, sticky=W+E)
self.sequence_output['xscrollcommand']=self.seq_scroll.set
self.output_textbox.grid(column=0, row = 11, rowspan=2, columnspan=3,sticky=W+E)
self.output_scrollbar.grid(column=3, row=11, rowspan=2, sticky=E+N+S)
self.textbox_frame.grid(column=0, row=11, rowspan=2, columnspan=3, sticky=W+E, pady=3, padx=6)
#self.Photo.grid(row = 0, column = 2, rowspan=4)
"""
#Pack:
self.menu_class.setTk(); self.menu_class.shoTk()
self.input_class.pack(side=LEFT, padx=3, pady=3)
self.output_class.pack(padx=3, pady=3)
self.simple_analysis_frame.pack(padx=3, pady=3)
self.protocol_frame.pack(padx=3, pady=3)
#self.output_textbox.pack(side=BOTTOM, padx=3, pady=3)
#self.output_frame.pack(side=BOTTOM, padx=3, pady=3)
"""
def run(self, run_event_loop=True):
self._tk_.title("PyRosetta Toolkit")
self.show_gui()
self._tk_.grid_columnconfigure(ALL, weight=1)
#self._tk_.grid_rowconfigure(ALL, weight=1)
if run_event_loop:
self._tk_.mainloop()
def redirect_stdout_to_textbox(self):
print "Redirect stdout to textbox"
sys.stdout = self; #Set stdout to be redirected to textbox using the write function override.
def redirect_stdout_to_default(self):
print "Redirect stdout to default"
sys.stdout = self.old_stdout
def write(self, text):
self.output_textbox.insert(END, text)
self.output_textbox.yview(END)
def output_tracer(self, name, index, mode):
"""
Controls where stdout goes. Textbox or Terminal.
Does not override Tracer for now.
"""
varvalue = self.output_class.terminal_output.get()
if (varvalue):
self.redirect_stdout_to_default()
else:
self.redirect_stdout_to_textbox()
def location(self):
"""
        Allows the script to be aware of its own path,
        so that it can be imported/run from anywhere.
"""
p = os.path.abspath(__file__)
pathSP = os.path.split(p)
return pathSP
class MainTracer(rosetta.basic.PyTracer):
def __init__(self, textbox):
rosetta.basic.PyTracer.__init__(self)
self.textbox = textbox
def output_callback(self, s):
pass
#s = " "+s
#self.textbox.insert(END, s)
#print s
if __name__ == '__main__':
rosetta.init()
main_window_class = main_window()
#main_window_class.TR = MainTracer(main_window_class.output_textbox)
#rosetta.basic.Tracer.set_ios_hook(main_window_class.TR, rosetta.basic.Tracer.get_all_channels_string(), False)
#rosetta.init(extra_options="-mute all")
main_window_class.run()
|
[
"malaifa@yahoo.com"
] |
malaifa@yahoo.com
|
bb237b5e5a2ead7f2dad38a09c1b76d15c7f747b
|
fcb8d0d3b2066d6b0c6d43b76da6facbce805dd2
|
/tkinter project/index.py
|
58939779801d48ce141a01b2ab682b1518473858
|
[] |
no_license
|
urvisuthar85/python_prectice_projects
|
1722655a3ce5b4e5c1c2b706f583a252eeedf8ec
|
004bebfb79925a1d358c204a28a180d7227787c4
|
refs/heads/master
| 2022-12-23T18:36:48.927849
| 2020-10-05T10:26:33
| 2020-10-05T10:26:33
| 301,370,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
from tkinter import *
window = Tk()
l1 = Label(window,text="Title")
l1.grid(row=0,column=0)
l2 = Label(window,text="Author")
l2.grid(row=0,column=2)
l3 = Label(window,text="Year")
l3.grid(row=1,column=0)
l4 = Label(window,text="ISON")
l4.grid(row=1,column=2)
title_text = StringVar()
el = Entry(window,textvariable = title_text)
el.grid(row=0,column=1)
author_text = StringVar()
el = Entry(window,textvariable = author_text)
el.grid(row=0,column=3)
year_text = StringVar()
el = Entry(window,textvariable = year_text)
el.grid(row=1,column=1)
isbn_text = StringVar()
el = Entry(window,textvariable = isbn_text)
el.grid(row=1,column=3)
list1 = Listbox(window,height = 6,width=35)
list1.grid(row=2,column = 0,rowspan=6,columnspan=2)
sb1 = Scrollbar(window)
sb1.grid(row=3,column=2)
list1.configure(yscrollcommand=sb1.set)
sb1.configure(command=list1.yview)
b1 = Button(window,text = "view all",width = 12)
b1.grid(row=2,column=3)
b2 = Button(window,text = "Search entry",width = 12)
b2.grid(row=3,column=3)
b3 = Button(window,text = "add entry",width = 12)
b3.grid(row=4,column=3)
b4 = Button(window,text = "update selected",width = 12)
b4.grid(row=5,column=3)
b5 = Button(window,text = "delete selected",width = 12)
b5.grid(row=6,column=3)
b6 = Button(window,text = "close",width = 12)
b6.grid(row=7,column=3)
window.mainloop()
|
[
"urvashicitrusbug@gmail.com"
] |
urvashicitrusbug@gmail.com
|
69024abc125c1c0fbb26411947e1976dc81fb6e6
|
1f41b828fb652795482cdeaac1a877e2f19c252a
|
/maya_menus/_MAINMENU_PMC_Rigging/05.Put Object-(RP[N])/03.Put Controller/18.sidePin.py
|
e9ed02aaea8fe1c13e224651d6f47fb6657f251a
|
[] |
no_license
|
jonntd/mayadev-1
|
e315efe582ea433dcf18d7f1e900920f5590b293
|
f76aeecb592df766d05a4e10fa2c2496f0310ca4
|
refs/heads/master
| 2021-05-02T07:16:17.941007
| 2018-02-05T03:55:12
| 2018-02-05T03:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
from maya import cmds
from sgMaya import sgModel, sgCmds
sels = cmds.ls( sl=1 )
if not sels: sels = [None]
for sel in sels:
sgCmds.putControllerToGeo( sel, sgModel.Controller.sidePinPoints )
|
[
"kimsung9k@naver.com"
] |
kimsung9k@naver.com
|
c27f0382061c7b128225a49078586dffc8011c53
|
fcf993336ce067f1d3f05205cb545a861d1e51cb
|
/product_microservice/manage.py
|
86c8f0cbbef358c6401940ad200611beac517e64
|
[
"MIT"
] |
permissive
|
fga-eps-mds/2018.2-FGAPP-produto
|
a2c5621cdf37c7baf04e30a9da54184966331034
|
5324c611c041269f035d27b2cf4e26c2f4b723e2
|
refs/heads/master
| 2020-03-27T21:35:34.440086
| 2018-11-05T22:41:42
| 2018-11-05T22:41:42
| 147,159,571
| 2
| 0
|
MIT
| 2018-11-10T01:19:38
| 2018-09-03T06:12:17
|
Python
|
UTF-8
|
Python
| false
| false
| 564
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "product_microservice.settings.development")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"lucascostaa73@gmail.com"
] |
lucascostaa73@gmail.com
|
d8bfc4ff4a4f91f23273d7deb2fa1347aa3f495e
|
287c96d3d7b68f2faace8716ff8cf1c33662d49c
|
/New folder/test.py
|
4e79c518b0d36b215b642bbabdc380e2bfe052a2
|
[] |
no_license
|
sovan91/Python-Code
|
53efd576cd3bad8498f9983d4ca6ce434b5ed6fc
|
1a68f83c52deddbc2eff47745f58a23c1f52a471
|
refs/heads/master
| 2020-04-13T04:28:25.202439
| 2019-02-13T14:33:44
| 2019-02-13T14:33:44
| 162,962,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
import logging
logging.basicConfig(filename='log.txt',level=logging.INFO)
logging.info("A new request come")
try:
x=int(input("Enter first input number:"))
y=int(input("Enter second input number:"))
print(x/y)
except ZeroDivisionError as msg:
print("We cannot divide with zero")
logging.exception(msg)
except ValueError as msg:
print("Only int value will be accepted")
logging.exception(msg)
logging.info("Request processing completed")
|
[
"noreply@github.com"
] |
sovan91.noreply@github.com
|
be33d28852484275819ace98b621bc01decf9381
|
985a874c832d7632e287f2185b18aaf2e1b42018
|
/dtcwt_gainlayer/layers/nonlinear.py
|
f3f2b274c311342e0a0b16400783156a896a9a06
|
[
"MIT"
] |
permissive
|
fbcotter/dtcwt_gainlayer
|
e2ea03ccfe8ad4f903b59846c1c902391c66b227
|
32ec3e21066edc2a0d5edefaf70f43d031d1b4ac
|
refs/heads/master
| 2023-03-28T13:08:37.919222
| 2019-08-20T09:05:46
| 2019-08-20T09:05:46
| 157,608,716
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,276
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as func
from dtcwt_gainlayer.layers.shrink import SparsifyWaveCoeffs_std, mag, SoftShrink
class PassThrough(nn.Module):
def forward(self, x):
return x
class WaveNonLinearity(nn.Module):
""" Performs a wavelet-based nonlinearity.
Args:
C (int): Number of input channels. Some of the nonlinearities have batch
norm, so need to know this.
lp (str): Nonlinearity to use for the lowpass coefficients
bp (list(str)): Nonlinearity to use for the bandpass coefficients.
lp_q (float): Quantile value for sparsity threshold for lowpass.
1 keeps all coefficients and 0 keeps none. Only valid if lp is
'softshrink_std' or 'hardshrink_std'. See
:class:`SparsifyWaveCoeffs_std`.
bp_q (float): Quantile value for sparsity threshold for bandpass
coefficients. Only valid if bp is 'softshrink_std' or
'hardshrink_std'.
The options for the lowpass are:
- none
- relu (as you'd expect)
- relu2 - applies batch norm + relu
- softshrink - applies soft shrinkage with a learnable threshold
- hardshrink_std - applies hard shrinkage. The 'std' implies that it
tracks the standard deviation of the activations, and sets a threshold
attempting to reach a desired sparsity level. This assumes that the
lowpass coefficients follow a laplacian distribution. See
:class:`dtcwt_gainlayer.layers.shrink.SparsifyWaveCoeffs_std`.
- softshrink_std - same as hardshrink std except uses soft shrinkage.
The options for the bandpass are:
- none
    - relu (applied independently to the real and imaginary components)
    - relu2 - applies batch norm + relu to the magnitude of the bandpass
      coefficients
    - softshrink - applies soft shrinkage to the magnitude of the bp
      coefficients with a learnable threshold
- hardshrink_std - applies hard shrinkage by tracking the standard
deviation. Assumes the bp distributions follow an exponential
distribution. See
:class:`dtcwt_gainlayer.layers.shrink.SparsifyWaveCoeffs_std`.
- softshrink_std - same as hardshrink_std but with soft shrinkage.
"""
def __init__(self, C, lp=None, bp=(None,), lp_q=0.8, bp_q=0.8):
super().__init__()
if lp is None or lp == 'none':
self.lp = PassThrough()
elif lp == 'relu':
self.lp = nn.ReLU()
elif lp == 'relu2':
self.lp = BNReLUWaveCoeffs(C, bp=False)
elif lp == 'softshrink':
self.lp = SoftShrink(C, complex=False)
elif lp == 'hardshrink_std':
self.lp = SparsifyWaveCoeffs_std(C, lp_q, bp=False, soft=False)
elif lp == 'softshrink_std':
self.lp = SparsifyWaveCoeffs_std(C, lp_q, bp=False, soft=True)
else:
raise ValueError("Unkown nonlinearity {}".format(lp))
fs = []
for b in bp:
if b is None or b == 'none':
f = PassThrough()
elif b == 'relu':
f = nn.ReLU()
elif b == 'relu2':
f = BNReLUWaveCoeffs(C, bp=True)
elif b == 'softshrink':
f = SoftShrink(C, complex=True)
elif b == 'hardshrink_std':
f = SparsifyWaveCoeffs_std(C, bp_q, bp=True, soft=False)
elif b == 'softshrink_std':
f = SparsifyWaveCoeffs_std(C, bp_q, bp=True, soft=True)
else:
raise ValueError("Unkown nonlinearity {}".format(lp))
fs.append(f)
self.bp = nn.ModuleList(fs)
def forward(self, x):
""" Applies the selected lowpass and bandpass nonlinearities to the
input x.
Args:
x (tuple): tuple of (lowpass, bandpasses)
Returns:
y (tuple): tuple of (lowpass, bandpasses)
"""
yl, yh = x
yl = self.lp(yl)
yh = [bp(y) if y.shape != torch.Size([0]) else y
for bp, y in zip(self.bp, yh)]
return (yl, yh)
class BNReLUWaveCoeffs(nn.Module):
""" Applies batch normalization followed by a relu
Args:
C (int): number of channels
bp (bool): If true, applies bn+relu to the magnitude of the bandpass
coefficients. If false, is applying bn+relu to the lowpass coeffs.
"""
def __init__(self, C, bp=True):
super().__init__()
self.bp = bp
if bp:
self.BN = nn.BatchNorm2d(6*C)
else:
self.BN = nn.BatchNorm2d(C)
self.ReLU = nn.ReLU()
def forward(self, x):
""" Applies nonlinearity to the input x """
if self.bp:
s = x.shape
# Move the orientation dimension to the channel
x = x.view(s[0], s[1]*s[2], s[3], s[4], s[5])
θ = torch.atan2(x.data[..., 1], x.data[..., 0])
r = mag(x, complex=True)
r_new = self.ReLU(self.BN(r))
y = torch.stack((r_new * torch.cos(θ), r_new * torch.sin(θ)), dim=-1)
# Reshape to a 6D tensor again
y = y.view(s[0], s[1], s[2], s[3], s[4], s[5])
else:
y = self.ReLU(self.BN(x))
return y
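# Hedged usage sketch (illustrative addition; shapes follow the
# (N, C, 6, H, W, 2) bandpass convention assumed by BNReLUWaveCoeffs above):
# nonlin = WaveNonLinearity(C=32, lp='relu2', bp=('softshrink',))
# yl = torch.randn(8, 32, 16, 16)            # lowpass coefficients
# yh = [torch.randn(8, 32, 6, 8, 8, 2)]      # one bandpass scale
# yl, yh = nonlin((yl, yh))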
|
[
"fbcotter90@gmail.com"
] |
fbcotter90@gmail.com
|
f469053b33cfb03ae14b21cac402d0845b46abe2
|
0b838e11b59687db8c9265ba2de61aeea8159d5e
|
/flocker/node/agents/functional/test_cinder.py
|
abbc0a417f8a936075f94b3abaa48f9dd1c9a99f
|
[
"Apache-2.0"
] |
permissive
|
Waynelemars/flocker
|
542f46ae841df65825387aa4be30b0a5782e5476
|
10dc58892d826d4d7310f657675ebf2f8f66b3ed
|
refs/heads/master
| 2020-12-25T22:37:28.364739
| 2015-05-02T10:42:14
| 2015-05-02T10:42:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,356
|
py
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Functional tests for ``flocker.node.agents.cinder`` using a real OpenStack
cluster.
Ideally, there'd be some in-memory tests too. Some ideas:
* Maybe start a `mimic` server and use it to at test just the authentication
step.
* Mimic doesn't currently fake the cinder APIs but perhaps we could contribute
that feature.
See https://github.com/rackerlabs/mimic/issues/218
"""
from uuid import uuid4
from bitmath import Byte
from twisted.trial.unittest import SynchronousTestCase
from ....testtools import skip_except
from ..cinder import cinder_api, wait_for_volume
from ..test.test_blockdevice import REALISTIC_BLOCKDEVICE_SIZE
from ..testtools import tidy_cinder_client_for_test
# make_iblockdeviceapi_tests should really be in flocker.node.agents.testtools,
# but I want to keep the branch size down
from ..test.test_blockdevice import make_iblockdeviceapi_tests
def cinderblockdeviceapi_for_test(test_case, cluster_id):
"""
Create a ``CinderBlockDeviceAPI`` instance for use in tests.
:param TestCase test_case: The test being run.
:param UUID cluster_id: The Flocker cluster ID for Cinder volumes.
:returns: A ``CinderBlockDeviceAPI`` instance whose underlying
``cinderclient.v1.client.Client`` has a ``volumes`` attribute wrapped
by ``TidyCinderVolumeManager`` to cleanup any lingering volumes that
are created during the course of ``test_case``
"""
return cinder_api(
cinder_client=tidy_cinder_client_for_test(test_case),
cluster_id=cluster_id,
)
# ``CinderBlockDeviceAPI`` only implements the ``create`` and ``list`` parts of
# ``IBlockDeviceAPI``. Skip the rest of the tests for now.
@skip_except(
supported_tests=[
'test_interface',
'test_created_is_listed',
'test_created_volume_attributes',
'test_list_volume_empty',
'test_listed_volume_attributes',
]
)
class CinderBlockDeviceAPIInterfaceTests(
make_iblockdeviceapi_tests(
blockdevice_api_factory=(
lambda test_case: cinderblockdeviceapi_for_test(
test_case=test_case,
cluster_id=uuid4()
)
)
)
):
"""
Interface adherence Tests for ``CinderBlockDeviceAPI``.
Block devices that are created in these tests will be cleaned up by
``TidyCinderVolumeManager``.
"""
class CinderBlockDeviceAPIImplementationTests(SynchronousTestCase):
"""
Implementation specific tests for ``CinderBlockDeviceAPI``.
Block devices that are created in these tests will be cleaned up by
``TidyCinderVolumeManager``.
"""
def test_foreign_volume(self):
"""
Non-Flocker Volumes are not listed.
"""
cinder_client = tidy_cinder_client_for_test(test_case=self)
requested_volume = cinder_client.volumes.create(
size=Byte(REALISTIC_BLOCKDEVICE_SIZE).to_GB().value
)
wait_for_volume(
volume_manager=cinder_client.volumes,
expected_volume=requested_volume
)
block_device_api = cinderblockdeviceapi_for_test(
test_case=self,
cluster_id=uuid4(),
)
flocker_volume = block_device_api.create_volume(
dataset_id=uuid4(),
size=REALISTIC_BLOCKDEVICE_SIZE,
)
self.assertEqual([flocker_volume], block_device_api.list_volumes())
def test_foreign_cluster_volume(self):
"""
Volumes from other Flocker clusters are not listed.
"""
block_device_api1 = cinderblockdeviceapi_for_test(
test_case=self,
cluster_id=uuid4(),
)
flocker_volume1 = block_device_api1.create_volume(
dataset_id=uuid4(),
size=REALISTIC_BLOCKDEVICE_SIZE,
)
block_device_api2 = cinderblockdeviceapi_for_test(
test_case=self,
cluster_id=uuid4(),
)
flocker_volume2 = block_device_api2.create_volume(
dataset_id=uuid4(),
size=REALISTIC_BLOCKDEVICE_SIZE,
)
self.assertEqual(
([flocker_volume1], [flocker_volume2]),
(block_device_api1.list_volumes(),
block_device_api2.list_volumes())
)
|
[
"richard.wall@clusterhq.com"
] |
richard.wall@clusterhq.com
|
6050153dab9027461663ffdbfe0554f1de1b8bb9
|
85b3dff4a55bc4ac4060fac02db4ce71503ce584
|
/test/espnet2/asr/encoder/test_branchformer_encoder.py
|
69bbc4e0e141def910ddc41dd68f12ed2af51963
|
[
"Apache-2.0"
] |
permissive
|
yuekaizhang/espnet
|
4702ec68d219304e64f9956b3c22ec6fde775479
|
e12accce0cd040849bfe5b08a38d88b750474ac9
|
refs/heads/master
| 2022-09-19T16:05:57.606626
| 2022-09-09T07:18:30
| 2022-09-09T07:18:30
| 237,085,083
| 1
| 0
|
Apache-2.0
| 2020-03-24T19:12:40
| 2020-01-29T21:22:36
| null |
UTF-8
|
Python
| false
| false
| 5,032
|
py
|
import pytest
import torch
from espnet2.asr.encoder.branchformer_encoder import BranchformerEncoder
@pytest.mark.parametrize(
"input_layer", ["linear", "conv2d", "conv2d2", "conv2d6", "conv2d8", "embed"]
)
@pytest.mark.parametrize("use_linear_after_conv", [True, False])
@pytest.mark.parametrize(
"rel_pos_type, pos_enc_layer_type, attention_layer_type",
[
("legacy", "abs_pos", "selfattn"),
("latest", "rel_pos", "rel_selfattn"),
("legacy", "rel_pos", "rel_selfattn"),
("legacy", "legacy_rel_pos", "legacy_rel_selfattn"),
("legacy", "abs_pos", "fast_selfattn"),
],
)
@pytest.mark.parametrize(
"merge_method, cgmlp_weight, attn_branch_drop_rate",
[
("concat", 0.5, 0.0),
("learned_ave", 0.5, 0.0),
("learned_ave", 0.5, 0.1),
("learned_ave", 0.5, [0.1, 0.1]),
("fixed_ave", 0.5, 0.0),
("fixed_ave", [0.5, 0.5], 0.0),
("fixed_ave", 0.0, 0.0),
("fixed_ave", 1.0, 0.0),
],
)
@pytest.mark.parametrize("stochastic_depth_rate", [0.0, 0.1, [0.1, 0.1]])
def test_encoder_forward_backward(
input_layer,
use_linear_after_conv,
rel_pos_type,
pos_enc_layer_type,
attention_layer_type,
merge_method,
cgmlp_weight,
attn_branch_drop_rate,
stochastic_depth_rate,
):
encoder = BranchformerEncoder(
20,
output_size=2,
use_attn=True,
attention_heads=2,
attention_layer_type=attention_layer_type,
pos_enc_layer_type=pos_enc_layer_type,
rel_pos_type=rel_pos_type,
use_cgmlp=True,
cgmlp_linear_units=4,
cgmlp_conv_kernel=3,
use_linear_after_conv=use_linear_after_conv,
gate_activation="identity",
merge_method=merge_method,
cgmlp_weight=cgmlp_weight,
attn_branch_drop_rate=attn_branch_drop_rate,
num_blocks=2,
input_layer=input_layer,
stochastic_depth_rate=stochastic_depth_rate,
)
if input_layer == "embed":
x = torch.randint(0, 10, [2, 32])
else:
x = torch.randn(2, 32, 20, requires_grad=True)
x_lens = torch.LongTensor([32, 28])
y, _, _ = encoder(x, x_lens)
y.sum().backward()
def test_encoder_invalid_layer_type():
with pytest.raises(ValueError):
BranchformerEncoder(20, rel_pos_type="dummy")
with pytest.raises(ValueError):
BranchformerEncoder(20, pos_enc_layer_type="dummy")
with pytest.raises(ValueError):
BranchformerEncoder(
20, pos_enc_layer_type="abc_pos", attention_layer_type="dummy"
)
def test_encoder_invalid_rel_pos_combination():
with pytest.raises(AssertionError):
BranchformerEncoder(
20,
rel_pos_type="latest",
pos_enc_layer_type="legacy_rel_pos",
attention_layer_type="legacy_rel_sselfattn",
)
with pytest.raises(AssertionError):
BranchformerEncoder(
20,
pos_enc_layer_type="rel_pos",
attention_layer_type="legacy_rel_sselfattn",
)
with pytest.raises(AssertionError):
BranchformerEncoder(
20,
pos_enc_layer_type="legacy_rel_pos",
attention_layer_type="rel_sselfattn",
)
with pytest.raises(AssertionError):
BranchformerEncoder(
20,
attention_layer_type="fast_selfattn",
pos_enc_layer_type="rel_pos",
)
def test_encoder_output_size():
encoder = BranchformerEncoder(20, output_size=256)
assert encoder.output_size() == 256
def test_encoder_invalid_type():
with pytest.raises(ValueError):
BranchformerEncoder(20, input_layer="fff")
def test_encoder_invalid_cgmlp_weight():
with pytest.raises(AssertionError):
BranchformerEncoder(
20,
merge_method="fixed_ave",
cgmlp_weight=-1.0,
)
with pytest.raises(ValueError):
BranchformerEncoder(
20,
num_blocks=2,
cgmlp_weight=[0.1, 0.1, 0.1],
)
def test_encoder_invalid_merge_method():
with pytest.raises(ValueError):
BranchformerEncoder(
20,
merge_method="dummy",
)
def test_encoder_invalid_two_branches():
with pytest.raises(AssertionError):
BranchformerEncoder(
20,
use_attn=False,
use_cgmlp=False,
)
def test_encoder_invalid_attn_branch_drop_rate():
with pytest.raises(ValueError):
BranchformerEncoder(
20,
num_blocks=2,
attn_branch_drop_rate=[0.1, 0.1, 0.1],
)
def test_encoder_invalid_stochastic_depth_rate():
with pytest.raises(ValueError):
BranchformerEncoder(
20,
num_blocks=2,
stochastic_depth_rate=[0.1],
)
with pytest.raises(ValueError):
BranchformerEncoder(
20,
num_blocks=2,
stochastic_depth_rate=[0.1, 0.1, 0.1],
)
|
[
"pengyf21@gmail.com"
] |
pengyf21@gmail.com
|
bbad0509e9351b873b34fba81193eca72f87c1e2
|
e7cdba44218ea9b7899449162efa0a76ae3d2ab4
|
/api/BackupCommentsController.py
|
158281d1f9efbc7130ef431c8971bd0724522a8b
|
[] |
no_license
|
sachithkk/pyknow-expert-system-api
|
b865cfb9aa97418ba236cb4dc22cde335b9ff27a
|
5d8582aeb33366fde4fe02d91336cce9327dfecd
|
refs/heads/master
| 2022-07-14T10:41:11.958565
| 2019-11-10T16:55:50
| 2019-11-10T16:55:50
| 200,712,030
| 1
| 0
| null | 2022-06-21T23:22:15
| 2019-08-05T18:55:22
|
Python
|
UTF-8
|
Python
| false
| false
| 5,035
|
py
|
from flask_restful import Resource
from flask import jsonify, request
import logging as logger
from app import flaskAppInstance
from flask_pymongo import PyMongo
import re, sys, pymongo, time, requests, os.path, csv
from selenium import webdriver
class CommentsController(Resource):
def post(self):
logger.info("Starting Scrape Comments....")
requestData = request.get_json()
# get relative path
my_path = os.path.abspath(os.path.dirname(__file__))
        chrome_path = os.path.join(my_path, r"..\chromeDriver\chromedriver.exe")  # raw string keeps the backslashes literal
# without headless mode
driver = webdriver.Chrome(chrome_path)
# run hedless mode
# chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument("--headless")
# driver = webdriver.Chrome(chrome_path, options=chrome_options)
# # hide browser
# driver.set_window_position(-10000,0)
# get steam site
driver.get("https://www.youtube.com/")
driver.maximize_window()
# get game name by command-line argument
# search_tag = sys.argv[1]
search_tag = requestData["product_name"]
# search the video
search_video = driver.find_element_by_name("search_query")
search_video.send_keys(search_tag)
search_video.submit()
# # click filters
# driver.find_element_by_xpath("""//*[@id="container"]/ytd-toggle-button-renderer""").click()
driver.find_element_by_xpath("""//*[@id="thumbnail"]/yt-img-shadow""").click()
# driver.find_element_by_tag_name("yt-formatted-string").click()
#
# -----------------------------------------------------------------------------------
time.sleep(4)
driver.find_element_by_class_name("ytp-mute-button").click()
driver.find_element_by_class_name("ytp-play-button").click()
comment_section = driver.find_element_by_xpath('//*[@id="comments"]')
driver.execute_script("arguments[0].scrollIntoView();", comment_section)
time.sleep(7)
last_height = driver.execute_script("return document.documentElement.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")
# Wait to load page
time.sleep(2)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.documentElement.scrollHeight")
if new_height == last_height:
break
last_height = new_height
driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")
# create CSV file
        with open(r'..\data\comments.csv', 'w', encoding='utf-8', newline='') as file:
fieldNames = ['username', 'comment']
theWriter = csv.DictWriter(file, fieldnames=fieldNames)
theWriter.writeheader()
time.sleep(3)
name_elems = driver.find_elements_by_xpath('//*[@id="author-text"]')
comment_elems = driver.find_elements_by_xpath('//*[@id="content-text"]')
num_of_names = len(name_elems)
for i in range(num_of_names):
username = name_elems[i].text # .replace(",", "|")
comment = comment_elems[i].text # .replace(",", "|")
# write to CSV file
theWriter.writerow({'username': username, 'comment': comment})
# Wait analyze comments
time.sleep(2)
        # close the browser (the original called driver.__exit__() directly)
        driver.quit()
# Analyze comments
import nltk
import pandas as pd
import warnings
# nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
warnings.filterwarnings("ignore")
# read the CSV file
df = pd.read_csv('..\data\comments.csv')
df.dropna(inplace=True)
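        # VADER's 'compound' score is a normalized sentiment value in [-1, 1];
        # an average of 0 or above is treated as positive feedback below.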
        df['scores'] = df['comment'].apply(lambda message: sid.polarity_scores(message))
        df['compound'] = df['scores'].apply(lambda d: d['compound'])
        df['comp_score'] = df['compound'].apply(lambda score: 'pos' if score >= 0 else 'neg')
sum_compound = df['compound'].sum()
count = df['compound'].count()
print(sum_compound)
print(count)
        average_compound = sum_compound / count
        print("Average Compound Value : ", average_compound)
        if average_compound >= 0:
            print("Positive Feedback")
        else:
            print("Negative Feedback")
        return jsonify({"avg_compound_value": average_compound})
|
[
"virajlakshitha39@gmail.com"
] |
virajlakshitha39@gmail.com
|
0f644a13f109ca88e181d869c7b44bccfd61d07a
|
b8b7c3c3775e8c741cec2bb7807e07b2528e3b24
|
/BT18CSE148/q3/q3.py
|
9ccc6ba61c8305d3bbc86475553a134957820934
|
[] |
no_license
|
nicxdknight/AWP-Python-
|
e3698c62a1f7e5e35728fc0790280629d226474c
|
f9ebc4c107ad113f48ff7c9b95ec6964537162cd
|
refs/heads/master
| 2022-11-30T02:43:48.976421
| 2020-08-17T14:32:41
| 2020-08-17T14:32:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
#question 3
with open(r'D:\python\students.txt', 'r') as file1:
    with open(r'D:\python\students2.txt', 'w') as file2:
        for line in file1:
            splitLine = line.strip().split('\t')
            name = splitLine[0]
            split_name = name.split(" ")
            f_name = split_name[0].capitalize()
            l_name = split_name[len(split_name) - 1].capitalize()
            name = f_name + " " + l_name
            # the original indexed the built-in `str`; the fields live in splitLine
            phoneno = "301-" + splitLine[2]
            file2.write(name + "\t" + splitLine[1] + "\t" + phoneno + "\n")
# both files are closed automatically when the `with` blocks exit
|
[
"noreply@github.com"
] |
nicxdknight.noreply@github.com
|
f910377dee9ef41f1ec81cd841026451cd070365
|
1fc779d7e8b646c08e79c08aa63c349452094100
|
/issue56/misc/orr/square.py.txt
|
0d41c887cb9f6706361db0802341cfc70a71a865
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
linuxgazette/lg
|
4a26b871d06e7a2c1efa7912d0b3b06f00f332e0
|
af1e21053fc7b12eddead4dd1884a5b66346e19d
|
refs/heads/master
| 2021-01-01T16:41:32.829475
| 2017-07-20T23:54:57
| 2017-07-20T23:54:57
| 97,887,660
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
txt
|
#! /usr/bin/python
"""square.py -- Make some noise about a square.
"""
class Square:
def __init__(self, length, width):
self.length = length
self.width = width
def area(self):
return self.length * self.width
my_square = Square(5, 2)
print my_square.area()
|
[
"thomas@xteddy.org"
] |
thomas@xteddy.org
|
c49955be6d6677eb04106713a04b917202d6393a
|
9d95509a23c5a6eee5a19e896a91c062ee328c6f
|
/day4/04生成器.py
|
a5a84055f379fc71aba8750f7c09591c0280c72f
|
[] |
no_license
|
YuanShisong/pythonstudy
|
a50fd5fa1098170e35f8ca4cd3fa33c0990d79b7
|
1b8db5e79ddea7ed4c7c0756589db94ffb0c3041
|
refs/heads/master
| 2021-05-14T09:50:42.455615
| 2018-01-24T03:02:02
| 2018-01-24T03:02:02
| 116,335,843
| 0
| 0
| null | 2018-01-18T06:16:31
| 2018-01-05T03:19:30
|
Python
|
UTF-8
|
Python
| false
| false
| 579
|
py
|
# the yield keyword
a = (i**3 for i in range(4))
print(type(a)) # <class 'generator'>
def gen():
for i in range(4):
yield i**2
print(gen) # <function gen at 0x0000000001EC4B70>
mygen = gen()
print(mygen) # <generator object gen at 0x0000000001E34FC0>
print('\n------------')
def func():
for i in range(4):
i**2
print(func) # <function func at 0x0000000002208048>
f = func()
print(f)
print('\n---------------')
# send
def gen():
for i in range(4):
i = yield
print('i:', i**2)
g = gen()
print(next(g))
g.send(1)
g.send(2)
g.send(3)
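# Expected behavior of the send() demo above: next(g) runs to the first
# `yield` and prints None (nothing has been sent yet); each g.send(n) then
# resumes the generator with i = n, printing n**2 (1, 4, 9).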
|
[
"yuanshisong211@126.com"
] |
yuanshisong211@126.com
|
424cb94753351796806403f7cc9a2e083cce21b3
|
5912bb26963faee371050d1b450eb29d175f9832
|
/code/ceat.py
|
d786d413ee452b7bad8f41b86513e7e6d356e6f3
|
[] |
no_license
|
krangelie/CEAT
|
fa06017a5bdecfe6cffb4fb7fa465f5f781a2e9f
|
497e2958a152ad70004ef6f3b613b98efdd26389
|
refs/heads/master
| 2023-04-09T11:34:56.346880
| 2021-05-01T21:00:33
| 2021-05-01T21:00:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,538
|
py
|
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import pickle
import scipy.stats
import time as t
import random
import datetime
# weat 1
flowers = ['aster', 'clover', 'hyacinth', 'marigold', 'poppy', 'azalea', 'crocus', 'iris', 'orchid', 'rose', 'bluebell', 'daffodil', 'lilac', 'pansy', 'tulip', 'buttercup', 'daisy', 'lily', 'peony', 'violet', 'carnation',
           'magnolia', 'petunia', 'zinnia', 'gladiola']  # original note: 'gladiola' flagged since it does not appear
insects = ['ant', 'caterpillar', 'flea', 'locust', 'spider', 'bedbug', 'centipede', 'fly', 'maggot', 'tarantula',
'bee', 'cockroach', 'gnat', 'mosquito', 'termite', 'beetle', 'cricket', 'hornet', 'moth', 'wasp',
           'dragonfly', 'horsefly', 'roach', 'weevil', 'blackfly']  # original note: 'blackfly' deleted for symmetry since it appears only once
pleasant = ['caress', 'freedom', 'health', 'love', 'peace', 'cheer', 'friend', 'heaven', 'loyal', 'pleasure', 'diamond', 'gentle', 'honest',
'lucky', 'rainbow', 'diploma', 'gift', 'honor', 'miracle', 'sunrise', 'family',
'happy', 'laughter', 'paradise', 'vacation']
unpleasant = ['abuse', 'crash', 'filth', 'murder', 'sickness', 'accident', 'death', 'grief', 'poison', 'stink',
'assault', 'disaster', 'hatred', 'pollute', 'tragedy', 'divorce', 'jail', 'poverty', 'ugly', 'cancer', 'kill', 'rotten',
'vomit', 'agony', 'prison']
#weat 2
instruments = ['bagpipe', 'cello', 'guitar', 'lute', 'trombone', 'banjo', 'clarinet', 'harmonica', 'mandolin',
'trumpet', 'bassoon', 'drum', 'harp', 'oboe', 'tuba', 'bell', 'fiddle', 'harpsichord', 'piano', 'viola', 'bongo',
'flute', 'horn', 'saxophone', 'violin']
weapons = ['arrow', 'club', 'gun', 'missile', 'spear', 'axe', 'dagger', 'harpoon', 'pistol', 'sword', 'blade',
'dynamite', 'hatchet', 'rifle', 'tank', 'bomb', 'firearm', 'knife', 'shotgun', 'teargas', 'cannon', 'grenade',
'mace', 'slingshot', 'whip']
# weat 2 reuses pleasant and unpleasant from weat 1
#weat 3
european_3 = ['Adam', 'Harry', 'Roger', 'Alan',
'Ryan', 'Andrew', 'Jack', 'Matthew', 'Stephen', 'Brad', 'Greg' , 'Paul',
'Jonathan', 'Peter', 'Amanda', 'Courtney', 'Melanie', 'Katie', 'Kristin', 'Nancy', 'Stephanie',
              'Ellen', 'Lauren', 'Colleen', 'Emily', 'Megan', 'Rachel', 'Betsy', 'Justin', 'Frank', 'Josh', 'Heather']  # original note: 'Betsy', 'Justin', 'Frank', 'Josh', 'Heather' deleted at random
african_3 = [ 'Alonzo', 'Theo', 'Alphonse', 'Jerome',
'Leroy', 'Torrance', 'Darnell', 'Lamar', 'Lionel', 'Tyree', 'Deion', 'Lamont', 'Malik',
'Terrence', 'Tyrone', 'Lavon', 'Marcellus', 'Wardell', 'Nichelle',
'Ebony', 'Shaniqua', 'Jasmine',
             'Tanisha', 'Tia', 'Latoya', 'Yolanda', 'Yvette', 'Jamel', 'Shereen', 'Latisha', 'Lakisha', 'Malika']  # original note: 'Jamel', 'Shereen', 'Latisha', 'Lakisha', 'Malika' deleted because they do not appear
pleasant_3 = [ 'caress', 'freedom', 'health', 'love', 'peace', 'cheer', 'friend', 'heaven', 'loyal', 'pleasure',
'diamond', 'gentle', 'honest', 'lucky', 'rainbow', 'diploma', 'gift', 'honor', 'miracle', 'sunrise', 'family', 'happy', 'laughter', 'paradise', 'vacation']
unpleasant_3 = ['abuse', 'crash', 'filth', 'murder', 'sickness', 'accident', 'death', 'grief', 'poison', 'stink', 'assault', 'disaster',
'hatred', 'pollute', 'tragedy', 'bomb', 'divorce', 'jail', 'poverty', 'ugly', 'cancer', 'evil', 'kill', 'rotten', 'vomit']
# bomb, evil in 3 not in 1, agony prison in 1 not in 3. #pleasant 3 = pleasant 1
#weat 4
european_4 = ['Brad', 'Brendan', 'Geoffrey', 'Greg', 'Brett', 'Matthew',
              'Todd', 'Allison', 'Anne', 'Carrie', 'Emily', 'Jill', 'Laurie', 'Meredith', 'Sarah', 'Neil']  # original note: 'Neil' deleted for symmetry
african_4 = ['Darnell', 'Hakim', 'Jermaine', 'Kareem', 'Jamal', 'Leroy', 'Rasheed',
'Tyrone', 'Aisha', 'Ebony', 'Keisha', 'Kenya', 'Latoya', 'Tamika',
             'Tanisha', 'Lakisha']  # original note: 'Lakisha' deleted because it does not appear
# weat 4 reuses pleasant_3 and unpleasant_3
#weat 5
# weat 5 reuses european_4 and african_4
pleasant_5 = ['joy', 'love', 'peace', 'wonderful', 'pleasure', 'friend', 'laughter', 'happy']
unpleasant_5 = ['agony', 'terrible', 'horrible', 'nasty', 'evil', 'war', 'awful', 'failure']
#weat 6
male = ['John', 'Paul', 'Mike', 'Kevin', 'Steve', 'Greg', 'Jeff', 'Bill']
female = ['Amy', 'Joan', 'Lisa', 'Sarah', 'Diana', 'Kate', 'Ann', 'Donna']
career = ['executive', 'management', 'professional', 'corporation', 'salary', 'office', 'business', 'career']
family = ['home', 'parents', 'children', 'family', 'cousins', 'marriage', 'wedding', 'relatives']
#weat 7
math = ['math', 'algebra', 'geometry', 'calculus', 'equations', 'computation', 'numbers', 'addition']
arts = ['poetry', 'art', 'dance', 'literature', 'novel', 'symphony', 'drama', 'sculpture']
male_term = ['male', 'man', 'boy', 'brother', 'he', 'him', 'his', 'son']
female_term = ['female', 'woman', 'girl', 'sister', 'she', 'her', 'hers', 'daughter']
#weat 8
science = ['science', 'technology', 'physics', 'chemistry', 'Einstein', 'NASA', 'experiment', 'astronomy']
arts_8 = ['poetry', 'art', 'Shakespeare', 'dance', 'literature', 'novel', 'symphony', 'drama']
male_term_8 = ['brother', 'father', 'uncle', 'grandfather', 'son', 'he', 'his', 'him']
female_term_8 = ['sister', 'mother', 'aunt', 'grandmother', 'daughter', 'she', 'hers', 'her']
#weat 9
mental_disease = ['sad', 'hopeless', 'gloomy', 'tearful', 'miserable', 'depressed']
physical_disease = ['sick', 'illness', 'influenza', 'disease', 'virus', 'cancer']
temporary = ['impermanent', 'unstable', 'variable', 'fleeting', 'short', 'brief', 'occasional' ]
permanent = [ 'stable', 'always', 'constant', 'persistent', 'chronic', 'prolonged', 'forever']
#weat 10
young_name = ['Tiffany', 'Michelle', 'Cindy', 'Kristy', 'Brad', 'Eric', 'Joey', 'Billy']
old_name = [ 'Ethel', 'Bernice', 'Gertrude', 'Agnes', 'Cecil', 'Wilbert', 'Mortimer', 'Edgar']
# weat 10 reuses pleasant_5 and unpleasant_5
african_female = ['Yvette','Aisha','Malika','Latisha','Keisha','Tanisha',
'Tamika', 'Yolanda','Nichelle','Latoya','Lakisha','Shereen','Shaniqua',
'Jasmine','Tia'][0:12]
# delete: Kenya
african_male = ['Lionel', 'Wardell', 'Jamel', 'Marcellus',
'Jamal', 'Leroy', 'Tyrone', 'Alphonse'
, 'Hakim', 'Terrence', 'Jerome', 'Alonzo'][0:12]
#delete: Deion, Ebony, Kareem,Lamar,Lavon,Malik,Rasheed,Jermaine,
# Tyree,Lamont,Darnell,Torrance,Theo
european_female = ['Melanie','Colleen','Ellen','Emily','Sarah','Rachel',
'Carrie','Stephanie','Megan','Nancy', 'Katie','Heather','Betsy',
'Kristin'][0:12]
#delete: Amanda
european_male = ['Frank', 'Roger', 'Neil', 'Geoffrey',
'Brad', 'Stephen', 'Peter', 'Josh', 'Andrew', 'Jack',
'Matthew', 'Jonathan', 'Greg',
'Justin', 'Alan', 'Adam',
'Harry', 'Paul'][0:12]
#delete: Lauren,Jill,Brendan,Meredith,Allison,Todd,Ryan,Courtney,Laurie,Brett,Anne
mexican_female = ['Maria','Yesenia','Adriana','Liset','Mayra','Alma',
'Carolina','Iliana','Sonia',
'Karina','Alejandra','Brenda','Vanessa','Diana'][0:12]
# delete: Ana
mexican_male = ['Jesús','Rigoberto','César','Rogelio','José','Pedro',
'Antonio','Alberto','Alejandro',
'Alfredo','Juan','Miguel','Ricardo'][0:12]
af_bias = ['loud','ghetto','unrefined','athletic','unintelligent']+['bigbutt','overweight','confident','darkskinned','promiscuous','unfeminine','aggressive','chicken']
em_bias_foraf = ['rich', 'intelligent', 'arrogant', 'status', 'blond', 'racist', 'American', 'leader', 'privileged', 'attractive', 'tall', 'sexist', 'successful']
af_unique_bias = ['bigbutt','overweight','confident','darkskinned','promiscuous','unfeminine','aggressive','chicken']
em_unique_bias_foraf = ['rich', 'tall', 'intelligent', 'arrogant', 'successful', 'status', 'blond', 'racist']
lf_bias = ['feisty','curvy', 'loud', 'cook', 'darkskinned', 'uneducated', 'hardworker' ,'promiscuous','unintelligent','short','sexy', 'maids']
em_bias_forlf = ['rich', 'intelligent', 'arrogant', 'status', 'blond', 'racist', 'American', 'leader', 'privileged', 'tall', 'sexist', 'successful']
lf_unique_bias = ['feisty','curvy','cook','promiscuous','sexy','maids']
em_unique_bias_forlf = ['rich', 'tall', 'intelligent', 'assertive', 'arrogant', 'successful']
weat_groups = [
[flowers,insects,pleasant,unpleasant], # 1
[instruments, weapons, pleasant, unpleasant], #2
[european_3,african_3,pleasant_3,unpleasant_3], #3
[european_4,african_4,pleasant_3,unpleasant_3], #4
[european_4,african_4,pleasant_5,unpleasant_5],#5
[male,female,career,family], #6
[math,arts,male_term,female_term],#7
[science,arts_8,male_term_8,female_term_8],#8
[mental_disease,physical_disease,temporary,permanent],#9
[young_name,old_name,pleasant_5,unpleasant_5],#10
[african_female,european_male,af_bias,em_bias_foraf], #af-inter
[african_female,european_male,af_unique_bias,em_unique_bias_foraf], #af-emerg
[mexican_female,european_male,lf_bias,em_bias_forlf],#lf-inter
[mexican_female,european_male,lf_unique_bias,em_unique_bias_forlf]# lf-emerg
]
def associate(w,A,B):
return cosine_similarity(w.reshape(1,-1),A).mean() - cosine_similarity(w.reshape(1,-1),B).mean()
def difference(X,Y,A,B):
# return np.sum(np.apply_along_axis(associate,1,X,A,B)) - np.sum(np.apply_along_axis(associate,1,Y,A,B))
return np.sum([associate(X[i,:],A,B) for i in range(X.shape[0])]) - np.sum([associate(Y[i,:],A,B) for i in range(Y.shape[0])])
def effect_size(X,Y,A,B):
# delta_mean = np.mean(np.apply_along_axis(associate,1,X,A,B)) - np.mean(np.apply_along_axis(associate,1,Y),A,B)
delta_mean = np.mean([associate(X[i,:],A,B) for i in range(X.shape[0])]) - np.mean([associate(Y[i,:],A,B) for i in range(Y.shape[0])])
# s = np.apply_along_axis(associate,1,np.concatenate((X,Y),axis=0),A,B)
XY = np.concatenate((X,Y),axis=0)
s = [associate(XY[i,:],A,B) for i in range(XY.shape[0])]
std_dev = np.std(s,ddof=1)
var = std_dev**2
return delta_mean/std_dev, var
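# The quantity above is the WEAT effect size (Caliskan et al., 2017): the
# difference between the mean association of X and of Y with (A, B), divided
# by the standard deviation of the association over all words in X ∪ Y.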
def inn(a_huge_key_list):
L = len(a_huge_key_list)
i = np.random.randint(0, L)
return a_huge_key_list[i]
def sample_statistics(X,Y,A,B,num = 100):
XY = np.concatenate((X,Y),axis=0)
def inner_1(XY,A,B):
X_test_idx = np.random.choice(XY.shape[0],X.shape[0],replace=False)
Y_test_idx = np.setdiff1d(list(range(XY.shape[0])),X_test_idx)
X_test = XY[X_test_idx,:]
Y_test = XY[Y_test_idx,:]
return difference(X_test,Y_test,A,B)
s = [inner_1(XY,A,B) for i in range(num)]
return np.mean(s), np.std(s,ddof=1)
def p_value(X,Y,A,B,num=100):
m,s = sample_statistics(X,Y,A,B,num)
d = difference(X,Y,A,B)
p = 1 - scipy.stats.norm.cdf(d,loc = m, scale = s)
return p
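# p_value() approximates a permutation test: sample_statistics() repeatedly
# re-partitions X ∪ Y at random, and the observed difference is compared
# against a normal distribution fitted to those resampled statistics.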
def ceat_meta(weat_groups = weat_groups, model='bert',test=1,N=10000):
nm = "ceat_race_{}_vector.pickle".format(model)
weat_dict = pickle.load(open(nm,'rb'))
# nm_1 = "name_{}_vector_new.pickle".format(model)
# name_dict = pickle.load(open(nm_1,'rb'))
e_lst = [] #effect size
v_lst = [] #variance
len_list = [len(weat_groups[test-1][i]) for i in range(4)]
for i in range(N):
X = np.array([weat_dict[wd][np.random.randint(0,len(weat_dict[wd]))] for wd in weat_groups[test-1][0]])
Y = np.array([weat_dict[wd][np.random.randint(0,len(weat_dict[wd]))] for wd in weat_groups[test-1][1]])
A = np.array([weat_dict[wd][np.random.randint(0,len(weat_dict[wd]))] for wd in weat_groups[test-1][2]])
B = np.array([weat_dict[wd][np.random.randint(0,len(weat_dict[wd]))] for wd in weat_groups[test-1][3]])
e,v = effect_size(X,Y,A,B)
e_lst.append(e)
v_lst.append(v)
e_nm = "/Users/ceatpro/Desktop/wefat/data/meta_data/{0}_{1}_es.pickle".format(model,test)
v_nm = "/Users/ceatpro/Desktop/wefat/data/meta_data/{0}_{1}_v.pickle".format(model,test)
pickle.dump(e_lst,open(e_nm,'wb'))
pickle.dump(v_lst,open(v_nm,'wb'))
#calculate Q (total variance)
e_ary = np.array(e_lst)
w_ary = 1/np.array(v_lst)
q1 = np.sum(w_ary*(e_ary**2))
q2 = ((np.sum(e_ary*w_ary))**2)/np.sum(w_ary)
q = q1 - q2
df = N - 1
if q>df:
c = np.sum(w_ary) - np.sum(w_ary**2)/np.sum(w_ary)
tao_square = (q-df)/c
print("tao>0")
else:
tao_square = 0
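    # Random-effects meta-analysis (DerSimonian-Laird): q is Cochran's Q
    # heterogeneity statistic, tao_square the between-sample variance, and
    # each effect size is reweighted below by 1 / (v_i + tao_square).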
v_ary = np.array(v_lst)
v_star_ary = v_ary + tao_square
w_star_ary = 1/v_star_ary
    # calculate the combined effect size and variance
pes = np.sum(w_star_ary*e_ary)/np.sum(w_star_ary)
v = 1/np.sum(w_star_ary)
# p-value
z = pes/np.sqrt(v)
# p_value = 1 - scipy.stats.norm.cdf(z,loc = 0, scale = 1)
p_value = scipy.stats.norm.sf(z,loc = 0, scale = 1)
return pes, p_value
if __name__ == '__main__':
e_lst = []
p_lst = []
for e in range(1,15):
# group = weat_groups[(e - 1)]
e_lst.append([])
p_lst.append([])
print(e)
for m in ['elmo','bert','gpt','gpt2']:
print(m)
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
pes, p_value = ceat_meta(weat_groups = weat_groups, model=m,test=e,N=1000)
print("PES is {}:".format(pes))
# print("Var is {}:".format(v))
print("P-value is {}:".format(p_value))
e_lst[e-1].append(pes)
# e_lst[e-1].append(v)
e_lst[e-1].append(p_value)
print(" ")
e_ary = np.array(e_lst)
p_ary = np.array(p_lst)
np.savetxt("e_1000.csv", e_ary, delimiter=",")
|
[
"weiguowilliam@gmail.com"
] |
weiguowilliam@gmail.com
|
33f8c4175e657495c241d2cdfbfba142bd309671
|
514c292b9ec4bc40cbc623dfda8cad3b40450dad
|
/synthesizer/test_synthesize.py
|
b28803d3a853056f2a8cf39b170fcff0f9e739a0
|
[] |
no_license
|
FandM-CARES/anomaly-explain
|
2bb240c1490b533c72d97af8be9db255acbfce66
|
51fac621c31b090595acf20098f7b46ac33a3f26
|
refs/heads/main
| 2023-08-17T13:16:39.799927
| 2021-10-11T06:50:43
| 2021-10-11T06:50:43
| 414,333,328
| 0
| 0
| null | 2021-10-06T18:48:14
| 2021-10-06T18:48:13
| null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
# File: test_synthesize.py
# Author: Leilani H. Gilpin
# Date: 30 December 2019
# Section: 1
# Email: lhg@mit.edu
# Description: Unit tests for the explanation Synthesizer code
import sys
import os
import unittest
sys.path.append(os.path.abspath('../'))
import numpy as np
from synthesizer.synthesize import *
class TestSynthesizer(unittest.TestCase):
def test_blank(self):
self.assertEqual("hello","hello")
class TestBackChain(unittest.TestCase):
def penguin_test(self):
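        # Note: unittest only auto-runs methods whose names start with "test",
        # so this method is skipped by unittest.main() as written.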
# Uncomment this to test out your backward chainer
pretty_goal_tree(backchain_to_goal_tree(zookeeper_rules,
'opus is a penguin'))
a = pretty_goal_tree(backchain_to_goal_tree(zookeeper_rules,
'opus is a penguin'))
self.assertEqual(a, 'b')
if __name__ == '__main__':
unittest.main()
|
[
"lgilpin@mit.edu"
] |
lgilpin@mit.edu
|
f41d0214026ead1d8003886b6a76d15d7f9fd2d8
|
29c3595a4e1f8de9382650610aee5a13e2a135f6
|
/venv/Lib/site-packages/django/db/utils.py
|
28afa6cd076757b79702b2aab41a1cc2382588ba
|
[
"MIT"
] |
permissive
|
zoelesv/Smathchat
|
1515fa56fbb0ad47e1859f6bf931b772446ea261
|
5cee0a8c4180a3108538b4e4ce945a18726595a6
|
refs/heads/main
| 2023-08-04T14:47:21.185149
| 2023-08-02T15:53:20
| 2023-08-02T15:53:20
| 364,627,392
| 9
| 1
|
MIT
| 2023-08-02T15:53:21
| 2021-05-05T15:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 10,398
|
py
|
import pkgutil
from importlib import import_module
from pathlib import Path
from asgiref.local import Local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper:
"""
Context manager and decorator that reraises backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
raise dj_exc_value.with_traceback(traceback) from exc_value
def __call__(self, func):
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def load_backend(backend_name):
"""
Return a database backend's "base" module given a fully qualified database
backend name, or raise an error if it doesn't exist.
"""
# This backend was renamed in Django 1.9.
if backend_name == 'django.db.backends.postgresql_psycopg2':
backend_name = 'django.db.backends.postgresql'
try:
return import_module('%s.base' % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all built-in database backends.
backend_dir = str(Path(__file__).parent / 'backends')
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([backend_dir])
if ispkg and name not in {'base', 'dummy', 'postgresql_psycopg2'}
]
if backend_name not in ['django.db.backends.%s' % b for b in builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
raise ImproperlyConfigured(
"%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX is one of:\n"
" %s" % (backend_name, ", ".join(backend_reprs))
) from e_user
else:
# If there's some other error, this must be an error in Django
raise
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler:
def __init__(self, databases=None):
"""
databases is an optional dictionary of database definitions (structured
like settings.DATABASES).
"""
self._databases = databases
# Connections needs to still be an actual thread local, as it's truly
# thread-critical. Database backends should use @async_unsafe to protect
# their code from async contexts, but this will give those contexts
# separate connections in case it's needed as well. There's no cleanup
# after async contexts, though, so we don't allow that if we can help it.
self._connections = Local(thread_critical=True)
@cached_property
def databases(self):
if self._databases is None:
self._databases = settings.DATABASES
if self._databases == {}:
self._databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
if DEFAULT_DB_ALIAS not in self._databases:
raise ImproperlyConfigured("You must define a '%s' database." % DEFAULT_DB_ALIAS)
if self._databases[DEFAULT_DB_ALIAS] == {}:
self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
return self._databases
def ensure_defaults(self, alias):
"""
Put the defaults into the settings dictionary for a given connection
where no settings is provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('CONN_MAX_AGE', 0)
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', None)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
def prepare_test_settings(self, alias):
"""
Make sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
test_settings = conn.setdefault('TEST', {})
default_test_settings = [
('CHARSET', None),
('COLLATION', None),
('MIGRATE', True),
('MIRROR', None),
('NAME', None),
]
for key, value in default_test_settings:
test_settings.setdefault(key, value)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
self.prepare_test_settings(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
class ConnectionRouter:
def __init__(self, routers=None):
"""
If routers is not specified, default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, str):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get('instance')
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""Return app models allowed to be migrated on provided db."""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
|
[
"ZoomLee@users.noreply.github.com"
] |
ZoomLee@users.noreply.github.com
|
993a5d37979fddf53740450c2cdf7044629bdf3c
|
a54aaaf50c84b8ffa48a810ff9a25bfe8e28ba96
|
/euler046.py
|
8ce01d4beb33dd2d3029d151e6ad65e2658bbe9b
|
[] |
no_license
|
danielmmetz/euler
|
fd5faefdfd58de04e744316618f43c40e6cbb288
|
fe64782617d6e14b8b2b65c3a039716adb789997
|
refs/heads/master
| 2021-01-17T08:44:26.586954
| 2016-05-12T02:35:10
| 2016-05-12T02:35:10
| 40,574,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
"""
It was proposed by Christian Goldbach that every odd composite number can be
written as the sum of a prime and twice a square.
9 = 7 + 2×1²
15 = 7 + 2×2²
21 = 3 + 2×3²
25 = 7 + 2×3²
27 = 19 + 2×2²
33 = 31 + 2×1²
It turns out that the conjecture was false.
What is the smallest odd composite that cannot be written as the sum of a prime
and twice a square?
"""
from itertools import count
from math import sqrt
from prime import isprime, prime_list
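# `isprime` and `prime_list` come from the author's local `prime` module;
# `prime_list` is assumed to hold the primes below the search range, in
# ascending order.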
def answer():
    for candidate in count(3, 2):
        if isprime(candidate):
            continue
        for p in prime_list:
            remainder = candidate - p
            if remainder <= 0:
                # every prime below the candidate has been tried
                return candidate
            # true division: the original floor division (//) could report a
            # decomposition for an odd remainder that does not actually exist
            root = sqrt(remainder / 2)
            if root == int(root):
                break
        else:
            return candidate
if __name__ == '__main__':
print(answer())
|
[
"danielmmetz@gmail.com"
] |
danielmmetz@gmail.com
|
10134aba04152ef4ac2b1d173b0b37654133802c
|
985a2fcf185e8ec5c6a8f8f002b654b451169e2b
|
/python-osc.py
|
9ff7d25ea6d6ce154502cf8e1fc61fcebebac74a
|
[] |
no_license
|
lisajamhoury/mlmp5ktron
|
f623413637b42087ca01b790ad28d699a08fd513
|
a87391f143d13a1397c34807ff7a29db0cf2a3e1
|
refs/heads/master
| 2021-01-25T12:43:37.744147
| 2018-03-19T20:13:59
| 2018-03-19T20:13:59
| 123,502,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
from sklearn.externals import joblib
from pythonosc import dispatcher
from pythonosc import osc_server
from pythonosc import osc_message_builder
from pythonosc import udp_client
import time
import random
def print_click(unused_addr, *args):
    pose = args
    pred = clf.predict([pose])  # returns an array holding one predicted label
    print(pred)
    # check length rather than truthiness: a predicted label of 0 would be falsy
    if len(pred) > 0:
        print("got it: ", pred)
        client.send_message("/prediction", pred[0])
if __name__ == "__main__":
clf = joblib.load('classifier/machinelearning.pkl')
# client is the sending port (port 12000). this is where we will send to
client = udp_client.SimpleUDPClient("127.0.0.1", 12000)
# use this to test osc sending will send 10 random numbers
# for x in range(10):
# client.send_message("/filter", random.random())
# time.sleep(1)
dispatcher = dispatcher.Dispatcher()
dispatcher.map("/skeletal_data", print_click)
# server listens for incoming messages on port 3334
server = osc_server.ThreadingOSCUDPServer(
("127.0.0.1", 3334), dispatcher)
print("Serving on {}".format(server.server_address))
server.serve_forever()
|
[
"lisajamhoury@gmail.com"
] |
lisajamhoury@gmail.com
|
34a5496edaf78c200fe0a67006564fb6d0ff9b2b
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli-core/azure/cli/core/tests/test_aaz_paging.py
|
2ec14f790d3d6bce17f38400edfd9df57904a7dc
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,855
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azure.cli.core.aaz import AAZUndefined
from azure.cli.core.aaz._paging import AAZPaged, AAZPageIterator
from azure.cli.core.mock import DummyCli
class TestAAZPaging(unittest.TestCase):
def test_aaz_paging_sample(self):
data_by_pages = [(['a', 'b', 'c'], 1), (['d', 'e'], 2), (['f'], 3), (['g', 'h'], AAZUndefined)]
result = {
"value": AAZUndefined,
"next_link": AAZUndefined,
}
def executor(next_link):
if next_link is None:
next_link = 0
value, next_link = data_by_pages[next_link]
result["value"] = value
result['next_link'] = next_link
def extract_result():
return result['value'], result['next_link']
paged = AAZPaged(executor=executor, extract_result=extract_result, cli_ctx=DummyCli())
self.assertTrue(list(paged) == ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
def test_aaz_paging_with_limit_and_token(self):
data_by_pages = [
(["a", "b", "c"], 1),
(["d", "e"], 2),
(["f"], 3),
(["g", "h"], AAZUndefined)
]
result = {
"value": AAZUndefined,
"next_link": AAZUndefined
}
def executor(next_link):
if next_link is None:
next_link = 0
value, next_link = data_by_pages[next_link]
result["value"] = value
result["next_link"] = next_link
def extract_result():
return result["value"], result["next_link"]
next_token = '{"next_link": 1, "offset": 1}'
paged = AAZPaged(
executor=executor, extract_result=extract_result, cli_ctx=DummyCli(),
token=next_token, limit=4
)
self.assertTrue(list(paged) == ["e", "f", "g", "h"])
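        # The token resumes iteration at page 1 ("d", "e") with an offset of 1,
        # so "d" is skipped, and limit=4 caps the output at four items.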
def test_aaz_paging_iterator(self):
data_by_pages = [
(["a", "b", "c"], 1),
(["d", "e"], 2),
(["f"], 3),
(["g", "h"], AAZUndefined)
]
result = {
"value": AAZUndefined,
"next_link": AAZUndefined
}
def executor(next_link):
if next_link is None:
next_link = 0
value, next_link = data_by_pages[next_link]
result["value"] = value
result["next_link"] = next_link
def extract_result():
return result["value"], result["next_link"]
page_iterator = AAZPageIterator(
executor=executor, extract_result=extract_result, cli_ctx=DummyCli(),
next_link=1, offset=1, limit=4
)
# | a b c | d e | f | g h |
# *
self.assertTrue(page_iterator._next_link == 1)
self.assertTrue(page_iterator._start == 1) # offset
self.assertTrue(page_iterator._total == 5)
# | a b c | d e | f | g h |
# *
next(page_iterator)
self.assertTrue(page_iterator._next_link == 2)
self.assertTrue(page_iterator._total == 3)
# | a b c | d e | f | g h |
# *
next(page_iterator)
self.assertTrue(page_iterator._next_link == 3)
self.assertTrue(page_iterator._total == 2)
# | a b c | d e | f | g h |
# *
next(page_iterator)
self.assertTrue(page_iterator._next_link == AAZUndefined)
self.assertTrue(page_iterator._total == 0)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
22b95182bd9050b6d8dbb6cfd970e83489eff911
|
477c8309420eb102b8073ce067d8df0afc5a79b1
|
/Applications/ParaView/Testing/Python/DisconnectAndSaveAnimation.py
|
f9f080edafa9f6c87116a65800627d5c41831290
|
[
"LicenseRef-scancode-paraview-1.2"
] |
permissive
|
aashish24/paraview-climate-3.11.1
|
e0058124e9492b7adfcb70fa2a8c96419297fbe6
|
c8ea429f56c10059dfa4450238b8f5bac3208d3a
|
refs/heads/uvcdat-master
| 2021-07-03T11:16:20.129505
| 2013-05-10T13:14:30
| 2013-05-10T13:14:30
| 4,238,077
| 1
| 0
|
NOASSERTION
| 2020-10-12T21:28:23
| 2012-05-06T02:32:44
|
C++
|
UTF-8
|
Python
| false
| false
| 3,862
|
py
|
#!/usr/bin/env python
import QtTesting
import QtTestingImage
object1 = 'pqClientMainWindow/menubar/menuSources'
QtTesting.playCommand(object1, 'activate', 'SphereSource')
object2 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/Accept'
QtTesting.playCommand(object2, 'activate', '')
object3 = 'pqClientMainWindow/centralwidget/MultiViewManager/SplitterFrame/MultiViewSplitter/0/MultiViewFrameMenu/SplitVerticalButton'
QtTesting.playCommand(object3, 'activate', '')
QtTesting.playCommand(object1, 'activate', 'SphereSource')
object4 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/objectInspector/ScrollArea/qt_scrollarea_viewport/PanelArea/Editor/Center_0'
QtTesting.playCommand(object4, 'set_string', '1')
QtTesting.playCommand(object2, 'activate', '')
object5 = 'pqClientMainWindow/pipelineBrowserDock/pipelineBrowser'
QtTesting.playCommand(object5, 'currentChanged', '/0/0|0')
object6 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_tabbar'
QtTesting.playCommand(object6, 'set_tab', '1')
object7 = 'pqClientMainWindow/proxyTabDock/proxyTabWidget/qt_tabwidget_stackedwidget/1QScrollArea0/qt_scrollarea_viewport/1pqDisplayProxyEditorWidget0/Form/ViewGroup/ViewData'
QtTesting.playCommand(object7, 'set_boolean', 'true')
object8 = 'pqClientMainWindow/menubar'
QtTesting.playCommand(object8, 'activate', 'menu_View')
object8 = 'pqClientMainWindow/menubar/menu_View'
QtTesting.playCommand(object8, 'activate', 'Animation View')
object9 = 'pqClientMainWindow/animationViewDock/animationView/pqAnimationWidget/CreateDeleteWidget/PropertyCombo'
QtTesting.playCommand(object9, 'set_string', 'Start Theta')
object10 = "pqClientMainWindow/animationViewDock/animationView/1pqAnimationWidget0/1QHeaderView0"
QtTesting.playCommand(object10, "mousePress", "1,1,0,0,0,2")
QtTesting.playCommand(object10, "mouseRelease", "1,1,0,0,0,2")
object11 = 'pqClientMainWindow/VCRToolbar/1QToolButton3'
QtTesting.playCommand(object11, 'activate', '')
QtTesting.playCommand(object11, 'activate', '')
object12 = 'pqClientMainWindow/menubar/menu_File'
QtTesting.playCommand(object12, 'activate', '')
QtTesting.playCommand(object12, 'activate', 'actionFileSaveAnimation')
object13 = 'pqAnimationSettingsDialog/checkBoxDisconnect'
QtTesting.playCommand(object13, 'set_boolean', 'true')
object14 = 'pqAnimationSettingsDialog/width'
QtTesting.playCommand(object14, 'set_string', '300')
object14 = 'pqAnimationSettingsDialog/height'
QtTesting.playCommand(object14, 'set_string', '300')
object15 = 'pqAnimationSettingsDialog/okButton'
QtTesting.playCommand(object15, 'activate', '')
object16 = 'pqClientMainWindow/FileSaveAnimationDialog'
# Remove old files.
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0000.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0001.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0002.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0003.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0004.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0005.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0006.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0007.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0008.png')
QtTesting.playCommand(object16, 'remove', '$PARAVIEW_TEST_ROOT/disconnectSave.0009.png')
QtTesting.playCommand(object16, 'filesSelected', '$PARAVIEW_TEST_ROOT/disconnectSave.png')
import time
print "Wait for 60 secs"
time.sleep(60)
QtTestingImage.compareImage('$PARAVIEW_TEST_ROOT/disconnectSave.0005.png', 'DisconnectAndSaveAnimation.png')
|
[
"aashish.chaudhary@kitware.com"
] |
aashish.chaudhary@kitware.com
|
666bf442cdc1c542e448f6a557b6ea3f30ca94f4
|
15881e370abc080f47ec29ec9a1a776563ba8033
|
/static/exp_res/aco.py
|
7c4a4ac2bab88696bdd688b5ac072c7c4e0cbd6f
|
[] |
no_license
|
Hmz3192/ZJUT_Exp
|
1ab02de50e5a66cc075f9b3363b3c9a81737cf2f
|
a1cfefa113f6c89160b2156714e0ecb7487590d9
|
refs/heads/master
| 2023-07-02T07:03:25.002469
| 2021-08-05T07:50:34
| 2021-08-05T07:50:34
| 389,601,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,585
|
py
|
import numpy as np
import random
import matplotlib.pyplot as plt
import logging
import sys
logging.basicConfig(format="%(message)s", stream=sys.stdout, level=logging.DEBUG)
# city_location = np.loadtxt("test1.txt")  # read the city coordinates from a file if needed; test1.txt holds 14 city coordinates
city_location = np.array(
[[106.54, 29.59], [91.11, 29.97], [87.68, 43.77], [106.27, 38.47], [111.65, 40.82], [108.33, 22.84],
[126.63, 45.75], [125.35, 43.88], [123.38, 41.8], [114.48, 38.03], [112.53, 37.87], [101.74, 36.56],
[117, 36.65], [113.6, 34.76]])
num_city = city_location.shape[0]  # number of cities
num_ant = 5 * num_city  # number of ants
alpha = 1  # pheromone importance factor
beta = 5  # heuristic (expected-value) importance factor
rho = 0.2  # pheromone evaporation rate
Q = 1  # pheromone intensity factor
cnt_iteration1 = 0  # current iteration count (module-level, unused below)
max_iteration = 200  # maximum number of iterations
# compute the Euclidean distance between every pair of cities
def get_distance(city_location):
num = city_location.shape[0]
    distmat = np.zeros((num, num))  # initialize the distance matrix with zeros
for i in range(num):
for j in range(num):
            distmat[i][j] = np.linalg.norm(city_location[i] - city_location[j])  # Euclidean distance
return distmat
# initialize the parameters
# initialize the matrices
def pso_tsp():  # ant-colony optimization for the TSP (function name kept from the original)
cnt_iteration = 0
    distmat = get_distance(city_location)  # distance matrix between cities
    pheromone_table = np.ones((num_city, num_city))  # pheromone concentration matrix
    eta_table = 1.0 / (distmat + np.diag([10000] * num_city))  # local heuristic matrix; the large diagonal avoids division by zero
    diag = np.diag([1.0 / 10000] * num_city)  # matrix matching the diagonal entries just added
    eta_table = eta_table - diag  # reset the diagonal back to 0
    route_best = np.zeros((max_iteration, num_city))  # best route of each generation
    length_best = np.zeros((max_iteration, 1))  # length of the best route of each generation
    length_average = np.zeros((max_iteration, 1))  # average route length of each generation
    path_mat = np.zeros((num_ant, num_city)).astype(int)  # path of each ant (the original sized this by max_iteration, which only worked because max_iteration >= num_ant)
str = ""
while cnt_iteration < max_iteration:
        # pick a random starting city for every ant
for i in range(0, num_ant):
rand_num = random.randint(0, num_city - 1)
path_mat[i, 0] = rand_num
        length = np.zeros(num_ant)  # route length travelled by each ant
        # compute each ant's probability of moving to the next city
for i in range(num_ant):
visited = path_mat[i, 0]
unvisited = list(range(num_city))
            unvisited.remove(visited)  # drop the city that has already been visited
            # starting from j = 1, visit the remaining num_city - 1 cities
for j in range(1, num_city):
                trans_prob = np.zeros(len(unvisited))  # initialize the transition probabilities
                # compute the transition probabilities
for k in range(len(unvisited)):
trans_prob[k] = np.power(pheromone_table[visited][unvisited[k]], alpha) * np.power(
eta_table[visited][unvisited[k]], beta)
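                    # ACO transition rule: p(i -> j) is proportional to
                    # tau_ij**alpha * eta_ij**beta, where tau is the pheromone
                    # level and eta = 1 / distance.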
                # pick the next city to visit
                cumsumtrans_prob = (trans_prob / sum(trans_prob)).cumsum()  # cumulative transition probabilities, for roulette-wheel selection
                cumsumtrans_prob -= np.random.rand()  # subtract a random number; the first city whose entry stays positive is chosen
index = 0
cnt = 0
for value in cumsumtrans_prob:
if value > 0:
index = unvisited[cnt]
break
else:
                        cnt += 1  # advance to the first index that is just above 0
                # record the chosen city: append it to the path matrix, remove it from the unvisited list, and mark it as visited
                path_mat[i, j] = index  # store the index in the path matrix
                unvisited.remove(index)  # drop the index from the unvisited list
                length[i] += distmat[visited][index]  # accumulate the total distance travelled by this ant
                visited = index  # move on to the chosen city
            length[i] += distmat[visited][path_mat[i, 0]]  # add the distance from the last city back to the first
        length_average[cnt_iteration] = length.mean()  # average route length of this generation
        # find the best route
if cnt_iteration == 0:
length_best[cnt_iteration] = length.min()
            route_best[cnt_iteration] = path_mat[length.argmin()].copy()  # in the first generation, take this round's shortest path
str += ("The shortest distance in generation {} is {}. \n".format(cnt_iteration, length.min()))
else:
if length.min() > length_best[cnt_iteration - 1]:
length_best[cnt_iteration] = length_best[cnt_iteration - 1]
route_best[cnt_iteration] = route_best[cnt_iteration - 1].copy()
else:
length_best[cnt_iteration] = length.min()
                route_best[cnt_iteration] = path_mat[length.argmin()].copy()  # otherwise take this round's shortest path
str += ("The shortest distance in generation {} is {}. \n".format(cnt_iteration, length.min()))
        # update the pheromone
new_pheromone_table = np.zeros((num_city, num_city))
for i in range(num_ant):
for j in range(num_city - 1):
                new_pheromone_table[path_mat[i, j]][path_mat[i, j + 1]] += Q / distmat[path_mat[i, j]][
                    path_mat[i, j + 1]]  # deposit pheromone along the edge this ant used
                new_pheromone_table[path_mat[i, j + 1]][path_mat[i, 0]] += Q / distmat[path_mat[i, j + 1]][
                    path_mat[i, 0]]  # also count the edge from the last city back to the first
pheromone_table = (1 - rho) * pheromone_table + new_pheromone_table
        cnt_iteration += 1  # next iteration
    # report the best route and related information
str += ("The shortest distance is :{}\n".format(length_best.min()))
str += ("The routine is as follows: {}\n").format(route_best[np.argmin(length_best)])
    # plot the final result for verification
    result = route_best[-1]  # best route of the final generation
    # plt.plot(city_location[:, 0].T, city_location[:, 1].T, "*")  # plot the city points
for i in range(num_city - 1):
ax = plt.axes()
ax.arrow(city_location[int(result[i]), 0], city_location[int(result[i]), 1],
(city_location[int(result[i + 1]), 0] - city_location[int(result[i]), 0]) \
, (city_location[int(result[i + 1]), 1] - city_location[int(result[i]), 1]), head_width=0,
head_length=0, fc="k", ec="k") # 根据最后结果连接城市之间的线段
ax.arrow(city_location[int(result[-1]), 0], city_location[int(result[-1]), 1],
(city_location[int(result[0]), 0] - city_location[int(result[-1]), 0]), \
(city_location[int(result[0]), 1] - city_location[int(result[-1]), 1]), head_width=0, head_length=0,
fc="k", ec="k") # 画出最后一个城市指向第一个城市的线段
    # plt.show()  # display the plot
return str
|
[
"1134598796@qq.com"
] |
1134598796@qq.com
|
a45acb1419d1d3ba79d7a0de4d8b26d9a6a387e0
|
81940bcda8768bb106451e781604c5294aa12554
|
/emenu/emenu/settings.py
|
f8bf76da1a10e3f37b4fc9e487eaac9c8dc98018
|
[] |
no_license
|
MarceliWydra/Emenu
|
6eec2ec6bc536ce393337146b4c83a2a4d187cb1
|
0f8f827a24a0c2f1090163b838d924d5a1263383
|
refs/heads/master
| 2023-01-28T03:29:55.717227
| 2020-12-02T23:36:18
| 2020-12-02T23:36:18
| 318,019,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,405
|
py
|
"""
Django settings for emenu project.
Generated by 'django-admin startproject' using Django 2.2.17.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gl*osf@bs&k3(x598+a8lz-lo&cxb29(mrb#+rsprjp5#8txxc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ALLOW_ALL_ORIGINS = True
ROOT_URLCONF = 'emenu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'emenu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'emenu')
|
[
"marceli.wydra@gmail.com"
] |
marceli.wydra@gmail.com
|
7e9af219a82968e36365a2cf9a23ceaf78ef5ed0
|
a5e80be6dadc665917c8b330cbc5f904c101b55e
|
/pytorch/helpers.py
|
ac3cf05349c8d00718eea24508aaf557b1a50820
|
[] |
no_license
|
Engler93/Self-Supervised-Autogenous-Learning
|
45363c82e8f2b2014e7d68ed67cd4c1ba6e6d565
|
9f5e9eb75af95c6781e2d3e71bc0f739663c4120
|
refs/heads/main
| 2023-02-15T18:45:42.888322
| 2021-01-15T10:45:13
| 2021-01-15T10:45:13
| 327,023,179
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,096
|
py
|
import socket
from socket import AddressFamily
from socket import SocketKind
import numpy as np
import torch
from torch import nn
from torch.optim import lr_scheduler, Optimizer
def find_free_port(addr):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((addr, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def make_init_address(addr=None, port=None):
if addr is None:
hostname = addr or socket.gethostname()
ips = socket.getaddrinfo(
hostname, 0,
family=AddressFamily.AF_INET,
type=SocketKind.SOCK_STREAM
)
ips = [i[4][0] for i in ips]
ips = [i for i in ips if i not in (None, '', 'localhost', '127.0.0.1')]
if not ips:
raise RuntimeError('no IPv4 interface found')
addr = ips[0]
port = port or find_free_port(addr)
return 'tcp://%s:%d' % (addr, port)
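# A sketch of intended use (assumption): the returned string, e.g.
# "tcp://10.0.0.5:43817", can serve as the init_method for
# torch.distributed.init_process_group.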
class Delist(nn.Module):
def __init__(self, module):
nn.Module.__init__(self)
self.module = module
def forward(self, sample, *args, **kwargs):
return self.module(sample[0][0])
def _ensure_iterable(v):
if isinstance(v, str):
return [v]
else:
return list(v)
MEAN = 123.675, 116.28, 103.53
STD = 58.395, 57.12, 57.375
class Normalizer(nn.Module):
# noinspection PyUnresolvedReferences
def __init__(self, module, mean=None, std=None, is_float=False):
nn.Module.__init__(self)
self.module = module
if mean is None:
mean = [123.675, 116.28, 103.53]
if std is None:
std = [58.395, 57.12, 57.375]
if is_float:
for i in range(3):
mean[i] = mean[i]/255
std[i] = std[i]/255
print(mean)
print(std)
self.register_buffer(
'mean', torch.FloatTensor(mean).view(1, len(mean), 1, 1)/255
)
self.register_buffer(
'std', torch.FloatTensor(std).view(1, len(std), 1, 1)/255
)
def forward(self, x):
x = x.float() # implicitly convert to float
x = x.sub(self.mean).div(self.std)
return self.module(x)
class Unpacker(nn.Module):
def __init__(self, module, input_key='image', output_key='logits'):
super(Unpacker, self).__init__()
self.module = module
self.input_key = input_key
self.output_key = output_key
def forward(self, sample):
x = sample[self.input_key]
x = self.module(x)
sample[self.output_key] = x
return sample
class _LRPolicy(lr_scheduler.LambdaLR):
def __init__(self, optimizer, last_epoch=-1):
lr_scheduler.LambdaLR.__init__(self, optimizer, 1, last_epoch)
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def get_lr(self):
raise NotImplementedError
# noinspection PyMethodOverriding
def step(self, epoch=None, metrics=None):
lr_scheduler.LambdaLR.step(self, epoch)
class PiecewiseLinear(_LRPolicy):
def __init__(self, optimizer, knots, vals, last_epoch=-1):
self.knots = knots
self.vals = vals
_LRPolicy.__init__(self, optimizer, last_epoch)
del self.lr_lambdas
def get_lr(self):
r = np.interp([self.last_epoch], self.knots, self.vals)[0]
return [base_lr * r for base_lr in self.base_lrs]
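# Hypothetical usage sketch: ramp the learning rate from 0 to 0.1 over the
# first 5 epochs, then decay linearly to 0 by epoch 30:
#   scheduler = PiecewiseLinear(optimizer, knots=[0, 5, 30], vals=[0.0, 0.1, 0.0])
#   scheduler.step(epoch)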
|
[
"pengler@rhrk.uni-kl.de"
] |
pengler@rhrk.uni-kl.de
|
26f83bd32f0f51fecb1f418a7f0572ba228e1a4c
|
838b850b5716fff5872e657758b29dafe28fd59e
|
/apps/organization/urls.py
|
5c112ccbcab47fc2dea5e24371cfa20de7a4b531
|
[] |
no_license
|
linjiesen/Mxonline
|
365ac4310c3236611ac86683143b65ce359cbd23
|
22d7507873051e9ec25f3fc6da48c1e61e1becab
|
refs/heads/master
| 2020-05-21T21:40:24.723679
| 2019-05-11T16:52:38
| 2019-05-11T16:52:38
| 186,158,576
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,183
|
py
|
# encoding: utf-8
from organization.views import OrgView, AddUserAskView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, \
AddFavView, TeacherListView, TeacherDetailView
from django.urls import path, re_path
app_name = "organization"
urlpatterns = [
    # course organization list url
    path('list/', OrgView.as_view(), name="org_list"),
    # user consultation: "I want to learn"
    path('add_ask/', AddUserAskView.as_view(), name="add_ask"),
    # org home page; the regex captures a purely numeric org_id
    re_path(r'home/(?P<org_id>\d+)/', OrgHomeView.as_view(), name="org_home"),
    # org course page
    re_path(r'course/(?P<org_id>\d+)/', OrgCourseView.as_view(), name="org_course"),
    # org description page
    re_path(r'desc/(?P<org_id>\d+)/', OrgDescView.as_view(), name="org_desc"),
    # org teacher page
    re_path(r'org_teacher/(?P<org_id>\d+)/', OrgTeacherView.as_view(), name="org_teacher"),
    # add an org to favorites
    path('add_fav/', AddFavView.as_view(), name="add_fav"),
    # teacher list
    path('teacher/list/', TeacherListView.as_view(), name="teacher_list"),
    # teacher detail page
    re_path(r'teacher/detail/(?P<teacher_id>\d+)/', TeacherDetailView.as_view(), name="teacher_detail"),
]
|
[
"sqrtln@163.com"
] |
sqrtln@163.com
|
8ae1b0e2603185ca3f066a85347a8645b61df889
|
47ad6a81efc31faa19810cff1cca2717134f283e
|
/PyPoll/main.py
|
6a6220f2b7c3f8aa6c8a87211239175f4930ee12
|
[] |
no_license
|
kglibrarian/python-challenge
|
45ce02fbe791f279478ab838ecdd2f201fdaee2e
|
4ab4135459ef9052255a340d08f35148f9adde30
|
refs/heads/master
| 2020-04-26T21:03:54.992885
| 2019-03-11T23:26:12
| 2019-03-11T23:26:12
| 173,830,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,388
|
py
|
# Import the os module to create file paths across operating systems
import os
# Import the csv module for reading CSV files
import csv
csvpath = os.path.join('election_data_test.csv')
def voterInfo(voterID, county, candidate):
print("Election Results")
#The total number of votes cast
voterSum = len(voterID)
print(f"Total Votes: {voterSum}")
#A complete list of candidates who received votes
#candidateUnique = set(candidate)
#print(candidateUnique)
#Create dicitonary of Key: candidate name and Value: number of votes
candidateSum = {i:candidate.count(i) for i in candidate}
#print("candidateSum: ", candidateSum)
winner = max(candidateSum, key=candidateSum.get)
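    # max() with key=candidateSum.get returns the dictionary key (candidate
    # name) whose value (vote count) is largest.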
#print("Winner: ", winner, candidateSum[winner])
#create an empty dictionary for the final data
finalData = {}
#loop through each Key: candidate in our current dictionary
for i in candidateSum:
#each Key: candidate should be i
#print("i: ", i)
        #look up the number of votes this candidate received
        candidateVoteSum = candidateSum[i]
        #print("CandidateVoteSum: ", candidateVoteSum)
        #get the average number of votes each candidate received - create a percentage and round to three decimals
candidateAvg = (candidateSum[i]/voterSum)*100
candidateAvgRound = round(candidateAvg,3)
#print("CandidateAvgRound: ", candidateAvgRound)
#create a list of the data for each candidate - which is the average number of votes received, and the number of votes
candidateInfo = {i:[candidateAvgRound, candidateVoteSum]}
#print("CandidateInfo: ", candidateInfo)
        #add this new information into our dictionary
finalData.update(candidateInfo)
#print the finalData dictionary
#print(finalData)
test = []
#loop through the items in the dictionary and print them
for k, v in finalData.items():
#print(k, v[0], v[1])
print(f"{k}: {v[0]} ({v[1]})")
#the code below is just to get access to print this outside of the loop
name = k
percent = v[0]
votes = v[1]
test.append(name)
test.append(percent)
test.append(votes)
#print(test)
#dictionary comprehension version
#test = {k:v for k, v in finalData.items()}
#print("test is: " , test)
winner = max(finalData, key=finalData.get)
print("Winner: ", winner)
    f = open("results.txt", 'w')
    f.write("Election Results\n" f"Total Votes: {voterSum}\n")
    #write one line per candidate (test holds name, percent, votes triples)
    for j in range(0, len(test), 3):
        f.write(f"{test[j]}: {test[j+1]} ({test[j+2]})\n")
    f.write(f"Winner: {winner}\n")
f.close()
with open(csvpath, newline='') as csvfile:
# CSV reader specifies delimiter and variable that holds contents
csvreader = csv.reader(csvfile, delimiter=',')
#print(csvreader)
    # Read the header row first (skip this step if there is no header)
csv_header = next(csvreader)
#print(f"CSV Header: {csv_header}")
voterID = []
county = []
candidate = []
# Read each row of data after the header
for row in csvreader:
#print(row)
voterID.append(row[0])
county.append(row[1])
candidate.append(row[2])
#print(profLoss)
#print(month)
voterInfo(voterID, county, candidate)
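# Illustrative alternative for the tallying step above: collections.Counter
# does the per-candidate count and winner lookup in a couple of lines (same
# result as the dict comprehension, assuming `candidate` is the list of votes):
#
#   from collections import Counter
#   counts = Counter(candidate)
#   winner, winner_votes = counts.most_common(1)[0]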
|
[
"karen.gutzman@northwestern.edu"
] |
karen.gutzman@northwestern.edu
|
a3638d5459a2da3d0e41a495d7596b64ef04664b
|
bb63633ea6773067680e961af12d23dbb084df12
|
/Local-Detection/classifiers/nne.py
|
78285053641486a02c5716581cb9811dc309f143
|
[] |
no_license
|
agi2019/Framework-Deteksi
|
e157a73063ffacefa51377ffb9465e3f8f3fefd7
|
4be65f6644790f772f1d006b483f1f62e5e816f4
|
refs/heads/master
| 2020-11-27T11:49:58.330189
| 2020-01-28T00:20:23
| 2020-01-28T00:20:23
| 229,426,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,294
|
py
|
import keras
import numpy as np
from utils.utils import calculate_metrics
from utils.utils import create_directory
from utils.utils import check_if_file_exits
import gc
from utils.constants import UNIVARIATE_ARCHIVE_NAMES as ARCHIVE_NAMES
import time
class Classifier_NNE:
def create_classifier(self, model_name, input_shape, nb_classes, output_directory, verbose=True,
build=True, load_weights=False):
if model_name == 'fcn':
from classifiers import fcn
return fcn.Classifier_FCN(output_directory, input_shape, nb_classes, verbose, build=build)
if model_name == 'mlp':
from classifiers import mlp
return mlp.Classifier_MLP(output_directory, input_shape, nb_classes, verbose, build=build)
if model_name == 'resnet':
from classifiers import resnet
return resnet.Classifier_RESNET(output_directory, input_shape, nb_classes, verbose,
build=build, load_weights=load_weights)
if model_name == 'encoder':
from classifiers import encoder
return encoder.Classifier_ENCODER(output_directory, input_shape, nb_classes, verbose, build=build)
if model_name == 'mcdcnn':
from classifiers import mcdcnn
return mcdcnn.Classifier_MCDCNN(output_directory, input_shape, nb_classes, verbose, build=build)
if model_name == 'cnn':
from classifiers import cnn
return cnn.Classifier_CNN(output_directory, input_shape, nb_classes, verbose, build=build)
def __init__(self, output_directory, input_shape, nb_classes, verbose=False):
# self.classifiers = ['mlp','fcn','resnet','encoder','mcdcnn','cnn']
#self.classifiers = ['mlp','fcn','cnn']
self.classifiers = ['fcn','resnet','encoder'] # this represents NNE in the paper
out_add = ''
for cc in self.classifiers:
out_add = out_add + cc + '-'
self.archive_name = ARCHIVE_NAMES[0]
self.output_directory = output_directory.replace('nne',
'nne'+'/'+out_add)
create_directory(self.output_directory)
self.dataset_name = output_directory.split('/')[-2]
self.verbose = verbose
self.models_dir = output_directory.replace('nne','classifier')
self.iterations = 2
def fit(self, x_train, y_train, x_test, y_test, y_true):
# no training since models are pre-trained
start_time = time.time()
y_pred = np.zeros(shape=y_test.shape)
l = 0
# loop through all classifiers
for model_name in self.classifiers:
# loop through different initialization of classifiers
for itr in range(self.iterations):
if itr == 0:
itr_str = ''
else:
itr_str = '_itr_' + str(itr)
print ("classifier",model_name)
print ("iter ke",str(itr))
curr_archive_name = self.archive_name+itr_str
curr_dir = self.models_dir.replace('classifier',model_name).replace(
self.archive_name,curr_archive_name)
model = self.create_classifier(model_name, None, None,
curr_dir, build=False)
predictions_file_name = curr_dir+'y_pred.npy'
# check if predictions already made
if check_if_file_exits(predictions_file_name):
                    # then load only the predictions from the file
                    print("loading cached predictions")
curr_y_pred = np.load(predictions_file_name)
else:
                    # then compute the predictions
                    print("computing predictions")
curr_y_pred = model.predict(x_test,y_true,x_train,y_train,y_test,
return_df_metrics = False)
keras.backend.clear_session()
np.save(predictions_file_name,curr_y_pred)
if l == 0:
y_pred = curr_y_pred
else:
#y_pred = np.append(y_pred,curr_y_pred)
#y_pred= np.concatenate(y_pred,curr_y_pred)
#y_pred = np.zeros(shape=curr_y_pred)
y_pred = y_pred + curr_y_pred
print ("ypred",y_pred,"curr",curr_y_pred)
l+=1
# average predictions
y_pred = y_pred / l
        # save predictions
np.save(self.output_directory+'y_pred.npy',y_pred)
print ("ypred sebelum convert",y_pred)
# convert the predicted from binary to integer
y_pred = np.argmax(y_pred, axis=1)
print ("ypred setelah convert",y_pred)
duration = time.time() - start_time
df_metrics = calculate_metrics(y_true, y_pred, duration)
df_metrics.to_csv(self.output_directory + 'df_metrics.csv', index=False)
        # the creation of this directory means the ensemble computation is done
create_directory(self.output_directory + '/DONE')
gc.collect()
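# Minimal sketch of the ensembling idea used in fit() above: average the soft
# (per-class probability) predictions of several models, then take the argmax
# (arrays below are made up for illustration):
#
#   import numpy as np
#   p1 = np.array([[0.7, 0.3], [0.2, 0.8]])
#   p2 = np.array([[0.6, 0.4], [0.4, 0.6]])
#   avg = (p1 + p2) / 2           # average the soft predictions
#   labels = np.argmax(avg, 1)    # -> array([0, 1])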
|
[
"49863588+agi2019@users.noreply.github.com"
] |
49863588+agi2019@users.noreply.github.com
|
dad9a0f5bb19b52082712f1571995b46984e9afe
|
7057e137c5aeef1911eaecd24656d780d86879a4
|
/day9/day9.py
|
b6467492a756ff5c6574f95fc2ad0ea15c0cc833
|
[] |
no_license
|
simonvbrae/advent-of-code-2020
|
2255b41be61f31f82499ab076e0d0f97d4a1b84b
|
1f8f738bd48956e03939e3c09d32803530592094
|
refs/heads/master
| 2023-01-31T22:52:26.521069
| 2020-12-20T11:23:44
| 2020-12-20T11:23:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
preamb_len = 25
def find_incorrect_number(number, numbers):
for index,nr in enumerate(numbers):
for nr_2 in numbers[index+1:]:
if nr + nr_2 == number:
return 0
return number
# Original solution
def find_summing_set(number, numbers):
for startindex, _ in enumerate(numbers):
for endindex in range(startindex+1, len(numbers)):
if sum(numbers[startindex: endindex]) == number:
return numbers[startindex: endindex]
return (0)
# Much faster!
# Dynamic programming solution
def find_summing_set_dynamic(number, numbers):
    size = len(numbers)
    # dp_matrix[s][e] holds sum(numbers[s..e]); seed the diagonal with the
    # single-element sums so the recurrence and the returned slice line up
    dp_matrix = [[numbers[y] if x == y else -1 for x in range(size)] for y in range(size)]
    for startindex in range(0, size):
        for endindex in range(startindex+1, size):
            dp_matrix[startindex][endindex] = dp_matrix[startindex][endindex-1] + numbers[endindex]
            if dp_matrix[startindex][endindex] == number:
                return numbers[startindex: endindex+1]
    return (0)
def main():
f=open("input", "r")
lines=[int(l.strip()) for l in f.readlines()]
for i, line in enumerate(lines[preamb_len:]):
incorrect_nr = find_incorrect_number(line, lines[i:i+preamb_len])
if incorrect_nr:
print("incorrect number found: " + str(incorrect_nr))
summing_set = find_summing_set_dynamic(incorrect_nr, lines[0:i+preamb_len])
print("weakness: "+str(max(summing_set) + min(summing_set)))
main()
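# Illustrative O(n) alternative to the quadratic/DP searches above: since all
# inputs are positive, a sliding window over a running sum finds the range
# (sketch; `target` is the invalid number found in part 1):
#
#   def find_summing_set_window(target, numbers):
#       lo, acc = 0, 0
#       for hi, n in enumerate(numbers):
#           acc += n
#           while acc > target and lo < hi:
#               acc -= numbers[lo]
#               lo += 1
#           if acc == target and hi > lo:
#               return numbers[lo:hi + 1]
#       return []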
|
[
"simon.vanbraeckel@ugent.be"
] |
simon.vanbraeckel@ugent.be
|
2db80125614126b1bda5dac81b52721288060e5e
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/D/dasfaha/get_imdb_movie_rating.py
|
4f8bab114f20f4b7fedaa3cbfb02a591f9fa6362
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,532
|
py
|
import lxml.html
import scraperwiki
#Get the data
html = scraperwiki.scrape("http://www.imdb.com/title/tt2103264/")
#The request to IMDB returns plain text so the line below processes this text and turns it into a format that can be queried
root = lxml.html.fromstring(html)
#The rating of a movie is within a div with class: "titlePageSprite star-box-giga-star" like this:
#
#<div class="titlePageSprite star-box-giga-star">
# 7.7
#</div>
#
#Use CSS selector to get the div html element that has class="titlePageSprite"
el = root.cssselect("div.titlePageSprite")
#el is a list as there could be several div elements with the same class. In our case we know there is only one div with that class
print "Number of elements in el: {0}".format(len(el))
#Create a python 'dictionary' to store the two fields of the data we just scraped: 'movie title' and 'rating'
data = {
'movie title': 'Emperor', #exercise: is it possible to scrape the movie name from the page? :p
'rating' : el[0].text
}
print "Movie rating: {0}".format(data['rating']) #The fields in 'data' can be accessed by their names
#Save into a databaase. Completely pointless in this case but useful if the data changes...
scraperwiki.sqlite.save(unique_keys=['movie title'], data=data)
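# On the "exercise" above: one hedged way to get the movie name without knowing
# IMDB's exact markup is the document's <title> element (illustrative; the page
# structure may have changed since this scraper was written):
#
#   title_el = root.cssselect("title")
#   if title_el:
#       print(title_el[0].text_content())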
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
2590462966f909cd040de762441fc5e29418c2f9
|
67e302a19a4975e652dcbc106609432f1484dc69
|
/gesbol8.py
|
f088047656c3d1ada7655de1987e92b28effe484
|
[] |
no_license
|
iparedes/gesbol8
|
1af13a0bd0e7f2131f83d4d666dc932e0942051d
|
869fc009523be49e4046fbaf8428c2ab3fd9ce81
|
refs/heads/master
| 2020-06-08T05:02:51.199060
| 2014-02-19T18:36:12
| 2014-02-19T18:36:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
# coding=utf-8
__author__ = 'nacho'
from ConfigManager import ConfigManager
from Tkinter import Tk
from Interfaz import Interfaz
def main():
Parametros=ConfigManager("./gesbol.ini")
root = Tk()
gui=Interfaz(root,Parametros)
    gui.mainloop()
if __name__ == '__main__':
main()
|
[
"iparedes@gmail.com"
] |
iparedes@gmail.com
|
7c004f111481b118849d8850a15900aaf292d0ad
|
102f2b8a9b561e278267d4d33a941665dca02956
|
/service/API_Yandex.py
|
1fac1b9f02ae0d12e6d8e4d2c03b6b6c7bba8cd4
|
[] |
no_license
|
WTTeneger/portfolio_new
|
7ba8964aac889295c35b6ca540b3df34a2939e4e
|
e62574cb86a7c66427fa68320d865cd5f700cb77
|
refs/heads/main
| 2023-06-16T12:15:21.084391
| 2021-07-11T21:21:57
| 2021-07-11T21:21:57
| 382,823,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,513
|
py
|
import requests
import time
from fake_useragent import UserAgent
ua = UserAgent()
domen = 'https://kinopoiskapiunofficial.tech'
# key_api = '8ddff7e1-d699-4f01-aaf6-a98347651223'
# key_api = '6c78c319-3562-4d75-bd42-e40cee7a7010'
key_api = '9788bb82-dfcd-408e-8a86-49f8834b9540'
h = {
'X-API-KEY': key_api,
}
# print(h)
class API_Cinema():
""" Films """
def get_by_keyword(self, text, page=1):
"""Get list of films by keyword
Args:
text ([str]): [Текст который ввел пользователь]
"""
urls = domen + f'/api/v2.1/films/search-by-keyword?keyword={text}&page={page}'
#print(urls +'\n\n\n\n')
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_data_film(self, id_kinopoisk, append_to_response='RATING'):
"""Get data of film
Получаем информацию о фильме
Args:
id_kinopoisk ([int]): [id Kinopoisk]
"""
if(append_to_response == ''):
urls = domen + f'/api/v2.1/films/{id_kinopoisk}'
else:
urls = domen + f'/api/v2.1/films/{id_kinopoisk}?append_to_response={append_to_response}'
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_frame_film(self,id_kinopoisk):
"""Get frame from film
Получаем кадрый из фильма
Args:
id_kinopoisk ([int]): [id Kinopoisk]
"""
urls = domen + f'/api/v2.1/films/{id_kinopoisk}/frames'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_trailer_film(self, id_kinopoisk):
"""Get trailer film
Получаем трейлер фильма
Args:
id_kinopoisk ([int]): [id Kinopoisk]
"""
urls = domen + f'/api/v2.1/films/{id_kinopoisk}/videos'
# print(urls)
request = requests.get(urls, headers=h)
# print(request.status_code)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_sequels_and_prequels_film(self, id_kinopoisk):
"""Get tsequels and prequels film
Получаем сиквелев и приквелев фильма
Args:
id_kinopoisk ([int]): [id Kinopoisk]
"""
urls = domen + f'/api/v2.1/films/{id_kinopoisk}/sequels_and_prequels'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_filters(self):
"""Get filters
Получаем список фильмов фильма
"""
urls = domen + f'/api/v2.1/films/filters'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_film_in_filters(self, counry=[], genre=[], order = 'RATING', types='ALL', ratingFrom=0, ratingTo=10, yearFrom=1888, yearTo=2021, page=1):
"""Get films in filter
~~~
Получаем список фильмов по фиьтру
Args:
counry (list, optional): [Страны]. Defaults to [].
genre (list, optional): [жанры]. Defaults to [].
order (str, optional): [Соритировать по [RATING, NUM_VOTE, YEAR]]. Defaults to 'RATING'.
types (str, optional): [Тип программы [ALL, FILM, TV_SHOW]]. Defaults to 'ALL'.
ratingFrom (int, optional): [Минимальный рейтинг]. Defaults to 0.
ratingTo (int, optional): [Максимальный рейтинг]. Defaults to 10.
yearFrom (int, optional): [Минимальный год]. Defaults to 1888.
yearTo (int, optional): [максимальный год]. Defaults to 2021.
page (int, optional): [Колличество страниц]. Defaults to 1.
Returns:
[(list, optional)]: Данные по поиску
"""
urls = domen + f'/api/v2.1/films/search-by-filters?'
for el in genre:
urls += f'genre={el}&'
for el in counry:
urls += f'country={el}&'
urls += f'order={order}&type={types}&ratingFrom={ratingFrom}&ratingTo={ratingTo}&yearFrom={yearFrom}&yearTo={yearTo}&page={page}'
print(urls)
p = {}
request = requests.get(urls, headers=h, params=p)
# #print(request.text)
if(request.status_code == 200):
return((request.json()))
elif(request.status_code == 404):
            return 'Nothing found'
else:
return False
def get_top_films(self, types='TOP_100_POPULAR_FILMS', page=1):
"""[Возвращает топ фильмов относительно типа поиска]
Args:
type (str, optional): [Тип поиска [TOP_100_POPULAR_FILMS, TOP_AWAIT_FILMS, TOP_250_BEST_FILMS]]. Defaults to 'TOP_100_POPULAR'.
page (int, optional): [Колличество страницу]. Defaults to 1 на 1 странице 20 фильмов.
Returns:
[type]: [description]
"""
urls = domen + f'/api/v2.2/films/top?type={types}&page={page}'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_similars_film(self, id_kinopoisk):
"""Получаем похожие фильмы на запрошенный
Args:
id_kinopoisk ([int]): id_kinopoisk
Returns:
            [list]: similar films
"""
urls = domen + f'/api/v2.2/films/{id_kinopoisk}/similars'
#print(urls)
request = requests.get(urls, headers=h)
#print(request)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_releaze_film(self, year = 2021, month = 'JANUARY', page=1):
"""Получаем релизы по запрошенной дате
Args:
id_kinopoisk ([int]): id_kinopoisk
Returns:
            [list]: releases for the given month
"""
urls = domen + f'/api/v2.1/films/releases?year={year}&month={month}&page={page}'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_studios_date(self, id_kinopoisk):
"""Get studios data
Данные о студии
Args:
id_kinopoisk ([int]): [id Kinopoisk]
"""
urls = domen + f'/api/v2.1/films/{id_kinopoisk}/studios'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
""" Reviews """
def get_reviews(self, id_kinopoisk, page=1):
"""Return set of reviews with pagination. Each page contains no more than 20 reviews.
        Reviews
Args:
id_kinopoisk ([int]): [id Kinopoisk]
"""
urls = domen + f'/api/v1/reviews?filmId={id_kinopoisk}&page={page}'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_reviews_details(self, id_reviews):
"""Return full data for particular review id
        Full review details
Args:
id_reviews ([int]): [id_reviews from get_reviews()]
"""
urls = domen + f'/api/v1/reviews/details?reviewId={id_reviews}'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
""" Staff """
def get_staff(self, id_kinopoisk):
"""Return list staffs.
Отзывы
Args:
id_kinopoisk ([int]): [id Kinopoisk]
"""
urls = domen + f'/api/v1/staff?filmId={id_kinopoisk}'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
def get_staff_details(self, id_person):
"""Return details staff.
Все работы сотрудника - актёра
Args:
id_person ([int]): [id person get in get_staff()]
"""
urls = domen + f'/api/v1/staff/{id_person}'
#print(urls)
request = requests.get(urls, headers=h)
if(request.status_code == 200):
return((request.json()))
else:
return False
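# Minimal usage sketch (illustrative; requires a valid kinopoiskapiunofficial
# API key in `key_api` above and network access; field names follow the v2.1
# search response, so treat them as assumptions):
#
#   cinema = API_Cinema()
#   result = cinema.get_by_keyword('Emperor')
#   if result:
#       for film in result.get('films', []):
#           print(film.get('nameEn'), film.get('rating'))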
|
[
"50665935+WTTeneger@users.noreply.github.com"
] |
50665935+WTTeneger@users.noreply.github.com
|
2bf07793bfef24a2bed035690bb6849533f776bc
|
1239393937f155fd5090c41f462262098fa6c6c1
|
/dev/docs/source/conf.py
|
20af5dc2d5e88af3e123d49e2e27b9d9573e3297
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause"
] |
permissive
|
hyunjinb/XlsxWriter
|
af4fe17c11b81c05ba8ec6adf27d0f6d1d632399
|
b4c4b499ffb3db8e0fa1b306880bcbcb3675fd4d
|
refs/heads/master
| 2021-01-23T13:42:00.785444
| 2017-09-05T23:17:06
| 2017-09-05T23:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,926
|
py
|
# -*- coding: utf-8 -*-
#
# XlsxWriter documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 28 00:12:14 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'XlsxWriter'
copyright = u'2013-2017, John McNamara'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.9'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/2/': None}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# # 'nosidebar': True,
# 'sidebarbgcolor': '#F2F2F2',
# 'relbarbgcolor': '#9CB640',
# 'linkcolor': '#9CB640',
# 'sidebarlinkcolor': '#9CB640',
# 'footerbgcolor': '#FFFFFF',
# 'footertextcolor': '#9CB640',
# 'headtextcolor': '#9CB640',
# 'codebgcolor': '#FFFFFF',
# }
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "XlsxWriter Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_images/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'XlsxWriterdoc'
# Remove permalinks.
html_add_permalinks = ""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '11pt',
# Additional stuff for the LaTeX preamble.
'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'XlsxWriter.tex', u'Creating Excel files with Python and XlsxWriter',
u'John McNamara', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_images/logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'xlsxwriter', u'XlsxWriter Documentation',
[u'John McNamara'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'XlsxWriter', u'XlsxWriter Documentation',
u'John McNamara', 'XlsxWriter', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'XlsxWriter'
epub_author = u'John McNamara'
epub_publisher = u'John McNamara'
epub_copyright = u'2013-2017, John McNamara'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
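# Illustrative: building the docs with this configuration (exact paths depend
# on the project layout; 'source' is the directory containing this conf.py):
#
#   sphinx-build -b html source/ build/html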
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
754e0bce9c3fc9f004f3229a31843293706fb40a
|
72c973cf70e2a0e2517e26e9bca1e80fb9c06288
|
/DataType02/NumberType1.py
|
302bf34201470d9e831c412ccd4a2a8b8376ea4e
|
[] |
no_license
|
HeoUk/Python-from-KOSMO
|
1f24ff7ff3b4e6190870e8611ab29c8fb366538b
|
738b59f891c5d726e66b3cffa21bd8d1dc9f2c0b
|
refs/heads/main
| 2023-06-16T18:06:24.431746
| 2021-07-19T13:34:06
| 2021-07-19T13:34:06
| 384,444,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
#An integer (int) is a number without a decimal point,
#e.g. 100, -100
def pprint(value):
print('value:',value,sep='',end=',')
print('type:',type(value),sep='')
a=100
pprint(a)
#In Python 3, integer / integer = float (in Python 2, integer / integer = integer).
b= 5/2
pprint(b)
#The int() function converts to an integer, truncating the fractional part
c=int(b)
pprint(c)
'''
* Besides decimal, integers can be written in binary, octal, and hexadecimal:
binary: prefix with 0b(B); uses the digits 0 and 1.
octal: prefix with 0o(O); uses the digits 0 through 7.
hexadecimal: prefix with 0x(X); uses 0-9 and A-F (lowercase a-f also allowed).
'''
print('[Writing numbers in each base]')
print('binary : ', 0b10)  # 1*2^1 + 0*2^0 = 2+0
print('octal : ', 0o10)
print('hex : ', 0x10)
#int values have no size limit: arbitrarily large integers can be represented
#Python 3 unified everything into int (Python 2 had a separate long type)
d=478325643765072346756234756278346527436528734652837465237456234765347
pprint(d)
#A float is a number with a decimal point,
#e.g. 10.0, -3.14
#An operation between a float and an integer yields a float
a=10
b=3.5
pprint(b)
pprint(a+b)
print('[0.1+0.2 result: it is not exactly 0.3]')
a=0.1
b=0.2
print(a+b)#0.30000000000000004
print('[Converting an integer to a float]')
pprint(float(1+2))
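# Illustrative follow-up on the 0.1+0.2 surprise above: float equality should
# be checked with a tolerance, e.g. math.isclose (Python 3.5+):
#
#   import math
#   print(math.isclose(0.1 + 0.2, 0.3))  # True
#   print(0.1 + 0.2 == 0.3)              # False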
|
[
"noreply@github.com"
] |
HeoUk.noreply@github.com
|
86c1baeaad6ef264bb34f17abd2e37ff5e0cf09e
|
ee8d680c21761c04bb307975b64f6b7d6c6be007
|
/main.py
|
c82cd639f32cf430b835dac2f556551885453e17
|
[] |
no_license
|
Pari555/Concatenation-Input-Type-Cast
|
2f4dc1f3b8580bd56c356bc595a2057920b2fd88
|
adb42911c1e7800158381fe4e8a8c95d9a60dea0
|
refs/heads/master
| 2023-02-24T19:05:11.624992
| 2021-01-21T23:35:03
| 2021-01-21T23:35:03
| 329,768,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
print("Hello" + " World")
print("Hello" + " " + "World." + "Today is Thursday")
#this is concatenation of strings
print()
print("Tommorrow is Friday")
#If adding strings is "concatenaton"
#What is the + for integers called? Addition
# 5 + 4 = 9
print("5" + "4")
print(5 + 4)
FirstName = "Debanshi" #string
print("My name is " + FirstName + ".")
age = 12 #integer
print("My age is " + str(age))
#firstName != FirstName !(not)=(equal)
userName = input("What is your name? ")
print("Hello "+ userName)
# Ask the user for their birth year
userBirth = int(input("What year were you born in? "))
print(2021 - userBirth)
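# Illustrative: f-strings (Python 3.6+) avoid the str()/+ juggling shown above:
#
#   name = "Debanshi"
#   age = 12
#   print(f"My name is {name} and I am {age}.")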
|
[
"123050@eesd.org"
] |
123050@eesd.org
|
59f658230c9dd8484b67ab2baf9f5708c7cc1980
|
a7a7f03c7b60bc7277a1b98982b0497bfd8542eb
|
/draw.py
|
6f1e2e89719a23109111bb718ae041b9df513d46
|
[] |
no_license
|
cmohri/graphics_04
|
a991262a23c7769f4c06d3fd2ed845f694429229
|
219b9ec14db0bfabf331e6dd38e6696dbe934fb7
|
refs/heads/master
| 2020-04-28T14:26:36.004199
| 2019-03-13T03:30:45
| 2019-03-13T03:30:45
| 175,338,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,458
|
py
|
from display import *
from matrix import *
import math  # used by add_circle; imported explicitly in case the star imports above do not provide it
def add_circle( points, cx, cy, cz, r, step):
# parametric equations for x and y
x0 = r+cx
y0 = cy
t = step
while t <= 1.0001:
x1 = r*math.cos(2*math.pi*t)+cx
y1 = r*math.sin(2*math.pi*t)+cy
add_edge(points, x0, y0, cz, x1, y1, cz)
x0 = x1
y0 = y1
t += step
def add_curve( points, x0, y0, x1, y1, x2, y2, x3, y3, step, curve_type ):
x = generate_curve_coefs(x0,x1,x2,x3,curve_type)[0]
y = generate_curve_coefs(y0,y1,y2,y3,curve_type)[0]
t = step
x_0 = x0
y_0 = y0
while t <= 1.001:
# parametric equations for x and y
x_1 = x[0]*pow(t,3)+x[1]*pow(t,2)+x[2]*t+x[3]
y_1 = y[0]*pow(t,3)+y[1]*pow(t,2)+y[2]*t+y[3]
add_edge(points,x_0,y_0,0,x_1,y_1,0)
x_0 = x_1
y_0 = y_1
t += step
def draw_lines( matrix, screen, color ):
if len(matrix) < 2:
        print('Need at least 2 points to draw')
return
point = 0
while point < len(matrix) - 1:
draw_line( int(matrix[point][0]),
int(matrix[point][1]),
int(matrix[point+1][0]),
int(matrix[point+1][1]),
screen, color)
point+= 2
def add_edge( matrix, x0, y0, z0, x1, y1, z1 ):
add_point(matrix, x0, y0, z0)
add_point(matrix, x1, y1, z1)
def add_point( matrix, x, y, z=0 ):
matrix.append( [x, y, z, 1] )
def draw_line( x0, y0, x1, y1, screen, color ):
#swap points if going right -> left
if x0 > x1:
xt = x0
yt = y0
x0 = x1
y0 = y1
x1 = xt
y1 = yt
x = x0
y = y0
A = 2 * (y1 - y0)
B = -2 * (x1 - x0)
#octants 1 and 8
if ( abs(x1-x0) >= abs(y1 - y0) ):
#octant 1
if A > 0:
d = A + B/2
while x < x1:
plot(screen, color, x, y)
if d > 0:
y+= 1
d+= B
x+= 1
d+= A
#end octant 1 while
plot(screen, color, x1, y1)
#end octant 1
#octant 8
else:
d = A - B/2
while x < x1:
plot(screen, color, x, y)
if d < 0:
y-= 1
d-= B
x+= 1
d+= A
#end octant 8 while
plot(screen, color, x1, y1)
#end octant 8
#end octants 1 and 8
#octants 2 and 7
else:
#octant 2
if A > 0:
d = A/2 + B
while y < y1:
plot(screen, color, x, y)
if d < 0:
x+= 1
d+= A
y+= 1
d+= B
#end octant 2 while
plot(screen, color, x1, y1)
#end octant 2
#octant 7
else:
            d = A/2 - B
while y > y1:
plot(screen, color, x, y)
if d > 0:
x+= 1
d+= A
y-= 1
d-= B
#end octant 7 while
plot(screen, color, x1, y1)
#end octant 7
#end octants 2 and 7
#end draw_line
s = new_screen()
fname = "image.ppm"
m = new_matrix()
add_circle(m, 100, 100, 0, 40, .25)
draw_lines(m, s, [255, 0,0])
save_ppm(s, fname)
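# Illustrative extra demo (same pattern as the circle above; assumes plot,
# new_screen and save_ppm from display behave as used elsewhere in this file):
#
#   m2 = new_matrix()
#   add_edge(m2, 0, 0, 0, 150, 60, 0)   # a single line segment
#   draw_lines(m2, s, [0, 255, 0])
#   save_ppm(s, fname)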
|
[
"clara.mohri@gmail.com"
] |
clara.mohri@gmail.com
|
9c2094cf9d422a88ec4f26ae5d04d22f8a5b3625
|
28bf8e279fde1a72e52e3689c221f4d9ad7ee168
|
/0x0A-python-inheritance/11-square.py
|
a05e3c9d222959a67dac3ccd669e9fef4c8c43f5
|
[] |
no_license
|
nicorotich/alx-higher_level_programming
|
f60ac74d3aaef53ef5206ffc7e622474991398bf
|
14e5f3a5dea3ee48ae264f31b5e899308ec8d563
|
refs/heads/main
| 2023-06-30T10:36:10.576451
| 2021-08-08T12:13:51
| 2021-08-08T12:13:51
| 361,627,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
#!/usr/bin/python3
# 11-square.py
# Brennan D Baraban <375@holbertonschool.com>
"""Defines a Rectangle subclass Square."""
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
"""Represent a square."""
def __init__(self, size):
"""Initialize a new square.
Args:
size (int): The size of the new square.
"""
self.integer_validator("size", size)
super().__init__(size, size)
self.__size = size
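# Hedged usage sketch (assumes the 9-rectangle Rectangle defines area() and
# integer_validator raises on non-int input, as in the usual version of that
# exercise):
#
#   sq = Square(5)
#   print(sq.area())  # -> 25
#   Square("5")       # raises TypeError: size must be an integer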
|
[
"nicholaskrotich8@gmail.com"
] |
nicholaskrotich8@gmail.com
|
9698d3517322a3e343889b6e1a1ea6aab467f38e
|
56f66cabf3793fb1b0658e6354ee31dc8f48fb86
|
/Object_Detect/test01_image.py
|
73f4eff2814341f0991a9b808184117db2d73e93
|
[] |
no_license
|
Jiwon0801/MachineLearning
|
078028d42bd8651f7fb312ead7d3a6bf988e11f3
|
c5b080420deb4f97ead57b32b29c596f6c0364f0
|
refs/heads/master
| 2020-05-07T21:30:07.985551
| 2019-04-29T04:13:08
| 2019-04-29T04:13:08
| 180,906,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,120
|
py
|
import numpy as np
from PIL import Image
from detecter_image import get_detect_image
from detecter import Detecter
import cv2
# List of test image files
TEST_IMAGE_PATHS = [ './test_images/image6.jpg']
THRESHOLD = 0.3
detecter = Detecter()
detecter.setup('./frozen_inference_graph.pb', './mscoco_label_map.pbtxt')
def getCropImage(object_list, image):
height, width, _ = image.shape
sub_image = []
for ix, obj in enumerate(object_list):
box = obj[0]
(ymin, xmin, ymax, xmax) = (int(box[0]*height), int(box[1]*width),
int(box[2]*height), int(box[3]*width))
sub_image.append(image[ymin:ymax, xmin:xmax])
return sub_image
for image_path in TEST_IMAGE_PATHS:
image, image_ex = get_detect_image(image_path)
#print(image.shape, image_ex.shape)
(boxes, scores, classes, num) = detecter.detect(image_ex)
#print('object num',num)
    # plain approach (no filter())
object_list = []
for output in zip (boxes, scores, classes):
if( output[1] > THRESHOLD and output[2]==10):
object_list.append(output)
print(output)
sub_image = getCropImage(object_list, image)
i=1
for ix, image in enumerate(sub_image):
##### Color
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
h, s, v = cv2.split(hsv)
h_red = cv2.inRange(h, 0, 8)
h_yellow = cv2.inRange(h, 8, 30)
h_green = cv2.inRange(h, 45, 80)
yellow = cv2.bitwise_and(hsv, hsv, mask=h_yellow)
yellow = cv2.cvtColor(yellow, cv2.COLOR_HSV2BGR)
green = cv2.bitwise_and(hsv, hsv, mask=h_green)
green = cv2.cvtColor(green, cv2.COLOR_HSV2BGR)
        ##### Circle
        blurred = cv2.medianBlur(image, 5)
        gray = cv2.cvtColor(blurred, cv2.COLOR_RGB2GRAY)
        cimg = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=35, minRadius=0, maxRadius=0)
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for c in circles[0, :]:
                # draw the outer circle
                cv2.circle(cimg, (c[0], c[1]), c[2], (0, 255, 0), 2)
                # draw the center of the circle
                cv2.circle(cimg, (c[0], c[1]), 2, (0, 0, 255), 3)
#image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
#cv2.imshow('yellow{}'.format(i), yellow)
if ix == 1:
cv2.imshow('green{}'.format(i), green)
cv2.imwrite("./saved_image.jpg", green)
i=i+1
#print(image.shape)
#필터 사용
    #object_list = filter(lambda item: item[1]>THRESHOLD, zip(boxes, scores, classes))
#for box, scores, classes in object_list:
# print(box, scores, classes)
#print()
#detecter.visualize(image, boxes, classes, scores, THRESHOLD)
#image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
#cv2.imshow(image_path, image)
#print('boxes', boxes)
#print('scores', scores)
#print('classes', classes)
cv2.waitKey()
cv2.destroyAllWindows()
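# Illustrative: how getCropImage maps a normalized detection box to pixel
# coordinates (hypothetical 480x640 image; mirrors the arithmetic above):
#
#   box = (0.1, 0.2, 0.5, 0.6)  # (ymin, xmin, ymax, xmax), normalized
#   h, w = 480, 640
#   print(int(box[0]*h), int(box[1]*w), int(box[2]*h), int(box[3]*w))
#   # -> 48 128 240 384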
|
[
"noreply@github.com"
] |
Jiwon0801.noreply@github.com
|
4db34577a189824be47add722f775cc3b21b0971
|
051802c6c1783dc337b6d064726f75a61abba476
|
/backend/lead/models.py
|
f64dfd8effe368c9724e86743909d39bcab27208
|
[] |
no_license
|
cspartho/vue-django-crm
|
b0d5aa0b699e8efb8f6ca86e7fcc37171f2d343b
|
977e05d5f583df2339ed8aaa3e18bcf61d58ad12
|
refs/heads/main
| 2023-07-07T03:05:43.736204
| 2021-08-12T14:18:32
| 2021-08-12T14:18:32
| 395,342,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
from django.contrib.auth.models import User
from django.db import models
from team.models import Team
class Lead(models.Model):
NEW = 'new'
CONTACTED='contacted'
INPROGRESS = 'inprogress'
LOST = 'lost'
WON = 'won'
CHOICES_STATUS=(
(NEW,'New'),
(CONTACTED,'Contacted'),
(INPROGRESS,'In Progress'),
(LOST,'Lost'),
(WON,'Won'),
)
LOW = 'low'
MEDIUM = 'medium'
HIGH = 'high'
CHOICES_PRIORITY=(
(LOW,'Low'),
(MEDIUM,'Medium'),
(HIGH,'High'),
)
team = models.ForeignKey(Team,related_name='leads',on_delete=models.CASCADE)
company = models.CharField(max_length=255)
contact_person = models.CharField(max_length=255)
email = models.EmailField()
phone = models.CharField(max_length=50)
website = models.CharField(max_length=255,blank=True,null=True)
confidence = models.IntegerField(blank=True, null=True)
estimated_value = models.IntegerField(blank=True,null=True)
status = models.CharField(max_length=25,choices=CHOICES_STATUS,default=NEW)
priority = models.CharField(max_length=25,choices=CHOICES_PRIORITY,default=MEDIUM)
assigned_to = models.ForeignKey(User,related_name='assignedleads',blank=True,null=True,on_delete=models.SET_NULL)
created_by = models.ForeignKey(User,related_name = 'leads', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
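# Hedged usage sketch of the model above (assumes a Team and User already
# exist; status and priority fall back to the declared defaults):
#
#   lead = Lead.objects.create(
#       team=team, company='Acme', contact_person='Jane Doe',
#       email='jane@acme.test', phone='555-0100', created_by=user)
#   lead.status    # -> 'new'
#   lead.priority  # -> 'medium'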
|
[
"cspartho19@gmail.com"
] |
cspartho19@gmail.com
|
8a3441b439ae0c781ace3ba8281fe64a57450d67
|
b550eda62179ffd8e49a59df7f8a30163140204f
|
/backend/openshift-old/services/user/service/model/user.py
|
72451f8834453939723096891846cc39a7ccf1a3
|
[
"Apache-2.0"
] |
permissive
|
bgoesswe/openeo-repeatability
|
6222fb235b70fda9da998b63fec92c0e5ac07169
|
087b9965e710d16cd6f29cb25e2cb94e443c2b30
|
refs/heads/master
| 2022-12-11T03:43:35.365574
| 2018-08-07T20:02:02
| 2018-08-07T20:02:02
| 139,158,921
| 0
| 1
| null | 2022-12-08T02:15:15
| 2018-06-29T14:27:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,121
|
py
|
''' Model of User '''
import jwt
import datetime
from flask import current_app
from service import DB, BCRYPT
class User(DB.Model):
__tablename__ = "users"
    id = DB.Column(DB.Integer, primary_key=True, autoincrement=True)  # rename to uid
username = DB.Column(DB.String(128), unique=True, nullable=False)
email = DB.Column(DB.String(128), unique=True, nullable=False)
password = DB.Column(DB.String(255), nullable=False)
admin = DB.Column(DB.Boolean, default=False, nullable=False)
active = DB.Column(DB.Boolean, default=True, nullable=False)
created_at = DB.Column(DB.DateTime, nullable=False)
    def __init__(self, username, email, password, created_at=None, admin=False):
        self.username = username
        self.email = email
        self.password = self.generate_hash(password)
        self.admin = admin
        # a datetime.utcnow() default argument would be evaluated only once at
        # import time, stamping every user with the same moment; compute it per call
        self.created_at = created_at if created_at is not None else datetime.datetime.utcnow()
def get_dict(self):
''' Returns the users data '''
return {
"id": self.id,
"username": self.username,
"email": self.email,
"admin": self.admin,
"created_at": self.created_at
}
@staticmethod
def generate_hash(password):
''' Generates the password hash '''
return BCRYPT.generate_password_hash(password, current_app.config.get('BCRYPT_LOG_ROUNDS')).decode()
@staticmethod
def encode_auth_token(user_id):
''' Generates the auth token '''
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(
days=current_app.config.get('TOKEN_EXPIRATION_DAYS'),
seconds=current_app.config.get('TOKEN_EXPIRATION_SECONDS')
),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(payload, current_app.config.get('SECRET_BCRYPT'), algorithm='HS256')
@staticmethod
def decode_auth_token(auth_token):
''' Decodes the auth token '''
        payload = jwt.decode(auth_token, current_app.config.get('SECRET_BCRYPT'), algorithms=['HS256'])
return payload['sub']
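# Illustrative round-trip of the token helpers above (assumes a Flask app
# context with SECRET_BCRYPT and the TOKEN_EXPIRATION_* settings configured):
#
#   token = User.encode_auth_token(user_id=42)
#   User.decode_auth_token(token)  # -> 42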
|
[
"bernhard.goesswein@geo.tuwien.ac.at"
] |
bernhard.goesswein@geo.tuwien.ac.at
|
c4be35002664253e83bad83bee500cc207fa909c
|
e4700f3ff598b997bf0ea35bcdb76b00c530c994
|
/tmp.py
|
d616e22344314282bffb61071d044da898ac2eef
|
[] |
no_license
|
nikkibisarya/therapysummarization
|
64d056683454289561a45b6e5e1d88f5e3f78dae
|
203b5a06577456d68d3022aa94d9476e0d352e18
|
refs/heads/master
| 2020-03-16T23:23:11.698069
| 2019-11-05T18:08:54
| 2019-11-05T18:08:54
| 133,075,146
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
import numpy as np
import matplotlib.pyplot as plt
loss = [1.0761, 0.8476, 0.7516, 0.6956, 0.6562, 0.6243, 0.5985, 0.5765, 0.5586, 0.5427, 0.5315, 0.5169, 0.5089, 0.4994,
0.4923, 0.4866, 0.4806, 0.4763, 0.4708, 0.4707]
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.plot(np.arange(len(loss)), loss, label='train')
plt.legend(loc='upper left')
plt.savefig('MISCloss.png')
|
[
"me@andrewszot.com"
] |
me@andrewszot.com
|
0152bca3423e36a8ff5ad25c2f7ce46e5acea2ed
|
bd1ed744bd408221445231a8838807fe31df5c54
|
/func_reverse.py
|
1421097cc08e4eaf0b7ab60c464b799646b79887
|
[] |
no_license
|
PhoebeCooney/CA116
|
c7bb95e8e75832016be46d7b603883bbbed269a6
|
157311fbe3c57dc760c4a87618cdc6efb7be2eef
|
refs/heads/master
| 2021-01-15T19:06:35.353483
| 2017-08-17T14:50:42
| 2017-08-17T14:50:42
| 99,807,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
#import func_reverse
def swap(a, i, j):
tmp = a[j]
a[j] = a[i]
a[i] = tmp
def reverse(a):
i = 0
while i < len(a)/2:
        swap(a, i, len(a) - i - 1)
i += 1
a = [4, 3, 1, 2]
def main():
    swap(a, 2, 3)
    print(a)  # [4, 3, 2, 1]
    reverse(a)
    print(a)  # [1, 2, 3, 4]
if __name__ == "__main__":
main()
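# Illustrative Pythonic equivalents of the helpers above:
#
#   a[i], a[j] = a[j], a[i]   # tuple swap, no tmp variable needed
#   a.reverse()               # in-place reverse
#   b = a[::-1]               # reversed copy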
|
[
"noreply@github.com"
] |
PhoebeCooney.noreply@github.com
|
d08e77c86c0263b332867904a5f7d5d164c1166e
|
61b12137d849283aa057e11691e70fe846dcc136
|
/skeleton.py
|
57e5f26c824dedafa2cfa680174d948230923f2a
|
[
"MIT"
] |
permissive
|
PrajitR/franklin
|
00f396d282b0f3de08bc433cf64686394c60c15a
|
bbd4b81eec7ef70b42dd5d9fff1bf4c887f92e25
|
refs/heads/master
| 2020-05-29T17:28:25.015989
| 2014-05-20T02:27:18
| 2014-05-20T02:27:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
import sys
import json
import os
import fnmatch
import itertools
from collections import defaultdict
def generate_structure (root_dir=os.getcwd()):
check_ignore = check_in_gitignore(get_gitignore())
struc = defaultdict(lambda: {})
for dirpath, dirnames, filenames in os.walk(root_dir):
if not valid_dir(dirpath): continue
dirpath = dirpath[len(root_dir):] + '/'
dirnames = (d + '/' for d in dirnames if valid_dir(d) and not check_ignore(d))
filenames = (f for f in filenames if not check_ignore(f))
for d in itertools.chain(dirnames, filenames):
struc[dirpath][d] = { 'description': '', 'next': [], 'entry_point': False }
return struc
def get_gitignore ():
gitignore = os.path.join(os.getcwd(), '.gitignore')
if not os.path.isfile(gitignore): return
with open(gitignore, 'r') as f:
ignore = [(line.rstrip('\r\n')) for line in f if len(line) > 1 and line[0] != '#']
return ignore
def check_in_gitignore (ignore):
def check (f):
return any((fnmatch.fnmatch(f, i) for i in ignore))
return check
def valid_dir (d):
restricted = ['.git/']
return not any(inv in d for inv in restricted)
if __name__ == '__main__':
file_name, root_dir = 'franklin.json', os.getcwd()
for i in xrange(1, len(sys.argv), 2):
option = sys.argv[i]
if option == '-p' or option == '--projectpath':
root_dir = sys.argv[i + 1]
elif option == '-o' or option == '--output':
file_name = sys.argv[i + 1]
    if os.path.isfile(file_name):
        r = raw_input(file_name + ' already exists! Do you want to continue? (y/n)')
        if not r or r[0] not in ('y', 'Y'):
            sys.exit()  # 'return' is only valid inside a function; exit explicitly here
with open(file_name, 'w') as f:
json.dump(generate_structure(root_dir), f)
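# Illustrative: what the gitignore matcher above does (fnmatch-style globs):
#
#   check = check_in_gitignore(['*.pyc', 'build'])
#   check('module.pyc')  # -> True
#   check('README.md')   # -> False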
|
[
"ramprajit@gmail.com"
] |
ramprajit@gmail.com
|
a54e6ee53269fe07a76bb0cdfd2d888cd08d3167
|
ede3dcfe6093a6c726949832ca5be6d3883c5945
|
/Milestone2/hogwild2/tests/test_svm.py
|
70a28cc4d2f8200d5f3318a7e6e38dd310f2069f
|
[] |
no_license
|
JustineWeb/project_systems
|
26cb5236ecbbe79fdae51fd7dc1c3e7bc93e05cb
|
c579d2a749bfab79bbcdd2a9a46479a39401bb08
|
refs/heads/master
| 2022-01-06T22:45:14.463467
| 2019-05-16T12:06:14
| 2019-05-16T12:06:14
| 177,611,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
from hogwild.svm import SVM
mock_data = [{1: 0.1, 2: 0.2},
{1: 0.2, 4: 0.9},
{3: 0.9, 8: 1},
{4: 0.4, 5: 0.7}]
mock_labels = [1, -1, 1, -1]
mock_delta_w = {1: 0.01, 2: 0.02, 3: 0.03, 4: 0.04, 5: 0.05, 6: 0, 7: 0, 8: 0.08}
def test_fit():
svm = SVM(1, 1e-5, 9)
expected_result = {1: -0.100001,
2: 0.2,
3: 0.9,
4: -1.29999199999,
5: -0.6999909999899999,
8: 1.0}
result = svm.fit(mock_data, mock_labels)
assert expected_result == result
def test_predict():
svm = SVM(1, 1e-5, 9)
svm.fit(mock_data, mock_labels)
expected_result = [1]
result = svm.predict([{2: 0.8, 3: 0.9}])
assert expected_result == result
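# Illustrative: the sparse format used by mock_data above maps feature index
# to value; a dot product between two such dicts is simply:
#
#   def sparse_dot(u, v):
#       return sum(val * v.get(k, 0) for k, val in u.items())
#
#   sparse_dot({1: 0.1, 2: 0.2}, {2: 0.5})  # -> 0.1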
|
[
"alexis.mermet@epfl.ch"
] |
alexis.mermet@epfl.ch
|
12f5a26fff930d6da4d651c4f4dd72d554d44fc2
|
b40fe0bd5f7678926baabdff48df9cff8ec673b6
|
/lzyServer/manage.py
|
c78694823ca24da087e46e4630fb555c6131b801
|
[] |
no_license
|
huangzhongkai/lzy
|
9f3c413c68b3d57677c06c2f588289d2b2889577
|
f204294785589173cd11b6363c68590a8fc24dff
|
refs/heads/master
| 2021-10-26T00:45:31.699647
| 2019-04-09T05:19:42
| 2019-04-09T05:19:42
| 79,820,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lzyServer.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"huangzhongkai@huangzhongkaideMacBook-Pro.local"
] |
huangzhongkai@huangzhongkaideMacBook-Pro.local
|
7a2bcfd46f619cc050057c89f3a061ec7cdc19bb
|
496e7fed497a88b93d428b6c1163399074dc9f3b
|
/post/forms.py
|
d62418ed576834090fa5dc932fabecce52cac546
|
[] |
no_license
|
tomaszwozniak/blog
|
e181b25899fffe7127f071b7065367267dd71fd1
|
12be9e620ba7a08aef8356cc0d9c7beec2632895
|
refs/heads/master
| 2016-09-16T01:33:45.555815
| 2015-09-03T19:43:00
| 2015-09-03T19:43:00
| 41,756,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
from django import forms
from post.models import Post, Comment
class PostForm(forms.ModelForm):
class Meta:
model = Post
exclude = ('views',)
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('full_name', 'value')
exclude = ('post', )
def clean_full_name(self):
full_name = self.cleaned_data['full_name']
if "donald" in full_name.lower():
raise forms.ValidationError("You cannot comment on this blog!")
# Always return the cleaned data, whether you have changed it or
# not.
return full_name
def clean_value(self):
value = self.cleaned_data['value']
if "polityka" in value.lower():
raise forms.ValidationError("Not permitted!")
# Always return the cleaned data, whether you have changed it or
# not.
return value
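# Hedged usage sketch of the clean_* hooks above (Django's form validation
# calls them automatically; a name containing 'donald' is rejected):
#
#   form = CommentForm(data={'full_name': 'Donald X', 'value': 'hi'})
#   form.is_valid()           # -> False
#   form.errors['full_name']  # -> ['You cannot comment on this blog!']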
|
[
"tomaszwozniak.wzm@gmail.com"
] |
tomaszwozniak.wzm@gmail.com
|