Dataset schema (per-record fields and summary stats):

| column | type / stats |
|---|---|
| blob_id | string, length 40 |
| directory_id | string, length 40 |
| path | string, length 3–281 |
| content_id | string, length 40 |
| detected_licenses | list, length 0–57 |
| license_type | string, 2 classes |
| repo_name | string, length 6–116 |
| snapshot_id | string, length 40 |
| revision_id | string, length 40 |
| branch_name | string, 313 classes |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64, 18.2k–668M, nullable |
| star_events_count | int64, 0–102k |
| fork_events_count | int64, 0–38.2k |
| gha_license_id | string, 17 classes |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string, 107 classes |
| src_encoding | string, 20 classes |
| language | string, 1 class |
| is_vendor | bool, 2 classes |
| is_generated | bool, 2 classes |
| length_bytes | int64, 4–6.02M |
| extension | string, 78 classes |
| content | string, length 2–6.02M |
| authors | list, length 1 |
| author | string, length 0–175 |
---
blob_id: 772afd59bd32748689f71c12edef9489acabc9b6 | directory_id: f6ff601089f678fecbfa22a4d95c1de225bc34b5
path: /python2.py | content_id: cb45756c283eb7bd22ce63562a011e33fa31726c
detected_licenses: [] | license_type: no_license | repo_name: Kumar1998/github-upload
snapshot_id: 94c1fb50dc1bce2c4b76d83c41be2e0ce57b7fa6 | revision_id: ab264537200791c87ef6d505d90be0c0a952ceff | branch_name: refs/heads/master
visit_date: 2021-07-05T10:14:13.591139 | revision_date: 2020-07-26T15:47:30 | committer_date: 2020-07-26T15:47:30
github_id: 143,553,935 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 83 | extension: py
content:
v="HARD TIMES"
print(v[3])
import time
time.sleep(100)
authors: ["noreply@github.com"] | author: noreply@github.com

---
blob_id: dd9b0102909ce4e3104629af6206d2e5099291eb | directory_id: 3432778f39914e0d85201aca0cd0ff4020607e18
path: /Homework 7/hw7.py | content_id: 11e95a85c5dd3c93931d7478feac173132727c1f
detected_licenses: [] | license_type: no_license | repo_name: cah835/Data-Structures
snapshot_id: 7e961e31f065e2703749dbe42586bb0099257467 | revision_id: 89ba3423c167d073418d11d37e601a575efd4a97 | branch_name: refs/heads/master
visit_date: 2021-01-11T06:28:33.211254 | revision_date: 2016-10-27T21:28:19 | committer_date: 2016-10-27T21:28:19
github_id: 72,149,724 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,004 | extension: py
content:
# Run this script w/ Python to populate the database for assignment 7
# For sqlite3 (pysqlite) documentation, see:
# - Python 3: https://docs.python.org/3/library/sqlite3.html
# - Python 2: https://docs.python.org/2.7/library/sqlite3.html
import sqlite3
import json
# Open the JSON file and store data for the tables
with open('mcu_raw.json') as mcu_file:
raw_data = json.load(mcu_file)
names = set()
movies = set()
for datum in raw_data:
names.add(datum['name1'])
names.add(datum['name2'])
movies.add((datum['movie'], datum['release']))
names = list(names)
movies = list(movies)
movie_names = [movie[0] for movie in movies]
# Open the database
connection = sqlite3.connect('mcu.db')
cursor = connection.cursor()
cursor.execute('PRAGMA foreign_keys = ON;') # Enable foreign key support
cursor.execute('DROP TABLE IF EXISTS fights;')
cursor.execute('DROP TABLE IF EXISTS teamUps;')
connection.commit()
# Create characters table
cursor.execute('DROP TABLE IF EXISTS characters;')
cursor.execute("""CREATE TABLE characters
(
id INTEGER PRIMARY KEY,
name TEXT NOT NULL
);""")
cursor.executemany('INSERT INTO characters VALUES (?, ?);', enumerate(names))
connection.commit()
# Create movies table
cursor = connection.cursor()
cursor.execute('DROP TABLE IF EXISTS movies;')
cursor.execute("""CREATE TABLE movies
(
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
release TEXT NOT NULL
);""")
cursor.executemany('INSERT INTO movies VALUES (?, ?, ?);',
((i, name, release) for (i, (name, release))
in enumerate(movies)))
connection.commit()
# Create fought/teamed-up tables
cursor = connection.cursor()
cursor.execute("""CREATE TABLE fights
(
combatant1 INTEGER NOT NULL,
combatant2 INTEGER NOT NULL,
movie INTEGER NOT NULL,
PRIMARY KEY(combatant1, combatant2, movie),
FOREIGN KEY(combatant1) REFERENCES characters(id),
FOREIGN KEY(combatant2) REFERENCES characters(id),
FOREIGN KEY(movie) REFERENCES movies(id)
);""")
cursor.executemany('INSERT INTO fights VALUES (?, ?, ?);',
((names.index(datum['name1']),
names.index(datum['name2']),
movie_names.index(datum['movie']))
for datum in raw_data if datum["action"] == "fought"))
cursor.execute("""CREATE TABLE teamUps
(
member1 INTEGER NOT NULL,
member2 INTEGER NOT NULL,
movie INTEGER NOT NULL,
PRIMARY KEY(member1, member2, movie),
FOREIGN KEY(member1) REFERENCES characters(id),
FOREIGN KEY(member2) REFERENCES characters(id),
FOREIGN KEY(movie) REFERENCES movies(id)
);""")
cursor.executemany('INSERT INTO teamUps VALUES (?, ?, ?);',
((names.index(datum['name1']),
names.index(datum['name2']),
movie_names.index(datum['movie']))
for datum in raw_data if datum["action"] == "teamedUp"))
connection.commit()
# All done
connection.close()
authors: ["corey@Coreys-MacBook-Pro.local"] | author: corey@Coreys-MacBook-Pro.local

---
blob_id: a344107f6c455874e9bbd5dca801684ea1fa0f30 | directory_id: 3438e8c139a5833836a91140af412311aebf9e86
path: /third_party/WebKit/Source/bindings/scripts/v8_types.py | content_id: 011bf4bb2dccfcff60556c65ba05b9fc5ca6a345
detected_licenses: ["LGPL-2.0-or-later", "GPL-1.0-or-later", "MIT", "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer", "LGPL-2.1-only", "GPL-2.0-only", "LGPL-2.0-only", "BSD-2-Clause", "LicenseRef-scancode-other-copyleft", "BSD-3-Clause"]
license_type: permissive | repo_name: Exstream-OpenSource/Chromium
snapshot_id: 345b4336b2fbc1d5609ac5a67dbf361812b84f54 | revision_id: 718ca933938a85c6d5548c5fad97ea7ca1128751 | branch_name: refs/heads/master
visit_date: 2022-12-21T20:07:40.786370 | revision_date: 2016-10-18T04:53:43 | committer_date: 2016-10-18T04:53:43
github_id: 71,210,435 | star_events_count: 0 | fork_events_count: 2
gha_license_id: BSD-3-Clause | gha_event_created_at: 2022-12-18T12:14:22 | gha_created_at: 2016-10-18T04:58:13 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 42,480 | extension: py
content:
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions for type handling and type conversion (Blink/C++ <-> V8/JS).
Extends IdlType and IdlUnionType with V8-specific properties, methods, and
class methods.
Spec:
http://www.w3.org/TR/WebIDL/#es-type-mapping
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import posixpath
from idl_types import IdlTypeBase, IdlType, IdlUnionType, IdlArrayOrSequenceType, IdlNullableType
import v8_attributes # for IdlType.constructor_type_name
from v8_globals import includes
################################################################################
# V8-specific handling of IDL types
################################################################################
NON_WRAPPER_TYPES = frozenset([
'Dictionary',
'EventHandler',
'EventListener',
'NodeFilter',
'SerializedScriptValue',
])
TYPED_ARRAY_TYPES = frozenset([
'Float32Array',
'Float64Array',
'Int8Array',
'Int16Array',
'Int32Array',
'Uint8Array',
'Uint8ClampedArray',
'Uint16Array',
'Uint32Array',
])
ARRAY_BUFFER_AND_VIEW_TYPES = TYPED_ARRAY_TYPES.union(frozenset([
'ArrayBuffer',
'ArrayBufferView',
'DataView',
'SharedArrayBuffer',
]))
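# The assignments below monkey-patch V8-specific predicates onto IdlType as
# read-only properties, so code-generator templates can query them directly.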
IdlType.is_array_buffer_or_view = property(
lambda self: self.base_type in ARRAY_BUFFER_AND_VIEW_TYPES)
IdlType.is_typed_array = property(
lambda self: self.base_type in TYPED_ARRAY_TYPES)
IdlType.is_wrapper_type = property(
lambda self: (self.is_interface_type and
not self.is_callback_interface and
self.base_type not in NON_WRAPPER_TYPES))
################################################################################
# C++ types
################################################################################
CPP_TYPE_SAME_AS_IDL_TYPE = set([
'double',
'float',
'long long',
'unsigned long long',
])
CPP_INT_TYPES = set([
'byte',
'long',
'short',
])
CPP_UNSIGNED_TYPES = set([
'octet',
'unsigned int',
'unsigned long',
'unsigned short',
])
CPP_SPECIAL_CONVERSION_RULES = {
'Date': 'double',
'Dictionary': 'Dictionary',
'EventHandler': 'EventListener*',
'EventListener': 'EventListener*',
'NodeFilter': 'NodeFilter*',
'Promise': 'ScriptPromise',
'ScriptValue': 'ScriptValue',
# FIXME: Eliminate custom bindings for XPathNSResolver http://crbug.com/345529
'XPathNSResolver': 'XPathNSResolver*',
'boolean': 'bool',
'unrestricted double': 'double',
'unrestricted float': 'float',
}
def cpp_type(idl_type, extended_attributes=None, raw_type=False, used_as_rvalue_type=False, used_as_variadic_argument=False, used_in_cpp_sequence=False):
"""Returns C++ type corresponding to IDL type.
|idl_type| argument is of type IdlType, while return value is a string
Args:
idl_type:
IdlType
raw_type:
bool, True if idl_type's raw/primitive C++ type should be returned.
used_as_rvalue_type:
bool, True if the C++ type is used as an argument or the return
type of a method.
used_as_variadic_argument:
bool, True if the C++ type is used as a variadic argument of a method.
used_in_cpp_sequence:
bool, True if the C++ type is used as an element of a container.
Containers can be an array, a sequence or a dictionary.
"""
def string_mode():
if extended_attributes.get('TreatNullAs') == 'EmptyString':
return 'TreatNullAsEmptyString'
if extended_attributes.get('TreatNullAs') == 'NullString':
if extended_attributes.get('TreatUndefinedAs') == 'NullString':
return 'TreatNullAndUndefinedAsNullString'
return 'TreatNullAsNullString'
if idl_type.is_nullable:
return 'TreatNullAndUndefinedAsNullString'
return ''
extended_attributes = extended_attributes or {}
idl_type = idl_type.preprocessed_type
# Array or sequence types
if used_as_variadic_argument:
native_array_element_type = idl_type
else:
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
vector_type = cpp_ptr_type('Vector', 'HeapVector', native_array_element_type.is_gc_type)
vector_template_type = cpp_template_type(vector_type, native_array_element_type.cpp_type_args(used_in_cpp_sequence=True))
if used_as_rvalue_type:
return 'const %s&' % vector_template_type
return vector_template_type
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in CPP_TYPE_SAME_AS_IDL_TYPE:
return base_idl_type
if base_idl_type in CPP_INT_TYPES:
return 'int'
if base_idl_type in CPP_UNSIGNED_TYPES:
return 'unsigned'
if base_idl_type in CPP_SPECIAL_CONVERSION_RULES:
return CPP_SPECIAL_CONVERSION_RULES[base_idl_type]
if base_idl_type == 'SerializedScriptValue':
return ('PassRefPtr<%s>' if used_as_rvalue_type else 'RefPtr<%s>') % base_idl_type
if idl_type.is_string_type:
if not raw_type:
return 'String'
return 'V8StringResource<%s>' % string_mode()
if base_idl_type == 'ArrayBufferView' and 'FlexibleArrayBufferView' in extended_attributes:
return 'FlexibleArrayBufferView'
if base_idl_type in TYPED_ARRAY_TYPES and 'FlexibleArrayBufferView' in extended_attributes:
return 'Flexible' + base_idl_type + 'View'
if idl_type.is_interface_type:
implemented_as_class = idl_type.implemented_as
if raw_type or (used_as_rvalue_type and idl_type.is_garbage_collected) or not used_in_cpp_sequence:
return implemented_as_class + '*'
if not used_in_cpp_sequence:
return implemented_as_class + '*'
return cpp_template_type('Member', implemented_as_class)
if idl_type.is_dictionary:
if used_as_rvalue_type:
return 'const %s&' % base_idl_type
return base_idl_type
if idl_type.is_union_type:
# Avoid "AOrNullOrB" for cpp type of (A? or B) because we generate
        # V8AOrBOrNull to handle null for (A? or B), (A or B?) and (A or B)?
def member_cpp_name(idl_type):
if idl_type.is_nullable:
return idl_type.inner_type.name
return idl_type.name
idl_type_name = "Or".join(member_cpp_name(member)
for member in idl_type.member_types)
return 'const %s&' % idl_type_name if used_as_rvalue_type else idl_type_name
if idl_type.is_callback_function and not idl_type.is_custom_callback_function:
return base_idl_type + '*'
if base_idl_type == 'void':
return base_idl_type
# Default, assume native type is a pointer with same type name as idl type
return base_idl_type + '*'
def cpp_type_initializer(idl_type):
"""Returns a string containing a C++ initialization statement for the
corresponding type.
|idl_type| argument is of type IdlType.
"""
base_idl_type = idl_type.base_type
if idl_type.native_array_element_type:
return ''
if idl_type.is_numeric_type:
return ' = 0'
if base_idl_type == 'boolean':
return ' = false'
if (base_idl_type in NON_WRAPPER_TYPES or
base_idl_type in CPP_SPECIAL_CONVERSION_RULES or
base_idl_type == 'any' or
idl_type.is_string_type or
idl_type.is_enum):
return ''
return ' = nullptr'
# Allow access as idl_type.cpp_type if no arguments
IdlTypeBase.cpp_type = property(cpp_type)
IdlTypeBase.cpp_type_initializer = property(cpp_type_initializer)
IdlTypeBase.cpp_type_args = cpp_type
IdlUnionType.cpp_type_initializer = ''
IdlArrayOrSequenceType.native_array_element_type = property(
lambda self: self.element_type)
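# Illustrative mappings implied by cpp_type above: IDL 'long' -> 'int',
# 'boolean' -> 'bool', and a sequence of a garbage-collected interface type
# -> 'HeapVector<Member<T>>' (via the vector/template helpers below).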
def cpp_template_type(template, inner_type):
"""Returns C++ template specialized to type."""
format_string = '{template}<{inner_type}>'
return format_string.format(template=template, inner_type=inner_type)
def cpp_ptr_type(old_type, new_type, is_gc_type):
if is_gc_type:
return new_type
return old_type
def v8_type(interface_name):
return 'V8' + interface_name
# [ImplementedAs]
# This handles [ImplementedAs] on interface types, not [ImplementedAs] in the
# interface being generated. e.g., given:
# Foo.idl: interface Foo {attribute Bar bar};
# Bar.idl: [ImplementedAs=Zork] interface Bar {};
# when generating bindings for Foo, the [ImplementedAs] on Bar is needed.
# This data is external to Foo.idl, and hence computed as global information in
# compute_interfaces_info.py to avoid having to parse IDLs of all used interfaces.
IdlType.implemented_as_interfaces = {}
def implemented_as(idl_type):
base_idl_type = idl_type.base_type
if base_idl_type in IdlType.implemented_as_interfaces:
return IdlType.implemented_as_interfaces[base_idl_type]
return base_idl_type
IdlType.implemented_as = property(implemented_as)
IdlType.set_implemented_as_interfaces = classmethod(
lambda cls, new_implemented_as_interfaces:
cls.implemented_as_interfaces.update(new_implemented_as_interfaces))
# [GarbageCollected]
IdlType.garbage_collected_types = set()
IdlType.is_garbage_collected = property(
lambda self: self.base_type in IdlType.garbage_collected_types)
IdlType.set_garbage_collected_types = classmethod(
lambda cls, new_garbage_collected_types:
cls.garbage_collected_types.update(new_garbage_collected_types))
def is_gc_type(idl_type):
return idl_type.is_garbage_collected or idl_type.is_dictionary or idl_type.is_union_type
IdlTypeBase.is_gc_type = property(is_gc_type)
def is_traceable(idl_type):
return (idl_type.is_garbage_collected or idl_type.is_dictionary)
IdlTypeBase.is_traceable = property(is_traceable)
IdlUnionType.is_traceable = property(lambda self: True)
IdlArrayOrSequenceType.is_traceable = property(
lambda self: self.element_type.is_traceable)
################################################################################
# Includes
################################################################################
def includes_for_cpp_class(class_name, relative_dir_posix):
return set([posixpath.join('bindings', relative_dir_posix, class_name + '.h')])
INCLUDES_FOR_TYPE = {
'object': set(),
'ArrayBufferView': set(['bindings/core/v8/V8ArrayBufferView.h',
'core/dom/FlexibleArrayBufferView.h']),
'Dictionary': set(['bindings/core/v8/Dictionary.h']),
'EventHandler': set(['bindings/core/v8/V8AbstractEventListener.h',
'bindings/core/v8/V8EventListenerHelper.h']),
'EventListener': set(['bindings/core/v8/BindingSecurity.h',
'bindings/core/v8/V8EventListenerHelper.h',
'core/frame/LocalDOMWindow.h']),
'HTMLCollection': set(['bindings/core/v8/V8HTMLCollection.h',
'core/dom/ClassCollection.h',
'core/dom/TagCollection.h',
'core/html/HTMLCollection.h',
'core/html/HTMLDataListOptionsCollection.h',
'core/html/HTMLFormControlsCollection.h',
'core/html/HTMLTableRowsCollection.h']),
'NodeList': set(['bindings/core/v8/V8NodeList.h',
'core/dom/NameNodeList.h',
'core/dom/NodeList.h',
'core/dom/StaticNodeList.h',
'core/html/LabelsNodeList.h']),
'Promise': set(['bindings/core/v8/ScriptPromise.h']),
'SerializedScriptValue': set(['bindings/core/v8/SerializedScriptValue.h',
'bindings/core/v8/SerializedScriptValueFactory.h']),
'ScriptValue': set(['bindings/core/v8/ScriptValue.h']),
}
def includes_for_type(idl_type, extended_attributes=None):
idl_type = idl_type.preprocessed_type
extended_attributes = extended_attributes or {}
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in INCLUDES_FOR_TYPE:
return INCLUDES_FOR_TYPE[base_idl_type]
if base_idl_type in TYPED_ARRAY_TYPES:
return INCLUDES_FOR_TYPE['ArrayBufferView'].union(
set(['bindings/%s/v8/V8%s.h' % (component_dir[base_idl_type], base_idl_type)])
)
if idl_type.is_basic_type:
return set()
if base_idl_type.endswith('ConstructorConstructor'):
# FIXME: rename to NamedConstructor
# FIXME: replace with a [NamedConstructorAttribute] extended attribute
# Ending with 'ConstructorConstructor' indicates a named constructor,
# and these do not have header files, as they are part of the generated
# bindings for the interface
return set()
if base_idl_type.endswith('Constructor'):
# FIXME: replace with a [ConstructorAttribute] extended attribute
base_idl_type = idl_type.constructor_type_name
if idl_type.is_custom_callback_function:
return set()
if idl_type.is_callback_function:
component = IdlType.callback_functions[base_idl_type]['component_dir']
return set(['bindings/%s/v8/%s.h' % (component, base_idl_type)])
if base_idl_type not in component_dir:
return set()
return set(['bindings/%s/v8/V8%s.h' % (component_dir[base_idl_type],
base_idl_type)])
IdlType.includes_for_type = includes_for_type
def includes_for_union_type(idl_type, extended_attributes=None):
return set.union(*[member_type.includes_for_type(extended_attributes)
for member_type in idl_type.member_types])
IdlUnionType.includes_for_type = includes_for_union_type
def includes_for_array_or_sequence_type(idl_type, extended_attributes=None):
return idl_type.element_type.includes_for_type(extended_attributes)
IdlArrayOrSequenceType.includes_for_type = includes_for_array_or_sequence_type
def add_includes_for_type(idl_type, extended_attributes=None):
includes.update(idl_type.includes_for_type(extended_attributes))
IdlTypeBase.add_includes_for_type = add_includes_for_type
def includes_for_interface(interface_name):
return IdlType(interface_name).includes_for_type()
def add_includes_for_interface(interface_name):
includes.update(includes_for_interface(interface_name))
def impl_should_use_nullable_container(idl_type):
    return not idl_type.cpp_type_has_null_value
IdlTypeBase.impl_should_use_nullable_container = property(
impl_should_use_nullable_container)
def impl_includes_for_type(idl_type, interfaces_info):
includes_for_type = set()
if idl_type.impl_should_use_nullable_container:
includes_for_type.add('bindings/core/v8/Nullable.h')
idl_type = idl_type.preprocessed_type
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
includes_for_type.update(impl_includes_for_type(
native_array_element_type, interfaces_info))
includes_for_type.add('wtf/Vector.h')
base_idl_type = idl_type.base_type
if idl_type.is_string_type:
includes_for_type.add('wtf/text/WTFString.h')
if base_idl_type in interfaces_info:
interface_info = interfaces_info[base_idl_type]
if interface_info['include_path']:
includes_for_type.add(interface_info['include_path'])
if base_idl_type in INCLUDES_FOR_TYPE:
includes_for_type.update(INCLUDES_FOR_TYPE[base_idl_type])
if idl_type.is_typed_array:
return set(['core/dom/DOMTypedArray.h'])
return includes_for_type
def impl_includes_for_type_union(idl_type, interfaces_info):
includes_for_type = set()
for member_type in idl_type.member_types:
includes_for_type.update(member_type.impl_includes_for_type(interfaces_info))
return includes_for_type
IdlTypeBase.impl_includes_for_type = impl_includes_for_type
IdlUnionType.impl_includes_for_type = impl_includes_for_type_union
component_dir = {}
def set_component_dirs(new_component_dirs):
component_dir.update(new_component_dirs)
################################################################################
# V8 -> C++
################################################################################
V8_VALUE_TO_CPP_VALUE = {
# Basic
'Date': 'toCoreDate({isolate}, {v8_value}, exceptionState)',
'DOMString': '{v8_value}',
'ByteString': 'toByteString({isolate}, {arguments})',
'USVString': 'toUSVString({isolate}, {arguments})',
'boolean': 'toBoolean({isolate}, {arguments})',
'float': 'toRestrictedFloat({isolate}, {arguments})',
'unrestricted float': 'toFloat({isolate}, {arguments})',
'double': 'toRestrictedDouble({isolate}, {arguments})',
'unrestricted double': 'toDouble({isolate}, {arguments})',
'byte': 'toInt8({isolate}, {arguments})',
'octet': 'toUInt8({isolate}, {arguments})',
'short': 'toInt16({isolate}, {arguments})',
'unsigned short': 'toUInt16({isolate}, {arguments})',
'long': 'toInt32({isolate}, {arguments})',
'unsigned long': 'toUInt32({isolate}, {arguments})',
'long long': 'toInt64({isolate}, {arguments})',
'unsigned long long': 'toUInt64({isolate}, {arguments})',
# Interface types
'Dictionary': 'Dictionary({v8_value}, {isolate}, exceptionState)',
'EventTarget': 'toEventTarget({isolate}, {v8_value})',
'FlexibleArrayBufferView': 'toFlexibleArrayBufferView({isolate}, {v8_value}, {variable_name}, allocateFlexibleArrayBufferViewStorage({v8_value}))',
'NodeFilter': 'toNodeFilter({v8_value}, info.Holder(), ScriptState::current({isolate}))',
'Promise': 'ScriptPromise::cast(ScriptState::current({isolate}), {v8_value})',
'SerializedScriptValue': 'SerializedScriptValue::serialize({isolate}, {v8_value}, nullptr, nullptr, exceptionState)',
'ScriptValue': 'ScriptValue(ScriptState::current({isolate}), {v8_value})',
'Window': 'toDOMWindow({isolate}, {v8_value})',
'XPathNSResolver': 'toXPathNSResolver(ScriptState::current({isolate}), {v8_value})',
}
def v8_conversion_needs_exception_state(idl_type):
return (idl_type.is_numeric_type or
idl_type.is_enum or
idl_type.is_dictionary or
idl_type.name in ('Boolean', 'ByteString', 'Date', 'Dictionary', 'USVString', 'SerializedScriptValue'))
IdlType.v8_conversion_needs_exception_state = property(v8_conversion_needs_exception_state)
IdlArrayOrSequenceType.v8_conversion_needs_exception_state = True
IdlUnionType.v8_conversion_needs_exception_state = True
TRIVIAL_CONVERSIONS = frozenset([
'any',
'boolean',
'Date',
'Dictionary',
'NodeFilter',
'XPathNSResolver',
'Promise'
])
def v8_conversion_is_trivial(idl_type):
# The conversion is a simple expression that returns the converted value and
# cannot raise an exception.
return (idl_type.base_type in TRIVIAL_CONVERSIONS or
idl_type.is_wrapper_type)
IdlType.v8_conversion_is_trivial = property(v8_conversion_is_trivial)
def v8_value_to_cpp_value(idl_type, extended_attributes, v8_value, variable_name, index, isolate, restricted_float=False):
if idl_type.name == 'void':
return ''
# Array or sequence types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
return v8_value_to_cpp_value_array_or_sequence(native_array_element_type, v8_value, index, isolate)
# Simple types
idl_type = idl_type.preprocessed_type
base_idl_type = idl_type.as_union_type.name if idl_type.is_union_type else idl_type.base_type
if 'FlexibleArrayBufferView' in extended_attributes:
if base_idl_type not in TYPED_ARRAY_TYPES.union(set(['ArrayBufferView'])):
raise "Unrecognized base type for extended attribute 'FlexibleArrayBufferView': %s" % (idl_type.base_type)
base_idl_type = 'FlexibleArrayBufferView'
if idl_type.is_integer_type:
configuration = 'NormalConversion'
if 'EnforceRange' in extended_attributes:
configuration = 'EnforceRange'
elif 'Clamp' in extended_attributes:
configuration = 'Clamp'
arguments = ', '.join([v8_value, configuration, 'exceptionState'])
elif idl_type.v8_conversion_needs_exception_state:
arguments = ', '.join([v8_value, 'exceptionState'])
else:
arguments = v8_value
if base_idl_type in V8_VALUE_TO_CPP_VALUE:
cpp_expression_format = V8_VALUE_TO_CPP_VALUE[base_idl_type]
elif idl_type.is_array_buffer_or_view:
cpp_expression_format = (
'{v8_value}->Is{idl_type}() ? '
'V8{idl_type}::toImpl(v8::Local<v8::{idl_type}>::Cast({v8_value})) : 0')
elif idl_type.is_union_type:
nullable = 'UnionTypeConversionMode::Nullable' if idl_type.includes_nullable_type else 'UnionTypeConversionMode::NotNullable'
cpp_expression_format = 'V8{idl_type}::toImpl({isolate}, {v8_value}, {variable_name}, %s, exceptionState)' % nullable
elif idl_type.use_output_parameter_for_result:
cpp_expression_format = 'V8{idl_type}::toImpl({isolate}, {v8_value}, {variable_name}, exceptionState)'
elif idl_type.is_callback_function:
cpp_expression_format = (
'{idl_type}::create({isolate}, v8::Local<v8::Function>::Cast({v8_value}))')
else:
cpp_expression_format = (
'V8{idl_type}::toImplWithTypeCheck({isolate}, {v8_value})')
return cpp_expression_format.format(arguments=arguments, idl_type=base_idl_type, v8_value=v8_value, variable_name=variable_name, isolate=isolate)
def v8_value_to_cpp_value_array_or_sequence(native_array_element_type, v8_value, index, isolate='info.GetIsolate()'):
# Index is None for setters, index (starting at 0) for method arguments,
# and is used to provide a human-readable exception message
if index is None:
index = 0 # special case, meaning "setter"
else:
index += 1 # human-readable index
if (native_array_element_type.is_interface_type and
native_array_element_type.name != 'Dictionary'):
this_cpp_type = None
ref_ptr_type = 'Member'
expression_format = '(to{ref_ptr_type}NativeArray<{native_array_element_type}>({v8_value}, {index}, {isolate}, exceptionState))'
else:
ref_ptr_type = None
this_cpp_type = native_array_element_type.cpp_type
if native_array_element_type.is_dictionary or native_array_element_type.is_union_type:
vector_type = 'HeapVector'
else:
vector_type = 'Vector'
expression_format = 'toImplArray<%s<{cpp_type}>>({v8_value}, {index}, {isolate}, exceptionState)' % vector_type
expression = expression_format.format(native_array_element_type=native_array_element_type.name, cpp_type=this_cpp_type, index=index, ref_ptr_type=ref_ptr_type, v8_value=v8_value, isolate=isolate)
return expression
# FIXME: this function should be refactored, as this takes too many flags.
def v8_value_to_local_cpp_value(idl_type, extended_attributes, v8_value, variable_name, index=None, declare_variable=True, isolate='info.GetIsolate()', bailout_return_value=None, use_exception_state=False, restricted_float=False):
"""Returns an expression that converts a V8 value to a C++ value and stores it as a local value."""
this_cpp_type = idl_type.cpp_type_args(extended_attributes=extended_attributes, raw_type=True)
idl_type = idl_type.preprocessed_type
cpp_value = v8_value_to_cpp_value(idl_type, extended_attributes, v8_value, variable_name, index, isolate, restricted_float=restricted_float)
# Optional expression that returns a value to be assigned to the local variable.
assign_expression = None
# Optional void expression executed unconditionally.
set_expression = None
# Optional expression that returns true if the conversion fails.
check_expression = None
# Optional expression used as the return value when returning. Only
# meaningful if 'check_expression' is not None.
return_expression = bailout_return_value
if idl_type.is_string_type or idl_type.v8_conversion_needs_exception_state:
# Types for which conversion can fail and that need error handling.
check_expression = 'exceptionState.hadException()'
if idl_type.is_dictionary or idl_type.is_union_type:
set_expression = cpp_value
else:
assign_expression = cpp_value
# Note: 'not idl_type.v8_conversion_needs_exception_state' implies
# 'idl_type.is_string_type', but there are types for which both are
# true (ByteString and USVString), so using idl_type.is_string_type
# as the condition here would be wrong.
if not idl_type.v8_conversion_needs_exception_state:
if use_exception_state:
check_expression = '!%s.prepare(exceptionState)' % variable_name
else:
check_expression = '!%s.prepare()' % variable_name
elif not idl_type.v8_conversion_is_trivial and not idl_type.is_callback_function:
return {
'error_message': 'no V8 -> C++ conversion for IDL type: %s' % idl_type.name
}
elif 'FlexibleArrayBufferView' in extended_attributes:
if idl_type.base_type not in TYPED_ARRAY_TYPES.union(set(['ArrayBufferView'])):
raise "Unrecognized base type for extended attribute 'FlexibleArrayBufferView': %s" % (idl_type.base_type)
set_expression = cpp_value
else:
assign_expression = cpp_value
# Types that don't need error handling, and simply assign a value to the
# local variable.
return {
'assign_expression': assign_expression,
'check_expression': check_expression,
'cpp_type': this_cpp_type,
'cpp_name': variable_name,
'declare_variable': declare_variable,
'return_expression': bailout_return_value,
'set_expression': set_expression,
}
IdlTypeBase.v8_value_to_local_cpp_value = v8_value_to_local_cpp_value
def use_output_parameter_for_result(idl_type):
"""True when methods/getters which return the given idl_type should
take the output argument.
"""
return idl_type.is_dictionary or idl_type.is_union_type
IdlTypeBase.use_output_parameter_for_result = property(use_output_parameter_for_result)
################################################################################
# C++ -> V8
################################################################################
def preprocess_idl_type(idl_type):
if idl_type.is_nullable:
return IdlNullableType(idl_type.inner_type.preprocessed_type)
if idl_type.is_enum:
# Enumerations are internally DOMStrings
return IdlType('DOMString')
if idl_type.base_type in ['any', 'object'] or idl_type.is_custom_callback_function:
return IdlType('ScriptValue')
if idl_type.is_callback_function:
return idl_type
return idl_type
IdlTypeBase.preprocessed_type = property(preprocess_idl_type)
def preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes):
"""Returns IDL type and value, with preliminary type conversions applied."""
idl_type = idl_type.preprocessed_type
if idl_type.name == 'Promise':
idl_type = IdlType('ScriptValue')
if idl_type.base_type in ['long long', 'unsigned long long']:
# long long and unsigned long long are not representable in ECMAScript;
# we represent them as doubles.
is_nullable = idl_type.is_nullable
idl_type = IdlType('double')
if is_nullable:
idl_type = IdlNullableType(idl_type)
cpp_value = 'static_cast<double>(%s)' % cpp_value
# HTML5 says that unsigned reflected attributes should be in the range
# [0, 2^31). When a value isn't in this range, a default value (or 0)
# should be returned instead.
extended_attributes = extended_attributes or {}
if ('Reflect' in extended_attributes and
idl_type.base_type in ['unsigned long', 'unsigned short']):
cpp_value = cpp_value.replace('getUnsignedIntegralAttribute',
'getIntegralAttribute')
cpp_value = 'std::max(0, static_cast<int>(%s))' % cpp_value
return idl_type, cpp_value
def v8_conversion_type(idl_type, extended_attributes):
"""Returns V8 conversion type, adding any additional includes.
The V8 conversion type is used to select the C++ -> V8 conversion function
or v8SetReturnValue* function; it can be an idl_type, a cpp_type, or a
separate name for the type of conversion (e.g., 'DOMWrapper').
"""
extended_attributes = extended_attributes or {}
# Nullable dictionaries need to be handled differently than either
# non-nullable dictionaries or unions.
if idl_type.is_dictionary and idl_type.is_nullable:
return 'NullableDictionary'
if idl_type.is_dictionary or idl_type.is_union_type:
return 'DictionaryOrUnion'
# Array or sequence types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
return 'FrozenArray' if idl_type.is_frozen_array else 'array'
# Simple types
base_idl_type = idl_type.base_type
# Basic types, without additional includes
if base_idl_type in CPP_INT_TYPES:
return 'int'
if base_idl_type in CPP_UNSIGNED_TYPES:
return 'unsigned'
if idl_type.is_string_type:
if idl_type.is_nullable:
return 'StringOrNull'
return base_idl_type
if idl_type.is_basic_type or base_idl_type == 'ScriptValue':
return base_idl_type
# Generic dictionary type
if base_idl_type == 'Dictionary':
return 'Dictionary'
# Data type with potential additional includes
if base_idl_type in V8_SET_RETURN_VALUE: # Special v8SetReturnValue treatment
return base_idl_type
# Pointer type
return 'DOMWrapper'
IdlTypeBase.v8_conversion_type = v8_conversion_type
V8_SET_RETURN_VALUE = {
'boolean': 'v8SetReturnValueBool(info, {cpp_value})',
'int': 'v8SetReturnValueInt(info, {cpp_value})',
'unsigned': 'v8SetReturnValueUnsigned(info, {cpp_value})',
'DOMString': 'v8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'ByteString': 'v8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'USVString': 'v8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'StringOrNull': 'v8SetReturnValueStringOrNull(info, {cpp_value}, info.GetIsolate())',
'void': '',
# No special v8SetReturnValue* function (set value directly)
'float': 'v8SetReturnValue(info, {cpp_value})',
'unrestricted float': 'v8SetReturnValue(info, {cpp_value})',
'double': 'v8SetReturnValue(info, {cpp_value})',
'unrestricted double': 'v8SetReturnValue(info, {cpp_value})',
# No special v8SetReturnValue* function, but instead convert value to V8
# and then use general v8SetReturnValue.
'array': 'v8SetReturnValue(info, {cpp_value})',
'FrozenArray': 'v8SetReturnValue(info, {cpp_value})',
'Date': 'v8SetReturnValue(info, {cpp_value})',
'EventHandler': 'v8SetReturnValue(info, {cpp_value})',
'ScriptValue': 'v8SetReturnValue(info, {cpp_value})',
'SerializedScriptValue': 'v8SetReturnValue(info, {cpp_value})',
# DOMWrapper
'DOMWrapperForMainWorld': 'v8SetReturnValueForMainWorld(info, {cpp_value})',
'DOMWrapperFast': 'v8SetReturnValueFast(info, {cpp_value}, {script_wrappable})',
'DOMWrapperDefault': 'v8SetReturnValue(info, {cpp_value})',
# Note that static attributes and operations do not check whether |this| is
# an instance of the interface nor |this|'s creation context is the same as
# the current context. So we must always use the current context as the
# creation context of the DOM wrapper for the return value.
'DOMWrapperStatic': 'v8SetReturnValue(info, {cpp_value}, info.GetIsolate()->GetCurrentContext()->Global())',
# Generic dictionary type
'Dictionary': 'v8SetReturnValue(info, {cpp_value})',
'DictionaryStatic': '#error not implemented yet',
# Nullable dictionaries
'NullableDictionary': 'v8SetReturnValue(info, result.get())',
'NullableDictionaryStatic': '#error not implemented yet',
# Union types or dictionaries
'DictionaryOrUnion': 'v8SetReturnValue(info, result)',
'DictionaryOrUnionStatic': '#error not implemented yet',
}
def v8_set_return_value(idl_type, cpp_value, extended_attributes=None, script_wrappable='', for_main_world=False, is_static=False):
"""Returns a statement that converts a C++ value to a V8 value and sets it as a return value.
"""
def dom_wrapper_conversion_type():
if is_static:
return 'DOMWrapperStatic'
if not script_wrappable:
return 'DOMWrapperDefault'
if for_main_world:
return 'DOMWrapperForMainWorld'
return 'DOMWrapperFast'
idl_type, cpp_value = preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes)
this_v8_conversion_type = idl_type.v8_conversion_type(extended_attributes)
# SetReturn-specific overrides
if this_v8_conversion_type in ['Date', 'EventHandler', 'ScriptValue', 'SerializedScriptValue', 'array', 'FrozenArray']:
# Convert value to V8 and then use general v8SetReturnValue
cpp_value = idl_type.cpp_value_to_v8_value(cpp_value, extended_attributes=extended_attributes)
if this_v8_conversion_type == 'DOMWrapper':
this_v8_conversion_type = dom_wrapper_conversion_type()
if is_static and this_v8_conversion_type in ('Dictionary', 'NullableDictionary', 'DictionaryOrUnion'):
this_v8_conversion_type += 'Static'
format_string = V8_SET_RETURN_VALUE[this_v8_conversion_type]
statement = format_string.format(cpp_value=cpp_value, script_wrappable=script_wrappable)
return statement
IdlTypeBase.v8_set_return_value = v8_set_return_value
CPP_VALUE_TO_V8_VALUE = {
# Built-in types
'Date': 'v8DateOrNaN({isolate}, {cpp_value})',
'DOMString': 'v8String({isolate}, {cpp_value})',
'ByteString': 'v8String({isolate}, {cpp_value})',
'USVString': 'v8String({isolate}, {cpp_value})',
'boolean': 'v8Boolean({cpp_value}, {isolate})',
'int': 'v8::Integer::New({isolate}, {cpp_value})',
'unsigned': 'v8::Integer::NewFromUnsigned({isolate}, {cpp_value})',
'float': 'v8::Number::New({isolate}, {cpp_value})',
'unrestricted float': 'v8::Number::New({isolate}, {cpp_value})',
'double': 'v8::Number::New({isolate}, {cpp_value})',
'unrestricted double': 'v8::Number::New({isolate}, {cpp_value})',
'void': 'v8Undefined()',
'StringOrNull': '{cpp_value}.isNull() ? v8::Local<v8::Value>(v8::Null({isolate})) : v8String({isolate}, {cpp_value})',
# Special cases
'Dictionary': '{cpp_value}.v8Value()',
'EventHandler': (
'{cpp_value} ? ' +
'V8AbstractEventListener::cast({cpp_value})->getListenerOrNull(' +
'{isolate}, impl->getExecutionContext()) : ' +
'v8::Null({isolate}).As<v8::Value>()'),
'ScriptValue': '{cpp_value}.v8Value()',
'SerializedScriptValue': 'v8Deserialize({isolate}, {cpp_value})',
# General
'array': 'toV8({cpp_value}, {creation_context}, {isolate})',
'FrozenArray': 'freezeV8Object(toV8({cpp_value}, {creation_context}, {isolate}), {isolate})',
'DOMWrapper': 'toV8({cpp_value}, {creation_context}, {isolate})',
# Passing nullable dictionaries isn't a pattern currently used
# anywhere in the web platform, and more work would be needed in
# the code generator to distinguish between passing null, and
# passing an object which happened to not contain any of the
# dictionary's defined attributes. For now, don't define
# NullableDictionary here, which will cause an exception to be
# thrown during code generation if an argument to a method is a
# nullable dictionary type.
#
# Union types or dictionaries
'DictionaryOrUnion': 'toV8({cpp_value}, {creation_context}, {isolate})',
}
def cpp_value_to_v8_value(idl_type, cpp_value, isolate='info.GetIsolate()', creation_context='info.Holder()', extended_attributes=None):
"""Returns an expression that converts a C++ value to a V8 value."""
# the isolate parameter is needed for callback interfaces
idl_type, cpp_value = preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes)
this_v8_conversion_type = idl_type.v8_conversion_type(extended_attributes)
format_string = CPP_VALUE_TO_V8_VALUE[this_v8_conversion_type]
statement = format_string.format(cpp_value=cpp_value, isolate=isolate, creation_context=creation_context)
return statement
IdlTypeBase.cpp_value_to_v8_value = cpp_value_to_v8_value
def literal_cpp_value(idl_type, idl_literal):
"""Converts an expression that is a valid C++ literal for this type."""
# FIXME: add validation that idl_type and idl_literal are compatible
if idl_type.base_type in ('any', 'object') and idl_literal.is_null:
return 'ScriptValue()'
literal_value = str(idl_literal)
if idl_type.base_type in CPP_UNSIGNED_TYPES:
return literal_value + 'u'
return literal_value
def union_literal_cpp_value(idl_type, idl_literal):
if idl_literal.is_null:
return idl_type.name + '()'
elif idl_literal.idl_type == 'DOMString':
member_type = idl_type.string_member_type
elif idl_literal.idl_type in ('integer', 'float'):
member_type = idl_type.numeric_member_type
elif idl_literal.idl_type == 'boolean':
member_type = idl_type.boolean_member_type
else:
raise ValueError('Unsupported literal type: ' + idl_literal.idl_type)
return '%s::from%s(%s)' % (idl_type.name, member_type.name,
member_type.literal_cpp_value(idl_literal))
def array_or_sequence_literal_cpp_value(idl_type, idl_literal):
# Only support empty arrays.
if idl_literal.value == '[]':
return cpp_type(idl_type) + '()'
raise ValueError('Unsupported literal type: ' + idl_literal.idl_type)
IdlType.literal_cpp_value = literal_cpp_value
IdlUnionType.literal_cpp_value = union_literal_cpp_value
IdlArrayOrSequenceType.literal_cpp_value = array_or_sequence_literal_cpp_value
################################################################################
# Utility properties for nullable types
################################################################################
def cpp_type_has_null_value(idl_type):
# - String types (String/AtomicString) represent null as a null string,
# i.e. one for which String::isNull() returns true.
# - Enum types, as they are implemented as Strings.
# - Interface types (raw pointer or RefPtr/PassRefPtr) represent null as
# a null pointer.
    # - Union types, as their container classes can represent a null value.
# - 'Object' and 'any' type. We use ScriptValue for object type.
return (idl_type.is_string_type or idl_type.is_interface_type or
idl_type.is_enum or idl_type.is_union_type
or idl_type.base_type == 'object' or idl_type.base_type == 'any'
or idl_type.is_custom_callback_function or idl_type.is_callback_interface)
IdlTypeBase.cpp_type_has_null_value = property(cpp_type_has_null_value)
def is_implicit_nullable(idl_type):
# Nullable type where the corresponding C++ type supports a null value.
return idl_type.is_nullable and idl_type.cpp_type_has_null_value
def is_explicit_nullable(idl_type):
    # Nullable type that isn't implicitly nullable (see above). For such types,
    # we use Nullable<T> or similar explicit ways to represent a null value.
return idl_type.is_nullable and not idl_type.is_implicit_nullable
IdlTypeBase.is_implicit_nullable = property(is_implicit_nullable)
IdlUnionType.is_implicit_nullable = False
IdlTypeBase.is_explicit_nullable = property(is_explicit_nullable)
def number_of_nullable_member_types_union(idl_type):
# http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types
count = 0
for member in idl_type.member_types:
if member.is_nullable:
count += 1
member = member.inner_type
if member.is_union_type:
count += number_of_nullable_member_types_union(member)
return count
IdlUnionType.number_of_nullable_member_types = property(
number_of_nullable_member_types_union)
def includes_nullable_type_union(idl_type):
# http://heycam.github.io/webidl/#dfn-includes-a-nullable-type
return idl_type.number_of_nullable_member_types == 1
IdlTypeBase.includes_nullable_type = False
IdlNullableType.includes_nullable_type = True
IdlUnionType.includes_nullable_type = property(includes_nullable_type_union)
authors: ["support@opentext.com"] | author: support@opentext.com

---
blob_id: 4a280cf5c7d4c339037eb911f3ebad676959108e | directory_id: 53aa7d90d5ddc2b59b869a2a00e8574cd196a625
path: /Html/Dir.py | content_id: 06bd8024551866f989407fa2008dc12f9ce03a6a
detected_licenses: [] | license_type: no_license | repo_name: nicolachoquet06250/webcreator-V2
snapshot_id: c655248ad69ebe6cbf0ff229c11fc25b97097112 | revision_id: 72bb5e4ddde3a8965f93a6e00226795839b64a0b | branch_name: refs/heads/master
visit_date: 2020-03-20T06:41:08.531948 | revision_date: 2018-06-21T13:11:54 | committer_date: 2018-06-21T13:11:54
github_id: 137,256,863 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 101 | extension: py
content:
from Utils import Utils
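# Thin wrapper: Dir.create() just delegates to the mkdir() helper inherited
# from Utils.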
class Dir(Utils):
def create(self, path=''):
self.mkdir(path)
authors: ["nicolachoquet06250@gmail.com"] | author: nicolachoquet06250@gmail.com

---
blob_id: 9c5ae5f21eb5f1a36093fe5f764a1835128a01d2 | directory_id: dc67e70a303f265ee6cb4c1a2d61fe811053fb3d
path: /beginner/095/C.py | content_id: e641e597678f29556c9fceffadc8270b970f8ac8
detected_licenses: [] | license_type: no_license | repo_name: cry999/AtCoder
snapshot_id: d39ce22d49dfce805cb7bab9d1ff0dd21825823a | revision_id: 879d0e43e3fac0aadc4d772dc57374ae72571fe6 | branch_name: refs/heads/master
visit_date: 2020-04-23T13:55:00.018156 | revision_date: 2019-12-11T05:23:03 | committer_date: 2019-12-11T05:23:03
github_id: 171,214,066 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2019-05-13T15:17:02 | gha_created_at: 2019-02-18T04:24:01 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 623 | extension: py
content:
def half_and_half(
A: int, B: int, C: int, X: int, Y: int) -> int:
"""
    :param A: price of an A pizza
    :param B: price of a B pizza
    :param C: price of an AB pizza
    :param X: required number of A pizzas
    :param Y: required number of B pizzas
"""
min_price = float('inf')
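    # num_ab counts (A, B) pairs assembled from AB pizzas: two AB pizzas
    # (cost 2*C) yield one whole A and one whole B; any shortfall in X or Y
    # is bought directly at prices A and B.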
for num_ab in range(max(X, Y)+1):
num_a, num_b = max(0, X-num_ab), max(0, Y-num_ab)
price = num_a*A + num_b*B + 2*num_ab*C
min_price = min(min_price, price)
return min_price
if __name__ == "__main__":
A, B, C, X, Y = map(int, input().split())
ans = half_and_half(A, B, C, X, Y)
print(ans)
authors: ["when.the.cry999@gmail.com"] | author: when.the.cry999@gmail.com

---
blob_id: 171c44c03fc9a6f1280505b2dfe9de87aed7c40a | directory_id: b5c5efbc9bf66caf317e6198c91d5253d23f2fd9
path: /PyAudioEngi/AudioREC/test.py | content_id: 0b39e9373383f3b3f42bca9fe33f0bfe7ba27cc8
detected_licenses: [] | license_type: no_license | repo_name: vvilq27/python
snapshot_id: eb478a91843590eb3bf2a0e9ea6b88adf826d500 | revision_id: c1d975650057ca17c6f88497d1f629b28d72308d | branch_name: refs/heads/master
visit_date: 2021-01-02T09:45:07.087470 | revision_date: 2020-10-16T14:02:46 | committer_date: 2020-10-16T14:02:46
github_id: 99,287,435 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 305 | extension: py
content:
'''
Created on 7 May 2018
@author: arkadiusz.zelazowski
'''
import time
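# NOTE: time.clock() is deprecated (and removed in Python 3.8);
# time.perf_counter() is the modern replacement.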
t = time.clock()
for i in range(1000000):
a = 5+3
print(time.clock() - t)
buffer = []
data1 = [1, 2, 3, 4]
data2 = [5, 6, 7, 8]
buffer.append(data1)
print(buffer)
buffer.append(data2)
buffer.append(data1)
print(buffer[1])
authors: ["arkadiusz.zelazowski@accenture.com"] | author: arkadiusz.zelazowski@accenture.com

---
blob_id: ba55aa07f86bf85d7f55d854a6d3e64096f4000b | directory_id: d80ef8c716bcc5ea54e87540dbf0463f15bf44ce
path: /libmproxy/contrib/wbxml/InvalidDataException.py | content_id: 67f8ea93014bc2aaf814f9995cc5861007b63caf
detected_licenses: ["MIT", "BSD-3-Clause"] | license_type: permissive | repo_name: YagiGo/YPTN
snapshot_id: 5043d22eb131c7164d3fa575f0c4e3d8a963dbf4 | revision_id: d7692a68ee1bf578536b4c09c566272210fc8b69 | branch_name: refs/heads/master
visit_date: 2018-10-16T03:44:18.024169 | revision_date: 2018-07-24T08:53:57 | committer_date: 2018-07-24T08:53:57
github_id: 107,633,669 | star_events_count: 4 | fork_events_count: 1
gha_license_id: MIT | gha_event_created_at: 2018-06-08T09:04:29 | gha_created_at: 2017-10-20T04:55:22 | gha_language: JavaScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,333 | extension: py
content:
#!/usr/bin/env python
'''
@author: David Shaw, david.shaw.aw@gmail.com
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: InvalidDataException.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class InvalidDataException(Exception):
pass
authors: ["jeremywu1995@gmail.com"] | author: jeremywu1995@gmail.com

---
blob_id: db520c55803ce3ffeb97f5b339bc73d74fb711f0 | directory_id: cb40aad84a35856ce5a8285ea7260f4183b1dd7a
path: /tests/model/test_properties.py | content_id: 686bc3f6503e24b4cfda6093606dd26cd1f7e118
detected_licenses: ["Apache-2.0", "MIT"] | license_type: permissive | repo_name: vyahello/trump-bullet-game
snapshot_id: f71f2fe86a92ba89ea82af5cfecab504b13576d0 | revision_id: 7648f9722471323ddec1aa6b6d7db38166bebc91 | branch_name: refs/heads/master
visit_date: 2021-09-08T09:31:49.459350 | revision_date: 2021-08-29T08:26:14 | committer_date: 2021-08-29T08:40:40
github_id: 167,864,306 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,974 | extension: py
content:
from typing import Tuple
import pytest
from app.model.properties import GameProperty, Color, Resolution, Border
from app import PropertyError
_rdba_color: Tuple[int, ...] = (1, 2, 3)
_resolution: Tuple[int, ...] = (10, 20)
_bottom: int = 5
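# NOTE: the color, resolution and screen_border fixtures used below are not
# defined in this module; presumably they are provided by the suite's
# conftest.py.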
def test_property_coordinates() -> None:
assert len(GameProperty.coordinates()) == 4
def test_calculate_jumper() -> None:
assert GameProperty.calculate_jumper() == 50
def test_color_as_rgba(color: Color) -> None:
assert color.as_rgba() == _rdba_color
def test_resolution_as_sequence(resolution: Resolution) -> None:
assert resolution.as_sequence() == _resolution
def test_resolution_top_height(resolution: Resolution) -> None:
assert resolution.top_height == _resolution[0]
def test_resolution_top_width(resolution: Resolution) -> None:
assert resolution.top_width == _resolution[1]
def test_resolution_bottom(resolution: Resolution) -> None:
assert resolution.bottom == _bottom
def test_border_is_top_left(screen_border: Border) -> None:
assert screen_border.is_top_left(10)
def test_border_is_top_right(screen_border: Border) -> None:
assert screen_border.is_top_right(10, 2)
def test_border_is_top_upper(screen_border: Border) -> None:
assert screen_border.is_top_upper(15)
def test_border_is_top_lower(screen_border: Border) -> None:
assert screen_border.is_top_lower(3, -10)
def test_border_is_not_top_left(screen_border: Border) -> None:
assert not screen_border.is_top_left(1)
def test_border_is_not_top_right(screen_border: Border) -> None:
assert not screen_border.is_top_right(30, 3)
def test_border_is_not_top_upper(screen_border: Border) -> None:
assert not screen_border.is_top_upper(1)
def test_border_is_not_top_lower(screen_border: Border) -> None:
assert not screen_border.is_top_lower(15, 2)
def test_resolution_error() -> None:
with pytest.raises(PropertyError):
Resolution(resolution=(0, 0, 0)).as_sequence()
authors: ["vyahello@gmail.com"] | author: vyahello@gmail.com

---
blob_id: fdbc95d9a4ad946af1d4b66ea9d2b9a58fc8e2e4 | directory_id: 4dc91b14630d507d32ec75c7c099ba3576b07232
path: /TopAnalysis/scripts/KinAlg4tree.py | content_id: 10f69e8216a75bc8d47b8372486a19bc4053939d
detected_licenses: [] | license_type: no_license | repo_name: beatrizlopes/TopLJets
snapshot_id: 6b62ccfd5249f6d0d06c04a487e638958df229af | revision_id: 198250ab1eae8a6a11b66dad626a827f46ec0092 | branch_name: refs/heads/master
visit_date: 2023-06-21T20:10:46.840262 | revision_date: 2019-08-01T10:44:01 | committer_date: 2019-08-01T10:44:01
github_id: 198,248,791 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,138 | extension: py
content:
import ROOT
import optparse
import json
import sys
import os
import numpy as np
from array import array
from TopLJets2015.TopAnalysis.storeTools import getEOSlslist
from TopLJets2015.TopAnalysis.nuSolutions import *
"""
a dummy converter
"""
def convertToPtEtaPhiM(lVec,xyz,m=0.):
en=ROOT.TMath.Sqrt(xyz[0]**2+xyz[1]**2+xyz[2]**2)
p4=ROOT.TLorentzVector(xyz[0],xyz[1],xyz[2],en)
return lVec(p4.Pt(),p4.Eta(),p4.Phi(),p4.M())
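# For each input file, solve the dilepton ttbar kinematics per event (trying
# both b-jet/lepton pairings), keep the solution with the lowest m(ttbar),
# and write an augmented ntuple back into the file.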
def KinematicsAlgorithm():
args = sys.argv[1:]
for filename in args:
inFileName=filename
print '....analysing',inFileName
fIn=ROOT.TFile.Open(inFileName,"UPDATE")
#fOut=ROOT.TFile.Open("teste.root","RECREATE")
tree=fIn.Get("sel")
if not fIn.GetListOfKeys().Contains("sel") :
print "unable to read tree from file. Skipping file ",inFileName
continue
#newtree=ROOT.TTree("sel2","sel2")
newtree=ROOT.TNtuple("sel2","sel2","run:lumi:ev:nvtx:rho:channel:mll:nljets:nbjets:ht:metpt:metphi:l1pt:l1eta:l1phi:l1m:l2pt:l2eta:l2phi:l2m:b1pt:b1eta:b1phi:b1m:b2pt:b2eta:b2phi:b2m:px2:py2:pz2:E2:yvis:ysum:max_dy:min_dy:deltarll:deltaphill:mlb:mpp:ypp:gen_mtt:gen_ytt:rec_mtt:rec_ytt:weight")
newtree.SetDirectory(fIn)
#branch.SetEntries(tree.GetEntries())
#fOut.cd();
#newtree.SetDirectory(fOut);
#loop over events in the tree and fill histos
totalEntries=tree.GetEntries()
lVec = ROOT.Math.LorentzVector(ROOT.Math.PtEtaPhiM4D('double'))
# h1=ROOT.TH1F('yttrec-yttgen','[y_{tt} (rec) - y_{tt} (gen)]',50,-2,2)
for i in xrange(0,totalEntries):
tree.GetEntry(i)
if i%100==0 : sys.stdout.write('\r [ %d/100 ] done' %(int(float(100.*i)/float(totalEntries))) )
#evWeight=puNormSF*tree.weight[0]*filtWeight*filtNormRwgt
#leptons
leptons=[]
leptons.append( lVec(tree.l1pt,tree.l1eta,tree.l1phi,tree.l1m) )
leptons.append( lVec(tree.l2pt,tree.l2eta,tree.l2phi,tree.l2m) )
#if len(leptons)<2 : continue
#preselect the b-jets (save always the jet and the gen jet)
bjets=[]
bjets.append( lVec(tree.b1pt,tree.b1eta,tree.b1phi,tree.b1m) )
bjets.append( lVec(tree.b2pt,tree.b2eta,tree.b2phi,tree.b2m) )
#met
metx,mety=tree.metpt*ROOT.TMath.Cos(tree.metphi),tree.metpt*ROOT.TMath.Sin(tree.metphi)
#try to solve the kinematics (need to swap bl assignments)
allSols=[]
try:
sols=doubleNeutrinoSolutions( (bjets[0], bjets[1]),
(leptons[0], leptons[1]),
(metx,mety) )
for isol in xrange(0,len(sols.nunu_s)):
top = bjets[0]+leptons[0]+convertToPtEtaPhiM(lVec,sols.nunu_s[isol][0],0.)
top_ = bjets[1]+leptons[1]+convertToPtEtaPhiM(lVec,sols.nunu_s[isol][1],0.)
allSols.append( (0,top,top_) )
except np.linalg.linalg.LinAlgError:
pass
try:
sols=doubleNeutrinoSolutions( (bjets[0], bjets[1]),
(leptons[1], leptons[0]),
(metx,mety) )
for isol in xrange(0,len(sols.nunu_s)):
top = bjets[0]+leptons[1]+convertToPtEtaPhiM(lVec,sols.nunu_s[isol][0],0.)
top_ = bjets[1]+leptons[0]+convertToPtEtaPhiM(lVec,sols.nunu_s[isol][1],0.)
allSols.append( (1,top,top_) )
except np.linalg.linalg.LinAlgError :
pass
#sort solutions by increasing m(ttbar)
if len(allSols)==0: continue
#print "length of allSols", len(allSols)
allSols=sorted(allSols, key=lambda sol: (sol[1]+sol[2]).mass() )
# print 'lowest m(ttbar) solution:', (allSols[0][1]+allSols[0][2]).mass()
lowMtt=(allSols[0][1]+allSols[0][2]).mass()
lowYtt=(allSols[0][1]+allSols[0][2]).Rapidity()
# h1.Fill(lowYtt-tree.gen_ytt)
            varsel=[tree.run,tree.lumi,tree.ev,tree.nvtx,tree.rho,tree.channel,tree.mll,tree.nljets,tree.nbjets,tree.ht,  # order must match the TNtuple branch list (run:lumi:ev:nvtx:rho:...)
tree.metpt,tree.metphi,tree.l1pt,tree.l1eta,tree.l1phi,tree.l1m,tree.l2pt,tree.l2eta,tree.l2phi,tree.l2m,
tree.b1pt,tree.b1eta,tree.b1phi,tree.b1m,tree.b2pt,tree.b2eta,tree.b2phi,tree.b2m,
tree.px2,tree.py2,tree.pz2,tree.E2,tree.yvis,tree.ysum,tree.max_dy,tree.min_dy,
tree.deltarll,tree.deltaphill,tree.mlb,tree.mpp,tree.ypp,tree.gen_mtt,tree.gen_ytt,lowMtt,lowYtt,tree.weight]
newtree.Fill(array("f",varsel))
fIn.cd()
fIn.Write()
fIn.Close()
def main():
KinematicsAlgorithm()
if __name__ == "__main__":
main()
authors: ["bribeiro@cern.ch"] | author: bribeiro@cern.ch

---
blob_id: 376e21623d3fc2c5d5c28e523d7aebd69a3d0cb9 | directory_id: 54869fe38624f1c4338f8dc5dd5f0d89aa17c9e4
path: /p02.py | content_id: 09a6e54ab7e48ccdcf3c8f292f157dc5bbaa7bcc
detected_licenses: [] | license_type: no_license | repo_name: qoqosz/Advent-of-Code-2020
snapshot_id: d3b3512eb3b59b7404189ad094a2cc3b8ddb07be | revision_id: e27928482e8fc9f30aea3fed21e11f8d8743431d | branch_name: refs/heads/master
visit_date: 2023-02-05T22:17:45.767109 | revision_date: 2020-12-24T21:20:57 | committer_date: 2020-12-24T21:21:06
github_id: 318,346,507 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 472 | extension: py
content:
from collections import Counter
p1_count, p2_count = 0, 0
with open('p02.txt') as f:
for line in f:
rng, char, text = line.split(' ')
min_, max_ = map(int, rng.split('-'))
char = char.strip(':')
counter = Counter(text)
if min_ <= counter[char] <= max_:
p1_count += 1
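        # Part 2: the character must occupy exactly one of the two 1-indexed
        # positions, hence the XOR.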
if (char == text[min_ - 1]) ^ (char == text[max_ - 1]):
p2_count += 1
print('Part 1:', p1_count)
print('Part 2:', p2_count)
authors: ["lukasz@bednarski.me"] | author: lukasz@bednarski.me

---
blob_id: dc05e2977d431405af5481a08423fc3dbeb7ab49 | directory_id: 1d943fdcd18b749ca05ebdeea1355a09f38b67c8
path: /venv/Scripts/easy_install-3.7-script.py | content_id: 184b88433e4a27fb204a965eb33f961534cf7d8e
detected_licenses: [] | license_type: no_license | repo_name: sauravs1001/CodingChallengeFor100Days
snapshot_id: a7fc06186a6d7eaa18d6a89a856adeaa1b9dd25f | revision_id: decfa47bc45747a61c7f8afcd1952c3b37e86aa7 | branch_name: refs/heads/master
visit_date: 2020-03-26T22:08:22.333012 | revision_date: 2018-08-27T14:22:41 | committer_date: 2018-08-27T14:22:41
github_id: 145,433,587 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 481 | extension: py
content:
#!C:\Users\SSatapathy\PycharmProjects\CodingChallengeFor100Days\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
authors: ["ssatapathy@spherasolutions.com"] | author: ssatapathy@spherasolutions.com

---
blob_id: 0846c0ba23ee639e01b60dbedf18499542be341e | directory_id: 0156fd64d89df94c1f7fdb6003a11272a24e987e
path: /divineai1/manage.py | content_id: c3d5d325963809a3971f1249582a433cdcf6c1dc
detected_licenses: [] | license_type: no_license | repo_name: Vengers-Ritam/divineplatform
snapshot_id: 96b7f762188417ccf69586711abb1b6fbf3533b5 | revision_id: b25fdb3fc4e96bbe328b9d3c7f5ef390202ed8c6 | branch_name: refs/heads/master
visit_date: 2023-04-24T05:00:08.594565 | revision_date: 2021-05-03T11:23:43 | committer_date: 2021-05-03T11:23:43
github_id: 363,904,149 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 629 | extension: py
content:
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'divineai1.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"ritammoharana32@gmail.com"
] |
ritammoharana32@gmail.com
|
243b30d8a04317b70aab7c0bbadabf27a895a4a2
|
480a175ab2b3c012af2d1cddb79674fad1490fe5
|
/0x08-python-more_classes/tests/main.2.py
|
2cb60d1c599573c08cc695829729fe51c64ab27d
|
[] |
no_license
|
ianliu-johnston/holbertonschool-higher_level_programming
|
a8a6476fc6a7ac0bd8ae300f2196f17c13e1b36f
|
f6a7c9cddb2482991c2aadacb99aa66e64eb50eb
|
refs/heads/master
| 2021-04-29T11:12:56.820851
| 2017-05-10T00:48:17
| 2017-05-10T00:48:17
| 77,854,226
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
#!/usr/bin/python3
Rectangle = __import__('2-rectangle').Rectangle
new_rect = Rectangle(3, 4)
print("Dimensions of your new rectangle: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
new_rect.width = 5
print("Width just changed. New Dimensions: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
new_rect.height = 15
print("height just changed. New Dimensions: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
print("Making another one.")
next_rect = Rectangle()
print("Dimensions of your new rectangle: {} x {}".format(next_rect.width, next_rect.height))
print("Area: {}".format(next_rect.area()))
print("Perimeter: {}".format(next_rect.perimeter()))
|
[
"ian.liu-johnson@holbertonschool.com"
] |
ian.liu-johnson@holbertonschool.com
|
e0d5509edde2bc597a60a52985623e184213d1fb
|
8925916f67b9b77290020c932d97314a284d0595
|
/contrib/spendfrom/spendfrom.py
|
6765e86936b351c633817fea871d6398e2e214b0
|
[
"MIT"
] |
permissive
|
btcnode/btcnode
|
57c44726c6e289b893d07fde9146457c984324ab
|
c5005ee73e5e640e3a24a9c5648d20a30671652b
|
refs/heads/master
| 2023-04-05T04:11:07.691316
| 2021-03-19T22:12:37
| 2021-03-19T22:12:37
| 349,072,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,032
|
py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend BTNs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a btcnoded or btcnode-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the btcnode data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Btcnode/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Btcnode")
return os.path.expanduser("~/.btcnode")
def read_bitcoin_config(dbdir):
"""Read the btcnode.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "btcnode.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a btcnode JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 40743 if testnet else 30743
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the btcnoded we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(btcnoded):
info = btcnoded.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
btcnoded.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = btcnoded.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(btcnoded):
address_summary = dict()
address_to_account = dict()
for info in btcnoded.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = btcnoded.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = btcnoded.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-btcnode-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(btcnoded, fromaddresses, toaddress, amount, fee):
all_coins = list_available(btcnoded)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to btcnoded.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = btcnoded.createrawtransaction(inputs, outputs)
signed_rawtx = btcnoded.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(btcnoded, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = btcnoded.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(btcnoded, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = btcnoded.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(btcnoded, txinfo)
total_out = compute_amount_out(txinfo)
        # the fee is implicit: whatever remains after outputs are subtracted from inputs
        fee = total_in-total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get BTNs from")
parser.add_option("--to", dest="to", default=None,
                      help="address to send BTNs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of btcnode.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
btcnoded = connect_JSON(config)
if options.amount is None:
address_summary = list_available(btcnoded)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(btcnoded) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(btcnoded, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(btcnoded, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = btcnoded.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
[
"root@vmi546684.contaboserver.net"
] |
root@vmi546684.contaboserver.net
|
746dc26f8121171baa58db84f01da18b503980d2
|
09af57b73cf755da52aa3f0c22e9c1072d17cd09
|
/python_code/importCrowdyNews.py
|
a0e0c171219366221a9593e703a58bbce08f0971
|
[
"MIT"
] |
permissive
|
ControCurator/controcurator
|
49390818c92b34de386901a6b09430af9d7b0a2f
|
882ff11540e085bd404a3d1a6500ce1e70eb13f5
|
refs/heads/master
| 2021-05-03T06:03:04.156148
| 2017-07-01T11:49:21
| 2017-07-01T11:49:21
| 70,159,768
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
# import CrowdyNews articles
import os
import json
from models.articles.crowdynews import *
from elasticsearch import Elasticsearch
es = Elasticsearch(
['http://controcurator.org/ess/'],
port=80)
# main function
if __name__=='__main__':
#articles = CrowdyNews.all(size=1000)
#CrowdyNews.delete_all(articles)
anchors = Anchor.all(size=1000)
#Anchor.delete_all(anchors)
query = {"query" : {"match_all" : {}}, "from": 0, "size": 1000}
# load cached entries
response = es.search(index="crowdynews", body=query)
for f in response["hits"]["hits"]:
# for each file
data = f['_source']
article = CrowdyNews.create(data)
article.save()
# add article
# add comments as article children
# add wiki topics as anchors
for anchor in anchors:
a = Anchor.getOrCreate(anchor.id.lower())
a.findInstances()
a.save()
    # print('added', article.id)
|
[
"b.timmermans@vu.nl"
] |
b.timmermans@vu.nl
|
e136d99f8769bc511857cf18825e7b778b03d888
|
db57e38fc94280f55de113512e1bf4468d199ea4
|
/Tests/conftest.py
|
0c8c01ea6c1cb2b52cfa5556824665053a3287ea
|
[] |
no_license
|
maximashuev/TestProject
|
73f231f19f1a51a61677d18e2045e5ddab77244c
|
156486bbbd5e8f2e2ac902dc09adfaa767e82ad9
|
refs/heads/master
| 2023-01-24T16:30:48.816166
| 2020-12-15T11:32:42
| 2020-12-15T11:32:42
| 320,307,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
import pytest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
@pytest.fixture(params=['chrome','firefox'], scope='class')
def init_driver(request):
driver = None
if request.param == 'chrome':
options = webdriver.ChromeOptions()
# options.headless = True
options.add_argument('--headless')
driver = webdriver.Chrome(ChromeDriverManager().install(),options=options)
driver.implicitly_wait(5)
elif request.param == 'firefox':
options = webdriver.FirefoxOptions()
options.add_argument('--headless')
driver = webdriver.Firefox(executable_path=GeckoDriverManager().install(),options=options)
driver.implicitly_wait(5)
request.cls.driver = driver
yield
driver.quit()
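# A minimal (hypothetical) consumer of this fixture; names are illustrative.
# Because the fixture assigns request.cls.driver, a class marked with it can
# reach the browser as self.driver, and each test class runs once per browser:
#
# @pytest.mark.usefixtures('init_driver')
# class TestExample:
#     def test_opens_page(self):
#         self.driver.get('https://example.com')
#         assert 'Example' in self.driver.title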
|
[
"maximashuev@gmail.com"
] |
maximashuev@gmail.com
|
9f03d7bd8bcc479327be64dc54ad22ec87c35ae7
|
a0990640cb7d2b93262af982185f98ff0b0addbc
|
/2021-05-13_vibe+/frame2video.py
|
69e25c19525fd94f91b15ce947fdf445fb7de784
|
[] |
no_license
|
chgex/Others
|
c320bff96aa85fa3127092f7e2a433801fbb62ea
|
2a8970474bd7c78aa40a7e6f4135192c2972a334
|
refs/heads/main
| 2023-04-24T09:48:56.475389
| 2021-05-18T10:11:13
| 2021-05-18T10:11:13
| 366,975,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,401
|
py
|
'''
Author: liubai
Date: 2021-04-22
LastEditTime: 2021-04-22
'''
import cv2
import os
# Rename frames from '<i>.jpg' to zero-padded names so they sort correctly
def imageRename(image_path):
    image_list=os.listdir(image_path)
    total=len(image_list)
    for i in range(1,total+1):
        old_image_name=image_path + '/' + str(i) + '.jpg'
        new_image_name=image_path + '/' + str(i).zfill(5) + '.jpg'
        os.rename(old_image_name,new_image_name)
    print('rename success')
# Compute the video duration in seconds
def getTime(filename):
    total_time=0
    cap=cv2.VideoCapture(filename)
    if cap.isOpened():
        rate = cap.get(5)   # property 5 (CAP_PROP_FPS) returns the frame rate
        fraNum=cap.get(7)   # property 7 (CAP_PROP_FRAME_COUNT) returns the frame count
        duration=fraNum/rate
        total_time+=duration
    cap.release()
    return total_time
# Estimate fps by counting the frames and dividing by the duration
def getFPS(filename='../test.mp4'):
    # filename='test.mp4'
    cap = cv2.VideoCapture(filename)
    total_frame = 0
    while(True):
        ret, frame = cap.read()
        if ret is False:
            break
        total_frame = total_frame + 1
    cap.release()
    # video length in seconds
    total_time=getTime(filename)
    # frames per second
    fps=total_frame/total_time
    return int(fps)
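# Note: OpenCV can also report the rate directly without decoding every frame,
# e.g. fps = int(cv2.VideoCapture(filename).get(cv2.CAP_PROP_FPS));
# the frame-counting loop above works as a cross-check for files with bad metadata.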
# Stitch the frames together into a video
def frame2video(image_path,video_name):
    image_list=os.listdir(image_path)
    image_list.sort()
    # the first image fixes the frame size
    first_image = cv2.imread( image_path + '/' + image_list[0])
    fps = 20
    # fps=getFPS()
    print('fps: ',fps)
    # size
    size= (first_image.shape[1],first_image.shape[0])
    print(size)
    # codec
    fourcc = cv2.VideoWriter_fourcc(*"mp4v") # MJPG
    # videowriter
    videoWrite = cv2.VideoWriter(video_name,fourcc,fps,size)
    for image in image_list:
        print(image)
        image=image_path + '/' + image
        img = cv2.imread(image,cv2.IMREAD_COLOR)
        # resize to the target frame size
        img = cv2.resize(img,size,interpolation=cv2.INTER_CUBIC)
        # write the frame
        videoWrite.write(img)
    print('video write success')
if __name__=='__main__':
image_path='./upImg'
video_name='update.mp4'
imageRename(image_path)
frame2video(image_path,video_name)
image_path='./SegImg'
video_name='seg.mp4'
imageRename(image_path)
frame2video(image_path,video_name)
|
[
"noreply@github.com"
] |
noreply@github.com
|
36c88c84948b0dd704090817ec765ae54204629c
|
0c5ce271c857d067c77d268c8cd6a0b1c0f70e11
|
/app.py
|
569b218bc98727dac4ed4a5cac32baa944672cea
|
[] |
no_license
|
Eduardo-JReis/translate-script
|
0b1723a58204885734d6d235fdafc7abe0e71c83
|
5ad6e0d2311e7dad938af300feb55b82f6f0622d
|
refs/heads/master
| 2023-01-12T20:06:20.265520
| 2020-11-22T11:17:08
| 2020-11-22T11:17:08
| 315,018,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
# import googletrans as gt
from googletrans import Translator
# print(gt.LANGUAGES)
trans = Translator()
test = True
while test:
    word = str(input('Digite a palavra: '))
    print()
    if word == 'esc':
        # stop without translating the sentinel word itself
        test = False
    else:
        res = trans.translate(word, dest='pt')
        print(res.text)
|
[
"edu.publicidade81@gmail.com"
] |
edu.publicidade81@gmail.com
|
b2c89f08137187610f4e4c87d7926691feebaf94
|
e10513e1ef7d195a051befb6c829b27a0cf685c2
|
/core/utils/resume.py
|
39fc888d1e92b31d15ad1e50ee51e359c3d01d89
|
[] |
no_license
|
kirenng/image-caption
|
5293409a4dca0ed225f3ec342a244ea5c0a49a60
|
78b521f306aac3fc02bb051be0e906108f98f7dd
|
refs/heads/master
| 2023-07-06T11:34:23.420494
| 2021-08-13T09:55:19
| 2021-08-13T09:55:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
import os
import torch
def resume_from_checkpoint(args, model, optimizer, best_acc1):
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map models to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
return args, model, optimizer, best_acc1
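# Sketch of an assumed call site, mirroring the usual PyTorch training-script wiring;
# the argument names are taken from the function signature above:
#
# if args.resume:
#     args, model, optimizer, best_acc1 = resume_from_checkpoint(args, model, optimizer, best_acc1)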
|
[
"3280867946@qq.com"
] |
3280867946@qq.com
|
8ab3356264d79c13893cbd6fbacaabfc53946f25
|
55be4a49ed1cd1b8b7b0ac2e6fa55aa58c180e15
|
/ICS 32/Project 4/test_project4.py
|
490730e4bd9f5d19726a62c27289657fd2313e99
|
[] |
no_license
|
rvcervan/ICS-32-Projects-Python
|
bc64abda6ea20f63542bd121f1161d31d23a2d42
|
ec01343708028fbe07fc95dc229bd111c12c3836
|
refs/heads/main
| 2023-07-01T16:41:48.506241
| 2021-08-09T05:13:24
| 2021-08-09T05:13:24
| 394,155,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,133
|
py
|
import mechanics
import unittest
class GameTest(unittest.TestCase):
def setUp(self):
self._game = mechanics.Game([[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ',' ']], 4, 3)
def test_faller_rotates(self):
self._game._board = [[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ','[S]'],
[' ', ' ','[T]'],
[' ', ' ','[V]'],
[' ', ' ',' ']]
self.assertEqual(self._game.rotate(),
[[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ','[V]'],
[' ', ' ','[S]'],
[' ', ' ','[T]'],
[' ', ' ',' ']])
def test_faller_moves_left(self):
self._game._board = [[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ','[S]'],
[' ', ' ','[T]'],
[' ', ' ','[V]'],
[' ', ' ',' ']]
self.assertEqual(self._game.move_left(), [[' ', ' ',' '],
[' ', ' ',' '],
[' ', '[S]',' '],
[' ', '[T]',' '],
[' ', '[V]',' '],
[' ', ' ',' ']])
self.assertEqual(self._game.move_left(), [[' ', ' ',' '],
[' ', ' ',' '],
['[S]', ' ',' '],
['[T]', ' ',' '],
['[V]', ' ',' '],
[' ', ' ',' ']])
def test_faller_moves_right(self):
self._game._board = [[' ', ' ',' '],
[' ', ' ',' '],
['[S]', ' ',' '],
['[T]', ' ',' '],
['[V]', ' ',' '],
[' ', ' ',' ']]
self.assertEqual(self._game.move_right(),
[[' ', ' ',' '],
[' ', ' ',' '],
[' ', '[S]',' '],
[' ', '[T]',' '],
[' ', '[V]',' '],
[' ', ' ',' ']])
self.assertEqual(self._game.move_right(),
[[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ','[S]'],
[' ', ' ','[T]'],
[' ', ' ','[V]'],
[' ', ' ',' ']])
def test_pieces_match(self):
self._game._board = [[' T ', ' ',' '],
[' ', ' T ',' '],
[' W ', ' ',' T '],
[' W ', ' ',' S '],
[' W ', ' S ',' '],
[' S ', ' S ',' S ']]
self.assertEqual(self._game.replace_matching(),
[['*T*', ' ',' '],
[' ', '*T*',' '],
['*W*', ' ','*T*'],
['*W*', ' ','*S*'],
['*W*', '*S*',' '],
['*S*', '*S*','*S*']])
def test_pieces_fill_empty_space(self):
self._game._board = [[' S ', ' ',' V '],
[' ', ' ',' '],
[' T ', ' ',' '],
[' ', ' Y ',' '],
[' W ', ' X ',' '],
[' ', ' ',' V ']]
self.assertEqual(self._game.fill_empty_space(),
[[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ',' '],
[' S ', ' ',' '],
[' T ', ' Y ',' V '],
[' W ', ' X ',' V ']])
def test_freeze_faller_if_landed(self):
self._game._board = [[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ','[Z]'],
['[S]', ' ','[Y]'],
['[T]', ' ','[X]'],
['[V]', ' ',' W ']]
self.assertEqual(self._game.freeze_faller(),
[[' ', ' ',' '],
[' ', ' ',' '],
[' ', ' ','|Z|'],
['|S|', ' ','|Y|'],
['|T|', ' ','|X|'],
['|V|', ' ',' W ']])
def test_unfreeze_faller_if_moved(self):
self._game._board = [[' ', ' ', ' '],
[' ', ' ', ' '],
[' ', ' ', '|Z|'],
[' ', ' ', '|Y|'],
[' ', ' ', '|X|'],
[' ', ' ', ' W ']]
        self._game.move_left()
        self.assertEqual(self._game.unfreeze_faller(),
                         [[' ', ' ', ' '],
                          [' ', ' ', ' '],
                          [' ', '[Z]', ' '],
                          [' ', '[Y]', ' '],
                          [' ', '[X]', ' '],
                          [' ', ' ', ' W ']])
def test_if_faller_is_falling(self):
self._game._board = [[' ', ' ', ' '],
[' ', ' ', '[S]'],
[' ', ' ', '[T]'],
[' ', ' ', '[V]'],
[' ', ' ', ' '],
[' ', ' ', ' ']]
self.assertEqual(self._game.falling(),
[[' ', ' ', ' '],
[' ', ' ', ' '],
[' ', ' ', '[S]'],
[' ', ' ', '[T]'],
[' ', ' ', '[V]'],
[' ', ' ', ' ']])
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
7bd312f480c9b07a9899341317b8efa9c83c2d1d
|
f03e5dd56eb3455e0f87295b8a610c9dac11c2c5
|
/working program storage/edit_database.py
|
27e4290c8119958cdb932f6fed14186724a4d187
|
[] |
no_license
|
henrymlongroad/PyQt-sql-database-link
|
139413d73bdc991474208476acfb6f89f017cdfd
|
41ed3cd4111657748aa35c528b0aba1f4dc32c76
|
refs/heads/master
| 2016-09-05T21:47:45.590063
| 2015-03-13T21:00:04
| 2015-03-13T21:00:04
| 24,986,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,726
|
py
|
from PyQt4.QtCore import*
from PyQt4.QtGui import*
import sys
import sqlite3
def run_main_menu():
print("which area of the database do you wish to access")
print()
print("1. customer")
print("2. products")
print("3. prescription data")
print("4. Orders")
print("5. manufacturers")
print("0. close the database")
print("choice : ", end = "")
try:
choice1 = int(input())
except ValueError:
print()
choice1 = run_main_menu()
if choice1 == 0:
return choice1
elif choice1 in range(1,6):
choice = run_sub_menu(choice1)
    else:
        choice = run_main_menu()
return choice
def run_sub_menu(choice):
if choice == 1:
print("1. insert data into {0}".format("customer"))
print("2. update data in {0}".format("customer"))
print("3. display data from {0}".format("customer"))
print("4. find item in {0}".format("customer"))
print("5. delete item in {0}".format("customer"))
print("choice : ", end = "")
try:
choice1 = int(input())
except ValueError:
print()
            choice1 = run_sub_menu(choice)
return choice1
def run_main():
close = False
while not close:
choice = validate_choice()
if choice == 0:
close = True
elif choice == 1 :
FirstName = input("please enter your first name: ")
LastName = input("please enter your last name: ")
print("do you wish to give your address: ", end = "")
answer = input()
answer = answer.lower()
if answer in ["no","n"]:
values = (FirstName,LastName,"NA","NA","NA","NA","NA")
elif answer in ["yes","y"]:
streetname = input("please enter your street name: ")
town = input("please enter your town name: ")
postcode = input("please enter your Postcode: ")
phone_number = input("please enter your Phone number: ")
email = input("please enter your email: ")
values = (FirstName,LastName,streetname,town,postcode,phone_number,email)
insert_customer_data(values)
elif choice == 2:
customer_data()
choice = input("which id do you want to update: ")
FirstName = input("please enter a first name: ")
LastName = input("please enter a last name: ")
print("do you wish to update their address: ", end = "")
done = False
while not done:
answer = input()
answer = answer.lower()
if answer in ["no","n"]:
data = (FirstName,LastName,"NA","NA","NA","NA","NA",choice)
done = True
elif answer in ["yes","y"]:
streetname = input("please enter their street name: ")
town = input("please enter their town name: ")
postcode = input("please enter their Postcode: ")
phone_number = input("please enter their Phone number: ")
email = input("please enter their email: ")
data = (FirstName,LastName,streetname,town,postcode,phone_number,email,choice)
done = True
else:
print("please enter a valid choice: ", end = "")
update_customer_data(data)
elif choice == 3:
customer = customer_data()
print(customer)
elif choice == 4:
done = False
while not done:
print("would you like to search by ID or by firstname: ",end = "")
choices = input()
if choices in ["ID","Id","id"]:
print("please enter the ID you wish to view: " ,end = "")
id = input()
rename = display_customer_data(id)
print(rename)
done = True
elif choices in ["Firstname","firstname"]:
print("please enter the Name you wish to view: ",end = "")
name = input()
                    rename = display_customer_data_by_name(name)
print(rename)
done = True
else:
print("please enter a valid choice")
done = False
elif choice == 5:
choice = input("which id do you want to delete: ")
delete_customer_data(choice)
elif choice == 0:
close = True
else:
print("Hey Listen")
def validate_choice():
choicechecked = False
choice = run_main_menu()
while not choicechecked:
if choice in range(0,6):
choicechecked = True
else:
print()
choice = run_main_menu()
return choice
def insert_customer_data(values):
with sqlite3.connect("pharmacy_database.db") as db:
cursor = db.cursor()
sql = "insert into customer (FirstName, LastName,Street,Town,Postcode,TelephoneNum, EmailAddress) values (?,?,?,?,?,?,?)"
cursor.execute(sql,values)
db.commit()
def update_customer_data(data):
with sqlite3.connect("pharmacy_database.db") as db:
cursor = db.cursor()
sql = "update customer set FirstName=?, LastName=?,street=?,town=?,postcode=?,TelephoneNum=?,EmailAddress=? where customerID=?"
cursor.execute(sql,data)
db.commit()
def customer_data():
with sqlite3.connect("pharmacy_database.db") as db:
cursor = db.cursor()
cursor.execute("select customerID, FirstName, LastName from customer ")
customer = cursor.fetchall()
return customer
def display_customer_data_by_name(FirstName):
with sqlite3.connect("pharmacy_database.db") as db:
cursor = db.cursor()
cursor.execute("select * from customer where FirstName=?",(FirstName,))
customer = cursor.fetchone()
return customer
def display_customer_data(id):
with sqlite3.connect("pharmacy_database.db") as db:
cursor = db.cursor()
cursor.execute("select * from customer where customerID=?",(id,))
customer = cursor.fetchone()
return customer
def delete_customer_data(data):
with sqlite3.connect("pharmacy_database.db") as db:
cursor = db.cursor()
cursor.execute("delete from customer where customerID=?",(data,))
db.commit()
run_main()
|
[
"21394@longroad.ac.uk"
] |
21394@longroad.ac.uk
|
5eec040553e54df0d88d4c9465f5455d57ba102b
|
c215d282844bad35d026d4bb65be37d6a46100d0
|
/recommender.py
|
77268f472da0b964fd00a03208dd585aaca85777
|
[] |
no_license
|
informal-economy/backend
|
d0823310c5997647bada0de374d6132b8d23e724
|
8657e831473e03450cc38a242d8aa75951a73ee5
|
refs/heads/master
| 2021-05-17T16:22:35.107644
| 2020-03-29T15:05:29
| 2020-03-29T15:05:29
| 250,869,504
| 0
| 0
| null | 2021-03-20T03:14:24
| 2020-03-28T18:47:47
|
Python
|
UTF-8
|
Python
| false
| false
| 8,886
|
py
|
import lenskit.datasets as ds
import pandas as pd
import csv
from lenskit.algorithms import Recommender
from lenskit.algorithms.user_knn import UserUser
#The function input x is the user-specific input .csv file that has the columns:
#item,title,genres,ratings
#which are equivalent to
#jobId,jobTitle,jobCategory,ratings
#The output of the function is a DataFrame of (currently) 20 records of the best
#job options as [genres, title], i.e. [category, job]
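#A minimal sketch of the expected input file and call; the file name and rows
#below are hypothetical, and ratings must fall in the 1-5 range the loader accepts:
#  item,title,genres,ratings
#  1,Teacher,Education,5
#  2,Banker,Finance,3
#  recs = recommender('my-job-ratings.csv')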
def recommender(x):
data = ds.MovieLens('lab4-recommender-systems/')
print("Successfully installed dataset.")
rows_to_show = 10 # <-- Try changing this number to see more rows of data
data.ratings.head(rows_to_show) # <-- Try changing "ratings" to "movies", "tags", or "links" to see the kinds of data that's stored in the other MovieLens files
print(data.ratings.head(rows_to_show))
joined_data = data.ratings.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
joined_data.head(rows_to_show)
print(joined_data.head(rows_to_show))
#STEP 2.1
average_ratings = (data.ratings).groupby(['item']).mean()
sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
joined_data = sorted_avg_ratings.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data.columns[1:]]
print("RECOMMENDED FOR ANYBODY:")
joined_data.head(rows_to_show)
print(joined_data.head(rows_to_show))
average_ratings = (data.ratings).groupby('item') \
.agg(count=('user', 'size'), rating=('rating', 'mean')) \
.reset_index()
sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
joined_data = sorted_avg_ratings.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data.columns[1:]]
print("RECOMMENDED FOR ANYBODY:")
joined_data.head(rows_to_show)
print(joined_data.head(rows_to_show))
#Step 2.2
    minimum_to_include = 1  # was 20 <-- you can try changing this minimum to include movies rated by fewer or more people
average_ratings = (data.ratings).groupby(['item']).mean()
rating_counts = (data.ratings).groupby(['item']).count()
average_ratings = average_ratings.loc[rating_counts['rating'] > minimum_to_include]
sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
joined_data = sorted_avg_ratings.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data.columns[3:]]
print("RECOMMENDED FOR ANYBODY:")
myjoined_data=joined_data.head(rows_to_show)
print(joined_data.head(rows_to_show))
# myjoined_data=anybody[anybody[1:]]
print("RECOMMENDED FOR ANYBODY JUST genres and title:")
print(myjoined_data)
#Step 2.3
average_ratings = (data.ratings).groupby(['item']).mean()
rating_counts = (data.ratings).groupby(['item']).count()
average_ratings = average_ratings.loc[rating_counts['rating'] > minimum_to_include]
average_ratings = average_ratings.join(data.movies['genres'], on='item')
average_ratings = average_ratings.loc[average_ratings['genres'].str.contains('Education')]
sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
joined_data = sorted_avg_ratings.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data.columns[3:]]
print("\n\nRECOMMENDED FOR AN EDUCATION SPECIALIST:")
joined_data.head(rows_to_show)
print(joined_data.head(rows_to_show))
#Step 2.4
average_ratings = (data.ratings).groupby(['item']).mean()
rating_counts = (data.ratings).groupby(['item']).count()
average_ratings = average_ratings.loc[rating_counts['rating'] > minimum_to_include]
average_ratings = average_ratings.join(data.movies['genres'], on='item')
average_ratings = average_ratings.loc[average_ratings['genres'].str.contains('sewing')]
sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
joined_data = sorted_avg_ratings.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data.columns[3:]]
print("\n\nRECOMMENDED FOR A SEWING SPECIALIST:")
joined_data.head(rows_to_show)
print(joined_data.head(rows_to_show))
#Step 3 Personalized Recommendation
jabril_rating_dict = {}
#jgb_rating_dict = {}
with open(x, newline='') as csvfile:
ratings_reader = csv.DictReader(csvfile)
for row in ratings_reader:
if ((row['ratings'] != "") and (float(row['ratings']) > 0) and (float(row['ratings']) < 6)):
jabril_rating_dict.update({int(row['item']): float(row['ratings'])})
#print("Jabril Dictionary")
#print(jabril_rating_dict)
# with open("./lab4-recommender-systems/jgb-movie-ratings.csv", newline='') as csvfile:
# ratings_reader = csv.DictReader(csvfile)
# for row in ratings_reader:
# if ((row['ratings'] != "") and (float(row['ratings']) > 0) and (float(row['ratings']) < 6)):
# jgb_rating_dict.update({int(row['item']): float(row['ratings'])})
print("\n\nRating dictionaries assembled!")
print("Sanity check:")
print("\tJabril's rating for Banker is " + str(jabril_rating_dict[2]))
#print("\tJohn-Green-Bot's rating for 1197 (The Princess Bride) is " + str(jgb_rating_dict[1197]))
#Step 4 Train a new collaborative filtering model to provide recommendations.
num_recs = 20 #<---- This is the number of recommendations to generate. You can change this if you want to see more recommendations
    user_user = UserUser(30, min_nbrs=2) # a maximum of 30 and a minimum of 2 neighbors to consider; "reasonable defaults," but you can experiment with others too
algo = Recommender.adapt(user_user)
print("algo")
print(algo)
algo.fit(data.ratings)
print(algo.fit(data.ratings))
print("Set up a User-User algorithm!")
#Step 4.1 Now that the system has defined clusters, we can give it our personal ratings to get the top 10 recommended movies for me and for John-Green-bot!
jabril_recs = algo.recommend(-1, num_recs, ratings=pd.Series(jabril_rating_dict)) #Here, -1 tells it that it's not an existing user in the set, that we're giving new ratings, while 10 is how many recommendations it should generate
print("jabril_recs")
print(jabril_recs)
joined_data = jabril_recs.join(data.movies['genres'], on='item')
joined_data = joined_data.join(data.movies['title'], on='item')
joined_data = joined_data[joined_data.columns[2:]]
print("\n\nRECOMMENDED JOB FOR JABRIL:")
#joined_data
print(joined_data)
if joined_data.empty:
joined_data=myjoined_data
#joined_data = joined_data[joined_data.columns[2:4]]
joined_data = joined_data[['genres','title']]
print("DataFrame was empty")
print(joined_data)
return(joined_data)
#jgb_recs = algo.recommend(-1, num_recs, ratings=pd.Series(jgb_rating_dict)) #Here, -1 tells it that it's not an existing user in the set, that we're giving new ratings, while 10 is how many recommendations it should generate
#
#joined_data = jgb_recs.join(data.movies['genres'], on='item')
#joined_data = joined_data.join(data.movies['title'], on='item')
#joined_data = joined_data[joined_data.columns[2:]]
#print("\n\nRECOMMENDED JOB FOR JOHN-GREEN-BOT:")
#joined_data
#print(joined_data)
    ##Step 5 Making a combined movie recommendation list. (Can be omitted??)
#combined_rating_dict = {}
#for k in jabril_rating_dict:
# if k in jgb_rating_dict:
# combined_rating_dict.update({k: float((jabril_rating_dict[k]+jgb_rating_dict[k])/2)})
# else:
# combined_rating_dict.update({k:jabril_rating_dict[k]})
#for k in jgb_rating_dict:
# if k not in combined_rating_dict:
# combined_rating_dict.update({k:jgb_rating_dict[k]})
#
#print("Combined ratings dictionary assembled!")
#print("Sanity check:")
#print("\tCombined rating for 1197 (The Princess Bride) is " + str(combined_rating_dict[1197]))
#
#
##Step 5.2
#combined_recs = algo.recommend(-1, num_recs, ratings=pd.Series(combined_rating_dict)) #Here, -1 tells it that it's not an existing user in the set, that we're giving new ratings, while 10 is how many recommendations it should generate
#
#joined_data = combined_recs.join(data.movies['genres'], on='item')
#joined_data = joined_data.join(data.movies['title'], on='item')
#joined_data = joined_data[joined_data.columns[2:]]
#print("\n\nRECOMMENDED FOR JABRIL / JOHN-GREEN-BOT HYBRID:")
#joined_data
|
[
"egetenmeyer@Nikolas-MacBook-Pro.local"
] |
egetenmeyer@Nikolas-MacBook-Pro.local
|
c2bb14d7ae24c97ce9e538b563179a0fb27d3f71
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/77d922e63877a9db19d31d69878e680aa58a54c85eee51673bc8bfa5abec9462/cython_runtime.py
|
f0626603901297c264822be8e70b80c27bee933e
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# encoding: utf-8
# module cython_runtime
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\stats\statlib.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# no classes
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
702e93ec385bbb5567fec0ac4ca70cf08f9f04db
|
7dbcf66e47684c652f9d90a47b2381cf846e003d
|
/pkg/Conf.py
|
d8e12155528eb0090ab0006f88fcc253282e3ede
|
[] |
no_license
|
hlanSmart/simple
|
531b9a8be524d29c43016c865f64132aa4bf3069
|
c8536edd4cec1f39e23a5ff35ae16f0efa15f323
|
refs/heads/master
| 2020-12-27T08:24:04.383170
| 2016-09-22T04:29:44
| 2016-09-22T04:29:44
| 68,556,669
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
#!/usr/bin/python
#coding:utf-8
import os,yaml
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def readServer(sg,sl=False): # sg: server group name; sl: when True, return the list of group names
    with open(os.path.join(BASE_PATH,'etc/server.yml'),'r') as f:
        server=yaml.load(f)
    if sl: # when sl is True, return the group names instead of the group's details
        li=[]
        for i in server:
            li.append(i)
        return li
    if sg in server:
        gp=server[sg] # gp: the group's host information
        for i in gp: # the default port 22 is not stored in the config file, so append it to the result manually
            if len(gp[i])<3:
                gp[i].append(22)
        return gp
    return False # return False when the server group does not exist
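# Illustrative layout assumed for etc/server.yml (group and host values are
# hypothetical); each host maps to a short list, and when it has fewer than
# three fields readServer appends the default port 22:
#   web:
#     host1: [192.168.1.10, root]
#     host2: [192.168.1.11, root, 2222]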
def readYaml(P):
try:
with open(P) as f:
return yaml.load(f)
except Exception as e:
print(e)
return False
|
[
"root@localhost"
] |
root@localhost
|
adc821f5df6ddc1460050390a5807ed3f8662942
|
d165d718b2a5e4b18f9b52054e1f5d382d72be03
|
/0x04-python-more_data_structures/5-number_keys.py
|
ca851394809637d9e4344d5ff96907b884f2f1f7
|
[] |
no_license
|
Fabian-Andres/holbertonschool-higher_level_programming
|
f1b67fd28fb135c84ed9b3240d66ef125a043e00
|
0b08e2f3c4a798dd0cce1a9776304c74ad0b6ba3
|
refs/heads/master
| 2022-12-16T04:56:11.222686
| 2020-09-25T04:47:02
| 2020-09-25T04:47:34
| 259,423,074
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
#!/usr/bin/python3
def number_keys(a_dictionary):
no_keys = 0
for i in range(len(a_dictionary)):
no_keys += 1
return no_keys
|
[
"f4bian.andres@gmail.com"
] |
f4bian.andres@gmail.com
|
9d9f3f8f6419b9565b74b794fce0b1e7d24d7632
|
f08f7e4da3cb83257bbeb6cf198e23ac65d91fd0
|
/안준혁/[21.07.19]1065.py
|
fed3c18cdb8c06b5c269118c09f0dc7e99e0b152
|
[] |
no_license
|
4RG0S/2021-Summer-Jookgorithm
|
cabcd2071b88510ac22a971ed600e7b4645eb5f2
|
bf23a3a0f2679bcd47c825247d57998eb23c1df8
|
refs/heads/main
| 2023-07-17T06:49:17.165893
| 2021-09-06T09:49:36
| 2021-09-06T09:49:36
| 384,205,067
| 1
| 1
| null | 2021-07-14T05:46:52
| 2021-07-08T17:44:15
|
Java
|
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Count the "han-su" up to n: numbers whose digits form an arithmetic sequence
n = int(input())
count = 0
for i in range(1, n+1):
    if i < 100:
        # every one- and two-digit number qualifies trivially
        count += 1
    elif 100 < i < 1000:
        # 100 itself is skipped; its digits 1,0,0 do not form an arithmetic sequence
        first = i % 10              # ones digit
        second = int(i / 10) % 10   # tens digit
        third = int(i / 100)        # hundreds digit
        comp1 = first - second
        comp2 = second - third
        if comp1 == comp2:
            count += 1
print(count)
|
[
"ajh99345@gmail.com"
] |
ajh99345@gmail.com
|
7688e234fa65ebe9a7d4ff0798517fcd1f8b8b52
|
6b7f81afdb9983664d12a9fc54452dd48ed5779a
|
/env/bin/python-config
|
3f755f3f8c33a751286ee92004eaaa4bc470a0c0
|
[] |
no_license
|
Wilians001/axf
|
502fcf91d737f2901572c6dd59ff3e9c81615412
|
fb4c41c01a23c5dd0f64e4c2f61c0a042cb72935
|
refs/heads/master
| 2020-03-31T07:43:32.643230
| 2018-10-08T12:02:25
| 2018-10-08T12:02:25
| 152,032,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,339
|
#!/home/wilians/axf/env/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"1518209084@qq.com"
] |
1518209084@qq.com
|
|
74c451e67b80b2f8cba3e0eac1b09d2eedf46702
|
2eff698abfad7693e61bc942de619c5abe7dd270
|
/PyPoll.py
|
1683debe67677fc4bc61c39ab1d0e1a47ff942a3
|
[] |
no_license
|
zanelouis/Election_Analysis
|
ff894d228777a465e5cd0b328538267722cc0624
|
4353f566826eabea5176d83051ca3dcbba6524ca
|
refs/heads/main
| 2023-01-07T20:59:15.794899
| 2020-11-04T00:01:20
| 2020-11-04T00:01:20
| 300,511,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,043
|
py
|
# Add our dependencies.
import csv
import os
# Assign a variable to load a file from a path.
file_to_load = os.path.join("Resources", "election_results.csv")
# Assign a variable to save the file to a path.
file_to_save = os.path.join("Analysis", "election_analysis.txt")
# Initialize a total vote counter.
total_votes = 0
# Candidate options and candidate votes
candidate_options = []
# 1. Declare the empty dictionary.
candidate_votes = {}
# Winning Candidate and Winning Count Tracker
winning_candidate = ""
winning_count = 0
winning_percentage = 0
# Open the election results and read the file.
with open(file_to_load) as election_data:
file_reader = csv.reader(election_data)
# Read the header row.
headers = next(file_reader)
# Print each row in the CSV file.
for row in file_reader:
# Add to the total vote count.
total_votes += 1
# Print the candidate name from each row.
candidate_name = row[2]
if candidate_name not in candidate_options:
# Add the candidate name to the candidate list.
candidate_options.append(candidate_name)
# Begin tracking that candidate's vote count.
candidate_votes[candidate_name] = 0
# Add a vote to that candidate's count.
candidate_votes[candidate_name] += 1
with open(file_to_save, "w") as txt_file:
# Print the final vote count to the terminal.
election_results = (
f"\nElection Results\n"
f"-------------------------\n"
f"Total Votes: {total_votes:,}\n"
f"-------------------------\n")
print(election_results, end="")
# Save the final vote count to the text file.
txt_file.write(election_results)
# Determine the percentage of votes for each candidate by looping through the counts.
# Iterate through the candidate list.
for candidate_name in candidate_votes:
# Retrieve vote count of a candidate.
votes = candidate_votes[candidate_name]
# Calculate the percentage of votes.
vote_percentage = float(votes) / float(total_votes) * 100
candidate_results = (
f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n")
# To do: print out each candidate's name, vote count, and percentage of
# votes to the terminal.
#print(f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n")
# Print each candidate, their voter count, and percentage to the terminal.
print(candidate_results)
# Save the candidate results to our text file.
txt_file.write(candidate_results)
# Determine winning vote count and candidate
# Determine if the votes is greater than the winning count.
if (votes > winning_count) and (vote_percentage > winning_percentage):
# If true then set winning_count = votes and winning_percent =
# vote_percentage.
winning_count = votes
winning_percentage = vote_percentage
# And, set the winning_candidate equal to the candidate's name.
winning_candidate = candidate_name
# Print the winning candidate's results to the terminal.
winning_candidate_summary = (
f"-------------------------\n"
f"Winner: {winning_candidate}\n"
f"Winning Vote Count: {winning_count:,}\n"
f"Winning Percentage: {winning_percentage:.1f}%\n"
f"-------------------------\n")
print(winning_candidate_summary)
# Save the winning candidate's results to the text file.
txt_file.write(winning_candidate_summary)
|
[
"noreply@github.com"
] |
noreply@github.com
|
5c903175d4a7365e542f92cbcead2a25f9846e4c
|
128420970c272be8d3b374dbfc9687ab0824bc2b
|
/blog/migrations/0002_comment.py
|
57087759a8a568a2f6018d073aa5fc2e52b43604
|
[] |
no_license
|
ZsZJ/django-blog
|
2330a8aeb97d005cea592ab3e8d37b0b47575db8
|
8862936dbc744e19166b8f7d26ccf725f6d70a60
|
refs/heads/master
| 2020-05-14T20:00:07.958381
| 2019-04-17T23:05:42
| 2019-04-17T23:05:42
| 181,938,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
# Generated by Django 2.0.13 on 2019-04-17 22:39
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
),
]
|
[
"ktj.lim@gmail.com"
] |
ktj.lim@gmail.com
|
e917e03d7e2392418e4b1f02d89aec290dda311a
|
9ef2c82ae61064c4f78798f04ab3310e7f5e4629
|
/tests/test_prepare_data.py
|
3a97f2cc09d08dbc04605095acef5c508ca0f788
|
[
"BSD-3-Clause"
] |
permissive
|
chenxofhit/cirrocumulus
|
b999d49afc024c30e61fbc6905c968f71714291e
|
18ee1264303138cbba8ff34318f90f0e2619dc13
|
refs/heads/master
| 2023-03-17T15:23:02.935136
| 2021-03-16T20:01:02
| 2021-03-16T20:01:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
import fsspec
import pandas as pd
import scipy
from cirrocumulus.embedding_aggregator import get_basis
from cirrocumulus.parquet_dataset import ParquetDataset
from cirrocumulus.prepare_data import PrepareData
def test_prepare(test_data, measures, dimensions, continuous_obs, basis, tmp_path):
output_dir = str(tmp_path)
test_data = test_data[:, measures]
test_data.obs = test_data.obs[dimensions + continuous_obs]
prepare_data = PrepareData(adata=test_data, output=output_dir)
prepare_data.execute()
pq_ds = ParquetDataset()
dataset = dict(id='')
schema = dict(shape=test_data.shape)
fs = fsspec.filesystem('file')
prepared_df = pq_ds.read_dataset(file_system=fs, path=output_dir, dataset=dataset, schema=schema,
keys=dict(X=measures, obs=dimensions + continuous_obs, basis=[get_basis(basis, -1, '')]))
if not scipy.sparse.issparse(test_data.X):
test_data.X = scipy.sparse.csr_matrix(test_data.X)
df = pd.DataFrame.sparse.from_spmatrix(test_data.X, columns=measures)
for f in dimensions:
df[f] = test_data.obs[f].values
df[f] = df[f].astype('category')
for f in continuous_obs:
df[f] = test_data.obs[f].values
embedding_data = test_data.obsm[basis]
for i in range(embedding_data.shape[1]):
df["{}_{}".format(basis, i + 1)] = embedding_data[:, i]
prepared_df = prepared_df[df.columns]
pd.testing.assert_frame_equal(df, prepared_df, check_names=False)
|
[
"jgould@broadinstitute.org"
] |
jgould@broadinstitute.org
|
e4d3b1c290b0ee2787f51f3bb625a45c1c113234
|
6daa3815511b1eb1f4ff3a40b7e9332fab38b8ef
|
/tastesavant/taste/apps/profiles/migrations/0010_auto__add_field_profile_preferred_site__chg_field_profile_user.py
|
f631b68b525621e7885479041e53e8ea8b703f7e
|
[] |
no_license
|
kaizensoze/archived-projects
|
76db01309453606e6b7dd9d2ff926cfee42bcb05
|
d39ac099cb40131bac5de66bde7d0e2db5f74189
|
refs/heads/master
| 2021-05-31T12:16:17.800730
| 2016-02-23T00:27:56
| 2016-02-23T00:27:56
| 14,407,212
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,513
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Profile.preferred_site'
# The default value, 3, should refer to the NYC site.
db.add_column('profiles_profile', 'preferred_site',
self.gf('django.db.models.fields.related.ForeignKey')(default=3, to=orm['sites.Site']),
keep_default=False)
# Changing field 'Profile.user'
db.alter_column('profiles_profile', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True))
def backwards(self, orm):
# Deleting field 'Profile.preferred_site'
db.delete_column('profiles_profile', 'preferred_site_id')
# Changing field 'Profile.user'
db.alter_column('profiles_profile', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.friendship': {
'Meta': {'object_name': 'Friendship'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notice_sent_to_user_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Profile']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'profiles.profile': {
'Meta': {'object_name': 'Profile'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'blogger': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'digest_notifications': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'favorite_food': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'favorite_restaurant': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'friends': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'friends'", 'to': "orm['auth.User']", 'through': "orm['profiles.Friendship']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'last_sync_facebook': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_sync_foursquare': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notification_level': ('django.db.models.fields.CharField', [], {'default': "'instant'", 'max_length': '16'}),
'preferred_site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'type_expert': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'type_reviewer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['profiles']
|
[
"gallo.j@gmail.com"
] |
gallo.j@gmail.com
|
1b345f16fe41c4e6c2b77186c46547b36c175f00
|
72d344286f15d794f08c6964bf3a310fe86c2f67
|
/TP05/TP01.py
|
2907b88d09c272e37dd6f11d4993647eaade0128
|
[] |
no_license
|
boua615/Tps01
|
c240330967cf4aa79491d75bbb4c82742cd88cae
|
8c9c06312c1b2b949fb86518758a0c0940d3bcc6
|
refs/heads/master
| 2021-02-12T07:03:56.862122
| 2020-03-05T03:05:12
| 2020-03-05T03:05:12
| 244,570,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,322
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## TP01.py ##
DATA_FILE = 'annuaire-v0.2.xml'
import xml.dom.minidom as minidom
import sys
def main():
    try:
        xmldoc = minidom.parse(DATA_FILE)
    except Exception:
        print("Can't open the file")
        sys.exit()
print(xmldoc.toxml())
treat_doc(xmldoc)
display_tel(xmldoc)
display_tel_personne(xmldoc)
add_id_personne(xmldoc)
return 0
def treat_doc(xmldoc):
annuaire= xmldoc.getElementsByTagName('annuaire')[0]
print(annuaire)
cpt = 0
for personne in annuaire.childNodes:
print("-" * 40)
print("Personne n°", cpt)
print(personne.toxml())
cpt += 1
def display_tel(xmldoc):
telephones = xmldoc.getElementsByTagName('telephone')
print (telephones)
cpt = 0
for tel in telephones:
print ("-"*40)
print ("Tel n°", cpt)
print (tel.toxml())
print ("N°:",tel.firstChild.data)
print ("Type:",tel.getAttribute("type"))
cpt += 1
def display_tel_personne(xmldoc):
personnes = xmldoc.getElementsByTagName('personne')
print (personnes)
cpt = 0
for personne in personnes:
print ("-"*40)
print ("Personne n°", cpt)
nom = personne.getElementsByTagName('nom')[0]
prenom = personne.getElementsByTagName('prenom')[0]
tels = personne.getElementsByTagName('telephone')
print ("*"*20)
print ("Nom:\t",nom.firstChild.data)
print ("Prénom:\t",prenom.firstChild.data)
for tel in tels:
print ("-"*20)
print ("N°:",tel.firstChild.data)
print ("Type:",tel.getAttribute("type"))
cpt += 1
def add_id_personne(xmldoc):
personnes = xmldoc.getElementsByTagName('personne')
print(personnes)
cpt = 0
for personne in personnes:
print ("-"*40)
print ("Personne n°", cpt, personne.nodeValue, personne.nodeType)
personne.setAttribute('id', str(cpt))
cpt += 1
print (personne.toxml())
if __name__ == '__main__':
main()
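# For reference, a minimal sketch of the structure annuaire-v0.2.xml is assumed
# to have, reconstructed only from the tag names queried above (nom, prenom,
# telephone with a "type" attribute); the names and numbers are hypothetical and
# the real file may contain more fields:
#
# <annuaire>
#   <personne>
#     <nom>Dupont</nom>
#     <prenom>Marie</prenom>
#     <telephone type="fixe">01 02 03 04 05</telephone>
#     <telephone type="portable">06 07 08 09 10</telephone>
#   </personne>
# </annuaire>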
|
[
"noreply@github.com"
] |
noreply@github.com
|
ad784210df07d410b4d9d0b3795e111aa61b9193
|
b7453e5a2700f2017a6f783eaf3990ee2486cd65
|
/test/utils/test_clean_identity.py
|
54c6c0a2df4ef8f53c92989877f93ce940c57635
|
[
"Apache-2.0"
] |
permissive
|
LaRiffle/cleaning-scripts
|
8525164cca8336b67a2362d6907414e27ca088fa
|
08f360721056d30befe8d58ded583a4a5d126184
|
refs/heads/master
| 2020-07-28T06:52:47.673033
| 2019-11-19T15:26:19
| 2019-11-19T15:26:19
| 209,343,798
| 0
| 0
|
Apache-2.0
| 2019-09-20T13:13:25
| 2019-09-18T15:33:16
|
Python
|
UTF-8
|
Python
| false
| false
| 233
|
py
|
from scripts import utils
def test_clean_identity():
assert utils.clean_identity(None) == ""
assert utils.clean_identity("NaN") == ""
row_input = "Holà chicanos"
assert utils.clean_identity(row_input) == row_input
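# A minimal reference sketch of what utils.clean_identity could look like,
# inferred purely from the assertions above (hypothetical -- the real
# implementation in scripts.utils may differ):
def _reference_clean_identity(value):
    # Missing values and the literal string "NaN" are treated as empty
    # identities; anything else passes through unchanged.
    if value is None or value == "NaN":
        return ""
    return value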
|
[
"theo.leffyr@gmail.com"
] |
theo.leffyr@gmail.com
|
d85cf1dc2e0922928193d390f42218e83afbb210
|
35665123a96d6e97deb9f2e8761e0415ea9eb620
|
/Mean_Stddev_Calculator.py
|
fbfae1a14808b211dad6b083ae78c28d4bcb83d7
|
[] |
no_license
|
ArvindSinghRawat/TransportModeDetection
|
8f7eee3608a1bca612477c3b746cd893ad404986
|
5cfb7a9c735b07f4d0f5b7103f3f6a429c2369a5
|
refs/heads/master
| 2020-05-20T14:51:18.116449
| 2019-05-08T17:04:54
| 2019-05-08T17:04:54
| 185,630,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 729
|
py
|
import pandas as pd
import numpy as np
def desc(path="data/Arvind 2000.csv", cname="speed (m/s)", export=False):
    """Return summary statistics (mean, quartiles, std dev, count) for one CSV column."""
    data = pd.read_csv(path)[cname]
d = dict()
data = data.fillna(0)
d['Mean'] = data.mean()
dq = np.percentile(data,(0,25,50,75,100))
d['Min'] = dq[0]
d['1Q'] = dq[1]
d['Median'] = dq[2]
d['3Q'] = dq[3]
d['Max'] = dq[4]
d['Std dev'] = data.var() ** 0.5
d['Count'] = len(data)
if export == True:
t= path.split('.')[0]
t= t.split('/')
target = t[0]+'/details/'+t[1]+' Details.csv'
with open(target, 'w') as f:
for key in d.keys():
f.write("%s,%s\n"%(key,d[key]))
return d
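# Example usage, kept as a sketch: the default CSV path and column name above
# are assumed to exist in your data layout.
if __name__ == '__main__':
    stats = desc(export=False)
    for key, value in stats.items():
        print(key, value)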
|
[
"noreply@github.com"
] |
noreply@github.com
|
5424dc7cc6bf622625e5e0f9736e273cc18d0a4a
|
323c59f60860a2dddbecf4c45974bcc80210ae78
|
/blog/migrations/0002_post_published_date.py
|
300367b7885f65d66c47ebdbc57833f143e1e821
|
[] |
no_license
|
rymlassoued/my-first-blog
|
5f6157fd422fb09339fe4cb7ac0f23d37d711118
|
1f6022df5c7961b88f8640d2b6359510cb77a2d8
|
refs/heads/master
| 2021-01-19T00:30:02.990413
| 2017-04-04T17:32:46
| 2017-04-04T17:32:46
| 87,175,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-04 10:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='published_date',
field=models.DateTimeField(blank=True, null=True),
),
]
|
[
"rlaswed@softcatalyst.com"
] |
rlaswed@softcatalyst.com
|
cdb47a6903ec47437a630d38f1c3ff0a42120182
|
29061a9fe7701581facbd7eb5066a05c6e3e8878
|
/fptl_types/__init__.py
|
db20bc1028a22b8848897fb7bc16f46e01733ea9
|
[] |
no_license
|
ivan-bocharov/fptl-inferer
|
4f629da3b4276c2cc20beb4b656a575fcd8887bc
|
3de88dc2945db6cb3e49eb08bf1c57f1a9a838c4
|
refs/heads/master
| 2021-05-26T21:14:40.926757
| 2013-08-13T20:01:29
| 2013-08-13T20:01:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
__author__ = 'ivan-bocharov'
|
[
"bocharovia@gmail.com"
] |
bocharovia@gmail.com
|
9b9a14f2985d9dd1d7bc6ef666b5d40a2a9a5256
|
a7e0784b697b6c57920e16e2f54ea0ed2225c0e0
|
/data/clingen_raw_to_training.py
|
47d0357cb8921e5915cdc80d02e9879fcf3e88c3
|
[] |
no_license
|
rumeysa77/ClinGenML
|
17e1a3786b8711387a61707252307aab13e682c5
|
c3bf6fbf7d0fe6c1311ce0fcfb4e26d8331bbc7d
|
refs/heads/master
| 2023-03-22T04:41:40.669592
| 2021-02-24T09:04:29
| 2021-02-24T09:04:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,815
|
py
|
"""
This file processes the raw excel sheet and extract data
"""
import time
import csv
from collections import defaultdict
from Bio import Entrez
from pathlib import Path
import unicodedata
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
    # as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
# clean text does not tokenize anything!
def clean_text(text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def reduce_whitespace(text):
return ' '.join(text.split())
major_5_panels = {'experimental-studies', 'allele-data', 'segregation-data', 'specificity-of-phenotype', 'case-control'}
label_vocab = ['experimental-studies', 'allele-data', 'segregation-data', 'specificity-of-phenotype', 'case-control']
class DatasetExtractor(object):
def __init__(self, path=None):
self.major_5_pmid_to_panel = defaultdict(set)
header = None
if path is not None:
with open(path, encoding='utf-8', errors='ignore') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
header = line[:-2]
elif line[4] != '': # ClinVar ID cannot be null
if line[1] in major_5_panels:
self.major_5_pmid_to_panel[line[2]].add(line[1])
def fetch_title_abstract_keywords(self, one_id):
ids = one_id
Entrez.email = 'leo.niecn@gmail.com'
handle = Entrez.efetch(db='pubmed',
retmode='xml',
id=ids)
results = Entrez.read(handle)
# retrieving for only 1 result
for i, paper in enumerate(results['PubmedArticle']):
abstract = []
if 'Abstract' in paper['MedlineCitation']['Article']:
for section in paper['MedlineCitation']['Article']['Abstract']['AbstractText']:
abstract.append(section)
else:
continue
abstract = " ".join(abstract)
title = paper['MedlineCitation']['Article']['ArticleTitle']
keywords = []
for elem in paper['MedlineCitation']['KeywordList']:
for e in elem:
keywords.append(e)
keywords = ' '.join(keywords)
return title, abstract, keywords
return None
def merge_text(self, title, abstract, keywords, entrez=False):
# a standard function to map
text = ''
if not entrez:
text = title + " || " + " ".join(keywords.split('/')) + " || " + reduce_whitespace(clean_text(abstract))
else:
text = title + " || " + keywords + " || " + reduce_whitespace(clean_text(abstract))
return text
def generate_pmid_panel_set(self, log=False, tqdm=False, notebook=False):
# will call Entrez BioPython to grab abstracts
data = []
pmid_to_data = {}
start = time.time()
cnt = 0
for k, v in self.major_5_pmid_to_panel.items():
cnt += 1
res = self.fetch_title_abstract_keywords(k)
if res is None:
continue # 24940364 is not found...
text = self.merge_text(*res)
# label = ['0'] * len(label_vocab)
label = []
for v_i in v:
label.append(str(label_vocab.index(v_i)))
data.append('\t'.join([text, ' '.join(label)]))
pmid_to_data[k] = '\t'.join([text, ' '.join(label)])
if log:
if cnt % 100 == 0:
print(cnt, time.time() - start, 'secs')
return data, pmid_to_data
def write_data_to_csv(self, data, csv_file_path):
# expect `data` directly from `generate_pmid_panel_set`
with open(csv_file_path, encoding='utf-8', errors='ignore', mode='w') as f:
for line in data:
f.write(line + '\n')
def write_pmid_to_list(self, path):
# it will directly save as "pmids.txt", which is what PubMunch expects
# call this function to generate a list of pmid
# so you can use PubMunch to download
p = Path(path)
p.mkdir(exist_ok=True)
with open('{}/pmids.txt'.format(path), 'w') as f:
for pmid in self.major_5_pmid_to_panel.keys():
f.write(pmid + '\n')
def __sub__(self, other):
assert type(other) == type(self)
new_pmids = set(list(self.major_5_pmid_to_panel.keys())) - set(list(other.major_5_pmid_to_panel))
de = DatasetExtractor()
for pmid in new_pmids:
panel = self.major_5_pmid_to_panel[pmid]
de.major_5_pmid_to_panel[pmid] = panel
return de
if __name__ == '__main__':
# testing
de = DatasetExtractor("../corpus/ML Data (as of 3_17_19).csv")
print(de.merge_text(*de.fetch_title_abstract_keywords("10206684")))
|
[
"leo.niecn@gmail.com"
] |
leo.niecn@gmail.com
|
d74da5f980c51f8a87e1f3491b38cb906651ba91
|
995c52ad5a0a3039ad37a4d2f07b06dcbbcf3961
|
/tantalus/migrations/0059_auto_20180810_1837.py
|
f4ba3f19bfd13e80fa47e558107374b522b8b533
|
[] |
no_license
|
nafabrar/tantalus
|
d02cce3923205191f00b30e80152a0be7c091d6a
|
d8552d40472c29bc617b45a1edaf87c6624b824d
|
refs/heads/master
| 2022-12-24T15:53:52.034999
| 2020-10-07T22:26:35
| 2020-10-07T22:26:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-10 18:37
from __future__ import unicode_literals
from django.db import migrations
def populate_sequence_file_info(apps, schema_editor):
FileResource = apps.get_model('tantalus', 'FileResource')
SequenceFileInfo = apps.get_model('tantalus', 'SequenceFileInfo')
for file_resource in FileResource.objects.all():
sequence_file_info = SequenceFileInfo(
file_resource=file_resource,
owner=file_resource.owner,
read_end=file_resource.read_end,
genome_region=file_resource.genome_region,
index_sequence=file_resource.index_sequence,
)
sequence_file_info.save()
class Migration(migrations.Migration):
dependencies = [
('tantalus', '0058_historicalsequencefileinfo_sequencefileinfo'),
]
operations = [
migrations.RunPython(populate_sequence_file_info)
]
|
[
"andrew.mcpherson@gmail.com"
] |
andrew.mcpherson@gmail.com
|
740f614b0665f2538f1e0cc1c16f2877b2961e47
|
e3393d4d4bdf6684bfba47817e60f96c69ec9475
|
/utils.py
|
e5e14bd563560eb049fb0d71e1e6075e2ae90dba
|
[] |
no_license
|
INFO3401/problem-set-three-brru7260
|
402f9f38e15c8e0ca115c9be338ee48597ae89d8
|
e80c26e2ee33de14439c664717674a91c686e40a
|
refs/heads/master
| 2021-01-09T04:01:48.068480
| 2020-02-24T04:09:39
| 2020-02-24T04:09:39
| 242,239,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
import pandas as pd
def loadAndCleanData():
item = pd.read_csv("creditData.csv")
data = item.fillna(0)
print(data)
loadAndCleanData()
def computeProbability(feature, bin_range, data):
    # count the number of datapoints in the bin
    count = 0.0
    for _, datapoint in data.iterrows():
        # see if the data is in the right bin
        if bin_range[0] <= datapoint[feature] < bin_range[1]:
            count += 1
    # count the total number of datapoints
    totalData = len(data)
    # divide the number of people in the bin by the total number of people
    probability = count / totalData
    # return the result
    return probability
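# Example usage (sketch: 'age' and the bin edges below are hypothetical --
# substitute a numeric column that actually exists in creditData.csv):
# data = pd.read_csv("creditData.csv").fillna(0)
# print(computeProbability('age', (20, 30), data))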
|
[
"noreply@github.com"
] |
noreply@github.com
|
7792395a268d08840768655085318b6c7c6e3cac
|
0a523ea34500d6c4324fc4b8b5fdf602f08a8a01
|
/РК1/social/mysocial/views.py
|
3b8f44af6b4f6cda41e8000be9edd8b68eb2942b
|
[] |
no_license
|
killkamad/ServerSoftDev
|
9b733c9f11c3a0db876e86dc5bb81dfaa476aab8
|
91248da20143c95cf4304d36e06ae9e9bb3bcb76
|
refs/heads/master
| 2020-04-17T16:26:11.477846
| 2019-04-15T20:33:18
| 2019-04-15T20:33:18
| 166,739,947
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,199
|
py
|
from .models import Post
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from .forms import PostForm, UserRegisterForm
from django.shortcuts import redirect
from django.contrib import messages
# Creating, editing, and viewing posts
def com_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
return render(request, 'mysocial/com_list.html', {'posts': posts})
def com_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'mysocial/com_detail.html', {'post': post})
def com_new(request):
if request.method == "POST":
form = PostForm(request.POST, request.FILES or None)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('com_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'mysocial/com_edit.html', {'form': form})
def com_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST or None, request.FILES or None, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('com_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'mysocial/com_edit.html', {'form': form})
# Registration form
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
            # messages.success(request, f'You have registered as {username} and can log into your account!')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'mysocial/register.html', {'form': form})
|
[
"killka_m@mail.ru"
] |
killka_m@mail.ru
|
d834840becfb2b4385634a3d1ae576a2f68a0bac
|
aa57a888c83252f3e57b5d8f7e61f7c1fe807156
|
/lfd_hw7/HW7_testing_q8.py
|
e85c15091ca0dd5d455d06290b3c39eab03cf8f0
|
[
"MIT"
] |
permissive
|
mosmar99/machine-learning-mooc-caltech
|
178a88a237078347eba2c76bbd05c2b7b9b6726c
|
deca978e13f6d6950f06417c4d520e71904962d7
|
refs/heads/main
| 2023-07-13T20:36:43.217778
| 2021-09-04T08:28:07
| 2021-09-04T08:28:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
# -*- coding: utf-8 -*-
"""
Created on: Wed Jul 7 15:30:17 2021
@author: Mahmut Osmanovic
"""
import matplotlib.pyplot as plt
S = 1
P = 6
S_B_P = 0
its = 5
while its:
print(S, P)
S_B_P += (S > P)
S += 1
P -= 1
its -= 1
print("ANS =", S_B_P)
plt.plot([-1, 1], [-1,1])
|
[
"47375043+MahmutOsmanovic@users.noreply.github.com"
] |
47375043+MahmutOsmanovic@users.noreply.github.com
|
1b5cd48ff39ee1da8dbaf2f526d75d0746e5c1e6
|
f1d9df04036fc43c9e5cc7998b83261f4daa94b8
|
/management_commands/insert_base_data.py
|
cf87a7c11fd7db6f4e396e72c0e9d41bce402ce1
|
[] |
no_license
|
Eaterator/web
|
019eb6547995be30b3468e5c44ecc52f05858fb4
|
9c598607f76ad770c66d85c47ffcec05f92f4d66
|
refs/heads/master
| 2021-01-09T20:30:13.417308
| 2017-04-25T02:44:35
| 2017-04-25T02:44:35
| 81,286,177
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
from application.auth.models import Role
from application.recipe.models import Source
from application.base_models import db
def insert_role_data():
roles = [
{
'name': 'regular',
'type_': 'consumer',
'is_admin': False
},
{
'name': 'corporate',
'type_': 'business',
'is_admin': False
},
{
'name': 'admin',
'type_': 'admin',
'is_admin': True
}
]
if len(Role.query.all()) > 0:
return
for role in roles:
new_role = Role(**role)
db.session.add(new_role)
db.session.commit()
def insert_source_data():
sources = [
{
'base_url': 'foodnetwork.com',
'name': 'Food Network'
},
{
'base_url': 'epicurious.com',
'name': 'Epicurious'
},
{
'base_url': 'therecipedepository.com',
'name': 'The Recipe Depository',
},
{
'base_url': 'allrecipes.com',
'name': 'All Recipes',
},
{
'base_url': 'bonappetit.com',
'name': 'Bon Appetit'
},
{
'base_url': 'food.com',
'name': 'Food'
},
{
'base_url': 'simplyrecipes.com',
'name': 'Simply Recipes'
},
{
'base_url': 'bbcgoodfood.com',
'name': 'BBC Good Food'
},
{
'base_url': 'williams-sonoma.com',
'name': 'Williams Sonoma'
},
{
'base_url': 'finedininglovers.com',
'name': 'Fine Dining Lovers'
},
{
'base_url': 'thekitchn.com',
'name': 'The Kitchn'
},
{
'base_url': 'chowhound.com',
'name': 'Chow'
},
{
'base_url': 'myrecipes.com',
'name': 'My Recipes'
},
{
'base_url': '',
'name': 'Other'
}
]
for source in sources:
exists = Source.query.filter(Source.name == source['name']).all()
if len(exists) <= 0:
new_source = Source(**source)
db.session.add(new_source)
db.session.commit()
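# Typical invocation (sketch: `app` is a placeholder for your Flask application;
# the Role.query / db.session usage above implies an active application context):
# with app.app_context():
#     insert_role_data()
#     insert_source_data()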
|
[
"currahl@yahoo.ca"
] |
currahl@yahoo.ca
|
acf4d5704a2b5145e70f57bf4bd46fcc5c62fa9d
|
a40ea9fa24e25e7dd047c20593762ec39749207a
|
/etc/week3pytyon.py
|
43e3553d0c6bd756ea586fceb6793d18147089bd
|
[] |
no_license
|
mjstella/Sparta_MJ
|
16fbacf7ae08405dad68df3d1fc1d41d2c9d5d5e
|
0f90c058c601cd35572e48d3c593594356526655
|
refs/heads/master
| 2022-10-12T22:33:18.962042
| 2020-06-08T09:12:39
| 2020-06-08T09:12:39
| 265,854,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
from pymongo import MongoClient  # import pymongo (install the package first, of course)
client = MongoClient('localhost', 27017)  # MongoDB runs on port 27017.
db = client.dbsparta  # create a db named 'dbsparta'.
# db.users.insert_one({'name':'bobby','age':21})
# db.users.insert_one({'name':'kay','age':27})
# db.users.insert_one({'name':'john','age':30})
all_users = list(db.users.find())
# print(all_users)
bobbys = list(db.users.find({'name':'bobby'}))
print(bobbys)
# View a result with a specific key excluded
kay = db.users.find_one({'name':'kay'}, {'_id': False})
print(kay)
# db.users.update_one({'name':'john'}, {'$set':{'name':'james', 'age':'20'}})
db.users.update_many({'name':'bobby'}, {'$set':{'occupation':'student'}})
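# A couple of related operations for reference (same collection and fields as
# above; the delete calls are left commented out so nothing is removed by accident):
# db.users.delete_one({'name': 'john'})       # remove the first matching document
# db.users.delete_many({'age': {'$gt': 25}})  # remove every document with age > 25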
|
[
"mjstella918@gmail.com"
] |
mjstella918@gmail.com
|
f34593fe377603414090a24989aab7c88d64fa51
|
f9a579efe76ac5436e767489be6d8143da3c3404
|
/src/apps/helper/middleware.py
|
5ce48db802e3d4bacbf60bbfc817bebe7a76d41f
|
[] |
no_license
|
15879246396/youpai-service
|
2c4db15f8a50de4b6b2ff4204898c3e5217c0258
|
fd91b68f35664e7e853c1fd2cd55919a97a87fa2
|
refs/heads/master
| 2023-01-10T11:30:50.918568
| 2019-10-25T07:52:50
| 2019-10-25T07:52:50
| 205,700,793
| 0
| 0
| null | 2022-12-27T15:35:27
| 2019-09-01T16:08:44
|
Python
|
UTF-8
|
Python
| false
| false
| 7,401
|
py
|
import uuid
import sys
import datetime
import json
from django.core.cache import cache
from django.utils import timezone
from django.conf import settings
import requests
from helper.conf import helper_settings
from helper.log import get_logger
from common.mixin import MiddlewareMixin
logger = get_logger(__name__)
class LoggingMiddleware(MiddlewareMixin):
LOGGER = get_logger('actions')
RESPONSE_LOG = '%s%s%s' % (
'{user_id}|{ip}|{bid}|{sid}|{kjzd_user_id}',
'"{request_method} {request_url}{query_string} {protocol} {status_code} {content_type} {referrer}"',
'|{ua}'
)
def __init__(self, get_response=None):
super(LoggingMiddleware, self).__init__(get_response)
self.SGUID_EXPIRIES = 365 * 1
self.RESPONSE_LOG_FORMAT = self.RESPONSE_LOG.format
self.SGBID_EXPIRIES = 365 * 1
self.SGSID_EXPIRIES = None
self.SGUUID_EXPIRIES = None
@staticmethod
def save_session(request):
if request.user.is_anonymous and request.session.session_key is None:
request.session.save()
# to identify a session
@staticmethod
def set_sid(request, response):
if request.session.get(helper_settings.logging_session_sid_name, None) is None:
request.session[helper_settings.logging_session_sid_name] = uuid.uuid4().hex
return request.session[helper_settings.logging_session_sid_name]
# to identify a browser
@staticmethod
def set_bid(request, response):
bid = request.COOKIES.get(helper_settings.logging_cookie_bid_name, None)
response.set_cookie(
helper_settings.logging_cookie_bid_name,
domain=helper_settings.logging_cookie_domain,
value=bid if bid is not None else uuid.uuid4().hex,
expires=timezone.datetime.now() + timezone.timedelta(days=365)
)
return bid
@staticmethod
def set_kjzd_user_id(request, response):
if hasattr(request, 'user'):
if not request.user.is_anonymous:
kjzd_user_id = request.user.kjzd_user_id
if kjzd_user_id:
response.set_cookie(
helper_settings.logging_cookie_kjzd_user_id_name,
domain=helper_settings.logging_cookie_domain,
value=kjzd_user_id,
expires=None)
return kjzd_user_id
else:
response.delete_cookie(
helper_settings.logging_cookie_kjzd_user_id_name, domain=helper_settings.logging_cookie_domain)
return ''
@staticmethod
def _get_traceback(exc_info=None):
"""Helper function to return the traceback as a string"""
import traceback
return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))
def process_response(self, request, response):
if request.session.get('SGUID', None) is None:
request.session['SGUID'] = str(uuid.uuid1())
SGUID = request.session['SGUID']
response.set_cookie(
'SGUID',
value=SGUID,
expires=self.SGUID_EXPIRIES if not self.SGUID_EXPIRIES else datetime.datetime.now() + datetime.timedelta(
days=self.SGUID_EXPIRIES)
)
SGBID = request.COOKIES.get('SGBID', None)
SGBID = SGBID if SGBID and len(SGBID) == 32 else uuid.uuid1().hex
response.set_cookie(
'SGBID',
value=SGBID,
expires=self.SGBID_EXPIRIES if not self.SGBID_EXPIRIES else datetime.datetime.now() + datetime.timedelta(
days=self.SGBID_EXPIRIES)
)
if hasattr(request, 'user'):
if not request.user.is_anonymous:
SGUUID = request.user.kjzd_user_id
if SGUUID:
response.set_cookie(
'SGUUID',
value=SGUUID,
expires=self.SGUUID_EXPIRIES if not self.SGUUID_EXPIRIES else datetime.datetime.now() + datetime.timedelta(
days=self.SGUUID_EXPIRIES)
)
else:
response.delete_cookie('SGUUID')
SGSID = request.COOKIES.get('SGSID', None)
if not SGSID:
SGSID = uuid.uuid4().hex
response.set_cookie(
'SGSID',
value=SGSID,
expires=self.SGSID_EXPIRIES if not self.SGSID_EXPIRIES else datetime.datetime.now() + datetime.timedelta(
days=self.SGSID_EXPIRIES
))
user_id = request.session.get('_auth_user_id', '')
response.set_cookie(
'user_id',
value=user_id,
expires=self.SGUUID_EXPIRIES if not self.SGUUID_EXPIRIES else datetime.datetime.now() + datetime.timedelta(
days=self.SGUUID_EXPIRIES)
)
sid = self.set_sid(request, response)
bid = self.set_bid(request, response)
kjzd_user_id = self.set_kjzd_user_id(request, response)
query_string = request.META.get('QUERY_STRING', None)
log_text = self.RESPONSE_LOG_FORMAT(
user_id=request.session.get('_auth_user_id', ''),
ip=request.META.get('REMOTE_ADDR', ''),
request_method=request.method,
request_url=request.path,
protocol=request.META.get('SERVER_PROTOCOL', ''),
status_code=response.status_code,
referrer=request.META.get('HTTP_REFERER', ''),
ua=request.META.get('HTTP_USER_AGENT', ''),
query_string='' if not query_string else ''.join(('?', query_string)),
content_type=response.get('content-type', ''),
sid=sid,
bid=bid,
kjzd_user_id=kjzd_user_id,
)
self.LOGGER.info(log_text)
return response
def process_exception(self, request, exception):
if isinstance(exception, helper_settings.logging_except_exceptions):
return
try:
request_repr = repr(request)
except Exception as e:
logger.warning(e)
request_repr = "Request repr() unavailable"
message = "{{{\n%s\n}}}\n\n{{{\n%s\n}}}" % (self._get_traceback(sys.exc_info()), request_repr)
logger.exception(message)
class ClientAuthenticationMiddleware(object):
def process_request(self, request):
access_token = cache.get("CLIENT_SELLERWANT_ACCESS_TOKEN")
if not access_token:
try:
data = {
"grant_type": "client_credentials"
}
reps = requests.post(settings.APP_MANAGE_URL,
auth=(settings.CLIENT_ID, settings.CLIENT_SECERT),
data=data,
headers={"content-type": "application/x-www-form-urlencoded"})
result = json.loads(reps.content)
access_token = result.get("access_token")
cache.set("CLIENT_SELLERWANT_ACCESS_TOKEN", access_token, result.get("expires_in") - 10 * 60)
except Exception:
pass
def process_response(self, request, response):
response.set_cookie('access_token', cache.get("CLIENT_SELLERWANT_ACCESS_TOKEN"))
return response
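# To wire these up (sketch; the dotted paths assume this module is importable as
# helper.middleware, per the file path in the header). LoggingMiddleware builds on
# MiddlewareMixin and fits the new-style MIDDLEWARE setting; the old-style
# ClientAuthenticationMiddleware would need MIDDLEWARE_CLASSES, or a MiddlewareMixin
# wrapper, on recent Django versions:
# MIDDLEWARE = [
#     ...,
#     'helper.middleware.LoggingMiddleware',
# ]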
|
[
"15797731292@163.com"
] |
15797731292@163.com
|
98c818859f8e9449f37db57cf03d9b8bf6bcab06
|
c0a090c54ee58f26f9f3973a50ffc9423a620fa4
|
/bloomberg/sound.py
|
d85772502ebfd0f3a8f17e0b2ae0665c819bea86
|
[] |
no_license
|
guts2014/going-65-in-a-60-zone
|
1b4e41ac9d190e706d267466f892b053e6c42576
|
9958b754ca796c94a6b2dbe09f03549532e93240
|
refs/heads/master
| 2020-04-13T01:05:17.389084
| 2014-10-12T12:12:06
| 2014-10-12T12:12:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext
from sound_mixer import SoundMixer
from API import *
def sounds_page(request):
session = get_new_session()
# BP
historical_data = getCompaniesHistory(session, ['BP/ LN'], 20120101, 20140101, 'MONTHLY')
historical_prices = []
    for record in historical_data:
        historical_prices.append(record['price'])
mixer = SoundMixer()
mixer.add_dataset(historical_prices)
mixer.generate_file('stock1')
# RDSB
historical_data = getCompaniesHistory(session, ['RDSB LN'], 20120101, 20140101, 'MONTHLY')
historical_prices = []
    for record in historical_data:
        historical_prices.append(record['price'])
mixer = SoundMixer()
mixer.add_dataset(historical_prices, 400)
mixer.generate_file('stock2')
return render(request, "sounds.html")
|
[
"velizar.shulev@gmail.com"
] |
velizar.shulev@gmail.com
|
f62e8e139b6ce07d635393ef4c7fe94bede87e36
|
7f747228fd52c835bbbad1d51674f2991b4e0ccb
|
/Veriflow_Automation/Veriflow_BDD_Framework/features/steps/login.py
|
81014629a6cf06c7ce848bf1de42651e05d64f42
|
[] |
no_license
|
adachenski/Python-Automation
|
fec234f9983cee4dcbd2e8d158a944e555167f22
|
2a4bb4c2b33b4ce8a9c66b06f251f6c834973c50
|
refs/heads/master
| 2020-03-19T07:39:03.879326
| 2018-06-16T22:04:02
| 2018-06-16T22:04:02
| 136,135,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
from selenium_logic import login_logic
from behave import step, given, when, then
loginPage = login_logic.Login()
@given(u'Open Chrome Browser')
def open_browser(context):
loginPage.open_chrome()
@when(u'Navigate to Veriflow Login page at "{url}"')
def navigate_to_url(context, url):
loginPage.go_to_login(url)
@when(u'Enter username "{nas}"')
def username(context, nas):
loginPage.enter_username(nas)
@when(u'Enter password "{password}"')
def password(context, password):
loginPage.enter_password(password)
@then(u'Click on Login Tab')
def login(context):
loginPage.login()
@then(u'I close the browser')
def step_impl(context):
    loginPage.close_browser()
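# A matching feature-file scenario, sketched from the step texts above (the URL
# and credentials are placeholders, not values from this repository):
#
#   Scenario: Log in to Veriflow
#     Given Open Chrome Browser
#     When Navigate to Veriflow Login page at "https://example.com/login"
#     And Enter username "myuser"
#     And Enter password "mypassword"
#     Then Click on Login Tab
#     And I close the browser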
|
[
"adachenski@aol.com"
] |
adachenski@aol.com
|
a5a17178600de20cbfc8a242569037482fae9caf
|
fccb5a43179906ddc3dd37849ac2a89cacf44981
|
/sphinx/source/exercises/solution/03_os_sub_req/ex5.py
|
653a604a993839e3b042cfc9ccaf6cd8eba8ff1f
|
[] |
no_license
|
YasmineOweda/spring2021
|
a48c1c4eaa525053a0e2188cf088124b004a35d8
|
072aadba20bfbc659427265fa228518fe4b09ff3
|
refs/heads/master
| 2023-04-29T10:20:14.132211
| 2021-05-11T09:07:40
| 2021-05-11T09:07:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
import os
#1
os.mkdir('os_exercises')
#2
os.chdir('os_exercises')
open('exercise.py', 'w').close()
#3
x = input('Please write something to the file: ')
with open('exercise.py', 'w') as f:
f.write(x)
#4
x = input('Please write something more to another file: ')
with open('exercise2.py', 'w') as f:
f.write(x)
#5
with open('exercise.py', 'r') as f1:
with open('exercise2.py', 'r' ) as f2:
print(f1.read() + f2.read())
|
[
"clbo@kea.dk"
] |
clbo@kea.dk
|
4148112130c6689e5dadc5cb5afc4ff302c3485c
|
e4d9141385ace7f178752469aa3f299cc3ffc6a6
|
/docs/source/conf.py
|
52d04fd1d5650935005a8bf7ac4e548a117dc1ed
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
AJAlabs/kb
|
44924806fd2d7059fa89377cd650859fd934166d
|
9d4f13a53ae08616dae6d5560113b0a27881387b
|
refs/heads/master
| 2020-04-18T18:51:24.751102
| 2019-01-26T22:14:56
| 2019-01-26T22:14:56
| 167,696,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,440
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'AJAlabs Knowledge Base'
copyright = '2019, AJ Acevedo'
author = 'AJ Acevedo'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.md'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AJAlabsKnowledgeBasedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AJAlabsKnowledgeBase.tex', 'AJAlabs Knowledge Base Documentation',
'AJ Acevedo', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ajalabsknowledgebase', 'AJAlabs Knowledge Base Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AJAlabsKnowledgeBase', 'AJAlabs Knowledge Base Documentation',
author, 'AJAlabsKnowledgeBase', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Options for both Markdown and reStructuredText support ------------------
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
|
[
"aj@ajalabs.com"
] |
aj@ajalabs.com
|
db3b4d13adbd04eba6106f6e0d8559771deadcd5
|
61699048dc567cd3a814e5b987599dae175bed19
|
/Python/month01/day15/exercise02.py
|
ba4af22e18080c30f44bdc184166efdfe0b8e96a
|
[] |
no_license
|
Courage-GL/FileCode
|
1d4769556a0fe0b9ed0bd02485bb4b5a89c9830b
|
2d0caf3a422472604f073325c5c716ddd5945845
|
refs/heads/main
| 2022-12-31T17:20:59.245753
| 2020-10-27T01:42:50
| 2020-10-27T01:42:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
"""
练习2:定义函数,根据生日(年月日),计算活了多天.
输入:2010 1 1
输出:从2010年1月1日到现在总共活了3910天
"""
import time
def life_days(year, month, day):
    # now - time of birth
# time_tuple = time.strptime("%d-%d-%d" % (year, month, day), "%Y-%m-%d")
time_tuple = (year, month, day, 0, 0, 0, 0, 0, 0)
life_second = time.time() - \
time.mktime(time_tuple)
return life_second / 60 / 60 / 24
y = 1990
m = 9
d = 18
result = life_days(y, m, d)
print(f"从{y}年{m}月{d}日到现在总共活了{result:.0f}天")
|
[
"1450030827@qq.com"
] |
1450030827@qq.com
|
fe469f3699ada32088a48d4a15051399450c03c8
|
e211657b291dbcb21ed6c587275ff7168e8413f3
|
/models/sys_trans_sp_multi.py
|
7031132db7a25e66d621263bc6d8f743484a1c9a
|
[] |
no_license
|
jehovahxu/chan
|
b64b84a96983dc956e746217fb51451d55ea6d1d
|
572060b45f4a79e09e796ba671851de693817fc2
|
refs/heads/master
| 2020-12-02T05:31:20.537309
| 2020-03-14T15:34:57
| 2020-03-14T15:34:57
| 230,905,070
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,192
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 18-12-26 3:13 PM
# @Author : Jehovah
# @File : systhesis.py
# @Software: PyCharm
import torch
import torch.nn as nn
class Sys_Generator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64):
super(Sys_Generator, self).__init__()
self.en_1 = nn.Sequential(
nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(0.2, True)
)
self.en_2 = nn.Sequential(
nn.Conv2d(ngf, ngf*2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(ngf * 2),
nn.LeakyReLU(0.2, True)
)
self.resblock = nn.Sequential(
ResidualBlock(ngf * 2, ngf * 2),
ResidualBlock(ngf * 2, ngf * 2),
ResidualBlock(ngf * 2, ngf * 2)
)
self.resblock_2 = nn.Sequential(
ResidualBlock(ngf * 2, ngf * 2),
ResidualBlock(ngf * 2, ngf * 2),
ResidualBlock(ngf * 2, ngf * 2)
)
self.resblock_1 = nn.Sequential(
ResidualBlock(ngf * 2, ngf * 2),
ResidualBlock(ngf * 2, ngf * 2),
ResidualBlock(ngf * 2, ngf * 2),
ResidualBlock(ngf * 2, ngf * 2),
ResidualBlock(ngf * 2, ngf * 2),
ResidualBlock(ngf * 2, ngf * 2)
)
self.resblock1 = ResidualBlock(in_channels=512, out_channels=512)
self.resblock2 = ResidualBlock(in_channels=512, out_channels=512)
self.resblock3 = ResidualBlock(in_channels=512, out_channels=512)
self.en1 = nn.Sequential(
nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(0.2, True)
)
self.en2 = nn.Sequential(
nn.Conv2d(ngf, ngf*2, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf*2),
nn.LeakyReLU(0.2, True)
)
self.en3 = nn.Sequential(
nn.Conv2d(ngf*2, ngf * 4, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 4),
nn.LeakyReLU(0.2, True)
)
self.en4 = nn.Sequential(
nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 8),
nn.LeakyReLU(0.2, True)
)
self.en5 = nn.Sequential(
nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 8),
nn.LeakyReLU(0.2, True)
)
self.en6 = nn.Sequential(
nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 8),
nn.LeakyReLU(0.2, True)
)
self.en7 = nn.Sequential(
nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 8),
nn.LeakyReLU(0.2, True)
)
self.en8 = nn.Sequential(
nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
nn.ReLU(True)
)
self.de1 = nn.Sequential(
nn.ConvTranspose2d(ngf * 8,ngf * 8, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True)
)
self.de2 = nn.Sequential(
nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 8),
nn.Dropout(0.5),
nn.ReLU(True)
)
self.de3 = nn.Sequential(
nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 8),
nn.Dropout(0.5),
nn.ReLU(True)
)
self.de4 = nn.Sequential(
nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 8),
nn.Dropout(0.5),
nn.ReLU(True)
)
self.de5 = nn.Sequential(
nn.ConvTranspose2d(ngf * 8 * 2, ngf * 4, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True)
)
self.de6 = nn.Sequential(
nn.ConvTranspose2d(ngf * 8, ngf * 2, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True)
)
self.de7 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(ngf),
nn.ReLU(True)
)
self.de8 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, output_nc,
kernel_size=4, stride=2,
padding=1),
nn.Tanh()
)
self.de8_1 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, output_nc,
kernel_size=4, stride=2,
padding=1),
nn.Tanh()
)
# self.ta2 = Trans_Attn(ngf * 8)
# self.ta3 = Trans_Attn(ngf * 8)
# self.ta4 = Trans_Attn(ngf * 8)
# self.ta5 = Trans_Attn(ngf * 4)
self.ta6 = Trans_Attn(ngf * 2)
self.sp = Spacial_Attn(ngf * 2)
def forward(self, x):
out_en1 = self.en1(x)
out_en2 = self.en2(out_en1)
out_en3 = self.en3(out_en2)
out_en4 = self.en4(out_en3)
out_en5 = self.en5(out_en4)
out_en6 = self.en6(out_en5)
out_en7 = self.en7(out_en6)
out_en8 = self.en8(out_en7)
out_en8 = self.resblock1(out_en8,is_bn=False)
out_en8 = self.resblock2(out_en8,is_bn=False)
out_en8 = self.resblock3(out_en8,is_bn=False)
#decoder
out_de1 = self.de1(out_en8)
out_de1 = torch.cat((out_de1, out_en7), 1)
out_de2 = self.de2(out_de1)
# out_de2 = self.ta2(out_en6, out_de2)
out_de2 = torch.cat((out_de2, out_en6), 1)
out_de3 = self.de3(out_de2)
# out_de3 = self.ta3(out_en5, out_de3)
out_de3 = torch.cat((out_de3, out_en5), 1)
out_de4 = self.de4(out_de3)
# out_de4 = self.ta4(out_en4, out_de4)
out_de4 = torch.cat((out_de4, out_en4), 1)
out_de5 = self.de5(out_de4)
# out_de5 = self.ta5(out_en3, out_de5)
out_de5 = torch.cat((out_de5, out_en3), 1)
out_de6 = self.de6(out_de5)
out_de6 = self.ta6(out_en2, out_de6)
out_de6 = torch.cat((out_de6, out_en2), 1)
out_de7 = self.de7(out_de6)
out_de7 = torch.cat((out_de7, out_en1), 1)
# out_de8 = self.de8(out_de7)
# out_2 = self.de8_1(out_de7)
out_1 = self.en_1(x)
out_1 = self.en_2(out_1)
out_1 = self.resblock_2(out_1)
out_1, out_de7 = self.sp(out_1, out_de7)
out_1 = out_1+out_de7
out_1 = self.resblock(out_1)
out_1 = self.resblock_1(out_1)
out_de8 = self.de8(out_1)
return out_de8
def conv3x3(in_channels, out_channels, stride=1):
return nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=stride, padding=1, bias=False)
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResidualBlock, self).__init__()
self.conv1 = conv3x3(in_channels, out_channels, stride)
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(out_channels, out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
def forward(self, x, is_bn=True):
residual = x
out = self.conv1(x)
if is_bn:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if is_bn:
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Discriminator(nn.Module):
def __init__(self, input_nc, output_nc, ndf=64):
super(Discriminator, self).__init__()
self.cov1 = nn.Sequential(
nn.Conv2d(input_nc + output_nc, ndf, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(0.2, True)
)
self.cov2 = nn.Sequential(
nn.Conv2d(ndf, ndf*2, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(ndf * 2),
nn.LeakyReLU(0.2, True)
)
self.cov3 = nn.Sequential(
nn.Conv2d(ndf*2, ndf * 4, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(ndf * 4),
nn.LeakyReLU(0.2, True)
)
self.cov4 = nn.Sequential(
nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
nn.InstanceNorm2d(ndf * 8),
nn.LeakyReLU(0.2, True)
)
self.cov5 = nn.Sequential(
nn.Conv2d(ndf*8, ndf * 8, kernel_size=3, stride=2, padding=1),
nn.InstanceNorm2d(ndf * 8),
nn.LeakyReLU(0.2, True)
)
self.cov5_1 = nn.Sequential(
nn.AvgPool2d(kernel_size=2, stride=2),
nn.InstanceNorm2d(ndf * 8),
)
self.cov5_2 = nn.Sequential(
nn.AvgPool2d(kernel_size=4, stride=4),
nn.InstanceNorm2d(ndf * 4),
)
self.cov5_3 = nn.Sequential(
nn.AvgPool2d(kernel_size=8, stride=8),
nn.InstanceNorm2d(ndf * 2),
)
self.cls = nn.Sequential(
nn.Conv2d(1408, 1, kernel_size=4, stride=1, padding=1),
nn.Sigmoid()
)
def forward(self, x):
out_cov1 = self.cov1(x)
out_cov2 = self.cov2(out_cov1)
out_cov3 = self.cov3(out_cov2)
out_cov4 = self.cov4(out_cov3)
out_1 = self.cov5(out_cov4)
out_2 = self.cov5_1(out_cov4)
out_3 = self.cov5_2(out_cov3)
out_4 = self.cov5_3(out_cov2)
out = torch.cat((out_1, out_2, out_3, out_4), 1)
out = self.cls(out)
return out
class Spacial_Attn(nn.Module):
def __init__(self, in_dim):
super(Spacial_Attn, self).__init__()
self.chanel_in = in_dim
self.conv = nn.Sequential(
nn.Conv2d(in_dim*2, 1, kernel_size=3, stride=1, padding=1),
nn.Sigmoid()
)
def forward(self, x, y):
xy = torch.cat((x, y), 1)
out = self.conv(xy)
y = y * out
x = x*(1 - out)
return x, y
class Self_Attn(nn.Module):
""" Self attention Layer"""
def __init__(self, in_dim):
super(Self_Attn, self).__init__()
self.chanel_in = in_dim
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1) #
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize, C, width, height = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X CX(N)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N)
proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma * out + x
return out
class Trans_Attn(nn.Module):
""" Self attention Layer"""
def __init__(self, in_dim):
super(Trans_Attn, self).__init__()
self.sa1 = Self_Attn(in_dim)
self.conv1 = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1)
self.sa2 = Self_Attn(in_dim)
self.conv2 = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1)
self.chanel_in = in_dim
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1) #
self.in1 = nn.InstanceNorm2d(in_dim)
self.in2 = nn.InstanceNorm2d(in_dim)
self.in3 = nn.InstanceNorm2d(in_dim)
self.in4 = nn.InstanceNorm2d(in_dim)
self.in5 = nn.InstanceNorm2d(in_dim)
self.in6 = nn.InstanceNorm2d(in_dim)
def forward(self, x, y):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
out_1 = self.sa1(x)
out_1 = self.in1(out_1)
out_2 = self.conv1(out_1)
out_2 = self.in2(out_1+out_2)
out_3 = self.sa2(y)
out_3 = self.in3(out_3)
m_batchsize, C, width, height = out_2.size()
proj_query = self.query_conv(out_2).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X CX(N)
proj_key = self.key_conv(out_2).view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N)
proj_value = self.value_conv(out_3).view(m_batchsize, -1, width * height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma * out + y
        # normalize out
out = self.in4(out)
# out = torch.nn.functional.normalize(out.view(m_batchsize,C, -1), 1).resize(m_batchsize, C, width, height)
out = self.in5(out+out_3)
out_4 = self.conv2(out)
out = self.in6(out+out_4)
return out
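# Quick shape check for the attention blocks (standalone sketch; the batch size,
# channel count and spatial size below are arbitrary, not values from training):
if __name__ == '__main__':
    x = torch.randn(2, 64, 16, 16)
    y = torch.randn(2, 64, 16, 16)
    print(Self_Attn(64)(x).shape)      # torch.Size([2, 64, 16, 16])
    print(Trans_Attn(64)(x, y).shape)  # torch.Size([2, 64, 16, 16])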
|
[
"361857031@qq.com"
] |
361857031@qq.com
|
64d618b9411d6abdd7dd197355558092f58401ff
|
c0b34870921e11882dfacc5cc67e347c3160ed1a
|
/train.py
|
aa9223224e8334d31cedd2412d290c9c3365a3e5
|
[] |
no_license
|
chris-thomas/idn-superresolution-tf2
|
ad2f59c6da43e670a4255335d0edfc86a0dce6b1
|
0b97c859daa80797958f0ea68a6ef385c4fa4335
|
refs/heads/main
| 2023-04-01T15:14:43.809934
| 2021-04-08T14:47:04
| 2021-04-08T14:47:04
| 332,895,607
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,413
|
py
|
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
from numba import cuda
device = cuda.get_current_device()
device.reset()
import time
import tensorflow as tf
from model import evaluate, evaluate_ssim
from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.losses import MeanAbsoluteError
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import Mean
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers.schedules import PiecewiseConstantDecay
physical_devices = tf.config.experimental.list_physical_devices('GPU')
print(physical_devices)
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.set_soft_device_placement(False)
class Trainer:
def __init__(self,
model,
loss,
learning_rate,
checkpoint_dir='./ckpt/idn'):
self.now = None
self.loss = loss
optimizer = Adam(learning_rate)
self.checkpoint = tf.train.Checkpoint(step=tf.Variable(0),
validation=tf.Variable(-1.0),
optimizer=optimizer,
model=model)
self.checkpoint_manager = tf.train.CheckpointManager(checkpoint=self.checkpoint,
directory=checkpoint_dir,
max_to_keep=3)
self.restore()
@property
def model(self):
return self.checkpoint.model
def train(self, train_dataset, valid_dataset, steps, evaluate_every=1000, save_best_only=False):
loss_mean = Mean()
ckpt_mgr = self.checkpoint_manager
ckpt = self.checkpoint
self.now = time.perf_counter()
for lr, hr in train_dataset.take(steps - ckpt.step.numpy()):
ckpt.step.assign_add(1)
step = ckpt.step.numpy()
loss = self.train_step(lr, hr)
loss_mean(loss)
if step % evaluate_every == 0:
loss_value = loss_mean.result()
loss_mean.reset_states()
# Compute metric on validation dataset
validation_value = self.evaluate_ssim(valid_dataset)
duration = time.perf_counter() - self.now
                print(f'{step}/{steps}: loss = {loss_value.numpy():.3f}, SSIM = {validation_value.numpy():.3f} ({duration:.2f}s)')
if save_best_only and validation_value <= ckpt.validation:
self.now = time.perf_counter()
# skip saving checkpoint, no validation improvement
continue
ckpt.validation = validation_value
ckpt_mgr.save()
self.now = time.perf_counter()
@tf.function
def train_step(self, lr, hr):
with tf.GradientTape() as tape:
lr = tf.cast(lr, tf.float32)
hr = tf.cast(hr, tf.float32)
sr = self.checkpoint.model(lr, training=True)
loss_value = self.loss(hr, sr)
gradients = tape.gradient(loss_value, self.checkpoint.model.trainable_variables)
self.checkpoint.optimizer.apply_gradients(zip(gradients, self.checkpoint.model.trainable_variables))
return loss_value
def evaluate(self, dataset):
return evaluate(self.checkpoint.model, dataset)
def evaluate_ssim(self, dataset):
return evaluate_ssim(self.checkpoint.model, dataset)
def restore(self):
if self.checkpoint_manager.latest_checkpoint:
self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint)
print(f'Model restored from checkpoint at step {self.checkpoint.step.numpy()}.')
class IdnTrainer(Trainer):
def __init__(self,
model,
checkpoint_dir,
learning_rate=PiecewiseConstantDecay(boundaries=[50000], values=[1e-4, 5e-5])):
super().__init__(model, loss=MeanAbsoluteError(), learning_rate=learning_rate, checkpoint_dir=checkpoint_dir)
def train(self, train_dataset, valid_dataset, steps=100000, evaluate_every=1000, save_best_only=True):
super().train(train_dataset, valid_dataset, steps, evaluate_every, save_best_only)
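# Typical usage, as a sketch only: `build_model`, `train_ds` and `valid_ds` are
# placeholders for your own model constructor and tf.data pipelines, not names
# defined in this file.
# trainer = IdnTrainer(model=build_model(), checkpoint_dir='./ckpt/idn')
# trainer.train(train_ds, valid_ds, steps=100000, evaluate_every=1000, save_best_only=True)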
|
[
"cdt@christhomas.co.uk"
] |
cdt@christhomas.co.uk
|
0695682f12128d61213cc646b2a539b7cd47827c
|
3f65ba0f9c3217acd418648a04f9dffdb4c35d2d
|
/AutoSubmission.py
|
5969b364cce4baf6a4f8b0a87322130e8db505f0
|
[] |
no_license
|
jorqueraian/AutomaticCanvasUpload
|
a348e79e0a0c0d8fb3c7c329eef10745507ab498
|
e3f05a8fdf28eceacd99f4932c5e0eefb506858d
|
refs/heads/master
| 2020-09-27T17:11:05.822355
| 2019-12-09T20:50:03
| 2019-12-09T20:50:03
| 226,566,809
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,117
|
py
|
# Import the Canvas class
from Student import Student
from StringSimilarity import cost_of_alignment
import os
import sys
import re
USER_PATH = 'C:\\Users\\jorqu\\'
def clean_str(input_str):
# Remove .pdf or what ever
# Make lowercase and remove whitespace
# remove _ or -
return str(input_str).split('.')[0].lower().strip().replace('_', '').replace('-', '').replace(',', '')
def clean_course_code(course_code):
# For a case like CSCI 3104-100--100B--200, this wil return CSCI 3104
cleaned_code = re.sub(r'\D\d{3}\D.*$', '', course_code + ' ')
return cleaned_code
def try_verify_path(local_path):
if os.path.exists(local_path):
return True
split_path = local_path.split('\\')
if split_path[0] == '..':
new_path = os.path.abspath(local_path) # For some reason this isn't working, it returns false
elif split_path[0] == 'Documents' or split_path[0] == 'Downloads' or split_path[0] == 'Desktop':
new_path = USER_PATH + local_path
else:
return False
local_path = new_path
if os.path.exists(str(local_path)):
return True
else:
return False
def find_assignment(student, file_path: str):
file_name = file_path.split('\\')[-1]
cleaned_file_name = clean_str(file_name)
assignments = student.get_upcoming_undated_assignments()
best_match = None
for a in assignments:
combine_str = clean_str(clean_course_code(a[2])+a[0])
# We have the weights because we really only want move around parts of the string rather than replace.
cost = cost_of_alignment(combine_str, cleaned_file_name, 9, 1, 9)
cost_per_char = cost / (len(combine_str)+len(cleaned_file_name))
if best_match is None or cost_per_char < best_match[1]:
best_match = (a, cost_per_char)
return best_match
def auto_upload(student, file_path):
assignment = find_assignment(student, file_path)
course_id = assignment[0][3]
assignment_id = assignment[0][1]
print(f'Submitting Assignment: {assignment[0][0]}\n'
f'Course: {assignment[0][2]}\n'
f'File: {file_path}\n'
f'Cost per character: {assignment[1]}')
confirmation = input('Please confirm(Y/n)').lower()
if confirmation == 'y':
print('Submitting assignment....')
student.make_submission(course_id, assignment_id, file_path)
else:
print('No Submission made')
if __name__ == '__main__':
# For reference: Documents\CSCI_3104_Final_Exam.zip
# Initialize student API
me = Student()
# Verify that a path was provided
if len(sys.argv) < 2:
print('No file selected')
input('Press enter key to exit ...')
else:
path = sys.argv[1]
# Verify correctness of path
if try_verify_path(path):
# Upload to canvas
try:
auto_upload(me, path)
except Exception as e:
input(f'Error: {e}\nPress enter key to exit...')
else:
print(f'File not found: {path}')
input('Press enter key to exit ...')
|
[
"jorqueraian@gmail.com"
] |
jorqueraian@gmail.com
|
aedcc1298924c6a2be19fcb6dd5c47bb3680c3c3
|
394c88bbe556c98ace0301eea9e41410040cfb63
|
/double_dqn/model.py
|
143d562371876c6f8fe4ecc4a9a52cc5ddf4d286
|
[
"MIT"
] |
permissive
|
1980744819/playing-mario-with-DQN
|
8b3dda28e107ff27b3c307cf979179729d352f9f
|
f263e3615bf4439ad17d95a9f449c6145792402b
|
refs/heads/master
| 2020-04-29T14:52:44.215593
| 2019-05-21T13:08:00
| 2019-05-21T13:08:00
| 176,210,471
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : model.py
# @Author: zixiao
# @Date : 2019-04-02
# @Desc :
from torch import nn
import torch.nn.functional as F
import torch
class CNN(nn.Module):
def __init__(self, in_channels, num_action):
super(CNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=2, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.fc4 = nn.Linear(in_features=2 * 2 * 128, out_features=256)
self.fc5 = nn.Linear(in_features=256, out_features=num_action)
def forward(self, x): # 8 200 200
x = self.conv1(x) # 32 24,24
x = self.conv2(x) # 64 5 5
x = self.conv3(x) # 128 2 2
x = self.fc4(x.view(x.size(0), -1))
x = self.fc5(x)
return x
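# Sanity check of the shapes annotated in forward() (sketch; the 8-frame stack
# and 200x200 input size are taken from the comments above, and num_action=6
# is an arbitrary choice for illustration):
if __name__ == '__main__':
    net = CNN(in_channels=8, num_action=6)
    dummy = torch.zeros(1, 8, 200, 200)
    print(net(dummy).shape)  # expected: torch.Size([1, 6])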
|
[
"1980744819@qq.com"
] |
1980744819@qq.com
|
f153b13f654cbb1a501907cba37eddc072dc7fb0
|
edb69ef057593343c86bfc08024422cd8292207f
|
/users/views.py
|
cb18f70307655a0b4d01550adccdde99c04a2aec
|
[] |
no_license
|
MVNDAY/LearningLog
|
27005b3a411bdabaa56c23258d893e06f09caa0d
|
1d4436fdea5610b0e63cdb55ebbd2d112115d74a
|
refs/heads/master
| 2023-04-13T10:22:05.160792
| 2021-04-26T17:46:26
| 2021-04-26T17:46:26
| 361,832,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse('learning_logs:index'))
def register(request):
if request.method != 'POST':
form = UserCreationForm()
else:
form = UserCreationForm(data=request.POST)
if form.is_valid():
new_user = form.save()
            authenticated_user = authenticate(username=new_user.username, password=request.POST['password1'])
            login(request, authenticated_user)
return HttpResponseRedirect(reverse('learning_logs:index'))
context = {'form' : form}
return render(request, 'register.html', context)
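# --- illustrative wiring (assumed; not part of this file) ---
# A users/urls.py hooking these views up might look like:
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('logout/', views.logout_view, name='logout'),
#         path('register/', views.register, name='register'),
#     ]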
|
[
"83238300+MVNDAY@users.noreply.github.com"
] |
83238300+MVNDAY@users.noreply.github.com
|
10c75430230872f750e9ed2c0a241436c9120a7f
|
b509ef07d752e987f4cb84d1abd4c3a98488a6c7
|
/resources/lib/streamlink/plugins/nownews.py
|
02bd76def1234a8b05929f26bb670853a147f7ba
|
[
"BSD-2-Clause"
] |
permissive
|
Twilight0/script.module.streamlink.base
|
d91245d1a43d6b3191b62a6eb4b1cf70598ed23e
|
c1e4628715a81806586b10323b8cb01424bbb6fc
|
refs/heads/master
| 2021-01-21T04:32:41.658823
| 2020-09-07T20:56:29
| 2020-09-07T20:56:29
| 101,915,967
| 6
| 4
|
BSD-2-Clause
| 2018-01-14T15:20:47
| 2017-08-30T18:31:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,149
|
py
|
import logging
import re
import json
from streamlink.plugin import Plugin
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
class NowNews(Plugin):
    _url_re = re.compile(r"https?://news\.now\.com/home/live")
epg_re = re.compile(r'''epg.getEPG\("(\d+)"\);''')
api_url = "https://hkt-mobile-api.nowtv.now.com/09/1/getLiveURL"
backup_332_api = "https://d7lz7jwg8uwgn.cloudfront.net/apps_resource/news/live.json"
backup_332_stream = "https://d3i3yn6xwv1jpw.cloudfront.net/live/now332/playlist.m3u8"
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
res = self.session.http.get(self.url)
m = self.epg_re.search(res.text)
channel_id = m and m.group(1)
if channel_id:
log.debug("Channel ID: {0}".format(channel_id))
if channel_id == "332":
# there is a special backup stream for channel 332
bk_res = self.session.http.get(self.backup_332_api)
bk_data = self.session.http.json(bk_res)
if bk_data and bk_data["backup"]:
log.info("Using backup stream for channel 332")
return HLSStream.parse_variant_playlist(self.session, self.backup_332_stream)
api_res = self.session.http.post(self.api_url,
headers={"Content-Type": 'application/json'},
data=json.dumps(dict(channelno=channel_id,
mode="prod",
audioCode="",
format="HLS",
callerReferenceNo="20140702122500")))
data = self.session.http.json(api_res)
for stream_url in data.get("asset", {}).get("hls", {}).get("adaptive", []):
return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = NowNews
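# --- usage (illustrative) ---
# With this plugin available to streamlink, playback goes through the
# standard CLI, e.g.:
#
#     streamlink https://news.now.com/home/live best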
|
[
"twilight@freemail.gr"
] |
twilight@freemail.gr
|
ae44d012c58b8558cbbe24ca04dffac1c7d24f70
|
513e555e4ce79e927e2a29ab49b72cc254e9836f
|
/django_2fa_project/urls.py
|
c3e5054055d86ee68c9263143a6e538861190b0e
|
[] |
no_license
|
office3243/django_2fa_project
|
37ad75ee7f9cd6b2fe3748d91fd8cdf857fd283a
|
d1c7e5670805a54b190bbed7de5d94004da71b75
|
refs/heads/master
| 2021-06-17T07:57:40.168767
| 2019-05-28T12:32:47
| 2019-05-28T12:32:47
| 188,831,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
py
|
"""django_2fa_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
urlpatterns = [
path('admin/', admin.site.urls),
url(r'', include('portal.urls', namespace='portal')),
]
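# Note (illustrative): on Django >= 2.0 the same routing can be written
# without django.conf.urls, assuming portal/urls.py sets app_name = 'portal':
#
#     from django.urls import path, include
#
#     urlpatterns = [
#         path('admin/', admin.site.urls),
#         path('', include('portal.urls', namespace='portal')),
#     ]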
|
[
"office3243@gmail.com"
] |
office3243@gmail.com
|
ab0d95439f8363b720d81aa80ae3aa74a0432e28
|
104005986bccea0a4213cbd55d833c95baf2f4fa
|
/drivers/phot_drivers/LCOGT_template_single_request.py
|
c6603728c1e635419c96b9c4a2e6edda588ecfe7
|
[] |
no_license
|
lgbouma/cdips_followup
|
8a92ec9a31b405d316c668a6d42ce10ad47f0501
|
99ac6c6c709f96a58083a5ff7c4cf2d4f0b554a8
|
refs/heads/master
| 2023-08-14T02:33:17.841926
| 2023-08-01T00:46:19
| 2023-08-01T00:46:19
| 206,371,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,229
|
py
|
"""
Given a source_id, make LCOGT photometry followup requests, and optionally
submit them to the LCOGT API.
"""
import numpy as np
from astropy.time import Time
from cdips_followup.manage_ephemerides import (
query_ephemeris, get_ephemeris_uncertainty
)
from cdips_followup.LCOGT_dedicated_requests import (
get_dedicated_request,
given_dedicated_requests_validate_submit
)
from astrobase.services.identifiers import tic_to_gaiadr2
TRANSITTYPEDICT = {
'all': ['OIBEO', 'IBEO', 'OIBE', 'OIB', 'BEO'],
'partials': ['OIB', 'BEO'],
'totals': ['OIBEO', 'IBEO', 'OIBE'],
'fulltotals': ['OIBEO']
}
def main():
##########################################
# CHANGE BELOW
    savstr = '20230419_tic402980664_23B' # e.g., 20191207_TOI1098_request_2m_tc_secondary; include "ephemupdate" if this is an ephemeris update (that cancels pending observations)
overwrite = 1
validate = 0
submit = 0
tic_id = '402980664' # '120105470'
source_id = None # '6113920619134019456' # can use instead of TIC
filtermode = 'ip'# 'zs', 'gp', 'ip'
#telescope_class = '1m0' # '1m0', '2m0', 'special'
telescope_class = 'special' # '1m0', '2m0', 'special'
ipp_value = 1 # usually 1
#max_search_time = Time('2022-12-31 23:59:00')
max_search_time = Time('2024-01-31 23:59:00')
verify_ephemeris_uncertainty = 1 # require t_tra uncertainty < 2 hours
inflate_duration = 0 # if t_tra uncertainty > 1 hour, inflate tdur by +/- 45 minutes per side
transit_type = 'totals' # see above
max_n_events = 99 # else None. n_events is per eventclass.
raise_error = False # raise an error if max_duration_error flag raised.
max_duration_error = 30 # the submitted LCOGT request must match requested durn to within this difference [minutes]
sites = ['Palomar'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
#sites = ['Keck Observatory'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
#sites = ['Cerro Paranal'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory']
force_acceptability = 50 # None or int.
# CHANGE ABOVE
##########################################
max_airmass_sched = 2.5
manual_ephemeris = False
manual_ephemeris = True # FIXME
create_eventclasses = TRANSITTYPEDICT[transit_type]
submit_eventclasses = TRANSITTYPEDICT[transit_type]
if source_id is None:
assert isinstance(tic_id, str)
source_id = tic_to_gaiadr2(tic_id)
if manual_ephemeris:
period = 18.559/24
period_unc = 0.001/24
epoch = 2457000 + 1791.2972827806442
epoch_unc = 1e-5
duration = 1.04
else:
# get ephemeris from ephemerides.csv
d = query_ephemeris(source_id=source_id)
period, epoch, duration = (
d['period'], d['epoch'], d['duration']
)
period_unc, epoch_unc, duration_unc = (
d['period_unc'], d['epoch_unc'], d['duration_unc']
)
if verify_ephemeris_uncertainty:
delta_t_tra_today = (
get_ephemeris_uncertainty(epoch, epoch_unc, period, period_unc, epoch_obs='today')
)
if delta_t_tra_today*24 < 0:
msg = f'ERR! Got negative ephem unc of {delta_t_tra_today*24:.1f} hr. Need to give a believable ephem unc..'
raise ValueError(msg)
if delta_t_tra_today*24 > 2:
msg = f'ERR! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is too high.'
raise ValueError(msg)
if delta_t_tra_today*24 > 1:
msg = f'WRN! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is risky.'
print(msg)
else:
msg = f'INFO! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is fine.'
print(msg)
if inflate_duration:
assert verify_ephemeris_uncertainty
if delta_t_tra_today*24 > 1:
            msg = f'... inflating transit duration for scheduling purposes by 1.5 hours.'
print(msg)
duration += 1.5 # add
# "requests" is a list of lists. Higher level is each eventclass. Level
# below is each event, in that eventclass.
requests = get_dedicated_request(
savstr, source_id, period, epoch, duration, create_eventclasses,
overwrite=overwrite, max_search_time=max_search_time,
filtermode=filtermode, telescope_class=telescope_class,
ipp_value=ipp_value, sites=sites,
force_acceptability=force_acceptability,
max_airmass_sched=max_airmass_sched
)
# if a maximum number of events is set, impose it!
if isinstance(max_n_events, int):
_requests = []
for ix in range(len(create_eventclasses)):
print('starting with {} {} events.'.
format(len(requests[ix]), create_eventclasses[ix])
)
for eventclass in requests:
_eventclass = []
starttimes = []
for req in eventclass:
starttimes.append(req['requests'][0]['windows'][0]['start'])
# sort by start time, cut to get the closest ones.
sort_times = np.sort(starttimes)
sel_times = sort_times[ : max_n_events]
for req in eventclass:
starttime = req['requests'][0]['windows'][0]['start']
if starttime in sel_times:
_eventclass.append(req)
if len(_eventclass) > 0:
_requests.append(_eventclass)
if len(_requests) == 0:
print('WRN!: got no times')
return
assert len(_requests[0]) <= max_n_events
requests = _requests
print('WRN!: trimmed to {} events.'.format(len(requests[0])))
if len(sel_times)>0:
print('WRN!: max time: \n{}'.format(repr(sel_times[-1])))
print('\nWRN!: selected times: \n{}'.format(repr(sel_times)))
else:
print('WRN!: got no times')
given_dedicated_requests_validate_submit(
requests, submit_eventclasses, validate=validate, submit=submit,
max_duration_error=max_duration_error, raise_error=raise_error
)
if __name__ == "__main__":
main()
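# --- background sketch (illustrative; the real computation lives in
# cdips_followup.manage_ephemerides.get_ephemeris_uncertainty) ---
# Propagating independent epoch and period errors to transit number n
# gives the usual quadrature sum, which is what the 1-2 hour checks
# above are guarding:
#
#     sigma_t(n) = sqrt(epoch_unc**2 + (n * period_unc)**2)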
|
[
"bouma.luke@gmail.com"
] |
bouma.luke@gmail.com
|
ebce17fb0dd02ef5af320607dbcfad78bb6aec8c
|
dcd0fb6bdcb488dd2046778eb02edce8f4623b58
|
/object_follow_edgetpu/detect_standalone.py
|
7e196dbb4d1727616b1a5ec9f56384351df24223
|
[] |
no_license
|
openbsod/Adeept_AWR
|
12f2df24bfcf85d7965a425bb0078b2c858e807a
|
92ca5e7147a9cb44ad55f55a467371648dc76b3c
|
refs/heads/master
| 2023-04-09T07:06:35.772918
| 2021-04-15T21:20:40
| 2021-04-15T21:20:40
| 284,012,618
| 1
| 0
| null | 2020-07-31T10:46:50
| 2020-07-31T10:46:49
| null |
UTF-8
|
Python
| false
| false
| 4,801
|
py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Object detection demo.
This demo script requires Raspberry Pi Camera, and pre-compiled mode.
Get pre-compiled model from Coral website [1]
[1]: https://dl.google.com/coral/canned_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite
"""
from edgetpu.detection.engine import DetectionEngine
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import numpy as np
import time
import io
import picamera
# https://github.com/waveform80/picamera/issues/383
def _monkey_patch_picamera():
original_send_buffer = picamera.mmalobj.MMALPortPool.send_buffer
def silent_send_buffer(zelf, *args, **kwargs):
try:
original_send_buffer(zelf, *args, **kwargs)
except picamera.exc.PiCameraMMALError as error:
if error.status != 14:
raise error
picamera.mmalobj.MMALPortPool.send_buffer = silent_send_buffer
# Read labels.txt file provided by Coral website
def _read_label_file(file_path):
with open(file_path, 'r', encoding="utf-8") as f:
lines = f.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
# Main loop
def main():
model_filename = "mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
label_filename = "coco_labels.txt"
engine = DetectionEngine(model_filename)
labels = _read_label_file(label_filename)
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480
fnt = ImageFont.load_default()
# To view preview on VNC,
# https://raspberrypi.stackexchange.com/a/74390
with picamera.PiCamera() as camera:
_monkey_patch_picamera()
camera.resolution = (CAMERA_WIDTH, CAMERA_HEIGHT)
camera.framerate = 15
camera.rotation = 180
_, width, height, channels = engine.get_input_tensor_shape()
print("{}, {}".format(width, height))
overlay_renderer = None
camera.start_preview()
try:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream,
format='rgb',
use_video_port=True):
# Make Image object from camera stream
stream.truncate()
stream.seek(0)
                frame = np.frombuffer(stream.getvalue(), dtype=np.uint8)  # renamed from 'input' to avoid shadowing the builtin
                frame = frame.reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
                image = Image.fromarray(frame)
# image.save("out.jpg")
# Make overlay image plane
img = Image.new('RGBA',
(CAMERA_WIDTH, CAMERA_HEIGHT),
(255, 0, 0, 0))
draw = ImageDraw.Draw(img)
# Run detection
start_ms = time.time()
results = engine.DetectWithImage(image,
threshold=0.2, top_k=10)
elapsed_ms = (time.time() - start_ms)*1000.0
if results:
for obj in results:
box = obj.bounding_box.flatten().tolist()
box[0] *= CAMERA_WIDTH
box[1] *= CAMERA_HEIGHT
box[2] *= CAMERA_WIDTH
box[3] *= CAMERA_HEIGHT
# print(box)
# print(labels[obj.label_id])
draw.rectangle(box, outline='red')
draw.text((box[0], box[1]-10), labels[obj.label_id],
font=fnt, fill="red")
camera.annotate_text = "{0:.2f}ms".format(elapsed_ms)
if not overlay_renderer:
overlay_renderer = camera.add_overlay(
img.tobytes(),
size=(CAMERA_WIDTH, CAMERA_HEIGHT), layer=4, alpha=255)
else:
overlay_renderer.update(img.tobytes())
finally:
if overlay_renderer:
camera.remove_overlay(overlay_renderer)
camera.stop_preview()
if __name__ == "__main__":
main()
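# --- note on coordinates (illustrative) ---
# DetectionEngine returns bounding boxes normalized to [0, 1] as
# [x_min, y_min, x_max, y_max]; the loop above maps them to pixel space
# by multiplying the x coordinates by CAMERA_WIDTH and the y coordinates
# by CAMERA_HEIGHT before drawing.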
|
[
"you@example.com"
] |
you@example.com
|
3d59d39806860870c249b7daa8a0f68c3b343f39
|
15c70a52bb2a4b5b3bdd4a62d97bbe27e3989a24
|
/src_old/manual_colony_picking.py
|
5fb61b2e322da571f18b6cbdaeddc1d0a3c7526e
|
[
"MIT"
] |
permissive
|
nadimest/opentrons-colony-picker
|
91cbcc7a80fe9cefa0c8f45bab7ac51a715b87e6
|
8097f12cab91398377463a3a76367b73fa0a2318
|
refs/heads/master
| 2021-01-01T13:29:13.253905
| 2020-07-29T14:27:49
| 2020-07-29T14:27:49
| 239,299,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 980
|
py
|
#!/usr/bin/env python3
import subprocess
from lib.coordinatesManagement import CoordinatesManager
from lib.imageHandling import ImageHandler
coordinates=CoordinatesManager(calibration_filename="calib/calibration.json")
def main():
# takeImage=subprocess.run("./server_scripts/moveAndTakePicture.sh")
fetchImage=subprocess.run("./server_scripts/fetchPicturefromServer.sh")
image=ImageHandler("data/colonies.jpg",coordinates)
while True:
image.showImage(circles=coordinates.getPoints())
c=image.getPressedKey()
if c==ord('q'):
break
if c==ord('s'):
coordinates.writeCoordinatesFile(filename="data/coordinates.json")
image.saveImage("data/colonies_processed.jpg")
for coord in coordinates.coord_transformed:
print(coord)
fetchImage=subprocess.run("./server_scripts/pushColoniesToServer.sh")
break
if __name__ == "__main__":
main()
|
[
"nadim@enginzyme.com"
] |
nadim@enginzyme.com
|
a658a0212b71fb6327314f0662b6143017559bc1
|
df2cbe914f463ad050d7ed26194424afbe3a0a52
|
/addons/snailmail/models/mail_notification.py
|
a368c0a778338b68f037181c93c3d78bffc3f691
|
[
"Apache-2.0"
] |
permissive
|
SHIVJITH/Odoo_Machine_Test
|
019ed339e995be980606a2d87a63312ddc18e706
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
refs/heads/main
| 2023-07-16T16:23:14.300656
| 2021-08-29T11:48:36
| 2021-08-29T11:48:36
| 401,010,175
| 0
| 0
|
Apache-2.0
| 2021-08-29T10:13:58
| 2021-08-29T10:13:58
| null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
# -*- coding: utf-8 -*-
from odoo import fields, models
class Notification(models.Model):
_inherit = 'mail.notification'
notification_type = fields.Selection(selection_add=[('snail', 'Snailmail')], ondelete={'snail': 'cascade'})
letter_id = fields.Many2one('snailmail.letter', string="Snailmail Letter", index=True, ondelete='cascade')
failure_type = fields.Selection(selection_add=[
('sn_credit', "Snailmail Credit Error"),
('sn_trial', "Snailmail Trial Error"),
('sn_price', "Snailmail No Price Available"),
('sn_fields', "Snailmail Missing Required Fields"),
('sn_format', "Snailmail Format Error"),
('sn_error', "Snailmail Unknown Error"),
])
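# Note (illustrative): selection_add extends the parent field's selection
# list in place, and the ondelete policy on notification_type above tells
# Odoo to cascade-delete notifications of type 'snail' if the snailmail
# module is ever uninstalled.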
|
[
"36736117+SHIVJITH@users.noreply.github.com"
] |
36736117+SHIVJITH@users.noreply.github.com
|
c27f4477eaa529faa393007fcdb5b9fda759771e
|
b5865b795c4e743cca80a6e0ea480ecc0d0a35fd
|
/My_Second_Project/Login_app/admin.py
|
fc666635eae6f732ecd40552bd516ac51555026b
|
[] |
no_license
|
kazi-akib-abdullah/django-deployment
|
384ee355efacf58a66c66b4d768b86750c70d98e
|
c2ef330018b71fbb42c3ee9a5a0fba78d1d33473
|
refs/heads/master
| 2023-02-26T03:01:39.140332
| 2021-01-30T16:42:06
| 2021-01-30T16:42:06
| 334,459,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
from django.contrib import admin
from Login_app.models import UserInfo
# Register your models here.
admin.site.register(UserInfo)
|
[
"45953236+kazi-akib-abdullah@users.noreply.github.com"
] |
45953236+kazi-akib-abdullah@users.noreply.github.com
|
0fca165af2a23670c0fdd4db934637cc1abf3c10
|
77531ad16a3ddf7aa92b7b4de809cce2a96c88a5
|
/sitetables/toolbox/sources.py
|
53a8ff4e69c31bffc800f47c48937200b5f4ad69
|
[] |
no_license
|
idlesign/django-sitetables
|
6d3ed6b534e51c67704528d6fa1be0bc6f9f64f4
|
008b748919ee330da168d4766cd6b3c3c27e45b8
|
refs/heads/master
| 2022-02-17T21:25:26.430653
| 2022-02-04T12:46:19
| 2022-02-04T12:46:19
| 164,444,235
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,028
|
py
|
import re
from collections import namedtuple
from itertools import chain
from typing import Optional, List, Union, Dict, Type, Tuple
from django.db.models import QuerySet, Model
from django.http import HttpRequest, JsonResponse
from django.urls import reverse
from .columns import TableColumn
if False: # pragma: nocover
from .tables import Table
TypeTableSource = Union[Dict, List[Dict], Type[Model], QuerySet]
TypeTableColumns = Dict[str, TableColumn]
TypeFilteredItems = Union[QuerySet, List]
TypeServerItems = Tuple[int, int, TypeFilteredItems]
TypePreparedItems = List[Dict[str, str]]
TableItemsFilter = namedtuple('TableItemsFilter', [
'start',
'length',
'search',
'order',
])
class TableSource:
"""Base data source for tables."""
columns: TypeTableColumns
_columns_by_idx: Dict[int, TableColumn]
_url_responder = None
_RE_COLUMN_DEF = re.compile(r'\[(\d+)\]\[([a-z]+)\]')
def __init__(self, source, options: Optional[dict] = None):
self.columns = {}
self._columns_by_idx = {}
self.row_id = 'DT_RowId'
self.options = options or {}
self._rows = []
self._bootstrap(source)
@classmethod
def spawn(cls, source, params: dict) -> 'TableSource':
"""Alternative constructor.
:param source:
:param params:
"""
return cls(source, options=params.get('options'))
def _server_get_filter(self, source: dict) -> TableItemsFilter:
"""Returns a filter object composed from source dictionary
(e.g. POST params).
:param source:
"""
by_idx = self._columns_by_idx
re_def = self._RE_COLUMN_DEF
order = []
length_default = 10
length = int(source.get('length', length_default))
if length > 5000:
length = length_default
start = int(source.get('start', 0))
items_filter = TableItemsFilter(
start=start,
length=length,
search=source.get('search[value]', '').strip() or '',
order=order,
)
source = dict(sorted(source.items(), key=lambda item: item[0]))
for key, val in source.items():
if key.startswith('order'):
match = re_def.search(key)
if not match:
continue
if match.group(2) == 'dir':
continue
column_idx = int(val)
column_name = by_idx.get(column_idx)
if not column_name:
continue
order_desc = source.get(f'order[{match.group(1)}][dir]', 'asc') == 'desc'
order.append(f"{'-' if order_desc else ''}{column_name}")
return items_filter
def _server_get_items(self, items_filter: TableItemsFilter = None) -> TypeServerItems:
"""Must return serverside items filtered using th given filter.
:param items_filter:
"""
raise NotImplementedError # pragma: nocover
def _server_prepare_items(self, items: TypeFilteredItems) -> TypePreparedItems:
"""Prepares items for on_server response.
:param items:
"""
return items
def respond(self, request: HttpRequest) -> JsonResponse:
"""
https://datatables.net/manual/server-side
:param request:
"""
source = request.POST
items_filter = self._server_get_filter(source.dict())
count_total, count_filtered, filtered = self._server_get_items(items_filter)
start = items_filter.start
filtered = filtered[start:start+items_filter.length]
filtered = self._server_prepare_items(filtered)
draw = source.get('draw', 1)
draw = int(draw) # As per docs.
out = {
'data': filtered,
'draw': draw,
'recordsTotal': count_total,
'recordsFiltered': count_filtered,
}
return JsonResponse(out)
def _get_columns(self) -> TypeTableColumns:
"""Should return columns dictionary."""
columns = {}
for name, title in self.options.get('columns_add', {}).items():
columns[name] = TableColumn(name=name, title=title)
return columns
def _bootstrap(self, source: TypeTableSource):
"""The place for a source-specific bootstrap."""
columns = self._get_columns()
self.columns = columns
self._columns_by_idx = {idx: column for idx, column in enumerate(columns)}
def contribute_to_config(self, config: dict, table: 'Table'):
"""Updates table configuration dictionary with source-specific params.
:param config:
:param table:
"""
config.update({
'createdRow': lambda: (
"function(row, data, idx){var v=data['%s']; if (v){$(row).attr('data-id', v);}}" % self.row_id),
'processing': True,
'columns': [column.as_dict() for column in self.columns.values()],
})
options = self.options
if options.get('on_server', False):
url_responder = self._url_responder
if url_responder is None:
url_responder = self.__class__._url_responder = reverse('sitetables:respond')
config.update({
'serverSide': True,
'ajax': {
'url': url_responder,
'type': 'POST',
'data': {
'tableName': table.name,
}
},
})
else:
if not options.get('init_dom'):
# todo maybe use serialization instead of string casting
# todo FK support
config['data'] = [{k: f'{v}' for k, v in row.items()} for row in self.rows]
@property
def rows(self) -> List[dict]:
"""Represents table rows."""
return self._rows
class ListDictsSource(TableSource):
"""Static data source.
.. code-block:: python
source = [
{
'one': '1',
'two': '2',
},
{
'one': '3',
'two': '4',
},
]
"""
def _bootstrap(self, source: TypeTableSource):
names = list(source[0].keys())
self.options['columns_add'] = dict.fromkeys(names, '')
self._rows = source
self.row_id = names[0] # Use first column value.
super()._bootstrap(source)
class ModelSource(TableSource):
"""Django model datasource.
.. code-block:: python
source = Article # Model class.
source = Article.objects.filter(hidden=False) # Or a QuerySet.
"""
model: Type[Model] = None
def _get_columns(self) -> TypeTableColumns:
columns = {}
meta = self.model._meta
for field in chain(meta.concrete_fields, meta.private_fields, meta.many_to_many):
name = field.name
columns[name] = TableColumn(name=name, title=field.verbose_name, source=field)
columns.update(super()._get_columns())
return columns
def _bootstrap(self, source: TypeTableSource):
if isinstance(source, QuerySet):
model = source.model
qs = source
else:
# Model class
model = source
qs = model.objects.all()
self.model = model
self.qs = qs
self.row_id = model._meta.pk.name
super()._bootstrap(source)
def _server_get_items(self, items_filter: TableItemsFilter = None) -> TypeServerItems:
qs = self.qs
filter_kwargs = {}
search = items_filter.search
if search:
filter_kwargs['title__contains'] = search
objects = qs.filter(**filter_kwargs)
count_total = qs.count()
count_filtered = objects.count()
order = items_filter.order
if order:
objects = objects.order_by(*order)
return count_total, count_filtered, objects
def _server_prepare_items(self, items: TypeFilteredItems) -> TypePreparedItems:
dicts = []
columns = self.columns
for model in items:
item_data = {}
for column_name, column in columns.items():
if column.source is None:
# Model property.
item_data[column_name] = getattr(model, column_name)
else:
# Model field.
item_data[column_name] = column.source.value_from_object(model)
dicts.append(item_data)
return dicts
@property
def rows(self) -> List[dict]:
columns = self.columns
_, _, qs = self._server_get_items(TableItemsFilter(
start=0,
length=0,
search='',
order=[],
))
result = qs.values(*columns.keys())
return result
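# --- request shape (illustrative) ---
# In server-side mode a DataTables client POSTs flattened parameters of
# the form described at https://datatables.net/manual/server-side, e.g.:
#
#     draw=1&start=0&length=10&search[value]=foo
#     &order[0][column]=2&order[0][dir]=desc
#
# _server_get_filter() above consumes exactly this shape, with
# _RE_COLUMN_DEF splitting 'order[0][column]' into index and key.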
|
[
"idlesign@yandex.ru"
] |
idlesign@yandex.ru
|
1abcb520636d6bdcf87fda919e2807ba2c94bbaf
|
f9b14f7c366dc16c5dfc24a2478332a8ad14aea3
|
/tests/test_user_func.py
|
e62c3a3eaf07a429e7b28b1e28386eecde4fd4e0
|
[
"MIT"
] |
permissive
|
aleobb/dataframe_expressions
|
fbd2a95de883c87fb4e93195ce719e7ead967231
|
cf135415f739377e9c2accb82606957417c7e0e6
|
refs/heads/master
| 2022-12-29T15:25:54.771541
| 2020-10-21T01:27:15
| 2020-10-21T01:27:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
import ast
import inspect
import pytest
from dataframe_expressions import (
DataFrame, ast_FunctionPlaceholder, ast_DataFrame, render, user_func)
def test_DF_user_func():
@user_func
def func1(x: float) -> float:
assert False
d = DataFrame()
d1 = func1(d)
assert isinstance(d1, DataFrame)
assert d1.child_expr is not None
assert isinstance(d1.child_expr, ast.Call)
f_c = d1.child_expr.func
assert isinstance(f_c, ast_FunctionPlaceholder)
f_sig = inspect.signature(f_c.callable) # type: ignore
assert str(f_sig) == "(x: float) -> float"
args = d1.child_expr.args
assert len(args) == 1
a1 = args[0]
assert isinstance(a1, ast_DataFrame)
def test_DF_user_number_arg():
@user_func
def func1(x: float, y: float) -> float:
assert False
d = DataFrame()
d1 = func1(d, 10.0)
assert isinstance(d1, DataFrame)
assert d1.child_expr is not None
assert isinstance(d1.child_expr, ast.Call)
f_c = d1.child_expr.func
assert isinstance(f_c, ast_FunctionPlaceholder)
f_sig = inspect.signature(f_c.callable) # type: ignore
assert str(f_sig) == "(x: float, y: float) -> float"
args = d1.child_expr.args
assert len(args) == 2
a1 = args[0]
assert isinstance(a1, ast_DataFrame)
a2 = args[1]
assert isinstance(a2, ast.Num)
assert a2.n == 10.0
def test_DF_user_wrong_number_args():
@user_func
def func1(x: float, y: float) -> float:
assert False
d = DataFrame()
with pytest.raises(Exception):
func1(d)
def test_DF_user_two_funcs():
@user_func
def func1(x: float) -> float:
assert False
@user_func
def func2(x: float, y: float) -> float:
assert False
# There should be no confusion between the two functions due to
# some funny lambda semantics
d = DataFrame()
func2(func1(d), func1(d))
def test_DF_user_render():
@user_func
def func1(x: float) -> float:
assert False
d = DataFrame()
d1 = func1(d)
chain, context = render(d1)
assert chain is not None
assert context is not None
assert isinstance(chain, ast.Call)
call = chain # type: ast.Call
assert len(call.args) == 1
a1 = call.args[0] # type: ast.AST
assert isinstance(a1, ast_DataFrame)
assert a1.dataframe is d
assert isinstance(call.func, ast_FunctionPlaceholder)
callable = call.func
f = callable.callable # type: ignore
assert f.__name__ == 'func1'
def test_df_user_render_args():
@user_func
def func1(x: float) -> float:
assert False
d = DataFrame()
d1 = func1(d.jets)
chain, _ = render(d1)
assert chain is not None
assert isinstance(chain, ast.Call)
call = chain # type: ast.Call
assert len(call.args) == 1
a1 = call.args[0] # type: ast.AST
assert isinstance(a1, ast.Attribute)
def test_df_user_render_2args():
@user_func
def func1(x1: float, x2: float) -> float:
assert False
d = DataFrame()
d1 = func1(d.jets, d.jets)
chain, _ = render(d1)
assert chain is not None
assert isinstance(chain, ast.Call)
call = chain # type: ast.Call
assert len(call.args) == 2
a1 = call.args[0] # type: ast.AST
a2 = call.args[1] # type: ast.AST
assert a1 is a2
|
[
"gwatts@uw.edu"
] |
gwatts@uw.edu
|
12d5ee953deb6d64391cd697be86d87d92cfae26
|
aef8fe58f3c272e87dd166e3b9eb3ee4bc7e2d21
|
/To_do/toDoList/myList/forms.py
|
c81e8cef0fec422c82a8c7dad0648d564e02ef20
|
[] |
no_license
|
Olga20011/Django-ToDoList
|
6c43bf83c4f79e38f63a5854d4f6653bf71c6e10
|
758797763135d5e6567ecdc83a30244934778682
|
refs/heads/master
| 2023-08-13T23:13:33.931349
| 2021-09-16T14:21:41
| 2021-09-16T14:21:41
| 392,290,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
from django import forms
from .models import *
class TaskForm(forms.ModelForm):
# title=forms.CharField(widget=forms.TextInput(attrs={'placeholder':'Add new task...'}))
class Meta:
model=MyTask
fields ="__all__"
|
[
"olgaestherakello@gmail.com"
] |
olgaestherakello@gmail.com
|
69aee45e204f7d1a92722373c63d4388faf5d6c9
|
d3542f9f10ecc1c8c2eefe954a386dbbb584cf3b
|
/pykl/kit/models.py
|
b87fd808ea27f116e2c5feb7e8de0737d3b99f86
|
[
"MIT"
] |
permissive
|
wowngasb/pykl
|
7322fa36c0529526273486ea7ffc52108b78cd6f
|
872f97dfaab92cce309078940d1273cf26daed37
|
refs/heads/master
| 2023-06-01T08:19:55.962876
| 2023-05-19T07:36:52
| 2023-05-19T07:36:52
| 101,482,272
| 0
| 0
|
MIT
| 2023-01-11T23:45:19
| 2017-08-26T11:43:49
|
Python
|
UTF-8
|
Python
| false
| false
| 14,637
|
py
|
# coding: utf-8
import random
import time
import hashlib
from inspect import isclass
from git import Repo as GitRepo
from sqlalchemy.inspection import inspect as sqlalchemyinspect
from sqlalchemy.ext.declarative import declarative_base
from pykl.tiny.grapheneinfo import (
_is_graphql,
_is_graphql_cls,
_is_graphql_mutation
)
from pykl.tiny.codegen.utils import (
name_from_repr,
camel_to_underline,
underline_to_camel,
)
from base_type import *
from cmd import db, app
Base = db.Model
class MigrateVersion(Base):
u"""table migrate_version"""
__tablename__ = 'migrate_version'
repository_id = Column(String(191), primary_key=True, doc=u"""field repository_id""", info=CustomField | SortableField)
repository_path = Column(Text, doc=u"""field repository_path""", info=CustomField | SortableField)
version = Column(Integer, doc=u"""field version""", info=CustomField | SortableField)
class Actor(Base):
u"""table kit_actor"""
__tablename__ = 'kit_actor'
    actor_id = Column(Integer, primary_key=True, doc=u"""corresponds to actor_id""", info=SortableField | InitializeField)
actor_name = Column(String(64), doc=u"""field actor_name""", info=CustomField | SortableField)
actor_email = Column(String(64), doc=u"""field actor_email""", info=CustomField | SortableField)
class Blob(Base):
u"""table kit_blob"""
__tablename__ = 'kit_blob'
    blob_id = Column(Integer, primary_key=True, doc=u"""corresponds to blob_id""", info=SortableField | InitializeField)
blob_path = Column(String(64), doc=u"""field blob_path""", info=CustomField | SortableField)
blob_hash = Column(String(40), doc=u"""field blob_hash""", info=CustomField | SortableField)
blob_mode = Column(Integer, doc=u"""field blob_mode""", info=CustomField | SortableField)
blob_size = Column(Integer, doc=u"""field blob_size""", info=CustomField | SortableField)
class Tree(Base):
u"""table kit_tree"""
__tablename__ = 'kit_tree'
    tree_id = Column(Integer, primary_key=True, doc=u"""corresponds to tree_id""", info=SortableField | InitializeField)
tree_path = Column(String(64), doc=u"""field tree_path""", info=CustomField | SortableField)
tree_hash = Column(String(40), doc=u"""field tree_hash""", info=CustomField | SortableField)
tree_mode = Column(Integer, doc=u"""field tree_mode""", info=CustomField | SortableField)
tree_size = Column(Integer, doc=u"""field tree_size""", info=CustomField | SortableField)
@classmethod
def info(cls):
class Tree(SQLAlchemyObjectType):
class Meta:
model = cls
trees = List(lambda :cls, description=u'trees')
def resolve_trees(self, args, context, info):
return [_Tree(tree) for tree in self._tree.trees]
blobs = List(lambda :Blob, description=u'blobs')
def resolve_blobs(self, args, context, info):
return [_Blob(blob) for blob in self._tree.blobs]
            blobfile = Field(lambda :Blob, description=u'associated blob',
                path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_blobfile(self, args, context, info):
path = args.get('path', '')
return search_blobfile(self._tree, path)
            treedir = Field(lambda :Tree, description=u'associated tree',
                path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_treedir(self, args, context, info):
path = args.get('path', '')
return search_treedir(self._tree, path)
return Tree
class Commit(Base):
u"""table kit_commit"""
__tablename__ = 'kit_commit'
    commit_id = Column(Integer, primary_key=True, doc=u"""corresponds to commit_id""", info=SortableField | InitializeField)
commit_hash = Column(String(40), doc=u"""field commit_hash""", info=CustomField | SortableField)
commit_message = Column(String(191), doc=u"""field repo_path""", info=CustomField | SortableField)
committed_date = Column(Integer, doc=u"""field repo_path""", info=CustomField | SortableField)
@classmethod
def info(cls):
class Commit(SQLAlchemyObjectType):
class Meta:
model = cls
            author = Field(lambda :Actor, description=u'associated author')
def resolve_author(self, args, context, info):
author = self._commit.author
return _Actor(author)
            committer = Field(lambda :Actor, description=u'associated committer')
def resolve_committer(self, args, context, info):
committer = self._commit.committer
return _Actor(committer)
            parents = List(lambda :cls, description=u'parent commits')
def resolve_parents(self, args, context, info):
return [_Commit(commit) for commit in self._commit.parents]
            tree = Field(lambda :Tree, description=u'associated tree')
def resolve_tree(self, args, context, info):
tree = self._commit.tree
return _Tree(tree)
            blobfile = Field(lambda :Blob, description=u'associated blob',
                path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_blobfile(self, args, context, info):
path = args.get('path', '')
return search_blobfile(self._commit.tree, path)
            treedir = Field(lambda :Tree, description=u'associated tree',
                path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_treedir(self, args, context, info):
path = args.get('path', '')
return search_treedir(self._commit.tree, path)
return Commit
class Ref(Base):
u"""table kit_ref"""
__tablename__ = 'kit_ref'
    ref_id = Column(Integer, primary_key=True, doc=u"""corresponds to ref_id""", info=SortableField | InitializeField)
ref_path = Column(String(191), doc=u"""field repo_path""", info=CustomField | SortableField)
ref_name = Column(String(191), doc=u"""field repo_path""", info=CustomField | SortableField)
@classmethod
def info(cls):
class Ref(SQLAlchemyObjectType):
class Meta:
model = cls
            commit = Field(lambda :Commit, description=u'associated commit')
def resolve_commit(self, args, context, info):
commit = self._ref.commit
return _Commit(commit)
            blobfile = Field(lambda :Blob, description=u'associated blob',
                path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_blobfile(self, args, context, info):
path = args.get('path', '')
return search_blobfile(self._ref.commit.tree, path)
            treedir = Field(lambda :Tree, description=u'associated tree',
                path=g.Argument(g.String, default_value="", description=u'input your file name')
)
def resolve_treedir(self, args, context, info):
path = args.get('path', '')
return search_treedir(self._ref.commit.tree, path)
            commits = List(lambda :Commit, description=u'commits walking back from this ref',
max_count=g.Argument(g.Int, description=u'input max_count')
)
def resolve_commits(self, args, context, info):
max_count = args.get('max_count', 10)
if max_count <= 0:
return []
return [_Commit(commit) for commit in self._ref.repo.iter_commits(self._ref.name, max_count=max_count)]
return Ref
class Repo(Base):
u"""table kit_repo"""
__tablename__ = 'kit_repo'
    repo_id = Column(Integer, primary_key=True, doc=u"""corresponds to repo_id""", info=SortableField | InitializeField)
repo_path = Column(String(191), doc=u"""field repo_path""", info=CustomField | SortableField)
@classmethod
def info(cls):
class Repo(SQLAlchemyObjectType):
class Meta:
model = cls
            head = Field(lambda :Ref, description=u'look up a head ref by name',
                name=g.Argument(g.String, default_value="master", description=u'input your name')
)
def resolve_head(self, args, context, info):
name = args.get('name', '')
if not name:
return None
ref = self._repo.heads[name]
return _Ref(ref)
            heads = List(lambda :Ref, description=u'head refs')
def resolve_heads(self, args, context, info):
return [_Ref(ref) for ref in self._repo.heads]
            master = Field(lambda :Ref, description=u'the master ref')
def resolve_master(self, args, context, info):
ref = self._repo.heads.master
return _Ref(ref)
            tag = Field(lambda :Ref, description=u'look up a tag by name',
                name=g.Argument(g.String, description=u'input your tag')
)
def resolve_tag(self, args, context, info):
name = args.get('name', '')
if not name:
return None
ref = self._repo.tags[name]
return _Ref(ref)
tags = List(lambda :Ref, description=u'tag')
def resolve_tags(self, args, context, info):
return [_Ref(ref) for ref in self._repo.tags]
return Repo
def search_blobfile(_tree, path):
if not path:
return None
def _resolve_blobfile(blobs, trees):
for blob in blobs:
if path == blob.path:
return _Blob(blob)
for tree in trees:
ret = _resolve_blobfile(tree.blobs, tree.trees) if path.startswith(tree.path) else None
if ret:
return ret
return _resolve_blobfile(_tree.blobs, _tree.trees)
def search_treedir(_tree, path):
if not path:
return None
def _resolve_treedir(trees):
for tree in trees:
if path == tree.path:
return _Tree(tree)
for tree in trees:
ret = _resolve_treedir(tree.trees) if path.startswith(tree.path) else None
if ret:
return ret
return _resolve_treedir(_tree.trees)
def _Actor(actor, actor_id=0):
obj = Actor(actor_id=actor_id, actor_name=actor.name, actor_email=actor.email)
obj._actor = actor
return obj
def _Blob(blob, blob_id=0):
obj = Blob(blob_id=0, blob_path=blob.path, blob_hash=blob.hexsha, blob_mode=blob.mode, blob_size=blob.size)
obj._blob = blob
return obj
def _Tree(tree, tree_id=0):
obj = Tree(tree_id=tree_id, tree_path=tree.path, tree_hash=tree.hexsha, tree_mode=tree.mode, tree_size=tree.size)
obj._tree = tree
return obj
def _Commit(commit, commit_id=0):
obj = Commit(commit_id=commit_id, commit_hash=commit.hexsha, commit_message=commit.message, committed_date=commit.committed_date)
obj._commit = commit
return obj
def _Ref(ref, ref_id=0):
obj = Ref(ref_id=ref_id, ref_name=ref.name, ref_path=ref.path)
obj._ref = ref
return obj
def _Repo(repo, repo_id=0):
obj = Repo(repo_id=repo_id, repo_path=repo.working_dir)
obj._repo = repo
return obj
##############################################################
###################     Root Query      ######################
##############################################################
class Query(g.ObjectType):
    hello = g.String(name=g.Argument(g.String, default_value="world", description=u'input your name'))
deprecatedField = Field(g.String, deprecation_reason = 'This field is deprecated!')
fieldWithException = g.String()
migrateVersion = Field(MigrateVersion, description=u'migrate_version')
repo = Field(Repo, description=u'load repo by path',
repo_path=g.Argument(g.String, description=u'input repo path'),
)
def resolve_repo(self, args, context, info):
repo_path = args.get('repo_path', '')
repo = GitRepo(repo_path)
return _Repo(repo)
curRepo = Field(Repo, description=u'this repo')
def resolve_curRepo(self, args, context, info):
repo = app.config.get('REPO')
return _Repo(repo)
def resolve_hello(self, args, context, info):
return 'Hello, %s!' % (args.get('name', ''), )
def resolve_deprecatedField(self, args, context, info):
return 'You can request deprecated field, but it is not displayed in auto-generated documentation by default.'
def resolve_fieldWithException(self, args, context, info):
raise ValueError('Exception message thrown in field resolver')
def resolve_migrateVersion(self, args, context, info):
return MigrateVersion.query.first()
##############################################################
################### Mutations ######################
##############################################################
def build_input(dao, bit_mask):
return {k: BuildArgument(v) for k, v in mask_field(dao, bit_mask).items()}
class CreateMigrateVersion(g.Mutation):
Input = type('Input', (), build_input(MigrateVersion, InitializeField))
ok = g.Boolean()
msg = g.String()
migrateVersion = Field(MigrateVersion)
@staticmethod
def mutate(root, args, context, info):
return CreateMigrateVersion(ok=True, msg='suc', migrateVersion=MigrateVersion.query.first())
class UpdateMigrateVersion(g.Mutation):
Input = type('Input', (), build_input(MigrateVersion, EditableField))
ok = g.Boolean()
msg = g.String()
migrateVersion = Field(MigrateVersion)
@staticmethod
def mutate(root, args, context, info):
return UpdateMigrateVersion(ok=True, msg='suc', migrateVersion=MigrateVersion.query.first())
##############################################################
###################   Root Mutations    ######################
##############################################################
Mutations = type('Mutations', (g.ObjectType, ), {camel_to_underline(name_from_repr(v)):v.Field() for _, v in globals().items() if _is_graphql_mutation(v)})
tables = [tbl if BuildType(tbl) else tbl for _, tbl in globals().items() if isclass(tbl) and issubclass(tbl, Base) and tbl != Base]  # BuildType() is called for its registration side effect; the expression always yields tbl
schema = g.Schema(query=Query, mutation=Mutations, types=[BuildType(tbl) for tbl in tables] + [cls for _, cls in globals().items() if _is_graphql_cls(cls)], auto_camelcase = False)
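# --- example query (illustrative) ---
# Because auto_camelcase is disabled above, model fields keep their
# underscore names, so a client might ask:
#
#     { curRepo { repo_path master { ref_name } } }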
|
[
"wuyou7410@gmail.com"
] |
wuyou7410@gmail.com
|
a76bbe862fc2f943b5866b00388228264612f33d
|
6d4af63e07a137d382ef61afe8276f7470b7af59
|
/wsgistate/__init__.py
|
742cd2a8b2a8e916a3427188ed7f1c260ff1b2b1
|
[] |
no_license
|
Cromlech/wsgistate
|
142c7016c74fc28e6c56368f018bf113c379118c
|
d730ee47a4a43efbd20bcb9623e76bedeeb8c62b
|
refs/heads/master
| 2023-04-11T14:10:20.522520
| 2023-04-11T10:06:10
| 2023-04-11T10:06:10
| 15,806,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,085
|
py
|
# Copyright (c) 2005 Allan Saddi <allan@saddi.com>
# Copyright (c) 2005, the Lawrence Journal-World
# Copyright (c) 2006 L. C. Rees
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Django nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
'''Base Cache class'''
__all__ = ['BaseCache', 'db', 'file', 'memory', 'memcached',
'session', 'simple', 'cache']
def synchronized(func):
'''Decorator to lock and unlock a method (Phillip J. Eby).
@param func Method to decorate
'''
def wrapper(self, *__args, **__kw):
self._lock.acquire()
try:
return func(self, *__args, **__kw)
finally:
self._lock.release()
wrapper.__name__ = func.__name__
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
return wrapper
class BaseCache(object):
'''Base Cache class.'''
def __init__(self, *a, **kw):
super(BaseCache, self).__init__()
timeout = kw.get('timeout', 300)
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.timeout = timeout
def __getitem__(self, key):
'''Fetch a given key from the cache.'''
return self.get(key)
def __setitem__(self, key, value):
'''Set a value in the cache. '''
self.set(key, value)
def __delitem__(self, key):
'''Delete a key from the cache.'''
self.delete(key)
def __contains__(self, key):
'''Tell if a given key is in the cache.'''
return self.get(key) is not None
def get(self, key, default=None):
'''Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
@param key Keyword of item in cache.
@param default Default value (default: None)
'''
raise NotImplementedError()
def set(self, key, value):
'''Set a value in the cache.
@param key Keyword of item in cache.
@param value Value to be inserted in cache.
'''
raise NotImplementedError()
def delete(self, key):
'''Delete a key from the cache, failing silently.
@param key Keyword of item in cache.
'''
raise NotImplementedError()
def get_many(self, keys):
'''Fetch a bunch of keys from the cache. Returns a dict mapping each
key in keys to its value. If the given key is missing, it will be
missing from the response dict.
@param keys Keywords of items in cache.
'''
d = dict()
for k in keys:
val = self.get(k)
if val is not None:
d[k] = val
return d
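# --- illustrative subclass (not part of the original module) ---
# A toy dict-backed cache showing how the abstract get/set/delete trio is
# meant to be filled in; wsgistate ships real backends in the modules
# listed in __all__ (memory, file, db, memcached, ...).
class _DictCacheSketch(BaseCache):
    '''Minimal in-memory cache (sketch).'''
    def __init__(self, *a, **kw):
        super(_DictCacheSketch, self).__init__(*a, **kw)
        self._data = {}
    def get(self, key, default=None):
        return self._data.get(key, default)
    def set(self, key, value):
        self._data[key] = value
    def delete(self, key):
        self._data.pop(key, None)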
|
[
"trollfot@gmail.com"
] |
trollfot@gmail.com
|
3f252f1404c6461dd77efe3c577a6021a96fd1fc
|
57f4110f8252496142f043dce929c15df43f0e99
|
/niwo/spiders/niwobbs.py
|
36c18ff00dcd5fd500d26781a373ec5f38b113da
|
[] |
no_license
|
lanluyu/niwo_bbs
|
0c69a6fcfcf1bcf52096124721ed051a9b677b59
|
bfb28d8050a6a1a74b2c199dbbbeee2baf4d40f2
|
refs/heads/master
| 2020-03-21T18:44:37.519262
| 2018-06-28T05:21:44
| 2018-06-28T05:21:44
| 138,910,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
# -*- coding: utf-8 -*-
from scrapy import Spider,Request
from niwo.items import NiwoItem
class NiwobbsSpider(Spider):
name = 'niwobbs'
    allowed_domains = ['bbs.niiwoo.com']  # domains only; a scheme here breaks scrapy's offsite filtering
def start_requests(self):
for i in range(3,449):
basic_url = 'http://bbs.niiwoo.com/forum.php?gid=1&page='
start_url = basic_url+str(i)
yield Request(url=start_url,callback=self.parse,dont_filter=True)
def parse(self, response):
a = response.xpath('.//div[@class="pg"]/strong/text()').extract_first()
        print('Crawling page', a)
posts = response.xpath('.//table[@class="wtab"]/tbody')
for post in posts:
item = NiwoItem()
item['title'] = ''.join(post.xpath('.//div[@class="thread-tit"]/a/text()').extract())
item['author'] = ''.join(post.xpath('.//div[@class="thread-nums"]/a[1]/text()').extract())
item['visitors'] = ''.join(post.xpath('.//div[@class="thread-nums"]/a[2]/@title').extract())
            # time extraction rule for pages 1 and 2
#item['lasttime'] = '最后回复时间:'+''.join(post.xpath('.//a[@class="time"]/span/span/@title').extract())
            # time extraction rule for page 3 onwards
item['lasttime'] = ''.join(post.xpath('.//a[@class="time"]/span/text()').extract())
item['url'] = 'http://bbs.niiwoo.com/'+''.join(post.xpath('.//div[@class="thread-tit"]/a/@href').extract())
print(item)
yield item
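# --- usage (illustrative) ---
# From the project root the spider runs with the standard scrapy CLI:
#
#     scrapy crawl niwobbs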
|
[
"noreply@github.com"
] |
noreply@github.com
|
f837708b75c33e3c2f20e04b15c464ef277c72b6
|
6c919bb579dd639d53f097d4b8b1b6f2bb830efb
|
/testfiles/interface_test.py
|
67d10a6aac8edb7aa6be430cf991c4d6067d7f37
|
[] |
no_license
|
aj132608/SpicyGlass
|
b258f0282e713d555489c4ab106c9008f6965f31
|
8b1d4f5ccf6f8ed4c0b9a65f042af505ab852c31
|
refs/heads/master
| 2021-01-14T01:45:18.047779
| 2020-03-09T18:48:46
| 2020-03-09T18:48:46
| 242,560,382
| 0
| 0
| null | 2020-03-07T00:43:38
| 2020-02-23T17:29:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
from firebaseinterface.firebase_interface import FirebaseInterface
import json
if __name__ == "__main__":
with open('creds.json') as file:
creds_dict = json.load(file)
interface_obj = FirebaseInterface(creds_dict=creds_dict)
# Fetch the current database held locally as a dictionary
current_database = interface_obj.get_database_dict()
print(f"database: {current_database}")
# Perform a GET request to retrieve a dictionary from the
# database itself
current_database = interface_obj.get_data(key='')
print(f"database: {current_database}")
# Get the value of a key
car_on = interface_obj.get_data(key='car-on')
print(f"car-on: {car_on}")
# Get a nested value using subkey
front_defrost = interface_obj.get_data(key='defrost',
subkey='front')
print(f"front defrost: {front_defrost}")
# response = interface_obj.change_value(key='car-on', val=True)
#
# print(f"PUT request response: {response}")
#
# current_database = interface_obj.get_database_dict()
#
# print(f"new database: {current_database}")
|
[
"32584157+aj132608@users.noreply.github.com"
] |
32584157+aj132608@users.noreply.github.com
|
32b6e1ea78845b7ff7ac88c40a72e2096200219f
|
eaaa1bbceb9a867e08769aae07c37c5e1107e430
|
/mitx_6.00.2x_ict_ds/week3/lstNone.py
|
fc381eea3af3ec7be312d603b0b86e04fe2eeb3d
|
[] |
no_license
|
nwinds/jugar
|
c8f9ce11dd95c6e0dda22e22a69dd598b34d9d3c
|
db4d362d9d1dd04f2c6c013d4462de0c892b8314
|
refs/heads/master
| 2020-12-24T16:35:03.951580
| 2016-03-10T09:25:06
| 2016-03-10T09:25:06
| 41,397,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
lst1 = [1,2,3]
lst2 = []
lst3 = lst1+lst2
print('lst1')
print(lst1)
print('lst2')
if len(lst2) > 0:
    print(lst2)
else:
    print('len(lst2) == 0')
print('lst3')
print(lst3)
|
[
"namingwinds@gmail.com"
] |
namingwinds@gmail.com
|
124368ed9467c6666327662c5ff9d8beeeb3a9f4
|
cb82e798d1ea875e87d973d87602baa07166fb7b
|
/net/ssl/tls_ecdhe_rsa_with_aes_128_gcm_sha256/prf-frame139.py
|
618a7f98604c83ebcb46dd238066e8646a60e1c1
|
[] |
no_license
|
rowanpang/noteGit
|
e9470be20bfdb04ac6b80c93f0f1cd3fd97ef565
|
120ca5329addf3a780b2299a0ab74de997b77785
|
refs/heads/master
| 2023-05-31T05:04:58.731953
| 2023-05-31T02:34:14
| 2023-05-31T02:34:14
| 52,506,290
| 1
| 0
| null | 2021-06-04T01:08:05
| 2016-02-25T07:41:49
|
C
|
UTF-8
|
Python
| false
| false
| 7,989
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import hashlib
import hmac
import unittest
import os
import binascii
import sys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import (
Cipher, algorithms, modes
)
def encrypt(key, plaintext, associated_data,iv):
# Generate a random 96-bit IV.
# iv = os.urandom(12)
# Construct an AES-GCM Cipher object with the given key and a
# randomly generated IV.
encryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv),
backend=default_backend()
).encryptor()
# associated_data will be authenticated but not encrypted,
# it must also be passed in on decryption.
encryptor.authenticate_additional_data(associated_data)
# Encrypt the plaintext and get the associated ciphertext.
# GCM does not require padding.
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
return (iv, ciphertext, encryptor.tag)
def decrypt(key, associated_data, iv, ciphertext, tag):
# Construct a Cipher object, with the key, iv, and additionally the
# GCM tag used for authenticating the message.
decryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv, tag),
backend=default_backend()
).decryptor()
# We put associated_data back in or the tag will fail to verify
# when we finalize the decryptor.
decryptor.authenticate_additional_data(associated_data)
# Decryption gets us the authenticated plaintext.
# If the tag does not match an InvalidTag exception will be raised.
return decryptor.update(ciphertext) + decryptor.finalize()
def TLSv1_0_PRF(outlen, secret, label, seed):
ls = len(secret)
ls1 = ls2 = (ls + 1) // 2
def xor(xx, yy):
o = []
for i in range(len(xx)):
o.append(xx[i] ^ yy[i])
return bytes(o)
md5 = TLSv1_2_PRF(outlen, secret[:ls1], label, seed, hashlib.md5)
sha1 = TLSv1_2_PRF(outlen, secret[-ls2:], label, seed, hashlib.sha1)
return xor(md5, sha1)
def TLSv1_2_PRF(outlen, secret, label, seed, h):
label = bytes(label, 'ASCII')
secret = bytes(secret)
seed = bytes(seed)
def p_hash(hashfn, outlen, k, pt):
o = []
a_im = pt
for i in range(0, outlen, hashfn().digest_size):
a_i = hmac.new(k, a_im, hashfn).digest()
output = hmac.new(k, a_i + pt, hashfn).digest()
o.append(output)
a_im = a_i
return bytes(b''.join(o))[:outlen]
return p_hash(h, outlen, secret, label + seed)
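# Reference (RFC 5246, section 5): the PRF expands a secret via P_hash,
#     A(0) = seed,  A(i) = HMAC(secret, A(i-1))
#     P_hash(secret, seed) = HMAC(secret, A(1) + seed) ||
#                            HMAC(secret, A(2) + seed) || ...
# with PRF(secret, label, seed) = P_hash(secret, label + seed), which is
# exactly what p_hash() above implements.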
def prfTest():
out = TLSv1_2_PRF(70,
bytes('keyforhmac','ASCII'),
'msg-for-hmac-sha256',
bytes('','ASCII'),hashlib.sha256)
print(out.hex())
def test():
prfTest()
# sys.exit()
print('--------prf test ok----------')
rdClihexStr = 'f77182ed908b500c8b1ad6ad8754329d63ad8704ae8901149727d7257bcf8878'
#frame 132
rdSvrhexStr = '59604cc213be22157934682d82a9dbf4cba3f53cc10f6a89d4270bb87a4ebb8c'
#frame 135
pre_master_secret_hexStr = '50891929d1f6b3507dfef2416057abb452116d5210c91a2d1c6b2ac4e9df23eeba718ac6b9bd5506479dd99b7585c983'
pre_master_secret = bytes.fromhex(pre_master_secret_hexStr)
#from ./firefox-sslkey.log is master_secret
length = 48
if len(pre_master_secret) == length:
        # a 48-byte secret is treated as the master key itself; the firefox sslkeylog file stores master keys
master_secret = pre_master_secret
else:
seedhexStr = rdClihexStr + rdSvrhexStr
secret = pre_master_secret
label = "master secret"
seed = bytes.fromhex(seedhexStr)
master_secret = TLSv1_2_PRF(length, secret, label, seed, hashlib.sha256)
print('master: ' + master_secret.hex())
key_block_secret = master_secret
seedhexStr = rdSvrhexStr + rdClihexStr
# TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
# sha256 so mac_key_len = 32
# aes_128_gcm so key_len = 16, iv_len = 16
maclen = 32
keylen = 16
ivlen = 4
length = (maclen + keylen + ivlen)*2
"""gen key block"""
secret = key_block_secret
label = "key expansion"
seed = bytes.fromhex(seedhexStr)
key_block = TLSv1_2_PRF(length, secret, label, seed, hashlib.sha256)
print('keyblock: ' + key_block.hex())
print('----split-----')
maclen = 0 #for gcm not need mackey
# 1
lenTmp = maclen
start = 0
end = start + lenTmp
client_write_mac_key = bytes(key_block[start:end])
print('cwmk: '+client_write_mac_key.hex())
# 2
start = end
end = start + lenTmp
server_write_mac_key = bytes(key_block[start:end])
print('swmk: '+server_write_mac_key.hex())
# 3
lenTmp = keylen
start = end
end = start + lenTmp
client_write_key = bytes(key_block[start:end])
print(' cwk: '+client_write_key.hex())
# 4
start = end
end = start + lenTmp
server_write_key = bytes(key_block[start:end])
print(' swk: '+server_write_key.hex())
# 5
lenTmp = ivlen
start = end
end = start + lenTmp
client_write_iv = bytes(key_block[start:end])
print(' cwi: '+client_write_iv.hex())
# 6
start = end
end = start + lenTmp
server_write_iv = bytes(key_block[start:end])
print(' swi: '+server_write_iv.hex())
plainhexStr = '1400000c4bb5c78b0c01d695180f5ea4'
plaintext = binascii.unhexlify(plainhexStr)
#from wireshark after import ./.firefox-sslkey.log
# frame139
# Ciphertext[40]:
# | 00 00 00 00 00 00 00 00 2b 83 0d 98 5e c2 81 6e |........+...^..n|
# | bd 6d e8 92 bf d3 b4 08 da e0 2d ee a4 aa 98 f1 |.m........-.....|
# | 8d 48 77 62 fd 72 24 a6 |.Hwb.r$. |
# ssl_decrypt_record: allocating 72 bytes for decrypt data (old len 32)
# Plaintext[32]:
# | 14 00 00 0c 4b b5 c7 8b 0c 01 d6 95 18 0f 5e a4 |....K.........^.|
# | a5 f0 cf 18 da 34 6b 5c f9 4b 0e 6b a2 15 f1 6e |.....4k\.K.k...n|
# 1603030028 #record head
# 0000000000000000 #explicit nonce
# 2b830d985ec2816ebd6de892bfd3b408 #cip
# dae02deea4aa98f18d487762fd7224a6 #tag
nonceExplicithexStr = '0000000000000000' #8bytes
nonceCounter = ''
nonce = server_write_iv[:4] + binascii.unhexlify(nonceExplicithexStr) + bytes.fromhex(nonceCounter)
print('non: ' + nonce.hex())
#ADDlen = 8seq + 1type + 2ver + 2len,
    seq_num = '0000000000000000' # sequence number; 0 for the finished record
    # 8 bytes; within one TLS session, once key exchange etc. completes, the finished record's seq num is 0
tlsCompressedType = '16'
tlsCompressedVersion = '0303'
    tlsCompressedLength = '0010' # compressed length before encryption
associateStr = seq_num + \
tlsCompressedType + \
tlsCompressedVersion + \
tlsCompressedLength
associateData = binascii.unhexlify(associateStr)
print('aso: ' + associateData.hex())
open('./plaintxt',"bw+").write(plaintext)
open("./swk","bw+").write(server_write_key)
open("./swi","bw+").write(server_write_iv)
open("./associate","bw+").write(associateData)
open("./nonce","bw+").write(nonce)
iv, ciphertext, tag = encrypt(
server_write_key,
plaintext,
associateData,
nonce
)
print(' iv: ' + iv.hex())
print('cip: ' + ciphertext.hex())
print('tag: ' + tag.hex())
print('-----decrypt-----')
plaintext = decrypt(
server_write_key,
associateData,
iv,
ciphertext,
tag
)
print(b'plain: ' + binascii.hexlify(plaintext))
if __name__ == '__main__':
# unittest.main()
test()
|
[
"pangweizhen.2008@hotmail.com"
] |
pangweizhen.2008@hotmail.com
|
e509256c393ec76ad4d8aa28753b3613e2457c80
|
84fcfed46e03d4936f1a5d82624fd43a4f415d72
|
/Client.py
|
719ea7f6319f406dac03306172f4f54dc2038d55
|
[] |
no_license
|
Shruti-Pattajoshi/Computer-Networks-Socket-Programming-
|
9a60fa750adf7bee98d64cbe95de22a67e93ccb6
|
8daf2051fb412e555952cdde0dd0e3554c2231be
|
refs/heads/master
| 2022-06-20T04:06:32.277348
| 2020-05-14T07:03:52
| 2020-05-14T07:03:52
| 263,840,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# client.py
import socket
sp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 13
sp.connect((host, port))
msg = sp.recv(1024)
sp.close()
print("The time recieved from the server is: %s" % msg.decode('ascii'))
|
[
"noreply@github.com"
] |
noreply@github.com
|
17bc87c11112d5f9e4a92ca75a101c66480bc5b1
|
34284fd6cd6c97bad8d1fa422e9279600d1218a7
|
/labs/lab6.py
|
14d6e6bfc0fe5bfe06087b9ea12d415c2f280f08
|
[] |
no_license
|
Yui-Ezic/Numerical-Methods
|
1c2ac92fdf9bb75924e9ac14bac7c5033e634436
|
da2ba5a3f2a17a947d1240a2a3154b70d9d8c916
|
refs/heads/master
| 2020-04-14T13:40:06.138283
| 2019-01-02T18:27:14
| 2019-01-02T18:27:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,400
|
py
|
import numpy as np
import math
import matplotlib.pyplot as plt
def my_function(x):
return x * x + math.sin(x)
# Bounds of the function's interval
a1 = 1
b1 = 3
# Step size
H = (b1 - a1) / 10
# Number of points
m = 11
arrSize = m
# Table of function values
Xt = np.zeros(arrSize)
Yt = np.zeros(arrSize)
# Build the table
for i in range(arrSize):
Xt[i] = a1 + i*H
Yt[i] = my_function(Xt[i])
n = 4
a = np.zeros(n + 1)
aFlag = np.zeros(n + 1)
for i in range(m):
a[0] += Yt[i]
a[0] /= m
aFlag[0] = 1
b = np.zeros(n + 1)
bFlag = np.zeros(n + 1)
bFlag[0] = 1
def g(j, x):
if j == -1:
return 0
if j == 0:
return 1
if aFlag[j] == 0:
c = 0
d = 0
for i in range(m):
f = g(j - 1, Xt[i])
f_2 = f * f
c += Xt[i] * f_2
d += f_2
a[j] = c / d
aFlag[j] = 1
c = 0
d = 0
for i in range(m):
f = g(j, Xt[i])
f_2 = f * f
c += f * Yt[i]
d += f_2
a[j] = c / d
if bFlag[j - 1] == 0:
c = 0
d = 0
for i in range(m):
f = g(j - 1, Xt[i])
f1 = g(j - 2, Xt[i])
c += Xt[i] * f * f1
d += f1 * f1
b[j - 1] = c / d
bFlag[j - 1] = 1
return (x - a[j]) * g(j-1, x) - b[j - 1] * g(j - 2, x)
def calculate_polynomial(x, n):
result = 0
for i in range(n + 1):
result += a[i] * g(i, x)
return result
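# The fitted approximation is P_n(x) = sum_j a[j] * g_j(x), where the g_j
# are polynomials orthogonal over the sample points {Xt[i]}, built by the
# three-term recurrence in g() above; this least-squares fit avoids solving
# the normal equations directly.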
# for plotting the base function
h1 = (b1 - a1) / 20
H2 = 2*H
start = a1 - H2
end = b1 + H2
xlist = []
ylist1 = []
while start <= end:
f = my_function(start)
xlist.append(start)
ylist1.append(f)
start += h1
plt.subplot(2, 1, 1)
plt.plot(xlist, ylist1, 'k', label='f(x)')
for i in range(n + 1):
    #Print the table
print("For n = {0}".format(i))
print("----------------------------------------------------------------------")
print("| | | | | f(xj) - Pn(xj) |")
print("| xj | f(xj) | Pn(xj) | f(xj) - Pn(xj) | -------------- * 100 |")
print("| | | | | Pn(xj) |")
print("----------------------------------------------------------------------")
start = a1 - H2
ylist2 = []
while start <= end:
f = my_function(start)
p = calculate_polynomial(start, i)
ylist2.append(p)
print("|{0:5.2f} | {1:8.3f} | {2:8.3f} | {3:14.9f} | {4:21.16f}|".format(start, f, p, p - f, (p-f) * 100 / f))
start += h1
plt.plot(xlist, ylist2, '--', label='P{0}(x)'.format(i))
print("----------------------------------------------------------------------\n")
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'$y = f(x), y = Pn(x)$')
plt.legend(loc='best', ncol=2)
# Additional task
plt.subplot(2, 1, 2)
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'$y = f(x), y = Pn(x), y = f\'(x), y = P\'(x)$')
plt.plot(xlist, ylist1, label='f(x)')
ylist = [calculate_polynomial(x, 3) for x in xlist]
plt.plot(xlist, ylist, '--', label='P3(x)')
ylist = [2 * x * x - 12 * x + 11.288 for x in xlist]
plt.plot(xlist, ylist, ':', label='P3\'(x)')
ylist = [2*x + math.cos(x) for x in xlist]
plt.plot(xlist, ylist, label='f\'(x)')
plt.legend()
plt.show()
|
[
"mishan221199@gmail.com"
] |
mishan221199@gmail.com
|
cecf868f2be1629e1555d5d8b1f75d9ff586a4ce
|
0f33457b2fead035730e86176092fe5abe532f51
|
/XOR EQUALITY Code Shef/xor_bitwise.py
|
4bcffdcbf523f72a4bd1af3310e94be558160e7e
|
[] |
no_license
|
tb123-coder/May-Long-Challenge-Codechef-
|
dbb0cd30e0862cebeef6b79de9a20e7f5cb98e0a
|
bf710b287bd12638d0dc7308367259b57af9f7ef
|
refs/heads/master
| 2023-04-28T05:28:19.517262
| 2021-05-13T12:32:18
| 2021-05-13T12:32:18
| 367,038,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
def power(x, y, p):
res = 1
x = x % p
if (x == 0):
return 0
while (y > 0):
# If y is odd, multiply
# x with result
#if((y%2==1)
if((y & 1)==1):
res=(res*x)%p
#y=y/2
y=y>>1
#y must be even
x=(x*x)%p
return res
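# Quick sanity check (an illustrative example, not part of the original
# solution): power(2, 10, 10**9 + 7) == 1024, since 2^10 = 1024 < p.
# This is binary (square-and-multiply) modular exponentiation, O(log y).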
T = int(input())
while (T):
T -= 1
N = int(input())
p=10**9+7
ans=power(2,N-1,p)
print(ans)
|
[
"batra.tushar12dec1999@gmail.com"
] |
batra.tushar12dec1999@gmail.com
|
394ff5431df2a4e88c516043b26846577d06ec92
|
91e389e4fc6a91874f4d4665bc0986e00f66074c
|
/downloadXkcd.py
|
b7a47af06c98d6e79909227078a06e4a10a26b7d
|
[] |
no_license
|
Sajmon25/Automate-the-Boring-Stuff-with-Python
|
5cb36496d5a64988f50656b2f6d3172ab05c9c5b
|
8fb51eb0e9e558f939d5a7a4038257d7c02a9165
|
refs/heads/master
| 2020-04-20T12:18:54.236263
| 2019-05-09T21:31:33
| 2019-05-09T21:31:33
| 168,838,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
#! python3
# downloadXkcd.py - Download every single XKCD comic.
import requests
import os
import bs4
url = 'http://xkcd.com'
os.makedirs('xkcd', exist_ok=True)
while not url.endswith('#'):
# TODO: Download the page
print('Download page %s...' % url)
res = requests.get(url)
res.raise_for_status()
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
# TODO: Find the URL of the comic image.
comicElem = soup.select('#comic img')
if comicElem == []:
print('Could not find comic image.')
else:
try:
comicUrl = 'http:' + comicElem[0].get('src')
# TODO: Download the image.
            print('Downloading image %s...' % comicUrl)
res = requests.get(comicUrl)
res.raise_for_status()
except requests.exceptions.MissingSchema:
# skip this comic
prevLink = soup.select('a[rel="prev"]')[0]
url = 'http://xkcd.com' + prevLink.get('href')
continue
# TODO: Save the image to ./xkcd.
imageFile = open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb')
for chunk in res.iter_content(100000):
imageFile.write(chunk)
imageFile.close()
# TODO: Get the Prev button's url.
prevLink = soup.select('a[rel="prev"]')[0]
url = 'http://xkcd.com' + prevLink.get('href')
print('Done')
|
[
"szymon.grzedi@gmail.com"
] |
szymon.grzedi@gmail.com
|
3f0caf57cc2e796c4b731bb6d978430bedfcd7f9
|
0a473b06d45b4697b124859c21f11ca833da70b4
|
/chemprop_fda/features/morgan_fingerprint.py
|
908906f82f12c072955083133788ac0841179257
|
[
"MIT"
] |
permissive
|
AayushGrover/ViscaNet
|
e0085f3549a35447d0ef497fb9ee25fe8a625b73
|
41786e10b84f2264b638567bdce1c189c1b66b00
|
refs/heads/main
| 2023-08-28T21:45:14.598705
| 2021-10-31T21:53:42
| 2021-10-31T21:53:42
| 286,003,354
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
def morgan_fingerprint(smiles: str, radius: int = 2, num_bits: int = 2048, use_counts: bool = False) -> np.ndarray:
"""
Generates a morgan fingerprint for a smiles string.
:param smiles: A smiles string for a molecule.
:param radius: The radius of the fingerprint.
:param num_bits: The number of bits to use in the fingerprint.
:param use_counts: Whether to use counts or just a bit vector for the fingerprint
:return: A 1-D numpy array containing the morgan fingerprint.
"""
if type(smiles) == str:
mol = Chem.MolFromSmiles(smiles)
else:
mol = smiles
if use_counts:
fp_vect = AllChem.GetHashedMorganFingerprint(mol, radius, nBits=num_bits)
else:
fp_vect = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=num_bits)
fp = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fp_vect, fp)
return fp
|
[
"AAYUSH@staff-net-etx-1580.intern.ethz.ch"
] |
AAYUSH@staff-net-etx-1580.intern.ethz.ch
|
d830aecde642375024c94874c404b42184b66447
|
3f382f9edd21be130dafe26e79ee1081b9626673
|
/movieClasses/Inheritance.py
|
3b6b6971025ac1a642042079dca57f25c35739ed
|
[] |
no_license
|
ceewick/introClasses
|
090d41a5ba9e151ca136ee8d23f9fbc4d9a5f7d7
|
3acc7bac15bc3bcaebfa39b438e8033ec900d5ec
|
refs/heads/master
| 2020-03-15T15:36:33.511733
| 2018-05-06T19:51:36
| 2018-05-06T19:51:36
| 132,216,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
class Parent():
def __init__(self, last_name, eye_color):
print('Parent Constructor Called')
self.last_name = last_name
self.eye_color = eye_color
class Child(Parent):
def __init__(self,last_name, eye_color, number_of_toys):
print('Child Constructor Called')
Parent.__init__(self, last_name, eye_color)
self.number_of_toys = number_of_toys
## Typically definition of class and instance in seperate files, but for demonstation.. here
#billy_cyrus = Parent('Cyrus','blue')
#print(billy_cyrus.last_name)
miley_cyrus = Child('Cyrus','Blue',5)
print(miley_cyrus.last_name)
print(miley_cyrus.number_of_toys)
|
[
"noreply@github.com"
] |
noreply@github.com
|
de8b449316abbe86696e3641635d94af6d290c5d
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/caffe2/python/operator_test/stats_put_ops_test.py
|
2ce56248c5dd0116931f91de9b4b556dd881e73b
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:86a74bb87f96bd8ebf2fa9ae72729c5cbe121a32edc1fb034496e084703631b3
size 6596
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
a35e6a756f615aca80c4b91a8b264a5aa0cd6d0e
|
9cd00edd008ce38ea3127f090b6867a91fe7193d
|
/src/plot_Qle_at_all_events_above_Tthreh.py
|
382993ac07bd63823ff8cd12124f714a8056199b
|
[] |
no_license
|
shaoxiuma/heatwave_coupling
|
c5a2a2bba53351597f4cb60ecb446bfb9629812f
|
459f6bc72402b5dd3edf49bc3b9be380b5f54705
|
refs/heads/master
| 2021-09-13T06:50:48.733659
| 2018-04-26T06:09:54
| 2018-04-26T06:09:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,338
|
py
|
#!/usr/bin/env python
"""
For each of the OzFlux/FLUXNET2015 sites, plot the TXx and T-4 days
Qle and bowen ratio
That's all folks.
"""
__author__ = "Martin De Kauwe"
__version__ = "1.0 (20.04.2018)"
__email__ = "mdekauwe@gmail.com"
import os
import sys
import glob
import netCDF4 as nc
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
import re
import constants as c
def main(fname):
plot_dir = "plots"
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
df = pd.read_csv(fname)
df = df[df.pft == "EBF"]
df = df[~np.isnan(df.temp)]
#width = 12.0
#height = width / 1.618
#print(width, height)
#sys.exit()
width = 14
height = 10
fig = plt.figure(figsize=(width, height))
fig.subplots_adjust(hspace=0.05)
fig.subplots_adjust(wspace=0.05)
plt.rcParams['text.usetex'] = False
plt.rcParams['font.family'] = "sans-serif"
plt.rcParams['font.sans-serif'] = "Helvetica"
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['font.size'] = 14
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
count = 0
sites = np.unique(df.site)
for site in sites:
site_name = re.sub(r"(\w)([A-Z])", r"\1 \2", site)
ax = fig.add_subplot(3,3,1+count)
df_site = df[df.site == site]
events = int(len(df_site)/4)
cnt = 0
for e in range(0, events):
from scipy import stats
x = df_site["temp"][cnt:cnt+4]
y = df_site["Qle"][cnt:cnt+4]
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
if slope > 0.0 and p_value <= 0.05:
ax.plot(df_site["temp"][cnt:cnt+4], df_site["Qle"][cnt:cnt+4],
label=site, ls="-", marker="o", zorder=100)
elif slope > 0.0 and p_value > 0.05:
ax.plot(df_site["temp"][cnt:cnt+4], df_site["Qle"][cnt:cnt+4],
label=site, ls="-", marker="o", color="lightgrey",
zorder=1)
cnt += 4
if count == 0:
ax.set_ylabel("Qle (W m$^{-2}$)", position=(0.5, 0.0))
if count == 4:
#ax.set_xlabel('Temperature ($^\circ$C)', position=(1.0, 0.5))
ax.set_xlabel('Temperature ($^\circ$C)')
if count < 3:
plt.setp(ax.get_xticklabels(), visible=False)
if count != 0 and count != 3:
plt.setp(ax.get_yticklabels(), visible=False)
props = dict(boxstyle='round', facecolor='white', alpha=1.0,
ec="white")
ax.text(0.04, 0.95, site_name,
transform=ax.transAxes, fontsize=14, verticalalignment='top',
bbox=props)
from matplotlib.ticker import MaxNLocator
ax.yaxis.set_major_locator(MaxNLocator(4))
ax.set_ylim(0, 280)
ax.set_xlim(15, 50)
count += 1
ofdir = "/Users/mdekauwe/Dropbox/fluxnet_heatwaves_paper/figures/figs"
fig.savefig(os.path.join(ofdir, "all_events.pdf"),
bbox_inches='tight', pad_inches=0.1)
#plt.show()
if __name__ == "__main__":
data_dir = "outputs/"
fname = "ozflux_all_events.csv"
fname = os.path.join(data_dir, fname)
main(fname)
|
[
"mdekauwe@gmail.com"
] |
mdekauwe@gmail.com
|
298bdb7986c7ce282903098e71efc3e61ebde167
|
4b0c57dddf8bd98c021e0967b5d94563d15372e1
|
/run_MatrixElement/test/emptyPSets/emptyPSet_qqH125_cfg.py
|
1925d9eb5134f84222300788d85f42237860a66f
|
[] |
no_license
|
aperloff/TAMUWW
|
fea6ed0066f3f2cef4d44c525ee843c6234460ba
|
c18e4b7822076bf74ee919509a6bd1f3cf780e11
|
refs/heads/master
| 2021-01-21T14:12:34.813887
| 2018-07-23T04:59:40
| 2018-07-23T04:59:40
| 10,922,954
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
import FWCore.ParameterSet.Config as cms
import os
#!
#! PROCESS
#!
process = cms.Process("MatrixElementProcess")
#!
#! SERVICES
#!
#process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageLogger.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
process.load('CommonTools.UtilAlgos.TFileService_cfi')
process.TFileService.fileName=cms.string('qqH125.root')
#!
#! INPUT
#!
inputFiles = cms.untracked.vstring(
'root://cmsxrootd.fnal.gov//store/user/aperloff/MatrixElement/Summer12ME8TeV/MEInput/qqH125.root'
)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))
process.source = cms.Source("PoolSource",
skipEvents = cms.untracked.uint32(0),
fileNames = inputFiles )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
|
[
"aperloff@physics.tamu.edu"
] |
aperloff@physics.tamu.edu
|
65ea64bcdab906276d4068b3999cba770c961f77
|
14f078c2f88ac656b56ee8a9a39eaf0135de4704
|
/VideoxD/__main__.py
|
c4fa566bb02076067104650f3c8fd3e7a30230d2
|
[
"MIT"
] |
permissive
|
Saksham07529/VideoChatStreamBot
|
dd39a7d1e16f21995893184a9e394659054073b2
|
72dd8f24d38494a683606118f34154177a643fb7
|
refs/heads/main
| 2023-08-17T17:05:56.326866
| 2021-09-14T06:02:06
| 2021-09-14T06:02:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
import asyncio
from misc import Calls, app, bot
from pyrogram import idle
async def init():
await app.start()
print("𝙐𝙨𝙚𝙧 𝙖𝙘𝙘𝙤𝙪𝙣𝙩 𝙄𝙣𝙞𝙩𝙞𝙖𝙡𝙞𝙯𝙚𝙙.")
await bot.start()
print("𝘽𝙤𝙩 𝙄𝙣𝙞𝙩𝙞𝙖𝙡𝙞𝙯𝙚𝙙.")
print(
"𝙔𝙤𝙪 𝙈𝙞𝙜𝙝𝙩 𝙨𝙚𝙚 𝙉𝙤 𝙋𝙡𝙪𝙜𝙞𝙣𝙨 𝙇𝙤𝙖𝙙𝙚𝙙 𝙏𝙝𝙖𝙩𝙨 𝘼 𝘽𝙪𝙜 𝘽𝙮 𝙡𝙖𝙩𝙚𝙨𝙩 𝙫𝙚𝙧𝙨𝙞𝙤𝙣 𝙤𝙛 𝙋𝙮𝙧𝙤𝙜𝙧𝙖𝙢, 𝙋𝙡𝙪𝙜𝙞𝙣𝙨 𝙝𝙖𝙫𝙚 𝘽𝙚𝙚𝙣 𝙇𝙤𝙖𝙙𝙚𝙙 𝙎𝙪𝙘𝙘𝙚𝙨𝙨𝙛𝙪𝙡𝙡𝙮."
)
await idle()
loop = asyncio.get_event_loop()
if __name__ == "__main__":
loop.run_until_complete(init())
|
[
"noreply@github.com"
] |
noreply@github.com
|
74b32b4c2a6853ebc774d3f547b249081b9289cd
|
4ee9de394b2650d7cca19b2848100e8db9edc596
|
/solution-python/001_two_sum.py
|
0fab498f08572f102087ec4c7aa040b784330840
|
[] |
no_license
|
gongfuPanada/notes-leetcode
|
b99d5fd18a14558c1cbeb1538fd8bddbfdeb3fdd
|
0bc611efcee8286c07fd5af1257a27a2575363b0
|
refs/heads/master
| 2020-12-29T00:59:00.512133
| 2016-04-24T12:54:22
| 2016-04-24T12:54:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
"""
001. Two Sum
@name: li jin
@date: Feb 19, 2016
@link: https://leetcode.com/problems/two-sum/
@time: 56 ms
"""
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
d = {}
for i, n in enumerate(nums):
if n in d:
return [d[n], i]
d[target - n] = i
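# Example: Solution().twoSum([2, 7, 11, 15], 9) -> [0, 1].
# One pass with a complement map: d maps the value still needed to the
# index that needs it, so each lookup is O(1) and the whole scan is O(n).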
|
[
"lijinwithyou@gmail.com"
] |
lijinwithyou@gmail.com
|
3c86e7fd1292b6f7b57fbcee485151b7beb95814
|
7310067f47b4626d7afe71ccac591febe3441b1f
|
/knn.py
|
5803d62dd80406b76d0ed7834ee6004fd47ba612
|
[] |
no_license
|
anisrini/KNN
|
1b0f13e5cacae6834dead55d84b2000590d69814
|
596f2ba78b66b4e902f11c512d760ace65422775
|
refs/heads/master
| 2016-08-12T04:38:55.625423
| 2016-04-01T09:08:13
| 2016-04-01T09:08:13
| 55,217,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
import pandas as pd
import helpers
import cdist
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import LeaveOneOut
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
def knn(trains, test, kvals, loo_flag):
print "Classification"
for k in range(len(kvals)):
print "K: \n", kvals[k]
knn = cdist.KNNClassifier(kvals[k])
for i in range(len(trains)):
for j in range(len(trains[i])):
print "2-Fold"
print "i, j ", i, j
print
skf = StratifiedKFold(trains[i][j]['labels'], n_folds=2)
for train_index, test_index in skf:
fold_train = trains[i][j].ix[train_index]
fold_test = trains[i][j].ix[test_index]
fold_train.index = range(len(fold_train))
fold_test.index = range(len(fold_test))
knn.train(fold_train.iloc[:,:-1], fold_train['labels'])
print "Accuracy: ", accuracy_score(fold_test['labels'], knn.test(fold_test.iloc[:,:-1]))
print "Confusion Matrix: \n", confusion_matrix(fold_test['labels'], knn.test(fold_test.iloc[:,:-1]))
print
print "5-Fold"
print "i, j ", i, j
print
skf = StratifiedKFold(trains[i][j]['labels'], n_folds=5)
for train_index, test_index in skf:
fold_train = trains[i][j].ix[train_index]
fold_test = trains[i][j].ix[test_index]
fold_train.index = range(len(fold_train))
fold_test.index = range(len(fold_test))
knn.train(fold_train.iloc[:,:-1], fold_train['labels'])
print "Accuracy: ", accuracy_score(fold_test['labels'], knn.test(fold_test.iloc[:,:-1]))
print "Confusion Matrix: \n", confusion_matrix(fold_test['labels'], knn.test(fold_test.iloc[:,:-1]))
print
if loo_flag == 1:
print "Leave One Out: "
print "i, j ", i, j
print
loo = LeaveOneOut(len(trains[i][j].iloc[:,:-1]))
for train_index, test_index in loo:
fold_train = trains[i][j].ix[train_index]
fold_test = trains[i][j].ix[test_index]
fold_train.index = range(len(fold_train))
fold_test.index = range(len(fold_test))
knn.train(fold_train.iloc[:,:-1], fold_train['labels'])
print "Accuracy: ", accuracy_score(fold_test['labels'], knn.test(fold_test.iloc[:,:-1]))
print "Confusion Matrix: \n", confusion_matrix(fold_test['labels'], knn.test(fold_test.iloc[:,:-1]))
print
print
|
[
"anisrini93@gmail.com"
] |
anisrini93@gmail.com
|
fbcd98a2b32c59fa1729e7b1bce3bb8639dc8ab9
|
51412575c66152170bfcbf91ee09954d162a4643
|
/arg.py
|
ca5cd4c062f6dfff6dbedd4034ba3e1274de78d0
|
[] |
no_license
|
tladydrl/ch1.2
|
918f064efab84c771f17ea17503ac897d16f83ba
|
1acbc0d9c8195cb9bee7b3d40feff940ef764431
|
refs/heads/master
| 2020-05-20T13:03:30.806733
| 2019-05-09T12:55:01
| 2019-05-09T12:55:01
| 185,587,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
import sys
#print(sys.argv) # run this to see what comes in (includes the script path)
args = sys.argv[1:] # from the first argument on; index 0 is the script's own file name
print(args) # args is just a variable defined here
|
[
"tladydrl12@naver.com"
] |
tladydrl12@naver.com
|
afbde151e2e1473b1d6aa573579299dc0eb3ce8d
|
18c03a43ce50ee0129f9f45ada1bdaa2ff4f5774
|
/epistasis/__init__.py
|
4f9536d756aca5c653b3e69bbff59937aa2ff678
|
[
"Unlicense"
] |
permissive
|
harmsm/epistasis
|
acf7b5678b328527b2c0063f81d512fcbcd78ce1
|
f098700c15dbd93977d797a1a1708b4cfb6037b3
|
refs/heads/master
| 2022-04-30T13:09:49.106984
| 2022-03-19T05:29:37
| 2022-03-19T05:29:37
| 150,969,948
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
"""\
A Python API for modeling statistical, high-order epistasis in genotype-phenotype maps.
This library provides methods for:
1. Decomposing genotype-phenotype maps into high-order epistatic interactions
2. Finding nonlinear scales in the genotype-phenotype map
3. Calculating the contributions of different epistatic orders
4. Estimating the uncertainty of epistatic coefficients and
5. Interpreting the evolutionary importance of high-order interactions.
For more information about the epistasis models in this library, see our Genetics paper:
`Sailer, Z. R., & Harms, M. J. (2017). "Detecting High-Order Epistasis in Nonlinear Genotype-Phenotype Maps." Genetics, 205(3), 1079-1088.`_
.. _`Sailer, Z. R., & Harms, M. J. (2017). "Detecting High-Order Epistasis in Nonlinear Genotype-Phenotype Maps." Genetics, 205(3), 1079-1088.`: http://www.genetics.org/content/205/3/1079
Currently, this package works only as an API and there is no command-line
interface. Instead, we encourage you to use this package inside `Jupyter notebooks`_ .
"""
from .__version__ import __version__
|
[
"zachsailer@gmail.com"
] |
zachsailer@gmail.com
|
a11679509d39df99aea016f09d46980e5ad22918
|
d02e279c61c111d250812946f299828330947ed6
|
/easy/remove element.py
|
e5afce3b88516929d4662fde1ee3670b0a0f3576
|
[] |
no_license
|
wlawt/leetcode
|
b1599528e027bd8bfd2581f3bc56bb3680118c4b
|
c00fdce2f5f1ed1acc15f74f98c99b7139fedb50
|
refs/heads/master
| 2023-02-20T05:42:34.307169
| 2021-01-22T01:48:30
| 2021-01-22T01:48:30
| 326,090,331
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
i=0
nums.sort()
for j in range(len(nums)):
if nums[j] != val:
nums[i] = nums[j]
i+=1
return i
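# Example: nums = [3, 2, 2, 3]; Solution().removeElement(nums, 3) returns 2
# and nums[:2] holds the kept values ([2, 2]). Note the sort above means the
# surviving order can differ from the input order.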
|
[
"williamlaw.wtl@gmail.com"
] |
williamlaw.wtl@gmail.com
|
8710f437a1fb5651739771f1ae3cb10e87729160
|
1385d5c2bff76949f139951d5422ee4c9df13129
|
/135_webRead.py
|
c7a5360cbb079c5e77989ac3f6859a9d3cf602b9
|
[] |
no_license
|
luongs/pyPractice
|
b28a11b6b8909ac7b873184fd9be4c292c8559b5
|
775b8e608c7bfb4f43bdc91f1f6f20d82c9f43fc
|
refs/heads/master
| 2021-05-26T13:24:48.500697
| 2013-04-09T02:21:28
| 2013-04-09T02:21:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
# Reads txt from the web and displays result in python
# Author: Sebastien Luong
import urllib2 #library handles url stuff
f=urllib2.urlopen("http://cs.leanderisd.org/mitchellis.txt")
print f.readline(),
print f.readline(),
f.close()
|
[
"sebastienluong@gmail.com"
] |
sebastienluong@gmail.com
|
d8e6d6bc745881e200737675ec2cd28b084d364d
|
68c003a526414fef3c23ad591982f1113ca8a72c
|
/api/urls.py
|
6287d8ae58d870352565ce7f626f9a3aa7037130
|
[] |
no_license
|
pawanpaudel93/NepAmbulance
|
9d99ef3a3592b3a17091889d9db32aa952974400
|
b07dba43926c3f5a350b0acd75ac90b4842e3e32
|
refs/heads/master
| 2020-06-14T08:59:03.523102
| 2020-01-07T09:05:03
| 2020-01-07T09:05:03
| 194,965,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
from django.contrib import admin
from django.urls import path
from .views import ListCreateAmbulance, RetrieveUpdateDeleteAmbulance, ListDistrict, ListProvince
urlpatterns = [
path('ambulance/<int:province>/<slug:district>/<slug:city>/<int:ward>/', ListCreateAmbulance.as_view(), name="list-create-api"),
path('ambulance/<int:province>/<slug:district>/<slug:city>/<int:ward>/<int:pk>/', RetrieveUpdateDeleteAmbulance.as_view()),
# path('get/wards/<slug:city>/', ListWard.as_view(), name="get-wards"),
# path('get/cities/<slug:district>/', ListCity.as_view(), name='get-cities'),
path('get/districts/<slug:province>/', ListDistrict.as_view(), name='get-districts'),
path('get/provinces/', ListProvince.as_view(), name='get-provinces'),
]
|
[
"pawanpaudel93@gmail.com"
] |
pawanpaudel93@gmail.com
|
d8e42f2ce2432b336adb63018b3a51e93aacef6d
|
1c0542cef2ac6a5fb691602887236bf70f9bf71f
|
/speed_test_sar/sfsi_speed/mmcls/models/backbones/utils/gumbel_sigmoid.py
|
6610270f02c80a91e8e61cd013f8b7dff68c6ba3
|
[
"Apache-2.0"
] |
permissive
|
yizenghan/sarNet
|
683f45620013f906cb8a550713e786787074a8ae
|
d47a6e243677811b259a753233fbbaf86d2c9c97
|
refs/heads/master
| 2023-07-16T02:09:11.913765
| 2021-08-30T02:04:02
| 2021-08-30T02:04:02
| 299,276,627
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
import torch
from torch import nn
class GumbelSigmoid(nn.Module):
def __init__(self, max_T, decay_alpha, decay_method='exp', start_iter=0):
super(GumbelSigmoid, self).__init__()
self.max_T = max_T
self.cur_T = max_T
self.step = 0
self.decay_alpha = decay_alpha
self.decay_method = decay_method
self.softmax = nn.Softmax(dim=1)
self.p_value = 1e-8
# self.cur_T = (self.decay_alpha ** start_iter) * self.cur_T
assert self.decay_method in ['exp', 'step', 'cosine']
def forward(self, x):
# Shape <x> : [N, C, H, W]
# Shape <r> : [N, C, H, W]
r = 1 - x
x = (x + self.p_value).log()
r = (r + self.p_value).log()
# Generate Noise
x_N = torch.rand_like(x)
r_N = torch.rand_like(r)
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
x_N = -1 * (x_N + self.p_value).log()
r_N = -1 * (r_N + self.p_value).log()
# Get Final Distribution
x = x + x_N
x = x / (self.cur_T + self.p_value)
r = r + r_N
r = r / (self.cur_T + self.p_value)
x = torch.cat((x, r), dim=1)
x = self.softmax(x)
x = x[:, [0], :, :]
if self.training:
self.cur_T = self.cur_T * self.decay_alpha
# if self.cur_T < 0.5 or not self.training:
# print('cur_T:{0}'.format(self.cur_T))
# self.step += 1
# if self.step % 50 == 0:
# print('cur_T:{0}'.format(self.cur_T))
#
return x
if __name__ == '__main__':
pass
# ToDo: Test Code Here.
# _test_T = 0.6
# Block = GumbelSigmoid(_test_T, 1.0)
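    # A minimal smoke-test sketch (hypothetical shapes; the input is a map
    # of probabilities in [0, 1], the output a soft gate of the same shape):
    #
    #     block = GumbelSigmoid(max_T=0.6, decay_alpha=0.999)
    #     x = torch.rand(2, 1, 8, 8)
    #     out = block(x)
    #     print(out.shape)   # torch.Size([2, 1, 8, 8])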
|
[
"yizeng38@gmail.com"
] |
yizeng38@gmail.com
|
8f8251d41d03992c97c4284cab8980b06dce2ee6
|
c36e8ac0ccfd34a7d4245068b3d4ed6199927f9b
|
/main.py
|
0359c0eb11ccf5be256bc113ac8c06421867203b
|
[] |
no_license
|
avec140/project
|
332d9a87c09400ef52e90ca5b2f60c9643531591
|
d3e60766b81c8fcfff61dabdd5849ec10ce4fba0
|
refs/heads/master
| 2023-04-20T12:27:36.575993
| 2021-04-30T09:24:39
| 2021-04-30T09:24:39
| 363,083,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
from tkinter import *
from tkinter.colorchooser import askcolor
DEFAULT_PEN_SIZE = 1.0
DEFAULT_COLOR = "black"
mode = "pen"
old_x = None
old_y = None
mycolor = DEFAULT_COLOR
erase_on = False
def use_pen():
global mode
mode = "pen"
def use_brush():
global mode
mode = "brush"
def choose_color():
global mycolor
mycolor = askcolor(color=mycolor)[1]
def use_eraser():
global mode
mode = "erase"
def paint(event):
global var, erase_on, mode, old_x, old_y
fill_color = 'white' if mode == "erase" else mycolor
if old_x and old_y:
        canvas.create_line(old_x, old_y, event.x, event.y, capstyle=ROUND, width=var.get(), fill=fill_color)
old_x = event.x
old_y = event.y
def reset(event):
global old_x, old_y
old_x, old_y = None, None
def clear_all():
global canvas
canvas.delete('all')
window = Tk()
var = DoubleVar()
penButton = Button(window, text='펜', command=use_pen)
penButton.grid(row=0, column=0, sticky=W + E)
brushButton = Button(window, text='브러쉬', command=use_brush)
brushButton.grid(row=0, column=1, sticky=W + E)
colorButton = Button(window, text='색상선택', command=choose_color)
colorButton.grid(row=0, column=2, sticky=W + E)
eraseButton = Button(window, text='지우개', command=use_eraser)
eraseButton.grid(row=0, column=3, sticky=W + E)
clearButton = Button(window, text='모두삭제', command=clear_all)
clearButton.grid(row=0, column=4, sticky=W + E)
scale = Scale(window, variable=var, orient=VERTICAL)
scale.grid(row=1, column=5, sticky=N + S)
canvas = Canvas(window, bg='white', width=600, height=400)
canvas.grid(row=1, columnspan=5)
canvas.bind('<B1-Motion>', paint)
canvas.bind('<ButtonRelease-1>', reset)
window.mainloop()
|
[
"avec140@naver.com"
] |
avec140@naver.com
|
23125c25618090c23ad4cc628d0d9d5904b49c6f
|
ab2b6204ae7056f4aec2242b2256834eebf844a6
|
/docs/source/conf.py
|
3c33aacdb7df4d83003dd678f09ebd86982a36da
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
vickielee333/scarplet
|
e671bc071e80fc2c4a4457e7f93f8fcb39e157cf
|
8cf8cee2c9b808c550c0645f7836cda6d809872e
|
refs/heads/master
| 2020-12-23T23:50:14.408885
| 2019-10-11T23:31:41
| 2019-10-11T23:31:41
| 237,314,286
| 0
| 0
| null | 2020-01-30T22:01:23
| 2020-01-30T22:01:22
| null |
UTF-8
|
Python
| false
| false
| 6,445
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
# sys.path.insert(0, os.path.abspath('../../scarplet'))
sys.path.insert(0, "/home/rmsare/.local/lib/python3.4/site-packages")
# -- Project information -----------------------------------------------------
project = u"Scarplet"
copyright = u"2018, Robert Sare, George Hilley"
author = u"Robert Sare, George Hilley"
# The short X.Y version
version = u""
# The full version, including alpha/beta/rc tags
release = u"0.1.0"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
"sphinx.ext.viewcode",
"nbsphinx",
"numpydoc",
"IPython.sphinxext.ipython_console_highlighting",
]
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
nbsphinx_timeout = -1
# Build API docs
def run_apidoc(_):
ignore_paths = [os.path.join("../..", "scarplet", "tests/*")]
argv = [
"-f",
"-e",
"-M",
"-o",
".",
os.path.join("../..", "scarplet"),
] + ignore_paths
try:
# Sphinx 1.7+
from sphinx.ext import apidoc
apidoc.main(argv)
except ImportError:
# Sphinx 1.6 (and earlier)
from sphinx import apidoc
argv.insert(0, apidoc.__file__)
apidoc.main(argv)
def setup(app):
app.connect("builder-inited", run_apidoc)
# Mock imports
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = [
"matplotlib",
"matplotlib.pyplot",
"numexpr",
"numpy",
"numpy.ma",
"osgeo",
"pyfftw",
"pyfftw.interfaces.numpy_fft",
"rasterio",
"rasterio.fill",
"scipy.special",
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# Add any paths that contain templates here, relative to this directory.
templates_path = [".templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
# (exclude_patterns is set above, next to the extensions list, so it is not
# redefined here; redefining it would drop the nbsphinx ignore patterns.)
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import msmb_theme
html_theme = "msmb_theme"
html_theme_path = [msmb_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"style_nav_header_background": "#2980B9"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [".static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "Scarpletdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"Scarplet.tex",
u"Scarplet Documentation",
u"Robert Sare, George Hilley",
"manual",
)
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "scarplet", u"Scarplet Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Scarplet",
u"Scarplet Documentation",
author,
"Scarplet",
"One line description of project.",
"Miscellaneous",
)
]
# -- Extension configuration -------------------------------------------------
|
[
"robertmsare@gmail.com"
] |
robertmsare@gmail.com
|
a25e040005de4ab4ceb6b75d24ad6378699d31d5
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/0/xt.py
|
2cabdcd454cba0a0dfcd2847512439922cc7dc0c
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'XT':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
90f553a76ab38014ddd6d73cdf2088433003db3d
|
ade74e6b497703ef01fe267b665261e90e335a18
|
/studyvisapp/views.py
|
c5ca19a415afad73451833bc2c565ededea24865
|
[] |
no_license
|
TMdiesel/study-vis-app
|
0492ff8bdda5f1c2596cca0ac3642b6c81691f9a
|
a0440a8ffd21d5848faf8eeabfed4c7be38be90f
|
refs/heads/main
| 2023-07-08T04:32:48.564623
| 2021-08-15T12:08:53
| 2021-08-15T12:08:53
| 395,558,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,290
|
py
|
import datetime
from datetime import timedelta
from django.shortcuts import render, redirect
from django.views import View
from django.views.generic import (
ListView,
CreateView,
DeleteView,
UpdateView,
TemplateView,
)
from django.urls import reverse_lazy
from django.utils.timezone import make_aware
import plotly.graph_objects as go
import pandas as pd
import jpholiday
from django_pandas.io import read_frame
from .models import TimeModel
from .forms import HomeForm
# Create your views here.
class StudyList(ListView):
template_name = "list.html"
model = TimeModel
paginate_by = 100
queryset = model.objects.order_by("-starttime")
def get_queryset(self):
if self.request.GET.get("yearmonth") is None:
year = datetime.date.today().year
month = datetime.date.today().month
else:
year = self.request.GET.get("yearmonth").split("-")[0]
month = self.request.GET.get("yearmonth").split("-")[1]
object_list = self.model.objects.filter(
starttime__year=year, starttime__month=month
).order_by("-starttime")
return object_list
class StudyCreate(CreateView):
template_name = "create.html"
model = TimeModel
success_url = reverse_lazy("list")
fields = ("item", "memo", "starttime", "endtime")
class StudyDelete(DeleteView):
template_name = "delete.html"
model = TimeModel
success_url = reverse_lazy("list")
class StudyUpdate(UpdateView):
template_name = "update.html"
model = TimeModel
success_url = reverse_lazy("list")
fields = ("item", "memo", "starttime", "endtime", "duration")
class StudyHome(CreateView):
template_name = "home.html"
model = TimeModel
success_url = reverse_lazy("list")
fields = ("item",)
def post(self, request):
form = HomeForm(request.POST)
now = datetime.datetime.now()
object = form.save(commit=False)
object.starttime = make_aware(now)
object.isactive = True
object.save()
return redirect("list")
class StudyEnd(UpdateView):
template_name = "end.html"
model = TimeModel
success_url = reverse_lazy("list")
fields = ("item", "memo", "starttime", "endtime", "duration", "isactive")
def post(self, request, pk):
article = TimeModel.objects.get(pk=pk)
form = HomeForm(request.POST, instance=article)
now = datetime.datetime.now()
object = form.save(commit=False)
object.endtime = make_aware(now)
object.isactive = False
object.save()
return redirect("list")
def StudyTimer(request):
return render(request, "timer.html")
class StudyVis(TemplateView):
template_name = "vis.html"
def get_context_data(self, **kwargs):
context = super(StudyVis, self).get_context_data(**kwargs)
context["plot1"], context["plot2"] = self._create_graph()
return context
def _create_graph(self):
# specify the date range
if self.request.GET.get("yearmonth") is None:
year = datetime.date.today().year
month = datetime.date.today().month
else:
year = self.request.GET.get("yearmonth").split("-")[0]
month = self.request.GET.get("yearmonth").split("-")[1]
# read&create data
qs = TimeModel.objects.filter(starttime__year=year, starttime__month=month)
df = read_frame(qs, verbose=True)
df["duration"] = df["duration"].apply(lambda x: x.total_seconds() / 3600)
df["date"] = df["starttime"].apply(lambda x: x.date())
date_df = df.groupby("date").sum()[["duration"]]
date_df = self._complement_date(date_df)
task_num_gdf = df.groupby("item").sum()[["duration"]]
_, holiday_index = self._create_biz_hol_index(
date_df.index.min(), date_df.index.max()
)
# create graph
fig1 = go.Figure(
go.Scatter(
x=date_df.index,
y=date_df["duration"].round(decimals=1),
mode="lines+markers",
marker=dict(
size=7,
),
name="all",
),
layout=go.Layout(
title=f"勉強時間の推移({year}/{month})",
width=800,
height=400,
xaxis=dict(
range=[
date_df.index.min() - timedelta(days=1),
date_df.index.max() + timedelta(days=1),
],
dtick="D",
tickformat="%d",
),
),
)
fig1.add_trace(
go.Scatter(
x=date_df.index[holiday_index],
y=date_df["duration"][holiday_index].round(decimals=1),
mode="markers",
marker=dict(
size=7,
),
name="休日",
),
)
fig2 = go.Figure(
go.Bar(
x=task_num_gdf.index,
y=task_num_gdf["duration"].round(decimals=1),
),
layout=go.Layout(
title=f"項目ごとの勉強時間({year}/{month})",
width=800,
height=400,
),
)
return fig1.to_html(include_plotlyjs=False), fig2.to_html(
include_plotlyjs=False
)
def _complement_date(self, s: pd.Series) -> pd.DataFrame:
"""
        Given a Series indexed by date, fill in the missing dates
        over the range min_date..max_date (missing values become 0)
"""
str_min_date = s.index.min().strftime("%Y-%m-%d")
str_max_date = s.index.max().strftime("%Y-%m-%d")
dates_df = pd.DataFrame(
index=pd.date_range(str_min_date, str_max_date, freq="D")
)
return (
pd.DataFrame(s)
.merge(dates_df, how="outer", left_index=True, right_index=True)
.fillna(0)
)
def _create_biz_hol_index(
self, start_date: datetime.date, end_date: datetime.date
    ) -> tuple:
"""
        Return the indices of business days and holidays
"""
year = start_date.year
holiday = []
holiday_dict = jpholiday.year_holidays(year)
for i in range(len(holiday_dict)):
holiday.append(holiday_dict[i][0])
holiday = holiday + [
datetime.date(year, 1, 1),
datetime.date(year, 1, 2),
datetime.date(year, 1, 3),
datetime.date(year, 12, 31),
        ]  # add the year-end / New Year holidays
holiday = sorted(list(set(holiday))) # for uniqueness
holiday = pd.to_datetime(holiday)
calendar_full = pd.date_range(start_date, end_date, freq="D")
business_index = []
holiday_index = []
for idx, calendar in enumerate(calendar_full):
if (
(not calendar in holiday)
and (calendar.weekday() >= 0)
and (calendar.weekday() <= 4)
):
business_index.append(idx)
else:
holiday_index.append(idx)
return business_index, holiday_index
|
[
"cerezzodora2262@icloud.com"
] |
cerezzodora2262@icloud.com
|
49617de351135a141ec72527b03b3dde22f0125d
|
422dd5d3c48a608b093cbfa92085e95a105a5752
|
/students/nDruP/lesson06/calculator/calculator.py
|
d29df83130fa0fb445a7b6fea8d286044b6917c7
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018
|
a2052fdecd187d7dd6dbe6f1387b4f7341623e93
|
b1fea0309b3495b3e1dc167d7029bc9e4b6f00f1
|
refs/heads/master
| 2021-06-07T09:06:21.100330
| 2019-11-08T23:42:42
| 2019-11-08T23:42:42
| 130,731,872
| 4
| 70
| null | 2021-06-01T22:29:19
| 2018-04-23T17:24:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,658
|
py
|
"""
InsufficientOperands Exception and the Calculator class
"""
class InsufficientOperands(Exception):
"""
Create the InsufficientOperands exception for use in Calculator.
"""
pass
class Calculator(object):
"""
Create the calculator object.
"""
def __init__(self, adder, subtracter, multiplier, divider):
"""
Initialize the calculator
"""
self.adder = adder
self.subtracter = subtracter
self.multiplier = multiplier
self.divider = divider
self.stack = []
def enter_number(self, num):
"""
Insert the input to the front of self.stack
"""
self.stack.insert(0, num)
def _do_calc(self, operator):
"""
Return result of the operation of the first 2 elements of self.stack
"""
try:
result = operator.calc(self.stack[1], self.stack[0])
except IndexError:
raise InsufficientOperands
self.stack = [result]
return result
def subtract(self):
"""
Return the difference of the first 2 elements of self.stack
"""
return self._do_calc(self.subtracter)
def add(self):
"""
Return the sum of the first 2 elements of self.stack
"""
return self._do_calc(self.adder)
def multiply(self):
"""
Return the product of the first 2 elements of self.stack
"""
return self._do_calc(self.multiplier)
def divide(self):
"""
Return the quotient of the first 2 elements of self.stack
"""
return self._do_calc(self.divider)
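# Usage sketch (hypothetical operator objects; anything exposing
# calc(operand1, operand2) works, mirroring how _do_calc invokes it):
#
#     class Adder:
#         @staticmethod
#         def calc(a, b):
#             return a + b
#
#     calc = Calculator(Adder(), None, None, None)
#     calc.enter_number(2)
#     calc.enter_number(3)
#     calc.add()   # -> 5 (computes stack[1] + stack[0])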
|
[
"apark46.work@gmail.com"
] |
apark46.work@gmail.com
|
b2dc7e238687297569b877965faf69acc67f19ed
|
37413580d8f2402068fc9658fbe3df7b897fb728
|
/admm1.py
|
d442db9484c2325200152a67832e3ab97555e760
|
[] |
no_license
|
johnston-jeremy/mmv_cvx
|
b104b3afafe7a3a3ae2e4ebcf814c2b78c751ac1
|
8938a42e69f98da6b6b208114422770eddeaca47
|
refs/heads/main
| 2023-08-21T19:10:33.897425
| 2021-10-20T04:12:35
| 2021-10-20T04:12:35
| 411,396,714
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
import numpy as np
import numpy.linalg as la
from proximal_operators import prox_l2_norm_batch
def admm_problem1(Y, p):
N, L, M, mu, beta, taux, gamma = p.N, p.L, p.M, p.mu, p.beta, p.taux, p.gamma
X = np.zeros((N,M),dtype=complex)
E = np.zeros_like(Y)
T = np.zeros_like(Y)
A = p.A
AtA = np.matmul(A.T.conj(),A)
AtY = np.matmul(np.conj(A.T),Y)
for t in range(p.maxiter):
Xprev = X
E = mu*beta/(1+mu*beta) * (-np.matmul(A,X) + Y - 1/beta * T)
G = 2*(np.matmul(AtA, X) + np.matmul(np.conj(A.T), E + (1/beta)*T) - AtY)
D = X - taux/2 * G
X = prox_l2_norm_batch(taux/beta, D)
T = T + gamma*beta*(np.matmul(A, X) + E - Y)
if t > 10:
if np.linalg.norm(X-Xprev) <= p.tol*np.linalg.norm(Xprev):
break
return X
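# Usage sketch (hypothetical problem sizes; p just needs the attributes read
# above, so types.SimpleNamespace is enough):
#
#     from types import SimpleNamespace
#     A = np.random.randn(8, 16)
#     p = SimpleNamespace(N=16, L=8, M=4, mu=1.0, beta=1.0, taux=0.1,
#                         gamma=1.0, A=A, maxiter=200, tol=1e-6)
#     Y = A @ np.random.randn(16, 4)
#     X_hat = admm_problem1(Y, p)   # row-sparse estimate, shape (16, 4)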
|
[
"jjohnston1994@gmail.com"
] |
jjohnston1994@gmail.com
|
f7cf518c9adba372fba54eac9d1c3ca7dbadeeac
|
44aa5314f0291f6a5579214ba2d57b894ddcd1ec
|
/backend/dashboard/models.py
|
82b3d803f623cfda4b862f4232d35504c23846ec
|
[] |
no_license
|
dimnl/modum
|
f81fef16a599d79f2083ac72484857aadc52a87d
|
b31f80ac4c1e77ddbcd11d4eb2b1937f1c9215d7
|
refs/heads/master
| 2022-06-28T02:30:19.190845
| 2020-05-03T04:55:39
| 2020-05-03T04:55:39
| 260,448,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
from django.db import models
# Country information.
class Country(models.Model):
name = models.CharField(max_length=120)
description = models.TextField()
focus = models.CharField(max_length=120, default="")
    def __str__(self):
return self.name
# Sectors information.
class Sector(models.Model):
name = models.CharField(max_length=120)
description = models.TextField()
    def __str__(self):
return self.name
# Measures information.
class Measure(models.Model):
description = models.TextField()
    def __str__(self):
        return self.description
|
[
"alexandru.neculai96@gmail.com"
] |
alexandru.neculai96@gmail.com
|
d6f9c06998c30989b694c28b3da3ce04272f062f
|
61939b14aefb49057ac6aa93ea2b33c2a967988b
|
/actvision/config/urls.py
|
70223914199c62ea7aa80dce37b2bb1ee64987bd
|
[] |
no_license
|
ninanonansilo/actvision826
|
7e237608703e58e7bb3ea21e34044c790f07bc12
|
fdd7852ce2a92199919f58836a81675122842e7a
|
refs/heads/master
| 2023-07-05T01:06:57.149773
| 2021-08-26T11:51:14
| 2021-08-26T11:51:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,067
|
py
|
"""Actvision URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
import loginapp.views
import home.views
import movie.views
import settings.views
import inform.views
import register.views
import imgn.views
urlpatterns = [
path('admin/', admin.site.urls),
path('', loginapp.views.login, name='login.html'),
path('login/', loginapp.views.login_success, name='login_success.html'),
path('home', home.views.home, name='home.html'),
path('home/movie', movie.views.movie, name='movie.html'),
path('home/movie/video_list', movie.views.video_list, name='video_list'),
path('home/movie/upload_list', movie.views.upload_list, name='upload_list'),
path('home/movie/upload_video', movie.views.upload_video, name='upload_video'),
path('home/movie/delete_play_list', movie.views.delete_play_list, name='delete_play_list'),
path('home/movie/delete_video', movie.views.delete_video, name='delete_video'),
#path('home/setting', include('settings.urls')),
path('home/setting', settings.views.settings, name='settings.html'),
path('home/setting/check', settings.views.check, name='check'),
path('home/setting/check_pattern', settings.views.check_pattern, name='check_pattern'),
path('home/setting/check_Brightness_mode', settings.views.check_Brightness_mode, name='check_Brightness_mode'),
path('home/setting/update_Brightness', settings.views.update_Brightness, name='update_Brightness'),
path('home/setting/update_CDS_Value', settings.views.update_CDS_Value, name='update_CDS_Value'),
path('home/setting/update_min_max', settings.views.update_min_max, name='update_min_max'),
path('home/setting/power_mode', settings.views.power_mode, name='power_mode'),
path('home/setting/manual_control', settings.views.manual_control, name='manual_control'),
path('home/setting/update_on_off', settings.views.update_on_off, name='update_on_off'),
path('home/inform', inform.views.inform, name='inform.html'),
path('home/register', register.views.register, name='register.html'),
path('home/register/users_list', register.views.users_list, name='users_list'),
path('home/imgn', imgn.views.imgn, name='image.html'),
path('home/imgn/upload_img', imgn.views.upload_img, name='upload_img'),
path('home/imgn/save_letter', imgn.views.save_letter, name='save_letter'),
path('home/imgn/event_trans', imgn.views.event_trans, name='event_trans'),
]
|
[
"ckdgl@DESKTOP-6NQFU1P"
] |
ckdgl@DESKTOP-6NQFU1P
|
b97c469f8a12dbf8a8265a2bb6073036cda1fc81
|
25fd32f0c46b5883a820fd62aeceff6e1f38af1a
|
/02_python_excercise/swampy/World.py
|
ed16b2e8ebaf4295cde888b8cb65aa2753356e28
|
[] |
no_license
|
snowcool1/python_starter_basic
|
fb6572b1c3f378813a99ac23c3ef130143c9f340
|
73b7919f4c20f3de953d8ea50dd7a8a883b04657
|
refs/heads/master
| 2023-03-23T13:40:18.881335
| 2020-12-12T00:46:59
| 2020-12-12T00:46:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,936
|
py
|
#!/usr/bin/python
"""
This module is part of Swampy, a suite of programs available from
allendowney.com/swampy.
Copyright 2005 Allen B. Downey
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
import math
import random
import time
import threading
import sys
import tkinter
from swampy.Gui import Gui
class World(Gui):
"""Represents the environment where Animals live.
A World usually includes a canvas, where animals are drawn,
and sometimes a control panel.
"""
current_world = None
def __init__(self, delay=0.5, *args, **kwds):
Gui.__init__(self, *args, **kwds)
self.delay = delay
self.title('World')
# keep track of the most recent world
World.current_world = self
# set to False when the user presses quit.
self.exists = True
# list of animals that live in this world.
self.animals = []
# if the user closes the window, shut down cleanly
self.protocol("WM_DELETE_WINDOW", self.quit)
def wait_for_user(self):
"""Waits for user events and processes them."""
try:
self.mainloop()
except KeyboardInterrupt:
print('KeyboardInterrupt')
def quit(self):
"""Shuts down the World."""
# tell other threads that the world is gone
self.exists = False
# destroy closes the window
self.destroy()
# quit terminates mainloop (but since mainloop can get called
# recursively, quitting once might not be enough!)
Gui.quit(self)
def sleep(self):
"""Updates the GUI and sleeps.
Calling Tk.update from a function that might be invoked by
an event handler is generally considered a bad idea. For
a discussion, see http://wiki.tcl.tk/1255
However, in this case:
1) It is by far the simplest option, and I want to keep this
code readable.
2) It is generally the last thing that happens in an event
handler. So any changes that happen during the update
won't cause problems when it returns.
Sleeping is also a potential problem, since the GUI is
unresponsive while sleeping. So it is probably a good idea
to keep delay less than about 0.5 seconds.
"""
self.update()
time.sleep(self.delay)
def register(self, animal):
"""Adds a new animal to the world."""
self.animals.append(animal)
def unregister(self, animal):
"""Removes an animal from the world."""
self.animals.remove(animal)
def clear(self):
"""Undraws and removes all the animals.
And deletes anything else on the canvas.
"""
for animal in self.animals:
animal.undraw()
self.animals = []
try:
self.canvas.delete('all')
except AttributeError:
print('Warning: World.clear: World must have a canvas.')
def step(self):
"""Invoke the step method on every animal."""
for animal in self.animals:
animal.step()
def run(self):
"""Invoke step intermittently until the user presses Quit or Stop."""
self.running = True
while self.exists and self.running:
self.step()
self.update()
def stop(self):
"""Stops running."""
self.running = False
def map_animals(self, callable):
"""Apply the given callable to all animals.
Args:
callable: any callable object, including Gui.Callable
"""
return list(map(callable, self.animals))
def make_interpreter(self, gs=None):
"""Makes an interpreter for this world.
Creates an attribute named inter.
"""
self.inter = Interpreter(self, gs)
def run_text(self):
"""Executes the code from the TextEntry in the control panel.
Precondition: self must have an Interpreter and a text entry.
"""
source = self.te_code.get(1.0, tkinter.END)
self.inter.run_code(source, '<user-provided code>')
def run_file(self):
"""Read the code from the filename in the entry and runs it.
Precondition: self must have an Interpreter and a filename entry.
"""
filename = self.en_file.get()
fp = open(filename)
source = fp.read()
self.inter.run_code(source, filename)
class Interpreter(object):
"""Encapsulates the environment where user-provided code executes."""
def __init__(self, world, gs=None):
self.world = world
# if the caller didn't provide globals, use the current env
if gs == None:
self.globals = globals()
else:
self.globals = gs
def run_code_thread(self, *args):
"""Runs the given code in a new thread."""
return MyThread(self.run_code, *args)
def run_code(self, source, filename):
"""Runs the given code in the saved environment."""
code = compile(source, filename, 'exec')
try:
exec(code, self.globals)
except KeyboardInterrupt:
self.world.quit()
except tkinter.TclError:
pass
class MyThread(threading.Thread):
"""Wrapper for threading.Thread.
Improves the syntax for creating and starting threads.
"""
def __init__(self, target, *args):
threading.Thread.__init__(self, target=target, args=args)
self.start()
class Animal(object):
"""Abstract class, defines the methods child classes need to provide.
Attributes:
world: reference to the World the animal lives in.
x: location in Canvas coordinates
y: location in Canvas coordinates
"""
def __init__(self, world=None):
self.world = world or World.current_world
if self.world:
self.world.register(self)
self.x = 0
self.y = 0
def set_delay(self, delay):
"""Sets delay for this animal's world.
delay is made available as an animal attribute for backward
compatibility; ideally it should be considered an attribute
of the world, not an animal.
Args:
delay: float delay in seconds
"""
self.world.delay = delay
delay = property(lambda self: self.world.delay, set_delay)
def step(self):
"""Takes one step.
Subclasses should override this method.
"""
pass
def draw(self):
"""Draws the animal.
Subclasses should override this method.
"""
pass
def undraw(self):
"""Undraws the animal."""
if self.world.exists and hasattr(self, 'tag'):
self.world.canvas.delete(self.tag)
def die(self):
"""Removes the animal from the world and undraws it."""
self.world.unregister(self)
self.undraw()
def redraw(self):
"""Undraws and then redraws the animal."""
if self.world.exists:
self.undraw()
self.draw()
def polar(self, x, y, r, theta):
"""Converts polar coordinates to cartesian.
Args:
x, y: location of the origin
r: radius
theta: angle in degrees
Returns:
tuple of x, y coordinates
"""
rad = theta * math.pi/180
s = math.sin(rad)
c = math.cos(rad)
return [ x + r * c, y + r * s ]
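    # e.g. polar(0, 0, 1, 90) -> approximately [0.0, 1.0] (theta in degrees)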
def wait_for_user():
"""Invokes wait_for_user on the most recent World."""
World.current_world.wait_for_user()
if __name__ == '__main__':
# make a generic world
world = World()
# create a canvas and put a text item on it
ca = world.ca()
ca.text([0,0], 'hello')
# wait for the user
wait_for_user()
|
[
"doan.nguyen@datalogic.com"
] |
doan.nguyen@datalogic.com
|
d9378d480308166701e5c54976dd75940bd624ed
|
4f01aff7aaaa979e80ee6bd02d01b24dfbfc0e9d
|
/scripts/easy_update.py
|
e883650bdfb287e1ae42d467098920d8a2ad51ff
|
[] |
no_license
|
jakevc/easybuild-life-sciences
|
8d49a923a5448690d890205e4a85308bebd718c0
|
fef9de66a7a08ac3492eb38fe881fc74745aec61
|
refs/heads/master
| 2020-05-05T04:38:36.415766
| 2019-04-04T18:21:35
| 2019-04-04T18:21:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,541
|
py
|
#!/usr/bin/env python
import re
import os
import sys
import argparse
import imp
import requests
# from pprint import pprint
# from pprint import pformat
"""EasyUpdate performs package version updating for EasyBuild
easyconfig files. Automates the the updating of version information for R,
Python and bundels that extend R and Python. Pakage version information
is updated for modules in exts_list. Use langugue specific APIs for resolving
current version for each package.
Release Notes
2.0.0 2019-02-26 New feature to resolve dependent packages
for R and Python bundles. Read exts_list for R and Python listed in
depenendencies. Refactor code into Two magor classes: FrameWork and
UpdateExts. Rename subclasses for for R and Python: UpdateR UpdatePython.
This will help with migration into the EB FrameWork.
Fix bug with pkg_update counter
1.3.2 2018-12-19 follow "LinkingTo" for BioConductor packages
reported by Maxime Boissonneault
1.3.1 2018-11-28 fix bugs with pypi
easy_update was adding incorrect package names from requests_dist.
Verify package names and update easyconfig with name corrections.
Package names from pypi.requests_dist are not always correct.
Pypi package names are changing from dashes to underscores
ipython-genutils -> ipython_genutils
jupyter-core -> jupyter_core
pynacl -> PyNaCl
1.3.0 July 2018
update to use pypi.org JSON API
Project API: GET /pypi/<project_name>/json
Release API: GET /pypi/<project_name>/<version>/json
"""
__version__ = '2.0.0'
__maintainer__ = 'John Dey jfdey@fredhutch.org'
__date__ = 'Feb 26, 2019'
class FrameWork:
"""provide access to EasyBuild Config file variables
name, version, toolchain, eb.exts_list, dependencies, modulename, biocver,
methods:
print_update()
"""
def __init__(self, args, filename, primary):
self.debug = False
self.code = None
self.pyver = None
self.search_pkg = None
self.indent_n = 4
self.indent = ' ' * self.indent_n
self.ptr_head = 0
self.modulename = None
# update EasyConfig exts_list or check single package
if args.easyconfig:
eb = self.parse_eb(filename, primary=True)
self.exts_list = eb.exts_list
self.toolchain = eb.toolchain
self.name = eb.name
self.version = eb.version
self.modulename = eb.name + '-' + eb.version
self.modulename += '-' + eb.toolchain['name']
self.modulename += '-' + eb.toolchain['version']
self.interpolate = {'name': eb.name, 'namelower': eb.name.lower(),
'version': eb.version,
'pyver': None,
'rver': None}
self.parse_dependencies(eb)
# exts_defaultclass = 'PythonPackage' | 'RPackage' | 'PerlModule'
try:
self.versionsuffix = eb.versionsuffix
self.modulename += eb.versionsuffix
except (AttributeError, NameError):
self.versionsuffix = None
self.modulename = self.modulename % self.interpolate
if self.debug:
sys.stderr.write('debug - modulename: %s\n' % self.modulename)
sys.stderr.write('debug - file: %s\n' % filename[:-3])
try:
self.dependencies = eb.dependencies
except (AttributeError, NameError):
self.dependencies = None
try:
self.biocver = eb.biocver
if self.debug:
print('biocver: %s' % self.biocver)
except (AttributeError, NameError):
pass
if primary:
self.check_eb_package_name(args.easyconfig)
self.out = open(args.easyconfig[:-3] + ".update", 'w')
elif args.search_pkg:
self.search_pkg = args.search_pkg
if args.biocver:
self.biocver = args.biocver
if args.pyver:
self.name = "Python"
self.version = args.pyver
elif args.rver:
self.name = "R"
self.version = args.rver
else:
print('Language and version must be specified with ' +
'[--pyver x.x | --rver x.x | --biocver x.x]')
sea_pkg = {'name': args.search_pkg, 'version': 'x', 'type': 'add',
'spec': {}, 'meta': {}
}
self.search_ext = sea_pkg
def parse_eb(self, file_name, primary):
""" interpret EasyConfig file with 'exec'. Interperting fails if
constants that are not defined within the EasyConfig file. Add
undefined constants to <header>.
"""
header = 'SOURCE_TGZ = "%(name)s-%(version)s.tgz"\n'
header += 'SOURCE_TAR_GZ = "%(name)s-%(version)s.tar.gz"\n'
header += 'SOURCELOWER_TAR_GZ = "%(namelower)s-%(version)s.tar.gz"\n'
header += 'PYPI_SOURCE = "https://pypi.python.org/packages/source/%(nameletter)s/%(name)s"\n'
header += 'SOURCEFORGE_SOURCE = "https://download.sourceforge.net/%(namelower)s"\n'
eb = imp.new_module("EasyConfig")
try:
with open(file_name, "r") as f:
code = f.read()
except IOError as err:
print("opening %s: %s" % (file_name, err))
sys.exit(1)
try:
exec(header + code, eb.__dict__)
except Exception as err:
print("interpreting EasyConfig error: %s" % err)
sys.exit(1)
if primary: # save original text of source code
self.code = code
return eb
def parse_dependencies(self, eb):
try:
dependencies = eb.dependencies
except NameError:
return
for dep in dependencies:
if dep[0] == 'Python':
self.interpolate['pyver'] = dep[1]
if dep[0] == 'R':
self.interpolate['rver'] = dep[1]
def check_eb_package_name(self, easyconfig):
"""" check that easybuild filename matches package name
easyconfig is original filename
"""
f_name = os.path.basename(easyconfig)[:-3]
name_classification = f_name.split('-')
if f_name != self.modulename:
sys.stderr.write("Warning: file name does not match easybuild " +
"module name\n"),
if f_name != self.modulename or self.debug:
sys.stderr.write(" file name: %s\n module name: %s\n" % (
f_name, self.modulename))
def write_chunk(self, indx):
self.out.write(self.code[self.ptr_head:indx])
self.ptr_head = indx
def rewrite_extension(self, pkg):
name = pkg['name']
name_indx = self.code[self.ptr_head:].find(name)
name_indx += self.ptr_head + len(name) + 1
indx = self.code[name_indx:].find("'") + name_indx + 1
self.write_chunk(indx)
self.out.write("%s'" % pkg['version'])
self.ptr_head = self.code[self.ptr_head:].find("'") + self.ptr_head + 1
indx = self.code[self.ptr_head:].find('),') + self.ptr_head + 3
self.write_chunk(indx)
def output_module(self, lang, pkg):
"""write exts_list entry
"""
if lang == 'R':
output = "%s('%s', '%s')," % (self.indent, pkg['name'],
pkg['version'])
elif lang == 'Python':
pkg_fmt = self.indent + "('%s', '%s', {\n"
item_fmt = self.indent + self.indent + "'%s': %s,\n"
list_fmt = self.indent + self.indent + "'%s': ['%s'],\n"
output = pkg_fmt % (pkg['name'], pkg['version'])
for item in pkg.keys():
if item in ['name', 'version', 'action', 'type', 'orig_ver',
'processed', 'meta', 'spec']:
continue
output += item_fmt % (item, pkg[item])
for item in pkg['spec'].keys():
output += item_fmt % (item, pkg['spec'][item])
output += self.indent + "}),"
return output
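# Sample Python-style output produced by the format strings above
# (values are illustrative):
#     ('numpy', '1.16.0', {
#         'source_urls': ['https://pypi.io/packages/source/n/numpy'],
#     }),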
def print_update(self, lang, exts_list):
""" this needs to be re-written in a Pythonesque manor
if module name matches extension name then skip
"""
indx = self.code.find('exts_list')
indx += self.code[indx:].find('[')
indx += self.code[indx:].find('\n') + 1
self.write_chunk(indx)
for extension in exts_list:
name = extension['name']
if 'action' not in extension:
sys.stderr.write('No action: %s\n' % name)
extension['action'] = 'keep'
if self.name.lower() == name.lower():
# special case for bundles, if "name" is used in exts_list
indx = self.code[self.ptr_head:].find('),') + 2
indx += self.ptr_head
self.write_chunk(indx)
elif extension['type'] == 'base': # base library with no version
indx = self.code[self.ptr_head:].find(name)
indx += self.ptr_head + len(name) + 2
self.write_chunk(indx)
elif extension['action'] in ['keep', 'update']:
self.rewrite_extension(extension)
elif extension['action'] == 'duplicate':
print('Duplicate: %s' % name)
name_indx = self.code[self.ptr_head:].find(name)
name_indx += self.ptr_head + len(name)
indx = self.code[name_indx:].find('),') + name_indx + 3
self.ptr_head = indx
continue
elif extension['action'] in ['add', 'dep']:
output = self.output_module(lang, extension)
self.out.write("%s\n" % output)
self.out.write(self.code[self.ptr_head:])
class UpdateExts:
"""
"""
def __init__(self, args, eb, dep_eb):
"""
"""
self.debug = False
self.verbose = args.verbose
self.meta = args.meta
self.search_pkg = args.search_pkg
self.ext_counter = 0
self.pkg_update = 0
self.pkg_new = 0
self.pkg_duplicate = 0
self.indent_n = 4
self.indent = ' ' * self.indent_n
self.ext_list_len = 1
self.exts_dep = list()
self.depend_exclude = list()
if dep_eb:
for exten in dep_eb.exts_list:
if isinstance(exten, tuple):
self.exts_dep.append(exten[0])
else:
self.exts_dep.append(exten)
if args.easyconfig:
self.exts_orig = eb.exts_list
self.interpolate = {'name': eb.name, 'namelower': eb.name.lower(),
'version': eb.version}
if self.search_pkg:
self.search_pkg = args.search_pkg
if args.biocver:
self.biocver = args.biocver
if args.pyver:
self.name = "Python"
self.version = args.pyver
elif args.rver:
self.name = "R"
self.version = args.rver
else:
print('Language and version must be specified with ' +
'[--pyver x.x | --rver x.x | --biocver x.x]')
sea_pkg = {'name': args.search_pkg, 'version': 'x', 'type': 'add',
'spec': {}, 'meta': {}
}
self.search_ext = sea_pkg
self.exts_processed = list()
def is_processed(self, pkg):
""" check if package has been previously processed
if package exists AND is in the original exts_lists
Mark as 'duplicate'
updated July 2018
"""
name = pkg['name']
found = False
if name in self.exts_dep:
found = True
else:
for p_pkg in self.exts_processed:
if 'spec' in p_pkg and 'modulename' in p_pkg['spec']:
modulename = p_pkg['spec']['modulename']
else:
modulename = ''
if (str(name) == str(p_pkg['name'])) or (name == modulename):
found = True
break
if found:
if pkg['type'] == 'orig':
pkg['action'] = 'duplicate'
self.pkg_duplicate += 1
self.processed(pkg)
if self.verbose:
self.print_status(pkg)
return True
return found
def processed(self, pkg):
""" save Processed packages
save a normalize version of packae name to <exts_search> for Python
updated July 2018
"""
pkg['processed'] = True
pkg2 = dict(pkg)
self.exts_processed.append(pkg2)
def print_status(self, pkg):
""" print one line status for each package if --verbose
updated July 2018
"""
if pkg['action'] == 'update':
version = '%s -> %s' % (pkg['orig_ver'], pkg['version'])
else:
version = pkg['version']
action = '(%s)' % pkg['action']
tmpl = "%20s : %-20s %12s [%2d, %d]"
print(tmpl % (pkg['name'], version, action,
self.ext_list_len, self.ext_counter))
def print_meta(self, info):
""" print meta data from repository
this is broken for R
:param info: dict
"""
pass
def check_package(self, pkg):
"""query package authority [Pypi, CRAN, Bio] to get the latest version
information for a package. This is the heart of the program.
input: pkg{}
check that all dependencies are met for each package.
check_package can be called recursively.
pkg['type'] is used to track status.
- 'orig' is also used to track recursion
- 'dep' package that is added as result of dependancy
- 'add' packages read from file
pkg['action'] What action will be take to exts_list.
- 'add'; new package
- 'keep'; no update required
- 'update'; version change
- 'duplicate' package appears twice
"""
if self.debug:
print('check_package: %s' % pkg['name'])
if self.is_processed(pkg):
return
status = self.get_package_info(pkg)
if status in ["error", 'not found']:
if pkg['type'] == 'orig':
pkg['action'] = 'keep'
self.processed(pkg)
return
else:
msg = " Warning: %s is dependency, but can't be found!"
print(msg % pkg['name'])
return
if 'version' in pkg['meta']:
version = pkg['meta']['version']
else:
print("no 'version' in metadata for %s" % pkg['name'])
version = pkg['version']
if pkg['version'] == version:
pkg['action'] = 'keep'
else:
pkg['orig_ver'] = pkg['version']
pkg['version'] = pkg['meta']['version']
if pkg['type'] == 'orig':
pkg['action'] = 'update'
self.pkg_update += 1
elif pkg['type'] in ['dep', 'add']:
if self.debug:
print('check_package; dep or add')
pkg['action'] = 'add'
self.pkg_new += 1
if 'requires' in pkg['meta'] and pkg['meta']['requires'] is not None:
for depend in pkg['meta']['requires']:
if depend not in self.depend_exclude:
dep_pkg = {'name': depend, 'version': 'x', 'type': 'dep',
'spec': {}, 'meta': {}}
self.check_package(dep_pkg)
self.processed(pkg)
self.ext_counter += 1
if self.search_pkg:
output = self.output_module(pkg)
print(output)
if self.verbose:
self.print_status(pkg)
if self.meta:
self.print_meta(pkg['meta'])
def updateexts(self):
"""Loop through exts_list and check which packages need to be updated.
this is an external method for the class
"""
if self.search_pkg:
self.check_package(self.search_ext)
else:
self.ext_list_len = len(self.exts_orig)
for ext in self.exts_orig:
if isinstance(ext, tuple):
name = ext[0] % self.interpolate
version = ext[1] % self.interpolate
pkg = {'name': name, 'version': version, 'type': 'orig'}
if len(ext) > 2:
pkg['spec'] = ext[2]
pkg['meta'] = {}
self.check_package(pkg)
else:
self.processed({'name': ext, 'type': 'base'})
if self.verbose:
self.stats()
def stats(self):
sys.stderr.write("Updated Packages: %d\n" % self.pkg_update)
sys.stderr.write("New Packages: %d\n" % self.pkg_new)
sys.stderr.write("Dropped Packages: %d\n" % self.pkg_duplicate)
def get_package_info(self, pkg):
pass
class UpdateR(UpdateExts):
"""extend UpdateExts class to update package names from CRAN and BioCondutor
"""
def __init__(self, args, eb, deps_eb):
UpdateExts.__init__(self, args, eb, deps_eb)
self.debug = False
self.bioc_data = {}
self.depend_exclude = ['R', 'base', 'compiler', 'datasets', 'graphics',
'grDevices', 'grid', 'methods', 'parallel',
'splines', 'stats', 'stats4', 'tcltk', 'tools',
'utils', ]
try:
self.biocver = args.biocver
except (AttributeError, NameError):
self.biocver = None
try:
self.biocver = eb.biocver
except (AttributeError, NameError):
self.biocver = None
print('BioConductor version: biocver not set')
if self.biocver:
self.read_bioconductor_packages()
self.updateexts()
if eb:
eb.print_update('R', self.exts_processed)
def read_bioconductor_packages(self):
""" read the Bioconductor package lists into the bioc_data dict
"""
base_url = 'https://bioconductor.org/packages/json/%s' % self.biocver
bioc_urls = ['%s/bioc/packages.json' % base_url,
'%s/data/annotation/packages.json' % base_url,
'%s/data/experiment/packages.json' % base_url]
for url in bioc_urls:
resp = requests.get(url)
if resp.status_code != 200:
print('Error: %s %s' % (resp.status_code, url))
sys.exit(1)
self.bioc_data.update(resp.json())
if self.debug:
print('reading Bioconductor package info: %s' % url)
pkgcount = len(self.bioc_data.keys())
print('size: %s' % pkgcount)
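# After this runs, self.bioc_data maps package names to their metadata,
# e.g. self.bioc_data['Biobase']['Version'] (field names as consumed by
# get_bioc_info below; the package name is illustrative).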
def get_cran_info(self, pkg):
""" MD5sum, Description, Package, releases[]
"""
cran_list = "http://crandb.r-pkg.org/"
resp = requests.get(url=cran_list + pkg['name'])
if resp.status_code != 200:
return "not found"
cran_info = resp.json()
pkg['meta']['version'] = cran_info['Version']
if u'License' in cran_info and u'Part of R' in cran_info[u'License']:
return 'base package'
pkg['meta']['requires'] = []
if u"LinkingTo" in cran_info:
pkg['meta']['requires'].extend(cran_info[u"LinkingTo"].keys())
if u"Depends" in cran_info:
pkg['meta']['requires'].extend(cran_info[u"Depends"].keys())
if u"Imports" in cran_info:
pkg['meta']['requires'].extend(cran_info[u"Imports"].keys())
return 'ok'
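# e.g. GET http://crandb.r-pkg.org/ggplot2 returns JSON whose 'Version',
# 'LinkingTo', 'Depends' and 'Imports' fields drive the update above
# (the package name is illustrative).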
def get_bioc_info(self, pkg):
"""Extract <Depends> and <Imports> from BioCondutor json metadata
Example:
bioc_data['pkg']['Depends']
[u'R (>= 2.10)', u'BiocGenerics (>= 0.3.2)', u'utils']
interesting fields from BioConductor:
bioc_data['pkg']['Depends', 'Imports', 'Biobase', 'graphics', 'URL']
"""
status = 'ok'
if pkg['name'] in self.bioc_data:
pkg['meta']['version'] = self.bioc_data[pkg['name']]['Version']
if 'LinkingTo' in self.bioc_data[pkg['name']]:
pkg['meta']['requires'].extend(
[re.split('[ (><=,]', s)[0]
for s in self.bioc_data[pkg['name']]['LinkingTo']])
if 'Depends' in self.bioc_data[pkg['name']]:
pkg['meta']['requires'].extend(
[re.split('[ (><=,]', s)[0]
for s in self.bioc_data[pkg['name']]['Depends']])
if 'Imports' in self.bioc_data[pkg['name']]:
pkg['meta']['requires'].extend(
[re.split('[ (><=,]', s)[0]
for s in self.bioc_data[pkg['name']]['Imports']])
else:
status = "not found"
return status
def print_depends(self, pkg):
""" used for debugging """
for p in pkg['meta']['requires']:
if p not in self.depend_exclude:
print("%20s : requires %s" % (pkg['name'], p))
def get_package_info(self, pkg):
"""R version, check CRAN and BioConductor for version information
"""
if self.debug:
print('get_package_info: %s' % pkg['name'])
pkg['meta']['requires'] = []
status = self.get_bioc_info(pkg)
if status == 'not found':
status = self.get_cran_info(pkg)
if self.debug:
self.print_depends(pkg)
return status
def output_module(self, pkg):
output = "%s('%s', '%s')," % (self.indent, pkg['name'],
pkg['version'])
return output
class UpdatePython(UpdateExts):
"""extend ExtsList class to update package names from PyPI
Python Issues
There are many small inconsistencies with PyPI which make it difficult
to fully automate the building of EasyConfig files.
- dependency checking - check for extras=='all'
- PyPI project names do not always match module names and/or file names
project: liac-arff, module: arff, file name: liac_arff.zip
"""
def __init__(self, args, eb, deps_eb):
UpdateExts.__init__(self, args, eb, deps_eb)
self.pkg_dict = None
if eb:
(nums) = eb.version.split('.')
else:
(nums) = args.pyver.split('.')
self.python_version = "%s.%s" % (nums[0], nums[1])
self.pymajornum = nums[0]
self.pyminor = nums[1]
# Python >3.3 has additional built-in modules
if int(nums[0]) == 3 and int(nums[1]) > 3:
self.depend_exclude.extend(['argparse', 'asyncio'])
if self.debug and self.search_pkg:
print('Python Search PyPi: %s' % self.search_pkg)
self.updateexts()
if eb:
eb.print_update('Python', self.exts_processed)
def get_pypi_pkg_data(self, pkg, version=None):
"""
return meta data from PyPi.org
"""
if version:
req = 'https://pypi.org/pypi/%s/%s/json' % (pkg['name'], version)
else:
req = 'https://pypi.org/pypi/%s/json' % pkg['name']
resp = requests.get(req)
if resp.status_code != 200:
msg = "API error: %s GET release %s\n"
sys.stderr.write(msg % (resp.status_code, pkg['name']))
return 'not found'
project = resp.json()
# verify that package name is correct
if pkg['name'] != project['info']['name']:
sys.stderr.write('package name mismatch: %s -> %s\n' % (
pkg['name'], project['info']['name']))
pkg['name'] = project['info']['name']
return project
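# e.g. GET https://pypi.org/pypi/numpy/json returns the project JSON whose
# info and releases fields are consumed by get_pypi_info() below (the
# package name is illustrative).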
def check_package_name(self, pkg_name):
"""
verify that package name from EasyConfig
matches package name from PyPi
"""
pkg = {}
pkg['name'] = pkg_name
response = self.get_pypi_pkg_data(pkg)
if response == 'not found':
return response
else:
return response['info']['name']
def parse_pypi_requires(self, requires):
"""requires_dist uses distutils for version format and is defined
in PEP 404.
The project name must be as specified at pypi.org.
requires_dist: <name> <version>[; Environment Markers]
Only install the latest version so ignore all version information
input: 'numpy (>=1.7.1)' output: 'numpy'
Test that <python_version> and <sys_platform> conform.
If <extra> is present and required check that extra is contained
in "exts_list".
wincertstore (==0.2); sys_platform=='win32' and extra == 'ssl'
futures (>=3.0); (python_version=='2.7' or python_version=='2.6')
requests-kerberos (>=0.6); extra == 'kerberos'
trollius; python_version == "2.7" and extra == 'asyncio'
asyncio; python_version == "3.3" and extra == 'asyncio'
"""
if requires is None:
return []
dists = []
sys_platform = 'Linux'
python_version = self.python_version
platform_python_implementation = 'CPython'
extra_re = re.compile(r"and\sextra\s==\s'([A-Za-z0-9_\-\.]+)'")
for req in requires:
pkg_name = req.split()[0]
# test for Environment Marker (stuff after ;)
fields = req.split(';')
if len(fields) > 1:
env = re.sub(extra_re, '', fields[1])
if len(env) > 1:
try:
if eval(env):
name = self.check_package_name(pkg_name)
if name != 'not found':
dists.append(name)
except NameError as e:
msg = 'Error: Unable to evaluate: <%s> '
msg += 'for requirement: %s\n'
sys.stderr.write(msg % (env, pkg_name))
else:
# only add pkg_name if found in pypi
name = self.check_package_name(pkg_name)
if name != 'not found':
dists.append(name)
return dists
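# Worked example (hypothetical input; names are resolved against PyPI):
#   parse_pypi_requires(["numpy (>=1.7.1)",
#                        "wincertstore (==0.2); sys_platform=='win32'"])
# keeps 'numpy' and drops the win32-only requirement, since sys_platform
# is fixed to 'Linux' above.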
def print_meta(self, meta):
""" Display meta from pypi.org
"""
tags = ['filename', 'packagetype', 'url', 'python_version',
'requires_dist', 'summary', 'requires_python']
for tag in tags:
if tag in meta:
print("%s'%s': '%s'" % (self.indent, tag, meta[tag]))
def get_package_info(self, pkg):
"""Python version
Get package meta data via PyPi.org
pkg is a dict; {'name', 'version', 'spec'}
return metadata as dict
pkg['meta']['version']
if source package is not found look for whl
if pyver == ['3.5, '3.6', '3.7']:
arch = 'linux' ['manylinux', 'anylinux', 'linux']
"""
status = self.get_pypi_info(pkg)
return status
def get_pypi_release(self, pkg, version):
""" if source dist is not available from pypi search
the release for for a whl file.
"""
release = self.get_pypi_pkg_data(pkg, version)
if release == 'not found':
return 'not found'
cplist = ['cp35', 'cp36', 'cp37']
for rel in release['releases'][version]:
if any(cver in rel['python_version'] for cver in cplist):
if 'manylinux' in rel['filename']:
pkg['meta'].update(rel)
return 'ok'
return 'not found'
def get_pypi_info(self, pkg):
"""get version information from pypi. If <pkg_name> is not processed
seach pypi. pkg_name is now case sensitive and must match
info['digests']['sha256'], 'summary', 'url', 'filename', 'home_page'
"""
project = self.get_pypi_pkg_data(pkg)
if project == 'not found':
return 'not found'
status = 'not found'
pkg['meta'] = {}
pkg['meta'].update(project['info'])
new_version = pkg['meta']['version']
requires = project['info']['requires_dist']
pkg['meta']['requires'] = self.parse_pypi_requires(requires)
for ver in project['releases'][new_version]:
if 'packagetype' in ver and ver['packagetype'] == 'sdist':
pkg['meta']['url'] = ver['url']
pkg['meta']['filename'] = ver['filename']
status = 'ok'
break
# one last try to find package release data
if status != 'ok':
status = self.get_pypi_release(pkg, new_version)
# only set this if not set
if 'source_urls' not in pkg['spec'] and new_version != pkg['version']:
url = "['https://pypi.io/packages/source/%s/%s']"
pkg['spec']['source_urls'] = url % (pkg['name'][0], pkg['name'])
return status
def help():
print("usage: easy_update EasyConfig.eb [flags]")
print("easy_update Updates ext_list information of EasyBuild"),
print(" EasyConfig files")
print("easy_update works with R, Python and R-bioconductor"),
print(" EasyConfig files")
print(" --verbose diplay status for each package")
print(" --add [filename] filename contains list of package"),
print(" names to add")
sys.exit()
def main():
""" main """
parser = argparse.ArgumentParser(description='Update EasyConfig extslist')
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument(
'-v', '--verbose', dest='verbose', required=False, action='store_true',
help='Verbose; print lots of extra stuff, (default: false)')
parser.add_argument(
'--rver', dest='rver', required=False, action='store',
help='Set R version (major.minor) example 3.4')
bioc_help = 'Set BioConductor version (major.minor) example 3.6. '
bioc_help += 'Use with --rver'
parser.add_argument('--biocver', dest='biocver', required=False,
action='store', help=bioc_help)
parser.add_argument(
'--pyver', dest='pyver', required=False, action='store',
help='Set Python version [2.7 or 3.6]')
search_help = 'Search for single package. requires --rver or --pyver'
parser.add_argument(
'--search', dest='search_pkg', required=False, action='store',
help=search_help)
parser.add_argument(
'--meta', dest='meta', required=False, action='store_true',
help='output all meta data keys from Pypi, (default: false)')
parser.add_argument('easyconfig', nargs='?')
args = parser.parse_args()
lang = None
dep_eb = None
if args.easyconfig:
eb_name = os.path.basename(args.easyconfig)
eb = FrameWork(args, eb_name, True)
elif args.search_pkg:
eb_name = ''
eb = None
else:
print('If no EasyConfig is given, a module name must be ' +
'specified with --search pkg_name')
sys.exit()
if args.rver or eb_name[:3] == 'R-3':
lang = 'R'
elif args.pyver or eb_name[:7] == 'Python-':
lang = 'Python'
elif lang is None:
if eb.dependencies:
for x in eb.dependencies:
if x[0] == 'R' or x[0] == 'Python':
if lang is None:
lang = x[0]
dep_filename = '%s-%s-%s-%s.eb' % (x[0], x[1],
eb.toolchain['name'],
eb.toolchain['version'])
dep_eb = FrameWork(args, dep_filename, False)
else:
print('Could not determine language [R, Python]')
sys.exit(1)
if lang == 'R':
module = UpdateR(args, eb, dep_eb)
elif lang == 'Python':
module = UpdatePython(args, eb, dep_eb)
if __name__ == '__main__':
main()
|
[
"john@fuzzdog.com"
] |
john@fuzzdog.com
|
f7af79f56a51603282e3089490b050ca604d2712
|
1410d7722dd22c1ecd2aee0f4c59cf482846f445
|
/models/rbm.py
|
05e4c797eace5d0b7ec5a60c952e55268a115ae6
|
[] |
no_license
|
funzi-son/DRBM
|
95a1cb3d504746836d5d8dc2d9fb7b7eeae3fc8c
|
1a7c40d46b86ed4d4a8610f3979e94e5e297429b
|
refs/heads/master
| 2021-07-19T16:17:56.423241
| 2017-10-24T03:23:58
| 2017-10-24T03:23:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,384
|
py
|
"""The restricted Boltzmann machine"""
# Author: Srikanth Cherla
# City University London (2014)
# Contact: abfb145@city.ac.uk
from models import np
from models import theano
from models import T
theano.config.exception_verbosity = 'high'
def build_model(n_input, n_class, hypers, init_params):
"""Function to build the Theano graph for the RBM.
Input
-----
n_input : integer
Dimensionality of input features to the model.
n_class : integer
Number of class-labels.
hypers : dict
Model hyperparameters.
init_params : list
A list of initial values for the model parameters.
Output
------
x : T.matrix
Input matrix (with number of data points as first dimension).
y : T.ivector
Class labels corresponding to x.
p_y_given_x : T.nnet.softmax
Posterior probability of y given x.
cost : Theano scalar
Cost function of the DRBM which is to be optimized.
params: list(T.shared)
A list containing the parameters of the model.
grads: list(T.grad)
A list containing the gradients of the parameters of the model.
"""
n_visible = n_input + n_class
n_hidden = int(hypers['n_hidden'])
L1_decay = float(hypers['weight_decay'])
L2_decay = float(hypers['weight_decay'])
n_gibbs = int(hypers['n_gibbs'])
activation = str(hypers['activation'])
# Random number generators
T_RNG = T.shared_randomstreams.RandomStreams(hypers['seed'])
N_RNG = np.random.RandomState(hypers['seed'])
# 1. Initialize visible layer, inputs and targets
x = T.matrix(name='x', dtype=theano.config.floatX)
y = T.ivector(name='y') # XXX: What should be the type of this?
Y = T.eye(n_class)[y]
v = T.concatenate((x, Y), axis=1)
# Initialize model parameters
if init_params is None:
W_init = np.asarray(
N_RNG.normal(size=(n_visible, n_hidden), scale=0.01),
dtype=theano.config.floatX)
bv_init = np.zeros((n_visible,), dtype=theano.config.floatX)
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
else:
W_init = init_params[0]
bv_init = init_params[1]
bh_init = init_params[2]
W = theano.shared(W_init, name='W') # RBM weight matrix
bv = theano.shared(bv_init, name='bv') # Visible biases
bh = theano.shared(bh_init, name='bh') # Hidden biases
params = [W, bv, bh]
# Build Gibbs chain and graph to compute the cost function
v_sample, cost, updates_train = build_chain(v, n_input, n_class, W,
bv, bh, k=n_gibbs,
activation=activation,
T_RNG=T_RNG)
# Add weight decay (regularization) to cost.
L1 = abs(W).sum()
L2_sqr = (W**2).sum()
cost += (L1_decay*L1 + L2_decay*L2_sqr)
grads = T.grad(cost, params, consider_constant=v_sample)
# Expressions to compute conditional distribution.
p_y_given_x = drbm_fprop(x, params, n_class, activation)
return (x, y, p_y_given_x, cost, params, grads)
def build_chain(v, n_input, n_class, W, bv, bh, k=1, activation='sigmoid',
T_RNG=None):
"""Construct a k-step Gibbs chain starting at v for an RBM.
Input
-----
v : T.matrix or T.vector
If a matrix, multiple chains will be run in parallel (batch).
n_input : int
Dimensionality of input feature.
n_class : int
Number of output classes.
W : T.matrix
Weight matrix of the RBM.
bv : T.vector
Visible bias vector of the RBM.
bh : T.vector
Hidden bias vector of the RBM.
k : int
Length of the Gibbs chain (number of sampling steps).
activation : str
Type of activation function.
T_RNG : T.streams.RandomStreams
Theano random number generator.
Output
------
v_sample : Theano vector or matrix with the same shape as `v`
Corresponds to the generated sample(s).
cost : Theano scalar
Expression whose gradient with respect to W, bv, bh is the CD-k
approximation to the log-likelihood of `v` (training example) under the
RBM. The cost is averaged in the batch case.
updates: dictionary of Theano variable -> Theano variable
The `updates` object returned by scan."""
if T_RNG is None:
T_RNG = T.shared_randomstreams.RandomStreams(860331)
# One iteration of the Gibbs sampler.
def gibbs_step(v):
"""One step of Gibbs sampling in the RBM."""
# Compute hidden layer activations given visible layer
if activation == 'sigmoid':
mean_h = T.nnet.sigmoid(T.dot(v, W) + bh)
h = T_RNG.binomial(size=mean_h.shape, n=1, p=mean_h,
dtype=theano.config.floatX)
elif activation == 'tanh':
raise NotImplementedError
elif activation == 'relu': # XXX: Not working
mean_h = T.maximum(0, T.dot(v, W) + bh)
h = T.maximum(0, mean_h + T_RNG.normal(size=mean_h.shape, avg=0.0,
std=T.nnet.sigmoid(mean_h)))
else:
raise NotImplementedError
# Compute visible layer activations given hidden layer
acts_v = T.dot(h, W.T) + bv
# # Multinomial visible units sampling (equally sized)
# # TODO: Make this an if-else section based on an input hyperparameter
# acts_in = acts_v[:, :n_input]
# probs_in = T.nnet.softmax(acts_in)
# v_in = T_RNG.multinomial(n=1, pvals=probs_in,
# dtype=theano.config.floatX)
# acts_out = acts_v[:, -n_class:]
# probs_out = T.nnet.softmax(acts_out)
# v_out = T_RNG.multinomial(n=1, pvals=probs_out,
# dtype=theano.config.floatX)
# mean_v = T.concatenate((probs_in, probs_out), axis=1)
# v = T.concatenate((v_in, v_out), axis=1)
# Binomial visible units sampling
mean_v = T.nnet.sigmoid(acts_v)
v = T_RNG.binomial(size=mean_v.shape, n=1, p=mean_v,
dtype=theano.config.floatX)
return mean_v, v
# k-step Gibbs sampling loop
chain, updates = theano.scan(lambda v: gibbs_step(v)[1],
outputs_info=[v], non_sequences=[],
n_steps=k)
v_sample = chain[-1]
def free_energy(v):
"""Free energy of RBM visible layer."""
return -(v * bv).sum() - T.log(1 + T.exp(T.dot(v, W) + bh)).sum()
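# i.e. F(v) = -bv.v - sum_j log(1 + exp((v.W + bh)_j)), the standard
# binary-RBM free energy with the hidden units summed out.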
cost = (free_energy(v) - free_energy(v_sample)) / v.shape[0]
return v_sample, cost, updates
def drbm_fprop(x, params, n_class, activation):
"""Posterior probability of classes given inputs and model parameters.
Input
-----
x: T.matrix (of type theano.config.floatX)
Input data matrix.
params: list
A list containing the four parameters of the DRBM (see class definition).
n_class: integer
Number of classes.
Output
------
p_y_given_x: T.nnet.softmax
Posterior class probabilities of the targets given the inputs.
"""
# Initialize DRBM parameters and binary class-labels.
U = params[0][-n_class:, :] # or, U = W[n_input:, :]
W = params[0][:-n_class, :] # or, V = W[:n_input, :]
d = params[1][-n_class:]  # or, d = bv[n_input:]
c = params[2]
Y_class = theano.shared(np.eye(n_class, dtype=theano.config.floatX),
name='Y_class')
# Compute hidden state activations and energies.
s_hid = T.dot(x, W) + c
energies, _ = theano.scan(lambda y_class, U, s_hid:
s_hid + T.dot(y_class, U),
sequences=[Y_class],
non_sequences=[U, s_hid])
# Compute log-posteriors and then posteriors.
if activation == 'sigmoid':
log_p, _ = theano.scan(
lambda d_i, e_i: d_i + T.sum(T.log(1+T.exp(e_i)), axis=1),
sequences=[d, energies], non_sequences=[])
elif activation == 'tanh':
raise NotImplementedError
elif activation == 'relu':
raise NotImplementedError
else:
raise NotImplementedError
p_y_given_x = T.nnet.softmax(log_p.T) # XXX: Can the transpose be avoided?
return p_y_given_x
|
[
"Son.Tran@csiro.au"
] |
Son.Tran@csiro.au
|
296215af5720028bb4c81737ec6fe46db989de0c
|
4648437f4004530142e8aaf7bc65ffc9df121058
|
/asgi.py
|
b3e33727dc5c089f91996f06e4e404eb7442c29d
|
[] |
no_license
|
YashDilipShah/KC_random_forest
|
ebca680198b6db27888f1b315153e85149050b06
|
5cfea708450f09569e7f93cc74ec9e4d38bfc312
|
refs/heads/master
| 2020-12-27T18:30:22.352301
| 2020-02-27T21:08:19
| 2020-02-27T21:08:19
| 238,005,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
ASGI config for houseprice project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'houseprice.settings')
application = get_asgi_application()
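# One way to serve this (a sketch, assuming uvicorn is installed and this
# file is importable as 'asgi'):
#   uvicorn asgi:application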
|
[
"noreply@github.com"
] |
noreply@github.com
|