blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9134c21c482197da0d0d50f272fc04d4bc7d382b | 9b9b6a7aa1de1092a8480771f2b08ffa0972218d | /python/sklearn/linear-regression/workload-analysis/faster-rcnn/lr/lr.py | 90e61bda1999d1313da975be3ec8679947fef79f | [
"WTFPL"
] | permissive | lijiansong/lang | c42ca757306b38f37a26fef841b2460f05a13af6 | 27ffecd9afe67ddac003fc4d6333e06e2cc20434 | refs/heads/master | 2023-02-25T17:36:01.221720 | 2023-02-14T14:10:29 | 2023-02-14T14:10:29 | 149,586,739 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,826 | py | from sklearn import linear_model
def get_data(file_name):
    """Load tab-separated benchmark samples and min-max normalize them.

    Each line of ``file_name`` holds six tab-separated fields:
    ``batch_size, data_parallel, model_parallel, thread_num, fifo_size,
    end2end_fps``.  The returned feature rows contain the four tunable
    knobs (data_parallel, model_parallel, thread_num, fifo_size), each
    scaled to [0, 1]; the model_parallel / fifo_size columns are pinned to
    the constant 1 when their observed range collapses to a single value,
    matching the original normalization scheme.  Targets are the
    normalized fps values.

    Returns:
        (x_list, y_list): list of 4-element feature rows, list of floats.
    """
    x_list = []
    y_list = []
    # 'with' guarantees the handle is closed even if parsing raises.
    with open(file_name, 'r') as file_reader:
        text_lines = file_reader.readlines()
    print(type(text_lines))
    data_parallel_min = model_parallel_min = thread_num_min = fifo_size_min = end2end_fps_min = 100000000
    data_parallel_max = model_parallel_max = thread_num_max = fifo_size_max = end2end_fps_max = 0
    for line in text_lines:
        fields = line.rstrip('\n').split('\t')
        # fields[0] is batch_size, which is never used as a feature.
        data_parallel = int(fields[1])
        model_parallel = int(fields[2])
        thread_num = int(fields[3])
        fifo_size = int(fields[4])
        end2end_fps = float(fields[5])
        data_parallel_min = min(data_parallel_min, data_parallel)
        model_parallel_min = min(model_parallel_min, model_parallel)
        thread_num_min = min(thread_num_min, thread_num)
        fifo_size_min = min(fifo_size_min, fifo_size)
        end2end_fps_min = min(end2end_fps_min, end2end_fps)
        data_parallel_max = max(data_parallel_max, data_parallel)
        model_parallel_max = max(model_parallel_max, model_parallel)
        thread_num_max = max(thread_num_max, thread_num)
        fifo_size_max = max(fifo_size_max, fifo_size)
        end2end_fps_max = max(end2end_fps_max, end2end_fps)
        x_list.append([data_parallel, model_parallel, thread_num, fifo_size])
        y_list.append(end2end_fps)
    print(data_parallel_min, model_parallel_min, thread_num_min, fifo_size_min, end2end_fps_min)
    print(data_parallel_max, model_parallel_max, thread_num_max, fifo_size_max, end2end_fps_max)

    def scale(value, lo, hi):
        # Min-max normalization.  Raises ZeroDivisionError when lo == hi,
        # which matches the original behavior for the data_parallel and
        # thread_num columns (only model_parallel/fifo_size were guarded).
        return (value - lo) / (hi - lo)

    for i, item in enumerate(x_list):
        # BUG FIX: the original code indexed item[1]..item[4], but each
        # feature row only has indices 0..3 (batch_size is never stored),
        # so features were shifted by one column and item[4] raised
        # IndexError whenever fifo_size varied across samples.
        data_parallel, model_parallel, thread_num, fifo_size = item
        mp = 1 if model_parallel_min == model_parallel_max else scale(
            model_parallel, model_parallel_min, model_parallel_max)
        fs = 1 if fifo_size_min == fifo_size_max else scale(
            fifo_size, fifo_size_min, fifo_size_max)
        x_list[i] = [
            scale(data_parallel, data_parallel_min, data_parallel_max),
            mp,
            scale(thread_num, thread_num_min, thread_num_max),
            fs,
        ]
    for i, fps in enumerate(y_list):
        y_list[i] = scale(fps, end2end_fps_min, end2end_fps_max)
    return x_list, y_list
def get_lr_model(X, y):
    """Fit an ordinary least-squares model on (X, y) and return its coefficients."""
    model = linear_model.LinearRegression()
    model.fit(X, y)
    return model.coef_
if __name__ == '__main__':
    # The two datasets went through an identical fit/report pipeline that
    # was copy-pasted; loop over them instead so the steps stay in sync.
    datasets = [
        ('dense fp16 end2end fps', 'faster-rcnn-dense-fp16.txt'),
        ('dense fp16 hardware fps', 'faster-rcnn-dense-fp16-hw.txt'),
    ]
    clf = linear_model.LinearRegression()
    for title, file_name in datasets:
        print('===---------------- %s ----------------===' % title)
        X, y = get_data(file_name)
        print(len(X), len(y))
        clf.fit(X, y)
        print(clf.coef_)
        # https://statinfer.com/204-1-7-adjusted-r-squared-in-python/
        print('R-squared:', clf.score(X, y))
| [
"lijiansong@ict.ac.cn"
] | lijiansong@ict.ac.cn |
fd8d9edccb5cf431782d7a3b811a8be8d97b3cab | b182a3407b56c14b830b6ff3a543ba29d5996f84 | /beartype_test/a00_unit/a00_util/test_utilclass.py | 243c41c5a70f9dcddf4f9dced5d7a6262195fa2d | [
"MIT"
] | permissive | yamgent/beartype | 9d1899a6e6dacd1dd74652a81a2c1f275b1fd775 | afaaa0d8c25f8e5c06dd093982787b794ee48f2d | refs/heads/main | 2023-03-19T18:27:44.326772 | 2021-03-08T06:20:57 | 2021-03-08T06:26:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype class utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.cls.utilclstest` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ TESTS }....................
def test_is_class_builtin() -> None:
    '''
    Test the :func:`beartype._util.cls.utilclstest.is_type_builtin` function.
    '''

    # Defer heavyweight imports.
    from beartype._util.cls.utilclstest import is_type_builtin
    from beartype_test.a00_unit.data.data_type import (
        CLASSES_BUILTIN, CLASSES_NON_BUILTIN)

    # This tester returns exactly True for every builtin type...
    assert all(
        is_type_builtin(builtin_cls) is True
        for builtin_cls in CLASSES_BUILTIN)

    # ...and exactly False for every non-builtin type.
    assert all(
        is_type_builtin(non_builtin_cls) is False
        for non_builtin_cls in CLASSES_NON_BUILTIN)
def test_is_classname_builtin() -> None:
    '''
    Test the :func:`beartype._util.cls.utilclstest.is_classname_builtin` function.
    '''

    # Defer heavyweight imports.
    from beartype._util.cls.utilclstest import is_classname_builtin
    from beartype._util.utilobject import get_object_type_name
    from beartype_test.a00_unit.data.data_type import (
        CLASSES_BUILTIN, CLASSES_NON_BUILTIN)

    # Fully-qualified names of builtin types are accepted...
    for builtin_cls in CLASSES_BUILTIN:
        classname = get_object_type_name(builtin_cls)
        assert is_classname_builtin(classname) is True

    # ...while names of non-builtin types are rejected.
    for non_builtin_cls in CLASSES_NON_BUILTIN:
        classname = get_object_type_name(non_builtin_cls)
        assert is_classname_builtin(classname) is False
| [
"leycec@gmail.com"
] | leycec@gmail.com |
0adbc19feb75e545c6ae06788aee93eec6cfe56a | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/jedi/inference/filters.py | 62782334b68d20ba1730d0b69c1e7f35a5689373 | [
"MIT"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 12,493 | py | """
Filters are objects that you can use to filter names in different scopes. They
are needed for name resolution.
"""
from abc import abstractmethod
from typing import List, MutableMapping, Type
import weakref
from parso.tree import search_ancestor
from parso.python.tree import Name, UsedNamesMapping
from jedi.inference import flow_analysis
from jedi.inference.base_value import ValueSet, ValueWrapper, \
LazyValueWrapper
from jedi.parser_utils import get_cached_parent_scope, get_parso_cache_node
from jedi.inference.utils import to_list
from jedi.inference.names import TreeNameDefinition, ParamName, \
AnonymousParamName, AbstractNameDefinition, NameWrapper
# Module-level memoization store used by _get_definition_names().  Keys are
# weakly referenced, so entries vanish once parso drops the cached module.
_definition_name_cache: MutableMapping[UsedNamesMapping, List[Name]]
_definition_name_cache = weakref.WeakKeyDictionary()
class AbstractFilter:
    """Base class for name filters.

    When ``_until_position`` is set, :meth:`_filter` drops every name that
    starts at or after that position.  Subclasses must implement
    :meth:`get` and :meth:`values`.
    """

    _until_position = None

    def _filter(self, names):
        limit = self._until_position
        if limit is None:
            return names
        return [name for name in names if name.start_pos < limit]

    @abstractmethod
    def get(self, name):
        raise NotImplementedError

    @abstractmethod
    def values(self):
        raise NotImplementedError
class FilterWrapper:
    """Delegates to a wrapped filter and re-wraps every resulting name.

    Subclasses set ``name_wrapper_class`` to the wrapper type applied to
    each name produced by the underlying filter.
    """

    name_wrapper_class: Type[NameWrapper]

    def __init__(self, wrapped_filter):
        self._wrapped_filter = wrapped_filter

    def wrap_names(self, names):
        wrapper = self.name_wrapper_class
        return [wrapper(name) for name in names]

    def get(self, name):
        unwrapped = self._wrapped_filter.get(name)
        return self.wrap_names(unwrapped)

    def values(self):
        return self.wrap_names(self._wrapped_filter.values())
def _get_definition_names(parso_cache_node, used_names, name_key):
if parso_cache_node is None:
names = used_names.get(name_key, ())
return tuple(name for name in names if name.is_definition(include_setitem=True))
try:
for_module = _definition_name_cache[parso_cache_node]
except KeyError:
for_module = _definition_name_cache[parso_cache_node] = {}
try:
return for_module[name_key]
except KeyError:
names = used_names.get(name_key, ())
result = for_module[name_key] = tuple(
name for name in names if name.is_definition(include_setitem=True)
)
return result
class _AbstractUsedNamesFilter(AbstractFilter):
    """Filter backed by the module's parso ``used_names`` mapping.

    Looks up definition names via :func:`_get_definition_names` and converts
    the resulting parso names into ``name_class`` instances bound to
    ``parent_context``.  Subclasses narrow the raw names further.
    """
    # Class used to wrap raw parso names before returning them.
    name_class = TreeNameDefinition
    def __init__(self, parent_context, node_context=None):
        # node_context defaults to parent_context; it denotes where the
        # names are physically looked up, which may differ from the context
        # the resulting names are attributed to.
        if node_context is None:
            node_context = parent_context
        self._node_context = node_context
        self._parser_scope = node_context.tree_node
        module_context = node_context.get_root_context()
        # It is quite hacky that we have to use that. This is for caching
        # certain things with a WeakKeyDictionary. However, parso intentionally
        # uses slots (to save memory) and therefore we end up with having to
        # have a weak reference to the object that caches the tree.
        #
        # Previously we have tried to solve this by using a weak reference onto
        # used_names. However that also does not work, because it has a
        # reference from the module, which itself is referenced by any node
        # through parents.
        path = module_context.py__file__()
        if path is None:
            # If the path is None, there is no guarantee that parso caches it.
            self._parso_cache_node = None
        else:
            self._parso_cache_node = get_parso_cache_node(
                module_context.inference_state.latest_grammar
                if module_context.is_stub() else module_context.inference_state.grammar,
                path
            )
        self._used_names = module_context.tree_node.get_used_names()
        self.parent_context = parent_context
    def get(self, name):
        """Return converted definition names matching the string ``name``."""
        return self._convert_names(self._filter(
            _get_definition_names(self._parso_cache_node, self._used_names, name),
        ))
    def _convert_names(self, names):
        """Wrap raw parso names in ``name_class`` bound to parent_context."""
        return [self.name_class(self.parent_context, name) for name in names]
    def values(self):
        """Return all filtered definition names of the module."""
        return self._convert_names(
            name
            for name_key in self._used_names
            for name in self._filter(
                _get_definition_names(self._parso_cache_node, self._used_names, name_key),
            )
        )
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.parent_context)
class ParserTreeFilter(_AbstractUsedNamesFilter):
    """Used-names filter that additionally checks scope and flow reachability."""
    def __init__(self, parent_context, node_context=None, until_position=None,
                 origin_scope=None):
        """
        node_context is an option to specify a second value for use cases
        like the class mro where the parent class of a new name would be the
        value, but for some type inference it's important to have a local
        value of the other classes.
        """
        super().__init__(parent_context, node_context)
        self._origin_scope = origin_scope
        self._until_position = until_position
    def _filter(self, names):
        # Position filter first (base class), then scope membership, then
        # flow analysis.
        names = super()._filter(names)
        names = [n for n in names if self._is_name_reachable(n)]
        return list(self._check_flows(names))
    def _is_name_reachable(self, name):
        """True if ``name`` is defined directly in this filter's scope."""
        parent = name.parent
        if parent.type == 'trailer':
            # Attribute accesses (foo.bar) are not definitions in this scope.
            return False
        base_node = parent if parent.type in ('classdef', 'funcdef') else name
        return get_cached_parent_scope(self._parso_cache_node, base_node) == self._parser_scope
    def _check_flows(self, names):
        # Iterate from the latest definition backwards; once a definition is
        # definitely REACHABLE, earlier ones are shadowed and can be skipped.
        for name in sorted(names, key=lambda name: name.start_pos, reverse=True):
            check = flow_analysis.reachability_check(
                context=self._node_context,
                value_scope=self._parser_scope,
                node=name,
                origin_scope=self._origin_scope
            )
            if check is not flow_analysis.UNREACHABLE:
                yield name
            if check is flow_analysis.REACHABLE:
                break
class _FunctionExecutionFilter(ParserTreeFilter):
    """Tree filter for names inside an executed function.

    Names that are function parameters are converted via
    :meth:`_convert_param` (implemented by subclasses); all other names
    become plain ``TreeNameDefinition`` objects.
    """
    def __init__(self, parent_context, function_value, until_position, origin_scope):
        super().__init__(
            parent_context,
            until_position=until_position,
            origin_scope=origin_scope,
        )
        self._function_value = function_value
    def _convert_param(self, param, name):
        """Convert a parameter name; must be implemented by subclasses."""
        raise NotImplementedError
    @to_list
    def _convert_names(self, names):
        for name in names:
            param = search_ancestor(name, 'param')
            # Here we don't need to check if the param is a default/annotation,
            # because those are not definitions and never make it to this
            # point.
            if param:
                yield self._convert_param(param, name)
            else:
                yield TreeNameDefinition(self.parent_context, name)
class FunctionExecutionFilter(_FunctionExecutionFilter):
    """Function-execution filter for calls whose arguments are known."""
    def __init__(self, *args, arguments, **kwargs):
        super().__init__(*args, **kwargs)
        self._arguments = arguments
    def _convert_param(self, param, name):
        # Bind the parameter name to the actual call arguments.
        return ParamName(self._function_value, name, self._arguments)
class AnonymousFunctionExecutionFilter(_FunctionExecutionFilter):
    """Function-execution filter used when no call arguments are available."""
    def _convert_param(self, param, name):
        return AnonymousParamName(self._function_value, name)
class GlobalNameFilter(_AbstractUsedNamesFilter):
    """Filter yielding only names that appear in ``global`` statements."""
    def get(self, name):
        try:
            names = self._used_names[name]
        except KeyError:
            return []
        return self._convert_names(self._filter(names))
    @to_list
    def _filter(self, names):
        # Keep only names whose direct parent is a `global_stmt` node.
        for name in names:
            if name.parent.type == 'global_stmt':
                yield name
    def values(self):
        return self._convert_names(
            name for name_list in self._used_names.values()
            for name in self._filter(name_list)
        )
class DictFilter(AbstractFilter):
    """Filter whose names are backed by a plain dictionary."""

    def __init__(self, dct):
        self._dct = dct

    def get(self, name):
        # _convert may itself raise KeyError to signal "no such name", so
        # the lookup and the conversion share one try block.
        try:
            converted = self._convert(name, self._dct[name])
        except KeyError:
            return []
        return list(self._filter([converted]))

    def values(self):
        def generate():
            for key, value in self._dct.items():
                try:
                    yield self._convert(key, value)
                except KeyError:
                    continue
        return self._filter(generate())

    def _convert(self, name, value):
        # Identity by default; subclasses override to wrap values.
        return value

    def __repr__(self):
        return '<%s: for {%s}>' % (
            self.__class__.__name__, ', '.join(self._dct.keys()))
class MergedFilter:
    """Chains several filters, concatenating their results in order."""

    def __init__(self, *filters):
        self._filters = filters

    def get(self, name):
        result = []
        for flt in self._filters:
            result.extend(flt.get(name))
        return result

    def values(self):
        result = []
        for flt in self._filters:
            result.extend(flt.values())
        return result

    def __repr__(self):
        joined = ', '.join(str(f) for f in self._filters)
        return '%s(%s)' % (self.__class__.__name__, joined)
class _BuiltinMappedMethod(ValueWrapper):
    """``Generator.__next__`` ``dict.values`` methods and so on."""
    api_type = 'function'
    def __init__(self, value, method, builtin_func):
        # `builtin_func` is the wrapped builtin used for introspection;
        # `method` is the Python callable executed on py__call__.
        super().__init__(builtin_func)
        self._value = value
        self._method = method
    def py__call__(self, arguments):
        # TODO add TypeError if params are given/or not correct.
        return self._method(self._value, arguments)
class SpecialMethodFilter(DictFilter):
    """
    A filter for methods that are defined in this module on the corresponding
    classes like Generator (for __next__, etc).
    """
    class SpecialMethodName(AbstractNameDefinition):
        api_type = 'function'
        def __init__(self, parent_context, string_name, callable_, builtin_value):
            self.parent_context = parent_context
            self.string_name = string_name
            self._callable = callable_
            self._builtin_value = builtin_value
        def infer(self):
            # Find the first filter on the builtin value that knows this
            # name; the for/else + double break exits both loops as soon as
            # a builtin function was found.
            for filter in self._builtin_value.get_filters():
                # We can take the first index, because on builtin methods there's
                # always only going to be one name. The same is true for the
                # inferred values.
                for name in filter.get(self.string_name):
                    builtin_func = next(iter(name.infer()))
                    break
                else:
                    continue
                break
            return ValueSet([
                _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)
            ])
    def __init__(self, value, dct, builtin_value):
        super().__init__(dct)
        self.value = value
        self._builtin_value = builtin_value
        """
        This value is what will be used to introspect the name, where as the
        other value will be used to execute the function.

        We distinguish, because we have to.
        """
    def _convert(self, name, value):
        # Wrap the registered callable in a name object bound to this value.
        return self.SpecialMethodName(self.value, name, value, self._builtin_value)
class _OverwriteMeta(type):
    """Metaclass collecting ``publish_method`` registrations.

    At class-creation time it merges the ``overwritten_methods`` dicts of
    all base classes (reversed, so closer bases win) with the methods of
    the class body decorated by :func:`publish_method`, and stores the
    result as the class attribute ``overwritten_methods``.
    """
    def __init__(cls, name, bases, dct):
        super().__init__(name, bases, dct)
        base_dct = {}
        for base_cls in reversed(cls.__bases__):
            try:
                base_dct.update(base_cls.overwritten_methods)
            except AttributeError:
                pass
        for func in cls.__dict__.values():
            try:
                base_dct.update(func.registered_overwritten_methods)
            except AttributeError:
                pass
        cls.overwritten_methods = base_dct
class _AttributeOverwriteMixin:
    """Prepends a SpecialMethodFilter for overwritten methods to the wrapped value's filters."""
    def get_filters(self, *args, **kwargs):
        yield SpecialMethodFilter(self, self.overwritten_methods, self._wrapped_value)
        yield from self._wrapped_value.get_filters(*args, **kwargs)
class LazyAttributeOverwrite(_AttributeOverwriteMixin, LazyValueWrapper,
                             metaclass=_OverwriteMeta):
    """Attribute-overwriting wrapper whose wrapped value is created lazily."""
    def __init__(self, inference_state):
        self.inference_state = inference_state
class AttributeOverwrite(_AttributeOverwriteMixin, ValueWrapper,
                         metaclass=_OverwriteMeta):
    """Attribute-overwriting wrapper around an already-existing value."""
    pass
def publish_method(method_name):
    """Decorator factory that registers a function under ``method_name``.

    The registrations are later gathered by :class:`_OverwriteMeta` into
    the ``overwritten_methods`` mapping of the defining class.
    """
    def decorator(func):
        registry = func.__dict__.setdefault('registered_overwritten_methods', {})
        registry[method_name] = func
        return func
    return decorator
| [
"joao.a.severgnini@gmail.com"
] | joao.a.severgnini@gmail.com |
5904499d418489afdf5bcc82482c93dea481d2b4 | 7d9d3d5ce2ac19221163d54a94c025993db0af4f | /autotest/gcore/asyncreader.py | e5cdc8c94fae9dfcb59bfb31d900631c01c51494 | [
"MIT"
] | permissive | dcgull/gdal | 5408adad77d001db32173bba547b447220b5e9a2 | a5e2a7b54db955bd061ebfc6d69aa2dd752b120c | refs/heads/master | 2020-04-03T13:30:40.013172 | 2013-10-11T12:07:57 | 2013-10-11T12:07:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,186 | py | #!/usr/bin/env python
###############################################################################
# $Id: asyncreader.py 22782 2011-07-23 19:20:29Z warmerdam $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test AsyncReader interface
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2010, Even Rouault <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
import gdal
###############################################################################
# Test AsyncReader interface on the default (synchronous) implementation
def asyncreader_1():
    """Exercise GDAL's default (synchronous) AsyncReader implementation.

    Reads the whole of data/rgbsmall.tif asynchronously, writes the buffer
    into an in-memory GTiff, and verifies the per-band checksums match the
    source.  Returns the gdaltest-style strings 'success' or 'fail'.
    """
    ds = gdal.Open('data/rgbsmall.tif')
    asyncreader = ds.BeginAsyncReader(0,0,ds.RasterXSize,ds.RasterYSize)
    buf = asyncreader.GetBuffer()
    # The default implementation completes in a single step.
    result = asyncreader.GetNextUpdatedRegion(0)
    if result != [gdal.GARIO_COMPLETE, 0, 0, ds.RasterXSize,ds.RasterYSize]:
        gdaltest.post_reason('wrong return values for GetNextUpdatedRegion()')
        print(result)
        return 'fail'
    ds.EndAsyncReader(asyncreader)
    asyncreader = None
    # Round-trip the buffer through an in-memory dataset.
    out_ds = gdal.GetDriverByName('GTiff').Create('/vsimem/asyncresult.tif', ds.RasterXSize,ds.RasterYSize,ds.RasterCount)
    out_ds.WriteRaster(0,0,ds.RasterXSize,ds.RasterYSize,buf)
    expected_cs = [ ds.GetRasterBand(i+1).Checksum() for i in range(ds.RasterCount)]
    cs = [ out_ds.GetRasterBand(i+1).Checksum() for i in range(ds.RasterCount)]
    ds = None
    out_ds = None
    gdal.Unlink('/vsimem/asyncresult.tif')
    for i in range(len(cs)):
        if cs[i] != expected_cs[i]:
            gdaltest.post_reason('did not get expected checksum for band %d' % (i+1))
            print(cs[i])
            print(expected_cs[i])
            return 'fail'
    return 'success'
# Registry of test callables consumed by the gdaltest runner.
gdaltest_list = [ asyncreader_1 ]
if __name__ == '__main__':
    gdaltest.setup_run( 'asyncreader' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
| [
"gerard.choinka@ambrosys.de"
] | gerard.choinka@ambrosys.de |
e8fb239b281d1398883df79eb69f5d7664a1a78c | 92fb3d19b329434fe577fb5b8cc2e3302700d427 | /midterm/task1/forms.py | 8521fa2035b958a521928a7a1a41bbea07a0bf2b | [] | no_license | aigerimzh/BFDjango | 6a2635db8a1017b64c304193277d3030b3daf196 | 99f81274abdf0afcd9925cf2af057e616c433448 | refs/heads/master | 2020-03-28T22:45:13.782301 | 2018-11-22T17:10:54 | 2018-11-22T17:10:54 | 146,699,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django import forms
from .models import Restaurant
class RestaurantForm(forms.ModelForm):
    """ModelForm exposing a restaurant's name, phone number and city."""
    class Meta:
        model = Restaurant
        fields = ["r_name", "tel", "city"]
| [
"ajgerimzumabaeva10@gmail.com"
] | ajgerimzumabaeva10@gmail.com |
d55fcb04257be97981f8209ad7a1126b52920e52 | d8cdf0afcee58b9813d3392e8b68d8751d703885 | /scd/scd.py | 996b39524c756d51d8f47b00d61e787aba9b1082 | [
"MIT"
] | permissive | yukikawana/PhotographicImageSynthesis | db024377f3626ba7a0e01af49f7bfd8810af38f7 | 555663c0923b0cb8f9cd0dd3c4cfcdc95031a024 | refs/heads/master | 2020-03-19T21:43:21.513385 | 2018-06-19T12:07:06 | 2018-06-19T12:07:06 | 136,948,035 | 0 | 0 | null | 2018-06-11T15:48:57 | 2018-06-11T15:48:57 | null | UTF-8 | Python | false | false | 10,675 | py |
# coding: utf-8
# In[1]:
import os
import math
import numpy as np
import tensorflow as tf
import cv2
import re
slim = tf.contrib.slim
# In[2]:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as mpcm
import cv2
from nets import ssd_vgg_300
from nets import ssd_common
from preprocessing import ssd_vgg_preprocessing
# In[3]:
# Tableau-style 8-bit color triples, cycled through when drawing boxes.
colors_tableau = [(255, 255, 255), (31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
                  (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
                  (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
                  (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
                  (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Global counter used by ssd_process_image to number the feature dumps
# written to "hmpool5/%06d.npz" (7481 presumably marks the start of the
# test split -- TODO confirm against the dataset layout).
#cnt = 0#for training
cnt = 7481#for test
# In[4]:
# # Drawing and plotting routines.
# In[5]:
def bboxes_draw_on_img(img, scores, bboxes, colors, thickness=2, show_text=True):
    """Draw bounding boxes (and optional score labels) onto *img* in place.

    Boxes are relative (ymin, xmin, ymax, xmax) coordinates, scaled by the
    image height/width before drawing.
    """
    height, width = img.shape[0], img.shape[1]
    for idx in range(bboxes.shape[0]):
        box = bboxes[idx]
        color = colors[idx % len(colors)]
        top_left = (int(box[0] * height), int(box[1] * width))
        bottom_right = (int(box[2] * height), int(box[3] * width))
        # OpenCV expects (x, y) point order, hence the [::-1] reversals.
        cv2.rectangle(img, top_left[::-1], bottom_right[::-1], color, thickness)
        if show_text:
            label = '%s: %s' % ('Car', scores[idx])
            anchor = (top_left[0]-5, top_left[1])
            cv2.putText(img, label, anchor[::-1], cv2.FONT_HERSHEY_DUPLEX,
                        0.7, color, 1)
def plot_image(img, title='', figsize=(24, 9)):
    """Show *img* on a single matplotlib axes with an optional title."""
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    fig.tight_layout()
    ax.imshow(img)
    ax.set_title(title, fontsize=20)
# # SSD TensorFlow Network
#
# Build up the convolutional network and load thecheckpoint.
# In[6]:
# In[7]:
def get_tenors(ckpt_filename, isess, input=None, reuse=None):
    """Build the SSD-300 graph and return its key tensors.

    NOTE(review): the name looks like a typo for ``get_tensors``; it is kept
    for API compatibility.  ``ckpt_filename``/``reuse`` are currently unused
    (the checkpoint-restore code below is commented out).  When ``input`` is
    given it must be a 4-D tensor; only the end_points dict is returned.
    Otherwise a uint8 HWC placeholder is created and the full tuple
    (predictions, localisations, logits, end_points, img_input, ssd) is
    returned.
    """
    # Input placeholder.
    if not input == None:
        assert(input.get_shape().ndims == 4)
        # Subtract the per-channel VGG mean (RGB order assumed -- TODO confirm).
        image_4d=input-np.array([123.6800, 116.7790, 103.9390]).reshape((1,1,1,3))
    else:
        img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
        image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval( img_input , None, None, (None, None), resize=ssd_vgg_preprocessing.Resize.NONE)
        image_4d = tf.expand_dims(image_pre, 0)
    # Network parameters.
    params = ssd_vgg_300.SSDNet.default_params
    params = params._replace(num_classes=8)
    # SSD network construction.
    #reuse = True if 'ssd' in locals() else None
    #print("resue ", resuse)
    ssd = ssd_vgg_300.SSDNet(params)
    with slim.arg_scope(ssd.arg_scope(weight_decay=0.0005)):
        """
        if reuse:
            tf.get_variable_scope().reuse_variables()
        """
        #predictions, localisations, logits, end_points = ssd.net(image_4d, is_training=False, reuse=reuse)
        predictions, localisations, logits, end_points = ssd.net(image_4d, is_training=False)
    end_points["input"] = image_4d
    #init_op = tf.global_variables_initializer()
    #isess.run(init_op)
    # Restore SSD model.
    #train_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.contrib.framework.get_name_scope())
    #print(tf.contrib.framework.get_name_scope())
    #assert(False)
    #saver = tf.train.Saver(var_list=train_vars)
    #saver.restore(isess, ckpt_filename)
    # Save back model to clean the checkpoint?
    # # Image Pipeline
    #
    # Presenting the different steps of the vehicle detection pipeline.
    # In[9]:
    if not input == None:
        return end_points
    else:
        return predictions, localisations, logits, end_points, img_input, ssd
# Main SSD processing routine.
def ssd_process_image(img,tensors, isess, select_threshold=0.5):
    """Process an image through SSD network.

    Arguments:
      img: Numpy array containing an image.
      select_threshold: Classification threshold (i.e. probability threshold for car detection).
    Return:
      rclasses, rscores, rbboxes: Classes, scores and bboxes of objects detected.

    NOTE(review): in its current state the function is a feature-dump mode:
    it saves the pool5 activations to "hmpool5/%06d.npz" (numbered by the
    module-global `cnt`) and returns three empty arrays.  Everything after
    the early `return a, a, a` is unreachable detection code kept for
    reference -- confirm intent before relying on the detection outputs.
    """
    predictions, localisations, logits, end_points, img_input, ssd = tensors
    # Resize image to height 300.
    #factor = 300. / float(img.shape[0])
    #img = cv2.resize(img, (0,0), fx=factor, fy=factor)
    # Run SSD network and get class prediction and localization.
    #rs =r".*\/conv[0-9]\/conv[0-9]_[0-9]/Relu$"
    # Locate the pool5 tensor by matching graph op names with a regex.
    rs = r".*ssd_300_vgg\/pool5\/MaxPool$"
    rc = re.compile(rs)
    end_points = {}
    for op in tf.get_default_graph().as_graph_def().node:
        gr = rc.match(op.name)
        if gr:
            end_points[op.name.split("/")[-2]] = tf.get_default_graph().get_tensor_by_name(op.name+":0")
    rpredictions, rlocalisations, feat = isess.run([predictions, localisations, end_points["pool5"]], feed_dict={img_input: img})
    global cnt
    """
    feat[feat>0] = 1
    feat[feat<=0] = 0
    b, h, w, c = feat.shape
    semantic = np.zeros([b, 256, 512, c])
    semanticb = np.zeros([b, 256, 512, c],dtype=np.bool)
    for i in range(c):
        for j in range(b):
            semantic[j,:,:,i] = cv2.resize(feat[j,:,:,i], (semantic.shape[2], semantic.shape[1]))
    semanticb[semantic==0]=False
    semanticb[semantic==1]=True
    """
    # Dump the raw pool5 features and advance the global file counter.
    np.savez("hmpool5/%06d"%cnt, feat)
    cnt+=1
    a = np.array([])
    return a, a, a
    # --- unreachable detection pipeline below (see NOTE above) ---
    #rpredictions, rlocalisations = isess.run([predictions, localisations], feed_dict={img_input: img})
    # Get anchor boxes for this image shape.
    ssd.update_feature_shapes(rpredictions)
    anchors = ssd.anchors(img.shape, dtype=np.float32)
    # Compute classes and bboxes from the net outputs: decode SSD output.
    rclasses, rscores, rbboxes, rlayers, ridxes = ssd_common.ssd_bboxes_select(
        rpredictions, rlocalisations, anchors,
        threshold=select_threshold, img_shape=img.shape, num_classes=ssd.params.num_classes, decode=True)
    # Remove other classes than cars.
    idxes = (rclasses == 1)
    rclasses = rclasses[idxes]
    rscores = rscores[idxes]
    rbboxes = rbboxes[idxes]
    # Sort boxes by score.
    rclasses, rscores, rbboxes = ssd_common.bboxes_sort(rclasses, rscores, rbboxes,
                                                        top_k=400, priority_inside=True, margin=0.0)
    return rclasses, rscores, rbboxes
# ## Load sample image
# In[10]:
# Load a sample image.
# In[12]:
def bboxes_overlap(bbox, bboxes):
    """Overlap scores between one box and an array of boxes.

    Boxes are (ymin, xmin, ymax, xmax).  The score for each candidate is
    max(intersection/area1, intersection/area2), i.e. the intersection
    divided by the smaller of the two areas -- NOT the usual IoU.  A 1-D
    ``bboxes`` is treated as a single box (result has shape (1,)).
    """
    if bboxes.ndim == 1:
        bboxes = np.expand_dims(bboxes, 0)

    # Intersection rectangle, clipped to non-negative extents.
    inter_ymin = np.maximum(bboxes[:, 0], bbox[0])
    inter_xmin = np.maximum(bboxes[:, 1], bbox[1])
    inter_ymax = np.minimum(bboxes[:, 2], bbox[2])
    inter_xmax = np.minimum(bboxes[:, 3], bbox[3])
    inter_area = (np.maximum(inter_ymax - inter_ymin, 0.)
                  * np.maximum(inter_xmax - inter_xmin, 0.))

    areas = (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])
    ref_area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
    return np.maximum(inter_area / areas, inter_area / ref_area)
def bboxes_nms_intersection_avg(classes, scores, bboxes, threshold=0.5):
    """Apply non-maximum selection to bounding boxes with score averaging.

    The NMS algorithm works as follows: go over the list of boxes, and for each, see if
    boxes with lower score overlap. If yes, averaging their scores and coordinates, and
    consider it as a valid detection.

    Arguments:
      classes, scores, bboxes: SSD network output.
      threshold: Overlapping threshold between two boxes.
    Return:
      classes, scores, bboxes: Classes, scores and bboxes of objects detected after applying NMS.
    """
    keep_bboxes = np.ones(scores.shape, dtype=np.bool)
    new_bboxes = np.copy(bboxes)
    new_scores = np.copy(scores)
    # Number of boxes merged into each kept box (for averaging at the end).
    new_elements = np.ones_like(scores)
    for i in range(scores.size-1):
        if keep_bboxes[i]:
            # Computer overlap with bboxes which are following.
            sub_bboxes = bboxes[(i+1):]
            sub_scores = scores[(i+1):]
            overlap = bboxes_overlap(new_bboxes[i], sub_bboxes)
            mask = np.logical_and(overlap > threshold, keep_bboxes[(i+1):])
            # Repeatedly absorb overlapping boxes: merging moves the kept
            # box, which may bring new boxes over the overlap threshold.
            while np.sum(mask):
                keep_bboxes[(i+1):] = np.logical_and(keep_bboxes[(i+1):], ~mask)
                # Update boxes... (score-weighted running average of coords)
                tmp_scores = np.reshape(sub_scores[mask], (sub_scores[mask].size, 1))
                new_bboxes[i] = new_bboxes[i] * new_scores[i] + np.sum(sub_bboxes[mask] * tmp_scores, axis=0)
                new_scores[i] += np.sum(sub_scores[mask])
                new_bboxes[i] = new_bboxes[i] / new_scores[i]
                new_elements[i] += np.sum(mask)
                # New overlap with the remaining?
                overlap = bboxes_overlap(new_bboxes[i], sub_bboxes)
                mask = np.logical_and(overlap > threshold, keep_bboxes[(i+1):])
    # Average the accumulated scores over the number of merged boxes.
    new_scores = new_scores / new_elements
    idxes = np.where(keep_bboxes)
    return classes[idxes], new_scores[idxes], new_bboxes[idxes]
# In[13]:
# Apply Non-Maximum-Selection
"""
nms_threshold = 0.5
rclasses_nms, rscores_nms, rbboxes_nms = bboxes_nms_intersection_avg(rclasses, rscores, rbboxes, threshold=nms_threshold)
# Draw bboxes
img_bboxes = np.copy(img)
bboxes_draw_on_img(img_bboxes, rscores_nms, rbboxes_nms, colors_tableau, thickness=2)
plot_image(img_bboxes, 'SSD network output + Non Maximum Suppression.', (10, 10))
"""
# # Vehicle detection: images
# In[14]:
def process_image(img, tensors, isess, select_threshold=0.8, nms_threshold=0.5):
    """Run the SSD detector on *img*, apply averaging NMS, and draw the result.

    The image is annotated in place and also returned for convenience.
    """
    classes, det_scores, det_boxes = ssd_process_image(img, tensors, isess, select_threshold)
    classes, det_scores, det_boxes = bboxes_nms_intersection_avg(
        classes, det_scores, det_boxes, threshold=nms_threshold)
    # Draw the surviving detections directly onto the input image.
    bboxes_draw_on_img(img, det_scores, det_boxes, colors_tableau, thickness=2, show_text=True)
    return img
def imread_as_jpg(path):
    """Load an image as RGB, forcing a JPEG encode/decode pass for non-JPEG files.

    JPEG files are read directly; everything else is round-tripped through an
    in-memory JPEG (quality 100) so every input shares JPEG characteristics.
    """
    if path.endswith((".jpg", ".jpeg")):
        return mpimg.imread(path)
    raw = cv2.imread(path)
    quality = [int(cv2.IMWRITE_JPEG_QUALITY), 100]
    _, encoded = cv2.imencode('.jpg', raw, quality)
    # decode from jpeg format
    decoded = cv2.imdecode(encoded, 1)
    return cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
if __name__ == "__main__":
    # NOTE(review): main() is not defined in the visible portion of this
    # file — confirm it exists elsewhere before running this as a script.
    main()
| [
"kojirou.tensou@gmail.com"
] | kojirou.tensou@gmail.com |
71af7fe8f846df967083da104419347fd6448bda | 39f879ced0dbcbb92e7a12d8b09f4fa0aea4f925 | /pajbot/models/kvi.py | 499ba8d96ff3d9277996298df2f262fdc3e390c5 | [
"MIT"
] | permissive | coral/pajbot | f205b750d77cf06c75229aee93a5879abe4a10de | 682580f2a43a19a907cba231290b6d59157e123c | refs/heads/master | 2021-01-14T08:30:17.534620 | 2016-03-24T22:15:24 | 2016-03-24T22:15:24 | 54,676,838 | 0 | 0 | null | 2016-03-24T21:58:09 | 2016-03-24T21:58:08 | null | UTF-8 | Python | false | false | 1,383 | py | import logging
from collections import UserDict
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from pajbot.managers import Base
from pajbot.managers import DBManager
log = logging.getLogger('pajbot')
class KVIData(Base):
    """One persisted key/value counter row (table ``tb_idata``).

    ``id`` is the counter name; ``value`` holds its current integer state,
    starting at zero for freshly created counters.
    """

    __tablename__ = 'tb_idata'

    id = Column(String(64), primary_key=True)
    value = Column(Integer)

    def __init__(self, id):
        self.id = id
        self.value = 0

    def set(self, new_value):
        """Overwrite the stored counter value."""
        self.value = new_value

    def get(self):
        """Return the current counter value."""
        return self.value

    def inc(self):
        """Increase the counter by one."""
        self.value = self.value + 1

    def dec(self):
        """Decrease the counter by one."""
        self.value = self.value - 1

    def __str__(self):
        return str(self.value)
class KVIManager(UserDict):
    """Dict-like cache of KVIData rows backed by its own DB session.

    Missing keys are materialized on first access: a new KVIData row is
    created, registered with the session and cached, so callers can simply
    do ``manager['counter'].inc()`` and later ``commit()``.
    """

    def __init__(self):
        UserDict.__init__(self)
        self.db_session = DBManager.create_session()

    def __getitem__(self, id):
        if id not in self.data:
            kvidata = KVIData(id)
            # Bug fix: register the SAME instance that we cache. Previously a
            # second, separate KVIData(id) was added to the session, so
            # mutations made through self.data[id] were never persisted.
            self.db_session.add(kvidata)
            self.data[id] = kvidata
        return self.data[id]

    def commit(self):
        """Flush all pending counter changes to the database."""
        self.db_session.commit()

    def reload(self):
        """Drop the in-memory cache and repopulate it from the database."""
        self.data = {}
        num_values = 0
        for kvdata in self.db_session.query(KVIData):
            num_values += 1
            self.data[kvdata.id] = kvdata
        log.info('Loaded {0} KVIData values'.format(num_values))
        return self
| [
"pajlada@bithack.se"
] | pajlada@bithack.se |
83ba5b3175e103d12d140d6720c3c1c842808d87 | 9f9f4280a02f451776ea08365a3f119448025c25 | /plans/hsppw/qcut_hsp-s_002_pwcc_logit_hs.py | 389812e7751f3c4e9cb7cdc5e3736e373e543373 | [
"BSD-2-Clause"
] | permissive | dbis-uibk/hit-prediction-code | 6b7effb2313d2499f49b2b14dd95ae7545299291 | c95be2cdedfcd5d5c27d0186f4c801d9be475389 | refs/heads/master | 2023-02-04T16:07:24.118915 | 2022-09-22T12:49:50 | 2022-09-22T12:49:50 | 226,829,436 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | """Plan using all features."""
import os.path
from dbispipeline.evaluators import CvEpochEvaluator
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
import hit_prediction_code.common as common
from hit_prediction_code.dataloaders import ClassLoaderWrapper
from hit_prediction_code.dataloaders import EssentiaLoader
from hit_prediction_code.dataloaders import QcutLoaderWrapper
import hit_prediction_code.evaluations as evaluations
from hit_prediction_code.models.pairwise import PairwiseOrdinalModel
from hit_prediction_code.result_handlers import print_results_as_json
from hit_prediction_code.transformers.label import compute_hit_score_on_df
# Location of the preprocessed hit-song-prediction dataset files.
PATH_PREFIX = 'data/hit_song_prediction_msd_bb_lfm_ab/processed'
# Binary classification: hit scores are split into two bins.
number_of_classes = 2
# Loads Essentia/AcousticBrainz features, derives the 'yang_hit_score' label
# from last.fm play/listener counts, bins it (QcutLoaderWrapper — presumably
# quantile binning, TODO confirm) and exposes the bins as class labels.
dataloader = ClassLoaderWrapper(
    wrapped_loader=QcutLoaderWrapper(
        wrapped_loader=EssentiaLoader(
            dataset_path=os.path.join(
                PATH_PREFIX,
                'hsp-s_acousticbrainz.parquet',
            ),
            features=[
                *common.all_no_year_list(),
            ],
            label='yang_hit_score',
            nan_value=0,
            data_modifier=lambda df: compute_hit_score_on_df(
                df,
                pc_column='lastfm_playcount',
                lc_column='lastfm_listener_count',
                hit_score_column='yang_hit_score',
            ),
        ),
        number_of_bins=number_of_classes,
    ),
    labels=list(range(number_of_classes)),
)
# Min-max feature scaling followed by a pairwise ordinal model wrapping a
# logistic regression (random concatenated pairs, average threshold).
pipeline = Pipeline([
    ('scale', MinMaxScaler()),
    ('model',
     PairwiseOrdinalModel(
         wrapped_model=LogisticRegression(),
         pairs_factor=3.,
         threshold_type='average',
         pair_strategy='random',
         pair_encoding='concat',
         threshold_sample_training=False,
     )),
])
# Cross-validated evaluation; scoring is computed every epoch (step size 1).
evaluator = CvEpochEvaluator(
    cv=evaluations.cv(),
    scoring=evaluations.metrics.ordinal_classifier_scoring(),
    scoring_step_size=1,
)
result_handlers = [
    print_results_as_json,
]
| [
"mikevo-uibk@famv.net"
] | mikevo-uibk@famv.net |
b3272192375b1b837f1071863e9a82efcad1198e | 0e59533f5ed141fd0d286dbdaebdbeba14ee576e | /Scripts/viewer.py | a70503d56ef444a57b0e4632c6e962bf3e714e87 | [] | no_license | LizinczykKarolina/DjangoBussiness | cab793ee73435143abf3293b12371ac81805e3fc | b1d89109533c3f1f6b004b2ec259ea9ec13185bc | refs/heads/master | 2021-09-25T17:59:09.380062 | 2018-10-24T21:52:34 | 2018-10-24T21:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | #!c:\pycharmprojects\djangoprojects\djangobussiness\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
# bitmap image
self.image = ImageTk.BitmapImage(im, foreground="white")
tkinter.Label.__init__(self, master, image=self.image, bd=0,
bg="black")
else:
# photo image
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python viewer.py imagefile")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| [
"wieczorek.karolina1@o2.pl"
] | wieczorek.karolina1@o2.pl |
bd689d04a2bef94ca53dd98eaece7d358b901fcb | 06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b | /2016_schizConnect/unsupervised analysis/VIP/pcatv_components_analysis_VIP.py | 661f315760166011bf65da58fca12157fcc75feb | [] | no_license | neurospin/scripts | 6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458 | f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb | refs/heads/master | 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,856 | py |
import os
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import pandas as pd
import nibabel as nib
import json
from nilearn import plotting
from nilearn import image
from scipy.stats.stats import pearsonr
###############################################################################
# SCZ ONLY: correlate PCA-TV component scores with age in patients (dx == 1).
###############################################################################
INPUT_POPULATION = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/population.csv"
BASE_PATH = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/results/pcatv_scz/5_folds_VIP_scz"
INPUT_RESULTS = os.path.join(BASE_PATH,"results","0")
# Compute clinical scores: per-subject projections on each component
# (loaded from the X_test_transform archive of fold 0).
pop = pd.read_csv(INPUT_POPULATION)
age = pop[pop.dx ==1].age.values
scores = np.load(os.path.join(INPUT_RESULTS,"struct_pca_0.1_0.8_0.8","X_test_transform.npz"))['arr_0']
# One standalone figure per component whose correlation with age is significant.
for i in range(scores.shape[1]):
    corr,p = pearsonr(scores[:,i],age)
    if p < 0.05:
        print ("Significant correlation between age and score on component %s" % (i))
        plt.figure()
        plt.plot(scores[:,i],age,'o')
        plt.xlabel('Score on component %s' %(i))
        plt.ylabel('age')
        plt.title("Pearson's correlation = %.02f, p = %.01e" % (corr,p),fontsize=12)
###############################################################################
###############################################################################
# PLOT ALL CORRELATIONS WITH AGE (2x5 grid, one panel per component).
# NOTE(review): assumes exactly 10 components in the scores file — confirm.
fig, axs = plt.subplots(2,5, figsize=(15, 6), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = 1.0, wspace=.3)
axs = axs.ravel()
for i in range(10):
    corr,p = pearsonr(scores[:,i],age)
    axs[i].plot(scores[:,i],age,'o', markersize = 4)
    axs[i].set_title("Pearson' s correlation = %.02f \n p = %.01e" % (corr,p),fontsize=12)
    axs[i].xaxis.set_ticks(np.arange(-0.3,0.4,0.2))
    axs[i].set_xlabel('Score on component %s' %(i+1))
    axs[i].set_ylabel('age')
    axs[i].yaxis.set_ticks(np.arange(10,80,10))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.pdf"))
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.png"))
###############################################################################
# CONTROLS ONLY: same analysis restricted to controls (dx == 0).
###############################################################################
INPUT_POPULATION = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/population.csv"
BASE_PATH = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/results/pcatv_controls/5_folds_VIP_controls"
INPUT_RESULTS = os.path.join(BASE_PATH,"results","0")
# Compute clinical scores (note: a different PCA-TV parameter set is used here).
pop = pd.read_csv(INPUT_POPULATION)
age = pop[pop.dx ==0].age.values
scores = np.load(os.path.join(INPUT_RESULTS,"struct_pca_0.1_0.8_0.5","X_test_transform.npz"))['arr_0']
# One standalone figure per component whose correlation with age is significant.
for i in range(scores.shape[1]):
    corr,p = pearsonr(scores[:,i],age)
    if p < 0.05:
        print ("Significant correlation between age and score on component %s" % (i))
        plt.figure()
        plt.plot(scores[:,i],age,'o')
        plt.xlabel('Score on component %s' %(i))
        plt.ylabel('age')
        plt.title("Pearson's correlation = %.02f, p = %.01e" % (corr,p),fontsize=12)
###############################################################################
# PLOT ALL CORRELATIONS WITH AGE (2x5 grid, one panel per component).
fig, axs = plt.subplots(2,5, figsize=(15, 6), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = 1.0, wspace=.3)
axs = axs.ravel()
for i in range(10):
    corr,p = pearsonr(scores[:,i],age)
    axs[i].plot(scores[:,i],age,'o', markersize = 4)
    axs[i].set_title("Pearson' s correlation = %.02f \n p = %.01e" % (corr,p),fontsize=12)
    axs[i].xaxis.set_ticks(np.arange(-0.3,0.4,0.2))
    axs[i].set_xlabel('Score on component %s' %(i+1))
    axs[i].set_ylabel('age')
    axs[i].yaxis.set_ticks(np.arange(10,80,10))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.pdf"))
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.png"))
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# CONTROLS + SCZ: whole cohort, colored by group label y.
###############################################################################
INPUT_POPULATION = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/population.csv"
BASE_PATH = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/results/pcatv_all/5_folds_VIP_all"
INPUT_RESULTS = os.path.join(BASE_PATH,"results","0")
INPUT_DATA_y = "/neurospin/brainomics/2016_schizConnect/analysis/VIP/VBM/data/y.npy"
# NOTE(review): y presumably encodes diagnosis (0 = control, 1 = SCZ), matching
# the dx coding used above — confirm against the data-building script.
y = np.load(INPUT_DATA_y)
pop = pd.read_csv(INPUT_POPULATION)
age = pop.age.values
scores = np.load(os.path.join(INPUT_RESULTS,"struct_pca_0.1_0.5_0.1","X_test_transform.npz"))['arr_0']
# One figure per significant component, plotting the two groups separately.
for i in range(scores.shape[1]):
    corr,p = pearsonr(scores[:,i],age)
    if p < 0.05:
        print ("Significant correlation between age and score on component %s" % (i))
        plt.figure()
        plt.plot(scores[y==0,i],age[y==0],'o')
        plt.plot(scores[y==1,i],age[y==1],'o')
        plt.xlabel('Score on component %s' %(i))
        plt.ylabel('age')
        plt.title("Pearson's correlation = %.02f, p = %.01e" % (corr,p),fontsize=12)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# PLOT ALL CORRELATIONS WITH AGE (2x5 grid, one panel per component).
fig, axs = plt.subplots(2,5, figsize=(15, 6), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = 1.0, wspace=.3)
axs = axs.ravel()
for i in range(10):
    corr,p = pearsonr(scores[:,i],age)
    axs[i].plot(scores[y==0,i],age[y==0],'o', markersize = 4)
    axs[i].plot(scores[y==1,i],age[y==1],'o', markersize = 4)
    axs[i].set_title("Pearson' s correlation = %.02f \n p = %.01e" % (corr,p),fontsize=12)
    axs[i].xaxis.set_ticks(np.arange(-0.3,0.4,0.2))
    axs[i].set_xlabel('Score on component %s' %(i+1))
    axs[i].set_ylabel('age')
    axs[i].yaxis.set_ticks(np.arange(10,80,10))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.pdf"))
plt.savefig(os.path.join(BASE_PATH,"correlation_Age.png"))
###############################################################################
###############################################################################
###############################################################################
"ad247405@is222241.intra.cea.fr"
] | ad247405@is222241.intra.cea.fr |
f5a539e1278a45d740052e93379f9bbe3139700b | 5adde646184e7da34e2f2ee75b19703ead8fe761 | /manage.py | c738bbb6dc7c5a5a3a95e8f540eb99e36129e874 | [] | no_license | amarantejoacil/Progressao | ff5e2a44e7acc306480a000c52fca8b07eed6fcf | be3c42c34a7debd46d4e6fd64412ddb1c2bb574f | refs/heads/main | 2023-08-17T13:30:52.285824 | 2021-10-05T00:56:04 | 2021-10-05T00:56:04 | 411,513,104 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings before any Django import runs.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Progressao.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"joacil.amarante@gmail.com"
] | joacil.amarante@gmail.com |
32c1fd44ad91e650d9dd909d4c6ffd0db4a42814 | 288bbf5b6bf4c8471896533dc4c0538f1733c3a4 | /web_flask/1-hbnb_route.py | 1f5e59d54c15956153ef5b59f68cb0d58f6ad668 | [] | no_license | lemejiamo/AirBnB_clone_v2 | e75e61551763ed9677981b66d15667bdfe288dfc | 9a72db6fe2f100c1974fb0ebe0e3a8b5fb140d65 | refs/heads/master | 2023-07-19T03:17:10.412000 | 2021-09-21T23:46:58 | 2021-09-21T23:46:58 | 393,437,169 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | #!/usr/bin/python3
"""Initialize a basic Flask server exposing the '/' and '/hbnb' routes."""
from flask import Flask
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def index():
    """Return the greeting displayed at the root URL ('/')."""
    return 'Hello HBNB!'
@app.route('/hbnb', strict_slashes=False)
def hbnb():
    """Return the project name at '/hbnb'."""
    return 'HBNB'
if __name__ == "__main__":
    # Bind to all interfaces so the server is reachable from outside the host.
    app.run(debug=True, host="0.0.0.0", port=5000)
| [
"luismejia69@gmail.com"
] | luismejia69@gmail.com |
b25c921d84ede98178527e22823a6081472ab0f5 | 409c4d0dce72de987dff7c76857499fba8f8b7a0 | /popmail.py | 9735a642e45baeca25654101668a883343048f32 | [] | no_license | crystaleone/test | b4fece7fbc4e8ddd6186ea13245c62970c6d7038 | 4af3964bf6a657e888c7850f07a031440ba29e7a | refs/heads/master | 2021-01-18T19:17:36.924170 | 2017-09-19T03:37:01 | 2017-09-19T03:37:01 | 86,895,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import poplib, getpass, sys, mailconfig
mailserver = mailconfig.popservername
mailuser = mailconfig.popusername
mailpasswd = getpass.getpass('Password for %s?' % mailserver)
print('Connection...')
server = poplib.POP3(mailserver)
server.user(mailuser)
server.pass_(mailpasswd)
try:
print(server.getwelcom())
msgCount, msgByes = server.stat()
print('There are', msgCount, 'mail messages in', msgBytes, 'bytes')
print(server.list())
print('-'*80)
input('[Press Enter key]')
for i in range(msgCount):
hdr, message, octets = server.retr(i+1)
for line in message: print(line.decode())
print('-' * 80)
if i < msgCount - 1:
input('[Press Enter key]')
finally:
server.quit()
print('Bye.')
| [
"403868144@qq.com"
] | 403868144@qq.com |
2beb791796d2faba7263d9bb34130528608ebb4b | ef3cda1a9364d77cf0703ec047e463b4f6ced154 | /torch/_dynamo/convert_frame.py | ad163d5b8442993b58142be0f9e038c37cce8b26 | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | jaglinux/pytorch | e7021986cb3916b91347b0f0e82093cb0ba95d06 | 7da8705f18ec5ab3ecca283e4821cb402ed6e791 | refs/heads/master | 2023-05-12T21:50:50.110809 | 2023-05-08T23:26:19 | 2023-05-09T22:12:45 | 312,452,819 | 1 | 0 | NOASSERTION | 2022-09-10T01:42:55 | 2020-11-13T02:33:15 | C++ | UTF-8 | Python | false | false | 18,591 | py | import functools
import itertools
import logging
import os
import random
import types
import weakref
from typing import Dict, Optional, Set
import torch
import torch._logging
from torch._guards import tracing
from torch._utils_internal import signpost_event
from torch.fx.experimental.symbolic_shapes import (
ConstraintViolationError,
GuardOnDataDependentSymNode,
)
from torch.fx.graph_module import _forward_from_src as original_forward_from_src
from . import config, exc
from .allowed_functions import is_allowed
from .backends.registry import CompilerFn
from .bytecode_analysis import remove_dead_code, remove_pointless_jumps
from .bytecode_transformation import (
check_inst_exn_tab_entries_valid,
is_generator,
propagate_inst_exn_table_entries,
transform_code_object,
)
from .eval_frame import always_optimize_code_objects, skip_code, TorchPatcher
from .exc import (
augment_exc_message,
BackendCompilerFailed,
format_error_msg,
InternalTorchDynamoError,
TorchRuntimeError,
unimplemented,
Unsupported,
)
from .guards import CheckFunctionManager, GuardedCode
from .hooks import Hooks
from .output_graph import OutputGraph
from .replay_record import ExecutionRecord
from .symbolic_convert import InstructionTranslator
from .utils import (
CleanupManager,
counters,
dynamo_timed,
format_bytecode,
gen_record_file_name,
guard_failures,
increment_frame,
is_namedtuple,
istype,
orig_code_map,
reset_graph_break_dup_checker,
setup_compile_debug,
troubleshooting_url,
write_record_to_file,
)
log = logging.getLogger(__name__)
guards_log = torch._logging.getArtifactLogger(__name__, "guards")
bytecode_log = torch._logging.getArtifactLogger(__name__, "bytecode")
recompiles_log = torch._logging.getArtifactLogger(__name__, "recompiles")
class Tracker:
    """Tracks a set of objects by id() without keeping them alive.

    Objects are held through weak references; when one is collected, its id
    is automatically dropped from ``seen_ids`` by the weakref callback.
    """

    def __init__(self):
        self.seen = []
        self.seen_ids = set()

    def add(self, strong_obj):
        """Start tracking *strong_obj*; a second add of the same object is a no-op."""
        key = id(strong_obj)
        if key in self.seen_ids:
            return
        # The callback fires on collection and must not hold a strong ref
        # to strong_obj itself, so it closes only over the id and the set.
        ref = weakref.ref(strong_obj, lambda _, key=key: self.seen_ids.remove(key))
        self.seen.append(ref)
        self.seen_ids.add(key)

    def __contains__(self, item):
        return id(item) in self.seen_ids

    def clear(self):
        """Forget everything that is currently tracked."""
        del self.seen[:]
        self.seen_ids.clear()
# Module-level bookkeeping shared across all compilations in this process.
input_codes = Tracker()   # code objects dynamo has been asked to convert
output_codes = Tracker()  # code objects produced by dynamo itself (never re-converted)
# Grad / deterministic-algorithms state captured when a conversion starts.
initial_grad_state = None
initial_deterministic_algorithms_state = None
@functools.wraps(original_forward_from_src)
def fx_forward_from_src_skip_result(*args, **kwargs):
    """Delegate to FX's _forward_from_src, but mark the generated code as skipped."""
    # we monkey patch FX to prevent infinite loop of trying to convert
    # our generated code
    result: types.FunctionType = original_forward_from_src(*args, **kwargs)
    skip_code(result.__code__)
    return result
def wrap_convert_context(fn):
    """
    Context manager to:
    1) Save/restore torch.is_grad_enabled() state
    2) Save/restore python random state
    3) Save/restore torch random state
    4) Monkey patch torch.fx.graph_module._forward_from_src
    """

    @functools.wraps(fn)
    def _fn(*args, **kwargs):
        # Snapshot all global state that running the compiler may perturb.
        prior_grad_mode = torch.is_grad_enabled()
        py_rng_state = random.getstate()
        torch_rng_state = torch.random.get_rng_state()
        if torch.cuda.is_available():
            cuda_rng_state = torch.cuda.get_rng_state()
        prior_fwd_from_src = torch.fx.graph_module._forward_from_src
        # Prevent dynamo from trying to trace its own generated code.
        torch.fx.graph_module._forward_from_src = fx_forward_from_src_skip_result
        cleanup = setup_compile_debug()
        try:
            return fn(*args, **kwargs)
        finally:
            # Restore everything, even when compilation raised.
            cleanup.close()
            torch._C._set_grad_enabled(prior_grad_mode)
            random.setstate(py_rng_state)
            torch.random.set_rng_state(torch_rng_state)
            if torch.cuda.is_available():
                # NOTE(review): assumes CUDA availability did not change between
                # snapshot and restore — otherwise cuda_rng_state is unbound here.
                torch.cuda.set_rng_state(cuda_rng_state)
            torch.fx.graph_module._forward_from_src = prior_fwd_from_src

    _fn._torchdynamo_orig_callable = fn  # type: ignore[attr-defined]
    return _fn
@TorchPatcher.suppress_torch_distributed_warnings
def has_tensor_in_frame(frame):
    """Check if the frame has torch.* related bits"""
    # Check if the function was decorated using torch._dynamo.optimize
    if frame.f_code in always_optimize_code_objects:
        return True
    # Check if there is global import of torch.*
    for co_name in frame.f_code.co_names:
        if co_name in frame.f_globals:
            if is_allowed(frame.f_globals[co_name]):
                return True
    # Memo table keyed by id(); pre-seeding False below also terminates
    # recursion on self-referential containers.
    seen_ids: Dict[int, bool] = dict()
    def has_tensor(obj):
        """Recursively check if the obj has a tensor"""
        obj_id = id(obj)
        if obj_id in seen_ids:
            return seen_ids[obj_id]
        seen_ids[obj_id] = False
        if isinstance(obj, (torch.Tensor, torch.nn.Module)):
            seen_ids[obj_id] = True
            return seen_ids[obj_id]
        elif istype(obj, (list, tuple)):
            seen_ids[obj_id] = any(has_tensor(v) for v in obj)
            return seen_ids[obj_id]
        elif istype(obj, dict):
            # Some packages like pytest can be updated during runtime. So, make a
            # copy of values to avoid issues like "RuntimeError: dictionary
            # changed size during iteration"
            values = list(obj.values())
            seen_ids[obj_id] = any(has_tensor(v) for v in values)
            return seen_ids[obj_id]
        elif istype(obj, (str, int, float, type(None), bool)):
            seen_ids[obj_id] = False
            return seen_ids[obj_id]
        elif is_namedtuple(obj):
            seen_ids[obj_id] = any(has_tensor(getattr(obj, v)) for v in obj._fields)
            return seen_ids[obj_id]
        else:
            # if config.debug:
            #     print(
            #         f"Assuming that object of type {type(obj)} does not have a tensor"
            #     )
            return False
    # Check if the passed arguments are of type Tensor
    for value in frame.f_locals.values():
        if has_tensor(value):
            return True
    log.debug(
        "skipping because no torch.* %s \
%s %s",
        frame.f_code.co_name,
        frame.f_code.co_filename,
        frame.f_code.co_firstlineno,
    )
    return False
def exception_handler(e, code, frame=None):
    """Attach a replayable execution record to *e* and optionally log it."""
    record_filename = None
    if hasattr(e, "exec_record"):
        # Persist the execution record so the failure can be replayed later.
        record_filename = gen_record_file_name(e, code)
        write_record_to_file(record_filename, e.exec_record)
        e.record_filename = record_filename

    augment_exc_message(e)
    # Only log the exception if we are going to suppress it
    # if aren't suppressing it, a higher level except block will handle it
    if config.suppress_errors:
        log.error(format_error_msg(e, code, record_filename, frame))
# Monotonically increasing id handed out to each new frame_state seen below.
FRAME_COUNTER = 0
def convert_frame_assert(
    compiler_fn: CompilerFn,
    one_graph: bool = True,
    export: bool = False,
    export_constraints=None,
):
    """Fully convert a frame into an FX graph.

    Returns a callback (wrapped by wrap_convert_context) that, given a frame,
    either produces a GuardedCode or returns None to leave the frame alone.
    """
    reset_graph_break_dup_checker()

    def _convert_frame_assert(
        frame: types.FrameType, cache_size: int, hooks: Hooks, frame_state
    ):
        increment_frame()

        # Assign a stable per-frame id on first sight.
        global FRAME_COUNTER
        if "_id" not in frame_state:
            frame_state["_id"] = FRAME_COUNTER
            FRAME_COUNTER += 1

        code = frame.f_code

        # Report (or error on) recompilation of a function we already compiled.
        if code in input_codes and (
            recompiles_log.isEnabledFor(logging.DEBUG) or config.error_on_recompile
        ):
            # Bug fix: the trailing commas previously made `message` a tuple of
            # two strings, so the log line and RecompileError showed a tuple
            # repr; build a single message string instead.
            if config.report_guard_failures:
                message = (
                    f"Recompiling function {code.co_name} in {code.co_filename}, "
                    f"triggered by the following guard failure: {str(guard_failures[code][-1])}"
                )
            else:
                message = (
                    f"Recompiling function {code.co_name} in {code.co_filename}, "
                    "set env var TORCHDYNAMO_REPORT_GUARD_FAILURES=1 to debug further"
                )
            if recompiles_log.isEnabledFor(logging.DEBUG):
                recompiles_log.debug(message)
            if config.error_on_recompile:
                raise exc.RecompileError(message)

        input_codes.add(code)
        # Never re-convert code dynamo itself generated.
        if code in output_codes:
            return None
        # Optional filter: only compile the function named in the env var.
        if (
            os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION")
            and os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION") != code.co_name
        ):
            return None
        if code.co_name == "<genexpr>" and code.co_filename.endswith(
            ("transformers/file_utils.py", "transformers/utils/generic.py")
        ):
            # not needed, but cleans up torchbench error stats
            return None
        if code.co_name == "__setattr__":
            # setattr could be tricky to handle generally,
            # but also not likely useful to compile- skip the whole frame
            return None
        if code.co_name == "__init__" and code.co_filename.startswith(
            os.path.dirname(torch.optim.__file__)
        ):
            # optimizer support is still incomplete see
            # test_state_dict in test/dynamo/test_optimizers.py
            return None

        # Check if the frame is generated by an exec builtin call
        # TODO - Running exec generated frame seems propagates f_globals to the
        # next frames.
        if code.co_name == "<module>" and code.co_filename == "<string>":
            return None

        if (
            code.co_name == "<lambda>"
            and code.co_filename == "<string>"
            and not bool(frame.f_builtins)
        ):
            # namedtuple subclass constructor. Empty builtins cause issue with
            # len keyword in LIST_LEN guard.
            return None

        if is_generator(code):
            unimplemented("generator")

        # Refuse to keep compiling a function that recompiles too often.
        if cache_size >= config.cache_size_limit:

            def format_func_info(code):
                return f"'{code.co_name}' ({code.co_filename}:{code.co_firstlineno})"

            def format_guard_failures(code):
                # For the common case, it's sufficient to see just the most recent failure.
                # We could add a verbose mode if needed
                return f" reasons: {str(guard_failures[code][-1])}\n"

            if config.report_guard_failures:
                assert code in guard_failures, "TODO(whc) any other recompile reasons?"
                log.warning(
                    "torch._dynamo hit config.cache_size_limit (%s)\n"
                    "   function: %s\n"
                    "   reasons:  %s\n"
                    "to diagnose recompilation issues, see %s.",
                    config.cache_size_limit,
                    format_func_info(code),
                    format_guard_failures(code),
                    troubleshooting_url,
                )
            else:
                log.warning(
                    "torch._dynamo hit config.cache_size_limit (%s)\n"
                    "   function: %s\n"
                    "to diagnose recompilation issues, set env variable TORCHDYNAMO_REPORT_GUARD_FAILURES=1"
                    " and also see %s.",
                    config.cache_size_limit,
                    format_func_info(code),
                    troubleshooting_url,
                )
            unimplemented("cache_size_limit reached")

        # Frames that never touch tensors are not worth compiling.
        if not has_tensor_in_frame(frame):
            return None

        # Record process-wide state at the moment compilation begins.
        global initial_grad_state
        initial_grad_state = torch.is_grad_enabled()

        global initial_deterministic_algorithms_state
        initial_deterministic_algorithms_state = (
            torch.are_deterministic_algorithms_enabled()
        )

        signpost_event(
            "dynamo",
            "_convert_frame_assert._compile",
            {
                "co_name": code.co_name,
                "co_filename": code.co_filename,
                "co_firstlineno": code.co_firstlineno,
                "cache_size": cache_size,
            },
        )

        return _compile(
            frame.f_code,
            frame.f_globals,
            frame.f_locals,
            frame.f_builtins,
            compiler_fn,
            one_graph,
            export,
            export_constraints,
            hooks,
            frame,
            frame_state=frame_state,
        )

    _convert_frame_assert._torchdynamo_orig_callable = compiler_fn  # type: ignore[attr-defined]
    return wrap_convert_context(_convert_frame_assert)
@dynamo_timed(phase_name="entire_frame_compile")
def _compile(
    code: types.CodeType,
    globals: Dict[str, object],
    locals: Dict[str, object],
    builtins: Dict[str, object],
    compiler_fn: CompilerFn,
    one_graph: bool,
    export: bool,
    export_constraints,
    hooks: Hooks,
    frame: Optional[types.FrameType] = None,
    frame_state=None,
) -> Optional[GuardedCode]:
    """Symbolically trace *code*, rewrite its bytecode, and pair it with a
    guard check function. Returns None when the frame is skipped."""
    output: Optional[OutputGraph] = None
    # This is shared across restarts
    mutated_closure_cell_contents: Set[str] = set()
    # from .utils import print_once; print_once(code.co_filename)
    def transform(instructions, code_options):
        # Mutates `instructions`/`code_options` in place; the traced graph is
        # captured into the enclosing `output`.
        nonlocal output
        tracer = InstructionTranslator(
            instructions,
            code,
            locals,
            globals,
            builtins,
            code_options,
            compiler_fn,
            one_graph,
            export,
            export_constraints,
            mutated_closure_cell_contents,
            frame_state=frame_state,
        )
        with tracing(tracer.output.tracing_context):
            tracer.run()
        output = tracer.output
        assert output is not None
        assert output.output_instructions
        instructions[:] = output.output_instructions
        code_options.update(output.code_options)
        if config.dead_code_elimination:
            propagate_inst_exn_table_entries(instructions)
            check_inst_exn_tab_entries_valid(instructions)
            instructions[:] = remove_pointless_jumps(remove_dead_code(instructions))
    try:
        # Tracing may request a restart (e.g. after discovering new facts);
        # retry up to a bounded number of attempts.
        for attempt in itertools.count():
            try:
                out_code = transform_code_object(code, transform)
                orig_code_map[out_code] = code
                break
            except exc.RestartAnalysis:
                log.debug("Restarting analysis ...")
                if attempt > 100:
                    unimplemented("100+ RestartAnalysis() calls")
            except exc.SkipFrame as e:
                log.debug(
                    "Skipping frame %s %s \
%s %s",
                    e,
                    code.co_name,
                    code.co_filename,
                    code.co_firstlineno,
                )
                if one_graph:
                    log.debug("No graph captured with one_graph=True")
                return None
        output_codes.add(out_code)
        def log_bytecode(prefix, name, filename, line_no, code):
            if bytecode_log.isEnabledFor(logging.DEBUG):
                bytecode_log.debug(
                    format_bytecode(prefix, name, filename, line_no, code)
                )
        log_bytecode(
            "ORIGINAL BYTECODE",
            code.co_name,
            code.co_filename,
            code.co_firstlineno,
            code,
        )
        log_bytecode(
            "MODIFIED BYTECODE",
            code.co_name,
            code.co_filename,
            code.co_firstlineno,
            out_code,
        )
        assert output is not None
        assert output.guards is not None
        CleanupManager.instance[out_code] = output.cleanups
        check_fn = CheckFunctionManager(
            output,
            locals,
            globals,
            hooks.guard_fail_fn if hooks else None,
        )
        guarded_code = GuardedCode(out_code, check_fn.check_fn)
        if guards_log.isEnabledFor(logging.DEBUG):
            guard_str = "GUARDS:\n"
            # NOTE: `code` here is the comprehension variable (a guard code
            # string), intentionally shadowing the outer code object.
            guard_str += "\n".join(
                [
                    f" {code}"
                    for guard in sorted(output.guards)
                    if guard.code_list is not None
                    for code in guard.code_list
                ]
            )
            guards_log.debug(guard_str)
        if hooks.guard_export_fn is not None:
            hooks.guard_export_fn(output.guards)
        return guarded_code
    except (
        Unsupported,
        TorchRuntimeError,
        BackendCompilerFailed,
        AssertionError,
        ConstraintViolationError,
        GuardOnDataDependentSymNode,
    ) as e:
        # Known failure modes: record/augment, then let callers decide.
        exception_handler(e, code, frame)
        raise
    except Exception as e:
        exception_handler(e, code, frame)
        # TODO: Why??? Why not raise the original exception as is
        raise InternalTorchDynamoError() from e
def convert_frame(compiler_fn: CompilerFn, hooks: Hooks):
    """Try to convert a frame into an FX graph, if error leave frame unmodified"""
    inner_convert = convert_frame_assert(compiler_fn, one_graph=False)
    def _convert_frame(
        frame: types.FrameType, cache_size: int, hooks: Hooks, frame_state
    ):
        # NOTE(review): this `hooks` parameter shadows the outer `hooks`
        # argument; the value supplied at call time is the one that is used.
        counters["frames"]["total"] += 1
        try:
            result = inner_convert(frame, cache_size, hooks, frame_state)
            counters["frames"]["ok"] += 1
            return result
        except (NotImplementedError, Unsupported):
            # Expected path: dynamo cannot handle this frame; run it eagerly.
            log.info("converting frame raised unsupported, leaving it unconverted")
        except Exception:
            if not config.suppress_errors:
                raise
            # Deliberate best-effort mode: swallow the error and run eagerly.
            log.info("converting frame raised error, suppressing error")
        return None
    _convert_frame._torchdynamo_orig_callable = compiler_fn  # type: ignore[attr-defined]
    return _convert_frame
# TODO mlazos: add support for same args, or record them
def replay(filename):
    """Re-run a compilation from a saved ExecutionRecord for debugging.

    Record writing is disabled for the duration so the replay cannot
    recursively record itself.
    """
    from .backends.debugging import eager
    original_replay_val = config.replay_record_enabled
    config.replay_record_enabled = False
    with open(filename, "rb") as in_file:
        record = ExecutionRecord.load(in_file)
    # Overlay this module's globals on top of the recorded ones so the
    # replayed code can resolve names available here.
    record.globals = dict(itertools.chain(record.globals.items(), globals().items()))
    try:
        _compile(
            record.code,
            record.globals,
            record.locals,
            record.builtins,
            compiler_fn=eager,
            one_graph=False,
            export=False,
            hooks=Hooks(),
            frame=None,
        )
    except Exception:
        # Deliberate best-effort: replay is a debugging aid; errors were
        # already surfaced by the original compilation.
        pass
    finally:
        config.replay_record_enabled = original_replay_val
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
fba4dd53132c4fbd8b2fccfb4c3dafc9f1516bc5 | 0386591b51fdbf5759faef6afb8729b64a3f1589 | /qgisserver/migrations/0017_servicemetadata.py | e1ec476316f8ebcd373fbe5e42c84ab3b9204c44 | [
"BSD-3-Clause"
] | permissive | giscube/giscube-admin | 1e155402e094eb4db1f7ca260a8d1402e27a31df | 4ce285a6301f59a8e48ecf78d58ef83c3827b5e0 | refs/heads/main | 2023-07-11T17:23:56.531443 | 2023-02-06T15:12:31 | 2023-02-06T15:12:31 | 94,087,469 | 7 | 1 | BSD-3-Clause | 2023-07-07T13:22:09 | 2017-06-12T11:12:56 | Python | UTF-8 | Python | false | false | 5,511 | py | # Generated by Django 2.2.11 on 2020-05-06 04:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('giscube', '0016_datasetmetadata'),
('qgisserver', '0016_service_legend'),
]
operations = [
migrations.CreateModel(
name='ServiceMetadata',
fields=[
('date', models.DateField(blank=True, null=True, verbose_name='date')),
('language', models.CharField(blank=True, choices=[('ab', 'Abkhaz'), ('aa', 'Afar'), ('af', 'Afrikaans'), ('ak', 'Akan'), ('sq', 'Albanian'), ('am', 'Amharic'), ('ar', 'Arabic'), ('an', 'Aragonese'), ('hy', 'Armenian'), ('as', 'Assamese'), ('av', 'Avaric'), ('ae', 'Avestan'), ('ay', 'Aymara'), ('az', 'Azerbaijani'), ('bm', 'Bambara'), ('ba', 'Bashkir'), ('eu', 'Basque'), ('be', 'Belarusian'), ('bn', 'Bengali'), ('bh', 'Bihari'), ('bi', 'Bislama'), ('bs', 'Bosnian'), ('br', 'Breton'), ('bg', 'Bulgarian'), ('my', 'Burmese'), ('ca', 'Catalan; Valencian'), ('ch', 'Chamorro'), ('ce', 'Chechen'), ('ny', 'Chichewa; Chewa; Nyanja'), ('zh', 'Chinese'), ('cv', 'Chuvash'), ('kw', 'Cornish'), ('co', 'Corsican'), ('cr', 'Cree'), ('hr', 'Croatian'), ('cs', 'Czech'), ('da', 'Danish'), ('dv', 'Divehi; Maldivian;'), ('nl', 'Dutch'), ('dz', 'Dzongkha'), ('en', 'English'), ('eo', 'Esperanto'), ('et', 'Estonian'), ('ee', 'Ewe'), ('fo', 'Faroese'), ('fj', 'Fijian'), ('fi', 'Finnish'), ('fr', 'French'), ('ff', 'Fula'), ('gl', 'Galician'), ('ka', 'Georgian'), ('de', 'German'), ('el', 'Greek, Modern'), ('gn', 'Guaraní'), ('gu', 'Gujarati'), ('ht', 'Haitian'), ('ha', 'Hausa'), ('he', 'Hebrew (modern)'), ('hz', 'Herero'), ('hi', 'Hindi'), ('ho', 'Hiri Motu'), ('hu', 'Hungarian'), ('ia', 'Interlingua'), ('id', 'Indonesian'), ('ie', 'Interlingue'), ('ga', 'Irish'), ('ig', 'Igbo'), ('ik', 'Inupiaq'), ('io', 'Ido'), ('is', 'Icelandic'), ('it', 'Italian'), ('iu', 'Inuktitut'), ('ja', 'Japanese'), ('jv', 'Javanese'), ('kl', 'Kalaallisut'), ('kn', 'Kannada'), ('kr', 'Kanuri'), ('ks', 'Kashmiri'), ('kk', 'Kazakh'), ('km', 'Khmer'), ('ki', 'Kikuyu, Gikuyu'), ('rw', 'Kinyarwanda'), ('ky', 'Kirghiz, Kyrgyz'), ('kv', 'Komi'), ('kg', 'Kongo'), ('ko', 'Korean'), ('ku', 'Kurdish'), ('kj', 'Kwanyama, Kuanyama'), ('la', 'Latin'), ('lb', 'Luxembourgish'), ('lg', 'Luganda'), ('li', 'Limburgish'), ('ln', 'Lingala'), ('lo', 'Lao'), ('lt', 'Lithuanian'), ('lu', 'Luba-Katanga'), ('lv', 'Latvian'), ('gv', 'Manx'), 
('mk', 'Macedonian'), ('mg', 'Malagasy'), ('ms', 'Malay'), ('ml', 'Malayalam'), ('mt', 'Maltese'), ('mi', 'Māori'), ('mr', 'Marathi (Marāṭhī)'), ('mh', 'Marshallese'), ('mn', 'Mongolian'), ('na', 'Nauru'), ('nv', 'Navajo, Navaho'), ('nb', 'Norwegian Bokmål'), ('nd', 'North Ndebele'), ('ne', 'Nepali'), ('ng', 'Ndonga'), ('nn', 'Norwegian Nynorsk'), ('no', 'Norwegian'), ('ii', 'Nuosu'), ('nr', 'South Ndebele'), ('oc', 'Occitan'), ('oj', 'Ojibwe, Ojibwa'), ('cu', 'Old Church Slavonic'), ('om', 'Oromo'), ('or', 'Oriya'), ('os', 'Ossetian, Ossetic'), ('pa', 'Panjabi, Punjabi'), ('pi', 'Pāli'), ('fa', 'Persian'), ('pl', 'Polish'), ('ps', 'Pashto, Pushto'), ('pt', 'Portuguese'), ('qu', 'Quechua'), ('rm', 'Romansh'), ('rn', 'Kirundi'), ('ro', 'Romanian, Moldavan'), ('ru', 'Russian'), ('sa', 'Sanskrit (Saṁskṛta)'), ('sc', 'Sardinian'), ('sd', 'Sindhi'), ('se', 'Northern Sami'), ('sm', 'Samoan'), ('sg', 'Sango'), ('sr', 'Serbian'), ('gd', 'Scottish Gaelic'), ('sn', 'Shona'), ('si', 'Sinhala, Sinhalese'), ('sk', 'Slovak'), ('sl', 'Slovene'), ('so', 'Somali'), ('st', 'Southern Sotho'), ('es', 'Spanish; Castilian'), ('su', 'Sundanese'), ('sw', 'Swahili'), ('ss', 'Swati'), ('sv', 'Swedish'), ('ta', 'Tamil'), ('te', 'Telugu'), ('tg', 'Tajik'), ('th', 'Thai'), ('ti', 'Tigrinya'), ('bo', 'Tibetan'), ('tk', 'Turkmen'), ('tl', 'Tagalog'), ('tn', 'Tswana'), ('to', 'Tonga'), ('tr', 'Turkish'), ('ts', 'Tsonga'), ('tt', 'Tatar'), ('tw', 'Twi'), ('ty', 'Tahitian'), ('ug', 'Uighur, Uyghur'), ('uk', 'Ukrainian'), ('ur', 'Urdu'), ('uz', 'Uzbek'), ('ve', 'Venda'), ('vi', 'Vietnamese'), ('vo', 'Volapük'), ('wa', 'Walloon'), ('cy', 'Welsh'), ('wo', 'Wolof'), ('fy', 'Western Frisian'), ('xh', 'Xhosa'), ('yi', 'Yiddish'), ('yo', 'Yoruba'), ('za', 'Zhuang, Chuang'), ('zu', 'Zulu')], max_length=2, null=True, verbose_name='language')),
('information', models.TextField(blank=True, null=True, verbose_name='information')),
('provider_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='provider name')),
('provider_web', models.URLField(blank=True, max_length=255, null=True, verbose_name='provider web')),
('provider_email', models.CharField(blank=True, max_length=255, null=True, verbose_name='provider email')),
('summary', models.TextField(blank=True, null=True, verbose_name='summary')),
('bbox', models.CharField(blank=True, max_length=255, null=True, verbose_name='BBOX')),
('parent', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='qgisserver.Service', related_name='metadata')),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='qgisserver_servicemetadata_metadata', to='giscube.MetadataCategory', to_field='code')),
],
options={
'abstract': False,
},
),
]
| [
"abusquets@gmail.com"
] | abusquets@gmail.com |
1d4f0068e28496e63ddc5e766c7288fe77d385e8 | da03eb12f10b03ba44056cdb364fa899756b545e | /admin/modelos/archivos_struct.py | 85cfb84a403471cd7773a8aafe834eca4bf56337 | [
"Apache-2.0"
] | permissive | ZerpaTechnology/coming-soon | 5800c4e4f4a5045b99f1743dc5f99c1b72fe8bbd | b4682d6ca740afd7f79e4de650d9a327a7f15331 | refs/heads/master | 2021-05-10T12:31:18.709911 | 2018-02-26T05:17:28 | 2018-02-26T05:17:28 | 118,444,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,055 | py | db=DB()
db('Archivos').campo('Nombre',db.str,False,True,False,False,0,-1,None,None)
db('Archivos').campo('Contenido',db.list,False,True,False,False,0,-1,None,None)
db('Archivos').campo('args',db.dict,False,True,False,False,0,-1,None,None)
db('Archivos').campo('Fecha',db.str,False,True,False,False,0,-1,None,'%d/%m/%Y %H:%M:%S')
db('Archivos').campo('Status',db.list,False,True,False,False,0,-1,None,None)
db('Opciones').campo('Nombre',db.str,False,True,False,False,0,-1,None,None)
db('Opciones').campo('Valores',db.list,False,True,False,False,0,-1,None,None)
db('Archivos').insertar('img1.jpg', [[{'Imagen': 'file', 'value': '', 'name': 'archivo'}, {'T\xc3\xadtulo': 'text', 'value': 'img1.jpg', 'name': 'renombre'}, {'opcion': 5, 'name': 'opcion', 'value': 0, 'Opciones': 'select'}, {'Tipo de archivo': 'select', 'opcion': 3, 'name': 'tipo', 'value': 0}]], {'Archivo': 0}, '12/7/2017 10:27:11', [])
db('Archivos').insertar('MamaPapaCAnguroweb.jpg', [[{'opcion': 0, 'Imagen': 'img', 'value': 38, 'name': 'archivo'}, {'T\xc3\xadtulo': 'text', 'value': 'MamaPapaCAnguroweb.jpg', 'name': 'renombe'}, {'value': 'http://occoasolutions.com/zerpatec/apps/asosa/admin/static/archivos/Imagenes/MamaPapaCAnguroweb.jpg', 'name': 'enlace', 'Enlace': 'text'}, {'T\xc3\xadpo': 'text', 'name': 'tipo', 'value': 'Imagen'}]], {'Archivo': 1}, '21/7/2017 0:30:45', [])
db('Archivos').insertar('imgpsh_fullsize3.jpeg', [[{'opcion': 0, 'Imagen': 'img', 'value': 39, 'name': 'archivo'}, {'T\xc3\xadtulo': 'text', 'value': 'imgpsh_fullsize3.jpeg', 'name': 'renombe'}, {'Enlace': 'text', 'name': 'enlace', 'value': 'http://occoasolutions.com/zerpatec/apps/asosa/admin/static/archivos/Imagenes/imgpsh_fullsize3.jpeg'}, {'T\xc3\xadpo': 'text', 'name': 'tipo', 'value': 'Imagen'}]], {'Archivo': 2}, '21/7/2017 0:31:58', [])
db('Archivos').insertar('SuperPapa.jpg', [[{'opcion': 0, 'Imagen': 'img', 'value': 40, 'name': 'archivo'}, {'T\xc3\xadtulo': 'text', 'value': 'SuperPapa.jpg', 'name': 'renombe'}, {'value': 'http://occoasolutions.com/zerpatec/apps/asosa/admin/static/archivos/Imagenes/SuperPapa.jpg', 'name': 'enlace', 'Enlace': 'text'}, {'T\xc3\xadpo': 'text', 'name': 'tipo', 'value': 'Imagen'}]], {'Archivo': 3}, '21/7/2017 0:40:21', [])
db('Opciones').insertar('Imagenes', ['Aniversario Perfecto-A001.jpg', 'Amarillo Primavera-A002.jpg', 'Color Primavera-A004.jpg', 'Alegria Exotica-005.jpg', 'Azul Amore-A007.jpg', 'Exotico Amore-A006.jpg', 'Alegria-A008.jpg', 'Campo Amore-A009 - copia.jpg', 'Exotico Armonia-A007.jpg', 'Amarillo Campestre-A014.gif', 'Calas Buque-B001.jpg', 'Blanco Celestial Buque-B002.jpg', 'Rosa Celestial-C004.jpg', 'Blanco Campestre -C005.jpg', 'Rosa Campestre-C006.jpg', 'Gladiolas Celestial-C010 - copia.jpg', 'Lila Celestial-C001 - copia.JPG', 'Calas Campestre-A001.jpg', 'Lirio Dinastia-A005 - copia.jpg', 'Blanco Dinastia-A013.jpg', 'Calas Clasicas-A016.jpg', 'mesa primavera.jpg', 'mesa avila.jpg', 'IMG-20140701-00060.jpg', 'IMG-20140831-00213.jpg', 'IMG-20140825-00193.jpg', 'Blanco Campestre -C005 - copia.jpg', 'Blanco Primavera-C009 - copia.jpg', 'Calas Campestre-A001 - copia.jpg', 'Calas Energizantes-A003.jpg', 'Exotico Campestre-A004.jpg', 'Lirio Dinastia-A005.jpg', 'Exotico Armonia-A007 - copia.jpg', 'Exotico Dinastia-A008.jpg', 'Bandanos Decorativos-A011.jpg', 'Rojo Decorativo-A018.bmp', 'palmas.jpg', 'cala blanca.jpg', 'lochita.jpg', 'repollito d jardin.jpg', 'bella las once.jpg', 'jade imperial.jpg', 'planta_del_dinero.jpg', 'bamboo.jpg', 'jardin palo de la felicidad.jpg', 'jade chno.jpg'])
db('Opciones').insertar('Avatares', ['icono_perfil.jpg'])
db('Opciones').insertar('Iconos', ['icono_perfil.jpg'])
db('Opciones').insertar('Categorias', ['Imagenes', 'Avatares', 'Iconos'])
db('Opciones').insertar('Archivos', ['Imagen', 'Texto', 'Video', 'Musica', 'Sonido', 'Pagina web', 'Documento Word', 'Documento Excel', 'Documento Power point', 'Documento Write', 'Documento Calc', 'Documento Impress', 'Documento Draw', 'Script Python', 'Script Javascript', 'Script PHP', 'Script C', 'Script C#', 'Script Ruby', 'Script Go']) | [
"jesus26abraham1996@gmail.com"
] | jesus26abraham1996@gmail.com |
4305a78a71ad47ae984d4d566913ad9b7caf9f6f | 6336828aeab3ea2ba3e1cf9452a8a3f3a084b327 | /django_react_users_tutorial/virtual-env/bin/pip3 | 90eaf20b91fc5bd44e053571f6f42364ee896361 | [] | no_license | kalereshma96/DjangoNewRepository | 85f2eaed6b689be273af48d328c0a388244bbe2b | 37fd232c2ac91eb6940300f20118f93d17926f9a | refs/heads/master | 2020-04-12T18:12:15.698279 | 2019-01-21T13:46:37 | 2019-01-21T13:46:37 | 162,672,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | #!/home/admin1/PycharmProjects/mynewpythonproject/django_react_users_tutorial/virtual-env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"kalereshma96@gmail.com"
] | kalereshma96@gmail.com | |
ffcf2b18e3380c3824334a82b20ab916324a1565 | b68d4e7826c29a22b002ff9c10583faeb7a10455 | /pikry-3.4.1/bin/pilfont.py | bd6e47c8c714d7f812e439293df8a80bde33f003 | [] | no_license | mikanyman/.pyenv_versions-legacy | ec392821290bd38873b25824c4261b15dc1a5067 | 5a42c7c21e800610f4f5f322d73d1dbd62a081b9 | refs/heads/master | 2022-10-13T10:22:13.956161 | 2017-01-31T20:10:04 | 2017-01-31T20:10:04 | 80,555,789 | 0 | 1 | null | 2022-09-30T13:39:01 | 2017-01-31T19:49:56 | Python | UTF-8 | Python | false | false | 1,059 | py | #!/home/mnyman/.pyenv/versions/pikry-3.4.1/bin/python3.4
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
VERSION = "0.4"
import glob, sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek(0)
p = BdfFontFile.BdfFontFile(fp)
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
| [
"mika.nyman@synapse-computing.com"
] | mika.nyman@synapse-computing.com |
57e4364c25f9e384555ccf48e754377b18f561ec | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/ConstrainsAnlysisPDP1P2_20210712110602.PY | 4dabb5e1945b8e2f10dfb8d6f544bc8cb7def6d0 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,980 | py | # author: Bao Li #
# Georgia Institute of Technology #
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import matplotlib.pylab as plt
import numpy as np
import sys
import os
sys.path.insert(0, os.getcwd())
"""
The unit use is IS standard
"""
class ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun:
"""This is a power-based master constraints analysis"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.5, number_of_motor=12, C_DR=0):
"""
:param beta: weight fraction
:param Hp: P_motor/P_total
:param n: number of motor
:param K1: drag polar coefficient for 2nd order term
:param K2: drag polar coefficient for 1st order term
:param C_D0: the drag coefficient at zero lift
:param C_DR: additional drag caused, for example, by external stores,
braking parachutes or flaps, or temporary external hardware
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.beta = beta
self.hp = 1 - Hp
self.n = number_of_motor
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
self.cdr = C_DR
self.w_s = wing_load
self.g0 = 9.80665
self.coefficient = self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(
self.h, self.v, Hp=self.hp, n=n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
self.cl = self.beta * self.w_s / self.q
# print(self.cl)
self.delta_cl = pd.delta_lift_coefficient(self.cl)
self.delta_cd0 = pd.delta_CD_0()
def master_equation(self, n, dh_dt, dV_dt):
cl = self.cl * n + self.delta_cl
cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
p_w = self.coefficient * \
(self.q / (self.beta * self.w_s) *
cd + dh_dt / self.v + dV_dt / self.g0)
return p_w
def cruise(self):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
self, n=1, dh_dt=0, dV_dt=0)
return p_w
def climb(self, roc):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 300 knots, which is about 150 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180)
* v / self.g0) ** 2) ** 0.5
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
self, n=load_factor, dh_dt=0, dV_dt=0)
return p_w
def take_off(self):
"""
A320neo take-off speed is about 150 knots, which is about 75 m/s
required runway length is about 2000 m
K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
"""
Cl_max_to = 2.3 # 2.3
K_TO = 1.2 # V_TO / V_stall
s_G = 1266
p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
3 / 2)
return p_w
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * \
(Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * \
(Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
def service_ceiling(self, roc=0.5):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
allFuncs = [take_off, stall_speed, cruise,
service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Mattingly_Method_with_DP_electric:
"""This is a power-based master constraints analysis
the difference between turbofun and electric for constrains analysis:
1. assume the thrust_lapse = 1 for electric propution
2. hp = 1 - hp_turbofun
"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.5, number_of_motor=12, C_DR=0):
"""
:param beta: weight fraction
:param Hp: P_motor/P_total
:param n: number of motor
:param K1: drag polar coefficient for 2nd order term
:param K2: drag polar coefficient for 1st order term
:param C_D0: the drag coefficient at zero lift
:param C_DR: additional drag caused, for example, by external stores,
braking parachutes or flaps, or temporary external hardware
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.beta = beta
self.hp = Hp # this is the difference part compare with turbofun
self.n = number_of_motor
# power lapse ratio
self.alpha = 1 # this is the difference part compare with turbofun
self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
self.cdr = C_DR
self.w_s = wing_load
self.g0 = 9.80665
self.coefficient = self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(
self.h, self.v, Hp=self.hp, n=n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
self.cl = self.beta * self.w_s / self.q
# print(self.cl)
self.delta_cl = pd.delta_lift_coefficient(self.cl)
self.delta_cd0 = pd.delta_CD_0()
def master_equation(self, n, dh_dt, dV_dt):
cl = self.cl * n + self.delta_cl
cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
p_w = self.coefficient * \
(self.q / (self.beta * self.w_s) *
cd + dh_dt / self.v + dV_dt / self.g0)
return p_w
def cruise(self):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=1, dh_dt=0, dV_dt=0)
return p_w
def climb(self, roc):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 300 knots, which is about 150 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180)
* v / self.g0) ** 2) ** 0.5
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=load_factor, dh_dt=0, dV_dt=0)
return p_w
def take_off(self):
"""
A320neo take-off speed is about 150 knots, which is about 75 m/s
required runway length is about 2000 m
K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
"""
Cl_max_to = 2.3 # 2.3
K_TO = 1.2 # V_TO / V_stall
s_G = 1266
p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
3 / 2)
return p_w
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * \
(Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * \
(Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
def service_ceiling(self, roc=0.5):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
allFuncs = [take_off, stall_speed, cruise,
service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun:
"""This is a power-based master constraints analysis based on Gudmundsson_method"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.5, number_of_motor=12, e=0.75, AR=10.3):
"""
:param beta: weight fraction
:param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
:param AR: wing aspect ratio, normally between 7 and 10
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.w_s = wing_load
self.g0 = 9.80665
self.beta = beta
self.hp = 1 - Hp
self.n = number_of_motor
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
h = 2.43 # height of winglets
b = 35.8
# equation 9-88, If the wing has winglets the aspect ratio should be corrected
ar_corr = AR * (1 + 1.9 * h / b)
self.k = 1 / (np.pi * ar_corr * e)
self.coefficient = self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(
self.h, self.v, Hp=self.hp, n=n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
cl = self.beta * self.w_s / self.q
self.delta_cl = pd.delta_lift_coefficient(cl)
self.delta_cd0 = pd.delta_CD_0()
# TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
cd_min = 0.02
cd_to = 0.03
cl_to = 0.8
self.v_to = 68
self.s_g = 1480
self.mu = 0.04
self.cd_min = cd_min + self.delta_cd0
self.cl = cl + self.delta_cl
self.cd_to = cd_to + self.delta_cd0
self.cl_to = cl_to + self.delta_cl
def cruise(self):
p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
return p_w * self.coefficient
def climb(self, roc):
p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
return p_w * self.coefficient
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 100 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180)
* v / self.g0) ** 2) ** 0.5
q = 0.5 * self.rho * v ** 2
p_w = q / self.w_s * (self.cd_min + self.k *
(load_factor / q * self.w_s + self.delta_cl) ** 2)
return p_w * self.coefficient
def take_off(self):
q = self.q / 2
p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
1 - q * self.cl_to / self.w_s)
return p_w * self.coefficient
def service_ceiling(self, roc=0.5):
vy = (2 / self.rho * self.w_s *
(self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
q = 0.5 * self.rho * vy ** 2
p_w = roc / vy + q / self.w_s * \
(self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
# p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
# self.k * self.cd_min / 3) ** 0.5
return p_w * self.coefficient
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * \
(Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * \
(Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
allFuncs = [take_off, stall_speed, cruise,
service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric:
"""This is a power-based master constraints analysis based on Gudmundsson_method
the difference between turbofun and electric for constrains analysis:
1. assume the thrust_lapse = 1 for electric propution
2. hp = 1 - hp_turbofun
"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.5, number_of_motor=12, e=0.75, AR=10.3):
"""
:param beta: weight fraction
:param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
:param AR: wing aspect ratio, normally between 7 and 10
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.w_s = wing_load
self.g0 = 9.80665
self.beta = beta
self.hp = Hp # this is the difference part compare with turbofun
self.n = number_of_motor
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
# power lapse ratio
self.alpha = 1 # this is the difference part compare with turbofun
h = 2.43 # height of winglets
b = 35.8
# equation 9-88, If the wing has winglets the aspect ratio should be corrected
ar_corr = AR * (1 + 1.9 * h / b)
self.k = 1 / (np.pi * ar_corr * e)
self.coefficient = self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(
self.h, self.v, Hp=self.hp, n=n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
cl = self.beta * self.w_s / self.q
self.delta_cl = pd.delta_lift_coefficient(cl)
self.delta_cd0 = pd.delta_CD_0()
# TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
cd_min = 0.02
cd_to = 0.03
cl_to = 0.8
self.v_to = 68
self.s_g = 1480
self.mu = 0.04
self.cd_min = cd_min + self.delta_cd0
self.cl = cl + self.delta_cl
self.cd_to = cd_to + self.delta_cd0
self.cl_to = cl_to + self.delta_cl
def cruise(self):
p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
return p_w * self.coefficient
def climb(self, roc):
p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
return p_w * self.coefficient
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 100 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180)
* v / self.g0) ** 2) ** 0.5
q = 0.5 * self.rho * v ** 2
p_w = q / self.w_s * (self.cd_min + self.k *
(load_factor / q * self.w_s + self.delta_cl) ** 2)
return p_w * self.coefficient
def take_off(self):
q = self.q / 2
p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
1 - q * self.cl_to / self.w_s)
return p_w * self.coefficient
def service_ceiling(self, roc=0.5):
vy = (2 / self.rho * self.w_s *
(self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
q = 0.5 * self.rho * vy ** 2
p_w = roc / vy + q / self.w_s * \
(self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
# p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
# self.k * self.cd_min / 3) ** 0.5
return p_w * self.coefficient
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * \
(Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * \
(Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
allFuncs = [take_off, stall_speed, cruise,
service_ceiling, level_turn, climb]
if __name__ == "__main__":
n = 100
w_s = np.linspace(100, 9000, n)
constrains_name = ['take off', 'stall speed', 'cruise', 'service ceiling', 'level turn @3000m',
'climb @S-L', 'climb @3000m', 'climb @7000m']
constrains = np.array([[0, 68, 0.988, 0.8, 0.5], [0, 80, 1, 0.5], [11300, 230, 0.948, 0.8],
[11900, 230, 0.78, 0.5], [
3000, 100, 0.984, 0.8], [0, 100, 0.984, 0.5],
[3000, 200, 0.975, 0.6], [7000, 230, 0.96, 0.8]])
color = ['c', 'k', 'b', 'g', 'y', 'plum', 'violet', 'm']
label = ['feasible region with PD', 'feasible region with PD', 'feasible region Gudmundsson',
'feasible region without PD', 'feasible region without PD', 'feasible region Mattingly']
methods = [ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun,
ConstrainsAnalysis_Mattingly_Method_with_DP_electric,
ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric,
ca.ConstrainsAnalysis_Mattingly_Method,
ca.ConstrainsAnalysis_Gudmundsson_Method]
m = constrains.shape[0]
p_w = np.zeros([2 * m, n])
# plots
fig, axs = plt.subplot(4, 2, figsize=(20, 20), sharex=True, sharey=True)
for k in range(8):
for i in range(m):
for j in range(n):
h = constrains[i, 0]
v = constrains[i, 1]
beta = constrains[i, 2]
hp = constrains[i, 3]
if k < 6:
problem = methods[k](h, v, beta, w_s[j], hp)
else:
problem = methods[k](h, v, beta, w_s[j])
if i >= 5:
p_w[i, j] = problem.allFuncs[-1](problem, roc=15 - 5 * (i - 5))
else:
p_w[i, j] = problem.allFuncs[i](problem)
if i == 1:
axs[k].plot(p_w[i, :], np.linspace(0, 250, n), color=color[i], label=constrains_name[i])
else:
axs[k].plt.plot(w_s, p_w[i, :], color=color[i], label=constrains_name[i])
p_w[1, :] = 200 / (p_w[1, -1] - p_w[1, 20]) * (w_s - p_w[1, 2])
if k != 2:
p_w[1 + m, :] = 10 ** 10 * (w_s - p_w[1 + m, 2])
else:
p_w[1 + m, :] = 200 / (p_w[1 + m, -1] - p_w[1 + m, 20]) * (w_s - p_w[1 + m, 2])
plt.fill_between(w_s, np.amax(p_w[0:m - 1, :], axis=0), 200, color='b', alpha=0.25,
label=label[k])
plt.fill_between(w_s, np.amax(p_w[m:2 * m - 1, :], axis=0), 200, color='r', alpha=0.25,
label=label[k + 3])
plt.xlabel('Wing Load: $W_{TO}$/S (N/${m^2}$)')
plt.ylabel('Power-to-Load: $P_{SL}$/$W_{TO}$ (W/N)')
plt.legend(bbox_to_anchor=(1.002, 1), loc="upper left")
plt.gca().add_artist(l1)
plt.xlim(100, 9000)
plt.ylim(0, 200)
plt.tight_layout()
plt.grid()
plt.show()
| [
"libao@gatech.edu"
] | libao@gatech.edu |
09983338f8ba20d57cdacb3dc04283b40d2e990d | b7e924cff3940a94014f7ef83f830f31c61b1ce1 | /Assignments/Data Types and Variables/Exercise/02. Chars to String.py | b2bd57bd44cd86db29a9e3cd2173df6d8be41682 | [
"MIT"
] | permissive | KaloyankerR/python-fundamentals-repository | a1406ca021819ca32390700380646f1107bf078e | b8e69523ea7e6aa352e8398f0202e283374a0f7c | refs/heads/master | 2023-04-10T05:22:43.907759 | 2021-04-20T20:45:54 | 2021-04-20T20:45:54 | 289,025,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | string = ''
for char in range(3):
character = input()
string += character
print(string)
| [
"kaloyankulov2003kk@gmail.com"
] | kaloyankulov2003kk@gmail.com |
ad695d4f233f07e51d0efd694542281456a00b3b | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/avare/testcase/firstcases/testcase4_004.py | 6daa996c2f9b7ca467ad807302e535ffdfc8de97 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,862 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.ds.avare',
'appActivity' : 'com.ds.avare.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'com.ds.avare/com.ds.avare.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
return
def scrollToFindElement(driver, str) :
    """Scroll the screen while searching for the element matching *str*.

    Scrolls upward between up to 5 attempts, then back downward for up to
    4 more. When multiple matches exist, prefers the first enabled one.
    Returns the element, or None if nothing was found.
    """
    # Phase 1: up to 5 attempts, swiping content upward between failures.
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
            elements = driver.find_elements_by_android_uiautomator(str)
            if (len(elements) > 1) :
                # Several matches: pick the first enabled one instead.
                for temp in elements :
                    if temp.get_attribute("enabled") == "true" :
                        element = temp
                        break
        except NoSuchElementException:
            swipe(driver, 0.5, 0.55, 0.5, 0.2)
        else :
            return element
    # Phase 2: up to 4 attempts, swiping back in the opposite direction.
    for i in range(0, 4, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
            elements = driver.find_elements_by_android_uiautomator(str)
            if (len(elements) > 1):
                for temp in elements:
                    if temp.get_attribute("enabled") == "true":
                        element = temp
                        break
        except NoSuchElementException:
            swipe(driver, 0.5, 0.2, 0.5, 0.55)
        else :
            return element
    # Not found in either direction.
    return
def scrollToClickElement(driver, str) :
    """Scroll until the selector *str* is found, then click it.

    Silently does nothing when the element cannot be located.
    """
    element = scrollToFindElement(driver, str)
    if element is not None :
        element.click()
def clickInList(driver, str) :
    """Click an entry in a (dialog) list.

    When *str* is None, picks the last CheckedTextView of an open dialog;
    otherwise scrolls to the element matching *str*. If nothing can be
    clicked but a dialog is open, presses BACK (keycode 4) to dismiss it.
    """
    element = None
    if (str is None) :
        candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
        # Only use the heuristic pick when a dialog is actually showing.
        if len(candidates) >= 1 and checkWindow(driver):
            element = candidates[len(candidates)-1]
    else :
        element = scrollToFindElement(driver, str)
    if element is not None :
        element.click()
    else :
        if checkWindow(driver) :
            # Nothing clickable but a dialog is open: back out of it.
            driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
    """Set the checkable widget in the same preference row as *str*.

    Walks every LinearLayout looking for the one that contains the target
    selector and exactly one nested LinearLayout (the preference row
    shape); toggles its checkable child only when its current "checked"
    state differs from *value*.
    """
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try :
            # Raises NoSuchElementException when this layout is not the row.
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if len(lists) == 1 :
                innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
                nowvalue = innere.get_attribute("checked")
                if (nowvalue != value) :
                    innere.click()
                break
        except NoSuchElementException:
            continue
def typeText(driver, value) :
    """Type *value* into the first EditText and confirm with "OK".

    If no OK button exists but a dialog is open, presses BACK (keycode 4)
    to close it instead.
    """
    element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
    element.clear()
    element.send_keys(value)
    enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
    if (enterelement is None) :
        if checkWindow(driver):
            driver.press_keycode(4)
    else :
        enterelement.click()
def checkWindow(driver) :
    """Return True when a popup/dialog overlays the main view.

    A dialog shrinks the visible top-level FrameLayout, so the window
    being strictly taller than the FrameLayout signals an open dialog.
    """
    dsize = driver.get_window_size()
    nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
    # Return the comparison directly instead of an if/else returning True/False.
    return dsize['height'] > nsize['height']
def testingSeekBar(driver, str, value):
    """Drag the SeekBar associated with *str* to fraction *value* and confirm.

    Uses the nearest SeekBar from the preference list when no dialog is
    open, otherwise the dialog's own SeekBar. Swallows
    NoSuchElementException (sleeps 1s) since missing widgets are expected
    on some screens.
    """
    try :
        if(not checkWindow(driver)) :
            element = seekForNearestSeekBar(driver, str)
        else :
            element = driver.find_element_by_class_name("android.widget.SeekBar")
        if (None != element):
            settingSeekBar(driver, element, value)
            driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
    except NoSuchElementException:
        time.sleep(1)
def seekForNearestSeekBar(driver, str):
    """Find the SeekBar sitting in the same preference row as selector *str*.

    Walks every LinearLayout; a layout containing the target selector and
    exactly one nested LinearLayout is treated as the preference row, and
    its SeekBar child is returned. Returns None when no row matches.
    """
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try :
            # Raises NoSuchElementException when this layout is not the row.
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if len(lists) == 1 :
                # Fix: the original had an unreachable `break` after this
                # return; removed as dead code.
                return parent.find_element_by_class_name("android.widget.SeekBar")
        except NoSuchElementException:
            continue
    return None
def settingSeekBar(driver, element, value) :
    """Drag SeekBar *element* so its thumb lands at fraction *value* (0..1).

    Presses just inside the bar's left edge at mid-height, then drags to
    x + width * value.
    """
    x = element.rect.get("x")
    y = element.rect.get("y")
    width = element.rect.get("width")
    height = element.rect.get("height")
    TouchAction(driver).press(None, x + 10, y + height/2).move_to(None, x + width * value,y + height/2).release().perform()
    # Fix: removed the original trailing `y = value`, a dead local
    # assignment with no effect.
def clickInMultiList(driver, str) :
    """Check an entry in a multi-choice dialog list, then confirm with OK.

    When *str* is None, targets the last CheckedTextView of the open
    dialog; otherwise scrolls to the element matching *str*. Only clicks
    when the entry is not already checked.
    """
    element = None
    if (str is None) :
        candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
        if len(candidates) >= 1 and checkWindow(driver):
            element = candidates[len(candidates)-1]
    else :
        element = scrollToFindElement(driver, str)
    if element is not None :
        nowvalue = element.get_attribute("checked")
        if (nowvalue != "true") :
            element.click()
        if checkWindow(driver) :
            # Confirm the multi-choice dialog.
            driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
# testcase4_004
# NOTE(review): this script body uses Python 2 syntax (`except Exception, e:`
# and print statements) and will not run under Python 3.
# Drives a recorded UI interaction sequence through the Avare app, then
# always dumps Jacoco coverage and quits the driver in `finally`.
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    swipe(driver, 0.5, 0.2, 0.5, 0.8)
    element = getElememtBack(driver, "new UiSelector().text(\"Plan\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Map\")", "new UiSelector().className(\"android.widget.TextView\")")
    TouchAction(driver).long_press(element).release().perform()
    swipe(driver, 0.5, 0.2, 0.5, 0.8)
    element = getElememt(driver, "new UiSelector().resourceId(\"com.ds.avare:id/location_button_center\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Menu\")", "new UiSelector().className(\"android.widget.Button\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Preferences\")", "new UiSelector().className(\"android.widget.Button\")")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(4)
    element = getElememtBack(driver, "new UiSelector().text(\"CSup\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
    swipe(driver, 0.5, 0.2, 0.5, 0.8)
    element = getElememtBack(driver, "new UiSelector().text(\"No\")", "new UiSelector().className(\"android.widget.TextView\").instance(11)")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Plan\")", "new UiSelector().className(\"android.widget.TextView\").instance(32)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
    element.clear()
    element.send_keys("");
    element = getElememt(driver, "new UiSelector().className(\"android.widget.Button\").description(\"000 \")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.Button\").description(\"Cancel\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Plan\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.Button\").description(\"000\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Find\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"com.ds.avare:id/search_edit_text\").className(\"android.widget.EditText\")")
    element.clear()
    element.send_keys("Search");
    element = getElememtBack(driver, "new UiSelector().text(\"Map\")", "new UiSelector().className(\"android.widget.TextView\").instance(18)")
    TouchAction(driver).long_press(element).release().perform()
except Exception, e:
    # Any failure in the sequence is reported, not raised.
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # Always collect timing + coverage and shut the driver down.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"4_004\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'com.ds.avare'):
        cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
4782ed428274689a290b12241878031341eba26d | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/minCost_20200826171103.py | dfc9066a4786aa0d420bc1121c66f4347f61d3bd | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | def minCost(days,costs):
# brute force approach
# find if numbers are consecutive
# if they are past 7 then means we do a 30 day pass
# once they stop being consecutive means to opt for something different
# like [1,4,6,7,8,20]
ways = [0] * days[len(days)-1]
newDays = set(days)
for i in range(1,len(ways)):
print('i')
total = ways[i-1]+costs[0]
if i-7 >= 0:
total1 = ways[i-7] + costs[1]
else:
total1 = 0 + costs[1]
if i-15 >= 0:
total2 = ways[i-15] + costs[2]
else:
total2 = 0 + costs[2]
if i in newDays:
ways[i] = min(total,total1,total2)
else:
ways[i] = ways[i-1]
print(ways)
minCost([1,4,6,7,8,20],[2,7,15]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
69399a49153db50e70168d25d6dff65962822c66 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/KCB_YCHF/KCB_YCHF_MM/OMS/YCHF_KCBYCHF_OMS_099.py | 0aa0a7ef5c462c08e02381b15485dddec0029a36 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_OMS_099(xtp_test_case):
    """OMS regression case: stop the OMS service during a partial-cancel
    of a best-of-own-side sell order on an SSE STAR-market stock (688000)."""
    def setUp(self):
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_OMS_099')
        #clear_data_and_restart_all()
        #Api.trade.Logout()
        #Api.trade.Login()
        pass
    #
    def test_YCHF_KCBYCHF_OMS_099(self):
        title = '停止OMS服务(沪A本方最优部撤卖出)'
        # Define the expected outcome for this test case.
        # Expected order states: initial / unfilled / partially filled / fully
        # filled / partial-cancel reported / partially cancelled / reported
        # pending cancel / cancelled / invalid / cancel-rejected / internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '部撤',
            'errorID': 0,
            'errorMSG': queryOrderErrorMsg(0),
            '是否生成报单': '是',
            '是否是撤废': '否',
            # '是否是新股申购': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading status,
        # side (B buy / S sell), expected state, Api
        stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '报单测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            print(stkparm['错误原因'])
            self.assertEqual(rs['报单测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':3,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
                'price': stkparm['涨停价'],
                'quantity': 300,
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
        ## Restore available funds
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
        #oms_restart()
        self.assertEqual(rs['报单测试结果'], True) # 211
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
a5bc5ff0db7914cddcbf2343968e56b7a15e5726 | e6445a08328f35da144e40d995fde37111c33f59 | /docs/04_队列/array_queue.py | 4b45b94a363a88d22b5b5b79116c7b1ad97dc8cf | [
"MIT"
] | permissive | Mark24Code/python_data_structures_and_algorithms | 6f069eed04e8c44bc4f04845755a1b598606d4ad | 3469a79c34b6c08ae52797c3974b49fbfa8cca51 | refs/heads/master | 2023-03-15T22:54:02.786294 | 2022-06-13T01:53:41 | 2022-06-13T01:53:41 | 586,735,464 | 1 | 0 | MIT | 2023-01-09T05:12:16 | 2023-01-09T05:12:16 | null | UTF-8 | Python | false | false | 1,688 | py | # -*- coding: utf-8 -*-
# NOTE: 从 array_and_list 第一章拷贝的代码
class Array(object):
    """Fixed-size array backed by a plain Python list of *size* slots."""

    def __init__(self, size=32):
        self._size = size
        self._items = [None] * size

    def __getitem__(self, index):
        return self._items[index]

    def __setitem__(self, index, value):
        self._items[index] = value

    def __len__(self):
        # Capacity, not the number of occupied slots.
        return self._size

    def clear(self, value=None):
        """Reset every slot to *value* (None by default)."""
        for pos in range(len(self._items)):
            self._items[pos] = value

    def __iter__(self):
        return iter(self._items)
class FullError(Exception):
    """Raised when pushing onto a queue that is already at capacity."""
    pass
class ArrayQueue(object):
    """Bounded FIFO queue on a ring buffer (Array) of *maxsize* slots."""

    def __init__(self, maxsize):
        self.maxsize = maxsize
        self.array = Array(maxsize)
        self.head = 0   # next write position (monotonically increasing)
        self.tail = 0   # next read position

    def push(self, value):
        """Append *value*; raise FullError when the queue is full."""
        if len(self) >= self.maxsize:
            raise FullError('queue full')
        self.array[self.head % self.maxsize] = value
        self.head += 1

    def pop(self):
        """Remove and return the oldest value; raise IndexError when empty.

        Fix: popping an empty queue used to hand back stale slot contents
        and drive len() negative; it now fails loudly instead.
        """
        if self.head == self.tail:
            raise IndexError('pop from empty queue')
        value = self.array[self.tail % self.maxsize]
        self.tail += 1
        return value

    def __len__(self):
        return self.head - self.tail
def test_queue():
    """Exercise ArrayQueue: fill to capacity, overflow, pop/push wrap-around."""
    import pytest # pip install pytest
    size = 5
    q = ArrayQueue(size)
    for i in range(size):
        q.push(i)
    with pytest.raises(FullError) as excinfo: # verify FullError is really raised
        q.push(size)
    assert 'full' in str(excinfo.value)
    assert len(q) == 5
    assert q.pop() == 0
    assert q.pop() == 1
    # After two pops there is room again; this write wraps the ring buffer.
    q.push(5)
    assert len(q) == 4
    assert q.pop() == 2
    assert q.pop() == 3
    assert q.pop() == 4
    assert q.pop() == 5
    assert len(q) == 0
| [
"291374108@qq.com"
] | 291374108@qq.com |
2c5181d620658d03ac9d21ae8318395a7c20f37e | e2ac8f82d611955b311226a278328718628ca667 | /api/api_using_viewset/urls.py | 258e7d41b4126e7a5dbaecaf5dc6b6528c99b87c | [] | no_license | ShakilAhmmed/apis | 86a6d0ded03724ca1b1f4912026dac1b42f3d4ea | 1f8309d08c288b2412f5c85c4297fe3cc9289f1b | refs/heads/master | 2020-05-22T18:26:13.256094 | 2019-05-13T18:19:02 | 2019-05-13T18:19:02 | 186,471,211 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from django.urls import path, include
from rest_framework import routers
from .views import StudentViewSet
# Register StudentViewSet under the "api_using_viewset" prefix; DefaultRouter
# generates the standard list/detail routes (and the browsable API root).
router = routers.DefaultRouter()
router.register('api_using_viewset', StudentViewSet)
urlpatterns = [
    path('', include(router.urls))
]
| [
"shakilfci461@gmail.com"
] | shakilfci461@gmail.com |
f0ac079eb68e8fbe90313d1152398a2a07f928fc | cedab14839cfc276f028436ba79d103a8aff0d5b | /Philippines/Subject5_AP/E1_AP_Landsat/4_Eval.py | ed2439d4199b7f752ef9ee7820fbce6c2f2bada2 | [] | no_license | wmgeolab/schoolCNN | aa686a4103695c1e10f5afa68ec2919761d33c15 | 1c73ec90732ec565ce552b27e4b2108a8ee916da | refs/heads/master | 2021-01-09T03:25:44.895023 | 2020-02-21T22:52:41 | 2020-02-21T22:52:41 | 242,230,029 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | from __future__ import print_function, division
from torchvision import datasets, models, transforms
from imgaug import parameters as iap
from imgaug import augmenters as iaa
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchsummary import summary
import matplotlib.pyplot as plt
import torch.optim as optim
import torch.nn as nn
from PIL import Image
import pandas as pd
import torchvision
import numpy as np
import pickle
import joblib
import torch
import time
import copy
import PIL
import os
class ImgAugTransform:
    """torchvision-compatible wrapper around an imgaug augmentation pipeline.

    Resizes to 224x224 and applies a randomized mix of blur, brightness,
    inversion, noise, flips, rotation, dropout and hue/saturation shifts.
    """
    def __init__(self):
        self.aug = iaa.Sequential([
        iaa.Scale((224, 224)),
        iaa.Sometimes(0.30, iaa.GaussianBlur(sigma=(0, 3.0))),
        iaa.Sometimes(0.25, iaa.Multiply((0.5, 1.5), per_channel=0.5)),
        iaa.Sometimes(0.20, iaa.Invert(0.25, per_channel=0.5)),
        iaa.Sometimes(0.25, iaa.ReplaceElementwise(
            iap.FromLowerResolution(iap.Binomial(0.1), size_px=8),
            iap.Normal(128, 0.4*128),
            per_channel=0.5)
        ),
        iaa.Sometimes(0.30, iaa.AdditivePoissonNoise(40)),
        iaa.Fliplr(0.5),
        iaa.Affine(rotate=(-20, 20), mode='symmetric'),
        iaa.Sometimes(0.30,
                      iaa.OneOf([iaa.Dropout(p=(0, 0.1)),
                                 iaa.CoarseDropout(0.1, size_percent=0.5)])),
        iaa.AddToHueAndSaturation(value=(-10, 10), per_channel=True)
    ])
      
    def __call__(self, img):
        # imgaug operates on numpy arrays, not PIL images.
        img = np.array(img)
        return self.aug.augment_image(img)
# Load the trained ResNet-style classifier and build the eval-time transform.
# Note: the pipeline includes random augmentation even at inference time.
model_ft = joblib.load("./Philippines/Subject5_AP/E1_AP_Landsat/models/Landsat_AP_50epoch.sav")
directory = "./Philippines/Subject5_AP/E1_AP_Landsat/data/pass/"
transform = transforms.Compose([
    ImgAugTransform(),
    transforms.ToTensor(),
    # ImageNet channel means / stds.
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def EvalModel(model, directory, transforms):
    """Run *model* over every image in *directory* and collect class scores.

    Each filename's first 6 characters are taken as the school id
    (assumption inherited from the data layout -- TODO confirm). Returns a
    DataFrame with columns school_id, prob_fail, prob_pass (softmax
    percentages for classes 0 and 1).

    Fix: the original ignored its `model` and `transforms` parameters and
    used the globals `model_ft` / `transform` instead; it only worked by
    accident. It also re-listed the directory on every iteration and kept
    unused locals (`class_pred`, `index`).
    """
    model = model.cuda()
    model.eval()  # switch to eval mode once, not per image
    df = pd.DataFrame()
    cpass, cfail, ids = [], [], []
    filenames = os.listdir(directory)
    count = 0
    for filename in filenames:
        count += 1
        school_id = filename[0:6]
        ids.append(school_id)
        to_open = directory + filename
        png = Image.open(to_open)
        img_t = transforms(png)           # was the global `transform`
        batch_t = torch.unsqueeze(img_t, 0).cuda()
        out = model(batch_t)              # was the global `model_ft`
        percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100
        cfail.append(percentage[0].tolist())
        cpass.append(percentage[1].tolist())
        print("Predicted " + str(count) + " out of " + str(len(filenames)) + " images." )
    df['school_id'] = ids
    df['prob_fail'] = cfail
    df['prob_pass'] = cpass
    return df
# Score the "pass" images, then rebuild the transform and score the "fail"
# images; both prediction tables feed the downstream ensemble.
pass_preds = EvalModel(model_ft, directory, transform)
pass_preds.to_csv("./Philippines/Subject5_AP/Ensemble/data/LandsatPassPreds_GPU.csv")
directory = "./Philippines/Subject5_AP/E1_AP_Landsat/data/fail/"
transform = transforms.Compose([
    ImgAugTransform(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
fail_preds = EvalModel(model_ft, directory, transform)
fail_preds.to_csv("./Philippines/Subject5_AP/Ensemble/data/LandsatFailPreds_GPU.csv")
| [
"hmbaier@email.wm.edu"
] | hmbaier@email.wm.edu |
734a6e61332e4f46f8990d6102e2d09d754cc202 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2787/60617/264026.py | 6fc9957c7ad27cbe8e5b15d744283c7712053524 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | def construct_sequence():
length=int(input())
sequence=list(map(int, input().split(" ")))
upCounts=0
downCounts=0
for i in range(0, len(sequence)):
upCounts+=abs((length-len(sequence)+(i+1))-sequence[i])
for i in range(0, len(sequence)):
downCounts+=abs((length-i)-sequence[i])
print(min(upCounts, downCounts))
if __name__=='__main__':
construct_sequence() | [
"1069583789@qq.com"
] | 1069583789@qq.com |
7bae3b372c02e98b1ec84432aad1e72aad131396 | 885a722e3e5814ae4942ac5e8cf8d0091e734b4c | /LEETCODE/10. Regular Expression Matching - Python/CodingTest.py | 4f1f9a0e534fa0242d2adb715e7c95a102ea3fc3 | [] | no_license | ledpear/algorithm | 52f3ea25842eee20b3bbd48e51825b9df4942e03 | 4922c6fe5ca0b98a90dee218b756006e7ba05d82 | refs/heads/master | 2023-06-09T17:47:45.674244 | 2023-06-03T13:47:11 | 2023-06-03T13:47:11 | 133,370,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | class Solution:
def isMatch(self, s: str, p: str) -> bool:
p_list = list(p).reverse()
s_list = list(s).reverse()
bResult = True
s_pos = 0
for i in p_list :
if i == '.' :
s_pos += 1
elif i == '*' :
p_list[]
else
if i != s_list[s_pos] :
bResult = False
break;
print(Solution.isMatch(None, "aa","a")) | [
"tjsrb75@gmail.com"
] | tjsrb75@gmail.com |
45fc525118252cb564265e9f4d2b9ec6915127ab | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/vns/rsdfcttocat.py | 98a3e5206fc0f1f91d142b45f831058a0ac006e4 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,562 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsDfctToCat(Mo):
"""
A source relation to the Cisco defined category of the defect.
"""
meta = SourceRelationMeta("cobra.model.vns.RsDfctToCat", "cobra.model.vns.MDfctCat")
meta.cardinality = SourceRelationMeta.N_TO_ONE
meta.moClassName = "vnsRsDfctToCat"
meta.rnFormat = "rsdfctToCat"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Relation to L4-L7 Service Vendor to Cisco Defined Defect"
meta.writeAccessMask = 0x100000000001
meta.readAccessMask = 0x100000000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.parentClasses.add("cobra.model.vns.MDfct")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.rnPrefixes = [
('rsdfctToCat', False),
]
prop = PropMeta("str", "annotation", "annotation", 37986, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 40125, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14854, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 13573, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4766
prop.defaultValueStr = "vnsMDfctCat"
prop._addConstant("unspecified", "unspecified", 0)
prop._addConstant("vnsMDfctCat", None, 4766)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 13572, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("MDevToGraphInst", "Graph Instances", "cobra.model.vns.GraphInst"))
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # This relation MO has no naming properties -- its RN is the fixed
        # string "rsdfctToCat" (see meta.rnFormat) -- so no naming values
        # are forwarded to the Mo base constructor.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
7ad54719c3793799d0c3f52d695fe1971882d080 | 26243715cd618b07e95ea2a12f04aa750cb359af | /Help_Qiu/plotData.py | 4921f9a83a94ab6545b54d69f0f250cb6449b8ec | [] | no_license | 43reyerhrstj/DataAnalysis_water | 5fdf3e86249d8f47e54dc174edf9a55854e23b49 | 0be734284587b86857044dafa18af62268f64979 | refs/heads/master | 2022-04-05T22:52:07.147298 | 2018-05-22T14:00:03 | 2018-05-22T14:00:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#保证正常显示汉字
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
path='orgData.xls'
df=pd.read_excel(path)
dates=df['采集时间'] #获得数据集列
secWenCha=df['二次温差(℃)']
secCalcu=df['二次供水温度计算值(℃)']
#输入的起止日期格式
format='%Y/%m/%d %H:%M:%S'
start_str="2017/12/1 0:08:40"
end_str="2017/12/21 20:48:50"
start=datetime.strptime(start_str,format)
end=datetime.strptime(end_str,format)
xs = pd.Series([datetime.strptime(d,format) for d in dates])
great=xs>=start
less=xs<=end
index_range=xs[great & less].index
x_data=[datetime.strptime(str(d), '%Y-%m-%d %H:%M:%S') for d in xs[great & less]]
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(format))
plt.plot(x_data, df.loc[index_range,'二次温差(℃)'],label="二次温差(℃)")
plt.plot(x_data, df.loc[index_range,'二次供水温度计算值(℃)'],label="二次供水温度计算值(℃)")
plt.legend()
plt.gcf().autofmt_xdate() # 自动旋转日期标记
plt.show()
| [
"qzq2514@outlook.com"
] | qzq2514@outlook.com |
28772729bf1388f4a35384b14fe9d11f071abb4c | de626f1892619968efbaa22ea26079ee2269e799 | /funciones/revisioncampo/AsignacionPadron.py | 99f5879390b51578e088045def96c8837ddfb659 | [] | no_license | gerardoros/CartograficoQgisPlugin | 7e8724cec0469d0494090b3557e9d4e967935121 | 844fa1052f435478e2e946099d7dbd6b1b97c311 | refs/heads/master | 2023-04-04T08:06:30.967894 | 2021-04-07T14:15:37 | 2021-04-07T14:15:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,529 | py | # -*- coding: utf-8 -*-
"""
/***************************************************************************
AsignacionPadron
A QGIS plugin
AsignacionPadron
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-07-25
git sha : $Format:%H$
copyright : (C) 2018 by AsignacionPadron
email : AsignacionPadron
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .AsignacionPadron_dialog import AsignacionPadronDialog
from .VentanaAsignacionPadron import VentanaAsignacionPadron
import os.path
from PyQt5 import QtCore
from PyQt5 import QtWidgets
import os.path
import os, json, requests
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from qgis.core import *
from osgeo import ogr, osr
class AsignacionPadron:
"""QGIS Plugin Implementation."""
    def __init__(self, iface, UTI):
        """Wire up the assignment dialog: models, signal handlers and state.

        iface: the QGIS interface object; UTI: shared utility helper.
        """
        # Save reference to the QGIS interface
        self.iface = iface
        self.dlg = AsignacionPadronDialog()
        #self.dlg.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
        self.UTI = UTI
        # Item models + pending-request buffers for each combo level
        # (locality -> sector -> block).
        self.modeloLocalidad = QStandardItemModel()
        self.enviosLocalidad = []
        self.modeloSector = QStandardItemModel()
        self.enviosSector = []
        self.modeloManzana = QStandardItemModel()
        self.enviosManzana = []
        self.clavesIzq = []
        self.clavesDer = {}
        # Cascade reloads when a combo selection changes.
        #self.dlg.cmbLocalidad.currentIndexChanged.connect(self.obtenerSectoresPorLocalidad)
        self.dlg.cmbSector.currentIndexChanged.connect(self.obtenerManzanasPorSector)
        self.dlg.cmbManzana.currentIndexChanged.connect(self.contactarPintarCampos)
        # `highlighted` handlers only raise the bypass flag (see bypass* methods).
        #self.dlg.cmbLocalidad.highlighted.connect(self.bypassSectorLoc)
        self.dlg.cmbSector.highlighted.connect(self.bypassManzanaSector)
        self.dlg.cmbManzana.highlighted.connect(self.bypassPintar)
        self.VentanaLiberacion = VentanaAsignacionPadron(iface, self)
        # Buttons moving claves between the two tables.
        self.dlg.btnMas.clicked.connect(self.pasarDerecha)
        self.dlg.btnMenos.clicked.connect(self.pasarIzquierda)
        self.dlg.chkTodoClaves.stateChanged.connect(self.marcarTodoClaves)
        self.dlg.chkTodoMazPred.stateChanged.connect(self.marcarTodoMazPred)
        # Hide internal-id columns from the user.
        self.dlg.tablaClaves.hideColumn(1)
        self.dlg.tablaMazPred.hideColumn(0)
        self.dlg.tablaMazPred.hideColumn(3)
        self.dlg.btnAsignar.clicked.connect(self.asignarRevision)
        self.dlg.btnLiberarAsig.clicked.connect(self.llamarLiberar)
        # -1 sentinels mean "nothing loaded/selected yet".
        self.diccionarioAsignaciones = {}
        self.llaveManzana = None
        self.manzanaCargada = -1
        self.localidadCargado = -1
        self.sectorCargado = -1
        self.resetar()
        self.indexCompLocalidad = -1
        self.indexCompSector = -1
        self.indexCompManzana = -1
        self.bypass = False
    def bypassSectorLoc(self, index):
        """Arm the bypass flag when the locality combo is highlighted."""
        #print('se acrtivooooo')
        self.bypass = True
        #self.obtenerSectoresPorLocalidad()
    def bypassManzanaSector(self, index):
        """Arm the bypass flag when the sector combo is highlighted."""
        self.bypass = True
        #self.obtenerManzanasPorSector()
    def bypassPintar(self, index):
        """Arm the bypass flag when the block (manzana) combo is highlighted."""
        self.bypass = True
        #self.contactarPintarCampos()
    def run(self):
        """Run method that performs all the real work"""
        # show the dialog
        self.resetar()
        self.dlg.show()
        #self.obtenerLocalidades()
        self.UTI.strechtTabla(self.dlg.tablaClaves)
        self.UTI.strechtTabla(self.dlg.tablaMazPred)
        self.llenarUsuarios()
        # Resolve the working layers by id.  NOTE(review): ``self.ACA`` is
        # used here but is not assigned in __init__ within this file —
        # presumably injected elsewhere before run(); confirm.
        self.capaPredios = QgsProject.instance().mapLayer(self.ACA.obtenerIdCapa('predios.geom'))
        self.capaConH = QgsProject.instance().mapLayer(self.ACA.obtenerIdCapa('horizontales.geom'))
        self.capaConV = QgsProject.instance().mapLayer(self.ACA.obtenerIdCapa('verticales'))
        self.capaConVC = QgsProject.instance().mapLayer(self.ACA.obtenerIdCapa('cves_verticales'))
        #self.contactarPintarCampos()
        #self.obtenerLocalidades()
        self.obtenerSectoresPorLocalidad()
        # Run the dialog event loop
        result = self.dlg.exec_()
        # See if OK was pressed
        if result:
            pass
#_-------------------------------------------------------------------------------------------------
def obtenerLocalidades(self):
self.dlg.cmbLocalidad.clear()
try:
headers = {'Content-Type': 'application/json', 'Authorization' : self.UTI.obtenerToken()}
respuesta = requests.get(self.CFG.urlMunicipios, headers = headers)
except requests.exceptions.RequestException:
self.UTI.mostrarAlerta("Error de servidor LOC01", QMessageBox().Critical, "Cargar Localidades")
print('ERROR: LOC000')
lenJson = len(list(respuesta.json()))
if lenJson > 0:
listaTemp = ['--Selecciona--']
self.enviosLocalidad = ['-']
for localidad in respuesta.json():
listaTemp.append(str(localidad['label']) + " " + localidad['other'])
self.enviosLocalidad.append(str(localidad['value']))
modeloTemp = QStandardItemModel()
for i,word in enumerate( listaTemp ):
item = QStandardItem(word)
modeloTemp.setItem(i, 0, item)
self.UTI.extenderCombo(self.dlg.cmbLocalidad, self.completarLocalidad, modeloTemp)
#self.dlg.cmbLocalidad.model().item(0).setEnabled(False)
else:
self.dlg.cmbLocalidad.setEnabled(False)
self.dlg.cmbLocalidad.clear()
self.dlg.cmbSector.setEnabled(False)
self.dlg.cmbSector.clear()
self.dlg.cmbManzana.setEnabled(False)
self.dlg.cmbManzana.clear()
self.clavesIzq = []
self.llaveManzana = None
self.vaciarTabla(self.dlg.tablaClaves)
self.UTI.mostrarAlerta("No existen localidades registradas", QMessageBox().Information, "Cargar Localidades")
#-----------------------------------------------------------------------------------------
#Llenar segundo combo
def obtenerSectoresPorLocalidad(self):
#print('lo tiro una vez')
'''index = self.dlg.cmbLocalidad.currentIndex()
#print('tenemos ', index, self.indexCompLocalidad, self.bypass)
if index == self.indexCompLocalidad or self.bypass:
#self.indexCompLocalidad = -1
#print('pasooooo')
self.manzanaCargada = -1
self.sectorCargado = -1
self.bypass = False
self.dlg.cmbManzana.setEnabled(False)
self.dlg.cmbManzana.clear()
if self.dlg.cmbLocalidad.count() > 0 and index > 0:
index = self.dlg.cmbLocalidad.currentIndex()
try:
idSector = self.enviosLocalidad[index]
if self.localidadCargado == idSector:
print('SECTOR POR LOCALIDAD RETORNADOS')
return
self.localidadCargado = idSector
except:
return'''
idSector = '1'
self.dlg.cmbSector.clear()
try:
headers = {'Content-Type': 'application/json', 'Authorization' : self.UTI.obtenerToken()}
respuesta = requests.get(self.CFG.urlSectoresMuni + idSector + '/sector/', headers = headers)
except requests.exceptions.RequestException:
self.UTI.mostrarAlerta("Error de servidor SEC01", QMessageBox().Critical, "Cargar Sectores")
print('ERROR: SEC000')
lenJson = len(list(respuesta.json()))
if lenJson > 0:
listaTemp = ['--Selecciona--']
self.enviosSector = ['-']
for sector in respuesta.json():
listaTemp.append(sector['label'])
self.enviosSector.append(sector['value'])
modeloTemp = QStandardItemModel()
for i,word in enumerate( listaTemp ):
item = QStandardItem(word)
modeloTemp.setItem(i, 0, item)
self.UTI.extenderCombo(self.dlg.cmbSector, self.completarSector, modeloTemp)
self.dlg.cmbSector.model().item(0).setEnabled(False)
self.dlg.cmbSector.setEnabled(True)
else:
self.dlg.cmbSector.setEnabled(False)
self.dlg.cmbSector.clear()
self.clavesIzq = []
self.vaciarTabla(self.dlg.tablaClaves)
self.llaveManzana = None
self.UTI.mostrarAlerta("No existen sectores en la localidad", QMessageBox().Information, "Cargar Sectores")
#--------------------------------------------------------------------------------------------------------------
def obtenerManzanasPorSector(self):
index = self.dlg.cmbSector.currentIndex()
if index == self.indexCompSector or self.bypass:
self.bypass = False
if self.dlg.cmbSector.count() > 0 and index > 0:
#self.indexCompSector = -1
index = self.dlg.cmbSector.currentIndex()
try:
idSector = self.enviosSector[index]
if self.sectorCargado == idSector:
print('MANZANA POR SECTOR RETORNADOS')
return
self.sectorCargado = idSector
except:
return
self.dlg.cmbManzana.clear()
try:
headers = {'Content-Type': 'application/json', 'Authorization' : self.UTI.obtenerToken()}
respuesta = requests.get(self.CFG.urlManzanas + idSector + '/manzana/', headers = headers)
except requests.exceptions.RequestException:
self.UTI.mostrarAlerta("Error de servidor MAN01", QMessageBox().Critical, "Cargar Manzanas")
print('ERROR: MAN000')
lenJson = len(list(respuesta.json()))
if lenJson > 0:
listaTemp = ['--Selecciona--']
self.enviosManzana = ['-']
for manzana in respuesta.json():
listaTemp.append(manzana['label'])
self.enviosManzana.append(manzana['other'])
modeloTemp = QStandardItemModel()
for i,word in enumerate( listaTemp ):
item = QStandardItem(word)
modeloTemp.setItem(i, 0, item)
self.UTI.extenderCombo(self.dlg.cmbManzana, self.completarManzana, modeloTemp)
self.dlg.cmbManzana.model().item(0).setEnabled(False)
self.dlg.cmbManzana.setEnabled(True)
else:
self.dlg.cmbManzana.setEnabled(False)
self.dlg.cmbManzana.clear()
self.clavesIzq = []
self.vaciarTabla(self.dlg.tablaClaves)
self.llaveManzana = None
self.UTI.mostrarAlerta("No existen manzanas en el sector", QMessageBox().Information, "Cargar Manzanas")
#---------------------------------------------------------------------------------------------------------------
def contactarPintarCampos(self):
print('entro al pintar camposss')
index = self.dlg.cmbManzana.currentIndex()
if index == self.indexCompManzana or self.bypass:
#self.indexCompManzana = -1
self.bypass = False
if self.validarCombox and index > 0:
root = QgsProject.instance().layerTreeRoot()
group = root.findGroup('consulta')
if group is None:
self.UTI.mostrarAlerta('No se ha cargado ninguna Manzana', QMessageBox().Critical, 'Asignacion de campo')
return
self.ACA.obtenerXCapas()
cuerpo = {"incluirGeom": "true", "pagina": None, "bbox": "false", "pin": "false", "geomWKT": None, "epsg": None, "properties": None, "epsgGeomWKT": None, "itemsPagina": None, "nombre": "x"}
payload = json.dumps(cuerpo)
self.ACA.payload = payload
index = self.dlg.cmbManzana.currentIndex()
self.ACA.idManzana = self.enviosManzana[index]
if self.manzanaCargada == self.enviosManzana[index]:
print('SE RETORNO')
return
self.manzanaCargada = self.enviosManzana[index]
self.llaveManzana = self.enviosManzana[index]
self.ACA.pintarCapasCampo()
self.llenadoDeTablas()
self.manzanaCargada = -1
self.localidadCargado = -1
self.sectorCargado = -1
#----------------------------------------------------------------------------------------------------------------
def llenadoDeTablas(self):
self.obtenerDiccionarioAsignaciones()
keysDer = list(self.clavesDer.keys())
keysAsig = list(self.diccionarioAsignaciones.keys())
self.clavesIzq = []
clavesPerronas = []
filtro = []
if self.llaveManzana == None:
return
if self.llaveManzana in keysDer: #Si la llave manzana ya existe en la tabla derecha...
for predio in self.capaPredios.getFeatures():
listaH = self.listaCondominiosH(predio)
listaV = self.listaCondominiosV(predio)
for cond in listaH:
cveCat = predio['clave'] + cond
if not cveCat in self.clavesDer[self.llaveManzana]: #Si la clave del predio no esta en el lado derecho...
filtro.append(cveCat)
for cond in listaV:
listaVC = self.listaCondominiosVC(cond)
for vc in listaVC:
cveCat = predio['clave'] + cond['clave'] + vc
if not cveCat in self.clavesDer[self.llaveManzana]: #Si la clave del predio no esta en el lado derecho...
filtro.append(cveCat)
cveCat = predio['clave'] + '000000'
if not cveCat in self.clavesDer[self.llaveManzana]: #Si la clave del predio no esta en el lado derecho...
filtro.append(cveCat)
else: #Si la llave de manzanaaun no la tenemos...
self.clavesDer[self.llaveManzana] = [] #La agregamos al lado derecho pero vacia...
for predio in self.capaPredios.getFeatures():
listaH = self.listaCondominiosH(predio)
listaV = self.listaCondominiosV(predio)
for cond in listaH:
cveCat = predio['clave'] + cond
if not cveCat in self.clavesDer[self.llaveManzana]: #Si la clave del predio no esta en el lado derecho...
filtro.append(cveCat)
for cond in listaV:
listaVC = self.listaCondominiosVC(cond)
for vc in listaVC:
cveCat = predio['clave'] + cond['clave'] + vc
if not cveCat in self.clavesDer[self.llaveManzana]: #Si la clave del predio no esta en el lado derecho...
filtro.append(cveCat)
cveCat = predio['clave'] + '000000'
if not cveCat in self.clavesDer[self.llaveManzana]: #Si la clave del predio no esta en el lado derecho...
filtro.append(cveCat)
if self.llaveManzana in keysAsig:
for clave in filtro:
if not clave in self.diccionarioAsignaciones[self.llaveManzana]: #Si la clave del predio no esta en el lado derecho...
clavesPerronas.append(clave)
else:
for clave in filtro:
clavesPerronas.append(clave)
for clave in clavesPerronas:
self.clavesIzq.append(clave)
self.clavesIzq.sort()
self.actualizarTablas()
#-----------------------------------------------------------------------------------------------------------------
def listaCondominiosH(self, predio):
listaSalida = []
for cond in self.capaConH.getFeatures():
geomCond = cond.geometry()
if geomCond.buffer(-0.0000001, 1).intersects(predio.geometry()):
listaSalida.append(cond['clave'])
return listaSalida
#---------------------------------------------------------------------------
def listaCondominiosVC(self, condV):
listaSalida = []
for cond in self.capaConVC.getFeatures():
geomCond = cond.geometry()
if geomCond.intersects(condV.geometry()):
listaSalida.append(cond['clave'])
return listaSalida
#---------------------------------------------------------------------------------------------------
def listaCondominiosV(self, predio):
listaSalida = []
for cond in self.capaConV.getFeatures():
geomCond = cond.geometry()
if geomCond.buffer(-0.0000001, 1).intersects(predio.geometry()):
listaSalida.append(cond)
return listaSalida
#------------------------------------------------------------------------------------------------------------
def validarCombox(self):
return (self.dlg.cmbLocalidad.count() > 0 and self.dlg.cmbSector.count() > 0 and self.dlg.cmbManzana.count() >0)
#--------------------------------------------------------------------------------------------------------------------
    def actualizarTablas(self):
        """Repaint both tables from clavesIzq / clavesDer."""
        self.llenarTablaIzquierda()
        self.llenarTablaDerecha()
#--------------------------------------------------------------------------------------------------------
def llenarTablaIzquierda(self):
self.vaciarTabla(self.dlg.tablaClaves)
#for clave in listaClaves:
for x in range(0, len(self.clavesIzq)):
self.dlg.tablaClaves.insertRow(x)
item = QtWidgets.QTableWidgetItem(self.clavesIzq[x])
item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
item.setCheckState(QtCore.Qt.Unchecked)
self.dlg.tablaClaves.setItem(x, 0 , item)
#-------------------------------------------------------------------------------------------------
    def llenarTablaDerecha(self):
        """Rebuild the right table: one row per (block key, lot key) pair."""
        self.vaciarTabla(self.dlg.tablaMazPred)
        keysDer = list(self.clavesDer.keys())
        for key in keysDer:
            listaKey = self.clavesDer[key]
            for x in range(0, len(listaKey)):
                rowCount = self.dlg.tablaMazPred.rowCount()
                self.dlg.tablaMazPred.insertRow(rowCount)
                rowCount = self.dlg.tablaMazPred.rowCount()
                # Column 0 (hidden): the full block key.
                item = QtWidgets.QTableWidgetItem(str(key))
                self.dlg.tablaMazPred.setItem(rowCount-1, 0 , item)
                item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                # Column 1: last three digits of the block key, checkable.
                item = QtWidgets.QTableWidgetItem(str(key)[-3:])
                self.dlg.tablaMazPred.setItem(rowCount-1, 1 , item)
                item.setFlags( QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled )
                item.setCheckState(QtCore.Qt.Unchecked)
                # Column 2: the lot (predio) part of the key.
                item = QtWidgets.QTableWidgetItem(str(listaKey[x]))
                self.dlg.tablaMazPred.setItem(rowCount-1, 2 , item)
                item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
#-------------------------------------------------------------------------------------------------------
    def pasarDerecha(self):
        """Move every checked key from the left table into the right one."""
        indexSel = []
        for c in range(0, self.dlg.tablaClaves.rowCount()):
            if self.dlg.tablaClaves.item(c, 0 ).checkState() == QtCore.Qt.Checked:
                indexSel.append(c)
        if len(indexSel) > 0:
            listaQuitados = []
            for index in indexSel:
                item = self.clavesIzq[index]
                listaQuitados.append(item)
                self.clavesDer[self.llaveManzana].append(item)
            # Remove after the scan so the collected indexes stay valid.
            for quitado in listaQuitados:
                self.clavesIzq.remove(quitado)
            self.clavesDer[self.llaveManzana].sort()
            self.dlg.chkTodoClaves.setCheckState(QtCore.Qt.Unchecked)
            self.actualizarTablas()
#---------------------------------------------------------------------------------------------------
    def pasarIzquierda(self):
        """Return every checked key from the right table to the left one."""
        indexSel = []
        for c in range(0, self.dlg.tablaMazPred.rowCount()):
            if self.dlg.tablaMazPred.item(c, 1 ).checkState() == QtCore.Qt.Checked:
                indexSel.append(c)
        if len(indexSel) >0:
            for index in indexSel:
                key = str(self.dlg.tablaMazPred.item(index, 0).text())
                data = str(self.dlg.tablaMazPred.item(index, 2).text())
                self.clavesDer[str(key)].remove(str(data))
                # Only keys of the currently loaded block go back to the left.
                if key == self.llaveManzana:
                    self.clavesIzq.append(data)
            self.dlg.chkTodoMazPred.setCheckState(QtCore.Qt.Unchecked)
            self.clavesIzq.sort()
            self.actualizarTablas()
#----------------------------------------------------------------------------------------------------
def marcarTodoClaves(self):
if self.dlg.chkTodoClaves.checkState() == QtCore.Qt.Checked:
if self.dlg.tablaClaves.rowCount() > 0:
for c in range(0, self.dlg.tablaClaves.rowCount()):
self.dlg.tablaClaves.item(c, 0 ).setCheckState(QtCore.Qt.Checked)
else:
self.dlg.chkTodoClaves.setCheckState(QtCore.Qt.Unchecked)
else:
for c in range(0, self.dlg.tablaClaves.rowCount()):
self.dlg.tablaClaves.item(c, 0 ).setCheckState(QtCore.Qt.Unchecked)
#----------------------------------------------------------------------------------------------------
def marcarTodoMazPred(self):
if self.dlg.chkTodoMazPred.checkState() == QtCore.Qt.Checked:
if self.dlg.tablaMazPred.rowCount() > 0:
for c in range(0, self.dlg.tablaMazPred.rowCount()):
self.dlg.tablaMazPred.item(c, 1 ).setCheckState(QtCore.Qt.Checked)
else:
self.dlg.chkTodoMazPred.setCheckState(QtCore.Qt.Unchecked)
else:
for c in range(0, self.dlg.tablaMazPred.rowCount()):
self.dlg.tablaMazPred.item(c, 1 ).setCheckState(QtCore.Qt.Unchecked)
#------------------------------------------------------------------------------------------------------------
def vaciarTabla(self, tabla):
tabla.clearContents()
tabla.setRowCount(0)
for row in range(0, tabla.rowCount()):
tabla.removeRow(row)
#--------------------------------------------------------------------------------------------------------------
def completarLocalidad(self, text):
#print('entro al de afuera')
if text:
#print('entro al de adentro con ', text)
index = self.dlg.cmbLocalidad.findText(text)
self.indexCompLocalidad = index
#print('indice a completa', index)
self.dlg.cmbLocalidad.setCurrentIndex(index)
#----------------------------------------------------------------------------------------------------------------------
def completarSector(self, text):
if text:
index = self.dlg.cmbSector.findText(text)
self.indexCompSector = index
self.dlg.cmbSector.setCurrentIndex(index)
#---------------------------------------------------------------------------------------------------------------------
def completarManzana(self, text):
if text:
index = self.dlg.cmbManzana.findText(text)
self.indexCompManzana = index
self.dlg.cmbManzana.setCurrentIndex(index)
#---------------------------------------------------------------------------------------------------
def completarUsuario(self, text):
if text:
index = self.dlg.cmbUsuario.findText(text)
self.dlg.cmbUsuario.setCurrentIndex(index)
#----------------------------------------------------------------------------------------------------------
def obtenerDiccionarioAsignaciones(self):
self.diccionarioAsignaciones = {}
try:
headers = {'Content-Type': 'application/json', 'Authorization' : self.UTI.obtenerToken()}
respuesta = requests.get(self.CFG.urlAsigPadTodos, headers = headers)
if respuesta.status_code == 200:
for cadaUno in respuesta.json():
cveCat = cadaUno['cveCatastral']
cvePredio = cveCat[-11:]
cveManzana = cveCat[0:20]
llavesDic = self.diccionarioAsignaciones.keys()
if not cveManzana in llavesDic:
self.diccionarioAsignaciones[cveManzana] = []
self.diccionarioAsignaciones[cveManzana].append(cvePredio)
else:
print(respuesta)
self.UTI.mostrarAlerta("Error de servidor DICACC1", QMessageBox().Critical, "Cargar Sectores")
except requests.exceptions.RequestException:
self.UTI.mostrarAlerta("Error de servidor DICACC2", QMessageBox().Critical, "Cargar Sectores")
#-------------------------------------------------------------------------------------------------------------
    def asignarRevision(self):
        """POST every row of the right table as an assignment for the
        selected user, then clear the right-hand state on success."""
        indiceUsuario = self.dlg.cmbUsuario.currentIndex()
        usuario = self.enviosUsuario[indiceUsuario]
        if indiceUsuario > 0:
            # Every row of the right table is sent (not only checked ones).
            indexSel = []
            for c in range(0, self.dlg.tablaMazPred.rowCount()):
                indexSel.append(c)
            if len(indexSel) >0:
                listaAEnviar = []
                for index in indexSel:
                    # Full catastral key = hidden block key + lot key.
                    cveManzana = str(self.dlg.tablaMazPred.item(index, 0).text())
                    cvePredioMedia = str(self.dlg.tablaMazPred.item(index, 2).text())
                    cveCatCompleta = cveManzana + cvePredioMedia
                    objeto = {}
                    objeto['cveUsuario'] = usuario
                    objeto['cveCatastral'] = cveCatCompleta
                    listaAEnviar.append(objeto)
                listaAEnviar = json.dumps(listaAEnviar)
                try:
                    headers = {'Content-Type': 'application/json', 'Authorization' : self.UTI.obtenerToken()}
                    respuesta = requests.post(self.CFG.urlAsigPadAgregar, headers = headers, data=listaAEnviar)
                    if respuesta.status_code == 200:
                        self.UTI.mostrarAlerta("Asignacion completa", QMessageBox().Information, "Asignacion de padron")
                        # Sent successfully: empty the right table and buckets.
                        self.vaciarTabla(self.dlg.tablaMazPred)
                        keysDer = list(self.clavesDer.keys())
                        for k in keysDer:
                            self.clavesDer[k] = []
                        self.actualizarTablas()
                    else:
                        self.UTI.mostrarAlerta("Error de servidor ACAMP1", QMessageBox().Critical, "Asignacion de padron")
                except requests.exceptions.RequestException:
                    self.UTI.mostrarAlerta("Error de servidor ACAMP", QMessageBox().Critical, "Asignacion de padron")
            else:
                self.UTI.mostrarAlerta("No se han agregado asignaciones", QMessageBox().Critical, "Asignacion de padron")
        else:
            self.UTI.mostrarAlerta("Debes seleccionar un usuario", QMessageBox().Critical, "Asignacion de padron")
#---------------------------------------------------------------------------------------------------
def llamarLiberar(self):
indiceUsuario = self.dlg.cmbUsuario.currentIndex()
if indiceUsuario > 0:
self.VentanaLiberacion.run()
else:
self.UTI.mostrarAlerta("Debes seleccionar un usuario", QMessageBox().Critical, "Asignacion de padron")
#-----------------------------------------------------------------------------------------------------
def llenarUsuarios(self):
self.dlg.cmbUsuario.clear()
try:
headers = {'Content-Type': 'application/json', 'Authorization' : self.UTI.obtenerToken()}
respuesta = requests.get(self.CFG.urlObtenerUsuarios, headers = headers)
except requests.exceptions.RequestException:
self.UTI.mostrarAlerta("Error de servidor ACAUSU1", QMessageBox().Critical, "Cargar Manzanas")
print('ERROR: USU000')
lenJson = len(list(respuesta.json()))
if lenJson > 0:
listaTemp = ['--Selecciona--']
self.enviosUsuario = ['-']
for dato in respuesta.json():
listaTemp.append(str(dato['firstName'] )+ ' ' + str(dato['lastName']))
self.enviosUsuario.append(dato['login'])
modeloTemp = QStandardItemModel()
for i,word in enumerate( listaTemp ):
item = QStandardItem(word)
modeloTemp.setItem(i, 0, item)
self.UTI.extenderCombo(self.dlg.cmbUsuario, self.completarUsuario, modeloTemp)
self.dlg.cmbUsuario.model().item(0).setEnabled(False)
#------------------------------------------------------------
    def resetar(self):
        """Clear both tables and the cascade combos back to the initial state."""
        self.vaciarTabla(self.dlg.tablaClaves)
        self.vaciarTabla(self.dlg.tablaMazPred)
        self.dlg.cmbManzana.clear()
        self.dlg.cmbSector.clear()
        self.dlg.cmbManzana.setEnabled(False)
        self.dlg.cmbSector.setEnabled(False)
        self.clavesIzq = []
        self.clavesDer = {}
        # -1 sentinels mean "nothing cached / loaded yet".
        self.manzanaCargada = -1
        self.localidadCargado = -1
        self.sectorCargado = -1
"roslop_1841@hotmail.com"
] | roslop_1841@hotmail.com |
cc630d94e4aedb407b5660de8a965fe98411f2dc | 65ae896aa7a9b9cae6b90be8f5900ec2940ff65b | /03. Characters in Range.py | 25b6b3841475fd390e67849899d1ca8e44e51470 | [] | no_license | antondelchev/Functions---Exercise | dd1ea014e4e177618c3394ce7052a082dc1ab462 | 6445f767d092c06e7b1c8921ee030b5a0a71e832 | refs/heads/main | 2023-04-08T22:48:33.515213 | 2021-04-21T12:06:23 | 2021-04-21T12:06:23 | 360,154,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | def chars_in_range(char1, char2):
all_char = ""
for i in range(ord(char1) + 1, ord(char2)):
all_char += chr(i)
all_char += " "
return all_char
# Read the two boundary characters and print everything between them.
first_character = input()
second_character = input()
print(chars_in_range(first_character, second_character))
| [
"noreply@github.com"
] | antondelchev.noreply@github.com |
8192e6bcce172127eb1ea134d03f012f502a1dd6 | 65f8211fc33eb5f9ac1ff0d68902226ca9a58692 | /graph_algorithms/bridge_matrix.py | 55f4e2bc177df53169cbfcff9d38915a46080d96 | [] | no_license | szarbartosz/asd-python | 46869f5699a1ef661e2df02e523af0adcddbbbda | 0130cc3dcbba6ad62e1516c98b5cbab85848d619 | refs/heads/master | 2022-12-13T19:02:53.699381 | 2020-09-11T13:29:31 | 2020-09-11T13:29:31 | 242,975,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | def find_bridge_matrix(G):
V = len(G)
visited = [False] * V
parent = [None] * V
visit_time = [float('inf')] * V
low = [float('inf')] * V
time = 1
def visit(u):
visited[u] = True
nonlocal time
visit_time[u] = time
low[u] = time
time += 1
for v in range(len(G[u])):
if G[u][v] != 0:
if not visited[v]:
parent[v] = u
visit(v)
low[u] = min(low[u], low[v])
if low[v] == visit_time[v]:
print(f'bridge: ({u}, {v})')
elif v != parent[u]:
low[u] = min(low[u], visit_time[v])
for i in range(V):
if not visited[i]:
visit(i)
# Demo graph: path edge 0-1 feeding the cycle 1-2-3, with edge 3-4
# hanging off the cycle.  Expected bridges: (3, 4) and (0, 1).
G = [[0, 1, 0, 0, 0],
     [1, 0, 1, 1, 0],
     [0, 1, 0, 1, 0],
     [0, 1, 1, 0, 1],
     [0, 0, 0, 1, 0]]
find_bridge_matrix(G)
| [
"szarbartosz@gmail.com"
] | szarbartosz@gmail.com |
586d1e32398855039ebd98ed2777d3e23edc8872 | c8b18f0530f290fcd451b2a34d8e64d62477c3e5 | /codes/Tools/02_Pillow-string_picture/02_add_filter.py | 48869d863c1449f4ae23959df938a9076fd99c05 | [] | no_license | YorkFish/hello-world | a8e2e019e51a814bae4dbb134abce90fd02317d4 | 31f8b807c57bd942fc805466ad9d5ff9b9614b55 | refs/heads/master | 2021-10-23T00:41:09.867934 | 2021-10-12T01:25:20 | 2021-10-12T01:25:20 | 165,218,921 | 0 | 0 | null | 2019-01-11T11:19:18 | 2019-01-11T09:42:33 | null | UTF-8 | Python | false | false | 182 | py | # coding:utf-8
# 此程序功能:加滤镜
from PIL import Image, ImageFilter
im1 = Image.open("fish.jpg")
im2 = im1.filter(ImageFilter.BLUR)
im2.save("fish_blur.jpg", "jpeg")
| [
"18258788231@163.com"
] | 18258788231@163.com |
01c1848a705ab7ce9f6c85d9acac02035d7114c7 | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/codec/models/openconfig/openconfig-lldp/cd-encode-oc-lldp-24-ydk.py | ad1ae2444ce86c3e012ba744c874c3714dcb2793 | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 2,400 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Encode configuration for model openconfig-lldp.
usage: cd-encode-oc-lldp-24-ydk.py [-h] [-v]
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CodecService
from ydk.providers import CodecServiceProvider
from ydk.models.openconfig import openconfig_lldp \
as oc_lldp
from ydk.models.openconfig import openconfig_lldp_types \
as oc_lldp_types
import logging
def config_lldp(lldp):
    """Add config data to lldp object.

    Enables LLDP globally, sets a 15-second hello timer, and suppresses
    advertisement of the management-address TLV.
    """
    lldp.config.enabled = True
    lldp.config.hello_timer = 15
    # Identity class marking the MANAGEMENT_ADDRESS TLV as suppressed.
    suppress_tlv_advertisement = oc_lldp_types.MANAGEMENTADDRESS()
    lldp.config.suppress_tlv_advertisement.append(suppress_tlv_advertisement)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
args = parser.parse_args()
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create codec provider
provider = CodecServiceProvider(type="xml")
# create codec service
codec = CodecService()
lldp = oc_lldp.Lldp() # create config object
config_lldp(lldp) # add object configuration
# encode and print object
print(codec.encode(provider, lldp))
exit()
# End of script
| [
"deom119@gmail.com"
] | deom119@gmail.com |
1bdaa614a033c2f7efd3ffb2527bb415bcc34af0 | 30754a148b79903d6e49399f1f270c79934ce389 | /tests/fuzzer/test_list_directory.py | 725dbe57113993c9a112d67576d1dceac33558d4 | [
"BSD-3-Clause"
] | permissive | syedkhalid/fuzzinator | 720ffc552c595b50de46e4e4e51f3a01cdc9aa77 | f90b58605de563e77b85ed0d54d2beb29efc7d14 | refs/heads/master | 2021-04-09T17:31:06.625840 | 2018-03-12T14:37:18 | 2018-03-12T15:21:27 | 125,814,277 | 1 | 0 | BSD-3-Clause | 2018-03-19T06:53:29 | 2018-03-19T06:53:29 | null | UTF-8 | Python | false | false | 1,279 | py | # Copyright (c) 2016-2017 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import pytest
import fuzzinator
from os.path import join
from common_fuzzer import resources_dir
# Directory of fixture files exercised by the ListDirectory fuzzer.
mock_tests = join(resources_dir, 'mock_tests')


# Each case: glob pattern, whether to yield file contents (True) or file
# paths (False), and the exact expected set of produced tests.
@pytest.mark.parametrize('pattern, contents, exp', [
    (join(mock_tests, '*'), True, {b'foo\n', b'bar\n', b'baz\n'}),
    (join(mock_tests, '**', '*'), True, {b'foo\n', b'bar\n', b'baz\n', b'qux\n'}),
    (join(mock_tests, '*'), False, {join(mock_tests, 'baz.txt'), join(mock_tests, 'bar.txt'), join(mock_tests, 'foo.txt')}),
    (join(mock_tests, '**', '*'), False, {join(mock_tests, 'baz.txt'), join(mock_tests, 'bar.txt'), join(mock_tests, 'foo.txt'), join(mock_tests, 'subdir', 'qux.txt')}),
])
def test_list_directory(pattern, contents, exp):
    """Drain the fuzzer by increasing index until it returns None and
    compare the collected set of tests against the expectation."""
    fuzzer = fuzzinator.fuzzer.ListDirectory(pattern=pattern, contents=contents)
    with fuzzer:
        tests = set()
        index = 0
        while True:
            test = fuzzer(index=index)
            if test is None:
                break
            tests.add(test)
            index += 1
    assert tests == exp
| [
"reni@inf.u-szeged.hu"
] | reni@inf.u-szeged.hu |
d1cf4cdbc0a102d31030cbcf3d9e0f6eb536d7a3 | 4bb66e64121d3f4eff4ca0809929983a5c354e3f | /backend/platform_3278/urls.py | fec873c25f27088090a61652926ddb6a711430f2 | [] | no_license | crowdbotics-apps/platform-3278 | 1f1697e33d692550a617466f240d0398b7ab8020 | f78b936584b8db09ab6eeb514335e8605ff0dfdb | refs/heads/master | 2020-05-22T17:33:42.090603 | 2019-05-13T16:04:40 | 2019-05-13T16:04:40 | 186,452,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | """platform_3278 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # The home app owns the site root.
    url('', include('home.urls')),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^api/v1/', include('home.api.v1.urls')),
    url(r'^admin/', admin.site.urls),
]

# Branding shown in the Django admin site.
admin.site.site_header = 'Platform'
admin.site.site_title = 'Platform Admin Portal'
admin.site.index_title = 'Platform Admin'
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
2b75945fc4280780dbf75164ac96a04676e50cfa | 9b54e3d58447e917a238b85891020c392c4ac601 | /acmicpc/9506/9506.py | 1c348d45c3fb17732c03fd82af2a1c1cdf2c030f | [
"MIT"
] | permissive | love-adela/algorithm-ps | ea0ebcd641a4c309348b389b8618daa83973f4b2 | c92d105d8ad344def001160367115ecf99d81c0d | refs/heads/master | 2023-05-11T03:37:11.750692 | 2023-04-30T17:31:30 | 2023-04-30T17:31:30 | 174,651,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | def get_divisor(k):
divisors = []
for i in range(1, k):
if k % i == 0:
divisors.append(i)
return divisors
# Read numbers until the -1 sentinel; report whether each one is perfect
# (equal to the sum of its proper divisors), e.g. "6 = 1 + 2 + 3".
while True:
    n = int(input())
    if n == -1:
        break
    divisors = get_divisor(n)
    # Compute the sum once (the original summed twice and used a
    # redundant 'elif n != sum(divisors)' where a plain else suffices).
    total = sum(divisors)
    if n == total:
        print(f'{n}', end=' = ')
        print(' + '.join(map(str, divisors)))
    else:
        print(f'{n} is NOT perfect.')
| [
"love.adelar@gmail.com"
] | love.adelar@gmail.com |
1215004b25792a229f83ffa57541e9cf3c11ce07 | b1c412822f856bb2dddd9ffac00b3aeee7794961 | /lab1/resnet.py | 20ca9ef1d201882595ea7fe8e71e39c724c43afb | [] | no_license | gavin0430/Deep-Learning | 61db627d3ff6239e6219a31b0a647cd4fb86df07 | ed14037eb83adc4a21b11aed14d3d93fb7045e02 | refs/heads/master | 2020-03-07T14:41:29.127217 | 2018-08-11T15:35:03 | 2018-08-11T15:35:03 | 127,532,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 30 20:48:37 2018
@author: gavin
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class BasicBlock(nn.Module):
    """Classic (post-activation) ResNet basic block: two 3x3 conv+BN layers
    with a ReLU between them, plus an identity or 1x1-projection shortcut."""
    # Channel multiplier of the block output (1 for the basic block).
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        # First conv may downsample spatially (stride > 1); bias is omitted
        # because the following batch norm supplies the affine shift.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut by default; replaced by a strided 1x1 projection
        # when the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        """Return relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut(x))."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: a 3x3 stem, three residual stages (16/32/64
    channels), 2x2 average pooling and a linear classifier head."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        # Channel count fed to the next block; advanced by _make_layer.
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        # Stages 2 and 3 halve the spatial resolution (stride=2 first block).
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        # self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # 64*4*4 assumes 32x32 inputs (CIFAR): 32 -> 16 -> 8 through the
        # strided stages, then 4 after the 2x2 average pool -- TODO confirm.
        self.linear = nn.Linear(64*4*4, num_classes)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack num_blocks blocks; only the first one uses `stride`."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            # Next block receives this stage's output channel count.
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        """Compute class logits for a batch of images."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # out = self.layer4(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNet20(num_classes=10):
    """Build a ResNet-20 (3 blocks per stage).

    `num_classes` sizes the final linear layer; the default (10) keeps the
    original zero-argument call backward compatible.
    """
    return ResNet(BasicBlock, [3, 3, 3], num_classes)
def ResNet56(num_classes=10):
    """Build a ResNet-56 (9 blocks per stage).

    `num_classes` sizes the final linear layer; the default (10) keeps the
    original zero-argument call backward compatible.
    """
    return ResNet(BasicBlock, [9, 9, 9], num_classes)
def ResNet110(num_classes=10):
    """Build a ResNet-110 (18 blocks per stage).

    `num_classes` sizes the final linear layer; the default (10) keeps the
    original zero-argument call backward compatible.
    """
    return ResNet(BasicBlock, [18, 18, 18], num_classes)
| [
"mz@email.com"
] | mz@email.com |
1533273c46723599a3a8ff52b8f6694d34df94c6 | b007d88e6726452ffa8fe80300614f311ae5b318 | /array/text_dollar_coding.py | 6c9bd8b5f829472c4ab277b35f5c3d0ee5056863 | [] | no_license | jinurajan/Datastructures | ec332b12b8395f42cb769e771da3642f25ba7e7f | 647fea5d2c8122468a1c018c6829b1c08717d86a | refs/heads/master | 2023-07-06T14:42:55.168795 | 2023-07-04T13:23:22 | 2023-07-04T13:23:22 | 76,943,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,342 | py |
# Word lookup tables for number-to-English conversion.
DIGITS_TEXT = {1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', 6: 'Six',
               7: 'Seven', 8: 'Eight', 9: 'Nine'}
TENS_TEXT = {10: 'Ten', 20: 'Twenty', 30: 'Thirty', 40: 'Forty', 50: 'Fifty',
             60: 'Sixty', 70: 'Seventy', 80: 'Eighty', 90: 'Ninety'}
TEENS_TEXT = {11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen',
              15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen',
              19: 'Nineteen'}
HUNDREDS_TEXT = {100: 'Hundred', 1000: 'Thousand', 1000000: 'Million'}


def Textify(number, result):
    """Append the English words for a non-negative integer to *result*.

    Fixes over the previous version: integer division (``//``) so the dict
    lookups receive ints on Python 3; the tens lookup uses the multiple of
    ten (``TENS_TEXT[tens]`` raised KeyError, e.g. for 10); and the hundreds
    branch now emits the word 'Hundred'.

    Zero appends nothing.  Returns *result* for convenience.
    """
    if 1 <= number <= 9:
        result.append(DIGITS_TEXT[number])
    elif 11 <= number <= 19:
        result.append(TEENS_TEXT[number])
    else:
        hundreds = number // 100
        if hundreds > 0:
            # e.g. 234 -> 'Two' 'Hundred' + words for 34
            Textify(hundreds, result)
            result.append(HUNDREDS_TEXT[100])
            Textify(number - hundreds * 100, result)
        else:
            tens = number // 10
            if tens > 0:
                # e.g. 42 -> 'Forty' + words for 2; exact tens (10, 20, ...)
                # recurse on 0, which appends nothing.
                result.append(TENS_TEXT[tens * 10])
                Textify(number - tens * 10, result)
    return result


def NumberToText(number):
    """Return the list of English words for *number*, ending with 'Dollars'.

    Peels off the million and thousand chunks, emitting the scale word only
    when its chunk is non-zero.  The previous version appended the scale
    words unconditionally (producing e.g. 'One Thousand Hundred Five') and,
    worse, appended Textify's *return value* -- the result list itself --
    into the output for 7-digit numbers.
    """
    final_text = []
    for divisor in (1000000, 1000):
        chunk = number // divisor
        if chunk > 0:
            Textify(chunk, final_text)
            final_text.append(HUNDREDS_TEXT[divisor])
            number -= chunk * divisor
    # Remaining value below 1000 (Textify emits 'Hundred' itself).
    Textify(number, final_text)
    final_text.append("Dollars")
    return final_text
def DollarCoding(number):
    """Print the dollar-amount wording for *number* on one line."""
    print(' '.join(NumberToText(number)))
if __name__ == "__main__":
    # Demo: print the wording for a seven-digit amount.  The commented-out
    # calls exercise progressively smaller numbers.
    DollarCoding(1234567)
    # DollarCoding(123456)
    # DollarCoding(12645)
    # DollarCoding(126)
    # DollarCoding(12)
    # DollarCoding(1)
| [
"jinu.p.r@gmail.com"
] | jinu.p.r@gmail.com |
a47735081eff681ee355b014db90218c4a1692f6 | 1b6dff9c7e9ee5eac407a9fd97391536742e88fc | /servers/Radiation/archive_scrapper/archive_scrapper/spiders/prediccs_archive.py | 89d2054ad3b80658bccbe8dc6d7536cc37360c77 | [
"BSD-2-Clause"
] | permissive | mars-planet/mars_city | 00793d486361c130347d5fe513927368b9807b70 | 29df75e9037eec50672fd33295fc70d6f6e4b325 | refs/heads/master | 2020-04-12T06:46:16.073407 | 2018-08-12T13:30:57 | 2018-08-12T13:30:57 | 63,711,405 | 25 | 50 | NOASSERTION | 2023-07-04T15:43:23 | 2016-07-19T16:43:52 | C++ | UTF-8 | Python | false | false | 1,816 | py | import scrapy
class PrediccsArchiveScrapper(scrapy.Spider):
    """Scrape archived GOES radiation plot data from prediccs.sr.unh.edu.

    The first parse() call reads the archive index and collects the
    sub-directory links; subsequent calls fetch each sub-directory's
    '31daysMars' plot page and yield its tabular data.
    """
    # Set to 1 once the archive index has been parsed and `links` filled.
    flag = 0
    # Index into links[0] of the archive entry currently being fetched.
    count = 0
    # Path suffix appended to every archive sub-directory URL.
    string = 'bryn/31daysMars.plot'
    name = "prediccs"
    # links[0] holds the href values scraped from the archive index.
    links = []
    # Let 404 responses reach parse() instead of being dropped, so missing
    # archive entries can be skipped and crawling continues.
    handle_httpstatus_list = [404]
    def start_requests(self):
        # Entry point: fetch the archive index page.
        url = 'http://prediccs.sr.unh.edu/data/goesPlots/archive/'
        yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        """Handle the archive index (first call) or a plot-data page."""
        if response.status == 404:
            # Missing entry: move on to the next archive directory.
            # NOTE(review): `count` is advanced without a bounds check here,
            # so a 404 on the last entry could raise IndexError -- confirm.
            # Execution also falls through to the parsing code below on a
            # 404 page -- confirm that is intended.
            self.count = self.count + 1
            scrap_url = 'http://prediccs.sr.unh.edu/' + \
                        'data/goesPlots/archive/' + \
                        self.links[0][self.count] + self.string
            yield scrapy.Request(scrap_url, self.parse)
        if not self.flag:
            # First call: collect the sub-directory links, skipping the
            # first 5 header links and the last 2 footer links.
            self.flag = 1
            linkobj = response.css("a")[5:-2]
            self.links.append(linkobj.css("a::attr(href)").extract())
            if self.count < len(self.links[0]):
                self.count = self.count + 1
                scrap_url = 'http://prediccs.sr.unh.edu/' + \
                            'data/goesPlots/archive/' + \
                            self.links[0][self.count] + self.string
                yield scrapy.Request(scrap_url, self.parse)
        # Extract the tab-separated table from the page body, dropping the
        # first 22 header lines; keep the first six columns plus the
        # second-to-last column of each row.
        datas = response.css("p::text").extract_first()
        datas = datas.split("\n")[22:]
        data = []
        for i in datas:
            i = i.split('\t')
            d = i[:6]
            d.append(i[-2])
            data.append(d)
        yield{
            'data': data,
        }
        if self.count < len(self.links[0]):
            # Queue the next archive directory.
            self.count = self.count + 1
            scrap_url = 'http://prediccs.sr.unh.edu/' + \
                        'data/goesPlots/archive/' + \
                        self.links[0][self.count] + self.string
            yield scrapy.Request(scrap_url, self.parse)
"nivedn3@gmail.com"
] | nivedn3@gmail.com |
8765f592786aa47d3f4c5bc20ae6abd6057c68dc | b6a48f9a6158bcb7e6fc75e5eacaef19250fc4c5 | /cosmos/ingestion/ingest/process/detection/src/utils/split_train_val_test.py | 469996ade467d8c8f626e1ddad9407a3f23584a5 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | UW-COSMOS/Cosmos | dcde3be6534e411a20fcf1ff36e422fc8af2ac8a | 5ed4a4c149e03773690668437d2f93aa532453c6 | refs/heads/master | 2023-09-01T18:03:20.525760 | 2023-08-31T13:56:21 | 2023-08-31T13:56:21 | 159,849,583 | 39 | 14 | null | 2023-09-13T14:39:45 | 2018-11-30T16:24:59 | Python | UTF-8 | Python | false | false | 1,164 | py | #!/usr/bin/env python3
"""
"""
import os
from random import shuffle
if __name__ == '__main__':
    # Collect image file stems (names minus their 4-character extension),
    # dropping the placeholder README if present.
    names = os.listdir('data/images')
    try:
        names.remove('README.md')
    except ValueError:
        pass
    stems = [name[:-4] for name in names]
    shuffle(stems)

    # Split is 70/10/20
    total = len(stems)
    n_train = int(.7 * total)
    n_val = int(.1 * total)
    train_filelist = stems[:n_train]
    val_filelist = stems[n_train:n_train + n_val]
    test_filelist = stems[n_train + n_val:]
    print('There are {} train files, {} val files, and {} test files'.format(len(train_filelist),
                                                                             len(val_filelist),
                                                                             len(test_filelist)))
    # Write one stem per line into each split file.
    for out_path, subset in (('data/train.txt', train_filelist),
                             ('data/val.txt', val_filelist),
                             ('data/test.txt', test_filelist)):
        with open(out_path, 'w') as wf:
            for stem in subset:
                wf.write(stem)
                wf.write('\n')
| [
"ankur.goswami12@gmail.com"
] | ankur.goswami12@gmail.com |
40da452a78a197b37a9af20397a0cc9dd41ab09a | af39e4ee0383b1ecb16511aa0df6404a0936b475 | /Lib/site-packages/crispy_forms/templates/bootstrap4/layout/checkboxselectmultiple_inline.html.py | abdb4d0b0af7c14dec0779ae8a0edd79268196a1 | [] | no_license | SheraramPrajapat1998/ecommerce | 009f58023086e6a182b8b0a9385a37f4a82c59c5 | 4ecbd2edd53e1b79a81760fc95ea2c2a97f205d7 | refs/heads/main | 2023-01-07T19:51:35.242839 | 2020-11-04T03:32:36 | 2020-11-04T03:32:36 | 308,618,518 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | BB BBBBBBBBBBBBBBB
BBBB
XXXX XXXXXXXXX XXXXXXXXXXXXXXXXXBB BBBBBBBBBBBBBBBBB BB BBBBBBBBBB XXXBBBBBBB BBBBBBBBBBBBB BBBBBBB BBBBBBBBBBBBBBBB BBB BBBBBBBBBBBB XXXXXXXXXXBBBBBBB BBBBBBBBBBBBBBBBB BBBBBXX
BB BBBBBBBBBBB
XXXXXX XXXXXX XXXXXXXBB BBB BBBBBBBBBBBB XXXXXXXXXXXXXXBBBBBBB BBBBBBBBBBBBBBBBBBBB XXXXXXXXXXXXXBBBBBXX
FFFFBB BBBBBBBBBBBBBBBBBBBBXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXBBBBB
XXXXXXXX
BBBBB
BBBBBBB BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
XXXXXX
BBBBB
| [
"sheraramprajapat1998@gmail.com"
] | sheraramprajapat1998@gmail.com |
db45bd54e042a773500a1ed7c1fc6e739e033d33 | 6f3cf8c3af7e194982ab07bbd15217d799a53381 | /Intermediate_Python/dictionaryManipulation2.py | 4ff5621fd47b2a908ac56faa6b282c49cde1d3c0 | [] | no_license | AnjaliSharma1234/Datacamp---Python-Track | 2ec9c61dbcda69f33bdf097d585b6e838a34f2e1 | 2e7fbaa6a9c2507e94689612dfa9650c5810f3cc | refs/heads/master | 2020-12-20T18:32:28.459549 | 2020-03-05T07:23:59 | 2020-03-05T07:23:59 | 236,170,988 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | # Definition of dictionary
# Capitals of selected countries; Germany's entry is deliberately stale and
# Australia does not belong here -- both are corrected below.
europe = {'spain': 'madrid', 'france': 'paris', 'germany': 'bonn',
          'norway': 'oslo', 'italy': 'rome', 'poland': 'warsaw',
          'australia': 'vienna'}

# Correct the capital of Germany.
europe['germany'] = 'berlin'

# Australia is not in Europe; drop the entry.
europe.pop('australia')

# Show the cleaned-up mapping.
print(europe)
| [
"noreply@github.com"
] | AnjaliSharma1234.noreply@github.com |
089a33ea10a5cc90572d949e6dba551a403df523 | 98cb2f2afbe57bdda9d6b8b1dd8cf624987d91bc | /torchdp/utils/tests/module_inspection_test.py | 3906d7f03090ec40c3963e92691fd128175284ba | [
"Apache-2.0"
] | permissive | jyhong836/pytorch-dp | 0e7613b01f09ceb2c3787284372f8e887bf0deb3 | e050b98d630d4db50cacc4fff82575daf345f012 | refs/heads/master | 2023-01-03T15:08:54.976598 | 2020-08-18T01:26:07 | 2020-08-18T01:27:02 | 260,974,801 | 0 | 0 | Apache-2.0 | 2020-05-03T16:40:11 | 2020-05-03T16:40:11 | null | UTF-8 | Python | false | false | 4,572 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch.nn as nn
from torchdp.utils import module_inspection as mi
from torchvision import models
class utils_ModelInspector_test(unittest.TestCase):
    """Tests for torchdp.utils.module_inspection.ModelInspector."""
    def setUp(self):
        # Shared predicates over nn.Module instances used by the tests below.
        def pred_supported(module):
            # Positive predicate: the module type is explicitly supported.
            return isinstance(module, (nn.Conv2d, nn.Linear))
        def pred_not_unsupported(module):
            # Negative predicate: the module is not a 2d/3d batch-norm layer.
            return not isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d))
        def pred_requires_grad(module):
            # True when every direct (non-recursive) parameter is trainable.
            return all(
                p.requires_grad for p in module.parameters(recurse=False)
            )
        self.pred_supported = pred_supported
        self.pred_not_unsupported = pred_not_unsupported
        self.pred_mix = lambda m: (not pred_requires_grad(m)) or pred_not_unsupported(m)
    def test_validate_basic(self):
        """A failing predicate marks the model invalid."""
        inspector = mi.ModelInspector(
            "pred", lambda model: isinstance(model, nn.Linear)
        )
        model = nn.Conv1d(1, 1, 1)
        valid = inspector.validate(model)
        self.assertFalse(valid, inspector.violators)
    def test_validate_positive_predicate_valid(self):
        """A satisfied positive predicate yields no violators."""
        # test when a positive predicate (e.g. supported) returns true
        inspector = mi.ModelInspector("pred", self.pred_supported)
        model = nn.Conv2d(1, 1, 1)
        valid = inspector.validate(model)
        self.assertTrue(valid)
        list_len = len(inspector.violators)
        self.assertEqual(list_len, 0, f"violators = {inspector.violators}")
    def test_validate_positive_predicate_invalid(self):
        """An unsatisfied positive predicate records one violator."""
        # test when a positive predicate (e.g. supported) returns false
        inspector = mi.ModelInspector("pred", self.pred_supported)
        model = nn.Conv1d(1, 1, 1)
        valid = inspector.validate(model)
        self.assertFalse(valid)
        list_len = len(inspector.violators)
        self.assertEqual(list_len, 1, f"violators = {inspector.violators}")
    def test_validate_negative_predicate_ture(self):
        """A satisfied negative predicate yields no violators."""
        # test when a negative predicate (e.g. not unsupported) returns true
        inspector = mi.ModelInspector("pred1", self.pred_not_unsupported)
        model = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Linear(1, 1))
        valid = inspector.validate(model)
        self.assertTrue(valid)
        list_len = len(inspector.violators)
        self.assertEqual(list_len, 0)
    def test_validate_negative_predicate_False(self):
        """An unsatisfied negative predicate records the batch-norm module."""
        # test when a negative predicate (e.g. not unsupported) returns false
        inspector = mi.ModelInspector("pred", self.pred_not_unsupported)
        model = nn.Sequential(nn.Conv2d(1, 1, 1), nn.BatchNorm2d(1))
        valid = inspector.validate(model)
        self.assertFalse(valid)
        list_len = len(inspector.violators)
        self.assertEqual(list_len, 1, f"violators = {inspector.violators}")
    def test_validate_mix_predicate(self):
        """A frozen batch-norm passes the mixed (frozen-or-supported) predicate."""
        # check with a mix predicate not requires grad or is not unsupported
        inspector = mi.ModelInspector("pred1", self.pred_mix)
        model = nn.Sequential(nn.Conv2d(1, 1, 1), nn.BatchNorm2d(1))
        for p in model[1].parameters():
            p.requires_grad = False
        valid = inspector.validate(model)
        self.assertTrue(valid)
    def test_check_everything_flag(self):
        """With check_leaf_nodes_only=False, container modules are inspected too."""
        # check to see if a model does not containt nn.sequential
        inspector = mi.ModelInspector(
            "pred",
            lambda model: not isinstance(model, nn.Sequential),
            check_leaf_nodes_only=False,
        )
        model = nn.Sequential(nn.Conv1d(1, 1, 1))
        valid = inspector.validate(model)
        self.assertFalse(valid, f"violators = {inspector.violators}")
    def test_complicated_case(self):
        """Run two inspectors over torchvision's resnet50 fixture.

        The expected violator count (53) is tied to the resnet50
        architecture: its batch-norm layers fail the `not_bad` predicate.
        """
        def good(x):
            return isinstance(x, (nn.Conv2d, nn.Linear))
        def bad(x):
            return isinstance(x, nn.modules.batchnorm._BatchNorm)
        inspector1 = mi.ModelInspector("good_or_bad", lambda x: good(x) or bad(x))
        inspector2 = mi.ModelInspector("not_bad", lambda x: not bad(x))
        model = models.resnet50()
        valid = inspector1.validate(model)
        self.assertTrue(valid, f"violators = {inspector1.violators}")
        self.assertEqual(
            len(inspector1.violators), 0, f"violators = {inspector1.violators}"
        )
        valid = inspector2.validate(model)
        self.assertFalse(valid, f"violators = {inspector2.violators}")
        self.assertEqual(
            len(inspector2.violators), 53, f"violators = {inspector2.violators}"
        )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
ec2c912e7c7005b94043d783105dc593306f7d9e | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_2/models/directory_service_role.py | ecaf432596bb712e118a0b77f274050314c70b13 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,947 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.2, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_2 import models
class DirectoryServiceRole(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared model attributes and their swagger type names (codegen style).
    swagger_types = {
        'group': 'str',
        'group_base': 'str',
        'id': 'str',
        'role': 'Reference'
    }
    # Python attribute name -> JSON field name used on the wire.
    attribute_map = {
        'group': 'group',
        'group_base': 'group_base',
        'id': 'id',
        'role': 'role'
    }
    # Constructor keyword args that are mandatory (none for this model).
    required_args = {
    }
    def __init__(
        self,
        group=None,  # type: str
        group_base=None,  # type: str
        id=None,  # type: str
        role=None,  # type: models.Reference
    ):
        """
        Keyword args:
            group (str): Common Name (CN) of the directory service group containing users with authority level of the specified role name.
            group_base (str): Specifies where the configured group is located in the directory tree.
            id (str): A non-modifiable, globally unique ID chosen by the system.
            role (Reference): A reference to the role; can be any role that exists on the system.
        """
        # Only set attributes that were explicitly provided; unset ones are
        # absent from __dict__ and thus omitted from to_dict().
        if group is not None:
            self.group = group
        if group_base is not None:
            self.group_base = group_base
        if id is not None:
            self.id = id
        if role is not None:
            self.role = role
    def __setattr__(self, key, value):
        # Reject assignment to anything other than the declared attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `DirectoryServiceRole`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Unset Property placeholders read back as None instead of leaking
        # the sentinel object.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            return None
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recurse into nested models, lists and dicts via their to_dict().
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(DirectoryServiceRole, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DirectoryServiceRole):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"msholes@purestorage.com"
] | msholes@purestorage.com |
e89f0990de02f0eb310ce96228fe222f38c31b01 | a0784b1a66a6c1a89ee8a75e32cd48d2c168931b | /setup.py | 40566faaa85ccd97f99feba5f88e28650bf18897 | [
"MIT"
] | permissive | cltrudeau/purdy | ebe5d8b556dadc0a4eb04018826c066b83617f71 | 4ff2d5b33771266d46260ee9ba6503bb4895ab2f | refs/heads/master | 2023-07-08T08:23:08.409053 | 2023-06-29T21:37:29 | 2023-06-29T21:37:29 | 210,162,520 | 10 | 3 | MIT | 2021-03-10T21:55:26 | 2019-09-22T14:40:17 | Python | UTF-8 | Python | false | false | 1,608 | py | import os, sys, re
from purdy import __version__
readme = os.path.join(os.path.dirname(__file__), 'README.rst')
# Read the long description through a context manager so the file handle is
# closed deterministically (the old ``open(readme).read()`` leaked it until
# garbage collection).
with open(readme) as readme_file:
    long_description = readme_file.read()

# Keyword arguments passed straight to setuptools.setup() below.
SETUP_ARGS = dict(
    name='purdy',
    version=__version__,
    description=('Terminal based code snippet display tool '),
    long_description=long_description,
    url='https://github.com/cltrudeau/purdy',
    author='Christopher Trudeau',
    author_email='ctrudeau+pypi@arsensa.com',
    license='MIT',
    include_package_data=True,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    keywords='code display',
    py_modules = ['purdy',],
    scripts=['bin/purdy', 'bin/subpurdy', 'bin/pat', 'bin/prat'],
    install_requires = [
        'asttokens>=2.0.4',
        'Pygments>=2.14.0',
        'urwid>=2.0.1',
        'colored>=1.4.2',
    ],
    tests_require = [
        'waelstow>=0.10.2',
    ]
)

if __name__ == '__main__':
    from setuptools import setup, find_packages
    SETUP_ARGS['packages'] = find_packages()
    setup(**SETUP_ARGS)
| [
"ctrudeau@arsensa.com"
] | ctrudeau@arsensa.com |
5bd2492ab863dfbedab466259b236c2258d8fbbc | dda618067f13657f1afd04c94200711c1920ea5f | /scoop/rogue/models/blocklist.py | 94cccf5a3c7319106c15d5b4887e0aa255763a05 | [] | no_license | artscoop/scoop | 831c59fbde94d7d4587f4e004f3581d685083c48 | 8cef6f6e89c1990e2b25f83e54e0c3481d83b6d7 | refs/heads/master | 2020-06-17T20:09:13.722360 | 2017-07-12T01:25:20 | 2017-07-12T01:25:20 | 74,974,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,857 | py | # coding: utf-8
from annoying.fields import AutoOneToOneField
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from scoop.core.abstract.core.data import DataModel
from scoop.core.abstract.core.datetime import DatetimeModel
from scoop.core.util.data.typeutil import make_iterable
from scoop.core.util.model.model import SingleDeleteManager
from scoop.core.util.shortcuts import addattr
DEFAULT_LIST = 'blacklist'
class BlocklistManager(SingleDeleteManager):
    """Manager for block lists (per-user and global)."""
    # Getter
    def get_by_user(self, user):
        """Return the blocklist object for a user (the global one when user is None)."""
        return self.get_or_create(user=user)[0] if user is not None else self.get_global()
    def get_global(self):
        """Return the global blocklist object, creating it on first access."""
        blocklist, _ = self.get_or_create(user=None)
        return blocklist
    def is_safe(self, sender, recipients, name=None):
        """Return whether no block list stands between sender and any recipient."""
        # Staff, bots and holders of the bypass permission are never blocked.
        if sender.is_staff or getattr(sender, 'bot', False) or sender.has_perm('rogue.can_bypass_blocks'):
            return True
        # A globally listed sender is blocked for everyone.
        if self.is_globally_listed(sender):
            return False
        recipients = make_iterable(recipients)
        blocklists = self.filter(user__in=recipients)
        sender_blocks = sender.blocklist.get_data(name or DEFAULT_LIST) or []
        # Blocked in either direction means not safe.
        for recipient in recipients:
            if recipient.pk in sender_blocks:
                return False
        for blocklist in blocklists:
            items = blocklist.get_data(name or DEFAULT_LIST) or []
            if sender.pk in items:
                return False
        return True
    def exists(self, recipient, sender, name=None):
        """Return whether recipient's block list contains sender."""
        blocklist = self.get_by_user(recipient)
        return blocklist.is_listed(sender, name)
    def get_user_ids(self, user, name=None):
        """Return the ids of the users in one of user's block lists."""
        blocklist = self.get_by_user(user)
        return blocklist.get_ids(name)
    def users_listed_by(self, user, name=None):
        """Return the User queryset of users in one of user's block lists."""
        from django.contrib.auth import get_user_model
        # Blocked user ids
        ids_listed = self.get_user_ids(user, name=name)
        return get_user_model().objects.filter(pk__in=ids_listed)
    def is_globally_listed(self, user, name=None):
        """Return whether a user appears in the global block list."""
        return self.exists(None, user, name=name)
    def exclude_users(self, queryset, user, name=None):
        """Return queryset without the users found in user's block list.

        Only applies to User/Profile querysets; others pass through untouched.
        """
        if queryset.model.__name__ in {'User', 'Profile'}:
            return queryset.exclude(pk__in=self.get_user_ids(user, name))
        return queryset
    # Setter
    def add(self, recipient, sender, name=None):
        """Add sender to one of recipient's block lists."""
        blocklist = self.get_by_user(recipient)
        return blocklist.add(sender, name)
    def remove(self, recipient, sender, name=None):
        """Remove sender from one of recipient's block lists."""
        blocklist = self.get_by_user(recipient)
        return blocklist.remove(sender, name)
    def toggle(self, recipient, sender, name=None):
        """Toggle sender's membership in one of recipient's block lists."""
        blocklist = self.get_by_user(recipient)
        return blocklist.toggle(sender, name)
    def clear(self, user, name=None):
        """Reset one of user's block lists."""
        blocklist = self.get_by_user(user)
        return blocklist.clear(name=name)
class Blocklist(DatetimeModel, DataModel):
    """Block list: per-user (or global, when ``user`` is None) lists of
    blocked/hidden user ids, stored in the inherited DataModel payload."""
    # Constants
    # Names of the lists kept in the data payload.
    DATA_KEYS = ['blacklist', 'hidelist']
    # Fields
    user = AutoOneToOneField(settings.AUTH_USER_MODEL, null=True, related_name='blocklist', on_delete=models.CASCADE, verbose_name=_("Blocker"))
    objects = BlocklistManager()
    # Getter
    @addattr(short_description=pgettext_lazy('users', "Blacklisted"))
    def get_count(self, name=None):
        """Return the number of entries in the named list (default list if None)."""
        return len(self.get_data(name or DEFAULT_LIST, []))
    @addattr(short_description=pgettext_lazy('users', "Total"))
    def get_total_count(self):
        """Return the total number of entries across all lists."""
        return sum([self.get_count(name) for name in self.DATA_KEYS if 'list' in name])
    def get_ids(self, name=None):
        """Return the ids of the users recorded in the named list."""
        return self.get_data(name or DEFAULT_LIST, {}).keys()
    def is_listed(self, sender, name=None):
        """Return whether *sender* (user object or pk) is in the named list."""
        return getattr(sender, 'pk', sender) in self.get_ids(name)
    def get_list_date(self, sender, name=None):
        """Return the datetime *sender* was added to the named list, or None."""
        data = self.get_data(name or DEFAULT_LIST, {})
        if getattr(sender, 'pk', sender) in data:
            return data[getattr(sender, 'pk', sender)][0]
        return None
    # Setter
    def add(self, sender, name=None):
        """
        Add a user to a block list and return True on success.

        Staff users cannot be added.
        :type sender: scoop.user.models.User or int
        :param name: name of the block list
        """
        pk = getattr(sender, 'pk', sender)
        if pk not in self.get_ids(name) and not getattr(sender, 'is_staff', False):
            now = timezone.now()
            data = self.get_data(name or DEFAULT_LIST, {})
            # Store the block timestamp in a list for future extensibility.
            data[pk] = [now]
            success = self.set_data(name or DEFAULT_LIST, data)
            if success:
                self.save()
            return True
        return False
    def remove(self, sender, name=None):
        """
        Remove a user from a block list; return True if an entry was removed.

        :type sender: scoop.user.models.User or int
        :param name: name of the block list
        """
        if getattr(sender, 'pk', sender) in self.get_ids(name):
            data = self.get_data(name or DEFAULT_LIST, {})
            del data[getattr(sender, 'pk', sender)]
            self.set_data(name or DEFAULT_LIST, data)
            self.save()
            return True
        return False
    def toggle(self, sender, name=None):
        """
        Toggle a user's membership in a block list.

        :type sender: scoop.user.models.User or int
        :param name: name of the block list
        :returns: False if the user was removed, True if the user was added
        """
        if self.is_listed(sender, name or DEFAULT_LIST):
            self.remove(sender, name or DEFAULT_LIST)
            return False
        else:
            self.add(sender, name or DEFAULT_LIST)
            return True
    def clear(self, name=None):
        """
        Reset a block list.

        :returns: True if a change was needed, False otherwise
        """
        key = name or DEFAULT_LIST
        # Bug fix: the previous code read ``self.data[name]`` directly, which
        # raised KeyError for the default call (name=None) and bypassed the
        # get_data() API used everywhere else in this class.
        if self.get_data(key, {}) != {}:
            self.set_data(key, {}, save=True)
            return True
        return False
    # Overrides
    def save(self, *args, **kwargs):
        """Stamp the modification time, then persist the object."""
        self.time = self.now()
        super(Blocklist, self).save(*args, **kwargs)
    # Metadata
    class Meta:
        verbose_name = _("blocklists")
        verbose_name_plural = _("blocklists")
        permissions = [['bypass_block', "Can bypass blocks"]]
        app_label = "rogue"
| [
"steve.kossouho@gmail.com"
] | steve.kossouho@gmail.com |
096989597b8019a9b59720595209300e145c456e | c269d7a329958d2806c34ab35f25b81eeb0b8723 | /tests/__init__.py | 307b697e4bf12555b8f40c2bcc95ff507eb5fcf5 | [
"MIT"
] | permissive | MainRo/dooble | ecc71774ecf6a8b4b3d9c534384202ca9e0737de | 7588ebd48d189eac5a7d49cf0bd4dca20df50539 | refs/heads/master | 2023-01-12T15:38:07.206541 | 2020-08-25T14:57:11 | 2020-08-25T14:57:11 | 170,035,675 | 7 | 2 | MIT | 2022-12-26T20:47:00 | 2019-02-10T22:40:16 | Python | UTF-8 | Python | false | false | 120 | py | # -*- coding: utf-8 -*-
# Package metadata (picked up by packaging/documentation tooling).
__author__ = """Romain Picard"""
__email__ = 'romain.picard@oakbits.com'
__version__ = '0.1.0'
| [
"romain.picard@oakbits.com"
] | romain.picard@oakbits.com |
b6bc63045d050bbbf0d2fb8f082e5bbf7c7d4687 | ebd9c249d446d809abc9a0f3e4593f34922a1b93 | /lintcode/823_input_stream.py | 51b3d8039db3e05f53130546eef3a2a40a6ec09b | [] | no_license | jaychsu/algorithm | ac7a9dc7366f58c635a68bc46bf1640d2f5ff16d | 91892fd64281d96b8a9d5c0d57b938c314ae71be | refs/heads/master | 2023-05-11T00:40:39.237813 | 2022-09-14T07:43:12 | 2022-09-14T07:43:12 | 106,277,156 | 143 | 39 | null | 2022-09-14T07:43:13 | 2017-10-09T11:51:48 | Python | UTF-8 | Python | false | false | 1,810 | py | """
Merge Sort
time: O(n)
space: O(1)
"""
class Solution:
    """Backspace stream compare, two pointers from the right: O(n) time, O(1) space."""
    def inputStream(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str, 'NO' or 'YES'
        """
        RES = ('NO', 'YES')
        if a == b == '':
            return RES[1]
        BACK = '<'
        m, n = len(a), len(b)
        # Scan both strings right-to-left; each '<' erases one earlier char.
        i, j = m - 1, n - 1
        acnt = bcnt = 0 # count the backspace in both a and b
        while i >= 0 and j >= 0:
            # Skip characters of `a` consumed by pending backspaces.
            while i >= 0 and (a[i] == BACK or acnt):
                acnt += 1 if a[i] == BACK else -1
                i -= 1
            # Same for `b`.
            while j >= 0 and (b[j] == BACK or bcnt):
                bcnt += 1 if b[j] == BACK else -1
                j -= 1
            # Compare the next visible character on each side.
            # NOTE(review): if a skip loop drove i or j to -1, a[i]/b[j]
            # wraps to the last character via negative indexing -- confirm
            # this cannot yield a wrong answer for asymmetric streams.
            if a[i] != b[j]:
                return RES[0]
            i -= 1
            j -= 1
        # Drain any leftover backspaced characters on either side.
        while i >= 0 and (a[i] == BACK or acnt):
            acnt += 1 if a[i] == BACK else -1
            i -= 1
        while j >= 0 and (b[j] == BACK or bcnt):
            bcnt += 1 if b[j] == BACK else -1
            j -= 1
        # Equal streams leave both pointers at the same index (-1).
        return RES[int(i == j)]
"""
Stack
time: O(n)
space: O(n)
"""
class Solution:
    """Backspace stream compare using explicit stacks: O(n) time, O(n) space."""

    def inputStream(self, a, b):
        """
        Decide whether two input streams render to the same text, where
        '<' erases the previous visible character.

        :type a: str
        :type b: str
        :rtype: str, 'NO' or 'YES'
        """
        RES = ('NO', 'YES')
        if a == '' and b == '':
            return RES[1]
        if a is None or b is None:
            return RES[0]
        # The streams match iff their rendered forms are equal.  The
        # previous version duplicated the stack-reduction loop for each
        # stream; it is now factored into a single helper.
        return RES[int(self._render(a) == self._render(b))]

    @staticmethod
    def _render(stream):
        """Apply the backspaces in *stream*; return the visible characters."""
        RM = '<'
        visible = []
        for ch in stream:
            if ch != RM:
                visible.append(ch)
            elif visible:
                # A backspace with nothing typed yet is a no-op.
                visible.pop()
        return visible
| [
"hi@jaych.su"
] | hi@jaych.su |
f3c9db1941951eeea6159cc39ba7e755aeae4d03 | e18a353582609732c795401f1a01bc762bd939f2 | /top/python/get_mumubb.py | bdc0e23a32be777ced04b9beba3a374a9629f348 | [] | no_license | s-farry/workspaces | 06741807bb464bb0712d52108c2d1b7ae62b1353 | 0dcf3868dcbe110206ea88ff5c9e04a3b44b1ca1 | refs/heads/master | 2020-04-03T00:45:39.152227 | 2017-06-15T16:33:33 | 2017-06-15T16:33:33 | 64,213,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from ROOT import *
from Jawa import *
from PlotTools import *
from Utils import Bunch
# Input files for the two magnet polarities (2015 data).
down_file = TFile("/hepstore/sfarry/GridOutput/2520/A2MuMuJet.MD.2015.root")
up_file = TFile("/hepstore/sfarry/GridOutput/2521/A2MuMuJet.MU.2015.root")

# Require both jets associated with the boson to be tagged.
tag_cut = TCut("boson_jet_tag == 1 && boson_jet2_tag == 1")

# Histogram definitions to fill from the decay trees.
variables = [
    Bunch(name='m', var='boson_M', bins=20, lo=10, hi=100),
]

a2mumu = Template("a2mumu")
a2mumu.SetSelCut(tag_cut)
for source in (down_file, up_file):
    a2mumu.AddTree(source.Get("ZMuMu/DecayTree"))
for spec in variables:
    a2mumu.AddVar(spec.name, spec.var, spec.bins, spec.lo, spec.hi)
a2mumu.Run()
a2mumu.SaveToFile()
| [
"sfarry@hep.ph.liv.ac.uk"
] | sfarry@hep.ph.liv.ac.uk |
b7e6835eb984e22224ba8954a80b3c2d30e12e9e | 71c7683331a9037fda7254b3a7b1ffddd6a4c4c8 | /Phys/Ks2MuMu/python/Ks2MuMu/Joban.py | 4e506118ebbb17400c7d16486cac5815866d3a10 | [] | no_license | pseyfert-cern-gitlab-backup/Urania | edc58ba4271089e55900f8bb4a5909e9e9c12d35 | 1b1c353ed5f1b45b3605990f60f49881b9785efd | refs/heads/master | 2021-05-18T13:33:22.732970 | 2017-12-15T14:42:04 | 2017-12-15T14:42:04 | 251,259,622 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | from ROOT import *
from Urania import *
AccessPackage("Bs2MuMu")
from XTuple import *
f = TFile("KsPiPi_MinBiasData_TriggerUnbiased_ntuple.root")
t = f.Get("Kspipi")
tup = XTuple("Joban",["evt/F", "AP_pt/F","AP_alpha/F", "time/F"])
i = 0
for entry in t:
tup.fillItem("AP_pt",entry.AP_pt)
tup.fillItem("AP_alpha",entry.AP_alpha)
tup.fillItem("time",entry.Blife_ps)
tup.fillItem("evt",i)
i += 1
tup.fill()
tup.close()
| [
"liblhcb@cern.ch"
] | liblhcb@cern.ch |
5c4ab8fa79cd627c8aea4961761caaecb3b8ed30 | af03eb82a29228837a8078f626a798a62121782a | /Python-3/basic_examples/strings/string_isalpha.py | 7dfaa13d112b914f5d90e58017843b7a6a201af3 | [
"MIT"
] | permissive | kartik4297/journaldev | d7d7c158ccd628164c821f62d4aa4937317940ee | f520fb89f3307f39d8419618ff691e0563ede13a | refs/heads/master | 2020-04-09T14:33:21.305716 | 2018-12-04T07:39:52 | 2018-12-04T07:39:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | s = 'Hello'
print(s.isalpha())
s = '123'
print(s.isalpha())
s = ''
print(s.isalpha())
s = 'çå'
print(s.isalpha())
| [
"pankaj.0323@gmail.com"
] | pankaj.0323@gmail.com |
506ea85ae8a5646591bb9ace63949654f6b5e4e6 | d10dc6ee16ddcbf4cf6dc4ce43c332d6d375f2ee | /ccompras/apps/home/views.py | dd58c96127a153d4d49703261eaed95d68c2627a | [] | no_license | Alfredynho/DjCompras | 993bec2195734af911e0355327c477aa8a49c9d6 | d6829d2b5efe3ff871bab449d8e440908136d71e | refs/heads/master | 2021-01-11T11:07:31.933034 | 2016-04-08T10:52:12 | 2016-04-08T10:52:12 | 55,133,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py | from django.shortcuts import render, render_to_response
from django.template import RequestContext
from ccompras.apps.ventas.models import producto
from ccompras.apps.home.forms import ContactForm
from django.core.mail import EmailMultiAlternatives
# Create your views here.
def index_view(request):
    # Render the site landing page; RequestContext runs the configured context processors.
    return render_to_response('home/index.html',context_instance=RequestContext(request))
def about_view(request):
    """Render the about page, passing a demo message to the template as 'msg'."""
    # Same context as before, built inline instead of through intermediate names.
    return render_to_response(
        'home/about.html',
        {'msg': "esto es un mensaje desde mi vista"},
        context_instance=RequestContext(request),
    )
def home(request):
    # Static home page; no extra template context beyond the request processors.
    return render_to_response('home/home.html',context_instance=RequestContext(request))
def productos_view(request):
    """Render the product listing page with every product whose status flag is True."""
    active_products = producto.objects.filter(status=True)
    return render_to_response(
        'home/productos.html',
        {'productos': active_products},
        context_instance=RequestContext(request),
    )
def contacto_view(request):
    """Contact form view: on a valid POST, e-mail the message to the site admin,
    then re-render the page echoing what was submitted."""
    info_enviado = False
    email = ""
    titulo = ""
    texto = ""
    if request.method == 'POST':
        formulario = ContactForm(request.POST)
        if formulario.is_valid():
            info_enviado = True
            email = formulario.cleaned_data['Email']
            titulo = formulario.cleaned_data['Titulo']
            texto = formulario.cleaned_data['Texto']
            # configure sending the message to Gmail
            to_admin = 'callizayagutierrezalfredo@gmail.com'
            html_content = "Informacion recibida de [%s] <br><br><br>***Mensaje****<br><br>%s"%(email,texto)
            msg = EmailMultiAlternatives('Correo de Contacto',html_content,'from@server.com',[to_admin])
            msg.attach_alternative(html_content,'text/html') # define the content as HTML
            msg.send() # send the e-mail
    else:
        formulario = ContactForm()
    # NOTE(review): 'titulo' is collected but never included in the e-mail -- confirm intent.
    ctx ={'form':formulario,'email':email,'titulo':titulo,'texto':texto,'info_enviado':info_enviado}
    return render(request,'home/contacto.html',ctx)
| [
"callizayagutierrezalfredo@gmail.com"
] | callizayagutierrezalfredo@gmail.com |
2a44f2e6a644cd7d7898e30231a59efd400ac4ea | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_ploughmen.py | 0bce07a61a1b6df4625d86e50109e91c254c23c2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py |
from xai.brain.wordbase.nouns._plowman import _PLOWMAN
# class header
class _PLOUGHMEN(_PLOWMAN, ):
    """Noun entry for "ploughmen": an alias class that reuses the _PLOWMAN data."""
    def __init__(self,):
        _PLOWMAN.__init__(self)
        self.name = "PLOUGHMEN"   # display name for this word entry
        self.specie = 'nouns'     # part-of-speech bucket
        self.basic = "plowman"    # base (singular, US-spelling) form
        self.jsondata = {}        # extra metadata; empty for this alias entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6240cebc86ce6d41872d24623533177bf895670c | 694c187c8a00bee8c670c1690170099bad9b16b3 | /templeland.py | e7334bf535a376a68ca8f56696553e012eb666e5 | [] | no_license | ajayvenkat10/Competitive | 301f220b6d296f7e34328f192c43c4d7ef208cb1 | 14f2ecebe10eb19f72cc412dd0c414b3b1de9b4d | refs/heads/master | 2022-11-20T14:31:33.590099 | 2020-07-23T15:39:14 | 2020-07-23T15:39:14 | 281,599,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | t = int(input())
# For each of t test cases, answer "yes" iff the strip heights form a valid
# "temple": odd length, starts and ends at 1, rises by exactly 1 each step up
# to a central maximum, and is a palindrome around that centre.
# NOTE(review): the inner loops reuse the outer loop variable `i`; harmless here
# because `i` is not read after the inner loops, but worth renaming.
for i in range(t):
    N = int(input())
    ans=True
    temple_strips_list = []
    temple_strips = input()
    temple_strips = temple_strips.split()
    for i in range(N):
        temple_strips_list.append(int(temple_strips[i]))
    # Reject immediately: even length, or the edges are not height 1 / unequal.
    if(N%2==0 or temple_strips_list[0]!=1 or temple_strips_list[0]!=temple_strips_list[-1]):
        ans = False
    else:
        mid = temple_strips_list[len(temple_strips_list)//2]
        # The middle element must be the global maximum.
        if(mid == max(temple_strips_list)):
            part1 = temple_strips_list[:len(temple_strips_list)//2]
            part2 = temple_strips_list[(len(temple_strips_list)//2)+1:]
            # Left half must climb by exactly 1 at every step (1,2,3,...).
            for i in range(1,(len(temple_strips_list)//2)+1):
                if(temple_strips_list[i]-temple_strips_list[i-1]!=1):
                    ans=False
                    break
            # Right half must mirror the left half.
            if(part1!=part2[::-1]):
                ans = False
        else:
            ans = False
    if(ans):
        print("yes")
    else:
        print("no")
| [
"37923623+ajayvenkat10@users.noreply.github.com"
] | 37923623+ajayvenkat10@users.noreply.github.com |
9dbb193beff13a78430dc5d2e39b36ec0d0fd615 | 17cc8bffed3fadb413506f1545c455d7b9406ed6 | /parts/zodiac/chameleon/tests/inputs/014-repeat-nested-similar.pt.py | 1f56c51d70bb5c11f0867046182c37c6499c87e3 | [] | no_license | stinett/zodiac | f7a4f788942930fa217e7e1c7d525b82a557258f | 22b247719694b0f5aa5135b3cb68c1e84aaf7629 | refs/heads/master | 2020-05-21T01:14:59.949571 | 2014-01-13T15:53:47 | 2014-01-13T15:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | /home/stine/myenv/zodiac/eggs/Chameleon-2.13_1-py2.7.egg/chameleon/tests/inputs/014-repeat-nested-similar.pt.py | [
"stine@funkydesktop.(none)"
] | stine@funkydesktop.(none) |
0b0301d80d6a9bdbe6e753e89f1201d6161efb00 | e53c13f2236960456a412af2c2617148a2c6153e | /ethnode/celeryapp.py | f43e1ee23bb92059029844b910fdaa445c838c73 | [] | no_license | ethgis/ethnode | d57b9660174acb737f96aea8013717b1f1a00ea1 | 9fe6f3add95bb5c5fb6dc9d2135f3ec48547c981 | refs/heads/master | 2021-08-29T17:19:44.169638 | 2017-12-14T12:23:04 | 2017-12-14T12:23:04 | 109,628,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
from celery import Celery
# Ensure Django settings are importable before Celery loads its configuration;
# setdefault keeps any value already exported in the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ethnode.settings')
app = Celery('ethnode')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
| [
"ingenieroariel@gmail.com"
] | ingenieroariel@gmail.com |
e39c1c212e8848b309a577602ba1e9f51e7615a1 | 60290cb3fdb4d4a97a38f921b7de2160c2af70de | /utest/editor/test_clipboard.py | 415b75aad0c6487519b8702649a102f8308e8b8c | [
"Apache-2.0"
] | permissive | crylearner/RIDE3X | 231431222dc679b38831bd75db5a81062327e91c | 767f45b0c908f18ecc7473208def8dc7489f43b0 | refs/heads/master | 2021-01-19T12:22:59.553847 | 2017-08-23T15:11:17 | 2017-08-23T15:11:17 | 100,781,873 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | import os
import unittest
from nose.tools import assert_equals
# Needed to be able to create wx components
from resources import PYAPP_REFERENCE as _
from robotide.context import IS_WINDOWS
from robotide.editor.clipboard import _GridClipboard
if not IS_WINDOWS:
    # Clipboard tests are skipped entirely on Windows (platform clipboard differs).
    class TestGridClipBoard(unittest.TestCase):
        """Tests that _GridClipboard serialises strings and cell grids correctly."""
        def test_with_string_content(self):
            self._test_clipboard('Hello, world!', 'Hello, world!')
        def test_with_list_content(self):
            # One row of cells becomes a tab-separated line.
            self._test_clipboard([['Hello', 'world!']], 'Hello\tworld!')
        def test_with_multiple_rows(self):
            # Rows are joined with newlines, cells with tabs.
            self._test_clipboard([['Hello', 'world!'], ['Another', 'row']],
                                 'Hello\tworld!\nAnother\trow')
        def _test_clipboard(self, content, expected=''):
            # Round-trip through the clipboard; '\n' is normalised to os.linesep.
            clipb = _GridClipboard()
            clipb.set_contents(content)
            assert_equals(clipb._get_contents(),
                          expected.replace('\n', os.linesep))
| [
"sunshyran@gmail.com"
] | sunshyran@gmail.com |
627bce2f20fb134fccac1dafa3531fcd824aa73e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/330/usersdata/301/93575/submittedfiles/lista1.py | ab9a94ef91b97d13a029f17baed6ea875918b4b4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # -*- coding: utf-8 -*-
# Read n integers, then print a running sum: soma1 accumulates even values,
# soma2 accumulates odd values, printed after each element is processed.
n=int(input('digite o valor de n: '))
a=[]
soma1=0
soma2=0
c1=0
c2=0
# NOTE(review): c1 and c2 are never used below -- presumably leftover counters.
for i in range (0,n,1):
    a.append(int(input('Digite o número: ')))
for i in range (0,n,1):
    if a[i]%2==0:
        soma1+=a[i]
        print(soma1)
    else:
        soma2+=a[i]
        print(soma2)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e306d7ab0034c1a495ace1e6d3ffc29368c1c07b | 0420ce2fc8799d5fbd6e96313e6716f5e2ef825b | /bagogold/fundo_investimento/urls.py | 3ce146b0130e1143243e3427a8cc656404895e28 | [] | no_license | nizbel/bag-of-gold | 1da10acef4d73b8426ca3329b37a28c5f9587af4 | a3fd89eb47d33d546bd91947f033d71218c8700f | refs/heads/master | 2022-11-13T01:07:26.934813 | 2020-01-14T16:00:16 | 2020-01-14T16:00:16 | 275,689,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | # -*- coding: utf-8 -*-
from django.conf.urls import url
import views
# URL routes for the investment-fund app: detail pages, operation CRUD,
# history listings and static pages. Route names mirror the view names.
urlpatterns = [
    url(r'^detalhar-fundo/(?P<id_fundo>\d+)/$', views.detalhar_fundo_id, name='detalhar_fundo_id'),
    url(r'^detalhar-fundo/(?P<slug_fundo>[-\w]+)/$', views.detalhar_fundo, name='detalhar_fundo'),
    url(r'^editar-operacao/(?P<id_operacao>\d+)/$', views.editar_operacao_fundo_investimento, name='editar_operacao_fundo_investimento'),
    url(r'^historico/$', views.historico, name='historico_fundo_investimento'),
    url(r'^inserir-operacao-fundo-investimento/$', views.inserir_operacao_fundo_investimento, name='inserir_operacao_fundo_investimento'),
    url(r'^listar-fundos/$', views.listar_fundos, name='listar_fundo_investimento'),
    url(r'^listar-fundos-por-nome/$', views.listar_fundos_por_nome, name='listar_fundos_por_nome'),
    url(r'^listar-historico-fundo-investimento/(?P<id_fundo>\d+)/$', views.listar_historico_fundo_investimento, name='listar_historico_fundo_investimento'),
    url(r'^painel/$', views.painel, name='painel_fundo_investimento'),
    url(r'^sobre/$', views.sobre, name='sobre_fundo_investimento'),
    url(r'^verificar-historico-fundo-na-data/$', views.verificar_historico_fundo_na_data, name='verificar_historico_fundo_na_data'),
]
"kingbowserii@gmail.com"
] | kingbowserii@gmail.com |
f356700d720093d7363d0e8af6602a0d4b53452c | 8f26514c451e2398d5e3688c184ea74d1dad21b2 | /month_02/teacher/day03/file_write.py | 0715768cdacd3e3c0fcdcb38b8a2deee1c09b74e | [] | no_license | CircularWorld/Python_exercise | 25e7aebe45b4d2ee4e3e3afded082c56483117de | 96d4d9c5c626f418803f44584c5350b7ce514368 | refs/heads/master | 2022-11-21T07:29:39.054971 | 2020-07-20T10:12:24 | 2020-07-20T10:12:24 | 281,081,559 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | """
文件写操作示例
"""
# 写方式打开
# f = open("file.txt","w")
# f = open("file.txt","a") # 追加
# 读写方式打开,写入文本会从头开始往后覆盖内容
# f = open("file.txt","r+")
f = open("file.txt",'w')
# 写入操作了
n = f.write("Hello world\n")
print("写入了 %d 个字节"%n)
n = f.write("Hello Kitty\n")
print("写入了 %d 个字节"%n)
# 将列表中的内容分别写入到文件中
l = ["哈喽,死鬼\n","哎呀,干啥\n"]
f.writelines(l)
# 关闭
f.close() | [
"jiayuhaowork@163.com"
] | jiayuhaowork@163.com |
616478b663bd431079caa7f8d6a770e823eab73b | a7dc07cadaf735a66f459831cdc4c4d0dbbafcd7 | /land_new_info.py | cbee34fc5949c5eb84d70cbef1230fe943ab79ba | [] | no_license | ymJung/study | 2a15f2e036fc9a5c2c78ea7783a73143e443f4b8 | 37e0bb45b7250ed6ee777a6a48e504ad3b12165e | refs/heads/master | 2023-08-31T18:11:01.534192 | 2023-08-20T14:08:10 | 2023-08-20T14:08:10 | 10,260,555 | 0 | 0 | null | 2014-10-20T00:46:50 | 2013-05-24T06:25:20 | null | UTF-8 | Python | false | false | 2,540 | py | import requests
from bs4 import BeautifulSoup
import configparser
import random
import sys
import datetime
import time
# Load scraper configuration: [land_url] URLS is a comma-separated flat list
# alternating (listing URL, price limit) pairs -- consumed pairwise in get_new().
cf = configparser.ConfigParser()
cf.read('config.cfg')
landUrls = cf.get('land_url','URLS')
land_price_tuples = landUrls.split(',')
def get_sale_products(findUrl, limit_price) :
    """Scrape the Naver-land style sale table at findUrl and return the listings
    priced strictly below limit_price.

    :param findUrl: listing page URL to fetch
    :param limit_price: price ceiling; compared against the listing price with
                        thousands separators stripped
    :return: list of dicts with keys name/price/dong/floor/budongsan/contact
    """
    soup = BeautifulSoup(requests.get(findUrl).text, "html.parser")
    table = soup.find("table", { "class" : "sale_list _tb_site_img NE=a:cpm"})
    trs = table.find("tbody").find_all('tr')
    # Complex (building) name currently highlighted in the side list.
    name = soup.find(id='complexListLayer').find('a', {'class':'on'}).text.strip()
    results = list()
    for tr in trs:
        try :
            price = tr.find('td', {'class':'num align_r'}).find('strong').text
            dong = tr.find_all('td', {'class':"num2"})[0].text
            floor = tr.find_all('td', {'class':"num2"})[1].text
            budongsan = tr.find('td', {'class':'contact'}).find_all('span')[0]['title']
            contact = tr.find('td', {'class':'contact'}).find_all('span')[1].text
            crol = {'name':name,'price':price,'dong':dong,'floor':floor,'budongsan':budongsan,'contact':contact}
            # Keep only listings strictly cheaper than the configured limit.
            if int(limit_price) > int(price.replace(',','')) :
                results.append(crol)
            else :
                continue
        except AttributeError:
            # Rows without the expected cells (headers, ads) are skipped silently.
            continue
    return results
def get_line_up(products):
    """Format each listing dict as one "[name] \\t{dict}\\n" line and return the text.

    :param products: iterable of listing dicts, each with at least a 'name' key
    :return: concatenated lines; empty string for an empty input
    """
    # ''.join is linear; the old repeated `result +=` re-copies the accumulator
    # on every iteration (quadratic in the worst case).
    return ''.join(
        '[' + product['name'] + '] \t' + str(product) + '\n' for product in products
    )
def get_new():
    """Scrape every configured (url, price-limit) pair and keep the first three
    matching listings from each page."""
    collected = []
    pair_count = len(land_price_tuples) // 2
    for pair_idx in range(pair_count):
        find_url = land_price_tuples[2 * pair_idx]
        limit_price = land_price_tuples[2 * pair_idx + 1]
        collected.extend(get_sale_products(find_url, limit_price)[0:3])
    return collected
def is_break():
    """Return True once today's retry budget is exhausted (counter went negative)."""
    remaining = get_date_retry_limit(datetime.date.today())
    return remaining < 0
def get_date_retry_limit(date):
    """Decrement and return the remaining retry budget for *date*.

    First call on a new date seeds the counter; later calls decrement it.
    NOTE(review): RETRY_LIMIT (dict) and RETRY_LIMIT_CNT (int seed) are not
    defined anywhere in this file -- confirm they are meant to come from
    config, otherwise this raises NameError at runtime.
    """
    dateStr = str(date)
    if dateStr in RETRY_LIMIT:
        print('reduce today limit ', dateStr, RETRY_LIMIT[dateStr])
        RETRY_LIMIT[dateStr] -= 1
    else:
        print('make today limit ', dateStr)
        RETRY_LIMIT.update({dateStr: RETRY_LIMIT_CNT})
    return RETRY_LIMIT[dateStr]
import telegram
TOKEN = cf.get('telegram', 'TOKEN')
VALID_USER = cf.get('telegram', 'VALID_USER')
tb = telegram.Bot(token=TOKEN)
# NOTE(review): check_flag and seen_set are never used below.
check_flag = False
seen_set = set()
# Scrape the configured listings once and push a name:::price digest to the
# configured Telegram chat.
products = get_new()
result = ''
for product in products:
    result += product['name'] + ':::' + product['price']
tb.sendMessage(chat_id=VALID_USER, text=result)
| [
"metalbird0@gmail.com"
] | metalbird0@gmail.com |
c61af8c31f886ab4f9dcc2f1bd40e538bdabebf4 | c93bbf35aa37d8c4c974655ae2202fdc36da4b12 | /String/Q_6.py | 0ffb523c0264eb0e0d06f74758c06aa6cb756fa3 | [] | no_license | Bikashacharaya/Jspider_Python | a42cd6aba83eba013061de9e68d11d173673121c | 1ae8f35b20d2633153fd4e310eba150a9478e09a | refs/heads/main | 2023-06-25T00:14:30.198867 | 2021-07-17T15:59:54 | 2021-07-17T15:59:54 | 386,979,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | ''''
I/p- "ABCD"
O/p- "abcd"
'''
| [
"noreply@github.com"
] | Bikashacharaya.noreply@github.com |
0f7769bb3595630e797871ee52e7f418a9816002 | 14956dbed8ae4fba1d65b9829d9405fcf43ac698 | /Cyber Security/Capture the Flag Competitions/2020/Cyberthon 2020/Livestream Training/RE/Catch No Ball/solve.py | 3dadfa22b242c6738c1a936cb4091cbcf0dd8694 | [] | no_license | Hackin7/Programming-Crappy-Solutions | ae8bbddad92a48cf70976cec91bf66234c9b4d39 | ffa3b3c26a6a06446cc49c8ac4f35b6d30b1ee0f | refs/heads/master | 2023-03-21T01:21:00.764957 | 2022-12-28T14:22:33 | 2022-12-28T14:22:33 | 201,292,128 | 12 | 7 | null | 2023-03-05T16:05:34 | 2019-08-08T16:00:21 | Roff | UTF-8 | Python | false | false | 3,455 | py | #!/usr/bin/env python
'''
Basically, to solve this problem, you HAVE to try to interpret this code
using your pseudocode knowledge and genius, and to figure out what it does
Through some analysis of the code, only the main function 'decode' is
important, along with its helper functions.
The main code was doable to debug, but I made some mistakes. After quite
a while of looking at my mistakes and stuff, I managed to interpret it
and get it working
-------------------------------------------------------------------------
My interpretation of the code to python
This may not be 100% accurate or correct or complete, but gets the job done
Use a little inference of programming constructs to figure things out
? condition : if not condition
teluwhut: def (Define a function)
okcan : return
issit: Equate 2 items ==
<<: Define/Set a variable =
$$: List []
<<<: Append Back
>>>: Append front
thing ~ $something$: for thing in something: (for loop)
mod: Modulo
then...fin : Basic Pseudocode open and close Constructs
'''
disnotflag = [0x64, 0x6c, 0x30, 0x62, 0x34, 0x5f, 0x5f, 0x33, 0x6c, 0x6d, 0x6e, 0x34, 0x62, 0x31, 0x5f, 0x33, 0x74, 0x64, 0x6e, 0x62, 0x6d, 0x30, 0x7a, 0x33]
###Debugging: Not in original code#############################
def showdisnotflag(f):
    """Debug helper: print every byte value in *f* as its ASCII character,
    with no separator and no trailing newline."""
    print(''.join(chr(code) for code in f), end='')
###############################################################
def decode(something):
    """Reconstruct the flag text from the module-level `disnotflag` byte list.

    Alternately pops from the front (odd steps) and the back (even steps) of
    `disnotflag` into `final`, reverses `final`, and converts the byte values
    to characters.  Side effects: empties `disnotflag` and prints debug output.
    """
    result = ''
    count = haolung(something)
    counter = []  # NOTE(review): never used; leftover from the original puzzle code
    def lll(a,b): a.append(b) #Custom Function
    final = []
    for thing in range(1,count+1):#counter:
        # Odd iterations take from the head, even iterations from the tail.
        if not thing%2==0:
            '''
            x.pop(1) means remove item at index 1 of x and return it
            example, x <<- $123, 234, 345$
            After running y <<- x.pop(1), x is now $123, 345$ and y is now 234
            #disnotflag.pop(0)<<<final
            '''
            # This code was weird, as it does not make sense to append a list to a function return value
            # Through trial and error, it has been decoded in the lll function
            lll(final,disnotflag.pop(0))
        else:
            lll(final,disnotflag.pop(haolung(disnotflag)-1))
            '''
            disnotflag.pop(len(disnotflag)-1) <<< final
            '''
        ###Debugging: Not in original code################################
        showdisnotflag(disnotflag)
        print(" ",end="")
        showdisnotflag(final)
        print()
        ###################################################################
    final = sdrawkcab(final) #Reverse the string
    for thing in final:
        result+=chr(thing)
    print(result)#Debugging
    return result
def samesame(disone, datone, checker):
    """Return True when *checker* yields equal results for both arguments."""
    left = checker(disone)
    right = checker(datone)
    return left == right
# Gives you a list with the items of "something" but reversed
def sdrawkcab(something):
    """Return a new list containing the items of *something* in reverse order.

    :param something: any iterable; it is not modified
    :return: reversed list
    """
    # The original prepended one item at a time ([thing] + dis), which is
    # O(n^2); materialising once and slicing gives the same result in O(n).
    return list(something)[::-1]
# Gives you the length of an array "something"
def haolung(something):
haomehnee = 0
for thing in something:
haomehnee = haomehnee + 1
return haomehnee
# The function testflag(whatutype) and the rest of the code not important
decode(disnotflag)
# After much looking, I realised the output looked like words and put it in
# Flag: CTFSG{b41n_m3lt3d_n_b4mb00zl3d}
| [
"zunmun@gmail.com"
] | zunmun@gmail.com |
0fe44143c74995e1da342650d28d8052088b9b61 | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW03_20210706191310.py | 400efe3ada9084523c59768ccc03a50afcf192fe | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,575 | py | """
Georgia Institute of Technology - CS1301
HW03 - Strings and Lists
Collaboration Statement:
"""
#########################################
"""
Function Name: movieNight()
Parameters: subtitle (str)
Returns: fixed subtitle (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def movieNight(subtitle):
    """Return *subtitle* with every decimal digit stripped out."""
    kept_chars = [ch for ch in subtitle if not ch.isdigit()]
    return ''.join(kept_chars)
"""
Function Name: longestWord()
Parameters: sentence (str)
Returns: longest word (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def longestWord(sentence):
    """Return the longest word in *sentence*, ignoring commas and splitting on
    single spaces.  Ties are won by the word appearing last (matching the
    original >= comparison)."""
    cleaned = sentence.replace(',', '')
    best = ''
    for word in cleaned.split(' '):
        if len(word) >= len(best):
            best = word
    return best
"""
Function Name: tennisMatch()
Parameters: player1 (str), player2 (str), matchRecord (str)
Returns: game statement (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def tennisMatch(player1, player2, matchRecord):
    """Score a match record string: '1'/'2' award a point to that player and
    '-' closes a game (higher points wins it; equal points, nobody wins).
    Returns a result sentence, or "It's a tie" when games won are equal."""
    points_one = points_two = 0
    games_one = games_two = 0
    for mark in matchRecord:
        if mark == '1':
            points_one += 1
        elif mark == '2':
            points_two += 1
        elif mark == '-':
            # Game over: credit whoever leads, then reset the point counters.
            if points_one > points_two:
                games_one += 1
            elif points_two > points_one:
                games_two += 1
            points_one = points_two = 0
    if games_one > games_two:
        return player1 + ' won! The score was ' + str(games_one) + '-' + str(games_two)
    elif games_two > games_one:
        return player2 + ' won! The score was ' + str(games_two) + '-' + str(games_one)
    else:
        return "It's a tie"
"""
Function Name: freshFruit()
Parameters: barcodes (list), startIndex (int), stopIndex (int)
Returns: freshest barcode (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def freshFruit(barcodes, startIndex, stopIndex):
    """Return the largest barcode in barcodes[startIndex..stopIndex] (inclusive).

    :param barcodes: list of int barcodes
    :param startIndex: first index of the window (inclusive)
    :param stopIndex: last index of the window (inclusive)
    :return: maximum value inside the window
    :raises IndexError: if the window is empty (same failure mode as before)
    """
    window = barcodes[startIndex:stopIndex + 1]
    if not window:
        # Preserve the original failure mode (it indexed window[0]).
        raise IndexError('freshFruit: empty index window')
    # Built-in max() replaces the manual scan for the largest element.
    return max(window)
"""
Function Name: highestSum()
Parameters: stringList (list)
Returns: highest sum index (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def highestSum(stringList):
    """Return the index of the string whose digit characters add up to the
    largest sum.

    The submitted body was left unfinished (a nested ``for`` with no
    statements, which is a SyntaxError); this implements the contract promised
    by the header block: per string, sum its digit characters and return the
    index of the highest total.  Ties keep the earliest index; an empty list
    returns 0.

    :param stringList: list of strings (digits may be mixed with other chars)
    :return: index (int) of the string with the largest digit sum
    """
    bestIndex = 0
    bestSum = -1
    for index, text in enumerate(stringList):
        digitTotal = sum(int(ch) for ch in text if ch.isdigit())
        if digitTotal > bestSum:
            bestSum = digitTotal
            bestIndex = index
    return bestIndex
# subtitle = "Mr. and M4rs. Dursley of nu28mber four, Privet Drive, wer903e proud to say th6at they we6re perfectly norm3al, tha894nk you ve89ry much."
# print(movieNight(subtitle))
# sentence = " abc def ghi jkl mno "
# print(longestWord(sentence))
# print(tennisMatch("Emily", "Kathleen", "1122-22211-11122-1212-"))
# print(freshFruit([313414, 2241221, 32432, 49204, 493204, 23212], 2, 4))
| [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
f503f73e958fb9b1a6c3dfb21c67672816c334f1 | 59872978d56a25e62039a322d5c6e72444a743a5 | /service/account/account_base_service.py | fa18d143f43ae4c0ae4f6e6017afb496a654eabd | [
"MIT"
] | permissive | lwanglinhong/loonflow | 3c0e644f0e24036388b6f48a2894fd66107dc4bb | b505c5d21e03a9a2ca9311a9b8a3d337091ba044 | refs/heads/master | 2022-04-27T18:08:10.539218 | 2020-04-25T09:11:21 | 2020-04-25T09:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,088 | py | import json
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.contrib.auth.hashers import make_password
from django.db.models import Q
from apps.account.models import AppToken, LoonUser, LoonUserRole, LoonDept, LoonRole
from service.base_service import BaseService
from service.common.log_service import auto_log
class AccountBaseService(BaseService):
"""
account
"""
    @classmethod
    @auto_log
    def get_token_by_app_name(cls, app_name: str)->tuple:
        """
        get app's call token record by app_name
        :param app_name: name the calling application registered with
        :return: (True, AppToken instance or None) -- None when no active token exists
        """
        # .first() returns None instead of raising when there is no match.
        app_token_obj = AppToken.objects.filter(app_name=app_name, is_deleted=0).first()
        return True, app_token_obj
@classmethod
@auto_log
def get_user_by_username(cls, username: str)->tuple:
"""
get user info by username
:return:
"""
result = LoonUser.objects.filter(username=username, is_deleted=0).first()
if result:
return True, result
else:
return False, 'username: {} is not existed or has been deleted'.format(username)
@classmethod
@auto_log
def get_user_by_user_id(cls, user_id: int)->tuple:
"""
get user by user id
:param user_id:
:return:
"""
result = LoonUser.objects.filter(id=user_id, is_deleted=0).first()
if result:
return True, result
else:
return False, 'user_id: {} is not existed or has been deleted'.format(user_id)
    @classmethod
    @auto_log
    def get_user_name_list_by_id_list(cls, user_id_list: list)->tuple:
        """
        get username list by user id list
        NOTE: fails only when NO id matches; ids that are missing or deleted
        are silently dropped from the result otherwise.
        :param user_id_list: list of LoonUser primary keys
        :return: (True, dict(username_list=[...])) or (False, error message)
        """
        user_queryset = LoonUser.objects.filter(id__in=user_id_list, is_deleted=0).all()
        if not user_queryset:
            return False, 'user id is not existed or has been deleted'
        username_list = [user_query.username for user_query in user_queryset]
        return True, dict(username_list=username_list)
    @classmethod
    @auto_log
    def get_user_role_id_list(cls, username: str)->tuple:
        """
        get user's role id list by username
        :param username: login name of the user
        :return: (True, list of role ids) or (False, error message) when the user is missing
        """
        user_obj = LoonUser.objects.filter(username=username, is_deleted=0).first()
        if not user_obj:
            return False, 'user is not existed or has been deleted'
        user_role_queryset = LoonUserRole.objects.filter(user_id=user_obj.id, is_deleted=0).all()
        user_role_id_list = [user_role.role_id for user_role in user_role_queryset]
        return True, user_role_id_list
@classmethod
@auto_log
def get_user_role_info_by_user_id(cls, user_id: int, search_value: str=0, page: int =1, per_page: int=10)->tuple:
"""
get user's role info list by user's id and query params: role name、page、per_page
:param user_id:
:param search_value:
:param page:
:param per_page:
:return:
"""
user_role_queryset = LoonUserRole.objects.filter(user_id=user_id, is_deleted=0).all()
user_role_id_list = [user_role.role_id for user_role in user_role_queryset]
query_params = Q(is_deleted=False, id__in=user_role_id_list)
if search_value:
query_params &= Q(name__contains=search_value)
role_info_queryset = LoonRole.objects.filter(query_params).all()
paginator = Paginator(role_info_queryset, per_page)
try:
role_info_result_paginator = paginator.page(page)
except PageNotAnInteger:
role_info_result_paginator = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results
role_info_result_paginator = paginator.page(paginator.num_pages)
role_result_list = role_info_result_paginator.object_list
role_result_format_list = []
for role_info in role_result_list:
role_result_format_list.append(dict(id=role_info.id, name=role_info.name, description=role_info.description,
label=json.dumps(role_info.label) if role_info.label else {},
creator=role_info.creator, gmt_created=str(role_info.gmt_created)[:19]))
return True, dict(role_result_format_list=role_result_format_list,
paginator_info=dict(per_page=per_page, page=page, total=paginator.count))
    @classmethod
    @auto_log
    def get_role_user_info_by_role_id(cls, role_id: int, search_value: str='', page: int=1, per_page: int =10)->tuple:
        """
        get a paginated list of user info dicts for the members of a role
        :param role_id: LoonRole primary key
        :param search_value: optional fuzzy filter on username or alias
        :param page: 1-based page number (out-of-range values are clamped)
        :param per_page: page size
        :return: (True, dict with user_result_format_list and paginator_info)
        """
        user_role_queryset = LoonUserRole.objects.filter(role_id=role_id, is_deleted=0).all()
        role_user_id_list = [user_role.user_id for user_role in user_role_queryset]
        query_params = Q(is_deleted=False, id__in=role_user_id_list)
        if search_value:
            query_params &= Q(username__contains=search_value) | Q(alias__contains=search_value)
        user_info_queryset = LoonUser.objects.filter(query_params).all()
        paginator = Paginator(user_info_queryset, per_page)
        try:
            user_info_result_paginator = paginator.page(page)
        except PageNotAnInteger:
            user_info_result_paginator = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results
            user_info_result_paginator = paginator.page(paginator.num_pages)
        user_result_list = user_info_result_paginator.object_list
        user_result_format_list = []
        for user_info in user_result_list:
            user_result_format_list.append(user_info.get_dict())
        return True, dict(user_result_format_list=user_result_format_list,
                          paginator_info=dict(per_page=per_page, page=page, total=paginator.count))
    @classmethod
    @auto_log
    def get_user_up_dept_id_list(cls, username: str)->tuple:
        """
        get user's department id list by username, include parent departments
        (walks the parent_dept_id chain up to the root).
        NOTE(review): a cyclic parent_dept_id chain would recurse forever --
        assumed impossible by data constraints; confirm.
        :param username: login name of the user
        :return: (True, list of dept ids, user's own dept first) or (False, msg)
        """
        dept_id_list = []
        user_obj = LoonUser.objects.filter(username=username, is_deleted=0).first()
        if not user_obj:
            return False, 'user is not existed or has been deleted'
        def iter_dept(dept_id):
            # Recursively append this dept and then its parent, if any.
            dept_obj = LoonDept.objects.filter(id=dept_id, is_deleted=0).first()
            if dept_obj:
                dept_id_list.append(dept_obj.id)
                if dept_obj.parent_dept_id:
                    iter_dept(dept_obj.parent_dept_id)
        iter_dept(user_obj.dept_id)
        return True, dept_id_list
@classmethod
@auto_log
def get_user_dept_approver(cls, username: str)->tuple:
"""
get user's department approver, Preferential access to the approver, without taking tl(team leader)
:param username:
:return:
"""
user_obj = LoonUser.objects.filter(username=username, is_deleted=0).first()
loon_dept_obj = LoonDept.objects.filter(id=user_obj.dept_id).first()
if loon_dept_obj.approver:
return True, loon_dept_obj.approver
else:
return True, loon_dept_obj.leader
    @classmethod
    @auto_log
    def get_dept_sub_dept_id_list(cls, dept_id: int)->tuple:
        """
        get a department's id plus the ids of all its (recursive) sub-departments
        :param dept_id: root department id
        :return: (True, list of dept ids); empty list when the root does not exist
        """
        dept_id_list = []
        dept_obj = LoonDept.objects.filter(id=dept_id, is_deleted=0).first()
        if dept_obj:
            dept_id_list.append(dept_obj.id)
        else:
            # Unknown/deleted root: treated as "no departments", not an error.
            return True, []
        def iter_dept_id_list(new_dept_id):
            # Depth-first walk over parent_dept_id links, accumulating ids.
            new_dept_obj = LoonDept.objects.filter(id=new_dept_id, is_deleted=0).first()
            if new_dept_obj:
                sub_dept_queryset = LoonDept.objects.filter(parent_dept_id=new_dept_obj.id, is_deleted=0).all()
                for sub_dept in sub_dept_queryset:
                    if sub_dept:
                        dept_id_list.append(sub_dept.id)
                        iter_dept_id_list(sub_dept.id)
        iter_dept_id_list(dept_id)
        return True, dept_id_list
    @classmethod
    @auto_log
    def get_dept_username_list(cls, dept_id: int)->tuple:
        """
        get the username list of everyone in a department or any of its
        sub-departments.
        NOTE(review): the user query has no is_deleted filter, so soft-deleted
        users are included -- confirm whether that is intended.
        :param dept_id: root department id
        :return: (True, list of usernames) or (False, error message)
        """
        flag, sub_dept_id_list = cls.get_dept_sub_dept_id_list(dept_id)
        if flag is False:
            return False, sub_dept_id_list
        user_name_list = []
        if sub_dept_id_list:
            user_queryset = LoonUser.objects.filter(dept_id__in=sub_dept_id_list).all()
            for user in user_queryset:
                user_name_list.append(user.username)
        return True, user_name_list
@classmethod
@auto_log
def get_role_username_list(cls, role_id: int)->tuple:
"""
get role's username list by role_id
:param role_id:
:return:
"""
user_role_queryset = LoonUserRole.objects.filter(role_id=role_id).all()
user_id_list = []
for user_role in user_role_queryset:
user_id_list.append(user_role.user_id)
if not user_id_list:
return True, []
username_queryset = LoonUser.objects.filter(id__in=user_id_list).all()
username_list = []
for username_obj in username_queryset:
username_list.append(username_obj.username)
return True, username_list
    @classmethod
    @auto_log
    def get_dept_by_id(cls, dept_id: int)->tuple:
        """
        get department record by primary key
        :param dept_id: LoonDept primary key
        :return: (True, LoonDept instance or None when missing/deleted)
        """
        return True, LoonDept.objects.filter(id=dept_id, is_deleted=False).first()
    @classmethod
    @auto_log
    def get_role_by_id(cls, role_id: int)->tuple:
        """
        get role record by primary key
        :param role_id: LoonRole primary key
        :return: (True, LoonRole instance or None when missing/deleted)
        """
        return True, LoonRole.objects.filter(id=role_id, is_deleted=False).first()
    @classmethod
    @auto_log
    def app_workflow_permission_list(cls, app_name: str)->tuple:
        """
        get app's authorised workflow_id list by app_name
        :param app_name: calling application's registered name
        :return: (True, dict(workflow_id_list=[...])) or (False, error message)
        """
        if not app_name:
            return False, 'app_name is not provided'
        if app_name == 'loonflow':
            # loonflow itself is allowed to access every workflow
            from apps.workflow.models import Workflow
            workflow_query_set = Workflow.objects.filter(is_deleted=0).all()
            workflow_id_list = []
            for workflow_obj in workflow_query_set:
                workflow_id_list.append(workflow_obj.id)
            return True, dict(workflow_id_list=workflow_id_list)
        app_token_obj = AppToken.objects.filter(app_name=app_name, is_deleted=0).first()
        if not app_token_obj:
            return False, 'appname is unauthorized'
        # workflow_ids is stored as a comma-separated string of ids.
        workflow_ids = app_token_obj.workflow_ids
        if workflow_ids:
            workflow_id_list = workflow_ids.split(',')
            workflow_id_list = [int(workflow_id) for workflow_id in workflow_id_list]
            return True, dict(workflow_id_list=workflow_id_list)
        else:
            return True, dict(workflow_id_list=[])
@classmethod
@auto_log
def app_workflow_permission_check(cls, app_name: str, workflow_id: int)->tuple:
"""
appname has permission for workflow check by app_name and workflow_id
:param app_name:
:param workflow_id:
:return:
"""
if app_name == 'loonflow':
return True, ''
flag, result = cls.app_workflow_permission_list(app_name)
if flag and result.get('workflow_id_list') and workflow_id in result.get('workflow_id_list'):
return True, ''
else:
return False, 'the app has no permission to the workflow_id'
    @classmethod
    @auto_log
    def app_ticket_permission_check(cls, app_name: str, ticket_id: int)-> tuple:
        """
        check whether app_name is authorised to touch the given ticket
        (resolved via the ticket's workflow).
        :param app_name: calling application's registered name
        :param ticket_id: ticket primary key
        :return: (True, '') when permitted, otherwise (False, error message)
        """
        # Local import avoids a circular dependency between the service modules.
        from service.ticket.ticket_base_service import ticket_base_service_ins
        flag, ticket_obj = ticket_base_service_ins.get_ticket_by_id(ticket_id)
        if not flag:
            return False, ticket_obj
        workflow_id = ticket_obj.workflow_id
        permission_check, msg = cls.app_workflow_permission_check(app_name, workflow_id)
        if not permission_check:
            return False, msg
        return True, ''
@classmethod
@auto_log
def get_user_list(cls, search_value: str, page: int=1, per_page: int=10) -> tuple:
    """
    Paginated user list.
    :param search_value: fuzzy-matches username or alias when provided
    :param page: page number to return
    :param per_page: page size
    :return: (True, dict with formatted user list and paginator info)
    """
    filters = Q(is_deleted=False)
    if search_value:
        filters &= Q(username__contains=search_value) | Q(alias__contains=search_value)
    paginator = Paginator(LoonUser.objects.filter(filters), per_page)
    try:
        current_page = paginator.page(page)
    except PageNotAnInteger:
        current_page = paginator.page(1)
    except EmptyPage:
        # out-of-range page numbers (e.g. 9999) fall back to the last page
        current_page = paginator.page(paginator.num_pages)
    format_list = [user_obj.get_dict() for user_obj in current_page.object_list]
    return True, dict(user_result_object_format_list=format_list,
                      paginator_info=dict(per_page=per_page, page=page, total=paginator.count))
@classmethod
@auto_log
def add_user(cls, username: str, alias: str, email: str, phone: str, dept_id: int, is_active: int, is_admin: int,
             is_workflow_admin: int, creator: str, password: str='') -> tuple:
    """
    Create a user record. Only admins and workflow admins log in to the
    backend, so the password may be left empty and reset later.
    :param username:
    :param alias:
    :param email:
    :param phone:
    :param dept_id:
    :param is_active:
    :param is_admin:
    :param is_workflow_admin:
    :param creator:
    :param password: plain text; hashed before storage
    :return: (True, dict(user_id=...))
    """
    hashed_password = make_password(password, None, 'pbkdf2_sha256')
    new_user = LoonUser(username=username, alias=alias, email=email, phone=phone, dept_id=dept_id,
                        is_active=is_active, is_admin=is_admin, is_workflow_admin=is_workflow_admin,
                        creator=creator, password=hashed_password)
    new_user.save()
    return True, dict(user_id=new_user.id)
@classmethod
@auto_log
def edit_user(cls, user_id: int, username: str, alias: str, email: str, phone: str, dept_id: int, is_active: int,
              is_admin: int, is_workflow_admin: int) -> tuple:
    """
    Update a user record.
    :param user_id:
    :param username:
    :param alias:
    :param email:
    :param phone:
    :param dept_id:
    :param is_active:
    :param is_admin:
    :param is_workflow_admin:
    :return: (True, {}) on success, (False, msg) when the user does not exist
    """
    user_queryset = LoonUser.objects.filter(id=user_id, is_deleted=0)
    # FIX: a missing/deleted user was previously reported as success; sibling
    # methods (update_role, update_dept) return an explicit error instead.
    if not user_queryset:
        return False, 'user record is not existed or has been deleted'
    user_queryset.update(username=username, alias=alias, email=email, phone=phone, dept_id=dept_id,
                         is_active=is_active, is_admin=is_admin, is_workflow_admin=is_workflow_admin)
    return True, {}
@classmethod
@auto_log
def delete_user(cls, user_id: int) -> tuple:
    """
    Soft-delete a user record.
    :param user_id:
    :return: (True, {}) on success, (False, msg) when the user does not exist
    """
    user_queryset = LoonUser.objects.filter(id=user_id, is_deleted=0)
    # FIX: a missing/already-deleted user was previously reported as success;
    # sibling methods (delete_role, delete_dept) return an explicit error.
    if not user_queryset:
        return False, 'user record is not existed or has been deleted'
    user_queryset.update(is_deleted=1)
    return True, {}
@classmethod
@auto_log
def get_role_list(cls, search_value: str, page: int=1, per_page: int=10) -> tuple:
    """
    Paginated role list.
    :param search_value: fuzzy-matches role name or description when provided
    :param page: page number to return
    :param per_page: page size
    :return: (True, dict with formatted role list and paginator info)
    """
    filters = Q(is_deleted=False)
    if search_value:
        filters &= Q(name__contains=search_value) | Q(description__contains=search_value)
    # queryset holds roles (the previous local name `user_objects` was misleading)
    paginator = Paginator(LoonRole.objects.filter(filters), per_page)
    try:
        current_page = paginator.page(page)
    except PageNotAnInteger:
        current_page = paginator.page(1)
    except EmptyPage:
        # out-of-range page numbers fall back to the last page
        current_page = paginator.page(paginator.num_pages)
    format_list = [role_obj.get_dict() for role_obj in current_page.object_list]
    return True, dict(role_result_object_format_list=format_list,
                      paginator_info=dict(per_page=per_page, page=page, total=paginator.count))
@classmethod
@auto_log
def add_role(cls, name: str, description: str, label: str, creator: str) -> tuple:
    """
    Create a role record.
    :param name:
    :param description:
    :param label:
    :param creator:
    :return: (True, dict(role_id=...))
    """
    new_role = LoonRole(name=name, description=description, label=label, creator=creator)
    new_role.save()
    return True, dict(role_id=new_role.id)
@classmethod
@auto_log
def add_role_user(cls, role_id: int, user_id: int, creator: str) -> tuple:
    """
    Attach a user to a role; duplicate memberships are rejected.
    :param role_id:
    :param user_id:
    :param creator:
    :return: (True, dict(role_user_id=...)) or (False, msg) for duplicates
    """
    # de-duplicate: refuse when a live membership already exists
    existing = LoonUserRole.objects.filter(user_id=user_id, role_id=role_id, is_deleted=0)
    if existing:
        return False, 'user has been existed in this role'
    membership = LoonUserRole(user_id=user_id, role_id=role_id, creator=creator)
    membership.save()
    return True, dict(role_user_id=membership.id)
@classmethod
@auto_log
def delete_role_user(cls, role_user_id: int) -> tuple:
    """
    Soft-delete a role membership record.
    :param role_user_id:
    :return: (True, '') on success, (False, msg) when the record is missing
    """
    membership_queryset = LoonUserRole.objects.filter(id=role_user_id, is_deleted=0)
    if not membership_queryset:
        return False, 'record is not existed or has been deleted'
    membership_queryset.update(is_deleted=1)
    return True, ''
@classmethod
@auto_log
def update_role(cls, role_id: int, name: str, description: str, label: str) -> tuple:
    """
    Update an existing role record.
    :param role_id:
    :param name:
    :param description:
    :param label:
    :return: (True, {}) on success, (False, msg) when the role is missing
    """
    target_queryset = LoonRole.objects.filter(id=role_id, is_deleted=0)
    if not target_queryset:
        return False, 'role record is not existed'
    target_queryset.update(name=name, description=description, label=label)
    return True, {}
@classmethod
@auto_log
def delete_role(cls, role_id: int) -> tuple:
    """
    Soft-delete a role record.
    :param role_id:
    :return: (True, {}) on success, (False, msg) when the role is missing
    """
    target_queryset = LoonRole.objects.filter(id=role_id, is_deleted=0)
    if not target_queryset:
        return False, 'role record is not existed'
    target_queryset.update(is_deleted=1)
    return True, {}
@classmethod
@auto_log
def get_dept_list(cls, search_value: str, page: int=1, per_page: int=10) -> tuple:
    """
    Paginated department list.
    :param search_value: fuzzy-matches department name or label when provided
    :param page: page number to return
    :param per_page: page size
    :return: (True, dict with formatted department list and paginator info)
    """
    filters = Q(is_deleted=False)
    if search_value:
        filters &= Q(name__contains=search_value) | Q(label__contains=search_value)
    paginator = Paginator(LoonDept.objects.filter(filters), per_page)
    try:
        current_page = paginator.page(page)
    except PageNotAnInteger:
        current_page = paginator.page(1)
    except EmptyPage:
        # out-of-range page numbers fall back to the last page
        current_page = paginator.page(paginator.num_pages)
    format_list = [dept_obj.get_dict() for dept_obj in current_page.object_list]
    return True, dict(dept_result_object_format_list=format_list,
                      paginator_info=dict(per_page=per_page, page=page, total=paginator.count))
@classmethod
@auto_log
def add_dept(cls, name: str, parent_dept_id: int, leader: str, approver: str, label: str, creator: str) -> tuple:
    """
    Create a department record.
    :param name:
    :param parent_dept_id:
    :param leader:
    :param approver:
    :param label:
    :param creator:
    :return: (True, dict(dept_id=...))
    """
    new_dept = LoonDept(name=name, parent_dept_id=parent_dept_id, leader=leader, approver=approver,
                        label=label, creator=creator)
    new_dept.save()
    return True, dict(dept_id=new_dept.id)
@classmethod
@auto_log
def update_dept(cls, dept_id: int, name: str, parent_dept_id: int, leader: str, approver: str, label: str) -> tuple:
    """
    Update a department record.
    :param dept_id:
    :param name:
    :param parent_dept_id:
    :param leader:
    :param approver:
    :param label:
    :return: (True, '') on success, (False, msg) when the department is missing
    """
    target_queryset = LoonDept.objects.filter(id=dept_id, is_deleted=0)
    if not target_queryset:
        return False, 'dept is not existed or has been deleted'
    target_queryset.update(name=name, parent_dept_id=parent_dept_id, leader=leader, approver=approver, label=label)
    return True, ''
@classmethod
@auto_log
def delete_dept(cls, dept_id: int) -> tuple:
    """
    Soft-delete a department record.
    :param dept_id:
    :return: (True, '') on success, (False, msg) when the department is missing
    """
    target_queryset = LoonDept.objects.filter(id=dept_id, is_deleted=0)
    if not target_queryset:
        return False, 'dept is not existed or has been deleted'
    target_queryset.update(is_deleted=1)
    return True, ''
@classmethod
@auto_log
def get_token_list(cls, search_value: str, page: int=1, per_page: int=10) -> tuple:
    """
    Paginated app-token list.
    :param search_value: fuzzy-matches the app name when provided
    :param page: page number to return
    :param per_page: page size
    :return: (True, dict with formatted token list and paginator info)
    """
    filters = Q(is_deleted=False)
    if search_value:
        filters &= Q(app_name__contains=search_value)
    paginator = Paginator(AppToken.objects.filter(filters), per_page)
    try:
        current_page = paginator.page(page)
    except PageNotAnInteger:
        current_page = paginator.page(1)
    except EmptyPage:
        # out-of-range page numbers fall back to the last page
        current_page = paginator.page(paginator.num_pages)
    format_list = [token_obj.get_dict() for token_obj in current_page.object_list]
    return True, dict(token_result_object_format_list=format_list,
                      paginator_info=dict(per_page=per_page, page=page, total=paginator.count))
@classmethod
@auto_log
def add_token_record(cls, app_name: str, ticket_sn_prefix: str, workflow_ids: str, username: str) -> tuple:
    """
    Create an app token record used to authorise API callers.
    :param app_name:
    :param ticket_sn_prefix:
    :param workflow_ids: comma-separated workflow id string
    :param username: creator of the record
    :return: (True, dict(app_token_id=...))
    """
    import uuid
    # SECURITY FIX: uuid1() embeds the host MAC address and a timestamp, which
    # makes auth tokens predictable; uuid4() draws from os.urandom instead.
    token = uuid.uuid4()
    app_token_obj = AppToken(app_name=app_name, ticket_sn_prefix=ticket_sn_prefix, workflow_ids=workflow_ids,
                             token=token, creator=username)
    app_token_obj.save()
    return True, dict(app_token_id=app_token_obj.id)
@classmethod
@auto_log
def update_token_record(cls, app_token_id: int, app_name: str, ticket_sn_prefix: str, workflow_ids: str) -> tuple:
    """
    Update an app token record (the token value itself is left unchanged).
    :param app_token_id:
    :param app_name:
    :param ticket_sn_prefix:
    :param workflow_ids: comma-separated workflow id string
    :return: (True, '') on success, (False, msg) when the record is missing
    """
    token_record = AppToken.objects.filter(id=app_token_id, is_deleted=0).first()
    if not token_record:
        return False, 'record is not exist or has been deleted'
    token_record.app_name = app_name
    token_record.ticket_sn_prefix = ticket_sn_prefix
    token_record.workflow_ids = workflow_ids
    token_record.save()
    return True, ''
@classmethod
@auto_log
def del_token_record(cls, app_token_id: int) -> tuple:
    """
    Soft-delete an app token record.
    :param app_token_id:
    :return: (True, '') on success, (False, msg) when the record is missing
    """
    token_record = AppToken.objects.filter(id=app_token_id, is_deleted=0).first()
    if not token_record:
        return False, 'record is not exist or has been deleted'
    token_record.is_deleted = True
    token_record.save()
    return True, ''
@classmethod
@auto_log
def admin_permission_check(cls, username: str='', user_id: int=0) -> tuple:
    """
    Check whether the user is an admin.
    :param username: looked up first when provided
    :param user_id: used when username is empty
    :return: (True/False, message)
    """
    if username:
        flag, result = cls.get_user_by_username(username)
    elif user_id:
        flag, result = cls.get_user_by_user_id(user_id)
    else:
        return False, 'username or user_id is needed'
    if flag is False:
        return False, result
    return (True, 'user is admin') if result.is_admin else (False, 'user is not admin')
@classmethod
@auto_log
def workflow_admin_permission_check(cls, username: str='', user_id: int=0) -> tuple:
    """
    Check whether the user is a workflow admin (or an admin, which implies it).
    :param username: looked up first when provided
    :param user_id: used when username is empty
    :return: (True/False, message)
    """
    if username:
        flag, result = cls.get_user_by_username(username)
    elif user_id:
        # BUGFIX: this branch previously called get_user_by_username with the
        # (empty) username; look the user up by id as admin_permission_check does.
        flag, result = cls.get_user_by_user_id(user_id)
    else:
        return False, 'username or user_id is needed'
    if flag is False:
        return False, result
    if result.is_workflow_admin:
        return True, 'user is workflow admin'
    if result.is_admin:
        return True, 'user is admin'
    return False, 'user is not admin or workflow admin'
@classmethod
@auto_log
def admin_or_workflow_admin_check(cls, username: str='', user_id: int=0) -> tuple:
    """
    Check whether the user is an admin or a workflow admin.
    :param username: looked up first when provided
    :param user_id: used when username is empty
    :return: (True/False, message)
    """
    if username:
        flag, result = cls.get_user_by_username(username)
    elif user_id:
        # BUGFIX: this branch previously called get_user_by_username with the
        # (empty) username; look the user up by id as admin_permission_check does.
        flag, result = cls.get_user_by_user_id(user_id)
    else:
        return False, 'username or user_id is needed'
    if flag is False:
        return False, result
    if result.is_workflow_admin or result.is_admin:
        return True, 'user is admin or workflow admin'
    return False, 'user is not admin or workflow admin'
@classmethod
@auto_log
def reset_password(cls, username: str='', user_id: int=0) -> tuple:
    """
    Reset a user's password to the default '123456'.
    Only admin and workflow admin accounts log in to loonflow's backend, so
    only those accounts may have their password reset.
    :param username:
    :param user_id: takes precedence over username when both are given
    :return: (True/False, message)
    """
    flag, result = False, ''
    if username:
        flag, result = cls.get_user_by_username(username)
    if user_id:
        flag, result = cls.get_user_by_user_id(user_id)
    if not flag:
        return False, result
    target_user = result
    if not (target_user.is_admin or target_user.is_workflow_admin):
        return False, 'just admin or workflow admin can be reset password'
    target_user.password = make_password('123456', None, 'pbkdf2_sha256')
    target_user.save()
    return True, 'password has been reset to 123456'
# Module-level singleton; the rest of the project imports and uses this
# instance rather than instantiating AccountBaseService itself.
account_base_service_ins = AccountBaseService()
| [
"blackholll@163.com"
] | blackholll@163.com |
3d9a5108a21a212d80c96f68fffd33e4f65e2b1f | 00af09f4ac6f98203910d86c3791c152184ace9a | /build/lib/pyactivity/Activity.py | 7a2d73f32650d53f0986d2263de050d51f369064 | [] | no_license | orf53975/CarnosOS | 621d641df02d742a2452fde2f28a28c74b32695a | d06849064e4e9f30ef901ad8cf90960e1bec0805 | refs/heads/master | 2023-03-24T08:06:48.274566 | 2017-01-05T16:41:01 | 2017-01-05T16:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | import webbrowser
class ArcherSysActivity:
    """ The Console Version of the JS ArcherSysActivity for the browser """
    # NOTE(review): the class body is empty apart from the docstring, and the
    # module-level `webbrowser` import above is not used here — presumably
    # behaviour is added elsewhere or was stripped; confirm before relying on it.
"Weldon@athletech.org"
] | Weldon@athletech.org |
026d3ba9df7425cf395334e5f518b3070e753ea6 | e9c0bb90f07144e26e54b78abc9d102b7affc9f8 | /billreminder/model/bills.py | 8698c5edac475547f1409ccd362ff28afff32b0c | [] | no_license | linxaddict/billreminder | fe8b3aee275172518f1e4757e4a89350f2bd2517 | 7c8f52b8d3bdc55199b4f6417d960facf5c6857e | refs/heads/master | 2021-01-11T17:19:07.965038 | 2017-03-29T19:57:29 | 2017-03-29T19:57:29 | 79,741,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,034 | py | import datetime as dt
__author__ = 'Marcin Przepiórkowski'
__email__ = 'mprzepiorkowski@gmail.com'
class Payment:
    """A single payment made by a user toward a bill.

    When no creation timestamp is supplied (or a falsy one is given), the
    current UTC time is recorded instead.
    """

    def __init__(self, user, bill, created_at=None):
        self._user = user
        self._bill = bill
        self._created_at = dt.datetime.utcnow() if not created_at else created_at

    @property
    def user(self):
        return self._user

    @user.setter
    def user(self, new_value):
        self._user = new_value

    @property
    def bill(self):
        return self._bill

    @bill.setter
    def bill(self, new_value):
        self._bill = new_value

    @property
    def created_at(self):
        return self._created_at

    @created_at.setter
    def created_at(self, new_value):
        self._created_at = new_value
class Bill:
    """A bill shared between an owner and optional participants.

    Tracks the amount, payment history, the next due date and an optional
    recurrence described by repeat_mode / repeat_value.
    """

    def __init__(self, id, name, description=None, amount=None, last_payment=None, due_date=None,
                 repeat_mode=None, repeat_value=None, owner=None, payments=None,
                 participants=None):
        self._id = id
        self._name = name
        self._description = description
        self._amount = amount
        self._last_payment = last_payment
        self._due_date = due_date
        self._repeat_mode = repeat_mode
        self._repeat_value = repeat_value
        self._owner = owner
        # Falsy arguments are replaced with fresh lists so instances never
        # share a mutable default.
        self._payments = payments or []
        self._participants = participants or []

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, new_value):
        self._id = new_value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_value):
        self._name = new_value

    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, new_value):
        self._description = new_value

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, new_value):
        self._amount = new_value

    @property
    def last_payment(self):
        return self._last_payment

    @last_payment.setter
    def last_payment(self, new_value):
        self._last_payment = new_value

    @property
    def due_date(self):
        return self._due_date

    @due_date.setter
    def due_date(self, new_value):
        self._due_date = new_value

    @property
    def repeat_mode(self):
        return self._repeat_mode

    @repeat_mode.setter
    def repeat_mode(self, new_value):
        self._repeat_mode = new_value

    @property
    def repeat_value(self):
        return self._repeat_value

    @repeat_value.setter
    def repeat_value(self, new_value):
        self._repeat_value = new_value

    @property
    def owner(self):
        return self._owner

    @owner.setter
    def owner(self, new_value):
        self._owner = new_value

    @property
    def payments(self):
        return self._payments

    @payments.setter
    def payments(self, new_value):
        self._payments = new_value

    @property
    def participants(self):
        return self._participants

    @participants.setter
    def participants(self, new_value):
        self._participants = new_value
| [
"mprzepiorkowski@gmail.com"
] | mprzepiorkowski@gmail.com |
31cabd5e8920b175cf6324dd2fffcddbd08484af | 1757262f5010c5a726cbb11513d5ad88f632c5a2 | /tributary/streaming/calculations/__init__.py | 4ef83213c1f3ac2a5403467227e677722c7cc520 | [
"Apache-2.0"
] | permissive | thetradingflow/tributary | 9287e26dc63fe1320ef1950048e497ac86519ddb | 6f2c3ce0ac86ee7c3343fd970f3c3e7161c5951e | refs/heads/master | 2022-08-01T02:05:18.385140 | 2020-05-18T20:32:08 | 2020-05-18T20:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from .ops import * # noqa: F401, F403
from .rolling import Count as RollingCount, Sum as RollingSum, Min as RollingMin, Max as RollingMax, Average as RollingAverage # noqa: F401
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
b28e45c09fa3d58b6a1cd221d055dab1a81ca169 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/44/usersdata/130/14900/submittedfiles/desvpad.py | 386a55164703e424ad038db44693b637a1d910a9 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
# Read the number of elements (Python 2 style: input() evaluates the entry).
n=input('Digite o valor de n:')
x=[]
# Collect n values from the user into list x.
for i in range(0,n,1):
    x.append(input('Digite um elemento:'))
# Print the first and the last collected elements.
print(x[0])
print(x[len(x)-1])
# NOTE(review): despite the file name (desvpad = standard deviation), the code
# below computes and prints the arithmetic mean only, and `math` is unused —
# confirm whether the deviation step is missing.
s=0
for i in range(0,n,1):
    s=s+x[i]
s=s/(len(x))
print(s)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
907bfe48899ad95c6caa0f58360b2de875021754 | 57cb9fef5efac78758f5d151b959ca2216c94083 | /edx/app/edx_ansible/venvs/edx_ansible/bin/rst2xml.py | 1739aa96c81e70299840fba73d614169ffaa6526 | [] | no_license | JosiahKennedy/openedx-branded | 9751d5362088276a87b2e0edca0913568eeb1ac4 | d16a25b035b2e810b8ab2b0a2ac032b216562e26 | refs/heads/master | 2022-12-21T02:39:17.133147 | 2020-03-25T06:03:23 | 2020-03-25T06:03:23 | 249,895,218 | 0 | 1 | null | 2022-12-08T01:23:48 | 2020-03-25T05:33:05 | null | UTF-8 | Python | false | false | 635 | py | #!/edx/app/edx_ansible/venvs/edx_ansible/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
# Configure the locale from the environment; this is best-effort only.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; locale failures are still ignored deliberately.
    pass

from docutils.core import publish_cmdline, default_description

# Command-line help text shown by the Docutils publisher front end.
description = ('Generates Docutils-native XML from standalone '
               'reStructuredText sources. ' + default_description)

# Parse sys.argv, read reStructuredText from stdin/file and emit Docutils XML.
publish_cmdline(writer_name='xml', description=description)
| [
"josiahk@phyziklabs.com"
] | josiahk@phyziklabs.com |
e765daddc29cb2263170f7f84d83bebe73166e5a | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/aio/operations/_express_route_circuit_authorizations_operations.py | d5716397bd93627ef18063b270cbbb960f01a71e | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 20,876 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitAuthorizationsOperations:
"""ExpressRouteCircuitAuthorizationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Pipeline-enabled service client used to build and send HTTP requests.
    self._client = client
    # Serializer/deserializer for request bodies and response payloads.
    self._serialize = serializer
    self._deserialize = deserializer
    # Client configuration (holds subscription_id and polling_interval).
    self._config = config
async def _delete_initial(
    self,
    resource_group_name: str,
    circuit_name: str,
    authorization_name: str,
    **kwargs
) -> None:
    # Sends the initial DELETE request of the long-running operation;
    # `begin_delete` wraps this call with an LRO poller.
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map auth/not-found/conflict status codes to typed exceptions; callers
    # may extend the mapping via the `error_map` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202 = delete accepted (possibly still in progress); 204 = not found/no content.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    circuit_name: str,
    authorization_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Deletes the specified authorization from the specified express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param authorization_name: The name of the authorization.
    :type authorization_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # A continuation token means we are resuming a previously started
    # operation, so the initial DELETE request is skipped.
    if cont_token is None:
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            authorization_name=authorization_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete returns no body; only invoke the custom response hook if given.
        if cls:
            return cls(pipeline_response, None, {})

    # Choose the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    circuit_name: str,
    authorization_name: str,
    **kwargs
) -> "models.ExpressRouteCircuitAuthorization":
    """Gets the specified authorization from the specified express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param authorization_name: The name of the authorization.
    :type authorization_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteCircuitAuthorization, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitAuthorization
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRouteCircuitAuthorization"]
    # Map auth/not-found/conflict status codes to typed exceptions; callers
    # may extend the mapping via the `error_map` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    circuit_name: str,
    authorization_name: str,
    authorization_parameters: "models.ExpressRouteCircuitAuthorization",
    **kwargs
) -> "models.ExpressRouteCircuitAuthorization":
    # Sends the initial PUT request of the long-running create-or-update
    # operation; `begin_create_or_update` wraps this call with an LRO poller.
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRouteCircuitAuthorization"]
    # Map auth/not-found/conflict status codes to typed exceptions; callers
    # may extend the mapping via the `error_map` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the authorization model as the PUT request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(authorization_parameters, 'ExpressRouteCircuitAuthorization')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = updated existing resource, 201 = created a new one.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        circuit_name: str,
        authorization_name: str,
        authorization_parameters: "models.ExpressRouteCircuitAuthorization",
        **kwargs
    ) -> AsyncLROPoller["models.ExpressRouteCircuitAuthorization"]:
        """Creates or updates an authorization in the specified express route circuit.

        Starts the long-running operation (LRO) via ``_create_or_update_initial`` and
        wraps the result in an ``AsyncLROPoller`` that polls until completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the express route circuit.
        :type circuit_name: str
        :param authorization_name: The name of the authorization.
        :type authorization_name: str
        :param authorization_parameters: Parameters supplied to the create or update express route
         circuit authorization operation.
        :type authorization_parameters: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitAuthorization
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitAuthorization or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitAuthorization]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-control kwargs so they are not forwarded to the HTTP pipeline.
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ExpressRouteCircuitAuthorization"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial PUT request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                circuit_name=circuit_name,
                authorization_name=authorization_name,
                authorization_parameters=authorization_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into the model type (or the
            # caller-supplied `cls` transform, if given).
            deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Select the polling strategy: ARM polling by default, none, or a custom object.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        circuit_name: str,
        **kwargs
    ) -> AsyncIterable["models.AuthorizationListResult"]:
        """Gets all authorizations in an express route circuit.

        Returns a lazily-evaluated async paged iterator; each page is fetched on demand
        and follows the service-provided ``next_link``.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param circuit_name: The name of the circuit.
        :type circuit_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AuthorizationListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.AuthorizationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.AuthorizationListResult"]
        # Map auth/not-found/conflict status codes to specific exception types;
        # callers may extend the map via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the templated URL,
            # subsequent pages simply follow the opaque next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, items) for the pager.
            deserialized = self._deserialize('AuthorizationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Run one page request through the pipeline, raising on non-200.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations'}  # type: ignore
| [
"noreply@github.com"
] | paultaiton.noreply@github.com |
eae42f62f6186b0dc18bf54d546868beb12061dd | 0778d368a4d26382d3956b710ac928f7944ba54f | /mammoth/docx/numbering_xml.py | f7f98e0c702db3580e71be2e696ca6c7d904a52c | [
"BSD-2-Clause"
] | permissive | tsaltena/python-mammoth | 68ed9af8e0c2b4012e92a08315dc8db5ac34769d | 6746d5f17377327d9947a10a1e8101f8810122e2 | refs/heads/master | 2021-05-15T03:53:17.462401 | 2020-11-09T11:00:10 | 2020-11-09T11:00:10 | 119,989,824 | 0 | 0 | BSD-2-Clause | 2021-01-20T08:01:23 | 2018-02-02T14:11:50 | Python | UTF-8 | Python | false | false | 1,700 | py | from ..documents import numbering_level
def read_numbering_xml_element(element):
    """Parse a w:numbering XML element into a Numbering lookup table."""
    # Concrete numberings (w:num) are resolved against the abstract
    # definitions (w:abstractNum) they reference.
    return Numbering(_read_nums(element, _read_abstract_nums(element)))
def _read_abstract_nums(element):
    """Map each abstract numbering id to its per-level definitions."""
    return dict(
        _read_abstract_num(child)
        for child in element.find_children("w:abstractNum")
    )
def _read_abstract_num(element):
    """Return an (abstract numbering id, levels dict) pair for one w:abstractNum."""
    return element.attributes.get("w:abstractNumId"), _read_abstract_num_levels(element)
def _read_abstract_num_levels(element):
    """Index each parsed w:lvl child by its level index."""
    return {
        level.level_index: level
        for level in map(_read_abstract_num_level, element.find_children("w:lvl"))
    }
def _read_abstract_num_level(element):
    """Build a numbering level; any format other than "bullet" is ordered."""
    fmt = element.find_child_or_null("w:numFmt").attributes.get("w:val")
    return numbering_level(element.attributes["w:ilvl"], fmt != "bullet")
def _read_nums(element, abstract_nums):
    """Map each concrete numbering id to the abstract levels it references."""
    return dict(
        _read_num(child, abstract_nums)
        for child in element.find_children("w:num")
    )
def _read_num(element, abstract_nums):
num_id = element.attributes.get("w:numId")
abstract_num_id = element.find_child_or_null("w:abstractNumId").attributes["w:val"]
return num_id, abstract_nums[abstract_num_id]
class Numbering(object):
    """Lookup table from (numbering id, level index) to level definitions."""

    def __init__(self, nums):
        # nums: dict mapping numbering ids to {level index: level} dicts.
        self._nums = nums

    def find_level(self, num_id, level):
        """Return the level definition, or None when the id or level is unknown."""
        levels = self._nums.get(num_id)
        return None if levels is None else levels.get(level)
| [
"mike@zwobble.org"
] | mike@zwobble.org |
3ec948782164962982fa48bf0a3afa512f6033a7 | 9003a00f9d529c50f7b169dce45f1380f1d466b6 | /atmel/feather/circuitpyton/build_adafruit_circuitpython_bundle_py_20181218/lib/adafruit_onewire/device.py | ce2c49b444f6b2ca7feec8513db5075863e4ffd7 | [] | no_license | 0xFF1E071F/hw | d249b8607ba40d6ce1ed9a4a267639c30019d978 | 2441df0ab45a8e2f3bed4ec7f4eff42ac0a32a7f | refs/heads/master | 2022-04-22T03:59:58.835300 | 2020-04-28T06:52:29 | 2020-04-28T06:52:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,191 | py | # The MIT License (MIT)
#
# Copyright (c) 2017 Carter Nelson for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_onewire.device`
====================================================
Provides access to a single device on the 1-Wire bus.
* Author(s): Carter Nelson
"""
__version__ = "1.1.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_OneWire.git"
# 1-Wire ROM command byte written before a device's ROM code in _select_rom,
# so the bus addresses that single device.
_MATCH_ROM = b'\x55'
class OneWireDevice(object):
    """A class to represent a single device on the 1-Wire bus."""

    def __init__(self, bus, address):
        self._bus = bus
        self._address = address

    def __enter__(self):
        # Address this device so subsequent reads/writes target it.
        self._select_rom()
        return self

    def __exit__(self, *exc):
        # Nothing to release; never suppress exceptions.
        return False

    def readinto(self, buf, *, start=0, end=None):
        """
        Read into ``buf`` from the device. The number of bytes read will be the
        length of ``buf``.

        If ``start`` or ``end`` is provided, then the buffer will be sliced
        as if ``buf[start:end]``. This will not cause an allocation like
        ``buf[start:end]`` will so it saves memory.

        :param bytearray buf: buffer to write into
        :param int start: Index to start writing at
        :param int end: Index to write up to but not include
        """
        self._bus.readinto(buf, start=start, end=end)
        # A full-buffer read of at least 8 bytes carries a CRC we can verify.
        if start == 0 and end is None and len(buf) >= 8 and self._bus.crc8(buf):
            raise RuntimeError('CRC error.')

    def write(self, buf, *, start=0, end=None):
        """
        Write the bytes from ``buf`` to the device.

        If ``start`` or ``end`` is provided, then the buffer will be sliced
        as if ``buffer[start:end]``. This will not cause an allocation like
        ``buffer[start:end]`` will so it saves memory.

        :param bytearray buf: buffer containing the bytes to write
        :param int start: Index to start writing from
        :param int end: Index to read up to but not include
        """
        return self._bus.write(buf, start=start, end=end)

    def _select_rom(self):
        # Reset the bus, then send MATCH ROM followed by this device's ROM code.
        self._bus.reset()
        self.write(_MATCH_ROM)
        self.write(self._address.rom)
| [
"eiselekd@gmail.com"
] | eiselekd@gmail.com |
65ea3a973c4a55941bc41b7fc132a9cba4286163 | b45b3e5e7389d071161fa52340cb119a29c76907 | /DoubleBufferDemo.py | 2a5be2c67b9df315bc62b6a954771d8f827818b4 | [] | no_license | Metallicow/wxPythonDemos | 2fc6882a11a0aa6bb35c42f163cfcd6b3456f4fd | 396d1ade5930528ec7518b9c22dc93a274cb418f | refs/heads/master | 2020-12-25T11:52:18.577898 | 2013-05-19T18:58:11 | 2013-05-19T18:58:11 | 11,283,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,421 | py | #!/usr/bin/env python
import wx
import random
# This has been set up to optionally use the wx.BufferedDC if
# USE_BUFFERED_DC is True, it will be used. Otherwise, it uses the raw
# wx.Memory DC , etc.
USE_BUFFERED_DC = False
#USE_BUFFERED_DC = True
class BufferedWindow(wx.Window):

    """
    A Buffered window class.

    To use it, subclass it and define a Draw(DC) method that takes a DC
    to draw to. In that method, put the code needed to draw the picture
    you want. The window will automatically be double buffered, and the
    screen will be automatically updated when a Paint event is received.

    When the drawing needs to change, you app needs to call the
    UpdateDrawing() method. Since the drawing is stored in a bitmap, you
    can also save the drawing to file by calling the
    SaveToFile(self, file_name, file_type) method.
    """

    def __init__(self, *args, **kwargs):
        # make sure the NO_FULL_REPAINT_ON_RESIZE style flag is set.
        kwargs['style'] = kwargs.setdefault('style', wx.NO_FULL_REPAINT_ON_RESIZE) | wx.NO_FULL_REPAINT_ON_RESIZE
        wx.Window.__init__(self, *args, **kwargs)
        wx.EVT_PAINT(self, self.OnPaint)
        wx.EVT_SIZE(self, self.OnSize)
        # OnSize called to make sure the buffer is initialized.
        # This might result in OnSize getting called twice on some
        # platforms at initialization, but little harm done.
        self.OnSize(None)

    def Draw(self, dc):
        ## just here as a place holder.
        ## This method should be over-ridden when subclassed
        pass

    def OnPaint(self, event):
        # All that is needed here is to draw the buffer to screen.
        # With USE_BUFFERED_DC the BufferedPaintDC blits the buffer for us on
        # destruction; otherwise we blit the stored bitmap explicitly.
        if USE_BUFFERED_DC:
            dc = wx.BufferedPaintDC(self, self._Buffer)
        else:
            dc = wx.PaintDC(self)
            dc.DrawBitmap(self._Buffer, 0, 0)

    def OnSize(self,event):
        # The Buffer init is done here, to make sure the buffer is always
        # the same size as the Window
        Size  = self.ClientSize
        # Make new offscreen bitmap: this bitmap will always have the
        # current drawing in it, so it can be used to save the image to
        # a file, or whatever.
        self._Buffer = wx.EmptyBitmap(*Size)
        self.UpdateDrawing()

    def SaveToFile(self, FileName, FileType=wx.BITMAP_TYPE_PNG):
        ## This will save the contents of the buffer
        ## to the specified file. See the wxWindows docs for
        ## wx.Bitmap::SaveFile for the details
        self._Buffer.SaveFile(FileName, FileType)

    def UpdateDrawing(self):
        """
        This would get called if the drawing needed to change, for whatever reason.

        The idea here is that the drawing is based on some data generated
        elsewhere in the system. If that data changes, the drawing needs to
        be updated.

        This code re-draws the buffer, then calls Update, which forces a paint event.
        """
        dc = wx.MemoryDC()
        dc.SelectObject(self._Buffer)
        self.Draw(dc)
        del dc # need to get rid of the MemoryDC before Update() is called.
        self.Refresh(eraseBackground=False)
        self.Update()
class DrawWindow(BufferedWindow):
    """Concrete BufferedWindow that renders the shapes stored in self.DrawData."""

    def __init__(self, *args, **kwargs):
        ## Any data the Draw() function needs must be initialized before
        ## calling BufferedWindow.__init__, as it will call the Draw
        ## function.
        self.DrawData = {}
        BufferedWindow.__init__(self, *args, **kwargs)

    def Draw(self, dc):
        """Render every shape list in self.DrawData onto the given DC."""
        dc.SetBackground( wx.Brush("White") )
        dc.Clear() # make sure you clear the bitmap!

        # Here's the actual drawing code.
        # DrawData maps a shape-kind key to a list of geometry tuples
        # (x, y, w, h) for rectangles/ellipses, point lists for polygons.
        for key, data in self.DrawData.items():
            if key == "Rectangles":
                dc.SetBrush(wx.BLUE_BRUSH)
                dc.SetPen(wx.Pen('VIOLET', 4))
                for r in data:
                    dc.DrawRectangle(*r)
            elif key == "Ellipses":
                dc.SetBrush(wx.Brush("GREEN YELLOW"))
                dc.SetPen(wx.Pen('CADET BLUE', 2))
                for r in data:
                    dc.DrawEllipse(*r)
            elif key == "Polygons":
                dc.SetBrush(wx.Brush("SALMON"))
                dc.SetPen(wx.Pen('VIOLET RED', 4))
                for r in data:
                    dc.DrawPolygon(r)
class TestFrame(wx.Frame):
    """Main demo frame: hosts a DrawWindow and menus to regenerate/save the drawing."""

    def __init__(self, parent=None):
        wx.Frame.__init__(self, parent,
                          size = (500,500),
                          title="Double Buffered Test",
                          style=wx.DEFAULT_FRAME_STYLE)

        ## Set up the MenuBar
        MenuBar = wx.MenuBar()

        file_menu = wx.Menu()

        item = file_menu.Append(wx.ID_EXIT, text="&Exit")
        self.Bind(wx.EVT_MENU, self.OnQuit, item)
        MenuBar.Append(file_menu, "&File")

        draw_menu = wx.Menu()
        item = draw_menu.Append(wx.ID_ANY, "&New Drawing","Update the Drawing Data")
        self.Bind(wx.EVT_MENU, self.NewDrawing, item)
        item = draw_menu.Append(wx.ID_ANY,'&Save Drawing\tAlt-I','')
        self.Bind(wx.EVT_MENU, self.SaveToFile, item)
        MenuBar.Append(draw_menu, "&Draw")

        self.SetMenuBar(MenuBar)
        self.Window = DrawWindow(self)
        self.Show()
        # Initialize a drawing -- it has to be done after Show() is called
        # so that the Window has the right size.
        self.NewDrawing()

    def OnQuit(self,event):
        self.Close(True)

    def NewDrawing(self, event=None):
        # Generate fresh random shapes and repaint the buffered window.
        self.Window.DrawData = self.MakeNewData()
        self.Window.UpdateDrawing()

    def SaveToFile(self,event):
        # Ask for a destination file and save the current buffer as a PNG.
        dlg = wx.FileDialog(self, "Choose a file name to save the image as a PNG to",
                            defaultDir = "",
                            defaultFile = "",
                            wildcard = "*.png",
                            style = wx.SAVE)
        if dlg.ShowModal() == wx.ID_OK:
            self.Window.SaveToFile(dlg.GetPath(), wx.BITMAP_TYPE_PNG)
        dlg.Destroy()

    def MakeNewData(self):
        ## This method makes some random data to draw things with.
        MaxX, MaxY = self.Window.GetClientSizeTuple()
        DrawData = {}

        # make some random rectangles
        l = []
        for i in range(5):
            w = random.randint(1,MaxX/2)
            h = random.randint(1,MaxY/2)
            x = random.randint(1,MaxX-w)
            y = random.randint(1,MaxY-h)
            l.append( (x,y,w,h) )
        DrawData["Rectangles"] = l

        # make some random ellipses
        l = []
        for i in range(5):
            w = random.randint(1,MaxX/2)
            h = random.randint(1,MaxY/2)
            x = random.randint(1,MaxX-w)
            y = random.randint(1,MaxY-h)
            l.append( (x,y,w,h) )
        DrawData["Ellipses"] = l

        # Polygons: each polygon is a list of 3-8 random points.
        l = []
        for i in range(3):
            points = []
            for j in range(random.randint(3,8)):
                point = (random.randint(1,MaxX),random.randint(1,MaxY))
                points.append(point)
            l.append(points)
        DrawData["Polygons"] = l

        return DrawData
class DemoApp(wx.App):
    def OnInit(self):
        # Create the main frame and make it the application's top window.
        self.frame = TestFrame()
        self.SetTopWindow(self.frame)
        return True
if __name__ == "__main__":
    # Script entry point: start the demo app and enter the wx event loop.
    app = DemoApp(0)
    app.MainLoop()
| [
"Chris.Barker@noaa.gov"
] | Chris.Barker@noaa.gov |
0bc120bf8b331bc9e44ccacbc92370eea37b9b4e | d051f3fe9fda31b72fa0ddce67aa1f4293c7c37c | /learn/jointgp_iterative_init.py | d662fca28c768464e32e9ea1777a8a90febc9376 | [
"BSD-3-Clause"
] | permissive | davmre/sigvisa | 4e535215b6623310d8f5da64258f6fa9a378f9fd | 91a1f163b8f3a258dfb78d88a07f2a11da41bd04 | refs/heads/master | 2021-03-24T10:24:52.307389 | 2018-01-05T19:33:23 | 2018-01-05T19:33:23 | 2,321,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,123 | py | import numpy as np
from sigvisa import Sigvisa
from sigvisa.source.event import get_event
from sigvisa.infer.coarse_to_fine_init import ModelSpec, EventRunSpec, TimeRangeRunSpec, do_coarse_to_fine, initialize_from, do_inference, initialize_sg
from sigvisa.infer.template_xc import fastxc
from sigvisa.graph.sigvisa_graph import SigvisaGraph, MAX_TRAVEL_TIME
from sigvisa.infer.run_mcmc import run_open_world_MH
from sigvisa.infer.mcmc_logger import MCMCLogger
from sigvisa.graph.region import Region
from sigvisa.source.event import Event
from sigvisa.treegp.gp import GPCov
import os, sys, traceback
import cPickle as pickle
from optparse import OptionParser
import itertools
from collections import defaultdict
from sigvisa.utils.geog import dist_km
from sigvisa.signals.common import Waveform
from sigvisa.utils.array import index_to_time, time_to_index, time_to_index_offset
import numpy.ma as ma
def score_event_correlations(sg, sta):
    """Score each event at station `sta` by how well its waveform cross-correlates
    with other events' waveforms.

    Returns (evscores, eid_atimes, xcs_by_eid_full) where
      evscores: {eid: sum of the event's two best pairwise correlations}
      eid_atimes: {eid: (wave node, first arriving phase)}
      xcs_by_eid_full: {eid: [(other eid, correlation), ...]}
    """
    # get the wn and first arriving phase for each event
    eid_atimes = {}
    for wn in sg.station_waves[sta]:
        arrivals = wn.arrivals()
        eids = [eid for (eid, phase) in arrivals if eid > 0]
        for eid in eids:
            # Sort this event's phases by arrival start index to find the earliest.
            sidxs = [(phase, wn.arrival_start_idx(eid, phase)) for (eeid, phase) in arrivals if eeid==eid]
            sorted_phases = sorted(sidxs, key = lambda x : x[1])
            first_arriving_phase = sorted_phases[0][0]
            eid_atimes[eid] = (wn, first_arriving_phase)

    # extract a signal for each event
    # The short window (no pre-arrival padding) is slid against the longer
    # padded window of the other event during cross-correlation.
    eid_signals = {}
    eid_signals_short = {}
    for eid, (wn, phase) in eid_atimes.items():
        eid_signals[eid] = wn.get_arrival_signal(eid, phase, 30.0, pre_s = 5.0)
        eid_signals_short[eid] = wn.get_arrival_signal(eid, phase, 20.0, pre_s = 0.0)

    # correlate all pairs of events
    xcpairs = {}
    xcs_by_eid = defaultdict(list)
    xcs_by_eid_full= defaultdict(list)
    for (eid1, eid2) in itertools.combinations(eid_signals.keys(), 2):
        # Correlate in both directions and keep the better score.
        xc1 = np.max(fastxc(eid_signals_short[eid1], eid_signals[eid2]))
        xc2 = np.max(fastxc(eid_signals_short[eid2], eid_signals[eid1]))
        xc = max(xc1, xc2)
        xcpairs[(eid1, eid2)] = xc
        xcs_by_eid[eid1].append(xc)
        xcs_by_eid[eid2].append(xc)
        xcs_by_eid_full[eid1].append((eid2, xc))
        xcs_by_eid_full[eid2].append((eid1, xc))

    # score each event by its best two correlations
    #evscores = dict([(eid, np.max(xcs_by_eid[eid])) for eid in xcs_by_eid.keys()])
    evscores = {}
    for eid, xcs in xcs_by_eid.items():
        best_xcs = sorted(xcs)[-2:]
        evscores[eid] = np.sum(best_xcs)

    return evscores, eid_atimes, xcs_by_eid_full
def select_prototype_events(sg, evscores, N=3, dist_threshold=25.0):
    """
    Choose a set of "prototype" event ids.

    - add the top N (3-5?) events with good correlation scores
    - then ensure that each event that has >3 neighbors within
    25km, has two prototypes in the set. so I loop over all events
    that have >2 25km neighbors, and if none of those neighbors
    is in the set, I add the highest-correlation one
    (potentially the event itself). repeat until the set is stable.
    """
    evs = dict([(eid, sg.get_event(eid)) for eid in evscores.keys()])
    eids_by_score_sorted = sorted(evscores.items(), key = lambda x : -x[1])

    # Seed the set with the N highest-scoring events.
    prototype_eids = set()
    for i in range(N):
        prototype_eids.add(eids_by_score_sorted[i][0])

    # Neighborhood: horizontal distance plus depth difference under the threshold.
    # Note each event is its own neighbor (d == 0).
    neighbors = {}
    for eid, ev in evs.items():
        my_neighbors = []
        for eid2, ev2 in evs.items():
            d = dist_km((ev.lon, ev.lat), (ev2.lon, ev2.lat)) + np.abs(ev.depth - ev2.depth)
            if d < dist_threshold:
                my_neighbors.append(eid2)
        neighbors[eid] = my_neighbors

    # Only events with more than 4 neighbors (including themselves) need coverage.
    substantial_neighbors = dict([(eid, eid_neighbors) for (eid, eid_neighbors) in neighbors.items() if len(eid_neighbors) > 4])

    # Keep adding the best-scoring uncovered neighbor until every substantial
    # neighborhood contains at least two prototypes.
    stable_set = False
    while not stable_set:
        stable_set = True
        for eid, eid_neighbors in substantial_neighbors.items():
            represented = np.sum([neid in prototype_eids for neid in eid_neighbors]) >= 2
            if not represented:
                neighbor_scores = [(eid, evscores[eid]) for eid in eid_neighbors if eid not in prototype_eids]
                best_neighbor, best_score = sorted(neighbor_scores, key = lambda x : -x[1])[0]
                prototype_eids.add(best_neighbor)
                print "eid %d not represented, adding neighbor %d with score %.2f" % (eid, best_neighbor, best_score)
                stable_set = False
                break

    return prototype_eids
def select_prototype_events2(sg, evscores, xcs_by_eid, N=3, dist_threshold=25.0, xc_threshold=0.4):
    """
    make sure every event that has a nearby correlated event, should have one in the prototype set
    """
    # NOTE(review): dist_threshold is accepted but unused here; selection is
    # purely correlation-based in this variant.
    evs = dict([(eid, sg.get_event(eid)) for eid in evscores.keys()])
    eids_by_score_sorted = sorted(evscores.items(), key = lambda x : -x[1])

    # Seed the set with the N highest-scoring events.
    prototype_eids = set()
    for i in range(N):
        prototype_eids.add(eids_by_score_sorted[i][0])

    # For each event, list the other events it correlates with above the
    # threshold, best-correlated first.
    correlating_eids = {}
    for eid, ev in evs.items():
        sorted_eids = sorted(xcs_by_eid[eid], key = lambda x : -x[1])
        t = [eid2 for (eid2, xc) in sorted_eids if xc > xc_threshold]
        if len(t) > 0:
            correlating_eids[eid] = t

    # Iterate until every event with correlated partners has at least one
    # partner in the prototype set; add the best-correlated missing partner.
    stable_set = False
    while not stable_set:
        stable_set = True
        for eid, correlating_list in correlating_eids.items():
            represented = np.sum([neid in prototype_eids for neid in correlating_list]) >= 1
            if not represented:
                best_neighbor = [neid for neid in correlating_list if neid not in prototype_eids][0]
                best_score = 0.0
                #neighbor_scores = [(eid, evscores[eid]) for eid in correlating_list if eid not in prototype_eids]
                #best_neighbor, best_score = sorted(neighbor_scores, key = lambda x : -x[1])[0]
                prototype_eids.add(best_neighbor)
                print "eid %d not represented, adding neighbor %d with score %.2f" % (eid, best_neighbor, best_score)
                stable_set = False
                break

    return prototype_eids
def construct_sg_for_eids(sg1, eids, eid_atimes, model_type="gp_joint", model_dict=None, **kwargs):
    """Build a new SigvisaGraph containing only the given events, with per-event
    signal windows, template parameters, and overlapping unassociated templates
    copied from the source graph `sg1`.

    eid_atimes maps eid -> (source wave node, first arriving phase), as
    produced by score_event_correlations.
    """
    new_sg = SigvisaGraph(template_model_type=model_type,
                          template_shape = sg1.template_shape,
                          wiggle_family=sg1.wiggle_family,
                          min_mb=sg1.min_mb,
                          wiggle_model_type=model_type,
                          model_dict=model_dict,
                          raw_signals=True,
                          base_srate=sg1.base_srate,
                          runids=sg1.runids,
                          hack_param_constraint=True,
                          phases=sg1.phases,
                          jointgp_param_run_init=sg1.jointgp_param_run_init,
                          hack_ttr_max=sg1.hack_ttr_max,
                          uatemplate_rate=sg1.uatemplate_rate,
                          skip_levels=sg1.skip_levels,
                          force_event_wn_matching=True,
                          **kwargs)

    # Extract a windowed signal around each event's first arrival and add it
    # as a wave node in the new graph. NaN samples are masked out.
    wave_pairs = []
    for eid in eids:
        wn, firstphase = eid_atimes[eid]
        s, stime, etime = wn.get_event_signal(eid, pre_s = 20.0, post_s = 50.0)
        mask = np.isnan(s)
        s_masked = ma.masked_array(s, mask)
        wave = Waveform(data=s_masked, stime=stime, sta=wn.sta, srate=wn.srate, filter_str=wn.filter_str, chan=wn.chan)
        sidx = time_to_index(stime, wn.st, wn.srate)
        eidx = time_to_index(etime, wn.st, wn.srate)
        #try:
        wn2 = new_sg.add_wave(wave, disable_conflict_checking=True)
        #except Exception as e:
        #    print e
        #    continue
        wave_pairs.append((eid, wn, wn2))

    # Re-create each event and copy its fitted template parameter values
    # from the old graph into the corresponding nodes of the new graph.
    for (eid, wn1, wn2) in wave_pairs:
        evnodes = new_sg.add_event(sg1.get_event(eid), eid=eid)

        phases = [phase for (eid2, phase) in wn1.arrivals() if eid2==eid]
        for phase in phases:
            tmvals = sg1.get_template_vals(eid, wn1.sta, phase, wn1.band, wn1.chan)
            new_tmnodes = new_sg.get_template_nodes(eid, wn2.sta, phase, wn2.band, wn2.chan)
            for param, val in tmvals.items():
                k,n = new_tmnodes[param]
                n.set_value(val, key=k)

    # copy over uatemplates
    # Other events' arrivals that overlap the new window become unassociated
    # templates so their energy is still explained.
    for (eid, wn1, wn2) in wave_pairs:
        for (eid2, phase) in wn1.arrivals():
            if eid2 == eid: continue
            tmvals, _ = wn1.get_template_params_for_arrival(eid2, phase)
            phase_sidx, phase_eidx = wn1.template_idx_window(eid=eid2, phase=phase,
                                                             pre_arrival_slack_s=0.0,
                                                             post_fade_slack_s=0.0)
            phase_stime = index_to_time(phase_sidx, wn1.st, wn1.srate)
            phase_etime = index_to_time(phase_eidx, wn1.st, wn1.srate)
            if phase_etime > wn2.st and phase_stime < wn2.et:
                new_sg.create_unassociated_template(wn2, tmvals["arrival_time"], initial_vals=tmvals, nosort=True)

    new_sg._topo_sort()
    new_sg.current_log_p()

    # Older pickled graphs may lack a seed attribute; bare except kept for
    # backward compatibility with such pickles.
    try:
        new_sg.seed = sg1.seed
    except:
        new_sg.seed = 0

    return new_sg
def optimize_prototypes(sg1, prototype_eids, eid_atimes, old_run_dir):
    """Build a joint-GP graph over the prototype events and refine their template
    parameters and hyperparameters with MCMC (no event/template birth-death moves)."""
    new_sg = construct_sg_for_eids(sg1, prototype_eids, eid_atimes)

    logger = MCMCLogger( write_template_vals=True, dump_interval_s=10.0, print_interval_s=10.0, write_gp_hparams=True, max_dumps=2, run_dir=old_run_dir+".prototypes")
    # Only template and hyperparameter moves: event locations and the set of
    # templates are held fixed during this alignment phase.
    run_open_world_MH(new_sg, steps=400,
                      enable_event_openworld=False,
                      enable_event_moves=False,
                      enable_phase_openworld=False,
                      enable_template_openworld=False,
                      enable_template_moves=True,
                      enable_hparam_moves=True,
                      special_mb_moves=False,
                      template_move_type="rw",
                      logger=logger)
    return new_sg
def fit_eids_from_prototype_model(sg_full, sg_prototype, sta, eids, eid_atimes, old_run_dir):
    """Train GPs from the optimized prototype graph, then fit the remaining
    events independently under those fixed GP models."""
    # Train a GP for each (param, band, chan, phase) joint model that has data.
    models = {}
    sg_prototype.current_log_p()
    for (param, band, chan, phase), (jgp, hnodes) in sg_prototype._joint_gpmodels[sta].items():
        gp = jgp.train_gp()
        if gp is None:
            continue
        models[(param, band, chan, phase)] = gp

    sg_indep = construct_sg_for_eids(sg_full, eids, eid_atimes, model_dict=models, model_type="dict", dummy_fallback=True)
    # Treat wavelet coefs as iid so events can be fit independently of each other.
    for sta, wns in sg_indep.station_waves.items():
        for wn in wns:
            wn.hack_wavelets_as_iid = True

    logger = MCMCLogger( write_template_vals=True, dump_interval_s=10.0, print_interval_s=10.0, write_gp_hparams=True, max_dumps=2, run_dir=old_run_dir+".align_indep")
    # Template-only moves; atime_xc disabled since alignment comes from the GPs.
    run_open_world_MH(sg_indep, steps=500,
                      enable_event_openworld=False,
                      enable_event_moves=False,
                      enable_phase_openworld=False,
                      enable_template_openworld=False,
                      enable_template_moves=True,
                      enable_hparam_moves=True,
                      special_mb_moves=False,
                      template_move_type="rw",
                      disable_moves=["atime_xc"],
                      logger=logger)

    return sg_indep
def repatriate_fits(sg_full, sg_single):
    """Copy fitted template parameters and noise models from the small working
    graph `sg_single` back into the corresponding nodes of `sg_full`."""
    for wns in sg_single.station_waves.values():
        for wn in wns:
            for eid, phase in wn.arrivals():
                if eid < 1:
                    # Skip unassociated templates; only real events are copied back.
                    continue
                fitted = sg_single.get_template_vals(eid, wn.sta, phase, wn.band, wn.chan)
                target_nodes = sg_full.get_template_nodes(eid, wn.sta, phase, wn.band, wn.chan)
                for param, value in fitted.items():
                    key, node = target_nodes[param]
                    node.set_value(value, key=key)
                # Also transfer the fitted noise model onto the matching wave node.
                target_wn = list(target_nodes["coda_decay"][1].children)[0]
                target_wn.nm_node.set_value(wn.nm)
def jointgp_iterative_align_init(sg, base_run_dir):
    """Initialize template fits for a single-station graph by (1) jointly
    optimizing a small set of highly-correlated "prototype" events, then
    (2) fitting all remaining events independently under GPs trained on the
    prototypes. Results are written back into `sg`, which is also returned."""
    # This procedure only supports graphs with exactly one station.
    assert(len(sg.station_waves.keys())==1)
    sta = sg.station_waves.keys()[0]

    evscores, eid_atimes, xcs_by_eid = score_event_correlations(sg, sta)
    #prototype_eids = select_prototype_events(sg, evscores)
    prototype_eids = select_prototype_events2(sg, evscores, xcs_by_eid)
    print "selected prototype eids", prototype_eids

    new_sg = optimize_prototypes(sg, prototype_eids, eid_atimes, base_run_dir)
    repatriate_fits(sg, new_sg)

    # Remaining (non-prototype) events are fit independently.
    indep_eids = [eid for eid in eid_atimes.keys() if eid not in prototype_eids]
    sg_indep = fit_eids_from_prototype_model(sg, new_sg, sta, indep_eids, eid_atimes, base_run_dir)
    repatriate_fits(sg, sg_indep)

    return sg
def main():
    """Load a pickled SigvisaGraph, run the iterative alignment init, and
    save the updated graph back to disk."""
    # NOTE(review): hard-coded paths from the original; parameterize if reused.
    run_dir = "/home/dmoore/python/sigvisa/logs/mcmc/01405"
    with open(os.path.join(run_dir, "step_000000", "pickle.sg"), 'rb') as f:
        sg4 = pickle.load(f)
    # Bug fix: jointgp_iterative_align_init requires base_run_dir as a second
    # positional argument (used to name the MCMC logger output directories);
    # the original call passed only the graph and raised a TypeError.
    jointgp_iterative_align_init(sg4, run_dir)
    with open("updated_sg4_indep.sg", "wb") as f:
        pickle.dump(sg4, f)
if __name__ == "__main__":
main()
| [
"dmoore@cs.berkeley.edu"
] | dmoore@cs.berkeley.edu |
20d258d6d3e757ca145e2f7d6037bb9ae863704e | 70ad4c421a791a12be7a42a3a943e849d1a0ac42 | /torchvision/io/image.py | 005c58e32cc734e55057a9fcb46ee331ee3f5c80 | [
"BSD-3-Clause",
"CC-BY-NC-4.0"
] | permissive | Hsuxu/vision | e78ea6bfbc8aa50c56573b467939e86df0138d07 | 75daeaf4b4d95570900f7a62c0a257678c188f04 | refs/heads/main | 2022-12-02T05:35:54.121664 | 2022-11-22T08:40:30 | 2022-11-22T08:40:30 | 215,186,338 | 1 | 0 | BSD-3-Clause | 2019-10-15T02:18:27 | 2019-10-15T02:18:27 | null | UTF-8 | Python | false | false | 9,574 | py | from enum import Enum
from warnings import warn
import torch
from ..extension import _load_library
from ..utils import _log_api_usage_once
try:
_load_library("image")
except (ImportError, OSError) as e:
warn(f"Failed to load image Python extension: {e}")
class ImageReadMode(Enum):
    """
    Support for various modes while reading images.

    Use ``ImageReadMode.UNCHANGED`` for loading the image as-is,
    ``ImageReadMode.GRAY`` for converting to grayscale,
    ``ImageReadMode.GRAY_ALPHA`` for grayscale with transparency,
    ``ImageReadMode.RGB`` for RGB and ``ImageReadMode.RGB_ALPHA`` for
    RGB with transparency.
    """

    # Values are passed straight to the C++ decoders (torch.ops.image.*),
    # so they must stay in sync with the native enum.
    UNCHANGED = 0  # load the image as-is
    GRAY = 1  # convert to grayscale
    GRAY_ALPHA = 2  # grayscale with transparency
    RGB = 3  # convert to RGB
    RGB_ALPHA = 4  # RGB with transparency
def read_file(path: str) -> torch.Tensor:
    """Read a file's raw bytes into a one-dimensional uint8 tensor.

    Args:
        path (str): the path to the file to be read

    Returns:
        data (Tensor)
    """
    # API-usage logging only happens in eager mode; TorchScript can't trace it.
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(read_file)
    return torch.ops.image.read_file(path)
def write_file(filename: str, data: torch.Tensor) -> None:
    """Write the contents of a one-dimensional uint8 tensor to a file.

    Args:
        filename (str): the path to the file to be written
        data (Tensor): the contents to be written to the output file
    """
    # API-usage logging only happens in eager mode; TorchScript can't trace it.
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(write_file)
    torch.ops.image.write_file(filename, data)
def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
    """Decode a PNG image into a 3-dimensional RGB or grayscale uint8 tensor,
    optionally converting it to the requested mode. Output values are in [0, 255].

    Args:
        input (Tensor[1]): a one dimensional uint8 tensor containing
            the raw bytes of the PNG image.
        mode (ImageReadMode): the read mode used for optionally
            converting the image. Default: ``ImageReadMode.UNCHANGED``.
            See `ImageReadMode` class for more information on various
            available modes.

    Returns:
        output (Tensor[image_channels, image_height, image_width])
    """
    # API-usage logging only happens in eager mode; TorchScript can't trace it.
    if not (torch.jit.is_scripting() or torch.jit.is_tracing()):
        _log_api_usage_once(decode_png)
    # The trailing False disables 16-bit output (allow_16_bits) in the C++ op.
    return torch.ops.image.decode_png(input, mode.value, False)
def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor:
"""
Takes an input tensor in CHW layout and returns a buffer with the contents
of its corresponding PNG file.
Args:
input (Tensor[channels, image_height, image_width]): int8 image tensor of
``c`` channels, where ``c`` must 3 or 1.
compression_level (int): Compression factor for the resulting file, it must be a number
between 0 and 9. Default: 6
Returns:
Tensor[1]: A one dimensional int8 tensor that contains the raw bytes of the
PNG file.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(encode_png)
output = torch.ops.image.encode_png(input, compression_level)
return output
def write_png(input: torch.Tensor, filename: str, compression_level: int = 6):
"""
Takes an input tensor in CHW layout (or HW in the case of grayscale images)
and saves it in a PNG file.
Args:
input (Tensor[channels, image_height, image_width]): int8 image tensor of
``c`` channels, where ``c`` must be 1 or 3.
filename (str): Path to save the image.
compression_level (int): Compression factor for the resulting file, it must be a number
between 0 and 9. Default: 6
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_png)
output = encode_png(input, compression_level)
write_file(filename, output)
def decode_jpeg(
input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED, device: str = "cpu"
) -> torch.Tensor:
"""
Decodes a JPEG image into a 3 dimensional RGB or grayscale Tensor.
Optionally converts the image to the desired format.
The values of the output tensor are uint8 between 0 and 255.
Args:
input (Tensor[1]): a one dimensional uint8 tensor containing
the raw bytes of the JPEG image. This tensor must be on CPU,
regardless of the ``device`` parameter.
mode (ImageReadMode): the read mode used for optionally
converting the image. The supported modes are: ``ImageReadMode.UNCHANGED``,
``ImageReadMode.GRAY`` and ``ImageReadMode.RGB``
Default: ``ImageReadMode.UNCHANGED``.
See ``ImageReadMode`` class for more information on various
available modes.
device (str or torch.device): The device on which the decoded image will
be stored. If a cuda device is specified, the image will be decoded
with `nvjpeg <https://developer.nvidia.com/nvjpeg>`_. This is only
supported for CUDA version >= 10.1
.. betastatus:: device parameter
.. warning::
There is a memory leak in the nvjpeg library for CUDA versions < 11.6.
Make sure to rely on CUDA 11.6 or above before using ``device="cuda"``.
Returns:
output (Tensor[image_channels, image_height, image_width])
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_jpeg)
device = torch.device(device)
if device.type == "cuda":
output = torch.ops.image.decode_jpeg_cuda(input, mode.value, device)
else:
output = torch.ops.image.decode_jpeg(input, mode.value)
return output
def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor:
"""
Takes an input tensor in CHW layout and returns a buffer with the contents
of its corresponding JPEG file.
Args:
input (Tensor[channels, image_height, image_width])): int8 image tensor of
``c`` channels, where ``c`` must be 1 or 3.
quality (int): Quality of the resulting JPEG file, it must be a number between
1 and 100. Default: 75
Returns:
output (Tensor[1]): A one dimensional int8 tensor that contains the raw bytes of the
JPEG file.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(encode_jpeg)
if quality < 1 or quality > 100:
raise ValueError("Image quality should be a positive number between 1 and 100")
output = torch.ops.image.encode_jpeg(input, quality)
return output
def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75):
"""
Takes an input tensor in CHW layout and saves it in a JPEG file.
Args:
input (Tensor[channels, image_height, image_width]): int8 image tensor of ``c``
channels, where ``c`` must be 1 or 3.
filename (str): Path to save the image.
quality (int): Quality of the resulting JPEG file, it must be a number
between 1 and 100. Default: 75
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(write_jpeg)
output = encode_jpeg(input, quality)
write_file(filename, output)
def decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
"""
Detects whether an image is a JPEG or PNG and performs the appropriate
operation to decode the image into a 3 dimensional RGB or grayscale Tensor.
Optionally converts the image to the desired format.
The values of the output tensor are uint8 in [0, 255].
Args:
input (Tensor): a one dimensional uint8 tensor containing the raw bytes of the
PNG or JPEG image.
mode (ImageReadMode): the read mode used for optionally converting the image.
Default: ``ImageReadMode.UNCHANGED``.
See ``ImageReadMode`` class for more information on various
available modes.
Returns:
output (Tensor[image_channels, image_height, image_width])
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(decode_image)
output = torch.ops.image.decode_image(input, mode.value)
return output
def read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
"""
Reads a JPEG or PNG image into a 3 dimensional RGB or grayscale Tensor.
Optionally converts the image to the desired format.
The values of the output tensor are uint8 in [0, 255].
Args:
path (str): path of the JPEG or PNG image.
mode (ImageReadMode): the read mode used for optionally converting the image.
Default: ``ImageReadMode.UNCHANGED``.
See ``ImageReadMode`` class for more information on various
available modes.
Returns:
output (Tensor[image_channels, image_height, image_width])
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(read_image)
data = read_file(path)
return decode_image(data, mode)
def _read_png_16(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:
data = read_file(path)
return torch.ops.image.decode_png(data, mode.value, True)
| [
"noreply@github.com"
] | Hsuxu.noreply@github.com |
d3d115752075a17430f9222d060599f649fa1271 | f42f04302b4c7ed34b6e079cc334c499b10d656c | /auditware/apps.py | d7f2cf032329f184f7b9b289216fa8a456661e0e | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | un33k/django-auditware | 5edc2159c9aee2aae7c6a2c3d04f9f843665ff49 | 7ea46195aade2caa464fbc9c5646f7565e87be11 | refs/heads/master | 2021-01-10T02:01:27.106266 | 2016-04-05T20:16:37 | 2016-04-05T20:16:37 | 43,097,203 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from django.apps import apps
from django.apps import AppConfig as DjangoAppConfig
from django.utils.translation import ugettext_lazy as _
class AppConfig(DjangoAppConfig):
"""
Configuration entry point for the auditware app
"""
label = name = 'auditware'
verbose_name = _("auditware app")
def ready(self):
"""
App is imported and ready, so bootstrap it.
"""
from .receivers import latch_to_signals
latch_to_signals()
| [
"val@neekware.com"
] | val@neekware.com |
1c2c3ec6f06be19a149ce546f8991519ff2e516e | cb559124f016b2139ec2e7bd764ee95c72e0c85a | /MainProject/mypost/admin.py | 01fb05bba7b0162ed94437aa5b7cf4115ebb3b50 | [] | no_license | samirpatil2000/RestFrameWork | cb519566871052328f507a863fb5617bba148589 | 776793ef214a0365a722463df5a9e92365847235 | refs/heads/master | 2022-10-19T04:54:09.886050 | 2020-06-09T03:25:41 | 2020-06-09T03:25:41 | 270,561,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from django.contrib import admin
# Register your models here.
from .models import Post , Category
admin.site.register(Post)
admin.site.register(Category) | [
"samirspatil742099@gmail.com"
] | samirspatil742099@gmail.com |
e11d91874bdc3936c206ac66d1d9bfea012f21ee | 4cacaaebab5db2f35e3fb08f5c8b5dc6cc807d29 | /C1_L1/timer_for_website.py | 2a992921912a6676d9a9424a8d0f1c7d767802f3 | [] | no_license | M0hamedGamal/NanoDgree | f667464080927da9daab3c55daa80e10d016edb1 | 839f6c8fc5219d08d31105061ce2decbe70d9400 | refs/heads/master | 2021-09-05T19:36:08.760757 | 2018-01-30T15:47:40 | 2018-01-30T15:47:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import time
import webbrowser
total_breaks = 3
break_count = 0
print("This Program Started On: " , time.ctime()) #Current time
while (break_count < total_breaks): #Loop For 3 Time
time.sleep(5) #To Wait 5 Sec
webbrowser.open("https://www.youtube.com/watch?v=YQHsXMglC9A")
break_count = break_count + 1
| [
"you@example.com"
] | you@example.com |
fc5c0f0bfd77a9d6cf3c51851e9abedbd69a7cfe | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/-745935208/PySide2/QtWidgets/QToolTip.py | 6b962af550e45f531cd6791dd3ea0b277a41b631 | [] | no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | # encoding: utf-8
# module PySide2.QtWidgets
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\PySide2\QtWidgets.pyd
# by generator 1.146
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import PySide2.QtGui as __PySide2_QtGui
import Shiboken as __Shiboken
class QToolTip(__Shiboken.Object):
# no doc
def font(self, *args, **kwargs): # real signature unknown
pass
def hideText(self, *args, **kwargs): # real signature unknown
pass
def isVisible(self, *args, **kwargs): # real signature unknown
pass
def palette(self, *args, **kwargs): # real signature unknown
pass
def setFont(self, *args, **kwargs): # real signature unknown
pass
def setPalette(self, *args, **kwargs): # real signature unknown
pass
def showText(self, *args, **kwargs): # real signature unknown
pass
def text(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
| [
"siddharthnatamai@gmail.com"
] | siddharthnatamai@gmail.com |
caa9b68f2388f1cebf0aed5cc468db051a5e68ae | f98e37d4dba35055905063596415aaedcfa59fa3 | /ExpenseReportSystemBE/components/secCheck/secCheckLogic.py | c8129142b343f1a5731932a7a03c86142e7f743a | [] | no_license | pyj4104/ExpenseReportSystemBE | 8c8b3fa1c02cab07bf4416ebc797c5c46c0c57cd | 67b30a342394d0df54729aa58df37d5c503592a4 | refs/heads/main | 2023-04-26T09:19:04.702310 | 2021-05-18T02:00:55 | 2021-05-18T02:00:55 | 343,976,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,109 | py | import time
class SecCodeSession:
userEmail: str
created: int
expires: int
def __init__(self, email: str):
self.userEmail = email
self.created = int(time.time())
self.expires = self.created + 600
class SecCodes:
secCodesDict: dict # security codes to user data
usrCodesDict: dict # user email to security codes data
def __init__(self):
self.secCodesDict = dict()
self.usrCodesDict = dict()
def initiateLogInProcedure(self, userEmail: str, token: str):
"""
Based on the generated token, creates double relationship from token to user and user to token.
Inputs: userEmail, token
"""
self.removeSecCode(userEmail=userEmail)
self.secCodesDict[token] = SecCodeSession(userEmail)
self.usrCodesDict[userEmail] = token
def retrieveUser(self, token: str) -> str:
"""
When either token is passed in, returns the user email.
Raises ValueError when either is empty or when both are passed in.
Inputs: token in str, userEmail in str
"""
return self.secCodesDict[token].userEmail
def isSecCodeIn(self, token: str) -> bool:
"""
Checks whether the security code is active or not.
Input: token with the length of 6
Output: True if the session is active. False if the session is not active
or the token is wrong
"""
if token not in self.secCodesDict:
return False
if int(time.time()) > self.secCodesDict[token].expires:
return False
return True
def removeSecCode(self, userEmail: str = None, token: str = None):
if userEmail and userEmail in self.usrCodesDict and not token:
token = self.usrCodesDict[userEmail]
elif not userEmail and token and token in self.secCodesDict:
userEmail = self.secCodesDict[token].userEmail
elif not userEmail and not token:
raise ValueError("Both fields cannot be empty")
self.__removeToken__(token)
self.__removeUserEmail__(userEmail)
def __removeToken__(self, token: str):
if token in self.secCodesDict:
del(self.secCodesDict[token])
def __removeUserEmail__(self, email: str):
if email in self.usrCodesDict:
del(self.usrCodesDict[email])
currentSecCodes = SecCodes()
| [
"pyj4104@hotmail.com"
] | pyj4104@hotmail.com |
32bbc1801f1410cb204d27de24ee4c38082ebf18 | e57f62ce463ae10e2a61bfee6682f92773d56520 | /simim/scenario.py | ed6f51c5e84375b5eb6d97440b074ae956fc0a1a | [
"MIT"
] | permissive | rffowler/simim | 7f5a0ac7df5abce8b13627ef3f838d4196fb657e | d5b9793ebb469fe903fdff0f98898b6f87a433b3 | refs/heads/master | 2020-05-18T19:34:46.965775 | 2019-04-24T13:58:08 | 2019-04-24T13:58:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,915 | py | """
scenario.py
Manages scenarios
"""
import pandas as pd
class Scenario():
def __init__(self, filename, factors):
self.data = pd.read_csv(filename)
if isinstance(factors, str):
factors = [factors]
# rename scenario cols with O_ or D_ prefixes as necessary
for column in [f for f in self.data.columns.values if not f.startswith("CUM_") and f not in ["GEOGRAPHY_CODE", "YEAR"]]:
if "O_" + column in factors:
self.data.rename({column: "O_" + column, "CUM_" + column: "CUM_O_" + column }, axis=1, inplace=True)
elif "D_" + column in factors:
self.data.rename({column: "D_" + column, "CUM_" + column: "CUM_D_" + column }, axis=1, inplace=True)
missing = [factor for factor in factors if factor not in self.data.columns]
# This doesnt allow for e.g. JOBS in scenario and a factor of JOBS_DISTWEIGHTED
# check scenario has no factors that aren't in model
# superfluous = [factor for factor in self.data.columns if factor not in factors and \
# not factor.startswith("CUM_") and \
# factor != "GEOGRAPHY_CODE" and \
# factor != "YEAR"]
# if superfluous:
# raise ValueError("ERROR: Factor(s) %s are in scenario but not a model factor, remove or add to model" % str(superfluous))
#print("Superfluous factors:", superfluous)
print("Available factors:", factors)
print("Scenario factors:", [f for f in self.data.columns.values if not f.startswith("CUM_") and f not in ["GEOGRAPHY_CODE", "YEAR"]])
print("Scenario timeline:", self.timeline())
print("Scenario geographies:", self.geographies())
# add columns for factors not in scenario
# TODO is this actually necessary?
for col in missing:
self.data[col] = 0
self.data["CUM_"+col] = 0
# validate
if "GEOGRAPHY_CODE" not in self.data.columns.values:
raise ValueError("Scenario definition must contain a GEOGRAPHY_CODE column")
if "YEAR" not in self.data.columns.values:
raise ValueError("Scenario definition must contain a YEAR column")
# work out factors #.remove(["GEOGRAPHY_CODE", "YEAR"])
self.factors = [f for f in self.data.columns.values if not f.startswith("CUM_") and f not in ["GEOGRAPHY_CODE", "YEAR"]]
self.current_scenario = None
self.current_time = None
def timeline(self):
return sorted(self.data.YEAR.unique())
def geographies(self):
return sorted(self.data.GEOGRAPHY_CODE.unique())
def update(self, year):
""" Returns new scenario if there is data for the given year, otherwise returns the current (cumulative) scenario """
self.current_time = year
if year in self.data.YEAR.unique():
print("Updating scenario")
self.current_scenario = self.data[self.data.YEAR==year]
return self.current_scenario
else:
print("Persisting existing scenario")
return self.current_scenario
def apply(self, dataset, year):
# if no scenario for a year, reuse the most recent (cumulative) figures
self.current_scenario = self.update(year)
# TODO we can probably get away with empty scenario?
# ensure there is a scenario
if self.current_scenario is None:
raise ValueError("Unable to find a scenario for %s" % year)
#print(most_recent_scenario.head())
dataset = dataset.merge(self.current_scenario.drop(self.factors, axis=1), how="left", left_on="D_GEOGRAPHY_CODE", right_on="GEOGRAPHY_CODE") \
.drop(["GEOGRAPHY_CODE", "YEAR"], axis=1).fillna(0)
for factor in self.factors:
#print(dataset.columns.values)
# skip constrained
if factor != "O_GEOGRAPHY_CODE" and factor != "D_GEOGRAPHY_CODE":
dataset["CHANGED_" + factor] = dataset[factor] + dataset["CUM_" + factor]
return dataset
| [
"a.p.smith@leeds.ac.uk"
] | a.p.smith@leeds.ac.uk |
297f090aa09340a17f20e473690f2254a6bbf410 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5630113748090880_0/Python/cih187/round1AproblemB.py | 7a5b2198cbb47f906cd2f88be65dc5710385d914 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | def round1a_b(path):
with open(path, mode='r') as file:
lines = file.readlines()
test_case_number = 0
line_number = 1
while test_case_number < int(lines[0]):
test_case_number += 1
# variables for this problem
n = int(lines[line_number].split()[0])
result = []
matrice = []
for i in range(2*n - 1):
matrice.append(lines[line_number + i + 1].split())
matrice_flat = [int(item) for sublist in matrice for item in sublist]
for i in matrice_flat:
count = 0
for j in matrice_flat:
if i == j:
count += 1
if count % 2 == 1:
result.append(i)
matrice_flat = remove_values_from_list(matrice_flat, i)
result.sort()
# end of problem logic
my_result = ""
for item in result:
my_result += str(item) + " "
print("Case #{}: {}".format(test_case_number, my_result))
line_number += 2*n
def remove_values_from_list(my_list, x):
return [value for value in my_list if value != x]
round1a_b('file.txt')
exit() | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
1a4ef0108cb6f9dc1429db6da776aba96ff6f586 | fec622bc34957dd4d99f1ef0f23608eeb40ed609 | /internal/notes/builtin-SAVE/packages/autogen/package.py | 4198ab1589787f6d30b2af2fc527239452709c9b | [] | no_license | scottkwarren/hpctest | 4d5ff18d00c5eb9b7da481c9aa0824aa7082062f | a8bb99b5f601a5d088ae56ab9886ab8079c081ba | refs/heads/master | 2022-09-07T19:36:18.544795 | 2022-08-18T20:26:42 | 2022-08-18T20:26:42 | 100,518,800 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Autogen(AutotoolsPackage):
"""AutoGen is a tool designed to simplify the creation and maintenance of
programs that contain large amounts of repetitious text. It is especially
valuable in programs that have several blocks of text that must be kept
synchronized."""
homepage = "https://www.gnu.org/software/autogen/index.html"
url = "https://ftp.gnu.org/gnu/autogen/rel5.18.12/autogen-5.18.12.tar.gz"
list_url = "https://ftp.gnu.org/gnu/autogen"
list_depth = 1
version('5.18.12', '551d15ccbf5b5fc5658da375d5003389')
variant('xml', default=True, description='Enable XML support')
depends_on('pkg-config@0.9.0:', type='build')
depends_on('guile@1.8:2.0')
depends_on('libxml2', when='+xml')
def configure_args(self):
spec = self.spec
args = [
# `make check` fails without this
# Adding a gettext dependency does not help
'--disable-nls',
]
if '+xml' in spec:
args.append('--with-libxml2={0}'.format(spec['libxml2'].prefix))
else:
args.append('--without-libxml2')
return args
| [
"scott@rice.edu"
] | scott@rice.edu |
49de452b8c12cc429ec8cedf8b4759d6f544e7b1 | 9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7 | /submission - lab9/set 2/JOHN J WELSH_19371_assignsubmission_file_lab9/lab9/P2.py | 77ac0756b0f7c7014104a22a46737f2475f62f6a | [] | no_license | sendurr/spring-grading | 90dfdced6327ddfb5c311ae8f42ae1a582768b63 | 2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54 | refs/heads/master | 2020-04-15T17:42:10.781884 | 2016-08-29T20:38:17 | 2016-08-29T20:38:17 | 50,084,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | class Simple:
def __init__(self,i):
self.i = i
def double(self):
i = self.i
i += i
self.i = i
s1 = Simple(4)
for i in range(4):
s1.double()
print s1.i
s2 = Simple('Hello')
s2.double(); s2.double()
print s2.i
s2.i = 100
print s2.i
| [
"sendurr@hotmail.com"
] | sendurr@hotmail.com |
77310ee0bff08f7d081861ed92da5079102f53e9 | 5e2dddce9c67d5b54d203776acd38d425dbd3398 | /spacy/tests/regression/test_issue1253.py | 10829768ec1bd99db43724113a00a0f41ae8b381 | [
"MIT"
] | permissive | yuxuan2015/spacy_zh_model | 8164a608b825844e9c58d946dcc8698853075e37 | e89e00497ab3dad0dd034933e25bc2c3f7888737 | refs/heads/master | 2020-05-15T11:07:52.906139 | 2019-08-27T08:28:11 | 2019-08-27T08:28:11 | 182,213,671 | 1 | 0 | null | 2019-04-19T06:27:18 | 2019-04-19T06:27:17 | null | UTF-8 | Python | false | false | 429 | py | from __future__ import unicode_literals
import pytest
import spacy
def ss(tt):
for i in range(len(tt)-1):
for j in range(i+1, len(tt)):
tt[i:j].root
@pytest.mark.models('en')
def test_access_parse_for_merged():
nlp = spacy.load('en_core_web_sm')
t_t = nlp.tokenizer("Highly rated - I'll definitely")
nlp.tagger(t_t)
nlp.parser(t_t)
nlp.parser(t_t)
ss(t_t)
| [
"yuxuan2015@example.com"
] | yuxuan2015@example.com |
5797e3f5a9e0d306f55290ff2b0c26ced31d0a12 | 13625dd7375297b066ccd69d6c229e9a1535c9b2 | /savings/migrations/0016_auto_20201221_0947.py | 75df210829866c486eea392b0c5878838082f722 | [] | no_license | rajman01/investfy | 9d5fa3ed7593ec13db575016fc839664630318af | a4c8bf16ba7a1ce38d1370e4779284a4d6426733 | refs/heads/main | 2023-09-01T19:10:18.411861 | 2023-08-28T02:30:23 | 2023-08-28T02:30:23 | 320,408,218 | 0 | 1 | null | 2023-08-28T02:30:24 | 2020-12-10T22:46:03 | null | UTF-8 | Python | false | false | 3,724 | py | # Generated by Django 3.1.3 on 2020-12-21 08:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('wallet', '0009_wallet_wallet_id'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('savings', '0015_auto_20201218_2218'),
]
operations = [
migrations.RenameField(
model_name='targetsave',
old_name='active',
new_name='joint',
),
migrations.RenameField(
model_name='targetsave',
old_name='autosave_amount',
new_name='targeted_amount',
),
migrations.RemoveField(
model_name='targetsave',
name='autosave',
),
migrations.RemoveField(
model_name='targetsave',
name='day_interval',
),
migrations.RemoveField(
model_name='targetsave',
name='last_saved',
),
migrations.RemoveField(
model_name='targetsave',
name='targeted_saving',
),
migrations.AddField(
model_name='targetsave',
name='date_created',
field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='targetsave',
name='description',
field=models.CharField(blank=True, max_length=128, null=True),
),
migrations.AddField(
model_name='targetsave',
name='members',
field=models.ManyToManyField(blank=True, related_name='joint_target_savings', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='targetsave',
name='name',
field=models.CharField(blank=True, max_length=64, null=True),
),
migrations.AlterField(
model_name='jointsavetransaction',
name='joint_save',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transactions', to='savings.jointsave'),
),
migrations.AlterField(
model_name='targetsave',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='target_savings', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='targetsave',
name='wallet',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='target_savings', to='wallet.wallet'),
),
migrations.AlterField(
model_name='targetsavingtransaction',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='target_saving_transactions', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='TargetSaveAutoSave',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=False)),
('day_interval', models.IntegerField(blank=True, null=True)),
('autosave_amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('last_saved', models.DateField(blank=True, null=True)),
('target_save', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='autosave', to='savings.targetsave')),
],
),
]
| [
"alameenraji31@gmail.com"
] | alameenraji31@gmail.com |
0ba67e81120fca81ce3b369ed3fe9c44049b4f7e | d7d7e13a91d0ded303cbb86a3e6c0f5071b6da74 | /metafeatures/aux/discovery.py | 2bdf474dd6e7817659a50f84327b1ef9cdbdd0fd | [] | no_license | fhpinto/systematic-metafeatures | 40c6f5f6a7a1f775918e27820c72962e5436a010 | 8d646ca5fa67efdf3caea2ca3656ef63c6c4d4d9 | refs/heads/master | 2020-04-06T06:55:26.789685 | 2016-08-26T13:36:18 | 2016-08-26T13:36:18 | 64,952,631 | 4 | 2 | null | 2016-08-19T12:38:59 | 2016-08-04T17:04:55 | Python | UTF-8 | Python | false | false | 1,036 | py | import importlib
import inspect
import pkgutil
import sys
def discover_components(package, directory, base_class):
"""Discover implementations of a base class in a package.
Parameters
----------
package : str
Package name
directory : str
Directory of the package to which is inspected.
base_class : object
Base class of objects to discover
Returns
-------
list : all subclasses of `base_class` inside `directory`
"""
components = list()
for module_loader, module_name, ispkg in pkgutil.iter_modules(
[directory]):
full_module_name = "%s.%s" % (package, module_name)
if full_module_name not in sys.modules and not ispkg:
module = importlib.import_module(full_module_name)
for member_name, obj in inspect.getmembers(module):
if inspect.isclass(obj) and issubclass(base_class, obj):
classifier = obj
components.append(classifier)
return components | [
"feurerm@informatik.uni-freiburg.de"
] | feurerm@informatik.uni-freiburg.de |
339a1383bd1ac9811bc5bc6e441d74ad13396474 | fffb732290af97687ea3221ce4a6ce4d95640aff | /courses/w13_DL_Digits/backup/his08.py | e32a677773dfe6a9df4db18b13610aa20aa2a4f2 | [] | no_license | NamWoo/self_driving_car | 851de73ae909639e03756eea4d49ab663447fc19 | cd5c1142c9e543e607ca9dc258f689de6879d207 | refs/heads/master | 2021-07-24T19:51:54.459485 | 2021-07-06T13:58:19 | 2021-07-06T13:58:19 | 186,267,543 | 9 | 7 | null | null | null | null | UTF-8 | Python | false | false | 23,487 | py | 739 [shortcut]
740 from=-3
741 activation=linear
742 [convolutional]
743 batch_normalize=1
744 filters=64
745 size=1
746 stride=1
747 pad=1
748 activation=leaky
749 [convolutional]
750 batch_normalize=1
751 filters=128
752 size=3
753 stride=1
754 pad=1
755 activation=leaky
756 [shortcut]
757 from=-3
758 activation=linear
759 # Downsample
760 [convolutional]
761 batch_normalize=1
762 filters=256
763 size=3
764 stride=2
765 pad=1
766 activation=leaky
767 [convolutional]
768 batch_normalize=1
769 filters=128
770 size=1
771 stride=1
772 pad=1
773 activation=leaky
774 [convolutional]
775 batch_normalize=1
776 filters=256
777 size=3
778 stride=1
779 pad=1
780 activation=leaky
781 [shortcut]
782 from=-3
783 activation=linear
784 [convolutional]
785 batch_normalize=1
786 filters=128
787 size=1
788 stride=1
789 pad=1
790 activation=leaky
791 [convolutional]
792 batch_normalize=1
793 filters=256
794 size=3
795 stride=1
796 pad=1
797 activation=leaky
798 [shortcut]
799 from=-3
800 activation=linear
801 [convolutional]
802 batch_normalize=1
803 filters=128
804 size=1
805 stride=1
806 pad=1
807 activation=leaky
808 [convolutional]
809 batch_normalize=1
810 filters=256
811 size=3
812 stride=1
813 pad=1
814 activation=leaky
815 [shortcut]
816 from=-3
817 activation=linear
818 [convolutional]
819 batch_normalize=1
820 filters=128
821 size=1
822 stride=1
823 pad=1
824 activation=leaky
825 [convolutional]
826 batch_normalize=1
827 filters=256
828 size=3
829 stride=1
830 pad=1
831 activation=leaky
832 [shortcut]
833 from=-3
834 activation=linear
835 [convolutional]
836 batch_normalize=1
837 filters=128
838 size=1
839 stride=1
840 pad=1
841 activation=leaky
842 [convolutional]
843 batch_normalize=1
844 filters=256
845 size=3
846 stride=1
847 pad=1
848 activation=leaky
849 [shortcut]
850 from=-3
851 activation=linear
852 [convolutional]
853 batch_normalize=1
854 filters=128
855 size=1
856 stride=1
857 pad=1
858 activation=leaky
859 [convolutional]
860 batch_normalize=1
861 filters=256
862 size=3
863 stride=1
864 pad=1
865 activation=leaky
866 [shortcut]
867 from=-3
868 activation=linear
869 [convolutional]
870 batch_normalize=1
871 filters=128
872 size=1
873 stride=1
874 pad=1
875 activation=leaky
876 [convolutional]
877 batch_normalize=1
878 filters=256
879 size=3
880 stride=1
881 pad=1
882 activation=leaky
883 [shortcut]
884 from=-3
885 activation=linear
886 [convolutional]
887 batch_normalize=1
888 filters=128
889 size=1
890 stride=1
891 pad=1
892 activation=leaky
893 [convolutional]
894 batch_normalize=1
895 filters=256
896 size=3
897 stride=1
898 pad=1
899 activation=leaky
900 [shortcut]
901 from=-3
902 activation=linear
903 # Downsample
904 [convolutional]
905 batch_normalize=1
906 filters=512
907 size=3
908 stride=2
909 pad=1
910 activation=leaky
911 [convolutional]
912 batch_normalize=1
913 filters=256
914 size=1
915 stride=1
916 pad=1
917 activation=leaky
918 [convolutional]
919 batch_normalize=1
920 filters=512
921 size=3
922 stride=1
923 pad=1
924 activation=leaky
925 [shortcut]
926 from=-3
927 activation=linear
928 [convolutional]
929 batch_normalize=1
930 filters=256
931 size=1
932 stride=1
933 pad=1
934 activation=leaky
935 [convolutional]
936 batch_normalize=1
937 filters=512
938 size=3
939 stride=1
940 pad=1
941 activation=leaky
942 [shortcut]
943 from=-3
944 activation=linear
945 [convolutional]
946 batch_normalize=1
947 filters=256
948 size=1
949 stride=1
950 pad=1
951 activation=leaky
952 [convolutional]
953 batch_normalize=1
954 filters=512
955 size=3
956 stride=1
957 pad=1
958 activation=leaky
959 [shortcut]
960 from=-3
961 activation=linear
962 [convolutional]
963 batch_normalize=1
964 filters=256
965 size=1
966 stride=1
967 pad=1
968 activation=leaky
969 [convolutional]
970 batch_normalize=1
971 filters=512
972 size=3
973 stride=1
974 pad=1
975 activation=leaky
976 [shortcut]
977 from=-3
978 activation=linear
979 [convolutional]
980 batch_normalize=1
981 filters=256
982 size=1
983 stride=1
984 pad=1
985 activation=leaky
986 [convolutional]
987 batch_normalize=1
988 filters=512
989 size=3
990 stride=1
991 pad=1
992 activation=leaky
993 [shortcut]
994 from=-3
995 activation=linear
996 [convolutional]
997 batch_normalize=1
998 filters=256
999 size=1
1000 stride=1
1001 pad=1
1002 activation=leaky
1003 [convolutional]
1004 batch_normalize=1
1005 filters=512
1006 size=3
1007 stride=1
1008 pad=1
1009 activation=leaky
1010 [shortcut]
1011 from=-3
1012 activation=linear
1013 [convolutional]
1014 batch_normalize=1
1015 filters=256
1016 size=1
1017 stride=1
1018 pad=1
1019 activation=leaky
1020 [convolutional]
1021 batch_normalize=1
1022 filters=512
1023 size=3
1024 stride=1
1025 pad=1
1026 activation=leaky
1027 [shortcut]
1028 from=-3
1029 activation=linear
1030 [convolutional]
1031 batch_normalize=1
1032 filters=256
1033 size=1
1034 stride=1
1035 pad=1
1036 activation=leaky
1037 [convolutional]
1038 batch_normalize=1
1039 filters=512
1040 size=3
1041 stride=1
1042 pad=1
1043 activation=leaky
1044 [shortcut]
1045 from=-3
1046 activation=linear
1047 # Downsample
1048 [convolutional]
1049 batch_normalize=1
1050 filters=1024
1051 size=3
1052 stride=2
1053 pad=1
1054 activation=leaky
1055 [convolutional]
1056 batch_normalize=1
1057 filters=512
1058 size=1
1059 stride=1
1060 pad=1
1061 activation=leaky
1062 [convolutional]
1063 batch_normalize=1
1064 filters=1024
1065 size=3
1066 stride=1
1067 pad=1
1068 activation=leaky
1069 [shortcut]
1070 from=-3
1071 activation=linear
1072 [convolutional]
1073 batch_normalize=1
1074 filters=512
1075 size=1
1076 stride=1
1077 pad=1
1078 activation=leaky
1079 [convolutional]
1080 batch_normalize=1
1081 filters=1024
1082 size=3
1083 stride=1
1084 pad=1
1085 activation=leaky
1086 [shortcut]
1087 from=-3
1088 activation=linear
1089 [convolutional]
1090 batch_normalize=1
1091 filters=512
1092 size=1
1093 stride=1
1094 pad=1
1095 activation=leaky
1096 [convolutional]
1097 batch_normalize=1
1098 filters=1024
1099 size=3
1100 stride=1
1101 pad=1
1102 activation=leaky
1103 [shortcut]
1104 from=-3
1105 activation=linear
1106 [convolutional]
1107 batch_normalize=1
1108 filters=512
1109 size=1
1110 stride=1
1111 pad=1
1112 activation=leaky
1113 [convolutional]
1114 batch_normalize=1
1115 filters=1024
1116 size=3
1117 stride=1
1118 pad=1
1119 activation=leaky
1120 [shortcut]
1121 from=-3
1122 activation=linear
1123 ######################
1124 [convolutional]
1125 batch_normalize=1
1126 filters=512
1127 size=1
1128 stride=1
1129 pad=1
1130 activation=leaky
1131 [convolutional]
1132 batch_normalize=1
1133 size=3
1134 stride=1
1135 pad=1
1136 filters=1024
1137 activation=leaky
1138 [convolutional]
1139 batch_normalize=1
1140 filters=512
1141 size=1
1142 stride=1
1143 pad=1
1144 activation=leaky
1145 [convolutional]
1146 batch_normalize=1
1147 size=3
1148 stride=1
1149 pad=1
1150 filters=1024
1151 activation=leaky
1152 [convolutional]
1153 batch_normalize=1
1154 filters=512
1155 size=1
1156 stride=1
1157 pad=1
1158 activation=leaky
1159 [convolutional]
1160 batch_normalize=1
1161 size=3
1162 stride=1
1163 pad=1
1164 filters=1024
1165 activation=leaky
1166 [convolutional]
1167 size=1
1168 stride=1
1169 pad=1
1170 filters=255
1171 activation=linear
1172 [yolo]
1173 mask = 6,7,8
1174 anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
1175 classes=80
1176 num=9
1177 jitter=.3
1178 ignore_thresh = .7
1179 truth_thresh = 1
1180 random=1
1181 [route]
1182 layers = -4
1183 [convolutional]
1184 batch_normalize=1
1185 filters=256
1186 size=1
1187 stride=1
1188 pad=1
1189 activation=leaky
1190 [upsample]
1191 stride=2
1192 [route]
1193 layers = -1, 61
1194 [convolutional]
1195 batch_normalize=1
1196 filters=256
1197 size=1
1198 stride=1
1199 pad=1
1200 activation=leaky
1201 [convolutional]
1202 batch_normalize=1
1203 size=3
1204 stride=1
1205 pad=1
1206 filters=512
1207 activation=leaky
1208 [convolutional]
1209 batch_normalize=1
1210 filters=256
1211 size=1
1212 stride=1
1213 pad=1
1214 activation=leaky
1215 [convolutional]
1216 batch_normalize=1
1217 size=3
1218 stride=1
1219 pad=1
1220 filters=512
1221 activation=leaky
1222 [convolutional]
1223 batch_normalize=1
1224 filters=256
1225 size=1
1226 stride=1
1227 pad=1
1228 activation=leaky
1229 [convolutional]
1230 batch_normalize=1
1231 size=3
1232 stride=1
1233 pad=1
1234 filters=512
1235 activation=leaky
1236 [convolutional]
1237 size=1
1238 stride=1
1239 pad=1
1240 filters=255
1241 activation=linear
1242 [yolo]
1243 mask = 3,4,5
1244 anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
1245 classes=80
1246 num=9
1247 jitter=.3
1248 ignore_thresh = .7
1249 truth_thresh = 1
1250 random=1
1251 [route]
1252 layers = -4
1253 [convolutional]
1254 batch_normalize=1
1255 filters=128
1256 size=1
1257 stride=1
1258 pad=1
1259 activation=leaky
1260 [upsample]
1261 stride=2
1262 [route]
1263 layers = -1, 36
1264 [convolutional]
1265 batch_normalize=1
1266 filters=128
1267 size=1
1268 stride=1
1269 pad=1
1270 activation=leaky
1271 [convolutional]
1272 batch_normalize=1
1273 size=3
1274 stride=1
1275 pad=1
1276 filters=256
1277 activation=leaky
1278 [convolutional]
1279 batch_normalize=1
1280 filters=128
1281 size=1
1282 stride=1
1283 pad=1
1284 activation=leaky
1285 [convolutional]
1286 batch_normalize=1
1287 size=3
1288 stride=1
1289 pad=1
1290 filters=256
1291 activation=leaky
1292 [convolutional]
1293 batch_normalize=1
1294 filters=128
1295 size=1
1296 stride=1
1297 pad=1
1298 activation=leaky
1299 [convolutional]
1300 batch_normalize=1
1301 size=3
1302 stride=1
1303 pad=1
1304 filters=256
1305 activation=leaky
1306 [convolutional]
1307 size=1
1308 stride=1
1309 pad=1
1310 filters=255
1311 activation=linear
1312 [yolo]
1313 mask = 0,1,2
1314 anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
1315 classes=80
1316 num=9
1317 jitter=.3
1318 ignore_thresh = .7
1319 truth_thresh = 1
1320 random=1./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1321 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1322 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1323 cd ..
1324 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1325 make
1326 make clean
1327 vi Makefile
1328 make
1329 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1330 vi Makefile
1331 cd cfg/
1332 ls
1333 cd ..
1334 ls
1335 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1336 cd ls
1337 ls
1338 cd cfg/
1339 ls
1340 vi yolov3.cfg
1341 cd ..
1342 make clean && makr
1343 make clean && make
1344 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1345 cat /etc/nv_tegra_release
1346 sha1sum -c /etc/nv_tegra_release
1347 vi yolov3.cfg
1348 vi Makefile
1349 make clean && make
1350 vi Makefile
1351 make clean && make
1352 vi Makefile
1353 make clean && make
1354 vi Makefile
1355 make clean && make
1356 vi Makefile
1357 make clean && make
1358 vi Makefile
1359 make clean && make
1360 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1361 ls
1362 cd data/
1363 ls
1364 ./darknet detector test cfg/voc.data cfg/yolo-voc.cfg yolo-voc.weights data/dog.jpg
1365 cd ..
1366 cd yolov3/
1367 ./darknet detector test cfg/volo cfg/yolo-voc.cfg yolo-voc.weights data/dog.jp
1368 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights data/dog.jpg
1369 history
1370 history >> ~/Desktop/his1.py
1371 cd ..
1372 vi his1.py
1373 sudo shutdown -h now
1374 sudo ./jetson_clocks.sh
1375 ll
1376 ./tegrastats
1377 cd ~/Desktop/ls
1378 cd ~/Desktop/
1379 ls
1380 cd OpenCV_Jetson_nano/
1381 l
1382 cd darknet/
1383 l
1384 make
1385 wget https://pjreddie.com/media/files/yolov3.weights
1386 ./darknet detect cfg/yolov3.cfg yolov3.weights data/dog.jpg
1387 ls
1388 ./darknet detect cfg/yolov3.cfg yolov3.weights -c 0
1389 wget https://pjreddie.com/media/files/yolov3-tiny.weights
1390 ./darknet detect cfg/yolov3-tiny.cfg yolov3-tiny.weights data/dog.jpg
1391 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights
1392 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1393 make
1394 vi Makefile
1395 make
1396 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1397 vi Makefile
1398 ls
1399 history
1400 ;s
1401 ls
1402 cd ..
1403 ls
1404 vi Makefile
1405 ls
1406 vi darknet_fp16_yolov3_320.sh
1407 vi INSTALL.sh
1408 vi darknet
1409 cd darknet/
1410 l
1411 vi build.sh
1412 cd vb
1413 cd b
1414 cd build/
1415 l
1416 cd darknet/
1417 l
1418 cd x64/
1419 l
1420 cd ..
1421 cd ../..
1422 ./darknet detector demo cfg/coco.data cfg/yolov3.cfg yolov3.weights -c 1
1423 ls
1424 ./darknet detector demo cfg/coco.data cfg/yolov3-tiny.cfg yolov3-tiny.weights -c 1
1425 sudo shutdown -h now
1426 l
1427 shitory
1428 history
1429 history >> ./Desktop/hisjetson01.py
1430 sudo apt-add-repository ppa:sylvain-pineau/kazam
1431 sudo apt-get update
1432 sudo apt-get install kazam
1433 kazam
1434 cd ~/Desktop/
1435 ls
1436 cd OpenCV_Jetson_nano/
1437 ls
1438 ll
1439 v412-ct1-V
1440 v412-ct1 -V
1441 cd ~/dE
1442 cd ~/Desktop/
1443 vi w12d04_gym01.py
1444 sudo apt-get install libav-tools
1445 history
1446 sudo -H pip install keras==1.0.2
1447 sudo -H pup install gym==0.9.1
1448 sudo -H pip install gym==0.9.1
1449 cd ~/Desktop/
1450 ks
1451 ls
1452 vi w12d04_gym01.py
1453 python w12d04_gym01.py
1454 vi w12d04_gym01.py
1455 python3 w12d04_gym01.py
1456 python2 w12d04_gym01.py
1457 python w12d04_gym01.py
1458 python2 w12d04_gym01.py
1459 python2
1460 python w12d04_gym01.py
1461 python w12d04_gym01.py
1462 cat /etc/nv_tegra_release
1463 head -n 1 /etc/nv_tegra_release
1464 cd ~/Desktop/
1465 ls
1466 git clone https://github.com/jetsonhacks/installROSTX1
1467 cd installROSTX1/
1468 l
1469 ./installROS.sh
1470 sudo ./installROS.sh
1471 vi ./installROS.sh
1472 c ..
1473 cd ..
1474 vi bong1.sh
1475 chmod u+x bong1.sh
1476 ./bong1.sh
1477 vi bong1.sh
1478 cd ~/catkin_ws/]
1479 ls
1480 vi bong1.sh
1481 ls
1482 vi darknet_fp16_yolov3_320.sh
1483 ./darknet_fp16_yolov3_320.sh
1484 vi darknet_fp16_yolov3_320.sh
1485 cd ~/
1486 mkdir catkin_ws
1487 cd catkin_ws/
1488 mkdir src
1489 ls
1490 cd src/
1491 ls
1492 $ cd ~/catkin_ws/src
1493 $ git clone https://github.com/dusty-nv/ros_deep_learning
1494 $ cd ../
1495 git clone https://github.com/dusty-nv/ros_deep_learning
1496 ls
1497 cd ..
1498 catkin_make
1499 vi ~/.bashrc
1500 cd ~/catkin_ws/
1501 catkin_make
1502 sudo apt-get update
1503 sudo apt-get install libglew-dev glew-utils libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libglib2.0-dev
1504 sudo apt-get install cmake
1505 cd catkin_ws/
1506 catkin_make
1507 cd src/
1508 ls
1509 cd ros_deep_learning/
1510 ls
1511 cd ..
1512 git clonehttps://github.com/dusty-nv/jetson-utils
1513 git clone https://github.com/dusty-nv/jetson-utils
1514 cd ..
1515 catkin_make
1516 cd src/
1517 ls
1518 cd ros_deep_learning/
1519 ls
1520 vi CMakeLists.txt
1521 cd ../..
1522 ls
1523 catkin_init_workspace
1524 ls
1525 catkin_make
1526 cd ~/Desktop/
1527 ls
1528 git clone https://github.com/jetsonhacks
1529 git clone https://github.com/jetsonhacks/installROSTX1
1530 ls
1531 cd installROSTX1/
1532 ls
1533 ./setupCatkinWorkspace.sh
1534 cd ~/catkin_ws/
1535 ls
1536 cd src/
1537 l
1538 rm -rf jetson-utils ros_deep_learning/
1539 ls
1540 catkin_init_workspace
1541 cd ..
1542 catkin_make
1543 grep -q -F ' ROS_MASTER_URI' ~/.bashrc || echo 'export ROS_MASTER_URI=http://localhost:11311' | tee -a ~/.bashrc
1544 grep -q -F ' ROS_IP' ~/.bashrc || echo "export ROS_IP=$(hostname -I)" | tee -a ~/.bashrc
1545 echo "export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH" >> ~/.bashrc
1546 l
1547 cd ~/catkin_ws/
1548 ls
1549 source /opt/ros/kinetic/setup.bash
1550 catkin_make
1551 sudo apt-get install git cmake
1552 cd ~
1553 git clone https://github.com/dusty-nv/jetson-inference
1554 cd jetson-inference
1555 git submodule update --init
1556 mkdir build
1557 cd build
1558 cmake ../
1559 make
1560 sudo make install
1561 cd ~/
1562 cd ~/Desktop/
1563 ./installROS.sh -p ros-kinetic-desktop -p ros-kinetic-rgbd-launch
1564 cd installROSTX1/
1565 ls
1566 ./installROS.sh -p ros-kinetic-desktop -p ros-kinetic-rgbd-launch
1567 cd ~/Desktop/
1568 cd ~/catkin_ws/
1569 ls
1570 cd ..
1571 rm -rf catkin_ws
1572 ls
1573 cd ~/Desktop/installROSTX1/
1574 ./setupCatkinWorkspace.sh catkin_ws
1575 cd ~/catkin_ws/src/
1576 ls
1577 cd ~/Desktop/
1578 sudo apt-get install ros-kinetic-image-transport
1579 sudo apt-get install ros-kinetic-image-publisher
1580 sudo apt-get install ros-kinetic-vision-msgs
1581 cd ~/catkin_ws/src/
1582 ls
1583 git clone https://github.com/dusty-nv/ros_deep_learning
1584 cd ../
1585 ls
1586 catkin_make
1587 roscore
1588 sudo roscore
1589 cd ..
1590 wget https://raw.githubusercontent.com/ROBOTIS-GIT/robotis_tools/master/install_ros_kinetic.sh
1591 ls
1592 mv install_ros_kinetic.sh ~/Desktop/
1593 cd ~/Desktop/
1594 ls
1595 vi install_ros_kinetic.sh
1596 chmod 755 ./install_ros_kinetic.sh
1597 sudo bash ./install_ros_kinetic.sh
1598 cd ~/catkin_qws/
1599 ls
1600 cd src/
1601 ls
1602 roscore
1603 vi ~/install_ros_kinetic.sh
1604 cd ~/Desktop/
1605 ls
1606 vi install
1607 vi install_ros_kinetic.sh
1608 cd ~/catkin_qws/
1609 cd src/
1610 ls
1611 git clone https://github.com/dusty-nv/ros_deep_learning
1612 sudo git clone https://github.com/dusty-nv/ros_deep_learning
1613 ls
1614 cd ..
1615 alias
1616 sb
1617 vi ~/.bashrc
1618 cd ~/catkin_qws/
1619 catkin_make
1620 sudo catkin_make
1621 cd ../catkin_ws
1622 ls
1623 cd ..
1624 rm -rf catkin_ws
1625 rm -rf catkin_qws
1626 sudo rm -rf catkin_qws
1627 ls
1628 mkdir ~/catkin_ws
1629 cd ~/catkin_ws/
1630 mkdir src
1631 cd src/
1632 cd ..
1633 cd src/
1634 catkin_init_workspace
1635 cd ..
1636 catkin_make
1637 cd src/
1638 git clone https://github.com/dusty-nv/ros_deep_learning
1639 ~/Desktop/
1640 cd ~/Desktop/
1641 ls
1642 vi install_ros_kinetic.sh
1643 cd ~/catkin_ws/
1644 ls
1645 cd src/
1646 ;s
1647 ;
1648 l
1649 cd ~/catkin_ws/
1650 catkin_make
1651 catkin_make -j4
1652 catkin_make -l4
1653 catkin clean
1654 catkin_clean
1655 ls
1656 cd build/
1657 ls
1658 cd ..
1659 ls
1660 cd src
1661 ls
1662 cd ..
1663 cd devel/
1664 ls
1665 cd ..
1666 ls
1667 cd devel/
1668 ls
1669 vi ~/.bashrc
1670 cd ~/catkin_ws/
1671 ls
1672 cd src/
1673 catkin_make
1674 cd ..
1675 catkin_make
1676 df h
1677 df -h
1678 roscore
1679 pip install jupyterlab
1680 sudo pip install jupyterlab
1681 jupyter
1682 jupyter notebook
1683 cd ~/Desktop/
1684 cd ../Downloads/
1685 ls
1686 vscode
1687 python3 -m pip install --upgrade pip
1688 sudo pip install --upgrade pip
1689 pip3 install jupyterlab
1690 sudo apt-get install jupyterlab
1691 pip install pandas
1692 python3
1693 apt-get install python3-pip -y
1694 sudo apt-get install python3-pip -y
1695 pip3 install --upgrade pip && python3 -m pip install ipykernel && python3 -m ipykernel install --user
1696 jupyter notebook
1697 python
1698 df -f
1699 df -h
1700 chromium-browser
1701 sudo shutdown -h now
1702 sudo apt-transport-http libxkbfile-dev libsecret-1-dev rpm
1703 curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
1704 sudo apt-get install -y nodejs
1705 sudo apt-get update
1706 sudo apt-get install yarn
1707 cd ~/Desktop/
1708 git clone --recursive https://github.com/Microsoft/vscode.git
1709 cd vscode/
1710 ls
1711 gedit packages.json
1712 ls
1713 vi package.json
1714 cd test/
1715 ls
1716 cd smoke/
1717 vi package.json
1718 cd ../..
1719 yarn --arch=arm64
1720 yarn --arch=arm64 run watch
1721 yarn add gulp-bom --no-save
1722 sudo apt-get install fakeroot rpm
1723 sudo apt-get install libsecret-1-dev.
1724 sudo yum install libX11-devel.x86_64 libxkbfile-devel.x86_64 # or .i686.
1725 sudo yum install libX11-devel.x86_64 libxkbfile-devel.x86_64 # or .i686
1726 sudo apt-get install libx11-dev libxkbfile-dev
1727 sudo apt-get install libsecret-1-dev
1728 yarn
1729 yarn cache clean
1730 ~/.node-gyp
1731 cd ~/.node-gyp
1732 ls
1733 cd ..
1734 rm -rf ~/.node-gyp
1735 cd ~/Desktop/vscode/
1736 ls
1737 yarn
1738 history >> ~/Desktop/his08.py
| [
"pre3ice@gmail.com"
] | pre3ice@gmail.com |
f1a1c35dd7cedb8f635a204f50da3d647b87f485 | 62570b5e3df9a8ebdd0887da97503e6807ab122c | /tests/test_dsrmath.py | 77d71df2042a9e6a383acab7b4150d25b3b69ccd | [
"MIT"
] | permissive | dkratzert/ShelXFile | a4e8152e5bad5aa92eec2520bfbb493166d1d8d3 | 15378d9b8cd44d1212a3c5783f21d10e2df81981 | refs/heads/master | 2023-07-06T10:27:56.830249 | 2023-06-12T19:46:05 | 2023-06-12T19:46:05 | 135,573,780 | 11 | 0 | MIT | 2022-10-02T13:17:39 | 2018-05-31T11:18:41 | Python | UTF-8 | Python | false | false | 13,143 | py | from unittest import TestCase
from shelxfile.misc.dsrmath import vol_unitcell, distance, dice_coefficient, levenshtein, dice_coefficient2, \
SymmetryElement, Matrix, Array, mean, median, std_dev, nalimov_test, id_generator, atomic_distance, \
almost_equal
from shelxfile.misc.misc import flatten, frac_to_cart, cart_to_frac, determinante, subtract_vect
from shelxfile.shelx.shelx import Shelxfile
class Testdsrmath(TestCase):
def test_vol_unitcell(self):
volume = vol_unitcell(2, 2, 2, 90, 90, 90)
self.assertEqual(8.0, volume)
def test_distance_1(self):
d = distance(1, 1, 1, 2, 2, 2, 4)
self.assertEqual(1.7321, d)
def test_distance_2(self):
d = distance(1, 0, 0, 2, 0, 0, 4)
self.assertEqual(1.0, d)
def test_levenshtein(self):
l = levenshtein('hallo', 'holla')
self.assertEqual(2, l)
def test_dice(self):
d = dice_coefficient('hallo', 'holla')
self.assertEqual(0.25, d)
def test_dice2(self):
self.assertEqual(0.75, dice_coefficient2('hallo', 'holla'))
def test_dice3(self):
self.assertEqual(0.6, dice_coefficient2('Banze', 'Benzene'))
def test_dice4(self):
self.assertEqual(0.333333, dice_coefficient2('halo', 'Haaallo'))
def test_dice5(self):
self.assertEqual(0.2, dice_coefficient2('hallo', 'Haaallo'))
def test_dice6(self):
self.assertEqual(0.0, dice_coefficient2('hallo', 'Hallo'))
def test_dice_7(self):
self.assertEqual(1.0, dice_coefficient2('aaa', 'BBBBB'))
def test_dice_8(self):
self.assertEqual(1.0, dice_coefficient2('', ''))
def test_subtract_vect(self):
self.assertEqual((-2, 0, 1), subtract_vect([1, 2, 3], [3, 2, 2]))
class TestSymmetryElement(TestCase):
def setUp(self) -> None:
self.shx = Shelxfile(debug=True)
self.shx.read_file('tests/resources/p21c.res')
def test_to_shelxl(self):
self.assertEqual('[+X, +Y, +Z, -X, -Y, -Z, -X, 0.5+Y, 0.5-Z, +X, -0.5-Y, -0.5+Z]',
self.shx.symmcards._symmcards.__repr__())
def test_repr(self):
self.assertEqual(SymmetryElement(['-X', '-Y', '-Z']), self.shx.symmcards[1])
def test_string(self):
self.assertEqual("|-1 0 0| | 0.0|\n"
"| 0 1 0| + | 0.5|\n"
"| 0 0 -1| | 0.5|\n", self.shx.symmcards[2].__str__())
def test_equals_false(self):
self.assertEqual(False, self.shx.symmcards[0] == self.shx.symmcards[1])
def test_equals_True(self):
self.assertEqual(True, self.shx.symmcards[1] == self.shx.symmcards[1])
def test_s12_equals(self):
s1 = SymmetryElement(['0.5', '0.5', '0.5'])
s2 = SymmetryElement(['0.5', '0.5', '0.5'])
self.assertEqual(True, s1 == s2)
def test_s12_equals2(self):
s1 = SymmetryElement(['1.5', '1.5', '1.5'])
s2 = SymmetryElement(['0.5', '0.5', '0.5'])
self.assertEqual(True, s1 == s2)
def test_s34_not_equals(self):
s3 = SymmetryElement(['1', '0.5', '0.5'])
s4 = SymmetryElement(['0.5', '0.5', '0.5'])
self.assertEqual(False, s3 == s4)
class TestMatrix(TestCase):
def test_det(self):
m1 = Matrix([[2, 0, 0], [0, 2, 0], [0, 0, 2]])
self.assertEqual(8, m1.det)
def test_norm(self):
m = Matrix([[-4, -3, -2],[-1, 0, 1],[ 2, 3, 4]])
self.assertEqual(7.745966692414834, m.frobenius_norm())
self.assertEqual(7.745966692414834, m.norm)
def test_zero(self):
self.assertEqual("| 0.0000 0.0000 0.0000|\n"
"| 0.0000 0.0000 0.0000|\n"
"| 0.0000 0.0000 0.0000|\n"
"| 0.0000 0.0000 0.0000|\n"
"| 0.0000 0.0000 0.0000|\n", Matrix.zero(5, 3).__repr__())
def test_equal(self):
m1 = Matrix([(1., 2., 3.), (1., 2., 3.), (1., 2., 3.)])
m2 = Matrix([(1, 2, 3), (1, 2, 3), (1, 2, 3)])
self.assertEqual(True, m1 == m2)
def test_equal2(self):
m1 = Matrix([(1, 2, 3), (1, 2, 3), (1, 2, 3)])
m2 = Matrix([(1, 2, 3), (3, 2, 3), (1, 2, 3)])
self.assertEqual(False, m1 == m2)
def test_subtract_matrix_from_matrix(self):
self.assertEqual([[2, 0, -2], [2, 0, -2], [2, 0, 0]],
Matrix([[3, 2, 1], [3, 2, 1], [3, 2, 3]]) - Matrix([[1, 2, 3], [1, 2, 3], [1, 2, 3]]))
def test_transposed(self):
m = Matrix([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
self.assertEqual([(1, 1, 1), (2, 2, 2), (3, 3, 3)], m.transposed.values)
def test_transpose_alt(self):
m = Matrix([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
self.assertEqual([[1, 1, 1], [2, 2, 2], [3, 3, 3]], m.transposed_alt.values)
def test_cholesky(self):
m = Matrix([[25, 15, -5], [15, 18, 0], [-5, 0, 11]])
self.assertEqual(
"| 5.0000 0.0000 0.0000|\n"
"| 3.0000 3.0000 0.0000|\n"
"|-1.0000 1.0000 3.0000|\n", m.cholesky().__repr__())
def test_inversed(self):
self.assertEqual("|-0.8125 0.1250 0.1875|\n"
"| 0.1250 -0.2500 0.1250|\n"
"| 0.5208 0.1250 -0.1458|\n",
Matrix([[1, 2, 3], [4, 1, 6], [7, 8, 9]]).inversed.__repr__())
def test_foo(self):
m = Matrix([[1, 2, 300], [4.1, 4.2, 4.3], [5, 6, 7]])
x = m + m
self.assertEqual("| 2.0000 4.0000 600.0000|\n"
"| 8.2000 8.4000 8.6000|\n"
"|10.0000 12.0000 14.0000|\n", x.__repr__())
m *= 3
self.assertEqual("| 3.0000 6.0000 900.0000|\n"
"|12.3000 12.6000 12.9000|\n"
"|15.0000 18.0000 21.0000|\n", m.__repr__())
def test_getitem(self):
m = Matrix([[2., 2., 3.], [1., 2.2, 3.], [1., 2., 3.]])
self.assertEqual(2.2, m[1, 1])
self.assertEqual(2.2, m[1][1])
def test_matrix_add_matrix(self):
m1 = Matrix([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
m2 = Matrix([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
t1 = m1 + m2
self.assertEqual(
"| 2.0000 2.0000 2.0000|\n"
"| 2.0000 2.0000 2.0000|\n"
"| 2.0000 2.0000 2.0000|\n", t1.__repr__())
t2 = m1 + 0.5
self.assertEqual(
"| 1.5000 1.5000 1.5000|\n"
"| 1.5000 1.5000 1.5000|\n"
"| 1.5000 1.5000 1.5000|\n", t2.__repr__())
def test_matrix_multiply1(self):
m = Matrix([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) * 2
self.assertEqual("| 2.0000 2.0000 2.0000|\n"
"| 2.0000 2.0000 2.0000|\n"
"| 2.0000 2.0000 2.0000|\n", m.__repr__())
def test_matrix_multiply2(self):
m = Matrix([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
m2 = Matrix([[2, 2, 2], [0.5, 0.5, 0.5], [2, 2, 1]])
x = m * m2
self.assertEqual("| 6.0000 1.5000 5.0000|\n"
"| 6.0000 1.5000 5.0000|\n"
"| 6.0000 1.5000 5.0000|\n", x.__repr__())
self.assertEqual("| 1.0000 1.0000 1.0000|\n"
"| 1.0000 1.0000 1.0000|\n"
"| 1.0000 1.0000 1.0000|\n", m.__repr__())
def test_matrix_multiply_array(self):
m = Matrix([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
self.assertEqual(Array([6, 6, 6]), m * Array([2, 2, 2]))
def test_matrix_multiply4(self):
m = Matrix([(0, 1, 0), (-1, -1, 0), (0, 0, 1)])
self.assertEqual(Array([-0.666667, -0.333334, 0.45191]), Array([0.333333, 0.666667, 0.45191]) * m)
class TestArray(TestCase):
def test_add_array_to_array_inplace(self):
a = Array([1, 2, 3, 4.1])
a += a
self.assertEqual(Array([2, 4, 6, 8.2]), a)
def test_add_constant_to_array(self):
a = Array([1, 2, 3, 4.1])
self.assertEqual(Array([5, 6, 7, 8.1]), a + 4)
def test_add_array_to_array(self):
a = Array([1, 2, 3, 4.1])
self.assertEqual(Array([2, 4, 6, 8.2]), a + a)
def test_getitem(self):
a = Array([2, 4, 6, 8.2])
self.assertEqual(4, a[1])
def test_get_item(self):
self.assertEqual(2, Array([1, 2, 3])[1])
def test_multiply_constant_inplace(self):
a = Array([1, 2, 3, 4.1])
a *= 3
self.assertEqual(Array([3, 6, 9, 12.299999999999999]), a)
def test_dot_multiply(self):
a = Array([1, 2, 3, 4.1])
self.assertEqual(30.81, a.dot(a))
def test_multiply_array_with_array(self):
a = Array([1, 2, 3, 4.1])
self.assertEqual(30.81, a * a)
def test_norm(self):
a = Array([1, 2, 3, 4])
self.assertEqual(30, a.norm())
def test_normalized(self):
a = Array([2, 2, 1])
self.assertEqual(3.0, a.normalized())
def test_zero(self):
self.assertEqual(Array([0.0, 0.0, 0.0, 0.0, 0.0]), Array.zero(5))
def test_floor(self):
self.assertEqual(Array([3, 2, 0]), Array([3.634, 2, 0.345]).floor)
def test_cross(self):
a = Array([1, 2, 3])
b = Array([-7, 8, 9])
self.assertEqual(Array([-6, -30, 22]), a.cross(b))
def test_angle(self):
a = Array([1, 0, 1])
b = Array([1, 0, 0])
self.assertEqual(45.0, a.angle(b))
def test_angle_2(self):
va = Array([0.03562, 0.14298, 0.24008]) - Array([0.04402, 0.16614, 0.22275])
vb = Array([0.07078, 0.17382, 0.22106]) - Array([0.04402, 0.16614, 0.22275])
self.assertEqual(120.9401, round(va.angle(vb), 4))
def test_add(self):
a = Array([1, 2, 3])
b = Array([1, 1, 1])
self.assertEqual(Array([2, 3, 4]), a + b)
def test_setitem(self):
a = Array([0, 0, 0])
a[1] = 5
self.assertEqual(Array([0, 5, 0]), a)
def test_multiply(self):
a1 = Array([1, 2, 3])
a2 = Array([1, 2, 3])
self.assertEqual(14, a1 * a2)
def test_subtract(self):
a = Array([1, 2, 3])
b = Array([1, 1, 1])
self.assertEqual(Array([0, 1, 2]), a - b)
self.assertEqual(Array([0, -1, -2]), b - a)
def test_equality(self):
a1 = Array([1, 2, 3, 4])
a2 = Array([1, 2, 3.0, 4.0])
self.assertEqual(True, a1 == a2)
def test_equality_false(self):
a1 = Array([1, 2, 3, 4])
a2 = Array([2, 2, 3.0, 4.0])
self.assertEqual(False, a1 == a2)
class TestMisc(TestCase):
def test_mean(self):
self.assertEqual(2.5, mean([1, 2, 3, 4, 1, 2, 3, 4]))
def test_median1(self):
self.assertEqual(2, median([2]))
def test_median2(self):
self.assertEqual(2.5, median([1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]))
def test_median3(self):
self.assertEqual(3, median([1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4.1, 1000000]))
def test_median4(self):
with self.assertRaises(ValueError):
median([])
def test_std_dev1(self):
l1 = [1.334, 1.322, 1.345, 1.451, 1.000, 1.434, 1.321, 1.322]
self.assertEqual(0.13797871, round(std_dev(l1), 8))
def test_std_dev2(self):
l2 = [1.234, 1.222, 1.345, 1.451, 2.500, 1.234, 1.321, 1.222]
self.assertEqual(0.43536797, round(std_dev(l2), 8))
def test_std_dev3(self):
l1 = [1.334, 1.322, 1.345, 1.451, 1.000, 1.434, 1.321, 1.322]
self.assertEqual(1.328, median(l1))
self.assertEqual(1.316125, mean(l1))
def test_nalimov_test(self):
data = [1.120, 1.234, 1.224, 1.469, 1.145, 1.222, 1.123, 1.223, 1.2654, 1.221, 1.215]
self.assertEqual([3], nalimov_test(data))
def test_flatten_list(self):
self.assertEqual(['wer', 234, 'brdt5', 'dfg', 21, 34, 5, 'fhg', 4],
flatten([['wer', 234, 'brdt5'], ['dfg'], [[21, 34, 5], ['fhg', 4]]]))
def test_id_generator(self):
self.assertEqual('a', id_generator(1, 'a'))
def test_atomic_distance(self):
cell = [10.5086, 20.9035, 20.5072, 90, 94.13, 90]
coord1 = [-0.186843, 0.282708, 0.526803]
coord2 = [-0.155278, 0.264593, 0.600644]
self.assertEqual(1.5729229943265979, atomic_distance(coord1, coord2, cell))
def test_determinante(self):
m1 = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
self.assertEqual(8, determinante(m1))
def test_almost_equal(self):
self.assertEqual(True, almost_equal(1.0001, 1.0005))
self.assertEqual(False, almost_equal(1.1, 1.0005))
self.assertEqual(False, almost_equal(2, 1))
def test_fractional_to_cartesian(self):
cell = [10.5086, 20.9035, 20.5072, 90, 94.13, 90]
coord1 = [-0.186843, 0.282708, 0.526803]
self.assertEqual([-2.741505423999065, 5.909586678000002, 10.775200700893734], frac_to_cart(coord1, cell))
def test_cart_to_frac(self):
cell = [10.5086, 20.9035, 20.5072, 90, 94.13, 90]
coords = [-2.74150542399906, 5.909586678, 10.7752007008937]
self.assertEqual((-0.1868429999999998, 0.28270799999999996, 0.5268029999999984), cart_to_frac(coords, cell))
| [
"dkratzert@gmx.de"
] | dkratzert@gmx.de |
4e794f3df16cf82062bff11428a00381326cf9ea | 910c97ce255f39af7ef949664b4346e8cb5d6a0e | /monitorexecutor/dynamic/.svn/text-base/stat_service.py.svn-base | 9d791b8effad2f1cea315dda0c8df5b10a3f9731 | [] | no_license | sun3shines/manager_monitor | f3742a4fde95b456f51d0a18feb78f3d4048c560 | f49d741203d8476f2249a49d90fecc86143ac622 | refs/heads/master | 2021-01-17T06:47:14.375088 | 2016-04-29T06:43:05 | 2016-04-29T06:43:05 | 57,361,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | # -*- coding: utf-8 -*-
import psutil
import datetime
import time
from monitorexecutor.globalx import PSUTIL_SERVICE_INTERVAL,SERVICE_CMDLINE
from monitorexecutor.global_cache import MONITOR_SERVICE_PROCESS
def service_iter():
while True:
for service_name,pc in SERVICE_CMDLINE.items():
service_init_data = {'name':service_name,
'cmdline':' '.join(list(pc)),
'active_status':'0/0',
'open_files':'0',
'net_connections':'0',
'thread_num':'0',
'cpu_utilization':'0.0',
'mem_utilization':'0.0',
'available':'disable',
'timestamp':str(datetime.datetime.now())}
psutil_pids = MONITOR_SERVICE_PROCESS.get(service_name)
if not psutil_pids:
# 未初始化时为None
psutil_pids = []
cmdline = ''
total = len(psutil_pids)
actives = 0
open_files = net_connections = thread_num = 0
cpu_utilization = mem_utilization = 0.0
for pid in psutil_pids:
try:
p = psutil.Process(pid)
service_init_data.update({'available':'enable'})
cmdline = ' '.join(p.cmdline)
if p.status not in [psutil.STATUS_ZOMBIE,psutil.STATUS_DEAD]:
actives = actives + 1
open_files = open_files + len(p.get_open_files())
net_connections = net_connections + len(p.get_connections())
thread_num = thread_num + p.get_num_threads()
cpu_utilization = cpu_utilization + p.get_cpu_percent()
mem_utilization = mem_utilization + p.get_memory_percent()
except:
continue
service_init_data.update({'cmdline':cmdline,
'active_status':'/'.join([str(actives),str(total)]),
'open_files':str(open_files),
'net_connections':str(net_connections),
'thread_num':str(thread_num),
'cpu_utilization':str(cpu_utilization),
'mem_utilization':str(mem_utilization)})
yield service_init_data
time.sleep(PSUTIL_SERVICE_INTERVAL)
def get_psutil_service(hostUuid):
for service_init_data in service_iter():
yield {'hostUuid':hostUuid,
'class':'statService',
'attr':service_init_data}
| [
"sun__shines@163.com"
] | sun__shines@163.com | |
29d8581ae8a6ebd3407f19999ceb8421bd8494e4 | 8fa938eddcc75eb7dff1f2055c49cb3817a00c63 | /Dictionary/ex34.py | 938679c79c5c36bd41a8b9548ad1ba4cd058d0b2 | [] | no_license | jayhebe/w3resource_exercises | f27109759d112b0611574aa70eb378ace447c2a0 | b29aa7c806f6021a8988e83bb9f674522a41380d | refs/heads/master | 2020-05-07T09:23:24.039271 | 2020-01-30T15:05:06 | 2020-01-30T15:05:06 | 180,374,062 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | d = {'Alex': ['subj1', 'subj2', 'subj3'], 'David': ['subj1', 'subj2']}
ctr = sum(map(len, d.values()))
print(ctr)
| [
"jayhebe1983@sina.com"
] | jayhebe1983@sina.com |
39c6f807a95a6b5a77ae04b4d7f3efa4c8f2f1bf | 3dc3bbe607ab7b583eb52dbaae86636eb642960a | /mmaction/models/localizers/utils/proposal_utils.py | 7b51921684f442df8c0b28a186bc581b2f4d9e3e | [
"Apache-2.0"
] | permissive | open-mmlab/mmaction2 | 659c36c6083fd3d9d072e074a8d4b3a50342b9bd | 582b78fd6c3240500d5cacd292339d7d1ddbb056 | refs/heads/main | 2023-08-28T18:14:50.423980 | 2023-08-10T09:20:06 | 2023-08-10T09:20:06 | 278,810,244 | 3,498 | 1,028 | Apache-2.0 | 2023-09-07T06:50:44 | 2020-07-11T07:19:10 | Python | UTF-8 | Python | false | false | 5,256 | py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
def temporal_iou(proposal_min, proposal_max, gt_min, gt_max):
"""Compute IoU score between a groundtruth bbox and the proposals.
Args:
proposal_min (list[float]): List of temporal anchor min.
proposal_max (list[float]): List of temporal anchor max.
gt_min (float): Groundtruth temporal box min.
gt_max (float): Groundtruth temporal box max.
Returns:
list[float]: List of iou scores.
"""
len_anchors = proposal_max - proposal_min
int_tmin = np.maximum(proposal_min, gt_min)
int_tmax = np.minimum(proposal_max, gt_max)
inter_len = np.maximum(int_tmax - int_tmin, 0.)
union_len = len_anchors - inter_len + gt_max - gt_min
jaccard = np.divide(inter_len, union_len)
return jaccard
def temporal_iop(proposal_min, proposal_max, gt_min, gt_max):
    """Compute IoP score between a groundtruth bbox and the proposals.

    IoP is the overlap with the groundtruth expressed as a fraction of the
    proposal's own duration (intersection over proposal).

    Args:
        proposal_min (list[float]): List of temporal anchor min.
        proposal_max (list[float]): List of temporal anchor max.
        gt_min (float): Groundtruth temporal box min.
        gt_max (float): Groundtruth temporal box max.

    Returns:
        list[float]: List of intersection over anchor scores.
    """
    anchor_len = np.array(proposal_max - proposal_min)
    overlap = np.maximum(
        np.minimum(proposal_max, gt_max) - np.maximum(proposal_min, gt_min),
        0.)
    return np.divide(overlap, anchor_len)
def soft_nms(proposals, alpha, low_threshold, high_threshold, top_k):
    """Soft NMS for temporal proposals.

    Instead of discarding overlapping proposals outright, their confidence
    scores are decayed by a Gaussian of the IoU with the currently selected
    proposal.

    Args:
        proposals (np.ndarray): Proposals generated by network,
            columns ``[start, end, ..., score]``.
        alpha (float): Alpha value of Gaussian decaying function.
        low_threshold (float): Low threshold for soft nms.
        high_threshold (float): High threshold for soft nms.
        top_k (int): Top k values to be considered.

    Returns:
        np.ndarray: The updated proposals, shape ``(k, 3)`` with columns
        ``[start, end, score]``.
    """
    # Sort by confidence score (last column), highest first.
    proposals = proposals[proposals[:, -1].argsort()[::-1]]
    tstart = list(proposals[:, 0])
    tend = list(proposals[:, 1])
    tscore = list(proposals[:, -1])
    rstart = []
    rend = []
    rscore = []
    # NOTE(review): `<=` keeps up to top_k + 1 proposals; confirm whether
    # this off-by-one is intentional (it matches the upstream reference code).
    while len(tscore) > 0 and len(rscore) <= top_k:
        max_index = np.argmax(tscore)
        max_width = tend[max_index] - tstart[max_index]
        # IoU of the current best proposal against every remaining candidate.
        iou_list = temporal_iou(tstart[max_index], tend[max_index],
                                np.array(tstart), np.array(tend))
        iou_exp_list = np.exp(-np.square(iou_list) / alpha)
        for idx, _ in enumerate(tscore):
            if idx != max_index:
                current_iou = iou_list[idx]
                # Decay scores of overlapping candidates; the effective
                # threshold grows with the selected proposal's width
                # (widths are presumably normalized to [0, 1] -- verify).
                if current_iou > low_threshold + (high_threshold -
                                                  low_threshold) * max_width:
                    tscore[idx] = tscore[idx] * iou_exp_list[idx]
        # Move the selected proposal from the candidate set to the results.
        rstart.append(tstart[max_index])
        rend.append(tend[max_index])
        rscore.append(tscore[max_index])
        tstart.pop(max_index)
        tend.pop(max_index)
        tscore.pop(max_index)
    rstart = np.array(rstart).reshape(-1, 1)
    rend = np.array(rend).reshape(-1, 1)
    rscore = np.array(rscore).reshape(-1, 1)
    new_proposals = np.concatenate((rstart, rend, rscore), axis=1)
    return new_proposals
def post_processing(result, video_info, soft_nms_alpha, soft_nms_low_threshold,
                    soft_nms_high_threshold, post_process_top_k,
                    feature_extraction_interval):
    """Post process for temporal proposals generation.

    Applies soft-NMS and rescales the (normalized) proposal boundaries to
    seconds within the source video.

    Args:
        result (np.ndarray): Proposals generated by network,
            columns ``[start, end, ..., score]`` with start/end in [0, 1].
        video_info (dict): Meta data of video. Required keys are
            'duration_frame', 'duration_second'.
        soft_nms_alpha (float): Alpha value of Gaussian decaying function.
        soft_nms_low_threshold (float): Low threshold for soft nms.
        soft_nms_high_threshold (float): High threshold for soft nms.
        post_process_top_k (int): Top k values to be considered.
        feature_extraction_interval (int): Interval used in feature extraction.

    Returns:
        list[dict]: The updated proposals, e.g.
            [{'score': 0.9, 'segment': [0, 1]},
             {'score': 0.8, 'segment': [0, 2]},
            ...].
    """
    # Soft-NMS only makes sense with more than one proposal.
    if len(result) > 1:
        result = soft_nms(result, soft_nms_alpha, soft_nms_low_threshold,
                          soft_nms_high_threshold, post_process_top_k)

    # Re-sort by score, highest first (soft-NMS may have decayed scores).
    result = result[result[:, -1].argsort()[::-1]]
    # Effective duration: only the frames covered by whole feature-extraction
    # windows are mapped back to seconds.
    video_duration = float(
        video_info['duration_frame'] // feature_extraction_interval *
        feature_extraction_interval
    ) / video_info['duration_frame'] * video_info['duration_second']
    proposal_list = []

    for j in range(min(post_process_top_k, len(result))):
        proposal = {}
        proposal['score'] = float(result[j, -1])
        # Clamp the normalized boundaries to [0, 1] before scaling to seconds.
        proposal['segment'] = [
            max(0, result[j, 0]) * video_duration,
            min(1, result[j, 1]) * video_duration
        ]
        proposal_list.append(proposal)
    return proposal_list
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
ece13f701ebfc064b3485e187b768aecae541472 | 3c6aeb458a8bec0671c1d8be18331072ac97e05f | /ohsn/stream/streaming_depressioin.py | 26a5afca34cd4c345d4b01333b7f0cef762f79ff | [] | no_license | wtgme/ohsn | d7b17ad179a789be2325e0923026a681e343a40c | 9c165d45eefa4058e7ed2c6bad348703e296362d | refs/heads/master | 2021-08-29T06:01:20.165839 | 2021-08-12T08:51:46 | 2021-08-12T08:51:46 | 44,922,360 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,601 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 03 03:43:09 2015
@author: wt
crawl stream with keyword-filtering
Keywords are in keywords.txt
https://dev.twitter.com/streaming/reference/post/statuses/filter
The track, follow, and locations fields should be considered to be combined with an OR operator.
track=foo&follow=1234 returns Tweets matching “foo” OR created by user 1234.
The United Kingdom lies between latitudes 49° to 61° N, and longitudes 9° W to 2° E.
Filter tweets with location, but few tweets have location information
Identify the location of users that post the crawled tweets, only store the users in UK
"""
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from twython import TwythonStreamer
import urllib
import imghdr
import os
import ConfigParser
import datetime
import logging
from ohsn.util import db_util as dbutil
# Load Twitter API credentials from <package root>/conf/TwitterAPI.cfg.
config = ConfigParser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)), 'conf', 'TwitterAPI.cfg'))

# spin up twitter api
APP_KEY = config.get('credentials1', 'app_key')
APP_SECRET = config.get('credentials1', 'app_secret')
OAUTH_TOKEN = config.get('credentials1', 'oath_token')
OAUTH_TOKEN_SECRET = config.get('credentials1', 'oath_token_secret')
print('loaded configuation')

# spin up database: tweets go into the 'stream' collection of the
# 'depression' MongoDB database (no-auth connection).
DBNAME = 'depression'
COLLECTION = 'stream'
db = dbutil.db_connect_no_auth(DBNAME)
tweets = db[COLLECTION]
# location_name = ['uk', 'u.k.', 'united kingdom', 'britain', 'england']
print("twitter connection and database connection configured")

# All stream warnings/errors are appended to a local log file.
logging.basicConfig(filename='streaming-warnings.log', level=logging.DEBUG)
class MyStreamer(TwythonStreamer):
    """Streaming client that logs stall warnings and persists matching tweets."""

    def on_success(self, data):
        # A 'warning' message means the client is falling behind the stream.
        if 'warning' in data:
            logging.warning(data['warning']['code'] + "\t" + data['warning']['message'] + "\t percent_full=" + data['warning']['percent_full'] +"\n")
        # Only messages carrying a 'text' field are actual tweets.
        if 'text' in data:
            store_tweet(data)
            # print data['user']['screen_name'].encode('utf-8') + "\t" + data['text'].encode('utf-8').replace('\n', ' ')

    def on_error(self, status_code, data):
        print status_code
        # NOTE(review): error payloads do not necessarily contain a 'warning'
        # key, so this line may itself raise -- verify against Twython docs.
        logging.error(data['warning']['code'] + "\t" + data['warning']['message'] + "\t percent_full=" + data['warning']['percent_full'] +"\n")

        # Want to stop trying to get data because of the error?
        # Uncomment the next line!
        # self.disconnect()
def get_pictures(tweet):
    # Get pictures in the tweets store as date-tweet-id-username.ext
    # Best-effort download of photo attachments into
    # 'api-timelines-scraper-media/<id>.<ext>'. The bare except below
    # deliberately swallows everything (missing 'entities'/'media' keys,
    # network failures, ...) so one bad tweet never kills the stream.
    try:
        for item in tweet['entities']['media']:
            print item['media_url_https']
            if item['type']=='photo':
                # print "PHOTO!!!"
                urllib.urlretrieve(item['media_url_https'], 'api-timelines-scraper-media/' + item['id_str'])
                # code to get the extension....
                # imghdr sniffs the file's real image type after download.
                ext = imghdr.what('api-timelines-scraper-media/' + item['id_str'])
                os.rename('api-timelines-scraper-media/' + item['id_str'], 'api-timelines-scraper-media/' + item['id_str'] + "." + ext)
    except:
        pass
def store_tweet(tweet, collection=tweets, pictures=False):
    """
    Simple wrapper to facilitate persisting tweets. Right now, the only
    pre-processing accomplished is coercing date values to datetime.

    NOTE: mutates `tweet` in place (replaces the 'created_at' string with a
    datetime object) before inserting. The default `collection` is bound
    once at import time to the module-level `tweets` collection.
    """
    # print tweet
    # Twitter timestamps look like 'Wed Jun 03 03:43:09 +0000 2015'.
    tweet['created_at'] = datetime.datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
    collection.insert(tweet)
    # global location_name
    # user = tweet.get('user', None)
    # if user:
    #     location = user['location']
    #     if location:
    #         location = location.lower()
    #         if any(x in location for x in location_name):
    #             print location
    #             tweet['created_at'] = datetime.datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
    #             tweet['user']['created_at'] = datetime.datetime.strptime(tweet['user']['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
    #             # get pictures in tweet...
    #             if pictures:
    #                 get_pictures(tweet)
    #
    #             #print "TODO: alter the schema of the tweet to match the edge network spec from the network miner..."
    #             #print "TODO: make the tweet id a unique index to avoid duplicates... db.collection.createIndex( { a: 1 }, { unique: true } )"
    #             collection.insert(tweet)
# Run the streaming client forever, reconnecting after any failure
# (dropped connection, rate limiting, ...). Errors are printed and ignored.
while True:
    try:
        stream = MyStreamer(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
        # https://dev.twitter.com/streaming/overview/request-parameters
        # stream.statuses.filter(language=['en'], track=['bulimic, anorexic, ednos, ed-nos, bulimia, anorexia, eating disorder, eating-disorder, eating disordered, eating-disordered, CW, UGW, GW2, GW1, GW'])
        # track_list = []
        # with open('keyword.txt', 'r') as fo:
        #     for line in fo.readlines():
        #         track_list.append(line.strip())
        # Depression: http://psychcentral.com/lib/types-and-symptoms-of-depression/
        # stream.statuses.filter(language=['en'], track=[
        #     'dysthymia', 'dysthymic', 'bipolar', 'peripartum', 'postpartum', 'melancholic',
        #     'atypical', 'catatonic'])
        # Currently tracking a single hashtag campaign, English tweets only.
        stream.statuses.filter(language=['en'], track=['#MyDepressionLooksLike'])
    except Exception as detail:
        print str(detail)
| [
"wtgmme@gmail.com"
] | wtgmme@gmail.com |
4ec74b74a15727d510b711fb2f2377004c678a3a | 029aa4fa6217dbb239037dec8f2e64f5b94795d0 | /Python算法指南/232_不同的路径II_上题的再应用.py | 38db0f1e03c0d402ee10b759d9d46ce78a45bde4 | [] | no_license | tonyyo/algorithm | 5a3f0bd4395a75703f9ee84b01e42a74283a5de9 | 60dd5281e7ce4dfb603b795aa194a67ff867caf6 | refs/heads/master | 2022-12-14T16:04:46.723771 | 2020-09-23T06:59:33 | 2020-09-23T06:59:33 | 270,216,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | class Solution:
def uniquePathsWithObstacles(self, obstacleGrid):
m, n = len(obstacleGrid), len(obstacleGrid[0])
mp = [[0] * n for _ in range(m)]
mp[0][0] = 1
for i in range(m):
for j in range(n):
if obstacleGrid[i][j] == 1: # 遇到障碍物,路径清0
mp[i][j] = 0
elif i == 0 and j == 0:
mp[i][j] = 1
elif i == 0:
mp[i][j] = mp[i][j - 1]
elif j == 0:
mp[i][j] = mp[i - 1][j]
else:
mp[i][j] = mp[i - 1][j] + mp[i][j - 1]
return mp[m - 1][n - 1]
if __name__ == '__main__':
    # Demo grid: 1 marks an obstacle; the single obstacle in the middle
    # leaves exactly two right/down paths.
    obstacleGrid = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0]
    ]
    print("初始网格:")
    for i in range(0, len(obstacleGrid)):
        print(obstacleGrid[i])
    solution = Solution()
    print("路径条数:", solution.uniquePathsWithObstacles(obstacleGrid))
"1325338208@qq.com"
] | 1325338208@qq.com |
de99595ec3ae77bc6d35d3e293f5235910a4d554 | c325db01e798fc1d985c4e40c42a4422cd59fd2a | /python/tf/pz_test.py | 31c60bb236378d8a2a617d5ed7ccb1b19af38e94 | [
"Apache-2.0"
] | permissive | google/carfac | 5078c910994dfddb8b4e068a42fab567551a6c55 | 75970ea10092e7fa32fb7d1a236cecb6dcfa796e | refs/heads/master | 2023-09-06T00:00:09.749292 | 2023-07-21T11:21:04 | 2023-07-21T11:21:48 | 11,507,786 | 99 | 39 | Apache-2.0 | 2023-04-17T09:49:31 | 2013-07-18T16:28:12 | Jupyter Notebook | UTF-8 | Python | false | false | 5,900 | py | #!/usr/bin/env python
# Copyright 2021 The CARFAC Authors. All Rights Reserved.
#
# This file is part of an implementation of Lyon's cochlear model:
# "Cascade of Asymmetric Resonators with Fast-Acting Compression"
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for carfac.python.tf.pz."""
from typing import Callable
import unittest
from absl import app
import numpy as np
import tensorflow as tf
from . import pz
class CoeffTest(unittest.TestCase):
    """Unit test for pz.coeffs_from_zeros."""

    def testCoeffs(self):
        """Checks polynomial coefficients expanded from a known zero set."""
        # We have a filter H, with poles P and zeros Q:
        # H = g * np.prod(Q - z) / np.prod(P - z)
        # Assuming Q = [1, 2, 3, 4, 5]:
        # H = g * (1 - z) * (2 - z) * (3 - z) * (4 - z) * (5 - z) / np.prod(P - z)
        #   = Y / X
        # Y = X * g * (1 - z) * (2 - z) * (3 - z) * (4 - z) * (5 - z) /
        #     np.prod(P - z)
        # Y = X * g * (z^-1 - 1) * (2 * z^-1 - 1) * (3 * z^-1 - 1) * (4 * z^-1 - 1)
        #     * (5 * z^-1 - 1) / (np.prod(P - z) * z^-5)
        # Y * np.prod(P - z) * z^-5 = X * (z^-1 - 1) * (2 * z^-1 - 1) *
        #     (3 * z^-1 - 1) * (4 * z^-1 - 1) * (5 * z^-1 - 1)
        # Y * np.prod(P - z) * z^-5 = X * (-1 + 15 * z^-1 - 85 * z^-2 + 225 * z^-3
        #     - 274 * z^-4 + 120 * z^-5)
        # Where (-1 + 15 * z^-1 - 85 * z^-2 + 225 * z^-3 - 274 * z^-4 + 120 * z^-5)
        #  = -(qc0 + qc1 * z^-1 + qc2 * z^-2 + qc3 * z^-3 + qc4 * z^-4 + qc5 *
        #      z^-5)
        # And coeffs_from_zeros returns [qc0, qc1, qc2, qc3, qc4, qc5] =>
        # [1, -15, 85, -225, 274, -120]
        inputs: tf.Tensor = tf.constant([1, 2, 3, 4, 5], dtype=tf.complex128)
        outputs: tf.Tensor = pz.coeffs_from_zeros(inputs)
        expected_outputs = [1, -15, 85, -225, 274, -120]
        np.testing.assert_array_almost_equal(outputs, expected_outputs)
class PZTest(unittest.TestCase):
    """Tests for the pole/zero RNN cell (pz.PZCell)."""

    def assert_impulse_response(self,
                                filt: Callable[[tf.Tensor],
                                               tf.Tensor],
                                dtype: tf.DType,
                                gain: tf.Tensor,
                                poles: tf.Tensor,
                                zeros: tf.Tensor):
        """Asserts that `filt`'s impulse response matches the analytic one.

        The expected response is computed in numpy by evaluating the
        transfer function g * prod(zeros - z) / prod(poles - z) on the unit
        circle and inverse-FFTing it back to the time domain.
        """
        window_size = 64
        impulse: np.ndarray = np.zeros([window_size], dtype=np.float32)
        impulse[0] = 1
        impulse_spectrum: np.ndarray = np.fft.fft(impulse)
        # Sample points on the unit circle (one per FFT bin).
        z: np.ndarray = np.exp(np.linspace(0,
                                           2 * np.pi,
                                           window_size,
                                           endpoint=False) * 1j)
        transfer_function: np.ndarray = (
            tf.cast(gain, tf.complex128) *
            np.prod(zeros[None, :] - z[:, None],
                    axis=1) /
            np.prod(poles[None, :] - z[:, None],
                    axis=1))
        expected_impulse_response: np.ndarray = np.fft.ifft(
            impulse_spectrum * transfer_function)
        # Since the filter requires batch and cell i/o dimensions.
        impulse_response = filt(tf.cast(impulse[None, :, None], dtype))[0, :, 0]
        np.testing.assert_array_almost_equal(impulse_response,
                                             expected_impulse_response)

    def testPZCell(self):
        """Checks the cell's impulse response for conjugate pole/zero pairs."""
        for dtype in [tf.float32, tf.float64]:
            # Build conjugate-symmetric pole/zero sets so the filter is real.
            poles: np.ndarray = 0.5 * np.exp([np.pi * 0.5j])
            poles: tf.Tensor = tf.concat([poles, tf.math.conj(poles)], axis=0)
            zeros: np.ndarray = 0.75 * np.exp([np.pi * 0.25j])
            zeros: tf.Tensor = tf.concat([zeros, tf.math.conj(zeros)], axis=0)
            gain: tf.Tensor = tf.constant(1.5)
            pz_cell = pz.PZCell(gain,
                                poles,
                                zeros,
                                dtype=dtype)
            pz_layer = tf.keras.layers.RNN(pz_cell,
                                           return_sequences=True,
                                           dtype=dtype)
            self.assert_impulse_response(pz_layer, dtype, gain, poles, zeros)

    def testTFFunction(self):
        """Same as testPZCell but with the layer wrapped in a tf.function."""
        for dtype in [tf.float32, tf.float64]:
            poles: np.ndarray = 0.1 * np.exp(np.pi * np.array([0.7j]))
            poles: tf.Tensor = tf.concat([poles, tf.math.conj(poles)], axis=0)
            zeros: np.ndarray = 0.75 * np.exp(np.pi * np.array([0.25j]))
            zeros: tf.Tensor = tf.concat([zeros, tf.math.conj(zeros)], axis=0)
            gain: tf.Tensor = tf.constant(2.4)
            pz_cell = pz.PZCell(gain,
                                poles,
                                zeros,
                                dtype=dtype)
            pz_layer = tf.keras.layers.RNN(pz_cell,
                                           return_sequences=True,
                                           dtype=dtype)

            @tf.function
            def compute(inputs):
                # pylint: disable=cell-var-from-loop
                return pz_layer(inputs)
            self.assert_impulse_response(compute, dtype, gain, poles, zeros)

    def testGradients(self):
        """Checks gradients w.r.t. poles, zeros and gain are finite after
        unrolling the cell for several steps."""
        tape = tf.GradientTape(persistent=True)
        pz_cell = pz.PZCell(1,
                            0.5 * np.exp([np.pi * 0.2j, np.pi * 0.5j]),
                            0.3 * np.exp([np.pi * 0.6j]))
        with tape:
            current: tf.Tensor = tf.ones([2, 1], dtype=pz_cell.dtype)
            state = tuple(tf.zeros(shape=[current.shape[0], size],
                                   dtype=pz_cell.dtype)
                          for size in pz_cell.state_size)
            for _ in range(6):
                current, state = pz_cell.call(current, state)
        for v in [pz_cell.poles, pz_cell.zeros, pz_cell.gain]:
            self.assertTrue(np.isfinite(tape.gradient(current, v)).all())
def main(_):
    """absl.app entry point: delegates to the unittest runner."""
    unittest.main()
if __name__ == '__main__':
    # absl.app parses flags before handing control to main().
    app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
d397b095f585d87c25f16b3b261c5ded67721af1 | e756e1c11109a23846f97dc54d7d405fcbe79fa9 | /multiworld/envs/mujoco/__init__.py | b541191bb841cfab07147899f3f45c4eec1317ef | [
"MIT"
] | permissive | dennisl88/multiworld | 1fdf606bea11c3101d2b45500d78a54481ca6535 | fdb1e8e40b502f90cc377a3a4c1877e6865f935f | refs/heads/master | 2020-03-27T09:38:00.744244 | 2018-08-28T02:02:28 | 2018-08-28T02:02:28 | 146,358,446 | 0 | 0 | null | 2018-08-27T21:53:13 | 2018-08-27T21:53:13 | null | UTF-8 | Python | false | false | 11,055 | py | import gym
from gym.envs.registration import register
import logging
LOGGER = logging.getLogger(__name__)
_REGISTERED = False
def register_custom_envs():
    """Register all multiworld Sawyer mujoco environments with gym.

    Idempotent: a module-level flag guards against double registration.
    The bare triple-quoted strings inside the body are section headers,
    not docstrings; they are harmless expression statements.
    The `create_*` entry-point callables referenced below are defined
    later in this module, which is fine because they are only invoked
    when gym.make() is called.
    """
    global _REGISTERED
    if _REGISTERED:
        return
    _REGISTERED = True
    LOGGER.info("Registering multiworld mujoco gym environments")

    """
    Reaching tasks
    """
    register(
        id='SawyerReachXYEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_reach:SawyerReachXYEnv',
        tags={
            'git-commit-hash': 'c5e15f7',
            'author': 'vitchyr'
        },
        kwargs={
            'hide_goal_markers': False,
        },
    )
    register(
        id='Image48SawyerReachXYEnv-v0',
        entry_point=create_image_48_sawyer_reach_xy_env_v0,
        tags={
            'git-commit-hash': 'c5e15f7',
            'author': 'vitchyr'
        },
    )
    register(
        id='Image84SawyerReachXYEnv-v0',
        entry_point=create_image_84_sawyer_reach_xy_env_v0,
        tags={
            'git-commit-hash': 'c5e15f7',
            'author': 'vitchyr'
        },
    )

    """
    Pushing tasks, XY, With Reset
    """
    register(
        id='SawyerPushAndReacherXYEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '3503e9f',
            'author': 'vitchyr'
        },
        kwargs=dict(
            hide_goal_markers=True,
            action_scale=.02,
            puck_low=[-0.25, .4],
            puck_high=[0.25, .8],
            mocap_low=[-0.2, 0.45, 0.],
            mocap_high=[0.2, 0.75, 0.5],
            goal_low=[-0.2, 0.45, 0.02, -0.25, 0.4],
            goal_high=[0.2, 0.75, 0.02, 0.25, 0.8],
        )
    )
    register(
        id='Image48SawyerPushAndReacherXYEnv-v0',
        entry_point=create_Image48SawyerPushAndReacherXYEnv_v0,
        tags={
            'git-commit-hash': '3503e9f',
            'author': 'vitchyr'
        },
    )
    register(
        id='SawyerPushAndReachXYEasyEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': 'fec148f',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='puck_distance',
            reset_free=False,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.05, 0.4, 0.02, -.1, .5),
            goal_high=(0.05, 0.7, 0.02, .1, .7),
        )
    )
    register(
        id='Image48SawyerPushAndReachXYEasyEnv-v0',
        entry_point=create_image_48_sawyer_reach_and_reach_xy_easy_env_v0,
        tags={
            'git-commit-hash': 'fec148f',
            'author': 'vitchyr'
        },
    )

    """
    Pushing tasks, XY, Reset Free
    """
    register(
        id='SawyerPushAndReacherXYEnv-ResetFree-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '3d4adbe',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='puck_distance',
            reset_free=True,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
        )
    )
    register(
        id='SawyerPushXYEnv-ResetFree-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '33c6b71',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='puck_distance',
            reset_free=True,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
        )
    )
    register(
        id='SawyerPushAndReachXYEnv-ResetFree-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '33c6b71',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='state_distance',
            reset_free=True,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
            num_resets_before_puck_reset=1,
        )
    )
    register(
        id='SawyerPushAndReachXYEnv-ResetFree-Every1B-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '33c6b71',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='state_distance',
            reset_free=True,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
            num_resets_before_puck_reset=int(1e9),
        )
    )
    register(
        id='SawyerPushAndReachXYEnv-ResetFree-Every2-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '33c6b71',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='state_distance',
            reset_free=True,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
            num_resets_before_puck_reset=2,
        )
    )
    register(
        id='SawyerPushAndReachXYEnv-ResetFree-Every3-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env:SawyerPushAndReachXYEnv',
        tags={
            'git-commit-hash': '33c6b71',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='state_distance',
            reset_free=True,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
            num_resets_before_puck_reset=3,
        )
    )

    """
    Push XYZ
    """
    register(
        id='SawyerPushXyzEasyEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_push_and_reach_env:SawyerPushAndReachXYZEnv',
        tags={
            'git-commit-hash': 'f7d1e91',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='puck_distance',
            reset_free=False,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.05, 0.4, 0.02, -.1, .5),
            goal_high=(0.05, 0.7, 0.02, .1, .7),
        )
    )
    register(
        id='SawyerPushAndReachXyzEasyEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_push_and_reach_env:SawyerPushAndReachXYZEnv',
        tags={
            'git-commit-hash': 'f7d1e91',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='state_distance',
            reset_free=False,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.05, 0.4, 0.02, -.1, .5),
            goal_high=(0.05, 0.7, 0.02, .1, .7),
        )
    )
    register(
        id='SawyerPushXyzFullArenaEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_push_and_reach_env:SawyerPushAndReachXYZEnv',
        tags={
            'git-commit-hash': 'f7d1e91',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='puck_distance',
            reset_free=False,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
        )
    )
    register(
        id='SawyerPushAndReachXyzFullArenaEnv-v0',
        entry_point='multiworld.envs.mujoco.sawyer_xyz'
                    '.sawyer_push_and_reach_env:SawyerPushAndReachXYZEnv',
        tags={
            'git-commit-hash': 'f7d1e91',
            'author': 'vitchyr'
        },
        kwargs=dict(
            reward_type='state_distance',
            reset_free=False,
            hand_low=(-0.28, 0.3, 0.05),
            hand_high=(0.28, 0.9, 0.3),
            puck_low=(-.4, .2),
            puck_high=(.4, 1),
            goal_low=(-0.25, 0.3, 0.02, -.2, .4),
            goal_high=(0.25, 0.875, 0.02, .2, .8),
        )
    )
def create_image_48_sawyer_reach_xy_env_v0():
    """Wrap the state-based SawyerReachXYEnv-v0 in a 48x48 image env."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_xyz_reacher_camera

    base_env = gym.make('SawyerReachXYEnv-v0')
    return ImageEnv(base_env,
                    48,
                    init_camera=sawyer_xyz_reacher_camera,
                    transpose=True,
                    normalize=True)
def create_image_84_sawyer_reach_xy_env_v0():
    """Wrap the state-based SawyerReachXYEnv-v0 in an 84x84 image env."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_xyz_reacher_camera

    base_env = gym.make('SawyerReachXYEnv-v0')
    return ImageEnv(base_env,
                    84,
                    init_camera=sawyer_xyz_reacher_camera,
                    transpose=True,
                    normalize=True)
def create_image_48_sawyer_reach_and_reach_xy_easy_env_v0():
    """Wrap SawyerPushAndReachXYEasyEnv-v0 in a 48x48 image env."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2

    base_env = gym.make('SawyerPushAndReachXYEasyEnv-v0')
    return ImageEnv(base_env,
                    48,
                    init_camera=sawyer_pusher_camera_upright_v2,
                    transpose=True,
                    normalize=True)
def create_Image48SawyerPushAndReacherXYEnv_v0():
    """Wrap SawyerPushAndReacherXYEnv-v0 in a 48x48 top-down image env."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_top_down

    base_env = gym.make('SawyerPushAndReacherXYEnv-v0')
    return ImageEnv(base_env,
                    48,
                    init_camera=sawyer_pusher_camera_top_down,
                    transpose=True,
                    normalize=True)
register_custom_envs()
| [
"vitchyr@gmail.com"
] | vitchyr@gmail.com |
24cbc1db1201293c58a19ff262bad4e6793375b8 | 2a97a5816f79282878855c7355f7400a36ac1839 | /UTKFace/UTKFace_128x128/CcGAN-improved/models/ResNet_regre_eval.py | 8efb719cfe621bc6eeb99a39d9c5f8bc9683c091 | [] | no_license | simonlevine/improved_CcGAN | 309040cb7ec74b5ef68c3b31f6a32e715df3029e | 3f2660c4a466240b7b3896e8e2ce7aaad759862a | refs/heads/master | 2023-08-13T02:18:55.327856 | 2021-09-24T07:56:48 | 2021-09-24T07:56:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,190 | py | '''
codes are based on
@article{
zhang2018mixup,
title={mixup: Beyond Empirical Risk Minimization},
author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
journal={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=r1Ddp1-Rb},
}
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
NC = 3          # number of input image channels (RGB)
IMG_SIZE = 128  # expected square input resolution used by the smoke test
class BasicBlock(nn.Module):
    """Two 3x3 conv residual block; a 1x1 projection shortcut is used when
    the spatial stride or channel count changes.

    Submodule attribute names and creation order are kept identical to the
    original so state_dicts remain compatible.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        out_planes = self.expansion * planes
        if stride != 1 or in_planes != out_planes:
            # Project the identity path so shapes match for the addition.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block with 4x channel
    expansion on the output; a 1x1 projection shortcut is used when the
    stride or channel count changes.

    Submodule attribute names and creation order are kept identical to the
    original so state_dicts remain compatible.
    """
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        out_planes = self.expansion * planes
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        if stride != 1 or in_planes != out_planes:
            # Project the identity path so shapes match for the addition.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        identity = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return F.relu(y + identity)
class ResNet_regre_eval(nn.Module):
    """ResNet backbone that regresses a single non-negative scalar per image
    and additionally returns pooled intermediate features.

    `forward` returns a tuple ``(out, ext_features)`` where `out` is the
    regression output (final ReLU keeps it >= 0) and `ext_features` are
    spatially pooled activations from the stage selected by
    `feature_layer` ('f2', 'f3', or anything else for the last stage).
    """
    def __init__(self, block, num_blocks, nc=NC, ngpu = 1, feature_layer='f3'):
        super(ResNet_regre_eval, self).__init__()
        self.in_planes = 64
        self.ngpu = ngpu  # >1 enables nn.parallel.data_parallel in forward
        self.feature_layer=feature_layer

        # Stem + first residual stage; the comments track the spatial size
        # for a 128x128 input.
        self.block1 = nn.Sequential(
            nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False), # h=h
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2,2), #h=h/2 64
            self._make_layer(block, 64, num_blocks[0], stride=2), # h=h/2 32
        )
        self.block2 = self._make_layer(block, 128, num_blocks[1], stride=2) # h=h/2 16
        self.block3 = self._make_layer(block, 256, num_blocks[2], stride=2) # h=h/2 8
        self.block4 = self._make_layer(block, 512, num_blocks[3], stride=2) # h=h/2 4

        self.pool1 = nn.AvgPool2d(kernel_size=4)
        # NOTE(review): 'f2' and 'f3' use the same (2, 2) adaptive pooling;
        # confirm this duplication is intentional.
        if self.feature_layer == 'f2':
            self.pool2 = nn.AdaptiveAvgPool2d((2,2))
        elif self.feature_layer == 'f3':
            self.pool2 = nn.AdaptiveAvgPool2d((2,2))
        else:
            self.pool2 = nn.AdaptiveAvgPool2d((1,1))

        # Regression head; the final ReLU constrains the output to be >= 0.
        linear_layers = [
                nn.Linear(512*block.expansion, 128),
                nn.BatchNorm1d(128),
                nn.ReLU(),
                nn.Linear(128, 128),
                nn.BatchNorm1d(128),
                nn.ReLU(),
                nn.Linear(128, 1),
                # nn.Sigmoid()
                nn.ReLU(),
        ]
        self.linear = nn.Sequential(*linear_layers)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` residual blocks; only the first one strides."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """Returns (regression output, pooled intermediate features)."""
        if x.is_cuda and self.ngpu > 1:
            # Multi-GPU path: run each stage through data_parallel.
            ft1 = nn.parallel.data_parallel(self.block1, x, range(self.ngpu))
            ft2 = nn.parallel.data_parallel(self.block2, ft1, range(self.ngpu))
            ft3 = nn.parallel.data_parallel(self.block3, ft2, range(self.ngpu))
            ft4 = nn.parallel.data_parallel(self.block4, ft3, range(self.ngpu))
            out = nn.parallel.data_parallel(self.pool1, ft4, range(self.ngpu))
            out = out.view(out.size(0), -1)
            out = nn.parallel.data_parallel(self.linear, out, range(self.ngpu))
        else:
            ft1 = self.block1(x)
            ft2 = self.block2(ft1)
            ft3 = self.block3(ft2)
            ft4 = self.block4(ft3)
            out = self.pool1(ft4)
            out = out.view(out.size(0), -1)
            out = self.linear(out)

        # Pool the stage selected at construction time for feature extraction.
        if self.feature_layer == 'f2':
            ext_features = self.pool2(ft2)
        elif self.feature_layer == 'f3':
            ext_features = self.pool2(ft3)
        else:
            ext_features = self.pool2(ft4)

        ext_features = ext_features.view(ext_features.size(0), -1)

        return out, ext_features
def ResNet18_regre_eval(ngpu = 1):
    """ResNet-18 regression evaluator (BasicBlock, [2, 2, 2, 2])."""
    return ResNet_regre_eval(BasicBlock, [2,2,2,2], ngpu = ngpu)
def ResNet34_regre_eval(ngpu = 1):
    """ResNet-34 regression evaluator (BasicBlock, [3, 4, 6, 3])."""
    return ResNet_regre_eval(BasicBlock, [3,4,6,3], ngpu = ngpu)
def ResNet50_regre_eval(ngpu = 1):
    """ResNet-50 regression evaluator (Bottleneck, [3, 4, 6, 3])."""
    return ResNet_regre_eval(Bottleneck, [3,4,6,3], ngpu = ngpu)
def ResNet101_regre_eval(ngpu = 1):
    """ResNet-101 regression evaluator (Bottleneck, [3, 4, 23, 3])."""
    return ResNet_regre_eval(Bottleneck, [3,4,23,3], ngpu = ngpu)
def ResNet152_regre_eval(ngpu = 1):
    """ResNet-152 regression evaluator (Bottleneck, [3, 8, 36, 3])."""
    return ResNet_regre_eval(Bottleneck, [3,8,36,3], ngpu = ngpu)
if __name__ == "__main__":
net = ResNet34_regre_eval(ngpu = 1).cuda()
x = torch.randn(4,NC,IMG_SIZE,IMG_SIZE).cuda()
out, features = net(x)
print(out.size())
print(features.size())
| [
"dingx92@gmail.com"
] | dingx92@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.