blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57f7e7a98efc2ba19453ca164616ca915aa1d1b1 | f66a33f8cdd8286320da730be67c89ee00d83d8d | /ext/libelf/SConscript | 535e216ddf1152e87d570cb4615bde289b7d97d3 | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | H2020-COSSIM/cgem5 | 0d5812632757e6146f7852c9bf4abe4e9628296a | 1222cc0c5618875e048f288e998187c236508a64 | refs/heads/main | 2023-05-13T14:08:01.665322 | 2023-05-08T08:39:50 | 2023-05-08T08:39:50 | 468,039,890 | 3 | 2 | BSD-3-Clause | 2022-10-12T14:29:33 | 2022-03-09T18:05:40 | C++ | UTF-8 | Python | false | false | 5,084 | # -*- mode:python -*-
# Copyright (c) 2004-2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os, subprocess
Import('env')
# Accumulates the SCons File nodes for every libelf translation unit.
elf_files = []

def ElfFile(filename):
    # Register one libelf source file (as a SCons File node) for the build.
    elf_files.append(File(filename))
# Source files that make up libelf. Registering them through a single loop
# keeps the list easy to scan and extend (previously 59 separate
# ElfFile(...) calls). Order is preserved from the original list.
for fname in [
    'elf.c',
    'elf_begin.c',
    'elf_cntl.c',
    'elf_data.c',
    'elf_end.c',
    'elf_errmsg.c',
    'elf_errno.c',
    'elf_fill.c',
    'elf_flag.c',
    'elf_getarhdr.c',
    'elf_getarsym.c',
    'elf_getbase.c',
    'elf_getident.c',
    'elf_hash.c',
    'elf_kind.c',
    'elf_memory.c',
    'elf_next.c',
    'elf_open.c',
    'elf_phnum.c',
    'elf_rand.c',
    'elf_rawfile.c',
    'elf_scn.c',
    'elf_shnum.c',
    'elf_shstrndx.c',
    'elf_strptr.c',
    'elf_update.c',
    'elf_version.c',
    'gelf_cap.c',
    'gelf_checksum.c',
    'gelf_dyn.c',
    'gelf_ehdr.c',
    'gelf_fsize.c',
    'gelf_getclass.c',
    'gelf_move.c',
    'gelf_phdr.c',
    'gelf_rel.c',
    'gelf_rela.c',
    'gelf_shdr.c',
    'gelf_sym.c',
    'gelf_syminfo.c',
    'gelf_symshndx.c',
    'gelf_xlate.c',
    'libelf.c',
    'libelf_align.c',
    'libelf_allocate.c',
    'libelf_ar.c',
    'libelf_ar_util.c',
    'libelf_checksum.c',
    'libelf_data.c',
    'libelf_ehdr.c',
    'libelf_extended.c',
    'libelf_memory.c',
    'libelf_open.c',
    'libelf_phdr.c',
    'libelf_shdr.c',
    'libelf_xlate.c',
    'libelf_convert.c',
    'libelf_fsize.c',
    'libelf_msize.c',
]:
    ElfFile(fname)
# Clone the environment so libelf-specific compiler/M4 settings do not leak
# into the rest of the build.
m4env = env.Clone()
if m4env['GCC']:
    # Silence warnings triggered by the imported (third-party) libelf code.
    m4env.Append(CCFLAGS=['-Wno-pointer-sign',
                          '-Wno-unused-but-set-variable',
                          '-Wno-implicit-function-declaration',
                          '-Wno-override-init'])
if m4env['CLANG']:
    m4env.Append(CCFLAGS=['-Wno-initializer-overrides', '-Wno-pointer-sign'])
    # clang defaults to c99 (while gcc defaults to gnu89) and there is a
    # difference in the handling of inlining functions which causes
    # linking problems with multiple definitions of the symbols in
    # sysmacros.h for older versions of glibc
    m4env.Append(CCFLAGS=['-std=gnu89'])
m4env.Append(CCFLAGS=['-Wno-implicit', '-Wno-undef'])
# libelf must not pick up the project's include paths.
del m4env['CPPPATH']

# If we have gm4 use it
if m4env.Detect('gm4'):
    m4env['M4'] = 'gm4'

# Check that m4 is available
import SCons.Tool.m4
if not SCons.Tool.m4.exists(m4env):
    print("Error: Can't find version of M4 macro processor. " +
          "Please install M4 and try again.")
    Exit(1)

# Setup m4 tool
m4env.Tool('m4')
# SRCDIR lets the .m4 templates locate their includes in the source tree.
m4env.Append(M4FLAGS=['-DSRCDIR=%s' % Dir('.').path])
m4env['M4COM'] = '$M4 $M4FLAGS $SOURCES > $TARGET'
# Generate the three type-conversion/size C sources from the M4 templates.
m4env.M4(target=File('libelf_convert.c'),
         source=[File('elf_types.m4'), File('libelf_convert.m4')])
m4env.M4(target=File('libelf_fsize.c'),
         source=[File('elf_types.m4'), File('libelf_fsize.m4')])
m4env.M4(target=File('libelf_msize.c'),
         source=[File('elf_types.m4'), File('libelf_msize.m4')])
m4env.Append(CPPPATH=Dir('.'))

# Build libelf as a static library with PIC code so it can be linked
# into either m5 or the library
m4env.Library('elf', [m4env.SharedObject(f) for f in elf_files])

# Generate the native-elf-format header file based on the build system
m4env.Command(File('native-elf-format.h'), File('native-elf-format'),
              '${SOURCE} > ${TARGET}')

# Expose libelf headers and the built archive to the parent environment.
env.Prepend(CPPPATH=Dir('.'))
env.Append(LIBS=[File('libelf.a')])
env.Prepend(LIBPATH=[Dir('.')])
| [
"ntampouratzis@isc.tuc.gr"
] | ntampouratzis@isc.tuc.gr | |
f091fda178c17b28a06ec0aab0bf657492ab6016 | 0556754cd4765d05a1d831c48933c5f299bb095d | /Dec-18-2020/ThreadByExtending.py | 12369dbbe3bf5dd33bbdee68914a580058d7eefa | [] | no_license | rohitbhatghare/python | 4fa5e5883743023ced841892a13a9798b7686f39 | 248d265e02ecbc1270a87081af26537eb401e535 | refs/heads/main | 2023-02-03T04:32:15.716805 | 2020-12-21T11:33:27 | 2020-12-21T11:33:27 | 302,831,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | from threading import *
class Mythread(Thread):
    """Thread subclass whose body prints a fixed message ten times."""

    def run(self):
        # Same line ten times; the loop counter itself is never used.
        for _ in range(10):
            print("child class-1")
# Start the child thread; its run() executes concurrently with the loop
# below, so "child class-1" and "main thread-1" lines may interleave.
t = Mythread()
t.start()
for i in range(10):
    print("main thread-1")
# NOTE(review): there is no t.join() here; the interpreter still waits for
# the non-daemon child thread to finish before the process exits.
| [
"noreply@github.com"
] | rohitbhatghare.noreply@github.com |
2e78bcd54c647bec3744feb1502e4fef91bc5733 | 84de9423c003e22631a549dd767f7f88006f73d5 | /tests/tools/profile/runtest.py | 3c664934c55eae96aa200b0add25463fb790bb5a | [
"Apache-2.0"
] | permissive | Go0zx/NyuziProcessor | 7c23cdad06cff0e0d6f77264e54b1fa826231e91 | 35264d91dafbf0455e551e3e1f3cd1a0f429c991 | refs/heads/master | 2020-08-07T20:51:04.594123 | 2019-10-05T13:07:29 | 2019-10-05T15:27:20 | 213,583,543 | 1 | 0 | Apache-2.0 | 2019-10-08T08:04:20 | 2019-10-08T08:04:19 | null | UTF-8 | Python | false | false | 2,386 | py | #!/usr/bin/env python3
#
# Copyright 2017 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test profiling capabilities of hardware simulator."""
import os
import subprocess
import sys
sys.path.insert(0, '../..')
import test_harness
@test_harness.test(['verilator'])
def profile(*unused):
    """Run a workload under Verilator and sanity-check the profiler tool.

    Builds test_program.c, runs it while collecting samples into
    profile.out, dumps a symbol table with llvm-objdump, then feeds both to
    tools/profile.py and checks that functions which run roughly twice as
    long accumulate roughly twice as many samples.
    """
    hexfile = test_harness.build_program(['test_program.c'])
    elffile = test_harness.get_elf_file_for_hex(hexfile)
    profile_file = os.path.join(test_harness.WORK_DIR, 'profile.out')
    test_harness.run_program(hexfile, 'verilator', profile_file=profile_file)

    # Dump the ELF symbol table; profile.py uses it to map sampled PCs
    # back to function names.
    symbol_file = os.path.join(test_harness.WORK_DIR, 'symbols.txt')
    objdump_args = [
        os.path.join(test_harness.COMPILER_BIN_DIR, 'llvm-objdump'),
        '-t', elffile
    ]
    symbols = subprocess.check_output(objdump_args)
    with open(symbol_file, 'w') as f:
        f.write(symbols.decode())

    profile_args = [
        os.path.join(test_harness.TOOL_BIN_DIR, 'profile.py'),
        symbol_file,
        profile_file
    ]
    # Fix: pass the argument list directly instead of joining it into a
    # single string with shell=True. This removes an unnecessary shell
    # invocation and avoids breakage if any path contains spaces or shell
    # metacharacters.
    profile_output = subprocess.check_output(profile_args)
    profile_lines = profile_output.decode().split('\n')
    profile_tuples = [line.split() for line in profile_lines if line]
    profile_map = {func: int(count) for count, _, func in profile_tuples}

    # These tests don't end up being exactly 2x the number of samples. Because
    # the system samples randomly, it can vary. I could have ran the test longer
    # to get more samples, but this is really just a smoke test and I didn't want
    # to bloat the test time unnecessarily.
    loop5k = profile_map['loop5000']
    loop10k = profile_map['loop10000']
    loop20k = profile_map['loop20000']
    test_harness.assert_greater(loop5k, 0)
    test_harness.assert_greater(loop10k, loop5k * 1.75)
    test_harness.assert_greater(loop20k, loop10k * 1.75)


test_harness.execute_tests()
| [
"jeffbush001@gmail.com"
] | jeffbush001@gmail.com |
91a4269e0b9f9229eff2dc342717525c19908503 | 84f2d9c40ec5816aa208e4a4877a78087cc2980f | /manage.py | b6cc3ab41d4435f743e02d42b1676fd54c111e1a | [] | no_license | shanthimadugundi/heroku | 87dde40e791dfffad4ba5b7a42d54c6d849e94e7 | 4ec48ff331598bd3f2fdc12de374915d55257ec1 | refs/heads/master | 2020-04-27T11:00:10.983817 | 2019-03-07T06:00:11 | 2019-03-07T06:00:11 | 174,279,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings module before anything
    # imports django.conf.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'madugundishanthisite.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Non-standard addition to the stock manage.py: suppress .pyc creation.
    sys.dont_write_bytecode = True
    execute_from_command_line(sys.argv)
| [
"shanthimadugundi@Shanthis-MacBook-Pro.local"
] | shanthimadugundi@Shanthis-MacBook-Pro.local |
88096a5c9910e54d7fb1b3d7865008e8ba9acb34 | 696799b824503429a3ac65ebdc28890bfbcaebe0 | /plugins/com.astra.ses.spell.gui.cots_4.0.2.201806070922/win32/spell/spell/lib/dummy/config.py | bde2e0f7b4c61e25d9d920099ca1f2b5a02f09a6 | [] | no_license | CalypsoCubesat/SPELL_GUI_4.0.2_win32_x86 | a176886b48873b090ab270c189113a8b2c261a06 | 9275ecfff2195ca4d4c297f894d80c1bcfa609e3 | refs/heads/master | 2021-08-03T08:04:25.821703 | 2019-10-28T04:53:50 | 2019-10-28T04:53:50 | 217,968,357 | 0 | 0 | null | 2021-08-02T17:03:44 | 2019-10-28T04:50:59 | Python | UTF-8 | Python | false | false | 3,382 | py | ###############################################################################
"""
(c) SES-ASTRA 2008
PACKAGE
spell.lib.adapter.config
FILE
config.py
DESCRIPTION
Setup environment for correct core driver instantiation
COPYRIGHT
This software is the copyrighted work of SES ASTRA S.A.
All rights reserved.
PROJECT
UGCS/USL
AUTHOR
Rafael Chinchilla Camara (GMV)
DATE
01/10/2007
"""
###############################################################################
#*******************************************************************************
# SPELL Imports
#*******************************************************************************
from spell.utils.log import *
from spell.config.reader import *
from spell.config.constants import COMMON
from spell.lib.registry import REGISTRY
from spell.lib.exception import DriverException
#*******************************************************************************
# Local Imports
#*******************************************************************************
from interface.model import SimulatorModel
#*******************************************************************************
# System Imports
#*******************************************************************************
import os
###############################################################################
# Module import definition
__all__ = ['CONFIG']
INTERFACE_DEFAULTS = {}
###############################################################################
# Superclass
import spell.lib.adapter.config
superClass = spell.lib.adapter.config.ConfigInterface
###############################################################################
class ConfigInterface(superClass):
    """Configuration interface for the standalone (dummy) driver.

    Extends the generic adapter ConfigInterface; on setup it builds the
    SimulatorModel backing the TM/TC interfaces and publishes it in the
    REGISTRY under the 'SIM' key, and on cleanup it tears it down again.
    """

    #==========================================================================
    def __init__(self):
        superClass.__init__(self)
        LOG("Created")

    #==========================================================================
    def setup(self, contextName):
        """Prepare the driver configuration for the given context.

        Resolves the simulation definition file from the context/driver
        configuration, creates and initializes the SimulatorModel, and
        registers it as REGISTRY['SIM'].
        """
        superClass.setup(self, contextName)
        LOG("Setup standalone CFG interface")
        dataPath = Config.instance().getRuntimeDir()
        driver = Config.instance().getContextConfig(contextName).getDriver()
        driverInfo = Config.instance().getDriverConfig(driver)
        simulationPath = driverInfo['SimPath']
        simulationFile = Config.instance().getContextConfig(contextName).getDriverConfig('Simulation')
        # 'home' is only checked for presence; its value is not used here.
        home = Config.instance().getHome()
        if home is None:
            raise DriverException("SPELL home is not defined")
        LOG("Loading simulation: " + simulationFile)
        # Full path: <runtime dir>/<driver sim path>/<simulation file>
        simulationFile = dataPath + os.sep + simulationPath + \
                         os.sep + simulationFile
        SIM = SimulatorModel()
        # Wire the simulator to the TM/TC interfaces already in the registry.
        SIM.tmClass = REGISTRY['TM']
        SIM.tcClass = REGISTRY['TC']
        SIM.setup( simulationFile )
        REGISTRY['SIM'] = SIM

    #==========================================================================
    def cleanup(self, shutdown = False):
        """Release driver resources and unregister the simulator model."""
        superClass.cleanup(self, shutdown)
        LOG("Cleanup standalone CFG interface")
        REGISTRY['SIM'].cleanup()
        REGISTRY.remove('SIM')
###############################################################################
# Interface instance
CONFIG = ConfigInterface()
| [
"matthew.travis@aresinstitute.org"
] | matthew.travis@aresinstitute.org |
09431e04feee5173a0495644128709b76257ecd9 | 9eec7de3670cb6a53dd2e1ac16891eed45cc796e | /xhose/urls.py | 1405be00d3dccdc6388acd60de4e74ae99adcfac | [] | no_license | rudiq4/landing-page | e7c891c9d4220b74cb88b9e86e341f0faf05627d | f4ab2eb4d80620b6fbaec3d04c79c23949375bd4 | refs/heads/master | 2020-03-24T20:56:12.380552 | 2018-07-31T11:11:35 | 2018-07-31T11:11:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from django.conf.urls import url
from xhose import views
# Namespace used when reversing URLs, e.g. reverse('xhose:xhose').
app_name = 'xhose'

urlpatterns = [
    # Main page of the xhose app.
    url(r'^xhose/', views.xhose, name='xhose'),
    # url(r'^contacts/$', views.contacts, name='contacts'),
]
"rudikvovan@gmail.com"
] | rudikvovan@gmail.com |
62476c413ee5a0c5b0c4c85e0d4bb4c3eec16108 | 9f86b4a4e31affb497dcc500ea45a57589f2f533 | /detectron2/layers/nms.py | e29435e77b9edca7f01da5e4627352c81bd2920a | [] | permissive | ishann/detectron2 | a68ae39960dc2594dd971908176074ac5af8b1ba | a52fcd7f6d5ebd334508d59823dbda9f81f2cd0e | refs/heads/master | 2020-08-10T05:58:21.115916 | 2019-12-09T01:16:00 | 2019-12-09T01:16:00 | 214,275,928 | 0 | 0 | Apache-2.0 | 2019-10-10T20:08:02 | 2019-10-10T20:08:02 | null | UTF-8 | Python | false | false | 6,572 | py | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torchvision.ops import boxes as box_ops
from torchvision.ops import nms # BC-compat
def batched_nms(boxes, scores, idxs, iou_threshold):
    """
    Drop-in replacement for torchvision.ops.boxes.batched_nms that, for very
    large box counts, runs NMS per category instead of relying on the
    coordinate-offset trick.
    """
    assert boxes.shape[-1] == 4

    # Small inputs: defer directly to the torchvision implementation.
    # TODO may need better strategy.
    # Investigate after having a fully-cuda NMS op.
    if len(boxes) < 40000:
        return box_ops.batched_nms(boxes, scores, idxs, iou_threshold)

    # Large inputs: suppress independently within each category, then merge.
    keep_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
    for class_id in torch.unique(idxs).cpu().tolist():
        curr_indices = (idxs == class_id).nonzero().view(-1)
        curr_keep = nms(boxes[curr_indices], scores[curr_indices], iou_threshold)
        keep_mask[curr_indices[curr_keep]] = True
    kept = keep_mask.nonzero().view(-1)
    # Match torchvision's contract: indices ordered by decreasing score.
    return kept[scores[kept].argsort(descending=True)]
# Note: this function (nms_rotated) might be moved into
# torchvision/ops/boxes.py in the future
def nms_rotated(boxes, scores, iou_threshold):
    """
    Performs non-maximum suppression (NMS) on the rotated boxes according
    to their intersection-over-union (IoU).

    Rotated NMS iteratively removes lower scoring rotated boxes which have an
    IoU greater than iou_threshold with another (higher scoring) rotated box.

    Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as
    RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they
    can be representing completely different objects in certain tasks, e.g., OCR.

    As for the question of whether rotated-NMS should treat them as faraway boxes
    even though their IOU is 1, it depends on the application and/or ground truth annotation.

    As an extreme example, consider a single character v and the square box around it.

    If the angle is 0 degree, the object (text) would be read as 'v';

    If the angle is 90 degrees, the object (text) would become '>';

    If the angle is 180 degrees, the object (text) would become '^';

    If the angle is 270/-90 degrees, the object (text) would become '<'

    All of these cases have IoU of 1 to each other, and rotated NMS that only
    uses IoU as criterion would only keep one of them with the highest score -
    which, practically, still makes sense in most cases because typically
    only one of these orientations is the correct one. Also, it does not matter
    as much if the box is only used to classify the object (instead of transcribing
    them with a sequential OCR recognition model) later.

    On the other hand, when we use IoU to filter proposals that are close to the
    ground truth during training, we should definitely take the angle into account if
    we know the ground truth is labeled with the strictly correct orientation (as in,
    upside-down words are annotated with -180 degrees even though they can be covered
    with a 0/90/-90 degree box, etc.)

    The way the original dataset is annotated also matters. For example, if the dataset
    is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,
    we can estimate a minimum rotated bounding box to this polygon, but there's no way
    we can tell the correct angle with 100% confidence (as shown above, there could be 4 different
    rotated boxes, with angles differed by 90 degrees to each other, covering the exactly
    same region). In that case we have to just use IoU to determine the box
    proximity (as many detection benchmarks (even for text) do) unless there are other
    assumptions we can make (like width is always larger than height, or the object is not
    rotated by more than 90 degrees CCW/CW, etc.)

    In summary, not considering angles in rotated NMS seems to be a good option for now,
    but we should be aware of its implications.

    Args:
        boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in
            (x_center, y_center, width, height, angle_degrees) format.
        scores (Tensor[N]): Scores for each one of the rotated boxes
        iou_threshold (float): Discards all overlapping rotated boxes with
            IoU > iou_threshold (the original docstring said "<", which
            contradicts the suppression rule stated above)

    Returns:
        keep (Tensor): int64 tensor with the indices of the elements that have been kept
            by Rotated NMS, sorted in decreasing order of scores
    """
    # Deferred import: the compiled extension is only needed when this
    # function is actually called, not at module import time.
    from detectron2 import _C

    return _C.nms_rotated(boxes, scores, iou_threshold)
# Note: this function (batched_nms_rotated) might be moved into
# torchvision/ops/boxes.py in the future
def batched_nms_rotated(boxes, scores, idxs, iou_threshold):
    """
    Perform rotated NMS in a batched fashion.

    Each value in `idxs` denotes a category; boxes of different categories
    are never suppressed against each other.

    Args:
        boxes (Tensor[N, 5]): rotated boxes in
            (x_ctr, y_ctr, width, height, angle_degrees) format
        scores (Tensor[N]): score of each box
        idxs (Tensor[N]): category index of each box
        iou_threshold (float): discard overlapping boxes above this IoU

    Returns:
        Tensor: int64 indices of the kept boxes, sorted by decreasing score
    """
    assert boxes.shape[-1] == 5

    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64, device=boxes.device)

    # Strategy: shift every category's boxes into its own disjoint region of
    # the plane, so a single NMS call behaves as per-category NMS. The shift
    # depends only on the category index and exceeds the coordinate span of
    # all boxes. Unlike torchvision's batched_nms (which uses only the max
    # coordinate), using both the max and the min coordinate keeps negative
    # coordinates handled correctly.
    highest = (
        torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2
    ).max()
    lowest = (
        torch.min(boxes[:, 0], boxes[:, 1]) - torch.min(boxes[:, 2], boxes[:, 3]) / 2
    ).min()

    offsets = idxs.to(boxes) * (highest - lowest + 1)
    # Work on a copy so the caller's boxes are not mutated.
    shifted_boxes = boxes.clone()
    shifted_boxes[:, :2] += offsets[:, None]
    return nms_rotated(shifted_boxes, scores, iou_threshold)
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
dc6e21c252793fe6e0b0f0a7c9640fbeb83c8bac | 0cb6eb8a9dc9bdd4e3552040848cecec19d25798 | /FaceRecognition/settings.py | 2135266da38e065cd8395bcfab3e9c68efad056e | [] | no_license | mayank2498/Face_Detection | 9c89e40a0b5359466c2c49e89a2fb6204ad0be20 | fdcb98923d924312038356706c3830fe13bed3da | refs/heads/master | 2021-08-31T13:09:52.876858 | 2017-12-21T11:32:33 | 2017-12-21T11:32:33 | 114,999,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,182 | py | """
Django settings for FaceRecognition project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'ry4l!_2@+ha^qlfasvjp96g19-vgj$*j542rwcv1)#b!rrgrna'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app providing the face-recognition views.
    'recognise'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'FaceRecognition.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, in addition to per-app templates
        # discovered via APP_DIRS.
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'FaceRecognition.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization defaults.
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'

# Uploaded media files are stored under MEDIA_ROOT and served at MEDIA_URL.
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
| [
"mayankchaurasia.bsp@gmail.com"
] | mayankchaurasia.bsp@gmail.com |
551a6c050f045f8fac9fa97a7f453cecde40f95d | 559fe08f79c297783c404caf7eccee2a269932d4 | /etl/parsers/etw/Intel_Thunderbolt_App.py | 93acf723184612ce60fab955eed5a1b08887f46f | [
"Apache-2.0"
] | permissive | killvxk/etl-parser | 9ba70f54120887f56950054f2cde6dc6c18e0973 | 76b7c046866ce0469cd129ee3f7bb3799b34e271 | refs/heads/master | 2022-11-23T03:35:47.127241 | 2020-07-23T08:55:50 | 2020-07-23T08:55:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,292 | py | # -*- coding: utf-8 -*-
"""
Intel-Thunderbolt-App
GUID : 8ef15e41-05bf-5bcd-4aa8-4f0559564dc0
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=0, version=0)
class Intel_Thunderbolt_App_0_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 0 version 0: a single message string."""
    pattern = Struct(
        "message" / WString
    )
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=1, version=0)
class Intel_Thunderbolt_App_1_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 1 version 0: a single message string."""
    pattern = Struct(
        "message" / WString
    )
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=2, version=0)
class Intel_Thunderbolt_App_2_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 2 version 0: message plus stack trace."""
    pattern = Struct(
        "message" / WString,
        "stackTrace" / WString
    )
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=3, version=0)
class Intel_Thunderbolt_App_3_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 3 version 0: a single message string."""
    pattern = Struct(
        "message" / WString
    )
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=4, version=0)
class Intel_Thunderbolt_App_4_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 4 version 0: a single message string."""
    pattern = Struct(
        "message" / WString
    )
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=5, version=0)
class Intel_Thunderbolt_App_5_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 5 version 0: an event name string."""
    pattern = Struct(
        "eventName" / WString
    )
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=6, version=0)
class Intel_Thunderbolt_App_6_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 6 version 0: a method-call record
    (object, method, input parameters, stack frame)."""
    pattern = Struct(
        "obj" / WString,
        "method" / WString,
        "inparams" / WString,
        "stackFrame" / WString
    )
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=7, version=0)
class Intel_Thunderbolt_App_7_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 7 version 0: a single message string."""
    pattern = Struct(
        "message" / WString
    )
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=8, version=0)
class Intel_Thunderbolt_App_8_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 8 version 0: an info string."""
    pattern = Struct(
        "info" / WString
    )
@declare(guid=guid("8ef15e41-05bf-5bcd-4aa8-4f0559564dc0"), event_id=9, version=0)
class Intel_Thunderbolt_App_9_0(Etw):
    """Intel-Thunderbolt-App ETW event, ID 9 version 0: action plus message."""
    pattern = Struct(
        "action" / WString,
        "message" / WString
    )
| [
"citronneur@gmail.com"
] | citronneur@gmail.com |
9e94d260f3ad0d89442a785c6b2ebbacb48b40ab | 363302c0dce6f72290f19be2bb0728d7b0bdb02d | /top_like_tags/migrations/0004_fixed_hashtag_sub_title.py | 911da63f8ab79c32a12580e338a476a9d5f8420c | [] | no_license | linker10/totag | 87e242734c9b586e66f76f4d99740bb1f175117c | 274e70174387d9ad55ab27e16816dd54f6e71699 | refs/heads/master | 2022-12-06T13:56:23.641080 | 2020-09-04T15:49:24 | 2020-09-04T15:49:24 | 292,886,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # Generated by Django 3.0.6 on 2020-05-31 09:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the required 'sub_title' field to the fixed_hashtag model."""

    dependencies = [
        ('top_like_tags', '0003_fixed_hashtag'),
    ]

    operations = [
        migrations.AddField(
            model_name='fixed_hashtag',
            name='sub_title',
            # 'hello' only back-fills existing rows during this migration;
            # preserve_default=False means the default is dropped afterwards.
            field=models.CharField(default='hello', max_length=200),
            preserve_default=False,
        ),
    ]
| [
"bilalsharif4@gmail.com"
] | bilalsharif4@gmail.com |
e4850aa7a18ba29d395c5eec5ec94ea8a09818a8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_transmigrated.py | 297c6c5aac3496c8827de39a106d134d7b6404d3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py |
# class header
class _TRANSMIGRATED():
    """Word-form record for 'transmigrated', an other-form of 'transmigrate'."""

    def __init__(self):
        self.name = "TRANSMIGRATED"
        # Bug fix: this was `self.definitions = transmigrate` — an unquoted
        # bare name that raised NameError on instantiation. Store the lemma
        # as a string list, consistent with self.basic below.
        self.definitions = ['transmigrate']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['transmigrate']


# Export the underscore-prefixed class explicitly so star imports see it.
__all__ = ['_TRANSMIGRATED']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ce5db88c9ac44022408cf0d61d9a3509c38b239c | 2be679906bfd8481fde463b0ceaf7d0d8a9c4775 | /tests/test_session.py | 79c986120cdf916f06ad273063bb1440caa118fa | [
"MIT"
] | permissive | dayjaby/libtmux | 73b8b62e47ca18043dda55ac6cb0c7d1a389a060 | 47a8aeb7fa6919aae7e6c0303e7aece5e7679b21 | refs/heads/master | 2020-07-13T16:32:07.385953 | 2019-08-29T11:46:48 | 2019-08-29T11:46:48 | 205,115,334 | 0 | 0 | MIT | 2019-08-29T08:19:44 | 2019-08-29T08:19:44 | null | UTF-8 | Python | false | false | 7,807 | py | # -*- coding: utf-8 -*-
"""Test for tmuxp Session object."""
from __future__ import absolute_import, unicode_literals, with_statement
import logging
import pytest
from libtmux import Pane, Session, Window, exc
from libtmux.common import has_gte_version
from libtmux.test import TEST_SESSION_PREFIX, namer
logger = logging.getLogger(__name__)
def test_has_session(server, session):
    """Server.has_session returns True if has session_name exists."""
    TEST_SESSION_NAME = session.get('session_name')
    assert server.has_session(TEST_SESSION_NAME)

    if has_gte_version('2.1'):
        # Exact name matching is only reliable on tmux >= 2.1: a prefix of
        # the name must not match unless exact=False is passed.
        assert not server.has_session(TEST_SESSION_NAME[:-2])
        assert server.has_session(TEST_SESSION_NAME[:-2], exact=False)

    assert not server.has_session('asdf2314324321')
def test_select_window(session):
    """Session.select_window moves window."""
    # get the current window_base_index, since different user tmux config
    # may start at 0 or 1, or whatever they want.
    window_base_index = int(session.attached_window.get('window_index'))

    session.new_window(window_name='test_window')
    window_count = len(session._windows)

    assert window_count >= 2  # 2 or more windows
    assert len(session._windows) == window_count

    # tmux selects a window, moves to it, shows it as attached_window
    selected_window1 = session.select_window(window_base_index)
    assert isinstance(selected_window1, Window)
    attached_window1 = session.attached_window

    # Equality and attribute-for-attribute identity with the attached window.
    assert selected_window1 == attached_window1
    assert selected_window1.__dict__ == attached_window1.__dict__

    # again: tmux selects a window, moves to it, shows it as
    # attached_window
    selected_window2 = session.select_window(window_base_index + 1)
    assert isinstance(selected_window2, Window)
    attached_window2 = session.attached_window

    assert selected_window2 == attached_window2
    assert selected_window2.__dict__ == attached_window2.__dict__

    # assure these windows were really different
    assert selected_window1 != selected_window2
    assert selected_window1.__dict__ != selected_window2.__dict__
def test_select_window_returns_Window(session):
    """Session.select_window returns a Window object."""
    # NOTE(review): the first two lines assert a tautology
    # (len == len); they look like a leftover setup step.
    window_count = len(session._windows)
    assert len(session._windows) == window_count
    window_base_index = int(session.attached_window.get('window_index'))
    assert isinstance(session.select_window(window_base_index), Window)
def test_attached_window(session):
    """Session.attached_window returns a Window object."""
    assert isinstance(session.attached_window, Window)
def test_attached_pane(session):
    """Session.attached_pane returns a Pane object."""
    assert isinstance(session.attached_pane, Pane)
def test_session_rename(session):
    """Session.rename_session renames the session (and back again)."""
    TEST_SESSION_NAME = session.get('session_name')
    test_name = 'testingdis_sessname'
    session.rename_session(test_name)
    assert session.get('session_name') == test_name
    # restore the original name so other tests see a clean session
    session.rename_session(TEST_SESSION_NAME)
    assert session.get('session_name') == TEST_SESSION_NAME
def test_new_session(server):
    """Server.new_session creates a detached session with the given name."""
    new_session_name = TEST_SESSION_PREFIX + next(namer)
    new_session = server.new_session(session_name=new_session_name, detach=True)
    assert isinstance(new_session, Session)
    assert new_session.get('session_name') == new_session_name
def test_show_options(session):
    """Session.show_options() returns a dict of all session options."""
    options = session.show_options()
    assert isinstance(options, dict)
def test_set_show_options_single(session):
    """A set option is visible via Session.show_options(key) and the full dict."""
    session.set_option('history-limit', 20)
    assert session.show_options('history-limit') == 20
    # overwrite and re-read to prove the value really changed
    session.set_option('history-limit', 40)
    assert session.show_options('history-limit') == 40
    assert session.show_options()['history-limit'] == 40
def test_set_show_option(session):
    """A set option is visible via the singular Session.show_option(key)."""
    session.set_option('history-limit', 20)
    assert session.show_option('history-limit') == 20
    session.set_option('history-limit', 40)
    assert session.show_option('history-limit') == 40
def test_empty_session_option_returns_None(session):
    """An option that is unset at session scope reads back as None."""
    assert session.show_option('default-shell') is None
def test_show_option_unknown(session):
    """Session.show_option raises UnknownOption for an invalid option."""
    with pytest.raises(exc.UnknownOption):
        session.show_option('moooz')
def test_show_option_ambiguous(session):
    """Session.show_option raises AmbiguousOption for an ambiguous option."""
    with pytest.raises(exc.AmbiguousOption):
        session.show_option('default-')
def test_set_option_ambigous(session):
    """Session.set_option raises AmbiguousOption for an ambiguous option."""
    with pytest.raises(exc.AmbiguousOption):
        session.set_option('default-', 43)
def test_set_option_invalid(session):
    """Session.set_option raises InvalidOption (tmux >= 2.4) or
    UnknownOption (older tmux) for an invalid option."""
    if has_gte_version('2.4'):
        with pytest.raises(exc.InvalidOption):
            session.set_option('afewewfew', 43)
    else:
        with pytest.raises(exc.UnknownOption):
            session.set_option('afewewfew', 43)
def test_show_environment(session):
    """Session.show_environment() returns a dict of environment variables."""
    _vars = session.show_environment()
    assert isinstance(_vars, dict)
def test_set_show_environment_single(session):
    """A set environment variable reads back via show_environment(key)."""
    session.set_environment('FOO', 'BAR')
    assert session.show_environment('FOO') == 'BAR'
    # overwrite and confirm both the single-key and full-dict views agree
    session.set_environment('FOO', 'DAR')
    assert session.show_environment('FOO') == 'DAR'
    assert session.show_environment()['FOO'] == 'DAR'
def test_show_environment_not_set(session):
    """An environment variable that was never set reads back as None."""
    assert session.show_environment('BAR') is None
def test_remove_environment(session):
    """Session.remove_environment deletes a variable entirely."""
    assert session.show_environment('BAM') is None
    session.set_environment('BAM', 'OK')
    assert session.show_environment('BAM') == 'OK'
    session.remove_environment('BAM')
    assert session.show_environment('BAM') is None
def test_unset_environment(session):
    """Session.unset_environment also makes the variable unreadable."""
    assert session.show_environment('BAM') is None
    session.set_environment('BAM', 'OK')
    assert session.show_environment('BAM') == 'OK'
    session.unset_environment('BAM')
    assert session.show_environment('BAM') is None
@pytest.mark.parametrize(
    "session_name,raises",
    [('hey.period', True), ('hey:its a colon', True), ('hey moo', False)],
)
def test_periods_raise_badsessionname(server, session, session_name, raises):
    """Names containing '.' or ':' raise BadSessionName everywhere they
    are used; a name with only spaces is accepted."""
    new_name = session_name + 'moo' # used for rename / switch
    if raises:
        with pytest.raises(exc.BadSessionName):
            session.rename_session(new_name)
        with pytest.raises(exc.BadSessionName):
            server.new_session(session_name)
        with pytest.raises(exc.BadSessionName):
            server.has_session(session_name)
        with pytest.raises(exc.BadSessionName):
            server.switch_client(new_name)
        with pytest.raises(exc.BadSessionName):
            server.attach_session(new_name)
    else:
        server.new_session(session_name)
        server.has_session(session_name)
        session.rename_session(new_name)
        # no client is attached in the test harness, so switching
        # still fails -- but with a generic LibTmuxException
        with pytest.raises(exc.LibTmuxException):
            server.switch_client(new_name)
def test_cmd_inserts_sesion_id(session):
    """Session.cmd transparently targets its own session via ``-t <id>``."""
    sid = session.id
    trailing = 'last-arg'
    result = session.cmd('not-a-command', trailing)
    # the target flag and the session id must have been injected,
    # and the caller's own arguments kept at the end
    assert '-t' in result.cmd
    assert sid in result.cmd
    assert result.cmd[-1] == trailing
| [
"tony@git-pull.com"
] | tony@git-pull.com |
8946b5ae8d70fc3d519899293f2227a86275f898 | 1bebfe3d45f11f89014dc56d27306be7d5507c5c | /flavio/_parse_errors.py | fb894885b8a77c3f8e85e754f4bdca39680f082a | [
"MIT"
] | permissive | talismanbrandi/flavio | 6ed628d9f4831c6d55ff56d587eaaca2633de3bb | 9c8f2ce0fa68ea1a4733977557b4a602d590a0ea | refs/heads/master | 2021-01-19T06:38:17.355774 | 2017-12-10T10:55:23 | 2017-12-10T10:55:23 | 87,472,967 | 0 | 0 | null | 2017-04-06T20:40:43 | 2017-04-06T20:40:43 | null | UTF-8 | Python | false | false | 7,141 | py | import re
from flavio.statistics.probability import *
# for strings of the form '< 5.3e-8 @ 95% CL'
_pattern_upperlimit = re.compile(r"^\s*<\s*([-+]?\d+\.?\d*)([eE][-+]?\d+)?\s*@\s*(\d+\.?\d*)\s*\%\s*C[\.\s]*L[\.\s]*$")
# for strings of the form '1.67(3)(5) 1e-3'
_pattern_brackets = re.compile(r"^\s*\(?\s*(-?\d+\.?\d*)\s*((?:\(\s*\d+\.?\d*\s*\)\s*)+)\)?\s*\*?\s*(?:(?:e|E|1e|1E|10\^)\(?([+-]?\d+)\)?)?$")
# for strings of the form '(1.67 +- 0.3 +- 0.5) * 1e-3'
_pattern_plusminus = re.compile(r"^\s*\(?\s*(-?\d+\.?\d*)\s*((?:[+\-±\\pm]+\s*\d+\.?\d*\s*)+)\)?\s*\*?\s*(?:(?:e|E|1e|1E|10\^)\(?([+-]?\d+)\)?)?$")
# for strings of the form '[1, 5] 1e-3'
_pattern_range = re.compile(r"^\s*\[\s*(-?\d+\.?\d*)\s*([eE][-+]?\d+)?\s*\,\s*(-?\d+\.?\d*)\s*([eE][-+]?\d+)?\s*\]\s*\*?\s*(?:(?:e|E|1e|1E|10\^)\(?([+-]?\d+)\)?)?$")
def errors_from_string(constraint_string):
    """Convert a string like '1.67(3)(5)' or '1.67+-0.03+-0.05' to a dictionary
    of central values errors.

    Returns a dict with keys:

    - ``central_value``: float, already rescaled by any overall power of 10
    - ``symmetric_errors``: list of floats (one per error group)
    - ``asymmetric_errors``: list of ``(right, left)`` float tuples

    Raises ValueError if the string matches neither the bracket nor the
    plus/minus notation.
    """
    m = _pattern_brackets.match(constraint_string)
    if m is None:
        m = _pattern_plusminus.match(constraint_string)
    if m is None:
        raise ValueError("Constraint " + constraint_string + " not understood")
    # extracting the central value and overall power of 10
    if m.group(3) is None:
        overall_factor = 1
    else:
        overall_factor = 10**float(m.group(3))
    central_value = m.group(1)
    # number_decimal gives the number of digits after the decimal point
    if len(central_value.split('.')) == 1:
        number_decimal = 0
    else:
        number_decimal = len(central_value.split('.')[1])
    central_value = float(central_value) * overall_factor
    # now, splitting the errors
    error_string = m.group(2)
    pattern_brackets_err = re.compile(r"\(\s*(\d+\.?\d*)\s*\)\s*")
    pattern_symmetric_err = re.compile(r"(?:±|\\pm|\+\-)(\s*\d+\.?\d*)")
    pattern_asymmetric_err = re.compile(r"\+\s*(\d+\.?\d*)\s*\-\s*(\d+\.?\d*)")
    errors = {}
    errors['central_value'] = central_value
    errors['symmetric_errors'] = []
    errors['asymmetric_errors'] = []
    if pattern_brackets_err.match(error_string):
        for err in re.findall(pattern_brackets_err, error_string):
            if not err.isdigit():
                # if isdigit() is false, it means that it is a number
                # with a decimal point (e.g. '1.5'), so no rescaling is necessary
                standard_deviation = float(err)*overall_factor
            else:
                # if the error is just digits, need to rescale it by the
                # appropriate power of 10
                standard_deviation = float(err)*10**(-number_decimal)*overall_factor
            errors['symmetric_errors'].append(standard_deviation)
    elif pattern_symmetric_err.match(error_string) or pattern_asymmetric_err.match(error_string):
        # symmetric errors (±x) and asymmetric ones (+x-y) may be mixed;
        # each findall picks out only the groups of its own shape
        for err in re.findall(pattern_symmetric_err, error_string):
            errors['symmetric_errors'].append( float(err)*overall_factor )
        for err in re.findall(pattern_asymmetric_err, error_string):
            right_err = float(err[0])*overall_factor
            left_err = float(err[1])*overall_factor
            errors['asymmetric_errors'].append((right_err, left_err))
    return errors
def limit_from_string(constraint_string):
    """Parse an upper-limit string like '< 5.3e-8 @ 95% CL'.

    Returns a ``(limit, confidence_level)`` tuple with the confidence
    level expressed as a fraction (e.g. 0.95).
    """
    match = _pattern_upperlimit.match(constraint_string)
    if match is None:
        raise ValueError("Constraint " + constraint_string + " not understood")
    significand, exponent, cl_percent = match.groups()
    # the exponent group is optional; glue it back on before parsing
    limit = float(significand if exponent is None else significand + exponent)
    cl = float(cl_percent)/100.
    return limit, cl
def range_from_string(constraint_string):
    """Parse a uniform-range string like '[1, 5] 1e-3'.

    Returns a ``(central_value, half_range)`` tuple: the midpoint of the
    interval and half its width, after applying per-bound exponents and
    an optional overall power of ten.

    Raises ValueError for unparsable strings or when the upper bound is
    below the lower one.
    """
    match = _pattern_range.match(constraint_string)
    if match is None:
        raise ValueError("Constraint " + constraint_string + " not understood")
    lo_str, lo_exp, hi_str, hi_exp, overall_exp = match.groups()
    # per-bound exponents are optional; reattach them before parsing
    lo = float(lo_str if lo_exp is None else lo_str + lo_exp)
    hi = float(hi_str if hi_exp is None else hi_str + hi_exp)
    if hi < lo:
        raise ValueError("Uniform constraint must be specified as [a,b] with b>a")
    # an overall power of ten scales both bounds
    overall = 1 if overall_exp is None else 10**float(overall_exp)
    lo = overall * lo
    hi = overall * hi
    central_value = (hi + lo)/2.
    half_range = (hi - lo)/2.
    return central_value, half_range
def errors_from_constraints(probability_distributions):
    """Collect the central value and errors of a parameter's constraints.

    ``probability_distributions`` is an iterable of ``(num, pd)`` pairs,
    where ``num`` indexes the parameter inside a multivariate
    distribution. Returns a dict with keys ``central_value``,
    ``symmetric_errors`` (list of floats) and ``asymmetric_errors``
    (list of ``(right, left)`` tuples). Correlations are ignored.

    NOTE(review): ``central_value`` is only set inside the loop, so for
    an empty input the returned dict lacks that key — confirm callers
    never pass an empty sequence.
    """
    errors = {}
    errors['symmetric_errors'] = []
    errors['asymmetric_errors'] = []
    for num, pd in probability_distributions:
        errors['central_value'] = pd.central_value
        if isinstance(pd, DeltaDistribution):
            # delta distributions (= no error) can be skipped
            continue
        elif isinstance(pd, NormalDistribution):
            errors['symmetric_errors'].append(pd.standard_deviation)
        elif isinstance(pd, AsymmetricNormalDistribution):
            errors['asymmetric_errors'].append((pd.right_deviation, pd.left_deviation))
        elif isinstance(pd, MultivariateNormalDistribution):
            # pick out this parameter's entry and its marginal error
            errors['central_value'] = pd.central_value[num]
            errors['symmetric_errors'].append(math.sqrt(pd.covariance[num, num]))
    return errors
def constraints_from_string(constraint_string):
    """Convert a string like '1.67(3)(5)' or '1.67+-0.03+-0.05' to a list
    of ProbabilityDistribution instances.

    Supported notations: a plain number (DeltaDistribution), an upper
    limit '< x @ p% CL' (GaussianUpperLimit), a range '[a, b]'
    (UniformDistribution), and bracket or plus/minus error notation
    (Normal/AsymmetricNormal distributions, one per error group).
    Raises ValueError for anything else.
    """
    try:
        # if the string represents just a number, return a DeltaDistribution
        return [DeltaDistribution(float(constraint_string))]
    except ValueError:
        # replace dashes (that can come from copy-and-pasting latex) by minuses
        constraint_string = constraint_string.replace('−', '-')
        # try again if the string is a plain number now; previously this
        # branch returned a bare dict, inconsistent with every other branch
        try:
            return [DeltaDistribution(float(constraint_string))]
        except ValueError:
            pass
    if _pattern_upperlimit.match(constraint_string):
        limit, cl = limit_from_string(constraint_string)
        return [GaussianUpperLimit(limit, cl)]
    elif _pattern_range.match(constraint_string):
        central_value, half_range = range_from_string(constraint_string)
        return [UniformDistribution(central_value, half_range)]
    elif _pattern_brackets.match(constraint_string) or _pattern_plusminus.match(constraint_string):
        errors = errors_from_string(constraint_string)
        # no error terms at all -> the constraint is just a central value
        # (errors_from_string always sets both list keys, so test emptiness,
        # not key membership)
        if not errors['symmetric_errors'] and not errors['asymmetric_errors']:
            return [DeltaDistribution(errors['central_value'])]
        pd = []
        for err in errors['symmetric_errors']:
            pd.append(NormalDistribution(errors['central_value'], err))
        for err_right, err_left in errors['asymmetric_errors']:
            pd.append(AsymmetricNormalDistribution(errors['central_value'], err_right, err_left))
        return pd
    else:
        raise ValueError("Constraint " + constraint_string + " not understood")
| [
"david.straub@tum.de"
] | david.straub@tum.de |
fbb477b2257116e7ff83fc702ea8d7a4dc02cb65 | 10e4652d4e677b2ce89abd0729424dc92c562e19 | /IPython/utils/pickleutil.py | dd1f733637513cf55c58d288c710bb60c5ff6d3b | [
"BSD-3-Clause"
] | permissive | appasche/ipython | 18fd6619d19c96694747aa12f435c410bb5d8f4e | d538c7ac9dcee2e75ff5e670376ac4d3ad6715ee | refs/heads/master | 2021-01-21T09:06:22.830942 | 2015-04-19T16:26:08 | 2015-04-19T16:26:08 | 34,225,707 | 1 | 0 | null | 2015-04-19T21:45:07 | 2015-04-19T21:45:07 | null | UTF-8 | Python | false | false | 140 | py | from warnings import warn
# Deprecation shim: warn once, then re-export everything from the new
# location so legacy `IPython.utils.pickleutil` imports keep working.
warn("IPython.utils.pickleutil has moved to ipython_kernel.pickleutil")
from ipython_kernel.pickleutil import *
| [
"benjaminrk@gmail.com"
] | benjaminrk@gmail.com |
3fd5b74a50a88a63c25bd07180142e8f0f57f887 | 11ff14c118240e87c4804d0373e4656d0683d479 | /test_case/old_case/test_wifi.py | 3079e369d1a0331e51ea3b1275b53a7c1bf3c822 | [] | no_license | wxmmavis/OS3.1 | e3028d9c79d5a1a17449fea6380fcdda902bdec7 | 26d954344207a82d2298821c3c4f01302393dc7e | refs/heads/master | 2020-03-25T20:07:11.225493 | 2018-08-13T03:20:57 | 2018-08-13T03:20:57 | 144,115,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,146 | py | # -*- coding: utf-8 -*-
import configparser
import logging
import time
import os
import pytest
#########################
# import module
#########################
import sys
sys.path.append("..")
import modules.login_router
import modules.router_setup
import modules.initialize
import modules.device_management
from modules.login_router import *
from modules.router_setup import *
from modules.wifi import *
from tools import *
#########################
from selenium import webdriver
def case(Wchoose):
    """Run one WiFi UI scenario on the router web UI, selected by ``Wchoose``.

    Opens a fresh Chrome session against the router's default IP, logs in
    with the default password, enters the WiFi setup page and executes the
    branch matching ``Wchoose`` (get/set SSID, password, encryption mode,
    hidden flag or HT bandwidth for the 2.4G / 5G / guest networks, or
    toggle a radio on/off). Band handles: ra0=1 (2.4G), rai0=2 (5G),
    guest=3.

    On success the branch returns its expected status code (1..5) after
    quitting the browser; on failure a screenshot is written to
    ``caseFail`` and the function falls through to the trailing
    ``driver.quit()``, returning None.
    """
    lr = login_router()
    rs = router_setup()
    t = tools()
    w = wifi()
    # test configuration and screenshot directory live next to the project
    projectpath = os.path.dirname(os.getcwd())
    caseFail = projectpath + '/errorpng/caseFail/'
    test_time =time.strftime("%Y%m%d%H%M%S",time.localtime())
    config_file = projectpath + '/configure/' + 'testconfig.ini'
    filename = os.path.basename(__file__).split('.')[0]
    t.log(filename)
    logging.info(__file__)
    config = configparser.ConfigParser()
    config.read(config_file, encoding='UTF-8')
    default_ip = config.get('Default', 'default_ip')
    default_pw = config.get('Default', 'default_pw')
    default_24ssid = config.get('Default', 'default_ssid')
    default_5ssid = default_24ssid+'_5G'
    default_guest = config.get('Default', 'default_guest')
    default_guestpw = config.get('Default', 'default_guestpw')
    SSID24 = config.get('WiFi', 'ssid24')
    SSID5 = config.get('WiFi', 'ssid5')
    SSIDg = config.get('WiFi', 'ssidg')
    pw24 = config.get('WiFi', 'pw24')
    pw5 = config.get('WiFi', 'pw5')
    pwg = config.get('WiFi', 'pwg')
    # band selectors used by the wifi helper: 2.4G radio, 5G radio, guest net
    ra0 = 1
    rai0 = 2
    guest = 3
    driver = webdriver.Chrome()
    driver.maximize_window()
    if lr.open_url(driver, 'http://'+default_ip) == 1:
        if lr.login(driver, default_pw) == 1:
            if rs.setup_choose(driver, 1) == 1:
                if Wchoose == 0:
                    ## get the default 2.4G SSID
                    if w.getSSID(driver, ra0, default_24ssid) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "getDefaulte24SSID-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 34:
                    ## get the default 5G SSID
                    if w.getSSID(driver, rai0, default_5ssid) ==2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "getDefaulte5SSID-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 1:
                    ## get the default 2.4G password
                    if w.getWP(driver, ra0, default_pw) ==1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "getDefault24Password-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 2:
                    ## get the default 5G password
                    if w.getWP(driver, rai0, default_pw) == 2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "getDefault5Password-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 3:
                    ## set the 2.4G SSID
                    w.setSSID(driver, ra0, SSID24)
                    w.savewifi(driver, ra0)
                    if w.getSSID(driver, ra0, SSID24) ==1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "set24GSSID-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 4:
                    ## set the 5G SSID
                    w.setSSID(driver, rai0, SSID5)
                    w.savewifi(driver, rai0)
                    if w.getSSID(driver, rai0, SSID5) == 2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "set5GSSID-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 5:
                    ## set the 2.4G password
                    w.setWP(driver, ra0, pw24)
                    w.savewifi(driver, 1)
                    if w.getWP(driver, ra0, pw24) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "set24GPW-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 6:
                    ## set the 5G password
                    w.setWP(driver, rai0, pw5)
                    w.savewifi(driver, rai0)
                    if w.getWP(driver, rai0, pw5) == 2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "set5GPW-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 7:
                    # get the default 2.4G encryption mode
                    w.advance(driver, ra0)
                    if w.getEncryption(driver, ra0) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "getDefault24GEncryption-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 8:
                    # get the default 5G encryption mode
                    w.advance(driver, rai0)
                    if w.getEncryption(driver, rai0) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "getDefault5GEncryption-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 9:
                    ## set 2.4G encryption to open (no password)
                    w.advance(driver, ra0)
                    w.setEncryption(driver, ra0, 1)
                    w.savewifi(driver, ra0)
                    if w.getEncryption(driver, ra0) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "set24GNullPW-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 10:
                    # set 2.4G encryption to PSK2
                    w.advance(driver, ra0)
                    w.setEncryption(driver, 1, 2)
                    w.setWP(driver, 1, default_pw)
                    w.savewifi(driver, 1)
                    if w.getEncryption(driver, 1) ==2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "set24GPsk2PW-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 11:
                    # set 2.4G encryption to mixed PSK+PSK2
                    w.advance(driver, ra0)
                    w.setEncryption(driver, 1, 3)
                    w.savewifi(driver, 1)
                    w.setWP(driver, 1, default_pw)
                    if w.getEncryption(driver, 1) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "set24Psk_Psk2PW-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 12:
                    # set 5G encryption to open (no password)
                    w.advance(driver, rai0)
                    w.setEncryption(driver, rai0, 1)
                    w.savewifi(driver, rai0)
                    if w.getEncryption(driver, rai0) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "set5GNullPW-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 13:
                    # set 5G encryption to PSK2
                    w.advance(driver, rai0)
                    w.setEncryption(driver, rai0, 2)
                    w.setWP(driver, rai0, default_pw)
                    w.savewifi(driver, rai0)
                    if w.getEncryption(driver, rai0) == 2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "set5GPsk2PW-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 14:
                    # set 5G encryption to mixed PSK+PSK2
                    w.advance(driver, rai0)
                    w.setEncryption(driver, 2, 3)
                    w.setWP(driver, 2, default_pw)
                    w.savewifi(driver, 2)
                    if w.getEncryption(driver, 2) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "set5Psk_Psk2PW-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 15:
                    # get the 2.4G hidden-SSID flag (default: visible)
                    if w.getHide(driver, ra0) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "get24Hide-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 151:
                    # get the 5G hidden-SSID flag (default: visible)
                    if w.getHide(driver, rai0) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "get5Hide-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 16:
                    # hide the 2.4G SSID
                    w.advance(driver, ra0)
                    w.setHide(driver, ra0)
                    w.savewifi(driver, ra0)
                    if w.getHide(driver, ra0) == 2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "set24Hide-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 17:
                    # hide the 5G SSID
                    w.advance(driver, rai0)
                    w.setHide(driver, rai0)
                    w.savewifi(driver, rai0)
                    if w.getHide(driver, rai0) == 4:
                        logging.info('=========================Success')
                        driver.quit()
                        return 4
                    else:
                        driver.get_screenshot_as_file(caseFail + "set5Hide-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 18:
                    # get the default 2.4G HT (channel width) mode
                    w.advance(driver, ra0)
                    if w.getHT(driver, ra0) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "getDefault24HT-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 181:
                    ### get the default 5G HT (channel width) mode ###
                    w.advance(driver, rai0)
                    if w.getHT(driver, rai0) ==3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "getDefault5HT-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 19:
                    # set 2.4G to 40MHz HT
                    w.advance(driver, ra0)
                    w.setHT(driver, ra0, 2)
                    w.savewifi(driver, ra0)
                    if w.getHT(driver, ra0) == 2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "setra0_40HT-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 20:
                    # set 2.4G to 20/40MHz HT
                    w.advance(driver, ra0)
                    w.setHT(driver, ra0, 1)
                    w.savewifi(driver, ra0)
                    if w.getHT(driver, ra0) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "setra0_2040HT-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 21:
                    # set 5G to 40MHz HT
                    w.advance(driver, rai0)
                    w.setHT(driver, rai0, 4)
                    w.savewifi(driver, rai0)
                    if w.getHT(driver, rai0) == 4:
                        logging.info('=========================Success')
                        driver.quit()
                        return 4
                    else:
                        driver.get_screenshot_as_file(caseFail + "setrai0_40HT-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 22:
                    # set 5G to 80MHz HT
                    w.advance(driver, rai0)
                    w.setHT(driver, rai0, 5)
                    w.savewifi(driver, rai0)
                    if w.getHT(driver, rai0) == 5:
                        logging.info('=========================Success')
                        driver.quit()
                        return 5
                    else:
                        driver.get_screenshot_as_file(caseFail + "setrai0_80HT-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 23:
                    ### set 5G to 20/40MHz HT
                    w.advance(driver, rai0)
                    w.setHT(driver, rai0, 3)
                    w.savewifi(driver, rai0)
                    if w.getHT(driver, rai0) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "setrai0_2040HT-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 25:
                    # get the guest-network state
                    if w.getWiFi(driver, guest) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "getGuest-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 26:
                    ## get the guest SSID
                    w.clickWiFi(driver, guest)
                    if w.getSSID(driver, guest, default_guest) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "getGuestSSID-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 27:
                    ## get the guest password
                    if w.getWP(driver, guest, default_guestpw) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "getGuestWP-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 28:
                    ## get the guest encryption mode
                    if w.getEncryption(driver, guest) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "getGuestEncryption-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 29:
                    ## set the guest SSID
                    w.setSSID(driver, guest, SSIDg)
                    w.savewifi(driver, guest)
                    if w.getSSID(driver, guest, SSIDg) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "setGuestSSID-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 30:
                    ## set the guest password
                    w.setWP(driver, guest, pwg)
                    w.savewifi(driver, guest)
                    if w.getWP(driver, guest, pwg) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "setGuestWP-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 31:
                    ## set guest encryption to open (no password)
                    w.setEncryption(driver, guest, 1)
                    w.savewifi(driver, guest)
                    if w.getEncryption(driver, guest) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "setGuestNUll-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 32:
                    ## set guest encryption to PSK2
                    w.setEncryption(driver, guest, 2)
                    w.setWP(driver, guest, default_guest)
                    w.savewifi(driver, guest)
                    if w.getEncryption(driver, guest) == 2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "setGuestPSK2-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 33:
                    ## set guest encryption to mixed PSK+PSK2
                    w.setEncryption(driver, guest, 3)
                    w.savewifi(driver, guest)
                    if w.getEncryption(driver, guest) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "setGuestPSK_PSK2-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 51:
                    # toggle the 2.4G radio off
                    w.clickWiFi(driver, ra0)
                    if w.getWiFi(driver, ra0) == 1:
                        logging.info('=========================Success')
                        driver.quit()
                        return 1
                    else:
                        driver.get_screenshot_as_file(caseFail + "close24WiFi-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 52:
                    # toggle the 5G radio off
                    w.clickWiFi(driver, rai0)
                    if w.getWiFi(driver, rai0) == 2:
                        logging.info('=========================Success')
                        driver.quit()
                        return 2
                    else:
                        driver.get_screenshot_as_file(caseFail + "close5WiFi-%s.jpg" % test_time)
                        logging.warning('============================Fail')
                if Wchoose == 53:
                    # toggle the guest network off
                    w.clickWiFi(driver, guest)
                    if w.getWiFi(driver, guest) == 3:
                        logging.info('=========================Success')
                        driver.quit()
                        return 3
                    else:
                        driver.get_screenshot_as_file(caseFail + "closeGuestWiFi-%s.jpg" % test_time)
                        logging.warning('============================Fail')
    # fall-through on any failure path: close the browser, return None
    driver.quit()
### get the default 2.4G SSID ###
def test_getDefaulte24SSID():
    assert case(0) == 1
### get the default 5G SSID ###
def test_getDefault5SSID():
    assert case(34) == 2
### get the default 2.4G password ###
def test_getDefault24Password():
    assert case(1) == 1
### get the default 5G password ###
def test_getDefault5Password():
    assert case(2) == 2
### get the default 2.4G encryption type ###
def test_getDefault24GEncryption():
    assert case(7) == 1
def test_getDefault5GEncryption():
    assert case(8) ==1
def test_set24GSSID():
    assert case(3) == 1
def test_set5GSSID():
    assert case(4) == 2
def test_set24GPW():
    assert case(5) == 1
def test_set5GPW():
    assert case(6) == 2
def test_set24GNullPW():
    assert case(9) == 3
def test_set24GPsk2PW():
    assert case(10) == 2
def test_set24Psk_Psk2PW():
    assert case(11) == 1
def test_set5GNullPW():
    assert case(12) == 3
def test_set5GPsk2PW():
    assert case(13) == 2
def test_set5Psk_Psk2PW():
    assert case(14) == 1
def test_get24Hide():
    assert case(15) == 1
def test_get5Hide():
    assert case(151) == 3
def test_set24Hide():
    assert case(16) == 2
def test_set5Hide():
    assert case(17) == 4
def test_getDefault24HT():
    assert case(18) == 1
def test_getDefault5HT():
    assert case(181) == 3
def test_setra0_40HT():
    assert case(19) == 2
def test_setra0_2040HT():
    assert case(20) == 1
def test_setrai0_40HT():
    assert case(21) == 4
def test_setrai0_80HT():
    assert case(22) == 5
def test_setrai0_2040HT():
    assert case(23) == 3
def test_getGuest():
    assert case(25) == 3
def test_getGuestSSID():
    assert case(26) == 3
def test_getGuestWP():
    assert case(27) == 3
def test_getGuestEncryption():
    assert case(28) == 1
def test_setGuestSSID():
    assert case(29) == 3
def test_setGuestWP():
    assert case(30) == 3
def test_setGuestNUll():
    assert case(31) == 3
def test_setGuestPSK2():
    assert case(32) == 2
def test_setGuestPSK_PSK2():
    assert case(33) == 1
def test_closeGuestWiFi():
    assert case(53) == 3
def test_close24WiFi():
    assert case(51) == 1
def test_close5WiFi():
    assert case(52) == 2
if __name__ == '__main__':
pytest.main(os.path.basename(__file__)) | [
"1475806321@qq.com"
] | 1475806321@qq.com |
9383ab86a94925354013c11a2320662bce7900ea | 2d2777db0077c52e058bf429011938ef1fd390ba | /scripts/xfrmr/xfrmr_30ft.py | fdee1945491e98ce5a5d5ece483c0e746cfa2477 | [] | no_license | darraghdog/riiid | 164eb37db50e9d7afddb1c05ace367206bf3f45b | 663c5631465dd127e1df40648563066c01901326 | refs/heads/master | 2023-02-18T08:36:53.963778 | 2021-01-07T11:18:37 | 2021-01-07T11:18:37 | 311,267,801 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,559 | py | # https://www.kaggle.com/bminixhofer/speed-up-your-rnn-with-sequence-bucketing
# https://www.kaggle.com/its7171/lgbm-with-loop-feature-engineering/#data
import os
import platform
import sys
PATH = '/Users/dhanley/Documents/riiid/' \
if platform.system() == 'Darwin' else '/mount/riiid'
os.chdir(PATH)
sys.path.append(PATH)
import pandas as pd
import numpy as np
import argparse
import gc
from sklearn.metrics import roc_auc_score
from collections import defaultdict, OrderedDict
from tqdm import tqdm
import random
import lightgbm as lgb
import warnings
from scipy import sparse
from scripts.utils import Iter_Valid, dumpobj, loadobj
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation as LDA
import platform
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torch.nn import functional as F
from torch.cuda.amp import autocast
from sklearn.metrics import log_loss
from tools.utils import get_logger, SpatialDropout, split_tags
from tools.config import load_config
from transformers import XLMModel, XLMConfig
warnings.filterwarnings("ignore")
# Pandas display settings: show all columns, wide output and 3-decimal
# floats so interactive DataFrame dumps during training are readable.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows',1000)
pd.set_option('display.width', 1000)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
logger = get_logger('Train', 'INFO')
# funcs for user stats with loop
def add_user_feats(df, pdicts, update = True):
    """Append loop-computed per-user history features to ``df``.

    Iterates the interactions of ``df`` in row order, emitting features from
    the running state held in ``pdicts`` and then (when ``update`` is True)
    folding the current row back into that state.  With ``update=False`` the
    label column is not consumed, so the same routine serves at inference.

    Returns ``df`` with the engineered feature columns concatenated on.
    """
    # Per-row output accumulators, one slot per row of df.
    acsu = np.zeros(len(df), dtype=np.uint32)        # user's cumulative correct answers
    cu = np.zeros(len(df), dtype=np.uint32)          # user's cumulative question attempts
    #acsb = np.zeros(len(df), dtype=np.uint32)
    #cb = np.zeros(len(df), dtype=np.uint32)
    cidacsu = np.zeros(len(df), dtype=np.uint32)     # correct answers on this exact content id
    cidcu = np.zeros(len(df), dtype=np.uint32)       # attempts on this exact content id
    contid = np.zeros((len(df), 1), dtype=np.uint8)  # allocated but not filled below
    qamat = np.zeros((len(df),2), dtype=np.float32)  # answer-popularity ratio features
    lect = np.zeros((len(df), 2), dtype=np.uint32)   # lecture count / seconds since last lecture
    lectcat = np.zeros((len(df), 2), dtype=np.uint32)  # last lecture tag / part
    itercols = ['user_id','answered_correctly', 'part', \
                'prior_question_had_explanation', 'prior_question_elapsed_time', 'content_id', \
                'task_container_id', 'timestamp', 'content_type_id', 'user_answer']
    if not update:
        # no label available at inference time
        itercols = [f for f in itercols if f!='answered_correctly']
    df['prior_question_had_explanation'] = df['prior_question_had_explanation'].fillna(False).astype(np.uint8)
    for cnt,row in enumerate(tqdm(df[itercols].values, total = df.shape[0]) ):
        if update:
            u, yprev, part, pexp, eltim, cid, tcid, tstmp, ctype, ua = row
        else:
            u, pexp, eltim, cid, tcid, tstmp, ctype, ua = row
        if ctype==1:
            # Lecture row: record it against the user, emit no features.
            # NOTE(review): ``uidx`` is not resolved in this branch — it is
            # stale from the previous (possibly different) user's question row
            # and undefined on the very first iteration; confirm intent.
            pdicts['count_u_lect_dict'][uidx] += 1
            pdicts['count_u_lect_timestamp'][uidx] = int(round(tstmp / 1000))
            pdicts['lecture_logged'][uidx] = 1
            pdicts['lecture_tag'][uidx] = ldict['tag'][cid]
            pdicts['lecture_part'][uidx] = ldict['part'][cid]
            continue
        # Resolve (and lazily register) the dense index for this user ...
        try:
            uidx = pdicts['uidx'][u]
        except:
            pdicts['max_uidx'] += 1
            uidx = pdicts['uidx'][u] = pdicts['max_uidx']
        # ... and for this (content_id, user) pair.
        try:
            # uqidx = pdicts['uqidx'][(u, cid)]
            uqidx = pdicts['uqidx'][cid][u]
        except:
            pdicts['max_uqidx'] += 1
            # uqidx = pdicts['uqidx'][(u, cid)] = pdicts['max_uqidx']
            uqidx = pdicts['uqidx'][cid][u] = pdicts['max_uqidx']
        #bid = bdict[cid]
        #newbid = bid == pdicts['track_b'].item(uidx)
        # Emit features from the state *before* this row updates it.
        lect[cnt] = pdicts['count_u_lect_dict'].item(uidx), \
                    int(round(tstmp / 1000)) - pdicts['count_u_lect_timestamp'].item(uidx)
        lectcat[cnt] = pdicts['lecture_tag'].item(uidx), pdicts['lecture_part'].item(uidx)
        acsu[cnt] = pdicts['answered_correctly_sum_u_dict'].item(uidx)
        cu[cnt] = pdicts['count_u_dict'].item(uidx)
        #acsb[cnt] = pdicts['answered_correctly_sum_b_dict'].item(uidx)
        #cb[cnt] = pdicts['count_b_dict'].item(uidx)
        cidacsu[cnt] = pdicts['content_id_answered_correctly_sum_u_dict'].item(uqidx)
        cidcu[cnt] = pdicts['content_id_count_u_dict'].item(uqidx)
        qamat[cnt] = pdicts['qaRatiocum'].item(uidx) / (pdicts['count_u_dict'].item(uidx) + 0.01), \
                     pdicts['qaRatiocum'].item(uidx) / (pdicts['qaRatioCorrectcum'].item(uidx) + 0.01)
        if update:
            pdicts['count_u_dict'][uidx] += 1
            # Accumulate answer-popularity ratios; the except branches supply
            # fallback constants for (question, answer) pairs unseen in training.
            try:
                pdicts['qaRatioCorrectcum'][uidx] += pdicts['qaRatio'][(cid, pdicts['qaCorrect'][cid])]
            except:
                pdicts['qaRatioCorrectcum'][uidx] += 1.
            try:
                pdicts['qaRatiocum'][uidx] += pdicts['qaRatio'][(cid, ua)]
            except:
                pdicts['qaRatiocum'][uidx] += 0.1
            #pdicts['count_c_dict'][cid] += 1
            pdicts['content_id_count_u_dict'][uqidx] += 1
            #pdicts['count_b_dict'][uidx] = 1 if newbid else pdicts['count_b_dict'][uidx] + 1
            #if newbid : pdicts['answered_correctly_sum_b_dict'][uidx] = 0
            if yprev:
                pdicts['answered_correctly_sum_u_dict'][uidx] += 1
                #pdicts['answered_correctly_sum_c_dict'][cid] += 1
                #pdicts['answered_correctly_sum_b_dict'][uidx] += 1
                pdicts['content_id_answered_correctly_sum_u_dict'][uqidx] += 1
            #pdicts['track_b'][uidx] = bid
            if pdicts['lecture_logged'][uidx] == 1:
                # clear the one-shot lecture tag/part once consumed
                pdicts['lecture_tag'][uidx] = 0
                pdicts['lecture_part'][uidx] = 0
                pdicts['lecture_logged'][uidx] = 1
    #countmat = np.transpose(np.stack([cu, cidcu, cb]), (1,0)).astype(np.float32)
    #correctmat = np.transpose(np.stack([acsu, cidacsu, acsb]), (1,0)).astype(np.float32)
    # Assemble accumulators into named feature columns and join onto df.
    countmat = np.transpose(np.stack([cu, cidcu]), (1,0)).astype(np.float32)
    correctmat = np.transpose(np.stack([acsu, cidacsu]), (1,0)).astype(np.float32)
    avgcorrectmat = correctmat / (countmat + 0.001).astype(np.float32)
    acsumat = np.expand_dims(acsu, 1).astype(np.float32)
    lect = lect.astype(np.float32)
    lectcat = lectcat.astype(np.float32)
    outmat = np.concatenate((countmat, avgcorrectmat, acsumat, qamat, lect, lectcat), 1)
    cols = [f'counts___feat{i}' for i in range(2)] + \
           [f'avgcorrect___feat{i}' for i in range(2)] + \
           ['cid_answered_correctly'] + [f'rank_stats_{i}' for i in range(2)] + \
           ['lecture_ct','lecture_lag', 'lecture_tag','lecture_part']
    outdf = pd.DataFrame(outmat, columns = cols, index = df.index.tolist())
    df = pd.concat([df, outdf], 1)
    return df
# ----- configuration, CLI arguments and raw data loading -----
DECAY = 0.0
logger.info('Load args')
parser = argparse.ArgumentParser("PyTorch Xview Pipeline")
arg = parser.add_argument
arg('--workers', type=int, default=8, help='number of cpu threads to use')
arg('--batchsize', type=int, default=1024)
arg('--lr', type=float, default=0.001)
arg('--epochs', type=int, default=12)
arg('--maxseq', type=int, default=128)
arg('--hidden', type=int, default=256)
arg('--n_layers', type=int, default=2)
arg('--n_heads', type=int, default=8)
arg('--dumpdata', type=bool, default=0)
arg('--bags', type=int, default=4)
arg('--model', type=str, default='lstm')
arg('--label-smoothing', type=float, default=0.01)
arg('--dir', type=str, default='val')
#arg('--version', type=str, default='V05')
args = parser.parse_args()
args.dumpdata = bool(args.dumpdata)
logger.info(args)
# Darwin == local laptop run; anything else is assumed to have a GPU.
device = 'cpu' if platform.system() == 'Darwin' else 'cuda'
CUT=0
DIR=args.dir#'val'
VERSION='V30FT'#args.version
debug = False
validaten_flg = False
# Columns carried through from the raw interaction logs.
FILTCOLS = ['row_id', 'user_id', 'content_id', 'content_type_id', \
            'answered_correctly', 'prior_question_elapsed_time', \
            'prior_question_had_explanation', 'task_container_id', \
            'timestamp', 'user_answer']
logger.info(f'Loaded columns {", ".join(FILTCOLS)}')
valid = pd.read_feather(f'data/{DIR}/cv{CUT+1}_valid.feather')[FILTCOLS]
train = pd.read_feather(f'data/{DIR}/cv{CUT+1}_train.feather')[FILTCOLS]
# Keep each user's history in chronological order for the loop features.
train = train.sort_values(['user_id', 'timestamp']).reset_index(drop = True)
valid = valid.sort_values(['user_id', 'timestamp']).reset_index(drop = True)
# Joins questions
ldf = pd.read_csv('data/lectures.csv')
ldf.type_of = ldf.type_of.str.replace(' ', '_')
ldict = ldf.set_index('lecture_id').to_dict()
#lecture_types = [t for t in ldf.type_of.unique() if t!= 'starter']
qdf = pd.read_csv('data/questions.csv')
# Split the space-separated tag string into up to six integer tag columns.
qdf[[f'tag{i}' for i in range(6)]] = qdf.tags.fillna('').apply(split_tags).tolist()
bdict = qdf.set_index('question_id')['bundle_id'].to_dict()
keepcols = ['question_id', 'part', 'bundle_id', 'correct_answer'] + [f'tag{i}' for i in range(6)]
train = pd.merge(train, qdf[keepcols], left_on = 'content_id', right_on = 'question_id', how = 'left')
valid = pd.merge(valid, qdf[keepcols], left_on = 'content_id', right_on = 'question_id', how = 'left')
formatcols = ['question_id', 'part', 'bundle_id', 'correct_answer', 'user_answer']+ [f'tag{i}' for i in range(6)]
train[formatcols] = train[formatcols].fillna(0).astype(np.int16)
valid[formatcols] = valid[formatcols].fillna(0).astype(np.int16)
# How correct is the answer
def qaRanks(df):
    """Build answer-popularity lookups from the question interactions in ``df``.

    Returns ``(qaRatio, qaCorrect, rankDf)``: ``qaRatio`` maps
    ``(question_id, user_answer)`` to the fraction of attempts that chose that
    answer, ``qaCorrect`` maps ``question_id`` to its correct answer (taken
    from the module-level ``qdf`` question table), and ``rankDf`` keeps the
    per-answer ratio and raw count keyed by question.
    """
    # attempts per (question, chosen answer) and per question overall
    by_answer = df.groupby(['question_id', 'user_answer', 'correct_answer'])['answered_correctly'].count()
    per_question = df.groupby(['question_id'])['answered_correctly'].count()
    counts = pd.merge(by_answer, per_question, left_index=True, right_index=True).reset_index()
    counts.columns = ['question_id', 'user_answer', 'correct_answer', 'answcount', 'quescount']
    counts['answerratio'] = (counts.answcount / counts.quescount).astype(np.float32)
    rankDf = counts.set_index('question_id')[['answerratio', 'answcount']].reset_index()
    qaRatio = counts.set_index(['question_id', 'user_answer']).answerratio.to_dict()
    qaCorrect = qdf.set_index('question_id').correct_answer.to_dict()
    return qaRatio, qaCorrect, rankDf
# ----- feature engineering on the question rows of the training split -----
ix = train.content_type_id == False
qaRatio, qaCorrect, rankDf = qaRanks(train[ix])
# Missing prior-question info is encoded as its own category (2) / zero time.
train['prior_question_had_explanation'] = train['prior_question_had_explanation'].astype(np.float32).fillna(2).astype(np.int8)
valid['prior_question_had_explanation'] = valid['prior_question_had_explanation'].astype(np.float32).fillna(2).astype(np.int8)
train['prior_question_elapsed_time'] = train['prior_question_elapsed_time'].fillna(0).astype(np.int32)
valid['prior_question_elapsed_time'] = valid['prior_question_elapsed_time'].fillna(0).astype(np.int32)
# Per-content aggregates: mean correctness, popularity and mean attempts/user.
content_df1 = train.query('content_type_id == 0')[['content_id','answered_correctly']]\
    .groupby(['content_id']).agg(['mean', 'count']).astype(np.float32).reset_index()
content_df1.columns = ['content_id', 'answered_correctly_avg_c', 'answered_correctly_ct_c']
content_df2 = train.query('content_type_id == 0') \
    .groupby(['content_id','user_id']).size().reset_index()
content_df2 = content_df2.groupby(['content_id'])[0].mean().astype(np.float32).reset_index()
content_df2.columns = ['content_id', 'attempts_avg_c']
content_df = pd.merge(content_df1, content_df2, on = 'content_id')
content_df.columns
del content_df1, content_df2
gc.collect()
content_df.iloc[:,1:] = content_df.iloc[:,1:].astype(np.float32)
train = pd.merge(train, content_df, on=['content_id'], how="left")
valid = pd.merge(valid, content_df, on=['content_id'], how="left")
# Count task container id
taskcols = ['user_id', 'task_container_id']
train['task_container_cts'] = train[taskcols][::-1].groupby(taskcols).cumcount()[::-1]
valid['task_container_cts'] = valid[taskcols][::-1].groupby(taskcols).cumcount()[::-1]
# user stats features with loops
qidx = train.content_type_id == False
# Oversize the state arrays ~20% so unseen users/pairs can be appended later.
n_users = int(len(train[qidx].user_id.unique()) * 1.2)
n_users_ques = int(len(train[qidx][['user_id', 'content_id']].drop_duplicates()) * 1.2)
u_int_cols = ['answered_correctly_sum_u_dict', 'count_u_dict', 'lecture_tag', 'lecture_part', 'lecture_logged', \
              'content_id_lag', 'pexp_count_u_dict', 'count_u_lect_dict', 'count_u_lect_timestamp'] #'track_b', 'answered_correctly_sum_b_dict', 'count_b_dict',
u_float_cols = ['userRatioCum', 'userAvgRatioCum', 'qaRatiocum', 'qaRatioCorrectcum', ]
uq_int_cols = ['content_id_answered_correctly_sum_u_dict', 'content_id_count_u_dict']
pdicts = {**dict((col, np.zeros(n_users, dtype= np.uint32)) for col in u_int_cols),
          **dict((col, np.zeros(n_users, dtype= np.float32)) for col in u_float_cols),
          **dict((col, np.zeros(n_users_ques, dtype= np.uint8)) for col in uq_int_cols),
          **{'qaRatio' : qaRatio, 'qaCorrect': qaCorrect}}
# content_id -> {user_id -> dense (user, question) index}
cid_udict = train[qidx][['user_id', 'content_id']].drop_duplicates() \
    .reset_index(drop=True).reset_index().groupby('content_id') \
    .apply(lambda x : x.set_index('user_id')['index'].to_dict() )
# BUGFIX: one independent dict per content id.  The previous `13523 * [{}]`
# repeated a reference to a SINGLE shared dict, so at inference time a write
# for one unseen content id leaked into every other unseen content id.
pdicts['uqidx'] = [{} for _ in range(13523)]
for id_,row_ in cid_udict.iteritems():
    pdicts['uqidx'][id_] = row_
del cid_udict
pdicts['max_uqidx'] = max(max(d.values()) for d in pdicts['uqidx'] if d!= {})
# pdicts['uqidx'] = train[qidx][['user_id', 'content_id']].drop_duplicates() \
#                        .reset_index(drop = True).reset_index() \
#                        .set_index(['user_id', 'content_id']).to_dict()['index']
pdicts['uidx'] = train[qidx][['user_id']].drop_duplicates() \
    .reset_index(drop = True).reset_index() \
    .set_index(['user_id']).to_dict()['index']
pdicts['max_uidx'] = max(v for v in pdicts['uidx'].values())
train = add_user_feats(train, pdicts)
if args.dumpdata:
    dumpobj(f'data/{DIR}/pdicts_{VERSION}_pre.pk', pdicts)
    dumpobj(f'data/{DIR}/valid_{VERSION}_pre.pk', valid)
valid = add_user_feats(valid, pdicts)
# For start off remove lectures
train = train.loc[train.content_type_id == False].reset_index(drop=True)
valid = valid.loc[valid.content_type_id == False].reset_index(drop=True)
# Encode (content, chosen answer) as a single categorical id (4 answers).
train['content_user_answer'] = train['user_answer'] + 4 * train['content_id'].astype(np.int32)
valid['content_user_answer'] = valid['user_answer'] + 4 * valid['content_id'].astype(np.int32)
#train['answered_correctly_ct_log'] = np.log1p(train['answered_correctly_ct_c'].fillna(0).astype(np.float32)) - 7.5
#valid['answered_correctly_ct_log'] = np.log1p(valid['answered_correctly_ct_c'].fillna(0).astype(np.float32)) - 7.5
# Log-scale then standardise the heavy-tailed count columns; train statistics
# are stored in pdicts so the identical transform can be replayed at submit time.
pdicts['NORMCOLS'] = ['counts___feat0', 'counts___feat1', 'cid_answered_correctly',
                      'lecture_ct','lecture_lag', 'answered_correctly_ct_c']
meanvals = np.log1p(train[pdicts['NORMCOLS']].fillna(0).astype(np.float32)).mean().values
stdvals = np.log1p(train[pdicts['NORMCOLS']].fillna(0).astype(np.float32)).std().values
pdicts['meanvals'] = meanvals
pdicts['stdvals'] = stdvals
train[pdicts['NORMCOLS']] = (np.log1p(train[pdicts['NORMCOLS']].fillna(0).astype(np.float32)) - meanvals) / stdvals
valid[pdicts['NORMCOLS']] = (np.log1p(valid[pdicts['NORMCOLS']].fillna(0).astype(np.float32)) - meanvals) / stdvals
# Create index for loader
trnidx = train.reset_index().groupby(['user_id'])['index'].apply(list).to_dict()
validx = valid.reset_index().groupby(['user_id'])['index'].apply(list).to_dict()
# Engineered history features fed to the network as continuous inputs.
FEATCOLS = ['counts___feat0', 'avgcorrect___feat0', 'counts___feat1', 'avgcorrect___feat1',
            'cid_answered_correctly', 'rank_stats_0', 'rank_stats_1'] #
# Full, ordered set of columns consumed by the model.
pdicts['MODCOLS'] = ['content_id', 'content_type_id', 'prior_question_elapsed_time', \
                     'prior_question_had_explanation', 'task_container_id', 'lecture_tag', 'lecture_part', \
                     'timestamp', 'part', 'bundle_id', 'task_container_cts', \
                     'answered_correctly', 'user_answer', 'correct_answer', 'content_user_answer',
                     'answered_correctly_avg_c', 'answered_correctly_ct_c', 'attempts_avg_c', 'lecture_ct','lecture_lag'] \
    + [f'tag{i}' for i in range(6)] + FEATCOLS
# EMBCOLS = ['content_id', 'part', 'bundle_id'] + [f'tag{i}' for i in range(6)]
pdicts['TARGETCOLS'] = [ 'user_answer', 'answered_correctly', 'correct_answer', 'content_user_answer']
# SHIFT TARGET HERE
# Leaky columns carried forward within a task container (see SAKTDataset).
pdicts['CARRYTASKFWD'] = ['counts___feat0', 'avgcorrect___feat0', \
                          'cid_answered_correctly', 'rank_stats_0', 'rank_stats_1'] #
pdicts['CONTCOLS'] = ['timestamp', 'prior_question_elapsed_time', 'prior_question_had_explanation', \
                      'answered_correctly_avg_c', 'answered_correctly_ct_c', 'attempts_avg_c', \
                      'task_container_cts', 'lecture_ct','lecture_lag'] + FEATCOLS
pdicts['NOPAD'] = ['prior_question_elapsed_time', 'prior_question_had_explanation', \
                   'timestamp', 'content_type_id', 'task_container_cts'] + pdicts['CONTCOLS']
# Pad value per column: one past the observed max for categoricals, zero for NOPAD.
pdicts['PADVALS'] = train[pdicts['MODCOLS']].max(0) + 1
pdicts['PADVALS'][pdicts['NOPAD']] = 0
pdicts['EXTRACOLS'] = ['lag_time_cat', 'elapsed_time_cat']
#self = SAKTDataset(train, MODCOLS, PADVALS)
# Stash everything needed to rebuild these features at submission time.
pdicts['keepcols'] = keepcols
pdicts['content_df'] = content_df
pdicts['ldict'] = ldict
pdicts['bdict'] = bdict
pdicts['qdf'] = qdf
if args.dumpdata:
    logger.info('Dump objects - pdicts')
    for k, v in pdicts.items():
        dumpobj(f'data/{DIR}/pdicts____{VERSION}_{k}.pk', v)
    # The uqidx list of dicts is flattened to a "user cid index" csv.
    fo = open(f'data/{DIR}/pdicts____{VERSION}_uqidx.csv','w')
    for cid, d in enumerate(tqdm(pdicts['uqidx'])):
        for u, i in d.items():
            s = f'{u} {cid} {i}\n'
            fo.write(s)
    fo.close()
    '''
    fo = open(f'data/{DIR}/{VERSION}/pdicts____uqidx.csv','r')
    uqidx = defaultdict(lambda: {})
    for t, l in tqdm(enumerate(fo)):
        l = list(map(int, l[:-1].split()))
        uqidx[l[0]][l[1]] = l[2]
    l = None
    fo.close()
    '''
    logger.info('Dump objects - train/val')
    dumpobj(f'data/{DIR}/train_{VERSION}.pk', train)
    dumpobj(f'data/{DIR}/valid_{VERSION}.pk', valid)
    logger.info('Done... yayy!!')
gc.collect()
#logger.info(f'Na vals train \n\n{train.isna().sum()}')
#logger.info(f'Na vals valid \n\n{valid.isna().sum()}')
#logger.info(f'Max vals train \n\n{train.max()}')
#logger.info(f'Max vals valid \n\n{valid.max()}')
class SAKTDataset(Dataset):
    """Sequence dataset: for each sampled (user, row) pair, build that user's
    padded interaction history up to and including the row.

    ``basedf`` (e.g. the training frame when validating) is prepended so that
    validation rows see the user's full history; rows flagged ``base==1`` are
    never sampled as targets themselves.
    """
    def __init__(self, data, basedf, cols, padvals, extracols, carryfwdcols,
                 maxseq = args.maxseq, has_target = True, submit = False):
        super(SAKTDataset, self).__init__()
        self.cols = cols
        self.extracols = extracols
        self.carryfwd = carryfwdcols
        self.data = data
        self.data['base'] = 0
        if basedf is not None:
            self.base = basedf
            self.base['base'] = 1
            self.data = pd.concat([self.base, self.data], 0)
        self.data = self.data.sort_values(['user_id', 'timestamp']).reset_index(drop = True)
        self.padvals = padvals
        # user_id -> ordered list of row positions for that user
        self.uidx = self.data.reset_index()\
            .groupby(['user_id'])['index'].apply(list).to_dict()
        # (user_id, row position) pairs eligible as targets (non-base rows only)
        self.quidx = self.data.query('base==0').reset_index()[['user_id', 'index']].values
        self.quidxbackup = self.quidx.copy()
        #if basedf is None:
        #    self.quidx = self.quidx[np.random.choice(self.quidx.shape[0], 2*10**6, replace=False)]
        self.task_container_id = self.data.task_container_id.values
        self.carryfwdidx = [self.cols.index(c) for c in self.carryfwd]
        # milliseconds -> seconds
        self.data[['timestamp','prior_question_elapsed_time']] = \
            self.data[['timestamp','prior_question_elapsed_time']] / 1000
        self.dfmat = self.data[self.cols].values.astype(np.float32)
        self.users = self.data.user_id.unique()
        del self.data
        gc.collect()
        self.padmat = self.padvals[self.cols].values
        self.maxseq = maxseq
        self.has_target = has_target
        self.targetidx = [self.cols.index(c) for c in \
                          ['answered_correctly', 'user_answer', 'correct_answer', 'content_user_answer']]
        self.padtarget = np.array([self.padvals[self.targetidx].tolist()])
        self.yidx = self.cols.index('answered_correctly')
        self.timecols = [self.cols.index(c) for c in ['timestamp','prior_question_elapsed_time']]
        # Bin edges (in seconds) for the categorical lag-time embedding.
        self.lagbins = np.concatenate([np.linspace(*a).astype(np.int32) for a in [(0, 10, 6), (12, 100, 45),(120, 600, 80),
                                                                                  (660, 1440, 28), (1960, 10800, 36), (10800, 259200, 60),
                                                                                  (518400, 2592000, 10), (2592000, 31104000, 22), (31104000, 311040000, 10)]])
        self.submit = False
    def __len__(self):
        return len(self.quidx)
    def __getitem__(self, idx, row = None):
        """Return (sequence matrix, mask, target) for one sampled row."""
        if self.submit:
            # in this case, user will be the index, otherwise, we will pass the id
            u = idx
            umatls = []
            if u in self.uidx:
                useqidx = self.uidx[u]
                useqidx = useqidx[-self.maxseq:]
                umatls.append(self.dfmat[useqidx].astype(np.float32))
            # NOTE(review): self.test_matu is never set in __init__ — it looks
            # like it is attached externally at submission time; confirm.
            if u in self.test_matu:
                umatls.append(self.test_matu[u])
            if len(umatls) > 0:
                umatls.append(np.expand_dims(row, 0))
                umat = np.concatenate(umatls)
            else:
                umat = np.expand_dims(row, 0)
            umat = umat[-self.maxseq:]
        else:
            # Get index of user and question
            u,q = self.quidx[idx]
            # Pull out ths user index sequence
            useqidx = self.uidx[u]
            # Pull out position of question
            cappos = useqidx.index(q) + 1
            # Pull out the sequence of questions up to that question
            container_buffer = 6
            useqidx = useqidx[:cappos][-self.maxseq-container_buffer:]
            # Randomise task container id sequence, but keep the sequence of the last 4
            tstmp = self.dfmat[useqidx,self.cols.index('timestamp')].copy()
            useqidx = np.array(useqidx)
            keepstatic = 4
            if len(useqidx)>keepstatic:
                useqidx[:-keepstatic] = useqidx[:-keepstatic][tstmp[:-keepstatic].argsort()]
            # Pull out the sequence for the user
            umat = self.dfmat[useqidx].astype(np.float32)
            # Add dummy for task container
            dummy_answer = self.task_container_id[useqidx[-1]] == self.task_container_id[useqidx]
            if sum(dummy_answer) > 1:
                # If we are past the first row of a container; remove
                # the previous questions and carry the carryfwd leaky cols
                ffwdvals = umat[dummy_answer][0, self.carryfwdidx]
                # Drop the prevous questions in the container
                dummy_answer[-1] = False
                umat = umat[~dummy_answer]
                umat[-1, self.carryfwdidx] = ffwdvals
            # Now limit to maxseq
            umat = umat[-self.maxseq:]
        useqlen = umat.shape[0]
        # Left-pad short sequences up to maxseq with the per-column pad values.
        if useqlen < self.maxseq:
            padlen = self.maxseq - umat.shape[0]
            upadmat = np.tile(self.padmat, (padlen, 1))
            umat = np.concatenate((upadmat, umat), 0)
        # convert time to lag
        umat[:, self.timecols[0]][1:] = umat[:, self.timecols[0]][1:] - umat[:, self.timecols[0]][:-1]
        umat[:, self.timecols[0]][0] = 0
        # Time embeddings
        timeemb = np.stack(( \
            np.digitize(umat[:, self.timecols[0]], self.lagbins),
            (umat[:, self.timecols[1]]).clip(0, 300))).round()
        timeemb = np.transpose(timeemb, (1,0))
        umat = np.concatenate((umat, timeemb), 1)
        # preprocess continuous time - try log scale and roughly center it
        umat[:, self.timecols] = np.log10( 1.+ umat[:, self.timecols] / 60 )
        if self.has_target:
            target = umat[-1, self.yidx ]
            # Shift target columns one step so the model never sees the
            # current row's answer.
            umat[:, self.targetidx] = np.concatenate((self.padtarget, \
                umat[:-1, self.targetidx]), 0)
            if target > 1:
                logger.info(f'{target}\t{u},{q}\t{idx}' )
        umat = torch.tensor(umat).float()
        # NOTE(review): ``target`` is only bound when has_target is True.
        target = torch.tensor(target)
        # Create mask
        umask = torch.zeros(umat.shape[0], dtype=torch.int8)
        umask[-useqlen:] = 1
        return umat, umask, target
# dseq = trndataset.quidxbackup
def randShuffleSort(dseq, clip = 0.01 ):
    """Produce a randomised (user, row-index) ordering for one training epoch.

    Each user's history is rotated from a random starting point, positions
    are jittered so users of different lengths interleave evenly across
    batches, the extreme ``clip`` fraction is trimmed, and the result is
    cut into 10 chunks that are shuffled like a deck of cards.
    Returns an array of [user, index] pairs.
    """
    quidxdf = pd.DataFrame(dseq.copy(), columns = ['user', 'index'])
    # Randomise starting positions
    quidxdf['startidx'] = quidxdf.groupby('user').cumcount()
    quidxdf['userct'] = quidxdf.groupby('user')['index'].transform('count').values
    quidxdf['random_start'] = quidxdf.groupby('user')\
        .apply(lambda x: random.randint(0, len(x)) ).loc[quidxdf.user].values
    # Rows before the random start are wrapped around to the end of the user.
    ix = quidxdf['startidx'] < quidxdf['random_start']
    quidxdf['startidx'][ix] = quidxdf['startidx'][ix] + quidxdf['userct'][ix]
    # Even out batches
    quidxdf = quidxdf.sort_values(['user', 'startidx'])
    # quidxdf['userctrand'] = quidxdf.groupby('user')['index'].transform('count').values
    # Map each user's rows to fractional positions in [0, 1] plus jitter.
    quidxdf['startidx'] = ((quidxdf.groupby('user').cumcount().values) \
        / quidxdf.userct.values) + \
        (1/quidxdf.userct).apply(lambda v: random.uniform(-v/2, v/2)).values
    # Randomise users
    udf = pd.DataFrame(quidxdf.user.unique(), columns = ['user_id'])
    udf['rand'] = np.random.permutation(( np.arange(len(udf))))
    quidxdf['random_user'] = udf.set_index('user_id').loc[quidxdf.user].values
    # Now sort on sequence
    quidxdf = quidxdf.sort_values(['startidx', 'random_user'])
    # back to a sequence
    clipct = int(clip*len(quidxdf))
    quidxmat = quidxdf.iloc[ clipct : -clipct ][['user', 'index']].values
    # Deck of card shuffle
    quidxmat = np.array_split(quidxmat , 10)
    random.shuffle(quidxmat )
    quidxmat = np.concatenate(quidxmat)
    return quidxmat
class LearnNet28(nn.Module):
    """Two-stage LSTM over a user's interaction sequence.

    Stage 1 (``seqnet1``) consumes question-side embeddings (plus their
    difference to the final step's embedding); stage 2 (``seqnet2``) consumes
    answer-side embeddings, the weighted continuous features and stage-1
    hiddens.  The last hidden states feed a small MLP that emits one logit
    per sequence (probability the final question is answered correctly).
    """
    def __init__(self, modcols, contcols, padvals, extracols,
                 device = device, dropout = 0.2, model_type = args.model,
                 hidden = args.hidden):
        super(LearnNet28, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.padvals = padvals
        self.extracols = extracols
        self.modcols = modcols + extracols
        self.contcols = contcols
        self.embcols = ['content_id', 'part']
        self.model_type = model_type
        # Categorical embeddings; sizes fixed to the riiid vocabularies
        # (13523 questions + padding headroom, 4 answer options, 188 tags).
        self.emb_content_id = nn.Embedding(13526, 32)
        self.emb_content_id_prior = nn.Embedding(13526*3, 32)
        self.emb_bundle_id = nn.Embedding(13526, 32)
        self.emb_part = nn.Embedding(9, 4)
        self.emb_tag= nn.Embedding(190, 8)
        self.emb_lpart = nn.Embedding(9, 4)
        self.emb_prior = nn.Embedding(3, 2)
        self.emb_ltag= nn.Embedding(190, 16)
        self.emb_lag_time = nn.Embedding(301, 16)
        self.emb_elapsed_time = nn.Embedding(301, 16)
        self.emb_cont_user_answer = nn.Embedding(13526 * 4, 5)
        # Learnable weights: one set over the tag embeddings, one scalar per
        # continuous column.
        self.tag_idx = torch.tensor(['tag' in i for i in self.modcols])
        self.tag_wts = torch.ones((sum(self.tag_idx), 16)) / sum(self.tag_idx)
        self.tag_wts = nn.Parameter(self.tag_wts)
        self.tag_wts.requires_grad = True
        self.cont_wts = nn.Parameter( torch.ones(len(self.contcols)) )
        self.cont_wts.requires_grad = True
        self.cont_idx = [self.modcols.index(c) for c in self.contcols]
        self.cont_idxcts = [t for t,c in enumerate(self.contcols) if 'counts' in c]
        self.embedding_dropout = SpatialDropout(dropout)
        # Width of the question-side embedding slice used for the diff feature.
        self.diffsize = self.emb_content_id.embedding_dim + self.emb_part.embedding_dim + \
            self.emb_bundle_id.embedding_dim + self.emb_tag.embedding_dim * 7
        IN_UNITSQ = self.diffsize * 2 + \
            self.emb_lpart.embedding_dim + self.emb_ltag.embedding_dim + \
            self.emb_prior.embedding_dim + self.emb_content_id_prior.embedding_dim + \
            len(self.cont_idxcts)
        IN_UNITSQA = ( self.emb_lag_time.embedding_dim + self.emb_elapsed_time.embedding_dim + \
            self.emb_cont_user_answer.embedding_dim) + len(self.contcols)
        LSTM_UNITS = hidden
        self.diffsize = self.emb_content_id.embedding_dim + self.emb_part.embedding_dim + \
            self.emb_bundle_id.embedding_dim + self.emb_tag.embedding_dim * 7
        self.seqnet1 = nn.LSTM(IN_UNITSQ, LSTM_UNITS, bidirectional=False, batch_first=True)
        self.seqnet2 = nn.LSTM(IN_UNITSQA + LSTM_UNITS, LSTM_UNITS, bidirectional=False, batch_first=True)
        self.linear1 = nn.Linear(LSTM_UNITS * 2 + len(self.contcols), LSTM_UNITS//2)
        self.bn0 = nn.BatchNorm1d(num_features=len(self.contcols))
        self.bn1 = nn.BatchNorm1d(num_features=LSTM_UNITS * 2 + len(self.contcols))
        self.bn2 = nn.BatchNorm1d(num_features=LSTM_UNITS//2)
        self.linear_out = nn.Linear(LSTM_UNITS//2, 1)
    def forward(self, x, m = None):
        ## Continuous
        # BatchNorm expects (batch, channels, seq), hence the permutes.
        contmat = x[:,:, self.cont_idx]
        contmat = self.bn0(contmat.permute(0,2,1)) .permute(0,2,1)
        contmat = contmat * self.cont_wts
        # Joint id of content and prior-explanation category (3 categories).
        content_id_prior = x[:,:,self.modcols.index('content_id')] * 3 + \
            x[:,:, self.modcols.index('prior_question_had_explanation')]
        # Question-side embeddings.
        embcatq = torch.cat([
            self.emb_content_id( x[:,:, self.modcols.index('content_id')].long() ),
            self.emb_part( x[:,:, self.modcols.index('part')].long() ),
            self.emb_bundle_id( x[:,:, self.modcols.index('bundle_id')].long() ),
            self.emb_tag(x[:,:, self.tag_idx].long()).view(x.shape[0], x.shape[1], -1),
            self.emb_prior( x[:,:, self.modcols.index('prior_question_had_explanation')].long() ),
            #self.emb_cont_user_answer( x[:,:, self.modcols.index('content_user_answer')].long() ),
            self.emb_lpart( x[:,:, self.modcols.index('lecture_part')].long() ),
            self.emb_ltag( x[:,:, self.modcols.index('lecture_tag')].long() ) ,
            self.emb_content_id_prior( content_id_prior.long() ),
            #self.emb_lag_time( x[:,:, self.modcols.index('lag_time_cat')].long() ),
            #self.emb_elapsed_time( x[:,:, self.modcols.index('elapsed_time_cat')].long() )
            ] #+ [self.emb_tag(x[:,:, ii.item()].long()) for ii in torch.where(self.tag_idx)[0]]
            , 2)
        # Difference of each step's question embedding to the final step's.
        embcatqdiff = embcatq[:,:,:self.diffsize] - embcatq[:,-1,:self.diffsize].unsqueeze(1)
        # Categroical embeddings
        embcatqa = torch.cat([
            #self.emb_content_id( x[:,:, self.modcols.index('content_id')].long() ),
            #self.emb_bundle_id( x[:,:, self.modcols.index('bundle_id')].long() ),
            self.emb_cont_user_answer( x[:,:, self.modcols.index('content_user_answer')].long() ),
            self.emb_lag_time( x[:,:, self.modcols.index('lag_time_cat')].long() ),
            self.emb_elapsed_time( x[:,:, self.modcols.index('elapsed_time_cat')].long() )
            ] #+ [self.emb_tag(x[:,:, ii.item()].long()) for ii in torch.where(self.tag_idx)[0]]
            , 2)
        #embcatqadiff = embcatqa - embcatqa[:,-1].unsqueeze(1)
        embcatq = self.embedding_dropout(embcatq)
        embcatqa = self.embedding_dropout(embcatqa)
        embcatqdiff = self.embedding_dropout(embcatqdiff)
        #embcatqadiff = self.embedding_dropout(embcatqadiff)
        # Weighted sum of tags - hopefully good weights are learnt
        xinpq = torch.cat([embcatq, embcatqdiff, contmat[:,:,self.cont_idxcts]], 2)
        hiddenq, _ = self.seqnet1(xinpq)
        xinpqa = torch.cat([embcatqa, contmat, hiddenq], 2)
        hiddenqa, _ = self.seqnet2(xinpqa)
        # Take last hidden unit
        hidden = torch.cat([hiddenqa[:,-1,:], hiddenq[:,-1,:], contmat[:, -1]], 1)
        hidden = self.dropout( self.bn1( hidden) )
        hidden = F.relu(self.linear1(hidden))
        hidden = self.dropout(self.bn2(hidden))
        out = self.linear_out(hidden).flatten()
        return out
logger.info('Create model and loaders')
pdicts['maargs'] = maargs = {'modcols':pdicts['MODCOLS'],
                             'contcols':pdicts['CONTCOLS'],
                             'padvals':pdicts['PADVALS'],
                             'extracols':pdicts['EXTRACOLS']}
# model = self = LearnNet(**maargs)
# model.to(device)
# Warm-start from a previously trained checkpoint (this is a fine-tuning run).
WTS= 'lstm_V30_hidden768_ep7.bin'
WTSDIR = f'data/valfull/V01S/basemodels/{WTS}'
model = self = LearnNet28(**maargs)
model.to(device)
checkpoint = torch.load(WTSDIR, map_location=torch.device(device))
model.load_state_dict(checkpoint)
# Should we be stepping; all 0's first, then all 1's, then all 2,s
pdicts['daargs'] = daargs = {'cols':pdicts['MODCOLS'],
                             'padvals':pdicts['PADVALS'],
                             'carryfwdcols': pdicts['CARRYTASKFWD'],
                             'extracols':pdicts['EXTRACOLS'],
                             'maxseq': args.maxseq}
# Validation sequences get the training frame as history (basedf).
trndataset = SAKTDataset(train, None, **daargs)
valdataset = SAKTDataset(valid, train, **daargs)
loaderargs = {'num_workers' : args.workers, 'batch_size' : args.batchsize}
trndataset.quidx = randShuffleSort(trndataset.quidxbackup)
trnloader = DataLoader(trndataset, shuffle=False, **loaderargs)
valloader = DataLoader(valdataset, shuffle=False, **loaderargs)
# Smoke-test a single batch before training.
x, m, y = next(iter(trnloader))
#mls = [ len(np.unique(m, return_counts=True)[0]) for m in \
#       np.split(trndataset.quidx[:(len(trndataset.quidx) // 2056)*2056,0], len(trndataset.quidx) // 2056 )]
#pd.Series(mls).plot()
# Prep class for inference
if args.dumpdata:
    logger.info('Dump objects - tail of maxseq')
    df = pd.concat([train, valid]).reset_index(drop = True)
    df = df.sort_values(['user_id', 'timestamp']).groupby(['user_id']).tail(args.maxseq)
    dumpobj(f'data/{DIR}/train_all_{VERSION}_tail.pk', df)
    alldataset = SAKTDataset(df, None, **daargs)
    dumpobj(f'data/{DIR}/alldataset_{VERSION}_tail.pk', alldataset)
    del df, alldataset
    gc.collect()
criterion = nn.BCEWithLogitsLoss()
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
plist = [ {'params': [p for n, p in param_optimizer] } ]
optimizer = torch.optim.Adam(plist, lr=args.lr)
if device != 'cpu':
    scaler = torch.cuda.amp.GradScaler()
logger.info('Start training')
best_val_loss = 100.
trn_lossls = []
predls = []
bags = args.bags
for epoch in range(args.epochs):
    for param in model.parameters():
        param.requires_grad = True
    model.train()
    pbartrn = tqdm(enumerate(trnloader),
                   total = len(trndataset)//loaderargs['batch_size'],
                   desc=f"Train epoch {epoch}", ncols=0)
    trn_loss = 0.
    # Sort forward
    # Fresh randomised sampling order every epoch.
    trndataset.quidx = randShuffleSort(trndataset.quidxbackup)
    trnloader = DataLoader(trndataset, shuffle=False, **loaderargs)
    for step, batch in pbartrn:
        optimizer.zero_grad()
        x, m, y = batch
        x = x.to(device, dtype=torch.float)
        m = m.to(device, dtype=torch.long)
        y = y.to(device, dtype=torch.float)
        x = torch.autograd.Variable(x, requires_grad=True)
        y = torch.autograd.Variable(y)
        out = model(x, m)
        loss = criterion(out, y)
        loss.backward()
        optimizer.step()
        '''
        with autocast():
            out = model(x, m)
            loss = criterion(out, y)
        if device != 'cpu':
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            loss.backward()
            optimizer.step()
        optimizer.zero_grad()
        '''
        # Running mean loss plus a trailing window of the last 1000 steps.
        trn_loss += loss.item()
        trn_lossls.append(loss.item())
        trn_lossls = trn_lossls[-1000:]
        pbartrn.set_postfix({'train loss': trn_loss / (step + 1), \
                             'last 1000': sum(trn_lossls) / len(trn_lossls) })
    pbarval = tqdm(enumerate(valloader),
                   total = len(valdataset)//loaderargs['batch_size'],
                   desc=f"Valid epoch {epoch}", ncols=0)
    y_predls = []
    y_act = valid['answered_correctly'].values
    model.eval()
    # Checkpoint every epoch before validating.
    torch.save(model.state_dict(), f'data/{DIR}/{args.model}_{VERSION}_hidden{args.hidden}_ep{epoch}.bin')
    for step, batch in pbarval:
        x, m, y = batch
        x = x.to(device, dtype=torch.float)
        m = m.to(device, dtype=torch.long)
        with torch.no_grad():
            out = model(x, m)
            y_predls.append(out.detach().cpu().numpy())
    y_pred = np.concatenate(y_predls)
    predls.append(y_pred)
    auc_score = roc_auc_score(y_act, y_pred )
    logger.info(f'Valid AUC Score {auc_score:.5f}')
    # AUC of the average over the last `bags` epochs' predictions.
    auc_score = roc_auc_score(y_act, sum(predls[-bags:]) )
    logger.info(f'Bagged valid AUC Score {auc_score:.5f}')
# Ideas
# Add time since start of container
# Add some more details on container - steps since last sequential step
# Try getting loss on a prediction from every step and not just last ???
# Ideas:
# Split tags to separate embeddings
# Try with a gru instead of an lstm
# Part correct for historical
# Make the content_user_answer inside the dict (for submission time)
# Store the mean vals inside pdict for submission time.
# Make an embedding out of the count of user answers and the count of correct | [
"darragh.hanley@gmail.com"
] | darragh.hanley@gmail.com |
90d17be586bf4e63e3a8346168cf2cd7cd9755b1 | 4a1b61cf551db7843050cc7080cec6fd60c4f8cc | /2020/백준문제/백트래킹/14889_스타트와 링크.py | 0552b5ad7fa88ee91f4b003aaaa042dbd8d4f794 | [] | no_license | phoenix9373/Algorithm | 4551692027ca60e714437fd3b0c86462f635d8ff | c66fd70e14bb8357318e8b8f386d2e968f0c4d98 | refs/heads/master | 2023-08-24T10:01:20.798430 | 2021-10-15T07:57:36 | 2021-10-15T07:57:36 | 288,092,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | # N은 짝수, 두 팀으로 나눔.
# 팀 나누는 방법은 조합?
def comb(idx, sidx):
    """Enumerate every ascending choice of N//2 player indices.

    ``sel[:sidx]`` holds the partially built selection; each completed
    selection is appended (as a copy) to the module-level ``teams`` list.
    ``idx`` is the smallest player index still allowed at this depth.
    """
    global teams
    if sidx == N // 2:
        # a full first-team selection has been built
        teams.append(sel.copy())
        return
    for candidate in range(idx, N):
        sel[sidx] = candidate
        comb(candidate + 1, sidx + 1)
# Read the player count and the S[i][j] synergy matrix from stdin (BOJ 14889).
N = int(input())
info = [list(map(int, input().split())) for _ in range(N)]
teams = []
sel = [0] * (N // 2)
# Enumerate every way to pick N//2 players for the first team.
comb(0, 0)
M = pow(10, 7)  # large initial value for the running minimum
for i in range(len(teams)):
    t1 = teams[i]
    # the complementary players form the second team
    t2 = [j for j in range(N) if j not in t1]
    # team score = sum of pairwise synergies within the team
    s1 = sum([info[x][y] for x in t1 for y in t1])
    s2 = sum([info[x][y] for x in t2 for y in t2])
    tmp = abs(s1 - s2)
    if M > tmp:
        M = tmp
print(M) | [
"phoenix9373@naver.com"
] | phoenix9373@naver.com |
e8d991ff30ab7eef92e081619c6febba3569da66 | e7d65f8773a8c736fc9e41e843d7da6da5cc2e0b | /py3plex/algorithms/community_detection/node_ranking.py | af80dd4bd4e6a79a2245f1f1a4d8fd843735e838 | [
"BSD-3-Clause"
] | permissive | hanbei969/Py3plex | 768e86b16ca00044fcb4188e01edf32c332c8a2a | 1ef3e0e6d468d24bd6e6aec3bd68f20b9d9686bb | refs/heads/master | 2021-01-03T18:19:24.049457 | 2020-02-12T16:51:14 | 2020-02-12T16:51:14 | 240,188,307 | 1 | 0 | BSD-3-Clause | 2020-02-13T05:57:16 | 2020-02-13T05:57:16 | null | UTF-8 | Python | false | false | 5,165 | py | ## node ranking algorithms
import numpy as np
import networkx as nx
import scipy.sparse as sp
#from networkx.algorithms.community.community_utils import is_partition
from itertools import product
# def stochastic_normalization(matrix):
# matrix = matrix.tolil()
# try:
# matrix.setdiag(0)
# except TypeError:
# matrix.setdiag(np.zeros(matrix.shape[0]))
# matrix = matrix.tocsr()
# d = matrix.sum(axis=1).getA1()
# nzs = np.where(d > 0)
# d[nzs] = 1 / d[nzs]
# matrix = (sp.diags(d, 0).tocsc().dot(matrix)).transpose()
# return matrix
def stochastic_normalization(matrix):
    """Zero the diagonal of a sparse matrix and return the transpose of its
    row-normalized form (a column-stochastic transition matrix).

    Rows that sum to zero are left as all-zero rows.

    :param matrix: scipy sparse matrix (any format convertible to LIL/CSR)
    :return: sparse matrix whose columns sum to 1 (or 0 for isolated nodes)
    """
    matrix = matrix.tolil()
    try:
        matrix.setdiag(0)
    except TypeError:
        # some scipy versions require an explicit array when zeroing the diagonal
        matrix.setdiag(np.zeros(matrix.shape[0]))
    matrix = matrix.tocsr()
    d = matrix.sum(axis=1).getA1()
    nzs = np.where(d > 0)
    # BUG FIX: previously the scaling diagonal was built from only the
    # non-zero entries (k = 1 / d[nzs]), giving sp.diags a vector shorter
    # than the matrix dimension and crashing whenever any row summed to
    # zero.  Inverting in place keeps the full length; zero rows stay zero.
    d[nzs] = 1 / d[nzs]
    matrix = (sp.diags(d, 0).tocsc().dot(matrix)).transpose()
    return matrix
def stochastic_normalization_hin(matrix):
    """Return the column-stochastic transpose of ``matrix`` with its
    diagonal zeroed (heterogeneous-network variant).

    Rows that sum to zero are left untouched, so isolated nodes simply map
    to all-zero columns in the result.
    """
    mat = matrix.tolil()
    try:
        mat.setdiag(0)
    except TypeError:
        # older scipy needs an explicit zero vector for setdiag
        mat.setdiag(np.zeros(mat.shape[0]))
    mat = mat.tocsr()
    row_sums = mat.sum(axis=1).getA1()
    positive = np.where(row_sums > 0)
    row_sums[positive] = 1 / row_sums[positive]
    return sp.diags(row_sums, 0).tocsc().dot(mat).transpose()
def modularity(G, communities, weight='weight'):
    """Compute the modularity Q of a node partition of graph ``G``.

    :param G: networkx graph (directed or undirected; multigraphs supported)
    :param communities: iterable of node collections, one per community
    :param weight: edge-data key used as weight (absent edges count 0,
        unweighted edges count 1)
    :return: float modularity score; assumes G has at least one edge
        (zero total weight would divide by zero)
    """
    multigraph = G.is_multigraph()
    directed = G.is_directed()
    m = G.size(weight=weight)
    if directed:
        out_degree = dict(G.out_degree(weight=weight))
        in_degree = dict(G.in_degree(weight=weight))
        norm = 1 / m
    else:
        out_degree = dict(G.degree(weight=weight))
        in_degree = out_degree
        norm = 1 / (2 * m)
    def val(u, v):
        # observed edge weight minus the configuration-model expectation
        try:
            if multigraph:
                w = sum(d.get(weight, 1) for k, d in G[u][v].items())
            else:
                w = G[u][v].get(weight, 1)
        except KeyError:
            w = 0
        # Double count self-loops if the graph is undirected.
        if u == v and not directed:
            w *= 2
        return w - in_degree[u] * out_degree[v] * norm
    # BUG FIX: np.sum over a generator is deprecated by NumPy and misbehaves
    # on newer releases; the builtin sum is the correct scalar reduction here.
    Q = sum(val(u, v) for c in communities for u, v in product(c, repeat=2))
    return Q * norm
def page_rank_kernel(index_row):
    """Compute a personalized PageRank vector seeded at a single node.

    Returns ``(index_row, vector)`` where the vector is L2-normalized, or an
    all-zero vector when the ranking has zero norm.

    NOTE(review): this function reads ``G``, ``damping_hyper``,
    ``spread_step_hyper``, ``spread_percent_hyper`` and ``graph`` from module
    scope, none of which are defined in this file -- presumably injected by
    the caller before mapping this kernel over a pool.  ``graph.shape[1]`` in
    the zero-norm branch looks inconsistent with ``G`` above; confirm.
    """
    ## call as results = p.map(pr_kernel, batch)
    pr = sparse_page_rank(G, [index_row],
                          epsilon=1e-6,
                          max_steps=100000,
                          damping=damping_hyper,
                          spread_step=spread_step_hyper,
                          spread_percent=spread_percent_hyper,
                          try_shrink=True)
    norm = np.linalg.norm(pr, 2)
    if norm > 0:
        pr = pr / np.linalg.norm(pr, 2)
        return (index_row,pr)
    else:
        return (index_row,np.zeros(graph.shape[1]))
def sparse_page_rank(matrix, start_nodes,
                     epsilon=1e-6,
                     max_steps=100000,
                     damping=0.5,
                     spread_step=10,
                     spread_percent=0.3,
                     try_shrink=False):
    """Personalized PageRank by power iteration on a sparse transition matrix.

    :param matrix: column-stochastic sparse matrix (see the note below)
    :param start_nodes: indices of the seed nodes for the restart vector
    :param epsilon: L1 convergence threshold between successive iterates
    :param max_steps: iteration cap
    :param damping: restart/damping factor
    :param spread_step: max propagation probes when estimating reachability
    :param spread_percent: fraction of nodes above which shrinking is skipped
    :param try_shrink: restrict the iteration to the seed's reachable
        submatrix when it stays small (a performance optimization)
    :return: 1-D numpy array of scores; seed nodes are zeroed in the output

    NOTE(review): the leading assert makes ``start_nodes=None`` raise before
    the ``if start_nodes is None`` branch runs, so that branch is dead code.
    """
    assert(len(start_nodes)) > 0
    # this method assumes that column sums are all equal to 1 (stochastic normalizaition!)
    size = matrix.shape[0]
    if start_nodes is None:
        start_nodes = range(size)
        nz = size
    else:
        nz = len(start_nodes)
    # uniform restart distribution over the seed nodes
    start_vec = np.zeros((size, 1))
    start_vec[start_nodes] = 1
    start_rank = start_vec / len(start_nodes)
    rank_vec = start_vec / len(start_nodes)
    # calculate the max spread:
    shrink = False
    which = np.zeros(0)
    if try_shrink:
        # probe how far mass spreads from the seeds; if the reachable set
        # stops growing while still small, iterate only on that submatrix
        v = start_vec / len(start_nodes)
        steps = 0
        while nz < size * spread_percent and steps < spread_step:
            steps += 1
            v += matrix.dot(v)
            nz_new = np.count_nonzero(v)
            if nz_new == nz:
                shrink = True
                break
            nz = nz_new
        rr = np.arange(matrix.shape[0])
        which = (v[rr] > 0).reshape(size)
        if shrink:
            start_rank = start_rank[which]
            rank_vec = rank_vec[which]
            matrix = matrix[:, which][which, :]
    diff = np.Inf
    steps = 0
    while diff > epsilon and steps < max_steps: # not converged yet
        steps += 1
        new_rank = matrix.dot(rank_vec)
        rank_sum = np.sum(new_rank)
        # re-inject mass lost to dangling (zero-column) nodes
        if rank_sum < 0.999999999:
            new_rank += start_rank * (1 - rank_sum)
        new_rank = damping * new_rank + (1 - damping) * start_rank
        new_diff = np.linalg.norm(rank_vec - new_rank, 1)
        diff = new_diff
        rank_vec = new_rank
    if try_shrink and shrink:
        # scatter the shrunken solution back into a full-size vector
        ret = np.zeros(size)
        rank_vec = rank_vec.T[0] ## this works for both python versions
        ret[which] = rank_vec
        ret[start_nodes] = 0
        return ret.flatten()
    else:
        rank_vec[start_nodes] = 0
        return rank_vec.flatten()
def hubs_and_authorities(graph):
    """Return HITS (hubs, authorities) score dicts for ``graph``.

    NOTE(review): ``nx.hits_scipy`` was deprecated and removed in
    NetworkX 3.0 -- newer installations need ``nx.hits``; confirm the
    pinned networkx version.
    """
    return nx.hits_scipy(graph)
def hub_matrix(graph):
    """Return the HITS hub matrix (A·Aᵀ) of ``graph`` via networkx."""
    return nx.hub_matrix(graph)
def authority_matrix(graph):
    """Return the HITS authority matrix (Aᵀ·A) of ``graph`` via networkx."""
    return nx.authority_matrix(graph)
| [
"skrljblaz@gmail.com"
] | skrljblaz@gmail.com |
7dd3f312d9609c7548451d48fd24fc7858972eb4 | 629034462af2e1fccaf7a47e4b9a5cbd789c90e7 | /algorithm/graph-embedding/configs/base.py | e8868d62ac4da3f10266e620bbaf663f22f5b386 | [] | no_license | zhouxh19/Grep-project | eb17c23650aada7a87aef550492ca4bcd8ef0657 | 40ee7d8dcc1e46db82582bf92b86f9a909db727c | refs/heads/master | 2022-12-24T13:00:40.783457 | 2020-09-13T14:47:15 | 2020-09-13T14:47:15 | 284,244,936 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | from pathlib import Path
# Module-level configuration shared by the graph-embedding experiments.
base_dir = Path().resolve()
config = {}  # NOTE(review): never populated in this file -- confirm still used
feature_num = 2          # node features per vertex
max_iteration = 8        # training iterations
max_test_iteration = 2   # evaluation iterations
k = 3
db = 'tpch'              # active benchmark database
# number of workload queries available per benchmark database
workload_num = {'tpch': 20187, 'job': 3224, 'xuetangx': 22000}
# JOB benchmark tables mapped to (initially empty) per-table info lists
table_job = {'aka_name': [], 'aka_title': [], 'cast_info': [], 'char_name': [], 'comp_cast_type': [], 'company_name': [], 'company_type': [], 'complete_cast': [], 'info_type': [], 'keyword': [], 'kind_type': [], 'link_type': [], 'movie_companies': [], 'movie_info': [], 'movie_info_idx': [], 'movie_keyword': [], 'movie_link': [], 'name': [], 'person_info': [], 'role_type': [], 'title': []}
v_feature_size = 6 # [tbl_id, tbl_size, distinct values, row len, select, aggregate]
def get_file(file_path, pattern="*"):
    """Return the paths of all regular files under ``file_path``, recursively.

    Args:
        file_path: directory to search.
        pattern: glob pattern to match; defaults to every file
            (e.g. ``pattern="*.py"`` restricts to Python sources).

    Returns:
        list[Path]: matching files, in ``Path.rglob`` traversal order.
    """
    # directories are excluded; only plain files are collected
    return [f for f in Path(file_path).rglob(pattern) if f.is_file()]
# Inactive sample configuration kept for reference.  This is a bare
# module-level string literal (never assigned), copied from a BERT project
# layout; it has no runtime effect.
'''
BASE_DIR = Path('pybert')
config = {
'raw_data_path': BASE_DIR / 'dataset/train.csv',
'test_path': BASE_DIR / 'dataset/test_stage1.csv',
'data_dir': BASE_DIR / 'dataset',
'log_dir': BASE_DIR / 'output/log',
'writer_dir': BASE_DIR / "output/TSboard",
'figure_dir': BASE_DIR / "output/figure",
'checkpoint_dir': BASE_DIR / "output/checkpoints",
'cache_dir': BASE_DIR / 'model/',
'result': BASE_DIR / "output/result",
'bert_vocab_path': BASE_DIR / 'pretrain/bert/base-chinese/vocab.txt',
'bert_config_file': BASE_DIR / 'pretrain/bert/base-chinese/config.json',
'bert_model_dir': BASE_DIR / 'pretrain/bert/base-chinese',
'xlnet_vocab_path': BASE_DIR / 'pretrain/xlnet/base-cased/spiece.model',
'xlnet_config_file': BASE_DIR / 'pretrain/xlnet/base-cased/config.json',
'xlnet_model_dir': BASE_DIR / 'pretrain/xlnet/base-cased'
}
'''
"zhouxuan19@mails.tsinghua.edu.cn"
] | zhouxuan19@mails.tsinghua.edu.cn |
b59bb380bb991cabe3cefb26f89a8fa5c6f3da44 | 1113e8eec4ccbbcd00c6a9b5466c5239b6f0eb03 | /ops/core/rdb_icpocm.py | 972f5c82126e411356f275269175e3db0db53953 | [] | no_license | yizhong120110/CPOS | a05858c84e04ce4aa48b3bfb43ee49264ffc5270 | 68ddf3df6d2cd731e6634b09d27aff4c22debd8e | refs/heads/master | 2021-09-01T17:59:53.802095 | 2017-12-28T05:43:06 | 2017-12-28T05:43:06 | 106,247,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,260 | py | # -*- coding: utf-8 -*-
"""
通讯进程参数信息查询
"""
import ops.core.rdb
def get_txxx( txjclx ):
    """Look up the communication-process parameters for the given process
    type (``jclx``) from the process-info table.

    Returns a dict such as::

        {'txwj': 'ops.ocm.short_tcp.jnfs_app', 'bm': 'jnfs_app'}

    where ``txwj`` is the dotted module path of the communication file,
    ``bm`` is the process code and ``txwjmc`` the bare file name.
    Returns an empty dict when no active row matches.

    NOTE(review): ``connection()`` is not defined or imported by name in this
    file (only ``ops.core.rdb`` is imported) -- presumably it is expected to
    be in scope via that module; confirm.
    """
    txjdxx_dic = {}
    with connection() as db:
        sql = """
            select b.fwfx ,b.txlx ,b.txwjmc ,b.bm
            from gl_jcxxpz a ,gl_txgl b
            where a.jcmc = b.bm
            and a.zt = '1' and a.jclx = %(jclx)s
        """
        # SQL and parameter dict are kept separate to prevent SQL injection
        d = {'jclx':txjclx}
        rs = db.execute_sql( sql ,d )
        obj = rs[0] if rs else None
        if obj:
            wjlj = ['ops']
            # fwfx == "1" selects the outgoing (ocm) side, otherwise incoming (icp)
            if str(obj['fwfx']) == "1":
                wjlj.append("ocm")
            else:
                wjlj.append("icp")
            wjlj.extend([obj['txlx'] ,obj['txwjmc'].split('.')[0]])
            # dotted path of the communication file
            txjdxx_dic["txwj"] = ".".join(wjlj)
            # the communication code, used to fetch the communication parameters
            txjdxx_dic.update( {"bm":obj['bm']} )
            txjdxx_dic.update( {"txwjmc":obj['txwjmc'].split('.')[0]} )
    return txjdxx_dic
def get_txcs( txjclx ,txbm = None ):
    """Fetch the UI-configured parameter key/value pairs for a communication
    process.

    ``txbm`` is the unique code from the communication-management table; when
    omitted it is resolved once via :func:`get_txxx` using ``txjclx``.

    Returns a dict of configured parameters, e.g. ``{'IP': '127.0.0.1'}``
    (empty when the process cannot be resolved or has no active parameters).
    """
    txcs_dic = {}
    # If txbm was supplied, skip the lookup; otherwise resolve it once.
    if txbm == None:
        txxx = get_txxx( txjclx )
        if txxx:
            txbm = txxx["bm"]
        else:
            return txcs_dic
    with connection() as db:
        # SQL and parameter dict are kept separate to prevent SQL injection
        sql = """
            select csdm ,value as csz
            from gl_csdy a ,gl_txgl b
            where a.lx = '4' and a.ssid = b.id
            and a.zt = '1'
            and b.bm = %(txbm)s
        """
        rs = db.execute_sql( sql ,{"txbm":txbm} )
        for obj in rs:
            txcs_dic.update({obj['csdm']:obj['csz']})
    return txcs_dic
if __name__ == '__main__':
    # Ad-hoc smoke test: resolve the 'jnfs_app' process and dump its parameters.
    print(get_txcs(get_txxx( 'jnfs_app' )["bm"]) )
| [
"yizhong120110@gmail.com"
] | yizhong120110@gmail.com |
e569dbc4052364132ab074eaf6c3b2e70407822b | d644b6cabb4fa88cf900c59799a2897f5a0702d8 | /tests/base_tests/multipolygon_tests/strategies.py | bd202eb0721c2b567aa55e8a3372cf4acb3b6804 | [
"MIT"
] | permissive | lycantropos/gon | c3f89a754c60424c8e2609e441d7be85af985455 | 177bd0de37255462c60adcbfcdf76bfdc343a9c1 | refs/heads/master | 2023-07-06T01:11:57.028646 | 2023-06-26T20:47:14 | 2023-06-27T00:30:06 | 194,597,548 | 15 | 1 | MIT | 2023-06-27T00:30:07 | 2019-07-01T04:06:06 | Python | UTF-8 | Python | false | false | 1,010 | py | from tests.strategies import (coordinates_strategies,
coordinates_to_multipolygons,
coordinates_to_points,
coordinates_to_polygons,
invalid_multipolygons)
from tests.utils import (cleave_in_tuples,
to_pairs,
to_triplets)
# Hypothesis strategies for multipolygon tests; each strategy first draws a
# coordinate domain, then builds geometries over it so every draw is
# internally consistent.
multipolygons = coordinates_strategies.flatmap(coordinates_to_multipolygons)
polygons = coordinates_strategies.flatmap(coordinates_to_polygons)
# re-exported under the local name used by the test modules
invalid_multipolygons = invalid_multipolygons
multipolygons_strategies = (coordinates_strategies
                            .map(coordinates_to_multipolygons))
# pairs/triplets share a single coordinate domain per draw
multipolygons_pairs = multipolygons_strategies.flatmap(to_pairs)
multipolygons_triplets = multipolygons_strategies.flatmap(to_triplets)
# (multipolygon, point) tuples built over the same coordinate domain
multipolygons_with_points = (
    (coordinates_strategies
     .flatmap(cleave_in_tuples(coordinates_to_multipolygons,
                               coordinates_to_points)))
)
| [
"azatibrakov@gmail.com"
] | azatibrakov@gmail.com |
df941af2e5b59c83931a8144e389f0875fe8898b | 804a81e52d5fc7fd1078268a3a2976ca80a91880 | /nengo/test/test_new_api.py | 14494ef583ac2047ea4e4f779770ce9dca7474ec | [
"MIT"
] | permissive | jvitku/nengo | 692bbc56717acf476cfb384b3cf0affa71135c40 | 484b9244a32ff1011a1292b24225752db75fc3b2 | refs/heads/master | 2020-12-25T05:42:34.936881 | 2013-07-10T16:50:38 | 2013-07-10T19:48:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,948 | py | from pprint import pprint
from unittest import TestCase
from matplotlib import pyplot as plt
import nose
import numpy as np
from nengo.nonlinear import LIF
from nengo.model import Model
def rmse(a, b):
    """Return the root-mean-square error between arrays ``a`` and ``b``."""
    squared_error = (a - b) ** 2
    return np.sqrt(np.mean(squared_error))
class TestNewAPI(TestCase):
    """Legacy (Python 2) integration tests for the old nengo Model API.

    NOTE(review): this file uses Python-2 ``print`` statements and an old
    nengo/matplotlib API; it will not run under Python 3 as-is.
    """
    # set True to display matplotlib figures while the tests run
    show = False
    def test_direct_mode_simple(self):
        """A direct-mode node outputting sin(t) should be probed exactly."""
        model = Model('Runtime Test', seed=123, backend='numpy')
        model.make_node('in', output=np.sin)
        model.probe('in')
        res = model.run(0.01)
        data = res['in']
        print data.dtype
        print data
        assert np.allclose(data.flatten(), np.sin(np.arange(0, 0.0095, .001)))
    def test_basic_1(self, N=1000):
        """
        Create a network with sin(t) being represented by
        a population of spiking neurons. Assert that the
        decoded value from the population is close to the
        true value (which is input to the population).
        Expected duration of test: about .7 seconds
        """
        model = Model('Runtime Test', seed=123, backend='numpy')
        model.make_node('in', output=np.sin)
        model.make_ensemble('A', LIF(N), 1)
        model.connect('in', 'A')
        # three probes on 'A' with increasingly slow synaptic filters
        model.probe('A', sample_every=0.01, pstc=0.001) # 'A'
        model.probe('A', sample_every=0.01, pstc=0.01) # 'A_1'
        model.probe('A', sample_every=0.01, pstc=0.1) # 'A_2'
        model.probe('in', sample_every=0.01, pstc=0.01)
        pprint(model.o)
        res = model.run(1.0)
        # ground truth: sin sampled at the probe rate (every 10 ms)
        target = np.sin(np.arange(0, 1000, 10) / 1000.)
        target.shape = (100, 1)
        for A, label in (('A', 'fast'), ('A_1', 'med'), ('A_2', 'slow')):
            data = np.asarray(res[A]).flatten()
            plt.plot(data, label=label)
        in_data = np.asarray(res['in']).flatten()
        plt.plot(in_data, label='in')
        plt.legend(loc='upper left')
        #print in_probe.get_data()
        #print net.sim.sim_step
        if self.show:
            plt.show()
        # target is off-by-one at the sampling frequency of dt=0.001
        print rmse(target, res['in'])
        assert rmse(target, res['in']) < .001
        # slower filters trade noise for lag, hence the looser bounds
        print rmse(target, res['A'])
        assert rmse(target, res['A']) < .3
        print rmse(target, res['A_1'])
        assert rmse(target, res['A_1']) < .03
        print rmse(target, res['A_2'])
        assert rmse(target, res['A_2']) < 0.1
    def test_basic_5K(self):
        """Same as test_basic_1 but with a 5000-neuron population."""
        return self.test_basic_1(5000)
    def test_matrix_mul(self):
        """Build a neural matrix-multiplication network (no correctness assert)."""
        # Adjust these values to change the matrix dimensions
        # Matrix A is D1xD2
        # Matrix B is D2xD3
        # result is D1xD3
        D1 = 1
        D2 = 2
        D3 = 3
        seed = 123
        N = 50
        model = Model('Matrix Multiplication', seed=seed, backend='numpy')
        # values should stay within the range (-radius,radius)
        radius = 1
        # make 2 matrices to store the input
        model.make_ensemble('A', LIF(N), D1*D2, radius=radius)
        model.make_ensemble('B', LIF(N), D2*D3, radius=radius)
        # connect inputs to them so we can set their value
        model.make_node('input A', [0] * D1 * D2)
        model.make_node('input B', [0] * D2 * D3)
        model.connect('input A', 'A')
        model.connect('input B', 'B')
        # the C matrix holds the intermediate product calculations
        # need to compute D1*D2*D3 products to multiply 2 matrices together
        model.make_ensemble('C', LIF(4 * N), D1 * D2 * D3, # dimensions=2,
                            radius=1.5*radius)
        # encoders=[[1,1], [1,-1], [-1,1], [-1,-1]])
        # determine the transformation matrices to get the correct pairwise
        # products computed. This looks a bit like black magic but if
        # you manually try multiplying two matrices together, you can see
        # the underlying pattern. Basically, we need to build up D1*D2*D3
        # pairs of numbers in C to compute the product of. If i,j,k are the
        # indexes into the D1*D2*D3 products, we want to compute the product
        # of element (i,j) in A with the element (j,k) in B. The index in
        # A of (i,j) is j+i*D2 and the index in B of (j,k) is k+j*D3.
        # The index in C is j+k*D2+i*D2*D3, multiplied by 2 since there are
        # two values per ensemble. We add 1 to the B index so it goes into
        # the second value in the ensemble.
        transformA = [[0] * (D1 * D2) for i in range(D1 * D2 * D3 * 2)]
        transformB = [[0] * (D2 * D3) for i in range(D1 * D2 * D3 * 2)]
        for i in range(D1):
            for j in range(D2):
                for k in range(D3):
                    ix = (j + k * D2 + i * D2 * D3) * 2
                    transformA[ix][j + i * D2] = 1
                    transformB[ix + 1][k + j * D3] = 1
        model.connect('A', 'C', transform=transformA)
        model.connect('B', 'C', transform=transformB)
        # now compute the products and do the appropriate summing
        model.make_ensemble('D', LIF(N), D1 * D3, radius=radius)
        def product(x):
            return x[0] * x[1]
        # the mapping for this transformation is much easier, since we want to
        # combine D2 pairs of elements (we sum D2 products together)
        model.connect('C', 'D', index_post=[i / D2 for i in range(D1*D2*D3)],
                      func=product)
        model.get('input A').origin['X'].decoded_output.set_value(
            np.asarray([.5, -.5]).astype('float32'))
        model.get('input B').origin['X'].decoded_output.set_value(
            np.asarray([0, 1, -1, 0]).astype('float32'))
        pprint(model.o)
        Dprobe = model.probe('D')
        model.run(1)
        net_data = Dprobe.get_data()
        print net_data.shape
        plt.plot(net_data[:, 0])
        plt.plot(net_data[:, 1])
        if self.show:
            plt.show()
        # NOTE(review): SkipTest is instantiated but never raised, so this
        # test "passes" without checking correctness -- confirm intent.
        nose.SkipTest('test correctness')
| [
"tbekolay@gmail.com"
] | tbekolay@gmail.com |
eae545f5af90d8c3c562142d45cf233bbe774293 | 2f4ae73c68637306c878a5234fc3b81950de8854 | /tests/compiler/test_pre_parser.py | 640bc673a35ff9c272b30198c186ce9e2bdc561d | [
"MIT"
] | permissive | ltfschoen/vyper | b121cf1f320f852b7997b0d54eaff5e68163e66e | f68af5730516011007e2546ff825b881e94f030f | refs/heads/master | 2020-03-11T02:42:28.688320 | 2018-04-17T12:42:59 | 2018-04-17T12:42:59 | 129,726,567 | 0 | 0 | MIT | 2018-04-16T10:34:58 | 2018-04-16T10:34:57 | null | UTF-8 | Python | false | false | 1,317 | py | from vyper.exceptions import StructureException
from pytest import raises
def test_semicolon_prohibited(get_contract):
    """Compiling Vyper code with semicolon-separated statements must raise
    StructureException."""
    code = """@public
def test() -> int128:
    a: int128 = 1; b: int128 = 2
    return a + b
    """
    with raises(StructureException):
        get_contract(code)
def test_valid_semicolons(get_contract):
    """Semicolons inside string literals (all quoting/escaping styles) must
    not be treated as statement separators by the pre-parser."""
    code = """
@public
def test() -> int128:
    a: int128 = 1
    b: int128 = 2
    s: bytes[300] = "this should not be a problem; because it is in a string"
    s = \"\"\"this should not be a problem; because it's in a string\"\"\"
    s = 'this should not be a problem;;; because it\\\'s in a string'
    s = '''this should not ; \'cause it\'s in a string'''
    s = "this should not be \\\"; because it's in a ;\\\"string;\\\";"
    return a + b
    """
    c = get_contract(code)
    assert c.test() == 3
def test_external_contract_definition_alias(get_contract):
    """An external-contract interface alias must compile and allow calls
    through a stored contract address."""
    contract_1 = """
@public
def bar() -> int128:
    return 1
    """
    contract_2 = """
contract Bar():
    def bar() -> int128: pass
bar_contract: static(Bar)
@public
def foo(contract_address: contract(Bar)) -> int128:
    self.bar_contract = contract_address
    return self.bar_contract.bar()
    """
    c1 = get_contract(contract_1)
    c2 = get_contract(contract_2)
    # contract_2 forwards the call to contract_1's bar()
    assert c2.foo(c1.address) == 1
| [
"jacques@dilectum.co.za"
] | jacques@dilectum.co.za |
1ca1632321aee4063fff9a55b986fe6e9ff62a8b | 6bdb32ddbd72c4337dab12002ff05d6966538448 | /gridpack_folder/mc_request/LHEProducer/Spin-2/BulkGraviton_hh_hVVhbb_inclusive/BulkGraviton_hh_hVVhbb_inclusive_narrow_M450_13TeV-madgraph_cff.py | 03b465498d74fe0f6d139a62106502f6a31c8127 | [] | no_license | cyrilbecot/DibosonBSMSignal_13TeV | 71db480de274c893ba41453025d01bfafa19e340 | d8e685c40b16cde68d25fef9af257c90bee635ba | refs/heads/master | 2021-01-11T10:17:05.447035 | 2016-08-17T13:32:12 | 2016-08-17T13:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/31b6e7510443b74e0f9aac870e4eb9ae30c19d65/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-2/BulkGraviton_hh_hVVhbb_inclusive/BulkGraviton_hh_hVVhbb_inclusive_narrow_M450
# CMSSW producer that unpacks the MadGraph gridpack tarball (from CVMFS) and
# generates LHE events for the BulkGraviton->hh sample (M=450 GeV, narrow width).
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    # pre-built gridpack produced from the cards linked above
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-2/BulkGraviton_hh_hVVhbb_inclusive/narrow/v1/BulkGraviton_hh_hVVhbb_inclusive_narrow_M450_tarball.tar.xz'),
    nEvents = cms.untracked.uint32(5000),   # events per job
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
| [
"syu@cern.ch"
] | syu@cern.ch |
645847f61e91b01dc0dc99a5a8f3216f229bb86c | ad23b164febd12d5c6d97cfbcd91cf70e2914ab3 | /TestCaseFunction/main/run_all_test_createActivity.py | fd0b6a13a6bce27051cfae09df9786c9dc4ddad3 | [] | no_license | wawj901124/webtestdata | 9eedf9a01dec2c157725299bda9a42e8d357ef0b | 54f6412566fce07ece912760c5caea73ede819cb | refs/heads/master | 2022-12-09T14:18:38.125191 | 2021-04-25T07:54:07 | 2021-04-25T07:54:07 | 175,773,318 | 1 | 1 | null | 2022-12-08T02:39:15 | 2019-03-15T07:49:16 | Python | UTF-8 | Python | false | false | 4,042 | py | import unittest
# 在jenkins运行时经常提示找不到包,所以就需要手动添加PYTHONPATH,通过追加sys.path列表来实现
import os
import sys
rootpath = str(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
syspath = sys.path
sys.path = []
sys.path.append(rootpath) # 将工程根目录加入到python搜索路径中
sys.path.extend([rootpath + i for i in os.listdir(rootpath) if i[0] != "."]) # 将工程目录下的一级目录添加到python搜索路径中
sys.path.extend(syspath)
# 追加完成
from TestCaseFunction.htmltest import HTMLTestRunner_jietuxiugai as HTMLTestRunner
from test import *
from TestCaseFunction.test.alltest_list_create_activity import caselist #调用数组文件
from TestCaseFunction.util.gettimestr import GetTimeStr
from TestCaseFunction.util.send_attach_email import SendEmail
from TestCaseFunction.log.my_log import UserLog
class RunAllTest(unittest.TestCase):
    """Load every case named by caselist(), run the suite with HTMLTestRunner,
    and e-mail the generated HTML report (and, via run(), the captured log)."""

    def runAllTest(self):
        """Build a suite from the configured dotted test names, run it, and
        mail the HTML report as an attachment."""
        # assemble the configured case names into a suite
        alltestnames = caselist()
        suite = unittest.TestSuite()
        for testpy in alltestnames:
            try:
                suite.addTest(unittest.defaultTestLoader.loadTestsFromName(testpy))  # load all cases by dotted name
            except Exception:
                print('ERROR: Skipping tests from "%s".' % testpy)
                try:
                    # BUG FIX: this used __import__(test) with an undefined
                    # name `test`; probe the failing module (testpy) instead
                    # to distinguish import errors from load errors.
                    __import__(testpy)
                except ImportError:
                    print('Could not import the "%s" test module.' % testpy)
                else:
                    print('Could not load the "%s" test suite.' % testpy)
                from traceback import print_exc
                print_exc()
        self.outPutMyLog('Running the tests...')
        gettime = GetTimeStr()
        filename = '%s/report/%s_report.html' % (str(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), gettime.getTimeStr())
        self.outPutMyLog('The report path:%s' % filename)
        # `with` guarantees the report file is closed even if the runner
        # raises (the original leaked the handle on failure)
        with open(filename, 'wb') as fp:
            runner = HTMLTestRunner.HTMLTestRunner(
                stream=fp,
                title=u'python 自动化测试_测试报告',
                description=u'用例执行情况:',
                verbosity=2)  # verbosity=2 echoes per-case print output into the report
            runner.run(suite)
        # mail the report as an attachment
        # NOTE(review): [1]/[2] look like placeholder recipient lists -- confirm
        send_e = SendEmail()
        send_e.send_main([1], [2], filename)

    def outPutMyLog(self, context):
        """Write `context` through the project logger (UserLog)."""
        mylog = UserLog(context)
        mylog.runMyLog()

    def run(self):
        """Redirect stdout to a timestamped log file, run the whole suite,
        restore stdout, then mail the captured log."""
        self.outPutMyLog('---------------------------')
        stdout_backup = sys.stdout
        gettime = GetTimeStr()
        timestr = gettime.getTimeStr()
        # define the log file that receives your log info
        logpath = "%s/log/%s_message.txt" % (str(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), timestr)
        self.outPutMyLog('Now all print info will be written to message.log')
        try:
            # `with` closes the log file and `finally` restores sys.stdout
            # even on failure -- the original left stdout pointing at a
            # closed file if anything raised mid-run.
            with open(logpath, "w", encoding="utf-8") as log_file:
                # redirect print output to the log file
                sys.stdout = log_file
                self.outPutMyLog('----------开始打印日志-----------------\n')
                self.runAllTest()
                self.outPutMyLog('\n----------日志打印结束-----------------')
        finally:
            # restore the output to the initial pattern
            sys.stdout = stdout_backup
        self.outPutMyLog('Now this will be presented on screen')
        # mail the captured log as an attachment
        send_e = SendEmail()
        send_e.send_main([1], [2], logpath)
if __name__ == '__main__':
    # Entry point: run the suite directly (report only); use runat.run()
    # instead to also capture stdout into a timestamped log file.
    runat = RunAllTest()
    # runat.run()
    runat.runAllTest()
| [
"410287958@qq.com"
] | 410287958@qq.com |
3375c9d7e789e362ffbc2a148ed3e347e6f8f559 | 62718778da7e683be16ede27bdc2aaf1695be5ec | /routing_classifier/predict_bert.py | b730128378a6e307bd4465527d604911224486c1 | [] | no_license | GihanMora/QA_LEAP | 25a65685df41360dc8385391434b5ba25e115f1b | 67b2bbdde471202ff6e4b69b831b8321d733fd89 | refs/heads/master | 2023-09-04T18:23:39.720062 | 2021-11-11T11:52:49 | 2021-11-11T11:52:49 | 426,982,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | import ast
import numpy as np
import pandas as pd
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoTokenizer, AutoModel
from routing_classifier.building_embedding_space_bert import get_mean_pooling_emb
# Hugging Face checkpoint used for both the tokenizer and the encoder.
# NOTE: loading happens at import time and may trigger a network download.
model_path = 'bert-base-uncased'
vocab_path = 'bert-base-uncased'
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModel.from_pretrained(model_path)
def predict_class_bert(sentence,embedding_space):
    """Rank every class in ``embedding_space`` by cosine similarity to the
    mean-pooled BERT embedding of ``sentence`` and print the ranking.

    ``embedding_space`` is a DataFrame with 'classes' and 'embeddings'
    columns, where each embedding is stored as a stringified list.
    Returns nothing -- results are only printed.

    NOTE(review): the list is sorted ascending, so the *most* similar class
    is printed last; confirm that is the intended presentation.
    """
    # print(sentence)
    # print(embedding_space)
    sentence_emb = get_mean_pooling_emb([sentence],tokenizer,model)
    # print(sentence_emb)
    tuples = []
    for i,row in embedding_space.iterrows():
        # print(row['classes'])
        # `dis` is a cosine *similarity* despite the name
        dis = cosine_similarity(ast.literal_eval(row['embeddings']), sentence_emb)
        dis = np.round(dis,3)
        tuples.append([row['classes'], dis])
    s_tup = sorted(tuples, key=lambda x: x[1]) # sort tuples based on the cosine distance
    print(sentence,s_tup)
def predict_class_bert_tokenwise(sentence,embedding_space):
    """Run predict_class_bert independently on every whitespace-separated
    token of ``sentence``, printing one ranking per token."""
    for token in sentence.split(' '):
        predict_class_bert(token, embedding_space)
# Ad-hoc demo run against a developer-local embedding-space CSV.
# NOTE(review): hard-coded absolute Windows path -- will fail on other machines.
embedding_space = pd.read_csv(r"E:\Projects\LEAP_Gihan\QA_LEAP\routing_classifier\embedding_spaces\embedding_space_bert.csv")
predict_class_bert('what is the energy consumption of library?',embedding_space)
predict_class_bert_tokenwise('what is the energy consumption of library?',embedding_space)
"gihangamage.15@cse.mrt.ac.lk"
] | gihangamage.15@cse.mrt.ac.lk |
524fa9c06c6a7e63bf445458738e0daf7b313979 | ff871c8dc30b34070cc3e0ea6a31e658158b7c63 | /PaddleVision/ImageClassification/models/mobilenet.py | 4a1154e1a5bd03a241effb8e4ef05bc4d8636929 | [
"Apache-2.0"
] | permissive | SunAhong1993/PaddleSolution | 3da9255849b520a6fb7d7b1eda5a2da48f9127e4 | 46ee3812c66c0dc436c96be8330b7c7d931604b2 | refs/heads/master | 2020-07-05T05:06:08.133802 | 2019-08-20T12:19:24 | 2019-08-20T12:19:24 | 202,531,666 | 2 | 0 | Apache-2.0 | 2019-08-20T06:39:21 | 2019-08-15T11:48:03 | Jupyter Notebook | UTF-8 | Python | false | false | 6,201 | py | #copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
__all__ = ['MobileNet']
# Default training hyper-parameters consumed by the training script:
# ImageNet-style input geometry and normalization plus a piecewise-decay
# learning-rate schedule (rate drops at the listed epoch boundaries).
train_parameters = {
    "input_size": [3, 224, 224],
    "input_mean": [0.485, 0.456, 0.406],
    "input_std": [0.229, 0.224, 0.225],
    "learning_strategy": {
        "name": "piecewise_decay",
        "batch_size": 256,
        "epochs": [30, 60, 90],
        "steps": [0.1, 0.01, 0.001, 0.0001]
    }
}
class MobileNet():
    """MobileNet-v1 image classifier built with the PaddlePaddle fluid API.

    The width multiplier ``scale`` thins every layer uniformly; spatial
    resolutions in the comments assume a 224x224 input.
    """
    def __init__(self):
        # expose the default training hyper-parameters to the training script
        self.params = train_parameters
    def net(self, input, class_dim=1000, scale=1.0):
        """Build the forward graph.

        :param input: input image variable (NCHW)
        :param class_dim: number of output classes for the final FC layer
        :param scale: width multiplier applied to every channel count
        :return: un-normalized class logits variable
        """
        # conv1: 112x112
        input = self.conv_bn_layer(
            input,
            filter_size=3,
            channels=3,
            num_filters=int(32 * scale),
            stride=2,
            padding=1,
            name="conv1")
        # 56x56
        input = self.depthwise_separable(
            input,
            num_filters1=32,
            num_filters2=64,
            num_groups=32,
            stride=1,
            scale=scale,
            name="conv2_1")
        input = self.depthwise_separable(
            input,
            num_filters1=64,
            num_filters2=128,
            num_groups=64,
            stride=2,
            scale=scale,
            name="conv2_2")
        # 28x28
        input = self.depthwise_separable(
            input,
            num_filters1=128,
            num_filters2=128,
            num_groups=128,
            stride=1,
            scale=scale,
            name="conv3_1")
        input = self.depthwise_separable(
            input,
            num_filters1=128,
            num_filters2=256,
            num_groups=128,
            stride=2,
            scale=scale,
            name="conv3_2")
        # 14x14
        input = self.depthwise_separable(
            input,
            num_filters1=256,
            num_filters2=256,
            num_groups=256,
            stride=1,
            scale=scale,
            name="conv4_1")
        input = self.depthwise_separable(
            input,
            num_filters1=256,
            num_filters2=512,
            num_groups=256,
            stride=2,
            scale=scale,
            name="conv4_2")
        # 14x14
        for i in range(5):
            input = self.depthwise_separable(
                input,
                num_filters1=512,
                num_filters2=512,
                num_groups=512,
                stride=1,
                scale=scale,
                name="conv5" + "_" + str(i + 1))
        # 7x7
        input = self.depthwise_separable(
            input,
            num_filters1=512,
            num_filters2=1024,
            num_groups=512,
            stride=2,
            scale=scale,
            name="conv5_6")
        input = self.depthwise_separable(
            input,
            num_filters1=1024,
            num_filters2=1024,
            num_groups=1024,
            stride=1,
            scale=scale,
            name="conv6")
        # global average pooling down to 1x1 before the classifier
        input = fluid.layers.pool2d(
            input=input,
            pool_size=0,
            pool_stride=1,
            pool_type='avg',
            global_pooling=True)
        output = fluid.layers.fc(input=input,
                                 size=class_dim,
                                 param_attr=ParamAttr(
                                     initializer=MSRA(), name="fc7_weights"),
                                 bias_attr=ParamAttr(name="fc7_offset"))
        return output
    def conv_bn_layer(self,
                      input,
                      filter_size,
                      num_filters,
                      stride,
                      padding,
                      channels=None,
                      num_groups=1,
                      act='relu',
                      use_cudnn=True,
                      name=None):
        """Convolution (no bias) followed by batch norm with activation.

        ``num_groups`` > 1 with groups == channels gives a depthwise conv;
        cuDNN is disabled for that case by the caller.
        """
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            act=None,
            use_cudnn=use_cudnn,
            param_attr=ParamAttr(
                initializer=MSRA(), name=name + "_weights"),
            bias_attr=False)
        bn_name = name + "_bn"
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
    def depthwise_separable(self,
                            input,
                            num_filters1,
                            num_filters2,
                            num_groups,
                            stride,
                            scale,
                            name=None):
        """Depthwise 3x3 convolution followed by a pointwise 1x1 convolution
        (the MobileNet building block); all widths scaled by ``scale``."""
        depthwise_conv = self.conv_bn_layer(
            input=input,
            filter_size=3,
            num_filters=int(num_filters1 * scale),
            stride=stride,
            padding=1,
            num_groups=int(num_groups * scale),
            use_cudnn=False,
            name=name + "_dw")
        pointwise_conv = self.conv_bn_layer(
            input=depthwise_conv,
            filter_size=1,
            num_filters=int(num_filters2 * scale),
            stride=1,
            padding=0,
            name=name + "_sep")
        return pointwise_conv
| [
"jiangjiajun@baidu.com"
] | jiangjiajun@baidu.com |
d27a90acd871761c963b57f503bff98869154d67 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/operations/_load_balancer_frontend_ip_configurations_operations.py | 9c8d7b50957f3c4802e0aadc57c2a6c3281f68a4 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 9,049 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerFrontendIPConfigurationsOperations(object):
"""LoadBalancerFrontendIPConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # NOTE: autorest-generated code -- manual edits are lost on regeneration.
        # Wire up the pipeline client and (de)serializers supplied by the
        # generated service client; no I/O happens at construction time.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.LoadBalancerFrontendIPConfigurationListResult"]
        """Gets all the load balancer frontend IP configurations.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LoadBalancerFrontendIPConfigurationListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.LoadBalancerFrontendIPConfigurationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE: autorest-generated code -- manual edits are lost on regeneration.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LoadBalancerFrontendIPConfigurationListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request; continuation pages reuse the server-
            # provided next_link URL verbatim (no extra query parameters).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand (next_link, items) to ItemPaged.
            deserialized = self._deserialize('LoadBalancerFrontendIPConfigurationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, mapping non-200 responses to ARM errors.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations'}  # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
frontend_ip_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FrontendIPConfiguration"
"""Gets load balancer frontend IP configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param frontend_ip_configuration_name: The name of the frontend IP configuration.
:type frontend_ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FrontendIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.FrontendIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'frontendIPConfigurationName': self._serialize.url("frontend_ip_configuration_name", frontend_ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FrontendIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations/{frontendIPConfigurationName}'} # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
c3ed3b484ab64c7d5b85380754f2f47d6a7e4939 | df716b2868b289a7e264f8d2b0ded52fff38d7fc | /tests/formatters/symantec.py | f1b0886c63474d244c14ce69700bf5c780427667 | [
"Apache-2.0"
] | permissive | ir4n6/plaso | 7dd3cebb92de53cc4866ae650d41c255027cf80a | 010f9cbdfc82e21ed6658657fd09a7b44115c464 | refs/heads/master | 2021-04-25T05:50:45.963652 | 2018-03-08T15:11:58 | 2018-03-08T15:11:58 | 122,255,666 | 0 | 0 | Apache-2.0 | 2018-02-20T21:00:50 | 2018-02-20T21:00:50 | null | UTF-8 | Python | false | false | 1,167 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Symantec AV log file event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import symantec
from tests.formatters import test_lib
class SymantecAVFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the Symantec AV log file event formatter."""

  def testInitialization(self):
    """The formatter should construct without arguments."""
    formatter = symantec.SymantecAVFormatter()
    self.assertIsNotNone(formatter)

  def testGetFormatStringAttributeNames(self):
    """The formatter should expose the expected attribute names, in order."""
    formatter = symantec.SymantecAVFormatter()

    expected_attribute_names = [
        'event_map', 'category_map', 'virus', 'file', 'action0_map',
        'action1_map', 'action2_map', 'description', 'scanid', 'event_data',
        'remote_machine', 'remote_machine_ip']

    self._TestGetFormatStringAttributeNames(
        formatter, expected_attribute_names)
# TODO: add test for GetMessages.
# Allow running this test module directly (e.g. `python symantec.py`).
if __name__ == '__main__':
  unittest.main()
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
013f6557d21e5cb87dcc3029821b093ccc528416 | dc6750f77b60b188c5161f09c831622de76f84d4 | /andros/euterpe-master/euterpe/model_asr/seq2seq/v2/encrnn_decrnn_att_asr.py | 6e1e3ff0bc7a0609d7c141b5c63e04a59382f627 | [] | no_license | gudwns1215/latest | 614edb900167178845d99a0dfdfc732b625e26f5 | 0d0b96aaaecb05039da5b6faf81c2f3e78d8087c | refs/heads/master | 2020-06-03T22:36:13.664965 | 2018-10-08T03:03:50 | 2018-10-08T03:03:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,554 | py |
import sys
import time
import re
import numpy as np
import json
# pytorch #
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence as pack, pad_packed_sequence as unpack
# torchev #
from torchev.generator import generator_rnn, generator_attention, generator_act_fn, generator_act_module
from torchev.custom import decoder
from torchev.utils.helper import torchauto
from torchev.utils.mask_util import generate_seq_mask
from torchev.nn.modules import LayerNorm
# utilbox #
from utilbox.config_util import ConfigParser
class EncRNNDecRNNAtt(nn.Module) :
    """Attention-based encoder-decoder (seq2seq) ASR model.

    Encoder: per-frame FNN stack -> stacked (optionally bidirectional) RNNs,
    with optional 2x time downsampling after selected RNN layers.
    Decoder: token embedding -> attention decoder -> linear pre-softmax layer,
    optionally weight-tied with the embedding.
    """
    # NOTE(review): the mutable default arguments (lists/dicts) are shared
    # across calls; they are only read here, but callers should not mutate them.
    def __init__(self, enc_in_size, dec_in_size, dec_out_size,
            enc_fnn_sizes=[512], enc_fnn_act='LeakyReLU', enc_fnn_do=0.25,
            enc_rnn_sizes=[256, 256, 256], enc_rnn_cfgs={"type":"lstm", "bi":True}, enc_rnn_do=0.25,
            downsampling=[False, True, True],
            dec_emb_size=256, dec_emb_do=0.25, dec_emb_tied_weight=True,
            # tying weight from char/word embedding with softmax layer
            dec_rnn_sizes=[512, 512], dec_rnn_cfgs={"type":"lstm"}, dec_rnn_do=0.25,
            dec_cfg={"type":"standard_decoder"},
            att_cfg={"type":"mlp"},
            use_layernorm=False,
            ) :
        super().__init__()
        self.enc_in_size = enc_in_size
        self.dec_in_size = dec_in_size
        self.dec_out_size = dec_out_size
        self.enc_fnn_sizes = enc_fnn_sizes
        self.enc_fnn_act = enc_fnn_act
        # list_parser broadcasts a scalar config to one entry per layer.
        self.enc_fnn_do = ConfigParser.list_parser(enc_fnn_do, len(enc_fnn_sizes))
        self.enc_rnn_sizes = enc_rnn_sizes
        self.enc_rnn_cfgs = enc_rnn_cfgs
        self.enc_rnn_do = ConfigParser.list_parser(enc_rnn_do, len(enc_rnn_sizes))
        self.downsampling = ConfigParser.list_parser(downsampling, len(enc_rnn_sizes))
        self.dec_emb_size = dec_emb_size
        self.dec_emb_do = dec_emb_do
        self.dec_emb_tied_weight = dec_emb_tied_weight
        self.dec_rnn_sizes = dec_rnn_sizes
        self.dec_rnn_cfgs = ConfigParser.list_parser(dec_rnn_cfgs, len(dec_rnn_sizes))
        self.dec_rnn_do = ConfigParser.list_parser(dec_rnn_do, len(dec_rnn_sizes))
        self.dec_cfg = dec_cfg
        self.att_cfg = att_cfg
        self.use_layernorm = use_layernorm
        # LayerNorm support is declared in the signature but not implemented.
        if self.use_layernorm == True :
            raise ValueError("LayerNorm is not implemented yet")
        # modules #
        # init encoder #
        # Per-frame feed-forward stack: Linear (+LayerNorm) + activation + dropout.
        prev_size = enc_in_size
        _tmp = []
        for ii in range(len(enc_fnn_sizes)) :
            _tmp.append(nn.Linear(prev_size, enc_fnn_sizes[ii]))
            if use_layernorm :
                _tmp.append(LayerNorm(enc_fnn_sizes[ii]))
            _tmp.append(generator_act_module(enc_fnn_act))
            _tmp.append(nn.Dropout(p=self.enc_fnn_do[ii]))
            prev_size = enc_fnn_sizes[ii]
        self.enc_fnn_lyr = nn.Sequential(*_tmp)
        self.enc_rnn_lyr = nn.ModuleList()
        _enc_rnn_cfgs = ConfigParser.list_parser(enc_rnn_cfgs, len(enc_rnn_sizes))
        for ii in range(len(enc_rnn_sizes)) :
            _rnn_cfg = {}
            _rnn_cfg['type'] = _enc_rnn_cfgs[ii]['type']
            # args layout (positional): input_size, hidden_size, num_layers,
            # bias, batch_first, dropout, bidirectional -- TODO confirm against
            # generator_rnn's expected argument order.
            _rnn_cfg['args'] = [prev_size, enc_rnn_sizes[ii], 1, True, True, 0, _enc_rnn_cfgs[ii]['bi']]
            self.enc_rnn_lyr.append(generator_rnn(_rnn_cfg))
            # Bidirectional layers double the feature size of the next layer.
            prev_size = enc_rnn_sizes[ii] * (2 if _enc_rnn_cfgs[ii]['bi'] else 1)
        final_enc_size = prev_size
        # init decoder #
        self.dec_emb_lyr = nn.Embedding(self.dec_in_size, dec_emb_size, padding_idx=None)
        prev_size = dec_emb_size
        _dec_rnn_cfgs = ConfigParser.list_parser(dec_rnn_cfgs, len(dec_rnn_sizes))
        for ii in range(len(dec_rnn_sizes)) :
            _type = _dec_rnn_cfgs[ii]['type']
            # Decoder RNNs must be stateful cell variants; rewrite e.g. "lstm"
            # into "stateful_lstmcell".
            if re.match('stateful.*cell', _type) is None :
                _dec_rnn_cfgs[ii]['type'] = 'stateful_{}cell'.format(_type)
        # TODO : dec_cfg #
        assert 'type' in dec_cfg, "decoder type need to be defined"
        if dec_cfg['type'] == 'standard_decoder' :
            # Remaining dec_cfg keys are forwarded as StandardDecoder kwargs.
            _tmp_dec_cfg = dict(dec_cfg)
            del _tmp_dec_cfg['type'] #
            self.dec_att_lyr = decoder.StandardDecoder(att_cfg=att_cfg, ctx_size=final_enc_size, in_size=dec_emb_size,
                    rnn_sizes=dec_rnn_sizes, rnn_cfgs=_dec_rnn_cfgs, rnn_do=dec_rnn_do, **_tmp_dec_cfg)
        else :
            raise NotImplementedError("decoder type {} is not found".format(dec_cfg['type']))
        self.dec_presoftmax_lyr = nn.Linear(self.dec_att_lyr.output_size, dec_out_size)
        if dec_emb_tied_weight :
            # Weight tying requires matching vocabulary and embedding sizes.
            assert dec_out_size == dec_in_size and self.dec_emb_lyr.embedding_dim == self.dec_presoftmax_lyr.in_features
            self.dec_presoftmax_lyr.weight = self.dec_emb_lyr.weight
        pass

    def get_config(self) :
        """Return a dict of constructor arguments (for checkpoint/rebuild)."""
        # TODO
        return {'class':str(self.__class__),
                'enc_in_size':self.enc_in_size,
                'dec_in_size':self.dec_in_size,
                'dec_out_size':self.dec_out_size,
                'enc_fnn_sizes':self.enc_fnn_sizes,
                'enc_fnn_act':self.enc_fnn_act,
                'enc_fnn_do':self.enc_fnn_do,
                'enc_rnn_sizes':self.enc_rnn_sizes,
                'enc_rnn_cfgs':self.enc_rnn_cfgs,
                'enc_rnn_do':self.enc_rnn_do,
                'downsampling':self.downsampling,
                'dec_emb_size':self.dec_emb_size,
                'dec_emb_do':self.dec_emb_do,
                'dec_emb_tied_weight':self.dec_emb_tied_weight,
                'dec_rnn_sizes':self.dec_rnn_sizes,
                'dec_rnn_cfgs':self.dec_rnn_cfgs,
                'dec_rnn_do':self.dec_rnn_do,
                'dec_cfg':self.dec_cfg,
                'att_cfg':self.att_cfg,
                'use_layernorm':self.use_layernorm
                }

    @property
    def state(self) :
        # Decoder recurrent state, wrapped in a tuple for future extensibility.
        return (self.dec_att_lyr.state, )

    @state.setter
    def state(self, value) :
        self.dec_att_lyr.state = value[0]

    def encode(self, input, src_len=None) :
        """
        input : (batch x max_src_len x in_size)
        mask : (batch x max_src_len)
        """
        batch, max_src_len, in_size = input.size()
        if src_len is None :
            src_len = [max_src_len] * batch
        # Apply the FNN stack per frame by flattening (batch*time, feat).
        res = input.view(batch * max_src_len, in_size)
        res = self.enc_fnn_lyr(res)
        res = res.view(batch, max_src_len, -1)
        for ii in range(len(self.enc_rnn_lyr)) :
            res = pack(res, src_len, batch_first=True)
            res = self.enc_rnn_lyr[ii](res)[0] # get h only #
            res,_ = unpack(res, batch_first=True)
            res = F.dropout(res, self.enc_rnn_do[ii], self.training)
            if self.downsampling[ii] == True :
                # Keep every other frame (starting at index 1) -> halves time axis.
                res = res[:, 1::2]
                src_len = [x // 2 for x in src_len]
            pass
        ctx = res
        # create mask if required #
        # NOTE(review): src_len is always non-None here (defaulted above), so
        # the else branch is currently unreachable.
        if src_len is not None :
            ctx_mask = Variable(generate_seq_mask(src_len, self, max_len=ctx.size(1)))
        else :
            ctx_mask = None
        # Hand the encoded context (and mask) to the attention decoder.
        self.dec_att_lyr.set_ctx(ctx, ctx_mask)

    def reset(self) :
        """Clear the decoder's recurrent/attention state between utterances."""
        self.dec_att_lyr.reset()

    def decode(self, y_tm1, mask=None) :
        """One decoding step: previous token ids -> (presoftmax logits, decoder dict)."""
        assert y_tm1.dim() == 1, "batchsize only"
        res = self.dec_emb_lyr(y_tm1)
        if self.dec_emb_do > 0.0 :
            res = F.dropout(res, self.dec_emb_do, self.training)
        res = self.dec_att_lyr(res, mask)
        return self.dec_presoftmax_lyr(res['dec_output']), res
| [
"kano.takatomo.km0@is.naist.jp"
] | kano.takatomo.km0@is.naist.jp |
0995fea88a29d3ee76e12db1750b10190ae20cc1 | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/ops/numpy_ops/np_utils_test.py | 2cb8f64324e9e9d6d47bcab8981ed3a8b6d8439f | [
"Apache-2.0"
] | permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,674 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.ops.numpy_ops import np_utils
from tensorflow.python.platform import test
class UtilsTest(test.TestCase, parameterized.TestCase):
  # NOTE: the docstrings of the nested np_fun/f functions below are *inputs*
  # to np_utils.np_doc and the `expected` strings are compared verbatim --
  # do not edit any string literal in this class.

  def setUp(self):
    # Save the global np_doc flags so each test may mutate them freely.
    super(UtilsTest, self).setUp()
    self._old_np_doc_form = np_utils.get_np_doc_form()
    self._old_is_sig_mismatch_an_error = np_utils.is_sig_mismatch_an_error()

  def tearDown(self):
    # Restore the global np_doc flags mutated by the tests.
    np_utils.set_np_doc_form(self._old_np_doc_form)
    np_utils.set_is_sig_mismatch_an_error(self._old_is_sig_mismatch_an_error)
    super(UtilsTest, self).tearDown()

  # pylint: disable=unused-argument
  def testNpDocInlined(self):
    # "inlined" form copies the NumPy docstring into the wrapper's docstring.
    def np_fun(x, y, z):
      """np_fun docstring."""
      return

    np_utils.set_np_doc_form("inlined")

    @np_utils.np_doc(None, np_fun=np_fun, unsupported_params=["x"])
    def f(x, z):
      """f docstring."""
      return

    expected = """TensorFlow variant of NumPy's `np_fun`.
Unsupported arguments: `x`, `y`.
f docstring.
Documentation for `numpy.np_fun`:
np_fun docstring."""
    self.assertEqual(expected, f.__doc__)

  @parameterized.named_parameters(
      [
          (version, version, link)
          for version, link in [  # pylint: disable=g-complex-comprehension
              (
                  "dev",
                  "https://numpy.org/devdocs/reference/generated/numpy.np_fun.html",
              ),
              (
                  "stable",
                  "https://numpy.org/doc/stable/reference/generated/numpy.np_fun.html",
              ),
              (
                  "1.16",
                  "https://numpy.org/doc/1.16/reference/generated/numpy.np_fun.html",
              ),
          ]
      ]
  )
  def testNpDocLink(self, version, link):
    # Version-valued forms produce a link to the matching numpy.org docs.
    def np_fun(x, y, z):
      """np_fun docstring."""
      return

    np_utils.set_np_doc_form(version)

    @np_utils.np_doc(None, np_fun=np_fun, unsupported_params=["x"])
    def f(x, z):
      """f docstring."""
      return

    expected = """TensorFlow variant of NumPy's `np_fun`.
Unsupported arguments: `x`, `y`.
f docstring.
See the NumPy documentation for [`numpy.np_fun`](%s)."""
    expected = expected % (link)
    self.assertEqual(expected, f.__doc__)

  @parameterized.parameters([None, 1, "a", "1a", "1.1a", "1.1.1a"])
  def testNpDocInvalid(self, invalid_flag):
    # Unrecognized flag values: neither inlined docs nor a link is emitted.
    def np_fun(x, y, z):
      """np_fun docstring."""
      return

    np_utils.set_np_doc_form(invalid_flag)

    @np_utils.np_doc(None, np_fun=np_fun, unsupported_params=["x"])
    def f(x, z):
      """f docstring."""
      return

    expected = """TensorFlow variant of NumPy's `np_fun`.
Unsupported arguments: `x`, `y`.
f docstring.
"""
    self.assertEqual(expected, f.__doc__)

  def testNpDocName(self):
    # np_doc may be given only a name (no np_fun to pull a docstring from).
    np_utils.set_np_doc_form("inlined")

    @np_utils.np_doc("foo")
    def f():
      """f docstring."""
      return

    expected = """TensorFlow variant of NumPy's `foo`.
f docstring.
"""
    self.assertEqual(expected, f.__doc__)

  # pylint: disable=unused-variable
  def testSigMismatchIsError(self):
    """Tests that signature mismatch is an error (when configured so)."""
    if not np_utils._supports_signature():
      self.skipTest("inspect.signature not supported")
    np_utils.set_is_sig_mismatch_an_error(True)

    def np_fun(x, y=1, **kwargs):
      return

    # Missing parameter `x` entirely.
    with self.assertRaisesRegex(TypeError, "Cannot find parameter"):

      @np_utils.np_doc(None, np_fun=np_fun)
      def f1(a):
        return

    # `kwargs` declared as a positional parameter instead of **kwargs.
    with self.assertRaisesRegex(TypeError, "is of kind"):

      @np_utils.np_doc(None, np_fun=np_fun)
      def f2(x, kwargs):
        return

    # `y` lacks the default value the NumPy signature has.
    with self.assertRaisesRegex(
        TypeError, "Parameter y should have a default value"
    ):

      @np_utils.np_doc(None, np_fun=np_fun)
      def f3(x, y):
        return

  def testSigMismatchIsNotError(self):
    """Tests that signature mismatch is not an error (when configured so)."""
    np_utils.set_is_sig_mismatch_an_error(False)

    def np_fun(x, y=1, **kwargs):
      return

    # The following functions all have signature mismatches, but they shouldn't
    # throw errors when is_sig_mismatch_an_error() is False.

    @np_utils.np_doc(None, np_fun=np_fun)
    def f1(a):
      return

    def f2(x, kwargs):
      return

    @np_utils.np_doc(None, np_fun=np_fun)
    def f3(x, y):
      return

  # pylint: enable=unused-variable
# Run the tests when executed as a script.
if __name__ == "__main__":
  test.main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
09f339b23093009144f9b4c01d6ee8320fadfc8c | 8016e033484d3cb88a4ee9b82bd3ca08557c12aa | /programmingKnowledge_OOP/hello.py | bd08eca157d9b0200fdd35f8a6aa3a11dac798e7 | [] | no_license | keys4words/python | 72ecf5de80b14ad3a94abe1d48e82035a2f0fa3d | 08431836498e6caed8e01cbc3548b295b69056fe | refs/heads/master | 2021-06-16T19:42:21.294976 | 2020-04-30T14:40:24 | 2020-04-30T14:40:24 | 187,210,896 | 0 | 0 | null | 2021-03-20T01:25:04 | 2019-05-17T12:16:40 | Python | UTF-8 | Python | false | false | 434 | py | class Hello:
def __init__(self):
# self.name = name
# self.age = 10
self.a = 10
self._b = 20
self.__c = 30
def public_method(self):
# print(self.a)
# print(self.__c)
print('public')
self.__private_method()
def __private_method(self):
print('private')
# Demonstrate attribute visibility from outside the class.
hello = Hello()
print(hello.a)   # public: accessible
print(hello._b)  # convention-protected: still accessible
hello.public_method()
# print(hello.__c) | [
"keys4words@gmail.com"
] | keys4words@gmail.com |
3074e68ae62c9b8da42116372bac38daab0eab34 | 1e5f2b99be2e7c1bcbe1718e09e5dce1c7a5ed4d | /23_Merge_k_Sorted_Lists/23_Merge_k_Sorted_Lists.py | 63ce62aef895054cbbca668733a19e61eaf894b0 | [] | no_license | superSeanLin/Algorithms-and-Structures | 05706cf34ac6bbf919f14001da9d44b918c10fb8 | 53ec4471f1eff393c26f7575a47df1a56fb8af11 | refs/heads/master | 2020-03-27T09:13:30.330974 | 2019-10-13T05:10:32 | 2019-10-13T05:10:32 | 146,323,422 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """LeetCode 23: merge k sorted singly linked lists."""

    def mergeKLists(self, lists):
        """
        Merge k sorted singly linked lists into one sorted list.

        Repeatedly splices off the head node with the smallest value
        (O(k*N) overall, ties broken by the earliest list, as before).

        Fixes over the original recursive version:
        - no `minimum = 1000` sentinel, so node values >= 1000 work;
        - iterative, so long lists cannot hit the recursion limit.

        :type lists: List[ListNode]
        :rtype: ListNode
        """
        head = None
        tail = None
        while True:
            # Find the list whose current head is smallest.
            best = None
            best_index = -1
            for index, node in enumerate(lists):
                if node is not None and (best is None or node.val < best.val):
                    best = node
                    best_index = index
            if best is None:  # every list exhausted (or lists empty)
                return head
            # Advance the chosen list in place (matches original's mutation).
            lists[best_index] = best.next
            best.next = None
            if head is None:
                head = best
            else:
                tail.next = best
            tail = best
| [
"noreply@github.com"
] | superSeanLin.noreply@github.com |
954fd295540dba8cfc19355ba45a3c7da5aa94a9 | 6f1034b17b49f373a41ecf3a5a8923fb4948992b | /test/experiment.py | 6a774f21b36d3719206d9df9793d4a0a31d8cb59 | [
"Apache-2.0"
] | permissive | NMGRL/pychron | a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f | 8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6 | refs/heads/main | 2023-08-30T07:00:34.121528 | 2023-06-12T17:43:25 | 2023-06-12T17:43:25 | 14,438,041 | 38 | 28 | Apache-2.0 | 2023-08-09T22:47:17 | 2013-11-15T23:46:10 | Python | UTF-8 | Python | false | false | 6,977 | py | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import unittest
import os
from pychron.experiment.experimentor import Experimentor
from test.database import isotope_manager_factory
from pychron.experiment.tasks.experiment_editor import ExperimentEditor
from pychron.experiment.tasks.experiment_task import ExperimentEditorTask
from six.moves import zip
# from pychron.database.records.isotope_record import IsotopeRecord
# ============= standard library imports ========================
# ============= local library imports ==========================
class BaseExperimentTest(unittest.TestCase):
    """Shared fixture: builds an Experimentor and loads the test experiment file."""

    def _load_queues(self):
        """Parse the experiment file into queues and attach them to the experimentor."""
        manager = self.experimentor
        path = self._experiment_file
        with open(path, 'r') as fh:
            text = fh.read()

        queues = []
        for queue_text in self.exp_task._split_text(text):
            editor = ExperimentEditor(path=path)
            editor.new_queue(queue_text)
            queues.append(editor.queue)

        manager.experiment_queues = queues
        manager.update_info()
        manager.path = path
        manager.executor.reset()
        return queues

    def setUp(self):
        """Create the experimentor, point it at the fixture file, load queues."""
        self.experimentor = Experimentor(connect=False, unique_executor_db=False)
        self.experimentor.db = isotope_manager_factory().db
        self._experiment_file = './data/experiment2.txt'
        self.exp_task = ExperimentEditorTask()
        self._load_queues()
class ExperimentTest2(BaseExperimentTest):
    """Aliquot/step bookkeeping checks for the second fixture layout.

    Cleanup: the original reassigned the expectation tuples several times
    (only the last binding was live) and printed each run while asserting;
    the dead bindings and debug prints were removed.
    """

    def testAliquots(self):
        """Each run's aliquot must match the expected sequence."""
        queue = self._load_queues()[0]
        # Expected aliquot per run, in queue order.
        aqs = (1, 46, 46, 45, 46, 46, 2)
        for aq, an in zip(aqs, queue.automated_runs):
            self.assertEqual(an.aliquot, aq)

    def testSteps(self):
        """Each run's step letter must match the expected sequence ('' = non-step)."""
        queue = self._load_queues()[0]
        sts = ('', 'A', 'B', 'E', 'C', 'D')
        for st, an in zip(sts, queue.automated_runs):
            self.assertEqual(an.step, st)
class ExperimentTest(BaseExperimentTest):
    """Queue-parsing checks for the primary fixture layout.

    Cleanup: removed dead reassignments of the expectation tuples (only the
    last binding was live) and leftover debug prints inside assertion loops.
    """

    def testFile(self):
        """The experiment fixture file must exist."""
        self.assertTrue(os.path.isfile(self._experiment_file))

    def testOpen(self):
        """Parsing the fixture should yield exactly one queue."""
        qs = self._load_queues()
        self.assertEqual(len(qs), 1)

    def testNRuns(self):
        """The queue should contain the expected number of runs."""
        queue = self._load_queues()[0]
        self.assertEqual(len(queue.automated_runs), 11)

    def testAliquots(self):
        """Each run's aliquot must match the expected sequence."""
        queue = self._load_queues()[0]
        aqs = (46, 46, 2, 47, 47, 46, 46, 40, 41, 45, 45, 3, 40, 41)
        for aq, an in zip(aqs, queue.automated_runs):
            self.assertEqual(an.aliquot, aq)

    def testSteps(self):
        """Each run's step letter must match the expected sequence ('' = non-step)."""
        queue = self._load_queues()[0]
        sts = ('A', 'B', '', 'A', 'B', 'C', 'D', '', '', 'E', 'F',
               '', '', '', 'C', 'D')
        for st, an in zip(sts, queue.automated_runs):
            self.assertEqual(an.step, st)

    @unittest.skip('foo')
    def testSample(self):
        """(Skipped) sample name per run."""
        queue = self._load_queues()[0]
        samples = ('NM-779', 'NM-779', '', 'NM-779', 'NM-779', 'NM-779',
                   'NM-779', '', 'NM-791', 'NM-791')
        for sample, an in zip(samples, queue.automated_runs):
            self.assertEqual(an.sample, sample)

    @unittest.skip('foo')
    def testIrradation(self):
        """(Skipped) irradiation name per run."""
        queue = self._load_queues()[0]
        irrads = ('NM-251H', 'NM-251H', '', 'NM-251H', 'NM-251H', 'NM-251H',
                  'NM-251H', '', 'NM-251H', 'NM-251H')
        for irrad, an in zip(irrads, queue.automated_runs):
            self.assertEqual(an.irradiation, irrad)
class ExecutorTest(BaseExperimentTest):
    """Smoke tests for the experiment executor's pre-run checks."""

    def testPreviousBlank(self):
        """Fetching the preceding blank/background should not raise."""
        executor = self.experimentor.executor
        executor.experiment_queue = self.experimentor.experiment_queues[0]
        result = executor._get_preceeding_blank_or_background(inform=False)
        # self.assertIsInstance(result, IsotopeRecord)

    def testExecutorHumanError(self):
        """The human-error check should pass for the fixture queue."""
        executor = self.experimentor.executor
        executor.experiment_queue = self.experimentor.experiment_queues[0]
        self.assertTrue(executor._check_for_human_errors())

    def testPreExecuteCheck(self):
        """The pre-execute check should run without raising."""
        executor = self.experimentor.executor
        executor.experiment_queue = self.experimentor.experiment_queues[0]
        executor._pre_execute_check(inform=False)
class HumanErrorCheckerTest(BaseExperimentTest):
    """Checks HumanErrorChecker error reporting over the fixture queue."""

    def setUp(self):
        super(HumanErrorCheckerTest, self).setUp()
        # Imported lazily, matching the original module layout.
        from pychron.experiment.utilities.human_error_checker import HumanErrorChecker
        self.hec = HumanErrorChecker()

    def _get_errors(self):
        """Run the checker over the first queue; return its error dict."""
        queue = self.experimentor.experiment_queues[0]
        return self.hec.check(queue, test_all=True, inform=False)

    def testNoLabnumber(self):
        errors = self._get_errors()
        self.assertIn('-01', list(errors.keys()))
        self.assertEqual(errors['-01'], 'no labnumber')

    def testNoDuration(self):
        errors = self._get_errors()
        self.assertEqual(errors['61311-101'], 'no duration')

    def testNoCleanup(self):
        errors = self._get_errors()
        self.assertEqual(errors['61311-100'], 'no cleanup')

    def testPositionNoExtract(self):
        errors = self._get_errors()
        self.assertEqual(errors['61311-102'], 'position but no extract value')
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
d1fb0d046a7c0d9e863a9839b89b7b64feca1388 | 21b201ebf2ffbbc19fa8d74e5657e12ef597b02d | /research/pcl_rl/baseline.py | 58f7893a6c9a4bb783738399016266fa483810a2 | [] | no_license | alhsnouf/model | fa619691ad9d0afc7ad849a9471e6bb0643a8d47 | 5fe429b115634e642a7469b3f1d4bc0c5cf98782 | refs/heads/master | 2021-04-12T11:16:02.150045 | 2018-03-27T15:19:18 | 2018-03-27T15:19:18 | 126,702,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4252c9a3674f2fe52c94c44905609a6f028ed27517804d6b9a68c3b4ce0b5efe
size 7292
| [
"alhanouf987@hotmail.com"
] | alhanouf987@hotmail.com |
84684ad06806b9b834d775398aa42b70ebb3bff9 | ddb8c14775dfbe9424691dabf1617273d118d317 | /catkin_ws/devel/lib/python2.7/dist-packages/mavros_msgs/srv/_MountConfigure.py | 8bc922a6cacf418e9ad6166d4a3cb1ca7856d717 | [] | no_license | rishabhdevyadav/fastplanneroctomap | e8458aeb1f2d3b126d27dc57011c87ae4567687a | de9d7e49cb1004f3b01b7269dd398cf264ed92b4 | refs/heads/main | 2023-05-12T22:12:27.865900 | 2021-05-26T19:25:31 | 2021-05-26T19:25:31 | 356,674,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | /home/rishabh/catkin_ws/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/srv/_MountConfigure.py | [
"rishabhdevyadav95@gmail.com"
] | rishabhdevyadav95@gmail.com |
1485edb0101fc10707d2f472fb4c3ed4549ba608 | 7ab41799fd38489c93282f1beb3b20e7ef8ff165 | /python/94.py | 6a9b8fc33e6ab122cea451ead65017089a84d0c5 | [] | no_license | scturtle/leetcode-sol | 86c4095df6b31a9fcad683f2d63669ce1691633c | e1a9ce5d9b8fe4bd11e50bd1d5ba1933de845db7 | refs/heads/master | 2020-04-23T00:01:37.016267 | 2015-11-21T04:15:27 | 2015-11-21T04:15:27 | 32,385,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | class Solution(object):
def inorderTraversal(self, root, ans=None):
"""
:type root: TreeNode
:rtype: List[int]
"""
if ans is None:
ans = []
if not root:
return ans
self.inorderTraversal(root.left, ans)
ans.append(root.val)
self.inorderTraversal(root.right, ans)
return ans
| [
"scturtle@gmail.com"
] | scturtle@gmail.com |
56229b6884789b2f9643a05e87993227183ac570 | f0dce7b15b55647b709300d335ddcca523ee61f7 | /34_Find_First_and_Last_Position_of_Element_in_Sorted_Array.py | b4dd6bab76ed54fd3570cd6599c240a478044dc2 | [] | no_license | breezekiller789/LeetCode | ecc4883f616d21e7b72d85c9f93293a8daf3dc74 | 51090f28eaab17e823981eddc9119abe174ceb4e | refs/heads/master | 2023-06-03T20:54:29.222478 | 2021-06-18T14:33:05 | 2021-06-18T14:33:05 | 347,049,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/
# Binary search
# NOTE(review): each nums/target pair below overwrites the previous one, so
# only the LAST pair is actually exercised; uncomment/reorder to try others.
# Output: [3,4]
nums = [5, 7, 7, 8, 8, 10]
target = 8
# Output: [-1,-1]
nums = [5, 7, 7, 8, 8, 10]
target = 6
# Output: [-1,-1]  (this pair is the one that runs)
nums = [5, 7, 7, 8, 8, 10]
target = 0
def FindInterval(nums, startIndex, target):
    """Return [first, last] bounds of the contiguous run of `target`
    touching startIndex.

    Expands left while the left neighbor equals target, and right while the
    right neighbor equals target.  If nums[startIndex] != target and neither
    neighbor matches, this degenerates to [startIndex, startIndex] (the
    caller only invokes it with nums[startIndex] == target).
    """
    first = startIndex
    while first > 0 and nums[first - 1] == target:
        first -= 1
    last = startIndex
    while last + 1 < len(nums) and nums[last + 1] == target:
        last += 1
    return [first, last]
# Binary search for any occurrence of target, then expand to the full range.
# Fix: the original used Python 2 `print` statements and "/" division despite
# the python3 shebang; ported to Python 3 (print() calls, floor division).
low = 0
high = len(nums) - 1
while low <= high:
    mid = (low + high) // 2  # "//" keeps mid an int under Python 3
    if nums[mid] > target:
        high = mid - 1
    elif nums[mid] < target:
        low = mid + 1
    else:
        print(FindInterval(nums, mid, target))
        exit()
print([-1, -1])
| [
"breezekiller789@csie.io"
] | breezekiller789@csie.io |
078371257b2c04228f984197d2bd37b339ecdf6f | 4cd0631100e099e9b154b12b234715ddee0711d3 | /model/BiSeNetV2AD3.py | f9331953b1e387866c5127b8efacd13f87224c98 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | Ethan-ye/Efficient-Segmentation-Networks | d6dd029c76cb46b89ac00ee2f6a49d9ddcd99a3a | 27272e43126a507a6d93b21cd2372f5432f61237 | refs/heads/master | 2023-04-22T11:10:23.256349 | 2021-05-07T05:04:40 | 2021-05-07T05:12:38 | 281,823,847 | 0 | 0 | MIT | 2020-07-23T01:50:42 | 2020-07-23T01:50:41 | null | UTF-8 | Python | false | false | 66,224 | py | # *- coding: utf-8 -*
###########################################################################
# https://github.com/Soulempty/BiseNetv2-pytorch
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchsummary import summary
from utils.activations import NON_LINEARITY
from fvcore.nn.flop_count import flop_count # https://github.com/facebookresearch/fvcore
from tools.flops_counter.ptflops import get_model_complexity_info
from thop import profile # https://github.com/Lyken17/pytorch-OpCounter
__all__ = ['BiSeNetV2AD3']
class conv2d(nn.Module):
    """Conv2d optionally followed by BatchNorm2d and ReLU.

    NOTE: when use_bn is False the ReLU is skipped as well, even if use_rl is
    True -- this mirrors how the original forward() branched.
    """

    def __init__(self, in_dim, out_dim, k, pad, stride, groups=1, bias=False, use_bn=True, use_rl=True):
        super(conv2d, self).__init__()
        self.use_bn = use_bn
        self.use_rl = use_rl
        self.conv = nn.Conv2d(in_dim, out_dim, k, padding=pad, stride=stride, groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_dim)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, bottom):
        out = self.conv(bottom)
        if not self.use_bn:
            # No BN -> raw conv output (ReLU intentionally skipped too).
            return out
        out = self.bn(out)
        if self.use_rl:
            out = self.relu(out)
        return out
class StemBlock(nn.Module):
    """BiSeNetV2 stem: downsample x4 via a learned conv branch + a maxpool branch."""

    def __init__(self):
        super(StemBlock, self).__init__()
        self.conv1 = conv2d(3, 16, 3, 1, 2)
        self.conv_1x1 = conv2d(16, 8, 1, 0, 1)
        self.conv_3x3 = conv2d(8, 16, 3, 1, 2)
        self.mpooling = nn.MaxPool2d(3, 2, 1)
        self.conv2 = conv2d(32, 16, 3, 1, 1)

    def forward(self, bottom):
        stem = self.conv1(bottom)                          # 1/2 resolution
        conv_branch = self.conv_3x3(self.conv_1x1(stem))   # 1/4, learned path
        pool_branch = self.mpooling(stem)                  # 1/4, pooled path
        fused = torch.cat([conv_branch, pool_branch], 1)   # channel concat
        return self.conv2(fused)
class ContextEmbeddingBlock(nn.Module):
    """Context embedding: global average pooling -> BN -> 1x1 conv, added
    back onto the input feature map, then refined with a 3x3 conv."""

    def __init__(self, in_dim):
        super(ContextEmbeddingBlock, self).__init__()
        self.gap = nn.AdaptiveAvgPool2d(1)  # global average pooling to 1x1
        self.bn1 = nn.BatchNorm2d(in_dim)
        self.conv1 = conv2d(in_dim, in_dim, 1, 0, 1)
        self.conv2 = conv2d(in_dim, in_dim, 3, 1, 1, use_bn=False, use_rl=False)

    def forward(self, bottom):
        # Global context vector, normalised then projected (1x1 spatial size).
        context = self.conv1(self.bn1(self.gap(bottom)))
        # Broadcast-add the context to every spatial position and refine.
        return self.conv2(bottom + context)
class GatherExpansion(nn.Module):
    """Gather-and-expansion layer (BiSeNetV2): expand channels by `exp`,
    depthwise conv, project back, with a residual connection. With stride 2
    both the main path and the shortcut are depthwise-downsampled."""

    def __init__(self, in_dim, out_dim, stride=1, exp=6):
        super(GatherExpansion, self).__init__()
        exp_dim = in_dim * exp
        self.stride = stride
        self.conv1 = conv2d(in_dim, exp_dim, 3, 1, 1)
        self.dwconv2 = conv2d(exp_dim, exp_dim, 3, 1, 1, exp_dim, use_rl=False)
        self.conv_11 = conv2d(exp_dim, out_dim, 1, 0, 1, use_rl=False)
        self.dwconv1 = conv2d(exp_dim, exp_dim, 3, 1, 2, exp_dim, use_rl=False)
        self.dwconv3 = conv2d(in_dim, in_dim, 3, 1, 2, in_dim, use_rl=False)
        self.conv_12 = conv2d(in_dim, out_dim, 1, 0, 1, use_rl=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, bottom):
        expanded = self.conv1(bottom)
        shortcut = bottom
        if self.stride == 2:
            # Downsample the main path and build a matching shortcut.
            expanded = self.dwconv1(expanded)
            shortcut = self.conv_12(self.dwconv3(shortcut))
        # Depthwise conv in the expanded space, then 1x1 projection.
        projected = self.conv_11(self.dwconv2(expanded))
        return self.relu(projected + shortcut)
class GGMA(nn.Module):
    """Cross attention between two feature maps with coordinate encodings.

    Queries come from `query_feats`, keys/values from `key_feats`. Each
    input is concatenated with a 2-channel normalised (x, y) coordinate
    grid before the 1x1 projections, making the attention position aware.

    NOTE(review): `shape1` and `shape2` are accepted but never used —
    presumably the intended query/key spatial sizes; confirm and either
    remove them or wire them in.

    Args:
        shape1: unused (see note above).
        shape2: unused (see note above).
        in_dim: channel count of both input feature maps.
        map_dim: channel count of the query/key/value projections.
        matmul_norm: scale the similarity map by map_dim ** -0.5.
    """
    def __init__(self, shape1, shape2, in_dim, map_dim, matmul_norm=True):
        super(GGMA, self).__init__()
        self.in_dim = in_dim
        self.map_dim = map_dim
        self.matmul_norm = matmul_norm
        # +2 input channels account for the concatenated coordinate grid.
        self.query_project = conv2d(in_dim+2, map_dim, 1, 0, 1)
        self.key_project = conv2d(in_dim+2, map_dim, 1, 0, 1)
        self.value_project = conv2d(in_dim+2, map_dim, 1, 0, 1)
        self.out_project = conv2d(map_dim, in_dim, 1, 0, 1)

    def forward(self, query_feats, key_feats):
        """Return attended features shaped like `query_feats` (B, in_dim, qh, qw)."""
        batch_size,_,qh,qw = query_feats.size()
        _, _, kh, kw = key_feats.size()
        # Centred coordinate grid for the query map: x spans
        # [-(qw-1)/2, (qw-1)/2]; y decreases from top to bottom.
        qx = torch.arange(-1 * ((qw - 1) / 2), ((qw - 1) / 2) + 1)
        qy = torch.arange(((qh - 1) / 2), -1 * ((qh - 1) / 2) - 1, -1)
        qys, qxs = torch.meshgrid(qy, qx)
        qxys = torch.stack([qxs, qys]).unsqueeze(0)#(1,2,h,w)
        # LayerNorm over the spatial dims (eps=0, no affine) standardises
        # the grid; constructed per call — NOTE(review): could be hoisted.
        qxys = nn.LayerNorm([qh, qw], 0, False)(qxys)
        if torch.cuda.is_available():
            qxys = qxys.cuda()
        qxys = qxys.repeat(batch_size, 1, 1, 1)
        # qxys_1 = qxys.reshape(2, -1)  # mean/std computed over (C,H,W)
        # qmean = qxys_1.mean(dim=1).reshape(2, 1, 1)
        # qstd = qxys_1.std(dim=1, unbiased=False).reshape(2, 1, 1)
        # qxys_ln = (qxys - qmean) / qstd
        # qxys22 = nn.LayerNorm([qh,qw],0,False)(qxys)
        # print(qxys_ln.shape,qxys22)
        # Same centred/normalised coordinate grid for the key map.
        kx = torch.arange(-1 * ((kw - 1) / 2), ((kw - 1) / 2) + 1)
        ky = torch.arange(((kh - 1) / 2), -1 * ((kh - 1) / 2) - 1, -1)
        kys, kxs = torch.meshgrid(ky, kx)
        kxys = torch.stack([kxs, kys]).unsqueeze(0)#(1,2,h,w)
        # kxys = kxys * (qh/kh) #adjust
        kxys = nn.LayerNorm([kh, kw], 0, False)(kxys)
        if torch.cuda.is_available():
            kxys = kxys.cuda()
        kxys = kxys.repeat(batch_size, 1, 1, 1)
        # Project (features + coordinates) to queries and flatten space.
        query = self.query_project(torch.cat((query_feats, qxys), 1))
        # if self.query_downsample is not None:
        #     query = self.query_downsample(query)
        query = query.reshape(*query.shape[:2], -1) #B,C,H*W
        query = query.permute(0, 2, 1).contiguous() #B,H*w,C
        key = self.key_project(torch.cat((key_feats, kxys), 1))
        value = self.value_project(torch.cat((key_feats, kxys), 1))
        # if self.key_downsample is not None:
        #     key = self.key_downsample(key)
        #     value = self.key_downsample(value)
        key = key.reshape(*key.shape[:2], -1) #B,C,h*w
        value = value.reshape(*value.shape[:2], -1) #B,C,h*w
        value = value.permute(0, 2, 1).contiguous() #B,h*w,C
        # Scaled dot-product attention: each query position attends over
        # all key positions.
        sim_map = torch.matmul(query, key)#B,H*W,h*w
        if self.matmul_norm:
            sim_map = (self.map_dim**-.5) * sim_map
        sim_map = F.softmax(sim_map, dim=-1)
        context = torch.matmul(sim_map, value)#B,H*W,C
        context = context.permute(0, 2, 1).contiguous()#B,C,H*W
        context = context.reshape(batch_size, -1, *query_feats.shape[2:])#B,C,H,W
        # Project back to the input channel count.
        if self.out_project is not None:
            context = self.out_project(context)
        return context
# class BGA(nn.Module):
# def __init__(self, in_dim):
# super(BGA, self).__init__()
# self.in_dim = in_dim
# self.db_dwconv = conv2d(in_dim, in_dim, 3, 1, 1, in_dim, use_rl=False)
# self.db_conv1x1 = conv2d(in_dim, in_dim, 1, 0, 1, use_rl=False, use_bn=False)
# self.db_conv = conv2d(in_dim, in_dim, 3, 1, 2, use_rl=False)
# self.db_apooling = nn.AvgPool2d(3, 2, 1)
#
# self.sb_dwconv = conv2d(in_dim, in_dim, 3, 1, 1, in_dim, use_rl=False)
# self.sb_conv1x1 = conv2d(in_dim, in_dim, 1, 0, 1, use_rl=False, use_bn=False)
# self.sb_conv = conv2d(in_dim, in_dim, 3, 1, 1, use_rl=False)
# self.sb_sigmoid = nn.Sigmoid()
#
# self.conv = conv2d(in_dim, in_dim, 3, 1, 1, use_rl=False)
#
# def forward(self, db, sb):
# db_dwc = self.db_dwconv(db)
# db_out = self.db_conv1x1(db_dwc) #
# db_conv = self.db_conv(db)
# db_pool = self.db_apooling(db_conv)
#
# sb_dwc = self.sb_dwconv(sb)
# sb_out = self.sb_sigmoid(self.sb_conv1x1(sb_dwc)) #
# sb_conv = self.sb_conv(sb)
# sb_up = self.sb_sigmoid(F.interpolate(sb_conv, size=db_out.size()[2:], mode="bilinear", align_corners=True))
# db_l = db_out * sb_up
# sb_r = F.interpolate(sb_out * db_pool, size=db_out.size()[2:], mode="bilinear", align_corners=True)
# res = self.conv(db_l + sb_r)
# return res
class SegHead(nn.Module):
    """Segmentation head: 3x3 conv -> 1x1 classifier -> bilinear upsample.

    Args:
        in_dim: input channels.
        out_dim: hidden channels of the 3x3 conv.
        classes: number of output classes.
    """

    def __init__(self, in_dim, out_dim, classes):
        super(SegHead, self).__init__()
        self.conv = conv2d(in_dim, out_dim, 3, 1, 1)
        self.classes = conv2d(out_dim, classes, 1, 0, 1, use_bn=False, use_rl=False)

    def forward(self, feat, size):
        # Per-pixel class scores at the feature resolution.
        logits = self.classes(self.conv(feat))
        # Upsample the scores to the requested output size.
        return F.interpolate(logits, size=size, mode="bilinear", align_corners=True)
class DetailedBranch(nn.Module):
    """Detail branch: three conv stages that reduce the input to 1/8
    resolution with 128 channels, keeping spatial detail."""

    def __init__(self):
        super(DetailedBranch, self).__init__()
        self.s1_conv1 = conv2d(3, 64, 3, 1, 2)
        self.s1_conv2 = conv2d(64, 64, 3, 1, 1)

        self.s2_conv1 = conv2d(64, 64, 3, 1, 2)
        self.s2_conv2 = conv2d(64, 64, 3, 1, 1)
        self.s2_conv3 = conv2d(64, 64, 3, 1, 1)

        self.s3_conv1 = conv2d(64, 128, 3, 1, 2)
        self.s3_conv2 = conv2d(128, 128, 3, 1, 1)
        self.s3_conv3 = conv2d(128, 128, 3, 1, 1)

    def forward(self, bottom):
        x = self.s1_conv2(self.s1_conv1(bottom))             # stage 1: 1/2 resolution
        x = self.s2_conv3(self.s2_conv2(self.s2_conv1(x)))   # stage 2: 1/4 resolution
        x = self.s3_conv3(self.s3_conv2(self.s3_conv1(x)))   # stage 3: 1/8 resolution
        return x
class SemanticBranch(nn.Module):
    """Semantic branch: stem + gather-expansion stages down to 1/32
    resolution, a context-embedding block, and four auxiliary seg heads.

    Args:
        classes: number of output classes for the auxiliary heads.
    """

    def __init__(self, classes):
        super(SemanticBranch, self).__init__()
        self.stem = StemBlock()
        self.s3_ge1 = GatherExpansion(16, 32, 2)
        self.s3_ge2 = GatherExpansion(32, 32)
        self.s4_ge1 = GatherExpansion(32, 64, 2)
        self.s4_ge2 = GatherExpansion(64, 64)
        self.s5_ge1 = GatherExpansion(64, 128, 2)
        self.s5_ge2 = GatherExpansion(128, 128)
        self.s5_ge3 = GatherExpansion(128, 128)
        self.s5_ge4 = GatherExpansion(128, 128)
        self.s5_ge5 = GatherExpansion(128, 128, exp=1)
        # Auxiliary supervision heads, one per stage output.
        self.seghead1 = SegHead(16, 16, classes)
        self.seghead2 = SegHead(32, 32, classes)
        self.seghead3 = SegHead(64, 64, classes)
        self.seghead4 = SegHead(128, 128, classes)
        self.ceb = ContextEmbeddingBlock(128)

    def forward(self, bottom, size):
        stage12 = self.stem(bottom)                       # 1/4 resolution
        stage3 = self.s3_ge2(self.s3_ge1(stage12))        # 1/8 resolution
        stage4 = self.s4_ge2(self.s4_ge1(stage3))         # 1/16 resolution
        stage5 = self.s5_ge1(stage4)                      # 1/32 resolution
        for layer in (self.s5_ge2, self.s5_ge3, self.s5_ge4, self.s5_ge5):
            stage5 = layer(stage5)
        out = self.ceb(stage5)
        # Auxiliary predictions, each upsampled to the target `size`.
        return (out,
                self.seghead1(stage12, size),
                self.seghead2(stage3, size),
                self.seghead3(stage4, size),
                self.seghead4(stage5, size))
class BiSeNetV2AD3(nn.Module):
    """BiSeNetV2 variant that fuses the detail and semantic branches with
    two cross-resolution GGMA attention modules instead of the original
    bilateral guided aggregation (BGA).

    Args:
        classes: number of segmentation classes.

    Returns (forward): a list ``[pred, head1, head2, head3, head4]`` of
    per-pixel class-score maps at the input resolution — the main
    prediction plus the four auxiliary-head predictions.
    """

    def __init__(self, classes):
        super(BiSeNetV2AD3, self).__init__()
        self.db = DetailedBranch()
        self.sb = SemanticBranch(classes)
        # Cross attention in both directions between the branches.
        self.dma = GGMA((44, 60), (11, 15), 128, 16, True)
        self.sma = GGMA((11, 15), (44, 60), 128, 16, True)
        self.seghead = SegHead(256, 128, classes)
        self._init_params()

    def _init_params(self):
        """Kaiming init for convs, unit/zero init for norms and linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, data, y=None):
        out_size = data.size()[2:]
        detail = self.db(data)
        semantic, head1, head2, head3, head4 = self.sb(data, out_size)
        # Enrich the detail features with semantic context, then enrich the
        # semantic features with the (already updated) detail features.
        detail = detail + self.dma(detail, semantic)
        semantic = semantic + self.sma(semantic, detail)
        # Upsample semantic features to the detail resolution and fuse.
        semantic_up = F.interpolate(semantic, size=detail.size()[2:], mode="bilinear", align_corners=True)
        fused = torch.cat([detail, semantic_up], dim=1)
        pred = self.seghead(fused, out_size)
        return [pred, head1, head2, head3, head4]
if __name__ == "__main__":
    # Smoke test: build the model, print a layer-by-layer summary, and
    # report FLOPs/params with two independent counters (ptflops and thop).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = BiSeNetV2AD3(classes=11).to(device)
    summary(model,(3, 352, 480))
    flops_count, params_count = get_model_complexity_info(model, (3, 352, 480),
                                                          as_strings=False,
                                                          print_per_layer_stat=True)
    # Params size assumes 4 bytes per parameter (float32).
    print(flops_count/1000000000,'GMac', params_count/1000000, params_count/1024/1024*4,'MB')
    x = torch.randn(2, 3, 352, 480).to(device)
    input = x
    macs, params = profile(model, inputs=(input,))
    # thop counts MACs for the whole batch of 2, hence the division by 2e9.
    print(macs/2000000000,'GMac', params/1000000, params/1024/1024*4,'MB')
    # from fvcore.nn.jit_handles import batchnorm_flop_jit
    # from fvcore.nn.jit_handles import generic_activation_jit
    #
    # supported_ops = {
    #     "aten::batch_norm": batchnorm_flop_jit,
    # }
    # flop_dict, _ = flop_count(model, (x,), supported_ops)
    # print(flop_dict)
'''
/home/ethan/anaconda3/envs/py36_cuda101/bin/python /home/ethan/codes/Efficient-Segmentation-Networks/model/BiSeNetV2AD3.py
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 64, 176, 240] 1,728
BatchNorm2d-2 [-1, 64, 176, 240] 128
ReLU-3 [-1, 64, 176, 240] 0
conv2d-4 [-1, 64, 176, 240] 0
Conv2d-5 [-1, 64, 176, 240] 36,864
BatchNorm2d-6 [-1, 64, 176, 240] 128
ReLU-7 [-1, 64, 176, 240] 0
conv2d-8 [-1, 64, 176, 240] 0
Conv2d-9 [-1, 64, 88, 120] 36,864
BatchNorm2d-10 [-1, 64, 88, 120] 128
ReLU-11 [-1, 64, 88, 120] 0
conv2d-12 [-1, 64, 88, 120] 0
Conv2d-13 [-1, 64, 88, 120] 36,864
BatchNorm2d-14 [-1, 64, 88, 120] 128
ReLU-15 [-1, 64, 88, 120] 0
conv2d-16 [-1, 64, 88, 120] 0
Conv2d-17 [-1, 64, 88, 120] 36,864
BatchNorm2d-18 [-1, 64, 88, 120] 128
ReLU-19 [-1, 64, 88, 120] 0
conv2d-20 [-1, 64, 88, 120] 0
Conv2d-21 [-1, 128, 44, 60] 73,728
BatchNorm2d-22 [-1, 128, 44, 60] 256
ReLU-23 [-1, 128, 44, 60] 0
conv2d-24 [-1, 128, 44, 60] 0
Conv2d-25 [-1, 128, 44, 60] 147,456
BatchNorm2d-26 [-1, 128, 44, 60] 256
ReLU-27 [-1, 128, 44, 60] 0
conv2d-28 [-1, 128, 44, 60] 0
Conv2d-29 [-1, 128, 44, 60] 147,456
BatchNorm2d-30 [-1, 128, 44, 60] 256
ReLU-31 [-1, 128, 44, 60] 0
conv2d-32 [-1, 128, 44, 60] 0
DetailedBranch-33 [-1, 128, 44, 60] 0
Conv2d-34 [-1, 16, 176, 240] 432
BatchNorm2d-35 [-1, 16, 176, 240] 32
ReLU-36 [-1, 16, 176, 240] 0
conv2d-37 [-1, 16, 176, 240] 0
Conv2d-38 [-1, 8, 176, 240] 128
BatchNorm2d-39 [-1, 8, 176, 240] 16
ReLU-40 [-1, 8, 176, 240] 0
conv2d-41 [-1, 8, 176, 240] 0
Conv2d-42 [-1, 16, 88, 120] 1,152
BatchNorm2d-43 [-1, 16, 88, 120] 32
ReLU-44 [-1, 16, 88, 120] 0
conv2d-45 [-1, 16, 88, 120] 0
MaxPool2d-46 [-1, 16, 88, 120] 0
Conv2d-47 [-1, 16, 88, 120] 4,608
BatchNorm2d-48 [-1, 16, 88, 120] 32
ReLU-49 [-1, 16, 88, 120] 0
conv2d-50 [-1, 16, 88, 120] 0
StemBlock-51 [-1, 16, 88, 120] 0
Conv2d-52 [-1, 96, 88, 120] 13,824
BatchNorm2d-53 [-1, 96, 88, 120] 192
ReLU-54 [-1, 96, 88, 120] 0
conv2d-55 [-1, 96, 88, 120] 0
Conv2d-56 [-1, 96, 44, 60] 864
BatchNorm2d-57 [-1, 96, 44, 60] 192
conv2d-58 [-1, 96, 44, 60] 0
Conv2d-59 [-1, 16, 44, 60] 144
BatchNorm2d-60 [-1, 16, 44, 60] 32
conv2d-61 [-1, 16, 44, 60] 0
Conv2d-62 [-1, 32, 44, 60] 512
BatchNorm2d-63 [-1, 32, 44, 60] 64
conv2d-64 [-1, 32, 44, 60] 0
Conv2d-65 [-1, 96, 44, 60] 864
BatchNorm2d-66 [-1, 96, 44, 60] 192
conv2d-67 [-1, 96, 44, 60] 0
Conv2d-68 [-1, 32, 44, 60] 3,072
BatchNorm2d-69 [-1, 32, 44, 60] 64
conv2d-70 [-1, 32, 44, 60] 0
ReLU-71 [-1, 32, 44, 60] 0
GatherExpansion-72 [-1, 32, 44, 60] 0
Conv2d-73 [-1, 192, 44, 60] 55,296
BatchNorm2d-74 [-1, 192, 44, 60] 384
ReLU-75 [-1, 192, 44, 60] 0
conv2d-76 [-1, 192, 44, 60] 0
Conv2d-77 [-1, 192, 44, 60] 1,728
BatchNorm2d-78 [-1, 192, 44, 60] 384
conv2d-79 [-1, 192, 44, 60] 0
Conv2d-80 [-1, 32, 44, 60] 6,144
BatchNorm2d-81 [-1, 32, 44, 60] 64
conv2d-82 [-1, 32, 44, 60] 0
ReLU-83 [-1, 32, 44, 60] 0
GatherExpansion-84 [-1, 32, 44, 60] 0
Conv2d-85 [-1, 192, 44, 60] 55,296
BatchNorm2d-86 [-1, 192, 44, 60] 384
ReLU-87 [-1, 192, 44, 60] 0
conv2d-88 [-1, 192, 44, 60] 0
Conv2d-89 [-1, 192, 22, 30] 1,728
BatchNorm2d-90 [-1, 192, 22, 30] 384
conv2d-91 [-1, 192, 22, 30] 0
Conv2d-92 [-1, 32, 22, 30] 288
BatchNorm2d-93 [-1, 32, 22, 30] 64
conv2d-94 [-1, 32, 22, 30] 0
Conv2d-95 [-1, 64, 22, 30] 2,048
BatchNorm2d-96 [-1, 64, 22, 30] 128
conv2d-97 [-1, 64, 22, 30] 0
Conv2d-98 [-1, 192, 22, 30] 1,728
BatchNorm2d-99 [-1, 192, 22, 30] 384
conv2d-100 [-1, 192, 22, 30] 0
Conv2d-101 [-1, 64, 22, 30] 12,288
BatchNorm2d-102 [-1, 64, 22, 30] 128
conv2d-103 [-1, 64, 22, 30] 0
ReLU-104 [-1, 64, 22, 30] 0
GatherExpansion-105 [-1, 64, 22, 30] 0
Conv2d-106 [-1, 384, 22, 30] 221,184
BatchNorm2d-107 [-1, 384, 22, 30] 768
ReLU-108 [-1, 384, 22, 30] 0
conv2d-109 [-1, 384, 22, 30] 0
Conv2d-110 [-1, 384, 22, 30] 3,456
BatchNorm2d-111 [-1, 384, 22, 30] 768
conv2d-112 [-1, 384, 22, 30] 0
Conv2d-113 [-1, 64, 22, 30] 24,576
BatchNorm2d-114 [-1, 64, 22, 30] 128
conv2d-115 [-1, 64, 22, 30] 0
ReLU-116 [-1, 64, 22, 30] 0
GatherExpansion-117 [-1, 64, 22, 30] 0
Conv2d-118 [-1, 384, 22, 30] 221,184
BatchNorm2d-119 [-1, 384, 22, 30] 768
ReLU-120 [-1, 384, 22, 30] 0
conv2d-121 [-1, 384, 22, 30] 0
Conv2d-122 [-1, 384, 11, 15] 3,456
BatchNorm2d-123 [-1, 384, 11, 15] 768
conv2d-124 [-1, 384, 11, 15] 0
Conv2d-125 [-1, 64, 11, 15] 576
BatchNorm2d-126 [-1, 64, 11, 15] 128
conv2d-127 [-1, 64, 11, 15] 0
Conv2d-128 [-1, 128, 11, 15] 8,192
BatchNorm2d-129 [-1, 128, 11, 15] 256
conv2d-130 [-1, 128, 11, 15] 0
Conv2d-131 [-1, 384, 11, 15] 3,456
BatchNorm2d-132 [-1, 384, 11, 15] 768
conv2d-133 [-1, 384, 11, 15] 0
Conv2d-134 [-1, 128, 11, 15] 49,152
BatchNorm2d-135 [-1, 128, 11, 15] 256
conv2d-136 [-1, 128, 11, 15] 0
ReLU-137 [-1, 128, 11, 15] 0
GatherExpansion-138 [-1, 128, 11, 15] 0
Conv2d-139 [-1, 768, 11, 15] 884,736
BatchNorm2d-140 [-1, 768, 11, 15] 1,536
ReLU-141 [-1, 768, 11, 15] 0
conv2d-142 [-1, 768, 11, 15] 0
Conv2d-143 [-1, 768, 11, 15] 6,912
BatchNorm2d-144 [-1, 768, 11, 15] 1,536
conv2d-145 [-1, 768, 11, 15] 0
Conv2d-146 [-1, 128, 11, 15] 98,304
BatchNorm2d-147 [-1, 128, 11, 15] 256
conv2d-148 [-1, 128, 11, 15] 0
ReLU-149 [-1, 128, 11, 15] 0
GatherExpansion-150 [-1, 128, 11, 15] 0
Conv2d-151 [-1, 768, 11, 15] 884,736
BatchNorm2d-152 [-1, 768, 11, 15] 1,536
ReLU-153 [-1, 768, 11, 15] 0
conv2d-154 [-1, 768, 11, 15] 0
Conv2d-155 [-1, 768, 11, 15] 6,912
BatchNorm2d-156 [-1, 768, 11, 15] 1,536
conv2d-157 [-1, 768, 11, 15] 0
Conv2d-158 [-1, 128, 11, 15] 98,304
BatchNorm2d-159 [-1, 128, 11, 15] 256
conv2d-160 [-1, 128, 11, 15] 0
ReLU-161 [-1, 128, 11, 15] 0
GatherExpansion-162 [-1, 128, 11, 15] 0
Conv2d-163 [-1, 768, 11, 15] 884,736
BatchNorm2d-164 [-1, 768, 11, 15] 1,536
ReLU-165 [-1, 768, 11, 15] 0
conv2d-166 [-1, 768, 11, 15] 0
Conv2d-167 [-1, 768, 11, 15] 6,912
BatchNorm2d-168 [-1, 768, 11, 15] 1,536
conv2d-169 [-1, 768, 11, 15] 0
Conv2d-170 [-1, 128, 11, 15] 98,304
BatchNorm2d-171 [-1, 128, 11, 15] 256
conv2d-172 [-1, 128, 11, 15] 0
ReLU-173 [-1, 128, 11, 15] 0
GatherExpansion-174 [-1, 128, 11, 15] 0
Conv2d-175 [-1, 128, 11, 15] 147,456
BatchNorm2d-176 [-1, 128, 11, 15] 256
ReLU-177 [-1, 128, 11, 15] 0
conv2d-178 [-1, 128, 11, 15] 0
Conv2d-179 [-1, 128, 11, 15] 1,152
BatchNorm2d-180 [-1, 128, 11, 15] 256
conv2d-181 [-1, 128, 11, 15] 0
Conv2d-182 [-1, 128, 11, 15] 16,384
BatchNorm2d-183 [-1, 128, 11, 15] 256
conv2d-184 [-1, 128, 11, 15] 0
ReLU-185 [-1, 128, 11, 15] 0
GatherExpansion-186 [-1, 128, 11, 15] 0
AdaptiveAvgPool2d-187 [-1, 128, 1, 1] 0
BatchNorm2d-188 [-1, 128, 1, 1] 256
Conv2d-189 [-1, 128, 1, 1] 16,384
BatchNorm2d-190 [-1, 128, 1, 1] 256
ReLU-191 [-1, 128, 1, 1] 0
conv2d-192 [-1, 128, 1, 1] 0
Conv2d-193 [-1, 128, 11, 15] 147,456
conv2d-194 [-1, 128, 11, 15] 0
ContextEmbeddingBlock-195 [-1, 128, 11, 15] 0
Conv2d-196 [-1, 16, 88, 120] 2,304
BatchNorm2d-197 [-1, 16, 88, 120] 32
ReLU-198 [-1, 16, 88, 120] 0
conv2d-199 [-1, 16, 88, 120] 0
Conv2d-200 [-1, 11, 88, 120] 176
conv2d-201 [-1, 11, 88, 120] 0
SegHead-202 [-1, 11, 352, 480] 0
Conv2d-203 [-1, 32, 44, 60] 9,216
BatchNorm2d-204 [-1, 32, 44, 60] 64
ReLU-205 [-1, 32, 44, 60] 0
conv2d-206 [-1, 32, 44, 60] 0
Conv2d-207 [-1, 11, 44, 60] 352
conv2d-208 [-1, 11, 44, 60] 0
SegHead-209 [-1, 11, 352, 480] 0
Conv2d-210 [-1, 64, 22, 30] 36,864
BatchNorm2d-211 [-1, 64, 22, 30] 128
ReLU-212 [-1, 64, 22, 30] 0
conv2d-213 [-1, 64, 22, 30] 0
Conv2d-214 [-1, 11, 22, 30] 704
conv2d-215 [-1, 11, 22, 30] 0
SegHead-216 [-1, 11, 352, 480] 0
Conv2d-217 [-1, 128, 11, 15] 147,456
BatchNorm2d-218 [-1, 128, 11, 15] 256
ReLU-219 [-1, 128, 11, 15] 0
conv2d-220 [-1, 128, 11, 15] 0
Conv2d-221 [-1, 11, 11, 15] 1,408
conv2d-222 [-1, 11, 11, 15] 0
SegHead-223 [-1, 11, 352, 480] 0
SemanticBranch-224 [[-1, 128, 11, 15], [-1, 11, 352, 480], [-1, 11, 352, 480], [-1, 11, 352, 480], [-1, 11, 352, 480]] 0
Conv2d-225 [-1, 16, 44, 60] 2,080
BatchNorm2d-226 [-1, 16, 44, 60] 32
ReLU-227 [-1, 16, 44, 60] 0
conv2d-228 [-1, 16, 44, 60] 0
Conv2d-229 [-1, 16, 11, 15] 2,080
BatchNorm2d-230 [-1, 16, 11, 15] 32
ReLU-231 [-1, 16, 11, 15] 0
conv2d-232 [-1, 16, 11, 15] 0
Conv2d-233 [-1, 16, 11, 15] 2,080
BatchNorm2d-234 [-1, 16, 11, 15] 32
ReLU-235 [-1, 16, 11, 15] 0
conv2d-236 [-1, 16, 11, 15] 0
Conv2d-237 [-1, 128, 44, 60] 2,048
BatchNorm2d-238 [-1, 128, 44, 60] 256
ReLU-239 [-1, 128, 44, 60] 0
conv2d-240 [-1, 128, 44, 60] 0
GGMA-241 [-1, 128, 44, 60] 0
Conv2d-242 [-1, 16, 11, 15] 2,080
BatchNorm2d-243 [-1, 16, 11, 15] 32
ReLU-244 [-1, 16, 11, 15] 0
conv2d-245 [-1, 16, 11, 15] 0
Conv2d-246 [-1, 16, 44, 60] 2,080
BatchNorm2d-247 [-1, 16, 44, 60] 32
ReLU-248 [-1, 16, 44, 60] 0
conv2d-249 [-1, 16, 44, 60] 0
Conv2d-250 [-1, 16, 44, 60] 2,080
BatchNorm2d-251 [-1, 16, 44, 60] 32
ReLU-252 [-1, 16, 44, 60] 0
conv2d-253 [-1, 16, 44, 60] 0
Conv2d-254 [-1, 128, 11, 15] 2,048
BatchNorm2d-255 [-1, 128, 11, 15] 256
ReLU-256 [-1, 128, 11, 15] 0
conv2d-257 [-1, 128, 11, 15] 0
GGMA-258 [-1, 128, 11, 15] 0
Conv2d-259 [-1, 128, 44, 60] 294,912
BatchNorm2d-260 [-1, 128, 44, 60] 256
ReLU-261 [-1, 128, 44, 60] 0
conv2d-262 [-1, 128, 44, 60] 0
Conv2d-263 [-1, 11, 44, 60] 1,408
conv2d-264 [-1, 11, 44, 60] 0
SegHead-265 [-1, 11, 352, 480] 0
================================================================
Total params: 5,047,136
Trainable params: 5,047,136
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 1.93
Forward/backward pass size (MB): 62196495155771.44
Params size (MB): 19.25
Estimated Total Size (MB): 62196495155792.62
----------------------------------------------------------------
BiSeNetV2AD3(
6.158 GMac, 100.000% MACs,
(db): DetailedBranch(
3.797 GMac, 61.654% MACs,
(s1_conv1): conv2d(
0.081 GMac, 1.317% MACs,
(conv): Conv2d(0.073 GMac, 1.185% MACs, 3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.005 GMac, 0.088% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.003 GMac, 0.044% MACs, inplace=True)
)
(s1_conv2): conv2d(
1.565 GMac, 25.419% MACs,
(conv): Conv2d(1.557 GMac, 25.287% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.005 GMac, 0.088% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.003 GMac, 0.044% MACs, inplace=True)
)
(s2_conv1): conv2d(
0.391 GMac, 6.355% MACs,
(conv): Conv2d(0.389 GMac, 6.322% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.022% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.011% MACs, inplace=True)
)
(s2_conv2): conv2d(
0.391 GMac, 6.355% MACs,
(conv): Conv2d(0.389 GMac, 6.322% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.022% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.011% MACs, inplace=True)
)
(s2_conv3): conv2d(
0.391 GMac, 6.355% MACs,
(conv): Conv2d(0.389 GMac, 6.322% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.022% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.011% MACs, inplace=True)
)
(s3_conv1): conv2d(
0.196 GMac, 3.177% MACs,
(conv): Conv2d(0.195 GMac, 3.161% MACs, 64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
(s3_conv2): conv2d(
0.39 GMac, 6.338% MACs,
(conv): Conv2d(0.389 GMac, 6.322% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
(s3_conv3): conv2d(
0.39 GMac, 6.338% MACs,
(conv): Conv2d(0.389 GMac, 6.322% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
)
(sb): SemanticBranch(
1.553 GMac, 25.224% MACs,
(stem): StemBlock(
0.089 GMac, 1.449% MACs,
(conv1): conv2d(
0.02 GMac, 0.329% MACs,
(conv): Conv2d(0.018 GMac, 0.296% MACs, 3, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.022% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.011% MACs, inplace=True)
)
(conv_1x1): conv2d(
0.006 GMac, 0.104% MACs,
(conv): Conv2d(0.005 GMac, 0.088% MACs, 16, 8, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
(conv_3x3): conv2d(
0.013 GMac, 0.206% MACs,
(conv): Conv2d(0.012 GMac, 0.198% MACs, 8, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.005% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.003% MACs, inplace=True)
)
(mpooling): MaxPool2d(0.001 GMac, 0.011% MACs, kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(conv2): conv2d(
0.049 GMac, 0.798% MACs,
(conv): Conv2d(0.049 GMac, 0.790% MACs, 32, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.005% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.003% MACs, inplace=True)
)
)
(s3_ge1): GatherExpansion(
0.165 GMac, 2.679% MACs,
(conv1): conv2d(
0.149 GMac, 2.420% MACs,
(conv): Conv2d(0.146 GMac, 2.371% MACs, 16, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.002 GMac, 0.033% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.016% MACs, inplace=True)
)
(dwconv2): conv2d(
0.003 GMac, 0.045% MACs,
(conv): Conv2d(0.002 GMac, 0.037% MACs, 96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=96, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.008 GMac, 0.134% MACs,
(conv): Conv2d(0.008 GMac, 0.132% MACs, 96, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.003 GMac, 0.045% MACs,
(conv): Conv2d(0.002 GMac, 0.037% MACs, 96, 96, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=96, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.008% MACs,
(conv): Conv2d(0.0 GMac, 0.006% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=16, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.002 GMac, 0.025% MACs,
(conv): Conv2d(0.001 GMac, 0.022% MACs, 16, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(s3_ge2): GatherExpansion(
0.17 GMac, 2.753% MACs,
(conv1): conv2d(
0.148 GMac, 2.395% MACs,
(conv): Conv2d(0.146 GMac, 2.371% MACs, 32, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.016% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.008% MACs, inplace=True)
)
(dwconv2): conv2d(
0.006 GMac, 0.091% MACs,
(conv): Conv2d(0.005 GMac, 0.074% MACs, 192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.016% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.266% MACs,
(conv): Conv2d(0.016 GMac, 0.263% MACs, 192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 192, 192, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=192, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(s4_ge1): GatherExpansion(
0.16 GMac, 2.602% MACs,
(conv1): conv2d(
0.148 GMac, 2.395% MACs,
(conv): Conv2d(0.146 GMac, 2.371% MACs, 32, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.016% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.008% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.023% MACs,
(conv): Conv2d(0.001 GMac, 0.019% MACs, 192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.008 GMac, 0.133% MACs,
(conv): Conv2d(0.008 GMac, 0.132% MACs, 192, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.001 GMac, 0.023% MACs,
(conv): Conv2d(0.001 GMac, 0.019% MACs, 192, 192, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=192, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.004% MACs,
(conv): Conv2d(0.0 GMac, 0.003% MACs, 32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.001 GMac, 0.023% MACs,
(conv): Conv2d(0.001 GMac, 0.022% MACs, 32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(s4_ge2): GatherExpansion(
0.166 GMac, 2.694% MACs,
(conv1): conv2d(
0.147 GMac, 2.383% MACs,
(conv): Conv2d(0.146 GMac, 2.371% MACs, 64, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.004% MACs, inplace=True)
)
(dwconv2): conv2d(
0.003 GMac, 0.045% MACs,
(conv): Conv2d(0.002 GMac, 0.037% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.265% MACs,
(conv): Conv2d(0.016 GMac, 0.263% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 384, 384, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=384, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=64, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(s5_ge1): GatherExpansion(
0.158 GMac, 2.563% MACs,
(conv1): conv2d(
0.147 GMac, 2.383% MACs,
(conv): Conv2d(0.146 GMac, 2.371% MACs, 64, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.004% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.011% MACs,
(conv): Conv2d(0.001 GMac, 0.009% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.002% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.008 GMac, 0.132% MACs,
(conv): Conv2d(0.008 GMac, 0.132% MACs, 384, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.001 GMac, 0.011% MACs,
(conv): Conv2d(0.001 GMac, 0.009% MACs, 384, 384, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=384, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.002% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.002% MACs,
(conv): Conv2d(0.0 GMac, 0.002% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=64, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.001 GMac, 0.023% MACs,
(conv): Conv2d(0.001 GMac, 0.022% MACs, 64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(s5_ge2): GatherExpansion(
0.164 GMac, 2.664% MACs,
(conv1): conv2d(
0.146 GMac, 2.377% MACs,
(conv): Conv2d(0.146 GMac, 2.371% MACs, 128, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.023% MACs,
(conv): Conv2d(0.001 GMac, 0.019% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.264% MACs,
(conv): Conv2d(0.016 GMac, 0.263% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 768, 768, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(s5_ge3): GatherExpansion(
0.164 GMac, 2.664% MACs,
(conv1): conv2d(
0.146 GMac, 2.377% MACs,
(conv): Conv2d(0.146 GMac, 2.371% MACs, 128, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.023% MACs,
(conv): Conv2d(0.001 GMac, 0.019% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.264% MACs,
(conv): Conv2d(0.016 GMac, 0.263% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 768, 768, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(s5_ge4): GatherExpansion(
0.164 GMac, 2.664% MACs,
(conv1): conv2d(
0.146 GMac, 2.377% MACs,
(conv): Conv2d(0.146 GMac, 2.371% MACs, 128, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.023% MACs,
(conv): Conv2d(0.001 GMac, 0.019% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.264% MACs,
(conv): Conv2d(0.016 GMac, 0.263% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 768, 768, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(s5_ge5): GatherExpansion(
0.027 GMac, 0.445% MACs,
(conv1): conv2d(
0.024 GMac, 0.396% MACs,
(conv): Conv2d(0.024 GMac, 0.395% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv2): conv2d(
0.0 GMac, 0.004% MACs,
(conv): Conv2d(0.0 GMac, 0.003% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.003 GMac, 0.045% MACs,
(conv): Conv2d(0.003 GMac, 0.044% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(seghead1): SegHead(
0.027 GMac, 0.434% MACs,
(conv): conv2d(
0.025 GMac, 0.403% MACs,
(conv): Conv2d(0.024 GMac, 0.395% MACs, 16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.005% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.003% MACs, inplace=True)
)
(classes): conv2d(
0.002 GMac, 0.030% MACs,
(conv): Conv2d(0.002 GMac, 0.030% MACs, 16, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(seghead2): SegHead(
0.026 GMac, 0.414% MACs,
(conv): conv2d(
0.025 GMac, 0.399% MACs,
(conv): Conv2d(0.024 GMac, 0.395% MACs, 32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(classes): conv2d(
0.001 GMac, 0.015% MACs,
(conv): Conv2d(0.001 GMac, 0.015% MACs, 32, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(seghead3): SegHead(
0.025 GMac, 0.405% MACs,
(conv): conv2d(
0.024 GMac, 0.397% MACs,
(conv): Conv2d(0.024 GMac, 0.395% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(classes): conv2d(
0.0 GMac, 0.008% MACs,
(conv): Conv2d(0.0 GMac, 0.008% MACs, 64, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(seghead4): SegHead(
0.025 GMac, 0.400% MACs,
(conv): conv2d(
0.024 GMac, 0.396% MACs,
(conv): Conv2d(0.024 GMac, 0.395% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(classes): conv2d(
0.0 GMac, 0.004% MACs,
(conv): Conv2d(0.0 GMac, 0.004% MACs, 128, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(ceb): ContextEmbeddingBlock(
0.024 GMac, 0.396% MACs,
(gap): AdaptiveAvgPool2d(0.0 GMac, 0.000% MACs, output_size=1)
(bn1): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv2): conv2d(
0.024 GMac, 0.395% MACs,
(conv): Conv2d(0.024 GMac, 0.395% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
)
(dma): GGMA(
0.013 GMac, 0.207% MACs,
(query_project): conv2d(
0.006 GMac, 0.091% MACs,
(conv): Conv2d(0.005 GMac, 0.089% MACs, 130, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(key_project): conv2d(
0.0 GMac, 0.006% MACs,
(conv): Conv2d(0.0 GMac, 0.006% MACs, 130, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(value_project): conv2d(
0.0 GMac, 0.006% MACs,
(conv): Conv2d(0.0 GMac, 0.006% MACs, 130, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(out_project): conv2d(
0.006 GMac, 0.104% MACs,
(conv): Conv2d(0.005 GMac, 0.088% MACs, 16, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
)
(sma): GGMA(
0.012 GMac, 0.195% MACs,
(query_project): conv2d(
0.0 GMac, 0.006% MACs,
(conv): Conv2d(0.0 GMac, 0.006% MACs, 130, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(key_project): conv2d(
0.006 GMac, 0.091% MACs,
(conv): Conv2d(0.005 GMac, 0.089% MACs, 130, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(value_project): conv2d(
0.006 GMac, 0.091% MACs,
(conv): Conv2d(0.005 GMac, 0.089% MACs, 130, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(out_project): conv2d(
0.0 GMac, 0.007% MACs,
(conv): Conv2d(0.0 GMac, 0.005% MACs, 16, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(seghead): SegHead(
0.783 GMac, 12.720% MACs,
(conv): conv2d(
0.78 GMac, 12.660% MACs,
(conv): Conv2d(0.779 GMac, 12.644% MACs, 256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
(classes): conv2d(
0.004 GMac, 0.060% MACs,
(conv): Conv2d(0.004 GMac, 0.060% MACs, 128, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
)
6.157793264 GMac 5.15915 19.68059539794922 MB
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.
[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.
[INFO] Register zero_ops() for <class 'torch.nn.modules.activation.ReLU'>.
[WARN] Cannot find rule for <class '__main__.conv2d'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.DetailedBranch'>. Treat it as zero Macs and zero Params.
[INFO] Register zero_ops() for <class 'torch.nn.modules.pooling.MaxPool2d'>.
[WARN] Cannot find rule for <class '__main__.StemBlock'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.GatherExpansion'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.SegHead'>. Treat it as zero Macs and zero Params.
[INFO] Register count_adap_avgpool() for <class 'torch.nn.modules.pooling.AdaptiveAvgPool2d'>.
[WARN] Cannot find rule for <class '__main__.ContextEmbeddingBlock'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.SemanticBranch'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.GGMA'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.BiSeNetV2AD3'>. Treat it as zero Macs and zero Params.
6.142874624 GMac 5.047136 19.2532958984375 MB
Process finished with exit code 0
'''
| [
"ye_b@hotmail.com"
] | ye_b@hotmail.com |
628e6d136054a6d06792eb383522a465965a4b1b | bcc04939aa70675c9be19c0bf4a9642877db46b1 | /qa/urls.py | bab92ea3fbcf77b3fe73b3d096450528a2fbd7e2 | [
"MIT"
] | permissive | zkeshtkar/gapbug | 164398e2ddd8f952d5851eab19e34f9f84a080e1 | eec5baf9b4346aef26bcb10e48ddcb358140d708 | refs/heads/main | 2023-06-20T07:39:50.084126 | 2021-07-16T13:31:10 | 2021-07-16T13:31:10 | 387,550,452 | 0 | 0 | MIT | 2021-07-19T17:53:20 | 2021-07-19T17:53:19 | null | UTF-8 | Python | false | false | 1,726 | py | from django.urls import path
from . import views
app_name = "qa"
urlpatterns = [
path("", views.QuestionList.as_view(), name="index"),
path("ask", views.Ask.as_view(), name="question"),
path("show/<int:id>/<str:slug>", views.show, name="show"),
path(
"<int:id>/answer/submit", views.AnswerQuestion.as_view(), name="submit_answer"
),
path(
"<int:question_id>/up", views.QuestionVoteUp.as_view(), name="question_vote_up"
),
path(
"<int:question_id>/down",
views.QuestionVoteDown.as_view(),
name="question_vote_down",
),
path("<int:question_id>/edit", views.EditQuestion.as_view(), name="question_edit"),
path("<int:pk>/delete/", views.DeleteQuestion.as_view(), name="question_delete"),
path(
"<int:question_id>/answer/<int:pk>/delete/",
views.DeleteAnswer.as_view(),
name="answer_delete",
),
path(
"<int:question_id>/edit/answer/<int:answer_id>",
views.EditAnswer.as_view(),
name="answer_edit",
),
path(
"<int:question_id>/<int:answer_id>/up",
views.AnswerVoteUp.as_view(),
name="answer_voteup",
),
path(
"<int:question_id>/<int:answer_id>/down",
views.AnswerVoteDown.as_view(),
name="answer_voteup",
),
path(
"<int:question_id>/<int:answer_id>/accept",
views.AcceptAnswer.as_view(),
name="accept_answer",
),
path("search/", views.Search.as_view(), name="search"),
path("tags/", views.TagList.as_view(), name="tags_list"),
path("tags/<str:tag>/", views.QuestionByTag.as_view(), name="by_tag"),
path("tagslist/", views.QuestionTagList.as_view(), name="all_tags"),
]
| [
"mshirdel@gmail.com"
] | mshirdel@gmail.com |
fca7b1ef9fb4de7c9a18ac5ac1b8740490e71104 | a5aabe2e4057d78e687a57a6b560516a7cdb5836 | /unsserv/extreme/sampling/protocol.py | b81ea0a13077b05dbebfab9cabc85dd2beb7c114 | [
"MIT"
] | permissive | aratz-lasa/py-unsserv | 0ffc09ddab65a11ce917d0faa8b1b5dff091e563 | 6f332385e55d05953186b9a8b7848bca4b878e18 | refs/heads/master | 2022-12-14T21:10:12.397834 | 2020-05-03T11:29:49 | 2020-05-03T11:29:49 | 228,329,158 | 5 | 0 | MIT | 2022-12-08T07:00:55 | 2019-12-16T07:35:20 | Python | UTF-8 | Python | false | false | 3,695 | py | from enum import IntEnum, auto
from typing import Tuple, Sequence
from unsserv.common.utils import parse_node
from unsserv.common.structs import Node
from unsserv.common.rpc.structs import Message
from unsserv.common.rpc.protocol import AProtocol, ITranscoder, Command, Data, Handler
from unsserv.extreme.sampling.structs import Sample, SampleResult
FIELD_COMMAND = "mrwb-command"
FIELD_TTL = "mrwb-ttl"
FIELD_ORIGIN_NODE = "mrwb-origin-node"
FIELD_SAMPLE_RESULT = "mrwb-sample-result"
FIELD_SAMPLE_ID = "mrwb-sample-id"
class MRWBCommand(IntEnum):
GET_DEGREE = auto()
SAMPLE = auto()
SAMPLE_RESULT = auto()
class MRWBTranscoder(ITranscoder):
def encode(self, command: Command, *data: Data) -> Message:
if command == MRWBCommand.SAMPLE:
sample: Sample = data[0]
message_data = {
FIELD_COMMAND: MRWBCommand.SAMPLE,
FIELD_SAMPLE_ID: sample.id,
FIELD_ORIGIN_NODE: sample.origin_node,
FIELD_TTL: sample.ttl,
}
return Message(self.my_node, self.service_id, message_data)
elif command == MRWBCommand.SAMPLE_RESULT:
sample_result: SampleResult = data[0]
message_data = {
FIELD_COMMAND: MRWBCommand.SAMPLE_RESULT,
FIELD_SAMPLE_ID: sample_result.sample_id,
FIELD_SAMPLE_RESULT: sample_result.result,
}
return Message(self.my_node, self.service_id, message_data)
elif command == MRWBCommand.GET_DEGREE:
message_data = {FIELD_COMMAND: MRWBCommand.GET_DEGREE}
return Message(self.my_node, self.service_id, message_data)
raise ValueError("Invalid Command")
def decode(self, message: Message) -> Tuple[Command, Sequence[Data]]:
command = message.data[FIELD_COMMAND]
if command == MRWBCommand.SAMPLE:
sample = Sample(
id=message.data[FIELD_SAMPLE_ID],
origin_node=parse_node(message.data[FIELD_ORIGIN_NODE]),
ttl=message.data[FIELD_TTL],
)
return MRWBCommand.SAMPLE, [sample]
elif command == MRWBCommand.SAMPLE_RESULT:
sample_result = SampleResult(
sample_id=message.data[FIELD_SAMPLE_ID],
result=parse_node(message.data[FIELD_SAMPLE_RESULT]),
)
return MRWBCommand.SAMPLE_RESULT, [sample_result]
elif command == MRWBCommand.GET_DEGREE:
return MRWBCommand.GET_DEGREE, []
raise ValueError("Invalid Command")
class MRWBProtocol(AProtocol):
def _get_new_transcoder(self):
return MRWBTranscoder(self.my_node, self.service_id)
async def sample(self, destination: Node, sample: Sample):
message = self._transcoder.encode(MRWBCommand.SAMPLE, sample)
return await self._rpc.call_send_message(destination, message)
async def sample_result(self, destination: Node, sample_result: SampleResult):
message = self._transcoder.encode(MRWBCommand.SAMPLE_RESULT, sample_result)
return await self._rpc.call_send_message(destination, message)
async def get_degree(self, destination: Node) -> int:
message = self._transcoder.encode(MRWBCommand.GET_DEGREE)
return await self._rpc.call_send_message(destination, message)
def set_handler_sample(self, handler: Handler):
self._handlers[MRWBCommand.SAMPLE] = handler
def set_handler_sample_result(self, handler: Handler):
self._handlers[MRWBCommand.SAMPLE_RESULT] = handler
def set_handler_get_degree(self, handler: Handler):
self._handlers[MRWBCommand.GET_DEGREE] = handler
| [
"aratzml@opendeusto.es"
] | aratzml@opendeusto.es |
25a25d841579b1ba7c8b6f758ec38f5162118091 | bcfc082c98c13bccd4a415c30b67c61d0b91828c | /pymc_hacking/eg3/run_mcmc.py | d43d95f4d452d8ea1875956fa9da5748be328748 | [] | no_license | Markus333/doing_bayesian_data_analysis | 0e7375af1acfd9952044ade28e59d734974e9a71 | 27f144fda3e9df41dbb74f70c5bf82547d2fa649 | refs/heads/master | 2020-12-28T21:39:08.758946 | 2013-01-10T06:46:11 | 2013-01-10T06:46:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | #!/usr/bin/env python
import two_normal_model
from pymc import MCMC
from pymc.Matplot import plot
# do posterior sampling
m = MCMC(two_normal_model)
m.sample(iter=100000, burn=1000)
print(m.stats())
import numpy
for p in ['mean1', 'mean2', 'std_dev', 'theta']:
numpy.savetxt("%s.trace" % p, m.trace(p)[:])
# draw some pictures
plot(m)
| [
"matthew.kelcey@gmail.com"
] | matthew.kelcey@gmail.com |
f09e09827089f8e92a61465736668ec7b5bbb6a5 | fed8edd1396e6f611744b36d744cb23da956409a | /test_dj_app_8_dev_1593/urls.py | dc5381962ff66caeafcf829b0deb7ee07346a97f | [] | no_license | crowdbotics-apps/test-dj-app-8-dev-1593 | ffdc02f6268df04725d647a2ca9f963fe1552b38 | 48602ed68f119b69a2114cedd59350ffc7f0205e | refs/heads/master | 2022-04-02T10:18:14.994388 | 2020-02-04T17:15:16 | 2020-02-04T17:15:16 | 238,261,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py | """test_dj_app_8_dev_1593 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Test dj app 8"
admin.site.site_title = "Test dj app 8 Admin Portal"
admin.site.index_title = "Test dj app 8 Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="Test dj app 8 API",
default_version="v1",
description="API documentation for Test dj app 8 App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
59979f1621b5032c17414f37611219c67fb481dd | c7e5d2fd3a9fdc585f335477eb74248a4416e44b | /setup.py | 19611e6865b7fddf5b82ed3fc61d2ccc8153f801 | [
"MIT"
] | permissive | lord63/pyhipku | d4ec626df5c9e354894f1290633132bb86388730 | 4037014ee4d56ed3dd62b3fe1b9681095e6f5de8 | refs/heads/master | 2022-03-10T16:46:10.514356 | 2021-10-04T11:12:48 | 2022-02-22T14:54:31 | 31,259,992 | 104 | 6 | MIT | 2021-10-04T11:12:54 | 2015-02-24T12:57:02 | Python | UTF-8 | Python | false | false | 1,199 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import pyhipku
try:
import pypandoc
long_description = pypandoc.convert('README.md','rst')
except (IOError, ImportError):
with open('README.md') as f:
long_description = f.read()
setup(
name='pyhipku',
version=pyhipku.__version__,
url='http://github.com/lord63/pyhipku/',
license='MIT',
author='lord63',
author_email='lord63.j@gmail.com',
description='Encode any IP address as a haiku',
long_description=long_description,
packages=['pyhipku'],
include_package_data=True,
keywords='ip haiku',
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| [
"lord63.j@gmail.com"
] | lord63.j@gmail.com |
7fccc2a26214b24ed6112b3083ded4973cf22b7c | b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e | /build/shogun_lib/examples/undocumented/python_modular/preprocessor_sortwordstring_modular.py | cbeb3b9c60661aeb5b61178e97e3ba7a043908ed | [] | no_license | behollis/muViewBranch | 384f8f97f67723b2a4019294854969d6fc1f53e8 | 1d80914f57e47b3ad565c4696861f7b3213675e0 | refs/heads/master | 2021-01-10T13:22:28.580069 | 2015-10-27T21:43:20 | 2015-10-27T21:43:20 | 45,059,082 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | from tools.load import LoadMatrix
lm=LoadMatrix()
traindna = lm.load_dna('../data/fm_train_dna.dat')
testdna = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindna,testdna,3,0,False,False],[traindna,testdna,3,0,False,False]]
def preprocessor_sortwordstring_modular (fm_train_dna=traindna,fm_test_dna=testdna,order=3,gap=0,reverse=False,use_sign=False):
from shogun.Kernel import CommWordStringKernel
from shogun.Features import StringCharFeatures, StringWordFeatures, DNA
from shogun.Preprocessor import SortWordString
charfeat=StringCharFeatures(fm_train_dna, DNA)
feats_train=StringWordFeatures(charfeat.get_alphabet())
feats_train.obtain_from_char(charfeat, order-1, order, gap, reverse)
preproc=SortWordString()
preproc.init(feats_train)
feats_train.add_preprocessor(preproc)
feats_train.apply_preprocessor()
charfeat=StringCharFeatures(fm_test_dna, DNA)
feats_test=StringWordFeatures(charfeat.get_alphabet())
feats_test.obtain_from_char(charfeat, order-1, order, gap, reverse)
feats_test.add_preprocessor(preproc)
feats_test.apply_preprocessor()
kernel=CommWordStringKernel(feats_train, feats_train, use_sign)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('CommWordString')
preprocessor_sortwordstring_modular(*parameter_list[0])
| [
"prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305"
] | prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305 |
80d08ab26221fb1bcfb4478b7d0d7ee2bedb94d3 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC1702.py | cfd5953d19d3326974e9bc2e2f80559fa8e2ef50 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,647 | py | # qubit number=5
# total number=51
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build a (mutation-perturbed) Grover-style search circuit on n qubits.

    Hadamards prepare the uniform superposition, then floor(sqrt(2^n)*pi/4)
    rounds each apply the oracle Zf followed by a diffusion-like block, and
    finally every qubit is measured into the classical register.

    NOTE: the trailing ``# number=...`` comments are gate identifiers from the
    QDiff mutation tooling -- the gate order must not be changed.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Uniform superposition plus mutation-inserted extra gates
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    prog.h(input_qubit[0]) # number=44
    prog.cz(input_qubit[3],input_qubit[0]) # number=45
    prog.h(input_qubit[0]) # number=46
    prog.cx(input_qubit[3],input_qubit[0]) # number=48
    prog.z(input_qubit[3]) # number=49
    prog.cx(input_qubit[3],input_qubit[0]) # number=50
    prog.cx(input_qubit[3],input_qubit[0]) # number=34
    prog.rx(0.11938052083641225,input_qubit[1]) # number=36
    prog.cx(input_qubit[1],input_qubit[2]) # number=47
    Zf = build_oracle(n, f)
    # Optimal number of Grover iterations for a single marked state
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.rx(1.4765485471872026,input_qubit[2]) # number=35
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[1],input_qubit[0]) # number=41
        prog.x(input_qubit[0]) # number=42
        prog.cx(input_qubit[1],input_qubit[0]) # number=43
        prog.x(input_qubit[4]) # number=30
        prog.x(input_qubit[1]) # number=10
        prog.x(input_qubit[2]) # number=11
        prog.rx(0.45238934211692994,input_qubit[3]) # number=38
        prog.y(input_qubit[1]) # number=39
        prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
        prog.h(input_qubit[3]) # number=29
        prog.cx(input_qubit[0],input_qubit[3]) # number=22
        prog.x(input_qubit[3]) # number=23
        prog.cx(input_qubit[0],input_qubit[3]) # number=24
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.x(input_qubit[0]) # number=13
        prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
        prog.x(input_qubit[1]) # number=14
        prog.cx(input_qubit[0],input_qubit[2]) # number=26
        prog.x(input_qubit[2]) # number=27
        prog.h(input_qubit[4]) # number=40
        prog.cx(input_qubit[0],input_qubit[2]) # number=28
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
    # circuit end
    # Measure all qubits into the classical register
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
# Script entry point: run Grover search for the key '00000' on the least busy
# real IBM Q backend and dump the measured counts plus the transpiled circuit.
if __name__ == '__main__':
    key = "00000"
    # Oracle predicate: returns '1' exactly for the searched bitstring.
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    # Pick the least busy operational hardware backend with >= 2 qubits.
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against the FakeVigo model only to report depth and layout.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_QC1702.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
6f8b3a5d8957841087142834c91c801aa460a488 | 8e79de4b73998dd0ee1dae4881784a2b12410615 | /219/notifications.py | 09294bb1d55b253633d306d9185376cafe2febce | [
"MIT"
] | permissive | alehpineda/bitesofpy | e6eb7c9413cf407a12643efece01bef5457e5dcb | bfd319a606cd0b7b9bfb85a3e8942872a2d43c48 | refs/heads/master | 2021-07-15T19:59:35.061049 | 2020-09-25T17:49:32 | 2020-09-25T17:49:32 | 209,878,791 | 0 | 0 | MIT | 2020-09-06T00:11:45 | 2019-09-20T20:49:51 | Python | UTF-8 | Python | false | false | 268 | py | from datetime import date, timedelta
TODAY = date.today()

def gen_bite_planning(num_bites=1, num_days=1, start_date=TODAY):
    """Infinitely yield planned bite dates.

    Every ``num_days`` days after ``start_date`` -- starting one interval in,
    not on the start date itself -- the same date is emitted ``num_bites``
    times in a row.
    """
    offset = num_days
    while True:
        planned = start_date + timedelta(days=offset)
        for _ in range(num_bites):
            yield planned
        offset += num_days
| [
"ale.hpineda@gmail.com"
] | ale.hpineda@gmail.com |
92c31e6b240db898369e4593625739fe5d39e00f | e6b9ca7b13a21fcc5a26e787191c845698a47f17 | /django_mako_plus/provider/compile.py | 1090218285d818cbbd93b2b3b8f62d91c72c36ac | [
"Apache-2.0"
] | permissive | BrightBridgeWeb/django-mako-plus | c42e6b3ff4a62b5110f6412958b8df585ae78881 | 24690661b80562a510c1632853815df5111b606c | refs/heads/master | 2020-04-15T14:47:44.565952 | 2019-01-02T02:29:42 | 2019-01-02T02:29:42 | 164,768,193 | 0 | 0 | Apache-2.0 | 2019-01-09T02:06:52 | 2019-01-09T02:06:51 | null | UTF-8 | Python | false | false | 7,416 | py | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import os
import os.path
import shutil
import collections
import logging
from .base import BaseProvider
from ..util import log
from ..command import run_command
class CompileProvider(BaseProvider):
    '''
    Runs a command, such as compiling *.scss or *.less, when an output file
    timestamp is older than the source file. In production mode, this check
    is done only once (the first time a template is run) per server start.
    When settings.DEBUG=True, checks for a recompile every request.
    When settings.DEBUG=False, checks for a recompile only once per server run.
    '''
    def __init__(self, template, options):
        '''Resolve source/target paths and compile immediately if stale.'''
        super().__init__(template, options)
        # Resolve relative paths against the project dir in development and
        # against the collected static root in production.
        base_dir = settings.BASE_DIR if settings.DEBUG else settings.STATIC_ROOT
        self.sourcepath = os.path.join(base_dir, self.build_sourcepath())
        self.targetpath = os.path.join(base_dir, self.build_targetpath())
        # since this is in the constructor, it runs only one time per server
        # run when in production mode
        if not os.path.exists(self.sourcepath):
            msg = 'skipping nonexistent file'
        elif self.needs_compile:
            msg = 'compiling file'
            # exist_ok avoids the check-then-create race of the previous
            # os.path.exists() guard.
            os.makedirs(os.path.dirname(self.targetpath), exist_ok=True)
            run_command(*self.build_command())
        else:
            msg = 'already up to date'
        if log.isEnabledFor(logging.DEBUG):
            log.debug('%s created for %s: [%s]', repr(self), self.sourcepath, msg)

    DEFAULT_OPTIONS = {
        'group': 'styles',
        # explicitly sets the source path to search for - if this filepath
        # exists, DMP includes a link to it in the template. globs are not
        # supported because this should resolve to one exact file.
        # possible values:
        #   1. None: a default path is used, such as "{app}/{subdir}/{filename.ext}",
        #      prefixed with the static root at production; see subclasses for
        #      their default filenames.
        #   2. function, lambda, or other callable: called as func(provider) and
        #      should return a string
        #   3. str: used directly
        'sourcepath': None,
        # explicitly sets the target path - same value semantics as 'sourcepath'.
        'targetpath': None,
        # explicitly sets the command to be run. possible values:
        #   1. [] (empty/falsy): the default command is run
        #   2. function, lambda, or other callable: called as func(provider),
        #      expects list as return
        #   3. list: used directly in the call to subprocess module
        'command': [],
    }

    def build_sourcepath(self):
        '''Return the (relative) source path, honoring the `sourcepath` option.'''
        # if defined in settings, run the function or return the string
        if self.options['sourcepath'] is not None:
            return self.options['sourcepath'](self) if callable(self.options['sourcepath']) else self.options['sourcepath']
        # build the default
        if self.app_config is None:
            # Fixed broken log call: the old message mixed a `{}` placeholder
            # with `%s` formatting and passed both arguments as one tuple;
            # it also wrongly referred to `targetpath` instead of `sourcepath`.
            log.warning('%s skipped: template %s not in project subdir and '
                        '`sourcepath` not in settings',
                        self.__class__.__qualname__, self.template_relpath)
        return self.build_default_sourcepath()

    def build_default_sourcepath(self):
        '''Subclass hook: default source path when no `sourcepath` option is set.'''
        raise ImproperlyConfigured('{} must set `sourcepath` in options (or a subclass can override build_default_sourcepath).'.format(self.__class__.__qualname__))

    def build_targetpath(self):
        '''Return the (relative) target path, honoring the `targetpath` option.'''
        # if defined in settings, run the function or return the string
        if self.options['targetpath'] is not None:
            return self.options['targetpath'](self) if callable(self.options['targetpath']) else self.options['targetpath']
        # build the default
        if self.app_config is None:
            # Fixed broken log call (see build_sourcepath for details).
            log.warning('%s skipped: template %s not in project subdir and '
                        '`targetpath` not in settings',
                        self.__class__.__qualname__, self.template_relpath)
        return self.build_default_targetpath()

    def build_default_targetpath(self):
        '''Subclass hook: default target path when no `targetpath` option is set.'''
        raise ImproperlyConfigured('{} must set `targetpath` in options (or a subclass can override build_default_targetpath).'.format(self.__class__.__qualname__))

    def build_command(self):
        '''Returns the command to run, as a list (see subprocess module)'''
        # if defined in settings, run the function or return the list
        if self.options['command']:
            return self.options['command'](self) if callable(self.options['command']) else self.options['command']
        # build the default
        return self.build_default_command()

    def build_default_command(self):
        '''Subclass hook: default command when no `command` option is set.'''
        raise ImproperlyConfigured('{} must set `command` in options (or a subclass can override build_default_command).'.format(self.__class__.__qualname__))

    @property
    def needs_compile(self):
        '''Returns True if self.sourcepath is newer than self.targetpath'''
        try:
            source_mtime = os.stat(self.sourcepath).st_mtime
        except OSError:  # no source for this template, so just return
            return False
        try:
            target_mtime = os.stat(self.targetpath).st_mtime
        except OSError:  # target doesn't exist, so compile
            return True
        # both source and target exist, so compile if source newer
        return source_mtime > target_mtime
###################
### Sass
class CompileScssProvider(CompileProvider):
    '''CompileProvider specialization that turns *.scss sources into *.css.'''

    def build_default_sourcepath(self):
        '''Default SCSS source: {app}/styles/{template}.scss.'''
        scss_name = self.template_relpath + '.scss'
        return os.path.join(self.app_config.name, 'styles', scss_name)

    def build_default_targetpath(self):
        '''Default compiled CSS target: {app}/styles/{template}.css.'''
        css_name = self.template_relpath + '.css'
        return os.path.join(self.app_config.name, 'styles', css_name)

    def build_default_command(self):
        '''Default `sass` invocation with source maps and the project load path.'''
        sass_binary = shutil.which('sass')
        load_path_arg = '--load-path={}'.format(settings.BASE_DIR)
        return [sass_binary, '--source-map', load_path_arg,
                self.sourcepath, self.targetpath]
#####################
### Less
class CompileLessProvider(CompileProvider):
    '''CompileProvider specialization that turns *.less sources into *.css.'''

    def build_default_sourcepath(self):
        '''Default LESS source: {app}/styles/{template}.less.'''
        less_name = self.template_relpath + '.less'
        return os.path.join(self.app_config.name, 'styles', less_name)

    def build_default_targetpath(self):
        '''Default compiled CSS target: {app}/styles/{template}.css.'''
        css_name = self.template_relpath + '.css'
        return os.path.join(self.app_config.name, 'styles', css_name)

    def build_default_command(self):
        '''Default `lessc` invocation with source maps.'''
        lessc_binary = shutil.which('lessc')
        return [lessc_binary, '--source-map',
                self.sourcepath, self.targetpath]
| [
"doconix@gmail.com"
] | doconix@gmail.com |
df6d1e4f182172ca91145d9307cefe3d8451ec3f | 4992f95174927775146f46275ed604aefe5e9699 | /dstagram/config/urls.py | a7febe8b3c2ba7dc58f8b9ec32801e3465038596 | [
"MIT"
] | permissive | djangojeng-e/mini_projects | 55d5858628eb5f42cb4a5a5e417958fe5929d658 | 32014388e8c83556d83f6ae911bd0e33df2067a7 | refs/heads/master | 2022-12-25T21:26:23.057485 | 2020-05-27T08:02:20 | 2020-05-27T08:02:20 | 229,744,025 | 0 | 0 | MIT | 2022-12-11T05:30:16 | 2019-12-23T12:01:05 | CSS | UTF-8 | Python | false | false | 1,080 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from account.views import register
# Project-level routes: admin site, the photo app at the site root, account
# pages under /accounts/, and a direct route to the register view.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('photo.urls')),
    path('accounts/', include('account.urls')),
    path('register/', register, name='register')
]
# Serve user-uploaded media files. NOTE: Django's static() helper returns
# patterns only when settings.DEBUG is True, so this is a no-op in production.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"headfat1218@gmail.com"
] | headfat1218@gmail.com |
bf366e9f7685b1100977df1b46176a6fd4e773e3 | c46f8b1b822166b426b7dacfa85bbf093c8b3fa0 | /pycity_calc/toolbox/analyze/save_load_profiles.py | 7ea4cebae79abc2b2dc30649a25f65103ccdb01f | [
"MIT"
] | permissive | RWTH-EBC/pyCity_calc | d82e684488ed2fba82167c07439f1ffa00907cd8 | 99fd0dab7f9a9030fd84ba4715753364662927ec | refs/heads/master | 2021-03-27T13:11:13.171620 | 2019-06-16T12:01:59 | 2019-06-16T12:01:59 | 92,810,373 | 4 | 0 | MIT | 2019-06-16T11:38:14 | 2017-05-30T07:56:52 | Python | UTF-8 | Python | false | false | 38,298 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Code extracts and saves load profiles of all buildings of city object
"""
from __future__ import division
import os
import warnings
import pickle
import numpy as np
import matplotlib.pyplot as plt
try:
import openpyxl
except:
msg = 'Could not import openpyxl. Which is required, if you want to' \
' save profiles directly into xlsx files. Please install via ' \
'pip or set save_as_xlsx to False.'
warnings.warn(msg)
import pycity_calc.visualization.city_visual as citvis
import pycity_calc.cities.scripts.city_generator.city_generator as citgen
import pycity_calc.toolbox.analyze.save_city_data as savcit
def gen_path_if_not_existent(dir):
    """
    Generate directory (including missing parent directories), if not existent.

    Parameters
    ----------
    dir : str
        Directory path
    """
    # exist_ok avoids the check-then-create race condition of the previous
    # `if not os.path.exists(dir): os.makedirs(dir)` pattern and is a no-op
    # when the directory already exists.
    os.makedirs(dir, exist_ok=True)
def extract_build_base_data(city, id, file_path, use_german=False):
    """
    Extract and save building base data to txt file
    Parameters
    ----------
    city : object
        City object
    id : int
        Building node id
    file_path : str
        Path to save file to (e.g. ...\building_data.txt)
    use_german : bool, optional
        Defines, if English or German language should be used
        (default: False). If False, uses English language.
    """
    # Building pointer
    build = city.nodes[id]['entity']
    # NOTE: the two branches below write exactly the same building attributes;
    # they differ only in the language of the field labels.
    if use_german:
        with open(file_path, mode='w') as f:
            f.write(u'Gebäude-ID: ' + str(id) + '\n')
            x_coord = city.nodes[id]['position'].x
            y_coord = city.nodes[id]['position'].y
            f.write('X-Koordinate in m: ' + str(int(x_coord)) + '\n')
            f.write('Y-Koordinate in m: ' + str(int(y_coord)) + '\n')
            # build_year / mod_year may be None; int() only when set
            if build.build_year is not None:
                build_year = int(build.build_year)
            else:
                build_year = None
            f.write(
                'Baujahr: ' + str(build_year) + '\n')
            if build.mod_year is not None:
                mod_year = int(build.mod_year)
            else:
                mod_year = None
            f.write('Letztes Sanierungsjahr: ' + str(mod_year) + '\n')
            f.write(u'Nummer Gebäudetyp: ' + str(build.build_type) + '\n')
            build_name = citgen.conv_build_type_nb_to_name(build.build_type)
            f.write(
                u'(Engl.) Erläuterung Gebäudetyp: ' + str(build_name) + '\n')
            # Write building data to file
            f.write('Anzahl Zonen/Apartments: ' + str(
                len(build.apartments)) + '\n')
            f.write(u'Nutzbare PV-Fläche in m2: ' +
                    str(build.roof_usabl_pv_area) + '\n')
            f.write(u'Nettogrundfläche in m2: ' +
                    str(build.net_floor_area) + '\n')
            f.write(
                u'Bebaute Grundfläche in m2: ' + str(build.ground_area) + '\n')
            f.write(u'Mittlere Geschosshöhe in m: ' +
                    str(build.height_of_floors) + '\n')
            f.write('Anzahl Geschosse: ' + str(build.nb_of_floors) + '\n')
            # Annual demand totals (kWh/a) from the building's load curves
            ann_th_sh_demand = build.get_annual_space_heat_demand()
            ann_el_demand = build.get_annual_el_demand()
            ann_dhw_demand = build.get_annual_dhw_demand()
            f.write(u'Jährlicher Nutzenergiebedarf für Raumwärme in kWh/a: '
                    + str(int(ann_th_sh_demand)) + '\n')
            f.write(u'Jährlicher, elektrischer Energiebedarf (ohne Warmwasser)'
                    ' in kWh/a: '
                    + str(int(ann_el_demand)) + '\n')
            f.write(u'Jährlicher Nutzenergiebedarf Warmwasser in kWh/a: '
                    + str(int(ann_dhw_demand)) + '\n')
            f.write('\n')
            # Optional OpenStreetMap attributes (only written if present)
            if 'osm_id' in city.nodes[id]:
                f.write(
                    'openstreetmap id: ' + str(city.nodes[id]['osm_id']) + '\n')
            if 'name' in city.nodes[id]:
                f.write('OSM name: ' + str(city.nodes[id]['name']) + '\n')
            if 'addr_street' in city.nodes[id]:
                f.write('Street: ' + str(city.nodes[id]['addr_street']) + '\n')
            if 'addr_housenumber' in city.nodes[id]:
                f.write('Street nb.: ' +
                        str(city.nodes[id]['addr_housenumber']) + '\n')
            if 'comment' in city.nodes[id]:
                f.write('OSM comment: ' +
                        str(city.nodes[id]['comment']) + '\n')
            # print(vars(build))
            # NOTE: redundant -- the with-statement already closes f
            f.close()
    else:
        with open(file_path, mode='w') as f:
            f.write('Building node id: ' + str(id) + '\n')
            x_coord = city.nodes[id]['position'].x
            y_coord = city.nodes[id]['position'].y
            f.write('X-coordinate in m: ' + str(int(x_coord)) + '\n')
            f.write('Y-coordinate in m: ' + str(int(y_coord)) + '\n')
            # build_year / mod_year may be None; int() only when set
            if build.build_year is not None:
                build_year = int(build.build_year)
            else:
                build_year = None
            f.write(
                'Year of construction: ' + str(build_year) + '\n')
            if build.mod_year is not None:
                mod_year = int(build.mod_year)
            else:
                mod_year = None
            f.write('Last year of modernization: ' + str(mod_year) + '\n')
            f.write('Building type number: ' + str(build.build_type) + '\n')
            build_name = citgen.conv_build_type_nb_to_name(build.build_type)
            f.write('Building type explanation: ' + str(build_name) + '\n')
            # Write building data to file
            f.write('Nb. of zones/apartments: ' + str(
                len(build.apartments)) + '\n')
            f.write('Usable PV roof area in m2: ' +
                    str(build.roof_usabl_pv_area) + '\n')
            f.write('Net floor area (NFA) in m2: ' +
                    str(build.net_floor_area) + '\n')
            f.write('Ground area in m2: ' + str(build.ground_area) + '\n')
            f.write('Height of single floor in m: ' +
                    str(build.height_of_floors) + '\n')
            f.write('Number of floors: ' + str(build.nb_of_floors) + '\n')
            # Annual demand totals (kWh/a) from the building's load curves
            ann_th_sh_demand = build.get_annual_space_heat_demand()
            ann_el_demand = build.get_annual_el_demand()
            ann_dhw_demand = build.get_annual_dhw_demand()
            f.write('Annual net space heating energy demand in kWh/a: '
                    + str(int(ann_th_sh_demand)) + '\n')
            f.write('Annual electric energy demand in kWh/a: '
                    + str(int(ann_el_demand)) + '\n')
            f.write('Annual net hot water energy demand in kWh/a: '
                    + str(int(ann_dhw_demand)) + '\n')
            f.write('\n')
            # Optional OpenStreetMap attributes (only written if present)
            if 'osm_id' in city.nodes[id]:
                f.write(
                    'openstreetmap id: ' + str(city.nodes[id]['osm_id']) + '\n')
            if 'name' in city.nodes[id]:
                f.write('OSM name: ' + str(city.nodes[id]['name']) + '\n')
            if 'addr_street' in city.nodes[id]:
                f.write('Street: ' + str(city.nodes[id]['addr_street']) + '\n')
            if 'addr_housenumber' in city.nodes[id]:
                f.write('Street nb.: ' +
                        str(city.nodes[id]['addr_housenumber']) + '\n')
            if 'comment' in city.nodes[id]:
                f.write('OSM comment: ' +
                        str(city.nodes[id]['comment']) + '\n')
            # print(vars(build))
            # NOTE: redundant -- the with-statement already closes f
            f.close()
def extract_build_profiles(city, id, file_path, do_plot=False,
                           use_german=False, save_tikz=False,
                           save_as_xlsx=True):
    """
    Extract and save building profiles to file
    Saves the space heating, electrical and hot water power curves of one
    building as tab-separated txt (and optionally xlsx), and can plot them.
    Parameters
    ----------
    city : object
        City object
    id : int
        Building node id
    file_path : str
        Path to save file to (e.g. ...\building_data.txt)
    do_plot : bool, optional
        Defines, if profiles should be plotted (default: False)
    use_german : bool, optional
        Defines, if English or German language should be used
        (default: False). If False, uses English language.
    save_tikz : bool, optional
        Define, if figure should be saved as tikz (default: False)
    save_as_xlsx : bool, optional
        Define, if load curves should also be saved as xlsx files
        (default: True)
    """
    # Building pointer
    build = city.nodes[id]['entity']
    # Get power curves (in Watt; one value per timestep over a 365 day year)
    sh_profile = build.get_space_heating_power_curve()
    el_profile = build.get_electric_power_curve()
    dhw_profile = build.get_dhw_power_curve()
    # Time array: seconds since start of year
    timestep = city.environment.timer.timeDiscretization
    year_in_seconds = 365 * 24 * 3600
    time_array = np.arange(0, year_in_seconds, timestep)
    # Stack into columns: time / space heating / electrical / hot water
    res_array = np.transpose(np.vstack((time_array, sh_profile,
                                        el_profile, dhw_profile)))
    # Define header
    if use_german:
        header = u'Zeit in Sekunden\tThermische Leistung Raumwärme in Watt\t' \
                 u'Elektrische Leistung in Watt' \
                 u'\tLeistung Warmwasser in Watt'
    else:
        header = 'Time in seconds\tNet space heating power in Watt\t' \
                 'Electric power in Watt\tNet hot water power in Watt'
    # Save numpy array to txt
    np.savetxt(fname=file_path, X=res_array, delimiter='\t', header=header)
    if save_as_xlsx:
        # Mirror the txt content into an xlsx workbook next to file_path
        wb = openpyxl.Workbook()
        ws = wb.active
        if use_german:
            ws['A1'].value = 'Zeit in Sekunden'
            ws['B1'].value = u'Thermische Leistung Raumwärme in Watt'
            ws['C1'].value = u'Elektrische Leistung in Watt'
            ws['D1'].value = u'Leistung Warmwasser in Watt'
            xlsx_filename = str(id) + '_Lastgang.xlsx'
        else:
            ws['A1'].value = 'Time in seconds'
            ws['B1'].value = 'Net space heating power in Watt'
            ws['C1'].value = 'Electric power in Watt'
            ws['D1'].value = 'Net hot water power in Watt'
            xlsx_filename = str(id) + '_profiles.xlsx'
        # Write the data cells (row 1 holds the header)
        for j in range(len(res_array[0])):
            for i in range(len(res_array)):
                ws.cell(row=i + 2, column=j + 1, value=res_array[i][j])
        workbook_path = os.path.join(os.path.dirname(file_path),
                                     xlsx_filename)
        wb.save(workbook_path)
    if do_plot:
        try:
            import ebc_ues_plot.line_plots as uesline
        except:
            msg = 'Cannot import ebc_ues_plot / simple_plot package.' \
                  'Thus, cannot perform plotting in EBC style!'
            raise AssertionError(msg)
        # Time axis in hours. BUGFIX: the upper bound must be the number of
        # hours per year (8760), not the number of timesteps -- the two only
        # coincide for a 3600 s timestep. The old bound (365*24*3600/timestep)
        # produced a time axis longer than the profiles for sub-hourly
        # discretizations.
        hours_per_year = 365 * 24
        time_array = np.arange(0, hours_per_year, timestep / 3600)
        plotdata = uesline.PlottingData()
        plotdata.add_data_entry(time_array, sh_profile / 1000)
        plotdata.add_data_entry(time_array, el_profile / 1000)
        plotdata.add_data_entry(time_array, dhw_profile / 1000)
        # Perform plotting
        if use_german:
            output_path = os.path.join(os.path.dirname(file_path),
                                       'Lastgaenge')
        else:
            output_path = os.path.join(os.path.dirname(file_path),
                                       'power_curves_graphics')
        uesline.plot_multi_language_multi_color(plot_data=plotdata,
                                                plot_sub=True,
                                                output_path=output_path,
                                                output_filename=str(id),
                                                show_plot=False,
                                                use_tight=True,
                                                title_engl=None,
                                                xlab_engl='Time in hours',
                                                ylab_engl='Power in kW',
                                                list_labels_engl=[
                                                    'Space heating\npower in kW',
                                                    'Electric\npower in kW',
                                                    'Hot water\npower in kW'],
                                                title_dt=None,
                                                xlab_dt='Zeit in Stunden',
                                                ylab_dt='Leistung in kW',
                                                list_labels_dt=[
                                                    'Heizleistung\nin kW',
                                                    'Elektrische\nLeistung in kW',
                                                    'Warmwasser-\nleistung in kW'],
                                                fontsize=12,
                                                fig_adjust='a4',
                                                legend_pos_within=True,
                                                put_leg='below', dpi=500,
                                                set_zero_point=True,
                                                set_x_limits=True,
                                                xmin=0, xmax=8760,
                                                set_y_limits=False,
                                                use_grid=False,
                                                save_tikz=save_tikz,
                                                copy_py=True,
                                                copy_input=False,
                                                save_data_array=True,
                                                use_font='arial')
def extract_city_base_data(city, out_file_path, do_plot=False,
                           use_german=False, save_tikz=False,
                           save_as_xlsx=True):
    """
    Extract and save basic city data
    Parameters
    ----------
    city : object
        City object of pyCity_calc
    out_file_path : str
        Path to save data to
    do_plot : bool, optional
        Defines, if profiles should be plotted (default: False)
    use_german : bool, optional
        Defines, if English or German language should be used
        (default: False). If False, uses English language.
    save_tikz : bool, optional
        Define, if figure should be saved as tikz (default: False)
    save_as_xlsx : bool, optional
        Define, if load curves should also be saved as xlsx files
        (default: True)
    """
    # Extract basic city data to path (.txt)
    # NOTE: both branches write the same city summary; only the label
    # language and the plot output names differ.
    if use_german:
        with open(out_file_path, mode='w') as f:
            f.write('Anzahl Knoten: ' + str(len(city.nodes())) + '\n')
            f.write(u'(Z.b. Gebäude, Straßen etc.)\n')
            nb_build_entities = city.get_nb_of_building_entities()
            f.write(u'Anzahl Gebäude: ' + str(nb_build_entities) + '\n')
            list_ent = city.get_list_build_entity_node_ids()
            f.write(u'Liste mit Gebäude-IDs: ' + str(list_ent) + '\n')
            location = city.environment.location
            f.write(
                u'Längen-/Breitengrad der Stadt: ' + (str(location)) + '\n')
            altitude = city.environment.weather.altitude
            f.write(u'Höhe über NN: ' + str(altitude) + '\n')
            nb_occ = city.get_nb_occupants()
            f.write('Anzahl Bewohner: ' + str(nb_occ) + '\n')
            # Aggregated annual demand totals (kWh/a) of the whole district
            ann_th_sh_demand = city.get_annual_space_heating_demand()
            ann_el_demand = city.get_annual_el_demand()
            ann_dhw_demand = city.get_annual_dhw_demand()
            f.write(u'Jährlicher Nutzenergiebedarf für Raumwärme in kWh/a: '
                    + str(int(ann_th_sh_demand)) + '\n')
            f.write(u'Jährlicher, elektrischer Energiebedarf (ohne Warmwasser)'
                    ' in kWh/a: '
                    + str(int(ann_el_demand)) + '\n')
            f.write(u'Jährlicher Nutzenergiebedarf Warmwasser in kWh/a: '
                    + str(int(ann_dhw_demand)) + '\n')
            f.write('\n')
            # NOTE: redundant -- the with-statement already closes f
            f.close()
        if do_plot:
            # Plot energy demands as bar plots
            # (reuses the ann_* totals computed in the with-block above)
            try:
                import ebc_ues_plot.bar_plots as uesbar
            except:
                msg = 'Could not import ebc_ues_plot module.'
                raise AssertionError(msg)
            dataset = np.array([[ann_th_sh_demand], [ann_el_demand],
                                [ann_dhw_demand]])
            output_path = os.path.join(os.path.dirname(out_file_path),
                                       'Stadt_Saulendiagramm_Energie')
            f_name = 'Stadt_Saulendiagramm_Energie'
            uesbar.plot_multi_language_multi_color_bar(dataset=dataset,
                                                       output_path=output_path,
                                                       output_filename=f_name,
                                                       show_plot=False,
                                                       use_tight=True,
                                                       title_engl=None,
                                                       xlab_engl=None,
                                                       ylab_engl='Energy demands in kWh/a',
                                                       list_labels_engl=[
                                                           'Space heating',
                                                           'Electric energy',
                                                           'Hot water energy'],
                                                       title_dt=None,
                                                       xlab_dt=None,
                                                       ylab_dt=u'Energiebedarf in kWh/a',
                                                       list_labels_dt=[
                                                           u'Raumwärme',
                                                           u'Elektr. Energie',
                                                           u'Warmwasser'],
                                                       fontsize=16,
                                                       fig_adjust=None,
                                                       dpi=300,
                                                       copy_py=True,
                                                       copy_input=False,
                                                       input_path=None,
                                                       save_data_array=True,
                                                       save_tikz=save_tikz,
                                                       list_labels_leg_engl=None,
                                                       list_labels_leg_dt=None,
                                                       use_autolabel=False,
                                                       bar_width=0.7,
                                                       set_ylimit=False,
                                                       ymin=None,
                                                       ymax=None,
                                                       rotate_x_labels=False,
                                                       use_font='arial',
                                                       legend_pos='inside')
    else:
        with open(out_file_path, mode='w') as f:
            f.write('Number of nodes: ' + str(len(city.nodes())) + '\n')
            nb_build_entities = city.get_nb_of_building_entities()
            f.write('Number of buildings: ' + str(nb_build_entities) + '\n')
            list_ent = city.get_list_build_entity_node_ids()
            f.write('List of building ids: ' + str(list_ent) + '\n')
            location = city.environment.location
            f.write('Location (lat/long): ' + (str(location)) + '\n')
            altitude = city.environment.weather.altitude
            f.write('Altitude in m above NN: ' + str(altitude) + '\n')
            nb_occ = city.get_nb_occupants()
            f.write('Total number of occupants: ' + str(nb_occ) + '\n')
            # Aggregated annual demand totals (kWh/a) of the whole district
            ann_th_sh_demand = city.get_annual_space_heating_demand()
            ann_el_demand = city.get_annual_el_demand()
            ann_dhw_demand = city.get_annual_dhw_demand()
            f.write('Annual net space heating energy demand in kWh/a: '
                    + str(int(ann_th_sh_demand)) + '\n')
            f.write('Annual electric energy demand in kWh/a: '
                    + str(int(ann_el_demand)) + '\n')
            f.write('Annual net hot water energy demand in kWh/a: '
                    + str(int(ann_dhw_demand)) + '\n')
            f.write('\n')
            # NOTE: redundant -- the with-statement already closes f
            f.close()
        if do_plot:
            # Plot energy demands as bar plots
            # (reuses the ann_* totals computed in the with-block above)
            try:
                import ebc_ues_plot.bar_plots as uesbar
            except:
                msg = 'Could not import ebc_ues_plot module.'
                raise AssertionError(msg)
            dataset = np.array([[ann_th_sh_demand], [ann_el_demand],
                                [ann_dhw_demand]])
            output_path = os.path.join(os.path.dirname(out_file_path),
                                       'city_energy_bars')
            f_name = 'city_bar_plot'
            uesbar.plot_multi_language_multi_color_bar(dataset=dataset,
                                                       output_path=output_path,
                                                       output_filename=f_name,
                                                       show_plot=False,
                                                       use_tight=True,
                                                       title_engl=None,
                                                       xlab_engl=None,
                                                       ylab_engl='Energy demands in kWh/a',
                                                       list_labels_engl=[
                                                           'Space heating',
                                                           'Electric energy',
                                                           'Hot water energy'],
                                                       title_dt=None,
                                                       xlab_dt=None,
                                                       ylab_dt='Energiebedarf in kWh/a',
                                                       list_labels_dt=[
                                                           u'Raumwärme',
                                                           'Elektr. Energie',
                                                           'Warmwasser'],
                                                       fontsize=16,
                                                       fig_adjust=None,
                                                       dpi=300,
                                                       copy_py=True,
                                                       copy_input=False,
                                                       input_path=None,
                                                       save_data_array=True,
                                                       save_tikz=save_tikz,
                                                       list_labels_leg_engl=None,
                                                       list_labels_leg_dt=None,
                                                       use_autolabel=False,
                                                       bar_width=0.7,
                                                       set_ylimit=False,
                                                       ymin=None,
                                                       ymax=None,
                                                       rotate_x_labels=False,
                                                       use_font='arial',
                                                       legend_pos='inside')
def extract_city_profiles(city, city_path, do_plot, use_german=False,
                          save_tikz=False, save_as_xlsx=True):
    """
    Extract and save aggregated city load profiles.
    Saves the aggregated space heating, electrical and hot water power
    curves of the whole district as tab-separated txt (and optionally
    xlsx) into city_path, and can plot them.
    Parameters
    ----------
    city : object
        City object of pyCity_calc
    city_path : str
        Path to folder, where profiles should be saved
    do_plot : bool
        Defines, if profiles should be plotted
    use_german : bool, optional
        Defines, if English or German language should be used
        (default: False). If False, uses English language
    save_tikz : bool, optional
        Define, if figure should be saved as tikz (default: False)
    save_as_xlsx : bool, optional
        Define, if load curves should also be saved as xlsx files
        (default: True)
    """
    # Aggregated power curves of the whole district (in Watt)
    sh_profile = city.get_aggr_space_h_power_curve()
    el_profile = city.get_aggr_el_power_curve()
    dhw_profile = city.get_aggr_dhw_power_curve()
    # Time array: seconds since start of year
    timestep = city.environment.timer.timeDiscretization
    year_in_seconds = 365 * 24 * 3600
    time_array = np.arange(0, year_in_seconds, timestep)
    # Stack into columns: time / space heating / electrical / hot water
    res_array = np.transpose(np.vstack((time_array, sh_profile,
                                        el_profile, dhw_profile)))
    if use_german:
        # Define header
        header = u'Zeit in Sekunden\tThermische Leistung Raumwärme in Watt\t' \
                 u'Elektrische Leistung in Watt' \
                 u'\tLeistung Warmwasser in Watt'
        data_f_name = 'Stadt_Profile.txt'
    else:
        # Define header
        header = 'Time in seconds\tNet space heating power in Watt\t' \
                 'Electric power in Watt\tNet hot water power in Watt'
        data_f_name = 'city_profiles.txt'
    data_f_path = os.path.join(city_path, data_f_name)
    # Save numpy array to txt
    np.savetxt(fname=data_f_path, X=res_array, delimiter='\t', header=header)
    if save_as_xlsx:
        # Mirror the txt content into an xlsx workbook in city_path
        wb = openpyxl.Workbook()
        ws = wb.active
        if use_german:
            ws['A1'].value = 'Zeit in Sekunden'
            ws['B1'].value = u'Thermische Leistung Raumwärme in Watt'
            ws['C1'].value = u'Elektrische Leistung in Watt'
            ws['D1'].value = u'Leistung Warmwasser in Watt'
            xlsx_filename = 'Stadt_Profile.xlsx'
        else:
            ws['A1'].value = 'Time in seconds'
            ws['B1'].value = 'Net space heating power in Watt'
            ws['C1'].value = 'Electric power in Watt'
            ws['D1'].value = 'Net hot water power in Watt'
            xlsx_filename = 'city_profiles.xlsx'
        # Write the data cells (row 1 holds the header)
        for j in range(len(res_array[0])):
            for i in range(len(res_array)):
                ws.cell(row=i + 2, column=j + 1, value=res_array[i][j])
        workbook_path = os.path.join(city_path, xlsx_filename)
        wb.save(workbook_path)
    if do_plot:
        # Plot city profiles to path
        try:
            import ebc_ues_plot.line_plots as uesline
        except:
            msg = 'Cannot import ebc_ues_plot / simple_plot package.' \
                  'Thus, cannot perform plotting in EBC style!'
            raise AssertionError(msg)
        # Time axis in hours. BUGFIX: the upper bound must be the number of
        # hours per year (8760), not the number of timesteps -- the two only
        # coincide for a 3600 s timestep. The old bound (365*24*3600/timestep)
        # produced a time axis longer than the profiles for sub-hourly
        # discretizations.
        hours_per_year = 365 * 24
        time_array = np.arange(0, hours_per_year, timestep / 3600)
        plotdata = uesline.PlottingData()
        plotdata.add_data_entry(time_array, sh_profile / 1000)
        plotdata.add_data_entry(time_array, el_profile / 1000)
        plotdata.add_data_entry(time_array, dhw_profile / 1000)
        # Perform plotting
        if use_german:
            output_path = os.path.join(city_path, 'Stadt_Lastgaenge')
            output_filename = 'Stadt_Lastgaenge'
        else:
            output_path = os.path.join(city_path, 'city_power_curves')
            output_filename = 'city_power_curves'
        uesline.plot_multi_language_multi_color(plot_data=plotdata,
                                                plot_sub=True,
                                                output_path=output_path,
                                                output_filename=output_filename,
                                                show_plot=False,
                                                use_tight=True,
                                                title_engl=None,
                                                xlab_engl='Time in hours',
                                                ylab_engl='Power in kW',
                                                list_labels_engl=[
                                                    'Space heating\npower in kW',
                                                    'Electric\npower in kW',
                                                    'Hot water\npower in kW'],
                                                title_dt=None,
                                                xlab_dt='Zeit in Stunden',
                                                ylab_dt='Leistung in kW',
                                                list_labels_dt=[
                                                    u'Heizleistung\nin kW',
                                                    u'Elektrische\nLeistung in kW',
                                                    u'Warmwasser-\nleistung in kW'],
                                                fontsize=12,
                                                fig_adjust='a4',
                                                legend_pos_within=True,
                                                put_leg='below', dpi=500,
                                                set_zero_point=True,
                                                set_x_limits=True,
                                                xmin=0, xmax=8760,
                                                set_y_limits=False,
                                                use_grid=False,
                                                save_tikz=save_tikz,
                                                copy_py=True,
                                                copy_input=False,
                                                save_data_array=True,
                                                use_font='arial')
def extract_city_data(city, out_path, do_plot=False, use_german=False,
                      save_tikz=False, save_as_xlsx=True):
    """
    Extract and save city data to file.

    Parameters
    ----------
    city : object
        City object of pyCity_calc
    out_path : str
        Path to save city data to
    do_plot: bool, optional
        Defines, if load profiles should be plotted
    use_german : bool, optional
        Defines, if English or German language should be used
        (default: False). If False, uses English language.
    save_tikz : bool, optional
        Define, if figure should be saved as tikz (default: False)
    save_as_xlsx : bool, optional
        Define, if load curves should also be saved as xlsx files
        (default: True)
    """
    #  Language dependent folder / file names and plot axis labels
    if use_german:
        sub_folder = 'Stadt'
        base_file = 'Stadt_Daten.txt'
        build_file = 'Stadt_Gebaeudedaten.txt'
        x_label = 'X-Koordinate in m'
        y_label = 'Y-Koordinate in m'
    else:
        sub_folder = 'city'
        base_file = 'city_data.txt'
        build_file = 'city_data_buildings.txt'
        x_label = 'x-coordinate in m'
        y_label = 'y-coordinate in m'

    city_path = os.path.join(out_path, sub_folder)
    gen_path_if_not_existent(city_path)
    data_file = os.path.join(city_path, base_file)

    #  Extract city base data
    extract_city_base_data(city=city, out_file_path=data_file, do_plot=do_plot,
                           use_german=use_german, save_tikz=save_tikz)

    #  Extract data into single file
    save_path = os.path.join(city_path, build_file)
    savcit.save_city_data_to_file(city=city, save_path=save_path,
                                  use_german=use_german,
                                  save_as_xlsx=save_as_xlsx)

    #  Generate plot with ids and save it to out_path
    citvis.plot_city_district(city=city,
                              city_list=None,
                              plot_buildings=True,
                              plot_street=True,
                              plot_lhn=False, plot_deg=False,
                              plot_esys=False,
                              offset=7,
                              plot_build_labels=True, plot_str_labels=False,
                              plot_heat_labels=False,
                              equal_axis=False, font_size=16, plt_title=None,
                              x_label=x_label,
                              y_label=y_label,
                              show_plot=False,
                              fig_adjust=None,
                              plot_elec_labels=False, save_plot=True,
                              save_path=city_path, dpi=300, plot_color=True,
                              plot_engl=not use_german,
                              auto_close=True, plot_str_dist=150,
                              node_size=50)

    #  Extract and save city profiles
    extract_city_profiles(city=city, city_path=city_path, do_plot=do_plot,
                          use_german=use_german, save_tikz=save_tikz,
                          save_as_xlsx=save_as_xlsx)
def extract_city_n_build_data(city, out_path, use_german=False,
                              save_tikz=False, save_as_xlsx=True):
    """
    Extract city level data plus per-building base data and load profiles.

    Parameters
    ----------
    city : object
        City object of pyCity_calc
    out_path : str
        Path to save profiles to
    use_german : bool, optional
        Defines, if English or German language should be used
        (default: False). If False, uses English language.
    save_tikz : bool, optional
        Define, if figure should be saved as tikz (default: False)
    save_as_xlsx : bool, optional
        Define, if load curves should also be saved as xlsx files
        (default: True)
    """
    #  Extract city data (always with plots)
    extract_city_data(city=city, out_path=out_path, do_plot=True,
                      use_german=use_german, save_tikz=save_tikz,
                      save_as_xlsx=save_as_xlsx)

    #  Language dependent folder and file name parts
    build_folder = 'Gebaeude' if use_german else 'buildings'
    data_suffix = '_Daten.txt' if use_german else '_data.txt'
    profile_suffix = '_Profile.txt' if use_german else '_profiles.txt'

    #  Extract building data for every building entity node
    for node_id in city.get_list_build_entity_node_ids():
        #  Generate folder with node id name
        build_path = os.path.join(out_path, build_folder, str(node_id))
        gen_path_if_not_existent(build_path)

        #  Extract building base data and save them to file
        base_data_path = os.path.join(build_path, str(node_id) + data_suffix)
        extract_build_base_data(city=city, id=node_id,
                                file_path=base_data_path,
                                use_german=use_german)

        #  Extract building profiles and save them to file
        profile_path = os.path.join(build_path, str(node_id) + profile_suffix)
        extract_build_profiles(city=city, id=node_id, file_path=profile_path,
                               do_plot=True, use_german=use_german,
                               save_tikz=save_tikz, save_as_xlsx=save_as_xlsx)
if __name__ == '__main__':
    this_path = os.path.dirname(os.path.abspath(__file__))

    #  Input city pickle file; output folder is named after the input file
    city_f_name = 'city_3_buildings_mixed.pkl'
    input_path = os.path.join(this_path, 'input', city_f_name)
    out_name = city_f_name[:-4]
    out_path = os.path.join(this_path, 'output', 'extracted', out_name)

    use_german = False
    save_tikz = True
    save_as_xlsx = True

    #  Make out_path, if not existent
    gen_path_if_not_existent(out_path)

    #  Load city object (context manager so the file handle is closed;
    #  the original leaked the handle of pickle.load(open(...)))
    with open(input_path, mode='rb') as city_file:
        city = pickle.load(city_file)

    #  tikz export may fail with utf-8 errors on German labels
    if use_german and save_tikz:
        msg = 'Choose use_german=True. Thus, save_tikz is set to False,' \
              ' due to possible utf-8 errors.'
        warnings.warn(msg)
        save_tikz = False

    extract_city_n_build_data(city=city, out_path=out_path,
                              use_german=use_german, save_tikz=save_tikz,
                              save_as_xlsx=save_as_xlsx)
| [
"jschiefelbein@eonerc.rwth-aachen.de"
] | jschiefelbein@eonerc.rwth-aachen.de |
a1526b01102784964eeb982e8698dfbe4e2c1e4c | 63e0bfa7fb4ecf0b6d4f8fd740be0316cd82ea00 | /Graphs/DFSgraph.py | dd8c66f76b63d4d6e3f7eda3c8b8e87506da5a67 | [] | no_license | shaheershantk/Problem-Solving-with-Algorithms-Data-Structure | 7ceb025c8af97dd81890d3baebc69a82d8196801 | ce8b4ba1240fed3109a767984e370ce7a7eb630b | refs/heads/master | 2016-09-06T00:45:50.861117 | 2014-12-01T09:00:16 | 2014-12-01T09:00:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | from graph import Graph
class DFSGraph(Graph):
    """Graph subclass that runs a depth first search and records the
    discovery and finish time of every vertex."""

    def __init__(self):
        super().__init__()
        self.time = 0  # global DFS clock

    def dfs(self):
        """Run a full depth first search over all components."""
        # Reset bookkeeping on every vertex
        for vertex in self:
            vertex.setColor('white')
            vertex.setPred(-1)
        # Start a tree from every still-undiscovered vertex
        for vertex in self:
            if vertex.getColor() != 'white':
                continue
            self.dfsvisit(vertex)

    def dfsvisit(self, startVertex):
        """Recursively explore everything reachable from startVertex."""
        startVertex.setColor('gray')
        self.time += 1
        startVertex.setDiscovery(self.time)
        for neighbour in startVertex.getConnections():
            if neighbour.getColor() == 'white':
                neighbour.setPred(startVertex)
                self.dfsvisit(neighbour)
        startVertex.setColor('black')
        self.time += 1
        startVertex.setFinish(self.time)
| [
"shaheer.shan@gmail.com"
] | shaheer.shan@gmail.com |
ff606cd3f830e14dcd8513ca07d6193f66176520 | 04afb34356de112445c3e5733fd2b773d92372ef | /Sem1/FP/Exam/board.py | ac01e707bf43b664662a7821bfd3124d34aeec33 | [] | no_license | AndreeaCimpean/Uni | a4e48e5e1dcecbc0c28ad45ddd3b0989ff7985c8 | 27df09339e4f8141be3c22ae93c4c063ffd2b172 | refs/heads/master | 2020-08-21T19:12:49.840044 | 2020-05-15T17:22:50 | 2020-05-15T17:22:50 | 216,222,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,139 | py | from texttable import Texttable
import random
class Board:
    """8x8 game board.

    Cell values: 0 = empty, 1 = star ('*'), 2 = cruiser ('B'),
    -2 = endeavour ('E'). Rows are addressed by letters 'A'-'H' and
    columns by 1-8.
    """

    def __init__(self):
        self._data = [0] * 64
        self.place_stars()
        self.place_endeavour()
        self.place_cruisers(3)

    def _index(self, i, j):
        """Map (row letter, 1-based column) to the flat list index."""
        return (ord(i) - ord('A')) * 8 + j - 1

    def get_cell(self, i, j):
        """Return the value at row i ('A'-'H'), column j (1-8)."""
        return self._data[self._index(i, j)]

    def set_cell(self, i, j, value):
        """Store value at row i ('A'-'H'), column j (1-8)."""
        self._data[self._index(i, j)] = value

    def __str__(self):
        d = {0: " ", 1: "*", -2: "E", 2: "B"}
        t = Texttable()
        t.add_row(['0', '1', '2', '3', '4', '5', '6', '7', '8'])
        for i in range(0, 64, 8):
            row = [chr(ord("A") + i // 8)]
            row += [d[value] for value in self._data[i:i + 8]]
            t.add_row(row)
        return t.draw()

    def empty_neighbours(self, i, j):
        '''
        Check if all neighbours of a cell are empty
        :param i: the row of the cell
        :param j: the column of the cell
        :return:
            True if all neighbours are empty
            False otherwise
        '''
        return all(self.get_cell(ni, nj) == 0
                   for ni, nj in self.get_neighbours(i, j))

    def get_neighbours(self, i, j):
        """Return the in-bounds neighbours (incl. diagonals) of (i, j)."""
        neighbours = []
        directionsi = [0, 0, 1, -1, 1, 1, -1, -1]
        directionsj = [1, -1, 0, 0, 1, -1, -1, 1]
        for di, dj in zip(directionsi, directionsj):
            neighbour = chr(ord(i) + di), j + dj
            if 'A' <= neighbour[0] <= 'H' and 1 <= neighbour[1] <= 8:
                neighbours.append(neighbour)
        return neighbours

    def _random_empty_cell(self):
        """Return (row, column) of a uniformly random empty cell.

        Shared helper for all placement methods (the original duplicated
        this re-draw loop three times).
        """
        while True:
            i = chr(ord('A') + random.randint(0, 7))
            j = random.randint(1, 8)
            if self.get_cell(i, j) == 0:
                return i, j

    def place_stars(self):
        '''
        Place random 10 stars on the board, so that there is no 2 adjacent stars(row, column, diagonal)
        :return:
            None, but place the start
        '''
        count = 0
        while count != 10:
            i, j = self._random_empty_cell()
            # Re-draw if the candidate touches any occupied cell
            if not self.empty_neighbours(i, j):
                continue
            self.set_cell(i, j, 1)
            count += 1

    def place_endeavour(self):
        """Place the endeavour (-2) on a random empty cell."""
        i, j = self._random_empty_cell()
        self.set_cell(i, j, -2)

    def place_cruisers(self, number):
        """Place `number` cruisers (2) on random empty cells."""
        for _ in range(number):
            i, j = self._random_empty_cell()
            self.set_cell(i, j, 2)

    def is_won(self):
        """The game is won once every cruiser has been destroyed."""
        return self.find_number_of_ships() == 0

    def is_lost(self):
        """The game is lost once the endeavour is gone."""
        return self.find_endeavour() is None

    def find_number_of_ships(self):
        """Count the cruisers still on the board."""
        return sum(1 for value in self._data if value == 2)

    def find_endeavour(self):
        """Return the endeavour's (row, column), or None if destroyed."""
        for index, value in enumerate(self._data):
            if value == -2:
                return chr(ord("A") + index // 8), index % 8 + 1
        return None
| [
"andreeacimpean.910@gmail.com"
] | andreeacimpean.910@gmail.com |
3f00da3a7e49680da3280abdc9e62595162564a2 | ffad717edc7ab2c25d5397d46e3fcd3975ec845f | /Python/pyesri 2/ANSWERS/countwords.py | 2f99c6fb13bb5934869c3497fc0145931cb580f8 | [] | no_license | shaunakv1/esri-developer-conference-2015-training | 2f74caea97aa6333aa38fb29183e12a802bd8f90 | 68b0a19aac0f9755202ef4354ad629ebd8fde6ba | refs/heads/master | 2021-01-01T20:35:48.543254 | 2015-03-09T22:13:14 | 2015-03-09T22:13:14 | 31,855,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #!/usr/bin/python
import sys
if len(sys.argv) < 3:
print "Syntax: countwords.py PATTERN FILE ..."
sys.exit()
pattern = sys.argv[1]
for fname in sys.argv[2:]:
count = 0
with open(fname) as f:
for line in f:
if pattern in line:
count += 1
print '''"{0}" occurred on {1} lines in {2}'''.format(pattern,count,fname)
| [
"shaunakv1@gmail.com"
] | shaunakv1@gmail.com |
475f9818e2bccdeb3c2faec0b8c438eb5c4c96f4 | e0f1b0f8d8771e0852c9d5a118c8a1d5bac274ba | /Keras/5_Deep_Learning_for_Computer_Vision/5.2.py | d69ed0c7c87d8a147caa42dd7731c667bea73655 | [] | no_license | rapsealk/TIL | cdec9a67c510ba0cc33f5f11cdace0ffb4f847e1 | b1a78201fef37cc6d28f8acda41645cd7db4ef6f | refs/heads/master | 2022-11-26T17:11:36.375136 | 2020-10-21T04:35:01 | 2020-10-21T04:35:01 | 122,223,741 | 3 | 0 | null | 2022-11-10T14:58:46 | 2018-02-20T16:26:15 | Java | UTF-8 | Python | false | false | 6,457 | py | #!/usr/bin/env python3
"""
Datasets can be downloaded @ https://www.kaggle.com/c/dogs-vs-cats/data
"""
"""
5.2.2
"""
import os, shutil

# Path to the original (full) Kaggle dogs-vs-cats training set
original_dataset_dir = './datasets/cats_and_dogs/train'

# Root for the smaller dataset used in this chapter
base_dir = './datasets/cats_and_dogs_small'
os.mkdir(base_dir)

# train / validation / test split directories
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

# One sub-directory per class and split.
# Bug fix: the validation class directories were never created and the
# validation images were copied straight into validation_dir, which broke
# both flow_from_directory and the count prints below (NameError on
# validation_cats_dir).
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)
validation_cats_dir = os.path.join(validation_dir, 'cats')
os.mkdir(validation_cats_dir)
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
os.mkdir(validation_dogs_dir)
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)


def _copy_images(prefix, index_range, dst_dir):
    """Copy '<prefix>.<i>.jpg' for every i in index_range into dst_dir."""
    for i in index_range:
        fname = '{}.{}.jpg'.format(prefix, i)
        shutil.copyfile(os.path.join(original_dataset_dir, fname),
                        os.path.join(dst_dir, fname))


# 1000 train / 500 validation / 500 test images per class
_copy_images('cat', range(1000), train_cats_dir)
_copy_images('cat', range(1000, 1500), validation_cats_dir)
_copy_images('cat', range(1500, 2000), test_cats_dir)
_copy_images('dog', range(1000), train_dogs_dir)
_copy_images('dog', range(1000, 1500), validation_dogs_dir)
_copy_images('dog', range(1500, 2000), test_dogs_dir)

print('훈련용 고양이 이미지 전체 개수:', len(os.listdir(train_cats_dir)))
print('훈련용 강아지 이미지 전체 개수:', len(os.listdir(train_dogs_dir)))
print('검증용 고양이 이미지 전체 개수:', len(os.listdir(validation_cats_dir)))
print('검증용 강아지 이미지 전체 개수:', len(os.listdir(validation_dogs_dir)))
print('테스트용 고양이 이미지 전체 개수:', len(os.listdir(test_cats_dir)))
print('테스트용 강아지 이미지 전체 개수:', len(os.listdir(test_dogs_dir)))
"""
5.2.3
"""
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5)) # 코드 5-13: 드롭아웃을 포함한 새로운 컨브넷 정의하기
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
from keras import optimizers

# Binary classification -> binary crossentropy with the sigmoid output above
model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc'])

"""
5.2.4
"""
# Section 5.2.4: data preprocessing with generators

from keras.preprocessing.image import ImageDataGenerator

# Rescale pixel values from [0, 255] to [0, 1]
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')

# Inspect a single batch
for data_batch, labels_batch in train_generator:
    print('배치 데이터 크기:', data_batch.shape)
    print('배치 레이블 크기:', labels_batch.shape)
    break

# Bug fix: the result was stored as `hist`, but the plotting code below
# reads `history` -> NameError. Use `history` consistently.
history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50)
model.save('cats_and_dogs_small_1.h5')
"""
코드 5-10: 훈련의 정확도와 손실 그래프 그리기
"""
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc)+1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
"""
코드 5-11: ImageDataGenerator를 사용하여 데이터 증식 설정하기
"""
datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.1,
horizontal_flip=True, fill_mode='nearest')
"""
코드 5-12: 랜덤하게 증식된 훈련 이미지 그리기
"""
from keras.preprocessing import image
fnames = sorted([os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)])
img_path = fnames[3]
img = image.load_img(img_path, target_size=(150, 150))
x = image.img_to_array(img) # (150, 150, 3) numpy array
x = x.reshape((1,) + x.shape) # (1, 150, 150, 3) numpy array
i = 0
for batch in datagen.flow(x, batch_size=1):
plt.figure(i)
imgplot = plt.imshow(img.array_to_img(batch[0]))
i += 1
if i % 4 == 0:
break
plt.show()
"""
코드 5-14: 데이터 증식 제너레이터를 사용하여 컨브넷 훈련하기
"""
train_datagen = ImageDataGenerator(rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=32, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir, target_size=(150, 150), batch_size=32, class_mode='binary')
history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=100, validation_data=validation_generator, validation_steps=50)
model.save('cats_and_dogs_small_2.h5') | [
"piono623@naver.com"
] | piono623@naver.com |
7a37d75648f80d161d5fcf360df2b3744e76dd14 | 52877e2b60ed675eb16ea66c7398127294a313d3 | /t2t_bert/distributed_single_sentence_classification/train_eval_multilabel_sess_fn.py | 26adc4d0a7a96ee2fcb7494645c5b20da11457d7 | [
"Apache-2.0"
] | permissive | yyht/BERT | 0dc82ea8e141cad4774e638dd7d44f781d77b6c3 | 480c909e0835a455606e829310ff949c9dd23549 | refs/heads/master | 2023-04-07T03:32:28.123608 | 2021-02-17T02:15:58 | 2021-02-17T02:15:58 | 162,232,730 | 37 | 12 | Apache-2.0 | 2022-11-21T21:15:04 | 2018-12-18T05:02:27 | Python | UTF-8 | Python | false | false | 13,499 | py | # -*- coding: utf-8 -*-
import tensorflow as tf
from optimizer import distributed_optimizer as optimizer
from data_generator import distributed_tf_data_utils as tf_data_utils
# try:
# from .bert_model_fn import model_fn_builder
# from .bert_model_fn import rule_model_fn_builder
# except:
# from bert_model_fn import model_fn_builder
# from bert_model_fn import rule_model_fn_builder
try:
# from .model_fn import model_fn_builder
from .model_interface import model_config_parser
from .model_data_interface import data_interface
from .model_fn_interface import model_fn_interface
# from .model_distillation_fn import model_fn_builder as model_distillation_fn
except:
# from model_fn import model_fn_builder
from model_interface import model_config_parser
from model_data_interface import data_interface
# from model_distillation_fn import model_fn_builder as model_distillation_fn
from model_fn_interface import model_fn_interface
import numpy as np
import tensorflow as tf
from bunch import Bunch
from model_io import model_io
import json, os
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report
from sklearn.metrics import label_ranking_average_precision_score
try:
import paisoar as pai
except Exception as e:
pai = None
try:
import horovod.tensorflow as hvd
except Exception as e:
hvd = None
try:
import _pickle as pkl
except Exception as e:
pkl = None
import time
def train_eval_fn(FLAGS,
                  worker_count,
                  task_index,
                  is_chief,
                  target,
                  init_checkpoint,
                  train_file,
                  dev_file,
                  checkpoint_dir,
                  is_debug,
                  **kargs):
    """Train and evaluate a multi-label classifier with a raw tf session.

    Builds the model graph and the tf.data input pipelines, creates a
    (possibly distributed) MonitoredTrainingSession and alternates
    training with periodic evaluation. Evaluation quality is measured
    with the label-ranking average precision over the sigmoid outputs.

    Parameters
    ----------
    FLAGS : flag object with model / data / optimizer configuration
    worker_count : number of distributed workers
    task_index : index of this worker
    is_chief : whether this worker is the chief (writes checkpoints)
    target : session target for distributed training
    init_checkpoint : checkpoint to warm-start from
    train_file / dev_file : tfrecord paths for train / eval data
    checkpoint_dir : directory for checkpoints
    is_debug : "0" shrinks the step counts for quick debugging runs
    kargs : extra options (e.g. parse_type, input_target, train_op)
    """
    graph = tf.Graph()
    with graph.as_default():
        import json

        config = model_config_parser(FLAGS)

        # Shard either the epoch count or the train size over the workers
        if FLAGS.if_shard == "0":
            train_size = FLAGS.train_size
            epoch = int(FLAGS.epoch / worker_count)
        elif FLAGS.if_shard == "1":
            train_size = int(FLAGS.train_size / worker_count)
            epoch = FLAGS.epoch
        else:
            train_size = int(FLAGS.train_size / worker_count)
            epoch = FLAGS.epoch

        init_lr = config.init_lr

        label_dict = json.load(tf.gfile.Open(FLAGS.label_id))

        num_train_steps = int(
            train_size / FLAGS.batch_size * epoch)
        num_warmup_steps = int(num_train_steps * 0.1)
        num_storage_steps = int(train_size / FLAGS.batch_size)

        num_eval_steps = int(FLAGS.eval_size / FLAGS.batch_size)

        # Small fixed step counts for debugging
        if is_debug == "0":
            num_storage_steps = 190
            num_eval_steps = 100
            num_train_steps = 200

        print("num_train_steps {}, num_eval_steps {}, num_storage_steps {}".format(num_train_steps, num_eval_steps, num_storage_steps))
        print(" model type {}".format(FLAGS.model_type))
        print(num_train_steps, num_warmup_steps, "=============")

        opt_config = Bunch({"init_lr": init_lr / worker_count,
                            "num_train_steps": num_train_steps,
                            "num_warmup_steps": num_warmup_steps,
                            "worker_count": worker_count,
                            "opt_type": FLAGS.opt_type,
                            "is_chief": is_chief,
                            "train_op": kargs.get("train_op", "adam"),
                            "decay": kargs.get("decay", "no"),
                            "warmup": kargs.get("warmup", "no"),
                            "grad_clip": config.get("grad_clip", "global_norm"),
                            "clip_norm": config.get("clip_norm", 1.0)})

        anneal_config = Bunch({
            "initial_value": 1.0,
            "num_train_steps": num_train_steps
        })

        model_io_config = Bunch({"fix_lm": False})
        num_classes = FLAGS.num_classes

        # Under horovod only the first worker writes checkpoints
        if FLAGS.opt_type == "hvd" and hvd:
            checkpoint_dir = checkpoint_dir if task_index == 0 else None
        else:
            checkpoint_dir = checkpoint_dir
        print("==checkpoint_dir==", checkpoint_dir, is_chief)

        model_fn_builder = model_fn_interface(FLAGS)

        model_train_fn = model_fn_builder(config, num_classes, init_checkpoint,
                                          model_reuse=None,
                                          load_pretrained=FLAGS.load_pretrained,
                                          opt_config=opt_config,
                                          model_io_config=model_io_config,
                                          exclude_scope="",
                                          not_storage_params=[],
                                          target=kargs.get("input_target", ""),
                                          output_type="sess",
                                          checkpoint_dir=checkpoint_dir,
                                          num_storage_steps=num_storage_steps,
                                          task_index=task_index,
                                          anneal_config=anneal_config,
                                          **kargs)

        # Same builder with model_reuse=True so eval shares the variables
        model_eval_fn = model_fn_builder(config, num_classes, init_checkpoint,
                                         model_reuse=True,
                                         load_pretrained=FLAGS.load_pretrained,
                                         opt_config=opt_config,
                                         model_io_config=model_io_config,
                                         exclude_scope="",
                                         not_storage_params=[],
                                         target=kargs.get("input_target", ""),
                                         output_type="sess",
                                         checkpoint_dir=checkpoint_dir,
                                         num_storage_steps=num_storage_steps,
                                         task_index=task_index,
                                         anneal_config=anneal_config,
                                         **kargs)

        print("==succeeded in building model==")

        def eval_metric_fn(features, eval_op_dict):
            # Sigmoid probabilities for multi-label ranking metrics
            logits = eval_op_dict["logits"]
            print(logits.get_shape(), "===logits shape===")
            prob = tf.nn.sigmoid(logits)
            return {"loss": eval_op_dict["loss"],
                    "prob": prob, "label_ids": features["label_ids"]}

        def train_metric_fn(features, train_op_dict):
            # The op dict passes through unchanged; logits are only
            # inspected for their shape.
            logits = train_op_dict["logits"]
            print(logits.get_shape(), "===logits shape===")
            return train_op_dict

        name_to_features = data_interface(FLAGS)

        def _decode_record(record, name_to_features):
            """Decodes a record to a TensorFlow example.
            """
            example = tf.parse_single_example(record, name_to_features)

            # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
            # So cast all int64 to int32.
            for name in list(example.keys()):
                t = example[name]
                if t.dtype == tf.int64:
                    t = tf.to_int32(t)
                example[name] = t
            return example

        def _decode_batch_record(record, name_to_features):
            # Batched parsing; dtypes are kept as declared
            example = tf.parse_example(record, name_to_features)
            return example

        params = Bunch({})
        params.epoch = epoch
        params.batch_size = FLAGS.batch_size

        print("==train_file==", train_file, params)

        # Build input pipelines; per-record vs. batched parsing
        if kargs.get("parse_type", "parse_single") == "parse_single":
            train_features = tf_data_utils.train_input_fn(train_file,
                _decode_record, name_to_features, params, if_shard=FLAGS.if_shard,
                worker_count=worker_count,
                task_index=task_index)

            eval_features = tf_data_utils.eval_input_fn(dev_file,
                _decode_record, name_to_features, params, if_shard=FLAGS.if_shard,
                worker_count=worker_count,
                task_index=task_index)
        elif kargs.get("parse_type", "parse_single") == "parse_batch":
            train_features = tf_data_utils.train_batch_input_fn(train_file,
                _decode_batch_record, name_to_features, params, if_shard=FLAGS.if_shard,
                worker_count=worker_count,
                task_index=task_index)

            eval_features = tf_data_utils.eval_batch_input_fn(dev_file,
                _decode_batch_record, name_to_features, params, if_shard=FLAGS.if_shard,
                worker_count=worker_count,
                task_index=task_index)

        train_op_dict = model_train_fn(train_features, [], tf.estimator.ModeKeys.TRAIN)
        eval_op_dict = model_eval_fn(eval_features, [], tf.estimator.ModeKeys.EVAL)

        eval_dict = eval_metric_fn(eval_features, eval_op_dict["eval"])
        train_dict = train_metric_fn(train_features, train_op_dict["train"])

        print("==succeeded in building data and model==")
        print(train_op_dict)

        def eval_fn(eval_dict, sess):
            """Run num_eval_steps eval batches and compute the
            label-ranking average precision over all of them."""
            i = 0
            eval_total_dict = {}
            while True:
                try:
                    eval_result = sess.run(eval_dict)
                    for key in eval_result:
                        if key not in eval_total_dict:
                            # First batch: initialise list / scalar slots
                            if key in ["prob", "label_ids"]:
                                eval_total_dict[key] = []
                                eval_total_dict[key].append(eval_result[key].tolist())
                            if key in ["scores", "loss"]:
                                eval_total_dict[key] = 0.0
                                eval_total_dict[key] += eval_result[key]
                        else:
                            if key in ["prob", "label_ids"]:
                                eval_total_dict[key].append(eval_result[key].tolist())
                            if key in ["scores", "loss"]:
                                eval_total_dict[key] += eval_result[key]

                    i += 1
                    if np.mod(i, num_eval_steps) == 0:
                        break
                except tf.errors.OutOfRangeError:
                    print("End of dataset")
                    break

            label_id = eval_total_dict["label_ids"]
            prob = eval_total_dict["prob"]

            # Multi-label ranking quality over the sigmoid scores
            scores = label_ranking_average_precision_score(
                label_id,
                prob)

            print("==ranking scores==", scores)
            eval_total_dict['label_ranking_scores'] = scores
            return eval_total_dict

        def train_fn(train_op_dict, sess):
            """Run the training loop, accumulating losses and periodically
            evaluating on the dev set every num_storage_steps steps."""
            i = 0
            cnt = 0
            loss_dict = {}
            monitoring_train = []
            monitoring_eval = []
            while True:
                try:
                    [train_result] = sess.run([train_op_dict])
                    for key in train_result:
                        if key == "train_op":
                            continue
                        else:
                            try:
                                if np.isnan(train_result[key]):
                                    # Bug fix: the original printed the
                                    # undefined name `train_loss` here
                                    # (NameError on nan losses).
                                    print(train_result[key], "get nan loss")
                                    break
                                else:
                                    if key in loss_dict:
                                        loss_dict[key] += train_result[key]
                                    else:
                                        loss_dict[key] = train_result[key]
                            except:
                                # Non-numeric entries (e.g. arrays) are
                                # skipped on purpose (best effort logging)
                                continue

                    i += 1
                    cnt += 1
                    if np.mod(i, num_storage_steps) == 0:
                        # Report mean losses since the last report
                        string = ""
                        for key in loss_dict:
                            tmp = key + " " + str(loss_dict[key] / cnt) + "\t"
                            string += tmp
                        print(string)
                        monitoring_train.append(loss_dict)

                        eval_finial_dict = eval_fn(eval_dict, sess)
                        monitoring_eval.append(eval_finial_dict)

                        for key in loss_dict:
                            loss_dict[key] = 0.0
                        cnt = 0

                    if is_debug == "0":
                        if i == num_train_steps:
                            break

                except tf.errors.OutOfRangeError:
                    print("==Succeeded in training model==")
                    break
            return {"eval": monitoring_eval,
                    "train": monitoring_train}

        print("===========begin to train============")
        print("start training")

        hooks = []
        hooks.extend(train_op_dict["hooks"])

        # Build the session matching the requested distribution strategy
        if FLAGS.opt_type == "ps" or FLAGS.opt_type == "ps_sync":
            sess_config = tf.ConfigProto(allow_soft_placement=False,
                                         log_device_placement=False)
            print("==create monitored training session==", FLAGS.opt_type, is_chief)
            sess = tf.train.MonitoredTrainingSession(master=target,
                                                     is_chief=is_chief,
                                                     config=kargs.get("sess_config", sess_config),
                                                     hooks=hooks,
                                                     checkpoint_dir=checkpoint_dir,
                                                     save_checkpoint_steps=num_storage_steps)
        elif FLAGS.opt_type == "pai_soar" and pai:
            sess_config = tf.ConfigProto(allow_soft_placement=False,
                                         log_device_placement=False)
            sess = tf.train.MonitoredTrainingSession(master=target,
                                                     is_chief=is_chief,
                                                     config=kargs.get("sess_config", sess_config),
                                                     hooks=hooks,
                                                     checkpoint_dir=checkpoint_dir,
                                                     save_checkpoint_steps=num_storage_steps)
        elif FLAGS.opt_type == "hvd" and hvd:
            sess_config = tf.ConfigProto(allow_soft_placement=False,
                                         log_device_placement=False)
            # Pin each horovod worker to its own GPU
            sess_config.gpu_options.allow_growth = False
            sess_config.gpu_options.visible_device_list = str(hvd.local_rank())
            sess = tf.train.MonitoredTrainingSession(checkpoint_dir=checkpoint_dir,
                                                     hooks=hooks,
                                                     config=sess_config,
                                                     save_checkpoint_steps=num_storage_steps)
        else:
            print("==single sess==")
            sess_config = tf.ConfigProto(allow_soft_placement=False,
                                         log_device_placement=False)
            sess = tf.train.MonitoredTrainingSession(config=sess_config,
                                                     hooks=hooks,
                                                     checkpoint_dir=checkpoint_dir,
                                                     save_checkpoint_steps=num_storage_steps)

        print("==begin to train and eval==")
        monitoring_info = train_fn(train_dict, sess)

        # Final timed evaluation on the chief worker
        if task_index == 0:
            start_time = time.time()
            print("===========begin to eval============")
            eval_finial_dict = eval_fn(eval_dict, sess)
            end_time = time.time()
            print("==total forward time==", end_time - start_time)
| [
"albert.xht@alibaba-inc.com"
] | albert.xht@alibaba-inc.com |
4ecdb76b8e0319bfa6d4f6b7dc0e061f3c6dc767 | e2345e19d448c4fa36af58a6fc908698086137f4 | /woodwork/__init__.py | 3c97e9a8ad0cc661bc81bca8183f7354516c0add | [
"MIT"
] | permissive | westurner/woodwork | df727eb30ea7c07399664b44f0df3afc626db371 | 2475f9cbc220fc57828f880014e9e2a00f547c84 | refs/heads/develop | 2023-01-09T07:29:48.452416 | 2019-07-20T12:20:40 | 2019-07-20T12:20:40 | 191,066,025 | 0 | 0 | MIT | 2022-12-26T20:47:39 | 2019-06-09T23:32:34 | Makefile | UTF-8 | Python | false | false | 141 | py | # -*- coding: utf-8 -*-
"""Top-level package for woodwork."""
__author__ = """Wes Turner"""
__email__ = 'wes@wrd.nu'
__version__ = '0.1.0'
| [
"wes@wrd.nu"
] | wes@wrd.nu |
aebb1eddc6fcf0731267d7340d71f4c83cedd761 | bf902add6952d7f7decdb2296bb136eea55bf441 | /YOLO/.history/pytorch-yolo-v3/video_demo_20201105155405.py | 15b378f813c2de470669d0aedcd9618634ee4675 | [
"MIT"
] | permissive | jphacks/D_2003 | c78fb2b4d05739dbd60eb9224845eb78579afa6f | 60a5684d549862e85bdf758069518702d9925a48 | refs/heads/master | 2023-01-08T16:17:54.977088 | 2020-11-07T06:41:33 | 2020-11-07T06:41:33 | 304,576,949 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 15,214 | py | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
from darknet import Darknet
from preprocess import prep_image, inp_to_image
import pandas as pd
import random
import argparse
import pickle as pkl
import requests
from requests.auth import HTTPDigestAuth
import io
from PIL import Image, ImageDraw, ImageFilter
import play
#from pygame import mixer
#import winsound
# Mapping from camera orientation name to camera / stream index
camera_name = {
    "north":0,
    "south":2,
    "east":1,
    "west":3,
}
def prep_image(img, inp_dim):
    """Prepare an OpenCV frame for the CNN.

    Returns the network input tensor, the untouched original frame and
    its (width, height).
    """
    original = img
    frame_dim = original.shape[1], original.shape[0]
    resized = cv2.resize(original, (inp_dim, inp_dim))
    # BGR -> RGB and HWC -> CHW, then normalise to [0, 1] with a batch axis
    chw = resized[:, :, ::-1].transpose((2, 0, 1)).copy()
    tensor = torch.from_numpy(chw).float().div(255.0).unsqueeze(0)
    return tensor, original, frame_dim
def count(x, img, count):
    """Return `count`, incremented if detection `x` is labelled 'no-mask'.

    x : detection row; x[-1] holds the class index into the global `classes`
    img : unused, kept for signature compatibility with `write`
    count : running number of 'no-mask' detections
    """
    # The original also computed the box corners here, but never used them
    cls = int(x[-1])
    label = "{0}".format(classes[cls])
    print("label:\n", label)
    # Count people without a mask
    if label == 'no-mask':
        count += 1
        print(count)
    return count
def write(x, img,camId):
    """Draw detection `x` on `img`, update the global no-mask counter and
    record the detection for camera `camId` in the global `point` list."""
    global count
    global point
    p = [0,0]
    # Box corners of the detection
    c1 = tuple(x[1:3].int())
    c2 = tuple(x[3:5].int())
    cls = int(x[-1])
    # print(camId, "_c1:",c1)
    # print(camId, "_c2:",c2)
    label = "{0}".format(classes[cls])
    print("label:", label)
    # Count people without a mask
    if(label=='no-mask'):
        count+=1
        print(count)
    # NOTE(review): p is (width/2, height/2) of the box, i.e. its half-size,
    # not its center (center would be c1 + (c2 - c1)/2) — confirm intended.
    p[0] = (c2[0]-c1[0])/2
    p[1] = (c2[1]-c1[1])/2
    point[camId].append(p)
    print("point0",point[0])
    print("point1",point[1])
    # Draw the box outline, then a filled label background and the text
    color = random.choice(colors)
    cv2.rectangle(img, c1, c2,color, 1)
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    cv2.rectangle(img, c1, c2,color, -1)
    cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);
    return img
def arg_parse():
    """Build the command-line interface for the cam demo and parse argv."""
    parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo')
    # Detection confidence used to filter predictions.
    parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.25)
    # Non-maximum-suppression threshold.
    parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshhold", default = 0.4)
    # CNN input resolution: larger is more accurate but slower.
    parser.add_argument(
        "--reso",
        dest = 'reso',
        help = "Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
        default = "160",
        type = str,
    )
    return parser.parse_args()
def cvpaste(img, imgback, x, y, angle, scale):
    """Paste *img* onto *imgback*, rotated by *angle* degrees and scaled by
    *scale*; (x, y) is the offset from the center of the background image."""
    # x and y are the distance from the center of the background image
    r = img.shape[0]
    c = img.shape[1]
    rb = imgback.shape[0]
    cb = imgback.shape[1]
    hrb=round(rb/2)
    hcb=round(cb/2)
    hr=round(r/2)
    hc=round(c/2)
    # Copy the forward image and move to the center of the background image
    imgrot = np.zeros((rb,cb,3),np.uint8)
    imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]
    # Rotation and scaling
    M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)
    imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
    # Translation
    M = np.float32([[1,0,x],[0,1,y]])
    imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
    # Making a binary mask of the pasted (non-black) region
    imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    # Now black-out the area of the forward image in the background image
    img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)
    # Take only region of the forward image.
    img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)
    # Paste the forward image on the background image
    imgpaste = cv2.add(img1_bg,img2_fg)
    return imgpaste
def hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
    """Concatenate images horizontally after scaling each one down to the
    smallest height in the list (aspect ratio preserved)."""
    target_h = min(im.shape[0] for im in im_list)
    scaled = []
    for im in im_list:
        new_w = int(im.shape[1] * target_h / im.shape[0])
        scaled.append(cv2.resize(im, (new_w, target_h), interpolation=interpolation))
    return cv2.hconcat(scaled)
# def beep(freq, dur=100):
# winsound.Beep(freq, dur)
if __name__ == '__main__':
    # YOLO with the stock (pre-training) COCO weights:
    # cfgfile = "cfg/yolov3.cfg" # config file
    # weightsfile = "weight/yolov3.weights" # weights file
    # classes = load_classes('data/coco.names') # list of class names
    # YOLO fine-tuned for mask detection:
    cfgfile = "cfg/mask.cfg" # config file
    weightsfile = "weight/mask_1500.weights" # weights file
    classes = load_classes('data/mask.names') # list of class names
    num_classes = 80 # number of classes
    args = arg_parse() # parse the command-line arguments
    confidence = float(args.confidence) # detection confidence threshold
    nms_thesh = float(args.nms_thresh) # NMS threshold
    start = 0
    CUDA = torch.cuda.is_available() # whether CUDA is available
    max = 0 # crowd-size limit (NOTE(review): shadows the builtin max)
    num_camera = 2 # number of cameras
    num_classes = 80 # number of classes (redundant re-assignment)
    bbox_attrs = 5 + num_classes
    # Per-camera state, indexed 0..num_camera-1.
    model = [[] for i in range(num_camera)]
    inp_dim = [[] for i in range(num_camera)]
    cap = [[] for i in range(num_camera)]
    ret = [[] for i in range(num_camera)]
    frame = [[] for i in range(num_camera)]
    img = [[] for i in range(num_camera)]
    orig_im = [[] for i in range(num_camera)]
    dim = [[] for i in range(num_camera)]
    output0 = []
    output1 = []
    output2 = []
    output3 = []
    point = [[] for i in range(num_camera)]
    # output = [[] for i in range(num_camera)]
    # output = torch.tensor(output)
    # print("output_shape\n", output.shape)
    for i in range(num_camera):
        model[i] = Darknet(cfgfile) # build detector i
        model[i].load_weights(weightsfile) # load the trained weights into detector i
        model[i].net_info["height"] = args.reso
        inp_dim[i] = int(model[i].net_info["height"])
        assert inp_dim[i] % 32 == 0
        assert inp_dim[i] > 32
    #mixer.init() # initialization
    if CUDA:
        for i in range(num_camera):
            model[i].cuda() # move the models to the GPU when CUDA is available
    for i in range(num_camera):
        model[i].eval()
    cap[0] = cv2.VideoCapture(2) # select camera (USB connection)
    cap[1] = cv2.VideoCapture(1) # select camera (USB connection)
    # cap = cv2.VideoCapture("movies/sample.mp4")
    #cap = cv2.VideoCapture("movies/one_v2.avi")
    # Use the next line if your camera has a username and password
    # cap = cv2.VideoCapture('protocol://username:password@IP:port/1')
    #cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/1') # (network connection)
    #cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/80')
    #cap = cv2.VideoCapture('http://admin:admin@192.168.11.4:80/video')
    #cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/camera-cgi/admin/recorder.cgi?action=start&id=samba')
    #cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/recorder.cgi?action=start&id=samba')
    #cap = cv2.VideoCapture('http://admin:admin@192.168.11.5:80/snapshot.jpg?user=admin&pwd=admin&strm=0')
    print('-1')
    # Verify the cameras opened; compact the list and drop any that failed.
    for i in range(num_camera):
        if not cap[i].isOpened():
            if i < num_camera - 1:
                # NOTE(review): len(num_camera - i) calls len() on an int and
                # raises TypeError; range(num_camera - i - 1) was presumably
                # intended — confirm before relying on this path.
                for j in range(len(num_camera - i) - 1):
                    cap[i + j] = cap[i + j + 1]
            cap.pop()
            num_camera -= 1
    #assert cap.isOpened(), 'Cannot capture source' # check that the camera started
    # Overlay images for the three escalating warning phases.
    img1 = cv2.imread("images/phase_1.jpg")
    img2 = cv2.imread("images/phase_2.jpg")
    img3 = cv2.imread("images/phase_2_red.jpg")
    img4 = cv2.imread("images/phase_3.jpg")
    #mixer.music.load("voice/voice_3.m4a")
    #print(img1)
    frames = 0
    count_frame = 0 # counts consecutive over-limit frames
    flag = 0 # crowding state (0: sparse, 1: crowded)
    start = time.time()
    print('-1')
    # NOTE(review): the condition is a generator expression, which is always
    # truthy — this loop never exits because a camera closed.
    while (cap[i].isOpened() for i in range(num_camera)): # while the cameras are running
        count=0 # number of people counted this frame
        point = [[] for i in range(num_camera)]
        for i in range(num_camera):
            ret[i], frame[i] = cap[i].read() # grab a captured frame
        # NOTE(review): also always truthy (generator expression) — read
        # failures are not actually detected here.
        if (ret[i] for i in range(num_camera)):
            # preprocess the captured frames for the CNN
            for i in range(num_camera):
                img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])
            if CUDA:
                for i in range(num_camera):
                    # NOTE(review): im_dim is never defined — this raises
                    # NameError on CUDA machines; dim[i] was presumably meant.
                    im_dim[i] = im_dim[i].cuda()
                    img[i] = img[i].cuda()
            # for i in range(num_camera):
            #     output[i] = model[i](Variable(img[i]), CUDA)
            output0 = model[0](Variable(img[0]), CUDA)
            output1 = model[1](Variable(img[1]), CUDA)
            # output2 = model[2](Variable(img[2]), CUDA)
            # output3 = model[3](Variable(img[3]), CUDA)
            #print("output:\n", output)
            # output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)
            output0 = write_results(output0, confidence, num_classes, nms = True, nms_conf = nms_thesh)
            output1 = write_results(output1, confidence, num_classes, nms = True, nms_conf = nms_thesh)
            # output2 = write_results(output2, confidence, num_classes, nms = True, nms_conf = nms_thesh)
            # output3 = write_results(output3, confidence, num_classes, nms = True, nms_conf = nms_thesh)
            # print("output", i, ":\n", output[i])
            # print(output.shape)
            """
            # FPSの表示
            if (type(output[i]) == int for i in range(num_camera)):
                print("表示")
                frames += 1
                print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
                # qキーを押すとFPS表示の終了
                key = cv2.waitKey(1)
                if key & 0xFF == ord('q'):
                    break
                continue
            for i in range(num_camera):
                output[i][:,1:5] = torch.clamp(output[i][:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
                output[i][:,[1,3]] *= frame[i].shape[1]
                output[i][:,[2,4]] *= frame[i].shape[0]
            """
            # # FPSの表示
            # if type(output0) == int:
            #     print("表示")
            #     frames += 1
            #     print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
            #     # qキーを押すとFPS表示の終了
            #     key = cv2.waitKey(1)
            #     if key & 0xFF == ord('q'):
            #         break
            #     continue
            # for i in range(num_camera):
            # Rescale box coordinates from network space back to frame pixels.
            output0[:,1:5] = torch.clamp(output0[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
            output0[:,[1,3]] *= frame[0].shape[1]
            output0[:,[2,4]] *= frame[0].shape[0]
            output1[:,1:5] = torch.clamp(output1[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
            output1[:,[1,3]] *= frame[1].shape[1]
            output1[:,[2,4]] *= frame[1].shape[0]
            # output2[:,1:5] = torch.clamp(output2[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
            # output2[:,[1,3]] *= frame[i].shape[1]
            # output2[:,[2,4]] *= frame[i].shape[0]
            # output3[:,1:5] = torch.clamp(output3[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
            # output3[:,[1,3]] *= frame[i].shape[1]
            # output3[:,[2,4]] *= frame[i].shape[0]
            colors = pkl.load(open("pallete", "rb"))
            #count = lambda x: count(x, orig_im, count) # count the people
            """
            for i in range(num_camera):
                list(map(lambda x: write(x, orig_im[i]), output[i]))
                print("count:\n",count)
            """
            # for i in range(num_camera):
            #     list(map(lambda x: write(x, orig_im[i]), output))
            # Draw each detection; write() also updates count/point globals.
            list(map(lambda x0: write(x0, orig_im[0],0), output0))
            list(map(lambda x1: write(x1, orig_im[1],1), output1))
            # print("x0",x0)
            # list(map(lambda x2: write(x2, orig_im[2],2), output2))
            # list(map(lambda x3: write(x3, orig_im[3],3), output3))
            # print("point0",point[0])
            # print("point1",point[1])
            print("count:\n",count)
            print("count_frame", count_frame)
            # Escalating warning overlay while the crowd limit is exceeded.
            if count > max:
                count_frame += 1
                #print("-1")
                if count_frame <= 50:
                    x=0
                    y=0
                    angle=20
                    scale=1.5
                    for i in range(num_camera):
                        imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)
                    if flag == 1:
                        # play.googlehome()
                        flag += 1
                    #mixer.music.play(1)
                elif count_frame <= 100:
                    x=-30
                    y=10
                    angle=20
                    scale=1.1
                    # Alternate the two phase-2 images to create a blink effect.
                    if count_frame%2==1:
                        for i in range(num_camera):
                            imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)
                    else:
                        for i in range(num_camera):
                            imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)
                    if flag == 2:
                        # play.googlehome()
                        flag += 1
                else:
                    x=-30
                    y=0
                    angle=20
                    scale=1.5
                    for i in range(num_camera):
                        imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)
                    if count_frame > 101: # <-- offset by two frames
                        print("\007") # warning beep
                        time.sleep(3)
                    if flag == 3:
                        # play.googlehome()
                        flag += 1
                # cv2.imshow("frame", imgpaste)
            else:
                count_frame = 0
                flag = 0
                #print("-2")
            # for i in range(num_camera):
            im_h_resize = hconcat_resize_min(orig_im)
            cv2.imshow("frame", im_h_resize )
            # play.googlehome()
            key = cv2.waitKey(1)
            # press 'q' to stop the video display
            if key & 0xFF == ord('q'):
                break
            frames += 1
            print("count_frame:\n", count_frame)
            print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
        else:
            break
| [
"73480314+ryo-jpg@users.noreply.github.com"
] | 73480314+ryo-jpg@users.noreply.github.com |
3b042aa37d0a4f05387f289756ac8cbe1d169d5c | 003349d700f7d762f2cc3124717e332d0091be1a | /www/src/Lib/asyncio/coroutines.py | 58f1db3f88c528324bed50fd77177ba810c60fb5 | [
"BSD-3-Clause"
] | permissive | Rocia/brython | bffce20d736f67b58587f503ad8b503232823fbb | 4c29ad017d0b91971d195f31f6a0e18f68e28c55 | refs/heads/master | 2021-01-15T13:18:54.149409 | 2017-08-06T09:33:15 | 2017-08-06T09:33:15 | 99,669,037 | 1 | 0 | null | 2017-08-08T08:23:59 | 2017-08-08T08:23:58 | null | UTF-8 | Python | false | false | 6,467 | py | __all__ = ['coroutine',
'iscoroutinefunction', 'iscoroutine']
import functools
import inspect
import opcode
import os
import sys
import traceback
import types
from . import events
from . import futures
from .log import logger
# Opcode of "yield from" instruction
_YIELD_FROM = opcode.opmap['YIELD_FROM']
# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below). That
# instance will log a message when the generator is never iterated
# over, which may happen when you forget to use "yield from" with a
# coroutine call. Note that the value of the _DEBUG flag is taken
# when the decorator is used, so to be of any use it must be set
# before you define your coroutines. A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
# Debug mode is driven by the PYTHONASYNCIODEBUG environment variable,
# unless environment inspection is disabled (-E / ignore_environment).
_DEBUG = (not sys.flags.ignore_environment
          and bool(os.environ.get('PYTHONASYNCIODEBUG')))
# Check for CPython issue #21209
def has_yield_from_bug():
    """Detect CPython issue #21209: ``generator.send(tuple)`` through a
    custom iterator inside ``yield from`` wrongly unpacks the tuple into
    positional arguments."""
    class MyGen:
        def __init__(self):
            self.send_args = None
        def __iter__(self):
            return self
        def __next__(self):
            return 42
        def send(self, *what):
            # Record exactly what arrived so the caller can tell whether
            # the tuple was unpacked by the interpreter.
            self.send_args = what
            return None
    def yield_from_gen(gen):
        yield from gen
    value = (1, 2, 3)
    gen = MyGen()
    coro = yield_from_gen(gen)
    next(coro)
    coro.send(value)
    # On a correct interpreter send_args == (value,); on a buggy one the
    # tuple was unpacked, so the comparison below is True.
    return gen.send_args != (value,)
# Evaluate once at import time, then drop the probe function.
_YIELD_FROM_BUG = has_yield_from_bug()
del has_yield_from_bug
class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.
    # Proxies the generator protocol to the wrapped generator and logs an
    # error at destruction time if the coroutine was never iterated.
    def __init__(self, gen, func):
        assert inspect.isgenerator(gen), gen
        self.gen = gen
        self.func = func
        # Capture where the coroutine was created for later diagnostics.
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
    # __name__, __qualname__, __doc__ attributes are set by the coroutine()
    # decorator
    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += ', created at %s:%s' % (frame[0], frame[1])
        return '<%s %s>' % (self.__class__.__name__, coro_repr)
    def __iter__(self):
        return self
    def __next__(self):
        return next(self.gen)
    if _YIELD_FROM_BUG:
        # For for CPython issue #21209: using "yield from" and a custom
        # generator, generator.send(tuple) unpacks the tuple instead of passing
        # the tuple unchanged. Check if the caller is a generator using "yield
        # from" to decide if the parameter should be unpacked or not.
        def send(self, *value):
            frame = sys._getframe()
            caller = frame.f_back
            assert caller.f_lasti >= 0
            if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
                value = value[0]
            return self.gen.send(value)
    else:
        def send(self, value):
            return self.gen.send(value)
    def throw(self, exc):
        return self.gen.throw(exc)
    def close(self):
        return self.gen.close()
    @property
    def gi_frame(self):
        # NOTE(review): always None here; the delegation to the wrapped
        # generator is commented out (looks like a brython-specific change —
        # confirm). This disables the "running at line" diagnostics below.
        return None
        #return self.gen.gi_frame
    @property
    def gi_running(self):
        return self.gen.gi_running
    @property
    def gi_code(self):
        return self.gen.__code__
    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        # f_lasti == -1 means the generator was never started: the coroutine
        # was created but never yielded from — log it as a likely bug.
        if frame is not None and frame.f_lasti == -1:
            msg = '%r was never yielded from' % self
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += ('\nCoroutine object created at '
                        '(most recent call last):\n')
                msg += tb.rstrip()
            logger.error(msg)
def coroutine(func):
    """Decorator to mark coroutines.
    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        # Plain function: wrap it in a generator that awaits any returned
        # Future/generator before returning the result.
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if isinstance(res, futures.Future) or inspect.isgenerator(res):
                res = yield from res
            # NOTE(review): setting gi_frame on an arbitrary result object
            # would raise AttributeError for most types in CPython; this looks
            # like a brython-specific modification — confirm.
            res.gi_frame = None
            return res
    if not _DEBUG:
        wrapper = coro
    else:
        # Debug mode: wrap the generator so never-iterated coroutines are
        # reported (see CoroWrapper.__del__).
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func)
            if w._source_traceback:
                del w._source_traceback[-1]
            w.__name__ = func.__name__
            if hasattr(func, '__qualname__'):
                w.__qualname__ = func.__qualname__
            w.__doc__ = func.__doc__
            return w
    # NOTE(review): gi_frame on the wrapper is a brython addition — confirm.
    wrapper.gi_frame = None
    wrapper._is_coroutine = True  # For iscoroutinefunction().
    return wrapper
def iscoroutinefunction(func):
    """Return True if *func* is a function decorated with ``@coroutine``."""
    # EAFP: the decorator stamps _is_coroutine on the wrapper it returns.
    try:
        return func._is_coroutine
    except AttributeError:
        return False
# Objects accepted as coroutines: plain generators and debug-mode wrappers.
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    return isinstance(obj, _COROUTINE_TYPES)
def _format_coroutine(coro):
    """Return a one-line human-readable description of *coro*.

    Describes whether the coroutine is running or done and where it was
    defined; falls back to ``"Coroutine: <name>"`` if introspection fails.
    """
    # Resolve the display name up front so the fallback below can always use
    # it. The original computed coro_name inside the try block, so the bare
    # except handler could itself raise NameError on an unbound coro_name.
    coro_name = getattr(coro, '__qualname__',
                        getattr(coro, '__name__', repr(coro)))
    try:
        assert iscoroutine(coro)
        filename = coro.__code__.co_filename
        if (isinstance(coro, CoroWrapper)
                and not inspect.isgeneratorfunction(coro.func)):
            # Wrapped plain function: report the wrapped function's source.
            filename, lineno = events._get_function_source(coro.func)
            if coro.gi_frame is None:
                coro_repr = ('%s() done, defined at %s:%s'
                             % (coro_name, filename, lineno))
            else:
                coro_repr = ('%s() running, defined at %s:%s'
                             % (coro_name, filename, lineno))
        elif coro.gi_frame is not None:
            # Live generator: report the line currently being executed.
            lineno = coro.gi_frame.f_lineno
            coro_repr = ('%s() running at %s:%s'
                         % (coro_name, filename, lineno))
        else:
            # Finished generator: report where it was defined.
            lineno = coro.__code__.co_firstlineno
            coro_repr = ('%s() done, defined at %s:%s'
                         % (coro_name, filename, lineno))
    except Exception:
        # Narrowed from a bare except: so KeyboardInterrupt/SystemExit are
        # not swallowed while formatting diagnostics.
        coro_repr = "Coroutine: %s" % coro_name
    return coro_repr
| [
"jonathan.verner@matfyz.cz"
] | jonathan.verner@matfyz.cz |
16fc7bcb55d17f616c53e65cc1ae9dafcc3968f6 | 215e3c24d9bf55c5951cdbab08d045663003331a | /Lib/hTools2/dialogs/font/__init__.py | 74e3e5f5c6b36643c9ecc94222334f4bf4af2a8f | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | hipertipo/hTools2 | 8ac14ee37d6ed78a5ce906e65befa889798cc53d | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | refs/heads/master | 2022-07-10T20:37:13.869044 | 2018-11-21T10:42:44 | 2018-11-21T10:42:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | # dialogs.font
'''Dialogs to do things to the current font.'''
from element_set import setElementDialog
from groups_print import printGroupsDialog
from glyphs_rename import batchRenameGlyphs
from info_copy import copyFontInfoDialog
from info_print import clearFontInfoDialog
from layer_delete import deleteLayerDialog
from layer_import import importUFOIntoLayerDialog
from spaces_create import createSpaceGlyphsDialog
from vmetrics_adjust import adjustVerticalMetrics
from vmetrics_transfer import transferVMetricsDialog
# Public API of the dialogs.font package, re-exported from the submodules
# imported above.
__all__ = [
    'adjustVerticalMetrics',
    'copyFontInfoDialog',
    'createSpaceGlyphsDialog',
    'deleteLayerDialog',
    'importUFOIntoLayerDialog',
    'printGroupsDialog',
    'clearFontInfoDialog',
    'batchRenameGlyphs',
    'setElementDialog',
    'transferVMetricsDialog',
]
| [
"gustavo@hipertipo.com"
] | gustavo@hipertipo.com |
b35deb44547fcef0a2d8632596c3f8f056a079ff | 55f945f29f78c0c0c6ac110df808126a38999be5 | /devel/lib/python2.7/dist-packages/mav_planning_msgs/msg/_Point2D.py | ad30cc3b60d081967f1ea88872955374f0dadcab | [] | no_license | aarchilla/NodeROS | 43e9f0d6931d1eb11057d229e20e2911fba943c2 | 4d79e3ffbbb19c11535613249fed2191ada63000 | refs/heads/master | 2020-06-16T20:00:39.218889 | 2019-07-07T18:36:17 | 2019-07-07T18:36:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | /home/esaii-admin/catkin_ws/devel/.private/mav_planning_msgs/lib/python2.7/dist-packages/mav_planning_msgs/msg/_Point2D.py | [
"aarchilla21@gmail.com"
] | aarchilla21@gmail.com |
e60dcbd49809656ea4dc38d9856068c52d115ebc | 75224b9a071a7e231c87cb984e1ac81d873a0165 | /finalsweek/game/program_api/game_deck_api.py | 97726ef723a21f6ee0a7388e4d0b29ad0ee1aa0c | [] | no_license | tckerr/finalsweek | 94fe740f9f1db100071d3d5b04d57d8aa48f9695 | dea12866919e5b37643e46d42d797d672dd83182 | refs/heads/master | 2021-01-23T03:52:52.777566 | 2017-05-20T00:58:20 | 2017-05-20T00:58:20 | 86,127,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | from game.definitions import OperationType, OperatorType
from game.operation.decorators import accepts_operation, accepts_operator
from game.scripting.api.program_child_api import ProgramChildApi
from trace.definitions import LogLevel, LogType
from trace.logger import Logger
# TODO: split action card and dismissal card APIs
class GameDeckApi(ProgramChildApi):
    """Deck operations: drawing action cards into actor hands and managing
    the per-phase discipline (dismissal) cards.

    NOTE(review): relies on ``self.data`` / ``self.program_api`` supplied by
    ProgramChildApi and on the accepts_* decorators — their semantics are not
    visible here.
    """
    @accepts_operation(OperationType.Draw)
    @accepts_operator(OperatorType.Add)
    def draw_action_cards(self, operation):
        """Draw ``operation.value`` cards from the action deck into the
        targeted actor's hand; returns the drawn cards."""
        operation = self._mutate(operation)
        actor = self.program_api.actors.get(operation.targeted_actor_id)
        action_card_deck = self.data.action_card_deck
        # Fail fast when the deck cannot satisfy the request.
        self._assert_deck_size(action_card_deck, operation.value)
        drawn = [self._draw_action_card(action_card_deck, actor) for _ in range(0, operation.value)]
        self.program_api.increment_metadata("drawn_action_cards", len(drawn))
        return drawn
    def set_discipline_card_for_phase(self, phase):
        """Pop the next discipline card and assign it to *phase*."""
        discipline_card = self.data.discipline_card_deck.cards.pop()
        self.data.phase_discipline_cards[phase.id] = discipline_card
        self._log_discipline_card_draw(discipline_card, phase.phase_type)
        return discipline_card
    def get_discipline_card_for_phase(self, phase_id):
        """Return the discipline card previously assigned to *phase_id*."""
        return self.data.phase_discipline_cards[phase_id]
    def _draw_action_card(self, action_card_deck, actor):
        # Move the top card of the deck into the actor's hand and log it.
        card = action_card_deck.cards.pop()
        actor.action_card_hand.cards.append(card)
        self._log_action_card_draw(card)
        return card
    @staticmethod
    def _assert_deck_size(action_card_deck, quantity):
        """Raise when fewer than *quantity* cards remain in the deck."""
        deck_length = len(action_card_deck.cards)
        if deck_length < quantity:
            message = "Cannot draw {quantity} cards from a pile of size {pile_size}."
            raise Exception(message.format(quantity=quantity, pile_size=deck_length))
    @staticmethod
    def _log_action_card_draw(card):
        message = "Drew action card '{}', pc: {}".format(card.template.name, card.id)
        Logger.log(message, level=LogLevel.Info, log_type=LogType.GameLogic)
    @staticmethod
    def _log_discipline_card_draw(discipline_card, phase_type):
        message = "Drew dismissal card '{}' for phase '{}'".format(discipline_card.template.name, phase_type)
        Logger.log(message, level=LogLevel.Info, log_type=LogType.GameLogic)
| [
"tckerr@gmail.com"
] | tckerr@gmail.com |
d58981503e1312e14b20bf7ce3a549340b34779d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/142/usersdata/231/62183/submittedfiles/av2_p3_civil.py | c48a48605ffd795c38ee98f06a74012f88f0d143 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
def media(a):
soma = 0
for i in range(0,len(a),1):
soma = soma + a[i]
media = soma/len(a)
return (media)
#ESCREVA AS DEMAIS FUNÇÕES
def soma1(g):
media=media(a)
cont=0
for i in range(0,len(g),1):
cont=cont+(g[i]-media(a))
return cont
def entradaLista(n):
a = []
for i in range(0,n,1):
valor = float(input('Digite um valor: '))
a.append(valor)
return (a)
n = int(input('Digite o tamanho da lista: '))
x = entradaLista(n)
y = entradaLista(n)
p = #CALCULE O VALOR DE P
p = abs(p)
print('%.4f' % p)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
33872138ea3dceeb7ec6c48bc1b0b17ef7f988e6 | 91e0036d8e976b09c9a9e3281f33559c4b163412 | /Api/admin.py | 39cdfb2cdc1835fbf3a59932392c2fba1d9315fa | [] | no_license | sirajmuneer123/LibraryApp | b1b77f817ec909899715b484788eb6f9b2e1a853 | f41543ece1f513edc85508cd45b4b68c1e4b3cbc | refs/heads/master | 2020-09-09T00:14:32.235343 | 2019-11-15T13:50:18 | 2019-11-15T13:50:18 | 221,285,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from django.apps import apps
from django.contrib import admin
from django.contrib.admin.sites import AlreadyRegistered
# Register every model of the "Api" app with the Django admin, skipping any
# model that another module already registered.
app_models = apps.get_app_config('Api').get_models()
for model in app_models:
    try:
        admin.site.register(model)
    except AlreadyRegistered:
        pass
| [
"sirajmuneer4@gmail.com"
] | sirajmuneer4@gmail.com |
c30df6cdff0df25e70d3951f98834cff940e8c4f | b333dc607a2f1556f6a8adb6d16dc88fa8a30c8b | /portal/apps/epubparser/views.py | a2dac12c6e6f988b3cc1a028f3cb4ed50e01b92c | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hernan0216/utopia-cms | 6558f8f600620c042dd79c7d2edf18fb77caebb8 | 48b48ef9acf8e3d0eb7d52601a122a01da82075c | refs/heads/main | 2023-02-06T10:31:35.525180 | 2020-12-15T17:43:28 | 2020-12-15T17:43:28 | 321,775,279 | 1 | 0 | BSD-3-Clause | 2020-12-15T19:59:17 | 2020-12-15T19:59:16 | null | UTF-8 | Python | false | false | 8,191 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import redirect
from django.template import RequestContext
import os, re
import ebooklib
from ebooklib import epub, utils
from bs4 import BeautifulSoup, Tag
from django.views.generic import ListView, FormView, View, DetailView
from django.core.urlresolvers import reverse_lazy, reverse
from django.contrib import messages
from models import EpubFile
from forms import UploadEpubForm, EpubChangeSectionForm
import traceback
from lxml import etree
from lxml.etree import tostring
from core.models import Article, Section
class FileAddView(FormView):
    """Upload form for EPUB files.

    Persists the upload only when the file name ends in ``.epub`` and reports
    the outcome through the Django messages framework.
    """
    form_class = UploadEpubForm
    success_url = reverse_lazy('epub-home')
    template_name = "epubparser/templates/add_epub_parser.html"

    def form_valid(self, form):
        section = form.cleaned_data['section']
        uploaded = form.cleaned_data['f']
        is_epub = str(uploaded).endswith('.epub')
        if is_epub:
            form.save(commit=True)
            messages.success(self.request, 'Epub ingresado correctamente', fail_silently=True)
        else:
            messages.error(self.request, 'El archivo no es un EPUB')
        return super(FileAddView, self).form_valid(form)
class FileListView(ListView):
    """Paginated listing of uploaded EPUB files, newest first."""
    model = EpubFile
    queryset = EpubFile.objects.order_by('-id')
    context_object_name = "files"
    template_name = "epubparser/templates/index_epub_parser.html"
    paginate_by = 5
class ParseView(DetailView):
    """Parse an uploaded EPUB and create one Article per "Subcuadro-nota" div.

    NOTE(review): this module is Python 2 (``print content`` below).
    """
    model = EpubFile
    context_object_name = "files"
    template_name = "epubparser/templates/index_epub_parser.html"
    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super(ParseView, self).get_context_data(**kwargs)
        epub_file = context['files'].f
        file = EpubFile.objects.get(f=epub_file)
        epub_section = file.section
        try:
            book = epub.read_epub(epub_file)
            for item in book.get_items():
                # 9 is the ebooklib item-type code for XHTML documents
                if (item.get_type() is 9):
                    content = item.get_body_content()
                    print content
                    # Replace inline styles with CSS classes in the EPUB's XHTML.
                    content = replace_style(content,
                        '<span class="char-style-override-1">',
                        '</span>', '<span>', '</span> ')
                    content = replace_style(content,
                        '<span class="char-style-override-3">',
                        '</span>', '_', '_')
                    content = replace_style(content,
                        '<span class="Muy-destacado char-style-override-3">',
                        '</span>', '', '')
                    content = replace_style(content,
                        '<span class="Muy-destacado char-style-override-4">',
                        '</span>', '', '')
                    content = replace_style(content,
                        '<p class="Subt-tulo">',
                        '</p>', '<p class="Normal">\nS>', '</p>')
                    content = replace_style(content,
                        '<p class="Primer para-style-override-1">',
                        '</p>', '<p class="Primer">', '</p>')
                    content = replace_style(content,
                        '<span>', '</span>', ' ', ' ')
                    soup = BeautifulSoup(content, 'html.parser')
                    # Each "Subcuadro-nota" div holds one article.
                    subcuadro_nota = soup('div', {'class': 'Subcuadro-nota'})
                    for e in subcuadro_nota:
                        tag = etree.fromstring(str(e))
                        # Extract headline, deck, lead and body paragraphs via XPath.
                        titulo = ''.join(tag.xpath('//p[starts-with(@class, "T-tulo")]/text()'))
                        bajada = ''.join(tag.xpath('//p[@class="Bajada"]/text()'))
                        copete = ''.join(tag.xpath('//p[starts-with(@class, "Copete")]/text()'))
                        parrafos = '\n\n'.join(
                            tag.xpath('(//p[@class="Primer"]|//p[@class="Normal"]|//p[@class="Normal"]/span '
                                      '|//p[@class="Subt-tulo"]|//p[@class="Autor"])/text()'))
                        if titulo:
                            try:
                                article = Article(
                                    headline=titulo,
                                    deck=bajada,
                                    lead=copete,
                                    #home_lead=copete,
                                    body=parrafos,
                                )
                                article.save()
                                # Attach the article to the EPUB's section.
                                ar = Article.objects.get(id=article.id)
                                ar.sections.add(epub_section.id)
                                ar.save()
                                success_msg = 'Articulo generado correctamente: %s' % article.headline
                                messages.success(self.request, success_msg, fail_silently=True)
                            except:
                                traceback.print_exc()
                                messages.error(self.request, 'Hubo un error al procesar el archivo')
        except:
            traceback.print_exc()
            messages.error(self.request, 'Hubo un error al procesar el archivo')
        files = EpubFile.objects.order_by('-id')
        section = Section.objects.all()
        context['files'] = files
        context['section'] = section
        return context
class FileChangeView(DetailView):
    """Detail page that lets the user move an EPUB file to another section."""
    model = EpubFile
    context_object_name = "files"
    template_name = "epubparser/templates/change_epub_parser.html"

    def get_context_data(self, **kwargs):
        """Extend the default context with all sections and a pre-filled form."""
        context = super(FileChangeView, self).get_context_data(**kwargs)
        epub = context['files']
        current_section = epub.section
        context['section'] = Section.objects.all()
        context['files'] = epub
        form = EpubChangeSectionForm()
        form.f = epub
        form.section = current_section
        context['changeForm'] = form
        return context
def changeSection(request):
    """POST handler that reassigns an EPUB file to the selected section."""
    if request.method == 'POST':
        try:
            epub_id = request.POST.get('id_epub')
            section_id = request.POST.get('section')
            epub = EpubFile.objects.get(id=epub_id)
            epub.section = Section.objects.get(id=section_id)
            epub.save()
        except:
            # Kept from the original: any failure is reported to the user as
            # a missing section selection.
            traceback.print_exc()
            messages.error(request, 'Debe seleccionar una SECCIÓN')
    return redirect(reverse('epub-home'))
def replace_style(content, tag_abre_style, tag_cierra_style, tag_change_style, tag_close_style):
    """Replace every ``tag_abre_style``/``tag_cierra_style`` pair in *content*
    with ``tag_change_style``/``tag_close_style``.

    The closing tag is rewritten first so the recorded position of the
    opening tag remains valid.
    """
    while True:
        open_pos = content.find(tag_abre_style)
        # No more opening tags: done.
        if open_pos == -1:
            break
        close_pos = content.find(tag_cierra_style, open_pos)
        if close_pos == -1:
            # Unbalanced markup: no closing tag after the opening one. Stop
            # instead of corrupting the tail of the string (the original code
            # spliced at position -1 in this case).
            break
        content = replace_at_position(content, tag_cierra_style, tag_close_style, close_pos)
        content = replace_at_position(content, tag_abre_style, tag_change_style, open_pos)
    return content
# Replace, inside the full string, the old substring (located at position
# ``pos``) with the new substring.
def replace_at_position(cadena_total, cadena_vieja, cadena_nueva, pos):
    """Return *cadena_total* with *cadena_vieja* at index *pos* replaced by
    *cadena_nueva*."""
    return cadena_total[:pos] + cadena_nueva + cadena_total[pos + len(cadena_vieja):]
| [
"apacheco@ladiaria.com.uy"
] | apacheco@ladiaria.com.uy |
70c7ef7f60eb803955e8f403aaebada84e807bda | 36f6b1d7a7355ee21e387b2a4f56ebd8dd044b2c | /snippets/try_bench.py | f416d2344f0af895df5c80c490df937f4b35e6bc | [] | no_license | sbl1996/hinas | e826936537094d7de5ba36cc78dcdb8e4de076ac | e2db5ebc219a2f7dc1b1344e5d13c97177467e08 | refs/heads/main | 2023-02-20T17:52:38.719867 | 2021-01-21T02:22:05 | 2021-01-21T02:22:05 | 301,271,639 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | import numpy as np
from hinas.models.nas_bench_201.search.darts import Network
from hinas.models.nas_bench_201.api import SimpleNASBench201
api = SimpleNASBench201("/Users/hrvvi/Code/study/pytorch/datasets/NAS-Bench-201-v1_1-096897-simple.pth")
net = Network(4, 8)
val_accs = []
ranks = []
for i in range(100):
net._initialize_alphas()
s = net.genotype()
val_accs.append(np.mean(api.query_eval_acc(s)))
ranks.append(api.query_eval_acc_rank(s))
| [
"sbl1996@126.com"
] | sbl1996@126.com |
2eb572de05a2ef49a132ba18d76c92d43849d6f6 | b253452291fe7a0ebd5673bf9f6e8ead4a6825c8 | /fireplace/cards/gvg/mage.py | 1204db22fc7a1a231b295c5a1f01cf38318431cb | [] | no_license | rafzi/fireplace | 3c75b3892848635f5de264f01fd1431c34ef6983 | 8fc6198c1b855b448e2fceebe7bdab5e6436e2b7 | refs/heads/master | 2021-01-14T08:51:54.403001 | 2015-07-08T13:45:09 | 2015-07-08T13:45:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | from ..utils import *
##
# Minions
# Snowchugger
class GVG_002:
	# Freeze any character this minion itself damages.
	events = [
		Damage().on(
			lambda self, target, amount, source: source is self and [Freeze(target)] or []
		)
	]
# Goblin Blastmage
class GVG_004:
	def action(self):
		# When powered up, deal 1 damage to a random enemy character, four times.
		if self.poweredUp:
			return [Hit(RANDOM_ENEMY_CHARACTER, 1) * 4]
# Illuminator
class GVG_089:
	# At the end of the controller's turn, heal the friendly hero for 4 if
	# the player controls at least one secret.
	events = [
		OWN_TURN_END.on(
			lambda self, player: player.secrets and [Heal(FRIENDLY_HERO, 4)] or []
		)
	]
# Spells
# Flamecannon
class GVG_001:
	# Deal 4 damage to a random enemy minion.
	action = [Hit(RANDOM_ENEMY_MINION, 4)]
# Unstable Portal
class GVG_003:
	# TODO
	def action(self):
		# Add a random minion to the controller's hand and apply the
		# mana-cost-reduction buff (GVG_003e).
		card = self.controller.give(RandomMinion())
		self.buff(card, "GVG_003e")
# Echo of Medivh
class GVG_005:
	# Put a copy of each friendly minion into the controller's hand.
	action = [Give(CONTROLLER, Copy(FRIENDLY_MINIONS))]
| [
"jerome@leclan.ch"
] | jerome@leclan.ch |
e660785218f7aaf8b80cd9306885cd20180a5a38 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc008/A/4900621.py | 339205eb206cbdbeac2ed6cbc083a7ebc5b7e05f | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | s = input().split()
S = int(s[0])
T = int(s[1])
if 1 <= S <= T <= 1000:
print(T - S +1) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
93ee83b0a17ef7ea90b7e103fcfa8ebc52d9406c | d1c7d493eb01ba3636482ad452aa540e253ff0e9 | /python-3/beginner/1164.py | 5bb22dbc2ce9dc78e4b7a81857d93a04b81e579d | [
"MIT"
] | permissive | MisaelAugusto/uri | 411aa8b3915c9c046ce46ac180daab7950922109 | 22bee72edf44f939d7a290383336b4d061faecbb | refs/heads/master | 2022-12-05T08:32:22.999188 | 2020-08-31T12:31:05 | 2020-08-31T12:31:05 | 268,656,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # -*- coding: utf-8 -*-
N = int(input())
for i in range(N):
X = int(input())
total = 0
for i in range(1, int((X / 2)) + 1):
if (X % i == 0):
total += i
print(("%d eh perfeito" % X) if (total == X) else ("%d nao eh perfeito" % X)) | [
"misael.costa@ccc.ufcg.edu.br"
] | misael.costa@ccc.ufcg.edu.br |
0d69a25061e8410a69ca57c98149a3ff32ec0dbd | b02a5015ecc61414834c4b24e5f33168eb99070a | /CCscripts/DrawCuts.py | 4aa074ccf23b22710695a909fad2956b4ae69a06 | [
"MIT"
] | permissive | mrvollger/SDA | f1aa8edf9989125d7e0c0f6ae159bca495915826 | 3d5e9ec8d1e7ac97121c33c6be80d635392631cf | refs/heads/master | 2023-05-13T05:24:54.665854 | 2023-05-07T23:40:25 | 2023-05-07T23:40:25 | 101,452,926 | 29 | 5 | MIT | 2019-11-21T18:08:13 | 2017-08-26T00:58:01 | Python | UTF-8 | Python | false | false | 605 | py | #!/usr/bin/env python
import argparse

import ABPUtils

# Render every cut of a graph as its own colored subgraph image.
parser = argparse.ArgumentParser(description="Plot cuts individually")
parser.add_argument("graph", help="Original graph file.")
parser.add_argument("cuts", help="Cuts file.")
parser.add_argument("--out", help="Output file.", default="./")
args = parser.parse_args()

graph = ABPUtils.ReadGraph(args.graph)
cuts = ABPUtils.ReadCuts(args.cuts)

# Assign one color per cut before extracting subgraphs.
ABPUtils.ColorGraphByCut(graph, cuts)
for index, cut in enumerate(cuts, start=1):
    subgraph = graph.subgraph(list(cut))
    subgraph.graph['NumVertexColors'] = len(cuts)
    ABPUtils.DrawGraph(subgraph, "{}subgraph.{}.png".format(args.out, index))
"mrvollger@gmail.com"
] | mrvollger@gmail.com |
04674a600e226e31aa0f8316d18ab01272775691 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_subcontracts.py | 7377f23ac79db909499e908ee8af2088973acc0e | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py |
from xai.brain.wordbase.verbs._subcontract import _SUBCONTRACT


# Class header: inflected verb form "subcontracts" built on the base entry.
class _SUBCONTRACTS(_SUBCONTRACT):
    """Word-base entry for the verb form "SUBCONTRACTS"."""

    def __init__(self):
        super().__init__()
        self.name = "SUBCONTRACTS"
        self.specie = 'verbs'
        self.basic = "subcontract"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8d5a83daead27736aecc6e7ac623324c76e906dc | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/3-lists_20200405001109.py | 5a0bda1428374ca1f3e46e140e448d5438925749 | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # A List is a collection which is ordered and changeable. Allows duplicate members.
# Lists are ordered, mutable collections and may contain duplicates.

# Literal syntax.
numbers = [3, 23, 111, 3423, 352]
print(numbers)
print(type(numbers))

# The list() constructor accepts any iterable, here a tuple.
listNum = list((213, 11, 342, 2342, 55432))
print(listNum)

fruits = ['Apples', 'Oranges', 'Grapes', 'Pears']
# Zero-based indexing: the third element.
print(fruits[2])
# Number of elements.
print(len(fruits))

# Add to the end (equivalent to append).
fruits.insert(len(fruits), 'Mango')
print(fruits)
# Drop the first occurrence of a value.
del fruits[fruits.index('Grapes')]
print(fruits)
# Insert at a given position.
fruits.insert(2, 'Coconut')
print(fruits)
# Delete by position.
del fruits[4]
print(fruits)
# Reverse in place via slice assignment.
fruits[:] = fruits[::-1]
print(fruits)
# (A sort example would follow here, e.g. fruits.sort())
| [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
f2eee898f0944c4b6faea8454a3765ba7fb32f35 | 428ee863e50fecfaedbbf64f3da95e9acb746ae4 | /src/tamsin/sysmod.py | ee7671e8e91b5727f007a422a134f6c0d9003edd | [
"BSD-3-Clause",
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | catseye/Tamsin | ba53a0ee4ac882486a958e6ba7225f19eea763ef | 1c9e7ade052d734fa1753d612f2426ac067d5252 | refs/heads/master | 2021-01-17T09:21:25.202969 | 2016-03-31T15:00:14 | 2016-03-31T15:00:14 | 19,212,331 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,364 | py | # encoding: UTF-8
# Copyright (c)2014 Chris Pressey, Cat's Eye Technologies.
# Distributed under a BSD-style license; see LICENSE for more information.
# Python version of Tamsin's $ module.
import sys
from tamsin.term import Atom, Constructor
from tamsin.scanner import EOF
TRANSLATOR = {'return': 'return_', 'print': 'print_'}
def call(name, interpreter, args):
    """Dispatch the $-module builtin for `name` (after reserved-word aliasing)."""
    target = TRANSLATOR.get(name, name)
    if target not in globals():
        raise NotImplementedError(target)
    return globals()[target](interpreter, args)
def arity(name):
    """Return the declared argument count of the builtin for `name`."""
    target = TRANSLATOR.get(name, name)
    if target not in globals():
        raise NotImplementedError(target)
    return globals()[target].arity
def return_(self, args):
return (True, args[0])
return_.arity = 1
def fail(self, args):
return (False, args[0])
fail.arity = 1
def expect(self, args):
upcoming_token = self.scanner.peek()
term = args[0]
token = str(term)
if self.scanner.consume(token):
return (True, term)
else:
return (False,
Atom(self.scanner.error_message("'%s'" % token, upcoming_token))
)
expect.arity = 1
def eof(self, args):
if self.scanner.peek() is EOF:
return (True, '')
else:
return (False,
Atom(self.scanner.error_message('EOF', self.scanner.peek()))
)
eof.arity = 0
def any(self, args):
if self.scanner.peek() is not EOF:
return (True, Atom(self.scanner.scan()))
else:
return (False,
Atom(self.scanner.error_message('any token', EOF))
)
any.arity = 0
def alnum(self, args):
if (self.scanner.peek() is not EOF and
self.scanner.peek()[0].isalnum()):
return (True, Atom(self.scanner.scan()))
else:
return (False,
Atom(self.scanner.error_message('alphanumeric', self.scanner.peek()))
)
alnum.arity = 0
def upper(self, args):
if (self.scanner.peek() is not EOF and
self.scanner.peek()[0].isupper()):
return (True, Atom(self.scanner.scan()))
else:
return (False,
Atom(self.scanner.error_message('uppercase', self.scanner.peek()))
)
upper.arity = 0
def startswith(self, args):
if (self.scanner.peek() is not EOF and
self.scanner.peek()[0].startswith((str(args[0]),))):
return (True, Atom(self.scanner.scan()))
else:
return (False,
Atom(self.scanner.error_message("'%s...'" % args[0], self.scanner.peek()))
)
startswith.arity = 1
def equal(self, args):
    # $.equal: succeed with the first term iff it matches the second.
    # `!= False` is deliberate: match() may return a falsy-but-successful
    # result, so only an explicit False counts as a mismatch.
    # (assumption: match() returns False on failure — confirm in tamsin.term)
    if args[0].match(args[1]) != False:
        return (True, args[0])
    else:
        return (False, Atom("term '%s' does not equal '%s'" %
                            (args[0], args[1])))
equal.arity = 2
def unquote(self, args):
    # $.unquote(q, l, r): strip quote text `l` from the front and `r` from the
    # back of `q`; fail if either is not present.
    q = str(args[0])
    l = str(args[1])
    r = str(args[2])
    # NOTE(review): if len(q) < len(l) + len(r) the prefix and suffix can
    # overlap (e.g. q="a", l=r="a") and the stripped result is "" — confirm
    # this is intended rather than a failure case.
    if (q.startswith(l) and q.endswith(r)):
        if len(r) == 0:
            # q[len(l):-0] would be empty, so the empty-suffix case is special.
            return (True, Atom(q[len(l):]))
        return (True, Atom(q[len(l):-len(r)]))
    else:
        return (False, Atom("term '%s' is not quoted with '%s' and '%s'" %
                            (q, l, r)))
unquote.arity = 3
def mkterm(self, args):
t = args[0]
l = args[1]
contents = []
while isinstance(l, Constructor) and l.tag == 'list':
contents.append(l.contents[0])
l = l.contents[1]
if contents:
return (True, Constructor(t.text, contents))
else:
return (True, t)
mkterm.arity = 2
def reverse(self, args):
return (True, args[0].reversed(args[1]))
reverse.arity = 2
def print_(self, args):
val = args[0]
sys.stdout.write(str(val))
sys.stdout.write("\n")
return (True, val)
print_.arity = 1
def emit(self, args):
val = args[0]
sys.stdout.write(str(val))
return (True, val)
emit.arity = 1
def repr(self, args):
val = args[0]
val = Atom(val.repr())
return (True, val)
repr.arity = 1
# Module-level counter backing $.gensym; increases monotonically per process.
counter = 0
def gensym(self, args):
    # $.gensym(prefix): return a fresh atom "<prefix><n>" with a process-unique n.
    # NOTE(review): uses a module global, so symbols are unique per process,
    # not per interpreter instance — confirm single-threaded usage.
    global counter
    counter += 1
    return (True, Atom(str(args[0]) + str(counter)))
gensym.arity = 1
def hexbyte(self, args):
return (True, Atom(chr(int(args[0].text + args[1].text, 16))))
hexbyte.arity = 2
def format_octal(self, args):
return (True, Atom("%o" % ord(args[0].text[0])))
format_octal.arity = 1
def length(self, args):
return (True, Atom(str(len(str(args[0])))))
length.arity = 1
| [
"cpressey@catseye.tc"
] | cpressey@catseye.tc |
dd9795fe720d8bf7f51b07a3bf2c790181a0397d | 380a47268c5975473a2e7c38c747bc3bdbd981b1 | /benchmark/third_party/transformers/src/transformers/models/data2vec/modeling_data2vec_vision.py | e63ee0d32cf1879d8d939daa93b2807a091002b6 | [
"Apache-2.0"
] | permissive | FMInference/FlexGen | 07aa9b1918c19b02077e13ad07e76840843810dd | d34f7b4b43ed87a374f394b0535ed685af66197b | refs/heads/main | 2023-07-24T02:29:51.179817 | 2023-07-21T22:38:31 | 2023-07-21T22:38:31 | 602,270,517 | 6,821 | 411 | Apache-2.0 | 2023-07-07T22:59:24 | 2023-02-15T21:18:53 | Python | UTF-8 | Python | false | false | 53,001 | py | # coding=utf-8
# Copyright 2022 Meta Platforms and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Data2VecVision model."""
import collections.abc
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
ImageClassifierOutput,
SemanticSegmenterOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_data2vec_vision import Data2VecVisionConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "Data2VecVisionConfig"
_FEAT_EXTRACTOR_FOR_DOC = "BeitFeatureExtractor"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/data2vec-vision-base"
_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/data2vec-vision-base-ft1k"
_IMAGE_CLASS_EXPECTED_OUTPUT = "remote control, remote"
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/data2vec-vision-base-ft1k",
# See all Data2VecVision models at https://huggingface.co/models?filter=data2vec-vision
]
@dataclass
# Copied from transformers.models.beit.modeling_beit.BeitModelOutputWithPooling with Beit->Data2VecVision
class Data2VecVisionModelOutputWithPooling(BaseModelOutputWithPooling):
"""
Class for outputs of [`Data2VecVisionModel`].
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
*config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
will be returned.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    While training, each sample in the batch is independently zeroed with
    probability `drop_prob`; survivors are rescaled by 1 / keep_prob so the
    expected activation is unchanged. At eval time (or drop_prob == 0) this is
    the identity. Naming follows timm's "drop path" rather than "DropConnect";
    see https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956.
    """
    # Identity when nothing can be dropped.
    if not training or drop_prob == 0.0:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample: shape (batch, 1, 1, ...) broadcasts over
    # every non-batch dimension, so works for 2D and 4D tensors alike.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = torch.rand(mask_shape, dtype=input.dtype, device=input.device)
    mask = (mask + keep_prob).floor_()  # binarize: 1 with probability keep_prob
    return input.div(keep_prob) * mask
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->Data2VecVision
class Data2VecVisionDropPath(nn.Module):
    """Per-sample stochastic depth: randomly zeroes whole residual paths while training."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional form; only active when self.training is True.
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown inside the module's repr, e.g. "Data2VecVisionDropPath(p=0.1)".
        return f"p={self.drop_prob}"
# Copied from transformers.models.beit.modeling_beit.BeitEmbeddings with Beit->Data2VecVision
class Data2VecVisionEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.

    Output shape is (batch_size, num_patches + 1, hidden_size): one prepended
    CLS token followed by the patch sequence.
    """
    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        # Learnable CLS token, prepended to every sequence.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        if config.use_mask_token:
            # Learnable embedding substituted for masked patches (masked-image modeling).
            self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        else:
            self.mask_token = None
        self.patch_embeddings = Data2VecVisionPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        if config.use_absolute_position_embeddings:
            # +1 accounts for the CLS position.
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
        else:
            self.position_embeddings = None
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None) -> torch.Tensor:
        embeddings = self.patch_embeddings(pixel_values)
        batch_size, seq_len, _ = embeddings.size()

        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        if bool_masked_pos is not None:
            # NOTE: assumes mask tokens are enabled (use_mask_token=True) when a
            # mask is supplied; otherwise self.mask_token is None and this fails.
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # replace the masked visual tokens by mask_tokens
            w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1 - w) + mask_tokens * w

        embeddings = torch.cat((cls_tokens, embeddings), dim=1)
        if self.position_embeddings is not None:
            embeddings = embeddings + self.position_embeddings
        embeddings = self.dropout(embeddings)

        return embeddings
# Copied from transformers.models.beit.modeling_beit.BeitPatchEmbeddings with Beit->Data2VecVision
class Data2VecVisionPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer, using a non-overlapping strided convolution.
    """

    def __init__(self, config):
        super().__init__()

        def _to_2tuple(value):
            # Accept either a single int or an (h, w) iterable.
            return value if isinstance(value, collections.abc.Iterable) else (value, value)

        image_size = _to_2tuple(config.image_size)
        patch_size = _to_2tuple(config.patch_size)
        grid = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = config.num_channels
        self.num_patches = grid[0] * grid[1]
        self.patch_shape = grid

        # Each kernel application embeds exactly one patch.
        self.projection = nn.Conv2d(
            config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size
        )

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        # (B, C, H, W) -> (B, hidden, H/p, W/p) -> (B, hidden, patches) -> (B, patches, hidden)
        return self.projection(pixel_values).flatten(2).transpose(1, 2)
# Copied from transformers.models.beit.modeling_beit.BeitSelfAttention with Beit->Data2VecVision
class Data2VecVisionSelfAttention(nn.Module):
    """Multi-head self-attention with optional relative position bias.

    A per-layer bias is built when `window_size` is given; alternatively a
    shared bias tensor may be passed into forward(). The `key` projection has
    no bias term, matching the BEiT reference this module was copied from.
    """

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            # Fixed: the message previously interpolated `{config.hidden_size,}`
            # (trailing comma -> one-element tuple), rendering e.g. "(768,)".
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

        if window_size:
            self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
        else:
            self.relative_position_bias = None

    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional["Data2VecVisionRelativePositionBias"] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Add relative position bias if present.
        if self.relative_position_bias is not None:
            attention_scores = attention_scores + self.relative_position_bias().unsqueeze(0)

        # Add shared relative position bias if provided.
        if relative_position_bias is not None:
            attention_scores = attention_scores + relative_position_bias

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
# Copied from transformers.models.beit.modeling_beit.BeitSelfOutput with Beit->Data2VecVision
class Data2VecVisionSelfOutput(nn.Module):
    """
    The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due
    to the layernorm applied before each block.
    """

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
        # `input_tensor` and `gamma` are accepted for interface parity only;
        # the caller applies the residual add and layer scaling itself.
        return self.dropout(self.dense(hidden_states))
# Copied from transformers.models.beit.modeling_beit.BeitAttention with Beit->Data2VecVision
class Data2VecVisionAttention(nn.Module):
    # Wraps self-attention plus its output projection, and supports head pruning.
    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        self.attention = Data2VecVisionSelfAttention(config, window_size=window_size)
        self.output = Data2VecVisionSelfOutput(config)
        # Indices of heads already removed; kept so repeated pruning re-maps correctly.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads in place (no-op for an empty set)."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        # Output projection is pruned along its input dimension (dim=1).
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional["Data2VecVisionRelativePositionBias"] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions, relative_position_bias)

        # The second argument (residual input) is unused by SelfOutput; the
        # residual add happens in Data2VecVisionLayer.
        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.beit.modeling_beit.BeitIntermediate with Beit->Data2VecVision
class Data2VecVisionIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size, then the activation."""

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be the name of an activation (looked up in ACT2FN)
        # or a callable supplied directly.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.beit.modeling_beit.BeitOutput with Beit->Data2VecVision
class Data2VecVisionOutput(nn.Module):
    """Feed-forward projection back to hidden_size with dropout (residual added by the caller)."""

    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.dropout(self.dense(hidden_states))
# Copied from transformers.models.beit.modeling_beit.BeitLayer with Beit->Data2VecVision,BEiT->Data2VecVision
class Data2VecVisionLayer(nn.Module):
    """This corresponds to the Block class in the timm implementation.

    Pre-norm transformer block: LN -> attention -> (scale, drop-path) -> residual,
    then LN -> MLP -> (scale, drop-path) -> residual.
    """

    def __init__(
        self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0
    ) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = Data2VecVisionAttention(config, window_size=window_size)
        self.intermediate = Data2VecVisionIntermediate(config)
        self.output = Data2VecVisionOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Stochastic depth; identity when this layer's rate is 0.
        self.drop_path = Data2VecVisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        init_values = config.layer_scale_init_value
        if init_values > 0:
            # LayerScale: learnable per-channel scaling of each sub-block's output.
            self.lambda_1 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
            self.lambda_2 = nn.Parameter(init_values * torch.ones((config.hidden_size)), requires_grad=True)
        else:
            self.lambda_1, self.lambda_2 = None, None

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        relative_position_bias: Optional["Data2VecVisionRelativePositionBias"] = None,
    ) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in Data2VecVision, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
            relative_position_bias=relative_position_bias,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # apply lambda_1 if present (LayerScale on the attention branch)
        if self.lambda_1 is not None:
            attention_output = self.lambda_1 * attention_output

        # first residual connection
        hidden_states = self.drop_path(attention_output) + hidden_states

        # in Data2VecVision, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)

        layer_output = self.intermediate(layer_output)
        layer_output = self.output(layer_output)

        if self.lambda_2 is not None:
            layer_output = self.lambda_2 * layer_output

        # second residual connection
        layer_output = self.drop_path(layer_output) + hidden_states

        outputs = (layer_output,) + outputs

        return outputs
# Copied from transformers.models.beit.modeling_beit.BeitRelativePositionBias with Beit->Data2VecVision
class Data2VecVisionRelativePositionBias(nn.Module):
    """Learnable relative position bias over a (Wh, Ww) patch grid plus the CLS token.

    A table holds one bias vector (per attention head) for every possible
    relative offset, plus 3 dedicated entries for cls->token, token->cls and
    cls->cls; a precomputed index buffer gathers it into an attention-shaped map.
    """

    def __init__(self, config: Data2VecVisionConfig, window_size: tuple) -> None:
        super().__init__()
        self.window_size = window_size
        # All distinct (dy, dx) offsets in the grid, plus 3 CLS-related slots.
        self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros(self.num_relative_distance, config.num_attention_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token 2 cls & cls to cls

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(window_size[0])
        coords_w = torch.arange(window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        # Offsets are in [-(W-1), W-1]; shift and scale so each (dy, dx) pair
        # maps to a unique non-negative table index.
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * window_size[1] - 1
        relative_position_index = torch.zeros(
            size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype
        )
        relative_position_index[1:, 1:] = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # Row/column 0 is the CLS token; give it the 3 dedicated table slots.
        relative_position_index[0, 0:] = self.num_relative_distance - 3
        relative_position_index[0:, 0] = self.num_relative_distance - 2
        relative_position_index[0, 0] = self.num_relative_distance - 1

        self.register_buffer("relative_position_index", relative_position_index)

    def forward(self) -> torch.Tensor:
        # Gather the table into a (seq, seq, heads) map, then move heads first.
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1
        )  # Wh*Ww,Wh*Ww,nH

        return relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
# Copied from transformers.models.beit.modeling_beit.BeitEncoder with Beit->Data2VecVision
class Data2VecVisionEncoder(nn.Module):
    """Stack of Data2VecVisionLayer blocks with optional shared relative position bias."""

    def __init__(self, config: Data2VecVisionConfig, window_size: Optional[tuple] = None) -> None:
        super().__init__()
        self.config = config
        if config.use_shared_relative_position_bias:
            # One bias module shared by every layer (vs. per-layer bias below).
            self.relative_position_bias = Data2VecVisionRelativePositionBias(config, window_size=window_size)
        else:
            self.relative_position_bias = None

        # stochastic depth decay rule: drop-path rate grows linearly with depth.
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layer = nn.ModuleList(
            [
                Data2VecVisionLayer(
                    config,
                    window_size=window_size if config.use_relative_position_bias else None,
                    drop_path_rate=dpr[i],
                )
                for i in range(config.num_hidden_layers)
            ]
        )

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                # Closure is needed so the non-tensor `output_attentions` flag
                # does not pass through torch.utils.checkpoint.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                # NOTE(review): this path never forwards the shared
                # relative_position_bias, so the bias is silently skipped under
                # gradient checkpointing — confirm against upstream intent.
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    layer_head_mask,
                )
            else:
                relative_position_bias = (
                    self.relative_position_bias() if self.relative_position_bias is not None else None
                )
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions, relative_position_bias)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
# Copied from transformers.models.beit.modeling_beit.BeitPreTrainedModel with Beit->Data2VecVision,beit->data2vec_vision
class Data2VecVisionPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = Data2VecVisionConfig
    base_model_prefix = "data2vec_vision"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding embedding at zero after the normal init.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggled recursively by PreTrainedModel.gradient_checkpointing_enable/disable;
        # only the encoder carries the flag.
        if isinstance(module, Data2VecVisionEncoder):
            module.gradient_checkpointing = value
DATA2VEC_VISION_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`Data2VecVisionConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
DATA2VEC_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`BeitFeatureExtractor`]. See
[`BeitFeatureExtractor.__call__`] for details.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Data2VecVision Model transformer outputting raw hidden-states without any specific head on top.",
    DATA2VEC_VISION_START_DOCSTRING,
)
# Copied from transformers.models.beit.modeling_beit.BeitModel with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,True->False
class Data2VecVisionModel(Data2VecVisionPreTrainedModel):
    def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = False) -> None:
        super().__init__(config)
        self.config = config
        self.embeddings = Data2VecVisionEmbeddings(config)
        self.encoder = Data2VecVisionEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
        # When mean pooling is used, the final LayerNorm is applied inside the
        # pooler (see Data2VecVisionPooler), so the trunk ends with an identity.
        self.layernorm = (
            nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        )
        self.pooler = Data2VecVisionPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        # The patch-embedding module serves as the input embedding for the
        # generic PreTrainedModel API.
        return self.embeddings.patch_embeddings
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=Data2VecVisionModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, Data2VecVisionModelOutputWithPooling]:
        # Fall back to the config defaults for any unspecified output options.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(pixel_values, bool_masked_pos)
        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            # Tuple layout: (sequence_output[, pooled_output], *encoder extras).
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]
        return Data2VecVisionModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
# Copied from transformers.models.beit.modeling_beit.BeitPooler with Beit->Data2VecVision
class Data2VecVisionPooler(nn.Module):
    """
    Pools the encoder output either by mean-pooling the patch tokens followed
    by a LayerNorm (``config.use_mean_pooling``) or by taking the final hidden
    state of the [CLS] token.
    """
    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        if config.use_mean_pooling:
            self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        else:
            self.layernorm = None
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        if self.layernorm is None:
            # No mean pooling configured: the [CLS] token representation is the pool.
            return hidden_states[:, 0]
        # Average the patch-token states (everything after [CLS]) and normalize.
        return self.layernorm(hidden_states[:, 1:, :].mean(dim=1))
@add_start_docstrings(
    """
    Data2VecVision Model transformer with an image classification head on top (a linear layer on top of the average of
    the final hidden states of the patch tokens) e.g. for ImageNet.
    """,
    DATA2VEC_VISION_START_DOCSTRING,
)
# Copied from transformers.models.beit.modeling_beit.BeitForImageClassification with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,beit->data2vec_vision
class Data2VecVisionForImageClassification(Data2VecVisionPreTrainedModel):
    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=True)
        # Classifier head
        # num_labels == 0 means feature extraction only: pass the pooled output through.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.data2vec_vision(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Classify from the pooled representation (tuple index 1 when no dict).
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the loss type once from num_labels / label dtype; the
            # decision is cached on the config for subsequent calls.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            # outputs[2:] skips (sequence_output, pooled_output) from the base model.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# Copied from transformers.models.beit.modeling_beit.BeitConvModule with Beit->Data2VecVision
class Data2VecVisionConvModule(nn.Module):
    """
    Conv2d -> BatchNorm2d -> ReLU bundled into a single block, mirroring the
    ConvModule of OpenMMLab's mmsegmentation
    (https://github.com/open-mmlab/mmsegmentation).
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        # Submodule names (conv/bn/activation) are part of the state-dict layout.
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # convolution -> normalization -> non-linearity, as a single expression
        return self.activation(self.bn(self.conv(input)))
# Copied from transformers.models.beit.modeling_beit.BeitPyramidPoolingBlock with Beit->Data2VecVision
class Data2VecVisionPyramidPoolingBlock(nn.Module):
    """One PPM branch: adaptive average pooling followed by a 1x1 conv module."""
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            Data2VecVisionConvModule(in_channels, channels, kernel_size=1),
        ]
        # Register each stage under its positional index ("0", "1") so that
        # parameters are tracked by nn.Module.
        for index, stage in enumerate(self.layers):
            self.add_module(str(index), stage)
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        result = input
        for stage in self.layers:
            result = stage(result)
        return result
# Copied from transformers.models.beit.modeling_beit.BeitPyramidPoolingModule with Beit->Data2VecVision
class Data2VecVisionPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    Args:
        pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
            Module.
        in_channels (int): Input channels.
        channels (int): Channels after modules, before conv_seg.
        align_corners (bool): align_corners argument of F.interpolate.
    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        # One pooling block per scale, registered under its positional index
        # so parameters are tracked by nn.Module.
        for index, scale in enumerate(pool_scales):
            branch = Data2VecVisionPyramidPoolingBlock(
                pool_scale=scale, in_channels=in_channels, channels=channels
            )
            self.blocks.append(branch)
            self.add_module(str(index), branch)
    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        # Run every branch, then upsample each result back to the spatial
        # size of the input feature map.
        target_size = x.size()[2:]
        return [
            nn.functional.interpolate(
                branch(x), size=target_size, mode="bilinear", align_corners=self.align_corners
            )
            for branch in self.blocks
        ]
# Copied from transformers.models.beit.modeling_beit.BeitUperHead with Beit->Data2VecVision
class Data2VecVisionUperHead(nn.Module):
    """
    Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
    [UPerNet](https://arxiv.org/abs/1807.10221).
    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """
    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__()
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = [config.hidden_size] * 4  # e.g. [768, 768, 768, 768]
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
        # PSP Module
        self.psp_modules = Data2VecVisionPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        # Fuses the top feature map with its pyramid-pooled variants.
        self.bottleneck = Data2VecVisionConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = Data2VecVisionConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = Data2VecVisionConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        self.fpn_bottleneck = Data2VecVisionConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
    def psp_forward(self, inputs):
        # Pyramid-pool the top-level feature map and fuse it with the original
        # via channel concatenation + a 3x3 bottleneck conv.
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))
        # build top-down path
        # Each level receives the upsampled coarser level (classic FPN fusion).
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        # Upsample every level to the finest resolution before concatenating.
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
# Copied from transformers.models.beit.modeling_beit.BeitFCNHead with Beit->Data2VecVision
class Data2VecVisionFCNHead(nn.Module):
    """
    Fully Convolution Networks for Semantic Segmentation. This head is implemented of
    [FCNNet](https://arxiv.org/abs/1411.4038>).
    Args:
        config (Data2VecVisionConfig): Configuration.
        in_channels
        kernel_size (int): The kernel size for convs in the head. Default: 3.
        dilation (int): The dilation rate for convs in the head. Default: 1.
    Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
    """
    def __init__(
        self,
        config: Data2VecVisionConfig,
        in_index: int = 2,
        kernel_size: int = 3,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.in_channels = config.hidden_size
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        # "same"-style padding, scaled by the dilation rate.
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            Data2VecVisionConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        # (num_convs - 1) additional conv blocks; the loop index is unused.
        for i in range(self.num_convs - 1):
            convs.append(
                Data2VecVisionConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        # NOTE(review): when num_convs == 0 the conv built above is discarded
        # (self.convs becomes Identity) — it is never registered as a submodule.
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            # Fuses the head input with the conv-stack output before classifying.
            self.conv_cat = Data2VecVisionConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
@add_start_docstrings(
    """
    Data2VecVision Model transformer with a semantic segmentation head on top e.g. for ADE20k, CityScapes.
    """,
    DATA2VEC_VISION_START_DOCSTRING,
)
# Copied from transformers.models.beit.modeling_beit.BeitForSemanticSegmentation with BEIT->DATA2VEC_VISION,Beit->Data2VecVision,microsoft/beit-base-finetuned-ade-640-640->facebook/data2vec-vision-base,beit->data2vec_vision
class Data2VecVisionForSemanticSegmentation(Data2VecVisionPreTrainedModel):
    def __init__(self, config: Data2VecVisionConfig) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.data2vec_vision = Data2VecVisionModel(config, add_pooling_layer=False)
        # FPNs
        # Four parallel branches that rescale the equal-resolution ViT feature
        # maps into a pyramid: 4x up, 2x up, identity, 2x down.
        self.fpn1 = nn.Sequential(
            nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
            nn.BatchNorm2d(config.hidden_size),
            nn.GELU(),
            nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
        )
        self.fpn2 = nn.Sequential(
            nn.ConvTranspose2d(config.hidden_size, config.hidden_size, kernel_size=2, stride=2),
        )
        self.fpn3 = nn.Identity()
        self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Semantic segmentation head(s)
        self.decode_head = Data2VecVisionUperHead(config)
        self.auxiliary_head = Data2VecVisionFCNHead(config) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
    def compute_loss(self, logits, auxiliary_logits, labels):
        """
        Cross-entropy on the logits upsampled to the label resolution, plus the
        weighted auxiliary-head loss when auxiliary logits are provided.
        """
        # upsample logits to the images' original size
        upsampled_logits = nn.functional.interpolate(
            logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
        )
        loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
        loss = loss_fct(upsampled_logits, labels)
        # Bugfix: the auxiliary loss was previously computed unconditionally,
        # raising UnboundLocalError whenever the auxiliary head is disabled
        # (auxiliary_logits is None) and labels are passed.
        if auxiliary_logits is not None:
            upsampled_auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
            )
            # compute weighted loss
            loss = loss + self.config.auxiliary_loss_weight * loss_fct(upsampled_auxiliary_logits, labels)
        return loss
    @add_start_docstrings_to_model_forward(DATA2VEC_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoFeatureExtractor, Data2VecVisionForSemanticSegmentation
        >>> from PIL import Image
        >>> import requests
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/data2vec-vision-base")
        >>> model = Data2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")
        >>> inputs = feature_extractor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> # logits are of shape (batch_size, num_labels, height, width)
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        outputs = self.data2vec_vision(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=True,  # we need the intermediate hidden states
            return_dict=return_dict,
        )
        encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
        # only keep certain features, and reshape
        # note that we do +1 as the encoder_hidden_states also includes the initial embeddings
        features = [feature for idx, feature in enumerate(encoder_hidden_states) if idx + 1 in self.config.out_indices]
        batch_size = pixel_values.shape[0]
        # NOTE: assumes square inputs of config.image_size.
        patch_resolution = self.config.image_size // self.config.patch_size
        # Drop the [CLS] token and reshape (batch, seq, dim) -> (batch, dim, H, W).
        features = [
            x[:, 1:, :].permute(0, 2, 1).reshape(batch_size, -1, patch_resolution, patch_resolution) for x in features
        ]
        # apply FPNs
        ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
        for i in range(len(features)):
            features[i] = ops[i](features[i])
        logits = self.decode_head(features)
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                loss = self.compute_loss(logits, auxiliary_logits, labels)
        if not return_dict:
            # Skip the forced hidden_states entry when the caller did not ask for it.
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )
| [
"sqy1415@gmail.com"
] | sqy1415@gmail.com |
e7436a8ac1f5eadf84e2b41f3f537b5b70bdc951 | fda673f9450d6d78c542a699fe8b9064f65d9057 | /spider/ms_main_update_score.py | a97faedfdf27c2b6a8d2aba18398cc1e66b7101c | [] | no_license | asdlei99/MovieSite | 1221f8103ec4af63b4f9b80d89d7f7bf5a21a1c3 | 2a7ee09a308e13df58f6b43f6288908c68fc6271 | refs/heads/master | 2020-03-26T16:19:19.059927 | 2018-06-08T09:01:05 | 2018-06-08T09:01:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,946 | py | # coding=utf-8
"""
Update douban_addr and socre for db
"""
import urllib
import re
import os
from ms_utils.db_helper import connect_db
from ms_utils.html_helper import Douban
from ms_utils.common import get_webdriver, get_html_content, get_douban_sn, \
get_douban_url
from ms_utils.log import Log
from ms_constants import CATES_ENG_CH, UPDATE_SCORE_PATH, MOVIE_NAME_ENG, \
TV_NAME_ENG, ANIME_NAME_ENG, SHOW_NAME_ENG
LOG = Log()
Douban = Douban()
def _replace_symbol(s):
res = s.replace("'", "'").replace("&", "&").replace(""", '"')
return res
def _update(url, sid, cate_eng, ch_name, foreign_name, douban_sn_old, imdb_sn_old,
            score_old, conn, force=False):
    """
    Scrape one douban detail page and write the fresh score (and, when
    missing, the douban/imdb serial numbers) back to the movie_<cate_eng> row.
    :param url: douban detail page url to scrape
    :param sid: primary key of the row in movie_<cate_eng>
    :param cate_eng: category key ('movie'/'tv'/'anime'/'show')
    :param ch_name: Chinese title currently stored in the db
    :param foreign_name: foreign title currently stored in the db
    :param douban_sn_old: stored douban serial number (falsy when absent)
    :param imdb_sn_old: stored imdb serial number (falsy when absent)
    :param score_old: stored score, used only for logging
    :param conn: open db connection
    :param force: Force to update even when the scraped names differ
    :return: 'ok' on success, 'error' on db failure, 'mismatch' otherwise
    """
    content = get_html_content(url)
    cate_chn = CATES_ENG_CH.get(cate_eng)
    db_value = Douban.get_douban_text_info(
        content, cate_eng, cate_chn, enable_log=False)
    # The scraper returns the sentinel string 'continue' when the page
    # could not be parsed for this category.
    if db_value == 'continue':
        return 'mismatch'
    else:
        (name1, name2, year, director, screenwriter, actor, mtype,
        region, date_show, date, running_time, score_new, othername,
        imdb_sn_new, intro) = db_value
    score_new = float(score_new)
    # Unless forced, only update when both titles match the db record.
    condition = True if force else (name1 == ch_name and name2 == foreign_name)
    if condition:
        # NOTE(review): the SQL below is built by string interpolation from
        # scraped values — vulnerable to injection; parameterized queries
        # would be safer.
        set_clause = 'SET score="%s"' % score_new
        if not douban_sn_old:
            douban_sn_new = get_douban_sn(url)
            if douban_sn_new:
                set_clause += ',douban_sn="%s"' % douban_sn_new
        if not imdb_sn_old:
            set_clause += ',imdb_sn="%s"' % imdb_sn_new
        sql = ('UPDATE movie_%s %s WHERE id=%s' % (cate_eng, set_clause, sid))
        try:
            _cur = conn.cursor()
            _cur.execute(sql)
            conn.commit()
            _cur.close()
        except Exception as e:
            LOG.info('%5s FAILED %s %s %s' % (cate_chn, name1, name2, str(e)),
                     path=UPDATE_SCORE_PATH,
                     filename='update_score.log')
            return 'error'
        # Log old and new score for traceability.
        LOG.info('%5s %3.1f(%3.1f) %s %s' %
                 (cate_chn, score_old, score_new, name1, name2),
                 path=UPDATE_SCORE_PATH,
                 filename='update_score.log')
        return 'ok'
    else:
        return 'mismatch'
def _compare_info(d_url, ch_name_db, foreign_name_db,
                  director_db, actor_db, year_db, region_db, cate_eng, conn):
    """
    Score how well a douban page matches the db record by weighted criteria:
    name (30), year (10), region (10), director (25), actor (25).

    NOTE(review): unfinished (see TODO below) — year/region are never
    compared, ``weight`` is never returned (the function falls through and
    implicitly returns None), and ``year_db``/``region_db``/``conn`` are
    currently unused.
    :return:
    """
    d_content = get_html_content(d_url, url_log=False)
    text_info = Douban.get_douban_text_info(d_content, cate_eng)
    # Non-tuple results are parser sentinels (e.g. 'continue'); pass through.
    if not isinstance(text_info, tuple):
        return text_info
    (name1, name2, year, director, screenwriter, actor, mtype,
     region, date_show, date, running_time, score, othername, imdb,
     intro) = text_info
    # Only the first '/'-separated entry of each db field is compared.
    director_db = director_db.split('/')[0].strip()
    actor_db = actor_db.split('/')[0].strip()
    weight = 0
    # name
    if ch_name_db == name1 and foreign_name_db == name2:
        LOG.debug('Name match (30)')
        weight += 30
    if director_db in director:
        LOG.debug('Director match (25)')
        weight += 25
    if actor_db in actor:
        LOG.debug('Actor match (25)')
        weight += 25
    # TODO
def main():
    """
    Update Douban link and score
    Walks each category table starting at id=10; rows with a stored douban_sn
    are force-updated directly, otherwise a douban search is attempted and
    candidate pages are compared against the db record.
    :return:
    """
    if not os.path.exists(UPDATE_SCORE_PATH):
        os.makedirs(UPDATE_SCORE_PATH)
    conn = connect_db()
    driver = get_webdriver()
    for cate_eng in ('movie', 'tv', 'anime', 'show'):
        sid = 10
        # NOTE(review): this loop has no termination — when fetchone()
        # returns None (rows exhausted) sid is incremented forever, and the
        # douban_sn branch below never increments sid at all, so it retries
        # the same row indefinitely.
        while True:
            print sid
            cur = conn.cursor()
            cur.execute('SELECT ch_name, foreign_name, score, director, actor, '
                        'year, region, douban_sn, imdb_sn FROM movie_%s '
                        'WHERE id=%d' % (cate_eng, sid))
            res = cur.fetchone()
            cur.close()
            if res:
                res = (item.encode('utf-8') for item in res)
                (ch_name, foreign_name, score_old, director, actor, year,
                 region, douban_sn_old, imdb_sn_old) = res
                score_old = float(score_old)
                if douban_sn_old:
                    # Serial number known: update straight from the detail page.
                    url = get_douban_url(douban_sn_old)
                    _update(url, sid, cate_eng, ch_name, foreign_name,
                            douban_sn_old, imdb_sn_old, score_old, conn, force=True)
                else:
                    urls = Douban.get_douban_search_result(ch_name, driver)
                    for url in urls:
                        # Compare scraped douban info against the db record.
                        res = _compare_info(url, ch_name, foreign_name,
                                            director, actor, year,
                                            region, cate_eng, conn)
                        if res:
                            result = _update(url, sid, cate_eng, ch_name,
                                             foreign_name, douban_sn_old, imdb_sn_old,
                                             score_old, conn)
                            if result == 'ok' or result == 'error':
                                sid += 1
                                break
                            elif result == 'mismatch':
                                continue
                    # NOTE(review): this failure log runs after the for loop
                    # regardless of whether an update succeeded above.
                    LOG.info('%5s FAILED %s %s' % (CATES_ENG_CH.get(cate_eng),
                                                   ch_name, foreign_name),
                             path=UPDATE_SCORE_PATH,
                             filename='update_score.log')
                    sid += 1
            else:
                sid += 1
if __name__ == '__main__':
    main()
"1225191678@qq.com"
] | 1225191678@qq.com |
7a2d0fe093e4636025d1c89079263aa7eacf5957 | c6fca34b2c9cb973d9d65d23e58e40d4513e173a | /aoc2016/day25.py | f1a75d25732ce30e7e8d149f847be0996fb35b09 | [] | no_license | tomkooij/AdventOfCode | 8ff47c027c887194b0d441f61a8db172c4e260ea | 7890d45a01498dcb48972a7e311888ce6f003bd2 | refs/heads/master | 2021-08-15T19:46:21.869137 | 2021-01-18T06:37:50 | 2021-01-18T06:37:50 | 48,421,868 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # adventofcode assembunny
from collections import defaultdict
def run(len_output, program=None, **kwargs):
    """Interpret an assembunny program (AoC 2016, day 25).

    Args:
        len_output: stop and return as soon as the ``out`` instruction has
            emitted this many values.
        program: list of tokenized instructions; defaults to the module-level
            ``instructions`` parsed from the puzzle input (added, with a
            backward-compatible default, so the interpreter can be driven
            directly in tests).
        **kwargs: initial register values, e.g. ``a=7``; unset registers
            default to 0.  The instruction pointer lives in register 'ip'.

    Returns:
        The list of emitted values once it reaches ``len_output`` entries,
        or the final value of register 'a' if the program halts first.
    """
    code = instructions if program is None else program
    regs = defaultdict(int)

    def int_or_reg(token):
        # A token is either an integer literal or a register name.
        try:
            return int(token)
        except ValueError:
            return regs[token]

    output = []
    for key, value in kwargs.items():
        regs[key] = value
    while regs['ip'] < len(code):
        opcode, *rest = code[regs['ip']]
        if opcode == 'cpy':
            regs[rest[1]] = int_or_reg(rest[0])
            regs['ip'] += 1
        elif opcode == 'inc':
            regs[rest[0]] += 1
            regs['ip'] += 1
        elif opcode == 'dec':
            regs[rest[0]] -= 1
            regs['ip'] += 1
        elif opcode == 'jnz':
            if int_or_reg(rest[0]):
                regs['ip'] += int_or_reg(rest[1])
            else:
                regs['ip'] += 1
        elif opcode == 'out':
            output.append(int_or_reg(rest[0]))
            if len(output) >= len_output:
                return output
            regs['ip'] += 1
        else:
            # was `assert False`, which is silently stripped under `python -O`
            raise ValueError('unknown instruction: %r' % (opcode,))
    return regs['a']
# NOTE(review): the '\i' in the path is a literal backslash (not a recognized
# escape) — it works but emits a DeprecationWarning on modern Python;
# presumably r'input\input25.txt' was intended.
with open('input\input25.txt') as f:
    instructions = [line.rstrip('\n').split() for line in f.readlines()]
print('part A: ')
# stupid brute force
for i in range(1000):
    if not i % 25:
        # progress indicator every 25 candidates
        print(i)
    # first register value producing a repeating 0,1 clock signal (16 samples)
    if run(16, a=i)== 8*[0, 1]:
        print('part A: ', i)
        break
"tomkooij@tomkooij.nl"
] | tomkooij@tomkooij.nl |
b2b85535c8249764df743da35a36a587fbee07e3 | 9905901a2beae3ff4885fbc29842b3c34546ffd7 | /nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/basic/vserver.py | dd3e1af728c8e9185c6e0c6bd682e7968ced6772 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] | permissive | culbertm/NSttyPython | f354ebb3dbf445884dbddb474b34eb9246261c19 | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | refs/heads/master | 2020-04-22T17:07:39.654614 | 2019-02-13T19:07:23 | 2019-02-13T19:07:23 | 170,530,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,121 | py | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vserver(base_resource) :
""" Configuration for virtual server resource. """
	def __init__(self) :
		# Backing fields for the NITRO resource attributes; each is exposed
		# through the generated property pair of the same name below.
		self._name = None
		self._backupvserver = None
		self._redirecturl = None
		self._cacheable = None
		self._clttimeout = None
		self._somethod = None
		self._sopersistence = None
		self._sopersistencetimeout = None
		self._sothreshold = None
		self._pushvserver = None
	# ------------------------------------------------------------------
	# Generated accessor properties (NITRO SDK boilerplate).  The
	# try/except wrappers are emitted by the code generator; the bodies
	# cannot normally raise, so the handlers simply re-raise.
	# ------------------------------------------------------------------
	@property
	def name(self) :
		r"""The name of the virtual server to be removed.<br/>Minimum length =  1.
		"""
		try :
			return self._name
		except Exception as e:
			raise e

	@name.setter
	def name(self, name) :
		r"""The name of the virtual server to be removed.<br/>Minimum length =  1
		"""
		try :
			self._name = name
		except Exception as e:
			raise e

	@property
	def backupvserver(self) :
		r"""The name of the backup virtual server for this virtual server.<br/>Minimum length =  1.
		"""
		try :
			return self._backupvserver
		except Exception as e:
			raise e

	@backupvserver.setter
	def backupvserver(self, backupvserver) :
		r"""The name of the backup virtual server for this virtual server.<br/>Minimum length =  1
		"""
		try :
			self._backupvserver = backupvserver
		except Exception as e:
			raise e

	@property
	def redirecturl(self) :
		r"""The URL where traffic is redirected if the virtual server in the system becomes unavailable.<br/>Minimum length =  1.
		"""
		try :
			return self._redirecturl
		except Exception as e:
			raise e

	@redirecturl.setter
	def redirecturl(self, redirecturl) :
		r"""The URL where traffic is redirected if the virtual server in the system becomes unavailable.<br/>Minimum length =  1
		"""
		try :
			self._redirecturl = redirecturl
		except Exception as e:
			raise e

	@property
	def cacheable(self) :
		r"""Use this option to specify whether a virtual server (used for load balancing or content switching) routes requests to the cache redirection virtual server before sending it to the configured servers.<br/>Possible values = YES, NO.
		"""
		try :
			return self._cacheable
		except Exception as e:
			raise e

	@cacheable.setter
	def cacheable(self, cacheable) :
		r"""Use this option to specify whether a virtual server (used for load balancing or content switching) routes requests to the cache redirection virtual server before sending it to the configured servers.<br/>Possible values = YES, NO
		"""
		try :
			self._cacheable = cacheable
		except Exception as e:
			raise e

	@property
	def clttimeout(self) :
		r"""The timeout value in seconds for idle client connection.<br/>Maximum length =  31536000.
		"""
		try :
			return self._clttimeout
		except Exception as e:
			raise e

	@clttimeout.setter
	def clttimeout(self, clttimeout) :
		r"""The timeout value in seconds for idle client connection.<br/>Maximum length =  31536000
		"""
		try :
			self._clttimeout = clttimeout
		except Exception as e:
			raise e

	@property
	def somethod(self) :
		r"""The spillover factor. The system will use this value to determine if it should send traffic to the backupvserver when the main virtual server reaches the spillover threshold.<br/>Possible values = CONNECTION, DYNAMICCONNECTION, BANDWIDTH, HEALTH, NONE.
		"""
		try :
			return self._somethod
		except Exception as e:
			raise e

	@somethod.setter
	def somethod(self, somethod) :
		r"""The spillover factor. The system will use this value to determine if it should send traffic to the backupvserver when the main virtual server reaches the spillover threshold.<br/>Possible values = CONNECTION, DYNAMICCONNECTION, BANDWIDTH, HEALTH, NONE
		"""
		try :
			self._somethod = somethod
		except Exception as e:
			raise e

	@property
	def sopersistence(self) :
		r"""The state of the spillover persistence.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
		"""
		try :
			return self._sopersistence
		except Exception as e:
			raise e

	@sopersistence.setter
	def sopersistence(self, sopersistence) :
		r"""The state of the spillover persistence.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
		"""
		try :
			self._sopersistence = sopersistence
		except Exception as e:
			raise e

	@property
	def sopersistencetimeout(self) :
		r"""The spillover persistence entry timeout.<br/>Default value: 2<br/>Minimum length =  2<br/>Maximum length =  1440.
		"""
		try :
			return self._sopersistencetimeout
		except Exception as e:
			raise e

	@sopersistencetimeout.setter
	def sopersistencetimeout(self, sopersistencetimeout) :
		r"""The spillover persistence entry timeout.<br/>Default value: 2<br/>Minimum length =  2<br/>Maximum length =  1440
		"""
		try :
			self._sopersistencetimeout = sopersistencetimeout
		except Exception as e:
			raise e

	@property
	def sothreshold(self) :
		r"""The spillover threshold value.<br/>Minimum length =  1<br/>Maximum length =  4294967294.
		"""
		try :
			return self._sothreshold
		except Exception as e:
			raise e

	@sothreshold.setter
	def sothreshold(self, sothreshold) :
		r"""The spillover threshold value.<br/>Minimum length =  1<br/>Maximum length =  4294967294
		"""
		try :
			self._sothreshold = sothreshold
		except Exception as e:
			raise e

	@property
	def pushvserver(self) :
		r"""The lb vserver of type PUSH/SSL_PUSH to which server pushes the updates received on the client facing non-push lb vserver.<br/>Minimum length =  1.
		"""
		try :
			return self._pushvserver
		except Exception as e:
			raise e

	@pushvserver.setter
	def pushvserver(self, pushvserver) :
		r"""The lb vserver of type PUSH/SSL_PUSH to which server pushes the updates received on the client facing non-push lb vserver.<br/>Minimum length =  1
		"""
		try :
			self._pushvserver = pushvserver
		except Exception as e:
			raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vserver_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vserver
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete vserver.
"""
try :
if type(resource) is not list :
deleteresource = vserver()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
r""" Use this API to update vserver.
"""
try :
if type(resource) is not list :
updateresource = vserver()
updateresource.name = resource.name
updateresource.backupvserver = resource.backupvserver
updateresource.redirecturl = resource.redirecturl
updateresource.cacheable = resource.cacheable
updateresource.clttimeout = resource.clttimeout
updateresource.somethod = resource.somethod
updateresource.sopersistence = resource.sopersistence
updateresource.sopersistencetimeout = resource.sopersistencetimeout
updateresource.sothreshold = resource.sothreshold
updateresource.pushvserver = resource.pushvserver
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def enable(cls, client, resource) :
r""" Use this API to enable vserver.
"""
try :
if type(resource) is not list :
enableresource = vserver()
if type(resource) != type(enableresource):
enableresource.name = resource
else :
enableresource.name = resource.name
return enableresource.perform_operation(client,"enable")
except Exception as e :
raise e
@classmethod
def disable(cls, client, resource) :
r""" Use this API to disable vserver.
"""
try :
if type(resource) is not list :
disableresource = vserver()
if type(resource) != type(disableresource):
disableresource.name = resource
else :
disableresource.name = resource.name
return disableresource.perform_operation(client,"disable")
except Exception as e :
raise e
class Cacheable:
YES = "YES"
NO = "NO"
class Somethod:
CONNECTION = "CONNECTION"
DYNAMICCONNECTION = "DYNAMICCONNECTION"
BANDWIDTH = "BANDWIDTH"
HEALTH = "HEALTH"
NONE = "NONE"
class Sopersistence:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class vserver_response(base_response) :
def __init__(self, length=1) :
self.vserver = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vserver = [vserver() for _ in range(length)]
| [
"mdculbert@marathonpetroleum.com"
] | mdculbert@marathonpetroleum.com |
eacfea7464bd43192aea85f068759c41257baf5d | ca9e037a6ac24117b7250922c607ce9e5609e82b | /docrec/compatibility/sib18.py | 5298bf7d77cec38ab770661ad3d5f55c27f43caa | [] | no_license | thiagopx/deeprec-cvpr20 | 4d4e1752fa33a17c3da26cc11433531a2e08d40c | 3f04c494574790cbc390930cd60e0e207ec15b10 | refs/heads/master | 2022-06-02T18:31:32.664352 | 2022-05-16T20:50:35 | 2022-05-16T20:50:35 | 248,400,773 | 2 | 3 | null | 2022-05-16T20:55:20 | 2020-03-19T03:21:03 | Python | UTF-8 | Python | false | false | 4,872 | py | import sys
import cv2
import numpy as np
import math
from time import time
from skimage.filters import threshold_sauvola, threshold_otsu
import tensorflow as tf
from .algorithm import Algorithm
from ..models.squeezenet import SqueezeNet
class Sib18(Algorithm):
''' Proposed algorithm. '''
def __init__(
self, arch, weights_path, vshift, input_size, num_classes,
thresh_method='sauvola', seed=None, offset=None, sess=None
):
assert arch in ['sn']
assert thresh_method in ['otsu', 'sauvola']
self.sess = sess
if self.sess is None:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
# preparing model
self.offset = offset
self.vshift = vshift
self.input_size_h, self.input_size_w = input_size
self.images_ph = tf.placeholder(
tf.float32, name='images_ph', shape=(None, self.input_size_h, self.input_size_w, 3) # channels last
)
self.batch = np.ones((2 * vshift + 1, self.input_size_h, self.input_size_w, 3), dtype=np.float32)
# model
model = SqueezeNet(self.images_ph, include_top=True, num_classes=num_classes, mode='test', channels_first=False, sess=self.sess)
logits = model.output
probs = tf.nn.softmax(logits)
self.comp_op = tf.reduce_max(probs[:, 1])
self.disp_op = tf.argmax(probs[:, 1]) - vshift
# result
self.compatibilities = None
self.displacements = None
# init model
self.sess.run(tf.global_variables_initializer())
model.load_weights(weights_path)
self.inference_time = 0
self.preparation_time = 0
self.pairwise_time = 0
self.thresh_method = thresh_method
def _extract_features(self, strip):
''' Extract image around the border. '''
image = cv2.cvtColor(strip.filled_image(), cv2.COLOR_RGB2GRAY)
thresh_func = threshold_sauvola if self.thresh_method == 'sauvola' else threshold_otsu
thresh = thresh_func(image)
thresholded = (image > thresh).astype(np.float32)
image_bin = np.stack(3 * [thresholded]).transpose((1, 2, 0)) # channels last
wl = math.ceil(self.input_size_w / 2)
wr = int(self.input_size_w / 2)
h, w, _ = strip.image.shape
# vertical offset
offset = (h - self.input_size_h) // 2 if self.offset is None else self.offset
# left image
left_border = strip.offsets_l
left = np.ones((self.input_size_h, wl, 3), dtype=np.float32)
for y, x in enumerate(left_border[offset : offset + self.input_size_h]):
w_new = min(wl, w - x)
left[y, : w_new] = image_bin[y + offset, x : x + w_new]
# right image
right_border = strip.offsets_r
right = np.ones((self.input_size_h, wr, 3), dtype=np.float32)
for y, x in enumerate(right_border[offset : offset + self.input_size_h]):
w_new = min(wr, x + 1)
right[y, : w_new] = image_bin[y + offset, x - w_new + 1: x + 1]
return left, right
def run(self, strips, d=0, ignore_pairs=[], verbose=False): # d is not being used at this moment
''' Run algorithm. '''
t0 = time()
N = len(strips.strips)
compatibilities = np.zeros((N, N), dtype=np.float32)
displacements = np.zeros((N, N), dtype=np.int32)
wr = int(self.input_size_w / 2)
# features
features = []
for strip in strips.strips:
left, right = self._extract_features(strip)
features.append((left, right))
self.preparatation_time = time() - t0
t0 = time()
self.inference_time = 0
for i in range(N):
if verbose: print('row {} of {}'.format(i + 1, N))
self.batch[:, :, : wr] = features[i][1]
for j in range(N):
if i == j or (i, j) in ignore_pairs:
continue
feat_j = features[j][0]
self.batch[self.vshift, :, wr : ] = feat_j
for r in range(1, self.vshift + 1):
self.batch[self.vshift - r, : -r, wr :] = feat_j[r :] # slide up
self.batch[self.vshift + r, r : , wr :] = feat_j[: -r] # slide down
t1 = time()
comp, disp = self.sess.run([self.comp_op, self.disp_op], feed_dict={self.images_ph: self.batch})
self.inference_time += time() - t1
compatibilities[i, j] = comp
displacements[i, j] = disp
self.pairwise_time = time() - t0
self.compatibilities = compatibilities
self.displacements = displacements
return self
def name(self):
''' Method name. '''
return 'sib18'
| [
"paixao@gmail.com"
] | paixao@gmail.com |
45921dd008fad6bac984470e8218cf30bc8ac73e | b2afbc68e3900ebd2a673f5152f4f04374792bca | /pm4py/objects/conversion/log/versions/to_trace_log.py | 6a08685e41dbd465c77611512cf40f07a7bc4638 | [] | no_license | indrawaspada/online_process_monitoring_using_incremental_state-space_expansion_an_exact_algorithm | 83d0c2a3dfd7f1f9ce686564bf7f525bb534e973 | 2b23ab20f739e87447af86bb7f63d8816c69c9db | refs/heads/master | 2022-09-18T12:15:33.172540 | 2020-06-04T13:58:07 | 2020-06-04T13:58:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,443 | py | import pm4py
from pm4py.objects.log import log as log_instance
from pm4py.objects.log.util import general as log_util
def apply(log, parameters=None):
if isinstance(log, pm4py.objects.log.log.EventLog) and (not isinstance(log, pm4py.objects.log.log.TraceLog)):
parameters = parameters if parameters is not None else dict()
if log_util.PARAMETER_KEY_CASE_GLUE in parameters:
glue = parameters[log_util.PARAMETER_KEY_CASE_GLUE]
else:
glue = log_util.CASE_ATTRIBUTE_GLUE
if log_util.PARAMETER_KEY_CASE_ATTRIBUTE_PRFIX in parameters:
case_pref = parameters[log_util.PARAMETER_KEY_CASE_ATTRIBUTE_PRFIX]
else:
case_pref = log_util.CASE_ATTRIBUTE_PREFIX
return transform_event_log_to_trace_log(log, case_glue=glue, include_case_attributes=False,
case_attribute_prefix=case_pref)
return log
def transform_event_log_to_trace_log(log, case_glue=log_util.CASE_ATTRIBUTE_GLUE, include_case_attributes=True,
case_attribute_prefix=log_util.CASE_ATTRIBUTE_PREFIX):
"""
Converts the event log to a trace log
Parameters
----------
log: :class:`pm4py.log.log.EventLog`
An event Log
case_glue:
Case identifier. Default is 'case:concept:name'
include_case_attributes:
Default is True
case_attribute_prefix:
Default is 'case:'
Returns
-------
log : :class:`pm4py.log.log.TraceLog`
A trace log
"""
traces = {}
for event in log:
glue = event[case_glue]
if glue not in traces:
trace_attr = {}
if include_case_attributes:
for k in event.keys():
if k.startswith(case_attribute_prefix):
trace_attr[k.replace(case_attribute_prefix, '')] = event[k]
traces[glue] = log_instance.Trace(attributes=trace_attr)
if include_case_attributes:
for k in list(event.keys()):
if k.startswith(case_attribute_prefix):
del event[k]
traces[glue].append(event)
return log_instance.TraceLog(traces.values(), attributes=log.attributes, classifiers=log.classifiers,
omni_present=log.omni_present, extensions=log.extensions) | [
"daniel.schuster@fit.fraunhofer.de"
] | daniel.schuster@fit.fraunhofer.de |
2f0d441ea6d652558924b0e2e0149d7843eebddb | 25597909c9fd5ae13eb89cf255f4ae79053a6d80 | /.ipynb_checkpoints/c2_model_selection-checkpoint.py | 8bb516e43fa4fba192fe988c6aea97eb6256ce53 | [] | no_license | FangyangJz/AI_Machine_Learning_Muke | 986038941dd7e4af9d2873369408808d5a75e412 | 751f3b850dad31441fd80ac94e1909adbb43473a | refs/heads/master | 2020-03-22T21:53:28.386637 | 2018-07-30T15:01:11 | 2018-07-30T15:01:11 | 140,719,785 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | # !/usr/bin/env python3
# -*- coding:utf-8 -*-
'''
created by Fangyang on Time:2018/7/11
'''
__author__ = 'Fangyang'
import numpy as np
def train_test_split(X, y, test_ratio=0.2, seed=None):
'''将数据X和y按照test_ratio分割成X_train, X_test, y_train, y_test'''
assert X.shape[0] == y.shape[0], \
"the size of X must be equal to the size of the y"
assert 0.0 <= test_ratio <= 1.0, \
"test_ratio must be valid"
if seed:
np.random.seed(seed)
shuffled_indexes = np.random.permutation(len(X))
test_size = int(len(X) * test_ratio)
test_indexes = shuffled_indexes[:test_size]
train_indexes = shuffled_indexes[test_size:]
X_train = X[train_indexes]
y_train = y[train_indexes]
X_test = X[test_indexes]
y_test = y[test_indexes]
return X_train, y_train, X_test, y_test | [
"fangyang.jing@hotmail.com"
] | fangyang.jing@hotmail.com |
df34cfcdc20a398e643f5cd7471eec02573e3d40 | 1b2aafc17203e0c789e51b9b51e59c70be478f23 | /l10n_ve_hr_payroll/__init__.py | 80081c7a38dc614ac7b750b6c4994248d670259e | [] | no_license | adrt271988/odoo_custom_v8 | 7cfd9e1a1537e32de69ef0d617592af118c6d129 | e61176e06bffd6afd82fbcc17fad4ede8a955502 | refs/heads/master | 2020-04-06T05:53:33.547748 | 2017-03-17T00:15:03 | 2017-03-17T00:15:03 | 49,723,158 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payslip
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"adrt271988@gmail.com"
] | adrt271988@gmail.com |
f0ef214be096340939ed6eb9684488ba392bb112 | 8b45916af90aca42f23eab1fd0f78833dfca5bfa | /tests/test_app.py | 8af16aa31636dc536e6387f9cfcf93d8e4a4dd4d | [
"MIT"
] | permissive | bradh/rio-viz | aeeef8c3b94729092ce21ff44b7ecaf79b43b50c | bc73a06c09e49b19541543f1e758109466ca17f8 | refs/heads/master | 2022-03-01T02:50:23.640625 | 2019-07-15T15:37:31 | 2019-07-15T15:37:31 | 197,893,243 | 0 | 0 | MIT | 2019-07-20T07:07:08 | 2019-07-20T07:04:58 | Python | UTF-8 | Python | false | false | 3,052 | py | """tests rio_viz.server."""
import os
import pytest
from starlette.testclient import TestClient
from rio_viz.raster import RasterTiles
from rio_viz.app import viz
from rio_tiler.errors import TileOutsideBounds
cog_path = os.path.join(os.path.dirname(__file__), "fixtures", "cog.tif")
def test_viz():
"""Should work as expected (create TileServer object)."""
r = RasterTiles(cog_path)
app = viz(r)
assert app.raster == r
assert app.port == 8080
assert app.get_bounds() == r.bounds
assert app.get_center() == r.center
assert app.get_endpoint_url() == "http://127.0.0.1:8080"
assert app.get_template_url() == "http://127.0.0.1:8080/index.html"
client = TestClient(app.app)
response = client.get("/")
assert response.status_code == 404
response = client.get("/tiles/7/64/43.png?rescale=1,10")
assert response.status_code == 200
assert response.headers["content-type"] == "image/png"
response = client.get("/tiles/7/64/43.png?rescale=1,10&indexes=1")
assert response.status_code == 200
assert response.headers["content-type"] == "image/png"
response = client.get("/tiles/7/64/43.png?rescale=1,10&color_map=cfastie")
assert response.status_code == 200
assert response.headers["content-type"] == "image/png"
with pytest.raises(TileOutsideBounds):
client.get("/tiles/18/8624/119094.png")
with pytest.raises(TileOutsideBounds):
client.get("/tiles/18/8624/119094.pbf")
response = client.get("/tiles/7/64/43.pbf")
assert response.status_code == 200
assert response.headers["content-type"] == "application/x-protobuf"
response = client.get("/tiles/7/64/43.pbf?feature_type=polygon")
assert response.status_code == 200
assert response.headers["content-type"] == "application/x-protobuf"
response = client.get("/metadata")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
response = client.get("/tilejson.json?tile_format=png")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
r = response.json()
assert r["bounds"]
assert r["center"]
assert r["minzoom"] == 6
assert r["maxzoom"] == 8
assert r["tiles"][0].endswith("png")
response = client.get("/tilejson.json?tile_format=pbf")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
r = response.json()
assert r["tiles"][0].endswith("pbf")
response = client.get("/tilejson.json?tile_format=pbf&feature_type=polygon")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
r = response.json()
assert r["tiles"][0].endswith("pbf?feature_type=polygon")
response = client.get("/point?coordinates=-2,48")
assert response.status_code == 200
assert response.headers["content-type"] == "application/json"
assert response.json() == {"coordinates": [-2.0, 48.0], "value": {"band1": 110}}
| [
"vincent.sarago@gmail.com"
] | vincent.sarago@gmail.com |
df9837844c511d1fd489c599062faa029c57ba79 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03681/s154151403.py | efacb540b804061374acf8c118541a6ce367cf52 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | def factorial_mod(n,mod):
ret = 1
for i in range(1,n+1):
ret *= i
ret %= mod
return ret
def comb_mod(n,r,mod):
if r > n or r < 0:
ret = 0
else:
fact_n = factorial_mod(n, mod)
fact_r = factorial_mod(r, mod)
fact_nr = factorial_mod(n-r, mod)
ret = fact_n * pow(fact_r, mod-2, mod) * pow(fact_nr, mod-2, mod) % mod
return ret
n,m = map(int,input().split())
c = abs(n-m)
mod = 10**9+7
if c >= 2:
ans = 0
elif c == 1:
ans = factorial_mod(n,mod)*factorial_mod(m,mod)
else:
ans = 2*factorial_mod(n,mod)*factorial_mod(m,mod)
print(ans%mod) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
776304ba0a3143e65e73466d9f081b897a53d0d9 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /content/test/gpu/gpu_path_util/setup_tools_perf_paths.py | 5d90d6b007b2d134c53bc620913767cf22226e3e | [
"BSD-3-Clause"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 228 | py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gpu_path_util
gpu_path_util.AddDirToPathIfNeeded(gpu_path_util.TOOLS_PERF_DIR)
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com |
7e864297750e34096f4ed3464d0db7370279f277 | 657ac5489186e9e6f9aa7522cbf25ee929f8dde6 | /microchip/microchip/wsgi.py | 34cc8817d3d3337752078af76ec64f2d42776f9c | [] | no_license | Ginkooo/MicroChipServer | ab8f30fb87d89d9cd217b5abe7a8e940c9d3de3f | fe18496f1023f13a0dad2efba17cfa95e1748a5e | refs/heads/master | 2021-01-13T05:28:46.870298 | 2017-05-10T10:34:47 | 2017-05-10T10:34:47 | 86,624,819 | 0 | 1 | null | 2017-04-02T13:16:07 | 2017-03-29T20:09:22 | Python | UTF-8 | Python | false | false | 396 | py | """
WSGI config for microchip project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "microchip.settings")
application = get_wsgi_application()
| [
"piotr_czajka@outlook.com"
] | piotr_czajka@outlook.com |
ff264d04f5910444f98cfa82b309a14c52616dbd | b731d1b35a5416cdd73d421ea3b88a3a18e4c6d3 | /tools/report-error-positional-distribution.py | 050f158a7b77d58da6ba54495fe3b1672e24adbd | [] | no_license | xflicsu/ecliptic | ad772d3563cff1875dddc7d29d156093e03afd07 | e9d2e671bcabc5df30ada0cf42953769099ad5d7 | refs/heads/master | 2020-12-28T21:06:37.212834 | 2013-06-18T14:13:23 | 2013-06-18T14:13:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,225 | py | #!/usr/bin/env python
from __future__ import division
import csv
import sys
import os
import pickle
import numpy as np
import matplotlib as mpl
mpl.use('Agg') # to enable plotting without X11
from matplotlib import pyplot as plt
from ecliptic.support.plotutils import iwork_colors, adjust_numbers_style
PLOT_FORMATS = ['png', 'pdf']
BASES = 'ACGTX'
BASE_NONEXISTENT = 4
COLFORSTATS = 2 # index in pickled profile. 0: from 5' end 1: from 3' end 2: 12-division
# Make masking matrices for counting match, del, ins or substitutions.
EYE_MATRIX = np.identity(len(BASES), np.uint64)
MASK_MATCH = EYE_MATRIX.copy()
MASK_DEL = np.zeros([len(BASES)] * 2, np.uint64); MASK_DEL[:, BASE_NONEXISTENT] = 1
MASK_INS = np.zeros([len(BASES)] * 2, np.uint64); MASK_INS[BASE_NONEXISTENT, :] = 1
MASK_SUBST = 1 - MASK_MATCH - MASK_DEL - MASK_INS
for arr in (MASK_MATCH, MASK_DEL, MASK_INS, MASK_SUBST):
arr[BASE_NONEXISTENT, BASE_NONEXISTENT] = 0
def write_tabular_output(options, data, fieldsize):
w = csv.writer(open(options['csv'], 'w'))
fieldheaders = [
'%d %s-to-%s' % (fn + 1, basef, baset)
for fn in range(fieldsize)
for basef in BASES for baset in BASES
if basef != 'X' or baset != 'X'
]
w.writerow(['Sample'] + fieldheaders)
for sample, _ in options['profiles']:
profile = data[sample]
fielddata = [
profile[COLFORSTATS][fn, i, j]
for fn in range(fieldsize)
for i, basef in enumerate(BASES)
for j, baset in enumerate(BASES)
if basef != 'X' or baset != 'X'
]
w.writerow([sample] + fielddata)
def plot_error_profiles_total(options, data, fieldsize):
xpositions = np.arange(fieldsize)
for sample, _ in options['profiles']:
profile = data[sample][COLFORSTATS]
fig = plt.figure(figsize=(3.5, 2.8))
subst_freq = []
ins_freq = []
del_freq = []
for fn in range(fieldsize):
inserted = (profile[fn] * MASK_INS).sum()
deleted = (profile[fn] * MASK_DEL).sum()
substed = (profile[fn] * MASK_SUBST).sum()
total = profile[fn].sum()
subst_freq.append(substed / total * 100)
ins_freq.append(inserted / total * 100)
del_freq.append(deleted / total * 100)
plt.plot(xpositions, subst_freq, label='subst', c=iwork_colors.yellow, linewidth=1.2)
plt.plot(xpositions, ins_freq, label='ins', c=iwork_colors.red, linewidth=1.2)
plt.plot(xpositions, del_freq, label='del', c=iwork_colors.blue, linewidth=1.2)
ax = plt.gca()
plt.setp(ax.get_xticklabels(), visible=False)
plt.xlim(0, fieldsize-1)
plt.xlabel('Position in tag')
plt.ylabel('Frequency (percent)')
plt.title(sample)
box = ax.get_position()
ax.set_position([box.x0 + 0.1, box.y0 + box.height * 0.2,
box.width * 0.9, box.height * 0.77])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=3, columnspacing=0.5,
frameon=False, handletextpad=0.5)
adjust_numbers_style(ax, xgrid=True)
for format in PLOT_FORMATS:
plt.savefig(options['plots'].format(format=format, type='total', sample=sample))
plt.cla()
plt.clf()
def plot_error_profiles_pertype(options, data, fieldsize, plottype):
xpositions = np.arange(fieldsize)
freqmask = {'del': MASK_DEL, 'subst': MASK_SUBST}[plottype]
for sample, _ in options['profiles']:
profile = data[sample][COLFORSTATS]
fig = plt.figure(figsize=(3.5, 2.8))
freqs = [[] for i in range(len(BASES) - 1)]
for fn in range(fieldsize):
profmasked = profile[fn] * freqmask
for basei, freqtbl in enumerate(freqs):
basereads = profile[fn, basei].sum()
freqtbl.append(profmasked[basei].sum() / basereads)
for base, freqtbl, color in zip(BASES, freqs, iwork_colors):
plt.plot(xpositions, freqtbl, label=base, c=color, linewidth=1.2)
ax = plt.gca()
plt.setp(ax.get_xticklabels(), visible=False)
plt.xlim(0, fieldsize-1)
plt.xlabel('Position in tag')
plt.ylabel('Frequency (percent)')
plt.title(sample)
box = ax.get_position()
ax.set_position([box.x0 + 0.1, box.y0 + box.height * 0.2,
box.width * 0.9, box.height * 0.77])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), ncol=4, columnspacing=0.5,
frameon=False, handletextpad=0.5)
adjust_numbers_style(ax, xgrid=True)
for format in PLOT_FORMATS:
plt.savefig(options['plots'].format(format=format, type=plottype, sample=sample))
plt.cla()
plt.clf()
def plot_error_profiles(options, data, fieldsize):
plot_error_profiles_total(options, data, fieldsize)
for plottype in ('subst', 'del'):
plot_error_profiles_pertype(options, data, fieldsize, plottype)
def parse_arguments():
import argparse
parser = argparse.ArgumentParser(description='Generate a table and plot of '
'error frequency distribution for the report')
parser.add_argument('profiles', metavar='name:pickle', type=str, nargs='+',
help='Input profiles with name.')
parser.add_argument('--output-csv', dest='csv', metavar='PATH', type=str,
help='Tabular output file path')
parser.add_argument('--output-plot', dest='plots', metavar='PATH', type=str,
help='Plot output file path (use {format}, {sample} and {type})')
args = parser.parse_args()
return {
'profiles': [tok.split(':') for tok in args.profiles],
'csv': args.csv,
'plots': args.plots,
}
if __name__ == '__main__':
options = parse_arguments()
data = dict((sample, pickle.load(open(filename)))
for sample, filename in options['profiles'])
fieldsize = data.itervalues().next()[COLFORSTATS].shape[0]
write_tabular_output(options, data, fieldsize)
plot_error_profiles(options, data, fieldsize)
| [
"hyeshik@snu.ac.kr"
] | hyeshik@snu.ac.kr |
4f7c0f59574280506381ae50d52222c1fa3a1b2a | ba12e5b18bf29cf152050bade5dfc04493904afb | /setup.py | 3291b5009df36c8de49ffd8eb8a279cc654313df | [] | no_license | st4lk/centrifuge-mongodb | 4ac5b99db29443db0db3862712bb0194d4097a82 | c9c5c678fc1935c761433d33804bcf59d588c406 | refs/heads/master | 2020-02-26T13:19:19.022732 | 2014-03-19T08:42:02 | 2014-03-19T08:42:02 | 26,912,597 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | import os
import sys
from setuptools import setup
if sys.argv[-1] == 'test':
status = os.system('python tests/tests.py')
sys.exit(1 if status > 127 else status)
requirements = [
'centrifuge',
'six==1.3.0',
'motor==0.1.2'
]
def long_description():
return "MongoDB structure backend for Centrifuge"
setup(
name='centrifuge-mongodb',
version='0.2.0',
description="MongoDB structure backend for Centrifuge",
long_description=long_description(),
url='https://github.com/centrifugal/centrifuge-mongodb',
download_url='https://github.com/centrifugal/centrifuge-mongodb',
author="Alexandr Emelin",
author_email='frvzmb@gmail.com',
license='MIT',
packages=['centrifuge_mongodb'],
install_requires=requirements,
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development',
'Topic :: System :: Networking',
'Topic :: Terminals',
'Topic :: Text Processing',
'Topic :: Utilities'
]
)
| [
"frvzmb@gmail.com"
] | frvzmb@gmail.com |
8fe0d18309ed7841e63127156eb08261cb23e707 | 3185e5061f522f80d88122e022a35e7b1541296f | /startCamp/04_day/flask/app.py | 548c80842bc4c8e68432fb66aaa84b767ba26eb8 | [] | no_license | min1378/TIL | cc688b6e6377563e9d55712b81b117c69d1c8c25 | afbeba104d10614e078ec43676a4e1c1d70422a3 | refs/heads/master | 2020-06-17T11:55:45.982224 | 2019-07-12T01:14:04 | 2019-07-12T01:14:04 | 195,916,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,339 | py | from flask import Flask, render_template, request #requests랑 다름 flask 자체 제공 함수 사용자의 요청을 확인할 수 있는 객체
import requests
import bs4
app = Flask(__name__)
@app.route('/') # / => root
def index():
return 'hello world'
@app.route('/greeting/<string:name>') #입력받아올땐 꺽쇠 <> 사용!!!!!
def hello(name):
#return f'안녕하세요 {name}님'
return render_template('greeting.html', html_name=name)
@app.route('/ping')
def ping():
return render_template('ping.html')
@app.route('/pong')
def pong():
age= request.args.get('age')
return f'Pong! age: {age}'
@app.route('/google')
def google():
return render_template('google.html')
@app.route('/naver')
def naver():
return render_template('naver.html')
@app.route('/ascii_input')
def ascii_input():
return render_template('ascii_input.html')
@app.route('/ascii_result')
def ascii_result():
text = request.args.get('text') # Message
#Ascii Art API를 활용하여 사용자의 input 값을 변경한다.
response = requests.get(f'http://artii.herokuapp.com/make?text={text}')
result = response.text
return render_template('ascii_result.html', result=result)
@app.route('/lotto_input')
def lotto_input():
return render_template('lotto_input.html')
@app.route('/lotto_result')
def lotto_result():
lotto_round = request.args.get('lotto_round')
lotto_number = request.args.get('text').split()
url=f'https://dhlottery.co.kr/common.do?method=getLottoNumber&drwNo={lotto_round}'
response = requests.get(url)
lotto_info = response.json() # Json Type의 파일을 파이썬 dictionary로 parsing해줘!!!!!!
#for key, val in lotto_info.items():
winner =[]
for i in range(1,7) :
winner.append(str(lotto_info[f'drwtNo{i}']))
#winner.sort()
print(winner)
print(lotto_number)
#번호 교집합 개수 찾기
if len(lotto_number) == 6: # 사용자가 보낸 숫자가 6개가 맞는지 확인
matched = 0 #교집합 개수 초기화
for number in lotto_number: #사용자가 보낸 숫자만큼 돌림
if number in winner: #사용자가 보낸 숫자와 1등번호를 비교
matched += 1 #교집합 발생시 1씩 증가
if matched == 6:
result = "1등"
elif matched == 5:
if str(lotto_info['bnusNo']) in lotto_number:
result ="2등"
else :
result ="3등"
elif matched == 4:
result = "4등"
elif matched == 3:
result = "5등"
else:
result = '꽝'
return render_template('lotto_result.html', result=result)
if __name__ == '__main__' : # 파이썬 실행방법에는 두가지... 1. python @@.py 2. 모듈을 호출하는 방법 import시키면 자동 실행 파이썬은 그 자체로 모듈이 된다.
#__name__이라는 변수는 모든 파이썬에 존재. 모듈 호출로 실행하면 __name__에 __main__이 없다. 해당모듈의 이름이 나옴.
# 그러나 @@.py로 실행하면 __name__에 __main__이라는 값이 들어간다. 구분하기 위한 용도
app.run(debug=True) | [
"qwes123@naver.com"
] | qwes123@naver.com |
cd8df0cdec8e425c79e6dc06a9fc7ff507004647 | 70054615f56be28373b00c9df96544ec822be683 | /res/scripts/client/gui/prb_control/events_dispatcher.py | 4c439155085c3f32851159a72c51df0964279474 | [] | no_license | wanyancan/WOTDecompiled | c646ad700f5ec3fb81fb4e87862639ce0bdf0000 | 9ffb09007a61d723cdb28549e15db39c34c0ea1e | refs/heads/master | 2020-04-17T23:13:15.649069 | 2013-11-15T16:37:10 | 2013-11-15T16:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,890 | py | # 2013.11.15 11:25:38 EST
# Embedded file name: scripts/client/gui/prb_control/events_dispatcher.py
from constants import PREBATTLE_TYPE
from debug_utils import LOG_ERROR
from gui.Scaleform.framework import VIEW_TYPE, g_entitiesFactories as guiFactory
from gui.Scaleform.framework.managers.containers import POP_UP_CRITERIA
from gui.Scaleform.locale.CHAT import CHAT
from gui.prb_control.settings import DEFAULT_PREBATTLE_COOLDOWN
from gui.shared import g_eventBus, events, EVENT_BUS_SCOPE
from gui.shared.events import ChannelManagementEvent
from messenger.ext import channel_num_gen
from messenger.m_constants import LAZY_CHANNEL
def updateUI():
    # Ask the lobby to re-evaluate the fight ("Battle!") button state after a
    # prebattle change (squad/company readiness, queue state, etc.).
    g_eventBus.handleEvent(events.FightButtonEvent(events.FightButtonEvent.FIGHT_BUTTON_UPDATE), scope=EVENT_BUS_SCOPE.LOBBY)
def loadHangar():
    # Switch the lobby sub-view to the hangar.
    g_eventBus.handleEvent(events.LoadEvent(events.LoadEvent.LOAD_HANGAR), scope=EVENT_BUS_SCOPE.LOBBY)
def loadBattleQueue():
    # Switch the lobby sub-view to the battle-queue (waiting) screen.
    g_eventBus.handleEvent(events.LoadEvent(events.LoadEvent.LOAD_BATTLE_QUEUE), scope=EVENT_BUS_SCOPE.LOBBY)
def loadTrainingList():
    # Switch the lobby sub-view to the list of training rooms.
    g_eventBus.handleEvent(events.LoadEvent(events.LoadEvent.LOAD_TRAININGS), scope=EVENT_BUS_SCOPE.LOBBY)
def loadTrainingRoom():
    # Switch the lobby sub-view to the currently joined training room.
    g_eventBus.handleEvent(events.LoadEvent(events.LoadEvent.LOAD_TRAINING_ROOM), scope=EVENT_BUS_SCOPE.LOBBY)
def exitFromTrainingRoom():
    """Return to the training-room list, but only when the training-room
    sub-view is the one currently shown; otherwise do nothing."""
    # Imported locally — presumably to avoid a circular import at module load
    # time (decompiled source; confirm against the surrounding package).
    from gui.WindowsManager import g_windowsManager
    from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
    if g_windowsManager.window is not None:
        view = g_windowsManager.window.containerManager.getContainer(VIEW_TYPE.LOBBY_SUB).getView()
        if view is not None and view.settings.alias == VIEW_ALIAS.LOBBY_TRAINING_ROOM:
            return loadTrainingList()
    return
def _showSquadWindow(isInvitesOpen = False):
    # Open the squad window; isInvitesOpen pre-expands the invites pane.
    g_eventBus.handleEvent(events.ShowWindowEvent(events.ShowWindowEvent.SHOW_SQUAD_WINDOW, {'isInvitesOpen': isInvitesOpen}), scope=EVENT_BUS_SCOPE.LOBBY)
def _closeSquadWindow():
    # NOTE(review): a HIDE_* event id wrapped in LoadEvent — likely a
    # decompiler artifact; dispatch appears to key on the event-type string
    # only.  Confirm before "fixing" the event class.
    g_eventBus.handleEvent(events.LoadEvent(events.HideWindowEvent.HIDE_SQUAD_WINDOW), scope=EVENT_BUS_SCOPE.LOBBY)
def addSquadToCarousel():
    """Add the squad channel entry to the channels carousel."""
    clientID = channel_num_gen.getClientID4Prebattle(PREBATTLE_TYPE.SQUAD)
    if not clientID:
        LOG_ERROR('Client ID not found', 'addSquadToCarousel')
        return
    g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_ADD, {'label': CHAT.CHANNELS_SQUAD,
     'canClose': False,
     'isNotified': False,
     'icon': '../maps/icons/messenger/squad_icon.png',
     'order': channel_num_gen.getOrder4Prebattle(),
     'criteria': {POP_UP_CRITERIA.VIEW_ALIAS: guiFactory.getAliasByEvent(events.ShowWindowEvent.SHOW_SQUAD_WINDOW)},
     'openHandler': _showSquadWindow}), scope=EVENT_BUS_SCOPE.LOBBY)
def removeSquadFromCarousel():
    """Remove the squad channel entry from the channels carousel."""
    clientID = channel_num_gen.getClientID4Prebattle(PREBATTLE_TYPE.SQUAD)
    if not clientID:
        LOG_ERROR('Client ID not found', '_removeSquadFromCarousel')
        return
    g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_REMOVE), scope=EVENT_BUS_SCOPE.LOBBY)
def loadSquad(isInvitesOpen = False):
    # Entry point: register the carousel entry, then open the window.
    addSquadToCarousel()
    _showSquadWindow(isInvitesOpen=isInvitesOpen)
def unloadSquad():
    # Tear down in reverse order, then ask messenger to destroy the channel.
    _closeSquadWindow()
    removeSquadFromCarousel()
    requestToDestroyPrbChannel(PREBATTLE_TYPE.SQUAD)
def unloadNotificationInviteWindow():
    # Hide the invite-notification window (same LoadEvent-wrapping oddity as
    # _closeSquadWindow above — decompiled source, verify before changing).
    g_eventBus.handleEvent(events.LoadEvent(events.HideWindowEvent.HIDE_NOTIFICATION_INVITES_WINDOW), scope=EVENT_BUS_SCOPE.LOBBY)
def _showCompanyWindow(isInvitesOpen = False):
    # Open the company window; isInvitesOpen pre-expands the invites pane.
    g_eventBus.handleEvent(events.ShowWindowEvent(events.ShowWindowEvent.SHOW_COMPANY_WINDOW, {'isInvitesOpen': isInvitesOpen}), scope=EVENT_BUS_SCOPE.LOBBY)
def _closeCompanyWindow():
    # NOTE(review): HIDE_* id wrapped in LoadEvent (decompiler artifact?);
    # dispatch appears to rely on the event-type string only.
    g_eventBus.handleEvent(events.LoadEvent(events.HideWindowEvent.HIDE_COMPANY_WINDOW), scope=EVENT_BUS_SCOPE.LOBBY)
def addCompanyToCarousel():
    """Add the company channel entry to the channels carousel."""
    clientID = channel_num_gen.getClientID4Prebattle(PREBATTLE_TYPE.COMPANY)
    if not clientID:
        LOG_ERROR('Client ID not found', 'addCompanyToCarousel')
        return
    else:
        g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_ADD, {'label': CHAT.CHANNELS_TEAM,
         'canClose': False,
         'isNotified': False,
         'icon': None,
         'order': channel_num_gen.getOrder4Prebattle(),
         'criteria': {POP_UP_CRITERIA.VIEW_ALIAS: guiFactory.getAliasByEvent(events.ShowWindowEvent.SHOW_COMPANY_WINDOW)},
         'openHandler': _showCompanyWindow}), scope=EVENT_BUS_SCOPE.LOBBY)
        return
def removeCompanyFromCarousel():
    """Remove the company channel entry from the channels carousel."""
    clientID = channel_num_gen.getClientID4Prebattle(PREBATTLE_TYPE.COMPANY)
    if not clientID:
        LOG_ERROR('Client ID not found', 'removeCompanyFromCarousel')
        return
    g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_REMOVE), scope=EVENT_BUS_SCOPE.LOBBY)
def loadCompany(isInvitesOpen = False):
    # Entry point: register the carousel entry, then open the window.
    addCompanyToCarousel()
    _showCompanyWindow(isInvitesOpen=isInvitesOpen)
def unloadCompany():
    # Tear down in reverse order, then ask messenger to destroy the channel.
    _closeCompanyWindow()
    removeCompanyFromCarousel()
    requestToDestroyPrbChannel(PREBATTLE_TYPE.COMPANY)
def _showBattleSessionWindow():
    # Open the special-battle (clan/tournament) session window.
    g_eventBus.handleEvent(events.ShowWindowEvent(events.ShowWindowEvent.SHOW_BATTLE_SESSION_WINDOW), scope=EVENT_BUS_SCOPE.LOBBY)
def _closeBattleSessionWindow():
    # NOTE(review): HIDE_* id wrapped in ShowWindowEvent — decompiler
    # artifact?  Dispatch appears to key on the event-type string only.
    g_eventBus.handleEvent(events.ShowWindowEvent(events.HideWindowEvent.HIDE_BATTLE_SESSION_WINDOW), scope=EVENT_BUS_SCOPE.LOBBY)
def addSpecBattleToCarousel(prbType):
    """Add a carousel entry for a clan or tournament special battle.

    Only PREBATTLE_TYPE.CLAN and PREBATTLE_TYPE.TOURNAMENT are supported;
    any other prbType is logged and ignored.
    """
    clientID = channel_num_gen.getClientID4Prebattle(prbType)
    if not clientID:
        LOG_ERROR('Client ID not found', 'addSpecBattleToCarousel')
        return
    else:
        # Pick the carousel label matching the prebattle type.
        if prbType is PREBATTLE_TYPE.CLAN:
            label = CHAT.CHANNELS_CLAN
        elif prbType is PREBATTLE_TYPE.TOURNAMENT:
            label = CHAT.CHANNELS_TOURNAMENT
        else:
            LOG_ERROR('Prebattle type is not valid', prbType)
            return
        g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_ADD, {'label': label,
         'canClose': False,
         'isNotified': False,
         'icon': None,
         'order': channel_num_gen.getOrder4Prebattle(),
         'criteria': {POP_UP_CRITERIA.VIEW_ALIAS: guiFactory.getAliasByEvent(events.ShowWindowEvent.SHOW_BATTLE_SESSION_WINDOW)},
         'openHandler': _showBattleSessionWindow}), scope=EVENT_BUS_SCOPE.LOBBY)
        return
def removeSpecBattleFromCarousel(prbType):
    """Remove the special-battle carousel entry for the given prebattle type."""
    clientID = channel_num_gen.getClientID4Prebattle(prbType)
    if not clientID:
        LOG_ERROR('Client ID not found', '_removeSpecBattleFromCarousel')
        return
    g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_REMOVE), scope=EVENT_BUS_SCOPE.LOBBY)
def loadBattleSessionWindow(prbType):
    # Entry point: register the carousel entry, then open the window.
    addSpecBattleToCarousel(prbType)
    _showBattleSessionWindow()
def unloadBattleSessionWindow(prbType):
    # Tear down in reverse order, then ask messenger to destroy the channel.
    _closeBattleSessionWindow()
    removeSpecBattleFromCarousel(prbType)
    requestToDestroyPrbChannel(prbType)
def loadBattleSessionList():
    # Open the list of available special battles.
    g_eventBus.handleEvent(events.ShowWindowEvent(events.ShowWindowEvent.SHOW_BATTLE_SESSION_LIST), scope=EVENT_BUS_SCOPE.LOBBY)
def addSpecBattlesToCarousel():
    """Add the lazy 'special battles' channel entry to the carousel."""
    clientID = channel_num_gen.getClientID4LazyChannel(LAZY_CHANNEL.SPECIAL_BATTLES)
    if not clientID:
        LOG_ERROR('Client ID not found', 'addSpecBattlesToCarousel')
        return
    else:
        g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_ADD, {'label': LAZY_CHANNEL.SPECIAL_BATTLES,
         'canClose': False,
         'isNotified': False,
         'icon': None,
         'order': channel_num_gen.getOrder4LazyChannel(LAZY_CHANNEL.SPECIAL_BATTLES),
         'criteria': {POP_UP_CRITERIA.VIEW_ALIAS: guiFactory.getAliasByEvent(events.ShowWindowEvent.SHOW_BATTLE_SESSION_LIST)},
         'openHandler': loadBattleSessionList}), scope=EVENT_BUS_SCOPE.LOBBY)
        return
def removeSpecBattlesFromCarousel():
    """Remove the lazy 'special battles' channel entry from the carousel."""
    clientID = channel_num_gen.getClientID4LazyChannel(LAZY_CHANNEL.SPECIAL_BATTLES)
    if not clientID:
        LOG_ERROR('Client ID not found', 'removeSpecBattlesFromCarousel')
        return
    g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_REMOVE), scope=EVENT_BUS_SCOPE.LOBBY)
def loadCompaniesWindow():
    # Open the list of companies.
    g_eventBus.handleEvent(events.ShowWindowEvent(events.ShowWindowEvent.SHOW_COMPANIES_WINDOW), scope=EVENT_BUS_SCOPE.LOBBY)
def addCompaniesToCarousel():
    """Add the lazy 'companies' channel entry to the carousel."""
    clientID = channel_num_gen.getClientID4LazyChannel(LAZY_CHANNEL.COMPANIES)
    if not clientID:
        LOG_ERROR('Client ID not found', 'addCompaniesToCarousel')
        return
    else:
        g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_ADD, {'label': LAZY_CHANNEL.COMPANIES,
         'canClose': False,
         'isNotified': False,
         'icon': None,
         'order': channel_num_gen.getOrder4LazyChannel(LAZY_CHANNEL.COMPANIES),
         'criteria': {POP_UP_CRITERIA.VIEW_ALIAS: guiFactory.getAliasByEvent(events.ShowWindowEvent.SHOW_COMPANIES_WINDOW)},
         'openHandler': loadCompaniesWindow}), scope=EVENT_BUS_SCOPE.LOBBY)
        return
def showUnitWindow():
    # Open the cybersport unit window.
    g_eventBus.handleEvent(events.ShowWindowEvent(events.ShowWindowEvent.SHOW_UNIT_WINDOW), scope=EVENT_BUS_SCOPE.LOBBY)
def _closeUnitWindow():
    # NOTE(review): HIDE_* id wrapped in LoadEvent (decompiler artifact?);
    # dispatch appears to rely on the event-type string only.
    g_eventBus.handleEvent(events.LoadEvent(events.HideWindowEvent.HIDE_UNIT_WINDOW), scope=EVENT_BUS_SCOPE.LOBBY)
def addUnitToCarousel():
    """Add the cybersport unit channel entry to the carousel."""
    # Imported locally — presumably to avoid an import cycle at module load.
    from gui.Scaleform.locale.CYBERSPORT import CYBERSPORT
    clientID = channel_num_gen.getClientID4Prebattle(PREBATTLE_TYPE.UNIT)
    if not clientID:
        LOG_ERROR('Client ID not found', 'addUnitToCarousel')
        return
    else:
        g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_ADD, {'label': CYBERSPORT.WINDOW_TITLE,
         'canClose': False,
         'isNotified': False,
         'icon': None,
         'order': channel_num_gen.getOrder4Prebattle(),
         'criteria': {POP_UP_CRITERIA.VIEW_ALIAS: guiFactory.getAliasByEvent(events.ShowWindowEvent.SHOW_UNIT_WINDOW)},
         'openHandler': showUnitWindow}), scope=EVENT_BUS_SCOPE.LOBBY)
        return
def removeUnitFromCarousel():
    """Remove the cybersport unit channel entry from the carousel."""
    clientID = channel_num_gen.getClientID4Prebattle(PREBATTLE_TYPE.UNIT)
    if not clientID:
        LOG_ERROR('Client ID not found', 'removeUnitFromCarousel')
        return
    g_eventBus.handleEvent(ChannelManagementEvent(clientID, ChannelManagementEvent.REQUEST_TO_REMOVE), scope=EVENT_BUS_SCOPE.LOBBY)
def loadUnit():
    # Entry point: register the carousel entry, then open the window.
    addUnitToCarousel()
    showUnitWindow()
def unloadUnit():
    # Tear down in reverse order, then ask messenger to destroy the channel.
    _closeUnitWindow()
    removeUnitFromCarousel()
    requestToDestroyPrbChannel(PREBATTLE_TYPE.UNIT)
def requestToDestroyPrbChannel(prbType):
    # Ask the messenger subsystem to destroy the prebattle channel controller.
    g_eventBus.handleEvent(events.MessengerEvent(events.MessengerEvent.PRB_CHANNEL_CTRL_REQUEST_DESTROY, {'prbType': prbType}), scope=EVENT_BUS_SCOPE.LOBBY)
def fireCoolDownEvent(requestID, coolDown = DEFAULT_PREBATTLE_COOLDOWN):
    # Start a cooldown timer for the given prebattle request.
    g_eventBus.handleEvent(events.CoolDownEvent(events.CoolDownEvent.PREBATTLE, requestID=requestID, coolDown=coolDown), scope=EVENT_BUS_SCOPE.LOBBY)
def fireAutoInviteReceived(invite):
    # Notify the lobby that an automatic invite arrived.
    g_eventBus.handleEvent(events.AutoInviteEvent(invite, events.AutoInviteEvent.INVITE_RECEIVED), scope=EVENT_BUS_SCOPE.LOBBY)
def showParentControlNotification():
    """Show the Korean parental-control dialog appropriate to the current
    game-session restriction (play-time block vs. general parent block)."""
    # Imported locally — presumably to avoid an import cycle at module load.
    from gui import game_control, DialogsInterface
    if game_control.g_instance.gameSession.isPlayTimeBlock:
        key = 'koreaPlayTimeNotification'
    else:
        key = 'koreaParentNotification'
    # The no-op lambda discards the dialog's confirmation callback.
    DialogsInterface.showI18nInfoDialog(key, lambda *args: None)
# okay decompyling res/scripts/client/gui/prb_control/events_dispatcher.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.11.15 11:25:39 EST
| [
"james.sweet88@googlemail.com"
] | james.sweet88@googlemail.com |
b06dc60fd4fb7841a68042ea98f1808d2d288fe3 | 888e79392cb660be5799cc5bd25d76bcfa9e2e2c | /doctorus/doctorus/doctype/actividad/actividad.py | eee8389df8919f860cdc420df5fd9dbd3524f95f | [
"MIT"
] | permissive | Nirchains/doctorus | 269eadee5754612c521d1c6193d5fe7bbfdb3b8a | 38d39270742dfdae6597a06713952df01a2c3e9d | refs/heads/master | 2020-03-17T07:09:30.046005 | 2019-05-08T06:51:50 | 2019-05-08T06:51:50 | 133,386,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,963 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, HISPALIS DIGITAL and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Actividad(Document):
	# Frappe DocType controller for "Actividad" (activity) records.  Submitting
	# or cancelling an activity re-evaluates the owning expedient's activity
	# requirements via the module-level validate_requirements().
	def validate(self):
		# No pre-save validation is needed for this doctype.
		pass
	def on_submit(self):
		# Re-check the parent expedient once this activity becomes effective.
		validate_requirements(self.expedient)
	def on_cancel(self):
		# Cancelling an activity may un-satisfy the expedient's requirements.
		validate_requirements(self.expedient)
def validate_requirements(expedient):
	"""Recompute and persist the activity status of *expedient*.

	Sets the expedient's ``activities`` field to 'Superadas' when every
	configured activity requirement is met (see check_requirements),
	otherwise to 'No superadas'.  The document is only saved when the
	value actually changes.
	"""
	new_status = 'Superadas' if check_requirements(expedient) else 'No superadas'
	expedient_doc = frappe.get_doc("Expediente", expedient)
	if expedient_doc.activities == new_status:
		return
	expedient_doc.activities = new_status
	expedient_doc.save()
	# Drop cached documents so the updated status is visible immediately.
	frappe.clear_cache()
def check_requirements(expedient):
	"""Return True when *expedient* has at least the configured minimum
	number of submitted (docstatus = 1) activities of every required type.

	The per-type minimums come from the 'Requisitos Actividades' child table
	of the 'Configuracion del Programa' single doctype.
	"""
	rules = frappe.db.get_list('Requisitos Actividades', filters={
		'parent': 'Configuracion del Programa',
		'parentfield': 't_activity_requirements'
	}, fields=['activity_type', 'min'])
	for rule in rules:
		count = int(frappe.db.sql("""select count(*) from `tabActividad` where
			expedient=%(expedient)s and docstatus = 1
			and activity_type=%(activity_type)s """, {"expedient": expedient, "activity_type": rule.activity_type})[0][0])
		if count < rule.min:
			# One unmet requirement decides the result; returning early avoids
			# issuing a COUNT query for every remaining rule.
			return False
	return True
return meet_requirements
def update_all_requirements():
	"""Re-evaluate the activity requirements of every expedient and commit."""
	for record in frappe.db.get_list('Expediente'):
		validate_requirements(record.name)
	frappe.db.commit()
"nirchains@gmail.com"
] | nirchains@gmail.com |
ea518cb07b2ff4574c1d97e42179ea4970eddb78 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/beta/reports_beta/azext_reports_beta/generated/_help.py | 884a89739caaf6c46be6e844a298a6731039e84c | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 61,930 | py | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
helps['reports_beta'] = '''
type: group
short-summary: Manage Reports
'''
helps['reports audit-log-audit-log-root'] = """
type: group
short-summary: Manage audit log audit log root with reports_beta
"""
helps['reports audit-log-audit-log-root show-audit-log-root'] = """
type: command
short-summary: "Get auditLogs."
"""
helps['reports audit-log-audit-log-root update-audit-log-root'] = """
type: command
short-summary: "Update auditLogs."
parameters:
- name: --restricted-sign-ins
long-summary: |
Usage: --restricted-sign-ins target-tenant-id=XX alternate-sign-in-name=XX app-display-name=XX app-id=XX \
applied-conditional-access-policies=XX authentication-details=XX authentication-methods-used=XX \
authentication-processing-details=XX authentication-requirement=XX authentication-requirement-policies=XX \
client-app-used=XX conditional-access-status=XX correlation-id=XX created-date-time=XX device-detail=XX ip-address=XX \
is-interactive=XX mfa-detail=XX network-location-details=XX original-request-id=XX processing-time-in-milliseconds=XX \
resource-display-name=XX resource-id=XX resource-tenant-id=XX risk-detail=XX risk-event-types=XX \
risk-event-types-v2=XX risk-level-aggregated=XX risk-level-during-sign-in=XX risk-state=XX service-principal-id=XX \
service-principal-name=XX sign-in-event-types=XX status=XX token-issuer-name=XX token-issuer-type=XX user-agent=XX \
user-display-name=XX user-id=XX user-principal-name=XX city=XX country-or-region=XX geo-coordinates=XX state=XX id=XX
app-display-name: App name displayed in the Azure Portal.
app-id: Unique GUID representing the app ID in the Azure Active Directory.
client-app-used: Identifies the legacy client used for sign-in activity. Includes Browser, Exchange \
Active Sync, modern clients, IMAP, MAPI, SMTP, and POP.
correlation-id: The request ID sent from the client when the sign-in is initiated; used to troubleshoot \
sign-in activity.
created-date-time: Date and time (UTC) the sign-in was initiated. Example: midnight on Jan 1, 2014 is \
reported as '2014-01-01T00:00:00Z'.
device-detail: deviceDetail
ip-address: IP address of the client used to sign in.
is-interactive: Indicates if a sign-in is interactive or not.
mfa-detail: mfaDetail
resource-display-name: Name of the resource the user signed into.
resource-id: ID of the resource that the user signed into.
risk-event-types: Risk event types associated with the sign-in. The possible values are: unlikelyTravel, \
anonymizedIPAddress, maliciousIPAddress, unfamiliarFeatures, malwareInfectedIPAddress, suspiciousIPAddress, \
leakedCredentials, investigationsThreatIntelligence, generic, and unknownFutureValue.
risk-event-types-v2: The list of risk event types associated with the sign-in. Possible values: \
unlikelyTravel, anonymizedIPAddress, maliciousIPAddress, unfamiliarFeatures, malwareInfectedIPAddress, \
suspiciousIPAddress, leakedCredentials, investigationsThreatIntelligence, generic, or unknownFutureValue.
status: signInStatus
user-display-name: Display name of the user that initiated the sign-in.
user-id: ID of the user that initiated the sign-in.
user-principal-name: User principal name of the user that initiated the sign-in.
city: Provides the city where the sign-in originated. This is calculated using latitude/longitude \
information from the sign-in activity.
country-or-region: Provides the country code info (2 letter code) where the sign-in originated. This is \
calculated using latitude/longitude information from the sign-in activity.
geo-coordinates: geoCoordinates
state: Provides the State where the sign-in originated. This is calculated using latitude/longitude \
information from the sign-in activity.
id: Read-only.
Multiple actions can be specified by using more than one --restricted-sign-ins argument.
"""
helps['reports audit-log'] = """
type: group
short-summary: Manage audit log with reports_beta
"""
helps['reports audit-log create-directory-audit'] = """
type: command
short-summary: "Create new navigation property to directoryAudits for auditLogs."
parameters:
- name: --additional-details
short-summary: "Indicates additional details on the activity."
long-summary: |
Usage: --additional-details key=XX value=XX
key: Key for the key-value pair.
value: Value for the key-value pair.
Multiple actions can be specified by using more than one --additional-details argument.
- name: --app
short-summary: "appIdentity"
long-summary: |
Usage: --app app-id=XX display-name=XX service-principal-id=XX service-principal-name=XX
app-id: Refers to the Unique GUID representing Application Id in the Azure Active Directory.
display-name: Refers to the Application Name displayed in the Azure Portal.
service-principal-id: Refers to the Unique GUID indicating Service Principal Id in Azure Active Directory \
for the corresponding App.
service-principal-name: Refers to the Service Principal Name is the Application name in the tenant.
- name: --user
short-summary: "userIdentity"
long-summary: |
Usage: --user ip-address=XX user-principal-name=XX display-name=XX id=XX
ip-address: Indicates the client IP address used by user performing the activity (audit log only).
user-principal-name: The userPrincipalName attribute of the user.
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['reports audit-log create-directory-provisioning'] = """
type: command
short-summary: "Create new navigation property to directoryProvisioning for auditLogs."
parameters:
- name: --initiated-by
short-summary: "initiator"
long-summary: |
Usage: --initiated-by display-name=XX id=XX initiator-type=XX
- name: --modified-properties
long-summary: |
Usage: --modified-properties display-name=XX new-value=XX old-value=XX
display-name: Indicates the property name of the target attribute that was changed.
new-value: Indicates the updated value for the propery.
old-value: Indicates the previous value (before the update) for the property.
Multiple actions can be specified by using more than one --modified-properties argument.
- name: --service-principal
short-summary: "provisioningServicePrincipal"
long-summary: |
Usage: --service-principal display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['reports audit-log create-provisioning'] = """
type: command
short-summary: "Create new navigation property to provisioning for auditLogs."
parameters:
- name: --initiated-by
short-summary: "initiator"
long-summary: |
Usage: --initiated-by display-name=XX id=XX initiator-type=XX
- name: --modified-properties
long-summary: |
Usage: --modified-properties display-name=XX new-value=XX old-value=XX
display-name: Indicates the property name of the target attribute that was changed.
new-value: Indicates the updated value for the propery.
old-value: Indicates the previous value (before the update) for the property.
Multiple actions can be specified by using more than one --modified-properties argument.
- name: --service-principal
short-summary: "provisioningServicePrincipal"
long-summary: |
Usage: --service-principal display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['reports audit-log create-restricted-sign-in'] = """
type: command
short-summary: "Create new navigation property to restrictedSignIns for auditLogs."
parameters:
- name: --applied-conditional-access-policies
long-summary: |
Usage: --applied-conditional-access-policies conditions-not-satisfied=XX conditions-satisfied=XX \
display-name=XX enforced-grant-controls=XX enforced-session-controls=XX id=XX result=XX
display-name: Refers to the Name of the conditional access policy (example: 'Require MFA for Salesforce').
enforced-grant-controls: Refers to the grant controls enforced by the conditional access policy (example: \
'Require multi-factor authentication').
enforced-session-controls: Refers to the session controls enforced by the conditional access policy \
(example: 'Require app enforced controls').
id: Unique GUID of the conditional access policy.
Multiple actions can be specified by using more than one --applied-conditional-access-policies argument.
- name: --authentication-details
long-summary: |
Usage: --authentication-details authentication-method=XX authentication-method-detail=XX \
authentication-step-date-time=XX authentication-step-requirement=XX authentication-step-result-detail=XX succeeded=XX
Multiple actions can be specified by using more than one --authentication-details argument.
- name: --authentication-processing-details
long-summary: |
Usage: --authentication-processing-details key=XX value=XX
key: Key for the key-value pair.
value: Value for the key-value pair.
Multiple actions can be specified by using more than one --authentication-processing-details argument.
- name: --authentication-requirement-policies
long-summary: |
Usage: --authentication-requirement-policies detail=XX requirement-provider=XX
Multiple actions can be specified by using more than one --authentication-requirement-policies argument.
- name: --device-detail
short-summary: "deviceDetail"
long-summary: |
Usage: --device-detail browser=XX browser-id=XX device-id=XX display-name=XX is-compliant=XX is-managed=XX \
operating-system=XX trust-type=XX
browser: Indicates the browser information of the used for signing in.
device-id: Refers to the UniqueID of the device used for signing in.
display-name: Refers to the name of the device used for signing in.
is-compliant: Indicates whether the device is compliant.
is-managed: Indicates whether the device is managed.
operating-system: Indicates the operating system name and version used for signing in.
trust-type: Provides information about whether the signed-in device is Workplace Joined, AzureAD Joined, \
Domain Joined.
- name: --mfa-detail
short-summary: "mfaDetail"
long-summary: |
Usage: --mfa-detail auth-detail=XX auth-method=XX
- name: --network-location-details
long-summary: |
Usage: --network-location-details network-names=XX network-type=XX
Multiple actions can be specified by using more than one --network-location-details argument.
- name: --status
short-summary: "signInStatus"
long-summary: |
Usage: --status additional-details=XX error-code=XX failure-reason=XX
additional-details: Provides additional details on the sign-in activity
error-code: Provides the 5-6digit error code that's generated during a sign-in failure. Check out the list \
of error codes and messages.
failure-reason: Provides the error message or the reason for failure for the corresponding sign-in \
activity. Check out the list of error codes and messages.
- name: --geo-coordinates
short-summary: "geoCoordinates"
long-summary: |
Usage: --geo-coordinates altitude=XX latitude=XX longitude=XX
altitude: Optional. The altitude (height), in feet, above sea level for the item. Read-only.
latitude: Optional. The latitude, in decimal, for the item. Read-only.
longitude: Optional. The longitude, in decimal, for the item. Read-only.
"""
helps['reports audit-log create-sign-in'] = """
type: command
short-summary: "Create new navigation property to signIns for auditLogs."
parameters:
- name: --applied-conditional-access-policies
long-summary: |
Usage: --applied-conditional-access-policies conditions-not-satisfied=XX conditions-satisfied=XX \
display-name=XX enforced-grant-controls=XX enforced-session-controls=XX id=XX result=XX
display-name: Refers to the Name of the conditional access policy (example: 'Require MFA for Salesforce').
enforced-grant-controls: Refers to the grant controls enforced by the conditional access policy (example: \
'Require multi-factor authentication').
enforced-session-controls: Refers to the session controls enforced by the conditional access policy \
(example: 'Require app enforced controls').
id: Unique GUID of the conditional access policy.
Multiple actions can be specified by using more than one --applied-conditional-access-policies argument.
- name: --authentication-details
long-summary: |
Usage: --authentication-details authentication-method=XX authentication-method-detail=XX \
authentication-step-date-time=XX authentication-step-requirement=XX authentication-step-result-detail=XX succeeded=XX
Multiple actions can be specified by using more than one --authentication-details argument.
- name: --authentication-processing-details
long-summary: |
Usage: --authentication-processing-details key=XX value=XX
key: Key for the key-value pair.
value: Value for the key-value pair.
Multiple actions can be specified by using more than one --authentication-processing-details argument.
- name: --authentication-requirement-policies
long-summary: |
Usage: --authentication-requirement-policies detail=XX requirement-provider=XX
Multiple actions can be specified by using more than one --authentication-requirement-policies argument.
- name: --device-detail
short-summary: "deviceDetail"
long-summary: |
Usage: --device-detail browser=XX browser-id=XX device-id=XX display-name=XX is-compliant=XX is-managed=XX \
operating-system=XX trust-type=XX
browser: Indicates the browser information of the used for signing in.
device-id: Refers to the UniqueID of the device used for signing in.
display-name: Refers to the name of the device used for signing in.
is-compliant: Indicates whether the device is compliant.
is-managed: Indicates whether the device is managed.
operating-system: Indicates the operating system name and version used for signing in.
trust-type: Provides information about whether the signed-in device is Workplace Joined, AzureAD Joined, \
Domain Joined.
- name: --mfa-detail
short-summary: "mfaDetail"
long-summary: |
Usage: --mfa-detail auth-detail=XX auth-method=XX
- name: --network-location-details
long-summary: |
Usage: --network-location-details network-names=XX network-type=XX
Multiple actions can be specified by using more than one --network-location-details argument.
- name: --status
short-summary: "signInStatus"
long-summary: |
Usage: --status additional-details=XX error-code=XX failure-reason=XX
additional-details: Provides additional details on the sign-in activity
error-code: Provides the 5-6digit error code that's generated during a sign-in failure. Check out the list \
of error codes and messages.
failure-reason: Provides the error message or the reason for failure for the corresponding sign-in \
activity. Check out the list of error codes and messages.
- name: --geo-coordinates
short-summary: "geoCoordinates"
long-summary: |
Usage: --geo-coordinates altitude=XX latitude=XX longitude=XX
altitude: Optional. The altitude (height), in feet, above sea level for the item. Read-only.
latitude: Optional. The latitude, in decimal, for the item. Read-only.
longitude: Optional. The longitude, in decimal, for the item. Read-only.
"""
helps['reports audit-log delete-directory-audit'] = """
type: command
short-summary: "Delete navigation property directoryAudits for auditLogs."
"""
helps['reports audit-log delete-directory-provisioning'] = """
type: command
short-summary: "Delete navigation property directoryProvisioning for auditLogs."
"""
helps['reports audit-log delete-provisioning'] = """
type: command
short-summary: "Delete navigation property provisioning for auditLogs."
"""
helps['reports audit-log delete-restricted-sign-in'] = """
type: command
short-summary: "Delete navigation property restrictedSignIns for auditLogs."
"""
helps['reports audit-log delete-sign-in'] = """
type: command
short-summary: "Delete navigation property signIns for auditLogs."
"""
helps['reports audit-log list-directory-audit'] = """
type: command
short-summary: "Get directoryAudits from auditLogs."
"""
helps['reports audit-log list-directory-provisioning'] = """
type: command
short-summary: "Get directoryProvisioning from auditLogs."
"""
helps['reports audit-log list-provisioning'] = """
type: command
short-summary: "Get provisioning from auditLogs."
"""
helps['reports audit-log list-restricted-sign-in'] = """
type: command
short-summary: "Get restrictedSignIns from auditLogs."
"""
helps['reports audit-log list-sign-in'] = """
type: command
short-summary: "Get signIns from auditLogs."
"""
helps['reports audit-log show-directory-audit'] = """
type: command
short-summary: "Get directoryAudits from auditLogs."
"""
helps['reports audit-log show-directory-provisioning'] = """
type: command
short-summary: "Get directoryProvisioning from auditLogs."
"""
helps['reports audit-log show-provisioning'] = """
type: command
short-summary: "Get provisioning from auditLogs."
"""
helps['reports audit-log show-restricted-sign-in'] = """
type: command
short-summary: "Get restrictedSignIns from auditLogs."
"""
helps['reports audit-log show-sign-in'] = """
type: command
short-summary: "Get signIns from auditLogs."
"""
helps['reports audit-log update-directory-audit'] = """
type: command
short-summary: "Update the navigation property directoryAudits in auditLogs."
parameters:
- name: --additional-details
short-summary: "Indicates additional details on the activity."
long-summary: |
Usage: --additional-details key=XX value=XX
key: Key for the key-value pair.
value: Value for the key-value pair.
Multiple actions can be specified by using more than one --additional-details argument.
- name: --app
short-summary: "appIdentity"
long-summary: |
Usage: --app app-id=XX display-name=XX service-principal-id=XX service-principal-name=XX
app-id: Refers to the Unique GUID representing Application Id in the Azure Active Directory.
display-name: Refers to the Application Name displayed in the Azure Portal.
service-principal-id: Refers to the Unique GUID indicating Service Principal Id in Azure Active Directory \
for the corresponding App.
service-principal-name: Refers to the Service Principal Name is the Application name in the tenant.
- name: --user
short-summary: "userIdentity"
long-summary: |
Usage: --user ip-address=XX user-principal-name=XX display-name=XX id=XX
ip-address: Indicates the client IP address used by user performing the activity (audit log only).
user-principal-name: The userPrincipalName attribute of the user.
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
# Help entry for `reports audit-log update-directory-provisioning`.
# Fixed user-facing typo in --modified-properties: "propery" -> "property".
helps['reports audit-log update-directory-provisioning'] = """
    type: command
    short-summary: "Update the navigation property directoryProvisioning in auditLogs."
    parameters:
      - name: --initiated-by
        short-summary: "initiator"
        long-summary: |
            Usage: --initiated-by display-name=XX id=XX initiator-type=XX
      - name: --modified-properties
        long-summary: |
            Usage: --modified-properties display-name=XX new-value=XX old-value=XX
            display-name: Indicates the property name of the target attribute that was changed.
            new-value: Indicates the updated value for the property.
            old-value: Indicates the previous value (before the update) for the property.
            Multiple actions can be specified by using more than one --modified-properties argument.
      - name: --service-principal
        short-summary: "provisioningServicePrincipal"
        long-summary: |
            Usage: --service-principal display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
"""
# Help entry for `reports audit-log update-provisioning`.
# Fixed user-facing typo in --modified-properties: "propery" -> "property".
helps['reports audit-log update-provisioning'] = """
    type: command
    short-summary: "Update the navigation property provisioning in auditLogs."
    parameters:
      - name: --initiated-by
        short-summary: "initiator"
        long-summary: |
            Usage: --initiated-by display-name=XX id=XX initiator-type=XX
      - name: --modified-properties
        long-summary: |
            Usage: --modified-properties display-name=XX new-value=XX old-value=XX
            display-name: Indicates the property name of the target attribute that was changed.
            new-value: Indicates the updated value for the property.
            old-value: Indicates the previous value (before the update) for the property.
            Multiple actions can be specified by using more than one --modified-properties argument.
      - name: --service-principal
        short-summary: "provisioningServicePrincipal"
        long-summary: |
            Usage: --service-principal display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
"""
# Help entry for `reports audit-log update-restricted-sign-in`.
# Fixed user-facing help-text typos: "5-6digit" -> "5-6 digit", the garbled
# "browser information of the used" wording, and a missing sentence period.
helps['reports audit-log update-restricted-sign-in'] = """
    type: command
    short-summary: "Update the navigation property restrictedSignIns in auditLogs."
    parameters:
      - name: --applied-conditional-access-policies
        long-summary: |
            Usage: --applied-conditional-access-policies conditions-not-satisfied=XX conditions-satisfied=XX \
display-name=XX enforced-grant-controls=XX enforced-session-controls=XX id=XX result=XX
            display-name: Refers to the Name of the conditional access policy (example: 'Require MFA for Salesforce').
            enforced-grant-controls: Refers to the grant controls enforced by the conditional access policy (example: \
'Require multi-factor authentication').
            enforced-session-controls: Refers to the session controls enforced by the conditional access policy \
(example: 'Require app enforced controls').
            id: Unique GUID of the conditional access policy.
            Multiple actions can be specified by using more than one --applied-conditional-access-policies argument.
      - name: --authentication-details
        long-summary: |
            Usage: --authentication-details authentication-method=XX authentication-method-detail=XX \
authentication-step-date-time=XX authentication-step-requirement=XX authentication-step-result-detail=XX succeeded=XX
            Multiple actions can be specified by using more than one --authentication-details argument.
      - name: --authentication-processing-details
        long-summary: |
            Usage: --authentication-processing-details key=XX value=XX
            key: Key for the key-value pair.
            value: Value for the key-value pair.
            Multiple actions can be specified by using more than one --authentication-processing-details argument.
      - name: --authentication-requirement-policies
        long-summary: |
            Usage: --authentication-requirement-policies detail=XX requirement-provider=XX
            Multiple actions can be specified by using more than one --authentication-requirement-policies argument.
      - name: --device-detail
        short-summary: "deviceDetail"
        long-summary: |
            Usage: --device-detail browser=XX browser-id=XX device-id=XX display-name=XX is-compliant=XX is-managed=XX \
operating-system=XX trust-type=XX
            browser: Indicates the browser information used for signing in.
            device-id: Refers to the UniqueID of the device used for signing in.
            display-name: Refers to the name of the device used for signing in.
            is-compliant: Indicates whether the device is compliant.
            is-managed: Indicates whether the device is managed.
            operating-system: Indicates the operating system name and version used for signing in.
            trust-type: Provides information about whether the signed-in device is Workplace Joined, AzureAD Joined, \
Domain Joined.
      - name: --mfa-detail
        short-summary: "mfaDetail"
        long-summary: |
            Usage: --mfa-detail auth-detail=XX auth-method=XX
      - name: --network-location-details
        long-summary: |
            Usage: --network-location-details network-names=XX network-type=XX
            Multiple actions can be specified by using more than one --network-location-details argument.
      - name: --status
        short-summary: "signInStatus"
        long-summary: |
            Usage: --status additional-details=XX error-code=XX failure-reason=XX
            additional-details: Provides additional details on the sign-in activity.
            error-code: Provides the 5-6 digit error code that's generated during a sign-in failure. Check out the list \
of error codes and messages.
            failure-reason: Provides the error message or the reason for failure for the corresponding sign-in \
activity. Check out the list of error codes and messages.
      - name: --geo-coordinates
        short-summary: "geoCoordinates"
        long-summary: |
            Usage: --geo-coordinates altitude=XX latitude=XX longitude=XX
            altitude: Optional. The altitude (height), in feet, above sea level for the item. Read-only.
            latitude: Optional. The latitude, in decimal, for the item. Read-only.
            longitude: Optional. The longitude, in decimal, for the item. Read-only.
"""
# Help entry for `reports audit-log update-sign-in`.
# Fixed user-facing help-text typos: "5-6digit" -> "5-6 digit", the garbled
# "browser information of the used" wording, and a missing sentence period.
helps['reports audit-log update-sign-in'] = """
    type: command
    short-summary: "Update the navigation property signIns in auditLogs."
    parameters:
      - name: --applied-conditional-access-policies
        long-summary: |
            Usage: --applied-conditional-access-policies conditions-not-satisfied=XX conditions-satisfied=XX \
display-name=XX enforced-grant-controls=XX enforced-session-controls=XX id=XX result=XX
            display-name: Refers to the Name of the conditional access policy (example: 'Require MFA for Salesforce').
            enforced-grant-controls: Refers to the grant controls enforced by the conditional access policy (example: \
'Require multi-factor authentication').
            enforced-session-controls: Refers to the session controls enforced by the conditional access policy \
(example: 'Require app enforced controls').
            id: Unique GUID of the conditional access policy.
            Multiple actions can be specified by using more than one --applied-conditional-access-policies argument.
      - name: --authentication-details
        long-summary: |
            Usage: --authentication-details authentication-method=XX authentication-method-detail=XX \
authentication-step-date-time=XX authentication-step-requirement=XX authentication-step-result-detail=XX succeeded=XX
            Multiple actions can be specified by using more than one --authentication-details argument.
      - name: --authentication-processing-details
        long-summary: |
            Usage: --authentication-processing-details key=XX value=XX
            key: Key for the key-value pair.
            value: Value for the key-value pair.
            Multiple actions can be specified by using more than one --authentication-processing-details argument.
      - name: --authentication-requirement-policies
        long-summary: |
            Usage: --authentication-requirement-policies detail=XX requirement-provider=XX
            Multiple actions can be specified by using more than one --authentication-requirement-policies argument.
      - name: --device-detail
        short-summary: "deviceDetail"
        long-summary: |
            Usage: --device-detail browser=XX browser-id=XX device-id=XX display-name=XX is-compliant=XX is-managed=XX \
operating-system=XX trust-type=XX
            browser: Indicates the browser information used for signing in.
            device-id: Refers to the UniqueID of the device used for signing in.
            display-name: Refers to the name of the device used for signing in.
            is-compliant: Indicates whether the device is compliant.
            is-managed: Indicates whether the device is managed.
            operating-system: Indicates the operating system name and version used for signing in.
            trust-type: Provides information about whether the signed-in device is Workplace Joined, AzureAD Joined, \
Domain Joined.
      - name: --mfa-detail
        short-summary: "mfaDetail"
        long-summary: |
            Usage: --mfa-detail auth-detail=XX auth-method=XX
      - name: --network-location-details
        long-summary: |
            Usage: --network-location-details network-names=XX network-type=XX
            Multiple actions can be specified by using more than one --network-location-details argument.
      - name: --status
        short-summary: "signInStatus"
        long-summary: |
            Usage: --status additional-details=XX error-code=XX failure-reason=XX
            additional-details: Provides additional details on the sign-in activity.
            error-code: Provides the 5-6 digit error code that's generated during a sign-in failure. Check out the list \
of error codes and messages.
            failure-reason: Provides the error message or the reason for failure for the corresponding sign-in \
activity. Check out the list of error codes and messages.
      - name: --geo-coordinates
        short-summary: "geoCoordinates"
        long-summary: |
            Usage: --geo-coordinates altitude=XX latitude=XX longitude=XX
            altitude: Optional. The altitude (height), in feet, above sea level for the item. Read-only.
            latitude: Optional. The latitude, in decimal, for the item. Read-only.
            longitude: Optional. The longitude, in decimal, for the item. Read-only.
"""
helps['reports report-root'] = """
type: group
short-summary: Manage report report root with reports_beta
"""
helps['reports report-root show-report-root'] = """
type: command
short-summary: "Get reports."
"""
helps['reports report-root update-report-root'] = """
type: command
short-summary: "Update reports."
parameters:
- name: --credential-user-registration-details
long-summary: |
Usage: --credential-user-registration-details auth-methods=XX is-capable=XX is-enabled=XX \
is-mfa-registered=XX is-registered=XX user-display-name=XX user-principal-name=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --credential-user-registration-details argument.
- name: --user-credential-usage-details
long-summary: |
Usage: --user-credential-usage-details auth-method=XX event-date-time=XX failure-reason=XX feature=XX \
is-success=XX user-display-name=XX user-principal-name=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --user-credential-usage-details argument.
- name: --daily-print-usage-summaries-by-printer
long-summary: |
Usage: --daily-print-usage-summaries-by-printer completed-black-and-white-job-count=XX \
completed-color-job-count=XX incomplete-job-count=XX printer-id=XX usage-date=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --daily-print-usage-summaries-by-printer \
argument.
- name: --daily-print-usage-summaries-by-user
long-summary: |
Usage: --daily-print-usage-summaries-by-user completed-black-and-white-job-count=XX \
completed-color-job-count=XX incomplete-job-count=XX usage-date=XX user-principal-name=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --daily-print-usage-summaries-by-user argument.
- name: --monthly-print-usage-summaries-by-printer
long-summary: |
Usage: --monthly-print-usage-summaries-by-printer completed-black-and-white-job-count=XX \
completed-color-job-count=XX incomplete-job-count=XX printer-id=XX usage-date=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --monthly-print-usage-summaries-by-printer \
argument.
- name: --monthly-print-usage-summaries-by-user
long-summary: |
Usage: --monthly-print-usage-summaries-by-user completed-black-and-white-job-count=XX \
completed-color-job-count=XX incomplete-job-count=XX usage-date=XX user-principal-name=XX id=XX
id: Read-only.
Multiple actions can be specified by using more than one --monthly-print-usage-summaries-by-user argument.
"""
helps['reports report'] = """
type: group
short-summary: Manage report with reports_beta
"""
# Help entry for `reports report create-application-sign-in-detailed-summary`.
# Fixed user-facing typos in the signInStatus text: "5-6digit" -> "5-6 digit"
# and a missing sentence period.
helps['reports report create-application-sign-in-detailed-summary'] = """
    type: command
    short-summary: "Create new navigation property to applicationSignInDetailedSummary for reports."
    parameters:
      - name: --status
        short-summary: "signInStatus"
        long-summary: |
            Usage: --status additional-details=XX error-code=XX failure-reason=XX
            additional-details: Provides additional details on the sign-in activity.
            error-code: Provides the 5-6 digit error code that's generated during a sign-in failure. Check out the list \
of error codes and messages.
            failure-reason: Provides the error message or the reason for failure for the corresponding sign-in \
activity. Check out the list of error codes and messages.
"""
helps['reports report create-credential-user-registration-detail'] = """
type: command
short-summary: "Create new navigation property to credentialUserRegistrationDetails for reports."
"""
helps['reports report create-daily-print-usage-summary-by-printer'] = """
type: command
short-summary: "Create new navigation property to dailyPrintUsageSummariesByPrinter for reports."
"""
helps['reports report create-daily-print-usage-summary-by-user'] = """
type: command
short-summary: "Create new navigation property to dailyPrintUsageSummariesByUser for reports."
"""
helps['reports report create-monthly-print-usage-summary-by-printer'] = """
type: command
short-summary: "Create new navigation property to monthlyPrintUsageSummariesByPrinter for reports."
"""
helps['reports report create-monthly-print-usage-summary-by-user'] = """
type: command
short-summary: "Create new navigation property to monthlyPrintUsageSummariesByUser for reports."
"""
helps['reports report create-user-credential-usage-detail'] = """
type: command
short-summary: "Create new navigation property to userCredentialUsageDetails for reports."
"""
helps['reports report delete-application-sign-in-detailed-summary'] = """
type: command
short-summary: "Delete navigation property applicationSignInDetailedSummary for reports."
"""
helps['reports report delete-credential-user-registration-detail'] = """
type: command
short-summary: "Delete navigation property credentialUserRegistrationDetails for reports."
"""
helps['reports report delete-daily-print-usage-summary'] = """
type: command
short-summary: "Delete navigation property dailyPrintUsageSummariesByPrinter for reports And Delete navigation \
property dailyPrintUsageSummariesByUser for reports."
"""
helps['reports report delete-monthly-print-usage-summary'] = """
type: command
short-summary: "Delete navigation property monthlyPrintUsageSummariesByPrinter for reports And Delete navigation \
property monthlyPrintUsageSummariesByUser for reports."
"""
helps['reports report delete-user-credential-usage-detail'] = """
type: command
short-summary: "Delete navigation property userCredentialUsageDetails for reports."
"""
helps['reports report device-configuration-device-activity'] = """
type: command
short-summary: "Invoke function deviceConfigurationDeviceActivity."
"""
helps['reports report device-configuration-user-activity'] = """
type: command
short-summary: "Invoke function deviceConfigurationUserActivity."
"""
helps['reports report list-application-sign-in-detailed-summary'] = """
type: command
short-summary: "Get applicationSignInDetailedSummary from reports."
"""
helps['reports report list-credential-user-registration-detail'] = """
type: command
short-summary: "Get credentialUserRegistrationDetails from reports."
"""
helps['reports report list-daily-print-usage-summary'] = """
type: command
short-summary: "Get dailyPrintUsageSummariesByPrinter from reports And Get dailyPrintUsageSummariesByUser from \
reports."
"""
helps['reports report list-monthly-print-usage-summary'] = """
type: command
short-summary: "Get monthlyPrintUsageSummariesByPrinter from reports And Get monthlyPrintUsageSummariesByUser from \
reports."
"""
helps['reports report list-user-credential-usage-detail'] = """
type: command
short-summary: "Get userCredentialUsageDetails from reports."
"""
helps['reports report managed-device-enrollment-abandonment-detail'] = """
type: command
short-summary: "Invoke function managedDeviceEnrollmentAbandonmentDetails."
"""
helps['reports report managed-device-enrollment-abandonment-summary'] = """
type: command
short-summary: "Invoke function managedDeviceEnrollmentAbandonmentSummary."
"""
helps['reports report managed-device-enrollment-failure-details027-e'] = """
type: command
short-summary: "Invoke function managedDeviceEnrollmentFailureDetails."
"""
helps['reports report managed-device-enrollment-failure-details2-b3-d'] = """
type: command
short-summary: "Invoke function managedDeviceEnrollmentFailureDetails."
"""
helps['reports report managed-device-enrollment-failure-trend'] = """
type: command
short-summary: "Invoke function managedDeviceEnrollmentFailureTrends."
"""
helps['reports report managed-device-enrollment-top-failure-afd1'] = """
type: command
short-summary: "Invoke function managedDeviceEnrollmentTopFailures."
"""
helps['reports report managed-device-enrollment-top-failures4669'] = """
type: command
short-summary: "Invoke function managedDeviceEnrollmentTopFailures."
"""
helps['reports report show-application-sign-in-detailed-summary'] = """
type: command
short-summary: "Get applicationSignInDetailedSummary from reports."
"""
helps['reports report show-azure-ad-application-sign-in-summary'] = """
type: command
short-summary: "Invoke function getAzureADApplicationSignInSummary."
"""
helps['reports report show-azure-ad-feature-usage'] = """
type: command
short-summary: "Invoke function getAzureADFeatureUsage."
"""
helps['reports report show-azure-ad-license-usage'] = """
type: command
short-summary: "Invoke function getAzureADLicenseUsage."
"""
helps['reports report show-azure-ad-user-feature-usage'] = """
type: command
short-summary: "Invoke function getAzureADUserFeatureUsage."
"""
helps['reports report show-credential-usage-summary'] = """
type: command
short-summary: "Invoke function getCredentialUsageSummary."
"""
helps['reports report show-credential-user-registration-count'] = """
type: command
short-summary: "Invoke function getCredentialUserRegistrationCount."
"""
helps['reports report show-credential-user-registration-detail'] = """
type: command
short-summary: "Get credentialUserRegistrationDetails from reports."
"""
helps['reports report show-daily-print-usage-summary'] = """
type: command
short-summary: "Get dailyPrintUsageSummariesByPrinter from reports And Get dailyPrintUsageSummariesByUser from \
reports."
"""
helps['reports report show-email-activity-count'] = """
type: command
short-summary: "Invoke function getEmailActivityCounts."
"""
helps['reports report show-email-activity-user-count'] = """
type: command
short-summary: "Invoke function getEmailActivityUserCounts."
"""
helps['reports report show-email-activity-user-detail-ddb2'] = """
type: command
short-summary: "Invoke function getEmailActivityUserDetail."
"""
helps['reports report show-email-activity-user-detail-fe32'] = """
type: command
short-summary: "Invoke function getEmailActivityUserDetail."
"""
helps['reports report show-email-app-usage-app-user-count'] = """
type: command
short-summary: "Invoke function getEmailAppUsageAppsUserCounts."
"""
helps['reports report show-email-app-usage-user-count'] = """
type: command
short-summary: "Invoke function getEmailAppUsageUserCounts."
"""
helps['reports report show-email-app-usage-user-detail546-b'] = """
type: command
short-summary: "Invoke function getEmailAppUsageUserDetail."
"""
helps['reports report show-email-app-usage-user-detail62-ec'] = """
type: command
short-summary: "Invoke function getEmailAppUsageUserDetail."
"""
helps['reports report show-email-app-usage-version-user-count'] = """
type: command
short-summary: "Invoke function getEmailAppUsageVersionsUserCounts."
"""
helps['reports report show-m365-app-platform-user-count'] = """
type: command
short-summary: "Invoke function getM365AppPlatformUserCounts."
"""
helps['reports report show-m365-app-user-count'] = """
type: command
short-summary: "Invoke function getM365AppUserCounts."
"""
helps['reports report show-m365-app-user-detail-c8-df'] = """
type: command
short-summary: "Invoke function getM365AppUserDetail."
"""
helps['reports report show-m365-app-user-detail2-b20'] = """
type: command
short-summary: "Invoke function getM365AppUserDetail."
"""
helps['reports report show-mailbox-usage-detail'] = """
type: command
short-summary: "Invoke function getMailboxUsageDetail."
"""
helps['reports report show-mailbox-usage-mailbox-count'] = """
type: command
short-summary: "Invoke function getMailboxUsageMailboxCounts."
"""
helps['reports report show-mailbox-usage-quota-status-mailbox-count'] = """
type: command
short-summary: "Invoke function getMailboxUsageQuotaStatusMailboxCounts."
"""
helps['reports report show-mailbox-usage-storage'] = """
type: command
short-summary: "Invoke function getMailboxUsageStorage."
"""
helps['reports report show-monthly-print-usage-summary'] = """
type: command
short-summary: "Get monthlyPrintUsageSummariesByPrinter from reports And Get monthlyPrintUsageSummariesByUser from \
reports."
"""
helps['reports report show-office365-activation-count'] = """
type: command
short-summary: "Invoke function getOffice365ActivationCounts."
"""
helps['reports report show-office365-activation-user-count'] = """
type: command
short-summary: "Invoke function getOffice365ActivationsUserCounts."
"""
helps['reports report show-office365-activation-user-detail'] = """
type: command
short-summary: "Invoke function getOffice365ActivationsUserDetail."
"""
helps['reports report show-office365-active-user-count'] = """
type: command
short-summary: "Invoke function getOffice365ActiveUserCounts."
"""
helps['reports report show-office365-active-user-detail-d389'] = """
type: command
short-summary: "Invoke function getOffice365ActiveUserDetail."
"""
helps['reports report show-office365-active-user-detail68-ad'] = """
type: command
short-summary: "Invoke function getOffice365ActiveUserDetail."
"""
helps['reports report show-office365-group-activity-count'] = """
type: command
short-summary: "Invoke function getOffice365GroupsActivityCounts."
"""
helps['reports report show-office365-group-activity-detail38-f6'] = """
type: command
short-summary: "Invoke function getOffice365GroupsActivityDetail."
"""
helps['reports report show-office365-group-activity-detail81-cc'] = """
type: command
short-summary: "Invoke function getOffice365GroupsActivityDetail."
"""
helps['reports report show-office365-group-activity-file-count'] = """
type: command
short-summary: "Invoke function getOffice365GroupsActivityFileCounts."
"""
helps['reports report show-office365-group-activity-group-count'] = """
type: command
short-summary: "Invoke function getOffice365GroupsActivityGroupCounts."
"""
helps['reports report show-office365-group-activity-storage'] = """
type: command
short-summary: "Invoke function getOffice365GroupsActivityStorage."
"""
helps['reports report show-office365-service-user-count'] = """
type: command
short-summary: "Invoke function getOffice365ServicesUserCounts."
"""
helps['reports report show-one-drive-activity-file-count'] = """
type: command
short-summary: "Invoke function getOneDriveActivityFileCounts."
"""
helps['reports report show-one-drive-activity-user-count'] = """
type: command
short-summary: "Invoke function getOneDriveActivityUserCounts."
"""
helps['reports report show-one-drive-activity-user-detail-c424'] = """
type: command
short-summary: "Invoke function getOneDriveActivityUserDetail."
"""
helps['reports report show-one-drive-activity-user-detail05-f1'] = """
type: command
short-summary: "Invoke function getOneDriveActivityUserDetail."
"""
helps['reports report show-one-drive-usage-account-count'] = """
type: command
short-summary: "Invoke function getOneDriveUsageAccountCounts."
"""
helps['reports report show-one-drive-usage-account-detail-dd7-f'] = """
type: command
short-summary: "Invoke function getOneDriveUsageAccountDetail."
"""
helps['reports report show-one-drive-usage-account-detail-e827'] = """
type: command
short-summary: "Invoke function getOneDriveUsageAccountDetail."
"""
helps['reports report show-one-drive-usage-file-count'] = """
type: command
short-summary: "Invoke function getOneDriveUsageFileCounts."
"""
helps['reports report show-one-drive-usage-storage'] = """
type: command
short-summary: "Invoke function getOneDriveUsageStorage."
"""
helps['reports report show-relying-party-detailed-summary'] = """
type: command
short-summary: "Invoke function getRelyingPartyDetailedSummary."
"""
helps['reports report show-share-point-activity-file-count'] = """
type: command
short-summary: "Invoke function getSharePointActivityFileCounts."
"""
helps['reports report show-share-point-activity-page'] = """
type: command
short-summary: "Invoke function getSharePointActivityPages."
"""
helps['reports report show-share-point-activity-user-count'] = """
type: command
short-summary: "Invoke function getSharePointActivityUserCounts."
"""
helps['reports report show-share-point-activity-user-detail-b778'] = """
type: command
short-summary: "Invoke function getSharePointActivityUserDetail."
"""
helps['reports report show-share-point-activity-user-detail-f3-be'] = """
type: command
short-summary: "Invoke function getSharePointActivityUserDetail."
"""
helps['reports report show-share-point-site-usage-detail-d27-a'] = """
type: command
short-summary: "Invoke function getSharePointSiteUsageDetail."
"""
helps['reports report show-share-point-site-usage-detail204-b'] = """
type: command
short-summary: "Invoke function getSharePointSiteUsageDetail."
"""
helps['reports report show-share-point-site-usage-file-count'] = """
type: command
short-summary: "Invoke function getSharePointSiteUsageFileCounts."
"""
helps['reports report show-share-point-site-usage-page'] = """
type: command
short-summary: "Invoke function getSharePointSiteUsagePages."
"""
helps['reports report show-share-point-site-usage-site-count'] = """
type: command
short-summary: "Invoke function getSharePointSiteUsageSiteCounts."
"""
helps['reports report show-share-point-site-usage-storage'] = """
type: command
short-summary: "Invoke function getSharePointSiteUsageStorage."
"""
helps['reports report show-skype-for-business-activity-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessActivityCounts."
"""
helps['reports report show-skype-for-business-activity-user-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessActivityUserCounts."
"""
helps['reports report show-skype-for-business-activity-user-detail-e4-c9'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessActivityUserDetail."
"""
helps['reports report show-skype-for-business-activity-user-detail744-e'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessActivityUserDetail."
"""
helps['reports report show-skype-for-business-device-usage-distribution-user-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessDeviceUsageDistributionUserCounts."
"""
helps['reports report show-skype-for-business-device-usage-user-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessDeviceUsageUserCounts."
"""
helps['reports report show-skype-for-business-device-usage-user-detail-a692'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessDeviceUsageUserDetail."
"""
helps['reports report show-skype-for-business-device-usage-user-detail-e753'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessDeviceUsageUserDetail."
"""
helps['reports report show-skype-for-business-organizer-activity-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessOrganizerActivityCounts."
"""
helps['reports report show-skype-for-business-organizer-activity-minute-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessOrganizerActivityMinuteCounts."
"""
helps['reports report show-skype-for-business-organizer-activity-user-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessOrganizerActivityUserCounts."
"""
helps['reports report show-skype-for-business-participant-activity-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessParticipantActivityCounts."
"""
helps['reports report show-skype-for-business-participant-activity-minute-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessParticipantActivityMinuteCounts."
"""
helps['reports report show-skype-for-business-participant-activity-user-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessParticipantActivityUserCounts."
"""
helps['reports report show-skype-for-business-peer-to-peer-activity-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessPeerToPeerActivityCounts."
"""
helps['reports report show-skype-for-business-peer-to-peer-activity-minute-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessPeerToPeerActivityMinuteCounts."
"""
helps['reports report show-skype-for-business-peer-to-peer-activity-user-count'] = """
type: command
short-summary: "Invoke function getSkypeForBusinessPeerToPeerActivityUserCounts."
"""
helps['reports report show-team-device-usage-distribution-user-count'] = """
type: command
short-summary: "Invoke function getTeamsDeviceUsageDistributionUserCounts."
"""
helps['reports report show-team-device-usage-user-count'] = """
type: command
short-summary: "Invoke function getTeamsDeviceUsageUserCounts."
"""
helps['reports report show-team-device-usage-user-detail7148'] = """
type: command
short-summary: "Invoke function getTeamsDeviceUsageUserDetail."
"""
helps['reports report show-team-device-usage-user-detail7565'] = """
type: command
short-summary: "Invoke function getTeamsDeviceUsageUserDetail."
"""
helps['reports report show-team-user-activity-count'] = """
type: command
short-summary: "Invoke function getTeamsUserActivityCounts."
"""
helps['reports report show-team-user-activity-user-count'] = """
type: command
short-summary: "Invoke function getTeamsUserActivityUserCounts."
"""
helps['reports report show-team-user-activity-user-detail-a3-f1'] = """
type: command
short-summary: "Invoke function getTeamsUserActivityUserDetail."
"""
helps['reports report show-team-user-activity-user-detail-eb13'] = """
type: command
short-summary: "Invoke function getTeamsUserActivityUserDetail."
"""
helps['reports report show-tenant-secure-score'] = """
type: command
short-summary: "Invoke function getTenantSecureScores."
"""
helps['reports report show-user-credential-usage-detail'] = """
type: command
short-summary: "Get userCredentialUsageDetails from reports."
"""
helps['reports report show-yammer-activity-count'] = """
type: command
short-summary: "Invoke function getYammerActivityCounts."
"""
helps['reports report show-yammer-activity-user-count'] = """
type: command
short-summary: "Invoke function getYammerActivityUserCounts."
"""
helps['reports report show-yammer-activity-user-detail-ac30'] = """
type: command
short-summary: "Invoke function getYammerActivityUserDetail."
"""
helps['reports report show-yammer-activity-user-detail15-a5'] = """
type: command
short-summary: "Invoke function getYammerActivityUserDetail."
"""
helps['reports report show-yammer-device-usage-distribution-user-count'] = """
type: command
short-summary: "Invoke function getYammerDeviceUsageDistributionUserCounts."
"""
helps['reports report show-yammer-device-usage-user-count'] = """
type: command
short-summary: "Invoke function getYammerDeviceUsageUserCounts."
"""
helps['reports report show-yammer-device-usage-user-detail-cfad'] = """
type: command
short-summary: "Invoke function getYammerDeviceUsageUserDetail."
"""
helps['reports report show-yammer-device-usage-user-detail-d0-ac'] = """
type: command
short-summary: "Invoke function getYammerDeviceUsageUserDetail."
"""
helps['reports report show-yammer-group-activity-count'] = """
type: command
short-summary: "Invoke function getYammerGroupsActivityCounts."
"""
helps['reports report show-yammer-group-activity-detail-da9-a'] = """
type: command
short-summary: "Invoke function getYammerGroupsActivityDetail."
"""
helps['reports report show-yammer-group-activity-detail0-d7-d'] = """
type: command
short-summary: "Invoke function getYammerGroupsActivityDetail."
"""
helps['reports report show-yammer-group-activity-group-count'] = """
type: command
short-summary: "Invoke function getYammerGroupsActivityGroupCounts."
"""
helps['reports report update-application-sign-in-detailed-summary'] = """
type: command
short-summary: "Update the navigation property applicationSignInDetailedSummary in reports."
parameters:
- name: --status
short-summary: "signInStatus"
long-summary: |
Usage: --status additional-details=XX error-code=XX failure-reason=XX
additional-details: Provides additional details on the sign-in activity
error-code: Provides the 5-6digit error code that's generated during a sign-in failure. Check out the list \
of error codes and messages.
failure-reason: Provides the error message or the reason for failure for the corresponding sign-in \
activity. Check out the list of error codes and messages.
"""
helps['reports report update-credential-user-registration-detail'] = """
type: command
short-summary: "Update the navigation property credentialUserRegistrationDetails in reports."
"""
helps['reports report update-daily-print-usage-summary-by-printer'] = """
type: command
short-summary: "Update the navigation property dailyPrintUsageSummariesByPrinter in reports."
"""
helps['reports report update-daily-print-usage-summary-by-user'] = """
type: command
short-summary: "Update the navigation property dailyPrintUsageSummariesByUser in reports."
"""
helps['reports report update-monthly-print-usage-summary-by-printer'] = """
type: command
short-summary: "Update the navigation property monthlyPrintUsageSummariesByPrinter in reports."
"""
helps['reports report update-monthly-print-usage-summary-by-user'] = """
type: command
short-summary: "Update the navigation property monthlyPrintUsageSummariesByUser in reports."
"""
helps['reports report update-user-credential-usage-detail'] = """
type: command
short-summary: "Update the navigation property userCredentialUsageDetails in reports."
"""
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
2717eda646b498b47de156b64448a2edde9c8e84 | adce0de4c11887519b8e471f1cbca4e18b46d906 | /h0rton/tdlmc_utils/__init__.py | 8bb4883f2bc5cbe39977a9e7fde986993408766b | [
"MIT"
] | permissive | jiwoncpark/h0rton | 30ca4a3c9943099ecd393e4b936b48cad7d81943 | 2541885d70d090fdb777339cfb77a3a9f3e7996d | refs/heads/master | 2021-06-25T23:08:26.902632 | 2021-01-12T01:57:47 | 2021-01-12T01:57:47 | 199,093,811 | 7 | 1 | null | 2020-03-19T16:02:01 | 2019-07-26T23:56:49 | Jupyter Notebook | UTF-8 | Python | false | false | 86 | py | from .tdlmc_parser import *
from .reorder_images import *
from .tdlmc_metrics import * | [
"jiwon.christine.park@gmail.com"
] | jiwon.christine.park@gmail.com |
9f417eb14d8abcbced54fe8f9fe566e3e40b832b | a7d1030cb797b862b87ee3e8b8a206814d26eee2 | /mp32wav | 4a3f91cf65190620fa86b58032fd439474bf2f5e | [] | no_license | lmanul/sak | 8bdf98d2e463f3e171aa79b82557cd4d6ade2724 | 37604f1d0dc61373bd24d73d742afe9c754e62a3 | refs/heads/master | 2023-08-30T07:51:04.727676 | 2023-08-27T06:09:46 | 2023-08-27T06:09:46 | 144,207,029 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | #!/usr/bin/python3
import os
import sys
import subprocess

# Convert the MP3 given as the first command-line argument to a WAV file
# placed next to it (same base name, .wav extension).
mp3 = sys.argv[1]

# Replace only the file extension. The previous str.replace("mp3", "wav")
# rewrote *every* occurrence of "mp3" in the path, corrupting names such as
# "mp3s/song.mp3"; splitext touches the extension alone and handles any case
# of ".mp3"/".MP3" uniformly.
base, _ext = os.path.splitext(mp3)
wav = base + ".wav"

# Invoke ffmpeg without a shell: passing an argument list prevents filenames
# containing spaces or shell metacharacters from breaking the command or
# injecting extra ones (os.system built a shell string from user input).
# Like the original os.system call, a non-zero ffmpeg exit is not treated
# as fatal.
subprocess.run(["ffmpeg", "-i", mp3, wav])
| [
"m@ma.nu"
] | m@ma.nu | |
46cf4fbeb1bcfa982b10cd663c29fdddcee01b4c | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2921/58586/259025.py | 986b891f3ca8d9ab2517f01c7e63aa7d53fae1c9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | [n,m,d]=list(map(int,input().split(" ")))
# Read the n x m grid (n, m and the step d are parsed from the first input
# line, which lies above this chunk) and flatten it into a single list.
arr=[]
for i in range(n):
    temp=list(map(int,input().split(" ")))
    for j in range(m):
        arr.append(temp[j])
arr.sort()
# Scan the sorted values: every value must differ from the smallest by a
# multiple of d, otherwise the grid can never be equalised with +/- d moves.
start=arr[0]          # value of the current run of equal elements
flag=False            # set when two values are not congruent modulo d
final={}              # distinct value -> number of occurrences
index=0               # start index (in arr) of the current run
for i in range(1,len(arr)):
    if arr[i]!=start:
        if (arr[i]-start)%d:
            flag=True
            break
        else:
            final.setdefault(start,i-index)
            index=i
            start=arr[i]
if flag:
    print(-1)
else:
    # Close the last run, then try every distinct value as the common target
    # and keep the cheapest total move count (each move changes one cell by d).
    final.setdefault(start,len(arr)-index)
    # NOTE(review): 1000000007 acts as "infinity" here; a true cost above it
    # would be reported wrongly -- confirm against the problem's constraints.
    mix=1000000007
    for i in final.keys():
        sum=0
        for j in final.keys():
            if j!=i:
                sum+=(abs(j-i)*final[j])//d
        mix=min(mix,sum)
    print(mix)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
c90e8ed2cdc4ea72c332363db1794fa3f0f34920 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class2485.py | 6b13a2466853ebc3044175c1edfaec9ec5c78dc7 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | # qubit number=4
# total number=43
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR the bit-strings *s* and *t* position-wise and return the result reversed.

    ``s[i]`` is paired with ``t[i]`` for every index of *s*; the output is the
    sequence of XORed bits in reverse order (little-endian w.r.t. the inputs).
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Return the mod-2 inner product (parity) of the bit-strings *s* and *t* as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the bit-flip oracle O_f on ``n`` control qubits plus one target.

    For every n-bit string ``rep`` with ``f(rep) == "1"`` the target qubit is
    flipped: the matching basis state is mapped onto |1...1> with X gates, an
    n-controlled Toffoli fires, then the X gates are undone. Cost grows as
    O(2**n) oracle terms, so this is only practical for small ``n``.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Flip the 0-bits so the multi-controlled Toffoli triggers
            # exactly on the basis state |rep>.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the X gates to restore the control register.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the auto-generated n-qubit benchmark circuit.

    A fixed preamble of gates acts on qubits 0 and 3, followed by a
    Deutsch-Jozsa-style layer: Hadamards on every qubit, the oracle for ``f``
    over qubits 0..n-2 with qubit n-1 as target, and a closing Hadamard layer.
    The ``# number=...`` tags are the generator's bookkeeping; the gate order
    is significant and must not be altered.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=24
    prog.cz(input_qubit[0],input_qubit[3]) # number=25
    prog.h(input_qubit[3]) # number=26
    prog.h(input_qubit[3]) # number=21
    prog.cz(input_qubit[0],input_qubit[3]) # number=22
    prog.h(input_qubit[3]) # number=23
    prog.h(input_qubit[3]) # number=27
    prog.cz(input_qubit[0],input_qubit[3]) # number=28
    prog.h(input_qubit[3]) # number=29
    prog.h(input_qubit[3]) # number=37
    prog.cz(input_qubit[0],input_qubit[3]) # number=38
    prog.h(input_qubit[3]) # number=39
    prog.x(input_qubit[3]) # number=31
    prog.h(input_qubit[3]) # number=33
    prog.cz(input_qubit[0],input_qubit[3]) # number=34
    prog.h(input_qubit[3]) # number=35
    prog.h(input_qubit[3]) # number=40
    prog.cz(input_qubit[0],input_qubit[3]) # number=41
    prog.h(input_qubit[3]) # number=42
    prog.rx(-0.364424747816416,input_qubit[3]) # number=36
    prog.y(input_qubit[3]) # number=20
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.cx(input_qubit[0],input_qubit[3]) # number=12
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=19
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    # circuit end
    return prog
if __name__ == '__main__':
    # f(rep) = (a . rep) xor b -- a Bernstein-Vazirani-style boolean function.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    backend = BasicAer.get_backend('statevector_simulator')
    # NOTE(review): sample_shot is defined but never used here -- it appears
    # to be kept for parity with the sampling-based sibling benchmarks.
    sample_shot =8000
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Turn the statevector into {basis-state bit-string: probability},
    # rounded to 3 decimals (|amplitude|^2 per basis state).
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Transpile against a mocked device to record the compiled gate count.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump probabilities, an end marker, the transpiled depth/length and the
    # circuit drawing to the benchmark's CSV output file.
    writefile = open("../data/startQiskit_Class2485.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
37c32bed552b8bc0037a4d7c3e13c1478324fb9a | a89b9dbcd0af4a98303651e73bdcc100bd7bf8f2 | /lib/node.py | fd373f513300941bfa8ca89dc179e7ab4be52ec6 | [] | no_license | freeman1981/algorithms | f5b9065db7ea5ede108a70ad9276630b329a6cbf | 312d1253c2bf0a53bc346abee9b51369037f93a6 | refs/heads/master | 2021-09-04T16:03:57.791928 | 2018-01-20T05:13:14 | 2018-01-20T05:13:14 | 115,587,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | class Node:
    def __init__(self, init_data):
        """Create a singly-linked-list node holding *init_data* with no successor."""
        self._data = init_data
        self._next = None
    def get_data(self):
        """Return the payload stored in this node."""
        return self._data
    def get_next(self):
        """Return the next node in the list, or None at the tail."""
        return self._next
    def set_data(self, new_data):
        """Replace this node's payload with *new_data*."""
        self._data = new_data
    def set_next(self, new_next):
        """Re-link this node so *new_next* becomes its successor."""
        self._next = new_next
| [
"a"
] | a |
e184ae7009848ce2548c4df955b05fda154381b6 | 375e5bca82843647941068bd7634cf7adf2015ca | /tests/test_sequences_reservoir.py | 381cd3d7128ebc9be2b68e3abab2b2c2ada57282 | [
"MIT"
] | permissive | civodlu/trw | cd57e7bded7fdb0a9d623ed9cd50645fab96583b | 11c59dea0072d940b036166be22b392bb9e3b066 | refs/heads/master | 2023-02-08T09:56:39.203340 | 2023-02-07T14:22:16 | 2023-02-07T14:22:16 | 195,147,670 | 12 | 2 | MIT | 2020-10-19T15:24:11 | 2019-07-04T01:19:31 | Python | UTF-8 | Python | false | false | 18,334 | py | from unittest import TestCase
import trw
import trw.train
import numpy as np
import time
import collections
import torch
import functools
import trw.utils
def function_to_run(batch):
    """Identity reservoir job: return *batch* unchanged."""
    return batch
def function_to_run_id(batch):
    """Identity reservoir job: return *batch* unchanged."""
    return batch
def function_to_run_id_wait(batch, wait_time: float, text=None):
    """Identity job that simulates *wait_time* seconds of work.

    When *text* is provided it is echoed with the batch so the tests can
    trace when background reservoir jobs complete.
    """
    time.sleep(wait_time)
    if text is None:
        return batch
    print(text, batch)
    return batch
def function_to_run_multiple_wait(batch, nb: int, wait_time: float, text=None):
    """Slow job that expands one sample into *nb* derived values.

    Sleeps *wait_time* seconds, then returns a batch whose 'path2' tensor
    holds ``nb`` consecutive values starting at ``100 * float(batch['path'])``.
    *text* is accepted for signature compatibility but unused.
    """
    time.sleep(wait_time)
    start = float(batch['path']) * 100
    return {'path2': torch.arange(start, start + nb)}
def function_to_run2(batch):
    """Identity job that logs the sample path and simulates 0.1 s of work."""
    path = batch['path']
    print('JOB starte', path)  # sic: original trace message kept verbatim
    time.sleep(0.1)
    return batch
def worker_with_error(batch):
    """Identity job that deliberately fails for sample uid 10.

    Used to verify the reservoir recovers when a background job raises.
    """
    uid = batch['sample_uid'][0]
    if uid != 10:
        return batch
    raise IndexError('This is an expected exception to test worker recovery from failure!')
def make_volume_torch(batch):
    """Attach a zero-filled (1, 10, 11, 12) float volume to *batch*, simulating a slow load."""
    print('JOB starte', batch['path'])  # sic: original trace message kept verbatim
    volume = torch.zeros([1, 10, 11, 12], dtype=torch.float)
    batch['volume'] = volume
    time.sleep(0.1)
    return batch
def make_list_dicts(batch, wait_time=None):
    """Expand one batch into a list of 10 sub-batches sharing its 'sample_uid'.

    Each sub-batch carries a fresh zero-filled (1, 42) tensor. When
    *wait_time* is given, sleep that long before returning to simulate a
    slow job.
    """
    uid = batch['sample_uid']
    print('sample_uid=', uid)
    samples = [
        {'sample_uid': uid, 'volume': torch.zeros([1, 42])}
        for _ in range(10)
    ]
    if wait_time is not None:
        time.sleep(wait_time)
    return samples
class TestSequenceReservoir(TestCase):
    """Integration tests for ``trw.train.SequenceAsyncReservoir``.

    The reservoir is filled asynchronously by background jobs pulled from a
    source sequence; these tests exercise its sampling statistics, batching,
    replacement policy, worker-failure recovery and interaction with ``map``.

    NOTE(review): several tests assert on wall-clock timings and on sampling
    proportions, so they are inherently sensitive to machine load.
    """

    def test_reservoir_basics2(self):
        """Randomly sampled reservoir items must appear in equal proportion."""
        # Test the sequence satisfies statistical properties:
        # - items from a sequence must be in equal proportion (they are randomly sampled)
        nb_indices = 10
        paths = [[i, 42] for i in range(nb_indices)]
        split = {'path': np.asarray(paths)}
        max_reservoir_samples = 5
        sampler = trw.train.SamplerRandom()
        numpy_sequence = trw.train.SequenceArray(split, sampler=sampler)
        sequence = trw.train.SequenceAsyncReservoir(numpy_sequence, max_reservoir_samples=max_reservoir_samples,
                                                    function_to_run=function_to_run,
                                                    min_reservoir_samples=max_reservoir_samples).collate()

        # give the background workers time to fill the reservoir
        time.sleep(2)
        time_start = time.time()
        samples = collections.defaultdict(lambda: 0)
        nb_epochs = 100000
        for i in range(1, nb_epochs):
            batches = []
            for batch in sequence:
                nb_samples = trw.utils.len_batch(batch)
                # we requested a single sample at a time
                self.assertTrue(nb_samples == 1)
                p = trw.utils.to_value(batch['path'])
                self.assertTrue(p.shape == (1, 2))
                batches.append(batch)
                value = int(trw.utils.to_value(batch['sample_uid'])[0])
                samples[value] += 1
            self.assertTrue(len(batches) <= max_reservoir_samples)
        time_end = time.time()
        print('TIME=', time_end - time_start)

        # each uid should have been drawn near its expected count (within 10%)
        expected_counts = nb_epochs / nb_indices * max_reservoir_samples
        for c, counts in samples.items():
            error_percent = abs(counts - expected_counts) / expected_counts
            print(f'c={c}, counts={counts}, expected_counts={expected_counts}, error={error_percent}')
            self.assertTrue(error_percent < 0.1)

    def test_subsample_uid(self):
        """
        Make sure we can resample the sequence with UID
        """
        nb_indices = 800
        paths = [[i, 42] for i in range(nb_indices)]
        split = {'path': np.asarray(paths)}
        max_reservoir_samples = 200
        sampler = trw.train.SamplerSequential()
        numpy_sequence = trw.train.SequenceArray(split, sampler=sampler)
        sequence = trw.train.SequenceAsyncReservoir(numpy_sequence, max_reservoir_samples=max_reservoir_samples,
                                                    function_to_run=function_to_run,
                                                    min_reservoir_samples=max_reservoir_samples).collate().batch(40)

        # restrict iteration to uids 200..299 and verify exactly those appear
        subsampled_sequence = sequence.subsample_uids(uids=np.arange(200, 300), uids_name=trw.train.default_sample_uid_name)
        nb_samples = 0
        values = set()
        for batch in subsampled_sequence:
            batch_set = set(trw.utils.to_value(batch['sample_uid']))
            nb_samples += trw.utils.len_batch(batch)
            values = values.union(batch_set)
        assert len(values) == 100
        assert np.min(list(values)) == 200
        assert np.max(list(values)) == 299

    def test_reservoir_batch(self):
        """
        Test that we can easily combine SequenceArray -> SequenceAsyncReservoir -> SequenceBatch
        """
        nb_indices = 20
        split = {'path': np.asarray(np.arange(nb_indices))}
        sampler = trw.train.SamplerSequential(batch_size=1)
        numpy_sequence = trw.train.SequenceArray(split, sampler=sampler)
        sequence = trw.train.SequenceAsyncReservoir(numpy_sequence, max_reservoir_samples=10,
                                                    function_to_run=make_list_dicts, min_reservoir_samples=10).batch(5)

        # make_list_dicts expands each sample into 10 sub-batches, batched by 5
        for batch in sequence:
            assert trw.utils.len_batch(batch) == 5 * 10, 'found={}, expected={}'.format(
                trw.utils.len_batch(batch), 5 * 10)

    def test_fill_reservoir_every_epoch(self):
        """
        The reservoir will start tasks and retrieve results every epoch
        """
        max_jobs_at_once = 5
        nb_indices = 30
        nb_epochs = 7
        min_reservoir_samples = 5
        max_reservoir_samples = 20
        split = {'path': np.asarray(np.arange(nb_indices))}
        sampler = trw.train.SamplerSequential(batch_size=1)
        numpy_sequence = trw.train.SequenceArray(split, sampler=sampler)
        sequence = trw.train.SequenceAsyncReservoir(numpy_sequence, max_reservoir_samples=max_reservoir_samples,
                                                    function_to_run=functools.partial(make_list_dicts, wait_time=0.02),
                                                    min_reservoir_samples=min_reservoir_samples,
                                                    max_jobs_at_once=max_jobs_at_once)

        for epoch in range(nb_epochs):
            print('sleeping')
            time.sleep(0.5)
            print('epoch=', epoch)
            # the reservoir should grow by at most max_jobs_at_once per epoch
            # until it reaches its maximum size
            expected_reservoir_size = min(epoch * max_jobs_at_once, max_reservoir_samples)
            assert sequence.reservoir_size() >= expected_reservoir_size, 'found={}, expected={}'.format(sequence.reservoir_size(), expected_reservoir_size)
            assert sequence.reservoir_size() <= expected_reservoir_size + max_jobs_at_once, 'found={}, expected={}'.format(sequence.reservoir_size(), expected_reservoir_size)
            print('found={}, expected={}'.format(sequence.reservoir_size(), expected_reservoir_size))
            for batch_id, batch in enumerate(sequence):
                if batch_id == 0 and epoch == 0:
                    # first iteration blocks until the minimum fill is reached
                    assert sequence.reservoir_size() >= min_reservoir_samples and \
                           sequence.reservoir_size() <= min_reservoir_samples + max_jobs_at_once

    def test_worker_error(self):
        """A failing job must be dropped; iteration keeps yielding valid samples."""
        nb_indices = 20
        split = {'path': np.asarray(np.arange(nb_indices))}
        sampler = trw.train.SamplerSequential(batch_size=1)
        numpy_sequence = trw.train.SequenceArray(split, sampler=sampler)
        sequence = trw.train.SequenceAsyncReservoir(numpy_sequence, max_reservoir_samples=10,
                                                    function_to_run=worker_with_error, min_reservoir_samples=5,
                                                    max_jobs_at_once=1).collate()

        # worker_with_error raises for uid 10: that sample must never surface
        for n in range(100):
            batches = []
            for batch in sequence:
                assert batch['sample_uid'][0] != 10, 'this job should have failed!'
                batches.append(batch)
            assert len(batches) >= 5

    def test_multiple_iterators_same_sequence(self):
        """Two live iterators over one reservoir must both sample uniformly."""
        # test the statistics of iterating the same sequence using different iterators
        np.random.seed(0)
        torch.random.manual_seed(0)
        nb_indices = 10
        paths = [[i, 42] for i in range(nb_indices)]
        split = {'path': np.asarray(paths)}
        max_reservoir_samples = 5
        sampler = trw.train.SamplerRandom()
        numpy_sequence = trw.train.SequenceArray(split, sampler=sampler)
        sequence = trw.train.SequenceAsyncReservoir(numpy_sequence, max_reservoir_samples=max_reservoir_samples,
                                                    function_to_run=function_to_run,
                                                    min_reservoir_samples=max_reservoir_samples,
                                                    reservoir_sampler=trw.train.SamplerRandom()).collate()

        samples_0 = collections.defaultdict(lambda: 0)
        samples_1 = collections.defaultdict(lambda: 0)
        nb_epochs = 100000
        for i in range(1, nb_epochs):
            it_0 = iter(sequence)
            it_1 = iter(sequence)
            for batch in it_0:
                nb_samples = trw.utils.len_batch(batch)
                self.assertTrue(nb_samples == 1)
                value = int(trw.utils.to_value(batch['sample_uid'])[0])
                samples_0[value] += 1
            for batch in it_1:
                nb_samples = trw.utils.len_batch(batch)
                self.assertTrue(nb_samples == 1)
                value = int(trw.utils.to_value(batch['sample_uid'])[0])
                samples_1[value] += 1

        # both iterators must draw each uid near its expected count (within 10%)
        expected_counts = nb_epochs / nb_indices * max_reservoir_samples
        for c, counts in samples_0.items():
            error_percent = abs(counts - expected_counts) / expected_counts
            print(f'c={c}, counts={counts}, expected_counts={expected_counts}, error={error_percent}')
            self.assertTrue(error_percent < 0.1)
        for c, counts in samples_1.items():
            error_percent = abs(counts - expected_counts) / expected_counts
            print(f'c={c}, counts={counts}, expected_counts={expected_counts}, error={error_percent}')
            self.assertTrue(error_percent < 0.1)

    def test_subsample(self):
        """``subsample(2)`` must restrict iteration to exactly 2 distinct uids."""
        nb_indices = 10
        paths = [[i, 42] for i in range(nb_indices)]
        split = {'path': np.asarray(paths)}
        max_reservoir_samples = 5
        sampler = trw.train.SamplerRandom()
        numpy_sequence = trw.train.SequenceArray(split, sampler=sampler)
        sequence = trw.train.SequenceAsyncReservoir(numpy_sequence, max_reservoir_samples=max_reservoir_samples,
                                                    function_to_run=function_to_run,
                                                    min_reservoir_samples=max_reservoir_samples,
                                                    reservoir_sampler=trw.train.SamplerRandom()).collate().subsample(2)

        uids = set()
        for epoch in range(10):
            for batch in sequence:
                value = int(trw.utils.to_value(batch['sample_uid'])[0])
                uids.add(value)
        assert len(uids) == 2

    def test_reservoir_maximum_replacement(self):
        """
        Make sure we can control at which rate the content of the reservoir is replaced per epoch
        """
        nb_indices = 100
        paths = [[i, 42] for i in range(nb_indices)]
        split = {'path': np.asarray(paths)}
        max_reservoir_samples = 20
        max_reservoir_replacement_size = 5
        max_jobs_at_once = 100
        sampler = trw.train.SamplerSequential()
        numpy_sequence = trw.train.SequenceArray(split, sampler=sampler)
        sequence = trw.train.SequenceAsyncReservoir(numpy_sequence, max_reservoir_samples=max_reservoir_samples,
                                                    function_to_run=function_to_run_id,
                                                    min_reservoir_samples=max_reservoir_samples,
                                                    max_jobs_at_once=max_jobs_at_once,
                                                    max_reservoir_replacement_size=max_reservoir_replacement_size,
                                                    reservoir_sampler=trw.train.SamplerSequential())

        last_uids = set()
        for epoch in range(10):
            print(f'-----epoch={epoch}')
            time.sleep(0.1)
            current_uids = set()
            for batch in sequence:
                assert len(batch) == 1
                uid = batch[0]['sample_uid'][0]
                current_uids.add(uid)
            print(current_uids)
            if epoch > 0:
                # exactly `max_reservoir_replacement_size` uids should have
                # been evicted since the previous epoch
                d = last_uids.difference(current_uids)
                print(d)
                assert len(d) == max_reservoir_replacement_size, f'found={len(d)}, expected={max_reservoir_replacement_size}'
            last_uids = current_uids

    def test_slow_sequence_reservoir_fast_map_id(self):
        """
        Test the loading of the reservoir doesn't affect the iteration
        of the reservoir (i.e., once the minimum of jobs is loaded,
        iterating the reservoir should be instantaneous)
        """
        nb_indices = 15
        nb_epochs = 4
        split = {'path': np.asarray([[i] for i in range(nb_indices)])}
        max_reservoir_samples = 5
        max_reservoir_replacement_size = 5
        max_jobs_at_once = max_reservoir_samples
        wait_time_reservoir = 1.0
        wait_time_map = 0.5

        numpy_sequence = trw.train.SequenceArray(split, sampler=trw.train.SamplerSequential())
        sequence = trw.train.SequenceAsyncReservoir(
            numpy_sequence,
            function_to_run=functools.partial(function_to_run_id_wait, wait_time=wait_time_reservoir, text='reservoir_loaded'),
            max_reservoir_samples=max_reservoir_samples,
            min_reservoir_samples=max_reservoir_samples,
            max_jobs_at_once=max_jobs_at_once,
            max_reservoir_replacement_size=max_reservoir_replacement_size,
            reservoir_sampler=trw.train.SamplerSequential())
        sequence = sequence.map(functools.partial(function_to_run_id_wait, wait_time=wait_time_map), nb_workers=1)

        # iteration should only cost the map stage; reservoir loading is hidden
        expected_time = max_reservoir_samples * wait_time_map * (nb_epochs - 1)
        time_start = None
        for epoch in range(nb_epochs):
            if epoch == 1:
                # discard first epoch timing due to processes/threads creation time
                time_start = time.perf_counter()
            epoch_start = time.perf_counter()
            for batch_id, batch in enumerate(sequence):
                print(batch_id, str(batch))
            epoch_end = time.perf_counter()
            epoch_time = epoch_end - epoch_start
            print(f'epoch={epoch}, epoch_time={epoch_time}')
        time_end = time.perf_counter()
        time_taken = time_end - time_start
        print(f'DONE, time_taken={time_taken}, expected_time={expected_time}')
        assert abs(time_taken - expected_time) < 0.5

    def test_slow_sequence_reservoir_fast_map_multiple(self):
        """
        Test the loading of the reservoir doesn't affect the iteration
        of the reservoir (i.e., once the minimum of jobs is loaded,
        iterating the reservoir should be instantaneous).

        Many more batches to process than reservoir size. Since
        there is almost no data transferred between processes
        the overhead SHOULD be minimal.
        """
        nb_indices = 15
        nb_epochs = 3
        split = {'path': np.asarray([[i] for i in range(nb_indices)])}
        max_reservoir_samples = 5
        max_reservoir_replacement_size = 5
        multiple = 10
        max_jobs_at_once = max_reservoir_samples
        wait_time_reservoir = 1.0
        wait_time_map = 0.1
        nb_map_workers = 2
        max_queue_size_pin = 4

        numpy_sequence = trw.train.SequenceArray(split, sampler=trw.train.SamplerSequential())
        sequence = trw.train.SequenceAsyncReservoir(
            numpy_sequence,
            function_to_run=functools.partial(function_to_run_multiple_wait, nb=multiple, wait_time=wait_time_reservoir, text='reservoir_loaded'),
            max_reservoir_samples=max_reservoir_samples,
            min_reservoir_samples=max_reservoir_samples,
            max_jobs_at_once=max_jobs_at_once,
            max_reservoir_replacement_size=max_reservoir_replacement_size,
            reservoir_sampler=trw.train.SamplerSequential()).collate()
        sequence = sequence.rebatch(batch_size=1).map(functools.partial(function_to_run_id_wait, wait_time=0), nb_workers=nb_map_workers, max_queue_size_pin=max_queue_size_pin)

        nb_samples = 0
        expected_time = max_reservoir_samples * wait_time_map * (nb_epochs - 1) * multiple #/ max(1, nb_map_workers)
        time_start = None
        for epoch in range(nb_epochs):
            if epoch == 1:
                # discard first epoch timing due to processes/threads creation time
                time_start = time.perf_counter()
            epoch_start = time.perf_counter()
            for _, batch in enumerate(sequence):
                # simulate a workload. Overhead of the map
                # should be hidden!
                time.sleep(wait_time_map)
                nb_samples += trw.utils.len_batch(batch)
                # check the expected prefetch size: how many batches
                # are already processed and queued?
                average_prefetch = sequence.debug_metadata.pin_queue_size / sequence.debug_metadata.nb_batches
                assert abs(max_queue_size_pin - 1 - average_prefetch) < 0.5, f'expected={max_queue_size_pin}, got={average_prefetch}'
            epoch_end = time.perf_counter()
            epoch_time = epoch_end - epoch_start
            print(f'epoch={epoch}, epoch_time={epoch_time}')
        time_end = time.perf_counter()
        time_taken = time_end - time_start
        print(f'DONE, time_taken={time_taken}, expected_time={expected_time}')
        assert abs(time_taken - expected_time) < 0.5

        expected_samples = max_reservoir_samples * nb_epochs * multiple
        assert expected_samples == nb_samples, f'nb_samples={nb_samples}, expected_samples={expected_samples}'
"civodlu@gmail.com"
] | civodlu@gmail.com |
fcb2f6c40dbb8ed628205a4f54ebdeb0aac571fa | 6444622ad4a150993955a0c8fe260bae1af7f8ce | /djangoenv/lib/python2.7/site-packages/django/contrib/postgres/forms/ranges.py | 0498f42c1330eb6418f119272348fadab2465108 | [] | no_license | jeremyrich/Lesson_RestAPI_jeremy | ca965ef017c53f919c0bf97a4a23841818e246f9 | a44263e45b1cc1ba812059f6984c0f5be25cd234 | refs/heads/master | 2020-04-25T23:13:47.237188 | 2019-03-22T09:26:58 | 2019-03-22T09:26:58 | 173,138,073 | 0 | 0 | null | 2019-03-22T09:26:59 | 2019-02-28T15:34:19 | Python | UTF-8 | Python | false | false | 3,070 | py | from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.core import exceptions
from django.forms.widgets import MultiWidget
from django.utils.translation import ugettext_lazy as _
# Public API of this module: the concrete range form fields.
__all__ = [
    "IntegerRangeField",
    "FloatRangeField",
    "DateTimeRangeField",
    "DateRangeField",
]
class BaseRangeField(forms.MultiValueField):
    """Base form field for PostgreSQL range types.

    Subclasses define ``base_field`` (the form field used for each bound)
    and ``range_type`` (the psycopg2 range class the two bounds are
    compressed into).
    """

    default_error_messages = {
        "invalid": _("Enter two valid values."),
        "bound_ordering": _(
            "The start of the range must not exceed the end of the range."
        ),
    }

    def __init__(self, **kwargs):
        """Supply the paired widget/sub-fields unless the caller overrode them."""
        if "widget" not in kwargs:
            kwargs["widget"] = RangeWidget(self.base_field.widget)
        if "fields" not in kwargs:
            # Each bound is individually optional: open-ended ranges are valid.
            kwargs["fields"] = [
                self.base_field(required=False),
                self.base_field(required=False),
            ]
        kwargs.setdefault("required", False)
        kwargs.setdefault("require_all_fields", False)
        super(BaseRangeField, self).__init__(**kwargs)

    def prepare_value(self, value):
        """Split a range object (or None) into the [lower, upper] pair the widget renders."""
        lower_base, upper_base = self.fields
        if isinstance(value, self.range_type):
            return [
                lower_base.prepare_value(value.lower),
                upper_base.prepare_value(value.upper),
            ]
        if value is None:
            return [lower_base.prepare_value(None), upper_base.prepare_value(None)]
        # Already a [lower, upper] pair (e.g. redisplaying submitted data).
        return value

    def compress(self, values):
        """Combine the cleaned [lower, upper] pair into a ``range_type`` instance.

        Raises a 'bound_ordering' ValidationError when lower > upper, and an
        'invalid' one when the range class rejects the bound types.
        """
        if not values:
            return None
        lower, upper = values
        if lower is not None and upper is not None and lower > upper:
            raise exceptions.ValidationError(
                self.error_messages["bound_ordering"], code="bound_ordering"
            )
        try:
            range_value = self.range_type(lower, upper)
        except TypeError:
            raise exceptions.ValidationError(
                self.error_messages["invalid"], code="invalid"
            )
        else:
            return range_value
class IntegerRangeField(BaseRangeField):
    """Form field for an integer range, compressed into a ``NumericRange``."""
    default_error_messages = {"invalid": _("Enter two whole numbers.")}
    base_field = forms.IntegerField
    range_type = NumericRange
class FloatRangeField(BaseRangeField):
    """Form field for a floating-point range, compressed into a ``NumericRange``."""
    default_error_messages = {"invalid": _("Enter two numbers.")}
    base_field = forms.FloatField
    range_type = NumericRange
class DateTimeRangeField(BaseRangeField):
    """Form field for a datetime range, compressed into a ``DateTimeTZRange``."""
    default_error_messages = {"invalid": _("Enter two valid date/times.")}
    base_field = forms.DateTimeField
    range_type = DateTimeTZRange
class DateRangeField(BaseRangeField):
    """Form field for a date range, compressed into a ``DateRange``."""
    default_error_messages = {"invalid": _("Enter two valid dates.")}
    base_field = forms.DateField
    range_type = DateRange
class RangeWidget(MultiWidget):
    """Render two side-by-side copies of *base_widget* for a range's bounds."""

    def __init__(self, base_widget, attrs=None):
        super(RangeWidget, self).__init__((base_widget, base_widget), attrs)

    def decompress(self, value):
        """Split *value* (a range object) into its (lower, upper) bounds."""
        if not value:
            return (None, None)
        return (value.lower, value.upper)
| [
"jeremyrich@free.fr"
] | jeremyrich@free.fr |
55dd996f52ef086c06fc9b23667bc1055a5ef042 | 4943280542b35028ec84d5aca3b24dd3e1bd1794 | /pm_readwrite1.py | 13903bf9bd7ee3ad8c66c62afe7b22e263385436 | [] | no_license | Botany-Downs-Secondary-College/password_manager-sam-hurd | e3db67508dc0b605483e49ebf6ca14c7504794b3 | 2ff0a2efb04f76c92675fb2320278ba91bd74bad | refs/heads/main | 2023-03-30T04:59:51.852270 | 2021-03-25T01:41:55 | 2021-03-25T01:41:55 | 341,007,543 | 0 | 1 | null | 2021-02-21T21:26:12 | 2021-02-21T21:26:07 | null | UTF-8 | Python | false | false | 3,771 | py | #password_manager.py
#store and display passwords for users
#Sam Hurd, Feb 22
# Seed credentials plus any previously registered members/logins from disk.
i = 1  # login-state flag: stays 1 until the user authenticates or registers
user_list = ["bdsc", "pass1234"]  # flat list of alternating username, password
login_list = []

# Load registered usernames/passwords; on a fresh install the file does not
# exist yet (it is only created when the first account is registered), so a
# missing file must not crash the program.
try:
    with open("members.txt") as fa:
        for line in fa:
            user_list.extend(line.split())
except FileNotFoundError:
    pass
print(user_list)

# Load previously saved logins; splitting a line on "\n" always yields a
# trailing empty fragment, so drop empty parts instead of storing blank
# entries that would print as empty lines later.
try:
    with open("logins.txt") as fa:
        for line in fa:
            login_list.extend(part for part in line.split("\n") if part)
except FileNotFoundError:
    pass
print(login_list)
def add_login():
    """Prompt for one website/app login and persist it to logins.txt.

    Appends the new entry to the in-memory ``login_list`` and writes only
    that entry to the file. (The previous version re-wrote the *entire*
    list to the append-mode file on every call, duplicating all earlier
    logins each time a new one was added.)
    """
    while True:
        app_name = input("Enter the name of the app or service, or type 'menu' to return to the menu\n").strip().title()
        if app_name == "Menu":
            break
        else:
            username = input("What username did you use for {}? \n".format(app_name)).strip()
            password = input("What password did you use for {}? \n".format(app_name)).strip()
            login = "Website/App: {} -- Username: {} -- Password: {}\n".format(app_name, username, password)
            login_list.append(login)
            # Append just the new entry; the rest of the file already holds
            # the earlier logins.
            with open("logins.txt", 'a+') as output:
                output.write(login)
            print("Login added successfully")
            break
def view_login():
    """Display every stored login entry for the current session."""
    print("Here are your logins:\n")
    for entry in login_list:
        print(entry)
print("Welcome to the Password Manager")
name = input("What is your name? ")
while True:
try:
age = float(input("What is your age? "))
break
except:
print("Please enter a number")
print("Hello {}".format(name))
if age < 13:
print("Sorry, you are not old enough to use the password manager. The programme will now exit")
exit()
# Authentication loop: runs until a successful log-in or account creation
# bumps i past 1.
while i == 1:
    member = input("Enter L to log in or N to create a new account \n")
    if member == "L":
        username = input("Enter your username ")
        password = input("Enter your password ")
        # user_list stores flattened (username, password) pairs, so a valid
        # login is a stored username immediately followed by its own password.
        # The old check 'username and password in user_list' parsed as
        # 'username and (password in user_list)': it ignored the username and
        # accepted any non-empty name combined with ANYONE's password.
        if (username, password) in zip(user_list[::2], user_list[1::2]):
            print("Log in successful")
            i += 1
        else:
            print("That combination does not match any existing account")
    elif member == "N":
        username = input("Enter a username \n")
        user_list.append(username)
        while True:
            password = input("Enter a password. Your password must contain at least eight characters, one capital letter and one number\n")
            if (any(passreqs.isupper() for passreqs in password) and any(passreqs.isdigit() for passreqs in password) and len(password) >= 8):
                user_list.append(password)
                print(user_list)
                # Append only the NEW credentials. Re-writing the whole
                # user_list here duplicated every existing member (and the
                # built-in default account) in members.txt on each sign-up.
                with open("members.txt", 'a+') as output:
                    output.write('%s\n' % username)
                    output.write('%s\n' % password)
                print("Account successfully created")
                i += 1
                break
            else:
                print("Your password does not meet the requirements")
    else:
        print("That is not a valid option, Enter L to log in or N to create a new account \n")
# Main menu: keeps prompting until the user chooses option 3 (exit).
while True:
    option = input("Please choose a mode by entering a number from 1 to 3: \n 1. Add a password 2. View your passwords 3. Exit \n")
    if option == "3":
        print("Thanks for using Password Manager!")
        exit()
    if option == "1":
        add_login()
    elif option == "2":
        view_login()
    else:
        print("That is not a valid option. Please enter a number from 1 to 3 \n")
| [
"noreply@github.com"
] | Botany-Downs-Secondary-College.noreply@github.com |
11f5f77e6c949ed37e981d5c17ca0838d81e0991 | b84d5fe69232139ecf7282af72b2f3f802128f8b | /dev_nbs/course/lesson4-collab.py | 4cca2b9046bc206e971b43a85b1f7d32ab0f5612 | [
"Apache-2.0"
] | permissive | huangyingw/fastai_fastai | f8ba2e0a83c7b017dd797400aa5677b17590052e | 68ff76a21b2f70f44fb91885abcb73c1c213ec1a | refs/heads/master | 2023-08-30T09:14:38.268415 | 2020-10-17T23:54:20 | 2020-10-17T23:54:20 | 289,820,769 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,351 | py | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from fastai.collab import *
from fastai.tabular.all import *
# ## Collaborative filtering example
# `collab` models use data in a `DataFrame` of user, items, and ratings.
# Column names used throughout this notebook.
user, item, title = 'userId', 'movieId', 'title'
# Download (once) and unpack the small MovieLens sample bundled with fastai.
path = untar_data(URLs.ML_SAMPLE)
path
ratings = pd.read_csv(path / 'ratings.csv')
ratings.head()
# That's all we need to create and train a model:
dls = CollabDataLoaders.from_df(ratings, bs=64, seed=42)
# Predictions are squashed into this range; the top is slightly above 5 so a
# full 5-star rating is still reachable after the sigmoid.
y_range = [0, 5.5]
learn = collab_learner(dls, n_factors=50, y_range=y_range)
learn.fit_one_cycle(3, 5e-3)
# ## Movielens 100k
# Let's try with the full Movielens 100k dataset, available from http://files.grouplens.org/datasets/movielens/ml-100k.zip
path = Config().data / 'ml-100k'
# u.data is tab-separated with no header row: userId, movieId, rating, timestamp.
ratings = pd.read_csv(path / 'u.data', delimiter='\t', header=None,
                      names=[user, item, 'rating', 'timestamp'])
ratings.head()
# u.item is pipe-separated latin-1: movie metadata plus 19 one-hot genre columns.
movies = pd.read_csv(path / 'u.item', delimiter='|', encoding='latin-1', header=None,
                     names=[item, 'title', 'date', 'N', 'url', *[f'g{i}' for i in range(19)]])
movies.head()
len(ratings)
# Attach the human-readable title to every rating row.
rating_movie = ratings.merge(movies[[item, title]])
rating_movie.head()
dls = CollabDataLoaders.from_df(rating_movie, seed=42, valid_pct=0.1, bs=64, item_name=title, path=path)
dls.show_batch()
y_range = [0, 5.5]
learn = collab_learner(dls, n_factors=40, y_range=y_range)
learn.lr_find()
learn.fit_one_cycle(5, 5e-3, wd=1e-1)
learn.save('dotprod')
# Here's [some benchmarks](https://www.librec.net/release/v1.3/example.html) on the same dataset for the popular Librec system for collaborative filtering. They show best results based on RMSE of 0.91, which corresponds to an MSE of `0.91**2 = 0.83`.
# ## Interpretation
# ### Setup
learn.load('dotprod')
learn.model
# Rank titles by number of ratings and keep the 1000 most-rated movies.
g = rating_movie.groupby('title')['rating'].count()
top_movies = g.sort_values(ascending=False).index.values[:1000]
top_movies[:10]
# ### Movie bias
# Per-movie bias term learned by the model (is_item=True selects item bias).
movie_bias = learn.model.bias(top_movies, is_item=True)
movie_bias.shape
mean_ratings = rating_movie.groupby('title')['rating'].mean()
# Triples of (learned bias, title, mean observed rating) for comparison below.
movie_ratings = [(b, i, mean_ratings.loc[i]) for i, b in zip(top_movies, movie_bias)]
def item0(o):
    """Sort key: return the first element of *o* (same as itemgetter(0))."""
    first = o[0]
    return first
# Movies with the lowest learned bias (item0 extracts the bias from each triple).
sorted(movie_ratings, key=item0)[:15]
# Movies with the highest learned bias.
sorted(movie_ratings, key=lambda o: o[0], reverse=True)[:15]
# ### Movie weights
# Latent-factor weights learned for each of the top movies.
movie_w = learn.model.weight(top_movies, is_item=True)
movie_w.shape
# Reduce the factors to 3 principal components for inspection.
movie_pca = movie_w.pca(3)
movie_pca.shape
fac0, fac1, fac2 = movie_pca.t()
# Movies ranked along the first principal component, both directions.
movie_comp = [(f, i) for f, i in zip(fac0, top_movies)]
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
# Same ranking along the second principal component.
movie_comp = [(f, i) for f, i in zip(fac1, top_movies)]
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
# Scatter 50 movies in the (component 0, component 2) plane.
# NOTE(review): the random choice on the next line is immediately overwritten
# by idxs = list(range(50)), so the plot always shows the 50 most-rated movies.
idxs = np.random.choice(len(top_movies), 50, replace=False)
idxs = list(range(50))
X = fac0[idxs]
Y = fac2[idxs]
plt.figure(figsize=(15, 15))
plt.scatter(X, Y)
for i, x, y in zip(top_movies[idxs], X, Y):
    plt.text(x, y, i, color=np.random.rand(3) * 0.7, fontsize=11)
plt.show()
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
2f3c95e5e141f6523213f3eb08049afb0594cd36 | 88726f0d487a0d9f1c568722f458d5cc8ad40566 | /panasonic2020/C.py | 1dc61efb636939e18fdd56451751aa9713f8deb7 | [] | no_license | thortoyo/study | 60e3cccbf7b6587044ca3ee5c4cdb07f038a5800 | 7c20f7208703acf81125aca49de580982391ecfe | refs/heads/master | 2023-06-21T21:56:47.141439 | 2023-06-20T02:38:47 | 2023-06-20T02:38:47 | 196,919,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | import math
a,b,c=map(int,input().split())
#if ((a + b + (2 * math.sqrt(a*b))) < c):
# print("Yes")
#else:
# print("No")
if (((c-a-b) >= 0) and (4*a*b < ((c-a-b)*(c-a-b)) )):
print("Yes")
else:
print("No")
| [
"thor.toyo@gmail.com"
] | thor.toyo@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.