blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4f3d07f153b1f95a64a58781bc02da8823cfab74 | 376e2608fcedd1407d8c2a65634220984bbd9b85 | /alpenbank/settings.py | 1ba723fa8566bcf23684c4aa5e5a47d21f914256 | [] | no_license | SimeonYS/alpenbank | c2128697deab2c4f2fd97ea87ac7810cf889ebab | a0f46ad71cde350bec4e30a851708428e32be72e | refs/heads/main | 2023-03-09T09:11:26.534659 | 2021-02-23T11:42:00 | 2021-02-23T11:42:00 | 341,535,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | BOT_NAME = 'alpenbank'
# Scrapy project settings for the "alpenbank" spider.
SPIDER_MODULES = ['alpenbank.spiders']
NEWSPIDER_MODULE = 'alpenbank.spiders'
# Write exported feeds as UTF-8.
FEED_EXPORT_ENCODING = 'utf-8'
# Log only errors.
LOG_LEVEL = 'ERROR'
# No delay between requests (no throttling at all).
DOWNLOAD_DELAY = 0
# Desktop Chrome user-agent string.
USER_AGENT="Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36"
# Respect robots.txt rules.
ROBOTSTXT_OBEY = True
ITEM_PIPELINES = {
'alpenbank.pipelines.AlpenbankPipeline': 300,
} | [
"simeon.simeonov@ADPVT.com"
] | simeon.simeonov@ADPVT.com |
6527e7f7e863bf1444b75c2ba9f2bdd32b29fdd3 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nntwen.py | 1834cb51dc6c935d14c6cac1a4c6dc9e237a3cd4 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 121 | py | ii = [('MartHSI2.py', 1), ('ClarGE2.py', 1), ('BachARE.py', 1), ('WheeJPT.py', 1), ('FerrSDO.py', 1), ('MackCNH2.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
e4e175743927d2d38466815d4429550bd4380b0f | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoJets/JetProducers/python/fixedGridRhoProducer_cfi.py | 6f9ee1ae1a6df7a91ece5014cdba4413192be8b4 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 497 | py | import FWCore.ParameterSet.Config as cms
# Three FixedGridRhoProducer instances, all reading particle-flow
# candidates, each restricted to a different eta region.
fixedGridRhoCentral = cms.EDProducer("FixedGridRhoProducer",
    pfCandidatesTag = cms.InputTag("particleFlow"),
    EtaRegion = cms.string("Central")
    )
# Forward eta region only.
fixedGridRhoForward = cms.EDProducer("FixedGridRhoProducer",
    pfCandidatesTag = cms.InputTag("particleFlow"),
    EtaRegion = cms.string("Forward")
    )
# Full eta coverage.
fixedGridRhoAll = cms.EDProducer("FixedGridRhoProducer",
    pfCandidatesTag = cms.InputTag("particleFlow"),
    EtaRegion = cms.string("All")
    )
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
c4695c901a02a94e112fab81598f233dc0534459 | fcaa0395a7c6aa74cbc47c40f35fdc312e44b9c5 | /aok/comparisons/__init__.py | 00898d6b19afe33e80500e988e0cc24bd6dfcf91 | [] | no_license | rocketboosters/a-ok | b6f1a70d262123c2df5e4969a687cbcfdfbafc8c | 06f31404a4ce34d561253ba74b533ce3fb73c60c | refs/heads/main | 2023-09-02T19:18:18.158296 | 2021-11-03T01:54:36 | 2021-11-03T01:54:36 | 388,142,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,275 | py | """Comparison operators subpackage for the aok library."""
from aok.comparisons._basics import Anything # noqa: F401
from aok.comparisons._basics import Between # noqa: F401
from aok.comparisons._basics import Equals # noqa: F401
from aok.comparisons._basics import Greater # noqa: F401
from aok.comparisons._basics import GreaterOrEqual # noqa: F401
from aok.comparisons._basics import Less # noqa: F401
from aok.comparisons._basics import LessOrEqual # noqa: F401
from aok.comparisons._basics import NoneOf # noqa: F401
from aok.comparisons._basics import OneOf # noqa: F401
from aok.comparisons._basics import Unequals # noqa: F401
from aok.comparisons._basics import anything # noqa: F401
from aok.comparisons._basics import between # noqa: F401
from aok.comparisons._basics import equals # noqa: F401
from aok.comparisons._basics import greater # noqa: F401
from aok.comparisons._basics import greater_or_equal # noqa: F401
from aok.comparisons._basics import less # noqa: F401
from aok.comparisons._basics import less_or_equal # noqa: F401
from aok.comparisons._basics import none_of # noqa: F401
from aok.comparisons._basics import one_of # noqa: F401
from aok.comparisons._basics import unequals # noqa: F401
from aok.comparisons._dicts import Dict # noqa: F401
from aok.comparisons._dicts import Okay # noqa: F401
from aok.comparisons._lists import JsonList # noqa: F401
from aok.comparisons._lists import List # noqa: F401
from aok.comparisons._lists import OkayList # noqa: F401
from aok.comparisons._lists import StrictList # noqa: F401
from aok.comparisons._lists import Tuple # noqa: F401
from aok.comparisons._lists import json_list # noqa: F401
from aok.comparisons._nullish import NotNull # noqa: F401
from aok.comparisons._nullish import Optional # noqa: F401
from aok.comparisons._nullish import not_null # noqa: F401
from aok.comparisons._nullish import optional # noqa: F401
from aok.comparisons._strings import Like # noqa: F401
from aok.comparisons._strings import LikeCase # noqa: F401
from aok.comparisons._strings import Match # noqa: F401
from aok.comparisons._strings import like # noqa: F401
from aok.comparisons._strings import like_case # noqa: F401
from aok.comparisons._strings import match # noqa: F401
| [
"swernst@gmail.com"
] | swernst@gmail.com |
2acf663118eb22264c326d53e1cc3e0f86209fce | d737fa49e2a7af29bdbe5a892bce2bc7807a567c | /software/qt_examples/src/pyqt-official/widgets/shapedclock.py | 0c1c44b50df3de59c7adebb087f4f9d2b396d3f4 | [
"GPL-3.0-only",
"MIT",
"CC-BY-NC-SA-4.0",
"GPL-1.0-or-later"
] | permissive | TG-Techie/CASPER | ec47dfbfd6c3a668739ff4d707572e0b853518b4 | 2575d3d35e7dbbd7f78110864e659e582c6f3c2e | refs/heads/master | 2020-12-19T12:43:53.825964 | 2020-01-23T17:24:04 | 2020-01-23T17:24:04 | 235,736,872 | 0 | 1 | MIT | 2020-01-23T17:09:19 | 2020-01-23T06:29:10 | Python | UTF-8 | Python | false | false | 5,194 | py | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import QPoint, QSize, Qt, QTime, QTimer
from PyQt5.QtGui import QColor, QPainter, QPolygon, QRegion
from PyQt5.QtWidgets import QAction, QApplication, QWidget
class ShapedClock(QWidget):
    """Frameless, circular analog clock widget (PyQt shaped-window example).

    The window is clipped to an ellipse in resizeEvent(), can be dragged
    with the left mouse button, and offers a context menu with a quit
    action (see __init__).
    """
    # Triangular hand outlines in a 200x200 logical coordinate system
    # centred on the widget; paintEvent() rescales to the real size.
    hourHand = QPolygon([
        QPoint(7, 8),
        QPoint(-7, 8),
        QPoint(0, -40)
    ])
    minuteHand = QPolygon([
        QPoint(7, 8),
        QPoint(-7, 8),
        QPoint(0, -70)
    ])
    hourColor = QColor(127, 0, 127)
    minuteColor = QColor(0, 127, 127, 191)  # semi-transparent (alpha 191)
    def __init__(self, parent=None):
        # Frameless window: only the elliptical mask set in resizeEvent()
        # is visible on screen.
        super(ShapedClock, self).__init__(parent,
                Qt.FramelessWindowHint | Qt.WindowSystemMenuHint)
        # Repaint once per second so the hands advance.
        timer = QTimer(self)
        timer.timeout.connect(self.update)
        timer.start(1000)
        # Quit action exposed through the context menu (and Ctrl+Q).
        quitAction = QAction("E&xit", self, shortcut="Ctrl+Q",
                triggered=QApplication.instance().quit)
        self.addAction(quitAction)
        self.setContextMenuPolicy(Qt.ActionsContextMenu)
        self.setToolTip("Drag the clock with the left mouse button.\n"
                "Use the right mouse button to open a context menu.")
        self.setWindowTitle(self.tr("Shaped Analog Clock"))
    def mousePressEvent(self, event):
        # Remember the click offset so dragging keeps the grab point fixed.
        if event.button() == Qt.LeftButton:
            self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()
            event.accept()
    def mouseMoveEvent(self, event):
        # Move the whole (frameless) window while the left button is held.
        if event.buttons() == Qt.LeftButton:
            self.move(event.globalPos() - self.dragPosition)
            event.accept()
    def paintEvent(self, event):
        # Paint in a fixed 200x200 logical space scaled to the widget size.
        side = min(self.width(), self.height())
        time = QTime.currentTime()
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.translate(self.width() / 2, self.height() / 2)
        painter.scale(side / 200.0, side / 200.0)
        # Hour hand: 30 degrees per hour plus a fraction for elapsed minutes.
        painter.setPen(Qt.NoPen)
        painter.setBrush(ShapedClock.hourColor)
        painter.save()
        painter.rotate(30.0 * ((time.hour() + time.minute() / 60.0)))
        painter.drawConvexPolygon(ShapedClock.hourHand)
        painter.restore()
        # Twelve hour tick marks.
        painter.setPen(ShapedClock.hourColor)
        for i in range(12):
            painter.drawLine(88, 0, 96, 0)
            painter.rotate(30.0)
        # Minute hand: 6 degrees per minute plus a fraction for seconds.
        painter.setPen(Qt.NoPen)
        painter.setBrush(ShapedClock.minuteColor)
        painter.save()
        painter.rotate(6.0 * (time.minute() + time.second() / 60.0))
        painter.drawConvexPolygon(ShapedClock.minuteHand)
        painter.restore()
        # Minute ticks, skipping positions already covered by hour ticks.
        painter.setPen(ShapedClock.minuteColor)
        for j in range(60):
            if (j % 5) != 0:
                painter.drawLine(92, 0, 96, 0)
            painter.rotate(6.0)
    def resizeEvent(self, event):
        # Clip the window to the largest centred circle that fits.
        side = min(self.width(), self.height())
        maskedRegion = QRegion(self.width()/2 - side/2, self.height()/2 - side/2, side, side, QRegion.Ellipse)
        self.setMask(maskedRegion)
    def sizeHint(self):
        # Default size of the clock window.
        return QSize(100, 100)
if __name__ == '__main__':
    # Stand-alone demo: show the clock and run the Qt event loop.
    import sys
    app = QApplication(sys.argv)
    clock = ShapedClock()
    clock.show()
    # Propagate the event-loop exit status to the shell.
    sys.exit(app.exec_())
| [
"TGTechie01@gmail.com"
] | TGTechie01@gmail.com |
7aa75076d646b49b3ef2cc13d97d1040ad806a7e | e8790304ded051df1d6bce56e2a5df32b2a8bd71 | /eshop/cart/forms.py | b0489422ab83f7a8647ce15df7868070e106b02a | [] | no_license | puskarkarki/TrialEcommerce | d5769c348937f66d6a8d7bd25eef4fc581856219 | a8afd83a93c6299b5505b23d74d8740e3ee66928 | refs/heads/master | 2023-08-31T23:43:25.965510 | 2021-09-22T12:09:30 | 2021-09-22T12:09:30 | 405,687,882 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from django import forms
# Allowed quantities 1..99, as (value, label) choice pairs.
PRODUCT_QUANTITY_OPTIONS = [(i, str(i)) for i in range(1, 100)]
class AddProductToCartForm(forms.Form):
    """Form used to add a product to the shopping cart.

    ``override`` is hidden and presumably tells the cart view whether to
    replace the stored quantity instead of adding to it — confirm against
    the view that consumes this form.
    """
    quantity = forms.TypedChoiceField(choices=PRODUCT_QUANTITY_OPTIONS, coerce=int)
    override = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)
| [
"puskarrajkarki1234@gmail.com"
] | puskarrajkarki1234@gmail.com |
50986868ac3beda7336d080eb3fedccbd1f18816 | e89693a2906534fa4a9d180b404cb96751302e8c | /reference_info.py | de35a29f7f6ccdf45cf70a74977b76abd0910368 | [] | permissive | friedrich-schotte/Lauecollect | e278e00692d109e98450c27502986673bf59db6a | acfc5afe34b4df5891a0f8186b8df76625afb51d | refs/heads/master | 2022-09-12T02:59:04.363963 | 2022-08-18T16:39:01 | 2022-08-18T16:39:01 | 186,062,944 | 0 | 2 | MIT | 2020-07-20T02:04:07 | 2019-05-10T22:42:26 | Python | UTF-8 | Python | false | false | 3,529 | py | """
Author: Friedrich Schotte
Date created: 2022-06-23
Date last modified: 2022-06-23
Revision comment:
"""
__version__ = "1.0"
import logging
from threading import Lock
def reference_info(reference, payload_type, *args, **kwargs):
    """Return the per-reference singleton of *payload_type*, creating it on demand.

    The instance is cached on the reference's info container under the
    lower-cased type name.  Creation uses double-checked locking on the
    container's lock so concurrent callers end up sharing one instance.
    """
    info = attribute_or_item_reference_container(reference)
    slot = payload_type.__name__.lower()
    if not hasattr(info, slot):
        with info.lock:
            # Re-check under the lock: another thread may have won the race.
            if not hasattr(info, slot):
                setattr(info, slot, payload_type(*args, **kwargs))
    return getattr(info, slot)
def attribute_or_item_reference_container(reference):
    """Dispatch to the attribute- or item-style info container for *reference*.

    Raises AttributeError when the reference exposes neither an
    ``attribute_name`` nor an ``index`` attribute.
    """
    if hasattr(reference, "attribute_name"):
        return attribute_reference_container(reference)
    if hasattr(reference, "index"):
        return item_reference_container(reference)
    raise AttributeError(f"{reference} is missing attributes 'attribute_name' or 'index'")
def attribute_reference_container(reference):
    """Return the Container stored on the referenced object for this attribute.

    The container lives on ``reference.object`` under the name
    ``__<attribute_name>__info__`` and is created lazily under the
    object's attribute-info lock (double-checked).
    """
    target = reference.object
    slot = f"__{reference.attribute_name}__info__"
    if not hasattr(target, slot):
        with attribute_info_lock(target):
            # Only the first thread past this check creates the container.
            if not hasattr(target, slot):
                setattr(target, slot, Container())
    return getattr(target, slot)
def item_reference_container(reference):
    """Return the per-item Container for *reference*, creating it lazily.

    Containers are kept in a ``__item_info__`` dict on the referenced
    object, keyed by ``reference.index``.  Both the registry dict and
    each entry are created under the object's item-info lock.
    """
    target = reference.object
    key = reference.index
    registry_name = "__item_info__"
    if not hasattr(target, registry_name):
        with item_info_lock(target):
            if not hasattr(target, registry_name):
                setattr(target, registry_name, {})
    registry = getattr(target, registry_name)
    if key not in registry:
        with item_info_lock(target):
            # Re-check: another thread may have inserted this entry already.
            if key not in registry:
                registry[key] = Container()
    return registry[key]
def attribute_info_lock(obj):
    # Lock guarding lazy creation of per-attribute info containers on obj.
    return object_lock(obj, "attribute_info")
def item_info_lock(obj):
    # Lock guarding lazy creation of per-item info containers on obj.
    return object_lock(obj, "item_info")
def object_lock(obj, name):
    """Return a Lock stored on *obj* under ``__<name>_lock__``, creating it once.

    First-time creation is serialized through the module-wide
    ``global_lock`` so two threads cannot attach different locks to the
    same object.
    """
    slot = f"__{name}_lock__"
    if not hasattr(obj, slot):
        with global_lock:
            if not hasattr(obj, slot):
                setattr(obj, slot, Lock())
    return getattr(obj, slot)
global_lock = Lock()
class Container:
    """Attachment point for cached payload objects; carries its own lock."""

    def __init__(self):
        # Guards lazy payload attachment performed by reference_info().
        self.lock = Lock()

    def __repr__(self):
        return self.class_name + "()"

    @property
    def class_name(self):
        """Name of the concrete class (honours subclassing)."""
        return self.__class__.__name__
if __name__ == '__main__':
    # Interactive smoke test; requires the project's beamline-control packages.
    msg_format = "%(asctime)s %(levelname)s %(module)s.%(funcName)s, line %(lineno)d: %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=msg_format)
    # Project-local imports, only needed for this manual test.
    from timing_system_client import timing_system_client
    from reference import reference as _reference
    domain_name = "BioCARS"
    timing_system = timing_system_client(domain_name)
    # Reference to the "count" attribute of the x-ray detector trigger counter.
    reference = _reference(timing_system.channels.xdet.trig_count, "count")
    self = attribute_or_item_reference_container(reference)  # named 'self' for interactive inspection
| [
"friedrich.schotte@gmail.com"
] | friedrich.schotte@gmail.com |
1bda04dee8ec7d57057dc9c1002af3262d36d79f | 26552adb0d8889affd40e009d3c311e41a873e43 | /Python_Solutions/16003.py | 1d232c99deebfd3a3fb463781eb54a524dcccaef | [] | no_license | Isaac-Lee/BOJ-Algorithm | 3b9b64aba9ab3b48d15133cbf5ad122822e441d0 | 27f0339195c48f416e672390758e85305203b71a | refs/heads/main | 2022-06-29T21:36:11.500158 | 2022-06-25T06:35:05 | 2022-06-25T06:35:05 | 203,349,860 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | nm = [int(k) for k in input().split()]
# BOJ 16003 attempt: builds an undirected graph (nm[0] vertices, nm[1] edges,
# nm read from stdin on the previous line) and repeatedly removes the current
# minimum-degree vertex from the other adjacency lists.
# NOTE(review): the author marked this solution as unsolved (see the bare
# strings at the end): nList[index].remove(mini) raises ValueError whenever
# `mini` is not actually adjacent to `index`.
nList = [[] for i in range(nm[0])]
re=[]
mNum ={}
for i in range(nm[1]):
    m = [int(m) for m in input().split()]
    # Store both directions of the edge, converting 1-based input to 0-based.
    nList[m[0]-1].append(m[1]-1)
    nList[m[1]-1].append(m[0]-1)
print(nList)
for j in range(nm[0]):
    mNum[j] = len(nList[j])
print(mNum)
for k in range(nm[1]):
    # Pick the vertex with the smallest remaining degree.
    mini = list(mNum.values()).index(min(mNum.values()))
    re.append(mini)
    print(mini)
    for index in range(nm[0]):
        print(index)
        if index != mini:
            print(nList[index])
            nList[index].remove(mini)  # ValueError if mini is not in this list (the author's noted failure)
            mNum[index] -= 1
# print(nList)
# print(mNum)
# print(re)
# The two bare strings below are the author's notes in Korean:
# "unsolved" and "error at nList[index].remove(mini)".
'''미해결'''
'''
nList[index].remove(mini)에서 에러
'''
"yy0221ss@gmail.com"
] | yy0221ss@gmail.com |
f4efbd707a0ea513abca53dd28189b88cc398eeb | a5bffa3c32a4955648345a201c3be4fe0a324136 | /utils/box/metric.py | 1cade1c54a41deec5844621516e8934dad9ba6ed | [
"MIT"
] | permissive | ming71/SLA | 178282e0ae1ecba8512f4b4b69f0d721a3c590b4 | 7024b093bc0d456b274314ebeae3bc500c2db65a | refs/heads/master | 2023-08-02T06:10:50.893229 | 2021-09-24T02:11:50 | 2021-09-24T02:11:50 | 370,882,400 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | import numpy as np
from collections import defaultdict, Counter
from .rbbox_np import rbbox_iou
def get_ap(recall, precision):
    """All-point interpolated average precision, returned as a percentage.

    Pads the PR curve with (0, 0) and (1, 0) sentinels, makes precision
    monotonically non-increasing from right to left, then integrates the
    area under the stepwise curve.
    """
    rec = [0, *recall, 1]
    prec = [0, *precision, 0]
    # Precision envelope: every point takes the best precision to its right.
    for j in reversed(range(1, len(prec))):
        prec[j - 1] = max(prec[j - 1], prec[j])
    area = 0.0
    for j in range(1, len(rec)):
        if rec[j] != rec[j - 1]:
            area += (rec[j] - rec[j - 1]) * prec[j]
    return area * 100
def get_ap_07(recall, precision):
    """VOC2007-style 11-point interpolated AP, returned as a percentage.

    *recall* and *precision* must be numpy arrays (boolean masking is used).
    """
    total = 0.0
    for threshold in np.linspace(0, 1, 11, endpoint=True):
        reached = recall >= threshold
        if reached.any():
            # Best precision among points at or beyond this recall level.
            total += precision[reached].max() / 11
    return total * 100
def get_det_aps(detect, target, num_classes, iou_thresh=0.5, use_07_metric=False):
    """Per-class average precisions for rotated-box detections.

    Both *detect* and *target* are sequences of [image_index, bbox, score,
    label] entries (see the inline layout comment below).  Per class,
    detections are matched greedily in descending score order against
    ground-truth boxes via rbbox_iou; each ground truth can be claimed at
    most once.  Returns a list of ``num_classes`` AP values in percent.
    """
    # [[index, bbox, score, label], ...]
    aps = []
    for c in range(num_classes):
        # Restrict both lists to this class; detections by descending score.
        target_c = list(filter(lambda x: x[3] == c, target))
        detect_c = filter(lambda x: x[3] == c, detect)
        detect_c = sorted(detect_c, key=lambda x: x[2], reverse=True)
        tp = np.zeros(len(detect_c))
        fp = np.zeros(len(detect_c))
        # Per-image "already matched" flags, one slot per ground-truth box.
        target_count = Counter([x[0] for x in target_c])
        target_count = {index: np.zeros(count) for index, count in target_count.items()}
        target_lut = defaultdict(list)
        for index, bbox, conf, label in target_c:
            target_lut[index].append(bbox)
        detect_lut = defaultdict(list)
        for index, bbox, conf, label in detect_c:
            detect_lut[index].append(bbox)
        # IoU matrix (detections x ground truths) per image that has targets.
        iou_lut = dict()
        for index, bboxes in detect_lut.items():
            if index in target_lut:
                iou_lut[index] = rbbox_iou(np.stack(bboxes), np.stack(target_lut[index]))
        counter = defaultdict(int)
        for i, (index, bbox, conf, label) in enumerate(detect_c):
            count = counter[index]  # row of this detection in its image's IoU matrix
            counter[index] += 1
            iou_max = -np.inf
            hit_j = 0
            if index in iou_lut:
                # Find the best-overlapping ground truth for this detection.
                for j, iou in enumerate(iou_lut[index][count]):
                    if iou > iou_max:
                        iou_max = iou
                        hit_j = j
            # True positive only if the best IoU clears the threshold and that
            # ground truth was not already claimed by a higher-scoring detection.
            if iou_max > iou_thresh and target_count[index][hit_j] == 0:
                tp[i] = 1
                target_count[index][hit_j] = 1
            else:
                fp[i] = 1
        tp_sum = np.cumsum(tp)
        fp_sum = np.cumsum(fp)
        npos = len(target_c)
        recall = tp_sum / npos
        precision = tp_sum / (tp_sum + fp_sum)
        aps.append((get_ap_07 if use_07_metric else get_ap)(recall, precision))
    return aps
| [
"mq_chaser@126.com"
] | mq_chaser@126.com |
ef1bd6c833f07200173ede8a31562c463ffe5137 | f999bc5a6e0da4f0904ef2112d7b6191f180ca5b | /Practice/1/no_of_paths_mx.py | 0d9e16140066176ae5144fd71ec615bfba130cee | [] | no_license | ritesh-deshmukh/Algorithms-and-Data-Structures | 721485fbe91a5bdb4d7f99042077e3f813d177cf | 2d3a9842824305b1c64b727abd7c354d221b7cda | refs/heads/master | 2022-11-09T00:18:51.203415 | 2018-10-08T22:31:05 | 2018-10-08T22:31:05 | 132,504,988 | 0 | 1 | null | 2022-10-23T00:51:15 | 2018-05-07T19:07:33 | Python | UTF-8 | Python | false | false | 539 | py | def test(m,n):
arr = [[0 for _ in range(m)] for _ in range(n)]
for i in range(m):
arr[i][0] = 1
for j in range(n):
arr[0][j] = 1
for i in range(1, m):
for j in range(n):
arr[i][j] = arr[i-1][j] + arr[i][j-1]
print(arr)
return arr[m-1][n-1]
# print(arr, end=" ")
m = 3
n = 3
print(test(m,n))
# def no_of_paths(m, n):
# if m == 1 or n == 1:
# return 1
#
# return no_of_paths(m-1, n) + no_of_paths(m, n-1)
#
#
# m = 3
# n = 3
#
# print(no_of_paths(m,n))
| [
"riteshdeshmukh260@gmail.com"
] | riteshdeshmukh260@gmail.com |
56d944f03c1ac19e18d9938fd221054b6cf16446 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_22785.py | dc28a321c6dce65fa06f5b4ba488d4dc41753e99 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,839 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((592.124, 542.587, 480.884), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((593.266, 496.557, 531.803), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((588.198, 434.607, 585.324), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((540.604, 417.686, 454.869), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((590.163, 321.006, 742.554), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((583.563, 514.992, 517.168), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((583.112, 516.108, 516.369), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((564.093, 522.152, 536.357), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((537.613, 513.007, 538.272), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((513.068, 513.1, 524.54), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((491.539, 525.609, 511.531), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((497.197, 549.391, 525.813), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((601.381, 533.093, 504.219), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((398.195, 573.461, 554.589), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((465.31, 452, 701.543), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((465.31, 452, 701.543), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((481.424, 463.668, 681.179), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((493.277, 479.902, 661.002), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((511.285, 490.535, 641.824), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((529.088, 498.483, 621.174), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((547.857, 500.641, 600.038), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((566.362, 501.603, 578.393), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((319.488, 472.341, 649.098), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((812.48, 531.743, 502.175), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((592.859, 478.766, 603.711), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((592.859, 478.766, 603.711), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((576.03, 465.998, 584.012), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((568.801, 448.034, 562.347), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((579.874, 422.284, 554.689), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((591.623, 491.718, 452.208), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((570.835, 347.278, 654.841), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((589.843, 485.823, 505.387), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((589.843, 485.821, 505.384), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((585.872, 463.403, 489.31), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((573.262, 485.679, 476.217), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((564.041, 510.168, 488.399), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((551.354, 530.425, 505.065), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((537.281, 548.635, 523.341), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((521.029, 565.32, 541.705), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((596.329, 554.389, 579.056), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((439.028, 579.59, 511.297), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((629.582, 522.597, 575.073), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((620.218, 502.115, 566.032), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((597.086, 456.72, 543.572), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((574.156, 408.573, 517.677), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((615.762, 432.129, 450.696), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((511.758, 324.77, 531.521), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((525.026, 449.536, 514.037), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((552.124, 445.442, 521.138), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((580.392, 442.016, 517.613), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((604.664, 429.222, 527.617), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((619.048, 424.259, 553.246), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((632.277, 418.067, 579.599), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((617.422, 486.293, 542.27), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((652.821, 352.367, 627.536), (1, 0.6, 0.1), 18.4716)
# Register every accumulated surface model with Chimera's open-models list.
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
e4f11f4ce8171c561aea33859a1304ce7d33d527 | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1719708/homework03/program01.py | 1d48e0a1c60c036592ba3307723907bbfdace1cb | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py | from immagini import *
def quadrato(filename, c):
    """Find the biggest square outline of colour c in the image stored in
    `filename`.

    Returns (side, (col, row)) for the best candidate found, where `side`
    stays -1 when no pixel of colour c exists.  The internal accumulators
    are single-element lists so cerca_quadrati can update them in place.
    """
    best_side = [-1]
    best_corner = [0, 0]
    img = load(filename)
    for row, pixels in enumerate(img):
        for col in range(len(img[0])):
            if pixels[col] == c:
                cerca_quadrati(img, best_side, best_corner, [row, col], c)
    return (best_side[0], (best_corner[1], best_corner[0]))
def cerca_quadrati(image, lato, vertice, v, c):
    """Grow a candidate square anchored at v = [row, col].

    Starts one step beyond the best size recorded so far (lato[0]) and keeps
    enlarging while controlli approves and the square stays inside the
    image.  When growth stopped because a check failed and the reached size
    beats the record, lato[0] and vertice are updated in place.
    """
    keep_growing = True
    size = lato[0] + 1
    while keep_growing and v[0] + size < len(image) and v[1] + size < len(image[0]):
        keep_growing, size = controlli(image, size, v, c, keep_growing)
    if not keep_growing and lato[0] < size - 1:
        lato[0] = size
        vertice[:] = v[:]
def controlli(image, l, v, c, value):
    """One growth step for the size-l square anchored at v.

    Returns (False, l) as soon as a primary or secondary check fails,
    otherwise passes the incoming flag through and grows the size by one.
    """
    if primari(image, l, v, c) or secondari(image, l, v, c):
        return False, l
    return value, l + 1
def primari(image, l, v, c):
    """True when any primary check fails for the size-l square at v:
    the three far corners, the main diagonal, or the anti-diagonal."""
    return not (vertici(image, l, v, c)
                and diagonale1(image, l, v, c)
                and diagonale2(image, l, v, c))
def secondari(image, l, v, c):
    """True when, for l > 0, the border (frame) or interior (full) check
    fails for the size-l square at v; always False when l == 0."""
    if l == 0:
        return False
    return not (frame(image, l, v, c) and full(image, l, v, c))
def vertici(image, l, v, c):
    """Check that the three far corners of the size-l square anchored at
    v = [row, col] all carry colour c (the anchor pixel is already known)."""
    row, col = v[0], v[1]
    other_corners = ((row, col + l), (row + l, col), (row + l, col + l))
    return all(image[r][k] == c for r, k in other_corners)
def diagonale1(image, l, v, c):
    """Walk the main diagonal of the square anchored at v = [row, col].

    Advances while the pixel matches c and the offset is below l, then
    reports whether the pixel where the walk stopped (endpoint included)
    has colour c.
    """
    offset = 0
    while offset < l:
        if image[v[0] + offset][v[1] + offset] != c:
            break
        offset += 1
    return image[v[0] + offset][v[1] + offset] == c
def diagonale2(image, l, v, c):
    """Walk the anti-diagonal (top-right towards bottom-left) of the square
    anchored at v = [row, col]; True when every visited pixel, endpoint
    included, carries colour c."""
    offset = 0
    while offset < l:
        if image[v[0] + offset][v[1] + l - offset] != c:
            break
        offset += 1
    return image[v[0] + offset][v[1] + l - offset] == c
def full(image, l, v, color):
    """Scan the interior of the square anchored at v = [row, col] and report
    whether the scan ended on a pixel of `color`.

    NOTE(review): this loop's behaviour is order-sensitive and preserved
    verbatim.  Two quirks to confirm against the assignment spec before
    changing anything: (1) after finishing a row, the column resets to
    v[1] (the square's left edge) rather than v[1] + 1 (the interior's
    left edge); (2) the scan stops at the first non-matching pixel and the
    return value is simply the colour test of wherever it stopped.
    """
    # Start just inside the top-left corner; shrink l so the bounds below
    # describe the interior rather than the border.
    r = v[0] + 1
    c = v[1] + 1
    l = l - 1
    while r < v[0] + l and image[r][c] == color:
        while c < v[1] + l and image[r][c] == color:
            c += 1
        if image[r][c] == color:
            r += 1
            c = v[1]
    return image[r][c] == color
def frame(image, l, v, color):
    """True when the border of the size-l square anchored at v = [row, col]
    passes both side checks: the vertical sides (altezze, columns c and
    c + l) and the horizontal sides (paralleli, rows r and r + l).

    Cleanup vs. the original: it set an unused local `i = 0` and left
    `value2` unassigned on the failure path, relying on `and`
    short-circuiting to avoid the unbound-name lookup; an early return
    makes the control flow explicit without changing behaviour.
    """
    r = v[0]
    c = v[1]
    if not altezze(image, r, c, l, color):
        return False
    return paralleli(image, r, c, l, color)
def altezze(image, r, c, l, color):
    """Walk the two vertical sides (columns c and c + l) of the square whose
    top-left corner is (r, c); True when the walk, final row included, only
    meets pixels of `color`."""
    row = 0
    while row < l:
        if image[r + row][c + l] != color or image[r + row][c] != color:
            break
        row += 1
    return image[r + row][c + l] == color and image[r + row][c] == color
def paralleli(image, r, c, l, color):
    """Walk the two horizontal sides (rows r and r + l) of the square whose
    top-left corner is (r, c); True when the walk, final column included,
    only meets pixels of `color`.  Mirror of altezze for the horizontal
    sides."""
    i = 0
    while i < l and image[r + l][c + i] == color and image[r][c + i] == color:
        i += 1
return image[r + l][c + i] == color and image[r][c + i] == color | [
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
f4b6fb07afe16236f8d8856add56e78b27adbdd7 | b1adf2e06de52417119c1b6a150739533e9634a9 | /tools/geojson_simplifier/download.py | 67817408fefc638890c34429b23d360d37cbd78d | [
"Apache-2.0"
] | permissive | tjann/website | 5fadc61c86418d7beed7efe25e55ba2e8320cab4 | 0a4d41f157632324437305ba66b4f0ee8e54df00 | refs/heads/master | 2023-04-01T23:21:24.396459 | 2020-11-25T22:27:27 | 2020-11-25T22:27:27 | 301,866,316 | 0 | 0 | Apache-2.0 | 2020-10-06T22:09:37 | 2020-10-06T22:09:36 | null | UTF-8 | Python | false | false | 4,657 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Downloads and saves GeoJson map files from DataCommons.
Typical usage:
python3 download.py
"""
import datacommons as dc
import geojson
# TODO(fpernice-google): Support downloading more than just US states.
class GeojsonDownloader:
    """Downloads desired GeoJSON files from the DataCommons Knowledge Graph.

    Attributes:
        geojsons: None until download_data() has run; afterwards a dict
            mapping each contained area's geoId to a single-element list
            holding its GeoJSON coordinate string, e.g.

            {
                "geoId/01": [<"MultiPolygon" GeoJSON string>],
                "geoId/17": [<"Polygon" GeoJSON string>]
            }
    """

    # Maps a place type to the type of the administrative areas one level
    # below it (e.g. a Country contains AdministrativeArea1 regions).
    LEVEL_MAP = {
        "Country": "AdministrativeArea1",
        "AdministrativeArea1": "AdministrativeArea2",
        "AdministrativeArea2": "City"
    }

    def __init__(self):
        dc.set_api_key('dev')
        self.geojsons = None

    def download_data(self, place='country/USA'):
        """Downloads GeoJSON data for a specified location.

        Given the specified location, extracts the GeoJSONs of all
        administrative areas one level below it (as specified by LEVEL_MAP).
        For example, if the input is country/USA, extracts all
        AdministrativeArea1's within the US (US states).

        Args:
            place: A string that is a valid value for the geoId property of
                a DataCommons node.

        Raises:
            ValueError: If the API returns an unexpected number of typeOf
                values for the place.
            KeyError: If the place's type has no entry in LEVEL_MAP.
        """
        geolevel = dc.get_property_values([place], "typeOf")
        # Fix: this was a bare `assert`, which is stripped under `python -O`
        # and raised AssertionError instead of a documented error type.
        if len(geolevel[place]) != 1:
            raise ValueError(
                'expected exactly one typeOf value for %s, got %r'
                % (place, geolevel[place]))
        # get_property_values returns a list per key; unwrap the single
        # element explicitly.
        geolevel = geolevel[place][0]
        geos_contained_in_place = dc.get_places_in(
            [place], self.LEVEL_MAP[geolevel])[place]
        self.geojsons = dc.get_property_values(geos_contained_in_place,
                                               "geoJsonCoordinates")

    def save(self, prefix='', path='./original-data/'):
        """Saves the downloaded geojsons to disk.

        Args:
            prefix: Prefix prepended to the geoId-derived filename; e.g.
                prefix='original-' yields 'original-geoId-01.geojson'.
            path: Directory in which to save the files, as a string.  It is
                concatenated verbatim, so it must end with a path separator.

        Raises:
            ValueError: If an area has other than exactly one geoJson entry.
        """
        for geoid in self.geojsons:
            # Fix: bare `assert` replaced with an explicit check (see
            # download_data).
            if len(self.geojsons[geoid]) != 1:
                raise ValueError(
                    'expected exactly one geoJson entry for %s' % geoid)
            coords = self.geojsons[geoid][0]
            # geoIds contain '/', which is not filename-safe.
            filename = geoid.replace('/', '-')
            with open(path + prefix + filename + '.geojson', 'w') as f:
                geojson.dump(geojson.loads(coords), f)
if __name__ == '__main__':
    # Default run: download US state boundaries and write one .geojson file
    # per state into ./original-data/.
    loader = GeojsonDownloader()
    loader.download_data()
    loader.save()
| [
"noreply@github.com"
] | tjann.noreply@github.com |
54c12ac707d0fb9f3034bafa6706d3b24fb2c777 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_inaugurates.py | 7de12e81b92bba657d8dbf9875af9f14f3716b76 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
# class header (auto-generated vocabulary word-form stub)
class _INAUGURATES():
    # NOTE(review): generated entry for the word form "inaugurates"; several
    # fields look unfinished -- see the inline notes.
    def __init__(self,):
        self.name = "INAUGURATES"
        self.definitions = inaugurate  # NOTE(review): `inaugurate` is undefined here, so construction raises NameError; presumably the string 'inaugurate' was intended -- confirm against sibling stubs.
        self.parents = []
        self.childen = []  # NOTE(review): likely a typo for `children`; kept because consumers may read this attribute name.
        self.properties = []
        self.jsondata = {}
        self.basic = ['inaugurate']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2a8dbc3fc49a292443e81b1a3b57fa3f2efcb749 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/eqptcapacity/l3usagecaphist1year.py | b54b3bf0fae3f249f60ef6e82c8d700a8b801275 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,430 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class L3UsageCapHist1year(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.L3UsageCapHist1year", "Layer3 entries max capacity")
counter = CounterMeta("v6LocalEpCap", CounterCategory.GAUGE, "count", "Local v6 Endpoints capacity")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v6LocalEpCapMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v6LocalEpCapMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v6LocalEpCapAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v6LocalEpCapSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v6LocalEpCapThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v6LocalEpCapTr"
meta._counters.append(counter)
counter = CounterMeta("v4LocalEpCap", CounterCategory.GAUGE, "count", "Local v4 Endpoints capacity")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v4LocalEpCapMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v4LocalEpCapMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v4LocalEpCapAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v4LocalEpCapSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v4LocalEpCapThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v4LocalEpCapTr"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityL3UsageCapHist1year"
meta.rnFormat = "HDeqptcapacityL3UsageCap1year-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Layer3 entries max capacity stats in 1 year"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.eqptcapacity.L3UsageCapHist")
meta.rnPrefixes = [
('HDeqptcapacityL3UsageCap1year-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 20473, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "v4LocalEpCapAvg", "v4LocalEpCapAvg", 20512, PropCategory.IMPLICIT_AVG)
prop.label = "Local v4 Endpoints capacity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4LocalEpCapAvg", prop)
prop = PropMeta("str", "v4LocalEpCapMax", "v4LocalEpCapMax", 20511, PropCategory.IMPLICIT_MAX)
prop.label = "Local v4 Endpoints capacity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4LocalEpCapMax", prop)
prop = PropMeta("str", "v4LocalEpCapMin", "v4LocalEpCapMin", 20510, PropCategory.IMPLICIT_MIN)
prop.label = "Local v4 Endpoints capacity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4LocalEpCapMin", prop)
prop = PropMeta("str", "v4LocalEpCapSpct", "v4LocalEpCapSpct", 20513, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Local v4 Endpoints capacity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v4LocalEpCapSpct", prop)
prop = PropMeta("str", "v4LocalEpCapThr", "v4LocalEpCapThr", 20514, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Local v4 Endpoints capacity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v4LocalEpCapThr", prop)
prop = PropMeta("str", "v4LocalEpCapTr", "v4LocalEpCapTr", 20515, PropCategory.IMPLICIT_TREND)
prop.label = "Local v4 Endpoints capacity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v4LocalEpCapTr", prop)
prop = PropMeta("str", "v6LocalEpCapAvg", "v6LocalEpCapAvg", 20533, PropCategory.IMPLICIT_AVG)
prop.label = "Local v6 Endpoints capacity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6LocalEpCapAvg", prop)
prop = PropMeta("str", "v6LocalEpCapMax", "v6LocalEpCapMax", 20532, PropCategory.IMPLICIT_MAX)
prop.label = "Local v6 Endpoints capacity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6LocalEpCapMax", prop)
prop = PropMeta("str", "v6LocalEpCapMin", "v6LocalEpCapMin", 20531, PropCategory.IMPLICIT_MIN)
prop.label = "Local v6 Endpoints capacity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6LocalEpCapMin", prop)
prop = PropMeta("str", "v6LocalEpCapSpct", "v6LocalEpCapSpct", 20534, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Local v6 Endpoints capacity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v6LocalEpCapSpct", prop)
prop = PropMeta("str", "v6LocalEpCapThr", "v6LocalEpCapThr", 20535, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Local v6 Endpoints capacity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v6LocalEpCapThr", prop)
prop = PropMeta("str", "v6LocalEpCapTr", "v6LocalEpCapTr", 20536, PropCategory.IMPLICIT_TREND)
prop.label = "Local v6 Endpoints capacity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v6LocalEpCapTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
    def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
        # `index` is this class's single naming property (registered above
        # via meta.namingProps); it is forwarded positionally so the base Mo
        # constructor can derive the object's RN/DN from it.
        namingVals = [index]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
26f920f8b22724bb3ef9147e50da276891d8dd5d | 9b32771b7d1513ee37bc62dd347675abcfc1bfc9 | /example_snippets/multimenus_snippets/NewSnippets/SciPy/Physical and mathematical constants/CODATA physical constants/L/Loschmidt constant (273.15 K, 101.325 kPa).py | 29b699f064aeba095877beb22cd784dbc63d2fd1 | [
"BSD-3-Clause"
] | permissive | listar0810/jupyterlab-snippets-multimenus | 44087ef1aeb030a3074862a337508b57d50072c6 | 477f51cfdbad7409eab45abe53cf774cd70f380c | refs/heads/master | 2022-12-12T18:19:25.221083 | 2020-09-08T01:11:01 | 2020-09-08T01:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | constants.physical_constants["Loschmidt constant (273.15 K, 101.325 kPa)"] | [
"kptan86@gmail.com"
] | kptan86@gmail.com |
8fd79fcb1629b053b15ec3b50f90f913cea4dd13 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/poroussel/pancakes | 5c6939bca7de41d39bd719fa3b49cb1a7017af13 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 548 | #!/usr/bin/env python
import fileinput
def resolve(pancakes):
    """Return the number of flip groups in the pancake stack.

    `pancakes` is a list of '+'/'-' characters.  Trailing '+' entries never
    need flipping and are popped off (note: the input list is mutated in
    place); what remains is counted as runs of equal symbols, which is the
    answer printed per test case below.

    Fix: the original `while pancakes[-1] == '+'` indexed an emptied list
    and raised IndexError on an all-'+' stack; guard on emptiness first.
    """
    while pancakes and pancakes[-1] == '+':
        pancakes.pop()
    if not pancakes:
        return 0
    num = 1
    last = pancakes[0]
    for p in pancakes[1:]:
        if p != last:
            num += 1
            last = p
    return num
if __name__ == "__main__":
    # NOTE(review): Python 2 script (print statement); also, `input` shadows
    # the builtin of the same name.  Kept as-is.
    input = fileinput.input()
    # First line of the input is the number of test cases.
    nbtst = int(input.readline())
    for idx in range(nbtst):
        # Each remaining line is one pancake stack of '+'/'-' characters.
        pancakes = list(input.readline().strip('\n'))
        print 'Case #{}: {}'.format(idx+1, resolve(pancakes))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] | |
ee0b0649a0153943aa926fb5b3951b59399eba96 | 3e8352f1523f5cc1982a41a9e2f655ebda7e58ad | /test/hummingbot/client/command/test_balance_command.py | 1d85c6a462be9e1fbec811d3bdae6a533f44ee33 | [
"Apache-2.0"
] | permissive | matthewbackhouse/hummingbot | a159bfa7d94c3b2c9b3549e4bc304253c4a42791 | 9023822744202624fad276b326cc999b72048d67 | refs/heads/master | 2023-09-03T03:03:18.354741 | 2021-11-02T05:16:59 | 2021-11-02T05:16:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,789 | py | import asyncio
import unittest
from copy import deepcopy
from typing import Awaitable
from unittest.mock import patch, MagicMock
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.hummingbot_application import HummingbotApplication
from test.mock.mock_cli import CLIMockingAssistant
class BalanceCommandTest(unittest.TestCase):
    """Tests for HummingbotApplication's balance command."""

    @patch("hummingbot.core.utils.trading_pair_fetcher.TradingPairFetcher")
    def setUp(self, _: MagicMock) -> None:
        # TradingPairFetcher is patched out so constructing the app does not
        # reach out to exchanges during setup.
        super().setUp()
        self.ev_loop = asyncio.get_event_loop()
        self.app = HummingbotApplication()
        self.cli_mock_assistant = CLIMockingAssistant(self.app.app)
        self.cli_mock_assistant.start()
        # Snapshot the module-level config map so each test may mutate it.
        self.global_config_backup = deepcopy(global_config_map)

    def tearDown(self) -> None:
        self.cli_mock_assistant.stop()
        self.reset_global_config()
        super().tearDown()

    def reset_global_config(self):
        # Restore every entry saved in setUp (global_config_map is shared,
        # module-level state).
        for key, value in self.global_config_backup.items():
            global_config_map[key] = value

    @staticmethod
    def get_async_sleep_fn(delay: float):
        """Return an async callable (accepting any arguments) that sleeps
        for `delay` seconds -- handy as a mock side_effect."""
        async def async_sleep(*_, **__):
            await asyncio.sleep(delay)
        return async_sleep

    def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
        """Run `coroutine` on the test loop, cancelling after `timeout` s."""
        ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
        return ret

    def async_run_with_timeout_coroutine_must_raise_timeout(self, coroutine: Awaitable, timeout: float = 1):
        """Run `coroutine`, asserting it raises asyncio.TimeoutError itself.

        The wrapper translates the coroutine's own TimeoutError into a
        private DesiredError so it can be told apart from the TimeoutError
        that asyncio.wait_for raises when the coroutine merely ran too
        long; the former is re-raised as TimeoutError, the latter becomes
        RuntimeError.
        """
        class DesiredError(Exception):
            pass

        async def run_coro_that_raises(coro: Awaitable):
            try:
                await coro
            except asyncio.TimeoutError:
                raise DesiredError

        try:
            self.async_run_with_timeout(run_coro_that_raises(coroutine), timeout)
        except DesiredError:  # the coroutine raised an asyncio.TimeoutError as expected
            raise asyncio.TimeoutError
        except asyncio.TimeoutError:  # the coroutine did not finish on time
            raise RuntimeError

    @patch("hummingbot.user.user_balances.UserBalances.all_balances_all_exchanges")
    def test_show_balances_handles_network_timeouts(
        self, all_balances_all_exchanges_mock
    ):
        # Make the balance fetch (0.02 s) outlast the configured command
        # timeout (0.01 s) so show_balances() must time out internally.
        all_balances_all_exchanges_mock.side_effect = self.get_async_sleep_fn(delay=0.02)
        global_config_map["other_commands_timeout"].value = 0.01

        with self.assertRaises(asyncio.TimeoutError):
            self.async_run_with_timeout_coroutine_must_raise_timeout(self.app.show_balances())
        self.assertTrue(
            self.cli_mock_assistant.check_log_called_with(
                msg="\nA network error prevented the balances to update. See logs for more details."
            )
        )
| [
"petioptrv@icloud.com"
] | petioptrv@icloud.com |
fa1d8f93970ac77ce82fb5918674f2d1f937b0d5 | 2a3606551a4d850a7b4d6a4e08089c51108ef7be | /script.mrknow.urlresolver/lib/urlresolver9/plugins/vidmad.py | ed863f4b84cd7ed1da2688e61c60efbc0f68e9f3 | [
"GPL-2.0-only",
"Apache-2.0"
] | permissive | rrosajp/filmkodi | a6bb1823f4ed45453c8b8e54ffbd6a7b49f44450 | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | refs/heads/master | 2021-09-18T06:03:17.561062 | 2018-06-22T23:28:53 | 2018-06-22T23:28:53 | 234,768,781 | 1 | 0 | Apache-2.0 | 2021-06-03T20:33:07 | 2020-01-18T17:11:57 | null | UTF-8 | Python | false | false | 1,745 | py | '''
urlresolver Kodi plugin
Copyright (C) 2016 Gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from lib import helpers
from urlresolver9 import common
from urlresolver9.resolver import UrlResolver, ResolverError
class VidMadResolver(UrlResolver):
    name = "vidmad.net"
    domains = ["vidmad.net", "tamildrive.com"]
    pattern = '(?://|\.)((?:vidmad|tamildrive)\.(?:net|com))/(?:embed-)?([0-9a-zA-Z]+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Fetch the hoster page, reject removed/unready videos, then pick a
        stream from the scraped sources and append the request headers."""
        page_url = self.get_url(host, media_id)
        request_headers = {'User-Agent': common.FF_USER_AGENT}
        page = self.net.http_GET(page_url, headers=request_headers).content
        # Page markers checked in the same order as before; messages are the
        # exact strings callers may display.
        for marker, message in (('Not Found', 'File Removed'),
                                ('Video is processing', 'File still being processed')):
            if marker in page:
                raise ResolverError(message)
        candidates = helpers.scrape_sources(page)
        return helpers.pick_source(candidates) + helpers.append_headers(request_headers)

    def get_url(self, host, media_id):
        """Build the page URL for (host, media_id) via the default template."""
        return self._default_get_url(host, media_id)
| [
"mrknow@interia.pl"
] | mrknow@interia.pl |
bf878dc50992ff87dfe63509c1edb33f2a81f5d9 | c92f8b2870add6860ef6b98a9e702788af5fd967 | /Chapter04/generatorex.py | 17a3e95792dfd007db20bac86a4a23afdaa11417 | [] | no_license | Rabidza/INF1511 | c6e1244679b2eff3f782957c056a72d49a552176 | 4bc10130a3693f3223bd12f4e9212a037bd1fc2a | refs/heads/master | 2021-09-08T05:42:58.897999 | 2018-03-07T20:08:31 | 2018-03-07T20:08:31 | 116,714,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | def fruits(seq):
for fruit in seq:
yield '%s' % fruit
# Demonstrate draining the fruits() generator two ways: manual advancement
# and plain iteration.
f = fruits(["Apple", "Orange", "Mango", "Banana"])
print("The list of fruits is:")
# Idiomatic fix: advance via the next() builtin instead of calling the
# f.__next__() dunder directly.
print(next(f))
print(next(f))
print(next(f))
print(next(f))
# A generator is exhausted after one pass, so build a fresh one to iterate.
f = fruits(["Apple", "Orange", "Mango", "Banana"])
print("The list of fruits is:")
for x in f:
    print(x)
| [
"neillhenning@gmail.com"
] | neillhenning@gmail.com |
ef21d236fe9887c6a043c2e2b8b071947d54c588 | 7ec92031e28b1a92a10a9f252f99211663e0d8f9 | /src/py/l0893.py | 7f22bf16ff81b31efaa612c67eff54c34c57752b | [] | no_license | SS4G/leetcode_2020 | 4eb63f6afd59f84e44334e78cb06c7b33a89dd15 | 9a9a8fc779e7456db77f88e7dcdcc1f5cae92c62 | refs/heads/master | 2020-06-29T17:12:39.488350 | 2020-02-08T01:07:08 | 2020-02-08T01:07:08 | 200,575,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | from collections import defaultdict
class SpecialEqual:
def __init__(self, str0):
self.oddCharSet = defaultdict(lambda :0)
self.evenCharSet = defaultdict(lambda :0)
for idx, c in enumerate(str0):
if idx & 1 == 1:
self.oddCharSet[c] += 1
else:
self.evenCharSet[c] += 1
#self.hashKey = self.getHashKey()
def getHashKey(self):
oddTuple = list(self.oddCharSet.items())
oddTuple.sort()
evenTuple = list(self.evenCharSet.items())
evenTuple.sort()
return (evenTuple, oddTuple)
#return s
class Solution(object):
def numSpecialEquivGroups(self, A):
"""
:type A: List[str]
:rtype: int
"""
keyDict = defaultdict(lambda :0)
for str0 in A:
se = SpecialEqual(str0)
keyDict[se.hashKey] += 1
return len(keyDict.keys())
if __name__ == "__main__":
pass
| [
"zihengs@opera.com"
] | zihengs@opera.com |
269a9583ed02424a432d30fb8e2324113b3155e9 | b948da1493329127a9a9ab567bae874c8cfa0bf4 | /portfolio/settings.py | d14137d7fde4fba7b1ae45b948c24066d3adef5c | [] | no_license | Kotodian/portfolio | edb93bec72d66d1fececd71b67a8e7f92cebb260 | 5661bf5d8134bbb576b2ea771fe5a6210c942feb | refs/heads/master | 2020-06-13T02:32:59.356331 | 2019-07-04T08:22:28 | 2019-07-04T08:24:22 | 194,503,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,511 | py | """
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's@281miy&cizve+fkz*ppmfm2$$qtk_2&*jqv@fr082ng=v!w('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#我的App
'gallery.apps.GalleryConfig',
'blog.apps.BlogConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'static')
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL='/media/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
| [
"root@vultr.guest"
] | root@vultr.guest |
092b4f51337ff6d474d15c92a99205d86476b0e0 | 63b997a325ccd3a0d50eed68acb260dba0d9ddbc | /solution 1.py | 848ce35f44cfeef22118bb38b46ae345a1120b3f | [] | no_license | GLAU-TND/python-programming-assignment-2-anshika123456 | 238c8f78f09192f731b395313acecdc70bad3b11 | 2892ed91b5e0cab7d00cf349129b5746cb6eaf03 | refs/heads/master | 2021-01-13T22:45:16.969806 | 2020-02-23T17:38:17 | 2020-02-23T17:38:17 | 242,518,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | n=eval(input())
a=[]
p=n[0][-1]
a.append(n[0])
n=n[1:]
for j in n:
for i in n:
if p==i[0] and i not in a:
a.append(i)
p=i[-1]
print(a)
| [
"noreply@github.com"
] | GLAU-TND.noreply@github.com |
fa525dc7c9ab05eb28f373dd7d92e1e7c26dc407 | 0f5f6ff75cef3e81419216ba0191bb69a26c9068 | /hackerank/stat/wmean.py | 4fba60a0e2fa733e6d25f1ea4c30d0b972482be5 | [] | no_license | daddyawesome/CodingP | 1c7bbb2edd30333f7cb1b78ec6a0428854c4fa2b | 66ab4e452c23f3f770d6ad1e32f604c65e1dcbd3 | refs/heads/master | 2022-10-13T18:36:23.068195 | 2022-10-04T07:01:58 | 2022-10-04T07:01:58 | 220,047,911 | 0 | 0 | null | 2020-07-07T20:49:07 | 2019-11-06T17:01:44 | Python | UTF-8 | Python | false | false | 390 | py | '''
Weighted Mean
'''
n = input()
elements = input()
weights = input()
elements = elements.split(' ')
weights = weights.split(' ')
numerator = 0
denominator = 0
for i in range(0, len(elements)):
numerator = numerator + int(weights[i]) * int(elements[i])
denominator = denominator + int(weights[i])
weighted_mean = numerator / float(denominator)
print(round(weighted_mean, 1))
| [
"sablay296@gmail.com"
] | sablay296@gmail.com |
0e820711e2cf9e4038ab0cc7af51a70addfbc936 | b12f467a6bf5aff09c1e1effb25d8f444f126766 | /trac/trac/ticket/notification.py | 2de8ec915240b0652bb13061bf5dadfcb6e40cd1 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Python-2.0",
"Apache-2.0"
] | permissive | apache/bloodhound | 61a6c619cadfa64a2831cae4beac45c40429633e | c3e31294e68af99d4e040e64fbdf52394344df9e | refs/heads/trunk | 2023-09-03T14:19:40.562436 | 2018-02-17T18:49:54 | 2018-02-17T18:49:54 | 9,290,691 | 87 | 40 | Apache-2.0 | 2023-04-15T11:57:45 | 2013-04-08T07:20:19 | Python | UTF-8 | Python | false | false | 20,369 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2005-2006 Emmanuel Blot <emmanuel.blot@free.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
#
from __future__ import with_statement
from hashlib import md5
from genshi.template.text import NewTextTemplate
from trac.core import *
from trac.config import *
from trac.notification import NotifyEmail
from trac.ticket.api import TicketSystem
from trac.util.datefmt import to_utimestamp
from trac.util.text import obfuscate_email_address, text_width, wrap
from trac.util.translation import deactivate, reactivate
class TicketNotificationSystem(Component):
always_notify_owner = BoolOption('notification', 'always_notify_owner',
'false',
"""Always send notifications to the ticket owner (''since 0.9'').""")
always_notify_reporter = BoolOption('notification',
'always_notify_reporter',
'false',
"""Always send notifications to any address in the ''reporter''
field.""")
always_notify_updater = BoolOption('notification', 'always_notify_updater',
'true',
"""Always send notifications to the person who causes the ticket
property change and to any previous updater of that ticket.""")
ticket_subject_template = Option('notification', 'ticket_subject_template',
'$prefix #$ticket.id: $summary',
"""A Genshi text template snippet used to get the notification subject.
By default, the subject template is `$prefix #$ticket.id: $summary`.
`$prefix` being the value of the `smtp_subject_prefix` option.
''(since 0.11)''""")
batch_subject_template = Option('notification', 'batch_subject_template',
'$prefix Batch modify: $tickets_descr',
"""Like ticket_subject_template but for batch modifications.
By default, the template is `$prefix Batch modify: $tickets_descr`.
''(since 1.0)''""")
ambiguous_char_width = Option('notification', 'ambiguous_char_width',
'single',
"""Which width of ambiguous characters (e.g. 'single' or
'double') should be used in the table of notification mail.
If 'single', the same width as characters in US-ASCII. This is
expected by most users. If 'double', twice the width of
US-ASCII characters. This is expected by CJK users. ''(since
0.12.2)''""")
def get_ticket_notification_recipients(env, config, tktid, prev_cc):
notify_reporter = config.getbool('notification', 'always_notify_reporter')
notify_owner = config.getbool('notification', 'always_notify_owner')
notify_updater = config.getbool('notification', 'always_notify_updater')
ccrecipients = prev_cc
torecipients = []
with env.db_query as db:
# Harvest email addresses from the cc, reporter, and owner fields
for row in db("SELECT cc, reporter, owner FROM ticket WHERE id=%s",
(tktid,)):
if row[0]:
ccrecipients += row[0].replace(',', ' ').split()
reporter = row[1]
owner = row[2]
if notify_reporter:
torecipients.append(row[1])
if notify_owner:
torecipients.append(row[2])
break
# Harvest email addresses from the author field of ticket_change(s)
if notify_updater:
for author, ticket in db("""
SELECT DISTINCT author, ticket FROM ticket_change
WHERE ticket=%s
""", (tktid,)):
torecipients.append(author)
# Suppress the updater from the recipients
updater = None
for updater, in db("""
SELECT author FROM ticket_change WHERE ticket=%s
ORDER BY time DESC LIMIT 1
""", (tktid,)):
break
else:
for updater, in db("SELECT reporter FROM ticket WHERE id=%s",
(tktid,)):
break
if not notify_updater:
filter_out = True
if notify_reporter and (updater == reporter):
filter_out = False
if notify_owner and (updater == owner):
filter_out = False
if filter_out:
torecipients = [r for r in torecipients
if r and r != updater]
elif updater:
torecipients.append(updater)
return (torecipients, ccrecipients, reporter, owner)
class TicketNotifyEmail(NotifyEmail):
"""Notification of ticket changes."""
template_name = "ticket_notify_email.txt"
ticket = None
newticket = None
modtime = 0
from_email = 'trac+ticket@localhost'
COLS = 75
def __init__(self, env):
NotifyEmail.__init__(self, env)
self.prev_cc = []
ambiguous_char_width = env.config.get('notification',
'ambiguous_char_width',
'single')
self.ambiwidth = 2 if ambiguous_char_width == 'double' else 1
def notify(self, ticket, newticket=True, modtime=None):
"""Send ticket change notification e-mail (untranslated)"""
t = deactivate()
translated_fields = ticket.fields
try:
ticket.fields = TicketSystem(self.env).get_ticket_fields()
self._notify(ticket, newticket, modtime)
finally:
ticket.fields = translated_fields
reactivate(t)
def _notify(self, ticket, newticket=True, modtime=None):
self.ticket = ticket
self.modtime = modtime
self.newticket = newticket
changes_body = ''
self.reporter = ''
self.owner = ''
changes_descr = ''
change_data = {}
link = self.env.abs_href.ticket(ticket.id)
summary = self.ticket['summary']
author = None
if not self.newticket and modtime: # Ticket change
from trac.ticket.web_ui import TicketModule
for change in TicketModule(self.env).grouped_changelog_entries(
ticket, when=modtime):
if not change['permanent']: # attachment with same time...
continue
author = change['author']
change_data.update({
'author': self.obfuscate_email(author),
'comment': wrap(change['comment'], self.COLS, ' ', ' ',
'\n', self.ambiwidth)
})
link += '#comment:%s' % str(change.get('cnum', ''))
for field, values in change['fields'].iteritems():
old = values['old']
new = values['new']
newv = ''
if field == 'description':
new_descr = wrap(new, self.COLS, ' ', ' ', '\n',
self.ambiwidth)
old_descr = wrap(old, self.COLS, '> ', '> ', '\n',
self.ambiwidth)
old_descr = old_descr.replace(2 * '\n', '\n' + '>' + \
'\n')
cdescr = '\n'
cdescr += 'Old description:' + 2 * '\n' + old_descr + \
2 * '\n'
cdescr += 'New description:' + 2 * '\n' + new_descr + \
'\n'
changes_descr = cdescr
elif field == 'summary':
summary = "%s (was: %s)" % (new, old)
elif field == 'cc':
(addcc, delcc) = self.diff_cc(old, new)
chgcc = ''
if delcc:
chgcc += wrap(" * cc: %s (removed)" %
', '.join(delcc),
self.COLS, ' ', ' ', '\n',
self.ambiwidth) + '\n'
if addcc:
chgcc += wrap(" * cc: %s (added)" %
', '.join(addcc),
self.COLS, ' ', ' ', '\n',
self.ambiwidth) + '\n'
if chgcc:
changes_body += chgcc
self.prev_cc += self.parse_cc(old) if old else []
else:
if field in ['owner', 'reporter']:
old = self.obfuscate_email(old)
new = self.obfuscate_email(new)
newv = new
length = 7 + len(field)
spacer_old, spacer_new = ' ', ' '
if len(old + new) + length > self.COLS:
length = 5
if len(old) + length > self.COLS:
spacer_old = '\n'
if len(new) + length > self.COLS:
spacer_new = '\n'
chg = '* %s: %s%s%s=>%s%s' % (field, spacer_old, old,
spacer_old, spacer_new,
new)
chg = chg.replace('\n', '\n' + length * ' ')
chg = wrap(chg, self.COLS, '', length * ' ', '\n',
self.ambiwidth)
changes_body += ' %s%s' % (chg, '\n')
if newv:
change_data[field] = {'oldvalue': old, 'newvalue': new}
if newticket:
author = ticket['reporter']
ticket_values = ticket.values.copy()
ticket_values['id'] = ticket.id
ticket_values['description'] = wrap(
ticket_values.get('description', ''), self.COLS,
initial_indent=' ', subsequent_indent=' ', linesep='\n',
ambiwidth=self.ambiwidth)
ticket_values['new'] = self.newticket
ticket_values['link'] = link
subject = self.format_subj(summary)
if not self.newticket:
subject = 'Re: ' + subject
self.data.update({
'ticket_props': self.format_props(),
'ticket_body_hdr': self.format_hdr(),
'subject': subject,
'ticket': ticket_values,
'changes_body': changes_body,
'changes_descr': changes_descr,
'change': change_data
})
NotifyEmail.notify(self, ticket.id, subject, author)
def format_props(self):
tkt = self.ticket
fields = [f for f in tkt.fields
if f['name'] not in ('summary', 'cc', 'time', 'changetime')]
width = [0, 0, 0, 0]
i = 0
for f in fields:
if f['type'] == 'textarea':
continue
fname = f['name']
if not fname in tkt.values:
continue
fval = tkt[fname] or ''
if fval.find('\n') != -1:
continue
if fname in ['owner', 'reporter']:
fval = self.obfuscate_email(fval)
idx = 2 * (i % 2)
width[idx] = max(self.get_text_width(f['label']), width[idx])
width[idx + 1] = max(self.get_text_width(fval), width[idx + 1])
i += 1
width_l = width[0] + width[1] + 5
width_r = width[2] + width[3] + 5
half_cols = (self.COLS - 1) / 2
if width_l + width_r + 1 > self.COLS:
if ((width_l > half_cols and width_r > half_cols) or
(width[0] > half_cols / 2 or width[2] > half_cols / 2)):
width_l = half_cols
width_r = half_cols
elif width_l > width_r:
width_l = min((self.COLS - 1) * 2 / 3, width_l)
width_r = self.COLS - width_l - 1
else:
width_r = min((self.COLS - 1) * 2 / 3, width_r)
width_l = self.COLS - width_r - 1
sep = width_l * '-' + '+' + width_r * '-'
txt = sep + '\n'
cell_tmp = [u'', u'']
big = []
i = 0
width_lr = [width_l, width_r]
for f in [f for f in fields if f['name'] != 'description']:
fname = f['name']
if not tkt.values.has_key(fname):
continue
fval = tkt[fname] or ''
if fname in ['owner', 'reporter']:
fval = self.obfuscate_email(fval)
if f['type'] == 'textarea' or '\n' in unicode(fval):
big.append((f['label'], '\n'.join(fval.splitlines())))
else:
# Note: f['label'] is a Babel's LazyObject, make sure its
# __str__ method won't be called.
str_tmp = u'%s: %s' % (f['label'], unicode(fval))
idx = i % 2
cell_tmp[idx] += wrap(str_tmp, width_lr[idx] - 2 + 2 * idx,
(width[2 * idx]
- self.get_text_width(f['label'])
+ 2 * idx) * ' ',
2 * ' ', '\n', self.ambiwidth)
cell_tmp[idx] += '\n'
i += 1
cell_l = cell_tmp[0].splitlines()
cell_r = cell_tmp[1].splitlines()
for i in range(max(len(cell_l), len(cell_r))):
if i >= len(cell_l):
cell_l.append(width_l * ' ')
elif i >= len(cell_r):
cell_r.append('')
fmt_width = width_l - self.get_text_width(cell_l[i]) \
+ len(cell_l[i])
txt += u'%-*s|%s%s' % (fmt_width, cell_l[i], cell_r[i], '\n')
if big:
txt += sep
for name, value in big:
txt += '\n'.join(['', name + ':', value, '', ''])
txt += sep
return txt
def parse_cc(self, txt):
return filter(lambda x: '@' in x, txt.replace(',', ' ').split())
def diff_cc(self, old, new):
oldcc = NotifyEmail.addrsep_re.split(old)
newcc = NotifyEmail.addrsep_re.split(new)
added = [self.obfuscate_email(x) \
for x in newcc if x and x not in oldcc]
removed = [self.obfuscate_email(x) \
for x in oldcc if x and x not in newcc]
return (added, removed)
def format_hdr(self):
return '#%s: %s' % (self.ticket.id, wrap(self.ticket['summary'],
self.COLS, linesep='\n',
ambiwidth=self.ambiwidth))
def format_subj(self, summary):
template = self.config.get('notification','ticket_subject_template')
template = NewTextTemplate(template.encode('utf8'))
prefix = self.config.get('notification', 'smtp_subject_prefix')
if prefix == '__default__':
prefix = '[%s]' % self.env.project_name
data = {
'prefix': prefix,
'summary': summary,
'ticket': self.ticket,
'env': self.env,
}
return template.generate(**data).render('text', encoding=None).strip()
def get_recipients(self, tktid):
(torecipients, ccrecipients, reporter, owner) = \
get_ticket_notification_recipients(self.env, self.config,
tktid, self.prev_cc)
self.reporter = reporter
self.owner = owner
return (torecipients, ccrecipients)
def get_message_id(self, rcpt, modtime=None):
"""Generate a predictable, but sufficiently unique message ID."""
s = '%s.%08d.%d.%s' % (self.env.project_url.encode('utf-8'),
int(self.ticket.id), to_utimestamp(modtime),
rcpt.encode('ascii', 'ignore'))
dig = md5(s).hexdigest()
host = self.from_email[self.from_email.find('@') + 1:]
msgid = '<%03d.%s@%s>' % (len(s), dig, host)
return msgid
def send(self, torcpts, ccrcpts):
dest = self.reporter or 'anonymous'
hdrs = {}
hdrs['Message-ID'] = self.get_message_id(dest, self.modtime)
hdrs['X-Trac-Ticket-ID'] = str(self.ticket.id)
hdrs['X-Trac-Ticket-URL'] = self.data['ticket']['link']
if not self.newticket:
msgid = self.get_message_id(dest)
hdrs['In-Reply-To'] = msgid
hdrs['References'] = msgid
NotifyEmail.send(self, torcpts, ccrcpts, hdrs)
def get_text_width(self, text):
return text_width(text, ambiwidth=self.ambiwidth)
def obfuscate_email(self, text):
""" Obfuscate text when `show_email_addresses` is disabled in config.
Obfuscation happens once per email, regardless of recipients, so
cannot use permission-based obfuscation.
"""
if self.env.config.getbool('trac', 'show_email_addresses'):
return text
else:
return obfuscate_email_address(text)
class BatchTicketNotifyEmail(NotifyEmail):
"""Notification of ticket batch modifications."""
template_name = "batch_ticket_notify_email.txt"
def __init__(self, env):
NotifyEmail.__init__(self, env)
def notify(self, tickets, new_values, comment, action, author):
"""Send batch ticket change notification e-mail (untranslated)"""
t = deactivate()
try:
self._notify(tickets, new_values, comment, action, author)
finally:
reactivate(t)
def _notify(self, tickets, new_values, comment, action, author):
self.tickets = tickets
changes_body = ''
self.reporter = ''
self.owner = ''
changes_descr = '\n'.join(['%s to %s' % (prop, val)
for (prop, val) in new_values.iteritems()])
tickets_descr = ', '.join(['#%s' % t for t in tickets])
subject = self.format_subj(tickets_descr)
link = self.env.abs_href.query(id=','.join([str(t) for t in tickets]))
self.data.update({
'tickets_descr': tickets_descr,
'changes_descr': changes_descr,
'comment': comment,
'action': action,
'author': author,
'subject': subject,
'ticket_query_link': link,
})
NotifyEmail.notify(self, tickets, subject, author)
def format_subj(self, tickets_descr):
template = self.config.get('notification','batch_subject_template')
template = NewTextTemplate(template.encode('utf8'))
prefix = self.config.get('notification', 'smtp_subject_prefix')
if prefix == '__default__':
prefix = '[%s]' % self.env.project_name
data = {
'prefix': prefix,
'tickets_descr': tickets_descr,
'env': self.env,
}
return template.generate(**data).render('text', encoding=None).strip()
def get_recipients(self, tktids):
alltorecipients = []
allccrecipients = []
for t in tktids:
(torecipients, ccrecipients, reporter, owner) = \
get_ticket_notification_recipients(self.env, self.config,
t, [])
alltorecipients.extend(torecipients)
allccrecipients.extend(ccrecipients)
return (list(set(alltorecipients)), list(set(allccrecipients)))
| [
"gjm@apache.org"
] | gjm@apache.org |
a501a6377f6fd1a8cf8077ad539dbf88ce6b8c96 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03626/s769192318.py | 659f9e7bc552bd622370f1ebcadc9cd3dcf92378 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | mod = 1000000007
n = int(input())
s1 = input()
s2 = input()
ans = 1
for i in range(n):
if s1[i] == s2[i]:
if i==0:
ans *= 3
else:
if s1[i-1]==s2[i-1]:
ans *= 2
else:
continue
else:
if i==0:
ans *= 6
else:
if s1[i] == s1[i-1]:
continue
elif s1[i-1] == s2[i-1]:
ans *=2
else:
ans *= 3
ans = ans%mod
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
54ef31def53b0ce31a1fa0cf49cb09a862d4173e | bf460178162ada0dff219808ebcb6909d2118c0b | /0x11-python-network_1/8-json_api.py | f14ad6ccaabd85d773455006210295e0a3cced71 | [] | no_license | dario-castano/holbertonschool-higher_level_programming | b273d53da01eaa13aafcfef49a84cf4504e15795 | b509695dc898bf31dfb8cc4f82c4bdfdb8407cae | refs/heads/master | 2023-08-10T17:15:27.232508 | 2020-02-13T14:42:17 | 2020-02-13T14:42:17 | 207,344,442 | 0 | 0 | null | 2023-07-22T15:46:03 | 2019-09-09T15:39:58 | Python | UTF-8 | Python | false | false | 585 | py | #!/usr/bin/python3
"""
Python script that takes in a URL, sends a request to the URL
and displays the body of the response.
"""
import sys
import requests
if __name__ == "__main__":
data = {"q": sys.argv[1] if sys.argv.__len__() >= 2 else ""}
url = 'http://0.0.0.0:5000/search_user'
response = requests.post(url, data)
try:
json_data = response.json()
if not json_data:
print('No result')
else:
print('[{}] {}'.format(json_data.get('id'), json_data.get('name')))
except ValueError:
print('Not a valid JSON')
| [
"dario.castano@aim.com"
] | dario.castano@aim.com |
fdfd1fdd521757e153615bc1f421caef78c1123e | 0fd66a4a28bdc7d967ec18d90eca5cc54b5cbdd4 | /middleware/legato/templates/legato_gfx_mxt_cpro/legato_gfx_mxt_cpro.py | b8df421ed62d583c03dbef0bf18cd7bf19b1b356 | [
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"LicenseRef-scancode-public-domain"
] | permissive | fb321/gfx | b865539ea6acd9c99d11a3968424ae03b5dea438 | e59a8d65ef77d4b017fdc523305d4d29a066d92a | refs/heads/master | 2020-06-27T14:20:24.209933 | 2019-07-31T22:01:05 | 2019-07-31T22:01:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,346 | py | # coding: utf-8
##############################################################################
# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
##############################################################################
componentsIDTable = ["HarmonyCore", "gfx_legato", "gfx_disp_mxt_cpro_320x480", "le_gfx_driver_ili9488", "sys_input", "gfx_maxtouch_controller"]
autoConnectTable = [["le_gfx_driver_ili9488", "Graphics Display", "gfx_disp_mxt_cpro_320x480", "gfx_display"],
["gfx_legato", "gfx_driver", "le_gfx_driver_ili9488", "gfx_driver_ili9488"],
["gfx_maxtouch_controller", "touch_panel", "gfx_disp_mxt_cpro_320x480", "touch_panel"]]
deactivateIDTable = ["FreeRTOS"]
execfile(Module.getPath() + "../common/pin_config.py")
execfile(Module.getPath() + "../common/bsp_utils.py")
execfile(Module.getPath() + "../common/display_utils.py")
pinConfigureFxn = configurePins
pinResetFxn = resetPins
#Add BSP support
execfile(Module.getPath() + "Support_BSP_SAM_E54_Curiosity_Ultra.py")
def enableConfigPins(bspID, configID, enable):
global pinConfigureFxn
if (enable == True):
print("enableCOnfig " + configID)
else:
print("disableCOnfig " + configID)
pinConfig = getBSPSupportNode(bspID, configID).getPinConfig()
if (enable == True):
pinConfigureFxn(pinConfig)
else:
pinResetFxn(pinConfig)
def enableConfig(bspID, configID, enable):
componentIDTable = getBSPSupportNode(bspID, configID).getComponentActivateList()
deactivateIDTable = getBSPSupportNode(bspID, configID).getComponentDeactivateList()
autoConnectTable = getBSPSupportNode(bspID, configID).getComponentAutoConnectList()
if (enable == True):
if (componentIDTable != None):
res = Database.activateComponents(componentIDTable)
if (deactivateIDTable != None):
res = Database.deactivateComponents(deactivateIDTable)
if (autoConnectTable != None):
res = Database.connectDependencies(autoConnectTable)
try:
getBSPSupportNode(bspID, configID).getEventCallbackFxn()("configure")
except:
print("No event callback for " + bspID + " configID.")
elif (enable == False):
if (componentIDTable != None):
res = Database.deactivateComponents(componentIDTable)
try:
getBSPSupportNode(bspID, configID).getEventCallbackFxn()("unconfigure")
except:
print("No event callback for " + bspID + " configID.")
enableConfigPins(bspID, configID, enable)
def configureDisplayInterface(bspID, interface):
print("Configuring for " + str(interface) + " Interface.")
if (bspID == None):
print("No BSP used, will not configure")
else:
DisplayInterfaceList = getDisplayInterfaces(bspID)
if (DisplayInterfaceList != None):
if (str(interface) in DisplayInterfaceList):
for val in DisplayInterfaceList:
if (val != interface):
enableConfig(bspID, val, False)
enableConfig(bspID, interface, True)
else:
print(str(interface) + " display interface is not supported.")
def onDisplayInterfaceSelected(interfaceSelected, event):
bspID = getSupportedBSP()
newDisplayInterface= interfaceSelected.getComponent().getSymbolByID("DisplayInterface").getValue()
currDisplayInterface = interfaceSelected.getComponent().getSymbolByID("currDisplayInterface").getValue()
interfaceSelected.getComponent().getSymbolByID("currDisplayInterface").setValue(event["value"], 0)
configureDisplayInterface(bspID, str(newDisplayInterface))
def instantiateComponent(templateComponent):
global componentsIDTable
global autoConnectTable
global supportedBSPsIDList
#Check if a supported BSP is loaded
bspUsedKeyID = getSupportedBSP()
DisplayInterfaceList = getDisplayInterfaces(bspUsedKeyID)
#if there is no list, build the list from the interfaces for each supported BSP
if (DisplayInterfaceList == None):
DisplayInterfaceList = []
bspSupportedList = getSupportedBSPList()
for bsp in bspSupportedList:
DisplayInterfaceList += getDisplayInterfaces(bsp)
# Remove duplicates
DisplayInterfaceList = list(dict.fromkeys(DisplayInterfaceList))
DisplayInterface = templateComponent.createComboSymbol("DisplayInterface", None, DisplayInterfaceList)
DisplayInterface.setLabel("Display Interface")
DisplayInterface.setDescription("Configures the display controller interface to the maXTouch Curiosity Pro.")
DisplayInterface.setDependencies(onDisplayInterfaceSelected, ["DisplayInterface"])
DisplayInterface.setVisible(True)
# Shadow display interface symbol
currDisplayInterface = templateComponent.createComboSymbol("currDisplayInterface", None, DisplayInterfaceList)
currDisplayInterface.setVisible(False)
res = Database.activateComponents(componentsIDTable)
res = Database.connectDependencies(autoConnectTable)
res = Database.deactivateComponents(deactivateIDTable);
if (bspUsedKeyID != None):
DisplayInterface.setDefaultValue(getDefaultDisplayInterface(bspUsedKeyID))
currDisplayInterface.setDefaultValue(getDefaultDisplayInterface(bspUsedKeyID))
configureDisplayInterface(bspUsedKeyID, str(currDisplayInterface.getValue()))
else:
print("No BSP used, only software components are configured. Please add board-specific components.")
| [
"http://support.microchip.com"
] | http://support.microchip.com |
9919ac5083dab3733793eb45a8b2e4565092b311 | aa410a95773aeea73e75f0e701db5cdc0eda890b | /competences/pycamp_ar_2014/players/futurologist.py | 8bf925a5ec4cf504a5022fa110a0975c01b568df | [] | no_license | predominant/zombsole | ccc00893b7739c5341c43fc28375415fa628b885 | a04ff40a144cb1f63d8aa29ccf0b06ecccc2bc7f | refs/heads/master | 2021-01-21T19:29:05.322551 | 2014-03-26T05:38:15 | 2014-03-26T05:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,910 | py | # coding: utf-8
import unittest
import random
import itertools
import copy
import sys
import codecs
import utils
from things import Player, Zombie, Wall, Box
from utils import closest
from weapons import Rifle, Shotgun
from core import World
names = """Chen Tuan
Ge Hong
Laozi
Lie Yukou
Yang Xiong
Zhang Daoling
Zhang Jue
Zhang Sanfeng
Zhuangzi
Darni""".split("\n")
class GoalDistanceMap(object):
    """Grid of step distances from every walkable cell to a goal.

    Distances are computed with an iterative flood fill: on each pass,
    cells adjacent to a cell at distance ``count`` get ``count + 1``.
    Walls and boxes are impassable (``+inf``) and unreachable cells stay
    ``None`` internally (reported as ``+inf`` by ``__getitem__``).
    """
    def __init__(self, goal, things):
        # goal: one (x, y) position or a list of positions.
        # things: mapping of (x, y) -> world object (Wall, Box, Player, ...).
        self.things = things
        # Grid size derives from the furthest occupied cell, plus margin.
        mx = max(t[0] for t in things) + 2
        my = max(t[1] for t in things) + 2
        self.size = mx, my
        self.goal = goal
        self.map = [ [None] * my for x in range(mx) ]
        self.build()
    def __getitem__(self, item):
        """Distance at position ``item``; ``+inf`` outside the grid."""
        if item[0] < 0 or item[1] < 0:
            # Negative indexes would silently wrap around in Python,
            # so treat them explicitly as off-grid.
            return float("+inf")
        try:
            return self.map[item[0]][item[1]]
        except IndexError:
            return float("+inf")
    def build(self):
        """Flood-fill distances outward from the goal cell(s)."""
        if isinstance(self.goal, list):
            for g in self.goal:
                self.map[g[0]][g[1]] = 0
        else:
            self.map[self.goal[0]][self.goal[1]] = 0
        count = 0
        while True:
            changed = False
            for y in range(self.size[1]):
                for x in range(self.size[0]):
                    thing = self.things.get((x,y), None)
                    if thing is not None:
                        # Walls and boxes block movement entirely.
                        if isinstance(thing, (Wall, Box)):
                            self.map[x][y] = float("+inf")
                            continue
                    v = self.map[x][y]
                    if v is None:
                        for p in utils.adyacent_positions((x,y)):
                            cv = self[p]
                            if cv == count:
                                self.map[x][y] = cv + 1
                                changed = True
            if not changed:
                # No cell gained a distance this pass: fill complete.
                break
            count += 1
    def show(self):
        """Dump the distance grid to stdout (debugging aid only)."""
        for y in range(self.size[1]):
            for x in range(self.size[0]):
                if x > 30: continue
                thing = self.things.get((x,y), None)
                if thing is not None:
                    if isinstance(thing, Wall):
                        sys.stdout.write("##|")
                        continue
                    if isinstance(thing, Box):
                        sys.stdout.write("==|")
                        continue
                v = self.map[x][y]
                if isinstance(v, int):
                    if v > 99:
                        sys.stdout.write("**|")
                    else:
                        sys.stdout.write("%02d|" % v)
                else:
                    sys.stdout.write("XX|")
            # Bug fix: the row terminator had been commented out (a leftover
            # Python-2 ``print``), which ran every row together on one line.
            sys.stdout.write("\n")
class State(object):
    """Shared blackboard coordinating the whole futurologist team.

    A single module-level instance (``S``) is mutated by every player so
    the team can agree on one strategy per game tick.
    """
    # game-clock bookkeeping
    tick = 0              # team turns elapsed
    last_action = 0       # tick of the last attack/move (stall detection)
    # team bookkeeping
    number_players = 0    # players created by the factory
    played = 0            # players that already acted this tick
    # strategy state machine
    strategy = None       # currently active strategy object
    strategy_name = None  # name of the builder that produced it
    next_strategy = "first_strategy"  # builder to switch to next tick
    # distance-map caches (unused placeholders, kept for compatibility)
    cover_1_map = None
    cover_2_map = None
    cover_3_map = None
    goal_map = None
S = State()
class FuturologistSafehouse(Player):
    '''Safehouse-rules team player.
    All futurologists share the module-level state ``S``: the first
    player to act on a tick advances the clock and, if a switch was
    scheduled, builds the new strategy via the matching ``build_*``
    method.  The ``build_*`` methods form a waypoint chain that walks
    the whole team around the map toward the safe house.
    '''
    futurologist = True
    def next_step(self, things, t=None):
        result = None
        if S.played == 0:
            # lead a change in strategy
            S.tick += 1
            if S.next_strategy is not None:
                S.strategy = getattr(self, 'build_' + S.next_strategy)(things)
                S.strategy_name = S.next_strategy
                S.next_strategy = None
        S.played += 1
        if S.played == len([x for x in things.values() if isinstance(x, Player)]):
            # last living player this tick: reset the per-tick counter
            S.played = 0
        result = S.strategy.get_next_move(self, things)
        self.status = S.strategy_name + "|"+ str(S.tick) + "|" + str(result)
        return result
    # Waypoint chain: each builder rushes one position, then names the
    # next builder to activate once the team stalls or times out.
    def build_first_strategy(self, things):
        return RushStrategy(things, (86, 25), "dos")
    def build_dos(self, things):
        return RushStrategy(things, [(93, 15), (94, 15)], "tres")
    def build_tres(self, things):
        return RushStrategy(things, (93, 8), "cuatro")
    def build_cuatro(self, things):
        return RushStrategy(things, (93, 0), "cinco")
    def build_cinco(self, things):
        return RushStrategy(things, (85, 0), "seis")
    def build_seis(self, things):
        return RushStrategy(things, (73, 0), "siete")
    def build_siete(self, things):
        return RushStrategy(things, (61, 0), "ocho")
    def build_ocho(self, things):
        return RushStrategy(things, (49, 0), "nueve")
    def build_nueve(self, things):
        return RushStrategy(things, (37, 0), "diez")
    def build_diez(self, things):
        return RushStrategy(things, (25, 0), "once")
    def build_once(self, things):
        return RushStrategy(things, (13, 0), "goal")
    def build_goal(self, things):
        # final target; loops back to "siete" if the game keeps going
        return RushStrategy(things, (2, 2), "siete")
class FuturologistExtermination(Player):
    '''Extermination-rules team player.
    Shares the module-level state ``S`` like the other futurologists:
    the first player of each tick advances the clock and performs any
    scheduled strategy switch.  The ``build_*`` methods form a closed
    patrol loop (the last one schedules "first_strategy" again).
    '''
    futurologist = True
    def next_step(self, things, t=None):
        result = None
        if S.played == 0:
            # lead a change in strategy
            S.tick += 1
            if S.next_strategy is not None:
                S.strategy = getattr(self, 'build_' + S.next_strategy)(things)
                S.strategy_name = S.next_strategy
                S.next_strategy = None
        S.played += 1
        if S.played == len([x for x in things.values() if isinstance(x, Player)]):
            # last living player this tick: reset the per-tick counter
            S.played = 0
        result = S.strategy.get_next_move(self, things)
        self.status = S.strategy_name + "|"+ str(S.tick) + "|" + str(result)
        return result
    # Patrol loop over four waypoints; wait tolerance raised to 50 ticks.
    def build_first_strategy(self, things):
        return RushStrategy(things, (7, 12), "dos", 50)
    def build_dos(self, things):
        return RushStrategy(things, (2, 8), "tres", 50)
    def build_tres(self, things):
        return RushStrategy(things, (27, 13), "cuatro", 50)
    def build_cuatro(self, things):
        return RushStrategy(things, (27, 1), "first_strategy", 50)
class FuturologistEvacuation(Player):
    '''Evacuation-rules team player (first version).
    NOTE(review): a second class with this exact name is defined further
    down in this module and overwrites this one when the module is
    imported, so this definition is effectively dead code.
    '''
    futurologist = True
    def next_step(self, things, t=None):
        result = None
        if S.played == 0:
            # lead a change in strategy
            S.tick += 1
            if S.next_strategy is not None:
                S.strategy = getattr(self, 'build_' + S.next_strategy)(things)
                S.strategy_name = S.next_strategy
                S.next_strategy = None
        S.played += 1
        if S.played == len([x for x in things.values() if isinstance(x, Player)]):
            # last living player this tick: reset the per-tick counter
            S.played = 0
        result = S.strategy.get_next_move(self, things)
        self.status = S.strategy_name + "|"+ str(S.tick) + "|" + str(result)
        return result
    def build_first_strategy(self, things):
        # Scatter the team toward ten spread-out positions at once.
        return RushStrategy(things, [(4, 5), (9, 12), (6, 15), (15, 11), (22, 7), (44, 2), (43, 19), (56, 9), (89, 7), (85, 13)], "dos", 50)
class RushStrategy(object):
    """Team strategy: walk downhill on a distance map toward ``goal``.
    When the team makes no attack/move for ``wait`` ticks, or after
    ``timeout`` ticks overall, schedule ``next_strategy`` on the shared
    state ``S`` so the owning player class builds the next step.
    """
    def __init__(self, things, goal, next_strategy, wait=20, timeout=250):
        # goal: one position or a list, fed straight to GoalDistanceMap.
        # next_strategy: builder-name suffix to activate when this ends.
        self.goal = goal
        self.next_strategy = next_strategy
        self.map = GoalDistanceMap(goal, things)
        self.wait = wait
        self.start_t = None  # tick when get_next_move first ran
        self.timeout = timeout
    def get_next_move(self, player, things):
        """Return one action tuple ('move'/'attack'/'heal', target)."""
        if self.start_t is None:
            self.start_t = S.tick
        result = None
        g = self.map
        current = g[player.position]
        winner = None
        if player.life < 70 and random.random() < 0.3:
            # occasionally patch up when below 70% health
            result = ('heal', player)
        #elif player.life < 40:
        #    result = ('heal', player)
        else:
            #print "evaluating", self, self.position
            moves = utils.possible_moves(player, things)
            random.shuffle(moves)
            for pos in moves:
                #print pos, g[pos], current
                if g[pos] < current:
                    # no break: the *last* improving move wins, which after
                    # the shuffle amounts to a random improving move
                    winner = pos
            if winner:
                result = ('move', winner)
            else:
                # blocked: shoot the closest zombie if it is in range
                target = closest(player, [x for x in things.values() if isinstance(x, Zombie)])
                if target is not None:
                    if utils.distance(target, player) <= player.weapon.max_range:
                        result = ('attack', target)
        # if result is None:
        #     if random.random() < 0.25:
        #         moves = utils.possible_moves(self, things)
        #         if moves:
        #             pos = random.choice(moves)
        #             result = ('move', pos)
        if result is None:
            result = ('heal', player)
        if result[0] in ('attack', 'move'):
            # somebody is still making progress: reset the stall clock
            S.last_action = S.tick
        if S.tick - S.last_action > self.wait:
            # whole team stalled: hand over to the next strategy
            S.next_strategy = self.next_strategy
            S.last_action = S.tick
        if S.tick - self.start_t > self.timeout:
            # hard time limit reached: hand over regardless of progress
            S.next_strategy = self.next_strategy
            S.last_action = S.tick
        return result
class RushRushStrategy(object):
    """Composable rush step: walk downhill on a distance map to ``goal``.

    Unlike ``RushStrategy`` this variant does not touch
    ``S.next_strategy``; instead ``get_next_move`` returns a
    ``(action, done, status)`` triple and lets the caller (e.g.
    ``ComposerStrategy``) decide what to do when ``done`` is True,
    which happens after a team stall of ``wait`` ticks or after
    ``timeout`` ticks in total.
    """
    def __init__(self, goal, wait=10, timeout=100):
        self.goal = goal
        self.map = None          # built lazily on the first move
        self.wait = wait         # stall tolerance, in ticks
        self.timeout = timeout   # hard cap for this step, in ticks
        self.start_tick = None   # tick when the step first ran
    def get_next_move(self, player, things):
        """Return (action, done, status) for ``player``."""
        if self.start_tick is None:
            self.start_tick = S.tick
        if self.map is None:
            # Deferred so the map reflects the world at activation time.
            self.map = GoalDistanceMap(self.goal, things)
        result = None
        done = False
        g = self.map
        current = g[player.position]
        winner = None
        if player.life < 70 and random.random() < 0.3:
            # occasionally patch up when below 70% health
            result = ('heal', player)
        elif player.life < 50:
            # always heal when badly hurt
            result = ('heal', player)
        else:
            moves = utils.possible_moves(player, things)
            random.shuffle(moves)
            for pos in moves:
                # no break: the *last* improving move wins, which after the
                # shuffle amounts to a random improving move
                if g[pos] < current:
                    winner = pos
            if winner:
                result = ('move', winner)
            else:
                # blocked: shoot the closest zombie if it is in range
                target = closest(player, [x for x in things.values() if isinstance(x, Zombie)])
                if target is not None:
                    if utils.distance(target, player) <= player.weapon.max_range:
                        result = ('attack', target)
        if result is None:
            result = ('heal', player)
        if result[0] in ('attack', 'move'):
            # somebody is still making progress: reset the stall clock
            S.last_action = S.tick
        if S.tick - S.last_action > self.wait:
            done = True
            S.last_action = S.tick
        if S.tick - self.start_tick > self.timeout:
            done = True
        return result, done, "Rush(%s)" % (self.goal,)
class DestroyThingStrategy(object):
    """Attack whatever occupies a fixed position until it is destroyed.

    ``get_next_move`` returns ``(action, done, status)``; ``done``
    becomes True once nothing is left at ``location``.
    """
    def __init__(self, location):
        self.location = location  # (x, y) of the thing to destroy
    def get_next_move(self, player, things):
        """Return (action, done, status) for ``player``."""
        result = None
        done = False
        if player.life < 70 and random.random() < 0.3:
            # occasionally patch up when below 70% health
            result = ('heal', player)
        else:
            # defend ourselves from the closest zombie in weapon range
            target = closest(player, [x for x in things.values() if isinstance(x, Zombie)])
            if target is not None:
                if utils.distance(target, player) <= player.weapon.max_range:
                    result = ('attack', target)
        if self.location not in things:
            # target gone: this strategy is finished
            done = True
        elif result is None:
            # nothing more urgent to do: chip away at the target
            result = ('attack', things[self.location])
        return result, done, "Destroy(%s)" % (self.location,)
class WaitStrategy(object):
    """Stand ground: shoot the nearest zombie in range, otherwise heal.

    ``get_next_move`` never finishes on its own -- the ``done`` flag in
    the returned ``(action, done, status)`` triple is always False.
    """
    def __init__(self):
        pass
    def get_next_move(self, player, things):
        # Occasionally patch ourselves up before engaging.
        if player.life < 70 and random.random() < 0.3:
            return ('heal', player), False, "Waiting"
        zombies = [thing for thing in things.values() if isinstance(thing, Zombie)]
        target = closest(player, zombies)
        if target is not None and utils.distance(target, player) <= player.weapon.max_range:
            return ('attack', target), False, "Waiting"
        # Nothing in range: healing is the only useful idle action.
        return ('heal', player), False, "Waiting"
class ComposerStrategy(object):
    """Run a pipeline of sub-strategies in order.
    The pipeline advances when every living player has reported done on
    the current sub-strategy; once three (or more) players end up
    adjacent, the composer switches to rushing that meeting point
    ("going for win").
    """
    def __init__(self, *strategies):
        self.strategies = strategies
        self.ptr = 0               # index of the current sub-strategy
        self.done = set()          # players done with the current one
        self.going_for_win = None  # meeting position once detected
        self.win_strategy = None   # lazily built rush to that position
    def get_next_move(self, player, things):
        if self.going_for_win is not None:
            if self.win_strategy is None:
                self.win_strategy = RushRushStrategy(self.going_for_win)
            s = self.win_strategy
        else:
            s = self.strategies[(self.ptr%len(self.strategies))]
        action, done, status = s.get_next_move(player, things)
        if done:
            done = False
            self.done.add(player.name)
            if len(self.done) >= len([x for x in things.values() if isinstance(x, Player)]):
                # everyone alive finished this step: advance the pipeline
                self.ptr += 1
                self.done = set()
                self.going_for_win = self.is_winnable(things)
                if self.ptr >= len(self.strategies):
                    # whole pipeline exhausted: report done upward
                    done = True
        #print self.done, player, s
        #raw_input()
        return action, done, ("{%s/%s}" % (self.ptr, len(self.strategies))) +status
    def is_winnable(self, things):
        """Return a position where players cluster, or None.
        NOTE(review): ``close`` is appended to while being iterated and
        the same player can be appended more than once, so the len > 2
        threshold counts entries rather than distinct players -- confirm
        the intended semantics before refactoring this.
        """
        def adyacent(one, two):
            ps = utils.adyacent_positions(one)
            if two.position in ps:
                return True
            return False
        players = [x for x in things.values() if isinstance(x, Player)]
        for player in players:
            close = [player]
            for other in players:
                if player == other:
                    continue
                for test in close:
                    if adyacent(test, other):
                        close.append(other)
                if len(close) > 2:
                    return close[0].position
class PlayerSpecificStrategies(object):
    """Route each player to its own strategy based on a selector key.

    Players whose selector key appears in ``kwargs`` run that dedicated
    strategy; everyone else runs ``default_stategy`` and is counted as
    done immediately.  A player whose strategy reports done is parked on
    ``wait_strategy``; the composite reports done once every living
    player is in the done set.
    """
    def __init__(self, default_stategy, wait_strategy, selector, **kwargs):
        self.default = default_stategy
        self.wait = wait_strategy
        self.selector = selector
        self.specific = kwargs
        self.players = {}  # player name -> strategy currently assigned
        self.done = set()  # names of players that finished their task
    def get_next_move(self, player, things):
        strat = self.players.get(player.name)
        if strat is None:
            # First time we see this player: pick its strategy by key.
            key = self.selector.key_for(player)
            strat = self.specific.get(key)
            if strat is None:
                # No dedicated job: run the default, count as done now.
                self.done.add(player.name)
                strat = self.default
            self.players[player.name] = strat
        move, done, status = strat.get_next_move(player, things)
        live_names = set(x.name for x in things.values() if isinstance(x, Player))
        if done:
            done = False
            self.done.add(player.name)
            self.players[player.name] = self.wait
        if live_names.issubset(self.done):
            done = True
        return move, done, "[%s/%s] " % (len(self.done), len(live_names)) + status
class MapReader(object):
    """Random access into a plain-text map file.

    The file is read once at construction time; ``reader[(x, y)]``
    returns the character at column ``x`` of line ``y``, or ``None``
    when the coordinate falls outside the file.
    """
    def __init__(self, filename):
        # Read eagerly and close the handle; the previous version left
        # the file object open (it was never closed explicitly).
        with codecs.open(filename, "r", "utf-8") as handle:
            self.lines = handle.readlines()
    def __getitem__(self, item):
        """Return the character at ``item == (x, y)``, or ``None``."""
        x, y = item
        try:
            return self.lines[y][x]
        except IndexError:
            return None
    def key_for(self, player):
        """Selector hook: the map character under ``player.position``."""
        return self[player.position]
class FuturologistEvacuation(Player):
    '''Evacuation-rules team player (this definition shadows the earlier
    class of the same name in this module).
    The team runs a single long ``ComposerStrategy`` pipeline: first a
    scatter to ten map positions, then a sequence of per-zone jobs keyed
    by the letter codes in ``players/evacuation.map`` (break a wall,
    rush a door, regroup), and finally a rush to the meeting point.
    '''
    futurologist = True
    def next_step(self, things, t=None):
        result = None
        if S.played == 0:
            # lead a change in strategy
            S.tick += 1
            if S.next_strategy is not None:
                S.strategy = getattr(self, 'build_' + S.next_strategy)(things)
                S.strategy_name = S.next_strategy
                S.next_strategy = None
        S.played += 1
        if S.played == len([x for x in things.values() if isinstance(x, Player)]):
            # last living player this tick: reset the per-tick counter
            S.played = 0
        result, done, status = S.strategy.get_next_move(self, things)
        # Letter code of the map zone under this player (see the
        # evacuation.map file).  NOTE(review): MapReader returns None for
        # off-map positions, and comparing None against strings only
        # worked on Python 2 -- confirm before porting to Python 3.
        pos_code = MapReader('players/evacuation.map')[self.position ]
        if not 'a' <= pos_code <= 'z':
            pos_code = ""
        self.status = "p" + str(self.position) + pos_code \
            + "|" +str(status) + "|" + str(result)
        return result
    def build_first_strategy(self, things):
        # The whole plan is precomputed below as a class attribute.
        return self._strategy
    # Pipeline of team jobs; entries run in order, each until every
    # living player reports done on it.
    _strategy = ComposerStrategy(
        RushRushStrategy([
            (4, 5), (9, 12), (9, 14), (15, 11), (22, 7),
            (44, 2), (43, 19), (62, 9), (89, 7), (85, 13)
        ], 5),
        # H to I
        # PlayerSpecificStrategies(
        #     WaitStrategy(), WaitStrategy(),
        #     MapReader('players/evacuation.map'),
        #     h=RushRushStrategy((62, 11))
        # ),
        # PlayerSpecificStrategies(
        #     WaitStrategy(), WaitStrategy(),
        #     MapReader('players/evacuation.map'),
        #     h=RushRushStrategy((56, 9))
        # ),
        # PlayerSpecificStrategies(
        #     WaitStrategy(), WaitStrategy(),
        #     MapReader('players/evacuation.map'),
        #     h=DestroyThingStrategy((56, 8)),
        # ),
        # PlayerSpecificStrategies(
        #     WaitStrategy(), WaitStrategy(),
        #     MapReader('players/evacuation.map'),
        #     h=RushRushStrategy((56, 8))
        # ),
        # PlayerSpecificStrategies(
        #     WaitStrategy(), WaitStrategy(),
        #     MapReader('players/evacuation.map'),
        #     h=RushRushStrategy((44, 2))
        # ),
        # I to J
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            i=DestroyThingStrategy((43, 2)),
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            i=DestroyThingStrategy((42, 2)),
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            i=RushRushStrategy((22, 7))
        ),
        # J to A
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            j=DestroyThingStrategy((21, 7)),
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            j=RushRushStrategy((4, 5))
        ),
        # A to B
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            a=RushRushStrategy((9, 12))
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            b=DestroyThingStrategy((9, 13)),
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            b=RushRushStrategy((9, 13))
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            b=RushRushStrategy((15, 11))
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            c=RushRushStrategy((44, 20))
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            e=RushRushStrategy((92, 10))
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            e=DestroyThingStrategy((92, 11))
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            e=RushRushStrategy((93, 13)),
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            d=RushRushStrategy((88, 15)),
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            f=RushRushStrategy((78, 15)),
        ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            h=RushRushStrategy((82, 15)),
        ),
        # PlayerSpecificStrategies(
        #     WaitStrategy(), WaitStrategy(),
        #     MapReader('players/evacuation.map'),
        #     d=DestroyThingStrategy((79, 12))
        # ),
        PlayerSpecificStrategies(
            WaitStrategy(), WaitStrategy(),
            MapReader('players/evacuation.map'),
            e=RushRushStrategy((56, 11)),
            d=RushRushStrategy((56, 11))
        ),
        RushRushStrategy((44, 20)),
    )
def create(rules, objetives=None):
    """Factory used by the game: build one futurologist for ``rules``.

    Consumes a random flavour name from the shared ``names`` pool and
    bumps the team counter on the shared state ``S``.  Unknown rule
    sets yield ``None``.  (``objetives`` is part of the expected
    factory signature and is unused here.)
    """
    random.shuffle(names)
    name = names.pop()
    S.number_players += 1
    if rules == "safehouse":
        player_cls, weapon = FuturologistSafehouse, Rifle()
    elif rules == "extermination":
        player_cls, weapon = FuturologistExtermination, Rifle()
    elif rules == "evacuation":
        player_cls, weapon = FuturologistEvacuation, Shotgun()
    else:
        return None
    return player_cls(name, 'yellow', weapon=weapon)
| [
"fisadev@gmail.com"
] | fisadev@gmail.com |
d3c0d776930b5de95f6b211fc5f65d4c139333b5 | ce083128fa87ca86c65059893aa8882d088461f5 | /python/flask-mail-labs/.venv/lib/python2.7/site-packages/decorator.py | 7c6fcfe3b03a174786c79279c6888e1c531cd2e8 | [] | no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 16,194 | py | # ######################### LICENSE ############################ #
# Copyright (c) 2005-2016, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
from __future__ import print_function
import re
import sys
import inspect
import operator
import itertools
import collections
__version__ = '4.0.10'

# Version dispatch.  Using sys.version_info instead of the original
# string comparison ``sys.version >= '3'``: comparing version *strings*
# lexicographically is fragile, while the version_info tuple compares
# numerically and is the documented way to test the interpreter version.
if sys.version_info[0] >= 3:
    from inspect import getfullargspec

    def get_init(cls):
        """Return the (unbound on Py3) __init__ of *cls*."""
        return cls.__init__
else:
    class getfullargspec(object):
        "A quick and dirty replacement for getfullargspec for Python 2.X"
        def __init__(self, f):
            self.args, self.varargs, self.varkw, self.defaults = \
                inspect.getargspec(f)
            self.kwonlyargs = []
            self.kwonlydefaults = None

        def __iter__(self):
            # allow tuple-style unpacking like the real getfullargspec
            yield self.args
            yield self.varargs
            yield self.varkw
            yield self.defaults

    getargspec = inspect.getargspec

    def get_init(cls):
        """Return the underlying function of *cls*.__init__ (Python 2)."""
        return cls.__init__.__func__

# getargspec has been deprecated in Python 3.5
ArgSpec = collections.namedtuple(
    'ArgSpec', 'args varargs varkw defaults')


def getargspec(f):
    """A replacement for inspect.getargspec"""
    spec = getfullargspec(f)
    return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict and
    methods update and make.

    The heart of the decorator module: generated wrappers are compiled
    from a source template so they expose the *same signature* as the
    function they wrap (unlike plain ``*args, **kwargs`` wrappers).
    """
    # Atomic get-and-increment provided by the GIL
    _compile_count = itertools.count()
    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>': # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                # mirror every argspec field as an attribute so the
                # source templates can interpolate them via vars(self)
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                if sys.version < '3': # easy way
                    self.shortsignature = self.signature = (
                        inspect.formatargspec(
                            formatvalue=lambda val: "", *argspec)[1:-1])
                else: # Python 3 way
                    # build two signatures: ``signature`` for the def line
                    # and ``shortsignature`` for forwarding the arguments
                    allargs = list(self.args)
                    allshortargs = list(self.args)
                    if self.varargs:
                        allargs.append('*' + self.varargs)
                        allshortargs.append('*' + self.varargs)
                    elif self.kwonlyargs:
                        allargs.append('*') # single star syntax
                    for a in self.kwonlyargs:
                        allargs.append('%s=None' % a)
                        allshortargs.append('%s=%s' % (a, a))
                    if self.varkw:
                        allargs.append('**' + self.varkw)
                        allshortargs.append('**' + self.varkw)
                    self.signature = ', '.join(allargs)
                    self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)
    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.__defaults__ = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        try:
            # frame 3 is the caller of the public API entry point
            frame = sys._getframe(3)
        except AttributeError: # for IronPython and similar implementations
            callermodule = '?'
        else:
            callermodule = frame.f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)
    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self) # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1) # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            # reserved names used internally by the generated code
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'): # add a newline for old Pythons
            src += '\n'
        # Ensure each generated function has a unique filename for profilers
        # (such as cProfile) that depend on the tuple of (<filename>,
        # <definition line>, <function name>) being unique.
        filename = '<decorator-gen-%d>' % (next(self._compile_count),)
        try:
            code = compile(src, filename, 'single')
            exec(code, evaldict)
        except:
            # bare except on purpose: dump the faulty source, then re-raise
            print('Error in generated code:', file=sys.stderr)
            print(src, file=sys.stderr)
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func
    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature and body.
        evaldict is the evaluation dictionary. If addsource is true an
        attribute __source__ is added to the result. The attributes attrs
        are added, if any.
        """
        if isinstance(obj, str): # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1] # strip a right parens
            func = None
        else: # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        ibody = '\n'.join(' ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)
def decorate(func, caller):
    """
    decorate(func, caller) decorates a function using a caller.

    The returned wrapper has the same signature as *func* and its body
    simply forwards to ``caller(func, <original arguments>)``.
    """
    evaldict = dict(_call_=caller, _func_=func)
    fun = FunctionMaker.create(
        func, "return _call_(_func_, %(shortsignature)s)",
        evaldict, __wrapped__=func)
    if hasattr(func, '__qualname__'):
        # keep Python 3 introspection tools happy
        fun.__qualname__ = func.__qualname__
    return fun
def decorator(caller, _func=None):
    """decorator(caller) converts a caller function into a decorator"""
    if _func is not None: # return a decorated function
        # this is obsolete behavior; you should use decorate instead
        return decorate(_func, caller)
    # else return a decorator function
    # Derive a readable name and docstring for the generated decorator
    # from whatever kind of callable the caller is.
    if inspect.isclass(caller):
        name = caller.__name__.lower()
        doc = 'decorator(%s) converts functions/generators into ' \
            'factories of %s objects' % (caller.__name__, caller.__name__)
    elif inspect.isfunction(caller):
        if caller.__name__ == '<lambda>':
            name = '_lambda_'
        else:
            name = caller.__name__
        doc = caller.__doc__
    else: # assume caller is an object with a __call__ method
        name = caller.__class__.__name__.lower()
        doc = caller.__call__.__doc__
    evaldict = dict(_call_=caller, _decorate_=decorate)
    return FunctionMaker.create(
        '%s(func)' % name, 'return _decorate_(func, _call_)',
        evaldict, doc=doc, module=caller.__module__,
        __wrapped__=caller)
# ####################### contextmanager ####################### #
try: # Python >= 3.2
    from contextlib import _GeneratorContextManager
except ImportError: # Python >= 2.5
    from contextlib import GeneratorContextManager as _GeneratorContextManager
class ContextManager(_GeneratorContextManager):
    # A context manager that can also be used as a decorator, producing
    # wrappers with the decorated function's real signature.
    def __call__(self, func):
        """Context manager decorator"""
        return FunctionMaker.create(
            func, "with _self_: return _func_(%(shortsignature)s)",
            dict(_self_=self, _func_=func), __wrapped__=func)
# The stdlib _GeneratorContextManager constructor changed across Python
# versions; probe its signature at import time and install a matching
# ContextManager.__init__ where needed.
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
    def __init__(self, g, *a, **k):
        return _GeneratorContextManager.__init__(self, g(*a, **k))
    ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
    pass
elif n_args == 4: # (self, gen, args, kwds) Python 3.5
    def __init__(self, g, *a, **k):
        return _GeneratorContextManager.__init__(self, g, a, k)
    ContextManager.__init__ = __init__
# drop-in replacement for contextlib.contextmanager
contextmanager = decorator(ContextManager)
# ############################ dispatch_on ############################ #
def append(a, vancestors):
    """
    Append ``a`` to the list of the virtual ancestors, unless it is
    already represented there.

    If an existing entry is a subclass of ``a``, nothing changes; if
    ``a`` is a subclass of an existing entry, that entry is replaced
    by ``a`` in place.
    """
    replaced = False
    for idx, existing in enumerate(vancestors):
        if issubclass(existing, a):
            # a (or something more specific) is already covered
            return
        if issubclass(a, existing):
            # a is more specific: take the ancestor's slot
            vancestors[idx] = a
            replaced = True
    if not replaced:
        vancestors.append(a)
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
    """
    Factory of decorators turning a function into a generic function
    dispatching on the given arguments.

    The decorated function becomes the default implementation; its
    ``register`` attribute registers implementations per tuple of
    types, and calls dispatch on the runtime types of the named
    arguments (inspired by simplegeneric and functools.singledispatch).
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = '(%s,)' % ', '.join(dispatch_args)
    def check(arguments, wrong=operator.ne, msg=''):
        """Make sure one passes the expected number of arguments"""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError('Expected %d arguments, got %d%s' %
                            (len(dispatch_args), len(arguments), msg))
    def gen_func_dec(func):
        """Decorator turning a function into a generic function"""
        # first check the dispatch arguments
        argset = set(getfullargspec(func).args)
        if not set(dispatch_args) <= argset:
            raise NameError('Unknown dispatch arguments %s' % dispatch_str)
        # maps a tuple of types -> registered implementation
        typemap = {}
        def vancestors(*types):
            """
            Get a list of sets of virtual ancestors for the given types
            """
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for t, type_, ra in zip(types, types_, ras):
                    # a registered type not in the real MRO is "virtual"
                    if issubclass(t, type_) and type_ not in t.__mro__:
                        append(type_, ra)
            return [set(ra) for ra in ras]
        def ancestors(*types):
            """
            Get a list of virtual MROs, one for each type
            """
            check(types)
            lists = []
            for t, vas in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if n_vas > 1:
                    raise RuntimeError(
                        'Ambiguous dispatch for %s: %s' % (t, vas))
                elif n_vas == 1:
                    va, = vas
                    # splice the virtual ancestor into a synthetic MRO
                    mro = type('t', (t, va), {}).__mro__[1:]
                else:
                    mro = t.__mro__
                lists.append(mro[:-1]) # discard t and object
            return lists
        def register(*types):
            """
            Decorator to register an implementation for the given types
            """
            check(types)
            def dec(f):
                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
                typemap[types] = f
                return f
            return dec
        def dispatch_info(*types):
            """
            An utility to introspect the dispatch algorithm
            """
            check(types)
            lst = []
            for anc in itertools.product(*ancestors(*types)):
                lst.append(tuple(a.__name__ for a in anc))
            return lst
        def _dispatch(dispatch_args, *args, **kw):
            # runtime dispatcher: exact type match first, then walk the
            # (virtual) MRO combinations, finally fall back to func
            types = tuple(type(arg) for arg in dispatch_args)
            try: # fast path
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            combinations = itertools.product(*ancestors(*types))
            next(combinations) # the first one has been already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if f is not None:
                    return f(*args, **kw)
            # else call the default implementation
            return func(*args, **kw)
        return FunctionMaker.create(
            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
            dict(_f_=_dispatch), register=register, default=func,
            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
            dispatch_info=dispatch_info, __wrapped__=func)
    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
    return gen_func_dec
| [
"marcosptf@yahoo.com.br"
] | marcosptf@yahoo.com.br |
c32cacad1c1141e9755e500443ac092c49f4da39 | ece5f5355fd3c76af49e4912ceffade563617dae | /src/scripts/examples/extract_data.py | ce17dac6c330a9725fab5b778709fe6ad52497a9 | [
"MIT"
] | permissive | stevenchen0x01/binwalk | 5f1f3d79a0427e70858c8454f60fd46d5a82dbd1 | 023a25e1222cd4209d120bd752aa5c55e621ed2a | refs/heads/master | 2021-01-19T16:41:47.397994 | 2017-08-21T17:45:43 | 2017-08-21T17:45:43 | 101,019,328 | 1 | 0 | null | 2017-08-22T03:57:23 | 2017-08-22T03:57:23 | null | UTF-8 | Python | false | false | 839 | py | #!/usr/bin/env python
import sys
import binwalk
# Extracts and logs
for module in binwalk.scan(*sys.argv[1:], signature=True, quiet=True, extract=True):
print ("%s Results:" % module.name)
for result in module.results:
if module.extractor.output.has_key(result.file.path):
if module.extractor.output[result.file.path].extracted.has_key(result.offset):
print (
"Extracted '%s' at offset 0x%X from '%s' to '%s'" % (result.description.split(',')[0],
result.offset,
result.file.path,
str(module.extractor.output[result.file.path].extracted[result.offset])))
| [
"heffnercj@gmail.com"
] | heffnercj@gmail.com |
4ff33f7a4c541df8aeed895cdff5400708508d53 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/Hourglass_ID1809_for_PyTorch/datat/MPII/dp.py | bd677276e15f1823aa307654bd484b272a28ce4a | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 8,833 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import cv2
import sys
import os
import torch
import numpy as np
import torch.utils.data
import utils.img
import torch.npu
import os
# Select the Ascend NPU device for this process: honour the
# NPU_CALCULATE_DEVICE environment variable when it holds a digit
# (default device 0) and switch to it if it is not already current.
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
    NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
    torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
class GenerateHeatmap():
    """Render keypoint coordinates into per-part Gaussian heatmaps.

    A single Gaussian patch is precomputed once; calling the object
    with keypoints splats that patch (via elementwise max) onto an
    ``(num_parts, output_res, output_res)`` float32 array.
    """
    def __init__(self, output_res, num_parts):
        self.output_res = output_res
        self.num_parts = num_parts
        sigma = self.output_res / 64
        self.sigma = sigma
        size = 6 * sigma + 3
        xs = np.arange(0, size, 1, float)
        ys = xs[:, np.newaxis]
        center = 3 * sigma + 1
        # precomputed Gaussian patch, peak value 1.0 at its center
        self.g = np.exp(-((xs - center) ** 2 + (ys - center) ** 2) / (2 * sigma ** 2))
    def __call__(self, keypoints):
        res = self.output_res
        hms = np.zeros(shape=(self.num_parts, res, res), dtype=np.float32)
        sigma = self.sigma
        for person in keypoints:
            for part, pt in enumerate(person):
                if pt[0] <= 0:
                    # x <= 0 marks an invisible/missing keypoint
                    continue
                x, y = int(pt[0]), int(pt[1])
                if not (0 <= x < res and 0 <= y < res):
                    continue
                # patch bounds in heatmap coordinates
                ul = int(x - 3 * sigma - 1), int(y - 3 * sigma - 1)
                br = int(x + 3 * sigma + 2), int(y + 3 * sigma + 2)
                # clip both the patch and the destination window
                c, d = max(0, -ul[0]), min(br[0], res) - ul[0]
                a, b = max(0, -ul[1]), min(br[1], res) - ul[1]
                cc, dd = max(0, ul[0]), min(br[0], res)
                aa, bb = max(0, ul[1]), min(br[1], res)
                hms[part, aa:bb, cc:dd] = np.maximum(hms[part, aa:bb, cc:dd], self.g[a:b, c:d])
        return hms
class Dataset(torch.utils.data.Dataset):
    """Pose-estimation dataset: crop, randomly augment, render heatmap targets.

    ``ds`` is a dataset backend exposing get_img/get_path/get_kps/get_center/
    get_scale/get_normalized and a ``flipped_parts`` table (keyed 'mpii' here —
    presumably the MPII joint order; confirm against the ``ref`` module).
    ``index`` is the list of sample ids belonging to this split.
    """
    def __init__(self, config, ds, index):
        self.input_res = config['train']['input_res']
        self.output_res = config['train']['output_res']
        self.generateHeatmap = GenerateHeatmap(self.output_res, config['inference']['num_parts'])
        self.ds = ds
        self.index = index

    def __len__(self):
        return len(self.index)

    def __getitem__(self, idx):
        # Wrap around so an index beyond the split length stays valid.
        return self.loadImage(self.index[idx % len(self.index)])

    def loadImage(self, idx):
        """Load sample ``idx`` and return (image, heatmaps), both float32."""
        ds = self.ds
        ## load + crop
        orig_img = ds.get_img(idx)
        path = ds.get_path(idx)  # NOTE(review): unused below — debugging leftover?
        orig_keypoints = ds.get_kps(idx)
        # Untouched copy used later to zero joints that were invisible initially.
        kptmp = orig_keypoints.copy()
        c = ds.get_center(idx)
        s = ds.get_scale(idx)
        normalize = ds.get_normalized(idx)  # NOTE(review): unused below
        cropped = utils.img.crop(orig_img, c, s, (self.input_res, self.input_res))
        # Map every annotated keypoint (x > 0) into the cropped frame.
        for i in range(np.shape(orig_keypoints)[1]):
            if orig_keypoints[0,i,0] > 0:
                orig_keypoints[0,i,:2] = utils.img.transform(orig_keypoints[0,i,:2], c, s, (self.input_res, self.input_res))
        keypoints = np.copy(orig_keypoints)
        ## augmentation -- to be done to cropped image
        height, width = cropped.shape[0:2]
        center = np.array((width/2, height/2))
        scale = max(height, width)/200
        aug_rot=0  # NOTE(review): immediately overwritten by the next line
        aug_rot = (np.random.random() * 2 - 1) * 30.   # rotation in [-30, 30) degrees
        aug_scale = np.random.random() * (1.25 - 0.75) + 0.75  # scale in [0.75, 1.25)
        scale *= aug_scale
        # Two affine matrices: output-resolution one for the keypoints,
        # input-resolution one for the image pixels.
        mat_mask = utils.img.get_transform(center, scale, (self.output_res, self.output_res), aug_rot)[:2]
        mat = utils.img.get_transform(center, scale, (self.input_res, self.input_res), aug_rot)[:2]
        inp = cv2.warpAffine(cropped, mat, (self.input_res, self.input_res)).astype(np.float32)/255
        keypoints[:,:,0:2] = utils.img.kpt_affine(keypoints[:,:,0:2], mat_mask)
        if np.random.randint(2) == 0:
            # 50% chance: color jitter + horizontal flip; joints are remapped
            # through the ds.flipped_parts['mpii'] table and mirrored in x.
            inp = self.preprocess(inp)
            inp = inp[:, ::-1]
            keypoints = keypoints[:, ds.flipped_parts['mpii']]
            keypoints[:, :, 0] = self.output_res - keypoints[:, :, 0]
            orig_keypoints = orig_keypoints[:, ds.flipped_parts['mpii']]
            orig_keypoints[:, :, 0] = self.input_res - orig_keypoints[:, :, 0]
        ## set keypoints to 0 when were not visible initially (so heatmap all 0s)
        for i in range(np.shape(orig_keypoints)[1]):
            if kptmp[0,i,0] == 0 and kptmp[0,i,1] == 0:
                keypoints[0,i,0] = 0
                keypoints[0,i,1] = 0
                orig_keypoints[0,i,0] = 0
                orig_keypoints[0,i,1] = 0
        ## generate heatmaps on outres
        heatmaps = self.generateHeatmap(keypoints)
        return inp.astype(np.float32), heatmaps.astype(np.float32)

    def preprocess(self, data):
        """Random color jitter; expects (and returns) an RGB float image in [0, 1]."""
        # random hue and saturation
        data = cv2.cvtColor(data, cv2.COLOR_RGB2HSV);
        delta = (np.random.random() * 2 - 1) * 0.2
        # Hue channel of a float HSV image spans [0, 360); shift and wrap.
        data[:, :, 0] = np.mod(data[:,:,0] + (delta * 360 + 360.), 360.)
        delta_sature = np.random.random() + 0.5
        data[:, :, 1] *= delta_sature
        data[:,:, 1] = np.maximum( np.minimum(data[:,:,1], 1), 0 )
        data = cv2.cvtColor(data, cv2.COLOR_HSV2RGB)
        # adjust brightness
        delta = (np.random.random() * 2 - 1) * 0.3
        data += delta
        # adjust contrast
        mean = data.mean(axis=2, keepdims=True)
        data = (data - mean) * (np.random.random() + 0.5) + mean
        data = np.minimum(np.maximum(data, 0), 1)
        return data
def init(config):
    """Build train/valid DataLoaders and return a batch-generator factory.

    The returned callable maps a phase key ('train'/'valid') to a generator
    that yields ``{'imgs': ..., 'heatmaps': ...}`` batches.
    """
    batchsize = config['train']['batchsize']
    # Make the sibling `ref` module importable regardless of the CWD.
    current_path = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(current_path)
    import ref as ds
    ds.init()
    train, valid = ds.setup_val_split()
    dataset = { key: Dataset(config, ds, data) for key, data in zip( ['train', 'valid'], [train, valid] ) }
    use_data_loader = config['train']['use_data_loader']  # NOTE(review): read but never used — confirm intent
    loaders = {}
    for key in dataset:
        if config['opt'].ddp:
            # Distributed training: the sampler shards (and shuffles) the data
            # across ranks, so the loader itself must not shuffle.
            loaders_sampler = torch.utils.data.distributed.DistributedSampler(dataset[key])
            loaders_batch_size = int(batchsize)
            loaders[key] = torch.utils.data.DataLoader(dataset[key], batch_size=loaders_batch_size, shuffle=False, num_workers=config['train']['num_workers'], pin_memory=False, drop_last = True, sampler = loaders_sampler)
        else:
            loaders[key] = torch.utils.data.DataLoader(dataset[key], batch_size=batchsize, shuffle=True, num_workers=config['train']['num_workers'], pin_memory=False)
    def gen(phase):
        """Yield `{phase}_iters` batches, restarting the loader on exhaustion."""
        batchsize = config['train']['batchsize']
        batchnum = config['train']['{}_iters'.format(phase)]
        loader = loaders[phase].__iter__()
        for i in range(batchnum):
            try:
                imgs, heatmaps = next(loader)
            except StopIteration:
                # to avoid no data provided by dataloader
                loader = loaders[phase].__iter__()
                imgs, heatmaps = next(loader)
            yield {
                'imgs': imgs, #cropped and augmented
                'heatmaps': heatmaps, #based on keypoints. 0 if not in img for joint
            }
    return lambda key: gen(key)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
569e248140e15b1b5e02e5607a39007906d082fc | ea459bc6571b254f04fedb9262e297038773afe2 | /111_776A.py | b8d01b8351f7bc60fd6f5abd57c604e23a3162c4 | [] | no_license | ywtail/codeforces | 47da2564858e0c906aa715b3b8b76e6d41b76dd8 | 5c000124ff5ef1172494bc5c5dc252bcf8515ce1 | refs/heads/master | 2020-12-24T08:00:47.738455 | 2018-04-21T15:27:48 | 2018-04-21T15:27:48 | 59,407,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | # coding=utf-8
# 776A. A Serial Killer
# Python 2 solution: track the two current names; each day, replace whichever
# one matches the first token with the second token, then print the pair.
a,b=raw_input().split()
n=int(raw_input())
print a,b
for i in range(n):
    temp=raw_input().split()
    if a==temp[0]:
        a=temp[1]
    if b==temp[0]:
        b=temp[1]
    print a,b
'''
input
ross rachel
4
ross joey
rachel phoebe
phoebe monica
monica chandler
output
ross rachel
joey rachel
joey phoebe
joey monica
joey chandler
input
icm codeforces
1
codeforces technex
output
icm codeforces
icm technex
题目地址:http://codeforces.com/problemset/problem/776/A
题目大意:第一行初始值,从第三行开始,将空格前面的字符串替换为后面的。
之前用replace做的,报错了。原因:假设现在有abk k,要将k替换成a,就会变成aba k,而不是替换单个字符k。
'''
"ywtail@gmail.com"
] | ywtail@gmail.com |
558a28d7a353b44934bab408ca1769ee54d76a03 | fb63d298e6e765b42cb9544695a69bd0c8cb544a | /app.py | e7affb6d0596256387ad24f22867591dbe0bbee0 | [
"MIT"
] | permissive | hirajanwin/Single-Page-Django-App | b33f1dfc6dd8048481577b9588908488de84873c | fe02a59784908161103b1ec8f6d0073c02f1d88f | refs/heads/master | 2022-12-30T23:17:35.517498 | 2020-10-20T22:36:06 | 2020-10-20T22:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | import sys
from django.conf import settings
from django.core.wsgi import get_wsgi_application
from django.http import HttpResponse
from django.urls import path
# https://docs.djangoproject.com/en/dev/topics/settings/#using-settings-without-setting-django-settings-module
# Configure Django entirely in code — no settings module on disk.
# NOTE(review): DEBUG=True plus a hard-coded SECRET_KEY and ALLOWED_HOSTS=['*']
# is acceptable for a demo but must not ship to production.
settings.configure(
    DEBUG=True,
    SECRET_KEY = 'w^h13cf0p8fl^98raarj#-u$c6e!)l@1rl!+9j^a%rrb*8xpe3',
    ALLOWED_HOSTS=['*'],
    ROOT_URLCONF=__name__,  # this very module doubles as the URLconf
)


def home_view(request, *args, **kwargs):
    # Landing page.
    return HttpResponse("<h1>Hello World</h1>")


def about_view(request, *args, **kwargs):
    # About page.
    return HttpResponse("<h1>About World</h1>")


# URL table Django discovers through ROOT_URLCONF above.
urlpatterns = [
    path("", home_view),
    path("about/", about_view)
]

# WSGI callable for application servers.
application = get_wsgi_application()

if __name__ == "__main__":
    # When run directly, behave like manage.py (e.g. `python app.py runserver`).
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
"hello@teamcfe.com"
] | hello@teamcfe.com |
3219750783f2185ca332af5be1323d8447f7ac45 | 1c873d4edf9e6d2239fc834c8cb3cf692da45110 | /leibniz/nn/net/unet.py | da66997b4c40efd1c41b5bfaa8bcfba8d30cbb18 | [
"MIT"
] | permissive | fangzuliang/leibniz | 3815e572c59d1a927bd82eeb2a1ba7dc9d2011fd | 54faae33fd21a142d0daca6d153c66c11ac87eaa | refs/heads/master | 2023-03-27T13:45:03.203623 | 2021-03-25T07:11:58 | 2021-03-25T07:11:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,442 | py | # -*- coding: utf-8 -*-
import logging
import numpy as np
import torch as th
import torch.nn as nn
from leibniz.nn.conv import DepthwiseSeparableConv1d, DepthwiseSeparableConv2d, DepthwiseSeparableConv3d
from leibniz.nn.layer.cbam import CBAM
from leibniz.nn.net.hyptube import HypTube
# NOTE(review): this grabs the *root* logger and mutates its level at import
# time, affecting the whole application — consider logging.getLogger(__name__).
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class Enconv(nn.Module):
    """Encoder stage: a 3x3 convolution combined with a resize to ``size``.

    When the incoming tensor is spatially smaller than the target it is
    upsampled first (so the conv runs at full resolution); otherwise the conv
    runs first and the result is resampled afterwards.
    """

    _MODES = {1: 'linear', 2: 'bilinear', 3: 'trilinear'}

    def __init__(self, in_channels, out_channels, size=(256, 256), conv=nn.Conv2d, padding=None):
        super(Enconv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.size = size
        mode = self._MODES.get(len(size))
        if mode is not None:
            self.scale = nn.Upsample(size=tuple(size), mode=mode)
        self.padding = padding
        # An explicit padding module replaces the conv's built-in padding.
        pad = 0 if padding is not None else 1
        self.conv = conv(in_channels, out_channels, kernel_size=3, stride=1,
                         padding=pad, dilation=1, groups=1)

    def _pad_conv(self, x):
        # Apply the optional padding module, then the convolution.
        if self.padding is not None:
            x = self.padding(x)
        return self.conv(x)

    def forward(self, x):
        current = np.array(x.size())[-len(self.size):].prod()
        target = np.array(self.size).prod()
        if current / target < 1.0:
            # Growing: upsample first, then convolve.
            return self._pad_conv(self.scale(x))
        # Shrinking (or equal): convolve first, then resample.
        return self.scale(self._pad_conv(x))
class Deconv(nn.Module):
    """Decoder stage symmetric to ``Enconv``: 3x3 convolution plus a resize
    to ``size``, upsampling before the convolution whenever the incoming
    tensor is spatially smaller than the target.
    """

    def __init__(self, in_channels, out_channels, size=(256,256), conv=nn.Conv2d, padding=None):
        super(Deconv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.size = size
        mode_by_rank = {1: 'linear', 2: 'bilinear', 3: 'trilinear'}
        rank = len(size)
        if rank in mode_by_rank:
            self.scale = nn.Upsample(size=tuple(size), mode=mode_by_rank[rank])
        self.padding = padding
        # When an explicit padding module is supplied, the conv pads nothing.
        conv_pad = 1 if padding is None else 0
        self.conv = conv(in_channels, out_channels, kernel_size=3, stride=1,
                         padding=conv_pad, dilation=1, groups=1)

    def forward(self, x):
        current = np.array(x.size())[-len(self.size):].prod()
        target = np.array(self.size).prod()
        grow = current / target < 1.0
        if grow:
            # Upsample before convolving when the input is smaller.
            x = self.scale(x)
        if self.padding is not None:
            x = self.padding(x)
        x = self.conv(x)
        if not grow:
            # Otherwise resample after the convolution.
            x = self.scale(x)
        return x
class Transform(nn.Module):
    """A chain of residual blocks interleaved with activations.

    ``forward`` returns both the transformed tensor and the untouched input,
    so callers can tap the skip connection. With ``nblks == 0`` or no block
    factory the module is a pure pass-through.
    """

    def __init__(self, in_channels, out_channels, nblks=0, block=None, relu=None, conv=nn.Conv2d):
        super(Transform, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        relu = nn.ReLU(inplace=True) if relu is None else relu
        self.blocks = None
        if nblks > 0 and block is not None:
            def make():
                # Each block advances 1/nblks of the residual "step".
                return block(self.out_channels, step=1.0 / nblks, relu=relu, conv=conv)
            chain = []
            for _ in range(nblks - 1):
                chain.append(make())
                chain.append(relu)
            chain.append(make())
            self.blocks = nn.Sequential(*chain)

    def forward(self, x):
        if self.blocks is None:
            return x, x
        return self.blocks(x), x
class Block(nn.Module):
    """One U-Net stage: concat inputs -> (activation) -> transform ->
    (normalization) -> (dropout) -> attention.

    ``transform`` is an Enconv/Deconv-style module exposing ``out_channels``
    (and ``size`` when normalizor='layer'); ``attn`` is a factory such as CBAM.
    ``dropout`` is either False (disabled) or a float probability.
    """
    def __init__(self, transform, activation=True, dropout=False, relu=None, attn=CBAM, dim=2, normalizor='batch', conv=None):
        super(Block, self).__init__()
        self.activation = activation
        # False > 0 is False, so dropout=False cleanly disables the drop layer.
        self.dropout_flag = dropout > 0
        self.blocks = None
        self.transform = transform
        self.attn = attn(transform.out_channels, conv=conv)
        if self.activation:
            if relu is not None:
                self.lrelu = relu
            else:
                # Default pre-activation: leaky ReLU with slope 0.2.
                self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        # Pick the normalization layer matching (normalizor, dim); stays None
        # for any unrecognized combination.
        self.normalizor = None
        if normalizor == 'batch':
            if dim == 1:
                self.normalizor = nn.BatchNorm1d(transform.out_channels, affine=True)
            elif dim == 2:
                self.normalizor = nn.BatchNorm2d(transform.out_channels, affine=True)
            elif dim == 3:
                self.normalizor = nn.BatchNorm3d(transform.out_channels, affine=True)
        elif normalizor == 'instance':
            if dim == 1:
                self.normalizor = nn.InstanceNorm1d(transform.out_channels)
            elif dim == 2:
                self.normalizor = nn.InstanceNorm2d(transform.out_channels)
            elif dim == 3:
                self.normalizor = nn.InstanceNorm3d(transform.out_channels)
        elif normalizor == 'layer':
            # LayerNorm normalizes over (channels, *spatial) of the transform output.
            self.normalizor = nn.LayerNorm(tuple([transform.out_channels]) + tuple(transform.size))
        if self.dropout_flag:
            if dim == 1:
                self.drop = nn.Dropout(p=dropout)
            elif dim == 2:
                self.drop = nn.Dropout2d(p=dropout)
            elif dim == 3:
                self.drop = nn.Dropout3d(p=dropout)

    def forward(self, *xs):
        # Multiple inputs (e.g. decoder feature + skip) are concatenated on
        # the channel axis.
        x = th.cat(xs, dim=1)
        if self.activation:
            x = self.lrelu(x)
        x = self.transform(x)
        if self.normalizor:
            x = self.normalizor(x)
        if self.dropout_flag:
            x = self.drop(x)
        x = self.attn(x)
        return x
class UNet(nn.Module):
    """N-dimensional U-Net assembled from pluggable residual blocks.

    ``scales`` gives per-layer spatial scaling and ``factors`` per-layer
    channel scaling, both expressed as base-2 exponents; ``vblks``/``hblks``
    give the number of residual blocks on the vertical (down/up) and
    horizontal (skip) paths. Construction raises
    ``ValueError('scales exceeded!')`` when the requested ladder would shrink
    the spatial size below one pixel or the channel count below the block's
    ``least_required_dim``.

    Fix: ``dtype=np.int`` replaced with the builtin ``int`` — the ``np.int``
    alias was deprecated in NumPy 1.20 and removed in 1.24, where it raises
    ``AttributeError``. ``np.int`` was a plain alias of ``int``, so this is
    an exact drop-in replacement.
    """

    def __init__(self, in_channels, out_channels, block=None, attn=None, relu=None, layers=4, ratio=2, enhencer=None, ksize_in=7, dropout=0.1,
                 vblks=None, hblks=None, scales=None, factors=None, spatial=(256, 256), normalizor='batch', padding=None, final_normalized=True):
        super().__init__()
        extension = block.extension
        lrd = block.least_required_dim
        spatial = np.array(spatial, dtype=int)
        dim = len(spatial)
        self.dim = dim
        Conv = self.get_conv_for_prepare()
        TConv = self.get_conv_for_transform()

        # Normalize `scales` to shape (layers, spatial-dims).
        scales = np.array(scales)
        if scales.shape[0] != layers:
            raise ValueError('scales should have %d layers at dim 0!' % layers)
        if len(scales.shape) == 1:
            scales = scales.reshape([layers, 1])
        if len(scales.shape) != 2:
            raise ValueError('scales should have length 2 to be compatible with spatial dimensions!')

        # ratio / scales / factors are supplied as exponents of 2.
        ratio = np.exp2(ratio)
        factors = np.array(factors + [0.0])
        scales = np.exp2(scales)
        factors = np.exp2(factors)
        num_filters = int(in_channels * ratio)

        self.final_normalized = final_normalized
        self.ratio = ratio
        self.hblks = hblks
        self.vblks = vblks
        self.scales = scales
        self.factors = factors
        logger.info('---------------------------------------')
        logger.info('ratio: %f', ratio)
        logger.info('vblks: [%s]', ', '.join(map(str, vblks)))
        logger.info('hblks: [%s]', ', '.join(map(str, hblks)))
        logger.info('scales: [%s]', ', '.join(map(str, scales)))
        logger.info('factors: [%s]', ', '.join(map(str, factors[0:4])))
        logger.info('---------------------------------------')

        # Fail fast when the full ladder would underflow space or channels.
        self.exceeded = np.any(np.cumprod(scales, axis=0) * spatial < 1) or np.any((in_channels * ratio * np.cumprod(factors)) < lrd)
        if not self.exceeded:
            self.layers = layers
            self.in_channels = in_channels
            self.num_filters = num_filters
            self.out_channels = out_channels

            if enhencer is None:
                if self.dim == 2:
                    # The default enhencer (HypTube) is only wired up for 2D.
                    enhencer = HypTube
            self.enhencer_in = None
            self.enhencer_out = None
            self.enhencer_mid = None

            if relu is None:
                relu = nn.ReLU(inplace=True)
            if attn is None:
                attn = CBAM

            ex = extension
            c0 = int(ex * num_filters)
            if padding:
                # An explicit padding module means the convs themselves pad 0.
                self.conv_padding = 0
                self.iconv = nn.Sequential(
                    padding,
                    Conv(in_channels, c0, kernel_size=ksize_in, padding=(ksize_in - 1) // 2, groups=1),
                )
                self.oconv = nn.Sequential(
                    padding,
                    Conv(c0, out_channels, kernel_size=3, padding=self.conv_padding, bias=False, groups=1),
                )
            else:
                self.conv_padding = 1
                self.iconv = Conv(in_channels, c0, kernel_size=5, padding=2, groups=1)
                self.oconv = Conv(c0, out_channels, kernel_size=3, padding=self.conv_padding, bias=False, groups=1)
            if final_normalized:
                self.relu6 = nn.ReLU6()

            self.enconvs = nn.ModuleList()
            self.dnforms = nn.ModuleList()
            self.hzforms = nn.ModuleList()
            self.upforms = nn.ModuleList()
            self.deconvs = nn.ModuleList()

            self.spatial = [np.array(spatial, dtype=int)]
            self.channel_sizes = [np.array(c0, dtype=int)]
            for ix in range(layers):
                least_factor = ex
                scale, factor = scales[ix], factors[ix]
                self.spatial.append(np.array(self.spatial[ix] * scale, dtype=int))
                # Channel counts stay a multiple of the block extension.
                self.channel_sizes.append(np.array(self.channel_sizes[ix] * factor // least_factor * least_factor, dtype=int))

                ci, co = self.channel_sizes[ix].item(), self.channel_sizes[ix + 1].item()
                szi, szo = self.spatial[ix + 1], self.spatial[ix]
                logger.info('%d - ci: %d, co: %d', ix, ci, co)
                logger.info('%d - szi: [%s], szo: [%s]', ix, ', '.join(map(str, szi)), ', '.join(map(str, szo)))

                self.exceeded = self.exceeded or ci < lrd or co < lrd or szi.min() < 1 or szo.min() < 1
                if not self.exceeded:
                    try:
                        # Enable dropout only on the deepest third of layers.
                        dropout_flag = (layers - ix) * 3 < layers
                        dropout = dropout if dropout_flag else -1
                        self.enconvs.append(Block(Enconv(ci, co, size=szi, conv=TConv, padding=padding), activation=True, dropout=dropout_flag, relu=relu, attn=attn, dim=self.dim, normalizor=normalizor, conv=TConv))
                        self.dnforms.append(Transform(co, co, nblks=vblks[ix], block=block, relu=relu, conv=TConv))
                        self.hzforms.append(Transform(co, co, nblks=hblks[ix], block=block, relu=relu, conv=TConv))
                        self.deconvs.append(Block(Deconv(co * 2, ci, size=szo, conv=TConv, padding=padding), activation=True, dropout=False, relu=relu, attn=attn, dim=self.dim, normalizor=normalizor, conv=TConv))
                        self.upforms.append(Transform(ci, ci, nblks=vblks[ix], block=block, relu=relu, conv=TConv))
                    except Exception as e:
                        logger.exception(e)
                        self.exceeded = True
                else:
                    logger.error('scales are exceeded!')
                    raise ValueError('scales exceeded!')

            if self.dim == 2 and enhencer is not None:
                # Optional enhencer tubes around the ladder (2D only).
                self.enhencer_in = enhencer(c0, c0 * 2, c0)
                self.enhencer_out = enhencer(c0, c0 * 2, c0)
                self.enhencer_mid = enhencer(co, (c0 + 1) // 2, co)

    def get_conv_for_prepare(self):
        """Convolution type used by the input/output stem convolutions."""
        if self.dim == 1:
            conv = DepthwiseSeparableConv1d
        elif self.dim == 2:
            conv = DepthwiseSeparableConv2d
        elif self.dim == 3:
            conv = DepthwiseSeparableConv3d
        else:
            raise ValueError('dim %d is not supported!' % self.dim)
        return conv

    def get_conv_for_transform(self):
        """Convolution type used inside Enconv/Deconv/Transform blocks."""
        if self.dim == 1:
            conv = DepthwiseSeparableConv1d
        elif self.dim == 2:
            conv = DepthwiseSeparableConv2d
        elif self.dim == 3:
            conv = DepthwiseSeparableConv3d
        else:
            raise ValueError('dim %d is not supported!' % self.dim)
        return conv

    def forward(self, x):
        """Run the U-Net; raises ValueError when construction had failed."""
        if self.exceeded:
            raise ValueError('scales exceeded!')

        dnt = self.iconv(x)
        if self.enhencer_in is not None:
            dnt = self.enhencer_in(dnt)

        # Encoder: keep one horizontal (skip) tensor per layer.
        hzts = []
        for ix in range(self.layers):
            dnt, enc = self.dnforms[ix](self.enconvs[ix](dnt))
            hzt, _ = self.hzforms[ix](enc)
            hzts.append(hzt)

        upt = dnt if self.enhencer_mid is None else self.enhencer_mid(dnt)

        # Decoder: walk back up, concatenating the stored skip tensors.
        for ix in range(self.layers - 1, -1, -1):
            hzt = hzts[ix]
            upt, dec = self.upforms[ix](self.deconvs[ix](upt, hzt))

        if self.enhencer_out is not None:
            upt = self.enhencer_out(upt)

        if self.final_normalized:
            # Squash the output into [0, 1] via ReLU6 / 6.
            return self.relu6(self.oconv(upt)) / 6
        else:
            return self.oconv(upt)
| [
"mingli.yuan@gmail.com"
] | mingli.yuan@gmail.com |
b09646433f33ef6fe4a3098d1f3e25f092a646f7 | b4eef8c2e03378328293bc41303879db3050bc98 | /watsondt/cybox/test/objects/win_semaphore_test.py | c9f3dd5987d2976f29a9397b387b63101f889b76 | [] | no_license | luisgf/watsondt | 18682a28397b27eece5ce8913ca66bc37c92e555 | 6b3b2f76be23e6a054a0188a02a93a5207099e55 | refs/heads/master | 2021-01-10T07:37:31.010055 | 2016-02-24T22:01:29 | 2016-02-24T22:01:29 | 52,477,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.win_semaphore_object import WinSemaphore
from cybox.test.objects import ObjectTestCase
class TestWinSemaphore(ObjectTestCase, unittest.TestCase):
    """Round-trip tests for the WindowsSemaphore CybOX object type.

    The assertions are driven by the shared ObjectTestCase harness — it is
    parameterized by ``object_type``/``klass``/``_full_dict`` below;
    presumably it serializes/deserializes ``_full_dict`` through ``klass``.
    """
    object_type = "WindowsSemaphoreObjectType"
    klass = WinSemaphore

    # Fully-populated semaphore fixture, including a nested Windows handle.
    _full_dict = {
        'handle': {
            'id': 1234,
            'name': "MyHandle",
            'type': "Window",
            'object_address': 0xdeadbeef,
            'access_mask': 0x70000000,
            'pointer_count': 3,
            'xsi:type': "WindowsHandleObjectType",
        },
        'security_attributes': "Attributes go here",
        'named': False,
        'current_count': 100,
        'maximum_count': 250,
        'name': "A Test",
        'xsi:type': object_type
    }


if __name__ == "__main__":
    unittest.main()
| [
"luisgf@luisgf.es"
] | luisgf@luisgf.es |
3518ab5dd5c20a0d87bed2e714b16acc6cf9101f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03272/s172607501.py | c400b1024da0843303aefa8251692a0133b6431d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | list1 = [int(a) for a in input().split()]
# Compute N - i + 1 from the two integers read into list1 above
# (per the inline #N / #i notes) and print the result.
list1_0 = list1[0] #N
list1_1 = list1[1] #i
t = (list1_0 - list1_1) + 1
print(t)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
59527ef34011ce59e50f0ca795f7ae991f6efd07 | 9b142372020cd0e456ba07a08ce23d2d93804bec | /new_practice/functional_programming/function_enumerate.py | 6ac290355cb1191dadfb90021ee5f3be13453e72 | [] | no_license | pavel-malin/new_practices | 2f287a3477cc1cb4c1d0d668903a8e036e383b66 | c078fbfac0212bc258550023cc71cb25f0e4f533 | refs/heads/master | 2020-08-01T07:14:55.594507 | 2019-10-25T10:19:09 | 2019-10-25T10:19:09 | 210,911,049 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | # Without enumerate
# Manual index loop, shown for contrast with the enumerate version below.
# NOTE: `mylist` is assumed to be defined before this snippet runs.
i = 0
while i < len(mylist):
    print("Item %d: %s" % (i, mylist[i]))
    i += 1
# With enumerate
for i, item in enumerate(mylist):
    print("Item %d: %s" % (i, item))
| [
"kurchevskijpavel@gmail.com"
] | kurchevskijpavel@gmail.com |
a9b66394400c72c14bdb93febdfe8545e8a5e943 | 487eac14c3fcc5cd6be3bb9e10e765a18edd564a | /src/python/twitter/common/python/dependency.py | 81c76ee7fe40225523c86d818502f0c145bd774f | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | billwei/commons | 4a5ae6b183f871f4670b5a5d9c737824bac0623d | c980481f2627c336c7b75d57824c23d368f3ba43 | refs/heads/master | 2021-01-17T22:07:50.800151 | 2012-01-28T03:17:57 | 2012-01-28T03:17:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,985 | py | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import types
import zipfile
from twitter.common.python.dirwrapper import PythonDirectoryWrapper
from twitter.common.python.reqbuilder import ReqBuilder
from setuptools.package_index import EXTENSIONS as VALID_SOURCE_EXTENSIONS
"""
TODO(wickman): I don't like how this is factored right now, though it's an
improvement over what we used to have.
In the next iteration let's do:
Make PythonDependency a base class for:
PythonEggDependency <= .egg(s)
PythonTgzDependency <= .tgz
PythonReqDependency <= pkg_resources.Requirement
PythonDependency exports the API:
input_files()
activate(path) (idempotent) => .egg heads
We then encode PythonDependency blobs directly into the manifest to make the
dependencies more explicit than just autodetecting a bunch of ".egg" directories
in the "dependency" fileset of the chroot.
"""
class PythonDependency(object):
    """A bundle of one or more .egg distributions, built from a file, a
    requirement string, or already-installed distributions.

    NOTE: this is Python 2 era code (``types.ListType``, ``iteritems``) and
    is not Python 3 compatible as written.

    Fix: ``from_file`` previously called ``PythonDependency.from_egg``, a
    method that does not exist on this class, so any ``.egg`` filename raised
    ``AttributeError``. It now calls ``from_eggs`` (which accepts a single
    egg path via ``*egg_paths``).
    """

    class UnpackingError(Exception): pass
    class NotFoundError(Exception): pass
    class BuildError(Exception): pass
    class RequirementError(Exception): pass

    DEFAULT_URI = "http://pypi.python.org"

    @staticmethod
    def from_file(filename):
        """Build a dependency from a .egg or a recognized source archive."""
        if filename.lower().endswith('.egg'):
            return PythonDependency.from_eggs(filename)
        else:
            for suffix in VALID_SOURCE_EXTENSIONS:
                if filename.lower().endswith(suffix):
                    return PythonDependency.from_source(filename)
        raise PythonDependency.RequirementError(
            'Unrecognized Python dependency file format: %s!' % filename)

    # TODO(wickman): This arguably shouldn't belong -- we should probably
    # have the bootstrapper interface with ReqFetcher so that
    # install_requirements never goes out to the network w/o our knowledge.
    @staticmethod
    def from_req(requirement):
        """Resolve a pkg_resources-style requirement into egg distributions."""
        dists = ReqBuilder.install_requirement(requirement)
        return PythonDependency.from_distributions(*list(dists))

    @staticmethod
    def from_source(filename):
        """Build (via ReqBuilder) a dependency from a local source archive."""
        if not os.path.exists(filename):
            raise PythonDependency.NotFoundError(
                "Could not find PythonDependency target %s!" % filename)
        dists = ReqBuilder.install_requirement(filename)
        return PythonDependency.from_distributions(*list(dists))

    @staticmethod
    def from_distributions(*distributions):
        """Wrap already-installed egg distributions; rejects non-egg dists."""
        if not distributions:
            raise PythonDependency.BuildError(
                "Cannot extract PythonDependency from empty distribution!")
        else:
            if any(not dist.location.endswith('.egg') for dist in distributions):
                raise PythonDependency.RequirementError(
                    'PythonDependency.from_distribution requires Egg distributions!')
            return PythonDependency.from_eggs(*[dist.location for dist in distributions])

    @staticmethod
    def from_eggs(*egg_paths):
        return PythonDependency(egg_paths)

    def __init__(self, eggheads):
        """
        eggheads = List of files or directories that end with ".egg" and point to
          valid eggs.

        Not intended to be called directly.  Instead use the from_* factory methods.
        """
        if not isinstance(eggheads, (types.ListType, types.TupleType)):
            raise ValueError('Expected eggs to be a list of filenames!  Got %s' % type(eggheads))
        # Map basename -> wrapped egg (file or directory).
        self._eggs = {}
        for egg in eggheads:
            self._eggs[os.path.basename(egg)] = PythonDirectoryWrapper.get(egg)

    def files(self):
        """
        Iterator that yields
          (filename, content)

        Where filename is going to be:
          my_egg.egg                       if a file egg
          my_egg.egg/EGG-INFO/stuff1.txt   if a directory egg or unzipsafe egg
        """
        for egg, wrapper in self._eggs.iteritems():
            all_files = sorted(wrapper.listdir())
            if 'EGG-INFO/zip-safe' in all_files and wrapper.is_condensed():
                # Zip-safe condensed egg: emit the whole archive as one blob.
                with open(wrapper.path(), 'r') as fp:
                    yield (egg, fp.read())
            else:
                for filename in all_files:
                    # do space optimization where we skip .pyc/.pyo if the .py is already included
                    if (filename.endswith('.pyc') or filename.endswith('.pyo')) and (
                        '%s.py' % filename[:-4] in all_files):
                        continue
                    yield (os.path.join(egg, filename), wrapper.read(filename))
| [
"jsirois@twitter.com"
] | jsirois@twitter.com |
8dff5822ab1b4f7f2607db2d045a4a5e89fd310a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_usage.py | a9c1c291214946757c3b7d41f1297ba952011a44 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py |
# class header
class _USAGE():
def __init__(self,):
self.name = "USAGE"
self.definitions = [u'the way a particular word in a language, or a language in general, is used: ', u'the way something is treated or used: ', u'the bad and unfair way someone treats you: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ca7907b8d1400a229dd989b9c40e5c385916658d | f9bd7c1475b2ee956ca4bdbd6a35071b3c5ae5d9 | /test/sorted_array_to_bst.py | 4ef9c8088b51814db0c1437a23f8bc438bcbfd71 | [
"MIT"
] | permissive | gsathya/dsalgo | 7f984c1288f1894cf458ec4bafb6291a4e239c8d | 61c89ec597ced3e69bfbb438fd856c8fc5f20aba | refs/heads/master | 2020-05-18T02:28:18.390769 | 2014-10-16T19:32:00 | 2014-10-16T19:32:00 | 16,241,162 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | import unittest
from lib import bst
from algo import sorted_array_to_bst
class TestSortedArrayToBST(unittest.TestCase):
    """Smoke test for sorted_array_to_bst.convert.

    NOTE(review): test_add only exercises the conversion for exceptions —
    it makes no assertions about the resulting tree. Consider verifying the
    tree contents/shape after convert.
    """
    def setUp(self):
        # Fresh empty BST per test.
        self.bst = bst.BST()

    def test_add(self):
        vals = range(7)
        # Build a BST from the sorted range [0, 6] into self.bst in place.
        sorted_array_to_bst.convert(vals, 0, len(vals)-1, self.bst)
| [
"gsathya.ceg@gmail.com"
] | gsathya.ceg@gmail.com |
9674cf7ffd5bfb6e9597610c956057aa62ddfc87 | 0693cce8efbeca806f4551c22dce60d5f392c5c9 | /contentful_management/editor_interface.py | b3d213115f887570c052d167d231e3f697e8a7df | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | timwis/contentful-management.py | 2dc4b7389ca2136ee2a12b89812b18ef2a347e67 | d71a0e18205d1de821b41c7225e8244e786be7f3 | refs/heads/master | 2021-06-28T12:04:58.130393 | 2017-08-10T16:30:09 | 2017-08-10T16:32:50 | 103,517,328 | 0 | 0 | null | 2017-09-14T10:04:48 | 2017-09-14T10:04:48 | null | UTF-8 | Python | false | false | 1,645 | py | from .resource import Resource
"""
contentful_management.editor_interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements the EditorInterface class.
API reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/editor-interfaces
:copyright: (c) 2017 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
class EditorInterface(Resource):
    """
    API reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/editor-interfaces

    NOTE(review): the link above points at the delivery API; editor
    interfaces are a Content Management API concept — confirm and update.
    """
    def __init__(self, item, **kwargs):
        super(EditorInterface, self).__init__(item, **kwargs)
        # Per-field editor controls; defaults to an empty list when absent.
        self.controls = item.get('controls', [])

    @classmethod
    def base_url(self, space_id, content_type_id, **kwargs):
        """
        Returns the URI for the editor interface.
        """
        # NOTE(review): first parameter of a classmethod is conventionally
        # named `cls`; `self` here actually receives the class object.
        return "spaces/{0}/content_types/{1}/editor_interface".format(
            space_id,
            content_type_id
        )

    @classmethod
    def update_attributes_map(klass):
        """
        Attributes for object mapping.
        """
        return {'controls': []}

    def to_json(self):
        """
        Returns the JSON representation of the editor interface.
        """
        result = super(EditorInterface, self).to_json()
        result.update({'controls': self.controls})
        return result

    def _update_url(self):
        # The update URL equals the base URL (no trailing id segment).
        return self.__class__.base_url(
            self.space.id,
            self.content_type.id
        )

    def __repr__(self):
        return "<EditorInterface id='{0}'>".format(
            self.sys.get('id', '')
        )
| [
"david.litvakb@gmail.com"
] | david.litvakb@gmail.com |
6f51613fafe60c7d57c562aac5a76f18afd45aff | 60f2d047db9433b1fa211cec5e6dbdee961d0e39 | /sina_data/command.py | e8f434d3f012e57f72c457edf0ed845acb1fbcf0 | [] | no_license | cheatm/DataCollector | a43071868bcac8fde64875332c0f597e46c5e736 | 8daab8673b5f07939e7073055f916d260727ec47 | refs/heads/master | 2020-12-02T22:53:14.085439 | 2017-07-06T09:47:36 | 2017-07-06T09:47:36 | 96,197,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,258 | py | # encoding:utf-8
from file_manager import FileManger
import json
from fxdayu_data import MongoHandler
import pandas as pd
import os
import logger
# ensure_tick # 按tick文件更新索引
# update_index # 更新索引日期
# emend_db # 按数据库更新索引
# write_db # 根据索引写数据库
# req_tick_data # 根据索引向MQ提交数据下载任务
# ensure_index # 按tick和数据库更新索引
# download_tick # 监听MQ下载tick数据
# Initialize the shared module-level handles from the SINADATA root directory.
# When anything is missing (env var unset, config file absent, bad JSON, bad
# mongo params) fall back to None so commands like `create` can still run to
# bootstrap the directory.
try:
    root = os.environ.get("SINADATA")
    fm = FileManger(root)
    # Close the config file deterministically instead of relying on GC.
    with open(root + '/mongo.json') as _mongo_cfg:
        handler = MongoHandler.params(**json.load(_mongo_cfg))
except Exception:  # was a bare `except:` — don't swallow SystemExit/KeyboardInterrupt
    root = None
    fm = None
    handler = None
def write_db():
    """Scan the on-disk index and write pending data into MongoDB."""
    # Imported lazily here rather than at module top — presumably to avoid
    # import-time side effects; confirm before hoisting.
    import index_db
    index_db.write_db(fm, handler, logger.get_time_rotate("WriteDBLog"))
def download_tick():
    """Consume data-request messages from MQ and save tick data (blocking)."""
    import save_tick
    from worker import Consumer, TornadoWorker
    import MQconfig
    # The Tornado worker runs the consumer loop until stopped.
    TornadoWorker.params(
        Consumer(save_tick.callback, MQconfig.queue, MQconfig.consume)
    ).start()
def emend_db(include_2=False):
    """Walk every stock in the index and reconcile it against MongoDB.

    include_2: forwarded to index_db.do_scan — presumably widens the scan to
    already-logged entries; confirm against do_scan.
    """
    import index_db
    log = logger.get_time_rotate("CommandLog")
    db = handler.client[handler.db]
    for code in fm.find_stocks():
        try:
            index_db.do_scan(code, fm, db[code], include_2)
            log.info("EmendDB {} success".format(code))
        except Exception as e:
            # One failing stock must not stop the whole sweep — log and continue.
            log.error("EmendDB {} fail {}".format(code, e))
def ensure_tick():
    """Refresh the index from tick files for every known stock.

    fm.ensure_tick returns a DataFrame on success or an Exception on
    failure; one log line is emitted per stock accordingly.
    """
    log = logger.get_time_rotate("CommandLog")
    for stock in fm.find_stocks():
        outcome = fm.ensure_tick(stock)
        if isinstance(outcome, Exception):
            log.error("Ensure {} fail {}".format(stock, outcome))
        elif isinstance(outcome, pd.DataFrame):
            log.info("Ensure {} success".format(stock))
def update_index(update_bench=True):
    """Align every stock's index with the benchmark calendar dates.

    update_bench -- when True, refresh the benchmark itself first
                    (forwarded to fm.get_benchmark).
    """
    log = logger.get_time_rotate("CommandLog")
    benchmark = fm.get_benchmark(update_bench)
    # fm.update_stock returns a DataFrame on success, an Exception on failure.
    for code in fm.find_stocks():
        result = fm.update_stock(code, benchmark.index)
        if isinstance(result, pd.DataFrame):
            log.info("UpdateIndex {} success".format(code))
        elif isinstance(result, Exception):
            log.error("UpdateIndex {} fail {}".format(code, result))
def req_tick_data():
    """Run a blocking MQ producer posting data-request messages from the index."""
    from worker import TornadoWorker, Producer
    import MQconfig
    import req_data
    # req_data.check() yields the requests to publish on the configured exchange.
    TornadoWorker.params(
        Producer(req_data.check(), MQconfig.exchange, MQconfig.queue, MQconfig.bind)
    ).start()
def ensure_index(include_2=False):
    """Update the index from tick files, then emend it against the database."""
    ensure_tick()
    emend_db(include_2)
import click
@click.group(chain=True)
def command():
pass
@command.command()
def tick():
"""read tick file and update index"""
ensure_tick()
@command.command()
@click.option("--bench", is_flag=True, help='update benchmark before update stock index')
def update(bench):
"""
update stock index by benchmark
"""
update_index(bench)
@command.command()
def require():
"""read index and post DataRequestMessage to MQ"""
req_tick_data()
@command.command()
def write():
"""read index and write data into db"""
write_db()
@command.command()
@click.option("--include2", is_flag=True, help="check all data in index")
def emend(include2):
"""
read db and update index
--include2: check all log
"""
emend_db(include2)
from datetime import datetime
@command.command()
@click.option("--start", default='2012-06-01')
# NOTE(review): this default is evaluated once at import time, so a
# long-running process would keep the stale date -- confirm acceptable.
@click.option("--end", default=datetime.now().strftime("%Y-%m-%d"))
@click.option("--stock_index", is_flag=True)
def create(start, end, stock_index):
    """create index dir"""
    # makedirs raises if the directories already exist -- presumably this
    # command is meant to run once on a fresh SINADATA root; TODO confirm.
    os.makedirs(root+'/')
    os.makedirs(root+'/Log/')
    fm = FileManger(root)
    benchmark = fm.create_benchmark(start, end)
    print "create benchmark {}".format(fm.benchmark)
    import json
    # Round-trips stocks.json (load, then immediately dump); note the file
    # handles are never closed explicitly.
    codes = json.load(open('stocks.json'))
    json.dump(codes, open('stocks.json', 'w'))
    if stock_index:
        for code in codes:
            try:
                fm.create_stock(code, benchmark.index)
                print "create index {}".format(code)
            except:
                print "create index {} failed".format(code)
@command.command()
def download():
"""activate download listener to MQ"""
download_tick()
if __name__ == '__main__':
command()
| [
"862786917@qq.com"
] | 862786917@qq.com |
7b16ff529324924577f1dd439cee9d8a24bdad19 | 72ea8dbdbd68813156b76c077edb5a3806bf42ab | /synapse/tools/melder.py | d20397c0808530674c4fbcc272855608dc5de77a | [
"Apache-2.0"
] | permissive | williballenthin/synapse | 5c6f197f5a3cb3566c48dc444770592e89d4152a | 799854da814b79d6631e5cc2796c347bf4a80ce7 | refs/heads/master | 2020-12-24T14:19:12.530026 | 2017-03-16T20:30:38 | 2017-03-16T20:30:38 | 41,521,212 | 2 | 0 | null | 2015-08-28T02:01:50 | 2015-08-28T02:01:50 | null | UTF-8 | Python | false | false | 1,619 | py | import sys
import msgpack
import argparse
import synapse.mindmeld as s_mindmeld
from synapse.common import *
def main(argv):
    '''
    Command line tool for MindMeld construction/manipulation.

    Loads an existing meld file if present, applies the requested
    mutations (name, version, added python paths) and writes the
    msgpack-encoded meld dictionary back to the same path.
    '''
    p = argparse.ArgumentParser(prog='melder')
    p.add_argument('meldfile',help='meld file path')
    # NOTE(review): default=[] with action='append' shares one list across
    # repeated parse_args calls -- harmless for a one-shot CLI, but confirm.
    p.add_argument('--add-pypath', dest='pypaths', default=[], action='append', help='add a python path to the meld')
    p.add_argument('--add-datfiles', dest='datfiles', action='store_true', help='when adding pypath, include datfiles')
    p.add_argument('--dump-info', dest='dumpinfo', action='store_true', help='dump the entire meld info dictionary to stdout')
    p.add_argument('--set-name', dest='name', default=None, help='set meld name (ie, "foolib")')
    p.add_argument('--set-version', dest='version', default=None, help='set meld version (ie, 8.2.30)')
    opts = p.parse_args(argv)
    # Start from the existing meld, if any, so edits are cumulative.
    # NOTE(review): the msgpack `encoding` kwarg was removed in msgpack>=1.0;
    # this code presumably targets an older msgpack release -- confirm.
    meldinfo = {}
    if os.path.isfile(opts.meldfile):
        with open(opts.meldfile,'rb') as fd:
            meldinfo = msgpack.load(fd,encoding='utf8')
    meld = s_mindmeld.MindMeld(**meldinfo)
    if opts.version:
        meld.setVersion(vertup(opts.version))
    if opts.name:
        meld.setName(opts.name)
    for pypath in opts.pypaths:
        meld.addPyPath(pypath,datfiles=opts.datfiles)
    meldinfo = meld.getMeldDict()
    if opts.dumpinfo:
        print(repr(meldinfo))
    # Serialize and overwrite the meld file in place.
    meldbyts = msgpack.dumps( meld.getMeldDict(), encoding='utf8', use_bin_type=True )
    with open(opts.meldfile,'wb') as fd:
        fd.write(meldbyts)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [
"invisigoth@kenshoto.com"
] | invisigoth@kenshoto.com |
0e51fba2f80f46ac06c1128a50b217af17862cff | 3fd3da4f11a251cc43d44d1d61ff2ffe5c82a4ce | /dlp/common/img_util.py | 8fafd5919c6f8c89627cc176e0b8598124df38f2 | [] | no_license | dumpinfo/TsBook | d95faded917bce3e024e77ff06afd30717ed9ef4 | 8fadfcd2ebf935cd49784fd27d66b2fd9f307fbd | refs/heads/master | 2023-05-27T07:56:24.149421 | 2019-07-31T20:51:52 | 2019-07-31T20:51:52 | 198,481,031 | 1 | 3 | null | 2023-05-22T21:13:31 | 2019-07-23T17:47:19 | Jupyter Notebook | UTF-8 | Python | false | false | 1,764 | py | from PIL import Image as image
def get_scaled_dims(org_w, org_h, dest_w, dest_h):
scale = dest_w / org_w
dh = scale * org_h
new_w = dest_w
new_h = dest_h
if dh < dest_h:
new_h = dh
else:
scale = dest_h / org_h
new_w = scale * org_w
return int(new_w), int(new_h)
def get_resized_dim(ori_w, ori_h, dest_w, dest_h):
widthRatio = heightRatio = None
ratio = 1
if (ori_w and ori_w > dest_w) or (ori_h and ori_h > dest_h):
if dest_w and ori_w > dest_w:
widthRatio = float(dest_w) / ori_w #正确获取小数的方式
if dest_h and ori_h > dest_h:
heightRatio = float(dest_h) / ori_h
if widthRatio and heightRatio:
if widthRatio < heightRatio:
ratio = widthRatio
else:
ratio = heightRatio
if widthRatio and not heightRatio:
ratio = widthRatio
if heightRatio and not widthRatio:
ratio = heightRatio
newWidth = int(ori_w * ratio)
newHeight = int(ori_h * ratio)
else:
newWidth = ori_w
newHeight = ori_h
return newWidth, newHeight
def resize_img_file(org_img, dest_img, dest_w, dest_h, save_quality=35):
print('resize the image')
im = image.open(org_img)
print('im={0}'.format(im))
ori_w, ori_h = im.size
newWidth, newHeight = get_resized_dim(ori_w, ori_h, dest_w, dest_h)
im.resize((newWidth,newHeight),image.ANTIALIAS).save(dest_img,quality=save_quality)
def resize_img(im, dest_w, dest_h, save_quality=35):
ori_w, ori_h = im.size
newWidth, newHeight = get_resized_dim(ori_w, ori_h, dest_w, dest_h)
return im.resize((newWidth,newHeight),image.ANTIALIAS)
| [
"twtravel@126.com"
] | twtravel@126.com |
8beda32bbb13b6f511b0c1daa4f271163ee84276 | b820ed3fba2c851715905116ef437843b3532b57 | /owners/stores/admin.py | 19525f46b5d315117deb9e8b7fb84b6557820ef8 | [] | no_license | dev-fahim/retail_app | f88ea96dd4b95516598f4fceedca31a02e8eaeb3 | 41438560a8dac3c1f3cfd966373230dc2c3af9ff | refs/heads/master | 2022-12-11T06:05:29.023809 | 2019-01-12T17:01:27 | 2019-01-12T17:01:27 | 156,075,067 | 0 | 0 | null | 2022-11-22T03:06:03 | 2018-11-04T11:27:55 | Python | UTF-8 | Python | false | false | 149 | py | from django.contrib import admin
from owners.stores.models import OwnerStoreModel
# Register your models here.
admin.site.register(OwnerStoreModel)
| [
"fahim6668@gmail.com"
] | fahim6668@gmail.com |
ca1f46c68d3b1cfef20fcac79a1a882105478872 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/kfac/python/ops/utils_lib.py | ddbb4485ce6967082f1844c6d798c078f1cc303b | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 1,520 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.kfac.python.ops.utils import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
_allowed_symbols = [
"SequenceDict",
"setdefault",
"tensors_to_column",
"column_to_tensors",
"kronecker_product",
"layer_params_to_mat2d",
"mat2d_to_layer_params",
"compute_pi",
"posdef_inv",
"posdef_inv_matrix_inverse",
"posdef_inv_cholesky",
"posdef_inv_funcs",
"SubGraph",
"generate_random_signs",
"fwd_gradients",
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
396c94f3cc267a2427d8631410da750806a52241 | ecfc473acd6dc4a6ccf401b64a2eee227b056a9e | /xrpc_tests/test_dict.py | 35d2e574b6fb13a34128c860af07da8509b72157 | [
"Apache-2.0"
] | permissive | andreycizov/python-xrpc | 774cb2a433a86d83cc55d92b7c4d0c0d6780577b | ed403ae74d5e89e0ebac68bcc58591d6b32742ff | refs/heads/master | 2020-03-22T13:48:03.024609 | 2019-08-07T11:33:19 | 2019-08-07T11:33:19 | 140,132,337 | 0 | 0 | Apache-2.0 | 2018-07-09T01:53:08 | 2018-07-08T02:46:45 | Python | UTF-8 | Python | false | false | 895 | py | import unittest
from datetime import timedelta
from xrpc.dict import RPCLogDict, ObjectDict
from xrpc.error import HorizonPassedError
from xrpc.net import RPCKey
from xrpc.util import time_now
class TestDict(unittest.TestCase):
    """Unit tests for the xrpc dictionary helpers."""
    def test_rpclogdict(self):
        """RPCLogDict only accepts keys within its time horizon."""
        cr = time_now()
        x = RPCLogDict(cr)
        with self.subTest('a'):
            # a key stamped before the horizon is rejected on write
            with self.assertRaises(HorizonPassedError):
                x[RPCKey(time_now() - timedelta(seconds=10))] = False
        kv = RPCKey()
        with self.subTest('b'):
            # a fresh key round-trips its value
            val = True
            x[kv] = val
            self.assertEqual(x[kv], val)
        with self.subTest('c'):
            # advancing the horizon past the key invalidates reads of it
            x.set_horizon(time_now())
            with self.assertRaises(HorizonPassedError):
                x[kv]
    def test_object_dict(self):
        """ObjectDict raises AttributeError for a missing attribute."""
        v = ObjectDict()
        with self.assertRaises(AttributeError):
            v.attr
"acizov@gmail.com"
] | acizov@gmail.com |
86162c2e3044c4991f3146946be6253becc800fc | d8913c1512146bb42756f61ba0872d73179884eb | /env/bin/wheel3 | 6c5a912cdabbfbc51374fb893594f592a6266ee5 | [
"MIT"
] | permissive | sahin88/Django_Rest_Framework_Redux_React_Estate_App_FullStack | 2ed305c399edfab05ce3653e8bcaf36f09ae9015 | 10e31c4071bcebc0e4401f42084211d170b2ea56 | refs/heads/main | 2023-03-22T17:00:37.102265 | 2021-03-16T17:26:53 | 2021-03-16T17:26:53 | 319,297,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | #!/home/alex/Documents/estate/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"sahinmuratogur@gmail.com"
] | sahinmuratogur@gmail.com | |
c1ec56c62be7e95389e3aeb7ad30f6794d553aba | a1b892c0f5f8c5aa2c67b555b8d1d4b7727a86a4 | /Python/outage/edit_update.py | f28d19d183c03af4d7ba848164e4bf291788d408 | [] | no_license | Vivekdjango/outage | 60f463ae5294d2b33544a19bda34cc2c22dd42c8 | 20cfbc07e6714f0c8c7e685ea389f1b8ef1bfd53 | refs/heads/master | 2021-01-20T04:18:40.023340 | 2017-04-28T06:46:26 | 2017-04-28T06:46:26 | 89,675,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,475 | py | #!/usr/bin/python
print "Content-Type: text/html"
print ""
import cgi, cgitb
import re
import smtplib
import codecs
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
form=cgi.FieldStorage()
st=form.getvalue('status')
sub=form.getvalue('subject')
com=form.getvalue('comment')
print com
cm=com.split('\n')
print cm
check=re.search("Communication(.*)",sub)
new=check.group()
i=new.split()
x=i.remove('Communication')
j=' '.join(i).replace(" ","")
html_file=j+'.html'
m=open('/var/www/html/outage/%s'%(html_file),'r+')
o=m.readlines()
m.seek(0)
for i in o:
if i !="<b>ETA:</b>NA<br><br>Regards & Thanks<br>NOC":
m.write(i)
m.truncate()
m.close()
s=open("/var/www/html/outage/%s"%(html_file)).read()
# Extract the current status markup from the page, e.g.
# "style='color:red'><b><u>Status:</u></b>RED</p>", then rewrite the whole
# file for each supported status transition.  Only a transition to a
# *different* status triggers a rewrite; same-status requests fall through.
z=re.search('style(.*)',s)
x=z.group()
# -> AMBER (from RED or GREEN)
if st=="amber" and x=="style='color:red'><b><u>Status:</u></b>RED</p>":
    s=s.replace(x,"style='color:orange'><b><u>Status:</u></b>AMBER</p>")
    f=open("/var/www/html/outage/%s"%(html_file),'w')
    f.write(s)
    f.close()
elif st=="amber" and x=="style='color:green'><b><u>Status:</u></b>GREEN</p>":
    s=s.replace(x,"style='color:orange'><b><u>Status:</u></b>AMBER</p>")
    f=open("/var/www/html/outage/%s"%(html_file),'w')
    f.write(s)
    f.close()
# -> GREEN ("resolved", from RED or AMBER)
elif st=="resolved" and x=="style='color:red'><b><u>Status:</u></b>RED</p>":
    s=s.replace(x,"style='color:green'><b><u>Status:</u></b>GREEN</p>")
    f=open("/var/www/html/outage/%s"%(html_file),'w')
    f.write(s)
    f.close()
elif st=="resolved" and x=="style='color:orange'><b><u>Status:</u></b>AMBER</p>":
    s=s.replace(x,"style='color:green'><b><u>Status:</u></b>GREEN</p>")
    f=open("/var/www/html/outage/%s"%(html_file),'w')
    f.write(s)
    f.close()
# -> RED (from AMBER or GREEN)
elif st=="red" and x=="style='color:orange'><b><u>Status:</u></b>AMBER</p>":
    s=s.replace(x,"style='color:red'><b><u>Status:</u></b>RED</p>")
    f=open("/var/www/html/outage/%s"%(html_file),'w')
    f.write(s)
    f.close()
elif st=="red" and x=="style='color:green'><b><u>Status:</u></b>GREEN</p>":
    s=s.replace(x,"style='color:red'><b><u>Status:</u></b>RED</p>")
    f=open("/var/www/html/outage/%s"%(html_file),'w')
    f.write(s)
    f.close()
#with open("/var/www/html/outage/%s"%html_file, "r") as f:
# lines = f.readlines()
#for index, line in enumerate(lines):
# if line.startswith("ETA \n"):
# break
#lines.insert(index, "<br><ul><li>{0}</li></ul>".format(com))
#
l=[]
for v in cm:
val="<ul><li>%s</li></ul>"%(v)
l.append(val)
print l
#print l.remove('<ul><li>\r</li></ul>')
#val="<br><ul><li>{0}</li></ul>".format(com)
foot="<b>ETA:</b>NA<br><br>Regards & Thanks<br>NOC"
with open("/var/www/html/outage/%s"%html_file, "a") as f:
for ca in l:
f.writelines(ca)
f.write('\n')
f.write('\n')
f.write(foot)
f.close()
ab=codecs.open('/var/www/html/outage/%s'%html_file)
bc=ab.read()
def py_mail(SUBJECT, BODY, TO, FROM):
    """Send BODY as an HTML e-mail (multipart/alternative) from FROM to TO.

    NOTE(review): '<mail server>' is a placeholder SMTP host and must be
    configured before this can actually send mail.
    """
    MESSAGE = MIMEMultipart('alternative')
    MESSAGE['subject'] = SUBJECT
    MESSAGE['To'] = TO
    MESSAGE['From'] = FROM
    HTML_BODY = MIMEText(BODY, 'html')
    MESSAGE.attach(HTML_BODY)
    server = smtplib.SMTP('<mail server>')
    server.sendmail(FROM, [TO], MESSAGE.as_string())
    server.quit()
if __name__ == "__main__":
"""Executes if the script is run as main script (for testing purposes)"""
email_content =bc
FROM = '<sender email-id>'
TO ='<receiver1>'
py_mail(sub, email_content, TO, FROM)
| [
"viveksinha@IC0532-L0.corp.inmobi.com"
] | viveksinha@IC0532-L0.corp.inmobi.com |
04481067cae2cdf914133af49338265cf8615ad1 | b306aab9dcea2dd83dda700bc9f7b9f1a32cff3a | /CAIL2020/sfzyzc/sfzyzb/preprocess.py | 90d05468cc6b241abae61ba2d175fe7a69e29aed | [
"Apache-2.0"
] | permissive | Tulpen/CAIL | d6ca9981c7ea2603ae61675ba330a9614cd9398d | c4cfa98ab4ecedbce34a7a5a186830486047540c | refs/heads/master | 2023-04-23T20:07:56.774530 | 2021-04-16T13:18:36 | 2021-04-16T13:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,081 | py | import json
import re
from difflib import SequenceMatcher
from query_sim import Query_Similar
import pandas
smallfile = "data/sfzy_small.json"
bigfile = "data/sfzy_big.json"
interfile = "data/inter.json"
# threshold = 0.3
def process_context(line):
    """Split *line* into clauses, each clause keeping its trailing delimiter.

    Delimiters are the CJK/ASCII punctuation in the capturing group below.
    A trailing fragment with no delimiter after it is dropped, as is any
    input containing no delimiter at all (the result is then empty).
    """
    pieces = [p for p in re.split('([,,:;;。])', line) if p]
    # re.split with a capturing group alternates text and delimiter, so
    # glue each even-indexed fragment to the delimiter that follows it.
    return [pieces[k] + pieces[k + 1] for k in range(0, len(pieces) - 1, 2)]
# with open(interfile, 'w', encoding='utf-8') as fw:
# with open(smallfile,'r', encoding='utf-8') as fin:
# for line in fin:
# sents = json.loads(line.strip())
# pos = []
# neg = []
# summary = sents['summary']
# text = sents['text']
# sentences = [item['sentence'] for item in text]
# summary_spans = process_context(summary)
# query_sim = Query_Similar(sentences)
# matching_ids = [query_sim.find_similar(span) for span in summary_spans]
# pos = [sentences[i] for i in range(len(sentences)) if i in matching_ids]
# neg = [sentences[i] for i in range(len(sentences)) if i not in matching_ids]
# sents['pos'] = pos
# sents['neg'] = neg
# print('.')
# fw.write(json.dumps(sents, ensure_ascii=False)+"\n")
#
# with open(bigfile,'r', encoding='utf-8') as fin:
# for line in fin:
# sents = json.loads(line.strip())
# pos = []
# neg = []
# summary = sents['summary']
# text = sents['text']
# sentences = [item['sentence'] for item in text]
# summary_spans = process_context(summary)
# query_sim = Query_Similar(sentences)
# matching_ids = [query_sim.find_similar(span) for span in summary_spans]
# pos = [sentences[i] for i in range(len(sentences)) if i in matching_ids]
# neg = [sentences[i] for i in range(len(sentences)) if i not in matching_ids]
# sents['pos'] = pos
# sents['neg'] = neg
# print('.')
# fw.write(json.dumps(sents, ensure_ascii=False)+"\n")
tag_sents = []   # collected (paragraph id, item) rows for the CSV below
para_id=0
with open(smallfile, 'r', encoding='utf-8') as fin:
    for line in fin:
        print('.')
        sents = json.loads(line.strip())
        # NOTE(review): if each line decodes to a dict (as the commented-out
        # code above suggests), iterating it yields the dict *keys*, not
        # sentences -- confirm each line is actually a JSON array here.
        for sent in sents:
            tag_sents.append((para_id,sent))
        para_id += 1
df = pandas.DataFrame(tag_sents, columns=['para','content'])
df.to_csv("data/para_content_train.csv", columns=['para','content'], index=False)
#
# # df = pandas.DataFrame()
# tag_sents = []
# with open(interfile, 'r', encoding='utf-8') as fin:
# for line in fin:
# print('.')
# sents = json.loads(line.strip())
# tag_sents.append(("".join(sents['pos']), sents['summary']))
# df = pandas.DataFrame(tag_sents, columns=['core', 'summary'])
# df.to_csv("data/core_summary_train.csv", columns=['core','summary'], index=False) | [
"bangtech@sina.com"
] | bangtech@sina.com |
afbcad670fab8581a7dea774391325bec563aeaf | e3fc5f889d99f2428fa8aadc3949e4d3fe7ce48d | /pygeodesy/cartesianBase.py | 27131c7b7db3d318a42db941f9ebbe299248026b | [
"MIT"
] | permissive | huangyizhi11/PyGeodesy | dd08d59645f35c46f3a22cfdb9073253551fb65e | c293600eb103248f46241659ef5602416d3cd46e | refs/heads/master | 2023-01-23T02:06:48.881445 | 2020-12-04T21:36:18 | 2020-12-04T21:36:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,455 | py |
# -*- coding: utf-8 -*-
u'''(INTERNAL) Base classes for elliposiodal, spherical and N-/vectorial
C{Cartesian}s.
After I{(C) Chris Veness 2011-2015} published under the same MIT Licence**,
see U{https://www.Movable-Type.co.UK/scripts/latlong.html},
U{https://www.Movable-Type.co.UK/scripts/latlong-vectors.html} and
U{https://www.Movable-Type.co.UK/scripts/geodesy/docs/latlon-ellipsoidal.js.html}..
@newfield example: Example, Examples
'''
from pygeodesy.basics import property_doc_, property_RO, _xinstanceof
from pygeodesy.datums import Datum, Datums, _ellipsoidal_datum
from pygeodesy.errors import _datum_datum, _IsnotError, _ValueError, _xkwds
from pygeodesy.fmath import cbrt, fsum_, hypot_, hypot2
from pygeodesy.interns import EPS, NN, _COMMASPACE_, _ellipsoidal_, _1_0
from pygeodesy.interns import _spherical_ # PYCHOK used!
from pygeodesy.lazily import _ALL_DOCS
from pygeodesy.namedTuples import LatLon4Tuple, Vector4Tuple
from pygeodesy.streprs import Fmt
from pygeodesy.vector3d import Vector3d, _xyzhdn6
from math import sqrt # hypot
__all__ = ()
__version__ = '20.11.05'
class CartesianBase(Vector3d):
'''(INTERNAL) Base class for ellipsoidal and spherical C{Cartesian}.
'''
_datum = None # L{Datum}, to be overriden
_Ecef = None # preferred C{EcefKarney} class
_e9t = None # cached toEcef (L{Ecef9Tuple})
_height = 0 # height (L{Height})
_v4t = None # cached toNvector (L{Vector4Tuple})
def __init__(self, xyz, y=None, z=None, datum=None, ll=None, name=NN):
'''New C{Cartesian...}.
@arg xyz: An L{Ecef9Tuple}, L{Vector3Tuple}, L{Vector4Tuple}
or the C{X} coordinate (C{scalar}).
@arg y: The C{Y} coordinate (C{scalar}) if B{C{xyz}} C{scalar}.
@arg z: The C{Z} coordinate (C{scalar}) if B{C{xyz}} C{scalar}.
@kwarg datum: Optional datum (L{Datum}, L{Ellipsoid}, L{Ellipsoid2}
or L{a_f2Tuple}).
@kwarg ll: Optional, original latlon (C{LatLon}).
@kwarg name: Optional name (C{str}).
@raise TypeError: Non-scalar B{C{xyz}}, B{C{y}} or B{C{z}}
coordinate or B{C{xyz}} not an L{Ecef9Tuple},
L{Vector3Tuple} or L{Vector4Tuple}.
'''
x, y, z, h, d, n = _xyzhdn6(xyz, y, z, None, datum, ll)
Vector3d.__init__(self, x, y, z, ll=ll, name=name or n)
if h:
self._height = h
if d:
self.datum = _ellipsoidal_datum(d, name=name)
def _update(self, updated, *attrs):
'''(INTERNAL) Zap cached attributes if updated.
'''
if updated:
Vector3d._update(self, updated, '_e9t', '_v4t', *attrs)
def _applyHelmert(self, transform, inverse=False, **datum):
'''(INTERNAL) Return a new cartesian by applying a Helmert
transform to this cartesian.
@arg transform: Transform to apply (L{Transform}).
@kwarg inverse: Optionally, apply the inverse
Helmert transform (C{bool}).
@kwarg datum: Optional datum of the returned point,
(C{B{datum}=}L{Datum}).
@return: The transformed point (C{Cartesian}).
@note: For C{B{inverse}=True} keyword B{C{datum}} must
be C{B{datum}=}L{Datums.WGS84}.
'''
xyz = transform.transform(self.x, self.y, self.z, inverse)
return self._xnamed(self.classof(xyz, **datum))
    def convertDatum(self, datum2, datum=None):
        '''Convert this cartesian from one datum to an other.
        @arg datum2: Datum to convert I{to} (L{Datum}).
        @kwarg datum: Optional datum to convert I{from} (L{Datum}),
                      overriding this cartesian's own datum.
        @return: The converted point (C{Cartesian}).
        @raise TypeError: B{C{datum2}} or B{C{datum}}
                          invalid.
        '''
        _xinstanceof(Datum, datum2=datum2)
        # Re-base onto the explicit source datum first, if one was given
        # and it differs from this point's own datum.
        if datum not in (None, self.datum):
            c = self.convertDatum(datum)
        else:
            c = self
        # Helmert transforms are defined relative to WGS84, so conversions
        # between two non-WGS84 datums pivot through WGS84.
        i, d = False, c.datum
        if d == datum2:
            # Already in the target datum; copy only if c is this instance.
            return c.copy() if c is self else c
        elif d == Datums.WGS84:
            d = datum2  # convert from WGS84 to datum2
        elif datum2 == Datums.WGS84:
            i = True  # convert to WGS84 by inverse transform
        else:  # neither datum2 nor c.datum is WGS84, invert to WGS84 first
            c = c._applyHelmert(d.transform, True, datum=Datums.WGS84)
            d = datum2
        return c._applyHelmert(d.transform, i, datum=datum2)
@property_doc_(''' this cartesian's datum (L{Datum}).''')
def datum(self):
'''Get this cartesian's datum (L{Datum}).
'''
return self._datum
@datum.setter # PYCHOK setter!
def datum(self, datum):
'''Set this cartesian's C{datum} I{without conversion}.
@arg datum: New datum (L{Datum}).
@raise TypeError: The B{C{datum}} is not a L{Datum}.
'''
_xinstanceof(Datum, datum=datum)
d = self.datum
if d is not None:
if d.isEllipsoidal and not datum.isEllipsoidal:
raise _IsnotError(_ellipsoidal_, datum=datum)
elif d.isSpherical and not datum.isSpherical:
raise _IsnotError(_spherical_, datum=datum)
self._update(datum != d)
self._datum = datum
@property_RO
def Ecef(self):
'''Get the ECEF I{class} (L{EcefKarney}).
'''
if CartesianBase._Ecef is None:
from pygeodesy.ecef import EcefKarney
CartesianBase._Ecef = EcefKarney # default
return CartesianBase._Ecef
@property_RO
def height(self):
'''Get the height (C{meter}).
'''
return self._height
@property_RO
def isEllipsoidal(self):
'''Check whether this cartesian is ellipsoidal (C{bool} or C{None} if unknown).
'''
return self.datum.isEllipsoidal if self._datum else None
@property_RO
def isSpherical(self):
'''Check whether this cartesian is spherical (C{bool} or C{None} if unknown).
'''
return self.datum.isSpherical if self._datum else None
@property_RO
def latlon(self):
'''Get this cartesian's (geodetic) lat- and longitude in C{degrees} (L{LatLon2Tuple}C{(lat, lon)}).
'''
return self.toEcef().latlon
@property_RO
def latlonheight(self):
'''Get this cartesian's (geodetic) lat-, longitude in C{degrees} with height (L{LatLon3Tuple}C{(lat, lon, height)}).
'''
return self.toEcef().latlonheight
@property_RO
def latlonheightdatum(self):
'''Get this cartesian's (geodetic) lat-, longitude in C{degrees} with height and datum (L{LatLon4Tuple}C{(lat, lon, height, datum)}).
'''
return self.toEcef().latlonheightdatum
@property_RO
def _N_vector(self):
'''(INTERNAL) Get the (C{nvectorBase._N_vector_}).
'''
from pygeodesy.nvectorBase import _N_vector_
r = self._v4t or self.toNvector()
return _N_vector_(r.x, r.y, r.z, h=r.h)
@property_RO
def philam(self):
'''Get this cartesian's (geodetic) lat- and longitude in C{radians} (L{PhiLam2Tuple}C{(phi, lam)}).
'''
return self.toEcef().philam
@property_RO
def philamheight(self):
'''Get this cartesian's (geodetic) lat-, longitude in C{radians} with height (L{PhiLam3Tuple}C{(phi, lam, height)}).
'''
return self.toEcef().philamheight
@property_RO
def philamheightdatum(self):
'''Get this cartesian's (geodetic) lat-, longitude in C{radians} with height and datum (L{PhiLam4Tuple}C{(phi, lam, height, datum)}).
'''
return self.toEcef().philamheightdatum
def to3llh(self, datum=None): # PYCHOK no cover
'''DEPRECATED, use property C{latlonheightdatum} or property C{latlonheight}.
@return: A L{LatLon4Tuple}C{(lat, lon, height, datum)}.
@note: This method returns a B{C{-4Tuple}} I{and not a} C{-3Tuple}
as its name suggests.
'''
t = self.toLatLon(datum=datum, LatLon=None)
r = LatLon4Tuple(t.lat, t.lon, t.height, t.datum)
return self._xnamed(r)
# def _to3LLh(self, datum, LL, **pairs): # OBSOLETE
# '''(INTERNAL) Helper for C{subclass.toLatLon} and C{.to3llh}.
# '''
# r = self.to3llh(datum) # LatLon3Tuple
# if LL is not None:
# r = LL(r.lat, r.lon, height=r.height, datum=datum)
# for n, v in pairs.items():
# setattr(r, n, v)
# r = self._xnamed(r)
# return r
def toEcef(self):
'''Convert this cartesian to geodetic (lat-/longitude) coordinates.
@return: An L{Ecef9Tuple}C{(x, y, z, lat, lon, height,
C, M, datum)} with C{C} and C{M} if available.
@raise EcefError: A C{.datum} or an ECEF issue.
'''
if self._e9t is None:
r = self.Ecef(self.datum).reverse(self, M=True)
self._e9t = self._xnamed(r)
return self._e9t
def toLatLon(self, datum=None, LatLon=None, **LatLon_kwds): # see .ecef.Ecef9Tuple.convertDatum
'''Convert this cartesian to a geodetic (lat-/longitude) point.
@kwarg datum: Optional datum (L{Datum}, L{Ellipsoid}, L{Ellipsoid2}
or L{a_f2Tuple}).
@kwarg LatLon: Optional class to return the geodetic point
(C{LatLon}) or C{None}.
@kwarg LatLon_kwds: Optional, additional B{C{LatLon}}
keyword arguments, ignored if
C{B{LatLon}=None}.
@return: The geodetic point (B{C{LatLon}}) or if B{C{LatLon}}
is C{None}, an L{Ecef9Tuple}C{(x, y, z, lat, lon,
height, C, M, datum)} with C{C} and C{M} if available.
@raise TypeError: Invalid B{C{datum}} or B{C{LatLon_kwds}}.
'''
d = self.datum
if datum in (None, d):
r = self.toEcef()
else:
c = self.convertDatum(datum)
d = c.datum
r = c.Ecef(d).reverse(c, M=True)
if LatLon is not None: # class or .classof
kwds = _xkwds(LatLon_kwds, datum=r.datum, height=r.height)
r = LatLon(r.lat, r.lon, **kwds)
_datum_datum(r.datum, d)
return self._xnamed(r)
def toNvector(self, Nvector=None, datum=None, **Nvector_kwds): # PYCHOK Datums.WGS84
'''Convert this cartesian to C{n-vector} components.
@kwarg Nvector: Optional class to return the C{n-vector}
components (C{Nvector}) or C{None}.
@kwarg datum: Optional datum (L{Datum}, L{Ellipsoid}, L{Ellipsoid2}
or L{a_f2Tuple}) overriding this cartesian's datum.
@kwarg Nvector_kwds: Optional, additional B{C{Nvector}} keyword
arguments, ignored if B{C{Nvector=None}}.
@return: The C{unit, n-vector} components (B{C{Nvector}}) or a
L{Vector4Tuple}C{(x, y, z, h)} if B{C{Nvector}} is C{None}.
@raise TypeError: Invalid B{C{datum}}.
@raise ValueError: The B{C{Cartesian}} at origin.
@example:
>>> c = Cartesian(3980581, 97, 4966825)
>>> n = c.toNvector() # (x=0.622818, y=0.00002, z=0.782367, h=0.242887)
'''
d = _ellipsoidal_datum(datum or self.datum, name=self.name)
r = self._v4t
if r is None or d != self.datum:
# <https://www.Movable-Type.co.UK/scripts/geodesy/docs/
# latlon-nvector-ellipsoidal.js.html#line309>
E = d.ellipsoid
x, y, z = self.xyz
# Kenneth Gade eqn 23
p = hypot2(x, y) * E.a2_
q = (z**2 * E.e12) * E.a2_
r = fsum_(p, q, -E.e4) / 6
s = (p * q * E.e4) / (4 * r**3)
t = cbrt(fsum_(1, s, sqrt(s * (2 + s))))
u = r * fsum_(_1_0, t, _1_0 / t)
v = sqrt(u**2 + E.e4 * q)
w = E.e2 * fsum_(u, v, -q) / (2 * v)
k = sqrt(fsum_(u, v, w**2)) - w
if abs(k) < EPS:
raise _ValueError(origin=self)
e = k / (k + E.e2)
# d = e * hypot(x, y)
# tmp = 1 / hypot(d, z) == 1 / hypot(e * hypot(x, y), z)
t = hypot_(e * x, e * y, z) # == 1 / tmp
if t < EPS:
raise _ValueError(origin=self)
h = fsum_(k, E.e2, -_1_0) / k * t
s = e / t # == e * tmp
r = Vector4Tuple(x * s, y * s, z / t, h)
self._v4t = r if d == self.datum else None
if Nvector is not None:
r = Nvector(r.x, r.y, r.z, h=r.h, datum=d, **Nvector_kwds)
return self._xnamed(r)
def toStr(self, prec=3, fmt=Fmt.SQUARE, sep=_COMMASPACE_): # PYCHOK expected
'''Return the string representation of this cartesian.
@kwarg prec: Optional number of decimals, unstripped (C{int}).
@kwarg fmt: Optional enclosing backets format (string).
@kwarg sep: Optional separator to join (string).
@return: Cartesian represented as "[x, y, z]" (string).
'''
return Vector3d.toStr(self, prec=prec, fmt=fmt, sep=sep)
def toVector(self, Vector=None, **Vector_kwds):
'''Return this cartesian's components as vector.
@kwarg Vector: Optional class to return the C{n-vector}
components (L{Vector3d}) or C{None}.
@kwarg Vector_kwds: Optional, additional B{C{Vector}} keyword
arguments, ignored if C{B{Vector}=None}.
@return: A B{C{Vector}} or an L{Vector3Tuple}C{(x, y, z)}
if B{C{Vector}} is C{None}.
@raise TypeError: Invalid B{C{Vector}} or B{C{Vector_kwds}}.
'''
return self.xyz if Vector is None else \
self._xnamed(Vector(self.x, self.y, self.z, **Vector_kwds))
__all__ += _ALL_DOCS(CartesianBase)
# xyz = Vector3d.xyz
# '''Get this cartesian's X, Y and Z components (L{Vector3Tuple}C{(x, y, z)}).
# '''
# **) MIT License
#
# Copyright (C) 2016-2020 -- mrJean1 at Gmail -- All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
| [
"mrJean1@Gmail.com"
] | mrJean1@Gmail.com |
10c9ed49de23d070efdc6ced2c308317186bc7a6 | 71e1c13ba888477e8b5547518d36596d9b813201 | /q2_micom/tests/test_tradeoff.py | d29cdfe400485a8dbac81941098531b0731b1606 | [
"Apache-2.0"
] | permissive | khemlalnirmalkar/q2-micom | bc465d924ad11288210b447473f1e991d6ac44dc | 8ff3a29feddbe7624793b3d4c0c3726ae1e22a8f | refs/heads/master | 2022-12-04T20:09:39.142056 | 2020-08-26T23:29:36 | 2020-08-26T23:29:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | """Test if tradeoff analysis works."""
import numpy as np
import os.path as path
import pandas as pd
import pytest
import qiime2 as q2
import q2_micom as q2m
this_dir = q2m.tests.this_dir
medium = q2.Artifact.load(path.join(this_dir, "data", "medium.qza")).view(
pd.DataFrame
)
models = q2.Artifact.load(path.join(this_dir, "data", "build.qza"))
res = q2m.tradeoff(
models.view(q2m._formats_and_types.CommunityModelDirectory),
medium)
def test_tradeoff_values():
assert np.allclose(res.tradeoff.min(), 0.1)
assert np.allclose(res.tradeoff.max(), 1.0)
assert res.tradeoff.nunique() == 10
def test_growth_rates():
low = res.growth_rate[res.tradeoff == 0.1]
high = res.growth_rate[res.tradeoff == 1.0]
assert(low.sum() < high.sum())
def test_sane_tradeoff():
with pytest.raises(ValueError):
q2m.tradeoff(
models.view(q2m._formats_and_types.CommunityModelDirectory),
medium,
tradeoff_min=0.5,
tradeoff_max=0.4
)
| [
"ch.diener@gmail.com"
] | ch.diener@gmail.com |
95f9b98dee3a9a430148aba4d1756e5d4cfa3066 | aca209472c7288d69adf57124c197baf98c7a6e7 | /OpenCV讀者資源/讀者資源/程式實例/ch29/ch29_1.py | f67382807fb13fb3f3d33e9baac0b20b1a10cf61 | [] | no_license | Hank-Liao-Yu-Chih/document | 712790325e48b9d8115d04b5cc2a90cd78431e61 | fafe616678cd224e70936296962dcdbbf55e38b3 | refs/heads/master | 2022-09-22T12:40:33.284033 | 2022-09-08T00:33:41 | 2022-09-08T00:33:41 | 102,203,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | # ch29_1.py
import cv2
import numpy as np
face_db = [] # 建立空串列
face_db.append(cv2.imread("ch29_1\\hung1.jpg",cv2.IMREAD_GRAYSCALE))
face_db.append(cv2.imread("ch29_1\\hung2.jpg",cv2.IMREAD_GRAYSCALE))
face_db.append(cv2.imread("ch29_1\\star1.jpg",cv2.IMREAD_GRAYSCALE))
face_db.append(cv2.imread("ch29_1\\star2.jpg",cv2.IMREAD_GRAYSCALE))
labels = [0,0,1,1] # 建立標籤串列
faceNames = {"0":"Hung", "1":"Unistar"} # 建立對應名字的字典
recognizer = cv2.face.LBPHFaceRecognizer_create() # 建立人臉辨識物件
recognizer.train(face_db, np.array(labels)) # 訓練人臉辨識
# 讀取要辨識的人臉
face = cv2.imread("ch29_1\\face.jpg",cv2.IMREAD_GRAYSCALE)
label,confidence = recognizer.predict(face) # 執行人臉辨識
print(f"Name = {faceNames[str(label)]}")
print(f"Confidence = {confidence:6.2f}")
| [
"hank.liao@vicorelogic.com"
] | hank.liao@vicorelogic.com |
6cbe27584c2dab8abaf90e88fe6fec974ff6fd67 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/AMWIDealTaker/FPythonCode/FCustomizedFpML.py | ecbed3358079d3af61ad3725bd57da298e6c4981 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | """------------------------------------------------------------------------
MODULE
FCustomizedFpML -
DESCRIPTION:
This file is used to modify the FpML as per the incoming source
VERSION: 1.0.30
RESTRICTIONS/ LIMITATIONS:
1. Any modifications to the scripts/ encrypted modules/ clear text code within the core is not supported.
2. This module is not customizable
3. The component may not work as expected with any modifications done to this module at user end
--------------------------------------------------------------------------"""
import base64, zlib, imp, marshal
if imp.get_magic() == '\x03\xf3\r\n':
__pyc = marshal.loads(zlib.decompress(base64.b64decode("""
eNqtU0tPGzEQ9m4CNNv0xaGnHnxBQqoIantDVVXYJFWkJKDd0AOXaGtPwLDZddeTQio40f/dzngTVahST3jlsWc8j2+8n5VYjZDm
Z5ruLQktxBnJQOhQ5IE4CwQE4jIQ92QPhW6IX7Rp8Gm62+SgNxS+90gjGh13T4e9SNLoxwuH5dz8BN23o6Hci7q9NE4GJ5PB8fjA
u0wujJMzk4OkdeFASyzlvNRmtpR4AdLHZU5aqLxuCkUJi3PpykWlIPraS1JOJneSnSjppZNkEHP2dF8OB6PB5NArB1HrXUceFss6
tVEZmrJwXIuTOlUZi25fQqGqpUVCQX6LHMikcsioNNygVKUGeW3wwhQ+TJWVh12UKN3C2rKiyI6MWu87dV91krWLWl1G9i2HqPWB
fTjH3JYFFCjn2dK7XZfVFXcMNxYUQ+GKMvsHu6awuoG/lTLkO6yoDy2jvUcb5jeNsWoQVzZo8hozbywJFEwtItt9IO4a4tbz6ioU
1YyZRlTMQ3HXFLdNcUmnQtySJ1loQ1Rses+x0BviLhTfZ964uQ7fWhkL4QNDMWPGPqGiY3xKsn8yGk7T49Mk7iFjOjr64pHic1IG
hYaboSmuQMfd1LUYczeV3ozPSEuJRTmMszmQ3cfT6t8DviKhymJmzqc2q8gDoXI+6zlgbfqR5QtAxtKnssxS5BJ05oC1Xb4o5Gwz
O8+xvdqMiUJD49DjtxX9NYWTpYW6H8v9eF4nW6TvirVw23z88DV17BIjMtePhS0v154ieBG06dum2fK7ui9OOp3qUk2n+JozclTs
Gz3hphLINFQJQ0+ePQDwXxR8Cx9rBn7iPtwmic2gHbbDPye1JMs=""")))
else:
__pyc = marshal.loads(zlib.decompress(base64.b64decode("""
The system cannot find the path specified.""")))
del base64, zlib, imp, marshal
exec(__pyc)
del __pyc
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
f6784a2efadd835fc1bebda97261f36e0d6e0d39 | ba60d3ccf11157abaf6c7bcf3a81aace27c6af88 | /spoj/lovebirds.py | 90b400cd6db14f86ef9eb2fbf8e0987a6707406c | [] | no_license | eightnoteight/compro | 9a09628593cdd3201f4d3bcf271f1ca6a4e5efca | 1e5d32ee83e9d8f27623dee7262decad3d107bd5 | refs/heads/master | 2021-01-21T04:19:02.746824 | 2016-08-01T20:51:16 | 2016-08-01T20:51:16 | 44,669,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
# pylint: disable=invalid-name,missing-docstring,bad-builtin
from sys import stdin
def main():
catalan = [1]
for x in xrange(1, 1001):
catalan.append(2*(2*x + 1)*catalan[-1] // (x + 2))
dstream = iter(map(int, stdin.read().split()))
print '\n'.join([str(catalan[next(dstream) - 1] % 1908) for _ in xrange(next(dstream))])
main()
| [
"mr.eightnoteight@gmail.com"
] | mr.eightnoteight@gmail.com |
0ee114876823cd86a77166b1ce845a4a38d03557 | 066ee4df594a5dc90335d271b9d5a1b1e2a4d34c | /y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/python/pdb_sandbox.py | 93f585410c26310319af9f492d5e168e08a36358 | [
"LGPL-2.1-or-later",
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ychen820/microblog | a2d82447525325ec58285c2e5db58b79cceaca1b | d379afa2db3582d5c3be652165f0e9e2e0c154c6 | refs/heads/master | 2021-01-20T05:58:48.424357 | 2015-04-28T22:03:09 | 2015-04-28T22:03:09 | 32,948,331 | 0 | 2 | BSD-3-Clause | 2020-07-25T05:04:35 | 2015-03-26T19:45:07 | Python | UTF-8 | Python | false | false | 2,835 | py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Modify pdb to work with the devappserver2 sandbox."""
import sys
import threading
def install(config):
"""Install the necessary changes to pdb.
Monkeypatch pdb so that it can be used in the devappserver sandbox. Must
be called after the sandbox has been installed but before stdin/stdout
objects have been reassigned.
Args:
config: The runtime_config_pb2.Config to use to configure the sandbox.
"""
# Import here (i.e. after sandbox installed) to get the post sandbox pdb.
# Extremely important so that we monkeypatch the same pdb the apps can
# import.
import pdb as pdb_postsandbox
# Save stdin/stdout as the references will not be available when user
# code runs.
real_stdin = sys.stdin
real_stdout = sys.stdout
# Capture the original Pdb so we can forward the __init__ call after
# monkeypatching (if not captured, forwarding the call results in infinite
# recursion).
pdb_premonkeypatch = pdb_postsandbox.Pdb
if config.threadsafe or config.max_instances != 1:
warning = """
********************************************************************************
* WARNING: please read before using PDB:
* https://developers.google.com/appengine/docs/python/tools/devserver#Python_Debugging_with_PDB
********************************************************************************
"""
lock = threading.Lock()
else:
warning = ''
class _Pdb(pdb_postsandbox.Pdb):
_warning_written = False
# TODO: improve argument handling so if new arguments are added
# in the future or the defaults change, this does not need to be updated.
def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None):
if stdin is None:
stdin = real_stdin
if stdout is None:
stdout = real_stdout
# Pdb is old style class so no super().
pdb_premonkeypatch.__init__(self, completekey, stdin, stdout, skip)
if warning:
with lock:
# Note: while the goal is to write the warning only one time, it
# may be written multiple times (once each per instance).
if not _Pdb._warning_written:
stdout.write(warning)
_Pdb._warning_written = True
pdb_postsandbox.Pdb = _Pdb
| [
"ychen207@binghamton.edu"
] | ychen207@binghamton.edu |
d2ee2d0a7b652d2c7469c2b5320dcd4cf34c8a62 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/134/usersdata/218/46244/submittedfiles/escadarolante.py | ce5ff2ed83c850366a6d95b6864aa99715f36420 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # -*- coding: utf-8 -*-
n=int(input('digite a quantidade de pessoas:'))
T=0
for i in range (0,n,1):
t=int(input('tempo:'))
if i==0:
t1=t
if i==n:
nf=t+10
T=nf-t1
print(T) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
26b2a67d895180bab09500fbaec44f381f50fe05 | 29df1c3c67f6d32c25a6b88399238126696b46b0 | /benchmarks/benchmarks/sparse_linalg_onenormest.py | a23dae2b400a224a69f4bd129715f852217a6250 | [
"MIT",
"Qhull",
"BSD-3-Clause",
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | odidev/scipy | 2347f38c562f18f863d84e907048fb8a61562ff2 | 0def9e108548b00263c79248df96f0fa1c0c0cd8 | refs/heads/master | 2022-01-24T13:54:00.539960 | 2022-01-06T13:16:38 | 2022-01-06T13:16:38 | 252,180,143 | 1 | 0 | BSD-3-Clause | 2020-04-01T13:16:17 | 2020-04-01T13:16:16 | null | UTF-8 | Python | false | false | 1,848 | py | """Compare the speed of exact one-norm calculation vs. its estimation.
"""
import numpy as np
try:
import scipy.sparse
import scipy.special # import cycle workaround for some versions
import scipy.sparse.linalg
except ImportError:
pass
from .common import Benchmark
class BenchmarkOneNormEst(Benchmark):
params = [
[2, 3, 5, 10, 30, 100, 300, 500, 1000, 1e4, 1e5, 1e6],
['exact', 'onenormest']
]
param_names = ['n', 'solver']
def setup(self, n, solver):
np.random.seed(1234)
nrepeats = 100
shape = (int(n), int(n))
if solver == 'exact' and n >= 300:
# skip: slow, and not useful to benchmark
raise NotImplementedError()
if n <= 1000:
# Sample the matrices.
self.matrices = []
for i in range(nrepeats):
M = np.random.randn(*shape)
self.matrices.append(M)
else:
max_nnz = 100000
nrepeats = 1
self.matrices = []
for i in range(nrepeats):
M = scipy.sparse.rand(shape[0], shape[1], min(max_nnz/(shape[0]*shape[1]), 1e-5))
self.matrices.append(M)
def time_onenormest(self, n, solver):
if solver == 'exact':
# Get the exact values of one-norms of squares.
for M in self.matrices:
M.dot(M)
scipy.sparse.linalg.matfuncs._onenorm(M)
elif solver == 'onenormest':
# Get the estimates of one-norms of squares.
for M in self.matrices:
scipy.sparse.linalg.matfuncs._onenormest_matrix_power(M, 2)
# Retain old benchmark results (remove this if changing the benchmark)
time_onenormest.version = "f7b31b4bf5caa50d435465e78dab6e133f3c263a52c4523eec785446185fdb6f"
| [
"pav@iki.fi"
] | pav@iki.fi |
67fdb530ec4acf4b959afd9117ae5dda82b668ec | 79cf58db2795344616cbebd7ee84e7623a2f1dd1 | /Image/img_to_features.py | 50b5796751200b616031eb47f02c5b0f9d9e1324 | [] | no_license | zpeng1989/Machine_Learing_ZP_2020 | 77f290000f00a8083026681059d2c905ce575774 | 63cadb672265b9a1fba13edd5c4a91f8e2a94910 | refs/heads/main | 2023-02-28T04:10:52.286020 | 2021-02-05T02:18:08 | 2021-02-05T02:18:08 | 319,541,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
image = cv2.imread('plane.jpeg', cv2.IMREAD_COLOR)
image_10 = cv2.resize(image,(10,10))
print(image_10.flatten())
#plt.imshow(image_10, cmap='gray')
#plt.axis('off')
#plt.show()
channels = cv2.mean(image_10)
observation = np.array([(channels[2], channels[1], channels[0])])
print(observation)
features = []
colors = ['r', 'g', 'b']
for i, channel in enumerate(colors):
histogram = cv2.calcHist([image], [i], None, [256], [0,256])
features.extend(histogram)
observation = np.array(features).flatten()
print(observation)
| [
"592392714@qq.com"
] | 592392714@qq.com |
950dce45bcc59842e8f000b131db9780ed98bb5c | cfdf08cb9592760a1b676282b458e58779abb0dd | /PlaneGame/manage.py | 427eeb140bc89ac984861ea69e78866f852becc1 | [
"MIT"
] | permissive | webturing/Python3 | d830c22921fe1d1e6182e9481bcd0a930309f65c | 6fcf6ee6cc5afba0225bd56e6928b01882eb9ecb | refs/heads/master | 2021-06-12T13:13:44.421511 | 2020-06-28T11:56:01 | 2020-06-28T11:56:01 | 128,641,162 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from bin.main import main
if __name__ == '__main__':
"""
环境: python3 + pygame
running 起来就可以打飞机了O(∩_∩)O~.
"""
main()
"""
PlayPlane/
|-- bin/
| |-- main.py 程序运行主体程序
|-- config/
| |-- settings.py 程序配置(例如: 游戏背景音乐的加载等)
|-- material 程序素材放置(打飞机游戏素材放置)
|-- ...
|-- src/ 程序主体模块存放
| |-- __init__.py
| |-- bullet.py 我方飞机发射子弹实现代码存放
| |-- enemy.py 敌方飞机实现代码存放
| |-- plane.py 我方飞机实现代码存放
|-- manage.py 程序启动文件
|-- README.md
"""
| [
"zj@webturing.com"
] | zj@webturing.com |
0c3928fbbb1c31312df8cd2f0e2e59b5eb9f44ca | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/web/v20190801/list_static_site_build_function_app_settings.py | 76e8903e60e6d4e3bf71b53f3db33a36c9efa0e6 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,013 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListStaticSiteBuildFunctionAppSettingsResult',
'AwaitableListStaticSiteBuildFunctionAppSettingsResult',
'list_static_site_build_function_app_settings',
]
@pulumi.output_type
class ListStaticSiteBuildFunctionAppSettingsResult:
"""
String dictionary resource.
"""
def __init__(__self__, id=None, kind=None, name=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Mapping[str, str]:
"""
Settings.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableListStaticSiteBuildFunctionAppSettingsResult(ListStaticSiteBuildFunctionAppSettingsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListStaticSiteBuildFunctionAppSettingsResult(
id=self.id,
kind=self.kind,
name=self.name,
properties=self.properties,
type=self.type)
def list_static_site_build_function_app_settings(name: Optional[str] = None,
pr_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListStaticSiteBuildFunctionAppSettingsResult:
"""
String dictionary resource.
:param str name: Name of the static site.
:param str pr_id: The stage site identifier.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['name'] = name
__args__['prId'] = pr_id
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20190801:listStaticSiteBuildFunctionAppSettings', __args__, opts=opts, typ=ListStaticSiteBuildFunctionAppSettingsResult).value
return AwaitableListStaticSiteBuildFunctionAppSettingsResult(
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
c84bc2a5625c487a309556a47160aabac50d97c7 | 5e8a33d64c9c82f91d70b4641cb24978ff81aec5 | /mealpy/bio_based/IWO.py | 1492290eb6a9298d5353ebffc602b72e2abee160 | [
"MIT"
] | permissive | Alhassan20/mealpy | 495e77a557f102ab61daccfdcc0cfe5949b561bb | 7ed365c5c495ad1c1e066662c90159b3d5e9b8e3 | refs/heads/master | 2023-07-31T08:09:47.285307 | 2021-09-16T01:22:12 | 2021-09-16T01:22:12 | 406,805,947 | 0 | 0 | MIT | 2021-09-15T14:41:24 | 2021-09-15T14:41:23 | null | UTF-8 | Python | false | false | 6,063 | py | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 12:17, 18/03/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy.random import uniform
from numpy import ceil
from copy import deepcopy
from mealpy.optimizer import Root
class BaseIWO(Root):
"""
My version of: weed colonization (IWO)
A novel numerical optimization algorithm inspired from weed colonization
Noted:
https://pdfs.semanticscholar.org/734c/66e3757620d3d4016410057ee92f72a9853d.pdf
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
seeds=(2, 10), exponent=2, sigma=(0.5, 0.001), **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
self.seeds = seeds # (Min, Max) Number of Seeds
self.exponent = exponent # Variance Reduction Exponent
self.sigma = sigma # (Initial, Final) Value of Standard Deviation
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
fit_best = g_best[self.ID_FIT]
fit_worst = pop[self.ID_MAX_PROB][self.ID_FIT]
for epoch in range(self.epoch):
# Update Standard Deviation
sigma = ((self.epoch - epoch) / (self.epoch - 1)) ** self.exponent * (self.sigma[0] - self.sigma[1]) + self.sigma[1]
# Reproduction
pop_new = []
for item in pop:
ratio = (item[self.ID_FIT] - fit_worst) / (fit_best - fit_worst + self.EPSILON)
s = int(ceil(self.seeds[0] + (self.seeds[1] - self.seeds[0]) * ratio))
for j in range(s):
# Initialize Offspring and Generate Random Location
pos_new = item[self.ID_POS] + sigma * uniform(self.lb, self.ub)
pos_new = self.amend_position_faster(pos_new)
fit = self.get_fitness_position(pos_new)
pop_new.append([pos_new, fit])
# Re-calculate best train and worst train
pop = pop + pop_new
pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
pop = pop[:self.pop_size]
fit_worst = pop[self.ID_MAX_PROB][self.ID_FIT]
fit_best = pop[self.ID_MIN_PROB][self.ID_FIT]
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class OriginalIWO(Root):
"""
Original version of: weed colonization (IWO)
A novel numerical optimization algorithm inspired from weed colonization
Link:
https://pdfs.semanticscholar.org/734c/66e3757620d3d4016410057ee92f72a9853d.pdf
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100,
seeds=(2, 10), exponent=2, sigma=(0.5, 0.001), **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
self.seeds = seeds # (Min, Max) Number of Seeds
self.exponent = exponent # Variance Reduction Exponent
self.sigma = sigma # (Initial, Final) Value of Standard Deviation
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
pop_sorted, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
cost_best = g_best[self.ID_FIT]
cost_worst = pop_sorted[self.ID_MAX_PROB][self.ID_FIT]
for epoch in range(self.epoch):
# Update Standard Deviation
sigma = ((self.epoch - epoch) / (self.epoch - 1)) ** self.exponent * (self.sigma[0] - self.sigma[1]) + self.sigma[1]
# Reproduction
pop_new = []
for item in pop:
ratio = (item[self.ID_FIT] - cost_worst) / (cost_best - cost_worst)
S = int(ceil(self.seeds[0] + (self.seeds[1] - self.seeds[0]) * ratio))
for j in range(S):
# Initialize Offspring and Generate Random Location
pos_new = item[self.ID_POS] + sigma * uniform(self.lb, self.ub)
pos_new = self.amend_position_faster(pos_new)
fit = self.get_fitness_position(pos_new)
pop_new.append([pos_new, fit])
# Merge Populations
pop = pop + pop_new
pop = sorted(pop, key=lambda temp: temp[self.ID_FIT])
pop = pop[:self.pop_size]
# Re-calculate best train and worst train
cost_worst = pop[self.ID_MAX_PROB][self.ID_FIT]
if cost_best > pop[self.ID_MIN_PROB][self.ID_FIT]:
g_best = deepcopy(pop[self.ID_MIN_PROB])
cost_best = g_best[self.ID_FIT]
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
| [
"nguyenthieu2102@gmail.com"
] | nguyenthieu2102@gmail.com |
b0152453501e9413c085c5e3d68b541fb358198b | 7ce2b2000cfefe8fbefc2271ebc7df2061c88194 | /CAIL2020/rlfl/utils.py | 4923804ca7c3f8f7275cdb4a9a8cc3047c173528 | [
"Apache-2.0"
] | permissive | generalzgd/CAIL | f06d79acf42ac2188938c02087f7d07b9b43095c | 57529e64ee2f602324a500ff9bed660ddcde10bb | refs/heads/master | 2023-01-24T01:14:05.382525 | 2020-11-20T03:40:47 | 2020-11-20T03:40:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | """Utils tools.
Author: Yixu GAO yxgao19@fudan.edu.cn
"""
import logging
import os
from collections import OrderedDict
import torch
def get_path(path):
"""Create the path if it does not exist.
Args:
path: path to be used
Returns:
Existed path
"""
if not os.path.exists(path):
os.makedirs(path)
return path
def get_csv_logger(log_file_name,
title='',
log_format='%(message)s',
log_level=logging.INFO):
"""Get csv logger.
Args:
log_file_name: file name
title: first line in file
log_format: default: '%(message)s'
log_level: default: logging.INFO
Returns:
csv logger
"""
logger = logging.getLogger(log_file_name)
logger.setLevel(log_level)
file_handler = logging.FileHandler(log_file_name, 'w')
formatter = logging.Formatter(log_format)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.propagate = False
if title:
logger.info(title)
return logger
def load_torch_model(model, model_path):
"""Load state dict to model.
Args:
model: model to be loaded
model_path: state dict file path
Returns:
loaded model
"""
pretrained_model_dict = torch.load(model_path, map_location="cpu")
new_state_dict = OrderedDict()
for k, value in pretrained_model_dict.items():
# name = k[7:] # remove `module.`
new_state_dict[k] = value
model.load_state_dict(new_state_dict, strict=True)
return model
| [
"bangtech@sina.com"
] | bangtech@sina.com |
ee91d134d504e616ede6c544cdcee453df6d38dc | 9545652800884f0e54fe6595d8634c29ea4827a2 | /高级算法/leetCode_97_缺失的第一个正数.py | 3a17cc5a3fe47f855b2992c4e538bc986324c951 | [] | no_license | challeger/leetCode | 662d9f600a40fd8970568679656f6911a6fdfb05 | d75c35b6f8ab33c158de7fa977ab0b16dac4fc25 | refs/heads/master | 2023-01-13T07:34:42.464959 | 2020-11-13T02:40:31 | 2020-11-13T02:40:31 | 286,426,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | """
day: 2020-08-25
url: https://leetcode-cn.com/leetbook/read/top-interview-questions-hard/xwkftg/
题目名: 缺失的第一个正数
题目描述: 给你一个未排序的整数数组,请你找出其中没有出现的最小正整数
算法的时间复杂度应该为O(n),并且只能使用常数级别的额外空间
示例:
输入: [3,4,-1,1]
输出: 2
输入: [7,8,9,11,12]
输出: 1
思路:
1. 哈希
我们可以将数组作为一个哈希表,它的索引+1为键,它的 是否为正数 为值
遍历哈希表,第一个值为正数的键,就是我们要找的数.
所以我们可以分为三步:
N = len(n)
1.将数组中所有的非正数 赋值N+1(N+1是必定不在正确的数组中的)
2.根据数组的值,将这个值-1对应的格子,标记为负数
3.遍历数组,若当前格子的值是正数,那么当前格子的索引+1就是我们缺失的数字
2. 置换
对于一个数组[3, 2, 4, 1, 6]
正确的未缺失数组应该是[1, 2, 3, 4, 5]
也就是,该数组的第i-1个元素,它的值应该是i
所以我们第一次遍历数组,如果这个格子的值是有效的索引
把这个格子的值与它应该在的格子交换值,因为交换后当前格子
的值可能还可以置换,所以我们应该继续交换该格子..
若当前格子的值等于它要交换的格子的值,说明出现了重复变量,那就
直接选择不置换
然后我们第二次遍历,若当前的d[i] != i+1,那么i+1就是我们的缺失的
正数
"""
from typing import List
class Solution:
def firstMissingPositive(self, nums: List[int]) -> int:
# n = len(nums)
# # 将所有非正数标记为N+1
# for i in range(n):
# if nums[i] <= 0:
# nums[i] = n + 1
# # 将nums中所有在[1, n]范围内的数作为索引
# # 将对应的格子标记为负数
# for i in range(n):
# num = abs(nums[i])
# if num <= n:
# nums[num-1] = -abs(nums[num-1])
# # 第一个正数的索引+1,就是第一个未出现的正数
# for i in range(n):
# if nums[i] > 0:
# break
# return i + 1
n = len(nums)
for i in range(n):
# 判断这个数是否是有效的索引,然后将数字放到它应该在的位置
while 0 < nums[i] <= n and nums[nums[i]-1] != nums[i]:
nums[nums[i]-1], nums[i] = nums[i], nums[nums[i]-1]
for i in range(n):
if nums[i] != i+1:
return i + 1
return n + 1
if __name__ == "__main__":
test = [1, 2, 2]
s = Solution()
print(s.firstMissingPositive(test))
| [
"799613500@qq.com"
] | 799613500@qq.com |
ebed68d8bce26e2dcea621972a7d517e79bd3cca | 77e39afb2752ad63bbb775b256e2ed48fc50fde6 | /contents/views.py | 3fabf597a4a2cf036545d8639135be17de7b01bf | [] | no_license | soohyun-lee/Docker-python | f6b08eab6c0eec3efe37ba74b751e69aef153d38 | f318fc2df24a6983469c64e6ac15fd7ab0e202c2 | refs/heads/master | 2023-04-22T17:50:01.603450 | 2021-05-12T08:08:05 | 2021-05-12T08:08:05 | 366,639,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | from typing import List
from contents.models import Listup
import re, requests
import json
from django.shortcuts import render
from django.views import View
from django.http import JsonResponse
from .models import Listup
class Contents(View):
def post(self, request):
data = json.loads(request.body)
Listup.objects.create(
study_date = data['studya_date'],
title = data['title'],
study_contents = data['contents']
)
return JsonResponse({'message' : 'success'}, status=200)
def get(self, reuquest):
all_contents = Listup.object.all()
data = [{
'id' : content.id,
'date' : content.study_date,
'title' : content.title,
'study_contetns' : content.study_contents
}for content in all_contents]
return JsonResponse({'data' : data}, status=200)
def patch(self, request):
data = json.loads(request.body)
Listup.object.filter(id = data['id']).update(
'date' : data['data']
'title' : data['title']
'study_contetns' : data['study_contetns']
)
return JsonResponse({'message': 'success'}, status=200) | [
"soohyun527@gmail.com"
] | soohyun527@gmail.com |
9142155f37a0bf512415188dc621c7f8b1e37f45 | 1710a1785c45f4e9cf35486ee96d7d14d767c82a | /matrices.py | 3ae4ba36e94a4acc24ac67af1c1cb601a345af81 | [] | no_license | Th3Lourde/spaceGame | 2a73ae90dbb68d456fbbb23a3c244c1b740bc780 | 8e7c4e990d658fdc7f3dd21188d4493c09146fe5 | refs/heads/master | 2022-07-10T02:30:19.680893 | 2020-05-14T01:47:26 | 2020-05-14T01:47:26 | 263,761,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | import numpy
t = numpy.pi/36
u = 1
T = 0
Rotate = numpy.array([[numpy.cos(t),-numpy.sin(t),0], [numpy.sin(t), numpy.cos(t), 0],[0,0,1]])
Rotate_Neg = numpy.array([[numpy.cos(t),numpy.sin(t),0], [-numpy.sin(t), numpy.cos(t), 0],[0,0,1]])
Shift = numpy.array([[1,0, u*numpy.cos(T)], [0,1,u*numpy.sin(T)], [0,0,1]])
i = numpy.array([[0, 0],[0, 2],[1, 1]])
shift_towards = numpy.array([[1,0, i[0][0]],[0,1,i[1][0]], [0,0,1]])
shift_away = numpy.array([[1,0, i[0][0]],[0,1, i[1][0]], [0,0,1]])
i
def moveForwards(coord):
shift = numpy.array([[1,0, u*numpy.cos(T)], [0,1,u*numpy.sin(T)], [0,0,1]])
coord = shift@coord
return coord
def getPosition():
# (0,0), (0,2)
p1 = i[0][0]
pos = [(i[0][0] + i[1][0])/2, (i[0][1] + i[1][1])/2]
print("position is: {}".format(pos))
getPosition()
i = moveForwards(i)
print(i) | [
"th3sylvia.lourde@gmail.com"
] | th3sylvia.lourde@gmail.com |
12ea2600b5eb5ac6f1f983418ca1c299d535df57 | e8ab6a8108801dfedb694557626fd847651564e2 | /Dragon/python/dragon/__init__.py | 01bcb0e88bd4653deaa63c50b2dc5a69b6aac13b | [
"BSD-2-Clause"
] | permissive | Spark001/Dragon-1 | 87f722bcb0feaec7fad29d923c60681cf9584267 | 310bcb5f6d9a6623bb58ed3d1ad02d1f440da474 | refs/heads/master | 2020-03-09T22:52:08.171500 | 2018-04-01T10:13:50 | 2018-04-01T10:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # ------------------------------------------------------------
# Copyright (c) 2017-preseent, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
# config
from dragon.config import *
import dragon.config as config
# core
from dragon.core.tensor import Tensor
import dragon.core.workspace as workspace
# ops
from dragon.ops import *
# updaters
from dragon.updaters import *
# theano utilities
from dragon.vm.theano.compile.function import function as function
from dragon.vm.theano.tensor import grad as grad
# scope
from dragon.core.scope import TensorScope as name_scope
from dragon.core.scope import PhaseScope as phase_scope
from dragon.core.scope import DeviceScope as device_scope
| [
"ting.pan@seetatech.com"
] | ting.pan@seetatech.com |
7d1653be04f8139a72d11e847ec2123d319b7518 | 9d74cbd676e629f8acdc68a4bac3dea0a98b9776 | /yc213/825.py | 201024344db09ebb4a8336c0ddfbc2f1d28dbd79 | [
"MIT"
] | permissive | c-yan/yukicoder | 01166de35e8059eaa8e3587456bba52f35bd0e44 | dcfd89b0a03759156dcea8c2e61a7705543dc0d4 | refs/heads/master | 2022-03-20T06:50:48.225922 | 2022-02-25T15:48:50 | 2022-02-25T15:48:50 | 237,735,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | A, B, C = map(int, input().split())
if B == 0:
if C >= A or A == 0:
print('Impossible')
else:
print(A - C)
else:
if C >= A + B + 10:
print('Impossible')
elif C >= A + B:
print(A + B + 9 - C)
else:
while A >= 10 and (A - 10) + (B + 1) > C:
B += 1
A -= 10
if A + B - C <= A:
print(A + B - C)
else:
print((B - C) * 10 + A)
| [
"c-yan@users.noreply.github.com"
] | c-yan@users.noreply.github.com |
bf2719d65f4f33e7e5cd99ef493bae50a8b389f5 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/R2/benchmark/startPyquil191.py | e48239dddba4fcb815b00f48785364fe8b897f0f | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | # qubit number=2
# total number=12
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += X(1) # number=2
prog += H(1) # number=9
prog += CZ(0,1) # number=10
prog += H(1) # number=11
prog += CNOT(0,1) # number=7
prog += X(1) # number=8
prog += X(1) # number=5
prog += CNOT(0,1) # number=6
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil191.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
5c547bb0ff634b09ba993d4ef3fdca1aa94090cf | 195b8d12796872c05d539aa9283fc3f407b8d8b5 | /python-cinderclient/cinderclient/v2/__init__.py | 5408cd3bd67805693815186f0c080b13e78a9c61 | [
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | rvbelapure/openstack-nova-sched | afaa5928da3a8430b64bc23aedb251bae0e7d3ef | 325da0e08979d79b7470d7506ced1b4210e2b696 | refs/heads/master | 2021-01-17T05:28:44.474242 | 2013-04-20T21:18:35 | 2013-04-20T21:18:35 | 9,082,500 | 0 | 1 | null | 2021-09-07T08:33:18 | 2013-03-28T17:30:46 | Python | UTF-8 | Python | false | false | 679 | py | # Copyright (c) 2013 OpenStack, LLC.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v2.client import Client
| [
"owlpostarun@gmail.com"
] | owlpostarun@gmail.com |
d910418d27bae4339185576b152e789a949bc49d | 9b07e3fc9436f876a426bf5b02d07733de10d775 | /tests/bind_tests/boolean_tests/operation_tests/test_process_events.py | 44fd2d01a13dee2e67120d5b8ff44fef1ed96b03 | [
"MIT"
] | permissive | lycantropos/martinez | 019e859ec513cc7ad38901e22dff8e897615776c | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | refs/heads/master | 2021-07-10T04:19:23.372706 | 2020-11-28T00:58:47 | 2020-11-28T00:58:47 | 224,819,004 | 7 | 1 | MIT | 2020-12-20T15:47:17 | 2019-11-29T09:16:26 | Python | UTF-8 | Python | false | false | 521 | py | from typing import (List,
Tuple)
from hypothesis import given
from tests.bind_tests.hints import (BoundOperation,
BoundSweepEvent)
from . import strategies
@given(strategies.operations_with_events_lists)
def test_basic(operation_with_events: Tuple[BoundOperation,
List[BoundSweepEvent]]) -> None:
operation, events = operation_with_events
result = operation.process_events(events)
assert result is None
| [
"azatibrakov@gmail.com"
] | azatibrakov@gmail.com |
f6820bfb35ad7611866d9dcf33d2dc416122acd3 | 3206db2c6c3f9fd8f4fcd6d400b9e345a0cbd342 | /qtranscripter/mainwindow.py | 6654950b56550a900a911086bbec1eedfebbcd9f | [] | no_license | mugwort-rc/qtranscripter | dc27cca4a717545f519c4c1003cb0742bbc114f6 | aa355b37b81c6ce6975db6cd402708f6f00d1f0e | refs/heads/master | 2021-01-01T10:14:07.092822 | 2015-10-08T09:02:31 | 2015-10-08T09:02:31 | 31,541,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,762 | py | # -*- coding: utf-8 -*-
import os
import numpy as np
import scipy.io.wavfile
from PyQt4.Qt import *
from PyQt4.Qwt5 import *
from PyQt4.phonon import Phonon
from ui_mainwindow import Ui_MainWindow
from persondialog import PersonDialog
from models import TimePointModel
from utils import ClickRelease
from utils import TimePointHighlighter
CACHE_SIZE = 1000
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.persons = PersonDialog(self)
self.lastOpenDir = ""
self.WAVE_FILTER = self.tr('Wave (*.wav)')
self.initUi()
def initUi(self):
self.plot_width = 3.0 # display sec
self.cache = None
self.cache_width = None
self.cache_start = 0
self.dy = None
self.ui.qwtPlot.setAxisScale(QwtPlot.yLeft, -1, 1)
self.ui.qwtPlot.setAxisScale(QwtPlot.xBottom,
-self.plot_width,
self.plot_width)
self.c1 = QwtPlotCurve()
self.c2 = QwtPlotCurve()
self.c2.setPen(QColor(Qt.red))
self.c1.attach(self.ui.qwtPlot)
self.c2.attach(self.ui.qwtPlot)
self.obj = Phonon.MediaObject()
self.dev = Phonon.AudioOutput()
Phonon.createPath(self.obj, self.dev)
self.ui.seekSlider.setMediaObject(self.obj)
self.obj.finished.connect(self.media_finished)
self.obj.tick.connect(self.ticked)
# Time picker
self.picker = QwtPlotPicker(QwtPlot.xBottom,
QwtPlot.yLeft,
QwtPicker.PointSelection,
QwtPlotPicker.CrossRubberBand,
QwtPicker.AlwaysOn,
self.ui.qwtPlot.canvas())
self.picker.selected.connect(self.selected)
# hook click of seekslider
self.mouseRelease = ClickRelease()
self.ui.seekSlider.installEventFilter(self.mouseRelease)
self.mouseRelease.released.connect(self.released)
self.timePointModel = TimePointModel(self)
self.ui.listView.setModel(self.timePointModel)
self.highlighter = TimePointHighlighter(self.ui.textEdit.document())
self.ui.splitter.setStretchFactor(0, 1)
self.ui.splitter.setStretchFactor(1, 4)
self.timer = QTimer()
self.timer.timeout.connect(self.replot)
if os.name == 'posix':
self.time = QTime()
def closeEvent(self, event):
ret = QMessageBox.information(self, self.tr("Close"),
self.tr("Do you want to exit?"),
QMessageBox.Ok | QMessageBox.Cancel,
QMessageBox.Cancel)
if ret == QMessageBox.Cancel:
event.ignore()
else:
event.accept()
def getOpenFileName(self, filter, caption='', dir=None):
if dir is None:
dir = self.lastOpenDir
return QFileDialog.getOpenFileName(self, caption, dir, filter)
def getSaveFileName(self, filter, caption='', dir=None):
if dir is None:
dir = self.lastOpenDir
return QFileDialog.getSaveFileName(self, caption, dir, filter)
def setLastOpenDirectory(self, path):
info = QFileInfo(path)
if info.isDir():
self.lastOpenDir = path
else:
self.lastOpenDir = info.dir().path()
def plot(self, start):
start -= start % 2
cnt = int(start * (self.rate/1000.0))
self.ui.statusbar.showMessage(self.tr('%1 / %2')
.arg(cnt/1000.0)
.arg(self.nframe/1000.0))
wid = self.plot_width * self.rate
end = cnt+wid
bef = cnt-wid
self.c2.setData([cnt*self.interval]*2, [-1, 1])
dx = np.arange(max(bef, 0), min(end, self.nframe)) * self.interval
if self.cache is None or self.cache_start > cnt or (self.cache_start+self.cache_width) < end:
self.cache = self.wav[max(bef, 0):min(bef+self.cache_width, self.nframe)]
if isinstance(self.cache[0], np.ndarray):
ch = len(self.cache[0])
self.cache = self.cache.reshape(len(self.cache)*ch)[::ch]
self.cache = self.cache / 32768.0
self.cache_start = cnt
bef -= self.cache_start
end -= self.cache_start
dy = None
if bef < 0:
dy = self.cache[:end]
elif bef >= 0 and end < len(self.cache)-1:
dy = self.cache[bef:end]
else:
dy = self.cache[bef:]
self.c1.setData(dx, dy)
self.ui.qwtPlot.setAxisScale(QwtPlot.xBottom,
bef*self.interval,
end*self.interval)
self.ui.qwtPlot.replot()
def update_pause_stop(self):
self.ui.actionStartPause.setText(self.tr('Start'))
@pyqtSlot(QPointF)
def selected(self, point):
if self.obj.state() not in [Phonon.PlayingState,
Phonon.PausedState,
Phonon.BufferingState]:
return
time = point.x() * 1000
self.obj.seek(time)
self.plot(int(time))
@pyqtSlot()
def released(self):
if self.obj.state() not in [Phonon.PlayingState,
Phonon.PausedState,
Phonon.BufferingState]:
return
self.plot(self.obj.currentTime())
@pyqtSlot(int)
def ticked(self, time):
if os.name == 'posix':
self.time.restart()
@pyqtSlot()
def replot(self):
if self.obj.state() != Phonon.PlayingState:
return
if os.name == 'posix':
self.plot(self.obj.currentTime()+self.time.elapsed())
else:
self.plot(self.obj.currentTime())
@pyqtSlot()
def on_textEdit_textChanged(self):
reg = QRegExp(r'<(\d{2}:\d{2}:\d{2}(.\d+)?)>')
text = self.ui.textEdit.toPlainText()
points = []
index = reg.indexIn(text)
while index != -1:
points.append((reg.cap(1), index))
index = reg.indexIn(text, index+reg.matchedLength())
self.timePointModel.setPoints(points)
@pyqtSlot(QModelIndex)
def on_listView_doubleClicked(self, index):
data = index.data().toString()
value = index.data(Qt.UserRole)
assert not value.isNull()
text,pos = value.toPyObject()
cursor = self.ui.textEdit.textCursor()
cursor.setPosition(pos)
self.ui.textEdit.setTextCursor(cursor)
self.ui.textEdit.setFocus()
if self.obj.state() != Phonon.PausedState:
return
reg = QRegExp(r'(\d{2}):(\d{2}):(\d{2})(.(\d+))?')
if reg.exactMatch(data):
hour = int(reg.cap(1))
minute = int(reg.cap(2))
second = int(reg.cap(3))
milsec = 0
if reg.cap(4):
milsec = int(reg.cap(5))
time = hour * 3600000 + minute * 60000 + second * 1000 + milsec
time -= self.baseMsecs()
if time < 0:
time = 0
self.obj.seek(time)
self.plot(time)
@pyqtSlot()
def on_actionWaveOpen_triggered(self):
filepath = self.getOpenFileName(self.WAVE_FILTER)
if filepath.isEmpty():
return
self.setLastOpenDirectory(filepath)
filepath = unicode(filepath)
# get media
self.rate, self.wav = scipy.io.wavfile.read(filepath, mmap=True)
self.nframe = len(self.wav)
self.interval = 1.0 / self.rate
self.cache_width = self.rate * self.plot_width * CACHE_SIZE
self.setWindowTitle(self.tr("%1").arg(QFileInfo(filepath).fileName()))
# set media
self.obj.setCurrentSource(Phonon.MediaSource(filepath))
self.plot(0)
@pyqtSlot()
def on_actionStartPause_triggered(self):
if ( self.obj.state() == Phonon.StoppedState or
self.obj.state() == Phonon.PausedState ):
self.obj.play()
self.timer.start(16)
if os.name == 'posix':
self.time.start()
self.ui.actionStartPause.setText(self.tr('Pause'))
elif self.obj.state() == Phonon.PlayingState:
self.obj.pause()
self.timer.stop()
self.update_pause_stop()
elif self.obj.state() == Phonon.BufferingState:
pass
else:
print 'Unknown state:', self.obj.state()
@pyqtSlot()
def on_actionStop_triggered(self):
self.obj.stop()
self.update_pause_stop()
@pyqtSlot()
def media_finished(self):
self.update_pause_stop()
@pyqtSlot()
def on_actionInsertTimestamp_triggered(self):
time = self.obj.currentTime() + self.baseMsecs()
milsec = time % 1000
second = time / 1000 % 60
minute = time / 1000 / 60 % 60
hour = time / 1000 / 60 / 60
text = '<{:02d}:{:02d}:{:02d}.{}>'.format(hour, minute, second, milsec)
self.ui.textEdit.textCursor().insertText(text)
def baseMsecs(self):
return QTime(0, 0).msecsTo(self.ui.timeEdit.time())
@pyqtSlot()
def on_actionBack_triggered(self):
self.back(1000)
@pyqtSlot()
def on_actionForward_triggered(self):
self.forward(1000)
def back(self, back):
time = self.obj.currentTime() - back
self.obj.seek(time)
self.plot(time)
def forward(self, forward):
time = self.obj.currentTime() + forward
self.obj.seek(time)
self.plot(time)
@pyqtSlot()
def on_actionCursorLeft_triggered(self):
self.moveCursor(QTextCursor.Left)
@pyqtSlot()
def on_actionCursorRight_triggered(self):
self.moveCursor(QTextCursor.Right)
@pyqtSlot()
def on_actionCursorUp_triggered(self):
self.moveCursor(QTextCursor.Up)
@pyqtSlot()
def on_actionCursorDown_triggered(self):
self.moveCursor(QTextCursor.Down)
@pyqtSlot()
def on_actionSelectLeft_triggered(self):
self.moveCursor(QTextCursor.Left, QTextCursor.KeepAnchor)
@pyqtSlot()
def on_actionSelectRight_triggered(self):
self.moveCursor(QTextCursor.Right, QTextCursor.KeepAnchor)
@pyqtSlot()
def on_actionSelectUp_triggered(self):
self.moveCursor(QTextCursor.Up, QTextCursor.KeepAnchor)
@pyqtSlot()
def on_actionSelectDown_triggered(self):
self.moveCursor(QTextCursor.Down, QTextCursor.KeepAnchor)
def moveCursor(self, move, anchor=QTextCursor.MoveAnchor):
cursor = self.ui.textEdit.textCursor()
cursor.movePosition(move, anchor)
self.ui.textEdit.setTextCursor(cursor)
@pyqtSlot()
def on_actionTextCopy_triggered(self):
text = self.ui.textEdit.toPlainText()
QApplication.clipboard().setText(text)
@pyqtSlot()
def on_actionTextPaste_triggered(self):
text = QApplication.clipboard().text()
self.ui.textEdit.insertPlainText(text)
@pyqtSlot()
def on_actionInsertUnprintable_triggered(self):
unprintable = u"\u25a0"
self.ui.textEdit.insertPlainText(unprintable)
@pyqtSlot()
def on_actionInsertPerson_triggered(self):
ret = self.persons.exec_()
if ret != QDialog.Accepted:
return
person = self.persons.person()
self.ui.textEdit.insertPlainText(person)
| [
"mugwort.rc@gmail.com"
] | mugwort.rc@gmail.com |
ec67db41e7d10ffa59c7feac6008fe09eeab37f3 | b0f101d115d30b59e6ab3887e782e08e57d0fb52 | /cbow/main.py | fa1848f55df982f9208377e5cf6cca13a2dc004e | [] | no_license | trigrass2/torch_light | 9c15c75aeb290fb54b2f247b0a0daec428415414 | 17f54f50309c50749711d7483f1563756e8d8afd | refs/heads/master | 2021-05-07T18:55:07.337457 | 2017-10-29T06:29:26 | 2017-10-29T06:29:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
CONTEXT_SIZE = 2 # 2 words to the left, 2 to the right
EMBEDDING_DIM = 64
raw_text = """We are about to study the idea of a computational process.
Computational processes are abstract beings that inhabit computers.
As they evolve, processes manipulate other abstract things called data.
The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells.""".split()
vocab = set(raw_text)
vocab_size = len(vocab)
word_to_ix = {word: i for i, word in enumerate(vocab)}
data = []
for i in range(2, len(raw_text) - 2):
context = [raw_text[i - 2], raw_text[i - 1],
raw_text[i + 1], raw_text[i + 2]]
target = raw_text[i]
data.append((context, target))
class CBOW(nn.Module):
def __init__(self, vocab_size, ebd_size, cont_size):
super(CBOW, self).__init__()
self.ebd = nn.Embedding(vocab_size, ebd_size)
self.ebd.weight.data.uniform_(-0.1, 0.1)
self.lr1 = nn.Linear(ebd_size*cont_size*2, 128)
self.lr2 = nn.Linear(128, vocab_size)
def forward(self, inputs):
out = self.ebd(inputs).view(1, -1)
out = F.relu(self.lr1(out))
out = self.lr2(out)
out = F.log_softmax(out)
return out
def make_context_vector(context, word_to_ix):
idxs = [word_to_ix[w] for w in context]
tensor = torch.LongTensor(idxs)
return autograd.Variable(tensor)
loss_function = nn.NLLLoss()
model = CBOW(vocab_size, EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.Adam(model.parameters(), lr=0.001)
for epoch in range(1, 41):
total_loss = 0.0
for context, target in data:
v_ctx = make_context_vector(context, word_to_ix)
v_tar = autograd.Variable(torch.LongTensor([word_to_ix[target]]))
model.zero_grad()
out = model(v_ctx)
loss = loss_function(out, v_tar)
total_loss += loss.data
loss.backward()
optimizer.step()
print("end of epoch {} | loss {:2.3f}".format(epoch, total_loss[0]))
| [
"422618856@qq.com"
] | 422618856@qq.com |
6c07e3d6f5128ce364ddac46fea5a3d9630b40c1 | 67a48a7a2db56247fdd84474efa35124565fd8b9 | /Codeforces/1559/1559c.py | 163287d73c0a3162fb31da550314ccaa6c48cae6 | [] | no_license | qazz625/Competitive-Programming-Codes | e3de31f9276f84e919a6017b2cf781c946809862 | e5df9cdc4714d78b7b6a7535ed7a45e07d3781c3 | refs/heads/master | 2022-08-30T07:57:55.172867 | 2022-08-10T08:02:07 | 2022-08-10T08:02:07 | 242,182,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | t = int(input())
for _ in range(t):
n = int(input())
arr = [int(j) for j in input().split()]
if arr[-1] == 0:
ans = [i for i in range(1, n+2)]
print(*ans)
continue
if arr[0] == 1:
ans = [n+1] + [i for i in range(1, n+1)]
print(*ans)
continue
ind = -1
for i in range(n-1):
if arr[i] == 0 and arr[i+1] == 1:
ind = i
break
# print(arr)
assert ind != -1
# print(ind)
ans = []
for i in range(ind+1):
ans += [i]
ans += [n]
for i in range(ind+1, n):
ans += [i]
for i in range(len(ans)):
ans[i] += 1
print(*ans) | [
"arun49804@gmail.com"
] | arun49804@gmail.com |
82d446eafb24c198fd5c76a9edf92a8e22cebb63 | 37cde98734ebe6cc99a390c8ae2f049346ffbe88 | /o_izuchenie_peredelka_5_borb s proigr_rab_mod_stavok_2.py | 64324c64cd0499c981c7faa769fbbd7c2432c8be | [] | no_license | Dimon0014/Zapis_chtenie_03_12 | 4d36efaf2667263e163cec1dd5d64c9c8f3f0422 | d32e9eeb968ff9f6c9339a8e33219f166f757687 | refs/heads/master | 2021-09-09T16:40:39.751513 | 2018-03-18T05:21:33 | 2018-03-18T05:21:33 | 113,576,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,903 | py | import json
import random
from datetime import datetime, timedelta
from time import clock
# ------------- в начале обработка единичных символов
import math
def last_last_seen_steps_of_simv_01(dict, key): # альтернатива "last_next_seen_all_steps_1"
result = dict[key][0]
# print('функия next_seen_steps =',result)
return result
def dob_next_seen_1(dict, key, steps): # функция добавления/инициализация шагов с последнего появления
if (key) in dict: # проверка на наличие значений
last_seen = last_last_seen_steps_of_simv_01(dict, key)
# print('steps-last_time seen_in_key =', last_seen)
# print('печатает dict[key][1][0]', dict[key][1][0])
dict[key][1].append(last_seen)
dict[key][2] = len(dict[key][1]) # сколько раз уже выпадала
else: # инициализация
dict.update({(key): [0, [steps], 1, key, steps]}) # инициализация
# print('key in function =', key)
def add_step_to_all_1(dict):
for item in dict:
dict[item][0] = dict[item][0] + 1
dict[item][4] = dict[item][4] + 1
def more_of_1(dict):
spisok = []
a = 0
s = 0
spisok2 = []
for item in dict:
if dict[item][2] == 1:
if dict[item][4] > 100:
spisok.append(dict[item])
if len(spisok) > 0:
a = dict[item][3]
s = dict[item][4]
# print('spisok', spisok)
spisok2.append(a)
spisok2.append(s)
# print('spisok2', spisok2)
break
# print("номер", dict[item][3], "выпал", dict[item][2], "раз(а)--шаг", dict[item][4])
return spisok2
def podchet_simv(slist): # подсчет сколько раз встречаются символы в строке(списке)
d = dict()
for c in slist:
if c not in d:
d[c] = 1
else:
d[c] += 1
return d
# отсюда функции патернов-------------------------------
#
# сначала из двух цифр
# key1 -- значение выпавшего числа\\
# key1step(key,dictEd) - функция находит значение интервала для выпавшего числа в словаре dictEd \\
# key1step - значение интервала у выпавшего сейчас числа\\
# key2step - значение интервала с предыдущего шага\\
# key1step, key2step -- паттерн который нужно сравнить \\
# dictEd -- словарь одиночных символов, откуда нужно взять значение step(dict[key][0])\\
# listAll_inter -- список всех интервалов друг за другом -- сплошняком
# dict2Glob -- словарь интервалов двоек глобал\\
# dict2Lok -- словарь интервалов двоек локал\\
#
#
# def intervals_of_02(key1_step,key2step):
def key01step(key, dictEd):
result = dictEd[(key)][0]
dictEd[key][0] = 0 # после того как достанет обнуляет
# print('из функции достающей интервалы result',result )
# print('из функции достающей интервалы dictEd[(key)][0]', dictEd[(key)][0])
return result
# key01step = key01step(key,dictEd) # вычисляем когда последний раз был виден выпавший номер
def intervals_of_2(key2step, key1step, dict2Glob,
steps_sesia): # функция добавления интервалов как для глобал так и для локал
if (key2step, key1step) in dict2Glob: # проверка dict2Glob на наличие ключа, если нет то инициализация
last_seen = dict2Glob[(key2step, key1step)][0] # переменную последний раз видели загоняем в буфер
dict2Glob[(key2step, key1step)][0] = 0 # переменную последний раз видели обнуляем
dict2Glob[(key2step, key1step)][1].append(last_seen) # добавляем значение к списку последний раз видели
count = len(dict2Glob[(key2step, key1step)][1]) # переменную переменную раз видели загоняем в буфер
dict2Glob[(key2step, key1step)][2] = count
# dict2Glob[(key2step,key1step)][3] = steps_sesia # количество шагов в сесии- нужно для предсказанияkey1step # первый символ ключа
# dict2Glob[(key2step,key1step)][4] = key2step # первый символ ключа
# dict2Glob[(key2step,key1step)][5] = key1step # второй символ ключа
else:
dict2Glob.update({(key2step, key1step): [0, [1], 1, key2step, key1step, steps_sesia]}) # инициализация
def intervals_of_3(key3step, key2step, key1step, dict3Glob,
steps_sesia): # функция добавления интервалов как для глобал так и для локал
if (key3step, key2step, key1step) in dict3Glob: # проверка dict2Glob на пустоту, если пусто то инициализация
last_seen = dict3Glob[(key3step, key2step, key1step)][0] # переменную последний раз видели загоняем в буфер
dict3Glob[(key3step, key2step, key1step)][0] = 0 # переменную последний раз видели обнуляем
dict3Glob[(key3step, key2step, key1step)][1].append(
last_seen) # добавляем значение к списку последний раз видели
count = len(
dict3Glob[(key3step, key2step, key1step)][1]) # переменную переменную раз видели загоняем в буфер
dict3Glob[(key3step, key2step, key1step)][2] = count
# dict3Glob[(key3step, key2step,key1step)][3] = steps_sesia #количество шагов в сесии- нужно для предсказания
# dict3Glob[(key3step, key2step,key1step)][4] = key3step # первый символ ключа
# dict3Glob[(key3step, key2step,key1step)][5] = key2step # второй символ ключа
# dict3Glob[(key3step, key2step,key1step)][6] = key1step # третий символ ключа
print(' обновление словаря')
else:
dict3Glob.update({(key3step, key2step, key1step): [0, [1], 1, key3step, key2step, key1step,
steps_sesia]}) # инициализация
print(' создание словаря')
def intervals_of_all(key1step, listAll_inter): # список всех подряд интервалов
listAll_inter.append(key1step)
# функция добавляющая шаги к словарям
def add_step_to_all_intervals_of_2(dict_interv2, key2step, key1step): # а вот функция которая добовляет всем шаги
# key=(key2step, key1step)
for item in dict_interv2:
# if item != key:
dict_interv2[item][0] = dict_interv2[item][0] + 1
dict_interv2[item][3] = dict_interv2[item][3] + 1
print('dict_interv2[item][0]', dict_interv2[item][0])
print('dict_interv2[item][3]', dict_interv2[item][3])
# for item in dict_interv2:
# if item == key:
# dict[item][3] = dict[item][0] + 1
# def intervals_of_2(key1_step,key2step, dictEd, dict2,dict2lok,listAll_inter):
# # перебор всех значений словаря по ключу в другой функции, эта функция сравнивает и добавляет
#
# if len(dict2[(key1)][0]) != 0: # проверка на наличие значений, проверяется длинна словаря- умное решение
# # times_seen = len(dict[key][1])
# listAll_inter.append(0) # добавление 0-левого интервала в общий список
# # в принципе интервал равный нулю может быть только в начале, как и интервал [1,0], [2,1] в
# # общем первые значения интервалов в мусор
# listAll_inter.append(dict[key1][0]) # добавление первого значимого интервала в общий список
# key2step=key1
# dict2[(key1)][0]=1
# print('первая запись в списке интервалов', key1)
# else:
# listAll_inter.append(dict[key1][0])
# dict[(key)][0]
# for item in dict:
# if item == key1:
# dict2[item][0] = dict[item][0] + 1
# собственно тело программы начинается здесь----------------------------------------------------------------
# steps = 225 типа имитатор счетчика ходов
# значение словаря еденичных символо на текущий момент
# проверочный словарь d = {(36):[ 1,[1, 2], 33, 22,2],(35):[ 11,[101, 102], 31, 22,2],(34):[ 13,[103, 106], 71, 22,2]}
# типа число полученное от распознователя символов key = (35)
# востановление всех ходов
def postrocno(spisok, name):
i = 0
for item in spisok:
i = i + 1
print(steps, name, 'стока', i, item)
def stepsbig(interval, porog, steps_big):
steps_big
if interval < porog:
steps_big = steps_big + 1
return steps_big
def podchet_interv_odd(slovar):
obshie = 0
rezult = 0
for item in slovar:
if (slovar[item][3] % 2) != 0:
if (slovar[item][0]) < 1000:
rezult = obshie + slovar[item][2]
return rezult
def podchet_interv_iven(slovar):
obshie = 0
rezult = 0
for item in slovar:
if (slovar[item][3] % 2) == 0:
if (slovar[item][0]) < 1000:
rezult = obshie + slovar[item][2]
return rezult
def nahogd_big_interv(slovar):
rezult = 0
big = 0
for item in slovar:
if (slovar[item][0]) > big:
rezult = slovar[item][3]
slovar[item][0] = big
return rezult
def pre1_predskazatel_1(key, list_of200, steps_of_predscazan):
keys = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36]
list = list_of200
for item in keys:
if item == key:
list.append(key)
if len(list) > steps_of_predscazan:
list.pop(0)
return list
def pre2_predskazatel_1(list_of200):
keys = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36]
list = list_of200
list_par = []
for item in keys:
list_par.append([item, 0])
for it in list:
if it == item:
list_par[item][1] = list_par[item][1] + 1
return list_par
# for it in keys:
# for item in list:
# if item not in d:
# list_par.append([item])
# else:
# d[c] += 1
def pre3_predskazatel_1(list_sort):
list_sort.sort(key=lambda item: item[1])
list_sort.reverse()
# nolik = list_sort[0][0]
# odin = list_sort[1][0]
# dva = list_sort[2][0]
# #tri = list_sort[3][0]
# result =list_sort[0][0] # random.choice([nolik,odin,dva] )
if list_sort[0][1] > 1:
result = list_sort[0][0] # random.choice([nolik,odin,dva] )
else:
result = 99
return result
def pre3_predskazatel_1_all(list_sort):
list_sort.sort(key=lambda item: item[1])
list_sort.reverse()
# nolik = list_sort[0][0]
# odin = list_sort[1][0]
# dva = list_sort[2][0]
# #tri = list_sort[3][0]
result = list_sort # random.choice([nolik,odin,dva] )
return result
def proverka_predskaza_1(key, list_of_win_proverki, winer_1):
if key == list_of_win_proverki[2]:
list_of_win_proverki[
1] = 1 # включение происходит в двух случаях при выигрыше и при превышении количества 54 шагов
result = list_of_win_proverki # первое значение - количество шагоd
# второе значени флаг сброса продолжения проверки
# третье значение предсказаное число
else:
list_of_win_proverki[1] = 0
list_of_win_proverki[0] = list_of_win_proverki[
0] + 1 # так как else наступает и в случае (winer_1 == 99) - когда
# шагов нет, они прибавляются то ниже при (winer_1 == 99)
# эти шаги вычитаются
result = list_of_win_proverki
if winer_1 == 99:
list_of_win_proverki[0] = list_of_win_proverki[0] - 1
result = list_of_win_proverki
return result
def pre1_predskazatel_2(key, list_of200, steps_of_predscazan):
keys = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 34, 35, 36]
list = list_of200
for item in keys:
if item == key:
list.append(key)
if len(list) > steps_of_predscazan:
list.pop(0)
return list
def pre2_predskazatel_2(list_of200):
keys = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 34, 35, 36]
list = list_of200
list_par = []
for item in keys:
list_par.append([item, 0])
for it in list:
if it == item:
list_par[item][1] = list_par[item][1] + 1
return list_par
# for it in keys:
# for item in list:
# if item not in d:
# list_par.append([item])
# else:
# d[c] += 1
def pre3_predskazatel_2(list_sort):
list_sort.sort(key=lambda item: item[1])
list_sort.reverse()
# nolik = list_sort[0][0]
# odin = list_sort[1][0]
# dva = list_sort[2][0]
# #tri = list_sort[3][0]
result = list_sort[0][0] # random.choice([nolik,odin,dva] )
return result
def proverka_predskaza_2(key, list_of_win_proverki):
if key == list_of_win_proverki[2]:
list_of_win_proverki[1] = 1
result = list_of_win_proverki # первое значение - количество шагоd
# второе значени флаг сброса продолжения проверки
# третье значение предсказаное число
else:
list_of_win_proverki[1] = 0
list_of_win_proverki[0] = list_of_win_proverki[0] + 1
result = list_of_win_proverki
return result
def podchet_simv(slist): # подсчет сколько раз встречаются символы в строке(списке)
d = dict()
for c in slist:
if c not in d:
d[c] = 1
else:
d[c] += 1
return d
def podchet_balansa(spisok):
pribyl = 0
for item in spisok:
if (item > 0) and (item < 36):
pribyl = pribyl + 0.35 - (item*0.01)
# if (item < 55) and (item >36):
# pribyl = pribyl + ((72-36) - (item-36)*2)
if (item > 35) and (item < 54):
pribyl = pribyl + 0.35*2 - (item * 0.01)*2
if (item > 53) and (item < 66):
pribyl = pribyl + 0.35 * 3 - (item * 0.01) * 3
if (item > 65) and (item < 75):
pribyl = pribyl + 0.35 * 4 - (item * 0.01) * 4
if (item > 74) and (item < 82):
pribyl = pribyl + 0.35 * 5 - (item * 0.01) * 5
if (item > 81) and (item < 88):
pribyl = pribyl + 0.35 * 6 - (item * 0.01) * 6
if (item > 87) and (item < 93):
pribyl = pribyl + 0.35 * 7 - (item * 0.01) * 7
if (item > 92) and (item < 97):
pribyl = pribyl + 0.35 * 8 - (item * 0.01) * 8
if (item > 96) and (item < 101):
pribyl = pribyl + 0.35 * 9 - (item * 0.01) * 9
if (item > 100) and (item < 105):
pribyl = pribyl + 0.35 * 10 - (item * 0.01) * 10
if (item > 104) and (item < 108):
pribyl = pribyl + 0.35 * 11 - (item * 0.01) * 11
if (item > 107) and (item < 111):
pribyl = pribyl + 0.35 * 12 - (item * 0.01) * 12
if (item > 110) and (item < 114):
pribyl = pribyl + 0.35 * 13 - (item * 0.01) * 13
if (item > 113) and (item < 116):
pribyl = pribyl + 0.35 * 14 - (item * 0.01) * 14
if (item > 115) and (item < 119):
pribyl = pribyl + 0.35 * 15 - (item * 0.01) * 15
if item == 119:
pribyl = pribyl -5.54
return pribyl
rasnica2 = 0
ik = 0
vig = 0
prg = 0
chag = 0
nol = 0
real_pribyl = 0
pribyl2 = 0
neuch = ''
neuch2 = 0
i = 0
next_nol = 0
uchet_intervala = 0
for i in range(222, 1286): # while (ik < 1): # количество файлов
# ik = ik + 1
# file_obj = open('200cikl_ochh.txt', 'w')
# file_obj.close()
# file_obj = open('200cikl_ochh.txt', 'a')
# for i in range(200+1): # количество ходов в файле
# chislo = random.randint(0, 36) # генерируем число
# file_obj.write(str(chislo) + '\n')
#
# file_obj.close()
naime_file = str(i) + 'cikl_och.txt'
viborka = []
file_obj = open(naime_file)
data_list = file_obj.readlines()
for line in data_list:
viborka.append(int(line))
# объявление всех переменных-----------------------------------------------------------------------------------
dic_ed = {} # болванка под словарь едениц
# -----------------------------------------------------------------------------------
key = 0
steps_sesia = 1
key1 = key
steps = 0
# print("выборка",len(viborka))
chet = 0
nechet = 0
list_of200_1 = []
list_par_of200_1 = []
list_of_win200_1 = []
winer_1 = 0
supwiner = 0
ste_ps_1 = 0
steps_of_win_1 = 0
# list_of_win_proverki_1 = [0,0,0]
list_of_win_proverki_1 = [0, 0, 0, 0, 0, 0, 0, 1] # первая цифра- подсчет шагов до выигрыша,
# вторая - активное ли предсказание, третья предсказанное число, четвертое перескок,
# пятое прибыль, шестое убыль,
# седьмое номер игрового цикла.
# next_nol = viborka[-1]
# list_of_win_proverki_1[2] = next_nol
# if list_of_win_proverki_1[2]<36:
# list_of_win_proverki_1[2] = next_nol+1
# print('nachalo cikla--------------------------------------------------------------------------------')
# print('viborka[-1]',viborka[-1])
# print('list_of_win_proverki_1[3]', list_of_win_proverki_1[3])
steps_to_win_1 = 0
list_of_steps_toWin_1 = []
list_of_all_Win_1 = []
list_of_all_Win_1_and_steps = []
list_of_win_and_steps = []
list_of_winSteps_and_steps = []
urezanuy_spisok = []
best_chisla = []
chislo_stavok = 0
chislo_levyh_stavok = 0
razreshenie_na_stavku = False
sum_of_stavok = 0
sum_of_win = 0
now_name = datetime.now()
tme_name = now_name.strftime("%d,%m,%y %H.%M.%S")
name_of_log_stavok = 'stavki_' + '_data ' + tme_name + '.txt'
file_obj_log = open(name_of_log_stavok, 'a')
buffer_shagov = 0
otdel_podchet_stavok = 0
tecuchajaStavka = 99
kolichestvoVyigrashey = 0
razreshenie_na_stavku_2 = True
razmer_stavki = 0.01
stavka = 99
while (steps < len(viborka)):
key = viborka[steps]
key1 = key
steps = steps + 1
############################################################################################
# БЛОК ЕДЕНИЦЫ
############################################################################################
# print(steps, 'предсказано: ', list_of_win_proverki_1[2], 'выпало:', key1)
# print('шаги до выигрыша: ', list_of_win_proverki_1[0])
# print(' ''предсказ-Winner:',winer_1)
# print(steps,'list_of_win_proverki_1[0]', list_of_win_proverki_1[0])
# if list_of_win_proverki_1[0] > buffer_shagov:
# otdel_podchet_stavok = otdel_podchet_stavok+1
# #print('list_of_win_proverki_1[0]',list_of_win_proverki_1[0])
# buffer_shagov = list_of_win_proverki_1[0]
# if (key1 == tecuchajaStavka) and list_of_win_proverki_1[7] == 1:
# list_of_win_proverki_1[7] = list_of_win_proverki_1[7] + 1
# razreshenie_na_stavku = False
# if (key1 == tecuchajaStavka) and razreshenie_na_stavku:
# kolichestvoVyigrashey = kolichestvoVyigrashey + 1
# print(steps, ' vyigrysh n:', kolichestvoVyigrashey, '#', 'vyigrysh vypal na: ', key1,
# '# shag stavok:', list_of_win_proverki_1[0])
# if razmer_stavki == 0.01:
# sum_of_win = sum_of_win + 0.35
# list_of_win_proverki_1[7] = list_of_win_proverki_1[7] + 1
# razreshenie_na_stavku = False
# if razmer_stavki == 0.02:
# print(' AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAprib: 70, na shage: ', steps)
# sum_of_win = sum_of_win + 0.70
# list_of_win_proverki_1[7] = list_of_win_proverki_1[7] + 1
# razreshenie_na_stavku = False
# razmer_stavki = 0.01
##############################################################################################3
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
# if (item > 0) and (item < 36):
# pribyl = pribyl + 0.35 - (item * 0.01)
# # if (item < 55) and (item >36):
# # pribyl = pribyl + ((72-36) - (item-36)*2)
# if (item > 35) and (item < 54):
# pribyl = pribyl + 0.35 * 2 - (item * 0.01) * 2
#
# if (item > 53) and (item < 66):
# pribyl = pribyl + 0.35 * 3 - (item * 0.01) * 3
#
# if (item > 65) and (item < 75):
# pribyl = pribyl + 0.35 * 4 - (item * 0.01) * 4
# if (item > 74) and (item < 82):
# pribyl = pribyl + 0.35 * 5 - (item * 0.01) * 5
#
# if (item > 81) and (item < 88):
# pribyl = pribyl + 0.35 * 6 - (item * 0.01) * 6
# if (item > 87) and (item < 93):
# pribyl = pribyl + 0.35 * 7 - (item * 0.01) * 7
# if (item > 92) and (item < 97):
# pribyl = pribyl + 0.35 * 8 - (item * 0.01) * 8
# if (item > 96) and (item < 101):
# pribyl = pribyl + 0.35 * 9 - (item * 0.01) * 9
# if (item > 100) and (item < 105):
# pribyl = pribyl + 0.35 * 10 - (item * 0.01) * 10
#
# if (item > 104) and (item < 108):
# pribyl = pribyl + 0.35 * 11 - (item * 0.01) * 11
# if (item > 107) and (item < 111):
# pribyl = pribyl + 0.35 * 12 - (item * 0.01) * 12
#
# if (item > 110) and (item < 114):
# pribyl = pribyl + 0.35 * 13 - (item * 0.01) * 13
# if (item > 113) and (item < 116):
# pribyl = pribyl + 0.35 * 14 - (item * 0.01) * 14
# if (item > 115) and (item < 119):
# pribyl = pribyl + 0.35 * 15 - (item * 0.01) * 15
# if item == 119:
# pribyl = pribyl - 5.54
#AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
if winer_1 != 99: # если предсказание не на паузе
# if steps ==137:
# print('key1 ', key1, 'list_of_win_proverki_1[2] ', list_of_win_proverki_1[2])
# if steps == 155:
# print('key1 ', key1, 'list_of_win_proverki_1[2] ', list_of_win_proverki_1[2])
if list_of_win_proverki_1[2] == key1:
# if list_of_win_proverki_1[0]>0 and list_of_win_proverki_1[0]< 37:
###### #sum_of_win = sum_of_win + 35
file_obj_log.write(' step: ' + str(steps) + ' vyigrush na: ' + str(
list_of_win_proverki_1[2]) + ' na shage: ' + str(list_of_win_proverki_1[0]) + '\n')
buffer_shagov = 0
list_of_win_proverki_1 = proverka_predskaza_1(key1, list_of_win_proverki_1, winer_1)
# print('активность предсказа', list_of_win_proverki_1[1])
if list_of_win_proverki_1[1] == 1: # если предсказание активно то
# print('активность предсказа', list_of_win_proverki_1[1],' проскочил')
steps_to_win_1 = list_of_win_proverki_1[
0] # забираем в буферную переменную количество шагов до выигрыша
list_of_win_and_steps.append(list_of_win_proverki_1[2])
list_of_win_and_steps.append(steps)
list_of_all_Win_1.append(list_of_win_proverki_1[2])
# list_of_all_Win_1_and_steps.append(list_of_win_and_steps)
supwiner = list_of_win_proverki_1[2]
# if steps_to_win_1<34:
list_of_win_proverki_1[
2] = winer_1 # назначение нового числа предсказания _ назначение с опаздыванием на один шаг
# if steps_to_win_1 == 19:
# print('19-ka na shage: ', steps)
list_of_steps_toWin_1.append(steps_to_win_1)
list_of_winSteps_and_steps.append(steps_to_win_1)
list_of_winSteps_and_steps.append(steps)
list_of_win_proverki_1[0] = 1 # обнуляем количество шагов до выигрыша
if list_of_win_proverki_1[0] == 1:
buffer_shagov = 0
# if (steps > 150)and (list_of_win_proverki_1[0] == 1): #################################### если начинается новый цикл
# print('-------------------------------------------------------------------------------выходим из программы на шаге: ', steps, 'neuch',list_of_win_proverki_1[0])
# break
# print(dic_ed[(key)] )
if list_of_win_proverki_1[0] > 118:
# print('list_of_win_proverki_1[2]',list_of_win_proverki_1[2] )
list_of_steps_toWin_1.append(119)
stavka = 0.01
list_of_win_proverki_1[0] = 1
list_of_win_proverki_1[1] = 0
winer_1 = 99
# list_of_win_proverki_1[2] = winer_1
# list_of_win_proverki_1[2] = winer_1 # назначение нового числа предсказания _ назначение с опаздыванием на один шаг
list_of200_1 = pre1_predskazatel_1(key1, list_of200_1,
6) # шаг нахождения винера##############################################################№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№№
# if steps > 400:
list_par_of200_1 = pre2_predskazatel_1(list_of200_1)
# print('urezanuy_spisok',urezanuy_spisok)
winer_1 = pre3_predskazatel_1(list_par_of200_1)
######################### start BLOK Stavki
# print('list_of_win_proverki_1[0]',list_of_win_proverki_1[0],'buffer_shagov',buffer_shagov)
razreshenie_na_stavku = False
# if ((list_of_win_proverki_1[0]) == 1) and (steps>90):
# break
# if (list_of_win_proverki_1[0] > buffer_shagov) and (list_of_win_proverki_1[0] < 37) and (stavka != 2):
# # otdel_podchet_stavok = otdel_podchet_stavok + 1
# # if list_of_win_proverki_1[0] == 0:
# # stavka = 32
# # elif list_of_win_proverki_1[0] == 1:
# # stavka = 20
# # elif list_of_win_proverki_1[0] == 2:
# # stavka = 25
# # elif list_of_win_proverki_1[0] == 3:
# # stavka = 26
# # elif list_of_win_proverki_1[0] == 4:
# # stavka = 21
# # elif list_of_win_proverki_1[0] == 5:
# # stavka = 24
# # elif list_of_win_proverki_1[0] == 6:
# # stavka = 27
# # elif list_of_win_proverki_1[0] == 7:
# # stavka = 28
# # elif list_of_win_proverki_1[0] == 8:
# # stavka = 23
# # elif list_of_win_proverki_1[0] == 9:
# # stavka = 22
# # elif list_of_win_proverki_1[0] == 10:
# # stavka = 5
# # elif list_of_win_proverki_1[0] == 11:
# # stavka = 30
# # elif list_of_win_proverki_1[0] == 12:
# # stavka = 35
# # elif list_of_win_proverki_1[0] == 13:
# # stavka = 36
# # elif list_of_win_proverki_1[0] == 14:
# # stavka = 31
# # elif list_of_win_proverki_1[0] == 15:
# # stavka = 19
# # elif list_of_win_proverki_1[0] == 16:
# # stavka = 33
# # elif list_of_win_proverki_1[0] == 17:
# # stavka = 34
# # elif list_of_win_proverki_1[0] == 18:
# # stavka = 29
# # elif list_of_win_proverki_1[0] == 19:
# # stavka = 4
# # elif list_of_win_proverki_1[0] == 20:
# # stavka = 14
# # elif list_of_win_proverki_1[0] == 21:
# # stavka = 2
# # elif list_of_win_proverki_1[0] == 22:
# # stavka = 18
# # elif list_of_win_proverki_1[0] == 23:
# # stavka = 10
# # elif list_of_win_proverki_1[0] == 24:
# # stavka = 16
# # elif list_of_win_proverki_1[0] == 25:
# # stavka = 17
# # elif list_of_win_proverki_1[0] == 26:
# # stavka = 0
# # elif list_of_win_proverki_1[0] == 27:
# # stavka = 13
# # elif list_of_win_proverki_1[0] == 28:
# # stavka = 12
# # elif list_of_win_proverki_1[0] == 29:
# # stavka = 7
# # elif list_of_win_proverki_1[0] == 30:
# # stavka = 8
# # elif list_of_win_proverki_1[0] == 31:
# # stavka = 9
# # elif list_of_win_proverki_1[0] == 32:
# # stavka = 15
# # elif list_of_win_proverki_1[0] == 33:
# # stavka = 1
# # elif list_of_win_proverki_1[0] == 34:
# # stavka = 6
# # elif list_of_win_proverki_1[0] == 35:
# # stavka = 3
# # elif list_of_win_proverki_1[0] == 36:
# # stavka = 11
#
# # print(' ',list_of_win_proverki_1[0],'predskazanie STAVKA na shag: ',steps+1,'vypadet n: ',list_of_win_proverki_1[2])
# buffer_shagov = list_of_win_proverki_1[0]
#
# razreshenie_na_stavku = True
# if razreshenie_na_stavku:
#
# # tecuchajaStavka = stavka
# tecuchajaStavka = list_of_win_proverki_1[2]
# if list_of_win_proverki_1[7] != 1:
# chislo_stavok = chislo_stavok + 1
# sum_of_stavok = sum_of_stavok + 1
# razmer_stavki = 0.01
# else:
# razreshenie_na_stavku = False
# else:
# chislo_levyh_stavok = chislo_levyh_stavok + 1
# tecuchajaStavka = tecuchajaStavka
# razreshenie_na_stavku = False
# if (list_of_win_proverki_1[0] > buffer_shagov) and (list_of_win_proverki_1[0] < 37) and (stavka == 2):
# # otdel_podchet_stavok = otdel_podchet_stavok + 1
# # if list_of_win_proverki_1[0] == 0:
# # stavka = 32
# # elif list_of_win_proverki_1[0] == 1:
# # stavka = 20
# # elif list_of_win_proverki_1[0] == 2:
# # stavka = 25
# # elif list_of_win_proverki_1[0] == 3:
# # stavka = 26
# # elif list_of_win_proverki_1[0] == 4:
# # stavka = 21
# # elif list_of_win_proverki_1[0] == 5:
# # stavka = 24
# # elif list_of_win_proverki_1[0] == 6:
# # stavka = 27
# # elif list_of_win_proverki_1[0] == 7:
# # stavka = 28
# # elif list_of_win_proverki_1[0] == 8:
# # stavka = 23
# # elif list_of_win_proverki_1[0] == 9:
# # stavka = 22
# # elif list_of_win_proverki_1[0] == 10:
# # stavka = 5
# # elif list_of_win_proverki_1[0] == 11:
# # stavka = 30
# # elif list_of_win_proverki_1[0] == 12:
# # stavka = 35
# # elif list_of_win_proverki_1[0] == 13:
# # stavka = 36
# # elif list_of_win_proverki_1[0] == 14:
# # stavka = 31
# # elif list_of_win_proverki_1[0] == 15:
# # stavka = 19
# # elif list_of_win_proverki_1[0] == 16:
# # stavka = 33
# # elif list_of_win_proverki_1[0] == 17:
# # stavka = 34
# # elif list_of_win_proverki_1[0] == 18:
# # stavka = 29
# # elif list_of_win_proverki_1[0] == 19:
# # stavka = 4
# # elif list_of_win_proverki_1[0] == 20:
# # stavka = 14
# # elif list_of_win_proverki_1[0] == 21:
# # stavka = 2
# # elif list_of_win_proverki_1[0] == 22:
# # stavka = 18
# # elif list_of_win_proverki_1[0] == 23:
# # stavka = 10
# # elif list_of_win_proverki_1[0] == 24:
# # stavka = 16
# # elif list_of_win_proverki_1[0] == 25:
# # stavka = 17
# # elif list_of_win_proverki_1[0] == 26:
# # stavka = 0
# # elif list_of_win_proverki_1[0] == 27:
# # stavka = 13
# # elif list_of_win_proverki_1[0] == 28:
# # stavka = 12
# # elif list_of_win_proverki_1[0] == 29:
# # stavka = 7
# # elif list_of_win_proverki_1[0] == 30:
# # stavka = 8
# # elif list_of_win_proverki_1[0] == 31:
# # stavka = 9
# # elif list_of_win_proverki_1[0] == 32:
# # stavka = 15
# # elif list_of_win_proverki_1[0] == 33:
# # stavka = 1
# # elif list_of_win_proverki_1[0] == 34:
# # stavka = 6
# # elif list_of_win_proverki_1[0] == 35:
# # stavka = 3
# # elif list_of_win_proverki_1[0] == 36:
# # stavka = 11
#
# # print(' ',list_of_win_proverki_1[0],'predskazanie STAVKA na shag: ',steps+1,'vypadet n: ',list_of_win_proverki_1[2])
# buffer_shagov = list_of_win_proverki_1[0]
#
# razreshenie_na_stavku = True
# if razreshenie_na_stavku:
#
# # tecuchajaStavka = stavka
# tecuchajaStavka = list_of_win_proverki_1[2]
# if list_of_win_proverki_1[7] != 1:
# chislo_stavok = chislo_stavok + 2
# sum_of_stavok = sum_of_stavok + 2
# razmer_stavki = 0.02
# else:
# razreshenie_na_stavku = False
# else:
# chislo_levyh_stavok = chislo_levyh_stavok + 1
# tecuchajaStavka = tecuchajaStavka
# razreshenie_na_stavku = False
#
# if list_of_win_proverki_1[0] == 54:
# buffer_shagov = 0
# # razreshenie_na_stavku_2 =False глобальная не используется сейчас
# if steps > 750:
# break
# # list_of_win_proverki_1[2] =winer_1
# ######################### end BLOK Stavki
# # if winer_1 !=99:
# # # if list_of_win_proverki_1[0] > buffer_shagov:
# # # otdel_podchet_stavok = otdel_podchet_stavok + 1
# # # # print('list_of_win_proverki_1[0]',list_of_win_proverki_1[0])
# # # buffer_shagov = list_of_win_proverki_1[0]
# # if (list_of_win_proverki_1[0] > 0) and (list_of_win_proverki_1[0] < 55): # ошибка повторы тоже считаются
# # # if (list_of_win_proverki_1[0] > 0) and (list_of_win_proverki_1[0] < 37):
# # if (list_of_win_proverki_1[0] == 54):
# # chislo_stavok = 0
# # if (list_of_win_proverki_1[0] == 1):
# # chislo_stavok = 0
# # if list_of_win_proverki_1[2] == key1:
# # #sum_of_win = sum_of_win+35
# # chislo_stavok = 0
# #
# # #chislo_stavok = chislo_stavok + 1
# # #print(' ', chislo_stavok, 'stavka na:', list_of_win_proverki_1[2])
# # sum_of_stavok = sum_of_stavok+1
# # file_obj_log.write(
# # 'chislo stavok ' + str(sum_of_stavok) + '\n' + 'step: ' + str(steps) + ' stavka na: ' + str(
# # list_of_win_proverki_1[2])+ '\n')
# # else:
# # file_obj_log.write(' propusk iz za prevyshenie porog 54'+ '\n')
# # else:
# # print('')
# # print(' ', chislo_stavok, 'net stavka na:', list_of_win_proverki_1[2])
# else:
# file_obj_log.write(' propusk iz za winer_1 !=99' + '\n')
best_chisla = pre3_predskazatel_1_all(list_par_of200_1)
# if steps == 136:
# print('предсказание на 137:','list_of_win_proverki_1[2] :',list_of_win_proverki_1[2] ,'winer_1:', winer_1)
# print('количество шагов до выигрыша',list_of_win_proverki_1[0])
# print('winer_1 ',winer_1 )
# if winer_1 == 99:
# list_of200_1 = pre1_predskazatel_1(key1, list_of200_1,12) # шаг нахождения винера#####
# # if steps > 400:
# list_par_of200_1 = pre2_predskazatel_1(list_of200_1)
# winer_1 = pre3_predskazatel_1(list_par_of200_1)
# best_chisla = pre3_predskazatel_1_all(list_par_of200_1)
##################################### --- УЧЕТ ЕДЕНИЦ БЛОК НЕ ТРОГАЕМ --- ######################
################################################################################################
dob_next_seen_1(dic_ed, key, steps) # создание\ обновление словаря едениц ###############
interval = key01step(key1, dic_ed) # последний интервал выпавшего числа ##############
add_step_to_all_1(dic_ed) # добавление шагов всем еденицам #############################
################################################################################################
# проверочный - dictEd = {(36): [23, [1, 2], 33]
# print('последний интервал выпавшего числа:',interval) # проверка функции возращающей последний интервал выпавшего числа
print('111111111111111111111111111111111111111111111111111111111111111111111111111111111')
keyer = len(list_of200_1)
print(naime_file)
print(list_of_all_Win_1, 'list_of_all_Win_1')
print(list_of_win_and_steps, 'list_of_win_and_steps')
print(list_of_steps_toWin_1, 'list_of_steps_toWin_1')
print(list_of_winSteps_and_steps, 'list_of_winSteps_and_steps')
print('summa stavok', sum_of_stavok)
print('chislo_levyh_stavok', chislo_levyh_stavok)
# print('otdel_podchet_stavok', otdel_podchet_stavok)
bonus = sum_of_stavok - otdel_podchet_stavok ####???
# print('summa pribuli', sum_of_win)
print('kollichestvo vyigrushey', sum_of_win / 35)
# print('real pribul', sum_of_win - (sum_of_stavok))
# prybyl_rel = sum_of_win - (sum_of_stavok)
print('list_of_win_proverki_1[0]: ', list_of_win_proverki_1[0])
# if list_of_win_proverki_1[0] > 118:
# uchet_intervala = 5.39
# else:
# uchet_intervala = list_of_win_proverki_1[0]*0.01
print('posledniy interval iz shagov', uchet_intervala)
pribyl = podchet_balansa(list_of_steps_toWin_1) - uchet_intervala
pribyl_0 = podchet_balansa(list_of_steps_toWin_1)
print('pribyl: ', pribyl)
# print('pribyl_0: ', pribyl_0)
# # print('best: ',best_chisla )
# if (prybyl_rel < 0) and (pribyl < 0):
# print('raznica mejdu real_prib i prib', math.fabs(prybyl_rel) - math.fabs(pribyl))
# if (prybyl_rel > 0) and (pribyl > 0):
# print('raznica mejdu real_prib i prib', prybyl_rel - pribyl)
# if (prybyl_rel > 0) and (pribyl < 0):
# print('raznica mejdu real_prib i prib -', prybyl_rel - pribyl)
# if (prybyl_rel < 0) and (pribyl > 0):
# print('raznica mejdu real_prib i prib -', pribyl - prybyl_rel)
print('kolichestvoVyigrashey: ', kolichestvoVyigrashey)
bu = uchet_intervala
# neuch = str(bu)
neuch2 = neuch2 + bu
print('neuchtenka: ', bu)
print('---------------------------------------------------------------end', naime_file)
print('steps: ', steps)
print('-------------nachalo-nachlo-------------------------------------------------start next of', naime_file)
pribyl2 = pribyl2 + pribyl
# real_pribyl = real_pribyl + prybyl_rel
file_obj_log.close()
# print('2222222222222222222222222222222222222222222222222222222222222222222222222222222222')
# keyer = len(list_of200_2)
# print(list_par_of200_2[key])
# print(list_of_all_Win_2)
# print(list_of_steps_toWin_2)
# print('pribyl: ',podchet_balansa(list_of_steps_toWin_2))
# print(dic_ed[(supwiner)])
# print('chet: ', chet)
# print('nechet: ',nechet)
# print('nol:',nol)
# print('raznica_chet_nechet',chet-nechet)
# for i in range(37):
# print(i,': ',dic_ed[(i)])
# rasnica=vig-prg
# print("разница:",rasnica)
print('---------------------------------------')
print('pribyl2: ', pribyl2)
# print('real_pribyl_all: ', real_pribyl)
print('neuchtenka: ', neuch2) | [
"toropov0014@mail.ru"
] | toropov0014@mail.ru |
fdd396e48bbd6514054391792a4eec855608cd68 | eac22714038e840028cc5abb72bc750004626ebb | /mct_utilities/src/mct_utilities/iface_tools.py | ea4e2aa90833414c05934e0eca84b528708099fb | [
"Apache-2.0"
] | permissive | iorodeo/mct | 79b19f6dab9f6567452df7274d67245bf64b1801 | fa8b85f36533c9b1486ca4f6b0c40c3daa6f4e11 | refs/heads/master | 2022-11-11T18:03:18.178182 | 2014-08-20T19:21:27 | 2014-08-20T19:21:27 | 273,790,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | import netifaces
def get_ip_addr(iface):
    """
    Returns the IPv4 address assigned to the given interface.

    Args:
        iface: interface name, e.g. 'eth0'.

    Raises:
        ValueError: (from netifaces) if the interface does not exist.
        KeyError/IndexError: if the interface has no IPv4 address assigned.
    """
    ifaddresses = netifaces.ifaddresses(iface)
    # Use the library's AF_INET constant instead of the hard-coded 2; the
    # numeric value of AF_INET is platform-defined.
    ip = ifaddresses[netifaces.AF_INET][0]['addr']
    return ip
| [
"will@iorodeo.com"
] | will@iorodeo.com |
77055629a54447ef734ce62f2f95dff7d4883e48 | 282ec49f8ce8aa176c24e4f13a8852c9b0752e4a | /educational/optimization-methods/constrained-optimization/test_suites.py | 55553d168f71c88dbc2351812c09262cac6095b8 | [] | no_license | montreal91/workshop | b118b9358094f91defdae1d11ff8a1553d67cee6 | 8c05e15417e99d7236744fe9f960f4d6b09e4e31 | refs/heads/master | 2023-05-22T00:26:09.170584 | 2023-01-28T12:41:08 | 2023-01-28T12:41:08 | 40,283,198 | 3 | 1 | null | 2023-05-01T20:19:11 | 2015-08-06T03:53:44 | C++ | UTF-8 | Python | false | false | 4,275 | py |
from utilities import Struct
from vector2 import Vector2
def Target1(vec):
    """Objective function f(x, y) = x^2 + y^2."""
    x_sq = vec.x ** 2
    y_sq = vec.y ** 2
    return x_sq + y_sq
def DerTarget1(vec):
    """Gradient of Target1: (2x, 2y)."""
    grad_x = 2 * vec.x
    grad_y = 2 * vec.y
    return Vector2(grad_x, grad_y)
def Target2(vec):
    """Objective function f(x, y) = x^2 + y^3."""
    square_term = vec.x ** 2
    cube_term = vec.y ** 3
    return square_term + cube_term
def DerTarget2(vec):
    """Gradient of Target2: (2x, 3y^2)."""
    grad_x = 2 * vec.x
    grad_y = 3 * vec.y ** 2
    return Vector2(grad_x, grad_y)
def Target3(vec):
    """Objective function f(x, y) = (x^2 + 6y)^4 + 8xy."""
    inner = vec.x ** 2 + 6 * vec.y
    return inner ** 4 + 8 * vec.x * vec.y
def DerTarget3(vec):
    """Gradient of Target3 = (x^2 + 6y)^4 + 8xy.

    df/dx = 8x * (x^2 + 6y)^3 + 8y
    df/dy = 24 * (x^2 + 6y)^3 + 8x

    Bug fix: the original added ``8 * vec.y`` to the y-partial; by the chain
    rule the term coming from the 8xy product is ``8x``, not ``8y``.
    """
    inner_cubed = (vec.x ** 2 + 6 * vec.y) ** 3
    x = inner_cubed * 8 * vec.x + 8 * vec.y
    y = inner_cubed * 24 + 8 * vec.x
    return Vector2(x, y)
# --- Equality constraints g(v) == 0 and their gradients (used by suite1-3) ---

def e11(vec):
    # Constraint: x + 1.235 == 0 (a vertical line at x = -1.235).
    return vec.x + 1.235

def de11(vec):
    # Gradient of e11: constant (1, 0).
    return Vector2(1, 0)

def e12(vec):
    # Constraint: y - 1 == 0 (a horizontal line at y = 1).
    return vec.y - 1

def de12(vec):
    # Gradient of e12: constant (0, 1).
    return Vector2(0, 1)

def e13(vec):
    # Constraint: x - 2y^2 + 8y == 0 (a sideways parabola in the plane).
    return vec.x - 2 * vec.y ** 2 + 8 * vec.y

def de13(vec):
    # Gradient of e13: (1, -4y + 8).
    return Vector2(1, -4 * vec.y + 8)
# --- Inequality constraints h(v) <= 0 for suite4/suite5 (disc regions) ---

def i21(vec):
    # Feasible inside the disc of radius 2 centred at (-2, -2).
    return (vec.x + 2) ** 2 + (vec.y + 2) ** 2 - 4

def di21(vec):
    # Gradient of i21: (2(x+2), 2(y+2)).
    return Vector2(2 * vec.x + 4, 2 * vec.y + 4)

def i22(vec):
    # Feasible inside the unit disc centred at the origin.
    return vec.x ** 2 + vec.y ** 2 - 1

def di22(vec):
    # Gradient of i22: (2x, 2y).
    return Vector2(2 * vec.x, 2 * vec.y)
# --- Linear inequality constraints h(v) <= 0 for suite6 (a polygonal
# feasible region); every gradient is a constant vector. ---

def i31(vec):
    # Half-plane: x <= 1.
    return vec.x - 1

def di31(vec):
    return Vector2(1, 0)

def i32(vec):
    # Half-plane: -3x + y <= 6.
    return - 3 * vec.x + vec.y - 6

def di32(vec):
    return Vector2( -3, 1)

def i33(vec):
    # Half-plane: x - y <= 4.
    return vec.x - vec.y - 4

def di33(vec):
    return Vector2(1, -1)

def i34(vec):
    # Half-plane: -0.5x + y <= -2.
    return -0.5 * vec.x + vec.y + 2

def di34(vec):
    return Vector2(-0.5, 1)

def i35(vec):
    # Half-plane: -x - y <= 4.
    return -vec.x - vec.y - 4

def di35(vec):
    return Vector2(-1, -1)
def TargetBygfoot(vec):
    """Objective f(x, y) = -xy (maximises the product xy when minimised)."""
    product = vec.x * vec.y
    return -product
def DerTargetBygfoot(vec):
    """Gradient of -xy: (-y, -x)."""
    grad_x = -vec.y
    grad_y = -vec.x
    return Vector2(grad_x, grad_y)
# --- Box constraints for the "Bygfoot" suite: 0 <= x <= 30000, 0 <= y <= 30 ---

def i_bfoot1(vec):
    # Lower bound: x >= 0.
    return -vec.x

def di_bfoot1(vec):
    return Vector2(-1, 0)

def i_bfoot2(vec):
    # Upper bound: x <= 30000.
    return vec.x - 30000

def di_bfoot2(vec):
    return Vector2(1, 0)

def i_bfoot3(vec):
    # Lower bound: y >= 0.
    return -vec.y

def di_bfoot3(vec):
    return Vector2(0, -1)

def i_bfoot4(vec):
    # Upper bound: y <= 30.
    return vec.y - 30

def di_bfoot4(vec):
    return Vector2(0, 1)
# Each suite bundles an objective, its gradient, equality/inequality
# constraint callbacks with their gradients, and a starting point.

# Bygfoot problem: minimise -xy inside the box 0 <= x <= 30000, 0 <= y <= 30.
bf_suite1 = Struct()
bf_suite1.target_cb = TargetBygfoot
bf_suite1.d_target_cb = DerTargetBygfoot
bf_suite1.constraints_eq_l = []
bf_suite1.d_constraints_eq_l = []
bf_suite1.constraints_ineq_l = [i_bfoot1, i_bfoot2, i_bfoot3, i_bfoot4]
bf_suite1.d_constraints_ineq_l = [di_bfoot1, di_bfoot2, di_bfoot3, di_bfoot4]
bf_suite1.start_point = Vector2(50000, 100)  # NOTE(review): starts outside the box

# Minimise x^2 + y^2 on the line x = -1.235.
suite1 = Struct()
suite1.target_cb = Target1
suite1.d_target_cb = DerTarget1
suite1.constraints_eq_l = [e11]
suite1.d_constraints_eq_l = [de11]
suite1.constraints_ineq_l = []
suite1.d_constraints_ineq_l = []
suite1.start_point = Vector2(4, 3)

# Minimise x^2 + y^2 at the single feasible point (-1.235, 1).
suite2 = Struct()
suite2.target_cb = Target1
suite2.d_target_cb = DerTarget1
suite2.constraints_eq_l = [e11, e12]
suite2.d_constraints_eq_l = [de11, de12]
suite2.constraints_ineq_l = []
suite2.d_constraints_ineq_l = []
suite2.start_point = Vector2(-4, 3)

# Minimise x^2 + y^2 on the parabola x = 2y^2 - 8y.
suite3 = Struct()
suite3.target_cb = Target1
suite3.d_target_cb = DerTarget1
suite3.constraints_eq_l = [e13]
suite3.d_constraints_eq_l = [de13]
suite3.constraints_ineq_l = []
suite3.d_constraints_ineq_l = []
suite3.start_point = Vector2(3, -2)

# Minimise x^2 + y^3 inside the disc centred at (-2, -2).
suite4 = Struct()
suite4.target_cb = Target2
suite4.d_target_cb = DerTarget2
suite4.constraints_eq_l = []
suite4.d_constraints_eq_l = []
suite4.constraints_ineq_l = [i21]
suite4.d_constraints_ineq_l = [di21]
suite4.start_point = Vector2(4, -4)

# Minimise x^2 + y^3 in the intersection of both discs.
suite5 = Struct()
suite5.target_cb = Target2
suite5.d_target_cb = DerTarget2
suite5.constraints_eq_l = []
suite5.d_constraints_eq_l = []
suite5.constraints_ineq_l = [i21, i22]
suite5.d_constraints_ineq_l = [di21, di22]
suite5.start_point = Vector2(4, 4)

# Minimise (x^2 + 6y)^4 + 8xy inside the polygon bounded by i31..i35.
suite6 = Struct()
suite6.target_cb = Target3
suite6.d_target_cb = DerTarget3
suite6.constraints_eq_l = []
suite6.d_constraints_eq_l = []
suite6.constraints_ineq_l = [i31, i32, i33, i34, i35]
suite6.d_constraints_ineq_l = [di31, di32, di33, di34, di35]
suite6.start_point = Vector2(-4, 4)
| [
"nefedov.alexander91@yandex.ru"
] | nefedov.alexander91@yandex.ru |
3807a3ce3a75852bdce9ac2d08c8f7b277905a1a | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/googlecloudsdk/api_lib/app/api/appengine_domains_api_client.py | 9cdab7d0091b78e506f53797b868e7f40c86062c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 8,334 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for creating a client to talk to the App Engine Admin API."""
from googlecloudsdk.api_lib.app import operations_util
from googlecloudsdk.api_lib.app.api import appengine_api_client_base as base
from googlecloudsdk.calliope import base as calliope_base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import resources
# Maps each supported gcloud release track to the App Engine domains API
# version that backs it.
DOMAINS_VERSION_MAP = {
    calliope_base.ReleaseTrack.GA: 'v1',
    calliope_base.ReleaseTrack.ALPHA: 'v1alpha',
    calliope_base.ReleaseTrack.BETA: 'v1beta'
}
def GetApiClientForTrack(release_track):
  """Returns a domains API client for the given calliope release track."""
  api_version = DOMAINS_VERSION_MAP[release_track]
  if release_track == calliope_base.ReleaseTrack.GA:
    return AppengineDomainsApiClient.GetApiClient(api_version)
  # ALPHA and BETA share the beta client, differing only in API version.
  return AppengineDomainsApiBetaClient.GetApiClient(api_version)
class AppengineDomainsApiClient(base.AppengineApiClientBase):
  """Client used by gcloud to communicate with the App Engine API."""

  def __init__(self, client):
    base.AppengineApiClientBase.__init__(self, client)

    # Clone the global resource registry so registering this client's API
    # version does not mutate shared state.
    self._registry = resources.REGISTRY.Clone()
    # pylint: disable=protected-access
    self._registry.RegisterApiByName('appengine', client._VERSION)

  def CreateDomainMapping(self, domain, certificate_id):
    """Creates a domain mapping for the given application.

    Args:
      domain: str, the custom domain string.
      certificate_id: str, a certificate id for the new domain.

    Returns:
      The created DomainMapping object.
    """
    ssl = self.messages.SslSettings(certificateId=certificate_id)

    domain_mapping = self.messages.DomainMapping(id=domain, sslSettings=ssl)

    request = self.messages.AppengineAppsDomainMappingsCreateRequest(
        parent=self._FormatApp(), domainMapping=domain_mapping)

    operation = self.client.apps_domainMappings.Create(request)

    # Create is a long-running operation; block until it completes and return
    # the resulting DomainMapping.
    return operations_util.WaitForOperation(self.client.apps_operations,
                                            operation).response

  def DeleteDomainMapping(self, domain):
    """Deletes a domain mapping for the given application.

    Args:
      domain: str, the domain to delete.
    """
    request = self.messages.AppengineAppsDomainMappingsDeleteRequest(
        name=self._FormatDomainMapping(domain))

    operation = self.client.apps_domainMappings.Delete(request)

    # Block until the delete operation finishes; nothing useful to return.
    operations_util.WaitForOperation(self.client.apps_operations, operation)

  def GetDomainMapping(self, domain):
    """Gets a domain mapping for the given application.

    Args:
      domain: str, the domain to retrieve.

    Returns:
      The retrieved DomainMapping object.
    """
    request = self.messages.AppengineAppsDomainMappingsGetRequest(
        name=self._FormatDomainMapping(domain))

    return self.client.apps_domainMappings.Get(request)

  def ListDomainMappings(self):
    """Lists all domain mappings for the given application.

    Returns:
      A list of DomainMapping objects.
    """
    request = self.messages.AppengineAppsDomainMappingsListRequest(
        parent=self._FormatApp())

    response = self.client.apps_domainMappings.List(request)

    # NOTE(review): returns only the first page; pagination is not handled
    # here — confirm callers never expect more than one page.
    return response.domainMappings

  def UpdateDomainMapping(self, domain, certificate_id, no_certificate_id):
    """Updates a domain mapping for the given application.

    Args:
      domain: str, the custom domain string.
      certificate_id: str, a certificate id for the domain.
      no_certificate_id: bool, remove the certificate id from the domain.

    Returns:
      The updated DomainMapping object.
    """
    mask_fields = []

    # Both setting and clearing the certificate touch the same mask field;
    # clearing is expressed by patching with an unset certificateId.
    if certificate_id or no_certificate_id:
      mask_fields.append('sslSettings.certificateId')

    ssl = self.messages.SslSettings(certificateId=certificate_id)

    domain_mapping = self.messages.DomainMapping(id=domain, sslSettings=ssl)

    if not mask_fields:
      raise exceptions.MinimumArgumentException(
          ['--[no-]certificate-id'],
          'Please specify at least one attribute to the domain-mapping update.')

    request = self.messages.AppengineAppsDomainMappingsPatchRequest(
        name=self._FormatDomainMapping(domain),
        domainMapping=domain_mapping,
        updateMask=','.join(mask_fields))

    operation = self.client.apps_domainMappings.Patch(request)

    return operations_util.WaitForOperation(self.client.apps_operations,
                                            operation).response

  def ListVerifiedDomains(self):
    """Lists all domains verified by the current user.

    Returns:
      A list of AuthorizedDomain objects.
    """
    request = self.messages.AppengineAppsAuthorizedDomainsListRequest(
        parent=self._FormatApp())

    response = self.client.apps_authorizedDomains.List(request)

    return response.domains

  def _FormatDomainMapping(self, domain):
    # Builds the fully-qualified resource name
    # apps/{project}/domainMappings/{domain}.
    res = self._registry.Parse(
        domain,
        params={'appsId': self.project},
        collection='appengine.apps.domainMappings')
    return res.RelativeName()
class AppengineDomainsApiBetaClient(AppengineDomainsApiClient):
  """Client used by gcloud to communicate with the App Engine API."""

  def CreateDomainMapping(self, domain, certificate_id, management_type):
    """Creates a domain mapping for the given application.

    Args:
      domain: str, the custom domain string.
      certificate_id: str, a certificate id for the new domain.
      management_type: SslSettings.SslManagementTypeValueValuesEnum,
                       AUTOMATIC or MANUAL certificate provisioning.

    Returns:
      The created DomainMapping object.
    """
    ssl_settings = self.messages.SslSettings(
        certificateId=certificate_id, sslManagementType=management_type)
    new_mapping = self.messages.DomainMapping(
        id=domain, sslSettings=ssl_settings)
    create_request = self.messages.AppengineAppsDomainMappingsCreateRequest(
        parent=self._FormatApp(),
        domainMapping=new_mapping)

    create_op = self.client.apps_domainMappings.Create(create_request)
    finished = operations_util.WaitForOperation(
        self.client.apps_operations, create_op)
    return finished.response

  def UpdateDomainMapping(self,
                          domain,
                          certificate_id,
                          no_certificate_id,
                          management_type):
    """Updates a domain mapping for the given application.

    Args:
      domain: str, the custom domain string.
      certificate_id: str, a certificate id for the domain.
      no_certificate_id: bool, remove the certificate id from the domain.
      management_type: SslSettings.SslManagementTypeValueValuesEnum,
                       AUTOMATIC or MANUAL certificate provisioning.

    Returns:
      The updated DomainMapping object.
    """
    update_mask = []
    if certificate_id or no_certificate_id:
      update_mask.append('sslSettings.certificateId')
    if management_type:
      update_mask.append('sslSettings.sslManagementType')

    ssl_settings = self.messages.SslSettings(
        certificateId=certificate_id, sslManagementType=management_type)
    updated_mapping = self.messages.DomainMapping(
        id=domain, sslSettings=ssl_settings)

    if not update_mask:
      raise exceptions.MinimumArgumentException(
          ['--[no-]certificate-id', '--no_managed_certificate'],
          'Please specify at least one attribute to the domain-mapping update.')

    patch_request = self.messages.AppengineAppsDomainMappingsPatchRequest(
        name=self._FormatDomainMapping(domain),
        domainMapping=updated_mapping,
        updateMask=','.join(update_mask))

    patch_op = self.client.apps_domainMappings.Patch(patch_request)
    finished = operations_util.WaitForOperation(
        self.client.apps_operations, patch_op)
    return finished.response
| [
"jordan.robison@gmail.com"
] | jordan.robison@gmail.com |
556d819f1e8715a82a6f1ec8b75bf6c9475578ed | ccd400e42d98c5c9be099e60fc8f7081951438b4 | /idb/grpc/tests/xctest_log_parser.py | 6842ac576f11b2b902c72705cd15b997cf1f7a16 | [
"MIT"
] | permissive | Unity-Technologies/idb | d0b986c1ac082f8f07fa04ed263fa85259130c4c | f02a51d8018f497e4975e943dc5e94a9d4785fdd | refs/heads/unity-main | 2023-07-17T13:15:31.637414 | 2021-12-23T11:07:15 | 2021-12-23T11:07:15 | 317,798,876 | 5 | 0 | MIT | 2021-12-27T16:01:11 | 2020-12-02T08:36:26 | Objective-C | UTF-8 | Python | false | false | 3,499 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from unittest import TestCase
from idb.grpc.xctest_log_parser import XCTestLogParser
def _begin_test(class_name: str, method_name: str) -> str:
return json.dumps(
{"className": class_name, "methodName": method_name, "event": "begin-test"}
)
def _end_test(class_name: str, method_name: str) -> str:
return json.dumps(
{"className": class_name, "methodName": method_name, "event": "end-test"}
)
class XCTestLogParserTestCase(TestCase):
    # Tests for XCTestLogParser's streaming-log handling: which event lines
    # are rejected, and how plain log lines are attributed to the test that
    # is currently "open" between begin-test and end-test events.

    def test_ignores_line_missing_class_name(self) -> None:
        # An event without a "className" key must not open a test scope, so
        # the following log lines are attributed to no test at all.
        parser = XCTestLogParser()
        for line in [
            "some line",
            '{"event": "begin-test", "methodName": "MyTestMethod"}',
            "abc",
            _end_test("MyTestClass", "MyTestMethod"),
        ]:
            parser.parse_streaming_log(line)
        self.assertCountEqual({}, parser._logs)

    def test_ignores_line_with_mismatched_types(self) -> None:
        # "methodName": 42 is not a str, so the begin event is rejected and
        # no log is collected.
        parser = XCTestLogParser()
        for line in [
            "some line",
            '{"event": "begin-test", "className": "MyTestClass", "methodName": 42}',
            "abc",
            _end_test("MyTestClass", "MyTestMethod"),
        ]:
            parser.parse_streaming_log(line)
        self.assertCountEqual({}, parser._logs)

    def test_ignores_line_that_is_too_long(self) -> None:
        # Event lines longer than 10,000 characters are discarded; the
        # 10,001-char method name pushes the JSON over that limit.
        parser = XCTestLogParser()
        method_name = "a" * 10_001
        for line in [
            _begin_test("MyTestClass", method_name),
            "abc",
            "def",
            _end_test("MyTestClass", method_name),
        ]:
            parser.parse_streaming_log(line)
        self.assertCountEqual({}, parser._logs)

    def test_ignores_log_lines_outside_test(self) -> None:
        # With no begin-test event seen, plain lines (even valid JSON) are
        # not recorded anywhere.
        parser = XCTestLogParser()
        for line in ["some line", '{"this line": "has json"}']:
            parser.parse_streaming_log(line)
        self.assertCountEqual({}, parser._logs)

    def test_adds_lines_to_distinct_tests(self) -> None:
        # Lines between a begin/end pair go to that test only; a second
        # begin/end pair collects into its own bucket.
        parser = XCTestLogParser()
        for line in [
            _begin_test("MyTestClass", "MyTestMethod"),
            "abc",
            "def",
            _end_test("MyTestClass", "MyTestMethod"),
            _begin_test("MyTestClass", "OtherMethod"),
            "123",
            "456",
            _end_test("MyTestClass", "OtherMethod"),
        ]:
            parser.parse_streaming_log(line)
        self.assertListEqual(
            parser.get_logs_for_test("MyTestClass", "MyTestMethod"), ["abc", "def"]
        )
        self.assertListEqual(
            parser.get_logs_for_test("MyTestClass", "OtherMethod"), ["123", "456"]
        )

    def test_handles_mismatched_starts(self) -> None:
        # A new begin-test implicitly closes the previous test: the first
        # test's lines survive even though its end event never arrives.
        parser = XCTestLogParser()
        for line in [
            _begin_test("MyTestClass", "MyTestMethod"),
            "abc",
            "def",
            _begin_test("MyTestClass", "OtherMethod"),
            "123",
            "456",
            _end_test("MyTestClass", "OtherMethod"),
        ]:
            parser.parse_streaming_log(line)
        self.assertListEqual(
            parser.get_logs_for_test("MyTestClass", "MyTestMethod"), ["abc", "def"]
        )
        self.assertListEqual(
            parser.get_logs_for_test("MyTestClass", "OtherMethod"), ["123", "456"]
        )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
1e8b004d0c84a1e74b561476b9d095ddbd2e22d9 | 9507ff9e9bca2ca8104369c9e25acd74d308e9b3 | /base_data_from_net/get_base_data.py | 18446831f88c2d3a3a0c490889e28e8464b49c00 | [] | no_license | yangkang411/python_tool | 03e483c7ec7e1e76284f93cf5b9086fdf98af826 | 713071a9fbabfabcbc3c16ce58d1382c410a7ea3 | refs/heads/master | 2023-03-17T16:14:03.332332 | 2020-09-10T02:37:05 | 2020-09-10T02:37:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,752 | py | import socket
from time import sleep
import base64
import _thread
import serial
import time
import datetime
import os
# NTRIP caster endpoint and account settings.
# NOTE(review): credentials are hard-coded in source; consider moving them
# to environment variables or a config file.
host = '47.116.1.17'
port = 2201
#mountPoint = 'RTKBASE'
mountPoint = 'WX02'  # mount point (correction stream) requested from the caster
userAgent = 'NTRIP Aceinna CloudRTK 1.0'
username = 'yundong'
password = 'TEL8IOZTBJVVJ0IT'
com_port = 'com34'  # serial port for the GNSS receiver path (currently disabled in ntip_connect)
def get_utc_day():
    """Return the current UTC day-of-year (1-based) as a string.

    The previous implementation rebuilt the local time field by field,
    converted it to UTC via mktime/utcfromtimestamp and then subtracted
    January 1st of the *local* year. Around New Year — when the local date
    and the UTC date fall in different years — that produced wrong values
    such as '0'. Reading `tm_yday` straight off the UTC time struct avoids
    both the round trip and the year-boundary bug.
    """
    utc_now = datetime.datetime.now(datetime.timezone.utc)
    return str(utc_now.timetuple().tm_yday)
def mkdir(path):
    """Create directory `path` (including parents).

    Returns True when the directory was created (printing '<path> mkdir suc')
    and False when it already existed (printing 'mkdir exist'), matching the
    original behaviour. The original checked os.path.exists() first and then
    created the directory, which is racy (TOCTOU); letting os.makedirs raise
    FileExistsError removes the race while keeping the same observable
    contract.
    """
    path = path.strip()
    path = path.rstrip("\\")
    try:
        os.makedirs(path)
    except FileExistsError:
        print('mkdir exist')
        return False
    print(path + ' mkdir suc')
    return True
def rev_ntrip_data(client, is_log):
    """Forever drain correction bytes from the NTRIP socket.

    Intended to run as a _thread target. When `is_log` is truthy the raw
    stream is appended to '<utc-day>/ntrip_<local-timestamp>.bin'.
    """
    log_fs = None
    if is_log:
        day = get_utc_day()
        try:
            mkdir(day)
        except:
            pass
        stamp = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
        log_fs = open(day + '/' + 'ntrip_' + stamp + '.bin', 'wb')
    while True:
        chunk = client.recv(1024)
        if len(chunk) > 0 and log_fs is not None:
            log_fs.write(chunk)
        sleep(0.1)
def rev_uart_data(port_handle, client, is_log):
    """Forever forward serial-port bytes upstream to the NTRIP socket.

    Intended to run as a _thread target. Every non-empty read is sent to
    `client` and echoed to stdout; when `is_log` is truthy its str() form is
    also appended to '<utc-day>/sta8100_<local-timestamp>.log'.
    """
    log_fs = None
    if is_log:
        day = get_utc_day()
        try:
            mkdir(day)
        except:
            pass
        stamp = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
        log_fs = open(day + '/' + 'sta8100_' + stamp + '.log', 'w')
    while True:
        data = port_handle.read_all()
        if len(data) > 0:
            client.send(data)
            print(str(data))
            if log_fs is not None:
                log_fs.write(str(data))
        sleep(0.1)
def ntip_connect():
    '''Connect to the NTRIP caster and spawn the receive thread.

    Sends an NTRIP (HTTP/1.0) GET request with Basic auth for the configured
    mount point; on an "ICY 200 OK" reply it starts rev_ntrip_data in a
    background thread. The serial-port half is currently disabled:

    uart_handle=serial.Serial(com_port,460800, timeout=1)
    if(uart_handle.is_open):
        print('wrong port')
    '''
    # Plain TCP connection to the caster.
    rtk_client = socket.socket()
    rtk_client.connect((host, port))
    # HTTP Basic authentication: base64("username:password").
    auth = username + ':' + password
    bytes_auth = auth.encode("utf-8")
    authorization = base64.b64encode(bytes_auth)
    #authorization = username + ':' + password
    info = "GET /%s HTTP/1.0\r\nUser-Agent: %s\r\nAuthorization: Basic %s\r\n\r\n"%(mountPoint,userAgent,authorization.decode('utf-8'))
    print ("info = %s" % info)
    rtk_client.send(info.encode("utf8"))
    rev_data = rtk_client.recv(1024)
    # Casters answer "ICY 200 OK" on a successful mount-point request.
    if('ICY 200 OK' in str(rev_data)):
        print ('connect ntrip suc start connect com')
        # Start the logging reader thread (is_log=1).
        _thread.start_new_thread(rev_ntrip_data,(rtk_client,1,))
        #_thread.start_new_thread(rev_uart_data,(uart_handle,rtk_client,1,))
    '''
    try:
        serial=serial.Serial(com_port,460800, timeout=1)
        if(serial == NULL):
            print('wrong port')
        rtk_client = socket.socket()
        rtk_client.connect((host, port))
        auth = username + ':' + password
        bytes_auth = auth.encode("utf-8")
        authorization = base64.b64encode(bytes_auth)
        #authorization = username + ':' + password
        info = "GET /%s HTTP/1.0\r\nUser-Agent: %s\r\nAuthorization: Basic %s\r\n\r\n"%(mountPoint,userAgent,authorization.decode('utf-8'))
        print (info)
        rtk_client.send(info.encode("utf8"))
        rev_data = rtk_client.recv(1024)
        if('ICY 200 OK' in str(rev_data)):
            print ('connect ntrip suc start connect com')
            _thread.start_new_thread(rev_ntrip_data,(serial,rtk_client,0,))
            _thread.start_new_thread(rev_uart_data,(serial,rtk_client,0,))
    except:
        print('error')
        return
    '''
if __name__ == '__main__':
    ntip_connect()
    # Keep the main thread alive so the _thread-spawned readers keep running
    # (threads started via _thread die with the main thread). The original
    # `while 1: pass` busy-spun one CPU core; sleeping preserves the
    # keep-alive behaviour while staying idle.
    while 1:
        sleep(1)
| [
"41727862+geqian@users.noreply.github.com"
] | 41727862+geqian@users.noreply.github.com |
c8a2c709881af6d33928d6a76a1e18aa42103ef1 | 5000d053f7a8bd90d68d5580d2398e2b74259719 | /alerta/plugins/normalise.py | c6410e2032e29795d43a316d846b49a734ac5882 | [
"Apache-2.0"
] | permissive | alvsgithub/alerta | 8203d7632ff58e0ef5e2e9e64156ab797f322710 | 25f4257347d2135a259b37d5646da51514fc8e64 | refs/heads/master | 2021-01-18T08:12:49.658867 | 2014-10-18T22:15:18 | 2014-10-18T22:15:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py |
from alerta.plugins import PluginBase, RejectException
class NormaliseAlert(PluginBase):
    """Alerta plugin that rewrites incoming alerts before they are stored."""
    def pre_receive(self, alert):
        # Prefix the alert text with its upper-cased severity,
        # e.g. severity "major" + text "disk full" -> "MAJOR: disk full".
        alert.text = '%s: %s' % (alert.severity.upper(), alert.text)
        return alert
    def post_receive(self, alert):
pass | [
"nick.satterly@guardian.co.uk"
] | nick.satterly@guardian.co.uk |
0dab4666a712bbc8aa7a63cfd615561f4fd6e6f5 | a36c6c96e21a61ac622d53b2f441adf70da8e465 | /httpx_http.py | 199982e6ef1b01d5eaa8c6e7ed0e28644460feac | [] | no_license | pen960223/ProxyTest | a7fff2e394cb7dc5bcea42e47bb4834f456432df | 003abe06cb4ebd77281b27886dbdf58bc3d11848 | refs/heads/master | 2023-06-17T10:59:11.931023 | 2021-07-10T14:10:48 | 2021-07-10T14:10:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | import httpx
# Local forward proxy used for this connectivity test.
proxy = '127.0.0.1:7890'
# httpx maps URL-scheme prefixes to proxy URLs; both plain-HTTP and HTTPS
# traffic are tunnelled through the same local HTTP proxy here.
proxies = {
    'http://': 'http://' + proxy,
    'https://': 'http://' + proxy,
}
# httpbin echoes request details, so the printed response reveals whether
# the proxy was actually used (origin IP).
with httpx.Client(proxies=proxies) as client:
    response = client.get('https://httpbin.org/get')
    print(response.text)
| [
"cqc@cuiqingcai.com"
] | cqc@cuiqingcai.com |
1ba5e73d31ba4cce03b8f7156ce2da8d41f8b346 | 060ce17de7b5cdbd5f7064d1fceb4ded17a23649 | /fn_joe_sandbox_analysis/fn_joe_sandbox_analysis/__init__.py | 09b22a32edc9da84a9bffe6bfd13f979462e9f3f | [
"MIT"
] | permissive | ibmresilient/resilient-community-apps | 74bbd770062a22801cef585d4415c29cbb4d34e2 | 6878c78b94eeca407998a41ce8db2cc00f2b6758 | refs/heads/main | 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 | MIT | 2023-03-29T20:40:31 | 2017-08-25T14:07:33 | Python | UTF-8 | Python | false | false | 197 | py | # (c) Copyright IBM Corp. 2018. All Rights Reserved.
import pkg_resources
try:
    # Look up the installed distribution's version from setuptools metadata
    # and expose it as the conventional package attribute.
    __version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound:
pass | [
"shane.curtin@ie.ibm.com"
] | shane.curtin@ie.ibm.com |
1372764e6f68c43882fa2ccf7731bcd1330247bc | a8fc7cc0f64571ddcb4d78278f9aa166a74038e5 | /worlds/oribf/Options.py | ac6808aa8d34ab5aab58f13897fd300f02c407c1 | [
"MIT"
] | permissive | adampziegler/Archipelago | 8d10ad309e8e3e71fc008d692b3fb1efc6f6ce6a | 6b0b78d8e07d275aceead3f5a1890eac5f153bf0 | refs/heads/main | 2023-08-23T15:09:06.365113 | 2021-11-03T22:27:09 | 2021-11-03T22:27:09 | 396,093,506 | 0 | 0 | NOASSERTION | 2021-08-14T18:25:23 | 2021-08-14T18:25:23 | null | UTF-8 | Python | false | false | 249 | py | from .RulesData import location_rules
from Options import Toggle
options = {
    "open" : Toggle,
    "openworld": Toggle
}
# Every optional logic set becomes its own Toggle option; "casual-core" is
# skipped — presumably the always-on baseline rule set (TODO confirm).
options.update(
    (logic_set.replace("-", "_"), Toggle)
    for logic_set in location_rules
    if logic_set != "casual-core"
)
| [
"fabian.dill@web.de"
] | fabian.dill@web.de |
1a943889ad313bd6c151019a90fcd91d175507d1 | 93a12156f091858f6e48c7c85c84483742c221f7 | /programmers/heap_disk.py | 1d24814334908fa99d48aecc1c414bbdc5aaed55 | [] | no_license | dojinkimm/AlgorithmPractice | 74cd5c63f9973792f034fe019504b2160ad51e40 | 211857e7541faeeec2f21690eecc8c17d31ad142 | refs/heads/master | 2021-09-28T14:54:12.756098 | 2021-09-26T13:23:38 | 2021-09-26T13:23:38 | 205,849,589 | 2 | 1 | null | 2019-09-30T03:20:10 | 2019-09-02T12:12:24 | Python | UTF-8 | Python | false | false | 538 | py | # 디스크 컨트롤러 - Heap
from heapq import heappush, heappop
def solution(jobs):
    """Programmers '디스크 컨트롤러': mean turnaround time under
    shortest-job-first scheduling.

    jobs: list of [request_time, run_time] pairs. `answer` accumulates the
    total turnaround time; the trailing return statement yields its integer
    mean.
    """
    length = len(jobs)
    answer = 0            # summed turnaround time over all jobs
    time, end = 0, -1     # current clock / arrival cutoff already enqueued
    q = []                # min-heap of pending run times
    done = 0
    while done < length:
        # Enqueue every request that arrived since the previous dispatch.
        for arrive, burst in jobs:
            if end < arrive <= time:
                answer += time - arrive
                heappush(q, burst)
        if len(q) > 0:
            # Everyone still queued waits while the shortest job runs.
            answer += len(q) * q[0]
            end = time
            time += heappop(q)
            done += 1
        else:
            # Idle tick: no pending requests yet.
            time += 1
return (int(answer / length)) | [
"dojinkim119@gmail.com"
] | dojinkim119@gmail.com |
328acbeccc71240aef3b45402311a0e1884a6353 | 1779f1d096d4af6d3112e39ad36c63ff4b04ae00 | /utils/fluids/time_providers.py | a2eeadc833a15e904487727367793f266f11bbde | [] | no_license | 10erick-cpu/RL_Mixing_Colours | eb753f7b6b309cf54fe41f74f993dfada83d7867 | a11756d6988422929a452dc1e19ccf6b051f0832 | refs/heads/master | 2023-07-27T22:38:34.299006 | 2021-09-09T16:16:33 | 2021-09-09T16:16:33 | 398,141,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | import datetime
import time
class TimeProvider(object):
    """Abstract clock: subclasses supply get_time()/sleep() and inherit
    stepwise time advancement plus human-readable formatting."""

    def __init__(self):
        # Parent environment, attached later via set_parent_env().
        self.env = None

    def get_time(self):
        """Current time in seconds; must be overridden."""
        raise NotImplementedError("base")

    def advance_time_s(self, seconds, sleep_time=0.2, progress_callback=None):
        """Sleep until `seconds` have elapsed on this provider's clock.

        Sleeps in chunks of at most `sleep_time`; after each chunk the
        optional `progress_callback` is invoked with the keyword argument
        `delta_time` (time actually elapsed during that chunk).
        """
        deadline = self.get_time() + seconds
        remaining = deadline - self.get_time()
        while remaining > 0:
            chunk = min(sleep_time, remaining)
            chunk_start = self.get_time()
            self.sleep(chunk)
            if progress_callback:
                progress_callback(delta_time=self.get_time() - chunk_start)
            remaining = deadline - self.get_time()

    def get_human_time_str(self):
        """Current time rendered via datetime.timedelta (e.g. '0:00:05')."""
        return str(datetime.timedelta(seconds=self.get_time()))

    def sleep(self, seconds):
        raise NotImplementedError()

    def set_parent_env(self, env):
        self.env = env
class RealTime(TimeProvider):
    """Wall-clock TimeProvider backed by the `time` module."""

    def sleep(self, seconds):
        time.sleep(seconds)

    def get_time(self):
        return time.time()

    def advance_time_s(self, seconds, sleep_time=0.2, progress_callback=None):
        """Advance real time by `seconds`, compensating for time already
        spent in the current environment step.

        Fix: the original read `self.env.step_start` unconditionally and
        raised AttributeError whenever no parent env had been attached via
        set_parent_env(); with no env we now simply wait the full duration.
        """
        # NOTE(review): assumes env.step_start holds the wall-clock time at
        # which the current step began — confirm against the env class.
        if self.env is not None and self.env.step_start:
            elapsed = time.time() - self.env.step_start
            rel_advance = seconds - elapsed
            # If the step has already consumed >= `seconds`, nothing to wait.
            if rel_advance >= 0:
                super(RealTime, self).advance_time_s(
                    rel_advance, sleep_time, progress_callback)
        else:
            super(RealTime, self).advance_time_s(
                seconds, sleep_time, progress_callback)
class SimulatedTime(TimeProvider):
    """TimeProvider driven by a virtual tick counter instead of wall time."""

    def __init__(self):
        super().__init__()
        self.time = 0

    def sleep(self, seconds):
        # Each sleep call advances the simulated clock by exactly one tick;
        # the requested duration is ignored, as in the original implementation.
        self.time += 1

    def get_time(self):
        return self.time
| [
"leonelerick59@gmail.com"
] | leonelerick59@gmail.com |
a5f85460b290b63e1434daa2e7dc4a7d0178df63 | db56d3d14d85e2171c1d8be5564b227da544a056 | /Accounts/migrations/0002_auto_20170827_1753.py | 59f1f8ecd5adc531ef53b702ba5ef59775a871cc | [] | no_license | onkar27/BillDesk | cd039793f90916745399dfcbbef661b8cc42e9af | 0c0046c3fd54198b24042d52b3e206204560d02f | refs/heads/master | 2020-03-12T16:55:28.711387 | 2018-03-15T10:59:50 | 2018-03-15T10:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-27 12:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: runs after the app's initial migration
    # and drops the Authority_Admin / Authority_Customer fields from the
    # profile model.
    dependencies = [
        ('Accounts', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='Authority_Admin',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='Authority_Customer',
        ),
    ]
| [
"vvt5676@gmail.com"
] | vvt5676@gmail.com |
eaaaa0d8824cc670b4ad903fe69ed5ca32f2c0ab | c703b8ac3b5545857f6c95efa2d61eaf7a664021 | /iPERCore/tools/human_pose2d_estimators/openpose/dataset.py | 0bb0536d30befaa5bad1483400beaff9707f7918 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | iPERDance/iPERCore | d29681d229b3098b3517b1abf4f7ea65f579de73 | fcf9a18ffd66bf3fdd3eea4153a3bc4785131848 | refs/heads/main | 2023-07-30T15:04:15.835396 | 2023-04-12T14:21:23 | 2023-04-12T14:21:23 | 313,664,064 | 2,520 | 339 | Apache-2.0 | 2023-05-12T03:26:52 | 2020-11-17T15:36:25 | Python | UTF-8 | Python | false | false | 2,564 | py | # Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import numpy as np
import math
import cv2
import os
def normalize(img, img_mean, img_scale):
img = np.array(img, dtype=np.float32)
img = (img - img_mean) * img_scale
return img
def pad_width(img, stride, pad_value, min_dims):
    """Pad `img` with `pad_value` so each side reaches at least `min_dims`
    rounded up to a multiple of `stride` (`min_dims` is mutated in place,
    as in the original implementation).

    Returns (padded_img, [top, left, bottom, right]).
    """
    h, w, _ = img.shape
    h = min(min_dims[0], h)
    # Round the target dims up to the next multiple of `stride`.
    min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
    min_dims[1] = max(min_dims[1], w)
    min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
    # Split the extra space as evenly as possible around the image.
    top = int(math.floor((min_dims[0] - h) / 2.0))
    left = int(math.floor((min_dims[1] - w) / 2.0))
    bottom = int(min_dims[0] - h - top)
    right = int(min_dims[1] - w - left)
    pad = [top, left, bottom, right]
    padded_img = cv2.copyMakeBorder(img, top, bottom, left, right,
                                    cv2.BORDER_CONSTANT, value=pad_value)
    return padded_img, pad
def preprocess(img, net_input_height_size=368, stride=8,
               pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1/256):
    """Resize `img` to the network input height, normalize it, and pad it
    to a stride-aligned size.

    Returns a dict with keys:
        "img":   padded, normalized image
        "pad":   [top, left, bottom, right] padding applied
        "scale": resize factor applied to the original image
    """
    input_height = img.shape[0]
    scale = net_input_height_size / input_height
    resized = cv2.resize(img, (0, 0), fx=scale, fy=scale,
                         interpolation=cv2.INTER_CUBIC)
    resized = normalize(resized, img_mean, img_scale)
    min_dims = [net_input_height_size,
                max(resized.shape[1], net_input_height_size)]
    padded, pad = pad_width(resized, stride, pad_value, min_dims)
    return {
        "img": padded,
        "pad": pad,
        "scale": scale,
    }
class ImageFolderDataset(object):
    """Iterator yielding BGR images (via cv2.imread) from a directory.

    If `valid_names` is None, all entries of `root_dir` are used in sorted
    order; otherwise only the given file names are used, in the given order.
    """

    def __init__(self, root_dir, valid_names=None):
        if valid_names is None:
            img_names = os.listdir(root_dir)
            img_names.sort()
        else:
            img_names = valid_names
        self.root_dir = root_dir
        self.img_names = img_names
        self.file_paths = [os.path.join(root_dir, img_name) for img_name in img_names]
        self.max_idx = len(img_names)
        self.idx = 0

    def __iter__(self):
        self.idx = 0
        return self

    def __len__(self):
        return self.max_idx

    def __next__(self):
        if self.idx == self.max_idx:
            raise StopIteration
        path = self.file_paths[self.idx]
        img = cv2.imread(path, cv2.IMREAD_COLOR)
        # Fix: cv2.imread returns None when the file cannot be decoded, so
        # the original `img.size == 0` check raised AttributeError instead
        # of the intended IOError.
        if img is None or img.size == 0:
            raise IOError('Image {} cannot be read'.format(path))
        self.idx = self.idx + 1
        return img
| [
"liuwen@shanghaitech.edu.cn"
] | liuwen@shanghaitech.edu.cn |
b079035a6e58641ea9ea4e0a3a16fa016762411c | b705fef880e38b657bae00148b8f91d7e5816fc9 | /nets/net.py | a5e92dd209ba0ea658f3bac362f41882a6b654ce | [] | no_license | rscv5/tframe | 4d6b89ed5bcb2e7d4ded9d7af0f36cbe09431e63 | 5d694326482b3190a97c4ba8ea4c60e7bb6eb657 | refs/heads/master | 2022-11-21T22:47:35.641339 | 2020-07-28T02:56:13 | 2020-07-28T02:56:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,579 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tframe import console
from tframe.core import Function
from tframe import context
from tframe.layers.layer import Layer
from tframe.layers import Input
from tframe.utils import shape_string
import tframe.utils.format_string as fs
from tframe.utils.string_tools import merger
from tframe import pedia
from tframe import hub
from tframe.utils import stark
from tframe.core.decorators import with_graph_if_has
from tframe.core.slots import OutputSlot
class Net(Function):
"""Function which can packet sub-functions automatically when calling add
method"""
CASCADE = pedia.cascade
PROD = pedia.prod
SUM = pedia.sum
FORK = pedia.fork
CONCAT = pedia.concat
RECURRENT = 'RECURRENT'
def __init__(self, name, level=0, inter_type=pedia.cascade,
is_branch=False, **kwargs):
"""Instantiate Net, a name must be given
:param level: level 0 indicates the trunk
:param inter_type: \in {cascade, fork, sum, prod, concat}
"""
self.name = name
self._level = level
self._inter_type = inter_type
self.is_branch = is_branch
self.input_ = None
self._output_scale = None
self.children = []
self.branch_outputs = []
self.kwargs = kwargs
# Losses
self._extra_loss = None
# self._reg_loss = None
# Tensor extractor
self._tensor_extractors = []
self._output_slots = []
# region : Properties
@property
def var_list(self):
"""Should be used in with graph context"""
return [var for var in tf.trainable_variables()
if '{}'.format(self.name) == var.name.split('/')[self._level]]
@property
def weight_vars(self):
vars = []
for v in self.var_list:
assert isinstance(v, tf.Variable)
name = v.name.split('/')[-1]
if 'w' == name.lower()[0]: vars.append(v)
return vars
@property
def weight_list(self):
return [var for var in self.var_list if 'weights' in var.name]
@property
def params_num(self):
return stark.get_params_num(self.var_list, consider_prune=True)
@property
def group_name(self):
return self.name
@property
def last_function(self):
if len(self.children) == 0 or self._inter_type in (pedia.prod, pedia.sum):
return None
f = self.children[-1]
while isinstance(f, Net): f = f.last_function
return f
@property
def output_slots(self):
results = self._output_slots
for child in self.children:
if isinstance(child, Net): results += child.output_slots
assert isinstance(results, list)
return results
@property
def input_tensor(self):
if self.input_ is None: raise ValueError('!! Input not found')
return self.input_.place_holder
@property
def logits_tensor(self):
"""This property should be visited only by RNNs"""
tensors = list(context.logits_tensor_dict.values())
if len(tensors) == 0: return None
assert len(tensors) == 1
return tensors[0]
@property
def is_root(self):
return self._level == 0
@property
@with_graph_if_has
def structure_detail(self):
"""A list of structure strings with format
Layer (type) Output Shape Params #
Currently only work for sequential model
TODO: refactoring is needed
"""
from tframe.nets.rnet import RNet
from tframe.nets.customized_net import CustomizedNet
widths = [33, 24, 20]
indent = 3
rows = []
add_to_rows = lambda cols: rows.append(fs.table_row(cols, widths))
# Dense total will be used when model weights are pruned
total_params, dense_total = 0, 0
if self.is_root:
add_to_rows(['input', shape_string(self.input_.sample_shape), ''])
def get_num_string(num, dense_num):
if num == 0: num_str = ''
elif hub.prune_on or hub.etch_on:
num_str = '{} ({:.1f}%)'.format(num, 100.0 * num / dense_num)
else: num_str = str(num)
return num_str
for child in self.children:
if isinstance(child, Layer):
# Try to find variable in child
# TODO: to be fixed
# variables = [v for v in self.var_list if child.group_name in v.name]
variables = [
v for v in self.var_list
if child.group_name == v.name.split('/')[self._level + 1]]
num, dense_num = stark.get_params_num(variables, consider_prune=True)
# Generate a row
cols = [self._get_layer_string(child, True, True),
child.output_shape_str, get_num_string(num, dense_num)]
add_to_rows(cols)
elif isinstance(child, (RNet, CustomizedNet)):
num, dense_num = child.params_num
cols = [child.structure_string(), child.output_shape_str,
get_num_string(num, dense_num)]
add_to_rows(cols)
elif isinstance(child, Net):
_rows, num, dense_num = child.structure_detail
rows += _rows
else:
raise TypeError('!! unknown child type {}'.format(type(child)))
# Accumulate total_params and dense_total_params
total_params += num
dense_total += dense_num
# Check total params
assert total_params == sum([np.prod(v.shape) for v in self.var_list])
if self.is_root:
# Head
detail = ''
add_with_indent = lambda d, c: d + ' ' * indent + c + '\n'
width = sum(widths)
detail = add_with_indent(detail, '-' * width)
detail = add_with_indent(
detail, fs.table_row(['Layers', 'Output Shape', 'Params #'], widths))
detail = add_with_indent(detail, '=' * width)
# Content
for i, row in enumerate(rows):
if i > 0:
detail = add_with_indent(detail, '-' * width)
detail = add_with_indent(detail, row)
# Summary
detail = add_with_indent(detail, '=' * width)
detail = add_with_indent(
detail, 'Total params: {}'.format(
get_num_string(total_params, dense_total)))
detail += ' ' * indent + '-' * width
return detail, total_params, dense_total
else: return rows, total_params, dense_total
def _get_layer_string(self, f, scale, full_name=False):
assert isinstance(f, Layer)
return f.get_layer_string(scale, full_name)
def structure_string(self, detail=True, scale=True):
# Get functions to be added to structure string
assert isinstance(self.children, list)
fs = [f for f in self.children if isinstance(f, Net)
or detail or f.is_nucleus]
# Add input layer
result = ('' if self.input_ is None else 'input[{}] => '.format(
shape_string(self.input_.sample_shape)))
# Check interconnection type
next_net, next_layer = ' => ', ' -> '
if self._inter_type not in (pedia.cascade,
self.RECURRENT) or self.is_branch:
if self._inter_type in [pedia.sum, pedia.prod, pedia.concat]:
result += self._inter_type
if self.is_branch: result += 'branch'
else: next_layer, next_net = ', ', ', '
result += '('
# Add children
str_list, next_token = [], None
for f in fs:
if isinstance(f, Net):
if next_token is None: next_token = next_net
assert next_token == next_net
str_list.append(f.structure_string(detail, scale))
else:
assert isinstance(f, Layer)
if next_token is None: next_token = next_layer
assert next_token == next_layer
str_list.append(self._get_layer_string(f, scale))
str_list = merger(str_list)
result += next_token.join(str_list)
# Check is_branch flag
if self.is_branch:
result += ' -> output'
# Check interconnection type
if self._inter_type not in (pedia.cascade,
self.RECURRENT) or self.is_branch: result += ')'
# Add output scale
if self.is_root and not self._inter_type == pedia.fork:
result += ' => output[{}]'.format(self.output_shape_str)
# Return
return result
@property
def extra_loss(self):
"""When this property is accessed for the 1st time in model.build:
For RNN, self._extra_loss has already been calculated
For FNN, self._extra_loss is None, and needed to be calculated
"""
if self._extra_loss is None: self._extra_loss = self._get_extra_loss()
return self._extra_loss
@property
def layers(self):
"""A customized net is also took as layer"""
if len(self.children) == 0: return [self]
layers = []
for child in self.children:
if isinstance(child, Layer): layers.append(child)
else:
assert isinstance(child, Net)
layers += child.layers
return layers
# endregion : Properties
# region : Overrode Method
# TODO: modify with_logits mechanism
def _link(self, *inputs, **kwargs):
# region : Check inputs
if len(inputs) == 0 or inputs[0] is None:
input_ = self.input_() if self.input_ is not None else None
elif len(inputs) == 1: input_ = inputs[0]
else: raise SyntaxError('!! Too much inputs')
if input_ is not None and not isinstance(input_, tf.Tensor):
raise TypeError('!! input should be a Tensor')
# endregion : Check inputs
# Check children
assert isinstance(self.children, list)
# if len(self.children) == 0: raise ValueError('!! Net is empty')
pioneer = input_
output_list = []
output = None
# Link all functions in children
for f in self.children:
# Handle branches
if isinstance(f, Net) and f.is_branch:
self.branch_outputs.append(f(pioneer))
continue
# Call each child
output = f(pioneer)
if self.is_root and hub.export_activations:
context.monitor.register_tensor(
output, reduce_1st_dim=True, name='activation')
if self._inter_type == pedia.cascade: pioneer = output
else: output_list.append(output)
# Calculate output
if self._inter_type == self.FORK:
output = output_list
self.branch_outputs = output
elif self._inter_type == self.SUM:
output = tf.add_n(output_list)
elif self._inter_type == self.PROD:
output = output_list.pop()
for tensor in output_list: output *= tensor
elif self._inter_type == self.CONCAT:
output = tf.concat(output_list, axis=-1)
elif self._inter_type != self.CASCADE:
raise TypeError('!! Unknown net inter type {}'.format(self._inter_type))
# This will only happens when Net is empty
if output is None: output = input_
# Extract tensors to export
if self.is_root:
# Run customized extractor
for extractor in self._tensor_extractors:
assert callable(extractor)
extractor(self)
# Run build-in extractors
self.variable_extractor()
# Return
return output
# endregion : Overrode Methods
# region : Public Methods
def register_extractor(self, extractor):
"""Extractors will be used to extract tensors to export while linking"""
assert callable(extractor)
self._tensor_extractors.append(extractor)
def add_to_last_net(self, layer, only_cascade=False):
from tframe.nets.rnet import RNet
if len(self.children) == 0:
raise AssertionError('!! This net does not have children')
last_net = self.children[-1]
if isinstance(last_net, RNet) or (only_cascade and
last_net._inter_type != self.CASCADE):
last_net = self._add_new_subnet(layer)
assert isinstance(last_net, Net)
last_net.add(layer)
return last_net
def add_branch(self):
if not self.is_root: raise ValueError('Branches can only added to the root')
net = Net(name='branch', is_branch=True)
self.add(net)
return net
def add(self,
f=None,
inter_type=pedia.cascade,
return_net=False,
as_output=False,
output_name=None,
loss_identifier=None,
target_key=None,
loss_coef=1.0):
"""Add a net or a layer in to this model
:param f: \in (Net, Layer)
:param inter_type: inter-connection type
:return: f or f's container
"""
# If add an empty net
if f is None:
name = self._get_new_name(inter_type)
net = Net(name, level=self._level + 1, inter_type=inter_type)
self.children.append(net)
return net
# If add a function to this net
container = self
if isinstance(f, Input):
# If f is a placeholder
self.input_ = f
elif (isinstance(f, Net) or not self.is_root or
self._inter_type not in (self.CASCADE, self.RECURRENT)):
# Net should be added directly into self.children of any net
# Layer should be added directly into self.children for non-cascade nets
container = self._save_add(f)
elif isinstance(f, Layer):
# If layer is a nucleus or the 1st layer added into this Net
if f.is_nucleus or len(self.children) == 0:
self._add_new_subnet(f)
# Otherwise add this layer to last Net of self.children
container = self.add_to_last_net(f, only_cascade=True)
else: raise ValueError('!! Object added to a Net must be a Layer or a Net')
# Register output slot if necessary
def _handle_error_injection():
"""This is a compromise"""
if not as_output: return
assert self.is_root
self._output_slots.append(OutputSlot(
self, f, loss=loss_identifier, loss_coef=loss_coef, name=output_name,
target_key=target_key, last_only=False))
_handle_error_injection()
if return_net: return container
else: return f
# endregion : Public Methods
# region : Private Methods
def _save_add(self, f):
# TODO: avoid name scope conflict when add layers to non-cascade nets
name = self._get_new_name(f)
net = self
if isinstance(f, Layer): f.full_name = name
elif isinstance(f, Net):
f._level = self._level + 1
f.name = name
net = f
self.children.append(f)
return net
def _add_new_subnet(self, layer):
# Input f should be a layer
assert isinstance(layer, Layer)
# Specify the name of the Net
# if len(self.children) == 0: name = 'Preprocess'
if len(self.children) == 0 and not layer.is_nucleus: name = 'Preprocess'
else: name = self._get_new_name(layer.abbreviation)
# Wrap the layer into a new Net
return self.add(Net(name, level=self._level + 1), return_net=True)
def _get_new_name(self, entity):
if isinstance(entity, Net): name = entity.group_name
elif isinstance(entity, Layer): name = entity.full_name
else: name = entity
index = 1
get_name = lambda: '{}{}'.format(name, '' if index == 1 else index)
for f_ in self.children:
if isinstance(entity, Layer) and isinstance(f_, Layer):
if f_.full_name == get_name(): index += 1
elif f_.group_name == get_name(): index += 1
return get_name()
def _get_customized_loss(self, outer=False):
f = (context.customed_outer_loss_f_net if outer
else context.customed_loss_f_net)
if callable(f):
loss_list = f(self)
assert isinstance(loss_list, list)
return loss_list
else: return []
def _get_extra_loss(self):
loss_tensor_list = context.loss_tensor_list
assert isinstance(loss_tensor_list, list)
customized_loss = self._get_customized_loss()
if customized_loss:
loss_tensor_list += customized_loss
# Add regularizer losses
loss_tensor_list += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# Add loss tensor list
if loss_tensor_list:
result = tf.add_n(loss_tensor_list, 'extra_loss')
else: result = None
# Show loss list
if hub.show_extra_loss_info and loss_tensor_list:
console.show_info('Extra losses:')
for loss_tensor in loss_tensor_list:
assert isinstance(loss_tensor, tf.Tensor)
console.supplement(loss_tensor.name, level=2)
console.split()
return result
def _gen_injection_loss(self):
loss_tensors = []
for slot in self.output_slots:
assert isinstance(slot, OutputSlot)
# Do auto plug
loss_tensor = slot.auto_plug()
if loss_tensor is not None: loss_tensors.append(loss_tensor)
if not loss_tensors: return None
loss_tensor_sum = tf.add_n(loss_tensors, name='injection_loss')
console.show_status('{} loss injected'.format(len(loss_tensors)))
return loss_tensor_sum
# endregion: Private Methods
# region : Link tools
def _get_variable(self, name, shape, initializer=None):
if initializer is None:
initializer = getattr(
self, '_weight_initializer', tf.glorot_normal_initializer())
else:
assert callable(initializer)
return tf.get_variable(
name, shape, dtype=hub.dtype, initializer=initializer)
def _get_bias(self, name, dim, initializer=None):
if initializer is None:
initializer = getattr(self, '_bias_initializer', tf.zeros_initializer)
else:
assert callable(initializer)
return tf.get_variable(
name, shape=[dim], dtype=hub.dtype, initializer=initializer)
@staticmethod
def _get_shape_list(tensor):
assert isinstance(tensor, tf.Tensor)
return tensor.shape.as_list()
# endregion : Link tools
# region : Overrides
def __str__(self):
return self.structure_string()
def __repr__(self):
return self.structure_string()
# endregion : Overrides
# region : Build-in extractors
def variable_extractor(self):
""""""
get_key = lambda v: '/'.join(v.name.split('/')[1:])
def add_to_dict(v): context.variables_to_export[get_key(v)] = v
if hub.export_weights:
for v in self.weight_vars: add_to_dict(v)
# if hub.export_masked_weights and hub.pruning_rate_fc > 0:
if hub.export_masked_weights:
from tframe.operators.prune.pruner import Pruner
Pruner.extractor()
if hub.export_sparse_weights:
for v in context.sparse_weights_list:
assert isinstance(v, (tf.Tensor, tf.Variable))
# TODO: temporal solution to circumvent conflicts
if 'scan' in v.name.lower(): continue
add_to_dict(v)
# Register weights
self._register_weights_to_monitor()
def _register_weights_to_monitor(self):
"""<monitor_grad_step_01: register to monitor>"""
if not hub.monitor_weight_grads: return
monitor = context.monitor
# weights of type tf.Variable
monitor.register_weights(self.weight_vars)
# TODO: register masked_weights and sparse weights
# endregion : Build-in extractors
| [
"willi4m@zju.edu.cn"
] | willi4m@zju.edu.cn |
2d191c757142a720141d3a5d96c2126e29b3ef28 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2094/60737/239642.py | 0098c5af191cc1c3f4d216f01622a587b1ca1811 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | def is_number(s):
s.strip()
if len(s)<1:
return False
i = 0
numstr = list(s)
if (numstr[i] == '+') or (numstr[i] == '-'):
i += 1
pcount = 0
dcount = 0
while (numstr[i].isdigit() or numstr[i] == '.') and (i<len(numstr)-1):
if numstr[i] == '.':
pcount += 1
else:
dcount += 1
i += 1
if pcount>1 or dcount<1:
return False
if numstr[i] == 'e' and i<len(numstr):
dcount = 0
i += 1
if (numstr[i] == '+') or (numstr[i] == '-'):
i += 1
while numstr[i].isdigit() and i<len(numstr)-1:
dcount += 1
i += 1
if dcount<0:
return False
return numstr[i].isdigit()
if __name__ == "__main__":
    # Read one line from stdin and report whether it is a valid number.
    print(is_number(input()))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
adca9735678e580feace8d4d2e07ce4733269084 | 999879f8d18e041d7fa313132408b252aded47f8 | /01-codes/scipy-master/scipy/stats/mstats.py | 90fbe30640d5e1062c6ca2064baf7cdf5d159f3c | [
"MIT"
] | permissive | QPanProjects/Surrogate-Model | ebcaf05728e82dcbcd924c2edca1b490ab085173 | 848c7128201218b0819c9665e2cec72e3b1d29ac | refs/heads/master | 2022-10-11T19:03:55.224257 | 2020-06-09T14:37:35 | 2020-06-09T14:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | """
===================================================================
Statistical functions for masked arrays (:mod:`scipy.stats.mstats`)
===================================================================
.. currentmodule:: scipy.stats.mstats
This module contains a large number of statistical functions that can
be used with masked arrays.
Most of these functions are similar to those in scipy.stats but might
have small differences in the API or in the algorithm used. Since this
is a relatively new package, some API changes are still possible.
.. autosummary::
:toctree: generated/
argstoarray
betai
chisquare
count_tied_groups
describe
f_oneway
f_value_wilks_lambda
find_repeats
friedmanchisquare
kendalltau
kendalltau_seasonal
kruskalwallis
ks_twosamp
kurtosis
kurtosistest
linregress
mannwhitneyu
plotting_positions
mode
moment
mquantiles
msign
normaltest
obrientransform
pearsonr
   pointbiserialr
rankdata
scoreatpercentile
sem
signaltonoise
skew
skewtest
spearmanr
theilslopes
threshold
tmax
tmean
tmin
trim
trima
trimboth
trimmed_stde
trimr
trimtail
tsem
ttest_onesamp
ttest_ind
   ttest_rel
tvar
variation
winsorize
zmap
zscore
compare_medians_ms
gmean
hdmedian
hdquantiles
hdquantiles_sd
hmean
idealfourths
kruskal
ks_2samp
median_cihs
meppf
mjci
mquantiles_cimj
rsh
sen_seasonal_slopes
trimmed_mean
trimmed_mean_ci
trimmed_std
trimmed_var
ttest_1samp
"""
from __future__ import division, print_function, absolute_import
# Functions that support masked array input in stats but need to be kept in the
# mstats namespace for backwards compatibility:
| [
"quanpan302@hotmail.com"
] | quanpan302@hotmail.com |
16ddc0aaeea6e0a6e5a683e93612337aec6b10bb | 691e4890a070b18feb74e1bf817af8ebe9db342b | /V-Scrack/exp/payload/hikvision_default_password.py | d3453024c65e669d0aa9cde7fc9fc71dc845938c | [] | no_license | witchfindertr/Python-crack | 764af457b140fad4f28f259dc14c444d2445b287 | 88659e72e98d4cec0df52d2f5b4b9c8af35a39a6 | refs/heads/master | 2023-02-23T12:45:11.475706 | 2020-03-03T14:41:05 | 2020-03-03T14:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | # coding: utf-8
import sys
import requests
import warnings
def verify(protocol,ip,port):
    """Check whether a Hikvision device still accepts factory-default creds.

    Sends a GET to the ISAPI `userCheck` endpoint with HTTP Basic auth for
    the default account (the Authorization header is base64 of "admin:12345").

    Returns a 4-tuple: (vulnerable: bool, url, vuln_number, message).
    """
    url = protocol + '://' + ip + ':' + str(port)
    # Suppress urllib3 warnings triggered by verify=False below
    warnings.filterwarnings("ignore")
    print('testing if hikvision default password admin+12345 vul')
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Authorization": "Basic YWRtaW46MTIzNDU="
    }
    payload = '/ISAPI/Security/userCheck'
    vulnurl = url + payload
    try:
        req = requests.get(vulnurl, headers=headers, timeout=3, verify=False)
        # A 200 status value in the XML body means the credentials were accepted
        if r"<statusValue>200</statusValue>" in req.text:
            msg = 'There is hikvision default password vul on url: ' + url + ' with password : admin:12345 .'
            number = 'v108'
            return True, url, number, msg
        else:
            pass
    except Exception as e:
        # Network errors (timeout, refused, ...) are reported as "not vulnerable"
        msg = str(e)
        number = 'v0'
        return False, url,number,msg
    msg = 'There is no hikvision default password vul'
    number = 'v0'
    return False, url, number, msg
| [
"xianghgoog@gmail.com"
] | xianghgoog@gmail.com |
bb16ed90dc453ca90c077f239ce91588832f0a79 | 877eca8af51e43f7bd4c2766251152cd64846163 | /Disqus/templatetags/disqus.py | 560980b2a7aa0019a72f65a19d3842337ff305a0 | [] | no_license | dpitkevics/MyOpinion | 2a949aa5bf0c073b5b0cb564a1d7d827bd115375 | e81182c4a24d139501c82c430758e9e769b477b6 | refs/heads/master | 2020-12-24T17:18:09.334732 | 2015-05-05T14:16:47 | 2015-05-05T14:16:47 | 34,736,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py | from django.template import Library
from django.core.urlresolvers import reverse
from django.core.cache import get_cache
import time
from datetime import datetime
from Disqus import DisqusAPI, APIError
from MyOpinion import settings
register = Library()
cache = get_cache('default')
@register.assignment_tag()
def get_comments(link):
    """Fetch all Disqus posts for the thread identified by *link*."""
    api = DisqusAPI(settings.DISQUS_SECRET_KEY, settings.DISQUS_PUBLIC_KEY)
    return api.get(
        'threads.listPosts',
        forum=settings.DISQUS_FORUM_NAME,
        thread='link:%s' % link,
        method='get',
    )
@register.assignment_tag()
def get_comment_count(link):
    """Return the number of Disqus comments for *link*, cached for 5 minutes."""
    cache_key = 'disqus_comment_count_%s' % link
    count = cache.get(cache_key)
    if not count:
        try:
            count = len(get_comments(link))
        except APIError:
            # API failures are reported (and cached) as zero comments
            count = 0
        cache.set(cache_key, count, 300)
    return count
@register.assignment_tag()
def get_forum_url(request, slug):
    """Build the absolute URL for an opinion page from the current request."""
    path = reverse('Topics:view_opinion', kwargs={'slug': slug})
    return 'http://%s%s' % (request.get_host(), path)
@register.assignment_tag()
def get_latest_action(link):
    """Return the creation time of the newest comment on *link*.

    Returns None when the Disqus API fails or the thread has no comments.
    """
    try:
        posts_list = get_comments(link)
    except APIError:
        return None
    try:
        latest_post = posts_list[0]
    except IndexError:
        return None
    # Parse the timestamp directly with datetime.strptime instead of the
    # original time.strptime -> time.mktime -> datetime.fromtimestamp chain,
    # which was a lossy round-trip (mktime guesses DST for ambiguous local
    # times and can shift the result by an hour).
    return datetime.strptime(latest_post['createdAt'], '%Y-%m-%dT%H:%M:%S')
"daniels.pitkevics@gmail.com"
] | daniels.pitkevics@gmail.com |
0d8eefeb2c32ce6934de64e362e2edf8bb7317d9 | 637d2b471ab26a683cf67b259c58d2f9318a1bf2 | /McUtils/Jupyter/MoleculeGraphics.py | a627532b572d6f532022293d8715d70baebc0b3b | [
"MIT"
] | permissive | McCoyGroup/McUtils | 9c1c3befcef88d6094961e23a894efb4d97c84b1 | c7c3910e7cb5105c65b01ecb17a6668d126b2063 | refs/heads/master | 2023-08-18T16:24:17.718849 | 2023-08-11T23:10:21 | 2023-08-11T23:10:21 | 188,920,933 | 0 | 2 | MIT | 2022-12-15T19:17:02 | 2019-05-27T23:29:24 | Python | UTF-8 | Python | false | false | 9,179 | py |
import numpy as np
__all__ = [
"MoleculeGraphics"
]
class MoleculeGraphics:
    """Render a molecule (optionally animated along displacement vectors)
    through `nglview`, loading the backend lazily on first use."""

    def __init__(self,
                 atoms,
                 coords,
                 bonds=None,
                 displacements=None,
                 displacement_range=(-1, 1),
                 displacement_steps=5,
                 name='Molecule',  # 'My special little molecule I love it and do not hate secretely hate it',
                 program='Python',  # 'WTF-who-keeps-this-info-these-days',
                 comment="",  # "I have nothing to say to you",
                 metadata=None,
                 **params):
        # Instantiate the lazily-defined nglview-backed structure class
        self.obj = self._load_nglview()(
            atoms,
            coords,
            bonds=bonds,
            displacements=displacements,
            displacement_range=displacement_range,
            displacement_steps=displacement_steps,
            name=name,  # 'My special little molecule I love it and do not hate secretely hate it',
            program=program,  # 'WTF-who-keeps-this-info-these-days',
            comment=comment,  # "I have nothing to say to you",
            metadata=metadata,
            **params
        )
        self._widg = None  # cached widget; built on first to_widget() call

    def to_widget(self):
        # Build the NGL widget once and reuse it afterwards
        if self._widg is None:
            self._widg = self.obj.show()
        return self._widg

    def show(self):
        """Return the (cached) NGL widget for display."""
        return self.to_widget()

    def _ipython_display_(self):
        # Hook for IPython's rich-display machinery
        from .JHTML import JupyterAPIs
        JupyterAPIs().display_api.display(self.show())

    @classmethod
    def _load_nglview(cls):
        """Import `nglview` lazily and return a structure class bound to it.

        When `nglview` is missing, the returned class raises ImportError on
        construction instead of failing at module import time.
        """
        try:
            import nglview
        except ImportError:
            class MoleculeGraphics:
                def __init__(self, *args, **kwargs):
                    raise ImportError("{} requires `nglview`".format(type(self).__name__))
        else:
            class MoleculeGraphics(nglview.Structure, nglview.Trajectory):
                # Fixed tail of the SDF/MOL "counts" line (V2000 block)
                misc_useless_structural_data_header = " 0 0 0 0 0 0 0999 V2000"

                def __init__(self,
                             atoms,
                             coords,
                             bonds=None,
                             displacements=None,
                             displacement_range=(-1, 1),
                             displacement_steps=5,
                             name='Molecule',  # 'My special little molecule I love it and do not hate secretely hate it',
                             program='Python',  # 'WTF-who-keeps-this-info-these-days',
                             comment="",  # "I have nothing to say to you",
                             metadata=None,
                             **params
                             ):
                    super().__init__()
                    self.ext = "sdf"  # format tag consumed by nglview
                    self.params = params
                    self.atoms = atoms
                    self.coords = np.asanyarray(coords)
                    self.bonds = [] if bonds is None else bonds
                    self.name = name
                    self.program = program
                    self.comment = comment
                    self.meta = metadata
                    self.dips = displacements
                    if displacements is None:
                        self.scales = [0.]
                    else:
                        # Displacement scales swept forward then back (minus the
                        # endpoints) so the animation loops smoothly
                        base_scales = np.linspace(*displacement_range, displacement_steps)
                        self.scales = np.concatenate([
                            base_scales,
                            base_scales[-2:0:-1]
                        ])

                def convert_header(self, comment=None):
                    # Three-line SDF header: name / program / comment
                    return "\n".join([
                        self.name,
                        " " + self.program,
                        " " + self.comment + ("" if comment is None else comment)
                    ])

                def convert_counts_line(self):
                    # "<natoms><nbonds>" plus the fixed V2000 tail
                    return "{:>3.0f}{:>3.0f} {}".format(len(self.atoms), len(self.bonds),
                                                        self.misc_useless_structural_data_header)

                def convert_coordinate_block(self, coords):
                    # One SDF atom line per (coordinate, element) pair
                    return "\n".join(
                        " {0[0]:>9.5f} {0[1]:>9.5f} {0[2]:>9.5f} {1:<3} 0 0 0 0 0 0 0 0 0 0 0 0".format(
                            crd,
                            at
                        ) for crd, at in zip(coords, self.atoms)
                    )

                def convert_bond_block(self):
                    # SDF bond indices are 1-based; bond order defaults to 1
                    return "\n".join(
                        "{:>3.0f}{:>3.0f}{:>3.0f} 0 0 0 0".format(
                            b[0] + 1,
                            b[1] + 1,
                            b[2] if len(b) > 2 else 1
                        ) for b in self.bonds
                    )

                def get_single_structure_string(self, coords, comment=None):
                    # Assemble one complete SDF record terminated by "$$$$"
                    return """
{header}
{counts}
{atoms}
{bonds}
M END
{meta}
$$$$
""".format(
                        header=self.convert_header(comment=comment),
                        counts=self.convert_counts_line(),
                        atoms=self.convert_coordinate_block(coords),
                        bonds=self.convert_bond_block(),
                        meta="" if self.meta is None else self.meta
                    ).strip()

                def get_coordinates(self, index):
                    """Return the Cartesian coordinates of frame `index`.

                    Frames come either from a (nframes, natoms, 3) coordinate
                    stack or from scaling the displacement vectors.
                    """
                    if self.coords.ndim == 3:
                        return self.coords[index]
                    else:
                        # NOTE(review): when self.dips is None and index == 0 the
                        # arithmetic below would still touch dips — presumably
                        # never reached for single-frame substructures; confirm.
                        if self.dips is None and not isinstance(index, (int, np.integer)) and not index == 0:
                            raise ValueError("no Cartesian displacements passed...")
                        scales = self.scales[index]
                        arr = self.coords + self.dips * scales
                        return arr

                def get_substructure(self, idx):
                    """Return a frozen single-frame copy of this structure."""
                    import copy
                    new = copy.copy(self)
                    new.coords = self.get_coordinates(idx)
                    new.dips = None
                    new.scales = [0.]
                    new.comment += "<Structure {}>".format(idx)
                    return new

                def __getitem__(self, idx):
                    return self.get_substructure(idx)

                def __iter__(self):
                    for i in range(self.n_frames):
                        yield self.get_substructure(i)

                def get_structure_string(self):
                    # nglview API hook: full (possibly multi-frame) SDF text
                    if self.dips is None and self.coords.ndim == 2:
                        return self.get_single_structure_string(self.coords)
                    else:
                        return "\n".join(
                            self.get_single_structure_string(self.get_coordinates(i)) for i in range(self.n_frames)
                        )

                @property
                def n_frames(self):
                    # Number of animation frames (nglview Trajectory API)
                    if self.coords.ndim == 2:
                        return len(self.scales)
                    else:
                        return len(self.coords)

                # basically arguments to "add representation"
                default_theme = [
                    ['licorice'],
                    ['ball+stick', dict(selection='_H', aspect_ratio=2.5)],
                    ['ball+stick', dict(selection='not _H', aspect_ratio=3)]
                ]

                def show(self,
                         themes=None,
                         frame_size=('100%', 'vh'),
                         scale=1.2,
                         **opts):
                    """Basically a hack tower to make NGLView actually visualize molecules well"""
                    viewer = nglview.NGLWidget(self)
                    if themes is None:
                        themes = self.default_theme
                    if len(themes) > 0:
                        viewer.clear()
                        for t in themes:
                            if len(t) > 1:
                                viewer.add_representation(t[0], **t[1])
                            else:
                                viewer.add_representation(t[0])
                    # try:
                    #   player = viewer.player.widget_player
                    # except AttributeError:
                    #   pass
                    # else:
                    #   if player is not None:
                    #     # player = player.children[0]
                    #     player._playing = playing
                    #     player._repeat = repeat
                    viewer.display(**opts)
                    # this is a temporary hack to get a better window size
                    if frame_size is not None:
                        arg_str = [s if isinstance(s, str) else '{}px'.format(s) for s in frame_size]
                        viewer._remote_call(
                            "setSize",
                            target="Widget",
                            args=arg_str
                        )
                    if scale is not None:
                        viewer[0].set_scale(scale)
                    return viewer
        return MoleculeGraphics
"b3m2a1@gmail.com"
] | b3m2a1@gmail.com |
480549448a7b184241c21a165d42e3a8e0a0271a | e845f7f61ff76b3c0b8f4d8fd98f6192e48d542a | /djangocg/db/backends/postgresql_psycopg2/creation.py | 40279f3b84b86e2377c9140cbc52db5d64624a57 | [
"BSD-3-Clause"
] | permissive | timothyclemans/djangocg | fd150c028013cb5f53f5a3b4fdc960a07fdaaa78 | 52cf28e046523bceb5d436f8e6bf61e7d4ba6312 | refs/heads/master | 2021-01-18T13:20:13.636812 | 2012-08-31T23:38:14 | 2012-08-31T23:38:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,133 | py | import psycopg2.extensions
from djangocg.db.backends.creation import BaseDatabaseCreation
from djangocg.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
    """PostgreSQL (psycopg2) backend: test-database creation helpers."""

    # This dictionary maps Field objects to their associated PostgreSQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        'AutoField': 'serial',
        'BooleanField': 'boolean',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'timestamp with time zone',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'inet',
        'GenericIPAddressField': 'inet',
        'NullBooleanField': 'boolean',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
        'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
    }

    def sql_table_creation_suffix(self):
        # Optional "WITH ENCODING" clause appended to CREATE DATABASE
        assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
        if self.connection.settings_dict['TEST_CHARSET']:
            return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
        return ''

    def sql_indexes_for_field(self, model, f, style):
        """Return the CREATE INDEX statements for a single model field.

        Unique fields get their index implicitly from the constraint, so only
        non-unique db_index fields are handled here.
        """
        if f.db_index and not f.unique:
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table
            tablespace = f.db_tablespace or model._meta.db_tablespace
            if tablespace:
                tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
                if tablespace_sql:
                    tablespace_sql = ' ' + tablespace_sql
            else:
                tablespace_sql = ''

            def get_index_sql(index_name, opclass=''):
                # Build one CREATE INDEX statement (index name is truncated to
                # the backend's maximum identifier length)
                return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
                        style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
                        style.SQL_KEYWORD('ON') + ' ' +
                        style.SQL_TABLE(qn(db_table)) + ' ' +
                        "(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
                        "%s;" % tablespace_sql)

            output = [get_index_sql('%s_%s' % (db_table, f.column))]
            # Fields with database column types of `varchar` and `text` need
            # a second index that specifies their operator class, which is
            # needed when performing correct LIKE queries outside the
            # C locale. See #12234.
            db_type = f.db_type(connection=self.connection)
            if db_type.startswith('varchar'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' varchar_pattern_ops'))
            elif db_type.startswith('text'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' text_pattern_ops'))
        else:
            output = []
        return output

    def set_autocommit(self):
        # DDL on PostgreSQL must run outside a transaction
        self._prepare_for_test_db_ddl()

    def _prepare_for_test_db_ddl(self):
        """Rollback and close the active transaction."""
        self.connection.connection.rollback()
        self.connection.connection.set_isolation_level(
            psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
| [
"timothy.clemans@gmail.com"
] | timothy.clemans@gmail.com |
fe020d01e5f8b437325c08d1f4537a582e862ed4 | 029c1a47eca887e3ece82177287608d6f3649a9c | /lb/display.py | 0a46eda845c6dfa1644a182b9ce5327937f3be16 | [] | no_license | Eugeny/loop-bastard | 9bf4147d63b30c4b13793ae40d501cba95c8b714 | 78089e8cd67e9b517d6e3c7741453eaf5610af12 | refs/heads/master | 2021-02-26T12:04:38.102389 | 2020-04-12T16:41:40 | 2020-04-12T16:41:40 | 245,524,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,469 | py | import math
import os
import pygame
import pygame.font
import pygame.gfxdraw
import time
import threading
import sys
from lb.util import number_to_note
os.environ['SDL_VIDEO_CENTERED'] = '1'
def color_reduce(c):
    """Return *c* dimmed to half brightness (integer division per channel)."""
    half = lambda channel: channel // 2
    return (half(c[0]), half(c[1]), half(c[2]))
def draw_text_centered(surface, font, text, color, rect):
    """Render *text* centered within *rect* (x, y, w, h) onto *surface*."""
    text_w, text_h = font.size(text)
    surface.blit(
        font.render(
            text,
            True,
            color,
        ),
        # Offset so the text's center coincides with the rect's center
        (rect[0] + rect[2] // 2 - text_w // 2, rect[1] + rect[3] // 2 - text_h // 2, text_w, text_h),
    )
class Display(threading.Thread):
    def __init__(self, app):
        """Create the (daemon) display thread and hook MIDI activity events."""
        super().__init__(daemon=True)
        self.app = app
        # Activity flags drive the IN/OUT indicators in the status bar
        self.had_midi_in_activity = False
        self.had_midi_out_activity = False
        self.midi_in_channel_activity = [False] * 16
        # subscribe() payload is (port, message); only the message is needed
        self.app.input_manager.message.subscribe(lambda stuff: self._on_midi_in(stuff[1]))
        self.app.output_manager.message.subscribe(lambda _: self._on_midi_out())
def _on_midi_in(self, message):
self.had_midi_in_activity = True
if hasattr(message, 'channel'):
self.midi_in_channel_activity[message.channel] = True
    def _on_midi_out(self):
        # Latch outbound MIDI activity for the status-bar OUT indicator.
        self.had_midi_out_activity = True
def get_blink(self, type):
if type == 'beat':
a = self.app.tempo.get_position() % 1
return int(255 - a * 255)
if type == 'fast':
return 255 * (int(time.time() * 16) % 2)
    def _draw_list(self, surface, items=None, index=None, bg=None, fg=None):
        """Draw a 3-row scrolling list with `items[index]` highlighted.

        The selected item sits in the middle row; the rows above/below show
        its neighbors (dimmed), when they exist.
        """
        w, h = surface.get_size()
        item_count = 3
        selected_position = 1  # middle row holds the selection
        display_items = [None] * item_count
        for i in range(item_count):
            # Map visible row -> index into `items`; out-of-range rows stay None
            src_index = i + index - selected_position
            if src_index >= 0 and src_index < len(items):
                display_items[i] = items[src_index]
        item_h = h // item_count
        for i in range(item_count):
            if display_items[i]:
                if i == selected_position:
                    # Selected row: full-brightness background, inverted text
                    rect = (5, i * item_h, w - 20, item_h)
                    surface.fill(fg, rect)
                else:
                    rect = (10, i * item_h, w - 20, item_h)
                    surface.fill(color_reduce(fg), rect)
                draw_text_centered(
                    surface,
                    self.font,
                    display_items[i],
                    bg if i == selected_position else fg,
                    rect,
                )
    def draw_param_selector(self, surface):
        """Draw the parameter-group chooser for the current scope."""
        w, h = surface.get_size()
        bg = (32, 64, 128)
        fg = (64, 128, 255)
        param_group = self.app.current_param_group[self.app.current_scope]
        param_groups = self.app.scope_param_groups[self.app.current_scope]
        # pygame.draw.rect(
        #     surface,
        #     bg,
        #     (0, 0, w, h),
        # )
        # pygame.draw.rect(
        #     surface,
        #     fg,
        #     (0, 0, w, h),
        #     5,
        # )
        label_h = 20
        draw_text_centered(surface, self.font_sm, 'Parameters', fg, (0, h - label_h, w, label_h))
        # Scrolling list of group names with the current group highlighted
        self._draw_list(
            surface.subsurface((0, 5, w, h - label_h - 10)),
            items=[x.name for x in param_groups],
            index=param_groups.index(param_group),
            bg=(0, 0, 0), fg=fg,
        )
        # text_w = self.font.size(param.name)[0]
        # surface.blit(self.font.render(
        #     param.name,
        #     True,
        #     (255, 255, 255)
        # ), (w // 4 - text_w // 2, h - 40))
    def draw_param_body(self, surface, param, fg):
        """Draw the value editor for *param* according to its type.

        Supported types: 'list' (scrolling list), 'dial' (rotary gauge), and
        'midi-channel' (4x4 channel grid). Disabled params are dimmed.
        """
        w, h = surface.get_size()
        if not param.is_on():
            fg = color_reduce(fg)
        # pygame.draw.rect(
        #     surface,
        #     bg,
        #     (0, 0, w, h),
        # )
        # pygame.draw.rect(
        #     surface,
        #     fg,
        #     (0, 0, w, h),
        #     5,
        # )
        if param.type == 'list':
            self._draw_list(
                surface.subsurface((0, 5, w, h - 10)),
                items=[param.to_str(x) for x in param.options],
                index=param.options.index(param.get()),
                bg=(0, 0, 0), fg=fg,
            )
        if param.type == 'dial':
            # Current value as text below the dial
            text_w = self.font.size(param.to_str(param.get()))[0]
            surface.blit(self.font.render(
                param.to_str(param.get()),
                True,
                (255, 255, 255)
            ), (w // 2 - text_w // 2, h - 40))

            def index_to_angle(i):
                # Spread option ticks across a -1.5..+1.5 rad arc
                return -1.5 + 3 * i / (len(param.options) - 1)

            # Tick marks, one per option
            for i in range(0, len(param.options)):
                pygame.draw.line(
                    surface,
                    fg,
                    (w // 2 + 30 * math.sin(index_to_angle(i)), h // 2 - 30 * math.cos(index_to_angle(i))),
                    (w // 2 + 40 * math.sin(index_to_angle(i)), h // 2 - 40 * math.cos(index_to_angle(i))),
                    3,
                )
            # Needle pointing at the currently selected option
            option_index = param.options.index(param.get())
            pygame.draw.line(
                surface,
                (255, 255, 255),
                (w // 2 + 20 * math.sin(index_to_angle(option_index)), h // 2 - 20 * math.cos(index_to_angle(option_index))),
                (w // 2 + 40 * math.sin(index_to_angle(option_index)), h // 2 - 40 * math.cos(index_to_angle(option_index))),
                5,
            )
        if param.type == 'midi-channel':
            # 4x4 grid of the 16 MIDI channels; falsy value means "All"
            margin = 5
            box_w_out = (w - margin) // 4 - margin
            box_h_out = (h - margin) // 4 - margin
            for i in range(16):
                x = i % 4
                y = i // 4
                if i + 1 == param.get():
                    # Halo behind the selected channel's box
                    surface.fill(
                        fg,
                        rect=(
                            (margin + box_w_out) * x,
                            (margin + box_h_out) * y,
                            margin * 2 + box_w_out, margin * 2 + box_h_out
                        )
                    )
                # Box brightness reflects recent activity on that channel
                color = fg if self.midi_in_channel_activity[i] else color_reduce(fg)
                if not param.get():
                    color = (64, 64, 64)
                surface.fill(
                    color,
                    rect=(
                        margin + (margin + box_w_out) * x,
                        margin + (margin + box_h_out) * y,
                        box_w_out, box_h_out
                    )
                )
                color = (255, 255, 255) if self.midi_in_channel_activity[i] else fg
                if not param.get():
                    color = (0, 0, 0)
                text_w, text_h = self.font_sm.size(str(i + 1))
                surface.blit(self.font_sm.render(
                    str(i + 1),
                    True,
                    color
                ), (
                    margin + (margin + box_w_out) * x + box_w_out // 2 - text_w // 2,
                    margin + (margin + box_h_out) * y + box_h_out // 2 - text_h // 2,
                ))
            if not param.get():
                # Overlay "All" when no specific channel is selected
                text_w, text_h = self.font_lg.size('All')
                surface.blit(self.font_lg.render(
                    'All',
                    True,
                    fg,
                ), (
                    w // 2 - text_w // 2,
                    h // 2 - text_h // 2,
                ))
    def draw_param_value(self, surface, param, fg):
        """Draw one parameter: its name label at the bottom, editor above."""
        w, h = surface.get_size()
        text_w, text_h = self.font_sm.size(param.name)
        surface.blit(
            self.font_sm.render(
                param.name,
                True,
                fg,
            ),
            (w // 2 - text_w // 2, h - text_h - 2, w, text_h + 2),
        )
        # Remaining area above the label holds the type-specific editor
        self.draw_param_body(
            surface.subsurface(0, 0, w, h - text_h - 2),
            param, fg
        )
    def draw_status_bar(self, surface):
        """Draw the top status bar: IN/clock/BPM/beat boxes/OUT indicators."""
        # Thin separator line along the bottom edge
        surface.fill((128, 128, 128), rect=(5, surface.get_height() - 2, surface.get_width() - 10, 2))
        p = 0
        # MIDI-in indicator: white on activity, red when no input is present
        c = (255, 255, 255) if self.had_midi_in_activity else (128, 128, 128)
        if not self.app.input_manager.has_input():
            c = (255, 0, 0)
        surface.blit(self.font.render('IN', True, c), (p + 5, 5))
        p += 50
        # Clock
        t = 'EXT' if self.app.input_manager.active_clock else 'INT'
        c = (0, 255, 128) if self.app.input_manager.active_clock else (255, 128, 0)
        surface.blit(
            self.font.render(t, True, c),
            (p + 5, 5),
        )
        p += 60
        # BPM
        surface.blit(
            self.font.render(
                str(int(self.app.tempo.bpm)) + ' BPM',
                True,
                (128, 128, 128),
            ),
            (p + 5, 5),
        )
        p += 110
        # Beat display
        p += 5
        for i in range(self.app.tempo.bar_size):
            # Current beat of the bar is drawn filled (width 0), others outlined
            w = 0 if i == self.app.tempo.get_q()[2] - 1 else 2
            pygame.draw.rect(
                surface,
                (128, 128, 255),
                (p + 2, 10, 16, 16),
                w
            )
            p += 25
        p += 5
        # MIDI-out indicator: white on activity, red when no output is present
        c = (255, 255, 255) if self.had_midi_out_activity else (128, 128, 128)
        if not self.app.output_manager.has_output():
            c = (255, 0, 0)
        surface.blit(self.font.render('OUT', True, c), (surface.get_width() - 55, 5))
    def draw_bottom_bar(self, surface):
        """Draw the scope tabs (GLOB/SEQ/NOTE); the current one is inverted."""
        # Thin separator line along the top edge
        surface.fill((128, 128, 128), rect=(5, 0, surface.get_width() - 10, 2))
        p = 0
        for v, name in [('global', 'GLOB'), ('sequencer', 'SEQ'), ('note', 'NOTE')]:
            w = self.font.size(name)[0]
            if self.app.current_scope == v:
                # Active tab: white background, black label
                surface.fill((255, 255, 255), rect=(p, 0, w + 10, surface.get_height()))
                surface.blit(self.font.render(name, True, (0, 0, 0)), (p + 5, 5))
            else:
                surface.blit(self.font.render(name, True, (255, 255, 255)), (p + 5, 5))
            p += w + 10
    def draw_sequencer_icon(self, surface, sequencer, mini=False):
        """Draw one sequencer tile (selection/progress fill, number, status).

        *mini* renders the compact variant used inside the bank overview.
        """
        w, h = surface.get_size()
        if sequencer == self.app.selected_sequencer:
            # Selected sequencer gets a solid blue background
            pygame.draw.rect(
                surface,
                (32, 64, 128),
                (0, 0, w, h),
            )
        else:
            if sequencer.running:
                # Green fill drains top-down as the loop progresses
                fill_q = 1 - sequencer.get_position() / sequencer.get_length()
                pygame.draw.rect(
                    surface,
                    (32, 128, 64),
                    (0, h * (1 - fill_q), w, h * fill_q),
                )
        if not mini:
            border_color = (64, 128, 255) if sequencer == self.app.selected_sequencer else (64, 64, 64)
            pygame.draw.rect(
                surface,
                border_color,
                (0, 0, w, h),
                5,
            )
        # Sequencer number (1-based)
        text = str(self.app.sequencers.index(sequencer) + 1)
        if mini:
            draw_text_centered(surface, self.font_sm, text, (255, 255, 255), (0, 0, w, h))
        else:
            draw_text_centered(surface, self.font_lg, text, (255, 255, 255), (0, 10, w, 40))
            if not self.app.sequencer_is_empty[sequencer]:
                # Show the output channel for non-empty sequencers
                draw_text_centered(
                    surface, self.font_sm,
                    f'Ch. {sequencer.output_channel}',
                    (255, 255, 255), (0, 50, w, 20)
                )
        # Play glyph: dim base plus a blinking overlay per transport state
        self.img_play_sm.set_alpha(64)
        x, y = (w // 2 - 16, h - 44) if not mini else (w // 2 - 16, 0)
        surface.blit(self.img_play_sm, (x, y))
        if sequencer.running:
            self.img_play_sm_active.set_alpha(self.get_blink('beat'))
            surface.blit(self.img_play_sm_active, (x, y))
        if sequencer.start_scheduled:
            self.img_play_sm_active.set_alpha(self.get_blink('fast'))
            surface.blit(self.img_play_sm_active, (x, y))
        if sequencer.stop_scheduled:
            self.img_play_sm_stopping.set_alpha(self.get_blink('fast'))
            surface.blit(self.img_play_sm_stopping, (x, y))
    def draw_sequencer(self, surface, sequencer):
        """Draw the main sequencer view: left transport column + note body."""
        w, h = surface.get_size()
        toolbar_size = 64
        # Note/piano-roll area to the right of the transport icons
        self.draw_sequencer_body(
            surface.subsurface((toolbar_size, 0, w - toolbar_size, h)),
            sequencer
        )
        # Play icon: dim base with a blinking state overlay
        self.img_play.set_alpha(64)
        surface.blit(self.img_play, (0, 0))
        if not sequencer.recording:
            if sequencer.start_scheduled:
                self.img_play_active.set_alpha(self.get_blink('fast'))
                surface.blit(self.img_play_active, (0, 0))
            elif sequencer.stop_scheduled:
                self.img_play_stopping.set_alpha(self.get_blink('fast'))
                surface.blit(self.img_play_stopping, (0, 0))
            elif sequencer.running:
                self.img_play_active.set_alpha(self.get_blink('beat'))
                surface.blit(self.img_play_active, (0, 0))
        # Record icon below the play icon
        self.img_record.set_alpha(64)
        surface.blit(self.img_record, (0, 64))
        if sequencer.recording:
            a = self.get_blink('beat')
            if sequencer.start_scheduled:
                a = self.get_blink('fast')
            self.img_record_active.set_alpha(a)
            surface.blit(self.img_record_active, (0, 64))
            # Pulsing red frame around the whole view while recording
            a = self.get_blink('beat')
            pygame.draw.rect(
                surface, (a, 0, 0), (0, 0, w, h), 4
            )
    def draw_sequencer_body(self, surface, sequencer):
        """Draw the piano-roll body: beat grid, quantize ticks, notes, playhead."""
        def pos_to_x(p):
            # Map a musical position (beats) to an x pixel coordinate
            return surface.get_width() * p / sequencer.get_length()
        # Vertical beat grid; every 4th beat is brighter
        for i in range(0, sequencer.bars * self.app.tempo.bar_size):
            color = (50, 50, 100) if (i % 4 == 0) else (30, 30, 30)
            surface.fill(color, rect=(
                pos_to_x(i),
                0,
                1,
                surface.get_height(),
            ))
        if sequencer.quantizer_filter.divisor:
            # Orange tick marks along the top at each quantization step
            q_pos = 4 / sequencer.quantizer_filter.divisor
            q_color = (255, 128, 0)
            for i in range(0, int(sequencer.get_length() / q_pos)):
                surface.fill(q_color, (pos_to_x(q_pos * i), 0, 2, 5))
        with sequencer.lock:
            # Distinct pitches present (recorded + currently held) define the rows
            dif_notes = [x.message.note for x in sequencer.filtered_events]
            dif_notes += [x.message.note for x in sequencer.currently_recording_notes.values()]
            dif_notes = sorted(set(dif_notes))
            if len(dif_notes):
                note_h = surface.get_height() / max(10, len(dif_notes))
                # Row y for each pitch; lowest note at the bottom
                notes_y = {note: surface.get_height() - (idx + 1) * surface.get_height() / len(dif_notes) for idx, note in enumerate(dif_notes)}

                def draw_note(event, x, w):
                    # Velocity controls the red/blue blend of the note color
                    c = event.message.velocity / 128
                    color = (50 + c * 180, 50, 220 - c * 180)
                    text_color = (
                        min(int(color[0] * 1.5), 255),
                        min(int(color[1] * 1.5), 255),
                        min(int(color[2] * 1.5), 255),
                    )
                    note_rect = (x, notes_y[event.message.note], w, note_h)
                    if getattr(event, 'source_event', event) == self.app.selected_event:
                        # Blinking orange halo around the selected note
                        pygame.draw.rect(
                            surface,
                            (self.get_blink('fast'), self.get_blink('fast') // 2, 0),
                            (
                                note_rect[0] - 5,
                                note_rect[1] - 5,
                                note_rect[2] + 10,
                                note_rect[3] + 10,
                            ),
                        )
                    pygame.draw.rect(
                        surface,
                        color,
                        note_rect,
                    )
                    pygame.draw.rect(
                        surface,
                        color_reduce(color),
                        pygame.Rect(note_rect).inflate(-2, -2),
                    )
                    # Note-name label, only when it fits inside the rectangle
                    name, o = number_to_note(event.message.note)
                    text = f'{name} {o}'
                    if x >= 0:
                        text_w, text_h = self.font_xs.size(text)
                        if note_rect[2] > text_w + 5 and note_rect[3] > text_h + 5:
                            surface.blit(
                                self.font_xs.render(
                                    text,
                                    True,
                                    text_color,
                                ),
                                (x + 5, notes_y[event.message.note] + 5, w, note_h),
                            )
                # Pair note_on/note_off events into (start_event, length) tuples
                m = {}
                notes = []
                remaining_events = sequencer.filtered_events[:]
                for event in remaining_events[:]:
                    if event.message.type == 'note_on':
                        m[event.message.note] = event
                        remaining_events.remove(event)
                    if event.message.type == 'note_off':
                        if event.message.note in m:
                            notes.append((m[event.message.note], event.position - m[event.message.note].position))
                            remaining_events.remove(event)
                            del m[event.message.note]
                # Leftover note_offs close notes that wrap past the loop end
                for event in remaining_events:
                    if event.message.type == 'note_off':
                        if event.message.note in m:
                            notes.append((m[event.message.note], event.position + sequencer.get_length() - m[event.message.note].position))
                            del m[event.message.note]
                # Notes still being held during recording extend to the playhead
                for event in sequencer.currently_recording_notes.values():
                    length = sequencer.get_position() - event.position
                    length = sequencer.normalize_position(length)
                    notes.append((event, length))
                for (event, length) in notes:
                    draw_note(
                        event,
                        pos_to_x(event.position),
                        pos_to_x(length),
                    )
                    if event.position + length > sequencer.get_length():
                        # Draw the wrapped-around tail at the loop start
                        draw_note(
                            event,
                            pos_to_x(event.position - sequencer.get_length()),
                            pos_to_x(length),
                        )
        # Time indicator
        surface.fill(
            (255, 255, 255),
            (pos_to_x(sequencer.get_position()), 0, 1, surface.get_height())
        )
    def draw_sequencer_bank(self, surface, bank_index):
        """Draw one bank row: label, bank number, and its mini sequencer tiles."""
        w, h = surface.get_size()
        if bank_index == self.app.selected_sequencer_bank:
            # Highlight the currently selected bank
            pygame.draw.rect(
                surface,
                (32, 64, 128),
                (0, 0, w, h),
            )
            # pygame.draw.rect(
            #     surface,
            #     (64, 128, 255),
            #     (0, 0, w, h),
            #     5,
            # )
        surface.blit(self.font_sm.render(
            'Bank',
            True,
            (255, 255, 255)
        ), (10, 3))
        surface.blit(self.font.render(
            str(bank_index + 1),
            True,
            (255, 255, 255)
        ), (50, 0))
        # One mini icon per sequencer slot in this bank
        for i in range(self.app.sequencer_bank_size):
            header_w = 100
            icon_w = (w - header_w) // 4 - 10
            self.draw_sequencer_icon(
                surface.subsurface((
                    header_w + (icon_w + 10) * i, 0,
                    icon_w, h,
                )),
                self.app.sequencers[bank_index * self.app.sequencer_bank_size + i],
                mini=True
            )
    def draw_sequencer_banks(self, surface):
        """Draw the full bank overview: all banks stacked vertically."""
        w, h = surface.get_size()
        spacing = 3
        # Clear the whole overview area first
        pygame.draw.rect(
            surface,
            (0, 0, 0),
            (0, 0, w, h),
        )
        for bank_index in range(self.app.sequencer_banks):
            self.draw_sequencer_bank(
                surface.subsurface((spacing, (h + spacing) // 4 * bank_index, w - spacing * 2, (h + spacing) // 4 - spacing)),
                bank_index
            )
    def run(self):
        """Main UI loop: initialise pygame, load fonts/images, then redraw and
        poll input at roughly 30 frames per second until quit."""
        pygame.init()
        pygame.mouse.set_visible(0)
        self.screen = pygame.display.set_mode((800, 400))
        # Font ramp: extra-small labels up to large headline values.
        self.font_xs = pygame.font.Font('bryant.ttf', 10)
        self.font_sm = pygame.font.Font('bryant.ttf', 14)
        self.font = pygame.font.Font('bryant.ttf', 24)
        self.font_lg = pygame.font.Font('bryant.ttf', 36)
        # Transport icons.  The tinted copies are produced with BLEND_MULT,
        # which multiplies the source art by the given colour:
        # green = active, red = stopping/recording.
        self.img_play = pygame.image.load('images/play.png')
        self.img_play_active = self.img_play.copy()
        self.img_play_active.fill((0, 255, 64), special_flags=pygame.BLEND_MULT)
        self.img_play_stopping = self.img_play.copy()
        self.img_play_stopping.fill((255, 0, 64), special_flags=pygame.BLEND_MULT)
        self.img_record = pygame.image.load('images/record.png')
        self.img_record_active = self.img_record.copy()
        self.img_record_active.fill((255, 0, 64), special_flags=pygame.BLEND_MULT)
        self.img_play_sm = pygame.image.load('images/play-sm.png')
        self.img_play_sm_active = self.img_play_sm.copy()
        self.img_play_sm_active.fill((0, 255, 64), special_flags=pygame.BLEND_MULT)
        self.img_play_sm_stopping = self.img_play_sm.copy()
        self.img_play_sm_stopping.fill((255, 0, 64), special_flags=pygame.BLEND_MULT)
        # Fixed screen layout (pixels): status bar on top, icon/param bar,
        # then the sequencer grid.
        status_bar_h = 40
        v_spacer = 10
        top_bar_h = 120
        seq_h = 220
        while True:
            self.screen.fill((0, 0, 20))
            self.draw_status_bar(
                self.screen.subsurface((0, 0, self.screen.get_width(), status_bar_h)),
            )
            # self.draw_bottom_bar(
            #     self.screen.subsurface((0, self.screen.get_height() - 40, self.screen.get_width(), 40)),
            # )
            # Main sequencer grid for the currently selected sequencer.
            self.draw_sequencer(
                self.screen.subsurface((
                    0, status_bar_h + v_spacer * 2 + top_bar_h,
                    self.screen.get_width(),
                    seq_h,
                )),
                self.app.selected_sequencer,
            )
            # One icon per sequencer in the selected bank, 70px apart.
            for i in range(self.app.sequencer_bank_size):
                s_index = self.app.sequencer_bank_size * self.app.selected_sequencer_bank + i
                s = self.app.sequencers[s_index]
                self.draw_sequencer_icon(
                    self.screen.subsurface((10 + 70 * i, status_bar_h + v_spacer, 60, top_bar_h)),
                    s
                )
            # Parameter selector plus up to two parameter value widgets,
            # right-aligned in the top bar.
            self.draw_param_selector(
                self.screen.subsurface((
                    self.screen.get_width() - 10 - 430,
                    status_bar_h + v_spacer, 170, top_bar_h
                ))
            )
            param_group = self.app.current_param_group[self.app.current_scope]
            if param_group.param1:
                self.draw_param_value(
                    self.screen.subsurface((
                        self.screen.get_width() - 10 - 250,
                        status_bar_h + v_spacer, 120, top_bar_h)
                    ),
                    param_group.param1,
                    (255, 128, 64),
                )
            if param_group.param2:
                self.draw_param_value(
                    self.screen.subsurface((
                        self.screen.get_width() - 10 - 120,
                        status_bar_h + v_spacer, 120, top_bar_h)
                    ),
                    param_group.param2,
                    (255, 64, 128),
                )
            # Holding shift overlays the bank chooser on top of the icons.
            if self.app.controls.shift_button.pressed:
                self.draw_sequencer_banks(
                    self.screen.subsurface((0, status_bar_h + v_spacer, 70 * 4, top_bar_h)),
                )
            pygame.display.flip()
            # Reset per-frame activity indicators after they have been drawn.
            self.had_play_activity = False
            self.had_midi_out_activity = False
            self.midi_in_channel_activity = [False] * 16
            try:
                # Throttle to ~30 FPS, then dispatch pending input events.
                time.sleep(1 / 30)
                for event in pygame.event.get():
                    self.app.controls.process_event(event)
                    if event.type == pygame.QUIT:
                        sys.exit()
            except KeyboardInterrupt:
                sys.exit(0)
| [
"john.pankov@gmail.com"
] | john.pankov@gmail.com |
20e848ad66bc4ecfd92b62c50a9ccdb374c6066c | 3e5487663e2aeb98972347cdbdac282617f601f8 | /releases/tests.py | d8e58602ea87d47161ce3386ca4f0be25f82207a | [] | no_license | areski/djangoproject.com | 58b652ecaea6e5ea130e226d164a111bb563e2a5 | 60a94e4a0dddc15d38bd7fd6db8568be8747c192 | refs/heads/master | 2021-01-18T11:03:49.893412 | 2014-08-05T14:03:20 | 2014-08-05T14:03:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | from __future__ import absolute_import, unicode_literals
from django.contrib.redirects.models import Redirect
from django.test import TestCase
from .models import create_releases_up_to_1_5
class LegacyURLsTests(TestCase):
    """Verify the releases app reproduces the legacy download redirects."""

    fixtures = ['redirects-downloads']  # provided by the legacy app

    def test_legacy_redirects(self):
        # Save list of redirects, then wipe them so any redirect that still
        # works must come from the releases app, not the redirects table.
        redirects = list(Redirect.objects.values_list('old_path', 'new_path'))
        Redirect.objects.all().delete()
        # Ensure the releases app faithfully reproduces the redirects.
        create_releases_up_to_1_5()
        for old_path, new_path in redirects:
            response = self.client.get(old_path, follow=False)
            location = response.get('Location', '')
            # Older Django versions return absolute redirect URLs; strip the
            # test host so the comparison is path-to-path.
            if location.startswith('http://testserver'):
                location = location[len('http://testserver'):]
            # assertEqual, not the deprecated assertEquals alias.
            self.assertEqual(location, new_path)
            self.assertEqual(response.status_code, 301)
| [
"aymeric.augustin@m4x.org"
] | aymeric.augustin@m4x.org |
570ec3cd5bad80c562187222d7de713a20453bd6 | 1f5420fda4359bfc21b53de3a5f6e6a93b47b996 | /ch06/ch6_602.py | f51eb0830ff133aac9bec233c9abb451e7456f50 | [] | no_license | fl0wjacky/wxPython | 600f5bfccad3ef5589e11573b30cffd1e2708b83 | 50b3cd5a63750d36065684b73aab0da70ff650a7 | refs/heads/master | 2022-09-02T04:24:47.540157 | 2022-08-10T04:13:17 | 2022-08-10T04:13:17 | 13,976,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
import wx
from ch06_SketchWindow import SketchWindow
class SketchFrame(wx.Frame):
def __init__(self,parent):
wx.Frame.__init__(self,parent,-1,"Sketch Frame",size=(800,600))
self.sketch = SketchWindow(self,-1)
self.sketch.Bind(wx.EVT_MOTION,self.OnSketchMotion)
self.statusbar = self.CreateStatusBar()
self.statusbar.SetFieldsCount(3)
self.statusbar.SetStatusWidths([-1,-2,-3])
def OnSketchMotion(self,event):
self.statusbar.SetStatusText(str(event.GetPositionTuple()))
event.Skip()
self.statusbar.SetStatusText("Current Pts:%s" % len(self.sketch.curLine),1)
self.statusbar.SetStatusText("Line Count:%s" % len(self.sketch.lines),2)
class App(wx.App):
    """Application object that boots a single SketchFrame on startup."""

    def OnInit(self):
        # Build and display the main window; True tells wx startup succeeded.
        frame = SketchFrame(None)
        self.frame = frame
        frame.Show(True)
        return True
# Start the wx event loop when this module is executed as a script.
if __name__ == '__main__':
    app = App()
    app.MainLoop()
| [
"flowjacky@gmail.com"
] | flowjacky@gmail.com |
526e86ea2e6c5d4a2cbbde0385733b10715f7286 | 6364bb727b623f06f6998941299c49e7fcb1d437 | /msgraph-cli-extensions/src/bookings/azext_bookings/vendored_sdks/bookings/models/_bookings_enums.py | f3e4eb3e09f82254c59c047e3c1ef2715d2e5e7d | [
"MIT"
] | permissive | kanakanaidu/msgraph-cli | 1d6cd640f4e10f4bdf476d44d12a7c48987b1a97 | b3b87f40148fb691a4c331f523ca91f8a5cc9224 | refs/heads/main | 2022-12-25T08:08:26.716914 | 2020-09-23T14:29:13 | 2020-09-23T14:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,833 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class Enum13(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-appointment sort keys ('field' / 'field desc' pairs) — generated code; presumably OData $orderby values (confirm against the Graph spec)."""

    ID = "id"
    ID_DESC = "id desc"
    SELF_SERVICE_APPOINTMENT_ID = "selfServiceAppointmentId"
    SELF_SERVICE_APPOINTMENT_ID_DESC = "selfServiceAppointmentId desc"
    CUSTOMER_ID = "customerId"
    CUSTOMER_ID_DESC = "customerId desc"
    CUSTOMER_NAME = "customerName"
    CUSTOMER_NAME_DESC = "customerName desc"
    CUSTOMER_EMAIL_ADDRESS = "customerEmailAddress"
    CUSTOMER_EMAIL_ADDRESS_DESC = "customerEmailAddress desc"
    CUSTOMER_PHONE = "customerPhone"
    CUSTOMER_PHONE_DESC = "customerPhone desc"
    CUSTOMER_LOCATION = "customerLocation"
    CUSTOMER_LOCATION_DESC = "customerLocation desc"
    CUSTOMER_NOTES = "customerNotes"
    CUSTOMER_NOTES_DESC = "customerNotes desc"
    SERVICE_ID = "serviceId"
    SERVICE_ID_DESC = "serviceId desc"
    SERVICE_NAME = "serviceName"
    SERVICE_NAME_DESC = "serviceName desc"
    START = "start"
    START_DESC = "start desc"
    END = "end"
    END_DESC = "end desc"
    DURATION = "duration"
    DURATION_DESC = "duration desc"
    PRE_BUFFER = "preBuffer"
    PRE_BUFFER_DESC = "preBuffer desc"
    POST_BUFFER = "postBuffer"
    POST_BUFFER_DESC = "postBuffer desc"
    SERVICE_LOCATION = "serviceLocation"
    SERVICE_LOCATION_DESC = "serviceLocation desc"
    PRICE_TYPE = "priceType"
    PRICE_TYPE_DESC = "priceType desc"
    PRICE = "price"
    PRICE_DESC = "price desc"
    SERVICE_NOTES = "serviceNotes"
    SERVICE_NOTES_DESC = "serviceNotes desc"
    REMINDERS = "reminders"
    REMINDERS_DESC = "reminders desc"
    OPT_OUT_OF_CUSTOMER_EMAIL = "optOutOfCustomerEmail"
    OPT_OUT_OF_CUSTOMER_EMAIL_DESC = "optOutOfCustomerEmail desc"
    STAFF_MEMBER_IDS = "staffMemberIds"
    STAFF_MEMBER_IDS_DESC = "staffMemberIds desc"
    INVOICE_AMOUNT = "invoiceAmount"
    INVOICE_AMOUNT_DESC = "invoiceAmount desc"
    INVOICE_DATE = "invoiceDate"
    INVOICE_DATE_DESC = "invoiceDate desc"
    INVOICE_ID = "invoiceId"
    INVOICE_ID_DESC = "invoiceId desc"
    INVOICE_STATUS = "invoiceStatus"
    INVOICE_STATUS_DESC = "invoiceStatus desc"
    INVOICE_URL = "invoiceUrl"
    INVOICE_URL_DESC = "invoiceUrl desc"
class Enum14(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-appointment field names — generated code; presumably OData $select values (confirm against the Graph spec)."""

    ID = "id"
    SELF_SERVICE_APPOINTMENT_ID = "selfServiceAppointmentId"
    CUSTOMER_ID = "customerId"
    CUSTOMER_NAME = "customerName"
    CUSTOMER_EMAIL_ADDRESS = "customerEmailAddress"
    CUSTOMER_PHONE = "customerPhone"
    CUSTOMER_LOCATION = "customerLocation"
    CUSTOMER_NOTES = "customerNotes"
    SERVICE_ID = "serviceId"
    SERVICE_NAME = "serviceName"
    START = "start"
    END = "end"
    DURATION = "duration"
    PRE_BUFFER = "preBuffer"
    POST_BUFFER = "postBuffer"
    SERVICE_LOCATION = "serviceLocation"
    PRICE_TYPE = "priceType"
    PRICE = "price"
    SERVICE_NOTES = "serviceNotes"
    REMINDERS = "reminders"
    OPT_OUT_OF_CUSTOMER_EMAIL = "optOutOfCustomerEmail"
    STAFF_MEMBER_IDS = "staffMemberIds"
    INVOICE_AMOUNT = "invoiceAmount"
    INVOICE_DATE = "invoiceDate"
    INVOICE_ID = "invoiceId"
    INVOICE_STATUS = "invoiceStatus"
    INVOICE_URL = "invoiceUrl"
class Enum15(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-appointment field names — generated code; presumably OData $expand/$select values (confirm against the Graph spec)."""

    ID = "id"
    SELF_SERVICE_APPOINTMENT_ID = "selfServiceAppointmentId"
    CUSTOMER_ID = "customerId"
    CUSTOMER_NAME = "customerName"
    CUSTOMER_EMAIL_ADDRESS = "customerEmailAddress"
    CUSTOMER_PHONE = "customerPhone"
    CUSTOMER_LOCATION = "customerLocation"
    CUSTOMER_NOTES = "customerNotes"
    SERVICE_ID = "serviceId"
    SERVICE_NAME = "serviceName"
    START = "start"
    END = "end"
    DURATION = "duration"
    PRE_BUFFER = "preBuffer"
    POST_BUFFER = "postBuffer"
    SERVICE_LOCATION = "serviceLocation"
    PRICE_TYPE = "priceType"
    PRICE = "price"
    SERVICE_NOTES = "serviceNotes"
    REMINDERS = "reminders"
    OPT_OUT_OF_CUSTOMER_EMAIL = "optOutOfCustomerEmail"
    STAFF_MEMBER_IDS = "staffMemberIds"
    INVOICE_AMOUNT = "invoiceAmount"
    INVOICE_DATE = "invoiceDate"
    INVOICE_ID = "invoiceId"
    INVOICE_STATUS = "invoiceStatus"
    INVOICE_URL = "invoiceUrl"
class Enum16(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-appointment sort keys ('field' / 'field desc' pairs) for the calendar view — generated code; presumably OData $orderby values (confirm)."""

    ID = "id"
    ID_DESC = "id desc"
    SELF_SERVICE_APPOINTMENT_ID = "selfServiceAppointmentId"
    SELF_SERVICE_APPOINTMENT_ID_DESC = "selfServiceAppointmentId desc"
    CUSTOMER_ID = "customerId"
    CUSTOMER_ID_DESC = "customerId desc"
    CUSTOMER_NAME = "customerName"
    CUSTOMER_NAME_DESC = "customerName desc"
    CUSTOMER_EMAIL_ADDRESS = "customerEmailAddress"
    CUSTOMER_EMAIL_ADDRESS_DESC = "customerEmailAddress desc"
    CUSTOMER_PHONE = "customerPhone"
    CUSTOMER_PHONE_DESC = "customerPhone desc"
    CUSTOMER_LOCATION = "customerLocation"
    CUSTOMER_LOCATION_DESC = "customerLocation desc"
    CUSTOMER_NOTES = "customerNotes"
    CUSTOMER_NOTES_DESC = "customerNotes desc"
    SERVICE_ID = "serviceId"
    SERVICE_ID_DESC = "serviceId desc"
    SERVICE_NAME = "serviceName"
    SERVICE_NAME_DESC = "serviceName desc"
    START = "start"
    START_DESC = "start desc"
    END = "end"
    END_DESC = "end desc"
    DURATION = "duration"
    DURATION_DESC = "duration desc"
    PRE_BUFFER = "preBuffer"
    PRE_BUFFER_DESC = "preBuffer desc"
    POST_BUFFER = "postBuffer"
    POST_BUFFER_DESC = "postBuffer desc"
    SERVICE_LOCATION = "serviceLocation"
    SERVICE_LOCATION_DESC = "serviceLocation desc"
    PRICE_TYPE = "priceType"
    PRICE_TYPE_DESC = "priceType desc"
    PRICE = "price"
    PRICE_DESC = "price desc"
    SERVICE_NOTES = "serviceNotes"
    SERVICE_NOTES_DESC = "serviceNotes desc"
    REMINDERS = "reminders"
    REMINDERS_DESC = "reminders desc"
    OPT_OUT_OF_CUSTOMER_EMAIL = "optOutOfCustomerEmail"
    OPT_OUT_OF_CUSTOMER_EMAIL_DESC = "optOutOfCustomerEmail desc"
    STAFF_MEMBER_IDS = "staffMemberIds"
    STAFF_MEMBER_IDS_DESC = "staffMemberIds desc"
    INVOICE_AMOUNT = "invoiceAmount"
    INVOICE_AMOUNT_DESC = "invoiceAmount desc"
    INVOICE_DATE = "invoiceDate"
    INVOICE_DATE_DESC = "invoiceDate desc"
    INVOICE_ID = "invoiceId"
    INVOICE_ID_DESC = "invoiceId desc"
    INVOICE_STATUS = "invoiceStatus"
    INVOICE_STATUS_DESC = "invoiceStatus desc"
    INVOICE_URL = "invoiceUrl"
    INVOICE_URL_DESC = "invoiceUrl desc"
class Enum17(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-appointment field names — generated code; presumably OData $select values (confirm against the Graph spec)."""

    ID = "id"
    SELF_SERVICE_APPOINTMENT_ID = "selfServiceAppointmentId"
    CUSTOMER_ID = "customerId"
    CUSTOMER_NAME = "customerName"
    CUSTOMER_EMAIL_ADDRESS = "customerEmailAddress"
    CUSTOMER_PHONE = "customerPhone"
    CUSTOMER_LOCATION = "customerLocation"
    CUSTOMER_NOTES = "customerNotes"
    SERVICE_ID = "serviceId"
    SERVICE_NAME = "serviceName"
    START = "start"
    END = "end"
    DURATION = "duration"
    PRE_BUFFER = "preBuffer"
    POST_BUFFER = "postBuffer"
    SERVICE_LOCATION = "serviceLocation"
    PRICE_TYPE = "priceType"
    PRICE = "price"
    SERVICE_NOTES = "serviceNotes"
    REMINDERS = "reminders"
    OPT_OUT_OF_CUSTOMER_EMAIL = "optOutOfCustomerEmail"
    STAFF_MEMBER_IDS = "staffMemberIds"
    INVOICE_AMOUNT = "invoiceAmount"
    INVOICE_DATE = "invoiceDate"
    INVOICE_ID = "invoiceId"
    INVOICE_STATUS = "invoiceStatus"
    INVOICE_URL = "invoiceUrl"
class Enum18(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-appointment field names — generated code; presumably OData $expand/$select values (confirm against the Graph spec)."""

    ID = "id"
    SELF_SERVICE_APPOINTMENT_ID = "selfServiceAppointmentId"
    CUSTOMER_ID = "customerId"
    CUSTOMER_NAME = "customerName"
    CUSTOMER_EMAIL_ADDRESS = "customerEmailAddress"
    CUSTOMER_PHONE = "customerPhone"
    CUSTOMER_LOCATION = "customerLocation"
    CUSTOMER_NOTES = "customerNotes"
    SERVICE_ID = "serviceId"
    SERVICE_NAME = "serviceName"
    START = "start"
    END = "end"
    DURATION = "duration"
    PRE_BUFFER = "preBuffer"
    POST_BUFFER = "postBuffer"
    SERVICE_LOCATION = "serviceLocation"
    PRICE_TYPE = "priceType"
    PRICE = "price"
    SERVICE_NOTES = "serviceNotes"
    REMINDERS = "reminders"
    OPT_OUT_OF_CUSTOMER_EMAIL = "optOutOfCustomerEmail"
    STAFF_MEMBER_IDS = "staffMemberIds"
    INVOICE_AMOUNT = "invoiceAmount"
    INVOICE_DATE = "invoiceDate"
    INVOICE_ID = "invoiceId"
    INVOICE_STATUS = "invoiceStatus"
    INVOICE_URL = "invoiceUrl"
class Enum19(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-customer sort keys ('field' / 'field desc' pairs) — generated code; presumably OData $orderby values (confirm)."""

    ID = "id"
    ID_DESC = "id desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    EMAIL_ADDRESS = "emailAddress"
    EMAIL_ADDRESS_DESC = "emailAddress desc"
class Enum20(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-customer field names — generated code; presumably OData $select values (confirm)."""

    ID = "id"
    DISPLAY_NAME = "displayName"
    EMAIL_ADDRESS = "emailAddress"
class Enum21(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-customer field names — generated code; presumably OData $expand/$select values (confirm)."""

    ID = "id"
    DISPLAY_NAME = "displayName"
    EMAIL_ADDRESS = "emailAddress"
class Enum22(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-service sort keys ('field' / 'field desc' pairs) — generated code; presumably OData $orderby values (confirm)."""

    ID = "id"
    ID_DESC = "id desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    DEFAULT_DURATION = "defaultDuration"
    DEFAULT_DURATION_DESC = "defaultDuration desc"
    DEFAULT_LOCATION = "defaultLocation"
    DEFAULT_LOCATION_DESC = "defaultLocation desc"
    DEFAULT_PRICE = "defaultPrice"
    DEFAULT_PRICE_DESC = "defaultPrice desc"
    DEFAULT_PRICE_TYPE = "defaultPriceType"
    DEFAULT_PRICE_TYPE_DESC = "defaultPriceType desc"
    DEFAULT_REMINDERS = "defaultReminders"
    DEFAULT_REMINDERS_DESC = "defaultReminders desc"
    DESCRIPTION = "description"
    DESCRIPTION_DESC = "description desc"
    IS_HIDDEN_FROM_CUSTOMERS = "isHiddenFromCustomers"
    IS_HIDDEN_FROM_CUSTOMERS_DESC = "isHiddenFromCustomers desc"
    NOTES = "notes"
    NOTES_DESC = "notes desc"
    PRE_BUFFER = "preBuffer"
    PRE_BUFFER_DESC = "preBuffer desc"
    POST_BUFFER = "postBuffer"
    POST_BUFFER_DESC = "postBuffer desc"
    SCHEDULING_POLICY = "schedulingPolicy"
    SCHEDULING_POLICY_DESC = "schedulingPolicy desc"
    STAFF_MEMBER_IDS = "staffMemberIds"
    STAFF_MEMBER_IDS_DESC = "staffMemberIds desc"
class Enum23(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-service field names — generated code; presumably OData $select values (confirm)."""

    ID = "id"
    DISPLAY_NAME = "displayName"
    DEFAULT_DURATION = "defaultDuration"
    DEFAULT_LOCATION = "defaultLocation"
    DEFAULT_PRICE = "defaultPrice"
    DEFAULT_PRICE_TYPE = "defaultPriceType"
    DEFAULT_REMINDERS = "defaultReminders"
    DESCRIPTION = "description"
    IS_HIDDEN_FROM_CUSTOMERS = "isHiddenFromCustomers"
    NOTES = "notes"
    PRE_BUFFER = "preBuffer"
    POST_BUFFER = "postBuffer"
    SCHEDULING_POLICY = "schedulingPolicy"
    STAFF_MEMBER_IDS = "staffMemberIds"
class Enum24(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-service field names — generated code; presumably OData $expand/$select values (confirm)."""

    ID = "id"
    DISPLAY_NAME = "displayName"
    DEFAULT_DURATION = "defaultDuration"
    DEFAULT_LOCATION = "defaultLocation"
    DEFAULT_PRICE = "defaultPrice"
    DEFAULT_PRICE_TYPE = "defaultPriceType"
    DEFAULT_REMINDERS = "defaultReminders"
    DESCRIPTION = "description"
    IS_HIDDEN_FROM_CUSTOMERS = "isHiddenFromCustomers"
    NOTES = "notes"
    PRE_BUFFER = "preBuffer"
    POST_BUFFER = "postBuffer"
    SCHEDULING_POLICY = "schedulingPolicy"
    STAFF_MEMBER_IDS = "staffMemberIds"
class Enum25(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking staff-member sort keys ('field' / 'field desc' pairs) — generated code; presumably OData $orderby values (confirm)."""

    ID = "id"
    ID_DESC = "id desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    EMAIL_ADDRESS = "emailAddress"
    EMAIL_ADDRESS_DESC = "emailAddress desc"
    AVAILABILITY_IS_AFFECTED_BY_PERSONAL_CALENDAR = "availabilityIsAffectedByPersonalCalendar"
    AVAILABILITY_IS_AFFECTED_BY_PERSONAL_CALENDAR_DESC = "availabilityIsAffectedByPersonalCalendar desc"
    COLOR_INDEX = "colorIndex"
    COLOR_INDEX_DESC = "colorIndex desc"
    ROLE = "role"
    ROLE_DESC = "role desc"
    USE_BUSINESS_HOURS = "useBusinessHours"
    USE_BUSINESS_HOURS_DESC = "useBusinessHours desc"
    WORKING_HOURS = "workingHours"
    WORKING_HOURS_DESC = "workingHours desc"
class Enum26(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking staff-member field names — generated code; presumably OData $select values (confirm)."""

    ID = "id"
    DISPLAY_NAME = "displayName"
    EMAIL_ADDRESS = "emailAddress"
    AVAILABILITY_IS_AFFECTED_BY_PERSONAL_CALENDAR = "availabilityIsAffectedByPersonalCalendar"
    COLOR_INDEX = "colorIndex"
    ROLE = "role"
    USE_BUSINESS_HOURS = "useBusinessHours"
    WORKING_HOURS = "workingHours"
class Enum27(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking staff-member field names — generated code; presumably OData $expand/$select values (confirm)."""

    ID = "id"
    DISPLAY_NAME = "displayName"
    EMAIL_ADDRESS = "emailAddress"
    AVAILABILITY_IS_AFFECTED_BY_PERSONAL_CALENDAR = "availabilityIsAffectedByPersonalCalendar"
    COLOR_INDEX = "colorIndex"
    ROLE = "role"
    USE_BUSINESS_HOURS = "useBusinessHours"
    WORKING_HOURS = "workingHours"
class Enum28(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-currency sort keys ('field' / 'field desc' pairs) — generated code; presumably OData $orderby values (confirm)."""

    ID = "id"
    ID_DESC = "id desc"
    SYMBOL = "symbol"
    SYMBOL_DESC = "symbol desc"
class Enum29(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-currency field names — generated code; presumably OData $select values (confirm)."""

    ID = "id"
    SYMBOL = "symbol"
class Enum30(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Booking-currency field names — generated code; presumably OData $expand/$select values (confirm)."""

    ID = "id"
    SYMBOL = "symbol"
class Get1ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """bookingBusiness property and navigation-property names — generated code; presumably OData $select values (confirm)."""

    ID = "id"
    DISPLAY_NAME = "displayName"
    BUSINESS_TYPE = "businessType"
    ADDRESS = "address"
    PHONE = "phone"
    EMAIL = "email"
    WEB_SITE_URL = "webSiteUrl"
    DEFAULT_CURRENCY_ISO = "defaultCurrencyIso"
    BUSINESS_HOURS = "businessHours"
    SCHEDULING_POLICY = "schedulingPolicy"
    IS_PUBLISHED = "isPublished"
    PUBLIC_URL = "publicUrl"
    APPOINTMENTS = "appointments"
    CALENDAR_VIEW = "calendarView"
    CUSTOMERS = "customers"
    SERVICES = "services"
    STAFF_MEMBERS = "staffMembers"
class Get2ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """bookingBusiness expand options ('*' or a navigation property) — generated code; presumably OData $expand values (confirm)."""

    ASTERISK = "*"
    APPOINTMENTS = "appointments"
    CALENDAR_VIEW = "calendarView"
    CUSTOMERS = "customers"
    SERVICES = "services"
    STAFF_MEMBERS = "staffMembers"
class Get5ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """bookingBusiness sort keys ('field' / 'field desc' pairs) — generated code; presumably OData $orderby values (confirm)."""

    ID = "id"
    ID_DESC = "id desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    BUSINESS_TYPE = "businessType"
    BUSINESS_TYPE_DESC = "businessType desc"
    ADDRESS = "address"
    ADDRESS_DESC = "address desc"
    PHONE = "phone"
    PHONE_DESC = "phone desc"
    EMAIL = "email"
    EMAIL_DESC = "email desc"
    WEB_SITE_URL = "webSiteUrl"
    WEB_SITE_URL_DESC = "webSiteUrl desc"
    DEFAULT_CURRENCY_ISO = "defaultCurrencyIso"
    DEFAULT_CURRENCY_ISO_DESC = "defaultCurrencyIso desc"
    BUSINESS_HOURS = "businessHours"
    BUSINESS_HOURS_DESC = "businessHours desc"
    SCHEDULING_POLICY = "schedulingPolicy"
    SCHEDULING_POLICY_DESC = "schedulingPolicy desc"
    IS_PUBLISHED = "isPublished"
    IS_PUBLISHED_DESC = "isPublished desc"
    PUBLIC_URL = "publicUrl"
    PUBLIC_URL_DESC = "publicUrl desc"
class Get6ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """bookingBusiness property and navigation-property names — generated code; presumably OData $select values (confirm)."""

    ID = "id"
    DISPLAY_NAME = "displayName"
    BUSINESS_TYPE = "businessType"
    ADDRESS = "address"
    PHONE = "phone"
    EMAIL = "email"
    WEB_SITE_URL = "webSiteUrl"
    DEFAULT_CURRENCY_ISO = "defaultCurrencyIso"
    BUSINESS_HOURS = "businessHours"
    SCHEDULING_POLICY = "schedulingPolicy"
    IS_PUBLISHED = "isPublished"
    PUBLIC_URL = "publicUrl"
    APPOINTMENTS = "appointments"
    CALENDAR_VIEW = "calendarView"
    CUSTOMERS = "customers"
    SERVICES = "services"
    STAFF_MEMBERS = "staffMembers"
class Get7ItemsItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """bookingBusiness expand options ('*' or a navigation property) — generated code; presumably OData $expand values (confirm)."""

    ASTERISK = "*"
    APPOINTMENTS = "appointments"
    CALENDAR_VIEW = "calendarView"
    CUSTOMERS = "customers"
    SERVICES = "services"
    STAFF_MEMBERS = "staffMembers"
class MicrosoftGraphBookingInvoiceStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Status values of a Microsoft Graph booking invoice."""

    DRAFT = "draft"
    REVIEWING = "reviewing"
    OPEN = "open"
    CANCELED = "canceled"
    PAID = "paid"
    CORRECTIVE = "corrective"
class MicrosoftGraphBookingPriceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """How the price of a booking service is expressed (fixed, hourly, free, ...)."""

    UNDEFINED = "undefined"
    FIXED_PRICE = "fixedPrice"
    STARTING_AT = "startingAt"
    HOURLY = "hourly"
    FREE = "free"
    PRICE_VARIES = "priceVaries"
    CALL_US = "callUs"
    NOT_SET = "notSet"
class MicrosoftGraphBookingReminderRecipients(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Who receives a booking reminder: everyone, staff only, or the customer."""

    ALL_ATTENDEES = "allAttendees"
    STAFF = "staff"
    CUSTOMER = "customer"
class MicrosoftGraphBookingStaffRole(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Role of a staff member within a booking business."""

    GUEST = "guest"
    ADMINISTRATOR = "administrator"
    VIEWER = "viewer"
    EXTERNAL_GUEST = "externalGuest"
class MicrosoftGraphDayOfWeek(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Days of the week, Sunday first, as used by Microsoft Graph."""

    SUNDAY = "sunday"
    MONDAY = "monday"
    TUESDAY = "tuesday"
    WEDNESDAY = "wednesday"
    THURSDAY = "thursday"
    FRIDAY = "friday"
    SATURDAY = "saturday"
class MicrosoftGraphLocationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Kind of place a Microsoft Graph location record refers to."""

    DEFAULT = "default"
    CONFERENCE_ROOM = "conferenceRoom"
    HOME_ADDRESS = "homeAddress"
    BUSINESS_ADDRESS = "businessAddress"
    GEO_COORDINATES = "geoCoordinates"
    STREET_ADDRESS = "streetAddress"
    HOTEL = "hotel"
    RESTAURANT = "restaurant"
    LOCAL_BUSINESS = "localBusiness"
    POSTAL_ADDRESS = "postalAddress"
class MicrosoftGraphLocationUniqueIdType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Provenance of a location's unique identifier (directory, Bing, ...)."""

    UNKNOWN = "unknown"
    LOCATION_STORE = "locationStore"
    DIRECTORY = "directory"
    PRIVATE = "private"
    BING = "bing"
class MicrosoftGraphPhysicalAddressType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Category of a physical address (home, business, other)."""

    UNKNOWN = "unknown"
    HOME = "home"
    BUSINESS = "business"
    OTHER = "other"
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.