blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6672bc6d3850e044ea1f8fb79c6ff9cba031cfe5 | e68a40e90c782edae9d8f89b827038cdc69933c4 | /res/scripts/client/gui/scaleform/daapi/view/lobby/boosters/boosterspanelcomponent.py | 0caa7bfe82fda7414ba9a2de46bbce35ac7d8dd8 | [] | no_license | webiumsk/WOT-0.9.16 | 2486f8b632206b992232b59d1a50c770c137ad7d | 71813222818d33e73e414e66daa743bd7701492e | refs/heads/master | 2021-01-10T23:12:33.539240 | 2016-10-11T21:00:57 | 2016-10-11T21:00:57 | 70,634,922 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,857 | py | # 2016.10.11 22:11:02 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/boosters/BoostersPanelComponent.py
from gui import game_control
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.ClientUpdateManager import g_clientUpdateManager
from gui.goodies.Booster import MAX_ACTIVE_BOOSTERS_COUNT
from gui.goodies import g_goodiesCache
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.Scaleform.genConsts.BOOSTER_CONSTANTS import BOOSTER_CONSTANTS
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.Scaleform.daapi.view.meta.SlotsPanelMeta import SlotsPanelMeta
from gui.shared.ItemsCache import g_itemsCache
from gui.shared.utils.requesters.ItemsRequester import REQ_CRITERIA
from gui.shared.utils.functions import makeTooltip
# Layout/configuration pushed to the Scaleform slots panel (sizes in px).
_GUI_SLOTS_PROPS = {'slotsCount': MAX_ACTIVE_BOOSTERS_COUNT,
 'slotWidth': 50,
 'paddings': 64,
 'groupPadding': 18,
 'ySlotPosition': 5,
 'offsetSlot': 13,
 'useOnlyLeftBtn': True}
# Sentinel slot IDs stored in _slotsMap for slots without an active booster.
ADD_BOOSTER_ID = 'add'
_ADD_AVAILABLE_BOOSTER_ID = 'addAvailable'
_EMPTY_BOOSTER_ID = 'empty'
class BoostersPanelComponent(SlotsPanelMeta):
    """Lobby panel showing the player's booster slots (active and empty).

    ``_slotsMap`` maps a slot index to either a booster ID or one of the
    sentinel strings ADD_BOOSTER_ID / _ADD_AVAILABLE_BOOSTER_ID.
    """
    def __init__(self):
        super(BoostersPanelComponent, self).__init__()
        self._items = g_itemsCache.items
        # True -> slots render non-interactive (no "add booster" affordance).
        self._isPanelInactive = True
        self._wasPopulated = False
        # Slot index -> booster ID, or a sentinel string for empty slots.
        self._slotsMap = {}
    def setSettings(self, isPanelInactive = True):
        self._isPanelInactive = isPanelInactive
        if self._wasPopulated:
            # Rebuild immediately so the new active/inactive styling shows.
            self._buildList()
    def getBoosterSlotID(self, idx):
        # Flash hands slot indices over as strings; normalise before lookup.
        return self._slotsMap.get(int(idx), None)
    def getSlotTooltipBody(self, slotIdx):
        # Empty slots get the "open boosters window" hint (only while the
        # panel is interactive); occupied slots use the generic booster info.
        boosterID = self._slotsMap.get(int(slotIdx), None)
        tooltip = ''
        if boosterID in (ADD_BOOSTER_ID, _ADD_AVAILABLE_BOOSTER_ID):
            if not self._isPanelInactive:
                body = TOOLTIPS.BOOSTERSPANEL_OPENBOOSTERSWINDOW_BODY
                tooltip = makeTooltip(None, body)
        else:
            tooltip = TOOLTIPS_CONSTANTS.BOOSTERS_BOOSTER_INFO
        return tooltip
    def _populate(self):
        super(BoostersPanelComponent, self)._populate()
        # Rebuild whenever goodies data or booster state changes.
        g_clientUpdateManager.addCallbacks({'goodies': self.__onUpdateGoodies})
        game_control.g_instance.boosters.onBoosterChangeNotify += self.__onUpdateGoodies
        self._buildList()
        self._wasPopulated = True
    def _dispose(self):
        self._items = None
        self._isPanelInactive = None
        self._wasPopulated = None
        self._slotsMap = None
        # Unsubscribe from everything wired up in _populate.
        game_control.g_instance.boosters.onBoosterChangeNotify -= self.__onUpdateGoodies
        g_clientUpdateManager.removeObjectCallbacks(self)
        super(BoostersPanelComponent, self)._dispose()
        return
    def __getAvailableBoosters(self):
        # Boosters the player owns and could activate right now.
        criteria = REQ_CRITERIA.BOOSTER.IS_READY_TO_ACTIVATE
        return g_goodiesCache.getBoosters(criteria=criteria)
    def _buildList(self):
        # Compose the slot VO list: active boosters first (longest remaining
        # time first), then empty slots; push the result to the flash side.
        result = []
        activeBoosters = g_goodiesCache.getBoosters(criteria=REQ_CRITERIA.BOOSTER.ACTIVE)
        activeBoostersList = sorted(activeBoosters.values(), key=lambda b: b.getUsageLeftTime(), reverse=True)
        availableBoostersCount = len(self.__getAvailableBoosters())
        activeBoostersCount = min(len(activeBoostersList), MAX_ACTIVE_BOOSTERS_COUNT)
        freeSlotsCount = MAX_ACTIVE_BOOSTERS_COUNT - min(activeBoostersCount, MAX_ACTIVE_BOOSTERS_COUNT)
        # Number of empty slots that should advertise "add a booster".
        addBoostersSlotsCount = min(freeSlotsCount, availableBoostersCount)
        self._slotsMap = {}
        for idx in range(0, activeBoostersCount):
            booster = activeBoostersList[idx]
            self._slotsMap[idx] = booster.boosterID
            result.append(self.__makeBoosterVO(idx, booster))
        icon = ''
        if not self._isPanelInactive:
            icon = RES_ICONS.MAPS_ICONS_ARTEFACT_EMPTYORDER
        addAndActiveBoostersCount = activeBoostersCount + addBoostersSlotsCount
        for idx in range(activeBoostersCount, MAX_ACTIVE_BOOSTERS_COUNT):
            self._slotsMap[idx], slotLinkage = self.getEmptySlotParams(idx, addAndActiveBoostersCount)
            result.append(self.__makeEmptyBoosterVO(idx, slotLinkage, icon))
        self.as_setPanelPropsS(_GUI_SLOTS_PROPS)
        self.as_setSlotsS(result)
    def getEmptySlotParams(self, idx, addAndActiveBoostersCount):
        # Slots below addAndActiveBoostersCount show the "add" affordance
        # (when the panel is interactive); the rest are plain empty slots.
        if idx < addAndActiveBoostersCount and not self._isPanelInactive:
            slotLinkage = BOOSTER_CONSTANTS.SLOT_ADD_UI
            emptyBoosterID = _ADD_AVAILABLE_BOOSTER_ID
        else:
            slotLinkage = BOOSTER_CONSTANTS.SLOT_UI
            emptyBoosterID = ADD_BOOSTER_ID
        return (emptyBoosterID, slotLinkage)
    def __makeBoosterVO(self, idx, booster):
        # View object consumed by the flash renderer for an occupied slot.
        return {'boosterId': booster.boosterID,
         'id': str(idx),
         'icon': booster.icon,
         'inCooldown': booster.inCooldown,
         'cooldownPercent': booster.getCooldownAsPercent(),
         'leftTime': booster.getUsageLeftTime(),
         'leftTimeText': booster.getShortLeftTimeStr(),
         'showLeftTime': True,
         'isDischarging': True,
         'isInactive': self._isPanelInactive,
         'isEmpty': False,
         'qualityIconSrc': booster.getQualityIcon(),
         'slotLinkage': BOOSTER_CONSTANTS.SLOT_UI}
    def __makeEmptyBoosterVO(self, idx, slotLinkage, icon):
        # View object for an empty slot (with or without the "add" linkage).
        return {'id': str(idx),
         'isInactive': self._isPanelInactive,
         'isEmpty': True,
         'icon': icon,
         'slotLinkage': slotLinkage,
         'showLeftTime': False}
    def __onUpdateGoodies(self, *args):
        # Any goodies/booster change invalidates the panel; rebuild it all.
        self._buildList()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\boosters\boosterspanelcomponent.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.10.11 22:11:02 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
0667c0b03b59413ed9e1ceebc3ff08683d9d142b | b72c37e3ccda507b231649cddd5c7845c6c34ba1 | /PythonBasic/Day12/HomeWork_birth.py | e78f35a7fd539268e61aa6848d9f0f6fbea30bde | [] | no_license | ljrdemail/AID1810 | 51c61c255b5c5efc1dc642b46691a614daedd85e | b417bd831bc1550ab953ce7ca23f54e34b8b2692 | refs/heads/master | 2020-04-24T09:45:14.781612 | 2019-02-21T11:26:49 | 2019-02-21T11:26:49 | 171,866,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | import datetime
def liveday():
    """Ask for a birth date (YYYY-MM-DD) and print the number of days lived
    plus the Chinese name of the weekday of birth."""
    dob = input("请输入你出生年月日,以-分隔:")
    # strptime parses and validates in one step (e.g. it rejects 2001-2-30),
    # replacing the original's three separate split()/int() round trips.
    d1 = datetime.datetime.strptime(dob.strip(), "%Y-%m-%d")
    d2 = datetime.datetime.now()
    # datetime.weekday(): Monday == 0 ... Sunday == 6
    w = {0: "星期一", 1: "星期二", 2: "星期三", 3: "星期四",
         4: "星期五", 5: "星期六", 6: "星期日"}
    print("你活了", (d2 - d1).days, "天")
    print("你出生那天是:", w[d1.weekday()])
liveday()  # runs immediately when the module is loaded (interactive script)
| [
"root"
] | root |
c98c4a5b216fe8c82e6824648216dbc541cb869f | 165305e7d92075018b57f6288b84b4d5129be412 | /knit-graph.py | 9cae11d27be278f431448c7c8a588c5f698700a0 | [] | no_license | kshedstrom/plug-ins | 7c6e1358926f1d0cc062dc0d19e21f32d99b202c | 65aeb7c8b2c7f7c0a27fbf813863f10e6e2c00c9 | refs/heads/master | 2022-02-10T12:10:03.156258 | 2022-02-02T17:54:14 | 2022-02-02T17:54:14 | 7,652,530 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,811 | py | #!/usr/bin/env python
#
# Take an image and turn it into a knitting pattern: expand it by
# some (you pick) number and place black lines around each box.
# Chunks stolen from Akkana Peck's arclayer script.
import math
from gimpfu import *
from array import array
def python_knit_graph(timg, tdrawable, x_scale=14, y_scale=10):
    """Create a knitting chart from *tdrawable*: each source pixel becomes an
    x_scale-by-y_scale box, and the untouched black background between the
    boxes forms grid lines (doubled every 10th row/column).

    A new image is created and displayed; the source image is not modified.
    """
    w = tdrawable.width
    h = tdrawable.height
    bpp = tdrawable.bpp  # NOTE(review): unused below; p_size is taken from dstRgn
    # +3 leaves room for the outer border lines of the grid.
    width = w*x_scale + 3
    height = h*y_scale + 3
    img = gimp.Image(width, height, RGB)
    img.disable_undo()
    layer= gimp.Layer(img, "Graph", width, height, RGB_IMAGE,
                      100, NORMAL_MODE)
    img.add_layer(layer, 0)
    layers = img.layers
    # for l in layers:
    #     print "Layer: Name=\"%s\" Width=%d Height=%d X=%d Y=%d\n"%(l.name, l.width, l.height, l.offsets[0], l.offsets[1])
    # initialize the regions and get their contents into arrays:
    srcRgn = tdrawable.get_pixel_rgn(0, 0, w, h, False, False)
    src_pixels = array("B", srcRgn[0:w, 0:h])
    dstRgn = layer.get_pixel_rgn(0, 0, width, height, True, True)
    p_size = len(dstRgn[0,0])
    # fg_colour = gimp.get_foreground()
    # Just let it fill with black
    dest_pixels = array("B", "\x00" * (width * height * p_size))
    for y in range(0, h):
        for x in range(0, w):
            # Copy one source pixel into its destination box.
            src_pos = (x + w * y) * p_size
            newval = src_pixels[src_pos: src_pos + p_size]
            x1 = x_scale*x
            x2 = x1 + x_scale
            y1 = y_scale*y
            y2 = y1 + y_scale
            # Shrink boxes adjacent to every 10th grid line so that line is
            # drawn two pixels wide instead of one.
            if (x%10 == 9):
                x2 = x2 - 1
            if (y%10 == 9):
                y2 = y2 - 1
            if (x%10 == 0):
                x1 = x_scale*x + 2
            else:
                x1 = x_scale*x + 1
            if (y%10 == 0):
                y1 = y_scale*y + 2
            else:
                y1 = y_scale*y + 1
            for yy in range(y1, y2):
                for xx in range(x1, x2):
                    dest_pos = (xx + width * yy) * p_size
                    dest_pixels[dest_pos: dest_pos + p_size] = newval
    # Write the whole buffer back in one go (tostring is the Python 2
    # array-to-bytes spelling used by classic GIMP scripts).
    dstRgn[0:width, 0:height] = dest_pixels.tostring()
    layer.flush()
    layer.merge_shadow(True)
    layer.update(0, 0, width, height)
    img.enable_undo()
    gimp.Display(img)
    gimp.displays_flush()
# drawable = pdb.gimp_image_get_active_layer(img)
# pdb.gimp_file_save(img, drawable, file_name, file_name)
# Register the plug-in with GIMP; it appears under Image > Knit_graph...
register(
        "python_knit_graph",
        "Stretch the specified image for use as a knitting pattern",
        "Stretch the specified image for use as a knitting pattern",
        "Kate Hedstrom",
        "Kate Hedstrom",
        "2013",
        "<Image>/Image/Knit_graph...",
        "*",
        [
                (PF_INT, "x_scale", "X scale", 14),
                (PF_INT, "y_scale", "Y scale", 10)
        ],
        [],
        python_knit_graph)
main()
| [
"kshedstrom@alaska.edu"
] | kshedstrom@alaska.edu |
5603b4614f0b5b7454891b5011ba3cb49b7f827e | c769dc9a7a4338f2eba6fe44d6de3eab968ff51f | /OA/OA/settings.py | 313328eb96393c078887de6b506a7a873aef643b | [] | no_license | simonzhao88/practice | 267f4836c3d4d19e8db973f13d561024613bb88c | d29db772793a8d01301bbcb457595d8bb9ea33bc | refs/heads/master | 2020-03-17T17:41:26.650235 | 2018-07-07T09:50:12 | 2018-07-07T09:50:12 | 133,798,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,734 | py | """
Django settings for OA project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from the
# environment (or a secrets store) before deploying.
SECRET_KEY = 'i)!2=k-$7@(luo)7bgk)_z)ldkcm^3z@ndccz@i)08j@i*1t5^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'hrs.apps.HrsConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'OA.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'OA.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE(review): database credentials are hard-coded here; move them to
# environment variables for anything beyond local development.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'oa',
        'HOST': '127.0.0.1',
        'PORT': 3306,
        'USER': 'root',
        'PASSWORD': 'root'
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_URL = '/static/'
# 配置将日志输出到控制台 日志级别为DEBUG(最详细的日志)
# DEBUG < INFO < WARNING < ERROR < CRITICAL
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
        },
    },
}
"642252108@qq.com"
] | 642252108@qq.com |
a9210d6d25b25c52a6e6e17e87b2ec6fa22734db | 986fc4298a3d728691951f77470beb5e92505425 | /icepack/mesh/stitch.py | a939ec8137ff08d6c2058f78e15ca17faafaba51 | [] | no_license | danshapero/icepack-py | 4eeee730056248cb1a33112bd07fea02b0e82194 | 24a13cba05dd5597fbec64f8bd2eb9580cc69fe7 | refs/heads/master | 2021-07-23T11:50:27.779303 | 2017-10-31T22:44:35 | 2017-10-31T22:44:35 | 105,718,249 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,654 | py |
import copy
import itertools
from numpy import ones, zeros, sqrt
from matplotlib.path import *
# --------------
def dist(x1, x2):
    """Return the Euclidean distance between 2-D points *x1* and *x2*."""
    # The module imports `sqrt` from numpy but defines no `np` alias, so the
    # original `np.sqrt(...)` raised NameError on every call.
    return sqrt((x1[0] - x2[0])**2 + (x1[1] - x2[1])**2)
# -----------------------------------
class IsolatedSegmentError(Exception):
    """Raised when a segment has no successor within the distance tolerance."""
    pass
# -----------------------------------
def next_segment(Xs, i, tol = 1.0e4):
    """
    Return the next segment in the input geometry

    Args:
        Xs: a list of coordinates of the lines of the input geometry
        i: an index of one line of the input geometry
        tol: criterion for whether another segment's endpoint is close enough

    Returns:
        j: the index of the segment after `i` in the input geometry, i.e. the
           segment whose start- or end-point is closest to the end-point of
           `i`. This could be `i` itself if it describes a closed loop. If the
           successor segment's order needs to be reversed, returns `-j`.

    Raises:
        IsolatedSegmentError on a segment with no successor within the given
        tolerance.
    """
    num_segments = len(Xs)
    Xi = Xs[i]
    # Segment closes on itself: it is its own successor.
    if dist(Xi[0], Xi[-1]) < tol:
        return i
    for j in range(num_segments):
        if j != i:
            Xj = Xs[j]
            if dist(Xi[-1], Xj[0]) < tol:
                return j
            if dist(Xi[-1], Xj[-1]) < tol:
                # NOTE(review): when j == 0, -j == 0, so a reversed successor
                # at index 0 is indistinguishable from "closed loop" for the
                # caller -- verify inputs never hit this case.
                return -j
    raise IsolatedSegmentError()
# --------------------------------------
def segment_successors(Xs, tol = 1.0e4):
    """
    Return a new geometry identical to the input but with orientations flipped
    so that all segments lie end-to-end.

    Args:
        Xs: input geometry
        tol: tolerance for segment proximity

    Returns:
        Ys: input geometry, possibly with some segments in reverse order
        successors: successors[i] = the next segment after `i` in the PSLG
    """
    num_segments = len(Xs)
    # Deep copy so the in-place reversals below never touch the caller's data.
    Ys = copy.deepcopy(Xs)
    segments = set(range(num_segments))
    successors = list(range(num_segments))
    while segments:
        # Walk one chain of segments until it loops back to its start.
        i0 = segments.pop()
        i = i0
        j = next_segment(Ys, i, tol)
        while j != i0:
            if j < 0:
                # Negative index: successor must be reversed to lie
                # end-to-end (see next_segment).
                j = -j
                Ys[j].reverse()
            segments.remove(j)
            successors[i] = j
            i = j
            j = next_segment(Ys, i, tol)
        successors[i] = i0
    return Ys, successors
# --------------------------------
def lines_to_paths(Xs, successors):
    """
    Return a list of closed matplotlib Path objects of the input geometry.

    Follows each successor chain, concatenating the segments' vertex lists
    into one closed Path per loop.
    """
    segments = set(range(len(Xs)))
    Ps = []
    while segments:
        i0 = segments.pop()
        i = i0
        # Copy the first segment's points. The original read the undefined
        # name `X[i]` (NameError), and extending `Xs[i]` directly would have
        # mutated the caller's geometry.
        X = list(Xs[i0])
        j = successors[i]
        while j != i0:
            segments.remove(j)
            X.extend(Xs[j])
            i = j
            j = successors[i]
        p = Path(X, closed = True)
        Ps.append(p)
    return Ps
# ---------------------------
def find_point_inside_path(p):
    """
    Return a point inside the path p.

    Triangle needs to have a point contained in any holes in the mesh.
    Candidates are midpoints between vertex 0 and successive opposite
    vertices; raises ValueError if no candidate lies inside the path.
    """
    n = len(p)
    # `// 2` keeps j an int; the original `/ 2` produces a float under
    # Python 3, which is not a valid array index.
    i, j = 0, n // 2
    start = j
    x = (0.0, 0.0)
    while not p.contains_point(x):
        # Wrap around instead of running past the end of the vertex array.
        j = (j + 1) % n
        if j == start:
            raise ValueError("no interior point found for path")
        # True midpoint of vertices i and j; the original scaled only the
        # first endpoint (0.5*a + b), which is not a point between them.
        x = (0.5 * (p.vertices[i, 0] + p.vertices[j, 0]),
             0.5 * (p.vertices[i, 1] + p.vertices[j, 1]))
    return x
# --------------------------------
def identify_holes(Xs, successors):
    """
    Return a list of points, one inside each path that is contained within
    another path (i.e. one marker point per hole, as Triangle requires).
    """
    Ys = []
    Ps = lines_to_paths(Xs, successors)
    # The original iterated the undefined name `ps` and called the
    # non-existent `point_inside_path`; both raised NameError.
    for p, q in itertools.combinations(Ps, 2):
        if p.contains_path(q):
            y = find_point_inside_path(q)
            Ys.append(y)
    return Ys
| [
"shapero.daniel@gmail.com"
] | shapero.daniel@gmail.com |
d577c4d03856c05f0a30f68c85439667e187a747 | 4c577d9ddf21d8aba5626343f91a4986266f01e2 | /eric6/.eric6/eric6plugins/vcsGit/GitFetchDialog.py | 6686d150f3e09781568a6197a7e8228c683b3132 | [] | no_license | metamarcdw/.dotfiles | 362199d415ebd7d09247ee0efbda03243aa22faa | 3df0c805225a8d4f2709565d7eda4e07a050c986 | refs/heads/master | 2020-12-30T15:29:25.769345 | 2017-12-22T05:44:01 | 2017-12-22T05:44:01 | 91,143,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,548 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 - 2017 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to enter data for a Fetch operation.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from .Ui_GitFetchDialog import Ui_GitFetchDialog
class GitFetchDialog(QDialog, Ui_GitFetchDialog):
    """
    Class implementing a dialog to enter data for a Fetch operation.
    """
    def __init__(self, vcs, repodir, parent=None):
        """
        Constructor
        
        @param vcs reference to the git object
        @param repodir directory name of the local repository (string)
        @param parent reference to the parent widget (QWidget)
        """
        super(GitFetchDialog, self).__init__(parent)
        self.setupUi(self)
        self.__vcs = vcs
        self.__repodir = repodir
        # Pseudo combo-box entries appended after the real remote names.
        self.__all = self.tr("<All>")
        self.__custom = self.tr("<Custom>")
        # Map of remote name -> URL for the configured remotes.
        remoteUrlsList = self.__vcs.gitGetRemoteUrlsList(self.__repodir)
        self.__repos = {name: url for name, url in remoteUrlsList}
        self.__okButton = self.buttonBox.button(QDialogButtonBox.Ok)
        self.remotesComboBox.addItems(sorted(self.__repos.keys()))
        self.remotesComboBox.addItem(self.__all)
        self.remotesComboBox.addItem(self.__custom)
        # Preselect "origin" when it exists, else the first entry.
        index = self.remotesComboBox.findText("origin")
        if index == -1:
            index = 0
        self.remotesComboBox.setCurrentIndex(index)
        localBranches = self.__vcs.gitGetBranchesList(
            self.__repodir, withMaster=True)
        self.localBranchComboBox.addItems([""] + sorted(localBranches))
        # Only meaningful when exactly one remote branch is selected.
        self.localBranchComboBox.setEnabled(False)
    def __okButtonEnable(self):
        """
        Private slot to set the enabled state of the OK button.
        """
        # OK needs either concrete remote branches or the <All> pseudo remote.
        self.__okButton.setEnabled(
            self.remoteBranchesList.count() > 0 or
            self.remotesComboBox.currentText() == self.__all
        )
    def __updateButtonEnable(self):
        """
        Private slot to set the enabled state of the update button.
        """
        remote = self.remotesComboBox.currentText()
        enable = remote != self.__all
        if remote == self.__custom:
            # A custom remote needs a URL before branches can be listed.
            enable = self.remoteEdit.text() != ""
        self.updateButton.setEnabled(enable)
    @pyqtSlot(str)
    def on_remotesComboBox_currentTextChanged(self, txt):
        """
        Private slot to handle changes of the selected repository.
        
        @param txt current text of the combo box (string)
        """
        self.remoteEdit.setReadOnly(txt != self.__custom)
        self.remoteBranchesList.setEnabled(txt != self.__all)
        self.remoteEdit.clear()
        self.remoteBranchesList.clear()
        self.__updateButtonEnable()
        self.__okButtonEnable()
        if txt not in [self.__all, self.__custom]:
            # A named remote: list its branches and show its URL read-only.
            remoteBranches = self.__vcs.gitGetRemoteBranchesList(
                self.__repodir, txt)
            self.remoteBranchesList.addItems(sorted(remoteBranches))
            if txt in self.__repos:
                self.remoteEdit.setText(self.__repos[txt])
    @pyqtSlot(str)
    def on_remoteEdit_textChanged(self, txt):
        """
        Private slot to handle changes of the URL edit.
        
        @param txt current text of the URL edit (string)
        """
        self.__updateButtonEnable()
        if self.remotesComboBox.currentText() == self.__custom and \
                txt != "":
            # Refresh the branch list for the manually entered URL.
            remoteBranches = self.__vcs.gitGetRemoteBranchesList(
                self.__repodir, txt)
            self.remoteBranchesList.clear()
            self.remoteBranchesList.addItems(sorted(remoteBranches))
        self.__okButtonEnable()
    @pyqtSlot()
    def on_remoteBranchesList_itemSelectionChanged(self):
        """
        Private slot to handle a change of selected remote branches.
        """
        # The local-branch combo applies only to a single selected branch.
        singleSelection = len(self.remoteBranchesList.selectedItems()) == 1
        self.localBranchComboBox.setEnabled(singleSelection)
        if singleSelection:
            txt = self.remoteBranchesList.selectedItems()[0].text()
        else:
            txt = ""
        index = self.localBranchComboBox.findText(txt)
        if index == -1:
            self.localBranchComboBox.setEditText(txt)
        else:
            self.localBranchComboBox.setCurrentIndex(index)
    @pyqtSlot()
    def on_updateButton_clicked(self):
        """
        Private slot to update the list of remote branches.
        """
        remote = self.remotesComboBox.currentText()
        if remote == self.__all:
            # shouldn't happen
            return
        if remote == self.__custom:
            remote = self.remoteEdit.text()
            if remote == "":
                # shouldn't happen either
                return
        remoteBranches = self.__vcs.gitGetRemoteBranchesList(
            self.__repodir, remote)
        self.remoteBranchesList.clear()
        self.remoteBranchesList.addItems(sorted(remoteBranches))
        self.__okButtonEnable()
    def getData(self):
        """
        Public method to get the entered data.
        
        @return tuple of remote name, remote url (for custom remotes),
            remote branches, local branch, a flag indicating to fetch from
            all repositories, a flag indicating to remove obsolete tracking
            references and a flag indicating to fetch tags as well
            (string, string, list of strings, string, boolean, boolean,
            boolean)
        """
        remote = ""
        url = ""
        remoteBranches = []
        allRepos = False
        localBranch = ""
        remoteRepo = self.remotesComboBox.currentText()
        if remoteRepo == self.__all:
            allRepos = True
        else:
            # Custom remotes are identified by URL, named ones by name.
            if remoteRepo == self.__custom:
                url = self.remoteEdit.text()
            else:
                remote = remoteRepo
            for itm in self.remoteBranchesList.selectedItems():
                remoteBranches.append(itm.text())
            if len(remoteBranches) == 1:
                localBranch = self.localBranchComboBox.currentText()
        return (remote, url, remoteBranches, localBranch, allRepos,
                self.pruneCheckBox.isChecked(), self.tagsCheckBox.isChecked())
| [
"metamarcdw@gmail.com"
] | metamarcdw@gmail.com |
536ff8a0db556679da306bd709afddbc31ee7b11 | 03f9b8bdea312636afb4df3737b55cb0cc4b21ff | /EditDistance.py | 20fb3e9c2e621e3789092624f15fa5338bbd61f3 | [] | no_license | ellinx/LC-python | f29dd17bbe15407ba0d06ad68386efdc9a343b56 | 9190d3d178f1733aa226973757ee7e045b7bab00 | refs/heads/master | 2021-06-01T15:21:24.379811 | 2020-10-29T04:37:07 | 2020-10-29T04:37:07 | 132,704,788 | 1 | 1 | null | 2019-05-15T03:26:11 | 2018-05-09T05:13:26 | Python | UTF-8 | Python | false | false | 1,405 | py | """
Given two words word1 and word2, find the minimum number of operations required to convert word1 to word2.
You have the following 3 operations permitted on a word:
1. Insert a character
2. Delete a character
3. Replace a character
Example 1:
Input: word1 = "horse", word2 = "ros"
Output: 3
Explanation:
horse -> rorse (replace 'h' with 'r')
rorse -> rose (remove 'r')
rose -> ros (remove 'e')
Example 2:
Input: word1 = "intention", word2 = "execution"
Output: 5
Explanation:
intention -> inention (remove 't')
inention -> enention (replace 'i' with 'e')
enention -> exention (replace 'n' with 'x')
exention -> exection (replace 'n' with 'c')
exection -> execution (insert 'u')
"""
class Solution:
    def minDistance(self, word1, word2):
        """
        Levenshtein (edit) distance between word1 and word2: the minimum
        number of single-character insertions, deletions and replacements
        needed to turn word1 into word2.

        :type word1: str
        :type word2: str
        :rtype: int
        """
        rows, cols = len(word1), len(word2)
        # prev[c] = distance from the empty prefix of word1 to word2[:c].
        prev = list(range(cols + 1))
        for r in range(1, rows + 1):
            # Deleting all r characters of word1[:r] reaches the empty string.
            curr = [r] + [0] * cols
            for c in range(1, cols + 1):
                if word1[r - 1] == word2[c - 1]:
                    best = prev[c - 1]          # matching characters: free
                else:
                    best = prev[c - 1] + 1      # replace
                # Also consider deleting from word1 or inserting into word1.
                curr[c] = min(best, prev[c] + 1, curr[c - 1] + 1)
            prev = curr
        return prev[cols]
| [
"ellin.xll@gmail.com"
] | ellin.xll@gmail.com |
c14390c5349a7bb564d59f5828cdc6bbf09d9e06 | a29c7e363026111276e94b96d39b1b4ab48dbca8 | /sdk/test/test_overdraft_overdraft_fee_charge_cap.py | 2424dab7913426542a731b72bb7d31c75654266f | [
"MIT"
] | permissive | matteo-kalogirou/yapily-sdk-python | a56bf6f9b1b308efda38f081f6237ebd8c8f8ad5 | f10d2d14383f551eeb59aa893d328ffa5080da22 | refs/heads/master | 2022-12-16T22:24:18.026765 | 2020-09-18T13:59:26 | 2020-09-18T13:59:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,212 | py | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 0.0.242
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import yapily
from yapily.models.overdraft_overdraft_fee_charge_cap import OverdraftOverdraftFeeChargeCap # noqa: E501
from yapily.rest import ApiException
class TestOverdraftOverdraftFeeChargeCap(unittest.TestCase):
    """OverdraftOverdraftFeeChargeCap unit test stubs

    Auto-generated by openapi-generator; exercises model construction with
    required-only and required+optional parameter sets.
    """
    def setUp(self):
        # No fixtures needed for these construction-only stubs.
        pass
    def tearDown(self):
        pass
    def make_instance(self, include_optional):
        """Test OverdraftOverdraftFeeChargeCap
            include_option is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = yapily.models.overdraft_overdraft_fee_charge_cap.OverdraftOverdraftFeeChargeCap()  # noqa: E501
        if include_optional :
            return OverdraftOverdraftFeeChargeCap(
                capping_period = 'Day', 
                fee_cap_amount = '0', 
                fee_cap_occurrence = 1.337, 
                fee_type = [
                    'ArrangedOverdraft'
                    ], 
                min_max_type = 'Minimum', 
                notes = [
                    '0'
                    ], 
                other_fee_type = [
                    yapily.models.overdraft_other_fee_type.OverdraftOtherFeeType(
                        code = '0', 
                        description = '0', 
                        name = '0', )
                    ], 
                overdraft_control_indicator = True
            )
        else :
            # All fields of this model are optional.
            return OverdraftOverdraftFeeChargeCap(
        )
    def testOverdraftOverdraftFeeChargeCap(self):
        """Test OverdraftOverdraftFeeChargeCap"""
        # Passes as long as neither construction raises.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    unittest.main()
| [
"systems@yapily.com"
] | systems@yapily.com |
9e892d45dda82d76e0bb7c174261cd156399a9d1 | 59934e837a4e425bba4ce9bcb46940a00b68691c | /user_auth/views.py | f240bb27c98f015c23120e1c06e384e664960076 | [] | no_license | cjredmond/store_app | 309e8956785a7720b0578cf512c33cab7d2d03a9 | d285ddfcc2e2427baa1c30fc9504c4bbf0f387f6 | refs/heads/master | 2021-01-12T07:31:56.664103 | 2017-01-09T19:42:48 | 2017-01-09T19:42:48 | 76,973,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | from django.shortcuts import render
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.views.generic import TemplateView, ListView, DetailView
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse, reverse_lazy
from user_auth.models import Cart, Profile
from shipping.models import Shipment, OrderProduct
from products.models import CartProduct
class IndexView(TemplateView):
    # Static landing page; no extra context is supplied.
    template_name = "index.html"
class UserCreateView(CreateView):
    # Sign-up view backed by Django's stock UserCreationForm.
    model = User
    form_class = UserCreationForm
    def get_success_url(self):
        # Send freshly registered users to the login page.
        return reverse('login')
class CartDetailView(DetailView):
    # Read-only view of a single Cart, looked up by pk from the URL.
    model = Cart
class CartUpdateView(UpdateView):
    """Checkout: turn the cart's items into a Shipment of OrderProducts."""
    fields = []
    model = Cart
    success_url = '/'

    def form_valid(self, form, **kwargs):
        # Snapshot each CartProduct into an OrderProduct attached to a new
        # Shipment for the current user, then empty the cart.
        target = Cart.objects.get(id=self.kwargs['pk'])
        shipment = Shipment.objects.create(user=self.request.user)
        items = CartProduct.objects.filter(cart=target)
        for product in items:
            OrderProduct.objects.create(
                name=product.name,
                price=product.price,
                description=product.description,
                shipment=shipment,
                # The original used the unimported name `Product` via
                # `Product.models.get`, which raised NameError (and Django
                # managers live on `objects`, not `models`).
                copy_product=Product.objects.get(name=product.name),
            )
        items.delete()
        return super().form_valid(form)
class ProfileDetailView(DetailView):
    # Read-only view of a single Profile, looked up by pk from the URL.
    model = Profile
class ProfileUpdateView(UpdateView):
    """Let a user edit the address fields of a profile."""
    model = Profile
    fields = ('address_num', 'address_street', 'address_city', 'address_state')

    def get_success_url(self):
        # Wrap the id in a list: the original passed args=str(id), and a
        # string is iterated character-by-character by reverse(), so any
        # multi-digit id produced too many URL arguments.
        return reverse('profile_detail_view', args=[self.request.user.profile.id])

    # The original also overrode form_valid() only to call
    # form.save(commit=False) and discard the result -- a no-op, removed.
| [
"connor.redmond@gmail.com"
] | connor.redmond@gmail.com |
9f8e6550d80fe0672b5a2dc3e1f8e9427e24ba58 | 5df8b0f5bda2603cf5437c2bcf1e30b326ea791e | /geektrust/MakeSpace/Tests/TestBufferTime.py | 329061397f6a11f3f3cdff0af3e8272ae4a269ab | [] | no_license | RishabhKatiyar/CorePython | 4e9133ab3949815290157818b9be8ab943f9b802 | d4e80e26b4c439fa269351c674466a2918eaaf77 | refs/heads/main | 2023-07-19T14:03:38.662270 | 2021-09-05T09:09:51 | 2021-09-05T09:09:51 | 369,292,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import unittest
from src.Models.BufferTime import BufferTime
class TestQuery(unittest.TestCase):
    # NOTE(review): the class is named TestQuery but exercises BufferTime
    # (the module is TestBufferTime.py); consider renaming for clarity.
    def test_buffer_time(self):
        # __str__ should join the start and end times with a hyphen.
        bufferTime = BufferTime()
        bufferTime.StartTime = "12:00"
        bufferTime.EndTime = "13:00"
        self.assertEqual(str(bufferTime), "12:00-13:00")
if __name__ == '__main__':
    unittest.main()
0243cb82e0007eaf0d6e9fbaf1c0b266247784ad | 09f0a01272042b6de7cb7af6d40f10dd6e10a574 | /compounds/script_new/prescription_13.py | bd9f51f91b260b8dbbc4ab23498987b051b8e19b | [] | no_license | jianping-grp/yatcm-1-02 | 4c010788989562365f78745a619fc66f5ef7554e | d03d599cc45fd757b19e11856bc203ee1c39fdf7 | refs/heads/master | 2021-09-06T11:39:22.829983 | 2018-02-06T05:28:54 | 2018-02-06T05:28:54 | 115,930,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,148 | py | import os
import xlrd
import logging
# Configure Django before importing any ORM models.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'yatcm.settings')
import django
django.setup()
from compounds.models import *
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
# Source spreadsheets: primary prescriptions and their variant ("vice") forms.
primary_file = '/home/jianping/django_test/yatcm/compounds/data/prescription_primary_complete_last.xlsx'
vice_file = '/home/jianping/django_test/yatcm/compounds/data/vice_complete.xlsx'
# Dedicated file logger for rows that fail to match or create records.
logger = logging.getLogger('tax_logger')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('/home/jianping/django_test/logs/prescription.txt')
formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
def primary_upload(row):
    """Create (or fetch) one primary Prescription from a spreadsheet row and
    attach every herb named in its zucheng column.

    Exception handling fixed: get_or_create never raises DoesNotExist (it
    would create the row instead), and the original went on to use
    ``prescription`` even after a caught exception left it unbound
    (NameError). Likewise, QuerySet.filter never raises DoesNotExist -- it
    returns an empty queryset, which is now checked explicitly.
    """
    chinese_name = row[0].strip()
    english_name = row[1].strip()
    pinyin = row[2].strip()
    zucheng = row[3].strip()
    herb_list = zucheng.split()
    yongfa = row[4].strip()
    fangjie = row[5].strip()
    chinese_gongyong = row[6].strip()
    english_gongyong = row[7].strip()
    chinese_xiandai = row[8].strip()
    english_xiandai = row[9].strip()
    try:
        prescription, created = Prescription.objects.get_or_create(
            chinese_name=chinese_name,
            english_name=english_name,
            pinyin_name=pinyin,
            zucheng=zucheng,
            yongfa=yongfa,
            fangjie=fangjie,
            chinese_indiction=chinese_gongyong,
            english_indiction=english_gongyong,
            chinese_modern_application=chinese_xiandai,
            english_modern_application=english_xiandai
        )
    except Prescription.MultipleObjectsReturned:
        # Duplicates already in the DB: log and skip this spreadsheet row.
        logger.warning("{} return more than one objects".format(unicode(chinese_name)))
        return
    for herb_name in herb_list:
        herbs = Herb.objects.filter(Chinese_name=herb_name)
        if not herbs:
            logger.info("{} does not exist".format(herb_name))
            continue
        for herb in herbs:
            prescription.herbs.add(herb)
        prescription.save()
def vice_upload(row):
    """Create/update one secondary ("vice") Prescription row, attach it to its
    main prescription, and link its herbs.

    ``row`` columns: main prescription chinese name, chinese name, pinyin,
    composition (space-separated herb names), usage.
    """
    main_prescription_name = row[0].strip()
    chinese_name = row[1].strip()
    pinyin_name = row[2].strip()
    zucheng = row[3].strip()
    herb_list = row[3].strip().split()
    yongfa = row[4].strip()
    try:
        prescription, created = Prescription.objects.get_or_create(
            chinese_name=chinese_name,
            pinyin_name=pinyin_name,
            zucheng=zucheng,
            yongfa=yongfa
        )
    except Prescription.MultipleObjectsReturned:
        # get_or_create() cannot raise DoesNotExist, but several matching
        # rows do raise this; nothing sensible can be linked, so stop here.
        # (Fixes the original "%s does not exist".format(...) message that
        # logged a literal "%s".)
        logger.warning("{} return more than one objects".format(chinese_name))
        return
    try:
        main_prescription = Prescription.objects.get(chinese_name=main_prescription_name)
        prescription.main_prescription = main_prescription
        prescription.save()
    except Prescription.DoesNotExist:
        logger.warning("%s does not exist!" % main_prescription_name)
    except Prescription.MultipleObjectsReturned:
        logger.warning("%s return more than one objects" % main_prescription_name)
    for herb_name in herb_list:
        # filter() never raises Herb.DoesNotExist; an empty queryset simply
        # means the herb is unknown.
        herbs = Herb.objects.filter(Chinese_name=herb_name)
        if not herbs:
            logger.info("{} does not exist".format(herb_name))
            continue
        for herb in herbs:
            prescription.herbs.add(herb)
    prescription.save()
if __name__ == '__main__':
    # Sheet 0 of each workbook holds the data; row 0 is the header row, so
    # data rows start at index 1.  (Python 2 script - note the print
    # statements.)
    primary_table = xlrd.open_workbook(primary_file).sheet_by_index(0)
    for row_number in range(1, primary_table.nrows):
        print row_number
        row = primary_table.row_values(row_number)
        primary_upload(row)
        # vice_upload(row)
    # Second pass: the secondary ("vice") prescriptions workbook.
    vice_table = xlrd.open_workbook(vice_file).sheet_by_index(0)
    for row_number in range(1, vice_table.nrows):
        print row_number
        row = vice_table.row_values(row_number)
        vice_upload(row)
| [
"libaiqing11@163.com"
] | libaiqing11@163.com |
d3367cde2f6849fd46b362f07fe9839e088bf261 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/bps_cs22955-0024/sdB_BPS_CS22955-0024_coadd.py | 413af22bb38328ee2ff50f70e492e505c5504036 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | from gPhoton.gMap import gMap
def main():
    # Single gMap call does all the work: builds an NUV count movie with
    # 30 s time steps over a 2'x2' box centred on the target coordinates,
    # plus a coadded count map, writing both FITS files to the given paths.
    gMap(band="NUV", skypos=[305.959417,-25.141333], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_BPS_CS22955-0024/sdB_BPS_CS22955-0024_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_BPS_CS22955-0024/sdB_BPS_CS22955-0024_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
a7a762a61b13efb880d4bd6c329f5fe38e10421f | da8730b3977d0b1e59e0b80d88cc871d92cd2700 | /nlp_txt/tfidf_test.py | 567643af0c55ace0ff786e4a687a64e2730c14bd | [] | no_license | legend1412/PythonDemo | 44b62f82a8826b5a50cf0a6506e6082d89d1ab7b | 3e6e4624801b9b9c272d0891f5675ec9466b4205 | refs/heads/master | 2020-12-28T01:27:39.942087 | 2020-10-12T02:33:29 | 2020-10-12T02:33:29 | 238,136,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | import os
import math
file_path = '../data/allfiles'
# Load the stop-word list (one word per line).
stop_set = set()
with open('stop_word.txt', 'r', encoding='utf-8') as f:
    for word in f.readlines():
        stop_set.add(word.strip())
doc_words = dict()  # filename -> {word: normalised term frequency}
doc_num = 0  # number of documents seen (the N in the idf formula)
for filename in os.listdir(file_path):  # every article file in the corpus directory
    # print(filename)
    with open(file_path + '/' + filename, 'r', encoding='utf-8') as f:
        # print(f.read())
        word_freq = dict()
        sum_cnt = 0  # total token count (for the proportion variant below)
        max_tf = 0  # highest raw count, used for max-tf normalisation
        for line in f.readlines():
            words = line.strip().split(' ')
            for word in words:
                if len(word.strip()) < 1 or word in stop_set:
                    continue
                if word_freq.get(word, -1) == -1:
                    word_freq[word] = 0
                word_freq[word] += 1
                sum_cnt += 1
                if word_freq[word] > max_tf:
                    max_tf = word_freq[word]
        # print(word_freq)
        # Normalise raw counts; divide by the maximum term frequency.
        for word in word_freq.keys():
            # word_freq[word] /= sum_cnt
            word_freq[word] /= max_tf
        # print(word_freq)
        doc_words[filename] = word_freq
        doc_num += 1
# print(doc_words)
# Count each word's document frequency (df).
doc_freq = dict()
for doc in doc_words.keys():  # doc is the file name
    for word in doc_words[doc].keys():
        if doc_freq.get(word, -1) == -1:
            doc_freq[word] = 0
        doc_freq[word] += 1
# print(doc_num)
# print(doc_freq)
# Apply the idf formula: log10(N / (df + 1)).
for word in doc_freq.keys():
    doc_freq[word] = math.log(doc_num / float(doc_freq[word] + 1), 10)
# print(doc_freq)
# Top/bottom 10 words by idf.
# print(sorted(doc_freq.items(), key=lambda x: x[1], reverse=True)[:10])
# print(sorted(doc_freq.items(), key=lambda x: x[1], reverse=False)[:10])
# Combine into tf*idf per document.
for doc in doc_words.keys():
    for word in doc_words[doc].keys():
        doc_words[doc][word] *= doc_freq[word]
# Top/bottom 10 tf-idf terms for one example article.
# print(sorted(doc_words['3business.seg.cln.txt'].items(), key=lambda x: x[1], reverse=True)[:10])
# print(sorted(doc_words['3business.seg.cln.txt'].items(), key=lambda x: x[1], reverse=False)[:10])
| [
"zhaojianhao_1984@163.com"
] | zhaojianhao_1984@163.com |
e4244f1657cfe342166a84a3c031654b728f69f5 | 91a2ecfaf5dc6c917ec2fda31f56291103f68ceb | /tests/post_process/test_ctc_greedy_decoder.py | bc51da0de6d2dc6ebf4c6a36ba101ef8d5808b7c | [
"BSD-3-Clause"
] | permissive | MyrtleSoftware/myrtlespeech | 635d1d16d1bd60fb07a4d30edbf9acb61786c13f | 8522048fd37744ffa06827a0cbd202b839a15453 | refs/heads/master | 2021-07-16T14:55:00.479967 | 2020-03-20T14:33:15 | 2020-03-20T14:33:15 | 192,501,300 | 12 | 1 | NOASSERTION | 2020-03-20T14:33:17 | 2019-06-18T08:44:33 | Python | UTF-8 | Python | false | false | 5,299 | py | from typing import Tuple
import hypothesis.strategies as st
import pytest
import torch
from hypothesis import assume
from hypothesis import given
from myrtlespeech.post_process.ctc_greedy_decoder import CTCGreedyDecoder
from tests.data.test_alphabet import random_alphabet
# Fixtures and Strategies -----------------------------------------------------
@st.composite
def ctc_greedy_decoder_input_outputs(
    draw,
) -> st.SearchStrategy[
    Tuple[
        int,  # blank index
        Tuple[torch.Tensor, torch.Tensor],  # x, lengths
        Tuple[torch.Tensor, torch.Tensor],  # output, output_lengths
    ]
]:
    """Returns a SearchStrategy for (blank_index, input, expected output)."""
    alphabet = draw(random_alphabet())
    assume(len(alphabet) > 1)  # must be at least blank and one other symbol
    blank_index = draw(st.integers(0, len(alphabet) - 1))
    # generate random batch of sentence (indices) excluding blank index
    batch_size = draw(st.integers(1, 8))
    non_blanks = alphabet.get_indices(list(alphabet))
    non_blanks.pop(blank_index)
    sentences = [
        draw(st.lists(st.sampled_from(non_blanks), min_size=1))
        for _ in range(batch_size)
    ]
    # for each sentence insert "blank" between duplicate symbols and replicate
    # some symbols - this is the CTC expansion the greedy decoder must undo
    blank_sentences = []
    for sentence in sentences:
        blank_sentence = []
        prev = None
        for symbol_idx in sentence:
            if prev is not None and prev == symbol_idx:
                n_rep = draw(st.integers(1, 5))
                blank_sentence.extend([blank_index] * n_rep)
            n_rep = draw(st.integers(1, 5))
            blank_sentence.extend([symbol_idx] * n_rep)
            prev = symbol_idx
        blank_sentences.append(blank_sentence)
    # compute inputs
    longest = max([len(sentence) for sentence in blank_sentences])
    input_sentences = []  # list of input 2D tensors (longest, len(alphabet))
    input_lengths = []  # list of input lengths
    for sentence in blank_sentences:
        input_sentence = torch.empty((longest, len(alphabet))).normal_()
        # ensure desired symbol has greatest value at each time step by summing
        # up abs value of all symbols
        for seq_idx, sym_idx in enumerate(sentence):
            input_sentence[seq_idx, sym_idx] = (
                0.5 + input_sentence[seq_idx, :].abs().sum()
            )
        input_sentences.append(input_sentence)
        input_lengths.append(len(sentence))
    x = torch.stack(input_sentences, dim=1)
    # lengths may use any integer dtype wide enough to represent the longest
    # sequence length; collect all representable options and draw one
    supported_dtypes = [torch.int64]
    if longest <= 2 ** 31 - 1:
        supported_dtypes.append(torch.int32)
    if longest <= 2 ** 15 - 1:
        supported_dtypes.append(torch.int16)
    if longest <= 2 ** 8 - 1:
        supported_dtypes.append(torch.uint8)
    if longest <= 2 ** 7 - 1:
        supported_dtypes.append(torch.int8)
    lengths_dtype = draw(st.sampled_from(supported_dtypes))
    lengths = torch.tensor(input_lengths, dtype=lengths_dtype)
    return blank_index, (x, lengths), sentences
# Tests -----------------------------------------------------------------------
@given(input_output=ctc_greedy_decoder_input_outputs())
def test_ctc_greedy_decoder_correct_decode(input_output) -> None:
    """Decoding the generated activations recovers the source sentences."""
    blank, (acts, seq_lens), expected = input_output
    decoder = CTCGreedyDecoder(blank)
    assert decoder(acts, seq_lens) == expected
@given(
    input_output=ctc_greedy_decoder_input_outputs(),
    dtype=st.sampled_from([torch.half, torch.float, torch.double]),
)
def test_ctc_greedy_decoder_raises_value_error_for_float_dtypes(
    input_output, dtype: torch.dtype
) -> None:
    """Ensures ValueError raised when lengths.dtype is float."""
    blank, (acts, seq_lens), _ = input_output
    decoder = CTCGreedyDecoder(blank)
    float_lens = seq_lens.to(dtype)
    with pytest.raises(ValueError):
        decoder(acts, float_lens)
@given(x_batch_size=st.integers(1, 32), lengths_batch_size=st.integers(1, 32))
def test_ctc_greedy_decoder_raises_value_error_when_batch_x_lengths_differ(
    x_batch_size: int, lengths_batch_size: int
) -> None:
    """Ensures ValueError raised when batch size of x and lengths differs."""
    assume(x_batch_size != lengths_batch_size)
    # seq len fixed at 10 and alphabet size at 5; only the batch dims vary
    acts = torch.empty((10, x_batch_size, 5))
    seq_lens = torch.empty(lengths_batch_size, dtype=torch.int16)
    with pytest.raises(ValueError):
        CTCGreedyDecoder(0)(acts, seq_lens)
@given(data=st.data(), input_output=ctc_greedy_decoder_input_outputs())
def test_ctc_greedy_decoder_raises_value_error_lengths_values_greater_seq_len(
    data, input_output
) -> None:
    """Ensures ValueError when lengths entry is greater than seq len of x."""
    blank, (acts, seq_lens), _ = input_output
    max_steps, batch, _ = acts.size()
    # corrupt one random batch entry so it exceeds the activation seq length
    too_long = data.draw(st.integers(max_steps + 1, 3 * max_steps))
    assume(too_long <= torch.iinfo(seq_lens.dtype).max)
    victim = data.draw(st.integers(0, batch - 1))
    seq_lens[victim] = too_long
    decoder = CTCGreedyDecoder(blank)
    with pytest.raises(ValueError):
        decoder(acts, seq_lens)
| [
"sam@samgd.com"
] | sam@samgd.com |
fe6dd390f3317b44c302772859d4bfffd9a5d58f | f0f4eef5a57807960070a1989799def6deaf30ef | /bclearer_boson_1_2_source/b_code/substages/operations/b_evolve/runners/boson_1_2_add_composite_names_operation_substage_runner.py | eb1b64dfdeff8908e40bf4673a209e0227ec359e | [
"MIT"
] | permissive | boro-alpha/bclearer_boson_1_2 | 4f8dd72b79e0ccdd759271d79e3cbfa61a949ac6 | 571b2e1ca6dee93ccc5cb4e30abe2660f40c2ac0 | refs/heads/master | 2023-08-23T05:02:30.384138 | 2021-11-04T11:24:35 | 2021-11-04T11:24:35 | 424,218,650 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | from nf_common_source.code.services.reporting_service.reporters.log_with_datetime import log_message
from nf_ea_common_tools_source.b_code.services.general.nf_ea.com.nf_ea_com_universes import NfEaComUniverses
from nf_ea_common_tools_source.b_code.services.session.orchestrators.ea_tools_session_managers import \
EaToolsSessionManagers
from bclearer_boson_1_2_source.b_code.configurations.objects.bespoke_operation_configurations import \
BespokeOperationConfigurations
from bclearer_boson_1_2_source.b_code.substages.operations.b_evolve.coordinate_lines.composite_names.add_composite_names_orchestrator import \
orchestrate_add_composite_names
def run_boson_1_2_add_composite_names_operation_substage(
        content_universe: NfEaComUniverses,
        ea_tools_session_manager: EaToolsSessionManagers,
        bespoke_operation_configuration: BespokeOperationConfigurations) \
        -> NfEaComUniverses:
    """Run the 'add composite names' content operation on a universe,
    logging the start and finish of the substage, and return the result."""
    message_stem = (
        'CONTENT OPERATION: Add composite_names to universe - '
        + bespoke_operation_configuration.short_name)
    log_message(message=message_stem + ' - started')
    output_universe = orchestrate_add_composite_names(
        content_universe=content_universe,
        ea_tools_session_manager=ea_tools_session_manager,
        short_name=bespoke_operation_configuration.short_name)
    log_message(message=message_stem + ' - finished')
    return output_universe
| [
"xibertao@borogroup.co.uk"
] | xibertao@borogroup.co.uk |
a043804e7b66e1f74b60df789131b4a8007b392f | ceead28beb1ea6cb56a2bb4472bc1d2396b39e6f | /gen_basis_helpers/workflows/unit_tests/utest_surface_energies.py | 850a7385e490305b0b680a214039bfa158ae9a4b | [] | no_license | RFogarty1/plato_gen_basis_helpers | 9df975d4198bff7bef80316527a8086b6819d8ab | 8469a51c1580b923ca35a56811e92c065b424d68 | refs/heads/master | 2022-06-02T11:01:37.759276 | 2022-05-11T12:57:40 | 2022-05-11T12:57:40 | 192,934,403 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py |
import types
import unittest
import unittest.mock as mock
import gen_basis_helpers.workflows.surface_energies as tCode
class TestSurfaceEnergyWorkflow(unittest.TestCase):
	"""Unit tests for SurfaceEnergyWorkflow using mocked calc objects."""
	def setUp(self):
		# Simple integer fixtures so the expected surface energy is easy to
		# compute by hand: (surfE/surfAtoms - bulkE/bulkAtoms) terms below.
		self.bulkEnergy = 4
		self.bulkNumbAtoms = 1
		self.surfEnergy = 6
		self.surfNumbAtoms = 2
		self.surfaceArea = 3
		self.surfCalcObj = mock.Mock()
		self.bulkCalcObj = mock.Mock()
		self.surfaceAreaFromUnitCell = mock.Mock()
		self.createTestObjs()
	def createTestObjs(self):
		# Attach fake parsed-file results to the mocked calc objects.
		energiesObjBulk = types.SimpleNamespace( electronicTotalE=self.bulkEnergy )
		energiesObjSurface = types.SimpleNamespace( electronicTotalE=self.surfEnergy )
		self.bulkCalcObj.parsedFile.energies = energiesObjBulk
		self.bulkCalcObj.parsedFile.numbAtoms = self.bulkNumbAtoms
		self.surfCalcObj.parsedFile.energies = energiesObjSurface
		self.surfCalcObj.parsedFile.numbAtoms = self.surfNumbAtoms
		# The area function always reports the fixed test surface area.
		self.surfaceAreaFromUnitCell.side_effect = lambda x: self.surfaceArea
		self.testObjA = tCode.SurfaceEnergyWorkflow(self.surfCalcObj, self.bulkCalcObj,
		                                            self.surfaceAreaFromUnitCell)
	def testEnergyPerAtomBulk(self):
		expValue = self.bulkEnergy / self.bulkNumbAtoms
		actValue = self.testObjA._energyPerAtomBulk
		self.assertEqual(expValue,actValue)
	def testEnergyPerAtomSurface(self):
		expValue = self.surfEnergy / self.surfNumbAtoms
		actValue = self.testObjA._energyPerAtomSurface
		self.assertEqual(expValue,actValue)
	def testRunGivesExpectedVal(self):
		# Expected value worked out by hand from the fixture numbers above.
		self.testObjA.run()
		expVal = -(1/3)
		actVal = self.testObjA.output[0].surfaceEnergy
		self.assertAlmostEqual(expVal,actVal)
	def testRunGivesExpectedEPerAtomVals(self):
		self.testObjA.run()
		expSurfEPerAtom, expBulkEPerAtom = self.surfEnergy/self.surfNumbAtoms, self.bulkEnergy/self.bulkNumbAtoms
		actSurfEPerAtom, actBulkEPerAtom = self.testObjA.output[0].surfEPerAtom, self.testObjA.output[0].bulkEPerAtom
		self.assertAlmostEqual(expSurfEPerAtom, actSurfEPerAtom)
		self.assertAlmostEqual(expBulkEPerAtom, actBulkEPerAtom)
| [
"richard.m.fogarty@gmail.com"
] | richard.m.fogarty@gmail.com |
4fa4c5f3db02a42dbc8eed96c34b70d36bbb1d69 | e532600dd9a7f4ad5cdb62134c4a6c670270b026 | /viewer/settings_nersc_dev.py | ebda2349228f803f210d7404cea09c3d963b2ff7 | [] | no_license | ziyaointl/decals-web | 5a94195528dd11bf9774f4ddf10076116ec80e14 | 8950ccb28d5ec51c6eda305b51ffbc484c1c8452 | refs/heads/master | 2020-04-26T17:27:07.555736 | 2019-07-13T04:17:24 | 2019-07-13T04:17:24 | 169,711,896 | 0 | 0 | null | 2019-02-08T09:34:06 | 2019-02-08T09:34:06 | null | UTF-8 | Python | false | false | 1,068 | py | from viewer.settings_common import *
#ENABLE_SQL = True
DEBUG_LOGGING = True
#DEBUG_LOGGING = False
USER_QUERY_DIR = '/tmp/viewer-dev-user'
#USER_CATALOG_DIR = USER_QUERY_DIR
READ_ONLY_BASEDIR = True
# All dev-instance URLs live under /viewer-dev.
ROOT_URL = '/viewer-dev'
STATIC_URL_PATH = '/viewer-dev/static'
STATIC_URL = 'http://%s%s/' % (HOSTNAME, STATIC_URL_PATH)
# {s}/{id}/{ver}/{z}/{x}/{y} placeholders are filled client-side by Leaflet.
TILE_URL = 'http://{s}.%s%s/{id}/{ver}/{z}/{x}/{y}.jpg' % (HOSTNAME, ROOT_URL)
STATIC_TILE_URL = 'http://{s}.%s%s/tiles/{id}/{ver}/{z}/{x}/{y}.jpg' % (HOSTNAME, STATIC_URL_PATH)
STATIC_TILE_URL_B = 'http://{s}.imagine.legacysurvey.org/static/tiles/{id}/{ver}/{z}/{x}/{y}.jpg'
SUBDOMAINS_B = SUBDOMAINS
# no CORS -- so don't use subdomains, or specify hostname (www.legacysurvey.org vs legacysurvey.org)
CAT_URL = '%s/{id}/{ver}/{z}/{x}/{y}.cat.json' % (ROOT_URL)
# Feature/layer toggles for this dev deployment.
#ENABLE_SQL = True
#ENABLE_MZLS = True
ENABLE_DEV = True
ENABLE_DR2 = False
ENABLE_DECAPS = True
ENABLE_EBOSS = True
ENABLE_DR3 = False
ENABLE_DR4 = False
ENABLE_DR5 = True
ENABLE_PS1 = True
#ENABLE_DR6 = True
#ENABLE_DR7 = True
ENABLE_DR8 = True
ENABLE_DES_DR1 = True
| [
"dstndstn@gmail.com"
] | dstndstn@gmail.com |
1945c8b7fc54ee79f504b75e8e089fb5ab7e8023 | 6faa21b2d8a7e55e64fe289a21e455d1b4718fbb | /app/alamat_app/urls.py | 756fa3973ae1e73aa1a4c1bac10f5218910202b6 | [
"MIT"
] | permissive | ganggas95/simdus_app | 62eae2e7a1e0e9b2250fbccd795de1f658db814b | 0c57e11c712912f61d29ca4b63dfa1fe38bb067c | refs/heads/master | 2020-03-27T12:42:48.506769 | 2018-08-29T10:05:28 | 2018-08-29T10:05:28 | 146,563,860 | 0 | 1 | MIT | 2018-08-29T10:05:28 | 2018-08-29T07:43:05 | CSS | UTF-8 | Python | false | false | 726 | py | from .blueprint import (
alamat_bp,
api_alamat_bp,
api_alamat)
from .views import (
AlamatView,
AddAlamatView,
EditAlamatView)
from .api import alamat_ns
# List page for addresses.
alamat_bp.add_url_rule(
    '/admin/alamat',
    view_func=AlamatView.as_view(
        'alamat_view',
        'alamat.html'
    )
)
# Create form (GET shows the form, POST submits it).
alamat_bp.add_url_rule(
    '/admin/alamat/add',
    view_func=AddAlamatView.as_view(
        'add_alamat_view',
        'tambah_alamat.html'
    ),
    methods=['GET', 'POST']
)
# Detail/edit form for one address, keyed by its integer id.
alamat_bp.add_url_rule(
    '/admin/alamat/<int:id_alamat>/detail',
    view_func=EditAlamatView.as_view(
        'edit_alamat_view',
        'edit_alamat.html'
    ),
    methods=['GET', 'POST']
)
# Register the REST namespace on the API object.
api_alamat.add_namespace(alamat_ns)
| [
"subhannizar25@gmail.com"
] | subhannizar25@gmail.com |
32e94eed43019733021268596b411e4d13419be2 | ebd2b04c9d55691e7208ff8d6816beb7dcae8f77 | /fin_app/tests.py | 594425d6e1c9efda02cee7397cb6138dd08e28a8 | [] | no_license | playmepe/djing2 | e35f11fdbef2b4f4bf2ef37d7f3e7b6dc9d5aba0 | 10379e974d969be94a40317e8121436f03f19ca2 | refs/heads/master | 2022-12-28T01:28:01.653972 | 2020-10-12T14:37:23 | 2020-10-12T14:37:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,121 | py | from hashlib import md5
# from django.test.utils import override_settings
from django.utils import timezone
from django.utils.html import escape
from rest_framework.test import APITestCase
from customers.models import Customer
from fin_app.models import PayAllTimeGateway
from profiles.models import UserProfile
def _make_sign(act: int, pay_account: str, serv_id: str, pay_id, secret: str):
md = md5()
s = "%d_%s_%s_%s_%s" % (act, pay_account, serv_id, pay_id, secret)
md.update(bytes(s, 'utf-8'))
return md.hexdigest()
# @override_settings(DEFAULT_TABLESPACE='ram')
class CustomAPITestCase(APITestCase):
    """Shared fixture: admin user, one customer with a negative balance,
    and one AllTime payment gateway; plus thin GET/POST helpers."""
    def get(self, *args, **kwargs):
        return self.client.get(*args, **kwargs)
    def post(self, *args, **kwargs):
        return self.client.post(*args, **kwargs)
    def setUp(self):
        self.admin = UserProfile.objects.create_superuser(
            username='admin',
            password='admin',
            telephone='+797812345678'
        )
        # customer for tests; the negative balance and fio are asserted
        # verbatim in the XML responses of AllPayTestCase
        custo1 = Customer.objects.create_user(
            telephone='+79782345678',
            username='custo1',
            password='passw'
        )
        custo1.balance = -13.12
        custo1.fio = 'Test Name'
        custo1.save(update_fields=('balance', 'fio'))
        custo1.refresh_from_db()
        self.customer = custo1
        # Pay System
        pay_system = PayAllTimeGateway.objects.create(
            title='Test pay system',
            secret='secret',
            service_id='service_id',
            slug='pay_gw_slug'
        )
        pay_system.refresh_from_db()
        self.pay_system = pay_system
class AllPayTestCase(CustomAPITestCase):
    """End-to-end checks of the AllTime pay-gateway endpoint: ACT=1 (account
    info), ACT=4 (make payment) and ACT=7 (payment status)."""
    time_format = '%d.%m.%Y %H:%M'
    url = '/api/fin/pay_gw_slug/pay/'
    def test_user_pay_view_info(self):
        """ACT=1 returns the customer's balance/name/account as XML."""
        current_date = timezone.now().strftime(self.time_format)
        service_id = self.pay_system.service_id
        r = self.get(self.url, {
            'ACT': 1,
            'PAY_ACCOUNT': 'custo1',
            'SIGN': _make_sign(1, 'custo1', '', '', self.pay_system.secret)
        })
        o = ''.join((
            "<pay-response>",
            "<balance>-13.12</balance>",
            "<name>Test Name</name>",
            "<account>custo1</account>",
            "<service_id>%s</service_id>" % escape(service_id),
            "<min_amount>10.0</min_amount>",
            "<max_amount>5000</max_amount>",
            "<status_code>21</status_code>",
            "<time_stamp>%s</time_stamp>" % escape(current_date),
            "</pay-response>"
        ))
        self.assertXMLEqual(r.content.decode('utf8'), o)
        self.assertEqual(r.status_code, 200)
    def test_user_pay_pay(self):
        """ACT=4 credits 18.21 to the customer (-13.12 -> 5.09) and the
        follow-up ACT=7 check (user_pay_check) sees the transaction."""
        current_date = timezone.now().strftime(self.time_format)
        service_id = self.pay_system.service_id
        r = self.get(self.url, {
            'ACT': 4,
            'PAY_ACCOUNT': 'custo1',
            'PAY_AMOUNT': 18.21,
            'RECEIPT_NUM': 2126235,
            'SERVICE_ID': service_id,
            'PAY_ID': '840ab457-e7d1-4494-8197-9570da035170',
            'TRADE_POINT': 'term1',
            'SIGN': _make_sign(4, 'custo1', service_id,
                               '840ab457-e7d1-4494-8197-9570da035170', self.pay_system.secret)
        })
        xml = ''.join((
            "<pay-response>",
            "<pay_id>840ab457-e7d1-4494-8197-9570da035170</pay_id>",
            "<service_id>%s</service_id>" % escape(service_id),
            "<amount>18.21</amount>",
            "<status_code>22</status_code>",
            "<time_stamp>%s</time_stamp>" % escape(current_date),
            "</pay-response>"
        ))
        self.assertXMLEqual(r.content.decode('utf-8'), xml)
        self.assertEqual(r.status_code, 200)
        self.customer.refresh_from_db()
        self.assertEqual(round(self.customer.balance, 2), 5.09)
        self.user_pay_check(current_date)
    def user_pay_check(self, test_pay_time):
        """ACT=7: the transaction recorded above is reported with status 111."""
        current_date = timezone.now().strftime(self.time_format)
        service_id = self.pay_system.service_id
        r = self.get(self.url, {
            'ACT': 7,
            'SERVICE_ID': service_id,
            'PAY_ID': '840ab457-e7d1-4494-8197-9570da035170',
            'SIGN': _make_sign(7, '', service_id,
                               '840ab457-e7d1-4494-8197-9570da035170', self.pay_system.secret)
        })
        xml = ''.join((
            "<pay-response>",
            "<status_code>11</status_code>",
            "<time_stamp>%s</time_stamp>" % escape(current_date),
            "<transaction>",
            "<pay_id>840ab457-e7d1-4494-8197-9570da035170</pay_id>",
            "<service_id>%s</service_id>" % escape(service_id),
            "<amount>18.21</amount>",
            "<status>111</status>",
            "<time_stamp>%s</time_stamp>" % escape(test_pay_time),
            "</transaction>"
            "</pay-response>"
        ))
        self.assertXMLEqual(r.content.decode(), xml)
        self.assertEqual(r.status_code, 200)
| [
"nerosketch@gmail.com"
] | nerosketch@gmail.com |
# Diamond-inheritance demo: D inherits from both B and C, which share base A.
class A:
    def process(self):
        print("Process in A")
class B(A):
    pass
class C(A):
    def process(self):
        print("Process in C")
class D(B, C):
    pass
obj = D()
# C3 linearisation gives the MRO D -> B -> C -> A; B defines no process,
# so C's override wins and this prints "Process in C".
obj.process()
print(D.mro()) | [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
c23660db52fea446d25017ac197279c37ff7df94 | c4267e2e092ac0e3f53e82eef5a0bd5417222525 | /base/base_T5.py | b7209bde1bec3d59f9f7cefb2f28f8fe4a4fe002 | [] | no_license | chenyang1999/lanqiaocup_marcus | fde22466287e2bea02b7ea4292256b70fed1d33b | 6ad13f1a28ca6b650d9545b5148a450d1c9cd154 | refs/heads/master | 2023-01-07T06:47:11.316530 | 2020-11-14T08:21:42 | 2020-11-14T08:21:42 | 286,075,513 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | '''
问题描述
给定一个年份,判断这一年是不是闰年。
当以下情况之一满足时,这一年是闰年:
1. 年份是4的倍数而不是100的倍数;
2. 年份是400的倍数。
其他的年份都不是闰年。
输入格式
输入包含一个整数y,表示当前的年份。
输出格式
输出一行,如果给定的年份是闰年,则输出yes,否则输出no。
'''
# Read a year from stdin and print whether it is a Gregorian leap year:
# divisible by 4 but not 100, or divisible by 400.
n = int(input())
leap = n % 400 == 0 or (n % 4 == 0 and n % 100 != 0)
print('yes' if leap else 'no')
| [
"34475230+chenyang1999@users.noreply.github.com"
] | 34475230+chenyang1999@users.noreply.github.com |
3a4f7cd5e67246cbd53f57079413493158901450 | a4deea660ea0616f3b5ee0b8bded03373c5bbfa2 | /concrete_instances/register-variants/vpsrlw_ymm_ymm_xmm/instructions/vpsrlw_ymm_ymm_xmm/vpsrlw_ymm_ymm_xmm.gen.vex.py | ac220d900c443bc37197e364bd268625ee83393d | [] | no_license | Vsevolod-Livinskij/x86-64-instruction-summary | 4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd | c276edab1b19e3929efb3ebe7514489f66087764 | refs/heads/master | 2022-02-02T18:11:07.818345 | 2019-01-25T17:19:21 | 2019-01-25T17:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | import angr
# Python 2 script: load the compiled instruction object, dump basic project
# facts, then lift the entry-point basic block to VEX IR.
proj = angr.Project('./instructions/vpsrlw_ymm_ymm_xmm/vpsrlw_ymm_ymm_xmm.o')
print proj.arch
print proj.entry
print proj.filename
irsb = proj.factory.block(proj.entry).vex
irsb.pp() | [
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
574d9f240c46961ab64781b6206ab27569c919d4 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/sdl2/test/version_test.py | 4c4f54ca0aba4dd64e485bd5c91f9973da5dff93 | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 2,505 | py | import sys
import ctypes
import pytest
import sdl2
from sdl2 import dll, __version__, version_info
def test__version_tuple():
    """_version_tuple_to_int packs (major, minor, micro) into one int."""
    # Note that this is not public API.
    assert dll._version_tuple_to_int((2, 0, 18)) == 2018
    assert dll._version_tuple_to_int((2, 24, 1)) == 2241
    # Micro version stops at 9 in this encoding
    assert dll._version_tuple_to_int((2, 24, 15)) == 2249
    assert dll._version_tuple_to_int((2, 99, 9)) == 2999
    # Minor version stops at 99 in this encoding
    assert dll._version_tuple_to_int((2, 103, 6)) == 2999
def test_SDL_version():
    """SDL_version stores its constructor arguments verbatim."""
    v = sdl2.SDL_version(0, 0, 0)
    assert v.major == 0
    assert v.minor == 0
    assert v.patch == 0
def test_SDL_GetVersion():
    """SDL_GetVersion fills the struct with the linked library's version."""
    v = sdl2.SDL_version()
    sdl2.SDL_GetVersion(ctypes.byref(v))
    assert type(v) == sdl2.SDL_version
    assert v.major == 2
    assert v.minor >= 0
    assert v.patch >= 0
    # must be at least SDL 2.0.5 and agree with the cached dll.version_tuple
    assert (v.major, v.minor, v.patch) >= (2, 0, 5)
    assert (v.major, v.minor, v.patch) == dll.version_tuple
def test_SDL_VERSIONNUM():
    """SDL_VERSIONNUM encodes a version triple as major*1000+minor*100+patch."""
    assert sdl2.SDL_VERSIONNUM(1, 2, 3) == 1203
    assert sdl2.SDL_VERSIONNUM(4, 5, 6) == 4506
    assert sdl2.SDL_VERSIONNUM(2, 0, 0) == 2000
    assert sdl2.SDL_VERSIONNUM(17, 42, 3) == 21203
    # This is a bit weird now that SDL uses the minor version more often,
    # but does sort in the correct order against all versions of SDL 2.
    assert sdl2.SDL_VERSIONNUM(2, 23, 0) == 4300
    # This is the highest possible SDL 2 version
    assert sdl2.SDL_VERSIONNUM(2, 255, 99) == 27599
def test_SDL_VERSION_ATLEAST():
    """SDL_VERSION_ATLEAST compares against the compile-time SDL version."""
    assert sdl2.SDL_VERSION_ATLEAST(1, 2, 3)
    assert sdl2.SDL_VERSION_ATLEAST(2, 0, 0)
    assert sdl2.SDL_VERSION_ATLEAST(2, 0, 1)
    assert sdl2.SDL_VERSION_ATLEAST(
        sdl2.SDL_MAJOR_VERSION, sdl2.SDL_MINOR_VERSION, sdl2.SDL_PATCHLEVEL
    )
    assert not sdl2.SDL_VERSION_ATLEAST(4, 0, 0)
def test_SDL_GetRevision():
    """SDL_GetRevision returns a (possibly empty) source-revision string."""
    rev = sdl2.SDL_GetRevision()
    # If revision not empty string (e.g. Conda), test the prefix
    if len(rev):
        if dll.version_tuple >= (2, 0, 16):
            if rev[0:4] not in (b"http", b"SDL-"):
                pytest.xfail("no API guarantee about the format of this string")
        else:
            # pre-2.0.16 builds came from Mercurial, hence the hg- prefix
            assert rev[0:3] == b"hg-"
def test_SDL_GetRevisionNumber():
    """SDL_GetRevisionNumber is non-negative; old non-Windows builds had real numbers."""
    if sys.platform in ("win32",) or dll.version_tuple >= (2, 0, 16):
        # HG tip on Win32 does not set any revision number
        assert sdl2.SDL_GetRevisionNumber() >= 0
    else:
        assert sdl2.SDL_GetRevisionNumber() >= 7000
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
1064dd2a532c28681368959e15db5a6a1789ba6d | 152f8c72bcb315bc0cf40ec389a97898cbc057c3 | /_sadm/listen/errors.py | bb22caffc4806e8930841d85c15e5e243312fe81 | [
"BSD-3-Clause"
] | permissive | jrmsdev/pysadm | 0a205cf7b4bf647461d480403051b5f88f82090b | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | refs/heads/master | 2022-06-11T07:33:53.847565 | 2019-11-13T04:25:00 | 2019-11-13T04:25:00 | 186,210,706 | 1 | 0 | BSD-3-Clause | 2022-06-03T22:45:36 | 2019-05-12T04:19:14 | Python | UTF-8 | Python | false | false | 1,470 | py | # Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
from bottle import response, HTTPError, request, HTTP_CODES
from _sadm import log
__all__ = ['init', 'error']
def _handler(code, error):
	"""Shared bottle error handler: log the failure and return a plain-text
	body.  ``error.args[2]``/``args[3]`` carry an optional message and debug
	detail supplied by error() below."""
	log.debug("handler %d" % code)
	log.debug("%d - %s" % (error.status_code, error.status_line))
	argsLen = len(error.args)
	if argsLen >= 3:
		log.error("%s %d - %s" % (request.remote_addr, code, error.args[2]))
		if argsLen >= 4:
			log.debug("%s" % error.args[3])
	else:
		log.error("%s %d - %s" % (request.remote_addr, code, request.path))
	response.headers['Content-Type'] = 'text/plain; charset=UTF-8'
	if code == 304:
		# 304 response should not include body content
		return ''
	codeStatus = HTTP_CODES.get(code, None)
	if codeStatus is not None:
		return "%s\n" % codeStatus
	return "ERROR %d\n" % code
_initDone = False
def init(wapp):
	"""Register the plain-text _handler for each supported HTTP error code
	on the given bottle app, then flag initialisation as done."""
	global _initDone
	@wapp.error(304)
	def error_304(error):
		return _handler(304, error)
	@wapp.error(400)
	def error_400(error):
		return _handler(400, error)
	@wapp.error(403)
	def error_403(error):
		return _handler(403, error)
	@wapp.error(404)
	def error_404(error):
		return _handler(404, error)
	@wapp.error(405)
	def error_405(error):
		return _handler(405, error)
	@wapp.error(500)
	def error_500(error):
		return _handler(500, error)
	_initDone = True
def error(code, msg):
	"""Log the failing request and build a bottle HTTPError for the caller
	to return/raise."""
	log.error("%s %d - %s" % (request.remote_addr, code, msg))
	return HTTPError(status=code, body=msg)
| [
"jrmsdev@gmail.com"
] | jrmsdev@gmail.com |
def select_max(arr, k):
    """Run k passes of descending selection sort on ``arr`` (mutating it in
    place) and return the k-th largest value as a string."""
    for pos in range(k):
        best = max(range(pos, len(arr)), key=arr.__getitem__)
        arr[pos], arr[best] = arr[best], arr[pos]
    return str(arr[k - 1])
def select_min(arr, k):
    """Run k passes of ascending selection sort on ``arr`` (mutating it in
    place) and return the k-th smallest value as a string."""
    for pos in range(k):
        best = min(range(pos, len(arr)), key=arr.__getitem__)
        arr[pos], arr[best] = arr[best], arr[pos]
    return str(arr[k - 1])
T = int(input())
for test_case in range(1,T+1):
    N = int(input())
    numbers = list(map(int,input().split()))
    result = []
    # Interleave the 1st..5th largest and smallest values.  Both helpers
    # mutate ``numbers`` in place, but a partially sorted list never changes
    # which element is the k-th largest/smallest, since each selection pass
    # scans the whole remaining range.
    for i in range(1,6):
        result.append(select_max(numbers,i))
        result.append(select_min(numbers,i))
    a= ' '.join(result)
    print('#{} {}'.format(test_case, a))
| [
"nate199458@gmail.com"
] | nate199458@gmail.com |
5e88ad802cd4191e107338ee1973dcab8852c683 | 5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4 | /training/04_sorting/closest_numbers.py | a8911c2fa9d77f3da3ce44d5dceeaa0729b6d78a | [] | no_license | salvador-dali/algorithms_general | 04950bd823fc354adc58a4f23b7d2f3d39664798 | aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d | refs/heads/master | 2020-12-14T06:24:10.466601 | 2016-07-17T06:00:17 | 2016-07-17T06:00:17 | 47,397,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | # https://www.hackerrank.com/challenges/closest-numbers/
# Given a list of unsorted numbers, can you find the numbers that have the smallest absolute
# difference between them? If there are multiple pairs, find them all.
def closestNumbers(arr):
    """Return all pairs with the smallest absolute difference, flattened.

    Sorts `arr` in place; every adjacent pair whose gap equals the global
    minimum gap is appended (smaller element first), in ascending order.
    """
    arr.sort()
    if len(arr) < 2:
        return []
    best = min(arr[i + 1] - arr[i] for i in range(len(arr) - 1))
    pairs = []
    for i in range(len(arr) - 1):
        if arr[i + 1] - arr[i] == best:
            pairs += [arr[i], arr[i + 1]]
    return pairs
raw_input()  # first input line (the element count) is read and discarded
print ' '.join(map(str, closestNumbers(map(int, raw_input().split())))) | [
"dmytro@knowlabs.com"
] | dmytro@knowlabs.com |
7dda270cb89324484f1f2395537ee120b96934c0 | 63efeff58299f3ca66c7be0aa80d636ade379ebf | /March/firstNotRepeatingCharacters.py | 7d6c8eeff907be76a685eb78cdf248738008b223 | [] | no_license | gosch/Katas-in-python | 0eb6bafe2d6d42dac64c644c2fd48f90bdcef22b | f89ee2accdde75222fa1e4e0ca8b4f8e27b7b760 | refs/heads/master | 2021-07-24T23:50:26.268217 | 2020-04-14T23:53:15 | 2020-04-14T23:53:15 | 137,545,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | def firstNotRepeatingCharacter(s):
v = [0] * 26
for i in s:
v[ord(i) - ord('a')] += 1
for i in s:
if v[ord(i) - ord['a']] == 1:
return i
return '_'
print(firstNotRepeatingCharacter("abacabad")) | [
"francisco.gosch@ge.com"
] | francisco.gosch@ge.com |
544f39d0e4849b3724752b33837d4ade1f75bb68 | b5ce6908490cfb8e6a1e1cbe4745d675122ddce0 | /questions/compare-version-numbers/Solution.py | c09a63ebef803affca38aee90843be66dd2e765e | [
"MIT"
] | permissive | franklingu/leetcode-solutions | 8895910f13208e1d8e604100d84c2dd35684cde4 | 7ad7e5c1c040510b7b7bd225ed4297054464dbc6 | refs/heads/master | 2023-01-09T01:34:08.097518 | 2023-01-02T02:05:35 | 2023-01-02T02:05:35 | 43,345,677 | 155 | 66 | MIT | 2020-10-02T03:41:36 | 2015-09-29T04:54:38 | Python | UTF-8 | Python | false | false | 2,144 | py | """
Compare two version numbers version1 and version2.
If version1 > version2 return 1; if version1 < version2 return -1;otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three", it is the fifth second-level revision of the second first-level revision.
You may assume the default revision number for each level of a version number to be 0. For example, version number 3.4 has a revision number of 3 and 4 for its first and second level revision number. Its third and fourth level revision number are both 0.
Example 1:
Input: version1 = "0.1", version2 = "1.1"
Output: -1
Example 2:
Input: version1 = "1.0.1", version2 = "1"
Output: 1
Example 3:
Input: version1 = "7.5.2.4", version2 = "7.5.3"
Output: -1
Example 4:
Input: version1 = "1.01", version2 = "1.001"
Output: 0
Explanation: Ignoring leading zeroes, both “01” and “001" represent the same number “1”
Example 5:
Input: version1 = "1.0", version2 = "1.0.0"
Output: 0
Explanation: The first version number does not have a third level revision number, which means its third level revision number is default to "0"
Note:
Version strings are composed of numeric strings separated by dots . and this numeric strings may have leading zeroes.
Version strings do not start or end with dots, and they will not be two consecutive dots.
"""
import itertools
class Solution:
    def compareVersion(self, version1: str, version2: str) -> int:
        """Compare two dotted version strings level by level.

        Returns 1 if version1 > version2, -1 if smaller, 0 if equal.
        Missing revision levels default to 0, and leading zeros are
        irrelevant because each level is compared as an integer -- this
        replaces the original manual zero-padding and char-by-char walk.
        """
        v1 = version1.split('.')
        v2 = version2.split('.')
        for s1, s2 in itertools.zip_longest(v1, v2, fillvalue='0'):
            n1, n2 = int(s1), int(s2)
            if n1 != n2:
                return 1 if n1 > n2 else -1
        return 0
| [
"franklingujunchao@gmail.com"
] | franklingujunchao@gmail.com |
b883ed842f7690fde699ff443ca676adc01a44b0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_vinegary.py | 653de88336afb19a4ab2a15bab7561617533e496 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py |
#class header
class _VINEGARY():
def __init__(self,):
self.name = "VINEGARY"
self.definitions = [u'tasting or smelling like vinegar: ', u'angry and unpleasant, or giving a lot of criticism: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
aeff02783c27d7d8d94f42c54b1ddcec5836af70 | 7619aed8a311e2832634379762c373886f4354fb | /trace_floodlight_firewall-MeshTopology4-steps200/replay_config.py | 87195057cf5fe11f0768a6301d89358f649e3825 | [] | no_license | jmiserez/sdnracer-traces | b60f8588277c4dc2dad9fe270c05418c47d229b3 | 8991eee19103c8ebffd6ffe15d88dd8c25e1aad5 | refs/heads/master | 2021-01-21T18:21:32.040221 | 2015-12-15T14:34:46 | 2015-12-15T14:34:46 | 39,391,225 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow.replayer import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='java -ea -Dlogback.configurationFile=./src/main/resources/logback-trace.xml -jar ./target/floodlight.jar -cf ./src/main/resources/trace_firewall.properties', label='c1', address='127.0.0.1', cwd='../floodlight')],
topology_class=MeshTopology,
topology_params="num_switches=4",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
ignore_interposition=False,
kill_controllers_on_exit=True)
control_flow = Replayer(simulation_config, "paper/trace_floodlight_firewall-MeshTopology4-steps200/events.trace",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
default_dp_permit=False,
pass_through_whitelisted_messages=False,
invariant_check_name='InvariantChecker.check_liveness',
bug_signature="")
| [
"jeremie@miserez.org"
] | jeremie@miserez.org |
e8f2ee54b4593c5f2cba14c51572ac76ab1bd325 | 8d2e5b5ea408579faa699c09bdbea39e864cdee1 | /ufora/networking/SocketStringChannel.py | 7dd8db8de7d30be32e2bfbb205bf76b4a306ae67 | [
"dtoa",
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | iantuioti/ufora | 2218ef4c7e33c171268ce11458e9335be7421943 | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | refs/heads/master | 2021-01-17T17:08:39.228987 | 2017-01-30T16:00:45 | 2017-01-30T16:00:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ufora.native.SocketStringChannel as SocketStringChannelNative
#note that when we create native Channel objects, we need to keep the python object alive
#indefinitely. Otherwise, if we lose the python socket, it will close the file descriptor.
#we can't use os.dup to duplicate the descriptors because it occasionally produces file descriptors
#that conflict with incoming sockets.
allSockets_ = []  # module-lifetime keep-alive list; intentionally never pruned (see note above)
def SocketStringChannel(callbackScheduler, socket):
    """Wrap a python socket in a native StringChannel.

    The result is an instance of ufora.native.StringChannel.StringChannel.
    The python socket object is parked in the module-level ``allSockets_``
    list so it stays alive: the native channel takes over the file
    descriptor and closes it itself, and letting the python socket be
    garbage-collected would close that descriptor prematurely.
    """
    allSockets_.append(socket)
    fd = socket.fileno()
    return SocketStringChannelNative.SocketStringChannel(callbackScheduler, fd)
| [
"braxton.mckee@gmail.com"
] | braxton.mckee@gmail.com |
9e3346c416adf380dc6e422d8318352326c33b14 | 6deafbf6257a5c30f084c3678712235c2c31a686 | /Toolz/sqlmap/plugins/dbms/sybase/fingerprint.py | c88b22d045d9152ffd54bb1a076a59d3a2daa804 | [
"Unlicense",
"LicenseRef-scancode-generic-cla",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-other-permissive"
] | permissive | thezakman/CTF-Heaven | 53fcb4a72afa821ad05d8cc3b309fb388f958163 | 4b52a2178922f1502ab00fa8fc156d35e1dc653f | refs/heads/master | 2023-04-05T18:20:54.680378 | 2023-03-21T13:47:45 | 2023-03-21T13:47:45 | 167,290,879 | 182 | 24 | Unlicense | 2022-11-29T21:41:30 | 2019-01-24T02:44:24 | Python | UTF-8 | Python | false | false | 3,355 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.common import unArrayizeValue
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import OS
from lib.core.session import setDbms
from lib.core.settings import SYBASE_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
    """Sybase-specific DBMS fingerprinting plugin."""
    def __init__(self):
        GenericFingerprint.__init__(self, DBMS.SYBASE)
    def getFingerprint(self):
        """Assemble the human-readable fingerprint summary string."""
        value = ""
        # OS guess for the web server, parsed from HTTP response headers.
        wsOsFp = Format.getOs("web server", kb.headersFp)
        if wsOsFp:
            value += "%s\n" % wsOsFp
        if kb.data.banner:
            # OS guess for the back end, parsed from the grabbed banner.
            dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)
            if dbmsOsFp:
                value += "%s\n" % dbmsOsFp
        value += "back-end DBMS: "
        if not conf.extensiveFp:
            # Without extensive fingerprinting, report only the DBMS name.
            value += DBMS.SYBASE
            return value
        actVer = Format.getDbms()
        blank = " " * 15  # aligns continuation lines under "back-end DBMS: "
        value += "active fingerprint: %s" % actVer
        if kb.bannerFp:
            banVer = kb.bannerFp.get("dbmsVersion")
            banVer = Format.getDbms([banVer])
            value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)
        htmlErrorFp = Format.getErrorParsedDBMSes()
        if htmlErrorFp:
            value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)
        return value
    def checkDbms(self):
        """Actively probe whether the back-end DBMS is Sybase.

        Returns True (and records DBMS/version via setDbms/Backend) on a
        positive identification, False otherwise.
        """
        # Shortcut: DBMS already forced/known to be Sybase -- finish up.
        if not conf.extensiveFp and Backend.isDbmsWithin(SYBASE_ALIASES):
            setDbms("%s %s" % (DBMS.SYBASE, Backend.getVersion()))
            self.getBanner()
            # The plugin marks the host OS as Windows in this path.
            Backend.setOs(OS.WINDOWS)
            return True
        infoMsg = "testing %s" % DBMS.SYBASE
        logger.info(infoMsg)
        if conf.direct:
            # Direct DB connection: no injection probe needed.
            result = True
        else:
            # Probe using @@transtate, a Sybase global variable.
            result = inject.checkBooleanExpression("@@transtate=@@transtate")
        if result:
            infoMsg = "confirming %s" % DBMS.SYBASE
            logger.info(infoMsg)
            # Second, independent probe to reduce false positives.
            result = inject.checkBooleanExpression("suser_id()=suser_id()")
            if not result:
                warnMsg = "the back-end DBMS is not %s" % DBMS.SYBASE
                logger.warn(warnMsg)
                return False
            setDbms(DBMS.SYBASE)
            self.getBanner()
            if not conf.extensiveFp:
                return True
            infoMsg = "actively fingerprinting %s" % DBMS.SYBASE
            logger.info(infoMsg)
            # Try to read the leading version digit straight out of @@VERSION.
            result = unArrayizeValue(inject.getValue("SUBSTRING(@@VERSION,1,1)"))
            if result and result.isdigit():
                Backend.setVersion(str(result))
            else:
                # Fall back to boolean probing of version patterns 12..15.
                for version in xrange(12, 16):
                    result = inject.checkBooleanExpression("PATINDEX('%%/%d[./]%%',@@VERSION)>0" % version)
                    if result:
                        Backend.setVersion(str(version))
                        break
            return True
        else:
            warnMsg = "the back-end DBMS is not %s" % DBMS.SYBASE
            logger.warn(warnMsg)
            return False
| [
"thezakman@ctf-br.org"
] | thezakman@ctf-br.org |
f8bd0380e72c08810391f7b6c48e94849efcf7a1 | e06700779972be87d537d1fee69d1808317c4c65 | /posts/views.py | 506535f85514465bdc3a83139bd13b42184ff508 | [] | no_license | ajy720/django_Study | 5d73d14974ff8c538871dd0b5782d431baf6bdce | a7b7e7ad12239b9b296f8055c6d417e6835938f3 | refs/heads/master | 2022-06-05T13:21:01.479148 | 2020-05-04T05:37:17 | 2020-05-04T05:37:17 | 229,542,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,961 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from .models import Post
from .forms import PostForm
import pdb
def main(request):
    """List every post on the main page.

    Ordering is handled on the model side (the original notes two
    options: Meta ordering in models.py, or an explicit
    order_by("-created_at") here).
    """
    posts = Post.objects.all()
    return render(request, "posts/main.html", {"posts": posts})
def new(request):
    """Render an empty PostForm for creating a new post."""
    return render(request, "posts/new.html", {"form": PostForm()})
@require_POST  # decorator restricts this view to POST submissions
def create(request):
    """Persist a new post submitted through PostForm.

    On success, redirect to the new post's page (redirect(instance) uses
    the model's get_absolute_url). Previously an invalid form still fell
    through to redirect(form.instance) on an *unsaved* instance, which
    cannot resolve a URL; now the form is re-rendered with its errors.
    """
    form = PostForm(request.POST, request.FILES or None)  # FILES carries the uploaded image
    if form.is_valid():
        form.save()
        return redirect(form.instance)
    return render(request, "posts/new.html", {"form": form})
def show(request, post_id):
    """Display one post and bump its view counter.

    post_id arrives through the URL pattern (see urls.py); the
    commented-out alternative in the original read it from request.GET
    instead. Missing ids yield a 404.
    """
    post = get_object_or_404(Post, id=post_id)
    post.view_count += 1
    post.save()
    return render(request, "posts/show.html", {"post": post})
def edit(request, post_id):
    """Render the edit form pre-filled with the existing post's data."""
    post = get_object_or_404(Post, id=post_id)
    context = {
        "form": PostForm(instance=post),  # bound to the existing row
        "post": post,  # the template needs it to build the update URL
    }
    return render(request, "posts/edit.html", context)
@require_POST
def update(request, post_id):
    """Apply submitted PostForm changes to an existing post.

    Passing instance=post makes form.save() update the existing row
    instead of inserting a new one. Previously an invalid form silently
    redirected (discarding the user's input); now the edit page is
    re-rendered with the validation errors.
    """
    post = get_object_or_404(Post, id=post_id)
    form = PostForm(request.POST, request.FILES or None, instance=post)
    if form.is_valid():
        form.save()
        # redirect(post) resolves through the model's get_absolute_url().
        return redirect(post)
    return render(request, "posts/edit.html", {"form": form, "post": post})
@require_POST
def delete(request, post_id):
    """Delete the post (POST only) and return to the main list."""
    get_object_or_404(Post, id=post_id).delete()
    return redirect("main")
| [
"ajy720@gmail.com"
] | ajy720@gmail.com |
cd9080a31750fd6fb0f6169f6b3a521c68c2b600 | 501615c82801733e69c7447ab9fd68d3883ed947 | /hotfix/.svn/pristine/cd/cd9080a31750fd6fb0f6169f6b3a521c68c2b600.svn-base | 3edb65fecab8efada344d05e730544d8fd6e0929 | [] | no_license | az0ne/python | b2e1cc1e925d1fcdb269e7dd4c48e24665deeeee | aec5d23bb412f7dfca374fb5c5b9988c1b817347 | refs/heads/master | 2021-07-18T02:08:46.314972 | 2017-10-27T06:23:36 | 2017-10-27T06:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | #!/usr/bin/env python
# -*- coding: utf8 -*-
from django.db import models
class RecommendKeywords(models.Model):
name = models.CharField(u'推荐搜索关键词', max_length=50)
class Meta:
verbose_name = u'推荐搜索关键词'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
| [
"1461847795@qq.com"
] | 1461847795@qq.com | |
fed1270ad3e023a09ad4a6c34adf894036258543 | 9848a719ddfdd21b5fe1fa2f55da290c0f6952dc | /lib/RollHash.py | 256aaff82909156e295b6cb35458d089751d67f4 | [] | no_license | maomao905/algo | 725f7fe27bb13e08049693765e4814b98fb0065a | 84b35ec9a4e4319b29eb5f0f226543c9f3f47630 | refs/heads/master | 2023-03-13T12:38:55.401373 | 2021-03-25T01:55:48 | 2021-03-25T01:55:48 | 351,278,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | A = 'abcd'
B = 'dacbabcdaacd'
W, MOD = 26, 2**32


def rabin_karp_contains(pattern, text):
    """Return True if `pattern` occurs in `text`, via Rabin-Karp.

    The original module-level code used `return` outside a function,
    which is a SyntaxError; the logic is wrapped in a function. Two more
    fixes: matches are only reported once a full-length window exists,
    and a hash hit is verified by direct comparison because MOD is only
    2**32 and collisions are possible.
    """
    if not pattern:
        return True  # empty pattern matches trivially
    length = len(pattern)
    # Polynomial hash of the pattern in base W, modulo MOD.
    target = 0
    for ch in pattern:
        target = (target * W + ord(ch)) % MOD
    wl = pow(W, length, MOD)  # W**length: weight of the outgoing character
    window = 0
    for i in range(len(text)):
        if i < length:
            window = (window * W + ord(text[i])) % MOD
        else:
            # Slide: drop text[i-length]'s contribution, append text[i].
            window = (window * W - ord(text[i - length]) * wl + ord(text[i])) % MOD
        if i >= length - 1 and window == target and text[i - length + 1:i + 1] == pattern:
            return True
    return False


if __name__ == '__main__':
    # A is defined earlier in this module.
    print(rabin_karp_contains(A, B))
| [
"maoya.sato@gmail.com"
] | maoya.sato@gmail.com |
c22c92dfe26cffb6193e8818d239bc2767418d4f | e755453c853ae400d94f562ad215b59166b63782 | /tests/splay_tests/test_prev.py | 68c2817fe7eda9dc6a0bce3cc6dcdd77fe0ba5d6 | [
"MIT"
] | permissive | lycantropos/dendroid | 0cb3e276dd9c476b82b0b7a17c25c2e05616a993 | fd11c74a395eb791caf803c848805569869080f6 | refs/heads/master | 2023-04-07T11:07:55.550796 | 2023-03-27T00:46:03 | 2023-03-27T00:46:03 | 215,369,321 | 0 | 1 | MIT | 2020-09-24T05:02:02 | 2019-10-15T18:29:36 | Python | UTF-8 | Python | false | false | 547 | py | from typing import Tuple
from hypothesis import given
from dendroid.hints import Value
from tests.utils import (BaseSet,
are_keys_equal,
set_value_to_key)
from . import strategies
@given(strategies.non_empty_sets_with_their_non_min_values)
def test_properties(set_with_value: Tuple[BaseSet, Value]) -> None:
    """prev() returns the predecessor and, as a splay-tree side effect,
    leaves the accessed node at the root of the underlying tree.

    The strategy supplies non-minimal values, so a predecessor exists.
    """
    set_, value = set_with_value
    result = set_.prev(value)
    # The returned value's key matches the (freshly splayed) root's key...
    assert are_keys_equal(set_value_to_key(set_, result), set_.tree.root.key)
    # ...and the returned object is the root's stored value itself.
    assert result is set_.tree.root.value
| [
"azatibrakov@gmail.com"
] | azatibrakov@gmail.com |
1d51d837e727c28b3f81bbbc0c956636983839e0 | b28300f3af1517e149baeadd9f87d92e56e23ba0 | /pages/forms.py | 65907867023b70c0f348bf0a17bbf9029e3fe93f | [] | no_license | xarala221/myportfolio | f39ea13fe493d4d3a7525774d568daa099a51cd0 | a62be57414b0971157a9923c17ec8bf5c9524823 | refs/heads/master | 2022-12-14T23:00:42.540391 | 2018-06-29T01:27:39 | 2018-06-29T01:27:39 | 138,381,453 | 2 | 0 | null | 2022-12-08T02:14:30 | 2018-06-23T07:39:53 | JavaScript | UTF-8 | Python | false | false | 781 | py | from django import forms
from .models import Contact
class ContactForm(forms.ModelForm):
    """Model-backed contact form exposing name, email and message."""
    # Bootstrap-styled widgets. Note 'required' is rendered as the HTML
    # attribute string 'True', not a Python bool.
    email = forms.EmailField(label='Email :', widget=forms.EmailInput(attrs={'placeholder': 'Your Email', 'class': 'form-control ', 'required': 'True'}))
    name = forms.CharField(label='Name :', widget=forms.TextInput(attrs={'placeholder': 'Your Name', 'class': 'form-control ', 'required': 'True'}))
    message = forms.CharField(label='Message :', widget=forms.Textarea(attrs={'placeholder': 'What you want to say? Are You have a project? or you need help?', 'class': 'form-control ', 'required': 'True'}))
    class Meta:
        model = Contact
        fields = ['name', 'email', 'message']
    def clean_email(self, *args, **kwargs):
        # NOTE(review): pass-through validator -- returns the cleaned value
        # unchanged. Django invokes clean_email() with no extra arguments,
        # so *args/**kwargs look unused; confirm before relying on them.
        email = self.cleaned_data.get("email")
        return email
"xaralaxarala@gmail.com"
] | xaralaxarala@gmail.com |
c340916b2367f30fe565bb515bbbbf87eb4445e3 | acad4e69e68354311e6b82f70d713e5c47d30cf8 | /User_version/Chapter1_Pre-requisit/Section1_math/01_solution.py | c1513df4d29e7b25e65b9fd7de36fa04bd169bf1 | [
"MIT"
] | permissive | MacHu-GWU/Data-Science-in-Python | fabe7232349cec78a4cded4d930a66fc5e362a2c | a3042864fae6c764d031817c7d7f5ef4ee7251cb | refs/heads/master | 2020-04-06T07:02:54.415759 | 2018-08-04T16:56:27 | 2018-08-04T16:56:27 | 22,156,078 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | ##coding=utf8
##author=Sanhe
##date=07-13-2014
'''
Let's take a look at how long your codes takes.
Please copy your codes to the function "combination(n,k)", and run.
Usually the solution is 4-10 times faster than naive implementation.
WHY?
Time complexity
c(n,k) = n! / (k! * (n-k)! )
n + k + n-k = 2n times multiply
= [ n * (n-1) * ... * (n-k+1) ] / k!
k + k = 2k times multiply
Memory complexity
for i in xrange(n) is better than for i in range(n)
Because, range(5) is to generate a "n" length list in
memory and then cumulatively do the multiply
But, xrange(n) is to generate only one number each time
and then do the mutiply. So when n is large, you save
a lot of memory.
'''
import time
import math
def combination(n,k,answer = -999):
''' Copy your codes down here
'''
return answer
def combination1(n, k):
""" A fast way to calculate combination.
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in xrange(1, min(k, n - k) + 1): ## high performance generator
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0
def test():
n, k = 500, 188 ## <=== test data
st = time.clock()
answer = combination(n,k)
print '\nyour codes time elapse: %s' % (time.clock() - st)
print 'c(%s,%s) = %s' % (n, k, answer)
st = time.clock()
answer1 = combination1(n,k)
print '\nsolution time elapse: %s' % (time.clock() - st)
print 'c(%s,%s) = %s' % (n, k, answer1)
if answer != answer1:
print ('\nMaybe you forget to paste your own codes to "def combination(n,k,answer=0):"'
'\n or your code has something wrong.')
if __name__ == '__main__':
test() | [
"husanhe@gmail.com"
] | husanhe@gmail.com |
d7c70f9ad07e408f22db366edf4d3645a22ed5d4 | 3f7d819c918fe185bfb2c96bdd85b394e3b3b42c | /12-4-15.py | fffc1d210b97cd33177e4c8571206576d5467b74 | [] | no_license | daxaxelrod/notes-from-3rd-semester | 1b04a1c8bba458c9af226d190ebdfa6bb31cef62 | 171f272bd57c7989f999dc799f1fa1b3ca88f5d9 | refs/heads/master | 2021-01-10T12:49:05.632358 | 2015-12-14T22:19:40 | 2015-12-14T22:19:40 | 47,573,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | 12-4-15 class notes
quick thing on zeroth law of thermodynamics
    validates the idea of temperature
    two objects each in thermal equilibrium with a third object are in thermal equilibrium with each other
Chapter 19
Kinetic Theory of gasses
def review():
look over the first two setions of chater 19
pressure volume equals N R tempertature
if you hold pressure constant
volme is proportional to tempertature
pressure Volume is proportional to the tempertature of the system
PV = N*R*T
n = number of moles in the system
r = gas constant has a value of 8.31 joules per mole kelvin (j/ mol*K)
Pv = N * k * T
n is the same
k is the boltzmann constant == 1.38 times 10 ^ -23 j/k
make sure you are always working with kelvin
add 273 to the celius value
Ideal Gasses and work
Isothermal process (constant temperature) work:
pv =nrt where pressure = nrt/V
work is the integral from Vi to Vf of pressure d volume
plug in pressure from above
bam
we are left integrating 1/V
work then equals
n * R * T (ln V) from vi to vf
    Work done for an isothermal process = nRT ln(Vf/Vi)
remember in constant volume
work is 0
isobaric work equal pressure(vf-vi)
Constant temperature process
we get a curve were pressume is proportional to 1 / V
Specific heat
recall
Q = cm detla temperature c is specific heat, m is mass
can also write in moles but pay attention to units bro
for constant volume case
Q = c n delta t c equals 12.5 moles kelvin
delta E = Q minus work
for constant volume case
the way to account for internal energy change isisssssss is
delta e = Molar specific heat *moles * delta temperature
path independant
this can be used for change in internal energy
        all processes with the same delta T have the same change in internal energy
for constant pressuure case:
Q = molar specific heat at constant pressure * n * delta t
where molar blah blah is 20.8 j / mol k
at constant pressure, work = p delta v
can also write n * R * delta t
any change has to come from the temperature
Recall first law
delta e is molar thing *n * delta t
constant specific heat for volume * n delta t
===
constant specific heat for pressure
Cp - Cv = R
adiabatic means that there is no heat exchange
bc internal enrgy is path independant (thing potential with grav)
use delta E = cv *n * delta t
| [
"daxaxelrod@gmail.com"
] | daxaxelrod@gmail.com |
7f34e8fac9c89052bcd5c9d0ab669942016c0277 | a3ed36806067cecb5c7aaa5dcfe4a0e4163a0742 | /tests/unit/test_base.py | ca8ad6a430909b83a6a161825458f6c183d37108 | [
"MIT"
] | permissive | gabrielfalcao/plant | 02f14055439947fa4c56f58c8b9f539b8eb7f559 | 6d9122470fd6ad3b89957f0dcbc6bdeedb46ca9b | refs/heads/master | 2021-08-28T16:00:45.533014 | 2017-09-08T17:55:39 | 2017-09-08T17:55:39 | 11,139,306 | 6 | 4 | NOASSERTION | 2019-12-20T17:39:56 | 2013-07-03T01:37:50 | Python | UTF-8 | Python | false | false | 1,435 | py | # -*- coding: utf-8 -*-
# <plant - filesystem for humans>
# Copyright (C) <2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from mock import patch
from plant.core import isfile, isdir
@patch('plant.core.isdir_base')
def test_isdir_when_exists(isdir_base):
    # Bare string below is the nose/sure-style test description.
    ("plant.core.isdir should return os.path.isdir when given path exists")
    # Stub the underlying os.path.isdir alias so no filesystem is touched.
    isdir_base.return_value = "yeah!"
    # Second argument True flags the "path exists" branch (per the
    # description above), which should delegate to the patched base.
    isdir("/foo", True).should.equal("yeah!")
    isdir_base.assert_called_once_with("/foo")
@patch('plant.core.isfile_base')
def test_isfile_when_exists(isfile_base):
    # Bare string below is the nose/sure-style test description.
    ("plant.core.isfile should return os.path.isfile when given path exists")
    # Stub the underlying os.path.isfile alias so no filesystem is touched.
    isfile_base.return_value = "yeah!"
    # Second argument True flags the "path exists" branch, mirroring the
    # isdir test above.
    isfile("/foo", True).should.equal("yeah!")
    isfile_base.assert_called_once_with("/foo")
| [
"gabriel@nacaolivre.org"
] | gabriel@nacaolivre.org |
bf8ca3cf4733a29992a93f9a401b418c5c3aeb68 | b6699361cea596afbafcff40056e12a3ccadb590 | /complexconfig_python/complexconfig-0.2/complexconfig/parser/dict_parser.py | 5605448e1ff53b86678150ab5608dcb74eec378e | [
"Apache-2.0"
] | permissive | benhe119/python_lib | 4c6ba3468ef380eadc5ab65401052aba224801db | e2d4052de04c82cb7bccd08042f28db824cab442 | refs/heads/master | 2020-08-09T10:09:59.368521 | 2019-03-29T02:21:55 | 2019-03-29T02:21:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # -*- coding: utf8 -*-
from __future__ import print_function, unicode_literals
from . import BaseParser
class DictParser(BaseParser):
    """
    Parser that accepts configuration directly as a Python dictionary.
    Unlike the text-based parsers, both parse() and dump() are identity
    operations: the dictionary is already in parsed form.
    """
    def parse(self, data):
        # The input is already a dict; nothing to parse.
        return data
    def dump(self, config):
        # Symmetric with parse(): the dict itself is the serialized form.
        return config
| [
"zengjinping@threathunter.cn"
] | zengjinping@threathunter.cn |
db4a5b1057320eaf972ba83b5616f4e54d71cc19 | 99ed889f5d679f0712a9578435819ff9fe1038e9 | /baselines_tactile/ppo2/test_microbatches.py | c758fb8f5ba3adf9123d5c59631875cee4554f44 | [] | no_license | WMViolet/baselines_tactile | 7e7800c44167d6e29f4f4a187e49d92462f49100 | 761193122ff8c914d8b983d93620a7ffc63ea917 | refs/heads/main | 2023-02-24T00:30:04.616016 | 2021-02-01T23:45:53 | 2021-02-01T23:45:53 | 322,393,115 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | from envs import gym
import tensorflow as tf
import numpy as np
from functools import partial
from tactile_baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from tactile_baselines.common.tf_util import make_session
from tactile_baselines.ppo2.ppo2 import learn
from tactile_baselines.ppo2.microbatched_model import MicrobatchedModel
def test_microbatches():
    """Check that PPO2 trained with microbatching (microbatch_size=2)
    reaches (nearly) the same trainable-variable values as a plain run
    with identical seeds and hyperparameters.
    """
    def env_fn():
        # Seeded CartPole so both runs see identical episodes.
        env = gym.make('CartPole-v0')
        env.seed(0)
        return env
    learn_fn = partial(learn, network='mlp', nsteps=32, total_timesteps=32, seed=0)
    # Reference run: default (non-microbatched) model, own graph/session.
    env_ref = DummyVecEnv([env_fn])
    sess_ref = make_session(make_default=True, graph=tf.Graph())
    learn_fn(env=env_ref)
    vars_ref = {v.name: sess_ref.run(v) for v in tf.trainable_variables()}
    # Test run: same training, gradients accumulated over microbatches.
    env_test = DummyVecEnv([env_fn])
    sess_test = make_session(make_default=True, graph=tf.Graph())
    learn_fn(env=env_test, model_fn=partial(MicrobatchedModel, microbatch_size=2))
    # learn_fn(env=env_test)
    vars_test = {v.name: sess_test.run(v) for v in tf.trainable_variables()}
    for v in vars_ref:
        # atol absorbs accumulated floating-point differences between runs.
        np.testing.assert_allclose(vars_ref[v], vars_test[v], atol=3e-3)
if __name__ == '__main__':
    test_microbatches()
| [
"violetfuyao@berkeley.edu"
] | violetfuyao@berkeley.edu |
6eaff47ed53d499b32e238b5a7097c89a1bc0175 | f6641c552622e1446d913d50f561ff14c524e885 | /data/box_data_bidirect.py | 76bae58334dd110d98ed895f37fc04711a480921 | [] | no_license | yangyi02/video_motion_synthetic3 | 939d1ddd3a4caada87e0e2ef3ed430dae9b2447e | e732d3641c555422b977648211683cb21186bcdb | refs/heads/master | 2021-01-01T06:46:55.553125 | 2017-08-04T00:19:28 | 2017-08-04T00:19:28 | 97,509,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py | import numpy
from synthetic_data_bidirect import SyntheticDataBidirect
import learning_args
import logging
# Module-wide logging format: level, timestamp and source location.
logging.basicConfig(format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s',
                    level=logging.INFO)
class BoxDataBidirect(SyntheticDataBidirect):
    """Synthetic data source that renders random colored rectangles on a
    background and feeds them to the base class's bidirectional
    motion-synthesis pipeline (generate_data).
    """
    def __init__(self, args):
        super(BoxDataBidirect, self).__init__(args)
        # Noise amplitudes for foreground boxes and the background.
        self.fg_noise = args.fg_noise
        self.bg_noise = args.bg_noise
        self.train_images, self.test_images = None, None
    def generate_source_image(self):
        """Draw num_objects random rectangles per batch item.

        Returns (im, mask) of shapes (num_objects, batch, 3, H, W) and
        (num_objects, batch, 1, H, W). Mask pixels hold num_objects - i,
        presumably a depth/occlusion ordering consumed by the base
        class -- TODO confirm in SyntheticDataBidirect.
        """
        batch_size, num_objects, im_size = self.batch_size, self.num_objects, self.im_size
        im = numpy.zeros((num_objects, batch_size, 3, im_size, im_size))
        mask = numpy.zeros((num_objects, batch_size, 1, im_size, im_size))
        for i in range(num_objects):
            for j in range(batch_size):
                # Random box geometry. NOTE(review): im_size/8 etc. assume
                # Python 2 integer division (floats under Python 3).
                width = numpy.random.randint(im_size/8, im_size*3/4)
                height = numpy.random.randint(im_size/8, im_size*3/4)
                x = numpy.random.randint(0, im_size - width)
                y = numpy.random.randint(0, im_size - height)
                # Color kept away from 0/1 so the additive noise below
                # cannot push pixel values outside [0, 1].
                color = numpy.random.uniform(self.bg_noise, 1 - self.fg_noise, 3)
                for k in range(3):
                    im[i, j, k, y:y+height, x:x+width] = color[k]
                noise = numpy.random.rand(3, height, width) * self.fg_noise
                im[i, j, :, y:y+height, x:x+width] = im[i, j, :, y:y+height, x:x+width] + noise
                mask[i, j, 0, y:y+height, x:x+width] = num_objects - i
        return im, mask
    def get_next_batch(self, images=None):
        """Generate one batch; `images` is accepted for interface
        compatibility with sibling data classes but is unused here.
        """
        src_image, src_mask = self.generate_source_image()
        im, motion, motion_r, motion_label, motion_label_r, seg_layer = self.generate_data(src_image, src_mask)
        return im, motion, motion_r, motion_label, motion_label_r, seg_layer
def unit_test():
    """Smoke test: build one batch from BoxDataBidirect and display it."""
    args = learning_args.parse_args()
    logging.info(args)
    dataset = BoxDataBidirect(args)
    batch = dataset.get_next_batch()
    im, motion, motion_r, motion_label, motion_label_r, seg_layer = batch
    dataset.display(im, motion, motion_r, seg_layer)


if __name__ == '__main__':
    unit_test()
| [
"yangyi02@gmail.com"
] | yangyi02@gmail.com |
adb66b38b38c519f92faf97c07eefe21491c1f8b | bf0c13d412a7021b299c5e0622e63e72172cf725 | /week4/todolist/api/migrations/0002_task_created_by.py | c072e72d50a61591cd796376074211e17682bbbe | [] | no_license | Alibek120699/BFDjango | 765e734e925041947f607a1d15228309dfa3e647 | eac06c317551c561ffccb44750862972ae11dea3 | refs/heads/master | 2022-12-01T15:49:39.402815 | 2020-04-19T21:09:39 | 2020-04-19T21:09:39 | 233,657,360 | 0 | 0 | null | 2022-11-22T05:49:56 | 2020-01-13T17:50:13 | Python | UTF-8 | Python | false | false | 593 | py | # Generated by Django 2.2 on 2020-02-16 13:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # NOTE(review): initial = True on a "0002" migration is unusual --
    # normally only 0001 is initial; confirm this was intended.
    initial = True
    dependencies = [
        ('api', '0001_initial'),
        # Depends on whatever model AUTH_USER_MODEL is swapped to.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Adds Task.created_by as a required FK to the user model;
        # existing rows are backfilled with user id 1 (default=1).
        migrations.AddField(
            model_name='task',
            name='created_by',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"sayakalibek1@gmail.com"
] | sayakalibek1@gmail.com |
8a56d384d8f53a7a0624ccda96ddcebda8177931 | acf8ce66cc5335b7a1ce98887949bee724d98d9e | /stu_and_extend/extend_info/views.py | e751667727978882e693199a847ea95679efd8f4 | [] | no_license | iversongit/20180426 | eec4f56612cd775be179783932fee19cd9f59096 | b5af588eff11940fff9222e9662ca3f49442ab10 | refs/heads/master | 2020-03-13T10:15:20.061654 | 2018-04-27T02:19:00 | 2018-04-27T02:19:00 | 131,079,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | from django.http import HttpResponse
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
# Create your views here.
from extend_info.models import Extend
from student.models import Student
def addExtendInfo(request,s_id):
if request.method == "GET":
return render(request,"addExtendInfo.html",{'s_id':s_id})
if request.method == "POST":
e_addr = request.POST.get("e_addr")
e_tel =request.POST.get("e_tel")
e_birth = request.POST.get("e_birth")
e_des = request.POST.get("e_des")
Extend.objects.create(
e_addr = e_addr,
e_tel = e_tel,
e_birth = e_birth,
e_des = e_des,
s_id=s_id
)
# return HttpResponse("添加成功")
return HttpResponseRedirect("/extendapp/showall")
def showAllInfo(request):
stus = Student.objects.all()
extinfos = Extend.objects.all()
return render(request,"showAllInfo.html",{'stus':stus,'extinfos':extinfos})
| [
"1564329410@qq.com"
] | 1564329410@qq.com |
d4fba6a899bdae2ae0774c15e23236244c0e3ec1 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/LArCalorimeter/LArExample/LArCalibProcessing/share/LArCalib_Example_HVCorr.py | e4330dd17032ade9800b69f61ce3b19e7ff0e949 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,059 | py | #################################################################
#
# example jobOptions to read HV from Cool/DCS in athena
# and compute cell level correction factors to store in conditions
# database
#
##################################################################
from time import strptime,time
from calendar import timegm
#set date to compute the correction
if "date" not in dir():
date="2013-02-06:09:45:00"
if "TimeStamp" not in dir():
try:
ts=strptime(date+'/UTC','%Y-%m-%d:%H:%M:%S/%Z')
TimeStamp=int(timegm(ts))*1000000000L
except ValueError:
print "ERROR in time specification, use e.g. 2007-05-25:14:01:00"
from LArCalibProcessing.TimeStampToRunLumi import TimeStampToRunLumi
rlb=TimeStampToRunLumi(TimeStamp,dbInstance="CONDBR2")
if rlb is None:
print "WARNING: Failed to convert time",TimeStamp,"into a run/lumi number"
RunNumber=999999
LumiBlock=0
else:
RunNumber=rlb[0]
LumiBlock=rlb[1]
print "---> Working on run",RunNumber,"LB",LumiBlock,"Timestamp:",TimeStamp
timediff=int(time()-(TimeStamp/1000000000L))
if timediff<0:
print "ERROR: Timestamp in the future???"
else:
(days,remainder)=divmod(timediff,24*60*60)
(hours,seconds)=divmod(remainder,60*60)
print "---> Timestamp is %i days %i hours and %i minutes ago" % (days,hours,int(seconds/60))
pass
# name of output local sql file
OutputSQLiteFile = 'HVScaleCorr.db'
# name of output Pool file
PoolFileName = "dummy.pool.root"
# database folder
LArHVScaleCorrFolder = "/LAR/ElecCalibFlat/HVScaleCorr"
# output key
keyOutput = "LArHVScaleCorr"
# tag suffix
#LArCalibFolderOutputTag = "-UPD3-00"
# write IOV
WriteIOV = True
# global tag to read other conditions if needed
if "GlobalTag" not in dir():
GlobalTag = 'LARCALIB-RUN2-00'
# begin run IOV
IOVBegin = 0
###################################################################
from RecExConfig.RecFlags import rec
rec.RunNumber.set_Value_and_Lock(int(RunNumber))
from PerfMonComps.PerfMonFlags import jobproperties
jobproperties.PerfMonFlags.doMonitoring = True
from AthenaCommon.DetFlags import DetFlags
DetFlags.all_setOff()
DetFlags.LAr_setOn()
DetFlags.Tile_setOn()
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo.set_Value_and_Lock('atlas')
globalflags.DataSource.set_Value_and_Lock('data')
globalflags.DatabaseInstance="CONDBR2"
# Get a handle to the default top-level algorithm sequence
from AthenaCommon.AppMgr import ToolSvc
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
# Get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
# Get a handle to the ApplicationManager
from AthenaCommon.AppMgr import theApp
# Setup Db stuff
import AthenaPoolCnvSvc.AthenaPool
from AthenaCommon.GlobalFlags import jobproperties
jobproperties.Global.DetDescrVersion='ATLAS-R2-2015-04-00-00'
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
from AtlasGeoModel import SetupRecoGeometry
svcMgr.IOVDbSvc.GlobalTag = GlobalTag
try:
svcMgr.IOVDbSvc.DBInstance=""
except:
pass
include( "AthenaCommon/Atlas_Gen.UnixStandardJob.py" )
include( "CaloDetMgrDetDescrCnv/CaloDetMgrDetDescrCnv_joboptions.py")
#include( "CaloIdCnv/CaloIdCnv_joboptions.py" )
#include( "TileIdCnv/TileIdCnv_jobOptions.py" )
#include( "LArDetDescr/LArDetDescr_joboptions.py" )
#include("TileConditions/TileConditions_jobOptions.py" )
include("LArConditionsCommon/LArConditionsCommon_comm_jobOptions.py")
#include( "LArCondAthenaPool/LArCondAthenaPool_joboptions.py" )
include( "LArConditionsCommon/LArIdMap_comm_jobOptions.py" )
#block to read the existing HVCorr
conddb.blockFolder(LArHVScaleCorrFolder);
from LArConditionsCommon import LArHVDB #Sets HV Calbling and DCS Database folders
#conddb.addOverride("/LAR/IdentifierOfl/HVLineToElectrodeMap","LARIdentifierOflHVLineToElectrodeMap-UPD3-00")
from LArCondUtils.LArCondUtilsConf import LArHVToolDB
theLArHVToolDB = LArHVToolDB("LArHVToolDB")
ToolSvc += theLArHVToolDB
from LArRecUtils.LArRecUtilsConf import LArHVCorrTool
theLArHVCorrTool = LArHVCorrTool("LArHVCorrTool")
theLArHVCorrTool.keyOutput = keyOutput
theLArHVCorrTool.folderName= LArHVScaleCorrFolder
theLArHVCorrTool.HVTool = theLArHVToolDB
ToolSvc += theLArHVCorrTool
from LArCalibUtils.LArCalibUtilsConf import LArHVCorrMaker
theLArHVCorrMaker = LArHVCorrMaker("LArHVCorrMaker")
topSequence += theLArHVCorrMaker
from LArCalibTools.LArCalibToolsConf import LArHVScaleCorr2Ntuple
theLArHVScaleCorr2Ntuple = LArHVScaleCorr2Ntuple("LArHVScaleCorr2Ntuple")
theLArHVScaleCorr2Ntuple.AddFEBTempInfo = False
topSequence += theLArHVScaleCorr2Ntuple
#from LArCalibTools.LArCalibToolsConf import LArWFParams2Ntuple
#LArWFParams2Ntuple = LArWFParams2Ntuple("LArWFParams2Ntuple")
#LArWFParams2Ntuple.DumpTdrift = True
#topSequence += LArWFParams2Ntuple
theApp.HistogramPersistency = "ROOT"
from GaudiSvc.GaudiSvcConf import NTupleSvc
svcMgr += NTupleSvc()
svcMgr.NTupleSvc.Output = [ "FILE1 DATAFILE='hvcorr_ntuple.root' OPT='NEW'" ]
# deal with DB output
OutputObjectSpec = "CondAttrListCollection#"+LArHVScaleCorrFolder
OutputObjectSpecTag = ''
OutputDB = "sqlite://;schema="+OutputSQLiteFile+";dbname=CONDBR2"
from RegistrationServices.OutputConditionsAlg import OutputConditionsAlg
theOutputConditionsAlg=OutputConditionsAlg("OutputConditionsAlg",PoolFileName,
[OutputObjectSpec],[OutputObjectSpecTag],WriteIOV)
theOutputConditionsAlg.Run1 = IOVBegin
svcMgr.IOVDbSvc.dbConnection = OutputDB
from RegistrationServices.RegistrationServicesConf import IOVRegistrationSvc
svcMgr += IOVRegistrationSvc()
svcMgr.IOVRegistrationSvc.OutputLevel = DEBUG
svcMgr.IOVRegistrationSvc.RecreateFolders = True
svcMgr.IOVRegistrationSvc.SVFolder=True
svcMgr.IOVRegistrationSvc.OverrideNames += ["HVScaleCorr",]
svcMgr.IOVRegistrationSvc.OverrideTypes += ["Blob16M",]
#--------------------------------------------------------------
#--- Dummy event loop parameters
#--------------------------------------------------------------
svcMgr.EventSelector.RunNumber = RunNumber
svcMgr.EventSelector.EventsPerRun = 1
svcMgr.EventSelector.FirstEvent = 0
svcMgr.EventSelector.EventsPerLB = 1
svcMgr.EventSelector.FirstLB = LumiBlock
svcMgr.EventSelector.InitialTimeStamp = int(TimeStamp/1e9)
svcMgr.EventSelector.TimeStampInterval = 5
svcMgr.EventSelector.OverrideRunNumber=True
theApp.EvtMax = 1
#--------------------------------------------------------------
# Set output level threshold (1=VERBOSE, 2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
#--------------------------------------------------------------
svcMgr.MessageSvc.OutputLevel = INFO
svcMgr.MessageSvc.debugLimit = 100000
svcMgr.MessageSvc.infoLimit = 100000
svcMgr.MessageSvc.Format = "% F%30W%S%7W%R%T %0W%M"
svcMgr.IOVDbSvc.OutputLevel = INFO
theLArHVCorrMaker.OutputLevel = INFO
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
58310eb70b1d56186ab82bb5923708c9a6cfd67d | b3f22f69401f57e29ea7b3fe7fd921d328abfdb2 | /autohome/items.py | 98d04c298d38af7dedc356a9e5352544c08f6325 | [] | no_license | ivancoacher/autohome | 1f148b4ab6738f226bb2c6683aeafd4e37cde358 | ebf0d213e08a8386b436e03b65a6747aa0de4d43 | refs/heads/master | 2021-05-31T02:27:54.979715 | 2015-11-24T05:18:13 | 2015-11-24T05:18:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class AutohomeItem(Item):
# define the fields for your item here like:
# name = scrapy.Field()
content = Field()
| [
"heshang1203@sina.com"
] | heshang1203@sina.com |
5cc795f76bf1ca525800c7786bef520c5fc96657 | 32e716570ab33a201e7581bfa497bfa820729cd8 | /web_3/class_write/server.py | cbfbef35c11da618ca6b434626347f6b6b01a4c6 | [] | no_license | Coder-Chandler/Web | b21bf9213432d1bfe949c00e9c0e507883574d1e | bb1e403ae194aec01896f374607135d24d2cb16f | refs/heads/master | 2021-09-07T11:16:49.516177 | 2018-02-22T05:33:31 | 2018-02-22T05:33:31 | 112,816,855 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,973 | py | # coding: utf-8
"""
url 的规范
第一个 ? 之前的是 path
? 之后的是 query
http://c.cc/search?a=b&c=d&e=1
PATH /search
QUERY a=b&c=d&e=1
"""
import socket
import urllib.parse
from utils import log
from routes import route_static
from routes import route_dict
# 定义一个 class 用于保存请求的数据
class Request(object):
def __init__(self):
self.method = 'GET'
self.path = ''
self.query = {}
self.body = ''
def form(self):
"""
form 函数用于把 body 解析为一个字典并返回
body 的格式如下 a=b&c=d&e=1
"""
# username=g+u%26a%3F&password=
# username=g u&a?&password=
# TODO, 这实际上算是一个 bug,应该在解析出数据后再去 unquote
body = urllib.parse.unquote(self.body)
args = body.split('&')
f = {}
for arg in args:
k, v = arg.split('=')
f[k] = v
return f
#
request = Request()
def error(request, code=404):
"""
根据 code 返回不同的错误响应
目前只有 404
"""
# 之前上课我说过不要用数字来作为字典的 key
# 但是在 HTTP 协议中 code 都是数字似乎更方便所以打破了这个原则
e = {
404: b'HTTP/1.1 404 NOT FOUND\r\n\r\n<h1>NOT FOUND</h1>',
}
return e.get(code, b'')
def parsed_path(path):
"""
input: message=hello&author=gua
return: {
'message': 'hello',
'author': 'gua',
}
"""
# find函数用于在str中找某一个字符,如果找得到,那么返回0,找不到返回-1
index = path.find('?')
if index == -1:
return path, {}
else:
path, query_string = path.split('?', 1)
args = query_string.split('&')
query = {}
for arg in args:
k, v = arg.split('=')
query[k] = v
return path, query
def response_for_path(path):
# parsed_path 用于把 path 和 query 分离
path, query = parsed_path(path)
request.path = path
request.query = query
log('path and query ->', (path, query))
"""
根据 path 调用相应的处理函数
没有处理的 path 会返回 404
"""
r = {
'/static': route_static,
# '/': route_index,
# '/login': route_login,
# '/messages': route_message,
}
r.update(route_dict)
response = r.get(path, error)
return response(request)
def run(host='', port=3000):
"""
启动服务器
"""
# 初始化 socket 套路
# 使用 with 可以保证程序中断的时候正确关闭 socket 释放占用的端口
log('start at', '{}:{}'.format(host, port))
with socket.socket() as s:
s.bind((host, port))
# 无限循环来处理请求
while True:
# 监听 接受 读取请求数据 解码成字符串
s.listen(5)
connection, address = s.accept()
r = connection.recv(1000)
r = r.decode('utf-8')
log('原始请求', r)
# log('ip and request, {}\n{}'.format(address, request))
# 因为 chrome 会发送空请求导致 split 得到空 list
# 所以这里判断一下防止程序崩溃
if len(r.split()) < 2:
continue
path = r.split()[1]
# 设置 request 的 method
request.method = r.split()[0]
# 把 body 放入 request 中
request.body = r.split('\r\n\r\n', 1)[1]
# 用 response_for_path 函数来得到 path 对应的响应内容
response = response_for_path(path)
# 把响应发送给客户端
connection.sendall(response)
# 处理完请求, 关闭连接
connection.close()
if __name__ == '__main__':
# 生成配置并且运行程序
config = dict(
host='',
port=3000,
)
run(**config)
| [
"ysiwgtus@gmail.com"
] | ysiwgtus@gmail.com |
dccaabaea2ce82f404be9435d198c09cebb12357 | e01842bbd6fa4ec5df69e9dc3feda23f0085367c | /blog/posts/urls.py | 9e9f930a79c1eb0d80c0c12fe9aef76893311746 | [] | no_license | Greh/pyladies-gswd | cae38aa9f60e9d0307401e18f97390a3388d32d2 | a6c0cf555068fd4bc7cf8eaead8237e2d04d0f46 | refs/heads/master | 2021-01-20T22:51:08.683674 | 2013-08-24T08:53:39 | 2013-08-24T08:53:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.post_list_view, name='list'),
url(r'^read/(?P<slug>[-\w]+)/$', views.post_detail_view, name='detail'),
)
| [
"kenneth@gigantuan.net"
] | kenneth@gigantuan.net |
fec6355a19a54de67434605bd6c36ffc7fd76909 | 8633ec7985ffd7f849210b93bc20e632f8ae8707 | /tree/CMSSW_4_2_8_patch7/src/Validation/RecoTrack/python/PostProcessorTracker_cfi.py | 9d4bd7efb05784788385b3074371e74ca53102c8 | [] | no_license | liis/el_track | 2ed5b3b7a64d57473328df0e5faf28808bab6166 | cd7978e5fa95d653bab5825b940911b465172c1a | refs/heads/master | 2016-09-10T20:09:07.882261 | 2015-01-08T14:41:59 | 2015-01-08T14:41:59 | 14,494,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,530 | py | import FWCore.ParameterSet.Config as cms
postProcessorTrack = cms.EDAnalyzer("DQMGenericClient",
subDirs = cms.untracked.vstring("Tracking/Track/*"),
efficiency = cms.vstring(
"effic 'Efficiency vs #eta' num_assoc(simToReco)_eta num_simul_eta",
"efficPt 'Efficiency vs p_{T}' num_assoc(simToReco)_pT num_simul_pT",
"effic_vs_hit 'Efficiency vs hit' num_assoc(simToReco)_hit num_simul_hit",
"effic_vs_phi 'Efficiency vs #phi' num_assoc(simToReco)_phi num_simul_phi",
"effic_vs_dxy 'Efficiency vs Dxy' num_assoc(simToReco)_dxy num_simul_dxy",
"effic_vs_dz 'Efficiency vs Dz' num_assoc(simToReco)_dz num_simul_dz",
"effic_vs_vertpos 'Efficiency vs vertpos' num_assoc(simToReco)_vertpos num_simul_vertpos",
"effic_vs_zpos 'Efficiency vs zpos' num_assoc(simToReco)_zpos num_simul_zpos",
"fakerate 'Fake rate vs #eta' num_assoc(recoToSim)_eta num_reco_eta fake",
"fakeratePt 'Fake rate vs p_{T}' num_assoc(recoToSim)_pT num_reco_pT fake",
"fakerate_vs_hit 'Fake rate vs hit' num_assoc(recoToSim)_hit num_reco_hit fake",
"fakerate_vs_phi 'Fake rate vs phi' num_assoc(recoToSim)_phi num_reco_phi fake",
"fakerate_vs_dxy 'Fake rate vs dxy' num_assoc(recoToSim)_dxy num_reco_dxy fake",
"fakerate_vs_dz 'Fake rate vs dz' num_assoc(recoToSim)_dz num_reco_dz fake"
),
resolution = cms.vstring(
"cotThetares_vs_eta '#sigma(cot(#theta)) vs #eta' cotThetares_vs_eta",
"cotThetares_vs_pt '#sigma(cot(#theta)) vs p_{T}' cotThetares_vs_pt",
"h_dxypulleta 'd_{xy} Pull vs #eta' dxypull_vs_eta",
"dxyres_vs_eta '#sigma(d_{xy}) vs #eta' dxyres_vs_eta",
"dxyres_vs_pt '#sigma(d_{xy}) vs p_{T}' dxyres_vs_pt",
"h_dzpulleta 'd_{z} Pull vs #eta' dzpull_vs_eta",
"dzres_vs_eta '#sigma(d_{z}) vs #eta' dzres_vs_eta",
"dzres_vs_pt '#sigma(d_{z}) vs p_{T}' dzres_vs_pt",
"etares_vs_eta '#sigma(#eta) vs #eta' etares_vs_eta",
"h_phipulleta '#phi Pull vs #eta' phipull_vs_eta",
"h_phipullphi '#phi Pull vs #phi' phipull_vs_phi",
"phires_vs_eta '#sigma(#phi) vs #eta' phires_vs_eta",
"phires_vs_phi '#sigma(#phi) vs #phi' phires_vs_phi",
"phires_vs_pt '#sigma(#phi) vs p_{T}' phires_vs_pt",
"h_ptpulleta 'p_{T} Pull vs #eta' ptpull_vs_eta",
"h_ptpullphi 'p_{T} Pull vs #phi' ptpull_vs_phi",
"ptres_vs_eta '#sigma(p_{T}) vs #eta' ptres_vs_eta",
"ptres_vs_phi '#sigma(p_{T}) vs #phi' ptres_vs_phi",
"ptres_vs_pt '#sigma(p_{T}) vs p_{T}' ptres_vs_pt",
"h_thetapulleta '#theta Pull vs #eta' thetapull_vs_eta",
"h_thetapullphi '#theta Pull vs #phi' thetapull_vs_phi"
),
profile= cms.vstring(
"chi2mean 'mean #chi^{2} vs #eta' chi2_vs_eta",
"chi2mean_vs_phi 'mean #chi^{2} vs #phi' chi2_vs_phi",
"chi2mean_vs_nhits 'mean #chi^{2} vs n. hits' chi2_vs_nhits",
"hits_eta 'mean #hits vs eta' nhits_vs_eta",
"hits_phi 'mean #hits vs #phi' nhits_vs_phi",
"losthits_eta 'mean #lost hits vs #eta' nlosthits_vs_eta",
"PXBhits_eta 'mean # PXB hits vs #eta' nPXBhits_vs_eta",
"PXFhits_eta 'mean # PXF hits vs #eta' nPXFhits_vs_eta",
"TIBhits_eta 'mean # TIB hits vs #eta' nTIBhits_vs_eta",
"TIDhits_eta 'mean # TID hits vs #eta' nTIDhits_vs_eta",
"TOBhits_eta 'mean # TOB hits vs #eta' nTOBhits_vs_eta",
"TEChits_eta 'mean # TEC hits vs #eta' nTEChits_vs_eta",
"LayersWithMeas_eta 'mean # LayersWithMeas vs #eta' nLayersWithMeas_vs_eta",
"PXLlayersWith2dMeas 'mean # PXLlayersWithMeas vs #eta' nPXLlayersWith2dMeas",
"STRIPlayersWithMeas_eta 'mean # STRIPlayersWithMeas vs #eta' nSTRIPlayersWithMeas_eta",
"STRIPlayersWith1dMeas_eta 'mean # STRIPlayersWith1dMeas vs #eta' nSTRIPlayersWith1dMeas_eta",
"STRIPlayersWith2dMeas_eta 'mean # STRIPlayersWith2dMeas vs #eta' nSTRIPlayersWith2dMeas_eta"
),
outputFileName = cms.untracked.string("")
)
| [
"polaarrebane@gmail.com"
] | polaarrebane@gmail.com |
b4d5b5ebd165f661378a8add290298b1ad8bac5c | 36c00fe2afff4818c937e312ce0c6a79f35e2a77 | /7-kyu/dictionary-from-two-lists/python/solution.py | 4a710029d9177cbb5e6dccb8e00376800102be0b | [] | no_license | p-lots/codewars | 0a67b6ee4c91180ff78c648421b9d2d64463ddc3 | 535faeee475c6b398124d6f5002b0e111406e8bb | refs/heads/master | 2023-08-23T22:14:33.635011 | 2023-08-23T13:30:37 | 2023-08-23T13:30:37 | 195,320,309 | 0 | 0 | null | 2023-05-09T19:25:50 | 2019-07-05T01:40:15 | Python | UTF-8 | Python | false | false | 132 | py | def createDict(keys, values):
while len(values) < len(keys):
values.append(None)
return dict(zip(keys, values))
| [
"paul.calotta@gmail.com"
] | paul.calotta@gmail.com |
a39e9bab1aa016cf4f51db6b1ddcf7edd3ba6b25 | 4d9bdc1444ab73858a123b8273b72e1d74a9233d | /funNLearn/src/main/java/dsAlgo/sort/BucketSort.py | cddef4a09f7d8aba57c892f73c9bfb70035b2b75 | [] | no_license | vishalpmittal/practice-fun | f7ca1389d758f93ddf2ddc3a58f2592b7caabab4 | 727dec2e23e765925a5e7e003fc99aeaf25111e9 | refs/heads/master | 2022-07-11T18:31:49.574410 | 2022-02-26T23:05:12 | 2022-02-26T23:05:12 | 51,132,794 | 0 | 1 | null | 2022-06-29T19:34:05 | 2016-02-05T07:34:32 | JavaScript | UTF-8 | Python | false | false | 1,184 | py | """
Tag: sort
"""
from typing import List
class Solution:
def insertionSort(self, B: List[int]) -> List[int]:
for i in range(1, len(B)):
up = B[i]
j = i - 1
while j >= 0 and B[j] > up:
B[j + 1] = B[j]
j -= 1
B[j + 1] = up
return B
def bucket_sort(self, A: List[int]) -> List[int]:
arr = []
slot_num = 10 # 10 means 10 slots, each
# slot's size is 0.1
for i in range(slot_num):
arr.append([])
# Put array elements in different buckets
for j in A:
index_b = int(slot_num * j)
arr[index_b].append(j)
# Sort individual buckets
for i in range(slot_num):
arr[i] = self.insertionSort(arr[i])
# concatenate the result
k = 0
for i in range(slot_num):
for j in range(len(arr[i])):
A[k] = arr[i][j]
k += 1
return A
assert Solution().bucket_sort([0.897, 0.565, 0.656, 0.1234, 0.665, 0.3434]) == [
0.1234,
0.3434,
0.565,
0.656,
0.665,
0.897,
]
print("Tests Passed!")
| [
"vishalm@vmware.com"
] | vishalm@vmware.com |
10b1c8f6d365f2b3cdbf5aba30dd342bfdf97743 | a8ca62991d552367831daf302833053dec847d1b | /data_structures/python/tree.py | f4b902f99075ec6af63bc84c56e5f2a913b9792c | [] | no_license | krinj/tech-interview-kit | fec0d6e192ee4c3226602c840fcf650e7e10e726 | 86967a9237dd465dbeb9f6ada896eeceae6d553c | refs/heads/master | 2022-01-10T02:57:22.632021 | 2019-05-31T14:02:02 | 2019-05-31T14:02:02 | 185,377,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # -*- coding: utf-8 -*-
"""
Python binary tree.
"""
from typing import Union
__author__ = "Jakrin Juangbhanich"
__email__ = "juangbhanich.k@gmail.com"
class TNode:
def __init__(self, key: Union[str, int, float], data=None):
self.left: TNode = None
self.right: TNode = None
self.key: Union[str, int, float] = key
self.data = data
class Tree:
def __init__(self):
pass
| [
"juangbhanich.k@gmail.com"
] | juangbhanich.k@gmail.com |
019239c764dc2fe2ed066beb136ef05b5afe9741 | 3d37f595a8aaaa7c5723ddbd6758ecac5147dce2 | /evaluate-reverse-polish-notation/evaluate-reverse-polish-notation.py | b2e0ca728623d1edf0e18e6502be0e0b436af60b | [] | no_license | baggy2797/Leetcode | ec218b155ebb972cd793253f25c3e18117216703 | 469c1541579401768f7a1da55d504a9e8656b21e | refs/heads/main | 2023-06-24T17:03:42.708935 | 2021-07-16T22:31:24 | 2021-07-16T22:31:24 | 342,979,700 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | class Solution:
def evalRPN(self, tokens: List[str]) -> int:
stack = []
# operators = {"+","-","*","/"}
for token in tokens:
if token[-1].isdigit():
stack.append(int(token))
# elif token in operators:
else:
second = stack.pop()
first = stack.pop()
if token == "+":
stack.append(first + second)
elif token == "-":
stack.append(first - second)
elif token == "*":
stack.append(first * second)
else:
stack.append(int(float(first) / second))
return stack.pop() | [
"bhagwataditya226@gmail.com"
] | bhagwataditya226@gmail.com |
2b3452773a594169640f1364f39a01c29f048d9b | 34b55781ae90e1f268ce74ee27ecd4909663b728 | /freqlearn/images/draw_impulses.py | 2c1aa223d0c864e568b0e4078afc6057ad1863ba | [] | no_license | fbcotter/thesis | 28ecac0d84f1a5ff9ea104e8e1ac7753beba1c51 | e1a4032ffd6d241694fc173d67051798db22f20d | refs/heads/master | 2023-03-28T14:02:09.835895 | 2020-06-12T09:56:04 | 2020-06-12T09:56:04 | 269,305,306 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | # coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
from math import *
import dtcwt
xfm = dtcwt.Transform2d('near_sym_b', 'qshift_b')
x = np.zeros((128,64))
p = xfm.forward(x, nlevels=2)
m = p.highpasses[1].shape[0] // 2
r = int(.8 * m)
fig, ax = plt.subplots()
w = np.array([-1j, 1j, -1j, -1, 1, -1], 'complex')
for l in range(6):
if l < 3:
theta = 15+30*l
else:
theta = 15+30*l - 180
p.highpasses[1][int(m-r*sin(theta*pi/180)), int(r*cos(theta*pi/180)), l] = w[l]
y = xfm.inverse(p)
ax.imshow(y, cmap='gray')
m = y.shape[0] // 2
r = int(.88 * m)
for l in range(6):
if l < 3:
theta = 15+30*l
else:
theta = 15+30*l - 180
y = int(m - r*sin(theta*pi/180))
x = int(r*cos(theta*pi/180))
plt.text(x,y,"{}{}".format(theta, r"$^{\circ}$"), color='b', fontsize=11)
plt.show()
| [
"fbcotter90@gmail.com"
] | fbcotter90@gmail.com |
9c473691dcc0f4e2dc4c0c3a0135eb5eca24fded | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/layout/scene/yaxis/_dtick.py | 508da58d48cdfe7cca997d9b666a67c6367abde3 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 477 | py | import _plotly_utils.basevalidators
class DtickValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="dtick", parent_name="layout.scene.yaxis", **kwargs):
super(DtickValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
implied_edits=kwargs.pop("implied_edits", {"tickmode": "linear"}),
**kwargs
)
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
be9dbe69c82f3c7fcfb4be1760bb03d41e845213 | f121695e2dff353607fa47fb42482470e03bbf8a | /capitulo_19-Contas_de_usuario/learning_log/learning_logs/migrations/0003_topic_owner.py | 9b0e4de5a7cad3f304adda9c7ccdd8dc8ebadeb9 | [] | no_license | ranog/python_work | 76cbcf784c86fae4482be5383223e4b0a34f4130 | 47c442a90dcf32d5aef70858693a772a3c76a7ac | refs/heads/master | 2022-12-22T11:02:26.482059 | 2021-04-17T01:12:22 | 2021-04-17T01:12:22 | 233,634,221 | 2 | 1 | null | 2022-12-08T07:38:43 | 2020-01-13T15:58:46 | Python | UTF-8 | Python | false | false | 602 | py | # Generated by Django 3.1.4 on 2021-01-23 15:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('learning_logs', '0002_entry'),
]
operations = [
migrations.AddField(
model_name='topic',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
preserve_default=False,
),
]
| [
"jprnogueira@yahoo.com.br"
] | jprnogueira@yahoo.com.br |
0cedadd55b1e6d9d6b10d6d76f215c3480001304 | c2636c8bb8964ed2d6c8eca6c922dea27ef597e8 | /main.py | 26767768b29e108b2f1cd7bd3429822c71ab9629 | [] | no_license | rafaeltorrese/webscrapperplatzi | 97db94c5db5f919d198a4aa266aba4ed42a08d73 | f692f18ad17740d4a506a9d560a61180bad1b8ea | refs/heads/master | 2022-12-14T08:37:24.353294 | 2020-09-04T22:53:05 | 2020-09-04T22:53:05 | 292,951,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | import argparse
import logging
import re
from requests.exceptions import HTTPError
from urllib3.exceptions import MaxRetryError
from common import config
import news_page_object as news
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
is_well_formed_link = re.compile(r'^https?://.+/.+$') # https://example.com/hello
is_root_path = re.compile(r'^/.+$') # /some-text
def _news_scraper(news_site_uid):
host = config()["news_site"][news_site_uid]["url"]
logging.info(f"Beginning scraper for {host}")
homepage = news.HomePage(news_site_uid, host)
articles = []
for link in homepage.article_links:
article = _fetch_article(news_site_uid, host, link)
if article:
logger.info("Article fetched!!")
articles.append(article)
print(article.title)
print(len(articles))
def _fetch_article(news_site_uid, host, link):
logger.info(f"Start fetching article at {link}")
article = None
try:
article = news.ArticlePage(news_site_uid, _build_link(host, link))
except (HTTPError, MaxRetryError) as e:
logger.warning("Error while fetching the article", exc_info=False)
if article and not article.body:
logger.warning("Invalid article. There is no body")
return None
return article
def _build_link(host, link):
if is_well_formed_link.match(link):
return link
elif is_root_path.match(link):
return f"{host}{link}"
else:
return f"{host}/{link}"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
news_site_choices = list(config()['news_site'].keys()) # list of keys
parser.add_argument("news_site",
help="The news site you want tow scrape",
type=str,
choices=news_site_choices)
args = parser.parse_args()
_news_scraper(args.news_site)
| [
"torresc.rafael@gmail.com"
] | torresc.rafael@gmail.com |
e3bed94cd6c673192d0065f770c84ddcc55c6d0f | 1287ad54942fd2020a217ab12004a541abb62558 | /pythonexercicios/Ex069.py | 3e115b129e95eaefc77d1cfc0493628117d9168a | [] | no_license | LuPessoa/exerciciospy- | 637f24581722e547a62380973ca645b55ff65d90 | b5faad818f978bb13a65922edceb17888b73a407 | refs/heads/master | 2023-05-12T04:16:39.847184 | 2021-06-04T03:02:24 | 2021-06-04T03:02:24 | 374,410,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | tot18 = toth = totm20 = 0
while True:
idade = int(input('Idade: '))
sexo = ' '
while sexo not in 'MF':
sexo = str(input('Sexo:[M/F]')).strip().upper()[0]
if idade >= 18:
tot18 += 1
if sexo == 'M':
toth += 1
if sexo == 'F'and idade < 20:
totm20 += 1
resp = ' '
while resp not in 'SN':
resp = str(input('Quer continuar? [S/N] ')).strip().upper()[0]
if resp == 'N':
break
print(f'Total de pessoas com mais de 18 anos : {tot18}')
print(f'Ao todo temos {toth} homens cadastrados')
print(f'E temos {totm20} mulheres com menos de 20 anos')
| [
"lulenemacedo29@gmail.com"
] | lulenemacedo29@gmail.com |
86e3018dfa70529dc368c2fd8577eea8b8c9b37b | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Reconstruction/RecExample/RecExCommission/share/RecExCommissionCommonFlags_jobOptions.py | 28d3249054bcee974759a24c6adda8d0d21b089f | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,532 | py | # ---------------------------------------------------------------------------
# RecExCommon flags
# ---------------------------------------------------------------------------
include.block("RecExCommission/RecExCommissionCommonFlags_jobOptions.py")
# ---------------------------------------------------------------------------
# AthenaCommonFlags
# ---------------------------------------------------------------------------
# start using the new job properties
from AthenaCommon.JobProperties import jobproperties
# AthenaCommon flags
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
# -----------------------------------------------------------------------
# being general reconstruction flags
# -----------------------------------------------------------------------
from RecExConfig.RecFlags import jobproperties, rec
#Number of events to process or generate
athenaCommonFlags.EvtMax=10
# Number of events to skip when reading an input POOL file.
athenaCommonFlags.SkipEvents = 0
# The list of input POOL files containing collections of Raw data objects
# ['myfile.pool.root'] : file in run directory
# ['LFN:mylogicalfile.root']: logical file name as specified in PoolFileCatalog.cml
# ['rfio:/castor/cern.ch/somepath/somefile.pool.root']:file on castor (at CERN)
athenaCommonFlags.PoolRDOInput = ["/afs/cern.ch/atlas/maxidisk/d17/data/TileTest/dig.05AprProd-10000000.root"]
# The list of input ByteStream files containing collections of Raw data objects
#athenaCommonFlags.BSRDOInput = ["/castor/cern.ch/grid/atlas/t0/perm/DAQ/daq.m4_combined.0019783.Default.L1TT-b00000001.LB0001.SFO-1._0001.data"]
athenaCommonFlags.BSRDOInput = ["/castor/cern.ch/grid/atlas/t0/perm/DAQ/daq.m4_combined.0020720.debug.L1TT-b00000001.LB0000.SFO-1._0001.data"]
#athenaCommonFlags.BSRDOInput = ["/castor/cern.ch/grid/atlas/DAQ/M6/daq.NoTag.0043719.physics.HLT_Cosmic_AllTeIDSelected.LB0000.SFO-1._0001.data"]
# The list of input POOL files containing collections of ESD objects
athenaCommonFlags.PoolESDInput = ["castor:/castor/cern.ch/grid/atlas/t0/perm/M4reproc/0020720/FESD/M4.0020720.physics.L1TT-b00000010.FESD.v130026.part0001._lumi0002._0001.1"]
# The name of the output POOL file containing collections of ESD objects
athenaCommonFlags.PoolESDOutput = "ESD.root"
# The list of input POOL files containing collections of TAGs
#athenaCommonFlags.PoolTAGInput = "TAG.root"
# The name of the output POOL file containing collections of TAGs
athenaCommonFlags.PoolTAGOutput = "TAG.root"
rec.PoolTAGCOMOutput="TAGCOM.root"
athenaCommonFlags.PoolInputQuery = "TRT_Cosmic_Tracks"
#AllowIgnoreExistingDataObject
#AllowIgnoreConfigError
#athenaCommonFlags.AthenaCommonFlags
# -----------------------------------------------------------------------
# GlobalFlags: overall detector geometry / data-source configuration
# -----------------------------------------------------------------------
from AthenaCommon.GlobalFlags import globalflags
# Detector configuration: atlas, combined test beam, or commissioning.
# 'commis' selects the commissioning setup.
globalflags.DetGeo = 'commis'
# Detector geometry database tag.
globalflags.DetDescrVersion="ATLAS-CommNF-04-00-00"
# Data source: real data ('data'), or simulation ('geant3'/'geant4').
globalflags.DataSource = 'data'
# Input format:
#   'bytestream' : read raw ByteStream files
#   'pool'       : read ESD/RDO POOL files
globalflags.InputFormat = 'bytestream'
# ---------------------------------------------------------------------------
# Beam flags: type of data to reconstruct ('singlebeam', 'cosmics', ...)
# ---------------------------------------------------------------------------
from AthenaCommon.BeamFlags import jobproperties
# Uncomment to force cosmics reconstruction:
#jobproperties.Beam.beamType.set_Value_and_Lock("cosmics")
# ---------------------------------------------------------------------------
# BField flags: magnetic-field configuration
# ---------------------------------------------------------------------------
# Field configuration: solenoidOn() barrelToroidOn() endcapToroidOn().
# All three magnet systems are switched off here (field-off running).
from AthenaCommon.BFieldFlags import jobproperties
jobproperties.BField.solenoidOn=False
jobproperties.BField.barrelToroidOn=False
jobproperties.BField.endcapToroidOn=False
# flags to drive the general behaviour of Reconstruction configuration
# -----------------------------------------------------------------------
#from RecExConfig.RecConfFlags import recConfFlags
#RecConfFlags.AllowBackNavigation
#RecConfFlags.AllowDisable
#RecConfFlags.AllowIgnoreConfigError
#RecConfFlags.AllowIgnoreExistingDataObject
#RecConfFlags.RecConfFlags
rec.CBNTAthenaAware = True
rec.doAOD = False
#rec.doAODall
#rec.doAODCaloCells
rec.doCBNT = True
#rec.doCheckDictionary
#rec.doCheckJOT
#rec.doDetailedAuditor
#rec.doDumpMC
#rec.doDumpPoolInputContent
#rec.doDumpProperties
#rec.doDumpTDS
#rec.doDumpTES
#rec.doEdmMonitor
#rec.doESD = True
# rec.doFileMetaData TODO might replace doDetStatus???
rec.doDetStatus = True
#rec.doFloatingPointException
#rec.doHeavyIon
rec.doHist = True
rec.doJiveXML = False
#rec.doLowPt
#rec.doMinimalRec
#rec.doNameAuditor
#rec.doPerfMon = False
rec.doPersint = False
#rec.doRestrictedESD
#rec.doSGAuditor
#rec.doShowSizeStatistics
#rec.doTimeLimit
#rec.doTruth
rec.doWriteAOD = False
#rec.doWriteBS
# If True writes out ESD file
rec.doWriteESD = True
#rec.doWriteRDO
# If True writes out TAG file
rec.doWriteTAG = True
#rec.noESDTrigger
#rec.oldFlagCompatibility
#rec.oldFlagLandMine
#rec.oldFlagTopSteering
# General msg output level ALL,VERBOSE,DEBUG,INFO,WARNING,ERROR,FATAL
rec.OutputLevel = INFO
#rec.readAOD
#If True runs on ESD file
rec.readESD = False
#rec.readRDO
#rec.readTAG
#rec.Rec
#rec.RecAlgs
rec.RootHistoOutput = "monitoring.root"
rec.RootNtupleOutput = "ntuple.root"
#rec.TAGFromRDO
#rec.UserAlgs
rec.doTile = True
rec.doLArg = True
rec.doInDet = True
rec.doMuon = True
## Switch on/off Calibration Ntuple
#from MuonRecExample.MuonRecFlags import muonRecFlags
#from MuonCalibAlgs.MuonCalibFlags import muonCalibFlags
#muonRecFlags.doCalib = True
#muonCalibFlags.Mode = 'trackNtuple'
#muonCalibFlags.EventTag = 'Moore'
### Switch on/off Combined Algorithms
from MuonCombinedRecExample.MuonCombinedRecFlags import muonCombinedRecFlags,muidFlags
muonCombinedRecFlags.doMuGirl = False
muonCombinedRecFlags.doCaloTrkMuId = True
muonCombinedRecFlags.doStaco = False
muonCombinedRecFlags.doMuTag = False
muonCombinedRecFlags.doAODMuons = True # switch off AOD making
muonCombinedRecFlags.doMergeMuons = True # switch off merging for ESD
muidFlags.SegmentTagger = 'MuTagIMO' # switch off by ''
#muidFlags.Extrapolated = 'MuidStandalone' # switch off by ''
muidFlags.Extrapolated = '' # switch off by ''
#muidFlags.Combined = 'MuidCombined' # NOW not run # switch off by ''
muidFlags.Combined = 'CombinedMuonFit' # switch off by ''
# ----------------------------------------------------------------------
# begin flags to switch algorithms on/off
# ----------------------------------------------------------------------
from RecExConfig.RecAlgsFlags import recAlgs
#recAlgs.doTrigger
#
# hack...
#
from RecExConfig.RecFlags import rec
# Fallback: pull in the Inner Detector StoreGate keys (JobProperties) only if
# an earlier jobOptions fragment has not already defined InDetKeys (Python 2).
if not 'InDetKeys' in dir():
    #
    # --- setup StoreGate keys (JobProperties!)
    #
    print "InDetRec_jobOptions: InDetKeys not set before - I import them now"
    from InDetRecExample.InDetKeys import InDetKeys
# InDetKeys.lock_JobProperties()
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
912f205c750470d91a847ee210f182d2c0cb001a | 40ac1c3f3dc024e2cdb5e7939abf408cde1b59ee | /webscraper/application/airquality/app_air_quality_scraper.py | 71cc8dafa1476eb9dbe385cb727aab56a7da302c | [] | no_license | plutoese/webscraper | 5319fbdcd2baf7392b2f9fb623eddef8f9c0bbcf | f360a10e0e6da2c250a2c7e5c64ceb74e6919ac6 | refs/heads/master | 2020-04-15T12:43:23.740908 | 2017-12-24T14:22:07 | 2017-12-24T14:22:07 | 61,554,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | # coding=UTF-8
# --------------------------------------------------------------
# application_air_quality script
# @introduction: scrape air-quality data
# @source: Tianqihoubao (weather report archive), http://www.tianqihoubao.com/aqi/
# @dependency: requests, bs4 and re packages
# @author: plutoese
# @date: 2016.06.26
# --------------------------------------------------------------
import sys
from libs.class_mongodb import MongoDB
from libs.class_proxymanager import ProxyManager
from libs.class_staticsitescraper import StaticSiteScraper
# 1. Initialise parameters
# 1.1 Choose a random proxy server for the crawl
pmanager = ProxyManager()
# NOTE(review): "ramdomproxy" is a typo of "randomproxy" (name kept as-is)
ramdomproxy = pmanager.random_proxy
# Raise Python's recursion limit (original comment: "set recursion depth")
sys.setrecursionlimit(1000000)
# 1.2 Configure the static-site scraper
db = MongoDB()
db.connect('cache','scraper')
# Web addresses already cached under this label -- presumably the set of
# already-visited pages; verify against StaticSiteScraper's ``pages`` handling
pages = [item['webaddress'] for item in db.collection.find({'label':'airquality'},projection={'_id':0,'webaddress':1})]
site_scraper = StaticSiteScraper('http://www.tianqihoubao.com/aqi/',
                                 label='airquality',
                                 proxy=ramdomproxy,
                                 pages=set(pages))
# 2. Start crawling
site_scraper.get_links(page_url='',condition='/aqi/[a-zA-Z]+',cache=True) | [
"glen.zhang7@gmail.com"
] | glen.zhang7@gmail.com |
8de8074878d9a6ef66cf4b622dd96bda62582f72 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_ConstantTrend_NoCycle_MLP.py | 8dc04c8230edaa12adfba6c5a152dc044fc81b45 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 161 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['ConstantTrend'] , ['NoCycle'] , ['MLP'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
10ffed599ac0d1038847cb9bd88e3bb7cfa7baa5 | 245381ad175dcc03ee0710964340eed4daa2ef85 | /shagroup/asgi.py | 68a2f8abe49c2e7dc7b2c96529a21083cda0f947 | [] | no_license | musabansari-1/Shagroup-erp-backend | 2c1f56f7ce5763dae668d160cdcc1a26dbc2e8d7 | 87845f11faae50301d5bb73ffa0c3ee0bed38256 | refs/heads/main | 2023-04-13T02:25:36.808755 | 2021-04-15T16:28:19 | 2021-04-15T16:28:19 | 358,324,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for shagroup project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shagroup.settings')
# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| [
"musabzahida@gmail.com"
] | musabzahida@gmail.com |
a5e95a78db593a7a838f3c05604b0d18e3f5e2b0 | 6af6c7158609f889fa1f53c99b63c435113d496e | /RLTutorial/modelFreeValue.py | c57ba1bad60aeb8faff7827ff764ece01fdbbfe2 | [
"MIT"
] | permissive | fyabc/MSRAPaperProject | 170752a5b8bfdecbab876841762d8fd2f9732f08 | 2d7974acfe8065523d0c56da695807e94acd0b34 | refs/heads/master | 2020-04-06T08:03:08.211020 | 2016-09-07T10:28:11 | 2016-09-07T10:28:11 | 64,015,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | #! /usr/bin/python3
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals
from MDP import MDP
import version23
__author__ = 'fyabc'
def MonteCarlo(mdp, stateSamples, actionSamples, rewardSamples):
    """Every-visit Monte Carlo evaluation of the state-value function.

    Each episode's discounted return is accumulated once from the tail,
    then peeled off reward-by-reward while walking the episode forwards,
    so every visited state is credited with the return observed from that
    visit onward.  ``actionSamples`` is accepted for interface symmetry
    but is not used.  Returns a dict mapping every state of ``mdp`` to its
    estimated value (unvisited states keep 0.0).
    """
    totals = dict.fromkeys(mdp.states, 0.0)   # summed returns per state
    visits = dict.fromkeys(mdp.states, 0)     # visit counts per state

    for states, rewards in zip(stateSamples, rewardSamples):
        # Discounted return of the whole episode, built back-to-front.
        ret = 0.0
        for r in reversed(rewards):
            ret = ret * mdp.gamma + r
        # Walk forwards, crediting each visit with the remaining return.
        for s, r in zip(states, rewards):
            totals[s] += ret
            visits[s] += 1
            ret = (ret - r) / mdp.gamma

    for s in mdp.states:
        if visits[s] > 0:
            totals[s] /= visits[s]
    return totals
def temporalDifference(mdp, alpha, stateSamples, actionSamples, rewardSamples):
    """TD(0) policy evaluation: v(s) += alpha * (r + gamma * v(s') - v(s)).

    Episodes are processed in order; each update bootstraps from the current
    estimate of the successor state, and the terminal transition of an
    episode bootstraps from 0.  ``actionSamples`` is unused.  Returns a dict
    mapping every state of ``mdp`` to its estimated value.
    """
    values = dict.fromkeys(mdp.states, 0.0)
    for episode, rewards in zip(stateSamples, rewardSamples):
        last = len(episode) - 1
        for idx, (s, r) in enumerate(zip(episode, rewards)):
            # Bootstrap target: successor value, or 0 at episode end.
            bootstrap = values[episode[idx + 1]] if idx < last else 0.0
            values[s] += alpha * (r + mdp.gamma * bootstrap - values[s])
    return values
def test():
    """Smoke test: evaluate the random-walk chain with MC and TD(0)."""
    mdp = MDP(0.5)

    def show(header, values):
        # Render states 1..5 on one line in the original '%d: %f\t' format.
        print(header)
        print(''.join('%d: %f\t' % (i, values[i]) for i in range(1, 6)))

    show('Monte Carlo:', MonteCarlo(mdp, *mdp.randomWalkSamples(100)))
    show('Temporal Difference:',
         temporalDifference(mdp, 0.15, *mdp.randomWalkSamples(100)))


if __name__ == '__main__':
    test()
| [
"fyabc@mail.ustc.edu.cn"
] | fyabc@mail.ustc.edu.cn |
7c1155cf96173f5b9655062ea7bac26a067474ff | a192078ebd74d54db64d02d815d805b4e8d9a0c6 | /GeneratePAMLChemopartitions.py | 4396e3399c57c88109daaa45fc7437cb7a69175c | [] | no_license | rjovelin/CRM_POPVAR | ae1fe4b55345dd41b6c8ed4566a4ebce8b1ac437 | 9fe6d475f834b1bf9cfad26248d16dadb9c4f2ae | refs/heads/master | 2021-01-21T14:23:53.028058 | 2016-07-04T18:08:33 | 2016-07-04T18:08:33 | 57,171,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,729 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 30 18:38:26 2016
@author: Richard
"""
# use this script to generate PAML alignment files for chemoreceptor gene membrane partitions
from manipulate_sequences import *
from chemoreceptors import *
import os
# --- Setup: load chemoreceptor gene sets and alignment file lists, and create
# --- the output directory tree for the membrane-partition alignments.
# set number of minimum codons of each partition
MinimumCodons = 5
# get the set of chemoreceptors from the iprscan outputfile
chemo = get_chemoreceptors('../Genome_Files/PX356_protein_seq.tsv')
print('got chemo genes')
# create a set of valid transcripts
transcripts = get_valid_transcripts('../Genome_Files/unique_transcripts.txt')
print('got valid transcripts')
# create a set of valid chemoreceptors
GPCRs = set(gene for gene in chemo if gene in transcripts)
print('got valid GPCR genes')
# create a dict with the remanei CDS
# NOTE(review): CDS is never used later in this script -- confirm whether the
# lookup is needed at all.
CDS = convert_fasta('../Genome_Files/noamb_PX356_all_CDS.fasta')
print('got CDS sequences')
# create directories to store the aligned partitions
# NOTE: os.mkdir raises an error if these directories already exist from a
# previous run; the script assumes a clean working directory.
os.mkdir('Partitions')
os.mkdir('./Partitions/Membrane/')
os.mkdir('./Partitions/Extra_membrane/')
os.mkdir('./Partitions/Inside/')
os.mkdir('./Partitions/Outside/')
print('created directories')
# make a list of files in alignment directory
ali_files = os.listdir('../CREM_CLA_protein_divergence/pairs/Aligned_pairs/')
print('made a list of files')
# make a list of alignment files
alignments = [filename for filename in ali_files if filename[-8:] == '_aln.tfa']
print('made a list of aligned sequence pairs')
# loop over genes in GPCRs
# For each chemoreceptor, locate its remanei/latens pairwise alignment, read
# the per-residue Phobius topology probabilities, and split the aligned codons
# into membrane / extra-membrane / intracellular / extracellular partitions,
# writing each non-trivial partition as a 2-sequence codeml input file.
# probabilities[i] is used below as (residue, p_inside, p_outside, p_membrane,
# p_other) -- TODO confirm against parse_phobius_output.
for gene in GPCRs:
    # loop over alignment
    for filename in alignments:
        # check that gene in filename
        if gene == filename[:filename.index('_CLA')]:
            # get the aligned codons
            codons = get_aligned_codons(filename, '../CREM_CLA_protein_divergence/pairs/Aligned_pairs/')
            # get the dict of probabilities
            probabilities = parse_phobius_output(gene + '_proba.txt', './Chemo_genes/')
            # get a list of codon index
            codon_index = [i for i in codons]
            # sort list
            codon_index.sort()
            # get the list of amino acid index
            aa_index = [i for i in probabilities]
            # sort list
            aa_index.sort()
            # check that the list of index are the same
            if aa_index != codon_index:
                print(gene, codon_index, aa_index)
                raise ValueError('Codon and AA index lists are different')
            # create sequences to store the different partitions
            crm_TM, crm_intra, crm_extra, crm_not_TM, cla_TM, cla_intra, cla_extra, cla_not_TM = '', '', '', '', '', '', '', ''
            # loop over the aa_indices
            for i in aa_index:
                # check that sequences in each dict is the same
                if probabilities[i][0] != cds_translate(codons[i][0]):
                    raise ValueError('Protein sequences in ortholog and probability dicts are different')
                # check probabilities and build sequences
                # NOTE: residues whose best topology probability is below 0.95
                # are silently dropped from every partition.
                if probabilities[i][1] >= 0.95:
                    # build intra and not membrane sequences
                    crm_intra += codons[i][0]
                    cla_intra += codons[i][1]
                    crm_not_TM += codons[i][0]
                    cla_not_TM += codons[i][1]
                elif probabilities[i][2] >= 0.95:
                    # build outside and not membrane sequences
                    crm_extra += codons[i][0]
                    cla_extra += codons[i][1]
                    crm_not_TM += codons[i][0]
                    cla_not_TM += codons[i][1]
                elif probabilities[i][3] >= 0.95:
                    # build membrane sequences
                    crm_TM += codons[i][0]
                    cla_TM += codons[i][1]
                elif probabilities[i][4] >= 0.95:
                    # build not_membrane sequences
                    crm_not_TM += codons[i][0]
                    cla_not_TM += codons[i][1]
            # get cla_gene name
            cla_gene = filename[filename.index('CLA'):filename.index('_aln')]
            # check that remanei sequence is not empty and that latens sequence has minimum codons
            # NOTE(review): these lengths count nucleotides while MinimumCodons
            # counts codons -- verify whether len(...) // 3 was intended.
            if len(crm_TM) != 0 and len(crm_not_TM) != 0 and len(cla_TM.replace('-', '')) >= MinimumCodons and len(cla_not_TM.replace('-', '')) >= MinimumCodons:
                # gene has both membrane and extra-membrane residues
                # open file for writing
                newfile = open('./Partitions/Membrane/' + gene + '_TM.txt', 'w')
                # write alignment file in codeml input format
                newfile.write('2' + ' ' + str(len(crm_TM)) + '\n')
                newfile.write('>' + gene + '\n')
                newfile.write(crm_TM + '\n')
                newfile.write('>' + cla_gene + '\n')
                newfile.write(cla_TM + '\n')
                newfile.close()
                # open file for writing
                newfile = open('./Partitions/Extra_membrane/' + gene + '_ExtraTM.txt', 'w')
                # write alignment file in codeml input format
                newfile.write('2' + ' ' + str(len(crm_not_TM)) + '\n')
                newfile.write('>' + gene + '\n')
                newfile.write(crm_not_TM + '\n')
                newfile.write('>' + cla_gene + '\n')
                newfile.write(cla_not_TM + '\n')
                newfile.close()
            if len(crm_intra) != 0 and len(cla_intra.replace('-', '')) >= MinimumCodons:
                # gene has intra-cellular domain
                # open file for writing
                newfile = open('./Partitions/Inside/' + gene + '_inside.txt', 'w')
                # write alignment file in codeml input format
                newfile.write('2' + ' ' + str(len(crm_intra)) + '\n')
                newfile.write('>' + gene + '\n')
                newfile.write(crm_intra + '\n')
                newfile.write('>' + cla_gene + '\n')
                newfile.write(cla_intra + '\n')
                newfile.close()
            if len(crm_extra) != 0 and len(cla_extra.replace('-', '')) >= MinimumCodons:
                # gene has extra-cellular domain
                # open file for writing
                newfile = open('./Partitions/Outside/' + gene + '_outside.txt', 'w')
                # write alignment file in codeml input format
                newfile.write('2' + ' ' + str(len(crm_extra)) + '\n')
                newfile.write('>' + gene + '\n')
                newfile.write(crm_extra + '\n')
                newfile.write('>' + cla_gene + '\n')
                newfile.write(cla_extra + '\n')
                newfile.close()
| [
"richard.jovelin@oicr.on.ca"
] | richard.jovelin@oicr.on.ca |
05e0c4be9187c69e133664eedd04e6dc36798554 | d3b77550a40b860970450e702b6bcd28d5f9b3e4 | /Hackerrank/problem_solving/implementation/Break_the_records.py | b150aeec27614733888d1b3cafb916de5e67b008 | [] | no_license | CateGitau/Python_programming | 47bc9277544814ad853b44a88f129713f1a40697 | 6ae42b3190134c4588ad785d62e08b0763cf6b3a | refs/heads/master | 2023-07-08T03:08:46.236063 | 2021-08-12T09:38:03 | 2021-08-12T09:38:03 | 228,712,021 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 09:07:41 2020
@author: aims
"""
scores = [17,45,41,60,17,41,76,43,51,40,89,92,34,6,64,7,37,81,32,50]
def breakingRecords(scores):
    """Count how many times the best and worst score records are broken.

    Walks the scores in chronological order, tracking the running maximum
    and minimum.  A record is broken only by a strictly higher (best) or
    strictly lower (worst) score; ties do not count.  This fixes the
    original zip-based version, which counted a *tied* maximum as a new
    best record, mutated the parallel lists while iterating them, and
    crashed on empty input.  The leftover debug prints of the running
    record lists have been removed.

    :param scores: sequence of game scores in chronological order
    :return: tuple ``(best_breaks, worst_breaks)``
    """
    if not scores:
        # No games played: no records to break.
        return (0, 0)
    best = worst = scores[0]
    best_breaks = worst_breaks = 0
    for score in scores[1:]:
        if score > best:
            best = score
            best_breaks += 1
        elif score < worst:
            worst = score
            worst_breaks += 1
    return (best_breaks, worst_breaks)
print(breakingRecords(scores)) | [
"catherinegitau94@gmail.com"
] | catherinegitau94@gmail.com |
7b98411d65809d3254ba52dfd4f00395db75f254 | a5a4cee972e487512275c34f308251e6cc38c2fa | /pypospack/pyposmat/visualization/plot_2d_density_new.py | 06a52f350768e1a703822139dcea4ad5249f251b | [
"MIT"
] | permissive | eragasa/pypospack | 4f54983b33dcd2dce5b602bc243ea8ef22fee86b | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | refs/heads/master | 2021-06-16T09:24:11.633693 | 2019-12-06T16:54:02 | 2019-12-06T16:54:02 | 99,282,824 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,650 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from pypospack.pyposmat.visualization import PyposmatAbstractPlot
class Pyposmat2DDensityPlot(PyposmatAbstractPlot):
    """Scatter plot of two sampled columns overlaid on a Gaussian-KDE image.

    The x/y columns must be parameter or QOI names known to the
    configuration; axis limits default to normal-fit quantiles of the data.
    """

    # named bandwidth-selection rules accepted by make_kde()
    kde_bandwidth_types = ['silverman', 'silverman1986', 'chiu1999']

    def __init__(self, config=None, data=None):
        """
        Args:
            config: configuration object exposing qoi_names/parameter_names.
            data: data object whose ``df`` attribute is a DataFrame.
        """
        PyposmatAbstractPlot.__init__(self, config=config, data=data)
        self.x_limits = None
        self.y_limits = None

    def determine_limits(self, name, ppf_min=0.1, ppf_max=0.9):
        """Return (lo, hi) limits for column ``name``.

        A normal distribution is fitted to the column's mean/std and the
        limits are taken at the ``ppf_min`` and ``ppf_max`` quantiles.
        """
        assert name in self.configuration.qoi_names \
            or name in self.configuration.parameter_names
        assert isinstance(ppf_min, float)
        assert isinstance(ppf_max, float)

        norm_rv = stats.norm(
            loc=self.data.df[name].mean(),
            scale=self.data.df[name].std())
        return norm_rv.ppf(ppf_min), norm_rv.ppf(ppf_max)

    def determine_x_limits(self, x_name=None, x_limits=None,
                           ppf_min=0.1, ppf_max=0.9):
        """Set and return ``self.x_limits`` (computed unless supplied)."""
        assert x_name is None or isinstance(x_name, str)
        # BUGFIX: the original asserted isinstance(x_name, list) here.
        assert x_limits is None or isinstance(x_limits, (list, tuple))
        assert isinstance(ppf_min, float)
        assert isinstance(ppf_max, float)

        if x_name is None:
            x_name = self.x_name
        if x_limits is None:
            # BUGFIX: forward the requested quantiles (previously ignored).
            self.x_limits = self.determine_limits(
                x_name, ppf_min=ppf_min, ppf_max=ppf_max)
        else:
            self.x_limits = x_limits
        return self.x_limits

    def determine_y_limits(self, y_name=None, y_limits=None,
                           ppf_min=0.1, ppf_max=0.9):
        """Set and return ``self.y_limits`` (computed unless supplied)."""
        assert y_name is None or isinstance(y_name, str)
        # BUGFIX: the original asserted isinstance(y_name, list) here.
        assert y_limits is None or isinstance(y_limits, (list, tuple))
        assert isinstance(ppf_min, float)
        assert isinstance(ppf_max, float)

        if y_name is None:
            y_name = self.y_name
        if y_limits is None:
            # BUGFIX: forward the requested quantiles (previously ignored).
            self.y_limits = self.determine_limits(
                y_name, ppf_min=ppf_min, ppf_max=ppf_max)
        else:
            self.y_limits = y_limits
        return self.y_limits

    def plot(self, x_name, y_name,
             with_kde_plot=True, with_data_plot=True,
             x_limits=None, y_limits=None, h=None):
        """Draw ``y_name`` against ``x_name`` as a KDE image and/or scatter.

        Args:
            x_name, y_name: column names (parameters or QOIs).
            with_kde_plot: overlay the density image.
            with_data_plot: overlay the raw sample points.
            x_limits, y_limits: optional [lo, hi] axis limits; computed
                from the data when omitted.
            h: optional bandwidth rule, one of ``kde_bandwidth_types``.
        """
        assert x_name in self.configuration.qoi_names \
            or x_name in self.configuration.parameter_names
        assert y_name in self.configuration.qoi_names \
            or y_name in self.configuration.parameter_names
        assert x_limits is None or isinstance(x_limits, (list, tuple))
        assert y_limits is None or isinstance(y_limits, (list, tuple))
        # BUGFIX: the bare name kde_bandwidth_types raised NameError
        # whenever h was provided.
        assert h is None or h in self.kde_bandwidth_types

        self.x_name = x_name
        self.y_name = y_name
        # BUGFIX: pass caller-supplied limits through (previously dropped).
        self.determine_x_limits(x_limits=x_limits)
        self.determine_y_limits(y_limits=y_limits)

        x = self.data.df[x_name].values
        y = self.data.df[y_name].values
        if self.fig is None or self.ax is None:
            self.create_subplots()
        if with_kde_plot:
            self.plot_kde(x, y, h)
        if with_data_plot:
            self.plot_data_points(x, y)
        self.ax.set_xlim(self.x_limits[0], self.x_limits[1])
        self.ax.set_ylim(self.y_limits[0], self.y_limits[1])
        # (the original recomputed an unused KDE here; removed as dead code)

    def plot_kde(self, x, y, h=None, XY_cmap_name='Blues'):
        """Evaluate the joint KDE on a 200x200 grid and render it as an image."""
        xmin, xmax = self.x_limits
        ymin, ymax = self.y_limits

        # build the evaluation grid
        X, Y = np.mgrid[xmin:xmax:200j, ymin:ymax:200j]
        XY_grid = np.vstack([X.ravel(), Y.ravel()])

        # evaluate density on the grid
        kde = self.make_kde(x, y, h)
        Z = np.reshape(kde(XY_grid), X.shape)

        self.ax.imshow(
            np.rot90(Z),
            cmap=plt.get_cmap(XY_cmap_name),
            extent=[xmin, xmax, ymin, ymax],
            aspect=(xmax - xmin) / (ymax - ymin))

    def plot_data_points(self, x, y, size=1):
        """Scatter the raw samples; ``size`` is the marker area in points^2."""
        # BUGFIX: honor the size argument (previously hard-coded to 1).
        self.ax.scatter(x, y, s=size)

    def make_kde(self, x, y, h=None):
        """Return a ``scipy.stats.gaussian_kde`` over the (x, y) samples.

        ``h`` selects the bandwidth rule: None for scipy's default,
        'silverman'/'silverman1986' for Silverman's rule, or 'chiu1999'
        (requires the Chiu1999_h helper).
        """
        # BUGFIX: qualify the class attribute (bare name raised NameError).
        assert h is None or h in self.kde_bandwidth_types
        values = np.vstack([x, y])
        if h is None:
            return stats.gaussian_kde(values)
        elif h in ('silverman', 'silverman1986'):
            return stats.gaussian_kde(values, 'silverman')
        elif h == 'chiu1999':  # BUGFIX: was `h is 'chiu1999'` plus a `value` typo
            # NOTE(review): Chiu1999_h is not defined anywhere in this module,
            # so this branch still fails until that helper is provided.
            return stats.gaussian_kde(values, Chiu1999_h(values))
        else:
            raise ValueError(h)
| [
"eragasa@ufl.edu"
] | eragasa@ufl.edu |
9b6a8004b396ffea385ddfe4ebf79d7c40d5a1d2 | 1d8a4659d4a13cd8b0244918484990bb000687ea | /OpenPROD/openprod-addons/tracker/__openerp__.py | 4ea0f630e42a6909f66a9cb9ad244b53b60136b7 | [] | no_license | kazacube-mziouadi/ceci | d8218ede129186c26eb36f251ef42f07c7a74883 | eb394e1f79ba1995da2dcd81adfdd511c22caff9 | refs/heads/master | 2020-03-23T00:22:51.501409 | 2018-07-13T14:58:32 | 2018-07-13T14:58:32 | 140,859,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | # -*- coding: utf-8 -*-
# OpenERP module manifest for the "Issue tracker" addon: metadata plus the
# dependency and data-file lists evaluated by the server at install time.
{
    'name': 'Issue tracker',
    'version': '1.1',
    'category': 'Hidden/Dependency',
    'license': 'LGPL',
    'description': """""",
    'author': 'Objectif-PI',
    'website': '',
    # addons that must be installed before this one
    'depends': ['base_openprod'],
    # views, sequences, wizard forms and access rules loaded on install/update
    'data': [
        'tracker_view.xml',
        'data/sequence.xml',
        'wizard/wizard_create_timetracking_view.xml',
        'security/security.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [],
    'installable': True,
}
| [
"mziouadi@kazacube.com"
] | mziouadi@kazacube.com |
6a8b9e4ede272b664d0f795040983a39b0abec0f | d7016f69993570a1c55974582cda899ff70907ec | /sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2022_06_01/aio/_configuration.py | 432effa58c39c7f1021a6c274d2906adcb47ac1b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 3,545 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
# NOTE: auto-generated by AutoRest -- manual edits will be lost when the
# client is regenerated.
class MonitorManagementClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for MonitorManagementClient.
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription. Required.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2022-06-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """
    def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
        super(MonitorManagementClientConfiguration, self).__init__(**kwargs)
        api_version = kwargs.pop("api_version", "2022-06-01")  # type: str
        # Fail fast on missing required parameters.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        # Default AAD scope for ARM; overridable via the credential_scopes kwarg.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-monitor/{}".format(VERSION))
        self._configure(**kwargs)
    # Assemble the HTTP pipeline policies; a policy passed by the caller in
    # **kwargs always takes precedence over the SDK default built here.
    def _configure(self, **kwargs: Any) -> None:
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        if self.credential and not self.authentication_policy:
            # Default to AAD challenge-based authentication when a credential
            # was supplied without an explicit authentication policy.
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
c26f65a8b4765d23a71a577bf9fdc2a895991866 | f411c70d2951f1744b7c5c47433593c5a1288f4d | /old_version_mysite (wegen bilder)-1a566232fd31fbf4038539de112513e859373364/blog/migrations/0009_auto_20200107_2049.py | f5e7596ffe53a3763b471b0fadf11444bb79f228 | [] | no_license | nevergofullretard/technikmax-website | b239c6cd378c196ab97c6141dd345434db795888 | 57c0ab44fc73bccc097df5d7003aaa38125e5413 | refs/heads/master | 2023-01-05T04:49:02.600988 | 2020-11-06T22:26:53 | 2020-11-06T22:26:53 | 249,065,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # Generated by Django 2.1.2 on 2020-01-07 19:49
from django.db import migrations, models
# Auto-generated schema migration: adjusts field options (max_length / blank)
# on the ``project`` model.  Generated migrations should not be edited by
# hand; create a follow-up migration for further changes.
class Migration(migrations.Migration):
    # must be applied after blog migration 0008
    dependencies = [
        ('blog', '0008_auto_20200107_2040'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='description',
            field=models.CharField(max_length=1000),
        ),
        migrations.AlterField(
            model_name='project',
            name='github',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='project',
            name='title',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='project',
            name='title_tag',
            field=models.CharField(max_length=100),
        ),
    ]
| [
"jagermaxi1@gmail.com"
] | jagermaxi1@gmail.com |
5b70f0ee6f386b9458baaa5b140f0b18bed0f90b | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_6/models/file_info.py | 23b64849799a5104d64bb70df45f389369dbffa6 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,301 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.6, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_6 import models
class FileInfo(object):
    """Swagger model for a single file entry (FlashBlade REST API 2.6).

    Attributes:
        swagger_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    swagger_types = {
        'name': 'str',
        'length': 'int'
    }

    attribute_map = {
        'name': 'name',
        'length': 'length'
    }

    required_args = {
    }

    def __init__(
        self,
        name=None,  # type: str
        length=None,  # type: int
    ):
        """
        Keyword args:
            name (str): Name of the object (e.g., a file system or snapshot).
            length (int): Length of the file (in bytes).
        """
        # Only attributes the caller actually supplied are materialized, so
        # that to_dict()/__eq__ see exactly the populated fields.
        if name is not None:
            self.name = name
        if length is not None:
            self.length = length

    def __setattr__(self, key, value):
        # Reject any attribute that is not part of the declared model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `FileInfo`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Property sentinels mark unset fields; surface them as None.
        value = object.__getattribute__(self, item)
        return None if isinstance(value, Property) else value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            if not hasattr(self, attr):
                continue
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, FileInfo) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"tlewis@purestorage.com"
] | tlewis@purestorage.com |
cbe0e65929a0840a5b46f3c6711671225d2c19c2 | 58dcd83b6af6e947328519e3e8e43e7e07dce1da | /tasks/ptf.py | ec7bac864be4098bec8c90868c7c6594d2265149 | [
"MIT"
] | permissive | astrocatalogs/kilonovae | ad10ba93b5c9676edb0ccf983d8ff770d4de7808 | 887742fdfc26a291c61056bbb3a420370c377584 | refs/heads/master | 2021-01-21T22:25:39.526071 | 2018-06-29T00:36:58 | 2018-06-29T00:36:58 | 102,157,542 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,887 | py | """Import tasks for the Palomar Transient Factory (PTF).
"""
import os
from astrocats.catalog.utils import is_number, pbar
from bs4 import BeautifulSoup
from ..kilonova import KILONOVA
def do_ptf(catalog):
    """Import PTF/iPTF events into ``catalog``.

    Three sources are ingested in order: the WISeREP spectra-update page
    (entry names and aliases only), the bundled ``old-ptf-events.csv``
    suffix list, and the Perley et al. 2016 table (aliases, positions,
    claimed types, redshifts, max dates and E(B-V)).
    """
    # response =
    # urllib.request.urlopen('http://wiserep.weizmann.ac.il/objects/list')
    # bs = BeautifulSoup(response, 'html5lib')
    # select = bs.find('select', {'name': 'objid'})
    # options = select.findAll('option')
    # for option in options:
    #     print(option.text)
    #     name = option.text
    #     if ((name.startswith('PTF') and is_number(name[3:5])) or
    #         name.startswith('PTFS') or name.startswith('iPTF')):
    #         name = catalog.add_entry(name)
    task_str = catalog.get_current_task_str()
    # Fetch (or load the cached copy of) the WISeREP object dropdown.
    html = catalog.load_url('http://wiserep.weizmann.ac.il/spectra/update',
                            os.path.join(catalog.get_current_task_repo(),
                                         'PTF/update.html'))
    bs = BeautifulSoup(html, 'html5lib')
    select = bs.find('select', {'name': 'objid'})
    options = select.findAll('option')
    for option in pbar(options, task_str):
        name = option.text
        if (((name.startswith('PTF') and is_number(name[3:5])) or
             name.startswith('PTFS') or name.startswith('iPTF'))):
            # Entries like "PTFxxx (snYYYY)" carry the IAU name in parens;
            # keep the IAU name as primary and the PTF name as alias.
            if '(' in name:
                alias = name.split('(')[0].strip(' ')
                name = name.split('(')[-1].strip(') ').replace('sn', 'SN')
                if name == 'SNiauname':  # A misentered entry
                    continue
                name, source = catalog.new_entry(
                    name, bibcode='2012PASP..124..668Y')
                catalog.entries[name].add_quantity(KILONOVA.ALIAS, alias,
                                                   source)
            else:
                # name = catalog.add_entry(name)
                name, source = catalog.new_entry(
                    name, bibcode='2012PASP..124..668Y')
    # Bundled list of historical PTF event suffixes (one per line).
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'PTF/old-ptf-events.csv')) as f:
        for suffix in pbar(f.read().splitlines(), task_str):
            name = catalog.add_entry('PTF' + suffix)
    # Perley et al. 2016 SLSN table (comma-separated columns).
    with open(
            os.path.join(catalog.get_current_task_repo(),
                         'PTF/perly-2016.csv')) as f:
        for row in pbar(f.read().splitlines(), task_str):
            cols = [x.strip() for x in row.split(',')]
            alias = ''
            # Column 8, when present, holds the preferred (IAU) name.
            if cols[8]:
                name = cols[8]
                alias = 'PTF' + cols[0]
            else:
                name = 'PTF' + cols[0]
            name = catalog.add_entry(name)
            source = catalog.entries[name].add_source(
                bibcode='2016ApJ...830...13P')
            catalog.entries[name].add_quantity(KILONOVA.ALIAS, name, source)
            if alias:
                catalog.entries[name].add_quantity(KILONOVA.ALIAS, alias,
                                                   source)
            catalog.entries[name].add_quantity(KILONOVA.RA, cols[1], source)
            catalog.entries[name].add_quantity(KILONOVA.DEC, cols[2], source)
            catalog.entries[name].add_quantity(KILONOVA.CLAIMED_TYPE,
                                               'SLSN-' + cols[3], source)
            catalog.entries[name].add_quantity(
                KILONOVA.REDSHIFT, cols[4], source, kind='spectroscopic')
            # A leading '<' on the max date marks it as an upper limit.
            maxdate = cols[6].replace('-', '/')
            upl = maxdate.startswith('<')
            catalog.entries[name].add_quantity(
                KILONOVA.MAX_DATE,
                maxdate.lstrip('<'),
                source,
                upperlimit=upl)
            catalog.entries[name].add_quantity(
                KILONOVA.EBV, cols[7], source, kind='spectroscopic')
            # NOTE(review): this line re-adds 'PTF' + suffix on every row
            # using the stale ``suffix`` from the previous loop -- it looks
            # like a stray duplicate of the old-events line; verify/remove.
            name = catalog.add_entry('PTF' + suffix)
    catalog.journal_entries()
    return
| [
"guillochon@gmail.com"
] | guillochon@gmail.com |
443af4141f7802b5d3e978997b9dac8822173592 | 892dd32ee0be7135cd33c875b06dcc66307dcc99 | /automation/MPTS/backup/Accounts.py | 6fe7e7d76954193b0d4e99c8c2a8a0ba288108da | [] | no_license | cloudbytestorage/devops | 6d21ed0afd752bdde8cefa448d4433b435493ffa | b18193b08ba3d6538277ba48253c29d6a96b0b4a | refs/heads/master | 2020-05-29T08:48:34.489204 | 2018-01-03T09:28:53 | 2018-01-03T09:28:53 | 68,889,307 | 4 | 8 | null | 2017-11-30T08:11:39 | 2016-09-22T05:53:44 | Python | UTF-8 | Python | false | false | 1,866 | py | import json
# CloudByte DevOps automation (Python 2): bulk-creates ElastiCenter tenant
# accounts over the HTTP API and attaches a CIFS auth group (user + password)
# to each newly created account.  Raw API responses are written under logs/.
import sys
import time
# Project-local helpers: HTTP GET wrapper, response-to-file logger,
# wall-clock logger, async-job poller and config-file parser.
from cbrequest import sendrequest, filesave, timetrack, queryAsyncJobResult, configFile

# The config file path is taken from the command line (see configFile()).
config = configFile(sys.argv);
# Base API endpoint; each request appends its own 'command=...' query string.
stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])

######## To Add an Account for TSM -- Begins
print "Account Creation Begins"
timetrack("Account Creation Begins")
# Accounts are numbered 1..Number_of_Accounts; per-account settings live in
# the config keys accountName<N> / accountDescription<N>.
for x in range(1, int(config['Number_of_Accounts'])+1):
    querycommand = 'command=createAccount&name=%s&description=%s' %(config['accountName%d' %(x)], config['accountDescription%d' %(x)])
    resp_createAccount=sendrequest(stdurl,querycommand)
    filesave("logs/AccountCreation.txt","w",resp_createAccount)
    data = json.loads(resp_createAccount.text)
    # Success is signalled by the absence of an 'errorcode' key in the reply.
    if not 'errorcode' in data['createaccountresponse']:
        print "%s is created" %(config['accountName%d' %(x)])
        account_id=data["createaccountresponse"]["account2"]["id"]
        #creating Account User Authentication
        name = "%sAUTH" %(config['accountName%d' %(x)])
        user = "%suser" %(config['accountName%d' %(x)])
        # The CIFS password is deliberately identical to the user name.
        password = user
        time.sleep(2);
        querycommand ='command=addCIFSAuthGroup&accountid=%s&name=%s&comment=%s&username=%s&password=%s&fullname=%s' %(account_id, name,"Comment",user,password,"fullname")
        resp_tsmcifsauthgroupresponse=sendrequest(stdurl,querycommand)
        filesave("logs/AccountUserCreation.txt","w",resp_tsmcifsauthgroupresponse)
        data = json.loads(resp_tsmcifsauthgroupresponse.text)
        if not "errortext" in data["tsmcifsauthgroupresponse"]:
            print "%s created" %(name)
        else:
            print "Error in creating %s : %s" %(name,data["tsmcifsauthgroupresponse"]["errortext"])
        # Brief pause between API calls to avoid hammering the appliance.
        time.sleep(2);
    else:
        print "Error in creating %s : %s " %(config['accountName%d' %(x)],str(data['createaccountresponse']['errortext']))
"karthik.s@cloudbyte.com"
] | karthik.s@cloudbyte.com |
1a543e20fb95abcd577d9003d76182a50bd8bce2 | f5ab9e3d4119bee183bf8f8bd8fb2f3fea755fc5 | /backend/home/migrations/0002_load_initial_data.py | d4bb919a45ea61c2c8b90e58cf799aa726e7d3a9 | [] | no_license | crowdbotics-apps/nyayo-wallet-18168 | ac0ad9e481a81055ef499b41eb86f88448675bd7 | 6b9c874b3176713c800dc623b3cb366cc504c585 | refs/heads/master | 2022-11-05T23:37:52.818433 | 2020-06-17T19:52:20 | 2020-06-17T19:52:20 | 273,065,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the home.CustomText table with the application's display title."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="Nyayo Wallet")
def create_homepage(apps, schema_editor):
    """Seed the home.HomePage table with the default landing-page HTML."""
    HomePage = apps.get_model("home", "HomePage")
    # The body is raw HTML rendered on the generated landing page; keep the
    # markup exactly as the Crowdbotics template expects it.
    homepage_body = """
    <h1 class="display-4 text-center">Nyayo Wallet</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record (pk=1) at the deployed domain."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "nyayo-wallet-18168.botics.co"
    defaults = {"name": "Nyayo Wallet"}
    if custom_domain:
        defaults["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data-only migration: seeds the custom text, homepage and site rows."""

    # Runs after the initial home tables and the sites framework exist.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    # NOTE: no reverse functions are supplied, so this migration cannot be
    # unapplied without --fake.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
2fc4d12557de8db0b15da6c67806ef018129e119 | 6271171cbfab3e2b195b73cb936428adc6a1ca96 | /virtual/bin/pip3 | 8033ed37aafbc34b36d8141d1db9b65fd90cd886 | [] | no_license | UmuhireAnuarithe/Neighborhood | 7592f24d0f11ec77d7f46a92cdb9dced5e66dd11 | 60072d868433e38145b74cbe1cee06e16bf58266 | refs/heads/master | 2020-09-04T11:34:29.819137 | 2019-11-07T19:19:30 | 2019-11-07T19:19:30 | 218,758,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | #!/home/wecode/Desktop/Umuhire/Neighborhood/virtual/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated by pip for this virtualenv: normalises
# argv[0] and hands control to pip's CLI entry point.
import re
import sys
from pip._internal.main import main
if __name__ == '__main__':
    # Strip the "-script.pyw" / ".exe" suffix that Windows launchers append,
    # so pip's own usage/error messages show a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"you@example.com"
] | you@example.com | |
a0323ac561a7a50abb306f498d8afa48cd00566e | d7fb8743b6faa4d948b2b08ca0dbdd3b0f11379b | /测试代码/theano/LSTMVRAE-master/VRAE.py | c61d81f248ad9a86b1b74adf9f6bfaf207556f16 | [] | no_license | bancheng/Stock-market | 219e9882858e6d10edad1d13fba67dadbedc27ba | 142ea0eaed0fdccd8e79a51c34d66d1be1c336ed | refs/heads/master | 2021-01-20T15:13:14.667022 | 2017-09-10T06:31:10 | 2017-09-10T06:31:10 | 90,737,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,947 | py | import numpy as np
from chainer import Variable, Chain
from chainer import functions as F
class LSTMVRAE(Chain):
    """
    Class: LSTMVRAE
    ===============
    Implements Variational Recurrent Autoencoders, described here: http://arxiv.org/pdf/1412.6581.pdf
    This specific architecture uses a single-layer LSTM for both the encoder and the decoder.
    """

    def __init__(self, n_input, n_hidden, n_latent, loss_func):
        """
        :param n_input: number of input dimensions
        :param n_hidden: number of LSTM cells for both generator and decoder
        :param n_latent: number of dimensions for latent code (z)
        :param loss_func: loss function to compute reconstruction error (e.g. F.mean_squared_error)
        """
        # Store all constructor arguments as attributes (n_input, n_hidden, ...).
        self.__dict__.update(locals())

        # Layers producing n_hidden*4 feed chainer's F.lstm, which expects the
        # four gate pre-activations concatenated along the feature axis.
        super(LSTMVRAE, self).__init__(
            # Encoder (recognition):
            recog_x_h=F.Linear(n_input, n_hidden*4),
            recog_h_h=F.Linear(n_hidden, n_hidden*4),
            recog_mean=F.Linear(n_hidden, n_latent),
            recog_log_sigma=F.Linear(n_hidden, n_latent),

            # Decoder (generation)
            gen_z_h=F.Linear(n_latent, n_hidden*4),
            gen_x_h=F.Linear(n_input, n_hidden*4),
            gen_h_h=F.Linear(n_hidden, n_hidden*4),
            output=F.Linear(n_hidden, n_input)
        )

    def make_initial_state(self):
        """Returns an initial state of the RNN - all zeros"""
        # Separate (cell, hidden) pairs for the recognition and generation LSTMs,
        # each shaped (batch=1, n_hidden).
        return {
            'h_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),
            'c_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),
            'h_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),
            'c_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32))
        }

    def forward(self, x_data, state):
        """
        Does encode/decode on x_data.
        :param x_data: input data (a single timestep) as a numpy.ndarray
        :param state: previous state of RNN
        :param nonlinear_q: nonlinearity used in q(z|x) (encoder)
        :param nonlinear_p: nonlinearity used in p(x|z) (decoder)
        :param output_f: #TODO#
        :return: output, recognition loss, KL Divergence, state
        """
        # NOTE(review): despite the docstring, x_data is treated as a whole
        # sequence of shape (timesteps, n_input); each row is one timestep.

        #=====[ Step 1: Compute q(z|x) - encoding step, get z ]=====

        # Forward encoding: run the recognition LSTM over every timestep,
        # mutating state['c_rec'] / state['h_rec'] in place.
        for i in range(x_data.shape[0]):
            x = Variable(x_data[i].reshape((1, x_data.shape[1])))
            h_in = self.recog_x_h(x) + self.recog_h_h(state['h_rec'])
            c_t, h_t = F.lstm(state['c_rec'], h_in)
            state.update({'c_rec':c_t, 'h_rec':h_t})

        # Compute q_mean and q_log_sigma from the final encoder hidden state.
        q_mean = self.recog_mean( state['h_rec'] )
        q_log_sigma = 0.5 * self.recog_log_sigma( state['h_rec'] )

        # Compute KL divergence based on q_mean and q_log_sigma
        # NOTE(review): the textbook VAE coefficient is -0.5; the extra 1e-3
        # factor here looks like a fixed KL down-weighting -- confirm intent.
        KLD = -0.0005 * F.sum(1 + q_log_sigma - q_mean**2 - F.exp(q_log_sigma))

        # Reparameterisation trick: z = q_mean + noise*exp(q_log_sigma)
        eps = Variable(np.random.normal(0, 1, q_log_sigma.data.shape ).astype(np.float32))
        z = q_mean + F.exp(q_log_sigma) * eps

        #=====[ Step 2: Compute p(x|z) - decoding step ]=====

        # Initial step: prime the generator LSTM from the latent code z.
        output = []
        h_in = self.gen_z_h(z)
        c_t, h_t = F.lstm(state['c_gen'], h_in)
        state.update({'c_gen':c_t, 'h_gen':h_t})

        # Decode timestep-by-timestep, feeding each prediction back in and
        # accumulating reconstruction loss against the true sequence.
        rec_loss = Variable(np.zeros((), dtype=np.float32))
        for i in range(x_data.shape[0]):
            # Get output and loss
            x_t = self.output(h_t)
            output.append(x_t.data)
            rec_loss += self.loss_func(x_t, Variable(x_data[i].reshape((1, x_data.shape[1]))))
            # Get next hidden state
            h_in = self.gen_x_h(x_t) + self.gen_h_h(state['h_gen'])
            c_t, h_t = F.lstm(state['c_gen'], h_in)
            state.update({'c_gen':c_t, 'h_gen':h_t})

        #=====[ Step 3: Compute KL-Divergence based on all terms ]=====
        return output, rec_loss, KLD, state
| [
"tangdongge@buaa.edu.cn"
] | tangdongge@buaa.edu.cn |
28c55dd0c36cd14490b30996420bde8006459891 | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1809541/homework02/program02.py | 219171cdc7d415d3f2ab468fa1f3b181bae13566 | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | '''
Un file di compiti contiene informazioni su un insieme di compiti da eseguire.
Esistono due tipologie di compiti:
- compiti che possono essere eseguiti indipendentemente dagli altri.
- compiti da svolgere solo al termine di un compito preliminare.
I compiti del primo tipo sono codificati nel file mediante una linea che contiene
in sequenza le due sottostringhe "comp" ed "N" (senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "N" e' l'ID del compito (un numero positivo).
Compiti del secondo tipo sono codificati nel file mediante due linee di codice.
-- la prima linea, contiene in sequenza le due sottostringhe "comp" ed "N"
(senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "N" e' l'ID del compito (un numero positivo).
-- la seconda linea (immediatamente successiva nel file) contiene
in sequenza le due sottostringhe "sub" ed "M" (senza virgolette) eventualmente inframmezzate,
precedute e/o seguite da spazi. "M" e' l'ID del compito preliminare.
il seguente file di compiti contiene informazioni su 4 compiti (con identificativi 1,3,7 e 9).
I compiti con identificativi 1 e 9 possono essere svolti indipendentemente dagli altri mentre i compiti
con identificativo 3 e 7 hanno entrambi un compito preliminare.
comp 3
sub 9
comp1
comp 9
comp 7
sub3
Scrivere la funzione pianifica(fcompiti,insi,fout) che prende in input:
- il percorso di un file (fcompiti)
- un insieme di ID di compiti da cercare (insi)
- ed il percorso di un file (fout)
e che salva in formato JSON nel file fout un dizionario (risultato).
Il dizionario (risultato) dovra' contenere come chiavi gli identificativi (ID) dei compiti
presenti in fcompiti e richiesti nell'insieme insi.
Associata ad ogni ID x del dizionario deve esserci una lista contenente gli identificativi (ID) dei compiti
che bisogna eseguire prima di poter eseguire il compito x richiesto
(ovviamente la lista di un ID di un compito che non richie un compito preliminare risultera' vuota ).
Gli (ID) devono comparire nella lista nell'ordine di esecuzione corretto, dal primo fino a quello precedente a quello richiesto
(ovviamente il primo ID di una lista non vuota corripondera' sempre ad un compito che non richiede un compito preliminare).
Si puo' assumere che:
- se l' ID di un compito che richieda un compito preliminare e' presente in fcompiti
allora anche l'ID di quest'ultimo e' presente in fcompiti
- la sequenza da associare al compito ID del dizionario esiste sempre
- non esistono cicli (compiti che richiedono se' stessi anche indirettamente)
Ad esempio per il file di compiti fcompiti contenente:
comp 3
sub 9
comp1
comp 9
comp 7
sub3
al termine dell'esecuzione di pianifica(fcompiti,{'7','1','5'}, 'a.json')
il file 'a.json' deve contenere il seguente dizionario
{'7':['9','3'],'1':[]}
Per altri esempi vedere il file grade02.txt
AVVERTENZE:
non usare caratteri non ASCII, come le lettere accentate;
non usare moduli che non sono nella libreria standard.
NOTA: l'encoding del file e' 'utf-8'
ATTENZIONE: Se un test del grader non termina entro 10 secondi il punteggio di quel test e' zero.
'''
def test(diz,insi):
result={}
lun=len(insi)
for i in range(0,lun):
lis=[]
if insi != set():
c=insi.pop()
if c in diz:
result[c]=lis
while c in diz and diz[c]!='':
lis.append(diz[c])
c=diz[c]
lis.reverse()
return result
def pianifica(fcompiti, insi, fout):
    """Read the task file *fcompiti* and, for every task id requested in
    *insi*, write to *fout* (JSON) a dict mapping that id to the ordered
    list of prerequisite ids to execute first.

    File format: a "comp N" line declares task N; an immediately following
    "sub M" line declares M as N's prerequisite.  Spaces are ignored.
    NOTE: delegates chain resolution to the module-level test() helper,
    which consumes *insi*.
    """
    from json import dump
    diz = {}
    # Map every task id to its prerequisite id ('' when it has none).
    # BUGFIX: the input file handle was previously never closed; both files
    # are now managed with context managers.  Encoding is utf-8 per the
    # assignment's note.
    with open(fcompiti, 'r', encoding='utf-8') as lines:
        key = None
        for raw in lines:
            line = raw.replace(' ', '').replace('\n', '')
            if 'sub' not in line:
                # "comp N" line: register task N with (so far) no prerequisite.
                key = line.replace('comp', '')
                diz[key] = ''
            else:
                # "sub M" line: attach prerequisite M to the task just seen.
                diz[key] += line.replace('sub', '')
    risultato = test(diz, insi)
    with open(fout, 'w', encoding='utf-8') as js:
        dump(risultato, js)
| [
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
bd207e42fac6fcf7bc3bd69478759816700352a8 | 0c35b8b69fe899bf510826c52ab3171443acdaf3 | /sportshack/predictor/migrations/0001_initial.py | d15192eb788edc952ded02a54ed439eeb4969c95 | [
"MIT"
] | permissive | vaastav/SportsHack | c37c8e9b315142091ad9dbe6b50268880421a69d | 6d20d1abcb1d72659607c08e4a9aafc291162c58 | refs/heads/master | 2020-02-26T17:12:24.526099 | 2016-03-12T21:50:54 | 2016-03-12T21:50:54 | 47,007,457 | 0 | 0 | null | 2016-03-12T21:50:55 | 2015-11-28T02:44:14 | null | UTF-8 | Python | false | false | 3,843 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the predictor app: games, plays, players,
    predictions, teams and a per-user points profile."""

    # Depends on whichever user model the project configures as AUTH_USER_MODEL.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # One football game: the two teams, final score and per-quarter scores.
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('game_id', models.IntegerField()),
                ('home_team', models.CharField(max_length=255)),
                ('away_team', models.CharField(max_length=255)),
                ('home_score', models.IntegerField()),
                ('away_score', models.IntegerField()),
                ('home_qt1', models.IntegerField()),
                ('home_qt2', models.IntegerField()),
                ('home_qt3', models.IntegerField()),
                ('home_qt4', models.IntegerField()),
                ('away_qt1', models.IntegerField()),
                ('away_qt2', models.IntegerField()),
                ('away_qt3', models.IntegerField()),
                ('away_qt4', models.IntegerField()),
                # NOTE(review): fixed timestamp baked in when the migration was
                # generated (datetime.now() evaluated at makemigrations time);
                # likely intended to be auto_now_add or timezone.now.
                ('date', models.DateTimeField(blank=True, default=datetime.datetime(2015, 11, 29, 7, 18, 13, 49051))),
            ],
        ),
        # A single play and whether it succeeded.
        migrations.CreateModel(
            name='Play',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('type_id', models.CharField(max_length=255)),
                ('success', models.IntegerField()),
            ],
        ),
        # A player with basic bio data and season stats.
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('team', models.CharField(max_length=255)),
                ('touchdown', models.IntegerField()),
                ('points', models.IntegerField()),
                ('fumbles', models.IntegerField()),
                ('height', models.DecimalField(decimal_places=7, max_digits=10)),
                ('weight', models.IntegerField()),
                ('birthplace', models.CharField(max_length=255)),
                ('position', models.CharField(max_length=255)),
            ],
        ),
        # Points awarded for correctly predicting a given play.
        migrations.CreateModel(
            name='Predictions',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('award_points', models.IntegerField()),
                ('play', models.ForeignKey(to='predictor.Play')),
            ],
        ),
        # Team standings and aggregate scoring.
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('win', models.IntegerField()),
                ('loss', models.IntegerField()),
                ('points', models.IntegerField()),
                ('points_scored', models.IntegerField()),
                ('points_conceded', models.IntegerField()),
            ],
        ),
        # Per-account prediction profile, linked 1:1 to the auth user.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('points', models.IntegerField()),
                ('num_votes', models.IntegerField()),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)),
            ],
        ),
    ]
| [
"vaastav.anand05@gmail.com"
] | vaastav.anand05@gmail.com |
7191f92baf553381ff048a6940f844b83fc097a7 | ca064338a35104cc94c60b330fc9b60efee6a089 | /cabunicrisis/comparativegenomics/FastANI/FastANI_output_to_distance_matrix.py | 733ec67fa6482e409befb0df111fba5a6c1b4022 | [] | no_license | compgenomics2020/Team2-WebServer | 76d9c5e9ac01be84da2b19ee8c7cfcb088e24911 | 0e550cc523e93afd4417af688a1e9ada79ae489f | refs/heads/master | 2022-06-23T08:16:11.751133 | 2020-05-02T14:00:43 | 2020-05-02T14:00:43 | 263,474,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,910 | py | #!/usr/bin/env python3
"""
Copyright 2019 by Jianshu Zhao (jansuechao@hotmail.com)
This script uses FastANI output to generate a PHYLIP distance matrix suitable for tree/headmap et.al.
"""
import argparse
import sys
def get_arguments():
    """Parse and return this script's command-line arguments."""
    parser = argparse.ArgumentParser(description='Distance matrix from pairwise identities')
    identities_help = ('FastANI output file (or similarly formatted file with three '
                       'whitespace-delimited columns of assembly 1, assembly 2, percent '
                       'identity')
    parser.add_argument('identities', type=str, help=identities_help)
    parser.add_argument('--max_dist', type=float, required=False, default=1.0,
                        help='Maximum allowed genomic distance')
    return parser.parse_args()
def main():
    """Read FastANI pairwise identities and print a PHYLIP-style matrix.

    Progress messages go to stderr; the matrix itself (cluster count header
    followed by one tab-separated row per cluster) goes to stdout.
    """
    args = get_arguments()

    clusters = set()
    distances = {}

    for message in ('', 'Convert FastANI distances to PHYLIP matrix',
                    '------------------------------------------------'):
        print(message, file=sys.stderr)

    with open(args.identities, 'rt') as fastani_output:
        for line in fastani_output:
            fields = line.strip().split()
            name_a, name_b = fields[0], fields[1]
            identity = float(fields[2])
            # Self-comparisons are pinned to 1.0; otherwise scale percent to [0, 1].
            value = 1.0 if name_a == name_b else identity / 100.0
            clusters.update((name_a, name_b))
            # Store both orientations; add_distance averages repeated pairs.
            add_distance(distances, name_a, name_b, value)
            add_distance(distances, name_b, name_a, value)

    print('Found {} clusters and {} distances'.format(len(clusters), len(distances)),
          file=sys.stderr)

    print(len(clusters))
    ordered = sorted(clusters)
    for row_name in ordered:
        cells = [row_name]
        for col_name in ordered:
            # Missing pairs, and anything above the cap, are clamped to max_dist.
            cells.append('%.6f' % min(distances.get((row_name, col_name), args.max_dist),
                                      args.max_dist))
        print('\t'.join(cells))
    print('', file=sys.stderr)
def add_distance(distances, cluster_1, cluster_2, distance):
    """Record *distance* for the (cluster_1, cluster_2) pair in *distances*.

    On first sight the value is stored as-is; when the pair was already seen
    (from the reverse direction of the FastANI output) the stored value and
    the new one are averaged, after a sanity check that they roughly agree.
    """
    key = (cluster_1, cluster_2)
    if key in distances:
        previous = distances[key]
        assert abs(distance - previous) < 0.1
        distances[key] = (previous + distance) / 2.0
    else:
        distances[key] = distance
if __name__ == '__main__':
    # Allow importing this file (e.g. for testing) without side effects.
    main()
| [
"github-noreply@oit.gatech.edu"
] | github-noreply@oit.gatech.edu |
5090607a5ef39fea84b21da06b8f38aeeea43fb4 | 13d013cd5481ad47d48cf87750647f08a9c630ed | /melons.py | b5545fdc3526f8ce157ea918c5c488daa28dd367 | [] | no_license | marjanasarker/oo-melons | 7878de32f9ead132770c77570ae1410a7fe29ed3 | a75fcce861b1e4ea47896921f2fbc221d875764f | refs/heads/master | 2023-03-21T11:01:13.996561 | 2021-03-12T21:00:08 | 2021-03-12T21:00:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,021 | py | """Classes for melon orders."""
class AbstractMelonOrder():
    """An abstract base class that other Melon Orders inherit from.

    Subclasses are expected to define the class attributes ``tax``
    (sales-tax rate) and ``order_type`` (e.g. "domestic", "international").
    """

    def __init__(self, species, qty):
        """Record the melon species and the quantity ordered."""
        self.species = species
        self.qty = qty

    def get_total(self):
        """Calculate and return the order price, including tax.

        Pricing: base price is 5 per melon; Christmas melons cost 1.5x;
        international orders of fewer than 10 melons pay a +3 surcharge
        per melon.
        """
        base_price = 5
        if self.species == "ChristmasMelon":
            base_price *= 1.5
        # BUGFIX: the original compared against "International", but the
        # international subclass sets order_type = "international", so the
        # small-order surcharge was never applied.  Compare case-insensitively.
        if self.order_type.lower() == "international" and self.qty < 10:
            base_price += 3
        total = (1 + self.tax) * self.qty * base_price
        return total

    def mark_shipped(self):
        """Record the fact that an order has been shipped."""
        self.shipped = True
class DomesticMelonOrder(AbstractMelonOrder):
    """A melon order shipped within the USA."""

    tax = 0.08
    order_type = 'domestic'

    def __init__(self, species, qty):
        """Create a domestic order; new orders start out unshipped."""
        super().__init__(species, qty)
        self.shipped = False
class InternationalMelonOrder(AbstractMelonOrder):
    """An international (non-US) melon order."""

    tax = 0.17
    order_type = "international"

    def __init__(self, species, qty, country_code):
        """Create an international order destined for *country_code*."""
        super().__init__(species, qty)
        self.country_code = country_code
        self.shipped = False

    def get_country_code(self):
        """Return the destination country code."""
        return self.country_code
class GovernmentMelonOrder(AbstractMelonOrder):
    """A U.S. Government melon order: tax-free, but subject to inspection."""

    tax = 0
    passed_inspection = False
    order_type = "Government"

    def __init__(self, species, qty):
        """Create a government order; melons start out uninspected."""
        super().__init__(species, qty)
        # BUGFIX: the original set self.marked_inspection = False here, which
        # shadowed (and disabled) the marked_inspection() method on every
        # instance.  Track the inspection result instead.
        self.passed_inspection = False

    def marked_inspection(self, status="passed"):
        """Record the outcome of a melon inspection.

        BUGFIX: the original compared the *instance* to "passed" and assigned
        the result to a local variable, so the outcome was never stored.
        Now the instance flag reflects whether *status* equals "passed".
        """
        self.passed_inspection = (status == "passed")
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
87b129553779da9bda36a4a63c210538154e7ef6 | 2fa4007849c1ec0f9c009ba536887001217b7f9f | /sgkit_plink/tests/test_pysnptools.py | ac5df4ddbc65a1abad0d9c88df7aa98d4361d3b7 | [
"Apache-2.0"
] | permissive | eric-czech/sgkit-plink | 219195318836dcfbfbb6de9b720ef6f3e183f7be | 4f10c1b0b260fa80fe18c31d28a434c175e558cd | refs/heads/master | 2022-12-08T18:28:09.111583 | 2020-08-04T13:49:56 | 2020-08-04T13:49:56 | 276,471,757 | 0 | 0 | null | 2020-07-01T20:06:50 | 2020-07-01T20:06:50 | null | UTF-8 | Python | false | false | 3,100 | py | import numpy as np
import pytest
import xarray as xr
from sgkit_plink.pysnptools import read_plink
# Name of the on-disk PLINK fixture; per its name: 10 samples x 100 variants,
# ~10% missing calls.
example_dataset_1 = "plink_sim_10s_100v_10pmiss"


@pytest.fixture(params=[dict()])
def ds1(shared_datadir, request):
    """Load the example PLINK dataset; extra read_plink kwargs come from params."""
    path = shared_datadir / example_dataset_1
    return read_plink(path=path, bim_sep="\t", fam_sep="\t", **request.param)
def test_read_multi_path(shared_datadir, ds1):
    """Loading via explicit bed/bim/fam paths must equal the single-path load."""
    path = shared_datadir / example_dataset_1
    ds2 = read_plink(
        bed_path=path.with_suffix(".bed"),
        bim_path=path.with_suffix(".bim"),
        fam_path=path.with_suffix(".fam"),
        bim_sep="\t",
        fam_sep="\t",
    )
    xr.testing.assert_equal(ds1, ds2)
def test_raise_on_both_path_types():
    """Passing both `path` and an individual `*_path` argument must raise."""
    with pytest.raises(
        ValueError,
        match="Either `path` or all 3 of `{bed,bim,fam}_path` must be specified but not both",
    ):
        read_plink(path="x", bed_path="x")
def test_read_slicing(ds1):
    """Slicing the call array should shrink only the sliced dimensions."""
    gt = ds1["call_genotype"]
    n_variants, n_samples, n_ploidy = gt.shape
    assert gt[:3].shape == (3, n_samples, n_ploidy)
    assert gt[:, :3].shape == (n_variants, 3, n_ploidy)
    assert gt[:3, :5].shape == (3, 5, n_ploidy)
    assert gt[:3, :5, :1].shape == (3, 5, 1)
assert gt[:3, :5, :1].shape == (3, 5, 1)
@pytest.mark.parametrize("ds1", [dict(bim_int_contig=True)], indirect=True)
def test_read_int_contig(ds1):
    """With bim_int_contig=True the contig value is used directly as an int."""
    # Test contig parse as int (the value is always "1" in .bed for ds1)
    assert np.all(ds1["variant_contig"].values == 1)
    assert ds1.attrs["contigs"] == ["1"]
@pytest.mark.parametrize("ds1", [dict(bim_int_contig=False)], indirect=True)
def test_read_str_contig(ds1):
    """With bim_int_contig=False contigs are category-indexed strings."""
    # Test contig indexing as string (the value is always "1" in .bed for ds1)
    assert np.all(ds1["variant_contig"].values == 0)
    assert ds1.attrs["contigs"] == ["1"]
def test_read_call_values(ds1):
    """Spot-check individual genotype calls against known fixture values."""
    # Validate a few randomly selected individual calls
    # (spanning all possible states for a call)
    # Each row of idx is a (variant index, sample index) coordinate pair.
    idx = np.array(
        [
            [50, 7],
            [81, 8],
            [45, 2],
            [36, 8],
            [24, 2],
            [92, 9],
            [26, 2],
            [81, 0],
            [31, 8],
            [4, 9],
        ]
    )
    # Expected diploid genotype at each coordinate; -1 marks a missing allele.
    expected = np.array(
        [
            [1, 0],
            [1, 0],
            [1, 1],
            [1, 1],
            [-1, -1],
            [0, 0],
            [0, 0],
            [1, 1],
            [0, 0],
            [0, 0],
        ]
    )
    gt = ds1["call_genotype"].values
    actual = gt[tuple(idx.T)]
    np.testing.assert_equal(actual, expected)
def test_read_stat_call_rate(ds1):
    """Per-sample call rates must match the fixture's known missingness."""
    # Validate call rate for each sample: a call counts when either allele
    # is non-negative (i.e. not missing).
    sample_call_rates = (
        (ds1["call_genotype"] >= 0).max(dim="ploidy").mean(dim="variants").values
    )
    np.testing.assert_equal(
        sample_call_rates, [0.95, 0.9, 0.91, 0.87, 0.86, 0.83, 0.86, 0.87, 0.92, 0.92]
    )
)
def test_read_stat_alt_alleles(ds1):
    """Per-sample alt-allele totals must match the fixture's known values."""
    # Validate alt allele sum for each sample; clip(0, 2) maps missing (-1)
    # calls to 0 so they do not affect the totals.
    n_alt_alleles = (
        ds1["call_genotype"].clip(0, 2).sum(dim="ploidy").sum(dim="variants").values
    )
    np.testing.assert_equal(n_alt_alleles, [102, 95, 98, 94, 88, 91, 90, 98, 96, 103])
| [
"eric.allen.czech@gmail.com"
] | eric.allen.czech@gmail.com |
7ef1801849d89765bd4444252fe2be738977662f | fe9d6ff3d7a39bb57a6ed7a973c0318d3a7aa189 | /chapters5/exe5_14.py | 30417bf07dc5fd973a3261b80db35c60467c31c2 | [] | no_license | yiguming/python_core_progreamming | 12cfca8b44b187a706c7dd7e1bb73ab3ef1f7552 | 9b7790938f33523c0cd4172b0d508e49bbddf17a | refs/heads/master | 2020-03-29T16:27:55.888601 | 2015-04-19T03:09:39 | 2015-04-19T03:09:39 | 31,845,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | #!/usr/bin/env python
def huibao(basemoney,rate):
totalmoney = basemoney*(1+rate)*365
huibao = totalmoney / float(basemoney)
print "The basemoney %.2f ,after a year it will be change %.2f ,and the huibaolv is %f"%(basemoney,totalmoney,huibao)
if __name__ == "__main__":
huibao(100.00,0.25)
| [
"439309415@qq.com"
] | 439309415@qq.com |
1067841ff7b2255d7deb7766bb7e37d66f3416ec | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/Wprime/Wprime_WZ_WlepZhad_narrow_M3000_13TeV-madgraph_cff.py | 5ea1fd9178d90686f424118336088b009a602725 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 769 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/91ab3ea30e3c2280e4c31fdd7072a47eb2e5bdaa/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-1/Wprime_WZ_WlepZhad/Wprime_WZ_WlepZhad_narrow_M3000
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-1/Wprime_WZ_WlepZhad/narrow/v2/Wprime_WZ_WlepZhad_narrow_M3000_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
| [
"sheffield@physics.rutgers.edu"
] | sheffield@physics.rutgers.edu |
323a93c9c5d16e668db2cef2445cd7fe25a23394 | 91ac6c1be33810c98cb49a18318603bcab1ff37d | /temp.py | 3d64b4e0a5fcfbe0c0c16eb54fdfc8bd8eb7b5bd | [] | no_license | antonylu/serial | 492a1e61cc65e9c32f7e49ff1cf1a2281b6c8045 | 141986c3c8a911d9560ab133e1e7488627094f71 | refs/heads/master | 2022-11-13T00:07:18.333268 | 2020-06-30T12:53:33 | 2020-06-30T12:53:33 | 275,778,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,181 | py | import serial
import pygame
import json
BG_L_IMG = "bg.jpg"
BG_P_IMG = "bgp.jpg"
OPTION_JSON = "options.json"
class Temp():
    """Full-screen serial-thermometer display.

    Reads temperature readings line-by-line from a serial port and renders
    them with pygame, sounding an alarm above a configured threshold.
    """

    def __init__(self):
        pygame.init()
        pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
        self._surface = pygame.display.get_surface()
        # NOTE(review): the file handle from open() is never closed; use a
        # context manager if this is revisited.
        options = json.load(open(OPTION_JSON))
        self.s = serial.Serial(options['COM_PORT'])
        self.high_temp = float(options['HIGH_TEMP'])
        # Only enter the main loop if the serial port opened successfully.
        self._running = True if self.s else False
        self.w, self.h = self._surface.get_size()
        # Pick background, text position and font size from screen orientation.
        self.portrait_mode = True if self.w < self.h else False
        if self.portrait_mode:
            self.bg = pygame.image.load(BG_P_IMG)
            self.text_center = (int(self.w/2), 590)
            self.font = pygame.font.SysFont('consolas', 360)
        else: # landscape mode
            self.bg = pygame.image.load(BG_L_IMG)
            self.text_center = (int(self.w/2), int(self.h/2)+50)
            self.font = pygame.font.SysFont('consolas', 512)
        pygame.mouse.set_visible(False)
        self.alarm = pygame.mixer.Sound('balarm.wav')

    def run(self):
        """Main loop: handle events, read one sample, redraw; quit when stopped."""
        while self._running:
            self._handle_events()
            self._get_temperature()
            self._redraw()
        pygame.quit()

    def _get_temperature(self) -> None:
        # One reading per line, ASCII-encoded; blocks until a line arrives.
        self.temp = self.s.readline().decode('ascii').strip()
        #print(self.temp)

    def _redraw(self) -> None:
        tt = float(self.temp)
        # Above the threshold: red text and an audible alarm; otherwise white.
        if tt > self.high_temp:
            self.alarm.play()
            text = self.font.render(self.temp, 1, (255, 0, 0))
        else:
            text = self.font.render(self.temp, 1, (255, 255, 255))
            self.alarm.stop()
        text_rect = text.get_rect(center=self.text_center)
        self._surface.blit(self.bg, (0,0))
        self._surface.blit(text, text_rect)
        #self._surface.fill(pygame.Color(41,36,33))
        pygame.display.update()

    def _handle_events(self) -> None:
        # ESC exits the full-screen display.
        for e in pygame.event.get():
            if e.type == pygame.KEYDOWN:
                if e.key == pygame.K_ESCAPE:
                    self._running = False
if __name__ == '__main__':
    # NOTE(review): run() returns None, so 'sn' is always bound to None.
    sn = Temp().run()
| [
"w3back@gmail.com"
] | w3back@gmail.com |
2f69b6682d0e02c84f16d9a1392c5798407925f3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_desisted.py | dc89e70ec3bbf682ce15215f48ef5b72a515f1d5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
#calss header
class _DESISTED():
def __init__(self,):
self.name = "DESISTED"
self.definitions = desist
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['desist']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
11bec3b555107fc73d0e2a16654cd5dc9d63550a | 7e4460c85790fae2d470182732289bcd1b8777b2 | /Process/process_skins.py | 3a30a41c954eafa1f3fe1ae717d05245e99ec506 | [] | no_license | khamukkamu/swconquest-msys | 5b23654c8dd2e8b2f25bc7914252eedc05a5cc1e | 71337a4ae9c507b9440e84cf49d31fc67a781978 | refs/heads/master | 2021-04-29T19:00:10.389224 | 2019-05-01T15:11:11 | 2019-05-01T15:11:11 | 121,704,753 | 1 | 1 | null | 2018-02-16T01:40:58 | 2018-02-16T01:40:58 | null | UTF-8 | Python | false | false | 3,719 | py | import string
from process_common import *
from module_info import *
from module_skins import *
from module_info import wb_compile_switch as is_wb_skin
import string
from process__swyhelper import *
# WARNING: The following should be the same as the number in face_generator.h
num_voice_types = 2
#####################
def replace_spaces(s0):
    """Return *s0* with every space turned into an underscore."""
    return s0.replace(" ", "_")
def write_face_tex(ofile, tex_set):
    """Serialise one skin's face-texture records to *ofile*.

    Each record is (texture_name, color, hair_materials[, hair_colors]);
    the trailing hair_colors list is optional.  Spaces in material names
    are replaced with underscores for the engine's parser.
    """
    ofile.write(" %d " % len(tex_set))
    for record in tex_set:
        texture_name = record[0]
        color = record[1]
        hair_mats = record[2]
        hair_colors = record[3] if len(record) > 3 else []
        ofile.write(" %s %d %d %d " % (texture_name, color,
                                       len(hair_mats), len(hair_colors)))
        for material in hair_mats:
            ofile.write(" %s " % (replace_spaces(material)))
        for hair_color_value in hair_colors:
            ofile.write(" %d " % (hair_color_value))
    ofile.write("\n")
def write_textures(ofile, tex_set):
    """Write a count-prefixed, space-padded list of texture names to *ofile*."""
    ofile.write(" %d " % len(tex_set))
    for texture_name in tex_set:
        ofile.write(" %s " % texture_name)
    ofile.write("\n")
def write_voices(ofile, voices):
    """Write the skin's voice records ((type_id, sound_name) pairs) to *ofile*."""
    ofile.write(" %d " % (len(voices)))
    for record in voices:
        ofile.write(" %d %s " % (record[0], record[1]))
    ofile.write("\n")
def export_skins(skins):
    # Writes the module-system skin records to <export_dir>/skins.txt in the
    # engine's text format (Python 2 script).  Each skin record is a tuple;
    # its positional layout is unpacked below.
    ofile = open(export_dir + "skins.txt","w")
    ofile.write("skins_file version 1\n")
    ofile.write("%d\n"%len(skins))
    # The engine supports at most 30 skins; silently truncate the rest.
    if len(skins) > 29:
        skins = skins[0:30]
    for skin in skins:
        # Warband builds: placeholder "_" meshes are replaced by "dummy_mesh".
        if is_wb_skin:
            #swy-- convert tuple to list to make it writable
            skin = list(skin)
            #swy--
            for i, mesh in enumerate(skin):
                if type(mesh) is str and mesh == "_":
                    skin[i] = "dummy_mesh"
        # Positional unpack of the skin tuple (see module_skins for the layout).
        skin_name = skin[0]
        skin_flags = skin[1]
        body_name = skin[2]
        calf_name = skin[3]
        hand_name = skin[4]
        head_mesh = skin[5]
        face_keys = skin[6]
        hair_meshes = skin[7]
        beard_meshes = skin[8]
        hair_textures = skin[9]
        beard_textures = skin[10]
        face_textures = skin[11]
        voices = skin[12]
        skeleton_name = skin[13]
        scale = skin[14]
        # Optional trailing fields (blood particles and face-key constraints).
        blood_particles_1 = 0
        blood_particles_2 = 0
        constraints = []
        if len(skin) > 15:
            blood_particles_1 = skin[15]
        if len(skin) > 16:
            blood_particles_2 = skin[16]
        if len(skin) > 17:
            constraints = skin[17]
        ofile.write("%s %d\n %s %s %s\n"%(skin_name, skin_flags, body_name, calf_name, hand_name))
        # Face keys: (min, max, start, end, display name); the display name is
        # emitted both as an identifier and (underscored) verbatim.
        ofile.write(" %s %d "%(head_mesh,len(face_keys)))
        for face_key in face_keys:
            ofile.write("skinkey_%s %d %d %s %s %s "%(convert_to_identifier(face_key[4]), face_key[0],face_key[1],swytrailzro(face_key[2]),swytrailzro(face_key[3]),replace_spaces(face_key[4])))
        ofile.write("\n%d\n"%len(hair_meshes))
        for mesh_name in hair_meshes:
            ofile.write(" %s "%mesh_name)
        ofile.write("\n %d\n"%len(beard_meshes))
        for bmn in beard_meshes:
            ofile.write(" %s\n"%bmn)
        ofile.write("\n")
        # Texture/voice sub-records are serialised by the helpers above.
        write_textures(ofile,hair_textures)
        write_textures(ofile,beard_textures)
        write_face_tex(ofile,face_textures)
        write_voices(ofile, voices)
        ofile.write(" %s %s "%(skeleton_name, swytrailzro(scale)))
        ofile.write("\n%d %d\n"%(blood_particles_1, blood_particles_2))
        # Constraints: (value, operator, *(face_key, coefficient) pairs);
        # entries from index 2 onward are the pairs.
        ofile.write("%d\n"%(len(constraints)))
        for constraint in constraints:
            ofile.write("\n%s %d %d "%(swytrailzro(constraint[0]), constraint[1], (len(constraint) - 2)))
            for i_pair in xrange(len(constraint)):
                if i_pair > 1:
                    ofile.write(" %s %d"%(swytrailzro(constraint[i_pair][0]), constraint[i_pair][1]))
            ofile.write("\n")
    ofile.close()
# Module entry point (Python 2 print statement): the export runs as a side
# effect when this file is executed by the module build system.
print "Exporting skins..."
export_skins(skins)
| [
"swyterzone@gmail.com"
] | swyterzone@gmail.com |
417b7c78684e01503bc3ad7a901d8c6be1916817 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4379/codes/1649_2446.py | ef3d63fb97cf206c3f0439f899c69140cd7791fa | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | num=float(input("senha (6 digitos): "))
# Extract the six digits of the password already read into `num` above
# (n1 is the most significant digit).
n1=num//100000
n2=(num//10000)%10
n3=(num//1000)%10
n4=(num//100)%10
n5=(num//10)%10
n6=num%10
# Access rule: the even-position digit sum must be divisible by the
# odd-position digit sum.  Guard against a zero odd-position sum (e.g.
# senha 010203), which crashed the original with ZeroDivisionError.
if (n1+n3+n5) != 0 and ((n2+n4+n6)%(n1+n3+n5)==0):
	print("acesso liberado")
else:
	print("senha invalida")
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
cccc38104fcdd3214cf991fab273dde1f1d0454d | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_088/ch35_2020_09_16_23_12_26_483631.py | 9df7bf2f9250ad03afdea874bfc0681b95417cdd | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | continua=True
# Running total of all values entered (the terminating 0 adds nothing).
soma=0
# `continua` (initialized to True above) controls the read loop:
# keep reading integers until the user enters 0.
while(continua):
	numero=int(input("digite o numero"))
	soma+=numero
	if(numero==0):
		continua =False
print(soma) | [
"you@example.com"
] | you@example.com |
9aa5e52832cf35b3e5921015b4c55e33c4e5b7dd | b44a984ac8cfd183e218d56e1ec5d0d3e72d20fd | /High_Frequency/Dynamic Programming/Course Schedule IV/dfs+memo.py | de84bcc80eaf5dd410518179c7f1e887dc08076f | [] | no_license | atomextranova/leetcode-python | 61381949f2e78805dfdd0fb221f8497b94b7f12b | 5fce59e6b9c4079b49e2cfb2a6d2a61a0d729c56 | refs/heads/master | 2021-07-15T20:32:12.592607 | 2020-09-21T00:10:27 | 2020-09-21T00:10:27 | 207,622,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | class Solution:
"""
@param n: an integer, denote the number of courses
@param p: a list of prerequisite pairs
@return: return an integer,denote the number of topologicalsort
"""
def topologicalSortNumber(self, n, p):
post_to_pres = []
cur_list = [str(i) for i in range(n)]
memo = {}
for i in range(n):
post_to_pres.append(set())
for (pre, post) in p:
post_to_pres[post].add(pre)
# return self.dfs(post_to_pres, post_to_count, cur_list, memo)
result = self.dfs(post_to_pres, cur_list, memo)
return result
def dfs(self, post_to_pres, cur_list, memo):
if len(cur_list) == 0:
return 1
key = "".join(cur_list)
if key in memo:
return memo[key]
memo[key] = 0
for i in range(len(cur_list)):
cur_course = int(cur_list[i])
if len(post_to_pres[cur_course]) != 0:
continue
next_list = cur_list[:i] + cur_list[i+1:]
remove_list = []
for course in next_list:
course = int(course)
if cur_course in post_to_pres[course]:
post_to_pres[course].remove(cur_course)
remove_list.append(course)
memo[key] += self.dfs(post_to_pres, next_list, memo)
for course in remove_list:
post_to_pres[course].add(cur_course)
return memo[key] | [
"atomextranova@gmail.com"
] | atomextranova@gmail.com |
fd24ce8e6ab3237a43786af7a37372084eb42eb7 | de4aa86038fb75778b4e6d0e7bb07fc78cf28a0e | /__init__.py | ca2ddad9a898b8b1ae13ba3007b43eafaef53bfb | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | radaniba/jcvi | 718d8b87fc5a5e748841a37f5e100cfa2c232347 | 04d457ea7231897d547ea0bd51b011fe7412f171 | refs/heads/master | 2020-12-31T06:22:16.125846 | 2014-09-18T16:58:43 | 2014-09-18T16:58:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | __author__ = ("Haibao Tang", "Vivek Krishnakumar", "Jingping Li", "Maria Kim")
# Distribution metadata constants (author tuple is defined on the line above).
__copyright__ = "Copyright (c) 2010-2014, Haibao Tang"
__email__ = "tanghaibao@gmail.com"
__license__ = "BSD"
__status__ = "Development"
__version__ = "0.4.9"
| [
"tanghaibao@gmail.com"
] | tanghaibao@gmail.com |
b116d249bb6b8d8d11168a3c5583bf091dcdc466 | 4e13248d569f3d2ba30519e45d7479d8764f84a2 | /lib/plot.py | c68d5a227c947ff42198e2c4592f12990837bf71 | [] | no_license | SunnerLi/Cup2 | 84df0b07df5875f20e0480b7032fe982bb8b4a79 | 8c8c8c0864a4f4b02f1496bb8e91970a04d5c6d0 | refs/heads/master | 2021-06-24T10:07:35.597057 | 2017-09-12T03:08:22 | 2017-09-12T03:08:22 | 93,631,639 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,753 | py | from collections import Counter, defaultdict
from config import *
import numpy as np
import time
import cv2
# Mapping object (Auto-generated)
# Inverse of config's obj_name_2_index.  NOTE: .iteritems() makes this
# module Python 2 only.
obj_index_2_name = {index: name for name, index in obj_name_2_index.iteritems()}
# Other variable (Auto-generated)
kind = len(obj_name_2_index)
# NOTE(review): these module-level values stay None -- the function below
# assigns same-named *locals* instead; confirm whether `global` was intended.
grid_height = None
grid_width = None
# Dilation kernel
# 3x3 rectangular structuring element used to thicken segmentation masks.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
def binaryEdgeMapToRed(img):
    """
    Build a 3-channel image whose last (red, in BGR order) channel carries
    the given single-channel laplacian edge map; the other channels are 0.
    Arg: img - The laplacian edge image
    Ret: The red edge image
    """
    rows = np.shape(img)[0]
    cols = np.shape(img)[1]
    red_image = np.zeros([rows, cols, 3])
    red_image[..., 2] = img
    return red_image
def coverEdge(origin_img, edge_img):
    """
    Cover the original image with laplacian edge image
    * Notice: Since this function used bitwise operator to split the edge region.
    As the result, the region will not work if the pixel is white.
    Arg: origin_img - The original image
    edge_img - The edge image after doing the laplacian process
    Ret: The image with edge covered
    """
    # Work on a copy so the caller's image is never mutated.
    res = np.copy(origin_img)
    # Inverted edge map: non-edge pixels become the background mask.
    edge_map_inv = cv2.bitwise_not(edge_img)
    # Keep the original image only where there is no edge...
    img_bg = cv2.bitwise_and(res, res, mask=edge_map_inv)
    # ...and a red version of the edge map only where there is an edge.
    img_fg = cv2.bitwise_and(binaryEdgeMapToRed(edge_img), binaryEdgeMapToRed(edge_img), mask=edge_img)
    # Combine background and red foreground (cv2.add saturates at 255).
    res = cv2.add(img_bg.astype(np.uint8), img_fg.astype(np.uint8))
    return res
def mergeSegmentAndScoringRes(img, result_segment, result_scoring):
    """
    Merge the segment and scoring result into the original image
    Arg: img - The original image
    result_segment - The predict result after conducting the UNet
    result_scoring - The predict result after conducting the scoring net
    Ret: The image with merge result

    NOTE(review): this module relies on Python 2 integer division (`/` on
    ints) for the grid arithmetic below -- it would produce float indices
    under Python 3.
    """
    # Copy image to prevent revised the original one
    res_img = np.copy(img)
    # Do the connected component
    # Dilate + binarize the mask, then label its 4-connected components.
    result_segment = cv2.dilate(result_segment, kernel)
    result_segment = result_segment.astype(np.uint8)
    num_segment, label_map, component_info_list, centroids = cv2.connectedComponentsWithStats(
        result_segment, 4, cv2.CV_32S)
    # Generate grid variable and form the vector to the original shape
    # (grid cell size in pixels; grid_*_num come from config).
    grid_height = np.shape(img)[0] / grid_height_num
    grid_width = np.shape(img)[1] / grid_width_num
    scores = np.reshape(result_scoring, [kind, grid_height_num, grid_width_num])
    # Plot the ROI binary map
    # A grid cell "responds" when any class has a non-zero score there.
    has_response_map = np.zeros([grid_height_num, grid_width_num])
    for i in range(grid_height_num):
        for j in range(grid_width_num):
            for k in range(kind):
                if scores[k][i][j] != 0.0:
                    has_response_map[i][j] = 1
                    break
    # Create bucket
    # One per-class score accumulator per connected component (the aliased
    # [[None]] placeholders are immediately replaced by fresh arrays).
    component_bucket = [[None]] * np.max(label_map)
    for i in range(len(component_bucket)):
        component_bucket[i] = np.zeros(kind)
    # ----------------------------------------------------------------------------------
    # Collect score
    # ----------------------------------------------------------------------------------
    class_map = np.argmax(scores, axis=0)
    for i in range(grid_height_num):
        for j in range(grid_width_num):
            if has_response_map[i][j] == 1:
                # Determine grid point coordinate tuple
                grid_p1 = (j * grid_width, i * grid_height)
                grid_p2 = (j * grid_width + grid_width, i * grid_height + grid_height)
                # Get the frequent for each component
                mapping_componenet_2_freq = Counter()
                for k in range(grid_p1[1], grid_p2[1]):
                    for m in range(grid_p1[0], grid_p2[0]):
                        if result_segment[k][m] != 0:
                            if not label_map[k][m] in mapping_componenet_2_freq:
                                mapping_componenet_2_freq[label_map[k][m]] = 1
                            else:
                                mapping_componenet_2_freq[label_map[k][m]] += 1
                # Get the most frequent class
                # NOTE(review): if no foreground pixel falls in this cell,
                # freq_class stays an empty list and the bucket indexing
                # below would raise -- confirm cells with responses always
                # contain segment pixels.
                freq_class = mapping_componenet_2_freq.most_common(1)
                if len(freq_class) != 0:
                    freq_class = freq_class[0][0] - 1 # !!??
                # Add result into bucket
                _score = scores[class_map[i][j]][i][j]
                component_bucket[freq_class][class_map[i][j]] += _score
    # Voting
    # Collapse each component's accumulated scores to its best class index.
    for i in range(len(component_bucket)):
        component_bucket[i] = np.argmax(component_bucket[i], axis=0)
    # ----------------------------------------------------------------------------------
    # Plot the result of segmentation
    # ----------------------------------------------------------------------------------
    _, edge_graph = cv2.threshold(result_segment, 127, 255, cv2.THRESH_BINARY)
    _, contour, __ = cv2.findContours(edge_graph, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(res_img, contour, -1, (0, 0, 255), 1)
    # ----------------------------------------------------------------------------------
    # Ploting Bounding box and classification
    # (Select first 5th region)
    # ----------------------------------------------------------------------------------
    # Component 0 is the background, hence the i + 1 offsets below.
    for i in range(num_segment - 1):
        bbox_p1 = (component_info_list[i + 1][cv2.CC_STAT_LEFT], component_info_list[i + 1][cv2.CC_STAT_TOP])
        bbox_p2 = (bbox_p1[0] + component_info_list[i + 1][cv2.CC_STAT_WIDTH],
            bbox_p1[1] + component_info_list[i + 1][cv2.CC_STAT_HEIGHT])
        text_p = (int(round(0.5 * bbox_p1[0] + 0.5 * bbox_p2[0])), bbox_p1[1])
        cent_p = (text_p[0], int(round(0.5 * bbox_p1[1] + 0.5 * bbox_p2[1])))
        exam_extra_p1 = (bbox_p1[0], bbox_p2[1])
        exam_extra_p2 = (bbox_p2[0], bbox_p1[1])
        # Draw a labeled colored box when any of the box's corners/center
        # falls in a responding grid cell; otherwise a dim placeholder box.
        if has_response_map[exam_extra_p1[1] / grid_height][exam_extra_p1[0] / grid_width] != 0 or \
            has_response_map[exam_extra_p2[1] / grid_height][exam_extra_p2[0] / grid_width] != 0 or \
            has_response_map[cent_p[1] / grid_height][cent_p[0] / grid_width] != 0 or \
            has_response_map[bbox_p1[1] / grid_height][bbox_p1[0] / grid_width] != 0 or \
            has_response_map[bbox_p2[1] / grid_height][bbox_p2[0] / grid_width] != 0:
            class_index = component_bucket[i]
            cv2.rectangle(res_img, bbox_p1, bbox_p2, obj_index_2_response_color_tuple[class_index], thickness=2)
            cv2.putText(res_img, obj_index_2_name[class_index], text_p, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
        else:
            cv2.rectangle(res_img, bbox_p1, bbox_p2, (0, 0, 50), thickness=2)
return res_img | [
"a6214123@gmail.com"
] | a6214123@gmail.com |
4046b3d62d1ce23666ab43c8a41a59234f840fd4 | 2850d9adba96bc4e73185de5d6adebf363a5c534 | /tce/tcloud/cvm/RebootInstances.py | 6b68f7510fb20afa20eecb430276d6bf55bdddb3 | [
"Apache-2.0"
] | permissive | FatAnker/tencentcloud-sdk-python | d8f757b12ad336e78a06b68a789ecc3c86d1d331 | d6f75a41dc7053cb51f9091f4d41b8cb7a837559 | refs/heads/master | 2020-04-30T22:34:16.740484 | 2019-04-28T11:14:11 | 2019-04-28T11:14:11 | 177,122,691 | 0 | 1 | null | 2019-03-22T10:46:01 | 2019-03-22T10:46:01 | null | UTF-8 | Python | false | false | 2,131 | py | # -*- coding: utf-8 -*-
import os
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
# Import the client and models of the corresponding product module (CVM).
from tencentcloud.cvm.v20170312 import cvm_client, models
import json
# Import the optional configuration classes.
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
import ssl
from tce.tcloud.utils.config import global_config
# Disable HTTPS certificate verification for this whole process.
ssl._create_default_https_context = ssl._create_unverified_context
# Region and the per-region credentials are read from the global config.
region = global_config.get('regions')
params = global_config.get(region)
secretId = params['secretId']
secretKey = params['secretKey']
domain = params['domain']
try:
    # Instantiate a credential object with the account's secretId/secretKey.
    cred = credential.Credential(secretId, secretKey)
    httpProfile = HttpProfile()
    httpProfile.endpoint = "cvm."+domain
    clientProfile = ClientProfile()
    clientProfile.httpProfile = httpProfile
    # Instantiate a client for the requested product (CVM here);
    # clientProfile is optional.
    client = cvm_client.CvmClient(cred, region, clientProfile)
    # Each API has a corresponding request class; instantiate the
    # RebootInstances request object.
    req = models.RebootInstancesRequest()
    # Request parameters may also be supplied as a standard JSON string,
    # equivalent to assigning the attributes one by one.
    params = '{"InstanceIds":["ins-i4ekkudx","ins-gwggvy39"]}'
    req.from_json_string(params)
    # Invoke RebootInstances through the client; the method name matches the
    # request object, and resp is a RebootInstancesResponse instance.
    resp = client.RebootInstances(req)
    # Print the response as a JSON-formatted string.
    print(resp.to_json_string())
    # Individual fields can also be read; see the API documentation or the
    # response class definition for available attributes.
    # print(resp.TotalCount)
except TencentCloudSDKException as err:
print(err) | [
"1113452717@qq.com"
] | 1113452717@qq.com |
fefac9fb3ef1ecc86facfe365495ab0f28693881 | 87b006149b16a3028385fc58cf781f5a12c94ad9 | /PyFunceble/checker/syntax/second_lvl_domain.py | e01845520774e59a1dd0e96aa7033158eb539543 | [
"Apache-2.0"
] | permissive | spirillen/PyFunceble | 04d03b2678ad46ec81c520a32df5397832414451 | 3c8f62062bffa0e16d465c150a853af8bf2f2205 | refs/heads/master | 2023-05-12T04:32:04.587521 | 2022-11-20T11:19:06 | 2022-11-20T11:19:06 | 237,827,167 | 2 | 0 | Apache-2.0 | 2021-01-27T10:09:59 | 2020-02-02T19:50:47 | Python | UTF-8 | Python | false | false | 4,696 | py | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the second level domain syntax checker.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/latest/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2022 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
from PyFunceble.checker.syntax.domain_base import DomainSyntaxCheckerBase
from PyFunceble.helpers.regex import RegexHelper
class SecondLvlDomainSyntaxChecker(DomainSyntaxCheckerBase):
    """
    Provides an interface to check the syntax of a second domain.
    :param str subject:
        Optional, The subject to work with.
    """
    # pylint: disable=line-too-long
    # Strict hostname pattern: dot-separated labels of at most 63 chars,
    # total length capped at 253, TLD not all-numeric.
    REGEX_VALID_DOMAIN: str = r"^(?=.{0,253}$)(([a-z0-9][a-z0-9-]{0,61}[a-z0-9]|[a-z0-9])\.)+((?=.*[^0-9])([a-z0-9][a-z0-9-]{0,61}[a-z0-9](?:\.)?|[a-z0-9](?:\.)?))$"
    # Relaxed variant: additionally tolerates underscores inside labels.
    REGEX_VALID_RELAXED_DOMAIN: str = r"^(?=.{0,253}$)(([a-z0-9][a-z0-9_-]{0,61}[a-z0-9_-]|[a-z0-9])\.)+((?=.*[^0-9])([a-z0-9][a-z0-9-]{0,61}[a-z0-9](?:\.)?|[a-z0-9](?:\.)?))$"
    last_point_index: Optional[int] = None
    """
    Saves the index of the last point.
    """
    @DomainSyntaxCheckerBase.ensure_subject_is_given
    def is_valid(self) -> bool:
        """
        Validate the given subject.
        .. warning::
            A valid domain may also be a valid subdomain.
            If you precisely want to check a subdomain please refer to the
            right checker (not this one :-) )!
        """
        # pylint: disable=too-many-return-statements
        # NOTE(review): get_extension() presumably also sets
        # self.last_point_index (used below) -- defined in the base class.
        extension = self.get_extension()
        # Reject when there is no extension or the extension is neither an
        # IANA-listed TLD nor a special-use domain name extension.
        if not extension or (
            extension not in self.iana_dataset
            and extension not in self.SPECIAL_USE_DOMAIN_NAMES_EXTENSIONS
        ):
            return False
        subject_without_extension = self.idna_subject[: self.last_point_index]
        subject_without_suffix, _ = self.get_subject_without_suffix(
            self.idna_subject, extension
        )
        if subject_without_suffix:
            # A public-suffix match: exactly one label may precede the
            # suffix; accept strict or relaxed (underscore) label syntax.
            if "." in subject_without_suffix:
                return False
            return RegexHelper(self.REGEX_VALID_DOMAIN).match(
                self.idna_subject, return_match=False
            ) or RegexHelper(self.REGEX_VALID_RELAXED_DOMAIN).match(
                self.idna_subject, return_match=False
            )
        # No suffix match: exactly one label before the extension, strict
        # syntax only.
        if "." in subject_without_extension:
            return False
        return RegexHelper(self.REGEX_VALID_DOMAIN).match(
            self.idna_subject, return_match=False
        )
| [
"contact@funilrys.com"
] | contact@funilrys.com |
9634edfec32f0f9cfa846e90a764fa87057766cf | a20f9643cc79d2ce4fe69176b4439ce5855fdab4 | /backend/songbird_18676/urls.py | db249b68d533e4dd80a645b2e807acbfe92cbca3 | [] | no_license | crowdbotics-apps/songbird-18676 | bde32b1206d3f045403e2ef9e609254e33761994 | 09490a0d9438df7219164cf0c84437a69596c5a6 | refs/heads/master | 2022-11-14T19:46:35.980627 | 2020-07-07T20:58:39 | 2020-07-07T20:58:39 | 277,920,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | """songbird_18676 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Top-level URL routing: app pages, auth (allauth + rest_auth), API v1,
# and the Django admin.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "Songbird"
admin.site.site_title = "Songbird Admin Portal"
admin.site.index_title = "Songbird Admin"
# swagger
# drf-yasg schema view; docs are restricted to authenticated users.
schema_view = get_schema_view(
    openapi.Info(
        title="Songbird API",
        default_version="v1",
        description="API documentation for Songbird App",
    ),
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
# Swagger UI served at /api-docs/ (cache disabled).
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
99ac83b99ddb015a8af7fe01b944afb27dfa0dd2 | 84226827016bf833e843ebce91d856e74963e3ed | /tests/integration/modules/boto_sns.py | 8bff3a65f280a9b4b69437e07f0a875772f21f3b | [
"Apache-2.0"
] | permissive | jbq/pkg-salt | ad31610bf1868ebd5deae8f4b7cd6e69090f84e0 | b6742e03cbbfb82f4ce7db2e21a3ff31b270cdb3 | refs/heads/master | 2021-01-10T08:55:33.946693 | 2015-05-21T13:41:01 | 2015-05-21T13:41:01 | 36,014,487 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | # -*- coding: utf-8 -*-
'''
Validate the boto_sns module
'''
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
import integration
# Feature detection used by the skipIf decorators below: first check that
# boto imports at all, then that AWS credentials are configured (a
# connect_iam() attempt raises NoAuthHandlerFound when they are not).
NO_BOTO_MODULE = True
BOTO_NOT_CONFIGURED = True
try:
    import boto
    NO_BOTO_MODULE = False
    try:
        boto.connect_iam()
        BOTO_NOT_CONFIGURED = False
    except boto.exception.NoAuthHandlerFound:
        pass
except ImportError:
    pass
@skipIf(
    NO_BOTO_MODULE,
    'Please install the boto library before running boto integration tests.'
)
@skipIf(
    BOTO_NOT_CONFIGURED,
    'Please setup boto AWS credentials before running boto integration tests.'
)
class BotoSNSTest(integration.ModuleCase):
    # Integration tests for the boto_sns execution module; each test shells
    # out to the running minion via self.run_function (from ModuleCase).
    def test_exists(self):
        # A topic that was never created must report as absent.
        ret = self.run_function('boto_sns.exists', ['nonexistent'])
        self.assertFalse(ret)
    def test_create(self):
        ret = self.run_function('boto_sns.create', ['my-test-topic'])
        self.assertTrue(ret)
    def test_delete(self):
        ret = self.run_function('boto_sns.delete', ['my-test-topic'])
        self.assertTrue(ret)
    def test_get_all_topics(self):
        # get_all_topics maps topic name -> ARN, so both created topics must
        # appear as keys with their ARNs as values.
        self.run_function('boto_sns.create', ['my-test-topic'])
        self.run_function('boto_sns.create', ['my-second-test-topic'])
        ret = self.run_function('boto_sns.get_all_topics')
        self.assertIn('my-test-topic', ret.keys())
        self.assertIn(self._get_arn('my-test-topic'), ret.values())
        self.assertIn('my-second-test-topic', ret.keys())
        self.assertIn(self._get_arn('my-second-test-topic'), ret.values())
    def _get_arn(self, name):
        # NOTE(review): the region is hard-coded to us-east-1 here --
        # presumably matching the test harness configuration; confirm.
        return 'arn:aws:sns:us-east-1:{0}:{1}'.format(self.account_id, name)
    @property
    def account_id(self):
        # Lazily fetch and cache the AWS account id on first access.
        if not hasattr(self, '_account_id'):
            account_id = self.run_function('boto_iam.get_account_id')
            setattr(self, '_account_id', account_id)
        return self._account_id
| [
"joehealy@gmail.com"
] | joehealy@gmail.com |
2ae43110afb9d1103a7ace2557660dbaa671a972 | 58df224689ab08c99359b1a6077d2fba3728dc61 | /lamda-ocr/merge-files/borb/pdf/canvas/event/begin_text_event.py | edadfc51f9880306c13f85e943a3463f2f3669f3 | [] | no_license | LIT-Midas/LITHackathon | 2b286728c156d79d3f426f6d19b160a2a04690db | 7b990483dd48b91cf3ec3452b78ab67770da71af | refs/heads/main | 2023-08-13T05:22:59.373965 | 2021-08-16T01:09:49 | 2021-08-16T01:09:49 | 395,024,729 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This implementation of Event is triggered when an BT instruction is being processed.
"""
from borb.pdf.canvas.event.event_listener import Event
class BeginTextEvent(Event):
    """
    This implementation of Event is triggered whenever a BT (begin text)
    instruction is being processed; it carries no payload of its own.
    """
| [
"trevordino@gmail.com"
] | trevordino@gmail.com |
5feb3012f0d839b4153bd825a19ad07f4b45e4f4 | 36b75aac4236e928e22552e8812abd45d32aecf1 | /modules/dbnd/src/dbnd/_core/task_ctrl/task_dag_describe.py | a9c956e79acc37da5af6f130546fa1f05224cdb3 | [
"Apache-2.0"
] | permissive | reloadbrain/dbnd | 7793aa1864f678005de626068b0ac9361d637d65 | ec0076f9a142b20e2f7afd886ed1a18683c553ec | refs/heads/master | 2023-09-01T08:04:09.486666 | 2021-10-14T16:43:00 | 2021-10-14T16:43:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,355 | py | from __future__ import print_function
import logging
from dbnd._core.constants import DescribeFormat
from dbnd._core.errors import DatabandSystemError
from dbnd._core.settings import DescribeConfig
from dbnd._core.task_ctrl.task_ctrl import TaskSubCtrl
from dbnd._core.utils.basics.helpers import indent
from dbnd._vendor.termcolor import colored
from dbnd.tasks import DataSourceTask
logger = logging.getLogger(__name__)
def tasks_trail(tasks):
    """Render the tasks' ids as a single "id1 -> id2 -> ..." trail string."""
    return " -> ".join(task.task_id for task in tasks)
class DescribeDagCtrl(TaskSubCtrl):
    """Renders human-readable descriptions (ascii tree / flat list) of a
    task's DAG, coloring each node by its completion status."""
    def __init__(self, task, describe_format=DescribeFormat.long, complete_status=None):
        super(DescribeDagCtrl, self).__init__(task)
        self.describe_format = describe_format
        # dummy implementation of complete cache
        self._complete_status = complete_status or {}
    @property
    def config(self):
        # DescribeConfig settings section for this run.
        return self.settings.describe
    def tree_view(self, describe_format=None):
        """
        Shows an ascii tree representation of the DAG
        """
        # TODO: change to treelib implementation
        seen = set()
        def get_downstream(task, level=0):
            # NOTE(review): despite its name this walks task_dag.upstream;
            # confirm whether the name or the direction is the intent.
            task_desc = self._describe_task(task, describe_format=describe_format)
            if task in seen:
                # Already rendered elsewhere in the tree: mark with (*).
                return [(level, "%s (*)" % task_desc)]
            result = [(level, task_desc)]
            seen.add(task)
            level += 1
            count = 0
            task_dag = task.ctrl.task_dag
            for t in task_dag.upstream:
                count += 1
                if isinstance(t, DataSourceTask):
                    continue
                # Cap the fan-out at 30 children to keep the output readable.
                if count > 30:
                    result.append((level, "..(%s tasks).." % len(task_dag.upstream)))
                    break
                result.extend(get_downstream(t, level))
            return result
        result = get_downstream(self.task)
        # One tab of indentation per tree depth level.
        messages = [indent(msg, "\t" * level) for level, msg in result]
        logger.info(
            "Tasks Graph - (*) represent existing node in the graph run "
            "(green is completed, yellow is going to be submitted):\n%s",
            "\n".join(messages),
        )
    def list_view(self):
        # Flat dump: one description per task of the sub-DAG.
        logger.info("List View of the DAG:\n")
        for t in self.task_dag.subdag_tasks():
            logger.info("%s\n" % self._describe_task(t))
    def _get_task_complete(self, task):
        # Memoized task completion lookup; a failing _complete() is logged
        # and cached as None rather than propagated.
        if task.task_id not in self._complete_status:
            try:
                complete = task._complete()
            except Exception as ex:
                logger.warning(
                    "Failed to get complete status for %s: %s", task.task_id, ex
                )
                complete = None
            self._complete_status[task.task_id] = complete
        return self._complete_status[task.task_id]
    def _describe_task(self, task, describe_format=None, msg=None, color=None):
        # Build a one-node description: short -> colored task id only;
        # long/verbose -> full banner from the task's visualiser.
        describe_format = describe_format or self.describe_format
        describe_config = self.config # type: DescribeConfig
        msg = msg or ""
        if color is None:
            color = "white"
            if not describe_config.no_checks:
                # green = already complete, cyan = still to run.
                color = "green" if self._get_task_complete(task) else "cyan"
        if describe_format == DescribeFormat.short:
            return colored(str(task.task_id), color)
        if (
            describe_format == DescribeFormat.long
            or describe_format == DescribeFormat.verbose
        ):
            title = "%s - %s" % (task.task_name, task.task_id)
            if task.task_name != task.get_task_family():
                title += "(%s)" % task.get_task_family()
            if msg:
                title += ": %s" % msg
            return task.ctrl.visualiser.banner(
                title, color=color, verbose=describe_format == DescribeFormat.verbose
            )
        raise DatabandSystemError("Not supported format mode %s" % self.describe_format)
    def describe_dag(self):
        # print short tree
        self.tree_view(describe_format=DescribeFormat.short)
        self.list_view()
    def describe(self, as_tree=False):
        if as_tree:
            # Imported locally; NOTE(review): DescribeFormat is already
            # imported at module level -- presumably a leftover.
            from dbnd._core.constants import DescribeFormat
            self.tree_view(describe_format=DescribeFormat.short)
        else:
            self.ctrl.describe_dag.list_view()
| [
"roman.slipchenko@databand.ai"
] | roman.slipchenko@databand.ai |
95565b6038b246017520d2f8a8fd5549545a9860 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/account_helpers/__init__.py | c17ac8288f39b7019429fc00e8831b5f8f10f203 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,047 | py | # 2017.05.04 15:20:20 Střední Evropa (letní čas)
# Embedded file name: scripts/client/account_helpers/__init__.py
import datetime
import BigWorld
from constants import ACCOUNT_ATTR
from account_helpers.AccountSettings import AccountSettings, GOLD_FISH_LAST_SHOW_TIME
from shared_utils.account_helpers import BattleResultsCache, ClientClubs
from shared_utils.account_helpers import ClientInvitations
from helpers.time_utils import getCurrentTimestamp
def __checkAccountAttr(attrs, attrID):
    """True when *attrs* is present (not None) and has the given bit set."""
    if attrs is None:
        return False
    return (attrs & attrID) != 0
def isPremiumAccount(attrs):
    """True when the account carries the PREMIUM attribute flag."""
    flag = ACCOUNT_ATTR.PREMIUM
    return __checkAccountAttr(attrs, flag)
def isMoneyTransfer(attrs):
    """True when the account carries the TRADING (money transfer) flag."""
    flag = ACCOUNT_ATTR.TRADING
    return __checkAccountAttr(attrs, flag)
def isDemonstrator(attrs):
    """True when the account carries the ARENA_CHANGE (demonstrator) flag."""
    flag = ACCOUNT_ATTR.ARENA_CHANGE
    return __checkAccountAttr(attrs, flag)
def isRoamingEnabled(attrs):
    """True when the account carries the ROAMING attribute flag."""
    flag = ACCOUNT_ATTR.ROAMING
    return __checkAccountAttr(attrs, flag)
def isOutOfWallet(attrs):
    """True when the account carries the OUT_OF_SESSION_WALLET flag."""
    flag = ACCOUNT_ATTR.OUT_OF_SESSION_WALLET
    return __checkAccountAttr(attrs, flag)
def isClanEnabled(attrs):
    """True when the account carries the CLAN attribute flag."""
    flag = ACCOUNT_ATTR.CLAN
    return __checkAccountAttr(attrs, flag)
def getPremiumExpiryDelta(expiryTime):
    """Return the remaining premium time as a timedelta (negative once the
    UTC epoch timestamp *expiryTime* is in the past)."""
    expiry_utc = datetime.datetime.utcfromtimestamp(expiryTime)
    return expiry_utc - datetime.datetime.utcnow()
def convertGold(gold):
    """Identity conversion, kept as an extension point for gold scaling."""
    return gold
def getPlayerID():
    """Return the current player entity's id, or 0 when unavailable."""
    player_entity = BigWorld.player()
    return getattr(player_entity, 'id', 0)
def getAccountDatabaseID():
    """Return the current account's database id, or 0 when unavailable."""
    player_entity = BigWorld.player()
    return getattr(player_entity, 'databaseID', 0)
def isLongDisconnectedFromCenter():
    """True when the player entity reports a long disconnect from the
    center cluster; False when the flag or the entity is unavailable."""
    player_entity = BigWorld.player()
    return getattr(player_entity, 'isLongDisconnectedFromCenter', False)
def getAccountHelpersConfig(manager):
    """ Configures services for package gui.
    :param manager: helpers.dependency.DependencyManager.
    """
    # Imported locally rather than at module level -- presumably to avoid a
    # circular import between account_helpers and settings_core; confirm.
    from account_helpers import settings_core
    manager.install(settings_core.getSettingsCoreConfig)
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\account_helpers\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:20:20 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
ca486408ec1dad11aa30a35e2c012cbbef64c774 | aaa762ce46fa0347cdff67464f56678ea932066d | /AppServer/lib/django-1.2/tests/regressiontests/forms/localflavor/id.py | cb346ef7213c8b9f9b3ac7246e6189dda91db151 | [
"Apache-2.0",
"BSD-3-Clause",
"LGPL-2.1-or-later",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1"
] | permissive | obino/appscale | 3c8a9d8b45a6c889f7f44ef307a627c9a79794f8 | be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f | refs/heads/master | 2022-10-01T05:23:00.836840 | 2019-10-15T18:19:38 | 2019-10-15T18:19:38 | 16,622,826 | 1 | 0 | Apache-2.0 | 2022-09-23T22:56:17 | 2014-02-07T18:04:12 | Python | UTF-8 | Python | false | false | 6,972 | py | from django.contrib.localflavor.id.forms import (IDPhoneNumberField,
IDPostCodeField, IDNationalIdentityNumberField, IDLicensePlateField,
IDProvinceSelect, IDLicensePlatePrefixSelect)
from utils import LocalFlavorTestCase
class IDLocalFlavorTests(LocalFlavorTestCase):
    """Tests for the Indonesian (ID) localflavor form fields and widgets.

    Each widget test compares the exact rendered HTML; each field test
    feeds ``valid``/``invalid`` mappings of input -> expected output (or
    expected error list) to ``assertFieldOutput``.
    """

    def test_IDProvinceSelect(self):
        # The widget must list every Indonesian province and mark the
        # bound value ("LPG" = Lampung) as selected.
        f = IDProvinceSelect()
        out = u'''<select name="provinces">
<option value="BLI">Bali</option>
<option value="BTN">Banten</option>
<option value="BKL">Bengkulu</option>
<option value="DIY">Yogyakarta</option>
<option value="JKT">Jakarta</option>
<option value="GOR">Gorontalo</option>
<option value="JMB">Jambi</option>
<option value="JBR">Jawa Barat</option>
<option value="JTG">Jawa Tengah</option>
<option value="JTM">Jawa Timur</option>
<option value="KBR">Kalimantan Barat</option>
<option value="KSL">Kalimantan Selatan</option>
<option value="KTG">Kalimantan Tengah</option>
<option value="KTM">Kalimantan Timur</option>
<option value="BBL">Kepulauan Bangka-Belitung</option>
<option value="KRI">Kepulauan Riau</option>
<option value="LPG" selected="selected">Lampung</option>
<option value="MLK">Maluku</option>
<option value="MUT">Maluku Utara</option>
<option value="NAD">Nanggroe Aceh Darussalam</option>
<option value="NTB">Nusa Tenggara Barat</option>
<option value="NTT">Nusa Tenggara Timur</option>
<option value="PPA">Papua</option>
<option value="PPB">Papua Barat</option>
<option value="RIU">Riau</option>
<option value="SLB">Sulawesi Barat</option>
<option value="SLS">Sulawesi Selatan</option>
<option value="SLT">Sulawesi Tengah</option>
<option value="SLR">Sulawesi Tenggara</option>
<option value="SLU">Sulawesi Utara</option>
<option value="SMB">Sumatera Barat</option>
<option value="SMS">Sumatera Selatan</option>
<option value="SMU">Sumatera Utara</option>
</select>'''
        self.assertEqual(f.render('provinces', 'LPG'), out)

    def test_IDLicensePlatePrefixSelect(self):
        # The widget must list every license-plate prefix region and mark
        # the bound value ("BE" = Lampung) as selected.
        f = IDLicensePlatePrefixSelect()
        out = u'''<select name="codes">
<option value="A">Banten</option>
<option value="AA">Magelang</option>
<option value="AB">Yogyakarta</option>
<option value="AD">Surakarta - Solo</option>
<option value="AE">Madiun</option>
<option value="AG">Kediri</option>
<option value="B">Jakarta</option>
<option value="BA">Sumatera Barat</option>
<option value="BB">Tapanuli</option>
<option value="BD">Bengkulu</option>
<option value="BE" selected="selected">Lampung</option>
<option value="BG">Sumatera Selatan</option>
<option value="BH">Jambi</option>
<option value="BK">Sumatera Utara</option>
<option value="BL">Nanggroe Aceh Darussalam</option>
<option value="BM">Riau</option>
<option value="BN">Kepulauan Bangka Belitung</option>
<option value="BP">Kepulauan Riau</option>
<option value="CC">Corps Consulate</option>
<option value="CD">Corps Diplomatic</option>
<option value="D">Bandung</option>
<option value="DA">Kalimantan Selatan</option>
<option value="DB">Sulawesi Utara Daratan</option>
<option value="DC">Sulawesi Barat</option>
<option value="DD">Sulawesi Selatan</option>
<option value="DE">Maluku</option>
<option value="DG">Maluku Utara</option>
<option value="DH">NTT - Timor</option>
<option value="DK">Bali</option>
<option value="DL">Sulawesi Utara Kepulauan</option>
<option value="DM">Gorontalo</option>
<option value="DN">Sulawesi Tengah</option>
<option value="DR">NTB - Lombok</option>
<option value="DS">Papua dan Papua Barat</option>
<option value="DT">Sulawesi Tenggara</option>
<option value="E">Cirebon</option>
<option value="EA">NTB - Sumbawa</option>
<option value="EB">NTT - Flores</option>
<option value="ED">NTT - Sumba</option>
<option value="F">Bogor</option>
<option value="G">Pekalongan</option>
<option value="H">Semarang</option>
<option value="K">Pati</option>
<option value="KB">Kalimantan Barat</option>
<option value="KH">Kalimantan Tengah</option>
<option value="KT">Kalimantan Timur</option>
<option value="L">Surabaya</option>
<option value="M">Madura</option>
<option value="N">Malang</option>
<option value="P">Jember</option>
<option value="R">Banyumas</option>
<option value="RI">Federal Government</option>
<option value="S">Bojonegoro</option>
<option value="T">Purwakarta</option>
<option value="W">Sidoarjo</option>
<option value="Z">Garut</option>
</select>'''
        self.assertEqual(f.render('codes', 'BE'), out)

    def test_IDPhoneNumberField(self):
        # Valid inputs are accepted unchanged; several separator styles
        # and the +62 country prefix are allowed.
        error_invalid = [u'Enter a valid phone number']
        valid = {
            '0812-3456789': u'0812-3456789',
            '081234567890': u'081234567890',
            '021 345 6789': u'021 345 6789',
            '0213456789': u'0213456789',
            '+62-21-3456789': u'+62-21-3456789',
            '(021) 345 6789': u'(021) 345 6789',
        }
        invalid = {
            '0123456789': error_invalid,
            '+62-021-3456789': error_invalid,
            # NOTE(review): duplicate key — this entry repeats the previous
            # one and overwrites it; one of the two was probably meant to
            # be a different malformed number.
            '+62-021-3456789': error_invalid,
            '+62-0812-3456789': error_invalid,
            '0812345678901': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDPhoneNumberField, valid, invalid)

    def test_IDPostCodeField(self):
        # Post codes are exactly five digits; surrounding whitespace is
        # stripped, but internal spaces and out-of-range codes are rejected.
        error_invalid = [u'Enter a valid post code']
        valid = {
            '12340': u'12340',
            '25412': u'25412',
            ' 12340 ': u'12340',
        }
        invalid = {
            '12 3 4 0': error_invalid,
            '12345': error_invalid,
            '10100': error_invalid,
            '123456': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDPostCodeField, valid, invalid)

    def test_IDNationalIdentityNumberField(self):
        # NIK/KTP numbers normalize to the dotted xx.xxxx.ddmmyy.xxxx form;
        # the embedded date portion must be plausible.
        error_invalid = [u'Enter a valid NIK/KTP number']
        valid = {
            ' 12.3456.010178 3456 ': u'12.3456.010178.3456',
            '1234560101783456': u'12.3456.010178.3456',
            '12.3456.010101.3456': u'12.3456.010101.3456',
        }
        invalid = {
            '12.3456.310278.3456': error_invalid,
            '00.0000.010101.0000': error_invalid,
            '1234567890123456': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDNationalIdentityNumberField, valid, invalid)

    def test_IDLicensePlateField(self):
        # Plates normalize to upper case with single spaces; special
        # prefixes (RI, CC/CD) have their own numeric constraints.
        error_invalid = [u'Enter a valid vehicle license plate number']
        valid = {
            ' b 1234 ab ': u'B 1234 AB',
            'B 1234 ABC': u'B 1234 ABC',
            'A 12': u'A 12',
            'DK 12345 12': u'DK 12345 12',
            'RI 10': u'RI 10',
            'CD 12 12': u'CD 12 12',
        }
        invalid = {
            'CD 10 12': error_invalid,
            'CD 1234 12': error_invalid,
            'RI 10 AB': error_invalid,
            'B 12345 01': error_invalid,
            'N 1234 12': error_invalid,
            'A 12 XYZ': error_invalid,
            'Q 1234 AB': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDLicensePlateField, valid, invalid)
| [
"root@lucid64.hsd1.ca.comcast.net"
] | root@lucid64.hsd1.ca.comcast.net |
2332fc6eb75e911cda4ac674a691601b23144c56 | cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde | /N/NumberofSubsequencesThatSatisfytheGivenSumCondition.py | 2b3ac4e3f31a679a7d7875712e7e3752b79dbc87 | [] | no_license | bssrdf/pyleet | 8861bbac06dfe0f0f06f6ad1010d99f8def19b27 | 810575368ecffa97677bdb51744d1f716140bbb1 | refs/heads/master | 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,195 | py | '''
-Medium-
You are given an array of integers nums and an integer target.
Return the number of non-empty subsequences of nums such that the sum of the minimum and maximum element on it is less or equal to target. Since the answer may be too large, return it modulo 109 + 7.
Example 1:
Input: nums = [3,5,6,7], target = 9
Output: 4
Explanation: There are 4 subsequences that satisfy the condition.
[3] -> Min value + max value <= target (3 + 3 <= 9)
[3,5] -> (3 + 5 <= 9)
[3,5,6] -> (3 + 6 <= 9)
[3,6] -> (3 + 6 <= 9)
Example 2:
Input: nums = [3,3,6,8], target = 10
Output: 6
Explanation: There are 6 subsequences that satisfy the condition. (nums can have repeated numbers).
[3] , [3] , [3,3], [3,6] , [3,6] , [3,3,6]
Example 3:
Input: nums = [2,3,3,4,6,7], target = 12
Output: 61
Explanation: There are 63 non-empty subsequences, two of them do not satisfy the condition ([6,7], [7]).
Number of valid subsequences (63 - 2 = 61).
Constraints:
1 <= nums.length <= 10^5
1 <= nums[i] <= 10^6
1 <= target <= 10^6
'''
from typing import List
import bisect
class Solution:
    def numSubseq(self, nums: List[int], target: int) -> int:
        """Count non-empty subsequences with min + max <= target, mod 1e9+7.

        After sorting, for each index ``i`` the value ``A[i]`` is the
        minimum of every subsequence whose smallest element is ``A[i]``.
        Let ``j`` be the rightmost index with ``A[i] + A[j] <= target``;
        any subset of ``A[i+1..j]`` may accompany ``A[i]``, contributing
        ``2**(j - i)`` subsequences.

        :param nums: list of positive integers (sorted in place).
        :param target: upper bound on min + max of a subsequence.
        :return: number of qualifying subsequences modulo 10**9 + 7.
        """
        A, n, mod = nums, len(nums), 10 ** 9 + 7
        A.sort()
        # Precompute 2**k % mod so we never materialize the huge integers
        # that `1 << idx` would build (idx can approach 1e5 bits).
        pow2 = [1] * n
        for k in range(1, n):
            pow2[k] = pow2[k - 1] * 2 % mod
        ans = 0
        for i in range(n):
            # Search the sorted list with lo=i instead of slicing A[i:],
            # which copied O(n) elements on every iteration (O(n^2) total).
            j = bisect.bisect_right(A, target - A[i], lo=i) - 1
            if j >= i:  # A[i] itself qualifies (2 * A[i] <= target)
                ans = (ans + pow2[j - i]) % mod
        return ans

    def numSubseq2(self, nums: List[int], target: int) -> int:
        """Two-pointer variant of :meth:`numSubseq`; same contract.

        With ``A`` sorted, shrink ``hi`` while ``A[lo] + A[hi] > target``;
        once the pair fits, every subset of ``A[lo+1..hi]`` joined with
        ``A[lo]`` is valid, so add ``2**(hi - lo)`` and advance ``lo``.
        Runs in O(n log n) (dominated by the sort).
        """
        A, n, mod = nums, len(nums), 10 ** 9 + 7
        A.sort()
        pow2 = [1] * n
        for k in range(1, n):
            pow2[k] = pow2[k - 1] * 2 % mod
        ans = 0
        lo, hi = 0, n - 1
        while lo <= hi:
            if A[lo] + A[hi] > target:
                hi -= 1
            else:
                ans = (ans + pow2[hi - lo]) % mod
                lo += 1
        return ans
if __name__ == "__main__":
    # Demo: run the three LeetCode examples through both implementations.
    demo_cases = (
        ([3, 5, 6, 7], 9),
        ([3, 3, 6, 8], 10),
        ([2, 3, 3, 4, 6, 7], 12),
    )
    solver = Solution()
    for sample, bound in demo_cases:
        print(solver.numSubseq(list(sample), bound))
    for sample, bound in demo_cases:
        print(solver.numSubseq2(list(sample), bound))
"merlintiger@hotmail.com"
] | merlintiger@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.