blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ead0e0a903999cb85caad2b76d28601ad31e19a1 | 590a4f34de0dd9b964b52e41d84cf3218609c9d4 | /climbingStairs.py | 1ca699fb1a7c1009fc8ee36c297063024d7130f0 | [] | no_license | zhrmrz/climbingStairs | 81d97c5b23b811116192ea27172170a9ff55aa68 | 0455ad42e56dca3a1ee66c121323faf90d1efc98 | refs/heads/master | 2020-04-23T03:50:25.962986 | 2019-02-15T15:52:32 | 2019-02-15T15:52:32 | 170,889,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | class Sol:
def climbing_stairs(self, numOfStairs):
    """Return the number of distinct ways to climb ``numOfStairs`` steps.

    Each move climbs one or two steps, so the count follows the Fibonacci
    recurrence ways(n) = ways(n - 1) + ways(n - 2) with ways(0) == ways(1) == 1.

    The previous naive double recursion re-solved the same subproblems and ran
    in exponential time; this iterative version returns identical values in
    O(n) time and O(1) space.
    """
    ways, ways_next = 1, 1  # ways(0), ways(1)
    for _ in range(numOfStairs):
        ways, ways_next = ways_next, ways + ways_next
    return ways
if __name__ == '__main__':
    # Demo: count the ways to climb a five-step staircase (prints 8).
    steps = 5
    solver = Sol()
    print(solver.climbing_stairs(steps))
| [
"noreply@github.com"
] | zhrmrz.noreply@github.com |
666e846a19ec40bfb77ca6238787a9f10d1e0bc6 | aaad70e69d37f92c160c07e4ca03de80becf2c51 | /filesystem/usr/lib/python3.6/typing.py | 38eba04e677eb46599d1e8690a84dbc547044f3f | [] | no_license | OSWatcher/ubuntu-server | 9b4dcad9ced1bff52ec9cdb4f96d4bdba0ad3bb9 | 17cb333124c8d48cf47bb9cec1b4e1305626b17a | refs/heads/master | 2023-02-10T18:39:43.682708 | 2020-12-26T01:02:54 | 2020-12-26T01:02:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | {
"MIME": "text/x-python",
"inode_type": "REG",
"magic_type": "Python script, ASCII text executable",
"mode": "-rw-r--r--",
"sha1": "827ca9e64ac471213db81b20b50191728a1e0db3"
} | [
"mathieu.tarral@protonmail.com"
] | mathieu.tarral@protonmail.com |
e7d02b3f6ce84fe8eae213ef42f6cb8167f57557 | 6f69ec5f27716b36d7a61b2d150408118bb65559 | /app/views/clients.py | 5dd16656b768657d3c7388690c44440372371d8c | [
"MIT"
] | permissive | m3ssana/memegen | a3c35a51cc7a45e9ab3556ee5665775b578985f8 | 20510753d7c6811a75295580f6fdb2c459124e7d | refs/heads/main | 2023-03-15T18:50:23.546416 | 2022-07-12T21:19:46 | 2022-07-12T21:19:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | from sanic import Blueprint, response
from sanic_ext import openapi
from .. import utils
from .helpers import preview_image
from .schemas import AuthResponse, ErrorResponse
blueprint = Blueprint("clients", url_prefix="/")
@blueprint.post("/auth")
@openapi.summary("Validate your API key")
@openapi.response(200, {"application/json": AuthResponse}, "Your API key is valid")
@openapi.response(401, {"application/json": ErrorResponse}, "Your API key is invalid")
async def validate(request):
    """Report whether the caller supplied a valid API key.

    Responds 200 with the account info when authentication succeeds,
    otherwise 401 with an error payload.
    """
    info = await utils.meta.authenticate(request)
    if info:
        return response.json(info, status=200)
    return response.json({"error": "API key missing or invalid."}, status=401)
@blueprint.get("/images/preview.jpg")
@openapi.summary("Display a preview of a custom meme")
@openapi.parameter("text[]", str, "query", description="Lines of text to render")
@openapi.parameter("style", str, "query", description="Style name or custom overlay")
@openapi.parameter(
    "template", str, "query", description="Template ID, URL, or custom background"
)
@openapi.response(200, {"image/jpeg": bytes}, "Successfully displayed a custom meme")
async def preview(request):
    """Render a live preview image from the request's query parameters."""
    args = request.args
    template_id = args.get("template", "_error")
    text_lines = args.getlist("text[]") or args.getlist("lines[]") or []
    style = args.get("style") or ",".join(args.getlist("styles[]", []))
    # Strip any number of redundant trailing ",default" style segments.
    while style.endswith(",default"):
        style = style.removesuffix(",default")
    return await preview_image(request, template_id, text_lines, style)
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com |
2525bb90231a226a0ea32ccb99249f17ba841247 | 60715c9ea4c66d861708531def532814eab781fd | /python-programming-workshop/list/24.listmethods.py | 4cee5ef936923f1209b1adfc70b9c2d7c66add57 | [] | no_license | bala4rtraining/python_programming | 6ce64d035ef04486f5dc9572cb0975dd322fcb3e | 99a5e6cf38448f5a01b310d5f7fa95493139b631 | refs/heads/master | 2023-09-03T00:10:26.272124 | 2021-11-01T08:20:52 | 2021-11-01T08:20:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py |
# Exercise the common list methods on a small list of fruit names.
fruits = "orange apple pear banana kiwi apple banana".split()
print(fruits[0])                  # first element
print(fruits.count('apple'))      # 'apple' occurs twice
print(fruits.count('tangerine'))  # not present -> 0
print(fruits.index('banana'))     # index of the first 'banana'
print(fruits.index('banana', 4))  # next 'banana', searching from position 4
print(fruits.reverse())           # reverse() works in place and returns None
print(fruits)
fruits.append('grape')
print(fruits)
fruits.sort()
print(fruits)
fruits.pop()
print(fruits)
fruits.remove('apple')
print(fruits)
| [
"karthikkannan@gmail.com"
] | karthikkannan@gmail.com |
c18ebb7f50f1f17388bcd0a8d07ad564c0918332 | e570b6d6ffee8b600d0047ff9eeb8eb671660a58 | /7. NLTK/NLTK Code.py | 40695e3ab519bbe04b320d5813447a5f4f4e4df1 | [] | no_license | ShashankSinha98/MachineLearning | 06619687bb83a47c9425dee3a5947747f49bbbb8 | 3b6e94107a8dad6bd25faef400bcc94ed1a77ca4 | refs/heads/master | 2022-07-09T14:33:44.674227 | 2020-05-12T19:27:22 | 2020-05-12T19:27:22 | 256,843,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | #!/usr/bin/env python
# coding: utf-8
# In[7]:
import nltk
# In[9]:
from nltk.corpus import brown
# In[10]:
# Corpus- Large collection of text
brown.categories()
# In[11]:
data = brown.sents(categories='fiction')
# In[18]:
print(" ".join(data[1]))
# In[19]:
from nltk.corpus import stopwords
sw = set(stopwords.words('english'))
# In[21]:
print(len(sw))
# In[22]:
def remove_stopwords(words,sw):
useful_words = []
useful_words = [w for w in words if w not in sw]
return useful_words
# In[24]:
setence = "I do not love her very much"
ans = remove_stopwords(setence.split(),sw)
print(ans)
# In[ ]:
| [
"34626597+ShashankSinha98@users.noreply.github.com"
] | 34626597+ShashankSinha98@users.noreply.github.com |
36cc8b3c58ca68674925a18aac8712498e09708e | d225ec04301d8abd681d68ad8d7316befc404bc1 | /ZShape/EffAcc/python/scans/EE_mean_Apr12_1251_cfg.py | e60ceb0aea80a1df554aef8fd6b6e2792995aa08 | [] | no_license | UMN-CMS/ZShape_Analysis | 372ea0083d6c0bda2dbba30322ef01269501afa8 | 83bff158b21210b048afbcff0af1e803780ad4bd | refs/heads/master | 2020-06-07T10:34:40.227684 | 2013-12-11T16:59:25 | 2013-12-11T16:59:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,424 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Zefficiency")
process.TimerService = cms.Service("TimerService")
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
SkipEvent = cms.untracked.vstring('ProductNotFound')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(30000)
)
process.source = cms.Source("PoolSource",
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring("file:/home/jmmans/data/zshape/Summer11_DYToEE_M-20_CT10_TuneZ2_7TeV-powheg-pythia/F61A0CD6-9AA8-E011-A92B-0024E8769B05.root" )
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('histo_10M_partBUILDINGTTEST.root')
)
process.f2s = cms.EDProducer("ZFullSim2Event"
)
import ZShape.EffAcc.FullSimSmearedElectronProducer_cfi
import ZShape.EffAcc.ZEfficiencyKevin_cfi
process.EEXmeanX0X989Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone()
process.EEXmeanX0X989Xsmear.EE.mean = cms.double(0.989)
process.EEXmeanX0X989 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone()
process.EEXmeanX0X989.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3")
process.EEXmeanX0X989.zsrc = cms.untracked.InputTag("EEXmeanX0X989Xsmear","ZEventParticles")
process.EEXmeanX0X989.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X989Xsmear","ZEventParticles")
process.EEXmeanX0X990Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone()
process.EEXmeanX0X990Xsmear.EE.mean = cms.double(0.990)
process.EEXmeanX0X990 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone()
process.EEXmeanX0X990.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3")
process.EEXmeanX0X990.zsrc = cms.untracked.InputTag("EEXmeanX0X990Xsmear","ZEventParticles")
process.EEXmeanX0X990.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X990Xsmear","ZEventParticles")
process.EEXmeanX0X991Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone()
process.EEXmeanX0X991Xsmear.EE.mean = cms.double(0.991)
process.EEXmeanX0X991 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone()
process.EEXmeanX0X991.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3")
process.EEXmeanX0X991.zsrc = cms.untracked.InputTag("EEXmeanX0X991Xsmear","ZEventParticles")
process.EEXmeanX0X991.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X991Xsmear","ZEventParticles")
process.EEXmeanX0X992Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone()
process.EEXmeanX0X992Xsmear.EE.mean = cms.double(0.992)
process.EEXmeanX0X992 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone()
process.EEXmeanX0X992.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3")
process.EEXmeanX0X992.zsrc = cms.untracked.InputTag("EEXmeanX0X992Xsmear","ZEventParticles")
process.EEXmeanX0X992.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X992Xsmear","ZEventParticles")
process.EEXmeanX0X993Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone()
process.EEXmeanX0X993Xsmear.EE.mean = cms.double(0.993)
process.EEXmeanX0X993 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone()
process.EEXmeanX0X993.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3")
process.EEXmeanX0X993.zsrc = cms.untracked.InputTag("EEXmeanX0X993Xsmear","ZEventParticles")
process.EEXmeanX0X993.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X993Xsmear","ZEventParticles")
process.EEXmeanX0X994Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone()
process.EEXmeanX0X994Xsmear.EE.mean = cms.double(0.994)
process.EEXmeanX0X994 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone()
process.EEXmeanX0X994.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3")
process.EEXmeanX0X994.zsrc = cms.untracked.InputTag("EEXmeanX0X994Xsmear","ZEventParticles")
process.EEXmeanX0X994.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X994Xsmear","ZEventParticles")
process.EEXmeanX0X995Xsmear = ZShape.EffAcc.FullSimSmearedElectronProducer_cfi.FullSimSmearedElectronsProducer.clone()
process.EEXmeanX0X995Xsmear.EE.mean = cms.double(0.995)
process.EEXmeanX0X995 = ZShape.EffAcc.ZEfficiencyKevin_cfi.mcEff.clone()
process.EEXmeanX0X995.zTreeLevelElectronsCollection = cms.untracked.InputTag("f2s","ZEventEle3")
process.EEXmeanX0X995.zsrc = cms.untracked.InputTag("EEXmeanX0X995Xsmear","ZEventParticles")
process.EEXmeanX0X995.zElectronsCollection = cms.untracked.InputTag("EEXmeanX0X995Xsmear","ZEventParticles")
process.load("RecoEgamma.EgammaHFProducers.hfEMClusteringSequence_cff")
process.p = cms.Path(process.hfRecoEcalCandidate+process.f2s
+ process.EEXmeanX0X989Xsmear
+ process.EEXmeanX0X989
+ process.EEXmeanX0X990Xsmear
+ process.EEXmeanX0X990
+ process.EEXmeanX0X991Xsmear
+ process.EEXmeanX0X991
+ process.EEXmeanX0X992Xsmear
+ process.EEXmeanX0X992
+ process.EEXmeanX0X993Xsmear
+ process.EEXmeanX0X993
+ process.EEXmeanX0X994Xsmear
+ process.EEXmeanX0X994
+ process.EEXmeanX0X995Xsmear
+ process.EEXmeanX0X995
)
| [
"klapoetke@physics.umn.edu"
] | klapoetke@physics.umn.edu |
8064eb59894b1a18df1ff8998010971ec3b593f0 | 9cabdeb8dce5718e8f4f490f3684eba0eb1f2d2e | /test/functional/minchainwork.py | 159c107cadbc5d27259a8c9fbe3ae4678e647321 | [
"MIT"
] | permissive | wolfoxonly/woo | fcfe275007cb102fff10239b0f722264dbbd40e2 | a5fb13575afe855b58915bd8e15cbffb9015e5e2 | refs/heads/master | 2020-03-09T17:00:57.668308 | 2018-05-13T15:21:17 | 2018-05-13T15:21:17 | 127,590,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The Woochain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import WoochainTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(WoochainTestFramework):
    """Functional test of the -minimumchainwork node option (see module docstring)."""
    def set_test_params(self):
        """Configure a 3-node clean chain; nodes 1 and 2 require 0x65 (101) chain work."""
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
        self.node_min_work = [0, 101, 101]
    def setup_network(self):
        """Connect the nodes manually in a line: node0 <- node1 <- node2."""
        # This test relies on the chain setup being:
        # node0 <- node1 <- node2
        # Before leaving IBD, nodes prefer to download blocks from outbound
        # peers, so ensure that we're mining on an outbound peer and testing
        # block relay to inbound peers.
        self.setup_nodes()
        for i in range(self.num_nodes-1):
            connect_nodes(self.nodes[i+1], i)
    def run_test(self):
        """Mine on node0 and verify node1 withholds relay until its minchainwork is exceeded."""
        # Start building a chain on node0. node2 shouldn't be able to sync until node1's
        # minchainwork is exceeded
        starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
        self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
        starting_blockcount = self.nodes[2].getblockcount()
        num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
        self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
        hashes = self.nodes[0].generate(num_blocks_to_generate)
        self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
        # Sleep a few seconds and verify that node2 didn't get any new blocks
        # or headers.  We sleep, rather than sync_blocks(node0, node1) because
        # it's reasonable either way for node1 to get the blocks, or not get
        # them (since they're below node1's minchainwork).
        time.sleep(3)
        self.log.info("Verifying node 2 has no more blocks than before")
        self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
        # Node2 shouldn't have any new headers yet, because node1 should not
        # have relayed anything.
        assert_equal(len(self.nodes[2].getchaintips()), 1)
        assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
        assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
        assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
        self.log.info("Generating one more block")
        self.nodes[0].generate(1)
        self.log.info("Verifying nodes are all synced")
        # Because nodes in regtest are all manual connections (eg using
        # addnode), node1 should not have disconnected node0. If not for that,
        # we'd expect node1 to have disconnected node0 for serving an
        # insufficient work chain, in which case we'd need to reconnect them to
        # continue the test.
        self.sync_all()
        self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
    # Entry point: run the functional test via the framework's main() driver.
    MinimumChainWorkTest().main()
| [
"415313577@qq.com"
] | 415313577@qq.com |
f1b6352f34d1c818206ce4aea972ca832c438851 | 208d8a40d1497b1623a9ea78ece4a493e5182661 | /redwood/settings.py | a34acff441c423ca7fc5296cf908b114ce437a42 | [] | no_license | muremwa/RedWood | 273de7c5edfac3b43c4e91f03921062375312912 | c0735999f517b280de8211022daeead4ea45bb17 | refs/heads/master | 2021-07-08T02:17:09.659659 | 2020-08-09T15:33:51 | 2020-08-09T15:33:51 | 166,394,652 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,454 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_o^^x=d#@7ufl_g(9ef!wbz8i042gg)_9lyox!vk7p332sq_mr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): "*" disables Django's Host-header validation; replace with the
# real hostnames before deploying to production.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'watch.apps.WatchConfig',
'accounts.apps.AccountsConfig',
'staff.apps.StaffConfig',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
ROOT_URLCONF = 'redwood.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'redwood.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'mysql.connector.django',
'OPTIONS': {
'option_files': 'smile.cnf'
},
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# comment out when debug is true
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "redwood/static"),
)
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/media/'
| [
"danmburu254@gmail.com"
] | danmburu254@gmail.com |
d80297e71294a37b02f17f59041029e8e7f646a4 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/PhysicsAnalysis/PhysicsValidation/PhysValMonitoring/share/PhysValSUSY_jobOptions.py | 35e75db8b1b928b6f74ab87eb24a10ea2f37c756 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,813 | py | ### configuration
run_default_rel19 = True
run_migrated_rel17_tools = True
rel17_use_separate_output = False
run_other_tools = {
'MET' : True,
'Muon' : True,
'Tau' : True,
'InDet': False,
}
### print configuration
print "PhysValSUSY job options:"
print " Running default release 19 tool : %s" % str(run_default_rel19)
print " Running migrated release 17 tool : %s" % str(run_migrated_rel17_tools)
print " output in separate file : %s" % str(rel17_use_separate_output)
print " Running following other tools : %s" % str(run_other_tools)
### consistency checks
# avoid tools being added twice
for other_tool_name in [other_tool for other_tool, run_this in run_other_tools.items() if run_this is True]:
if "do" + other_tool_name in vars():
if vars()["do" + other_tool_name] is True:
print "Configuration warning: do%s was already set active, removing from configuration." % other_tool_name
run_other_tools[other_tool_name] = False
### perform initialization / add tools
if run_default_rel19:
# add default tool
from SUSYPhysValMonitoring.SUSYPhysValMonitoringConf import SUSYPhysVal__SUSYPhysValMonitoring
tool1 = SUSYPhysVal__SUSYPhysValMonitoring()
tool1.EnableLumi = False
tool1.OutputLevel = INFO
tool1.DetailLevel = 10
tool1.UseTruthInformation = True
from AthenaCommon.AppMgr import ToolSvc
ToolSvc += tool1
monMan = CfgMgr.AthenaMonManager("PhysValMonManager")
monMan.AthenaMonTools += [ tool1 ]
for run_other_tool in [other_tool for other_tool, run_this in run_other_tools.items() if run_this is True]:
# add "external tools"
include("PhysValMonitoring/PhysVal" + run_other_tool + "_jobOptions.py")
if run_migrated_rel17_tools:
# add migrated tool (main part of old release 17 validation package)
### imports
from SUSYPhysValMonitoring.SUSYPhysValMonitoringConf import *
doTrigger = False
doTruth = False
# specify input container names
TrigDecisionTool = "TrigDecisionTool"
McEventCollection = "GEN_AOD"
TrackParticleContainer = "TrackParticleCandidate"
JetContainer = "AntiKt4EMTopoJets" ##leave off the suffix "Jets"
ElectronContainer = "ElectronCollection"
MuonContainer = "Muons"
TauJetContainer = "TauRecContainer"
MissingET = "MET_RefFinal"
# for 17.2.1 (SUSYD3PDMaker-00-12-00)
# SUSY_MET_name = "MET_RefFinal_Simplified20" # MET name to be used by SUSYSusyRec.cxx
# SUSY_MET_muons_name = "MET_Muon_Total_Staco_Simplified20" # MET muons contribution name to be used by SUSYSusyRec.cxx
# for 17.2.7.5.9 (SUSYD3PDMaker-00-12-36)
#SUSY_MET_name = "MET_RefFinal_Egamma10NoTau" # MET name to be used by SUSYSusyRec.cxx
#SUSY_MET_muons_name = "MET_Muon_Staco_Egamma10NoTau" # MET muons contribution name to be used by SUSYSusyRec.cxx
SUSY_MET_name = "Final"
SUSY_MET_muons_name = "Muons"
# init and add tool
SusyPlot = SUSYPlot("SusyPlot")
SusyPlot.DoTrigger = doTrigger
SusyPlot.DoTruth = doTruth
SusyPlot.HistToolKeys = [ "SUSYSusyRec/susyTool" ]
SusyPlot += SUSYSusyRec("susyTool")
SusyPlot.susyTool.ElectronName = ElectronContainer
SusyPlot.susyTool.MuonName = MuonContainer
SusyPlot.susyTool.TauName = TauJetContainer
SusyPlot.susyTool.JetName = JetContainer
SusyPlot.susyTool.MetName = MissingET
SusyPlot.susyTool.SUSYMissingET = SUSY_MET_name
SusyPlot.susyTool.SUSYMissingETMuons = SUSY_MET_muons_name
SusyPlot.susyTool.McEventName = McEventCollection
SusyPlot.susyTool.OutputLevel = INFO
if doTrigger:
SusyPlot.TrigDecisionTool = ToolSvc.TrigDecisionTool
SusyPlot.susyTool.PtLeptonCut = 20*GeV
SusyPlot.susyTool.PtLeptonPreCut = 20*GeV
SusyPlot.susyTool.EtIsolCut = 10*GeV
SusyPlot.susyTool.EtMissCut = 80*GeV
SusyPlot.susyTool.etaJetCut = 3
SusyPlot.susyTool.PtJetCut = 20*GeV
SusyPlot.susyTool.PtJet0Cut = 60*GeV
SusyPlot.susyTool.PtJet1Cut = 30*GeV
SusyPlot.susyTool.MeffCut = 500*GeV
SusyPlot.susyTool.MTCut = 100*GeV
SusyPlot.susyTool.STCut = 0.2
topSequence += SusyPlot
if rel17_use_separate_output:
### if you want to write histograms to separate file use this:
from AthenaCommon.AppMgr import theApp
theApp.HistogramPersistency = "ROOT"
from AthenaCommon.AppMgr import ServiceMgr
## The string "TestMon" in the argument below is the 'FileKey'
## used by Athena to access the output file internally
svcMgr.THistSvc.Output += ["PhysVal2 DATAFILE='hist.root' OPT='RECREATE'"]
svcMgr.THistSvc.PrintAll = True
svcMgr.THistSvc.OutputLevel = DEBUG
else:
SusyPlot.susyTool.HistBaseDirectory = "/PhysVal/SUSY/ETmiss/"
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
8fd2b5abe11973bb39a5ea5414185d6020233a5c | 8f73125d816f3b44b03159dba272e095f37c1f0c | /scripts/GC.py | e44c355231dd8f230cc820456915ffc1d42efce0 | [] | no_license | tarah28/nanopore | 356b218e5ca3dfb98e4dd7232d8f1c6303f899d1 | ec716ee15ab26d7bf33b7f7352ab8cad1c369ae8 | refs/heads/master | 2021-05-27T06:21:51.958938 | 2014-09-10T11:36:07 | 2014-09-10T11:36:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/python
import sys
from Bio import SeqIO
from Bio.SeqUtils import GC
for rec in SeqIO.parse(open(sys.argv[1]), "fasta"):
print GC(rec.seq)
| [
"n.j.loman@bham.ac.uk"
] | n.j.loman@bham.ac.uk |
6fe58ce1fb865313489a03dc9cbef4f19f953283 | 2b502aae9bc33bac6c4b28d1e702591f2cbed690 | /terrascript/resource/dns.py | 8fd238ad486f0695dd33eb92a6bb8da3554771cd | [
"Python-2.0",
"BSD-2-Clause"
] | permissive | LeeroyC710/python-terrascript | 4c8fbe032e9b7dd8844d962f888c28f87a26ff77 | b8f3c3549b149c124e3e48e0cea0396332ad1a1d | refs/heads/master | 2020-12-28T03:58:04.502969 | 2020-01-19T21:46:52 | 2020-01-19T21:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # terrascript/resource/dns.py
import terrascript
class dns_a_record_set(terrascript.Resource):
pass
class dns_aaaa_record_set(terrascript.Resource):
pass
class dns_cname_record(terrascript.Resource):
pass
class dns_mx_record_set(terrascript.Resource):
pass
class dns_ns_record_set(terrascript.Resource):
pass
class dns_ptr_record(terrascript.Resource):
pass
class dns_srv_record_set(terrascript.Resource):
pass
class dns_txt_record_set(terrascript.Resource):
pass
__all__ = [
'dns_a_record_set',
'dns_aaaa_record_set',
'dns_cname_record',
'dns_mx_record_set',
'dns_ns_record_set',
'dns_ptr_record',
'dns_srv_record_set',
'dns_txt_record_set',
] | [
"markus@juenemann.net"
] | markus@juenemann.net |
21877d77b4b1bf26d4c63363609811615fcedfb2 | 4c852fab792606580acb3f3a61b7f86ae25930b0 | /Python/UoM/3-UsingPythonToAccessWebData/Assignments/wk06/json2.py | d85bd8bffe3c3a47e3bf9ddff4a4707c8d4955fa | [] | no_license | hmchen47/Programming | a9767a78a35c0844a1366391f48b205ff1588591 | 9637e586eee5c3c751c96bfc5bc1d098ea5b331c | refs/heads/master | 2022-05-01T01:57:46.573136 | 2021-08-09T04:29:40 | 2021-08-09T04:29:40 | 118,053,509 | 2 | 1 | null | 2021-09-20T19:54:02 | 2018-01-19T00:06:04 | Python | UTF-8 | Python | false | false | 358 | py | import json
input = '''
[
{ "id" : "001",
"x" : "2",
"name" : "Chuck"
} ,
{ "id" : "009",
"x" : "7",
"name" : "Chuck"
}
]'''
info = json.loads(input)
# print json.dumps(info, indent = 4)
print 'User count:', len(info)
for item in info:
print 'Name', item['name']
print 'Id', item['id']
print 'Attribute', item['x']
| [
"h.m.chen@ieee.org"
] | h.m.chen@ieee.org |
76d227e325f5ae99474dd87d0bb5ad3011dba504 | c0e9fe97583b8d431064e9bc382f8d4e4b7c2ad4 | /utils/import_gender.py | 5134a5acc0014f9de1fe89a49066b06d33c3aa64 | [] | no_license | satoriforos/data-api-website | 4f21c58702b9586ebc9aea1e6d1db9beb35da190 | 3fad87754568201c1a2dc345227837a1096d2513 | refs/heads/master | 2020-08-06T04:08:53.695134 | 2019-10-04T13:59:34 | 2019-10-04T13:59:34 | 212,810,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,043 | py | #!/usr/bin/env python3
import pandas as pd
from pathlib import Path
from settings.settings import settings
from modules.databasemanager.DatabaseManager import DatabaseManager
from modules.geolocation.UsCounty import UsCounty
from modules.geolocation.City import City
from modules.geolocation.UsState import UsState
from modules.geolocation.Country import Country
from modules.usdemographics.GenderCounty import GenderCounty
def get_database_connection(mysql_settings):
    """Build a DatabaseManager connection from the given mysql settings mapping."""
    return DatabaseManager(
        host=mysql_settings["server"],
        port=mysql_settings["port"],
        user=mysql_settings["username"],
        password=mysql_settings["password"],
        db=mysql_settings["schema"],
        charset=mysql_settings["charset"],
    )
database_manager = get_database_connection(settings["mysql"])
us_counties = database_manager.fetch_all(UsCounty(database_manager))
cities = database_manager.fetch_all(City(database_manager))
us_states = database_manager.fetch_all(UsState(database_manager))
countries = database_manager.fetch_all(Country(database_manager))
country_id = None
for country in countries:
if country.code == "US":
country_id = country.id
break
file_paths = [
Path("~/Downloads/County Demographic Datasets/SEX01.xls"),
]
header_translations = {
"Area_name": "city_state",
"STCOU": "county_code",
"SEX150200D": "males_2000",
"SEX150201D": "males_2001",
"SEX150202D": "males_2002",
"SEX150203D": "males_2003",
"SEX150204D": "males_2004",
"SEX150205D": "males_2005",
"SEX150206D": "males_2006",
"SEX150207D": "males_2007",
"SEX150208D": "males_2008",
"SEX150209D": "males_2009",
"SEX100210D": "males_2010",
"SEX250200D": "females_2000",
"SEX250201D": "females_2001",
"SEX250202D": "females_2002",
"SEX250203D": "females_2003",
"SEX250204D": "females_2004",
"SEX250205D": "females_2005",
"SEX250206D": "females_2006",
"SEX250207D": "females_2007",
"SEX250208D": "females_2008",
"SEX250209D": "females_2009",
"SEX200210D": "females_2010",
}
headers = list(header_translations.keys())
excel_files = [
pd.ExcelFile(file_path.expanduser().as_posix())
for file_path in file_paths
]
sheets = []
for excel_file in excel_files:
sheet_names = excel_file.sheet_names
for sheet_name in sheet_names:
sheets.append(pd.read_excel(excel_file, sheet_name))
gender_data = []
for i in range(0, sheets[0].shape[0]):
gender_row = GenderCounty(database_manager)
gender_row.country_id = country_id
for sheet in sheets:
for input_header, output_header in header_translations.items():
if input_header != "Area_name" and input_header != "STCOU":
if input_header in sheet.keys():
setattr(gender_row, output_header, int(sheet[input_header][i]))
if gender_row.county_code is None:
gender_row.country_id = country_id
city_state = sheet["Area_name"][i].split(", ")
gender_row.county_name = city_state[0]
gender_row.state_id = None
gender_row.state_code = None
if len(city_state) > 1:
for state in us_states:
if state.code == city_state[1].upper():
gender_row.state_id = state.id
gender_row.state_code = state.code
break
else:
for state in us_states:
if state.code.upper() == sheet["Area_name"][i]:
gender_row.state_id = state.id
gender_row.county_name = None
break
gender_row.county_code = int(sheet["STCOU"][i])
gender_data.append(gender_row)
#for gender_row in gender_data:
# gender_row.database_manager = database_manager
#for gender_row in gender_data:
# database_manager.insert(gender_row)
database_manager.insert_many(gender_data)
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
a619a2370f98ade6097f5eee217494a59fb8c848 | 026991d5749c55910f4c33cc6f35a778494ef89e | /Laboratory Works/Lab_7/Problems/Informatics/Loops/For/335.py | 86aa7d0128aee684c9b5ea93d22e666baa62e8a3 | [
"MIT"
] | permissive | diable201/WEB-development | e832df0e35a837fc5464d6b0dada1c8fd8c9783b | 370bd731b9a65a1658033a60c63abece11d4e259 | refs/heads/master | 2023-06-02T03:46:20.786310 | 2021-06-28T15:57:11 | 2021-06-28T15:57:11 | 336,358,294 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from math import sqrt
a = int(input())
b = int(input())
result = []
for i in range(a, b + 1):
j = int(sqrt(i))
if j * j == i:
result.append(i)
print(' '.join(str(i) for i in result))
| [
"diable201@protonmail.com"
] | diable201@protonmail.com |
f3368b9c95ad10ebfa7c6e9990af3b273864b5ed | 0e25538b2f24f1bc002b19a61391017c17667d3d | /cmdt/win_cmdtdirectory.py | e380d3bf0c25daf67520c700f8e97391f780840d | [] | no_license | trondhindenes/Ansible-Auto-Generated-Modules | 725fae6ba9b0eef00c9fdc21179e2500dfd6725f | efa6ac8cd2b545116f24c1929936eb8cc5c8d337 | refs/heads/master | 2020-04-06T09:21:00.756651 | 2016-10-07T07:08:29 | 2016-10-07T07:08:29 | 36,883,816 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,378 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# <COPYRIGHT>
# <CODEGENMETA>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_cmdtdirectory
version_added:
short_description: Generated from DSC module cmdt version 1.0.0.4 at 07.10.2016 00.47.03
description:
- A DSC Module for installing Microsoft Deployment Toolkit
options:
Ensure:
description:
-
required: True
default:
aliases: []
choices:
- Absent
- Present
Name:
description:
-
required: True
default:
aliases: []
Path:
description:
-
required: True
default:
aliases: []
PSDriveName:
description:
-
required: False
default:
aliases: []
PSDrivePath:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_username:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_password:
description:
-
required: False
default:
aliases: []
AutoInstallModule:
description:
- If true, the required dsc resource/module will be auto-installed using the Powershell package manager
required: False
default: false
aliases: []
choices:
- true
- false
AutoConfigureLcm:
description:
- If true, LCM will be auto-configured for directly invoking DSC resources (which is a one-time requirement for Ansible DSC modules)
required: False
default: false
aliases: []
choices:
- true
- false
| [
"trond@hindenes.com"
] | trond@hindenes.com |
bc2906b349072602366dcb6b45306532d65f9503 | ddd993057174b52a9c4ecffddda655504ccc2366 | /src/main/python/systemds/operator/algorithm/builtin/img_posterize.py | c6f2b41dcd3bc80e6a8d7069d6d0509a782f2bbc | [
"Apache-2.0"
] | permissive | atefeh-asayesh/systemds | 68840e3e8005d5bff3e76aeed811c7ab1cb89e8f | 96733360c8f600355d5600f2edb8960ba1d47861 | refs/heads/master | 2023-08-04T18:23:56.076995 | 2021-09-27T08:41:40 | 2021-09-27T08:41:40 | 368,129,199 | 0 | 0 | Apache-2.0 | 2021-06-08T20:22:08 | 2021-05-17T09:29:42 | Java | UTF-8 | Python | false | false | 1,694 | py | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/img_posterize.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def img_posterize(img_in: Matrix,
bits: int):
"""
:param img_in: Input image
:param bits: The number of bits keep for the values.
:param 1: and white, 8 means every integer between 0 and 255.
:return: 'OperationNode' containing
"""
params_dict = {'img_in': img_in, 'bits': bits}
return Matrix(img_in.sds_context,
'img_posterize',
named_input_nodes=params_dict)
| [
"baunsgaard@tugraz.at"
] | baunsgaard@tugraz.at |
7fb067b74a39322835924b99508c932a474ff19d | 8c209079e798c53a5a149613de06f96d10ad756a | /backend/tst_al_11171_dev_15203/wsgi.py | 3faa1e00012a46d702dc1e691a81218d24ab281d | [] | no_license | crowdbotics-apps/tst-al-11171-dev-15203 | d415d3a80eca99c6f76740db887de1345bdf1306 | c7e456053f15411f8e6635d4888520e335c5980b | refs/heads/master | 2023-01-13T20:57:17.850058 | 2020-11-17T22:26:37 | 2020-11-17T22:26:37 | 313,758,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
WSGI config for tst_al_11171_dev_15203 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tst_al_11171_dev_15203.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
8af796943db03458872ba948f02d5d4666aa68ea | 284c49f1a514088cd25b29375c1e2c6c2b5d9628 | /src/conf/gunicorn_conf.py | f4b6a9b80ee828bf0a8acb8d2f75d143b7cd272d | [] | no_license | oscar6echo/redirect-server | 2397578680c194c086f0d456da862f71f004f95c | d72ae59ec32595519f976d46b0556b5370c2936e | refs/heads/master | 2020-04-26T12:35:11.232084 | 2020-03-16T10:32:15 | 2020-03-16T10:32:15 | 173,554,405 | 0 | 0 | null | 2019-05-28T15:38:37 | 2019-03-03T09:23:41 | Vue | UTF-8 | Python | false | false | 290 | py |
bind = '0.0.0.0:5000'
backlog = 2048
workers = 1
worker_class = 'sync'
worker_connections = 1000
timeout = 30
keepalive = 2
daemon = False
errorlog = '-'
loglevel = 'debug'
accesslog = '-'
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
reload = True
| [
"olivier.borderies@gmail.com"
] | olivier.borderies@gmail.com |
c8eb4caf29ca60835a43044d8d8a3a9e735b9d52 | ccc545fb3f4107741c715b41976b72177bea0957 | /encoding/functions/basic.py | e7cc2e09ae4b16466ec8c5af8f6ee0388792d906 | [
"MIT"
] | permissive | lfz/PyTorch-Encoding | 98eada3a73da6f6f7bed08e1a079c9071f1638a4 | dbcae04e3fb29417fbafcb2cc96d87def3739400 | refs/heads/master | 2021-05-02T11:27:54.213818 | 2018-02-08T15:27:36 | 2018-02-08T15:27:36 | 120,778,758 | 1 | 0 | null | 2018-02-08T15:24:08 | 2018-02-08T15:24:07 | null | UTF-8 | Python | false | false | 7,871 | py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import threading
import torch
import torch.nn.functional as F
from torch.autograd import Function, Variable
__all__ = ['squeeze_each', 'view_each', 'multi_each', 'sum_each',
'cat_each', 'upsample', 'dropout', 'relu']
def squeeze_each(x, dim=None):
"""Multi-GPU version torch. squeeze()
"""
y = []
for i in range(len(x)):
if dim is None:
y.append(x[i].squeeze())
else:
y.append(x[i].squeeze(dim))
return y
def view_each(x, size):
"""Multi-GPU version torch.view
Returns a new tensor with the same data but different size.
The returned tensor shares the same data and must have the same number
of elements, but may have a different size. A tensor must be
:attr:`contiguous` to be viewed.
Args:
input: list of multi-gpu tensors
size (torch.Size or int...): Desired size
"""
y = []
for i in range(len(x)):
y.append(x[i].view(size))
return y
def multi_each(a, b):
"""Multi-GPU version multiplication
.. math::
y[i] = a[i] * b[i]
"""
y = []
for i in range(len(a)):
y.append(a[i] * b[i])
return y
def sum_each(x, y):
"""Multi-GPU version torch.add
.. math::
y[i] = a[i] + b[i]
"""
assert(len(x)==len(y))
z = []
for i in range(len(x)):
z.append(x[i]+y[i])
return z
def cat_each(x1, x2, dim):
"""Multi-GPU version torch.cat
.. math::
y[i] = torch.cat(a[i], b[i], dim)
"""
assert(len(x1)==len(x2))
z = []
for i in range(len(x1)):
with torch.cuda.device_of(x1[i]):
x = torch.cat((x1[i], x2[i]), dim)
z.append(x)
return z
def dict_to_list(x):
"""Converting Dict{} to list[]
"""
y = []
for i in range(len(x)):
xi = x[i]
if isinstance(xi, Exception):
raise xi
y.append(xi)
return y
def upsample(input, size=None, scale_factor=None, mode='nearest'):
"""Multi-GPU version torch.nn.functional.upsample
Upsamples the input to either the given :attr:`size` or the given
:attr:`scale_factor`
The algorithm used for upsampling is determined by :attr:`mode`.
Currently temporal, spatial and volumetric upsampling are supported, i.e.
expected inputs are 3-D, 4-D or 5-D in shape.
The input dimensions are interpreted in the form:
`mini-batch x channels x [depth] x [height] x width`
The modes available for upsampling are: `nearest`, `linear` (3D-only),
`bilinear` (4D-only), `trilinear` (5D-only)
Args:
input (Variable): input
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
output spatial size.
scale_factor (int): multiplier for spatial size. Has to be an integer.
mode (string): algorithm used for upsampling:
'nearest' | 'linear' | 'bilinear' | 'trilinear'. Default: 'nearest'
"""
if isinstance(input, Variable):
return F.upsample(input, size=size, scale_factor=scale_factor,
mode=mode)
elif isinstance(input, tuple) or isinstance(input, list):
lock = threading.Lock()
results = {}
def _worker(i, x):
try:
with torch.cuda.device_of(x):
result = F.upsample(x, size=size, \
scale_factor=scale_factor,mode=mode)
with lock:
results[i] = result
except Exception as e:
with lock:
resutls[i] = e
# multi-threading for different gpu
threads = [threading.Thread(target=_worker,
args=(i, x),
)
for i, (x) in enumerate(input)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
outputs = dict_to_list(results)
return outputs
else:
raise RuntimeError('unknown input type')
def dropout(input, p=0.5, training=False, inplace=True):
"""Multi-GPU version torch.nn.functional.droupout
The channels to zero-out are randomized on every forward call.
*Usually the input comes from Conv2d modules.*
As described in the paper
`Efficient Object Localization Using Convolutional Networks`,
if adjacent pixels within feature maps are strongly correlated
(as is normally the case in early convolution layers) then iid dropout
will not regularize the activations and will otherwise just result
in an effective learning rate decrease.
In this case, :func:`nn.Dropout2d` will help promote independence between
feature maps and should be used instead.
Args:
p (float, optional): probability of an element to be zeroed.
inplace (bool, optional): If set to True, will do this operation
in-place
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
"""
if isinstance(input, Variable):
return F.dropout(input, p, training, inplace)
elif isinstance(input, tuple) or isinstance(input, list):
lock = threading.Lock()
results = {}
def _worker(i, x):
try:
with torch.cuda.device_of(x):
result = F.dropout(x, p, training, inplace)
with lock:
results[i] = result
except Exception as e:
with lock:
resutls[i] = e
# multi-threading for different gpu
threads = [threading.Thread(target=_worker,
args=(i, x),
)
for i, (x) in enumerate(input)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
outputs = dict_to_list(results)
return outputs
else:
raise RuntimeError('unknown input type')
def relu(input, inplace=False):
"""Multi-GPU version torch.nn.functional.relu
Applies the rectified linear unit function element-wise
:math:`{ReLU}(x)= max(0, x)`
Args:
inplace: can optionally do the operation in-place. Default: False
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
"""
if isinstance(input, Variable):
return F.relu(input, inplace)
elif isinstance(input, tuple) or isinstance(input, list):
lock = threading.Lock()
results = {}
def _worker(i, x):
try:
with torch.cuda.device_of(x):
result = F.relu(x, inplace)
with lock:
results[i] = result
except Exception as e:
with lock:
resutls[i] = e
# multi-threading for different gpu
threads = [threading.Thread(target=_worker,
args=(i, x),
)
for i, (x) in enumerate(input)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
outputs = dict_to_list(results)
return outputs
else:
raise RuntimeError('unknown input type')
| [
"zhang.hang@rutgers.edu"
] | zhang.hang@rutgers.edu |
ecde5f0d489cadb5227aa49e7e8fe29890b68494 | 9680800074ee2f50c7f9573076f0414b0b37cc70 | /backend/home/migrations/0003_test.py | f8d4b6538dd97edf54edbec05074b17072795872 | [] | no_license | crowdbotics-apps/testnewmobile-dev-1517 | 6bc6b0df8bef375dd13a750ad4f4894e55465482 | 6eeb801f949dac5d47391fdef4c99c3c2750b19a | refs/heads/master | 2022-11-18T04:01:52.930493 | 2020-07-15T23:38:56 | 2020-07-15T23:38:56 | 234,537,030 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | # Generated by Django 2.2.9 on 2020-01-23 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("home", "0002_load_initial_data"),
]
operations = [
migrations.CreateModel(
name="Test",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("test", models.DecimalField(decimal_places=10, max_digits=30)),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
235a3f420a9e5d6da034ccc5fa3786a231a5c5c5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_mixing.py | 3fd7241702edc77070a2b1628ae489ab3e937317 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
#calss header
class _MIXING():
def __init__(self,):
self.name = "MIXING"
self.definitions = mix
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['mix']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
518d57e4b6824f9d2d23efade951dfe404bad0ca | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_waivers.py | 4561725c43217e00c698770290379ce1da4c364a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _WAIVERS():
def __init__(self,):
self.name = "WAIVERS"
self.definitions = waiver
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['waiver']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ef9227af50c4ba29def9aa46fa510d5b13377a44 | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/mobile_api/urls.py | a6fec8dd978ca0c55326ed76d877e0eff6cfc7b0 | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 341 | py | """
URLs for mobile API
"""
from django.conf.urls import include, url
from .users.views import my_user_info
urlpatterns = [
url(r'^users/', include('lms.djangoapps.mobile_api.users.urls')),
url(r'^my_user_info', my_user_info, name='user-info'),
url(r'^course_info/', include('lms.djangoapps.mobile_api.course_info.urls')),
]
| [
"rafael.luque@osoco.es"
] | rafael.luque@osoco.es |
d5da1b70e6407c1638c9816437723719580a57d4 | 70730512e2643833e546e68761ee6cd3d7b95e1d | /01-python基础/code/day14/module01.py | be1e6837225cdfe1bbc42fd6506d29dd55e3f212 | [] | no_license | Yuchen1995-0315/review | 7f0b0403aea2da62566642c6797a98a0485811d1 | 502859fe11686cc59d2a6d5cc77193469997fe6a | refs/heads/master | 2020-08-26T23:16:33.193952 | 2019-10-24T00:30:32 | 2019-10-24T00:30:32 | 217,177,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | """
Module01 模块
"""
def fun01():
print("Module01 -- fun01")
def fun02():
print("Module01 -- fun02")
def fun03():
print("Module01 -- fun03") | [
"2456830920@qq.com"
] | 2456830920@qq.com |
d2081c5594a00d4d326c9ed1272419e2f5280042 | e86364b36b82c24596dd71f9fa2221d036e8defc | /collections/ansible_collections/cisco/nxos/plugins/modules/nxos_hsrp_interfaces.py | 4f1e68c3cdc543565ca3705927e1027b20a80ae6 | [] | no_license | ganeshrn/network_collections_migration | b3f11be5ecb9557787bcd12ca01b227379c7c102 | 8f56b60bfde606b291627665a1218bf7ce15f3a1 | refs/heads/master | 2020-09-12T12:10:58.189645 | 2019-11-18T11:44:48 | 2019-11-18T11:44:48 | 222,419,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,905 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Cisco and/or its affiliates.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for nxos_hsrp_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = '''module: nxos_hsrp_interfaces
short_description: Manages HSRP attributes of NXOS interfaces.
description: Manages Hot Standby Router Protocol (HSRP) interface attributes.
author: Chris Van Heuveln (@chrisvanheuveln)
notes: null
options:
config:
description: The provided configuration
type: list
elements: dict
suboptions:
name:
type: str
description: The name of the interface.
bfd:
type: str
description:
- Enable/Disable HSRP Bidirectional Forwarding Detection (BFD) on the interface.
choices:
- enable
- disable
state:
description:
- The state the configuration should be left in
type: str
choices:
- merged
- replaced
- overridden
- deleted
default: merged
'''
EXAMPLES = """
# Using deleted
- name: Configure hsrp attributes on interfaces
nxos_hsrp_interfaces:
config:
- name: Ethernet1/1
- name: Ethernet1/2
operation: deleted
# Using merged
- name: Configure hsrp attributes on interfaces
nxos_hsrp_interfaces:
config:
- name: Ethernet1/1
bfd: enable
- name: Ethernet1/2
bfd: disable
operation: merged
# Using overridden
- name: Configure hsrp attributes on interfaces
nxos_hsrp_interfaces:
config:
- name: Ethernet1/1
bfd: enable
- name: Ethernet1/2
bfd: disable
operation: overridden
# Using replaced
- name: Configure hsrp attributes on interfaces
nxos_hsrp_interfaces:
config:
- name: Ethernet1/1
bfd: enable
- name: Ethernet1/2
bfd: disable
operation: replaced
"""
RETURN = """
before:
description: The configuration prior to the model invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The resulting configuration model invocation.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface Ethernet1/1', 'hsrp bfd']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.hsrp_interfaces.hsrp_interfaces import Hsrp_interfacesArgs
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.config.hsrp_interfaces.hsrp_interfaces import Hsrp_interfaces
def main():
"""
Main entry point for module execution
:returns: the result form module invocation
"""
module = AnsibleModule(argument_spec=Hsrp_interfacesArgs.argument_spec,
supports_check_mode=True)
result = Hsrp_interfaces(module).execute_module()
module.exit_json(**result)
if __name__ == '__main__':
main()
| [
"ganesh634@gmail.com"
] | ganesh634@gmail.com |
86b2703f93f197bc7cf3ed852e7f82e536e6d092 | 17a0371a52c00e949460a891702109d1471d19af | /backend/no_crop_ig_stories_20282/urls.py | 5dcff4097643848f9f51e9868241285e295389ba | [] | no_license | crowdbotics-apps/no-crop-ig-stories-20282 | 2df2d1957f660e7fcd89f9a9b6619cb1fe54a6dc | a79f7e9b3ed3533b2e6fcbc2562db32f2d50d46a | refs/heads/master | 2022-12-12T18:17:55.054916 | 2020-09-15T08:27:57 | 2020-09-15T08:27:57 | 295,663,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,979 | py | """no_crop_ig_stories_20282 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "No Crop IG Stories"
admin.site.site_title = "No Crop IG Stories Admin Portal"
admin.site.index_title = "No Crop IG Stories Admin"
# swagger
api_info = openapi.Info(
title="No Crop IG Stories API",
default_version="v1",
description="API documentation for No Crop IG Stories App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
905c0a9dbb7ada9c3de9b778a76664ef072ef7c6 | 284f4775828d155fd289697f5da52cf138e8c937 | /abc104/a.py | c8723430c9a572ab922018eb1b2075e7fb5bfe42 | [] | no_license | aikiyy/AtCoder | f006d9eec4bea0c265b700259fa4a43790f92df0 | e4208bcc708c301b088d01528294fe013a475f21 | refs/heads/master | 2020-06-29T16:57:29.692986 | 2020-01-20T04:53:08 | 2020-01-20T04:53:08 | 200,572,345 | 0 | 0 | null | 2020-01-19T15:36:57 | 2019-08-05T03:00:55 | Python | UTF-8 | Python | false | false | 157 | py | r = int(input())
if r < 1200:
print('ABC')
elif r < 2800:
print('ARC')
else:
print('AGC')
# print(['ABC', 'ARC', 'AGC'][int(input())//50+8>>5])
| [
"aiki.yougai@gmail.com"
] | aiki.yougai@gmail.com |
fbf195990428f078f116132132635148407abaa2 | c9ad6ad969de505b3c8471c6f46dfd782a0fb498 | /0x0F-python-object_relational_mapping/10-model_state_my_get.py | e1b8c12f3ce8649549fd011e01d9d604ab15cec7 | [] | no_license | enterpreneur369/holbertonschool-higher_level_programming | 002fd5a19b40c8b1db06b34c4344e307f24c17ac | dd7d3f14bf3bacb41e2116d732ced78998a4afcc | refs/heads/master | 2022-06-20T00:57:27.736122 | 2020-05-06T14:26:10 | 2020-05-06T14:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/python3
"""Module 9-model_state_filter_a.py
"""
import sys
from model_state import Base, State
from sqlalchemy import (create_engine)
if __name__ == "__main__":
""" Main
Get the States by a n letter using SQLAlchemy
"""
engine = create_engine(
'mysql+mysqldb://{}:{}@localhost/{}'
.format(
sys.argv[1], sys.argv[2],
sys.argv[3]
),
pool_pre_ping=True
)
sname = sys.argv[4]
Base.metadata.create_all(engine)
sql = "SELECT * FROM states ORDER BY states.id;"
result = engine.execute(sql)
states = result.fetchall()
exist = False
for s in states:
if sname == s.name:
print("{:d}".format(s.id))
exist = True
if exist is False:
print("Not found")
| [
"jose.calderon@holbertonschool.com"
] | jose.calderon@holbertonschool.com |
a37d7da8f835b83be2821b76a95ed83ee07ad3b5 | 5181d3b3ef8fe301ea2d6b095260e9d327c2fd79 | /scripts/iemre/areal_coverage.py | dcbd0b2c6f99ad435cfa8f0078503e6ef15a8952 | [] | no_license | danhreitz/iem | 88113ef9c9c4a2918c9c2abdfd0510d5ca4ec819 | ed490dcd6c2a8359f88cb805ccee8f6707566f57 | refs/heads/master | 2021-01-18T15:27:28.607250 | 2015-08-10T21:33:54 | 2015-08-10T21:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,568 | py | import netCDF4
from pyiem import iemre, plot
import numpy
import datetime
import pytz
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
sts = datetime.datetime(2013,4,1, 0)
sts = sts.replace(tzinfo=pytz.timezone("UTC"))
ets = datetime.datetime(2013,6,16, 0)
ets = ets.replace(tzinfo=pytz.timezone("UTC"))
nc = netCDF4.Dataset('/mesonet/data/iemre/2013_mw_daily.nc')
lons = nc.variables['lon'][:]
lats = nc.variables['lat'][:]
precip = nc.variables['p01d']
nc2 = netCDF4.Dataset("/mesonet/data/iemre/state_weights.nc")
iowa = nc2.variables['IA'][:]
iowapts = numpy.sum(numpy.where(iowa > 0, 1, 0))
nc2.close()
days = []
coverage = []
now = sts
while now < ets:
idx = iemre.daily_offset(now)
pday = numpy.where(iowa > 0, precip[idx,:,:], -1)
tots = numpy.sum(numpy.where(pday >= (0.05 * 25.4), 1, 0 ))
days.append( now )
coverage.append( tots / float(iowapts) * 100.0)
now += datetime.timedelta(days=1)
days.append( now )
coverage.append( 0 )
days.append( now + datetime.timedelta(days=1))
coverage.append( 0 )
(fig, ax) = plt.subplots(1,1)
ax.bar(days, coverage, fc='b', ec='b')
ax.set_yticks([0,25,50,75,100])
ax.grid(True)
ax.set_title("2013 Daily Iowa Precipitation Coverage of 0.05+ inch")
ax.set_ylabel("Areal Coverage [%]")
ax.xaxis.set_major_locator(
mdates.DayLocator(interval=7)
)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%-d\n%b'))
ax.set_xlim(min(days), max(days))
fig.savefig('test.svg')
import iemplot
iemplot.makefeature('test') | [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
1e333d4c033792130b775c73a3c6c372ece02d41 | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/python-xlib/Xlib/ext/ge.pyi | d0add2edbd711d2a66f70a9614bafbfc6f77c1ed | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 556 | pyi | from typing_extensions import Final
from Xlib._typing import Unused
from Xlib.display import Display
from Xlib.protocol import rq
from Xlib.xobject import resource
extname: Final = "Generic Event Extension"
GenericEventCode: Final = 35
class GEQueryVersion(rq.ReplyRequest): ...
def query_version(self: Display | resource.Resource) -> GEQueryVersion: ...
class GenericEvent(rq.Event): ...
def add_event_data(self: Display | resource.Resource, extension: int, evtype: int, estruct: int) -> None: ...
def init(disp: Display, info: Unused) -> None: ...
| [
"noreply@github.com"
] | sobolevn.noreply@github.com |
83da271d2007ec5ce1f1ba16f484a54719876d17 | da5bf3d91fd7b73752d955b6ae783019c11267ec | /cuda-device-2.py | 6f653305999de71218042a7ef855d8f0be1290e5 | [] | no_license | izham-sugita/numba-fd | badd89608fcbbc3cd69be1a92ff69b81248b2498 | 8c4b2003c800454e5202908fc9abeb0df531e9df | refs/heads/master | 2023-03-28T01:04:56.136570 | 2021-03-30T10:09:36 | 2021-03-30T10:09:36 | 284,838,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | from numba import cuda, float32
import math
import time
@cuda.jit
def matmul(A, B, C):
"""Perform square matrix multiplication of C = A * B
"""
i, j = cuda.grid(2)
if i < C.shape[0] and j < C.shape[1]:
tmp = 0.
for k in range(A.shape[1]):
tmp += A[i, k] * B[k, j]
C[i, j] = tmp
# Ask which CUDA device to use and make it the active context.
deviceID = int(input("Select device ID: "))
print(cuda.select_device(deviceID))
import numpy as np
# Benchmark problem: multiply an N x N random matrix by the identity.
N = 4096
A = np.random.rand(N, N)
B = np.identity(N)
C = np.zeros_like(A)
#print(A)
# One thread per output element; grid sized to cover the full matrix.
threadsperblock = (16, 16)
blockspergrid_x = math.ceil(A.shape[0] / threadsperblock[0])
blockspergrid_y = math.ceil(A.shape[1] / threadsperblock[1])
blockspergrid = (blockspergrid_x, blockspergrid_y)
ts = time.time()
matmul[blockspergrid, threadsperblock](A, B, C)
te = time.time()
# NOTE: this wall-clock window also includes JIT compilation and the
# host<->device transfers numba performs for host NumPy arguments.
elp = te - ts
# Dense matmul performs 2*N^3 floating-point ops (one multiply plus one add
# per inner-loop step), and 1 GFLOP = 1e9 FLOP.  The original expression
# (N**3) * 10.0e-9 both under-counted the ops and was off by 10x on the unit.
gflops = (2.0 * float(N) ** 3 / elp) * 1.0e-9
print("Elapsed time: ", elp, "secs")
print("Throughput ", gflops, "GFLOPS ")
print()
#print(C)
#rvalue = str( cuda.detect() ) # return True, False only
#print(rvalue) #cannot get the device
| [
"sugita5019@gmail.com"
] | sugita5019@gmail.com |
aaad4f9650237b09e2f1935c4e00ff4f34b6c145 | 9cc6721acb439db2e7cff8eb4dbff4b6e14040d5 | /백준/2231.py | 12b6383564cc1f9fc8b9974f79a0ce94df16f6f4 | [] | no_license | young31/Algorithm | 35c6ec6b6d9b192f9d0e6e6f6484f33c92100232 | bfcccfa798d031a930490efa24d9b2263bd4b984 | refs/heads/master | 2021-11-04T14:01:53.827508 | 2021-10-25T06:35:24 | 2021-10-25T06:35:24 | 196,034,851 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | n = list(input())
# Baekjoon 2231: find the smallest "generator" of the target number, i.e. the
# smallest i such that i plus its digit sum equals the target; print 0 if none.
# `n` holds the characters of the input line (read on the previous statement).
digit_count = len(n)
target = int(''.join(n))
# A generator cannot be more than 9 * digit_count below the target, since
# each digit contributes at most 9 to the digit sum.
search_floor = max(0, target - 9 * digit_count)
for candidate in range(search_floor, target):
    if candidate + sum(int(digit) for digit in str(candidate)) == target:
        print(candidate)
        break
else:
    # Loop completed without a break: no generator exists.
    print(0)
| [
"migael38317@gmail.com"
] | migael38317@gmail.com |
4de5316a1b00eba55f5fbc916cdf2bbf7b91b27d | 763be4d77e20504848c9ddd29fe99b8012b00ea7 | /uchicagohvz/game/dorm_migrations/0008_highvaluedorm_dorm_fk.py | 7484b9415b6045ca84f26983520b969e079375f4 | [
"MIT"
] | permissive | kz26/uchicago-hvz | 2207c944f19c6fcc3310d4a43b4e733ac8225b18 | 85e89a1b70fa2a23445890686312407fe8b2084a | refs/heads/master | 2021-12-07T03:21:03.118945 | 2020-10-08T14:31:02 | 2020-10-08T14:31:02 | 13,137,628 | 11 | 6 | MIT | 2021-11-29T17:59:45 | 2013-09-27T00:28:39 | HTML | UTF-8 | Python | false | false | 447 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a `dorm_fk` foreign key on HighValueDorm pointing at game.Dorm.
    # Must run after 0007_set_dorm_fk.
    dependencies = [
        ('game', '0007_set_dorm_fk'),
    ]
    operations = [
        migrations.AddField(
            model_name='highvaluedorm',
            name='dorm_fk',
            # default=1 backfills existing rows; preserve_default=False drops
            # the default from the model once the migration has applied.
            field=models.ForeignKey(default=1, to='game.Dorm'),
            preserve_default=False,
        ),
    ]
| [
"whitehat2k9@gmail.com"
] | whitehat2k9@gmail.com |
e12c1c659d3522d8afd7c9c6a5000dcd949f7080 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/007_exceptions/_exercises/templates/GoCongr/022_Coding Exceptions Classes_!cool!.py | 741999e8439973d27e69f5a1bd8bf673b05a8ae2 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 351 | py | # # Coding Exceptions Classes
# c_ General ? p..
# c_ Specific1 G.. p..
# c_ Specific2 G.. p..
#
# ___ raiser0 r____ G...
# ___ raiser1 r____ S.1
# ___ raiser2 r____ S.2
#
# ___ func __ _0 _1 _2
# ___
# ?
# ____ G.. __ X # X is the raised instance
# print('caught:' X. -c # Same as sys.exc_info()[0]
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
3e68c157c162e237bcf7cbdfab3344aa166b63ed | f76862c7dfcc08a495927eae3f4a995e416b948c | /amulet_map_editor/api/opengl/__init__.py | 485d3392e3d5341aa400e59e6ba9d552087799c4 | [] | permissive | Ex-Ark/Amulet-Map-Editor | 0bc1e6ac07349736114ea80e6c3ee2233863366e | 24704b91749727e8bce25aecf41d39f4b0eba433 | refs/heads/master | 2023-03-02T23:06:46.201874 | 2021-02-14T12:31:59 | 2021-02-14T12:31:59 | 221,939,739 | 0 | 0 | MIT | 2019-11-15T14:26:14 | 2019-11-15T14:26:14 | null | UTF-8 | Python | false | false | 145 | py | from .context_manager import ContextManager
from .drawable import Drawable
from .thread_generator import ThreadedObject, ThreadedObjectContainer
| [
"james_clare1@yahoo.co.uk"
] | james_clare1@yahoo.co.uk |
ad8e82445acb57add0fd8ed5e34f5c5e3db445b7 | 20b5ef21ed539ac5b906d252e323d3622432fe52 | /phantompy/webelements.py | bfcca92a3940679bdfdd6110ed810aba1e8a47b4 | [
"BSD-2-Clause"
] | permissive | ballacky13/phantompy | 51277264e4d3f4368abb329f798413e04a276ab4 | 9460c93565151b40f0137b29f8c6dde8eded7651 | refs/heads/master | 2021-01-21T01:59:25.939983 | 2013-05-24T14:38:19 | 2013-05-24T14:38:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | # -*- coding: utf-8 -*-
from .api import library as lib
from .api import ctypes
class WebElement(object):
    """Thin Python wrapper around a native phantompy web-element handle.
    All behaviour is delegated to the C library (`lib`); this class owns the
    native pointer and decodes returned byte strings as UTF-8.
    """
    def __init__(self, el_ptr, frame):
        # Native element pointer; released in __del__.
        self._el_ptr = el_ptr
        self._frame = frame
        # NOTE(review): _closed is set here but never read or updated
        # anywhere else in this class.
        self._closed = False
        # Setup tagname
        tagname = lib.ph_webelement_tag_name(el_ptr)
        self._tag_name = tagname.decode("utf-8")
    def __repr__(self):
        return "<WebElement <{0}> >".format(self.name)
    @property
    def ptr(self):
        # Raw native pointer, for passing back into `lib` calls.
        return self._el_ptr
    def __del__(self):
        # Free the native element when the wrapper is garbage-collected.
        lib.ph_webelement_free(self.ptr)
    @property
    def name(self):
        # Tag name captured once in __init__.
        return self._tag_name
    def inner_html(self):
        """Return the element's inner HTML as a str."""
        result = lib.ph_webelement_inner_html(self.ptr)
        return result.decode("utf-8")
    def inner_text(self):
        """Return the element's inner text as a str."""
        result = lib.ph_webelement_inner_text(self.ptr)
        return result.decode("utf-8")
    def is_none(self):
        """Report whether the native side considers this element null.
        NOTE(review): this returns True when ph_webelement_is_null returns 0;
        in C, 0 conventionally means false — confirm against the library.
        """
        result = lib.ph_webelement_is_null(self.ptr)
        return True if result == 0 else False
| [
"niwi@niwi.be"
] | niwi@niwi.be |
ed07f6d6de8dd7e3d776e9358767ea4cc11affc9 | ba602dc67ad7bb50133aeb312f3c6c54627b3dec | /data/3955/WA_py/508331.py | e587cdede063c05ef1b12076f39eb043f6dff69e | [] | no_license | Dearyyyyy/TCG | 0d21d89275906157372d775f33309ce337e6bc95 | 7b80de16de2d3f5d95a7c4ed95d45a9e38882e67 | refs/heads/master | 2020-12-27T23:19:44.845918 | 2020-02-04T01:59:23 | 2020-02-04T01:59:23 | 238,101,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | # coding=utf-8
# Online-judge loop: read two words per line and report whether the second
# is a cyclic rotation of the first.  Terminates when input is exhausted
# (input() raises), exactly like the original.
while True:
    first, second = input().split()
    # Lazily enumerate every rotation of `first`; membership testing stops
    # at the first match, mirroring the original early `break`.
    rotations = (first[i:] + first[:i] for i in range(len(first)))
    if second in rotations:
        print("Yes")
    else:
        print("No")
"543271544@qq.com"
] | 543271544@qq.com |
5be7a9deeb09ea616df6b8cc3c5b75a2fd056175 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03645/s325823599.py | 4ee88108ab98ed34053bd76fe0f66cfe1a062006 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | n, m = map(int, input().split())
# Read the m edges (n and m come from the preceding input line).  The answer
# is POSSIBLE iff some vertex is adjacent both to vertex 1 and to vertex n,
# i.e. a length-2 path 1 -> x -> n exists.
edges = [tuple(map(int, input().split())) for _ in range(m)]
reachable_from_start = {b for a, b in edges if a == 1}
reaching_goal = {a for a, b in edges if b == n}
print("POSSIBLE" if reachable_from_start & reaching_goal else "IMPOSSIBLE")
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a24259cf4a4c9640df796299cdbea9e8c318b970 | 2312ee83cd5cdfcd83af3a805dc14444b38f89c6 | /barpie.py | 6cda7263efdad4ff5e567070c4c80f8530c455cb | [
"MIT"
] | permissive | llord1/DataVisualization | 49ea215d012566d149a740c87185092d0a4e8ede | d7f7f43479549732ef6c94e7cd1c1ccc401593a8 | refs/heads/master | 2021-09-21T15:29:40.016594 | 2018-08-28T14:34:46 | 2018-08-28T14:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | #!/usr/bin/env pythonw
import matplotlib.pyplot as plt
import seaborn as sns
#sns.set_style('ticks')
# Get default color cycle
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
# 2x3 grid: top row shows three data sets as pie charts, bottom row shows
# the same data as bar charts (same default colors, shared y-scale).
fig, axs = plt.subplots(nrows=2, ncols=3)
# Three small data sets: increasing, roughly flat, decreasing.
dataA = [17, 18, 20, 22, 24]
dataB = [20, 20, 19, 21, 20]
dataC = [24, 22, 20, 18, 17]
axs[0][0].pie(dataA, labels=range(5))
axs[0][1].pie(dataB, labels=range(5))
axs[0][2].pie(dataC, labels=range(5))
# Explicit colors keep bar i matching pie slice i.
axs[1][0].bar(x=range(5), height=dataA, color=colors)
axs[1][0].set_ylim(0, 25)
axs[1][1].bar(x=range(5), height=dataB, color=colors)
axs[1][1].set_ylim(0, 25)
axs[1][2].bar(x=range(5), height=dataC, color=colors)
axs[1][2].set_ylim(0, 25)
fig.show()
fig.savefig('barpie.png', dpi=300)
"bgoncalves@gmail.com"
] | bgoncalves@gmail.com |
697efe77da57a33f49ad2ede10702df06f27631b | b3c3a810d48b02e40685f57d346fd9c0f2237a9e | /Python Essentials/5.0 Lists Basics/05. Numbers Filter.py | ce2d045c045aed6537ce710c040b83b686b4c757 | [] | no_license | byAbaddon/Essentials-Course-with----JavaScript___and___Python | 7ef8112edafd6a2e2cef82c7709f974a67c64cc0 | 5194d9e74c2aa186e5571745f8931f31595d4b99 | refs/heads/main | 2023-03-27T23:23:18.426633 | 2021-04-05T21:09:06 | 2021-04-05T21:09:06 | 349,848,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | num_list = [int(input()) for _ in range(int(input()))]
command = input()
def calc(type_command):
    """Return the elements of the module-level ``num_list`` selected by
    *type_command* ('even', 'odd', 'negative' or 'positive'), or None for
    an unknown command.
    The original built all four filtered lists eagerly just to return one;
    this version evaluates only the requested filter.
    """
    predicates = {
        'even': lambda x: not x & 1,
        'odd': lambda x: x & 1,
        'negative': lambda x: x < 0,
        'positive': lambda x: x >= 0,  # zero counts as positive here
    }
    predicate = predicates.get(type_command)
    if predicate is None:
        return None
    return [x for x in num_list if predicate(x)]
print(calc(command))
'''
5
33
19
-2
18
998
even
#[-2, 18, 998]
''' | [
"noreply@github.com"
] | byAbaddon.noreply@github.com |
97a8d4ed865ee8e97ff76957231e9f0eafaa5a40 | 1925c535d439d2d47e27ace779f08be0b2a75750 | /CtCl/Sorting and Searching/quick_sort.py | c1b55d5d7746ba459bff4223121d04d400ffdc4c | [] | no_license | arthurDz/algorithm-studies | ee77d716041671c4b8bb757d8d96f3d10b6589f7 | 1e4d23dd0c40df34f58d71c7ca3e6491be732075 | refs/heads/master | 2023-04-27T12:17:06.209278 | 2021-04-30T20:16:18 | 2021-04-30T20:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | # Runtime O(NlogN) - average, O(N^2) - worst, Memory O(logN)
def quickSort(num):
    """Return a sorted copy of *num* using recursive quicksort with the
    first element as pivot (three-way partition: below / equal / above).
    """
    if len(num) < 2:
        return num
    pivot = num[0]
    # Partition with comprehensions instead of an explicit append loop.
    below = [value for value in num[1:] if value < pivot]
    same = [value for value in num if value == pivot]
    above = [value for value in num[1:] if value > pivot]
    return quickSort(below) + same + quickSort(above)
"yunfan.yang@minerva.kgi.edu"
] | yunfan.yang@minerva.kgi.edu |
ad7770f5327908343402cf810055eb1ed533e4e5 | ce16345f020d14b138c0cb152abbdd1acbe450f5 | /grobber/grobber/sources/masteranime.py | 6e021e191b6101afd8a9d6aa3d9def0cee96ffef | [
"MIT"
] | permissive | siku2/MyAnimeStream | 9f1b57f33f344e35c6f0bae0c0008c6c76518eea | addfa3831cbe52667fea7f58d49325d6f200b2a1 | refs/heads/master | 2020-03-21T19:04:45.667147 | 2018-12-27T09:34:03 | 2018-12-27T09:34:03 | 138,929,617 | 1 | 1 | MIT | 2018-10-05T18:33:22 | 2018-06-27T20:27:27 | Python | UTF-8 | Python | false | false | 3,914 | py | import json
import logging
from typing import Any, AsyncIterator, Dict, List, Optional
from . import register_source
from .. import utils
from ..decorators import cached_property
from ..languages import Language
from ..models import Anime, Episode, SearchResult, get_certainty
from ..request import DefaultUrlFormatter, Request
from ..url_pool import UrlPool
log = logging.getLogger(__name__)
BASE_URL = "{MASTERANIME_URL}"
SEARCH_URL = BASE_URL + "/api/anime/filter"
ANIME_URL = BASE_URL + "/api/anime/{anime_id}/detailed"
EPISODE_URL = BASE_URL + "/anime/watch/{anime_slug}/{episode}"
class MasterEpisode(Episode):
    """One masterani.me episode page; scrapes the embedded mirror list."""
    # Attributes included when the episode is serialised.
    ATTRS = ("mirror_data",)
    @cached_property
    async def mirror_data(self) -> List[Dict[str, Any]]:
        """Raw mirror descriptors from the page's <video-mirrors> element;
        empty list when the element is missing."""
        bs = await self._req.bs
        element = bs.select_one("video-mirrors")
        if not element:
            return []
        # The mirror list is embedded as JSON in the ":mirrors" attribute.
        return json.loads(element[":mirrors"])
    @cached_property
    async def raw_streams(self) -> List[str]:
        """Embed URLs assembled as host prefix + embed id + optional suffix."""
        links = []
        for mirror in await self.mirror_data:
            host_data = mirror["host"]
            prefix = host_data["embed_prefix"]
            suffix = host_data["embed_suffix"] or ""
            embed_id = mirror["embed_id"]
            links.append(f"{prefix}{embed_id}{suffix}")
        return links
class MasterAnime(Anime):
    """Anime hosted on masterani.me, backed by its JSON "detailed" API."""
    # Attributes included when the anime is serialised.
    ATTRS = ("anime_id", "anime_slug")
    EPISODE_CLS = MasterEpisode
    @cached_property
    async def info_data(self) -> Dict[str, Any]:
        """The "info" object of the detailed-API response."""
        return (await self._req.json)["info"]
    @cached_property
    async def episode_data(self) -> List[Dict[str, Any]]:
        """The "episodes" array of the detailed-API response."""
        return (await self._req.json)["episodes"]
    @cached_property
    async def anime_id(self) -> int:
        return (await self.info_data)["id"]
    @cached_property
    async def anime_slug(self) -> str:
        return (await self.info_data)["slug"]
    @cached_property
    async def title(self) -> str:
        return (await self.info_data)["title"]
    @cached_property
    async def is_dub(self) -> bool:
        # This source only serves subbed releases.
        return False
    @cached_property
    async def language(self) -> Language:
        return Language.ENGLISH
    @cached_property
    async def episode_count(self) -> int:
        return len(await self.episode_data)
    @classmethod
    async def search(cls, query: str, *, language=Language.ENGLISH, dubbed=False) -> AsyncIterator[SearchResult]:
        """Yield SearchResults for *query*; yields nothing for dubs or
        non-English languages, which this source does not carry."""
        if dubbed or language != Language.ENGLISH:
            return
        # Query limit is 45 characters!!
        req = Request(SEARCH_URL, {"search": query[:45], "order": "relevance_desc"})
        json_data = await req.json
        if not json_data:
            # Fix: use the module-level logger (`log = logging.getLogger(__name__)`)
            # instead of the root logger, consistent with the rest of the module.
            log.warning("couldn't get json from masteranime")
            return
        for raw_anime in json_data["data"]:
            anime_id = raw_anime["id"]
            title = raw_anime["title"]
            req = Request(utils.format_available(ANIME_URL, anime_id=anime_id))
            anime = cls(req)
            # Pre-seed the cached properties so no extra request is needed
            # just to display the search result.
            anime._anime_id = anime_id
            anime._anime_slug = raw_anime["slug"]
            anime._title = title
            yield SearchResult(anime, get_certainty(title, query))
    @cached_property
    async def raw_eps(self) -> List[Episode]:
        """One EPISODE_CLS instance per entry in episode_data."""
        episodes = []
        slug = await self.anime_slug
        for ep_data in await self.episode_data:
            ep_id = ep_data["info"]["episode"]
            req = Request(utils.format_available(EPISODE_URL, anime_slug=slug, episode=ep_id))
            episodes.append(self.EPISODE_CLS(req))
        return episodes
    async def get_episode(self, index: int) -> Optional[Episode]:
        return (await self.raw_eps)[index]
    async def get_episodes(self) -> List[Episode]:
        return await self.raw_eps
masteranime_pool = UrlPool("MasterAnime", ["https://www.masterani.me"])
DefaultUrlFormatter.add_field("MASTERANIME_URL", lambda: masteranime_pool.url)
register_source(MasterAnime)
| [
"siku2@outlook.com"
] | siku2@outlook.com |
9a501b13539c3f91f4c336f040f5baa05f56b93c | 53cb2e9e1f5dfb57090295fe45d4810aac07caad | /example/example/tests/test_tenants.py | 4d69f27fa8417ea99daead345777b5a42686dc35 | [] | no_license | kissgyorgy/django-tenants | 47c01f79b459842d6aaf72828a16327b9690ff47 | 327fc85ba18a10b6622a06ad1fe6c5f9ec8d83c6 | refs/heads/master | 2020-04-29T07:58:43.877588 | 2013-11-23T11:27:50 | 2013-11-23T11:27:50 | 14,571,644 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | from django.test import TestCase
from django.db import connection as conn
from django.core.management import call_command
from example.models import Team
class TenantCreationTest(TestCase):
    """Checks that a Team (tenant) can be created and read back intact."""
    def setUp(self):
        # One tenant row per test; Django rolls the test DB back between tests.
        Team.objects.create(name='Something', domain='127.0.0.1', schema='something')
    def test_create_tenant(self):
        # Look the tenant up by its schema and verify the stored fields.
        team = Team.objects.get(schema='something')
        self.assertEqual(team.name, 'Something')
        self.assertEqual(team.domain, '127.0.0.1')
| [
"kissgyorgy@me.com"
] | kissgyorgy@me.com |
2993e9834a1447e598649f1c186faef412f6e96f | 3e06c2e64c14c3e3486cd3604268f12510fdeb56 | /nostradamus/nostradamus/urls.py | da6c0686f55de26517f6a72f5c2a01de940b74af | [
"Apache-2.0"
] | permissive | exactpro/nostradamus | 42296e9d4762ac6d7364a665dd5cd74117caacc8 | 80df847a012374ad2b702cc9f9c9cb46c1153ee7 | refs/heads/master | 2022-09-29T08:49:14.505795 | 2021-12-21T12:43:01 | 2021-12-21T12:43:01 | 162,601,150 | 32 | 8 | Apache-2.0 | 2022-09-13T23:04:20 | 2018-12-20T15:58:05 | TypeScript | UTF-8 | Python | false | false | 1,588 | py | """nostradamus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Swagger/OpenAPI schema view for the whole API, browsable without auth.
schema_view = get_schema_view(
    openapi.Info(title="Nostradamus API", default_version="v1"),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
# Top-level routing: interactive API docs, the admin site, and one included
# URLConf per application; static-file patterns are appended last.
urlpatterns = [
    path(
        "swagger/",
        schema_view.with_ui("swagger", cache_timeout=0),
    ),
    path("admin/", admin.site.urls),
    path("analysis_and_training/", include("apps.analysis_and_training.urls")),
    path("settings/", include("apps.settings.urls")),
    path(
        "description_assessment/", include("apps.description_assessment.urls")
    ),
    path("qa_metrics/", include("apps.qa_metrics.urls")),
    path("virtual_assistant/", include("apps.virtual_assistant.urls")),
] + staticfiles_urlpatterns()
| [
"litvinov.ivan44@gmail.com"
] | litvinov.ivan44@gmail.com |
4de9633aa2daa7c8e1e69befaaa0c97c1d963018 | 8e833f71bc2f913f459b112e08725ad6d37b0897 | /myapp/migrations/0002_auto_20200309_2105.py | 8bcc39339b7f2fdb42f5e6f23a51a5694601bec4 | [] | no_license | 21toffy/slider | 0d5727befac41f7e37160e6684dd92d9c2583671 | fc064ce61b110351be911abd614fab2810c9c046 | refs/heads/master | 2021-01-09T19:12:39.625561 | 2020-04-22T01:25:23 | 2020-04-22T01:25:23 | 242,425,521 | 0 | 1 | null | 2020-02-23T01:06:51 | 2020-02-22T23:16:47 | Python | UTF-8 | Python | false | false | 406 | py | # Generated by Django 2.0.13 on 2020-03-09 20:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Redefines Pictures.image as an optional FileField (NULL/blank allowed)
    # that stores uploads under the 'imagefile' directory.
    dependencies = [
        ('myapp', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pictures',
            name='image',
            field=models.FileField(blank=True, null=True, upload_to='imagefile'),
        ),
    ]
| [
"oketofoke@gmail.com"
] | oketofoke@gmail.com |
184ae98d1ffdd0597c98eebc3660eb8b5a22ef05 | e76ea38dbe5774fccaf14e1a0090d9275cdaee08 | /src/chrome/app/DEPS | 4d39b7881b84cb0b57b7e6b77d01a35d4ff0eefb | [
"BSD-3-Clause"
] | permissive | eurogiciel-oss/Tizen_Crosswalk | efc424807a5434df1d5c9e8ed51364974643707d | a68aed6e29bd157c95564e7af2e3a26191813e51 | refs/heads/master | 2021-01-18T19:19:04.527505 | 2014-02-06T13:43:21 | 2014-02-06T13:43:21 | 16,070,101 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 743 | include_rules = [
"+apps",
"+breakpad",
"+chrome/browser",
"+chrome/installer",
"+chrome/plugin/chrome_content_plugin_client.h",
"+chrome/renderer/chrome_content_renderer_client.h",
"+chrome/utility/chrome_content_utility_client.h",
"+chromeos/chromeos_paths.h",
"+chromeos/chromeos_switches.h",
"+components/breakpad",
"+components/nacl/common",
"+components/nacl/zygote",
"+components/startup_metric_utils",
"+content/public/app",
"+content/public/browser/browser_main_runner.h",
"+content/public/browser/render_process_host.h",
"+grit", # For generated headers
"+native_client/src/trusted/service_runtime/osx",
"+policy", # For generated headers and source
"+sandbox",
"+tools/memory_watcher",
]
| [
"ronan@fridu.net"
] | ronan@fridu.net | |
d3389799ab4c3d8f942947a427250ff6be14a12c | 626b14ce13986b6d5e03143e151004247659625a | /Day01-15/code/Day13/generator1.py | 7b575dd1c65f8f34f1e31fbf29de050b1a02a0d5 | [] | no_license | Focavn/Python-100-Days | c7586ecf7ae3f1fd42f024558bb998be23ee9df8 | d8de6307aeff9fe31fd752bd7725b9cc3fbc084b | refs/heads/master | 2021-08-08T17:57:02.025178 | 2020-09-17T11:58:04 | 2020-09-17T11:58:04 | 220,427,144 | 0 | 0 | null | 2019-11-08T08:59:43 | 2019-11-08T08:59:41 | null | UTF-8 | Python | false | false | 365 | py | """
生成器 - 生成器语法
Version: 0.1
Author: 骆昊
Date: 2018-03-21
"""
# List comprehension: eager, all 10 squares exist at once.
seq = [x * x for x in range(10)]
print(seq)
# Generator expression: lazy, values are produced only while iterating.
gen = (x * x for x in range(10))
print(gen)
for x in gen:
    print(x)
num = 10
# Pair ascending bases with descending exponents: 1**9, 2**8, ..., 9**1.
gen = (x ** y for x, y in zip(range(1, num), range(num - 1, 0, -1)))
print(gen)
n = 1
# Pull values manually with next(); exactly num - 1 items are available.
while n < num:
    print(next(gen))
    n += 1
| [
"Focavn@users.github.com"
] | Focavn@users.github.com |
79eda3f61f4e8bd36a9b6f559862df999c11672e | 1e9ad304868c2bda918c19eba3d7b122bac3923b | /kubernetes/client/models/v1_persistent_volume_claim_list.py | 0424df102137fcef71706c3593b0b6412a5b3642 | [
"Apache-2.0"
] | permissive | pineking/client-python | c77e5bd3d476ac852e6dffa96056008baa0f597f | 74a64d7325518f4298600d4bb300f92843c29347 | refs/heads/master | 2021-01-22T22:16:27.368406 | 2017-03-15T08:21:21 | 2017-03-15T08:21:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,612 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.1-660c2a2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1PersistentVolumeClaimList(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, api_version=None, items=None, kind=None, metadata=None):
        """
        V1PersistentVolumeClaimList - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Attribute name -> swagger type; drives to_dict() serialization.
        self.swagger_types = {
            'api_version': 'str',
            'items': 'list[V1PersistentVolumeClaim]',
            'kind': 'str',
            'metadata': 'UnversionedListMeta'
        }
        # Python attribute name -> JSON key on the wire.
        self.attribute_map = {
            'api_version': 'apiVersion',
            'items': 'items',
            'kind': 'kind',
            'metadata': 'metadata'
        }
        self._api_version = api_version
        self._items = items
        self._kind = kind
        self._metadata = metadata
    @property
    def api_version(self):
        """
        Gets the api_version of this V1PersistentVolumeClaimList.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
        :return: The api_version of this V1PersistentVolumeClaimList.
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1PersistentVolumeClaimList.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
        :param api_version: The api_version of this V1PersistentVolumeClaimList.
        :type: str
        """
        self._api_version = api_version
    @property
    def items(self):
        """
        Gets the items of this V1PersistentVolumeClaimList.
        A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims
        :return: The items of this V1PersistentVolumeClaimList.
        :rtype: list[V1PersistentVolumeClaim]
        """
        return self._items
    @items.setter
    def items(self, items):
        """
        Sets the items of this V1PersistentVolumeClaimList.
        A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims
        :param items: The items of this V1PersistentVolumeClaimList.
        :type: list[V1PersistentVolumeClaim]
        """
        # `items` is the only required field of this model.
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")
        self._items = items
    @property
    def kind(self):
        """
        Gets the kind of this V1PersistentVolumeClaimList.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
        :return: The kind of this V1PersistentVolumeClaimList.
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1PersistentVolumeClaimList.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
        :param kind: The kind of this V1PersistentVolumeClaimList.
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """
        Gets the metadata of this V1PersistentVolumeClaimList.
        Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
        :return: The metadata of this V1PersistentVolumeClaimList.
        :rtype: UnversionedListMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1PersistentVolumeClaimList.
        Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
        :param metadata: The metadata of this V1PersistentVolumeClaimList.
        :type: UnversionedListMeta
        """
        self._metadata = metadata
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        # Recursively serialize nested models, lists and dicts via to_dict().
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
498f10683ef402dac4279ed635e049a50304e21b | 51f43f1901d5aad2bce2b5ccd82bd9d5a0b397d1 | /TranskribusDU/graph/FeatureDefinition.py | ebadd300a6236d4fd7664bfa8d804b789e9ac6d6 | [
"BSD-3-Clause"
] | permissive | kapitsa2811/TranskribusDU | e1fd32b656ed8e3dcddc62e149647398cc48030e | 9e680b0bf14ea52678f7c4dccad465d5a9d9ee9d | refs/heads/master | 2020-08-09T12:52:52.832320 | 2019-10-01T15:04:38 | 2019-10-01T15:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,235 | py | # -*- coding: utf-8 -*-
"""
Feature Definition
Sub-class it and specialize getTransformer and clean_tranformers
Copyright Xerox(C) 2016 JL. Meunier
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import math
from common.trace import traceln
class FeatureDefinition:
"""
A class to sub-class to define which features from a Tranformer class, you want for node and edges
"""
def __init__(self, nbClass=None, node_transformer=None, edge_transformer=None):
self.nbClass = nbClass #number of node classes (also called 'labels', and 'states' in pystruct)
self._node_transformer = node_transformer
self._edge_transformer = edge_transformer
def setTransformers(self, node_transformer, edge_transformer):
self._node_transformer = node_transformer
self._edge_transformer = edge_transformer
def getTransformers(self):
"""
return (node transformer, edge transformer)
"""
return self._node_transformer, self._edge_transformer
def fitTranformers(self, lGraph,lY=None):
"""
Fit the transformers using the graphs
return True
"""
lAllNode = [nd for g in lGraph for nd in g.lNode]
self._node_transformer.fit(lAllNode,lY)
del lAllNode #trying to free the memory!
lAllEdge = [edge for g in lGraph for edge in g.lEdge]
self._edge_transformer.fit(lAllEdge,lY)
del lAllEdge
return True
def cleanTransformers(self):
"""
Some extractors/transfomers keep a large state in memory , which is not required in "production".
This method must clean this useless large data
For instance: the TFIDF transformers are keeping the stop words => huge pickled file!!!
"""
for _trnsf in self.getTransformers():
try:
_trnsf.cleanTransformers()
except Exception as e:
traceln("Cleaning warning: ", e)
return None
def _getTypeNumber(self, kwargs):
"""
Utility function. In some case the __init__ method gets a dictionary of length N + N^2
(N config for unary extractor, N^2 config for pairwise)
Here we compute N from the dictionary length. ^^
"""
return int(round(math.sqrt( len(kwargs) + 1/4.0)-0.5, 0))
| [
"jean-luc.meunier@naverlabs.com"
] | jean-luc.meunier@naverlabs.com |
14a8ed01957f432d88c64cd1119811b80606126c | 6238dc5b5818f54295547cf4cb1afa5553ddfb94 | /taobao/top/api/rest/SimbaRptAdgroupeffectGetRequest.py | 3a84baac14a3d21395468c846584c6fc0d259785 | [] | no_license | liaosiwei/guagua | 8208bb82b1df5506dcb86c1a7094c849ea5576a6 | ee6025813e83568dc25beb52279c86f8bd33f1a4 | refs/heads/master | 2016-09-06T16:45:00.798633 | 2013-05-03T04:02:35 | 2013-05-03T04:02:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | '''
Created by auto_sdk on 2013-04-14 16:35:32
'''
from top.api.base import RestApi
class SimbaRptAdgroupeffectGetRequest(RestApi):
    """Auto-generated TOP request wrapper for
    taobao.simba.rpt.adgroupeffect.get (Simba ad-group effect report).
    Assign the attributes below as request parameters before executing
    through the RestApi machinery.
    """
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters; None means "not sent with the request".
        self.adgroup_id = None
        self.campaign_id = None
        self.end_time = None
        self.nick = None
        self.page_no = None
        self.page_size = None
        self.search_type = None
        self.source = None
        self.start_time = None
        self.subway_token = None
    def getapiname(self):
        # TOP API method name used by RestApi for request routing.
        return 'taobao.simba.rpt.adgroupeffect.get'
| [
"liaosiweiorxiaowei@gmail.com"
] | liaosiweiorxiaowei@gmail.com |
9fc4ac132c48756bea8f1d552e5271d4383972b4 | 9d1e84e70048a6a6cba71e93c3f0a66afbd276dd | /code/storyboard_root/models/storytext_model.py | e438cdf05920d4e9a3aa77c4f18e98ee6b741cc1 | [] | no_license | 1SouravGhosh/StoryBoard-API-services | ab7cd6523bf06d1f042aa0a9dbba52a5bc712336 | 4cb79e7df1d95ae1dc337267b008c0a6cf42e80a | refs/heads/master | 2023-04-09T01:59:01.882784 | 2023-03-26T14:18:18 | 2023-03-26T14:18:18 | 166,553,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,739 | py | import datetime
from optparse import Option
from os.path import getsize, join
from symbol import with_item
from typing import Text
from xml.etree.ElementTree import tostring
import psycopg2
from click import DateTime
from flask.globals import session
from flask_restful.fields import Boolean, DateTime, Integer
from psycopg2.extensions import Column
from pylint.pyreverse.diagrams import Relationship
from sqlalchemy.orm import backref, defer, load_only, relationship, undefer
from sqlalchemy.sql.expression import outerjoin
from sqlalchemy.sql.operators import like_op
from sqlalchemy.sql.schema import FetchedValue, ForeignKey
from storyboard_root.resources.database_resources.db_resource import db
class StoryTextModel(db.Model):
    """SQLAlchemy model for a story's text body
    (table sch_storyboard.tbl_storytext).
    """
    __table_args__ = {"schema":"sch_storyboard"}
    __tablename__ = "tbl_storytext"
    story_text_id = db.Column( db.Integer , primary_key = True )
    story_text = db.Column( db.Text )
    story_table = db.relationship( "StoryModel" , backref='storytext')
    def __init__(self,i_story_text_id, i_story_text):
        self.story_text_id = i_story_text_id
        self.story_text = i_story_text
    def json(self):
        """Serializable dict representation of this row."""
        return {
            "storytextid" : self.story_text_id ,
            "storytext" : self.story_text
        }
    @classmethod
    def get_storytext_by_id(cls, in_storytext_id):
        """Return the row with the given id, or None when absent or on error."""
        try:
            storytext = cls.query.filter_by(story_text_id=in_storytext_id).first()
            return storytext
        except Exception:  # narrowed from a bare `except:`
            print("exception occured storytext model")
    @classmethod
    def create_storytext(cls, in_storytext):
        """Insert a new row holding *in_storytext* and return the newest row."""
        try:
            new_storytext = cls(i_story_text_id=None,i_story_text=in_storytext)
            db.session.add(new_storytext)
            db.session.commit()
            # Re-query for the highest id to fetch the row just inserted.
            # NOTE(review): assumes no concurrent inserts.
            storytext = cls.query.order_by(cls.story_text_id.desc()).first()
            return storytext
        except Exception:  # narrowed from a bare `except:`
            print("exception occured")
    @classmethod
    def update_storytext(cls, in_storytext_id, in_storytext):
        """Overwrite the text of the row with the given id
        (no-op when *in_storytext* is None)."""
        try:
            existing_storytext = cls.get_storytext_by_id(in_storytext_id)
            if in_storytext is not None:
                existing_storytext.story_text = in_storytext
        except Exception:  # narrowed from a bare `except:`
            print("exception occured")
        finally:
            # NOTE(review): committing in `finally` preserves the original
            # behaviour of committing even after a failure; consider a
            # rollback on the error path instead.
            db.session.commit()
    @classmethod
    def delete_story_by_id(cls, in_storytext_id):
        """Delete the row with the given id."""
        try:
            cls.query.filter_by(story_text_id=in_storytext_id).delete()
        except Exception:  # narrowed from a bare `except:`
            print("exception occured")
        finally:
            # NOTE(review): see update_storytext about commit-in-finally.
            db.session.commit()
| [
"1SouravGhosh@noreply.github.com"
] | 1SouravGhosh@noreply.github.com |
ecb2e410d0ae0ebcdf8a1a3a003cd918ee96c2e2 | 9127a5582694a055e7c64ae65ae97a11728ff82f | /cunt/pools/pool_wallet_info.py | 7400e5ee4d419ef638f85277bf296554277e9d21 | [
"Apache-2.0"
] | permissive | nahvan/cunt-blockchain | 0f881df58f3ca5fe554b52a025437053df6f1037 | 447084a809ec0339bcd24f7141f39ee0e8dddffa | refs/heads/main | 2023-06-24T16:47:31.099801 | 2021-07-29T21:17:02 | 2021-07-29T21:17:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,502 | py | from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, Dict
from blspy import G1Element
from cunt.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from cunt.types.blockchain_format.coin import Coin
from cunt.types.blockchain_format.program import Program
from cunt.types.blockchain_format.sized_bytes import bytes32
from cunt.util.byte_types import hexstr_to_bytes
from cunt.util.ints import uint32, uint8
from cunt.util.streamable import streamable, Streamable
class PoolSingletonState(IntEnum):
"""
From the user's point of view, a pool group can be in these states:
`SELF_POOLING`: The singleton exists on the blockchain, and we are farming
block rewards to a wallet address controlled by the user
`LEAVING_POOL`: The singleton exists, and we have entered the "escaping" state, which
means we are waiting for a number of blocks = `relative_lock_height` to pass, so we can leave.
`FARMING_TO_POOL`: The singleton exists, and it is assigned to a pool.
`CLAIMING_SELF_POOLED_REWARDS`: We have submitted a transaction to sweep our
self-pooled funds.
"""
SELF_POOLING = 1
LEAVING_POOL = 2
FARMING_TO_POOL = 3
SELF_POOLING = PoolSingletonState.SELF_POOLING
LEAVING_POOL = PoolSingletonState.LEAVING_POOL
FARMING_TO_POOL = PoolSingletonState.FARMING_TO_POOL
@dataclass(frozen=True)
@streamable
class PoolState(Streamable):
"""
`PoolState` is a type that is serialized to the blockchain to track the state of the user's pool singleton
`target_puzzle_hash` is either the pool address, or the self-pooling address that pool rewards will be paid to.
`target_puzzle_hash` is NOT the p2_singleton puzzle that block rewards are sent to.
The `p2_singleton` address is the initial address, and the `target_puzzle_hash` is the final destination.
`relative_lock_height` is zero when in SELF_POOLING state
"""
version: uint8
state: uint8 # PoolSingletonState
# `target_puzzle_hash`: A puzzle_hash we pay to
# When self-farming, this is a main wallet address
# When farming-to-pool, the pool sends this to the farmer during pool protocol setup
target_puzzle_hash: bytes32 # TODO: rename target_puzzle_hash -> pay_to_address
# owner_pubkey is set by the wallet, once
owner_pubkey: G1Element
pool_url: Optional[str]
relative_lock_height: uint32
def initial_pool_state_from_dict(state_dict: Dict, owner_pubkey: G1Element, owner_puzzle_hash: bytes32) -> PoolState:
state_str = state_dict["state"]
singleton_state: PoolSingletonState = PoolSingletonState[state_str]
if singleton_state == SELF_POOLING:
target_puzzle_hash = owner_puzzle_hash
pool_url: str = ""
relative_lock_height = uint32(0)
elif singleton_state == FARMING_TO_POOL:
target_puzzle_hash = bytes32(hexstr_to_bytes(state_dict["target_puzzle_hash"]))
pool_url = state_dict["pool_url"]
relative_lock_height = uint32(state_dict["relative_lock_height"])
else:
raise ValueError("Initial state must be SELF_POOLING or FARMING_TO_POOL")
# TODO: change create_pool_state to return error messages, as well
assert relative_lock_height is not None
return create_pool_state(singleton_state, target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height)
def create_pool_state(
state: PoolSingletonState,
target_puzzle_hash: bytes32,
owner_pubkey: G1Element,
pool_url: Optional[str],
relative_lock_height: uint32,
) -> PoolState:
if state not in set(s.value for s in PoolSingletonState):
raise AssertionError("state {state} is not a valid PoolSingletonState,")
ps = PoolState(
POOL_PROTOCOL_VERSION, uint8(state), target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height
)
# TODO Move verify here
return ps
@dataclass(frozen=True)
@streamable
class PoolWalletInfo(Streamable):
"""
Internal Pool Wallet state, not destined for the blockchain. This can be completely derived with
the Singleton's CoinSolutions list, or with the information from the WalletPoolStore.
"""
current: PoolState
target: Optional[PoolState]
launcher_coin: Coin
launcher_id: bytes32
p2_singleton_puzzle_hash: bytes32
current_inner: Program # Inner puzzle in current singleton, not revealed yet
tip_singleton_coin_id: bytes32
singleton_block_height: uint32 # Block height that current PoolState is from
| [
"svginsomnia@gmail.com"
] | svginsomnia@gmail.com |
dd0718fed67c8e6fbc08d84adfd6864150fac493 | 5f2103b1083b088aed3f3be145d01a770465c762 | /210. Course Schedule II.py | 68cd9b9b2fc5afeac0093f89da02788485c01257 | [] | no_license | supersj/LeetCode | 5605c9bcb5ddcaa83625de2ad9e06c3485220019 | 690adf05774a1c500d6c9160223dab7bcc38ccc1 | refs/heads/master | 2021-01-17T17:23:39.585738 | 2017-02-27T15:08:42 | 2017-02-27T15:08:42 | 65,526,089 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | class Solution(object):
def findOrder(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
innode = {}
graph = {}
haszero = 0
for i in range(numCourses):
graph[i] = []
innode[i] = 0
for ele in prerequisites:
graph[ele[0]].append(ele[1])
innode[ele[1]] += 1
while innode:
haszero = 0
for k,v in innode.items():
if v == 0:
haszero = 1
for ele in graph[k]:
innode[ele] -= 1
del innode[k]
del graph[k]
break
if haszero == 0:
return False
if graph:
return False
return True
| [
"ml@ml.ml"
] | ml@ml.ml |
1e90f2c55b41185c48d23dff1e6fb2d9fad2fd87 | 09cead98874a64d55b9e5c84b369d3523c890442 | /sj200917_python2m6/day02_200924/dict_11_methodlist.py | 5a508f734ba7992c2c1f3f0404da772b9b41c2af | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | """
dictionary methods
"""
"""
dict.clear() - remove all items
dict.copy() - generate a dictionary
dict.fromkeys() - generate a dictionary
dict.get(key) - get value by a key , v.s. dictname[key]
dict.items() - get items (key-value pairs)
dict.keys() - get keys
dict.values() - get values
dict.pop() - remove item by key
dict.popitem() - remove an item and return it
dict.update() - update dictionary
"""
mydict = {}
mydict = mydict.fromkeys([1,2,3,4])
print(mydict)
mydict = mydict.fromkeys([1,2,3,4],0)
print(mydict)
print(mydict.get(1))
print(mydict.get(5,'unknown'))
| [
"lada314@gmail.com"
] | lada314@gmail.com |
af08d772855a08671c2e692e3c7b06ad5fbcf4d6 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/Lazymux/routersploit/tests/exploits/routers/multi/test_misfortune_cookie.py | 8e606a9221a4791e9846362b46e42b3d6b09a037 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:6bcf1caec62792e74d178e8d2aa3078c7144e75b45a0fa53cc1c5451184f3f3e
size 695
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
d04fd4b38a21bf1a91a7f87ac2ddf9a8d4186477 | 9e3620265aee10c0772484403509fbace7259f40 | /mhw_armor_edit/ftypes/wp_dat_g.py | 44512ba612426c4b7a65e1e6cc23e28121a973e4 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | nikibobi/MHWorldData | 5d104fa886087fe121b262497686ad81e6720751 | 78b5a4dc10ef532d5bad7359ef0b098f99104782 | refs/heads/master | 2020-12-14T23:41:45.224370 | 2020-01-20T08:40:16 | 2020-01-20T08:40:16 | 234,912,823 | 0 | 0 | MIT | 2020-01-19T14:25:05 | 2020-01-19T14:25:04 | null | UTF-8 | Python | false | false | 1,023 | py | # coding: utf-8
from mhw_armor_edit.ftypes import StructFile, Struct
class WpDatGEntry(Struct):
STRUCT_SIZE = 68
id: "<I"
unk1: "<H"
base_model_id: "<h"
part1_id: "<h"
part2_id: "<h"
color: "<B"
tree_id: "<B"
is_fixed_upgrade: "<B"
muzzle_type: "<B"
barrel_type: "<B"
magazine_type: "<B"
scope_type: "<B"
crafting_cost: "<I"
rarity: "<B"
raw_damage: "<H"
defense: "<H"
affinity: "<b"
element_id: "<B"
element_damage: "<H"
hidden_element_id: "<B"
hidden_element_damage: "<H"
elderseal: "<B"
shell_table_id: "<H"
deviation: "<B"
num_gem_slots: "<B"
gem_slot1_lvl: "<B"
gem_slot2_lvl: "<B"
gem_slot3_lvl: "<B"
unk2: "<I"
unk3: "<I"
unk4: "<I"
unk5: "<B"
special_ammo_type: "<B"
tree_position: "<B"
order: "<H"
gmd_name_index: "<H"
gmd_description_index: "<H"
skill_id: "<H"
unk6: "<H"
class WpDatG(StructFile):
EntryFactory = WpDatGEntry
MAGIC = 0x01B1
| [
"cfern1990@gmail.com"
] | cfern1990@gmail.com |
469c0b09e5b32a0e9eaee51ec4926608ce93ff46 | 7b85779e7cec84604315ffe3929e325b32ccd9b0 | /Python设计模式/singleton/with_decorator_and_param.py | 0c69281b9cafa823ebbb9e1e9c6800242f191deb | [] | no_license | clara123clara/test_auto | d27fa78b8d7f1f917402d4c3336ef003ceeaf4d5 | 26cf9ab7428a8895450e94bbae894aeb4462358f | refs/heads/master | 2023-07-09T19:56:05.248972 | 2021-08-18T02:44:14 | 2021-08-18T02:44:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | def singleton(cls):
"""
定义单例的装饰器(闭包)
:param cls:
:return:
"""
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
@singleton
class Singleton(object):
"""单例实例"""
def __init__(self, arg1):
self.arg1 = arg1
if __name__ == '__main__':
instance1 = Singleton("xag")
instance2 = Singleton("xingag")
print(id(instance1))
print(id(instance2))
| [
"xinganguo@gmail.com"
] | xinganguo@gmail.com |
f69f660717b475fe52eb6697bf4346389ecffd0b | 759de7cc7c264b165cc481d277eaf46235470bc6 | /database/async_saver.py | a14ca2cc66d956e8aec3bb7fcfa0ad010ac9375b | [] | no_license | RuzzyRullezz/insta_project | cdc743bcad8c95bbbec589d202aaa4e4bcc44857 | 3c29e5a505abaec9b468817117da1f0222fb2b49 | refs/heads/main | 2021-06-27T01:01:02.532268 | 2020-10-28T09:45:25 | 2020-10-28T09:45:25 | 174,750,569 | 1 | 0 | null | 2020-06-05T20:05:13 | 2019-03-09T21:56:57 | Python | UTF-8 | Python | false | false | 721 | py | import json
from django.db import IntegrityError
from mq_consumer.consumers import Consumer
from utils.mq import get_connector
class DBSaver(Consumer):
def __init__(self):
from database import models
queue = 'db_save'
self.model_attr = 'model'
self.models = models
super().__init__(get_connector(queue), self.handle)
def handle(self, channel, method, properties, body):
data = json.loads(body)
model = data.pop(self.model_attr)
model_cls = getattr(self.models, model)
obj = model_cls(**data)
try:
obj.save()
except IntegrityError:
pass
channel.basic_ack(delivery_tag=method.delivery_tag)
| [
"rgilfanov@fix.ru"
] | rgilfanov@fix.ru |
4b1685e4d87b983cc9dd2aa6bb969e8ca01d7711 | a873f3cd46a10ad879fc56d78e1f533d8bf486c0 | /spider/阶段11-爬虫开发/代码以及其他/06.mongodb数据库/code/1.pymongo_test.py | ee716a5ce8e5881760249665d2d051612ead4a44 | [] | no_license | shenhaiyu0923/resful | d0301b39363e6b3d3659f62fa4a9b2532ebcd225 | 1e66cae7d68fa231794776953cc1a5e999bf36c6 | refs/heads/master | 2021-07-08T20:46:57.300298 | 2021-06-01T08:17:27 | 2021-06-01T08:17:27 | 244,308,016 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | #coding:utf-8
from pymongo import MongoClient
# 创建数据库链接对象
client = MongoClient('172.16.123.223', 27017)
# 选择一个数据库
db = client['admin']
db.authenticate('python','python')
# 选择一个集合
col = client['pydata']['test']
# col.insert({"class":"python37"})
# col.insert([{"class":"python38"},{"class":"python39"},{"class":"python40"}])
for data in col.find():
print(data)
# print(col.find_one())
print("*"*50)
# 全文档覆盖更新
# col.update({"class":"python40"},{"message":"helloworld"})
# col.update({},{"$set":{"id":"xxxx-xxxx"}})
# col.update({}, {"$set": {"id": "xxxx-xxxx"}}, multi=True)
# col.update({"message":"hello world"}, {"$set": {"id": "xxxx-xxx2"}}, upsert=True)
# col.delete_one({"message":"helloworld"})
col.delete_many({"id": "xxxx-xxxx"})
for data in col.find():
print(data)
| [
"1802161998@qq.com"
] | 1802161998@qq.com |
e7817dd212cdd36df40dd836c8d9c61472074615 | 7def8c4abacc5c596358467c90afdc8dbd677c02 | /SWEA/swea_4012_chef.py | 2de21e8079a7670e820becb96d956a34785627b2 | [] | no_license | liza0525/algorithm-study | 0b2e41a29e6f263c1906a90771f9c932008b84d2 | 906e817ba1d033b2e6cfad4b64bb9906d0fe03b7 | refs/heads/master | 2023-08-18T11:08:31.238163 | 2021-09-06T14:31:50 | 2021-09-06T14:31:50 | 208,087,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | def score(arr):
s = 0
for i in range(len(arr)-1):
for j in range(i+1, len(arr)):
s += table[arr[i]][arr[j]] + table[arr[j]][arr[i]]
return s
def combi(arr, d, next):
global group1,group2, res
if d == int(N/2):
group1 = arr[:]
group2 = list(set(food) - set(arr))
temp_res = abs(score(group1) - score(group2))
if temp_res < res:
res = temp_res
else:
for i in range(next, N):
temp = arr[:]
temp.append(i)
combi(temp, d+1, i+1)
temp.pop()
for test in range(int(input())):
N = int(input())
table = [list(map(int, input().split())) for _ in range(N)]
food = [i for i in range(N)]
group1, group2 = [], []
res = 987654321
combi([], 0, 0)
print('#{} {}'.format(test+1, res)) | [
"double.y.0525@gmail.com"
] | double.y.0525@gmail.com |
d824969c11f9acd62838ef00fe7b652c4b39d466 | f2658c4bd7f833ace25ac2b63e88317b05f4602d | /2017 July/2017-July-11/st_rdf_test/model2/RelationsAdmin.py | 789db15c499db32fbdf401d7303f8bbb4316d67f | [] | no_license | xiaochao00/telanav_diary | e4c34ac0a14b65e4930e32012cc2202ff4ed91e2 | 3c583695e2880322483f526c98217c04286af9b2 | refs/heads/master | 2022-01-06T19:42:55.504845 | 2019-05-17T03:11:46 | 2019-05-17T03:11:46 | 108,958,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,148 | py | #-------------------------------------------------------------------------------
# Name: RelationsAdmin model
# Purpose: this model is used to mapping the
# columns: [ ]
#
# Author: rex
#
# Created: 10/12/2015
# Copyright: (c) rex 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
from record import Record
from record import CSV_SEP
from constants import *
import os
import sys
import datetime
import json
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..")
GLOBAL_KEY_PREFIX = "relations_admin_"
#CSV_SEP = '`'
LF = '\n'
#(key, category, function)
STATISTIC_KEYS = (("type",False,"type"),
("admin_order",False,"admin_order"),
("iso", True, "iso"),
("admin_level", True, "admin_level"),
("admin_type", True, "admin_type"),
("timezone", False, "timezone"),
("dst_observed", True, "dst_observed"),
("dst_start_day", False, "dst_start_day"),
("dst_start_weekday", False, "dst_start_weekday"),
("dst_start_month", False, "dst_start_month"),
("dst_start_time", False, "dst_start_time"),
("dst_end_day", False, "dst_end_day"),
("dst_end_weekday", False, "dst_end_weekday"),
("dst_end_month", False, "dst_end_month"),
("dst_end_time", False, "dst_end_time"))
class RelationsAdmin(Record):
def __init__(self, region):
Record.__init__(self)
self.dump_file = os.path.join(ROOT_DIR, "temporary", self.__class__.__name__)
self.stat = {}
self.region = region
def dump2file(self):
cmd = "SELECT \
DISTINCT(rah.admin_place_id), \
rah.admin_order, \
rah.iso_country_code, \
rap.admin_type, \
rap.time_zone, \
rad.dst_observed, \
rad.dst_start_day, \
rad.dst_start_weekday, \
rad.dst_start_month, \
rad.dst_start_time, \
rad.dst_end_day, \
rad.dst_end_weekday, \
rad.dst_end_month, \
rad.dst_end_time \
FROM \
public.rdf_admin_place AS rap LEFT JOIN public.rdf_admin_hierarchy AS rah ON rap.admin_place_id=rah.admin_place_id \
LEFT JOIN public.rdf_admin_dst AS rad ON rad.dst_id = rap.dst_id \
WHERE rah.iso_country_code IN (%s)"%(REGION_COUNTRY_CODES(self.region, GLOBAL_KEY_PREFIX))
print cmd
self.cursor.copy_expert("COPY (%s) TO STDOUT DELIMITER '%s'"%(cmd, CSV_SEP),open(self.dump_file,"w"))
def get_statistic(self):
try:
self.dump2file()
except:
print "Some table or schema don't exist! Please check the upper sql"
return {}
processcount = 0
with open(self.dump_file, "r",1024*1024*1024) as csv_f:
for line in csv_f:
line = line.rstrip()
#line_p = line.split(CSV_SEP)
line_p = Record.split(line)
if len(line_p) < 1:
continue
self.__statistic(line_p)
processcount += 1
if processcount%5000 == 0:
print "\rProcess index [ "+str(processcount)+" ]",
print "\rProcess index [ "+str(processcount)+" ]",
# write to file
with open(os.path.join(ROOT_DIR, "output", "stat", self.__class__.__name__), 'w') as stf:
stf.write(json.dumps(self.stat))
return self.stat
def __statistic(self,line):
for keys in STATISTIC_KEYS:
try:
getattr(self,'_RelationsAdmin__get_'+keys[2])(keys,line)
except:
print "The statistic [ %s ] didn't exist"%(keys[2])
print ("Unexpected error:[ RelationsAdmin.py->__statistic] "+str(sys.exc_info()))
def __count(self,key):
if self.stat.has_key(key):
self.stat[key] += 1
else:
self.stat[key] = 1
# all statistic method
def __get_type(self,keys,line):
if '\N' != line[0]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_admin_order(self,keys,line):
if '\N' != line[1]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_iso(self,keys,line):
if '\N' != line[2]:
self.__count("%s%s%s"%(GLOBAL_KEY_PREFIX,keys[0],keys[1] and "#%s"%(line[2]) or ""))
def __get_admin_level(self,keys,line):
pass
def __get_admin_type(self,keys,line):
if '\N' != line[3]:
self.__count("%s%s%s"%(GLOBAL_KEY_PREFIX,keys[0],keys[1] and "#%s"%(line[3]) or ""))
def __get_timezone(self,keys,line):
if '\N' != line[4]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_observed(self,keys,line):
if 'Y' == line[5]:
self.__count("%s%s%s"%(GLOBAL_KEY_PREFIX,keys[0],keys[1] and "#%s"%('yes') or ""))
def __get_dst_start_day(self,keys,line):
if '\N' != line[6]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_start_weekday(self,keys,line):
if '\N' != line[7]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_start_month(self,keys,line):
if '\N' != line[8]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_start_time(self,keys,line):
if '\N' != line[9]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_end_day(self,keys,line):
if '\N' != line[10]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_end_weekday(self,keys,line):
if '\N' != line[11]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_end_month(self,keys,line):
if '\N' != line[12]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
def __get_dst_end_time(self,keys,line):
if '\N' != line[13]:
self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
if __name__ == "__main__":
# use to test this model
bg = datetime.datetime.now()
stat = RelationsAdmin('na').get_statistic()
keys = stat.keys()
print "==>"
print "{%s}"%(",".join(map(lambda px: "\"%s\":%s"%(px,stat[px]) ,keys)))
print "<=="
ed = datetime.datetime.now()
print "Cost time:"+str(ed - bg)
| [
"1363180272@qq.com"
] | 1363180272@qq.com |
f9b5d946001dbc2a7b0e167bab23eec454bf4d50 | 7ddb110792c8242acd2c1a8042caf62a586dd3f5 | /OnlineClustering/main_fast.py | 2b9d60f523f3721b1ffdafe8c65c7ce0bfe78205 | [] | no_license | rashadulrakib/short-text-stream-clustering | 43744dd5761ca102d576d90f487c1c5b63e75c6a | f7600a3501064000ddfd849653c7b36f5cc742f7 | refs/heads/master | 2021-07-03T09:05:29.161305 | 2020-12-17T01:23:31 | 2020-12-17T01:23:31 | 320,949,303 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,444 | py | import os
from datetime import datetime
from general_util import readlistWholeJsonDataSet
from evaluation import Evaluate_old
from read_pred_true_text import ReadPredTrueText
from clustering_term_online_fast import cluster_biterm
from word_vec_extractor import extractAllWordVecsPartialStemming
ignoreMinusOne=True
isSemantic=False
dataDir = "data/"
outputPath = "result/"
dataset='News-T' # 'stackoverflow_javascript' 'stackoverflow_java' 'stackoverflow_python' 'stackoverflow_csharp' 'stackoverflow_php' 'stackoverflow_android' 'stackoverflow_jquery' 'stackoverflow_r' 'stackoverflow_java' # 'stackoverflow_java' 'stackoverflow_cplus' 'stackoverflow_mysql' 'stackoverflow_large_tweets-T_news-T_suff' 'stackoverflow_large_tweets-T' #'News-T' 'NT-mstream-long1' #'Tweets-T' # 'stackoverflow_large' 'stackoverflow_large_tweets-T'
inputfile = dataDir+dataset
resultFile=outputPath+'personal_cluster_biterm.txt'
#list_pred_true_words_index_postid=readStackOverflowDataSet(inputfile)
list_pred_true_words_index=readlistWholeJsonDataSet(inputfile)
print(len(list_pred_true_words_index))
all_words=[]
for item in list_pred_true_words_index:
all_words.extend(item[2])
all_words=list(set(all_words))
gloveFile = "glove.6B.50d.txt"
embedDim=50
wordVectorsDic={}
if isSemantic==True:
wordVectorsDic=extractAllWordVecsPartialStemming(gloveFile, embedDim, all_words)
if os.path.exists(resultFile):
os.remove(resultFile)
c_bitermsFreqs={}
c_totalBiterms={}
c_wordsFreqs={}
c_totalWords={}
c_txtIds={}
c_clusterVecs={}
txtId_txt={}
last_txtId=0
max_c_id=0
dic_clus__id={}
dic_biterm__clusterId_Freq={}
dic_biterm__allClusterFreq={}
dic_biterm__clusterIds={}
f = open(resultFile, 'w')
t11=datetime.now()
c_bitermsFreqs, c_totalBiterms, c_wordsFreqs, c_totalWords, c_txtIds, c_clusterVecs, txtId_txt, last_txtId, dic_clus__id, dic_biterm__clusterId_Freq, dic_biterm__allClusterFreq, dic_biterm__clusterIds=cluster_biterm(f, list_pred_true_words_index, c_bitermsFreqs, c_totalBiterms, c_wordsFreqs, c_totalWords, c_txtIds, c_clusterVecs, txtId_txt, last_txtId, max_c_id, wordVectorsDic, dic_clus__id, dic_biterm__clusterId_Freq, dic_biterm__allClusterFreq, dic_biterm__clusterIds)
t12=datetime.now()
t_diff = t12-t11
print("total time diff secs=",t_diff.seconds)
f.close()
listtuple_pred_true_text=ReadPredTrueText(resultFile, ignoreMinusOne)
print('result for', inputfile)
Evaluate_old(listtuple_pred_true_text) | [
"rashadul.rakib@gmail.com"
] | rashadul.rakib@gmail.com |
7b462f75e7c76807b69e821d3c3f4330b2d1ff28 | be30e4f6bdd9e9e9ec1fc56d6c083fc4ebcf0b23 | /urls/client.py | 6991212984d87225f3864f4a2a7ef17a9ef94cd2 | [] | no_license | Cronopioss/braavos | 2ecc404ce4efdc29434ca9c1ebbe385e86da4f68 | 32ebd07177c06a5e6bec4a69cd1abde2a5faf64b | refs/heads/master | 2021-06-07T12:17:19.647469 | 2016-07-28T07:07:12 | 2016-07-28T07:07:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from controllers.client import client_bp
def client_register_blueprint(app):
app.register_blueprint(client_bp, url_prefix='/clients') | [
"guoyu@inad.com"
] | guoyu@inad.com |
a8b3047f947b583472067a2401b98ff1708113fb | 2b5dfacdb7389aefff64c67fac863e3f82d3723e | /source/tygame-sdk/src/tysdk/entity/pay4/payment/payv4_wandoujiadanji.py | dc33aaaa616e37b8b338639cfd8b5dbdbf2c1dad | [] | no_license | hi-noikiy/hall0 | 54ef76c715f7ac7fec4c9ca175817e12f60fbd6a | 21ea94c5b048bc611fb1557ac0b6e3ef4fdbbc09 | refs/heads/master | 2020-04-08T21:58:55.239106 | 2018-01-15T14:58:32 | 2018-01-15T14:58:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | # -*- coding=utf-8 -*-
import json
from payv4_helper import PayHelperV4
from tyframework.context import TyContext
from tysdk.entity.pay.rsacrypto import rsaVerify
from tysdk.entity.pay4.charge_model import ChargeModel
from tysdk.entity.pay4.decorator.payv4_callback import payv4_callback
from tysdk.entity.pay4.decorator.payv4_order import payv4_order
from tysdk.entity.pay4.payment.payv4_base import PayBaseV4
class TuYouPayWandoujiadanji(PayBaseV4):
@payv4_order("wandoujiadanji")
def charge_data(cls, mi):
charge_info = cls.get_charge_info(mi)
return cls.return_mo(0, chargeInfo=charge_info)
@payv4_callback("/open/ve/pay/wandoujiadanji/callback")
def doCallback(cls, rpath):
rparams = TyContext.RunHttp.convertArgsToDict()
content = rparams['content']
content_json = json.loads(content)
orderId = content_json['out_trade_no']
if not cls.verify_sign(rparams):
TyContext.ftlog.info('TuYouPayWandoujiadanji->sign verify ERROR')
return "fail"
total_fee = int(float(content_json['money']))
total_fee = int(total_fee / 100)
ChargeModel.save_third_pay_order_id(orderId, content_json.get('orderId', ''))
is_ok = PayHelperV4.callback_ok(orderId, total_fee, rparams)
if is_ok:
return 'success'
else:
return 'fail'
@classmethod
def verify_sign(cls, rparams):
sign = rparams['sign']
data = rparams['content']
# wandoujiadanji跟wandoujia使用的是同一个公钥
if rsaVerify(data, sign, 'wandoujia'):
return True
return False
| [
"cg@ibenxi.com"
] | cg@ibenxi.com |
63154825c27e52db1c81a916e178d71201a7bb5a | b59f66a9c4b5492b95c767b7ca76cd026f6f572a | /aac/transforms/pad.py | df4df193638df824329e201a743c55d1d9400c40 | [] | no_license | Labbeti/dcase2021task6 | b50f51370af15c241bd9f257920e2df4bc925669 | 2e792749bd9b2a495fa4b870f6190f6fb389fc56 | refs/heads/main | 2023-06-11T07:10:50.179348 | 2021-07-05T09:28:11 | 2021-07-05T09:28:11 | 377,414,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,562 | py |
import random
import torch
from torch import Tensor
from torch.nn import Module
from torch.nn.functional import pad
class Pad(Module):
def __init__(
self,
target_length: int,
align: str = 'left',
fill_value: float = 0.0,
dim: int = -1,
mode: str = 'constant',
p: float = 1.0,
):
"""
Example :
>>> import torch; from torch import tensor
>>> x = torch.ones(6)
>>> zero_pad = Pad(10, align='left')
>>> x_pad = zero_pad(x)
... tensor([1, 1, 1, 1, 1, 1, 0, 0, 0, 0])
:param target_length: The target length of the dimension.
:param align: The alignment type. Can be 'left', 'right', 'center' or 'random'. (default: 'left')
:param fill_value: The fill value used for constant padding. (default: 0.0)
:param dim: The dimension to pad. (default: -1)
:param mode: The padding mode. Can be 'constant', 'reflect', 'replicate' or 'circular'. (default: 'constant')
:param p: The probability to apply the transform. (default: 1.0)
"""
super().__init__()
self.target_length = target_length
self.align = align
self.fill_value = fill_value
self.dim = dim
self.mode = mode
self.p = p
def forward(self, data: Tensor) -> Tensor:
if self.p >= 1.0 or self.p <= random.random():
return self.process(data)
else:
return data
def process(self, data: Tensor) -> Tensor:
if self.align == 'left':
return self.pad_align_left(data)
elif self.align == 'right':
return self.pad_align_right(data)
elif self.align == 'center':
return self.pad_align_center(data)
elif self.align == 'random':
return self.pad_align_random(data)
else:
raise ValueError(f'Unknown alignment "{self.align}". Must be one of {str(["left", "right", "center", "random"])}.')
def pad_align_left(self, x: Tensor) -> Tensor:
# Note: pad_seq : [pad_left_dim_-1, pad_right_dim_-1, pad_left_dim_-2, pad_right_dim_-2, ...)
idx = len(x.shape) - (self.dim % len(x.shape)) - 1
pad_seq = [0 for _ in range(len(x.shape) * 2)]
missing = max(self.target_length - x.shape[self.dim], 0)
pad_seq[idx * 2 + 1] = missing
x = pad(x, pad_seq, mode=self.mode, value=self.fill_value)
return x
def pad_align_right(self, x: Tensor) -> Tensor:
idx = len(x.shape) - (self.dim % len(x.shape)) - 1
pad_seq = [0 for _ in range(len(x.shape) * 2)]
missing = max(self.target_length - x.shape[self.dim], 0)
pad_seq[idx * 2] = missing
x = pad(x, pad_seq, mode=self.mode, value=self.fill_value)
return x
def pad_align_center(self, x: Tensor) -> Tensor:
idx = len(x.shape) - (self.dim % len(x.shape)) - 1
pad_seq = [0 for _ in range(len(x.shape) * 2)]
missing = max(self.target_length - x.shape[self.dim], 0)
missing_left = missing // 2 + missing % 2
missing_right = missing // 2
pad_seq[idx * 2] = missing_left
pad_seq[idx * 2 + 1] = missing_right
x = pad(x, pad_seq, mode=self.mode, value=self.fill_value)
return x
def pad_align_random(self, x: Tensor) -> Tensor:
idx = len(x.shape) - (self.dim % len(x.shape)) - 1
pad_seq = [0 for _ in range(len(x.shape) * 2)]
missing = max(self.target_length - x.shape[self.dim], 0)
missing_left = torch.randint(low=0, high=missing + 1, size=()).item()
missing_right = missing - missing_left
pad_seq[idx * 2] = missing_left
pad_seq[idx * 2 + 1] = missing_right
x = pad(x, pad_seq, mode=self.mode, value=self.fill_value)
return x
def extra_repr(self) -> str:
return (
f'target_length={self.target_length}, '
f'align={self.align}, '
f'fill_value={self.fill_value}, '
f'dim={self.dim}, '
f'mode={self.mode}'
)
| [
"etienne.labbe31@gmail.com"
] | etienne.labbe31@gmail.com |
bb568e0c498b71992d25b88b505eca73155e3abd | c0d28a5a52748d78563372a8cffa53cacab1847a | /django项目/MovieProject/MovieProject/wsgi.py | 111e9f0f5ef27d51c33b447d4ded1ca266301eaf | [] | no_license | fanfanstl/projects | 5f1cf83bdb1c21855c3c1b3b4904f99a08dd2808 | 0c12892e691971a55239a1c5317df77220402d5e | refs/heads/master | 2021-08-11T04:21:50.379625 | 2018-12-27T15:39:19 | 2018-12-27T15:39:19 | 148,894,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | """
WSGI config for MovieProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MovieProject.settings")
application = get_wsgi_application()
| [
"2094531487@qq.com"
] | 2094531487@qq.com |
817c70df624353a0834faa58ab16bbffc92b6df0 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /little_thing/thing/problem/next_place/big_woman_and_large_number/big_person.py | cfd590d9cafb67969b1bdf84b3de88bdc33d89f5 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py |
#! /usr/bin/env python
def know_big_work_after_way(str_arg):
have_great_world_in_way(str_arg)
print('important_group')
def have_great_world_in_way(str_arg):
print(str_arg)
if __name__ == '__main__':
know_big_work_after_way('last_work')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
73bec882ceac5d897c1bcee5afab1ea363ad254c | c31ee23b3d0c219deba0b0f462f172858df8b5ac | /chineblog/chineblog/wsgi.py | ebf3780fe13bfd1997d36b0c71cbfb80ba8c827f | [] | no_license | qinxuye/chineblog | b0afca7302c121021d0af821fa1143c42686bfa2 | 8c7df04d8cd4e3f120ef78546c3a18000909d2aa | refs/heads/master | 2021-06-04T04:48:51.404366 | 2016-10-07T14:49:02 | 2016-10-07T14:49:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """
WSGI config for chineblog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Tell Django which settings module to use before the application is built;
# setdefault keeps any value already exported in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chineblog.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| [
"xuye.qin@alibaba-inc.com"
] | xuye.qin@alibaba-inc.com |
af4ee720495d2af2515908848f9e97725a22992d | 93fc75b62e3fb6524f3891daf58772175fee781c | /夏丽平/第一次作业/第一次作业-金工17-1 -2017310413--夏丽平/夏丽平 金工17-1 2017310413/zy4.py | fe4cb5c18967dbeb5f85aedf3d715091ae3ff401 | [] | no_license | jingong171/jingong-homework | 13174a4a7b39b8ae6d5da103cbf0fb40766d59c1 | 542e8781f26676a62538714b92fb0bccdf41b47b | refs/heads/master | 2020-03-29T13:38:34.152280 | 2018-12-17T14:38:08 | 2018-12-17T14:38:08 | 149,974,131 | 8 | 11 | null | 2018-10-08T14:40:58 | 2018-09-23T10:32:35 | Python | UTF-8 | Python | false | false | 153 | py | list=[]
# Collect and print all primes in [2, 100) by trial division.
# Fixes: the original shadowed the builtin `list`, carried an unused `j = 2`
# seed, and ended `break` with a stray semicolon.
primes = []
for candidate in range(2, 100):
    for divisor in range(2, candidate):
        if candidate % divisor == 0:
            break
    else:
        # Loop finished without finding a divisor: candidate is prime.
        primes.append(candidate)
print(primes)
| [
"35986375+FrancisLau098@users.noreply.github.com"
] | 35986375+FrancisLau098@users.noreply.github.com |
826d074f96c4b666cf4a019492d4a6be84d6a780 | 5dd7eccc1314861babdb19b840c117da46b70c3f | /dispersing/kaitai_parsers/summoning_colors.py | fbd3a95c7c262a5e8c9c7fde02a36416f3c3c29e | [
"BSD-3-Clause"
] | permissive | matthewturk/dispersing | fba98e06e3b3a97ce819f09d485310268bbfc38b | e368e21bb7b42035b1b28f38727f4e0f880fec0b | refs/heads/main | 2023-08-05T00:22:06.065355 | 2023-07-02T20:14:13 | 2023-07-02T20:14:13 | 226,524,950 | 1 | 1 | NOASSERTION | 2023-08-02T02:05:13 | 2019-12-07T14:14:56 | Python | UTF-8 | Python | false | false | 3,272 | py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class SummoningColors(KaitaiStruct):
    """Kaitai Struct parser for a Summoning palette file.

    Stream layout: one u1 palette size (``ncolors``) followed by
    back-to-back palettes of ``ncolors`` 3-byte RGB entries until the
    stream is exhausted.  NOTE: generated code -- regenerate from the
    .ksy source instead of editing this file by hand.
    """
    SEQ_FIELDS = ["ncolors", "palettes"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
        self._read()
    def _read(self):
        self._debug['ncolors']['start'] = self._io.pos()
        self.ncolors = self._io.read_u1()
        self._debug['ncolors']['end'] = self._io.pos()
        self._debug['palettes']['start'] = self._io.pos()
        # Palette count = remaining bytes / (ncolors * 3 bytes per RGB entry).
        self.palettes = [None] * ((self._root._io.size() - 1) // (self.ncolors * 3))
        for i in range((self._root._io.size() - 1) // (self.ncolors * 3)):
            if not 'arr' in self._debug['palettes']:
                self._debug['palettes']['arr'] = []
            self._debug['palettes']['arr'].append({'start': self._io.pos()})
            self.palettes[i] = SummoningColors.Palette(self._io, self, self._root)
            self._debug['palettes']['arr'][i]['end'] = self._io.pos()
        self._debug['palettes']['end'] = self._io.pos()
    class Palette(KaitaiStruct):
        """A run of ``_root.ncolors`` RGB triples."""
        SEQ_FIELDS = ["colors"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
            self._read()
        def _read(self):
            self._debug['colors']['start'] = self._io.pos()
            self.colors = [None] * (self._root.ncolors)
            for i in range(self._root.ncolors):
                if not 'arr' in self._debug['colors']:
                    self._debug['colors']['arr'] = []
                self._debug['colors']['arr'].append({'start': self._io.pos()})
                self.colors[i] = SummoningColors.Rgb(self._io, self, self._root)
                self._debug['colors']['arr'][i]['end'] = self._io.pos()
            self._debug['colors']['end'] = self._io.pos()
    class Rgb(KaitaiStruct):
        """One color: three consecutive u1 channel values (red, green, blue)."""
        SEQ_FIELDS = ["red", "green", "blue"]
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self._debug = collections.defaultdict(dict)
            self._read()
        def _read(self):
            self._debug['red']['start'] = self._io.pos()
            self.red = self._io.read_u1()
            self._debug['red']['end'] = self._io.pos()
            self._debug['green']['start'] = self._io.pos()
            self.green = self._io.read_u1()
            self._debug['green']['end'] = self._io.pos()
            self._debug['blue']['start'] = self._io.pos()
            self.blue = self._io.read_u1()
            self._debug['blue']['end'] = self._io.pos()
| [
"matthewturk@gmail.com"
] | matthewturk@gmail.com |
68060140507db7822a5adeded2f77f1d002209a9 | a8750439f200e4efc11715df797489f30e9828c6 | /CodeForces/stack_sorting.py | 3a9343d2622a99a3a07cf0a0a9e909d06efc4016 | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | '''
Let's suppose you have an array a, a stack s (initially empty) and an array b (also initially empty).
You may perform the following operations until both a and s are empty:
Take the first element of a, push it into s and remove it from a (if a is not empty);
Take the top element from s, append it to the end of array b and remove it from s (if s is not empty).
You can perform these operations in arbitrary order.
If there exists a way to perform the operations such that array b is sorted in non-descending order in the end,
then array a is called stack-sortable.
For example, [3, 1, 2] is stack-sortable, because b will be sorted if we perform the following operations:
Remove 3 from a and push it into s;
Remove 1 from a and push it into s;
Remove 1 from s and append it to the end of b;
Remove 2 from a and push it into s;
Remove 2 from s and append it to the end of b;
Remove 3 from s and append it to the end of b.
After all these operations b = [1, 2, 3], so [3, 1, 2] is stack-sortable. [2, 3, 1] is not stack-sortable.
You are given k first elements of some permutation p of size n (recall that a permutation of size n is an array of
size n where each integer from 1 to n occurs exactly once). You have to restore the remaining n - k elements of this permutation so it is stack-sortable. If there are multiple answers, choose the answer such that p is lexicographically maximal (an array q is lexicographically greater than an array p iff there exists some integer k such that for every i < k qi = pi, and qk > pk). You may not swap or change any of first k elements of the permutation.
Print the lexicographically maximal permutation p you can obtain.
If there exists no answer then output -1.
Input
The first line contains two integers n and k (2 ≤ n ≤ 200000, 1 ≤ k < n) — the size of a desired permutation,
and the number of elements you are given, respectively.
The second line contains k integers p1, p2, ..., pk (1 ≤ pi ≤ n) — the first k elements of p.
These integers are pairwise distinct.
Output
If it is possible to restore a stack-sortable permutation p of size n such that the first k elements of p are
equal to elements given in the input, print lexicographically maximal such permutation.
Otherwise print -1.
Examples
input
5 3
3 2 1
output
3 2 1 5 4
input
5 3
2 3 1
output
-1
input
5 1
3
output
3 2 1 5 4
input
5 2
3 4
output
-1
'''
| [
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
aa74dd25d623bfc3edf85fed5ea24609da6a8f6a | 35e41b591609e17e6de4a27dfe27ac0233bd58c3 | /src/forms/unused_or_obsolete/open_meta_mac.py.bak.py.bak | c35a727cea1e4067f9f3a8b38f8ddd8cd6195fc8 | [] | no_license | doclumbri666/OpenMeta-analyst- | 9fe1449f08c99b9f703fc34c02f29522cdb8a6ad | 7ed715b5fe30ffe28d553685808c6ac988975a2b | refs/heads/master | 2020-12-25T21:33:51.153178 | 2014-01-08T20:33:37 | 2014-01-08T20:33:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | bak | #------------------------------------------------------------------------------
# open_meta_mac.py
#
# this is an initscript used by cx_freeze for our mac
# distribution. we set the R_HOME variable via the sys
# library before starting up meta_form. to use this script
# just point cx_freeze to it via the initscript argument,
# eg.,
# $ build/scripts-2.7/cxfreeze /Users/byronwallace/dev/OpenMeta-analyst-/meta_form.py \
# --init-script=/Users/byronwallace/dev/OpenMeta-analyst-/open_meta_mac.py
#
# note that we also set the path to
#------------------------------------------------------------------------------
import os
import sys
import zipimport
import pdb
# Runs as a cx_freeze initscript (Python 2): per the file header, DIR_NAME
# and INITSCRIPT_ZIP_FILE_NAME are injected into the globals by cx_freeze's
# bootstrap before this code executes.
print "\n\nR.I.P. Steve Jobs. \n\nI'm setting your R path temporarily (this console only).\n\ns"
# issue #160 - setting path to dynamic libraries
paths = os.environ.get("DYLD_LIBRARY_PATH", "").split(os.pathsep)
if DIR_NAME not in paths:
    # Prepend the frozen app dir and re-exec so the dynamic loader sees it.
    paths.insert(0, DIR_NAME)
    os.environ["DYLD_LIBRARY_PATH"] = os.pathsep.join(paths)
    os.execv(sys.executable, sys.argv)
else:
    # DYLD path already set (post re-exec): ensure the Linux-style
    # LD_LIBRARY_PATH carries the same entry, re-exec again if needed.
    paths = os.environ.get("LD_LIBRARY_PATH", "").split(os.pathsep)
    if DIR_NAME not in paths:
        paths.insert(0, DIR_NAME)
        os.environ["LD_LIBRARY_PATH"] = os.pathsep.join(paths)
        os.execv(sys.executable, sys.argv)
#os.environ["DYLD_LIBRARY_PATH"] = DIR_NAME
#os.execv(sys.executable, sys.argv)
print "dynamic library path set...I think?"
#pdb.set_trace()
# Point R at the distribution bundled inside the frozen app.
os.environ["R_HOME"] = os.path.join(DIR_NAME, "R_dist", "2.10", "Resources")
sys.frozen = True
sys.path = sys.path[:4]
# *now* we can import meta_form... cross your fingers.
#import meta_form
#meta_form.start()
print "\n\nok...?\n\n"
# Hand off to the real entry point: load __main__'s code object from the
# initscript zip and execute it in __main__'s namespace (Python 2 `exec`).
m = __import__("__main__")
importer = zipimport.zipimporter(INITSCRIPT_ZIP_FILE_NAME)
code = importer.get_code(m.__name__)
exec code in m.__dict__
# Explicitly shut threading down on CPython 2.5.0 - 2.6.4, where interpreter
# teardown needs the manual call.
versionInfo = sys.version_info[:3]
if versionInfo >= (2, 5, 0) and versionInfo <= (2, 6, 4):
    module = sys.modules.get("threading")
    if module is not None:
        module._shutdown()
| [
"byron.wallace@gmail.com"
] | byron.wallace@gmail.com |
0019c9d8911a07b945a54c9471c43fe8d4cc2941 | 16136f6f9578358ad6ff00101831978d20a43926 | /bhch13/bhch13exrc15.py | 7dc7b1bef78419ceceac1a59a36181c5d77bee30 | [] | no_license | Yaachaka/pyPractice1 | 567c0f8e62cb4f6bff66f1f50672a2ffbc57eeee | fcd4deda3d1094c91ef228b36dfb6124cfa86a8b | refs/heads/main | 2023-06-15T17:14:59.697340 | 2021-07-07T05:01:20 | 2021-07-07T05:01:20 | 331,349,117 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | """
bhch13exrc15.py: Write a function called root that is given a number x and an integer n and returns x^(1/n). In the function definition, set the default value of n to 2.
"""
print('*'*80)
def root(x, n=2):
    """Return the n-th root of x, computed as x ** (1 / n).

    Args:
        x: the number whose root is taken.
        n: the degree of the root; defaults to 2 (square root).

    Returns:
        A float approximation of the n-th root of x.
    """
    return x ** (1 / n)
# NOTE(review): eval() on raw user input executes arbitrary code -- tolerable
# only in a classroom exercise; prefer float(input(...)) in real code.
x = eval(input('Enter value of x: '))
n = eval(input('Enter value of n: '))
# Demonstrate the default degree (square root) versus an explicit n.
print('The solution upon passing only x: {:}.'.format(root(x)))
print('The solution upon passing both x and n: {:}.'.format(root(x, n)))
print('*'*80)
"""PROGRAM OUTPUT
********************************************************************************
Enter value of x: 5
Enter value of n: 3
The solution upon passing only x: 2.23606797749979.
The solution upon passing both x and n: 1.7099759466766968.
********************************************************************************
""" | [
"rosaarjuna@gmail.com"
] | rosaarjuna@gmail.com |
3ee509e0d421e69dd1400611c08ff74fb32c9166 | 0005e05b9d8b8ad0d3c3c0539b2ded9db6e9f1dd | /test/test_inline_response_200_22_result.py | fb5ddc58fea76b357717bc0b200a0de81c7004cb | [] | no_license | termicoder/codechef-client-lib | a3e3de2b300355c5daa5ed3fad03a9859af13d86 | 74d6b21787c75a987e3451751f5554e4cc6cf469 | refs/heads/master | 2020-03-27T17:58:45.298121 | 2018-09-30T18:03:14 | 2018-09-30T18:03:14 | 146,889,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | # coding: utf-8
"""
CodeChef API
CodeChef API to support different applications. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import codechef_client
from codechef_client.models.inline_response_200_22_result import InlineResponse20022Result # noqa: E501
from codechef_client.rest import ApiException
class TestInlineResponse20022Result(unittest.TestCase):
    """Stubbed unit tests for the InlineResponse20022Result model."""

    def setUp(self):
        """No per-test fixtures are required yet."""

    def tearDown(self):
        """No per-test cleanup is required yet."""

    def testInlineResponse20022Result(self):
        """Placeholder test for constructing InlineResponse20022Result."""
        # FIXME: construct object with mandatory attributes with example values
        # model = codechef_client.models.inline_response_200_22_result.InlineResponse20022Result()  # noqa: E501
if __name__ == '__main__':
unittest.main()
| [
"diveshuttamchandani@gmail.com"
] | diveshuttamchandani@gmail.com |
64589a084642ed05b9d147b11ed69dfa36eb6f9b | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /apigatewayv2_write_3/api-mapping_update.py | 96fa1132d9b4a58195e8e476f296d2f2e2c06bb6 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/update-api-mapping.html
if __name__ == '__main__':
    # Related single-command wrappers for the same API resource (reference only):
    """
    create-api-mapping : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/create-api-mapping.html
    delete-api-mapping : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/delete-api-mapping.html
    get-api-mapping : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/get-api-mapping.html
    get-api-mappings : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/apigatewayv2/get-api-mappings.html
    """
    # Description of the three required values; presumably displayed to the
    # operator by write_three_parameter (helper not visible here -- confirm).
    parameter_display_string = """
    # api-id : The API identifier.
    # api-mapping-id : The API mapping identifier.
    # domain-name : The domain name.
    """
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    write_three_parameter("apigatewayv2", "update-api-mapping", "api-id", "api-mapping-id", "domain-name", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
be7b2fc4819c966c4e8aeae9a2b7673d0262e9a6 | 06cf972369c30da9d98b296bcbc26a826aa98126 | /aloisioimoveis/locations/tests/views/test_view_cities.py | 38198318512150fe06089a2c2fac463e84bfffb1 | [] | no_license | thiagorossener/aloisioimoveis | 2597422af6ac058ed3b8aa6e58f0f8913488a7fe | f9d974440f9a8cc875da8a1d4a5c885429563c1b | refs/heads/master | 2021-06-16T23:02:11.193518 | 2021-02-01T14:17:10 | 2021-02-01T14:17:10 | 94,144,023 | 18 | 17 | null | 2021-06-10T20:35:48 | 2017-06-12T21:55:18 | JavaScript | UTF-8 | Python | false | false | 787 | py | from django.test import TestCase
from model_mommy import mommy
from aloisioimoveis.locations.models import City
class CitiesViewTest(TestCase):
    """Tests for the /api/locations/cities endpoint."""

    def test_get(self):
        """GET /api/locations/cities should return status 200"""
        response = self.client.get('/api/locations/cities')
        self.assertEqual(200, response.status_code)

    def test_json(self):
        """GET /api/locations/cities should return json with all cities"""
        # Persist two City rows; the expected ids 1 and 2 assume a fresh
        # per-test database with sequential primary keys.
        mommy.make(City, name='Taubaté')
        mommy.make(City, name='Tremembé')
        response = self.client.get('/api/locations/cities')
        self.assertJSONEqual(str(response.content, encoding='utf8'),
                             [{'id': 1, 'name': 'Taubaté'},
                              {'id': 2, 'name': 'Tremembé'}])
| [
"thiago.rossener@gmail.com"
] | thiago.rossener@gmail.com |
1058a4ceb4d0151caf6da4ca219bf6f9660dec47 | c2d3c6d5fe759f8b582ad9f3adba0c9889be7299 | /modules/demo/test_module/module.py | d48f6a87b880f959c0163d38d4e4ae233ade2ae9 | [
"Apache-2.0"
] | permissive | nepeplwu/HubModule | 592e272df32797730cec0afdbe8537359bae44cc | 590b6e617038cbdf3851de8c12cc43e44cfffe59 | refs/heads/master | 2021-01-07T00:46:50.620074 | 2020-10-19T06:41:30 | 2020-10-19T06:41:30 | 241,528,583 | 2 | 4 | Apache-2.0 | 2020-04-22T12:58:33 | 2020-02-19T04:00:21 | Python | UTF-8 | Python | false | false | 236 | py | from paddlehub.module.module import moduleinfo
@moduleinfo(
    name='test_module',
    version='1.0.0'
)
class TestModule:
    """Minimal demo PaddleHub module registered via @moduleinfo."""
    def __init__(self):
        print('This is a test module.')
    def echo(self, text):
        # Write the given text to stdout unchanged.
        print(text)
| [
"wuzewu@baidu.com"
] | wuzewu@baidu.com |
ce299ac7109455025df8ba76b1decea26d789703 | 0f3146f6e44e43048dc030a6ad44def9201dbd29 | /src/basket/highscore/models.py | 4256326506b4ef5ccd57752527aa60814f4ec122 | [] | no_license | socek/basket | 30c7c4be753006a33b997c17cf6348a32b420cd6 | 30ba79a35f63fd1cf4a4cdaf4b3d21b063cfc1b6 | refs/heads/master | 2016-09-10T18:40:40.334233 | 2015-03-25T21:29:00 | 2015-03-25T21:29:24 | 30,159,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from haplugin.sql import Base
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
class HighScore(Base):
    """ORM row of the ``highscores`` table: one scored entry for a team."""
    __tablename__ = 'highscores'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Position/ordering value of this entry (semantics defined by callers).
    index = Column(Integer, nullable=False)
    # Owning team: foreign key to teams.id plus the ORM relationship.
    team_id = Column(Integer, ForeignKey('teams.id'), nullable=False)
    team = relationship("Team")
| [
"msocek@gmail.com"
] | msocek@gmail.com |
c39d266b53a4726e2f9ccbf27b058e403f6ec001 | df20743069e3c81128438ecc8a368b1853dc8137 | /overrides/scr/Spell1089 - Curse of Impending Blades Mass.py | e0bedd89acc27b2c6e79f91edcd36f5598f7abee | [
"MIT"
] | permissive | dolio/ToEE_Mods | 3f020d82e590a63a04047912d8d76fa2212957d7 | 53aa8086b89b25d7afb3104c5d8896c8a38c89b0 | refs/heads/main | 2023-04-09T06:17:47.064224 | 2021-04-29T09:41:58 | 2021-04-29T09:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | from toee import *
def OnBeginSpellCast(spell):
    """Debug trace when casting begins; the particle call is left disabled."""
    print "Curse of Impending Blades Mass OnBeginSpellCast"
    print "spell.target_list=", spell.target_list
    print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
    #game.particles("sp-enchantment-conjure",spell.caster )
def OnSpellEffect(spell):
    """Apply the curse to every non-friendly target of the spell.

    Friendly targets are collected and removed from the target list
    untouched; each enemy gets floating text, a history-log entry, the
    'sp-Curse of Impending Blades' condition for 10 rounds per caster
    level (1 min/CL), and a particle effect.
    """
    print "Curse of Impending Blades Mass OnSpellEffect"
    targetsToRemove = []
    spell.duration = 10 * spell.caster_level # 1 min/cl
    for spellTarget in spell.target_list:
        targetIsFriendly = spellTarget.obj.is_friendly(spell.caster)
        if targetIsFriendly: # Curse only affects enemies
            targetsToRemove.append(spellTarget.obj)
        else:
            spellTarget.obj.float_text_line("Curse of Impending Blades", tf_red)
            game.create_history_freeform(spellTarget.obj.description + " is affected by ~Curse of Impending Blades~[TAG_SPELLS_CURSE_OF_IMPENDING_BLADES]\n\n")
            spellTarget.obj.condition_add_with_args('sp-Curse of Impending Blades', spell.id, spell.duration)
            spellTarget.partsys_id = game.particles('sp-Phantasmal Killer', spellTarget.obj)
    spell.target_list.remove_list(targetsToRemove)
    # Conditions persist on their own; the spell object itself can end now.
    spell.spell_end(spell.id)
def OnBeginRound(spell):
    """Per-round hook; nothing to do for this spell (trace only)."""
    print "Curse of Impending Blades Mass OnBeginRound"
def OnEndSpellCast(spell):
    """End-of-cast hook; trace only."""
    print "Curse of Impending Blades Mass OnEndSpellCast"
"herbstgeist@googlemail.com"
] | herbstgeist@googlemail.com |
e9b649f995d933213dd1ba667e0997810ba41994 | 57300751060249be6553e6e0155f39eda8e08fe0 | /2015/Day 22/box.py | 3ed90e1aea220af6711d5d9f98fcffef91f5f54c | [] | no_license | shekeru/advent-of-code | f01a2b653173a326deed6a0ffc4f5b9cdd4635b2 | 0ab4158b1c8ced9353a88f25223abe761dddc57e | refs/heads/main | 2022-12-10T03:16:10.367596 | 2022-12-02T07:09:15 | 2022-12-02T07:09:15 | 160,104,399 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,333 | py | import copy, queue, dataclasses
# Spell Class
class Effect:
    """Base class for spells/effects in the wizard-vs-boss simulation.

    Instantiating a subclass *casts* the spell against the given World:
    mana is deducted, the World's running mana total grows, and timed
    effects (Turns > 0) register themselves in ``World.Effects`` keyed
    by their class so they cannot be re-cast while active.
    """
    def __repr__(s):
        return f"{s.__class__.__name__}: {s.Turns} Turns"
    def __init__(s, Cost, World, Turns = 0):
        # Cache references into the shared combat state.
        s.Boss, s.Turns = World.Boss, Turns
        s.Effects = World.Effects
        s.Player = World.Player
        if Turns:
            s.Effects[type(s)] = s
        # NOTE(review): mana is paid from the subclass attribute s.Cost while
        # the running total uses the Cost argument; every caller passes
        # s.Cost for both, so the two stay in agreement.
        s.Player.Mana -= s.Cost
        World.Spent += Cost
    def StartTurn(s):
        # Tick one turn off; the caller removes the effect when this hits 0.
        s.Turns -= 1
        return s.Turns
    def EndEffect(s):
        # Deregister the expired effect.
        del s.Effects[type(s)]
# Children
class Missile(Effect):
    """Magic Missile: instant, 4 damage to the boss."""
    def __init__(s, World):
        super().__init__(s.Cost, World)
        s.Boss.HP -= 4
    Cost = 53
class Drain(Effect):
    """Drain: instant, 2 damage to the boss and 2 HP healed."""
    def __init__(s, World):
        super().__init__(s.Cost, World)
        s.Player.HP += 2
        s.Boss.HP -= 2
    Cost = 73
class Shield(Effect):
    """Shield: +7 armor while active (6 turns); armor is removed on expiry."""
    def __init__(s, World):
        super().__init__(s.Cost, World, 6)
        s.Player.Armor += 7
    def EndEffect(s):
        super().EndEffect()
        s.Player.Armor -= 7
    Cost = 113
class Poison(Effect):
    """Poison: 3 damage to the boss at the start of each of 6 turns."""
    def __init__(s, World):
        super().__init__(s.Cost, World, 6)
    def StartTurn(s):
        s.Boss.HP -= 3
        return super().StartTurn()
    Cost = 173
class Recharge(Effect):
    """Recharge: +101 mana at the start of each of 5 turns."""
    def __init__(s, World):
        super().__init__(s.Cost, World, 5)
    def StartTurn(s):
        s.Player.Mana += 101
        return super().StartTurn()
    Cost = 229
# Entities
@dataclasses.dataclass
class Player:
    """Hero state: hit points, mana pool, and armor (granted by Shield)."""

    HP: int
    Mana: int
    Armor: int = 0

    def __repr__(s):
        return f"[Player] HP: {s.HP}, Mana: {s.Mana}, Armor: {s.Armor}"
@dataclasses.dataclass
class Boss:
    """Opponent state: hit points and flat per-turn attack damage."""

    HP: int
    Damage: int

    def __repr__(s):
        return f"[Boss] HP: {s.HP}, Damage: {s.Damage}"
# All concrete spell classes, discovered via subclassing (definition order).
Spells = Effect.__subclasses__()
# Compact State
class World:
    """One search node: full combat state plus search bookkeeping.

    ``Cast`` alternates between True (player half-turn: branch once per
    castable spell) and False (boss half-turn: take a hit).  ``Spent``
    is the total mana spent on this line of play; the class attribute
    ``World.Least`` (set in A_Search) prunes branches that already cost
    more than the cheapest win found so far.
    """
    def __init__(s, Player, Boss):
        s.Cast, s.Spent, s.Effects = True, 0, {}
        s.Player, s.Boss = Player, Boss
    def __repr__(s):
        return "\n".join(map(repr, [s.Player, s.Boss, s.Effects]))
    def __lt__(s, o):
        # Priority-queue ordering: favour low boss HP combined with low spend.
        return s.Boss.HP * s.Spent < o.Boss.HP * o.Spent
    def CastOptions(s):
        # Spells that are affordable and not currently active.
        return filter(lambda x: x not in s.Effects
            and x.Cost <= s.Player.Mana, Spells)
    def ExecuteTurn(s, Delta = 0):
        """Advance one half-turn.

        Delta is subtracted from the player's HP at the start of each
        player half-turn (0 for part 1, 1 for hard mode).  Returns [] if
        the player dies, an int (mana spent) if the boss dies, otherwise
        a list of successor World states to enqueue.
        """
        Copies = []
        if s.Cast:
            s.Player.HP -= Delta
            if s.Player.HP <= 0:
                return Copies
        # Tick active effects first; expired ones deregister themselves.
        for Active in (*s.Effects.values(),):
            if not Active.StartTurn():
                Active.EndEffect()
        if s.Boss.HP <= 0:
            World.Least = s.Spent
            return s.Spent
        if s.Cast:
            # Player half-turn: fork one deep copy per castable spell and
            # keep only forks cheaper than the best win so far.
            for Opt in s.CastOptions():
                Opt(Alt := copy.deepcopy(s))
                if Alt.Spent < World.Least:
                    Alt.Cast = not s.Cast
                    Copies.append(Alt)
        else:
            # Boss half-turn: attack for at least 1 damage, then flip turns.
            s.Player.HP -= max(1, s.Boss.Damage -
                s.Player.Armor); s.Cast = not s.Cast
            if s.Player.HP > 0:
                Copies.append(s)
        return Copies
# A* Like Search
def A_Search(Delta = 0):
World.Least, Q = 5000, queue.PriorityQueue()
Q.put(World(Player(50, 500), Boss(71, 10)))
while isinstance(Value := Q.get().ExecuteTurn
(Delta), list): [*map(Q.put, Value)]
return Value
# Run Problem
print("Silver:", A_Search())
print("Gold:", A_Search(1))
| [
"sheks@desu.systems"
] | sheks@desu.systems |
3a548fdfd613987b601ea37f5d41c018a5d6017f | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc018/A/answers/128595_akio0803.py | f301ad920080964bf1a66d445f2a6ae22c9224d8 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py |
T = [int(input()) for i in range(3)]
# Pair each score with its original position and sort by score, descending:
# an entry's index in this list is its 0-based rank.
T = sorted(list(enumerate(T)), key=lambda x: x[1])[::-1]
# Enumerate again to attach those ranks, then restore the original order.
T = sorted(list(enumerate(T)), key=lambda x: x[1][0])
for t in T:
    # t = (rank, (original_index, score)); print the 1-based rank.
    print(t[0] + 1)
| [
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
797acc8e3f3c01cdac84072d59c826b177596681 | 7be4f595d555614a28f708c1ba7edda321f0cf30 | /practice/algorithms/sorting/counting_sort_2/counting_sort_2.py | 522e58a38e665fbc0c6ea29552a97a5a4d58c5ce | [] | no_license | orel1108/hackerrank | de31a2d31aaf8aeb58477d1f2738744bfe492555 | 55da1f3a94e8c28ed0f0dea3103e51774f0047de | refs/heads/master | 2021-04-09T17:38:25.112356 | 2017-01-22T11:21:19 | 2017-01-22T11:21:19 | 50,198,159 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #!/usr/bin/env python
from collections import Counter
# Python 2 counting sort: the problem guarantees all values lie in [0, 100).
n = int(raw_input().strip())
a = map(int, raw_input().strip().split())
counter = Counter(a)
for VAL in range(100):
    if VAL in counter.keys():
        while counter[VAL] > 0:
            # Trailing comma: emit values space-separated on one line.
            print VAL,
            counter[VAL] -= 1
| [
"r.orlovskyi@gmail.com"
] | r.orlovskyi@gmail.com |
8f300c37d9eebc1c1ced3d8d1e0035bcd19b974e | 7b221a4981edad73991cf1e357274b46c4054eff | /stacks/XIAOMATECH/1.0/services/HBASE/package/scripts/setup_ranger_hbase.py | 962460a669ff3b614cd7546af73086fab901a1a3 | [
"Apache-2.0"
] | permissive | aries-demos/dataops | a4e1516ef6205ad1ac5f692822e577e22ee85c70 | 436c6e89a1fdd0593a17815d3ec79c89a26d48f1 | refs/heads/master | 2020-05-29T17:20:12.854005 | 2019-05-22T06:06:00 | 2019-05-22T06:06:00 | 189,270,801 | 2 | 3 | Apache-2.0 | 2019-05-29T17:35:25 | 2019-05-29T17:35:24 | null | UTF-8 | Python | false | false | 5,217 | py | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.logger import Logger
import sys, os
script_path = os.path.realpath(__file__).split(
'/services')[0] + '/../../../stack-hooks/before-INSTALL/scripts/ranger'
sys.path.append(script_path)
from setup_ranger_plugin_xml import setup_ranger_plugin
def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"):
    """Configure the Ranger HBase plugin for the given HBase component.

    Python 2 only (``except Exception, err`` syntax).  Reads all cluster
    configuration from the dynamically imported ``params`` module.  When
    ``enable_ranger_hbase`` is off this only logs a message; when HDFS
    audit is on and we are the master, the HDFS audit directories are
    pre-created before the plugin itself is wired up.

    :param upgrade_type: accepted for interface compatibility; unused here.
    :param service_name: 'hbase-master' or the regionserver service name;
        only the master creates the shared audit directories.
    """
    import params
    if params.enable_ranger_hbase:
        if params.retryAble:
            Logger.info(
                "HBase: Setup ranger: command retry enables thus retrying if ranger admin is down !"
            )
        else:
            Logger.info(
                "HBase: Setup ranger: command retry not enabled thus skipping if ranger admin is down !"
            )
        # Pre-create /ranger/audit paths in HDFS so both HBase roles can
        # write their audit logs; failures are logged but non-fatal.
        if params.xa_audit_hdfs_is_enabled and service_name == 'hbase-master':
            try:
                params.HdfsResource(
                    "/ranger/audit",
                    type="directory",
                    action="create_on_execute",
                    owner=params.hdfs_user,
                    group=params.hdfs_user,
                    mode=0755,
                    recursive_chmod=True)
                params.HdfsResource(
                    "/ranger/audit/hbaseMaster",
                    type="directory",
                    action="create_on_execute",
                    owner=params.hbase_user,
                    group=params.hbase_user,
                    mode=0700,
                    recursive_chmod=True)
                params.HdfsResource(
                    "/ranger/audit/hbaseRegional",
                    type="directory",
                    action="create_on_execute",
                    owner=params.hbase_user,
                    group=params.hbase_user,
                    mode=0700,
                    recursive_chmod=True)
                params.HdfsResource(None, action="execute")
            except Exception, err:
                Logger.exception(
                    "Audit directory creation in HDFS for HBASE Ranger plugin failed with error:\n{0}"
                    .format(err))
        api_version = 'v2'
        # Delegate the actual plugin wiring (repo creation, property files,
        # keytabs) to the shared stack-hooks helper.
        setup_ranger_plugin(
            'hbase-client',
            'hbase',
            None,
            None,
            None,
            None,
            params.java64_home,
            params.repo_name,
            params.hbase_ranger_plugin_repo,
            params.ranger_env,
            params.ranger_plugin_properties,
            params.policy_user,
            params.policymgr_mgr_url,
            params.enable_ranger_hbase,
            conf_dict=params.hbase_conf_dir,
            component_user=params.hbase_user,
            component_group=params.user_group,
            cache_service_list=['hbaseMaster', 'hbaseRegional'],
            plugin_audit_properties=params.config['configurations']
            ['ranger-hbase-audit'],
            plugin_audit_attributes=params.config['configurationAttributes']
            ['ranger-hbase-audit'],
            plugin_security_properties=params.config['configurations']
            ['ranger-hbase-security'],
            plugin_security_attributes=params.config['configurationAttributes']
            ['ranger-hbase-security'],
            plugin_policymgr_ssl_properties=params.config['configurations']
            ['ranger-hbase-policymgr-ssl'],
            plugin_policymgr_ssl_attributes=params.
            config['configurationAttributes']['ranger-hbase-policymgr-ssl'],
            component_list=[
                'hbase-client', 'hbase-master', 'hbase-regionserver'
            ],
            audit_db_is_enabled=False,
            credential_file=params.credential_file,
            xa_audit_db_password=None,
            ssl_truststore_password=params.ssl_truststore_password,
            ssl_keystore_password=params.ssl_keystore_password,
            skip_if_rangeradmin_down=not params.retryAble,
            api_version=api_version,
            is_security_enabled=params.security_enabled,
            is_stack_supports_ranger_kerberos=params.
            stack_supports_ranger_kerberos
            if params.security_enabled else None,
            component_user_principal=params.ranger_hbase_principal
            if params.security_enabled else None,
            component_user_keytab=params.ranger_hbase_keytab
            if params.security_enabled else None)
    else:
        Logger.info('Ranger HBase plugin is not enabled')
| [
"xianhuawei@MacBook-Air.local"
] | xianhuawei@MacBook-Air.local |
335f685ef600c1073b4914abf6ff91b8708d62a0 | 3958a948646610cbe76bed6e3a285ecc457c1958 | /akshare/article/ff_factor.py | 93c13038436cc94448c0606f495e946e0a4ffea2 | [
"MIT"
] | permissive | moon-chaser/akshare | 1745abda950c8259a24782364e73d0b376b576d1 | f243df40c54e102f0faf88e8149b57ae28ea0a76 | refs/heads/master | 2020-09-18T17:49:13.992266 | 2019-11-26T09:18:38 | 2019-11-26T09:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,993 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2019/11/14 20:31
contact: jindaxiang@163.com
desc: FF-data-library: http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/data_library.html
"""
import requests
import pandas as pd
from akshare.article.cons import ff_home_url
def ff_crr():
    """Scrape Ken French's data-library home page for current research returns.

    All values come from HTML table index 4 of the page; it is sliced into
    three logical sub-tables (rows 0, 1 and 2 of the parsed frame), whose
    whitespace-packed cell text is re-split into value lists, and the
    results are stacked into one DataFrame (presumably one column per
    reporting period shown on the site -- verify against the page).
    """
    res = requests.get(ff_home_url)
    # first table
    list_index = (
        pd.read_html(res.text, header=0, index_col=0)[4].iloc[2, :].index.tolist()
    )
    # One value list per column; empty fragments from the split are dropped.
    list_0 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[0, :][0]
        .split(" ")
        if item != ""
    ]
    list_1 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[0, :][1]
        .split(" ")
        if item != ""
    ]
    list_2 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[0, :][2]
        .split(" ")
        if item != ""
    ]
    # "-" entries pad rows that have no value in the source layout.
    list_0.insert(0, "-")
    list_1.insert(0, "-")
    list_2.insert(0, "-")
    temp_columns = (
        pd.read_html(res.text, header=0)[4]
        .iloc[:, 0]
        .str.split(" ", expand=True)
        .T[0]
        .dropna()
        .tolist()
    )
    table_one = pd.DataFrame(
        [list_0, list_1, list_2], index=list_index, columns=temp_columns
    ).T
    # second table
    list_index = (
        pd.read_html(res.text, header=0, index_col=0)[4].iloc[1, :].index.tolist()
    )
    list_0 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[1, :][0]
        .split(" ")
        if item != ""
    ]
    list_1 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[1, :][1]
        .split(" ")
        if item != ""
    ]
    list_2 = [
        item
        for item in pd.read_html(res.text, header=0, index_col=0)[4]
        .iloc[1, :][2]
        .split(" ")
        if item != ""
    ]
    list_0.insert(0, "-")
    list_1.insert(0, "-")
    list_2.insert(0, "-")
    temp_columns = (
        pd.read_html(res.text, header=0)[4]
        .iloc[:, 0]
        .str.split(" ", expand=True)
        .T[1]
        .dropna()
        .tolist()
    )
    table_two = pd.DataFrame(
        [list_0, list_1, list_2], index=list_index, columns=temp_columns
    ).T
    # third table
    df = pd.read_html(res.text, header=0, index_col=0)[4].iloc[2, :]
    name_list = (
        pd.read_html(res.text, header=0)[4]
        .iloc[:, 0]
        .str.split(r" ", expand=True)
        .iloc[2, :]
        .tolist()
    )
    # Positional "-" placeholders mark the section-header rows inserted below
    # (indices 0, 1, 8 and 15) -- the layout is hard-coded to the page.
    value_list_0 = df[0].split(" ")
    value_list_0.insert(0, "-")
    value_list_0.insert(1, "-")
    value_list_0.insert(8, "-")
    value_list_0.insert(15, "-")
    value_list_1 = df[1].split(" ")
    value_list_1.insert(0, "-")
    value_list_1.insert(1, "-")
    value_list_1.insert(8, "-")
    value_list_1.insert(15, "-")
    value_list_2 = df[2].split(" ")
    value_list_2.insert(0, "-")
    value_list_2.insert(1, "-")
    value_list_2.insert(8, "-")
    value_list_2.insert(15, "-")
    # Split the merged "Small Growth Big Value" label back into two names.
    name_list.remove("Small Growth Big Value")
    name_list.insert(5, "Small Growth")
    name_list.insert(6, "Big Value")
    temp_list = [item for item in name_list if "Portfolios" not in item]
    temp_list.insert(0, "Fama/French Research Portfolios")
    temp_list.insert(1, "Size and Book-to-Market Portfolios")
    temp_list.insert(8, "Size and Operating Profitability Portfolios")
    temp_list.insert(15, "Size and Investment Portfolios")
    temp_df = pd.DataFrame([temp_list, value_list_0, value_list_1, value_list_2]).T
    temp_df.index = temp_df.iloc[:, 0]
    temp_df = temp_df.iloc[:, 1:]
    # concat
    all_df = pd.DataFrame()
    all_df = all_df.append(table_one)
    all_df = all_df.append(table_two)
    temp_df.columns = table_two.columns
    all_df = all_df.append(temp_df)
    return all_df
if __name__ == "__main__":
df_data = ff_crr()
print(df_data)
| [
"jindaxiang@163.com"
] | jindaxiang@163.com |
5e67671c5eac3faa7543afa89657bf0b16fd6cd2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_bugle.py | 827fd7e3c07f021180c489a04288a6051e4e65f4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py |
#calss header
class _BUGLE():
    """Word-knowledge entry for the noun "bugle".

    Holds the word's name, dictionary definitions and (empty) taxonomy
    links; ``run`` returns the JSON payload associated with the word.
    """

    def __init__(self):
        self.name = "BUGLE"
        self.definitions = [u'a musical instrument like a simple trumpet, used especially in the army']
        self.parents = []
        # NOTE: attribute name "childen" (sic) kept -- it is part of the
        # public interface other code may rely on.
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'

    def run(self, obj1=None, obj2=None):
        """Return the word's JSON data; obj1/obj2 are accepted but unused.

        BUGFIX: the defaults were mutable lists (obj1=[], obj2=[]), which
        are shared across calls; replaced with None sentinels. Behavior is
        unchanged because the arguments are never used.
        """
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9c4e70425e5c2faf4eb71757339669d9799d3ce7 | 2f92274606b4a8f91bf11e6383197f77d92fbd5e | /tests/utils/test_template_parser.py | 9cd347b626cdb9977453afc4b87b4a51634299b7 | [
"BSD-2-Clause"
] | permissive | tws0002/anima | 1db50532ab50dcc034db7300a3cd106b30bc8e00 | 73c256d1f7716a2db7933d6d8519a51333c7e5b4 | refs/heads/master | 2020-12-24T12:05:53.385210 | 2019-07-19T07:41:43 | 2019-07-19T07:41:43 | 73,074,603 | 0 | 0 | BSD-2-Clause | 2019-08-06T04:00:17 | 2016-11-07T12:06:41 | Python | UTF-8 | Python | false | false | 2,847 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2017, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import pytest
@pytest.fixture(scope='session')
def test_data():
    """Load the JSON task-template test data shipped next to this module.

    FIX: the scope is passed as a keyword -- passing it positionally
    (``pytest.fixture('session')``) is deprecated/removed in modern pytest.
    """
    import os
    import json
    here = os.path.dirname(__file__)
    test_data_file_path = os.path.join(here, 'data', 'test_template.json')
    with open(test_data_file_path) as f:
        test_data = json.load(f)
    yield test_data


@pytest.fixture(scope='session')
def create_db():
    """Set up an in-memory SQLite Stalker database for the whole session."""
    import os
    # FIX: pop() with a default -- STALKER_PATH may not be set in the
    # environment, and a bare pop() would raise KeyError.
    os.environ.pop('STALKER_PATH', None)
    from stalker import db
    db.setup({'sqlalchemy.url': 'sqlite://'})
    db.init()


@pytest.fixture(scope='session')
def create_project():
    """Create a test repository and a test project bound to it."""
    from stalker import Repository, Project
    repo = Repository(
        name='Test Repository',
        windows_path='T:/',
        linux_path='/mnt/T/',
        osx_path='/Volumes/T/'
    )
    project = Project(
        name='Test Project',
        code='TP',
        repository=repo
    )
    yield project


def test_database_is_correctly_created(create_db):
    """The DB fixture yields a working SQLite-backed session."""
    from stalker.db.session import DBSession
    assert str(DBSession.connection().engine.dialect.name) == 'sqlite'


def test_template_argument_accepts_only_a_json_as_text():
    """A TypeError is raised when the template argument is not JSON data."""
    from anima.utils.task_template_parser import TaskTemplateParser
    with pytest.raises(TypeError):
        TaskTemplateParser('not json data')


def test_template_argument_is_working_properly(test_data):
    """The template argument is parsed and the parser is constructed."""
    from anima.utils.task_template_parser import TaskTemplateParser
    ttp = TaskTemplateParser(test_data)
    assert ttp is not None


def test_creating_test_data(create_db, create_project):
    """The project fixture yields a Stalker Project instance."""
    project = create_project
    from stalker import Project
    assert isinstance(project, Project)


def test_creating_tasks_from_template(create_db, create_project):
    """Tasks (an Asset) are created out of the default task template."""
    project = create_project
    from anima.utils.task_template_parser import TaskTemplateParser
    from anima import defaults
    ttp = TaskTemplateParser(task_data=defaults.task_template)
    asset = ttp.create(project, 'Asset', 'Character')
    from stalker import Asset
    assert isinstance(asset, Asset)
# def test_create_entity_type_is_not_a_string(prepare_db):
# """testing if a TypeError will be raised if the entity_type is not
# """
| [
"eoyilmaz@gmail.com"
] | eoyilmaz@gmail.com |
5d27d9b5a003bd3336600af6e1e5651cf34b8bf0 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayBossContractManagementCreateResponse.py | 8903d731dfb6cc885f8cffdadbc4bfbd10096c7a | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,041 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.InterTradeStartContractApprovalResult import InterTradeStartContractApprovalResult
class AlipayBossContractManagementCreateResponse(AlipayResponse):
    """Response wrapper for the alipay.boss.contract.management.create API."""

    def __init__(self):
        super(AlipayBossContractManagementCreateResponse, self).__init__()
        self._result_set = None

    @property
    def result_set(self):
        """Approval result payload of the create call."""
        return self._result_set

    @result_set.setter
    def result_set(self, value):
        # Coerce plain dict payloads into the typed result object.
        if not isinstance(value, InterTradeStartContractApprovalResult):
            value = InterTradeStartContractApprovalResult.from_alipay_dict(value)
        self._result_set = value

    def parse_response_content(self, response_content):
        """Populate this object's fields from the raw response body."""
        response = super(AlipayBossContractManagementCreateResponse, self).parse_response_content(response_content)
        if 'result_set' in response:
            self.result_set = response['result_set']
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
391ce8cb0cb0d48ea2565f84872784ab46e5bf5e | 0f8254a1d49aa55130fe9bfd4e0488b92c75aa3c | /cookie_auth/cookie_auth/data/album.py | 914ed6a616abad5fb03e53cd804e99cc56167c59 | [] | no_license | Durant21/cookie_auth | 5c41dee21bc0b18ee80bf25389b5c24475ff804a | e94244e430b6c87ed08108b2ba58c769daad647d | refs/heads/master | 2020-03-29T00:45:03.354865 | 2018-09-18T21:30:04 | 2018-09-18T21:30:04 | 149,354,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | import sqlalchemy
import sqlalchemy.orm
from sqlalchemy.ext.orderinglist import ordering_list
from cookie_auth.data.modelbase import SqlAlchemyBase
class Album(SqlAlchemyBase):
    """ORM model for a music album with an ordered collection of tracks."""
    __tablename__ = 'Album'
    # Surrogate primary key.
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    # Album title and URL slug; both must be unique.
    name = sqlalchemy.Column(sqlalchemy.String, index=True, unique=True, nullable=False)
    url = sqlalchemy.Column(sqlalchemy.String, index=True, unique=True, nullable=False)
    year = sqlalchemy.Column(sqlalchemy.Integer, index=True)
    price = sqlalchemy.Column(sqlalchemy.Float, index=True)
    # Path/URL of the cover image.
    album_image = sqlalchemy.Column(sqlalchemy.String)
    has_preview = sqlalchemy.Column(sqlalchemy.Boolean, default=False)
    is_published = sqlalchemy.Column(sqlalchemy.Boolean, default=False)
    # Tracks keep a stable display order maintained by ordering_list;
    # cascade='all' makes track rows follow album lifecycle operations.
    tracks = sqlalchemy.orm.relationship('Track', back_populates='album',
                                         order_by='Track.display_order',
                                         collection_class=ordering_list('display_order'),
                                         cascade='all')
| [
"durant.crimson@icloud.com"
] | durant.crimson@icloud.com |
1ccdf89cc474766550a06c99cb71f19bf678915d | d121dbf198d835d1f040da8e8212948d469d16cb | /baekjoon/Python/2530.py | 4e6b2beef79bf32d6ea29ff41cf6bdfc28b41105 | [] | no_license | yeonjooyou/algorithm | ad66d2477aaed1656751d56db19a90ab1957df93 | 067f0ca746949328695f51f458cf5db9adfb91af | refs/heads/master | 2023-08-26T07:29:43.000966 | 2021-11-01T13:38:56 | 2021-11-01T13:38:56 | 391,618,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # 인공지능 시계
# BOJ 2530 "AI clock": given the current time (hours minutes seconds) and a
# cooking duration in seconds, print the time when cooking finishes.
h, m, s = map(int, input().split())
duration = int(input())

# Convert everything to seconds, add, and wrap around a 24-hour day;
# equivalent to propagating the carries digit by digit.
total = (h * 3600 + m * 60 + s + duration) % 86400
h, rest = divmod(total, 3600)
m, s = divmod(rest, 60)

print(h, m, s)
"yeonjooyou@naver.com"
] | yeonjooyou@naver.com |
ce980daa1aba4aaf7f0bb4ee521d812d89d91503 | 08acec95bd1dc302633fadf7b47cd8ba3b749ff3 | /day-2018-05-10/myproject/venv/lib/python2.7/site-packages/zope/security/tests/test_permission.py | 82ddea20e391f459841634eaa3cdc6f517c06cb0 | [] | no_license | WeAreHus/StudyRecord | 74a312103ad2c037de23534160fa42d6a68ad174 | 047b7d9dcbee7c01ad2e8b888b160e66dfa9012d | refs/heads/master | 2022-12-16T14:47:15.984939 | 2019-04-29T15:16:15 | 2019-04-29T15:16:15 | 127,758,387 | 2 | 1 | null | 2022-11-22T02:50:30 | 2018-04-02T13:15:07 | Python | UTF-8 | Python | false | false | 7,808 | py | ##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test permissions
"""
import unittest
from zope.component.testing import PlacelessSetup
from zope.security.interfaces import PUBLIC_PERMISSION_NAME as zope_Public
class PermissionTests(unittest.TestCase):
    """Unit tests for zope.security.permission.Permission."""

    def _getTargetClass(self):
        from zope.security.permission import Permission
        return Permission

    def _makeOne(self, id, *args):
        klass = self._getTargetClass()
        return klass(id, *args)

    def test_class_conforms_to_IPermission(self):
        from zope.interface.verify import verifyClass
        from zope.security.interfaces import IPermission
        verifyClass(IPermission, self._getTargetClass())

    def test_instance_conforms_to_IPermission(self):
        from zope.interface.verify import verifyObject
        from zope.security.interfaces import IPermission
        from zope.schema import getValidationErrors
        verifyObject(IPermission, self._makeOne('testing'))
        self.assertEqual([],
                         getValidationErrors(IPermission,
                                             self._makeOne('testing')))

    def test_ctor_only_id(self):
        # title/description default to empty strings.
        permission = self._makeOne('testing')
        self.assertEqual(permission.id, u'testing')
        self.assertEqual(permission.title, u'')
        self.assertEqual(permission.description, u'')

    def test_ctor_w_title_and_description(self):
        permission = self._makeOne('testing', u'TITLE', u'DESCRIPTION')
        self.assertEqual(permission.id, 'testing')
        self.assertEqual(permission.title, u'TITLE')
        self.assertEqual(permission.description, u'DESCRIPTION')
class Test_checkPermission(PlacelessSetup, unittest.TestCase):
    """Tests for zope.security.permission.checkPermission."""

    def _callFUT(self, context, permission_id):
        from zope.security.permission import checkPermission
        return checkPermission(context, permission_id)

    def test_w_CheckerPublic(self):
        # CheckerPublic is always considered a valid permission.
        from zope.security.checker import CheckerPublic
        self._callFUT(None, CheckerPublic) # no raise

    def test_miss(self):
        # Unknown permission ids raise ValueError.
        self.assertRaises(ValueError, self._callFUT, None, 'nonesuch')

    def test_hit(self):
        from zope.component import provideUtility
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        self._callFUT(None, 'testing') # no raise
class Test_allPermissions(PlacelessSetup, unittest.TestCase):
    """Tests for zope.security.permission.allPermissions."""

    def _callFUT(self):
        from zope.security.permission import allPermissions
        return allPermissions()

    def test_empty(self):
        self.assertEqual(list(self._callFUT()), [])

    def test_w_registration(self):
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        self.assertEqual(list(self._callFUT()), ['testing'])

    def test_skips_zope_Public(self):
        # The special zope.Public permission is excluded from the listing.
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.checker import CheckerPublic
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        provideUtility(CheckerPublic, IPermission, zope_Public)
        self.assertEqual(list(self._callFUT()), ['testing'])
class Test_PermissionsVocabulary(PlacelessSetup, unittest.TestCase):
    """Tests for zope.security.permission.PermissionsVocabulary."""

    def _callFUT(self):
        from zope.security.permission import PermissionsVocabulary
        return PermissionsVocabulary()

    def test_empty(self):
        from zope.schema.vocabulary import SimpleVocabulary
        vocabulary = self._callFUT()
        self.assertTrue(isinstance(vocabulary, SimpleVocabulary))
        self.assertEqual(list(vocabulary), [])

    def test_w_registration(self):
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        vocabulary = self._callFUT()
        self.assertEqual([x.token for x in vocabulary], ['testing'])

    def test_includes_zope_Public(self):
        # Unlike allPermissions, the vocabulary DOES include zope.Public.
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.checker import CheckerPublic
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        provideUtility(CheckerPublic, IPermission, zope_Public)
        vocabulary = self._callFUT()
        self.assertEqual(sorted([x.token for x in vocabulary]),
                         ['testing', zope_Public])

    def test_zcml_valid(self):
        # Every permission registered by the package's own ZCML must be a
        # valid IPermission.
        from zope.configuration import xmlconfig
        import zope.security
        from zope.interface.verify import verifyObject
        from zope.security.interfaces import IPermission
        from zope.schema import getValidationErrors
        xmlconfig.file('configure.zcml', zope.security)
        vocabulary = self._callFUT()
        vocabulary = sorted(vocabulary, key=lambda term: term.token)
        self.assertEqual(6, len(vocabulary))
        for term in vocabulary:
            p = term.value
            __traceback_info__ = term.token, p
            verifyObject(IPermission, p)
            self.assertEqual([], getValidationErrors(IPermission, p))
class Test_PermissionIdsVocabulary(PlacelessSetup, unittest.TestCase):
    """Tests for zope.security.permission.PermissionIdsVocabulary."""

    def _callFUT(self):
        from zope.security.permission import PermissionIdsVocabulary
        return PermissionIdsVocabulary()

    def test_empty(self):
        from zope.schema.vocabulary import SimpleVocabulary
        vocabulary = self._callFUT()
        self.assertTrue(isinstance(vocabulary, SimpleVocabulary))
        self.assertEqual(list(vocabulary), [])

    def test_w_registration(self):
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        vocabulary = self._callFUT()
        self.assertEqual([x.value for x in vocabulary], ['testing'])
        self.assertEqual([x.token for x in vocabulary], ['testing'])

    def test_includes_zope_Public(self):
        # zope.Public maps to the CheckerPublic sentinel value and sorts first.
        self.assertEqual(list(self._callFUT()), [])
        from zope.component import provideUtility
        from zope.security.checker import CheckerPublic
        from zope.security.interfaces import IPermission
        permission = object()
        provideUtility(permission, IPermission, 'testing')
        provideUtility(CheckerPublic, IPermission, zope_Public)
        vocabulary = self._callFUT()
        self.assertEqual([x.value for x in vocabulary],
                         [CheckerPublic, 'testing'])
        self.assertEqual([x.token for x in vocabulary],
                         [zope_Public, 'testing'])
def test_suite():
    # Collect every TestCase defined in this module for the test runner.
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
| [
"1131360171@qq.com"
] | 1131360171@qq.com |
b5e2c73c9143273582301d3fa689f293b7767799 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/automation/azure-mgmt-automation/azure/mgmt/automation/aio/operations/_node_count_information_operations.py | 9c3773f1b89ba48b3ba0f6ab556eb8dcf40263f3 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 5,458 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._node_count_information_operations import build_get_request
from .._vendor import AutomationClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NodeCountInformationOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.automation.aio.AutomationClient`'s
:attr:`node_count_information` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self,
resource_group_name: str,
automation_account_name: str,
count_type: Union[str, _models.CountType],
**kwargs: Any
) -> _models.NodeCounts:
"""Retrieve counts for Dsc Nodes.
:param resource_group_name: Name of an Azure Resource group. Required.
:type resource_group_name: str
:param automation_account_name: The name of the automation account. Required.
:type automation_account_name: str
:param count_type: The type of counts to retrieve. Known values are: "status" and
"nodeconfiguration". Required.
:type count_type: str or ~azure.mgmt.automation.models.CountType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NodeCounts or the result of cls(response)
:rtype: ~azure.mgmt.automation.models.NodeCounts
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-01-13-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-01-13-preview")
)
cls: ClsType[_models.NodeCounts] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
automation_account_name=automation_account_name,
count_type=count_type,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("NodeCounts", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/nodecounts/{countType}"
}
| [
"noreply@github.com"
] | Azure.noreply@github.com |
607386616a143c398ff7b721265e63b4d30a4f6c | 54cabe33c24f17f1101b5e7444db44732665e078 | /api/common/api_client.py | 9d66cfbb061b50c3e7a8439b74061c831c36b8f2 | [] | no_license | DemocracyClub/aggregator-api | 6fc40381f880849df6f32a87e6bf1de18fadbe2e | eec276791133d84027195e8b4c12bb9133e34957 | refs/heads/master | 2023-08-09T22:23:49.867773 | 2023-07-31T15:57:15 | 2023-07-31T15:57:15 | 158,564,104 | 3 | 2 | null | 2023-07-31T15:57:17 | 2018-11-21T14:54:36 | HTML | UTF-8 | Python | false | false | 131 | py | from abc import ABC
class BaseAPIClient(ABC):
    """Abstract base for API clients; remembers the service's base URL."""

    def __init__(self, base_url: str = "") -> None:
        # Root URL that concrete clients prepend to endpoint paths.
        self.base_url: str = base_url
| [
"sym.roe@talusdesign.co.uk"
] | sym.roe@talusdesign.co.uk |
bad52bc81cb15fb632fb67f4271b25dc83af451f | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/02111bc6c8800d8f644b52ad842cd738a17e192a-<create_host_port_group>-bug.py | 4b1a7042cbd94d7de38cf5f7162c638b57555a91 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | def create_host_port_group(self, host_system, portgroup_name, vlan_id, vswitch_name, network_policy):
'\n Function to create/update portgroup on given host using portgroup specifications\n Args:\n host_system: Name of Host System\n portgroup_name: Name of Portgroup\n vlan_id: The VLAN ID for ports using this port group.\n vswitch_name: Name of vSwitch Name\n network_policy: Network policy object\n '
desired_pgs = self.get_port_group_by_name(host_system=host_system, portgroup_name=portgroup_name, vswitch_name=vswitch_name)
port_group = vim.host.PortGroup.Config()
port_group.spec = vim.host.PortGroup.Specification()
if (not desired_pgs):
port_group.spec.name = portgroup_name
port_group.spec.vlanId = vlan_id
port_group.spec.vswitchName = vswitch_name
port_group.spec.policy = network_policy
try:
host_system.configManager.networkSystem.AddPortGroup(portgrp=port_group.spec)
self.changed = True
except vim.fault.AlreadyExists as e:
self.module.fail_json(msg=('Failed to add Portgroup as it already exists: %s' % e.msg))
except vim.fault.NotFound as e:
self.module.fail_json(msg=('Failed to add Portgroup as vSwitch was not found: %s' % e.msg))
except vim.fault.HostConfigFault as e:
self.module.fail_json(msg=('Failed to add Portgroup due to host system configuration failure : %s' % e.msg))
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg=('Failed to add Portgroup as VLAN id was not correct as per specifications: %s' % e.msg))
else:
if (desired_pgs[0].spec.vlanId != vlan_id):
port_group.spec.vlanId = vlan_id
self.changed = True
if self.check_network_policy_diff(desired_pgs[0].spec.policy, network_policy):
port_group.spec.policy = network_policy
self.changed = True
if self.changed:
try:
host_system.configManager.networkSystem.UpdatePortGroup(pgName=self.portgroup_name, portgrp=port_group.spec)
except vim.fault.AlreadyExists as e:
self.module.fail_json(msg=('Failed to update Portgroup as it conflicts with already existing Portgroup: %s' % e.msg))
except vim.fault.NotFound as e:
self.module.fail_json(msg=('Failed to update Portgroup as vSwitch was not found: %s' % e.msg))
except vim.fault.HostConfigFault as e:
self.module.fail_json(msg=('Failed to update Portgroup due to host system configuration failure : %s' % e.msg))
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg=('Failed to update Portgroup as VLAN id was not correct as per specifications: %s' % e.msg))
self.changed = False | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
0920838d52e1fdc735af985efc9aa53ecc6c5c2d | b4dd760e79de0db39792b947bacfe2b27c2a89ee | /challenge106interm.py | 195d821f20a949f08778ad8a8b077a487a568c35 | [] | no_license | mooja/dailyprogrammer | c23f1a0c5d6e4269b6c03b47d8cc18f6d857a6e1 | d12fcb6744ac3b4a5e651f37ea0b3f20ca062f7d | refs/heads/master | 2021-01-16T23:47:28.955660 | 2018-04-09T18:03:50 | 2018-04-09T18:03:50 | 23,394,207 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,987 | py | #!/usr/bin/env python
# encoding: utf-8
# Daily Programmer Challenge 106 Intermediate
#
# http://www.reddit.com/r/dailyprogrammer/comments/11xjfd/10232012_challenge_106_intermediate_jugs/
#
# May.10.2015
from operator import attrgetter
from collections import namedtuple
from itertools import combinations, permutations
# Immutable value objects: a jug (current contents + capacity) and a search
# state (list of jugs + log of the actions that produced it).
Jug = namedtuple('Jug', 'gallons, capacity')
JugsState = namedtuple('JugsState', 'jugs, actions')


def is_full(jug):
    """True when the jug holds at least its capacity."""
    return jug.capacity <= jug.gallons


def is_empty(jug):
    """True when the jug holds no water."""
    return jug.gallons == 0


def fill_jug(jug):
    """Return a copy of *jug* filled to capacity."""
    return jug._replace(gallons=jug.capacity)


def empty_jug(jug):
    """Return a copy of *jug* with all water removed."""
    return jug._replace(gallons=0)


def transf_from_1_to_2(jug1, jug2):
    """Pour as much as possible from jug1 into jug2.

    Returns the pair of new jugs (source first, target second).
    """
    room = jug2.capacity - jug2.gallons
    poured = min(jug1.gallons, room)
    return (jug1._replace(gallons=jug1.gallons - poured),
            jug2._replace(gallons=jug2.gallons + poured))


def jugs_total_gallons(jugs):
    """Total amount of water held across all jugs."""
    return sum(j.gallons for j in jugs)


def sort_jugs_by_capacity(jugs):
    """Return the jugs ordered smallest capacity first."""
    return sorted(jugs, key=lambda j: j.capacity)
def gen_new_jug_states(jugs_state):
    """Return all states reachable from *jugs_state* by a single move.

    A move is one of: fill a non-full jug, empty a non-empty jug, or pour
    one jug into another.  Each successor carries a copy of the action log
    with a description of the move appended.
    """
    successor_states = []
    # Fill each jug that is not already full.
    for jug in jugs_state.jugs:
        if not is_full(jug):
            # Copy the state, excluding the jug we are about to replace.
            new_jugs_state = JugsState([j for j in jugs_state.jugs if j != jug],
                                       list(jugs_state.actions))
            new_jugs_state.jugs.append(fill_jug(jug))
            new_jugs_state.jugs.sort(key=attrgetter('capacity'))
            new_jugs_state.actions.append("> filled ({}, {})".format(
                jug.gallons, jug.capacity))
            new_jugs_state.actions.append("current state: {}".format(new_jugs_state.jugs))
            successor_states.append(new_jugs_state)
    # Empty each jug that is not already empty.
    for jug in jugs_state.jugs:
        if not is_empty(jug):
            new_jugs_state = JugsState([j for j in jugs_state.jugs if j != jug],
                                       list(jugs_state.actions))
            new_jugs_state.jugs.append(empty_jug(jug))
            new_jugs_state.jugs.sort(key=attrgetter('capacity'))
            new_jugs_state.actions.append("> emptied ({}, {})".format(
                jug.gallons, jug.capacity))
            new_jugs_state.actions.append("current state: {}".format(new_jugs_state.jugs))
            successor_states.append(new_jugs_state)
    # Pour from each jug into each other jug.
    # BUGFIX: permutations(..., 2) -- without the explicit r=2 the tuples
    # have len(jugs) elements, so the (jug1, jug2) unpacking below breaks
    # for any problem with a number of jugs other than exactly two.
    for jug1, jug2 in permutations(jugs_state.jugs, 2):
        if is_empty(jug1) or is_full(jug2):
            continue
        new_jugs_state = JugsState([j for j in jugs_state.jugs if j != jug1 and j != jug2],
                                   list(jugs_state.actions))
        # Compute the pour once instead of twice (the helper is pure).
        new_jug1, new_jug2 = transf_from_1_to_2(jug1, jug2)
        new_jugs_state.jugs.append(new_jug1)
        new_jugs_state.jugs.append(new_jug2)
        new_jugs_state.jugs.sort(key=attrgetter('capacity'))
        new_jugs_state.actions.append("> transfered {} to {}".format(
            jug1, jug2))
        new_jugs_state.actions.append("current state: {}".format(new_jugs_state.jugs))
        successor_states.append(new_jugs_state)
    return successor_states
def main():
    """Breadth-first search for a jug configuration holding 4 gallons."""
    def reached_goal(state):
        # Goal: the jugs together hold exactly 4 gallons.
        return jugs_total_gallons(state.jugs) == 4

    start = JugsState([Jug(0, 3), Jug(0, 5)], actions=['initial'])
    frontier = [start]
    while frontier:
        if reached_goal(frontier[0]):
            print('\n'.join(frontier[0].actions))
            print("Wanted state reached!")
            break
        current = frontier.pop(0)
        frontier.extend(gen_new_jug_states(current))


if __name__ == '__main__':
    main()
# output:
# initial
# > filled (0, 3)
# current state: [Jug(gallons=3, capacity=3), Jug(gallons=0, capacity=5)]
# > transfered Jug(gallons=3, capacity=3) to Jug(gallons=0, capacity=5)
# current state: [Jug(gallons=0, capacity=3), Jug(gallons=3, capacity=5)]
# > filled (0, 3)
# current state: [Jug(gallons=3, capacity=3), Jug(gallons=3, capacity=5)]
# > transfered Jug(gallons=3, capacity=3) to Jug(gallons=3, capacity=5)
# current state: [Jug(gallons=1, capacity=3), Jug(gallons=5, capacity=5)]
# > emptied (5, 5)
# current state: [Jug(gallons=1, capacity=3), Jug(gallons=0, capacity=5)]
# > transfered Jug(gallons=1, capacity=3) to Jug(gallons=0, capacity=5)
# current state: [Jug(gallons=0, capacity=3), Jug(gallons=1, capacity=5)]
# > filled (0, 3)
# current state: [Jug(gallons=3, capacity=3), Jug(gallons=1, capacity=5)]
# Wanted state reached!
| [
"max.atreides@gmail.com"
] | max.atreides@gmail.com |
9279e380fad7f0a2d4a3dd2741fd94ceeb8bcd89 | a9db74855c63d83034bf4874cda908f77a6eb90b | /demo_project/demo_project/testrunner.py | 4d1a6bd11cce31264a557ec2848637378a57ac83 | [] | no_license | mikelopez/sciweb-django-messages | eadb4d1a117e637b3ac8c32f297249985a8dcace | 55ecc9c444d1e0c718f97da70c5c846c872cda7d | refs/heads/master | 2020-04-15T05:58:58.660546 | 2013-06-22T09:48:11 | 2013-06-22T09:48:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | __author__ = 'Marcos Lopez'
# http://github.com/mikelopez
from django.test.simple import DjangoTestSuiteRunner
import settings
class BaseAppsTestNoDb(DjangoTestSuiteRunner):
def setup_databases(self, **kwargs):
""" override the db stuff from DjangoTestSuiteRunner """
pass
def teardown_databases(self, old_config, **kwargs):
""" override db teardown from DjangoTestSuiteRunner """
pass
def build_suite(self, test_labels, *args, **kwargs):
return super(BaseAppsTestNoDb, self).build_suite(test_labels or \
[i for i in settings.INSTALLED_APPS if not "django" in i], *args, **kwargs)
| [
"dev@scidentify.info"
] | dev@scidentify.info |
4e18bd8857b03ec62724b0ac7499c7556affc40d | 6c512b7d2ae4b1ad713a57f74a4816e1291ba7a1 | /python_3/solutions/soln_kaggle_titanic.py | 44f9b1d34823679fec1149596540f66f21a80b61 | [
"MIT"
] | permissive | duttashi/applied-machine-learning | 451389e8f27931f32132a148e93effa7c6352536 | ff3267b97d9dd7122400754798e06fb493daa40a | refs/heads/master | 2021-12-17T19:12:39.531717 | 2021-12-04T09:36:46 | 2021-12-04T09:36:46 | 169,368,684 | 0 | 2 | MIT | 2021-12-04T09:36:47 | 2019-02-06T07:19:08 | R | UTF-8 | Python | false | false | 496 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 10 18:10:07 2020
@author: Ashish
"""
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#% matplotlib inline
#print(os.getcwd())
# Load the Kaggle Titanic training set; low_memory=False reads the file in
# a single pass so column dtypes are inferred consistently.
titanic_df = pd.read_csv("../../data/kaggle_titanic_train.csv"
                         , low_memory=False)
print(titanic_df.head())
print(titanic_df.info())
# Categorical plot of passenger sex, colored by ticket class.
# NOTE(review): no explicit `kind` is given, so seaborn defaults to a
# strip plot -- confirm that is the intended chart type.
sns.catplot(y="Sex", data = titanic_df, hue = 'Pclass')
| [
"ashish.dutt8@gmail.com"
] | ashish.dutt8@gmail.com |
4c03770d902f460fff4750192f2a760a9c9f977b | ef96d96f6b92c5beb0b6e3334c7db2ef590f3875 | /coordination/runtime.py | 0010794e255b8de9bdb02ee30207c3b7bc470c17 | [] | no_license | Evgenus/coordination | 84693f11285dc2d16f864f619fd9d704cdea1b75 | 17d0c4030ccaa672a901af7f83605a237283bd96 | refs/heads/master | 2021-01-25T08:28:08.998333 | 2011-08-04T15:17:46 | 2011-08-04T15:17:46 | 2,118,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,792 | py | #standart
from functools import partial
from weakref import proxy
from collections import deque, defaultdict
class FeatureChecker(object):
    """Callable probe that records a feature as allowed or forbidden.

    The probe function is run against a subject; any exception means the
    feature is unavailable.
    """

    def __init__(self, name, func):
        # name: feature identifier; func: probe that raises when the
        # feature is not supported.
        self.name = name
        self.func = func

    def __call__(self, subj):
        """Run the probe against *subj* and record the outcome on it."""
        succeeded = True
        try:
            self.func(subj)
        except Exception:
            succeeded = False
        if succeeded:
            subj.allow(self.name)
        else:
            subj.forbid(self.name)
class FeaturesProvider(object):
    'Holding information about available system features for aspects'
    # Mapping of feature name -> FeatureChecker, filled in by the metaclass.
    checkers = {}

    def __init__(self):
        self.features = {}
        self.namespace = {}
        # Run every registered checker up front so availability is known.
        # FIX: .items() instead of the Python-2-only .iteritems()
        # (identical iteration, also works on Python 3).
        for name, checker in self.checkers.items():
            checker(self)

    def __contains__(self, feature):
        # Unknown features count as unavailable.
        return self.features.get(feature, False)

    def allow(self, name):
        """Mark *name* as an available feature."""
        self.features[name] = True

    def forbid(self, name):
        """Mark *name* as an unavailable feature."""
        self.features[name] = False

    def provide(self, **kwargs):
        """Publish helper objects for later attribute-style access."""
        self.namespace.update(kwargs)

    def __getattr__(self, name):
        if name in self.namespace:
            return self.namespace[name]
        raise AttributeError(name)

    @classmethod
    def feature(cls, name):
        """Decorator factory: wrap a probe function into a named checker."""
        return partial(FeatureChecker, name)
class FeaturesProviderMeta(type):
    """Metaclass collecting FeatureChecker class attributes into ``checkers``."""

    def __new__(meta, name, bases, internals):
        checkers = {}
        cls = type.__new__(meta, name, bases, internals)
        # Start from checkers inherited via the base classes.
        checkers.update(cls.checkers)
        # BUGFIX: ``.iteritems()`` is Python-2-only; ``.items()`` works on
        # both 2 and 3.  Also renamed the loop variable, which previously
        # shadowed the ``name`` parameter (the class name).
        for attr, value in internals.items():
            if isinstance(value, FeatureChecker):
                checkers[attr] = value
        cls.checkers = checkers
        return cls


# Rebuild FeaturesProvider through the metaclass so subclasses get their
# FeatureChecker attributes registered automatically (applying a
# metaclass after the fact, Python-2 style).
FeaturesProviderMeta = FeaturesProviderMeta  # keep name bound for clarity
FeaturesProvider = FeaturesProviderMeta('FeaturesProvider',
    FeaturesProvider.__bases__, dict(FeaturesProvider.__dict__))
class MessageQueue(object):
    """FIFO of pending callbacks for actors."""

    def __init__(self):
        self.queue = deque()

    def add(self, callable, *args, **kwargs):
        """Enqueue *callable* bound to the given arguments."""
        self.queue.append(partial(callable, *args, **kwargs))

    def __call__(self):
        """Run the oldest pending callback.

        Returns True when a callback was executed, False when the queue
        was empty.
        """
        if not self.queue:
            return False
        pending = self.queue.popleft()
        pending()
        return True
class MessageLoop(object):
    """Abstract base for an event loop that repeatedly polls a callback."""

    # Polling interval in seconds.
    timeout = 0.01

    def set_callback(self, callback):
        """Install the function the loop should poll.

        Note: this shadows the class-level ``callback`` method on the
        instance, exactly as the original design intends.
        """
        self.callback = callback

    def run(self):
        """Start the loop; concrete subclasses must override this."""
        raise NotImplementedError()

    def callback(self):
        """Default no-op callback: report that nothing was processed."""
        return False
class Action(object):
    """Basic concurrency primitive: a multicast callable.

    Callbacks subscribe with ``action << cb`` and unsubscribe with
    ``action >> cb``.  Calling the action fans the (optionally
    preprocessed) arguments out to every subscriber, either synchronously
    or through the class-level ``queue`` when one is installed.
    """
    # Optional MessageQueue; when set, callbacks are enqueued instead of
    # being invoked inline.
    queue = None

    def __init__(self, preprocess=None):
        # preprocess(source, *args, **kwargs) may return an
        # (args, kwargs) pair to rewrite the payload before dispatch.
        self.preprocess = preprocess
        self.callbacks = []
        self.source = None
        self.name = None

    def __lshift__(self, callback):
        """Subscribe *callback* (idempotent)."""
        if callback not in self.callbacks:
            self.callbacks.append(callback)
        return self

    def __rshift__(self, callback):
        """Unsubscribe *callback* if present.

        BUGFIX: the original appended the callback again when it was
        already subscribed, so ``>>`` grew the list instead of undoing
        ``<<``.  It now removes the callback, mirroring ``__lshift__``.
        """
        if callback in self.callbacks:
            self.callbacks.remove(callback)
        return self

    def clear(self):
        """Drop every subscriber."""
        self.callbacks = []

    def __repr__(self):
        return "<Action {0} of {1}>".format(self.name, self.source)

    def __call__(self, *args, **kwargs):
        if self.preprocess is not None:
            result = self.preprocess(self.source, *args, **kwargs)
            if result is not None:
                args, kwargs = result
        for callback in self.callbacks:
            if self.queue is not None:
                self.queue.add(callback, *args, **kwargs)
            else:
                callback(*args, **kwargs)

    def clone(self):
        """Return a fresh Action sharing this one's preprocess and name."""
        new = self.__class__(self.preprocess)
        new.name = self.name
        return new

    @classmethod
    def wrap(cls, callable):
        """Build an Action using *callable* as its preprocess hook."""
        return cls(callable)
class Actor(object):
    """Base class whose Action class attributes become per-instance actions.

    NOTE(review): this relies on the Python-2-only inline
    ``__metaclass__`` attribute (and ``dict.iteritems``); under Python 3
    the metaclass below is silently ignored and ``_actions`` is never
    built — confirm the deployment target is Python 2.
    """
    class __metaclass__(type):
        def __new__(meta, name, bases, internals):
            # Collect every class-level Action into an ``_actions`` map
            # that the instance constructor iterates later.
            actions = internals['_actions'] = {}
            for key, value in internals.iteritems():
                if isinstance(value, Action):
                    actions[key] = value
            cls = type.__new__(meta, name, bases, internals)
            # Point each class-level action at its owner class via a weak
            # proxy so actions do not keep the class alive, and record the
            # attribute name for Action.__repr__.
            for key, action in actions.iteritems():
                action.source = proxy(cls)
                action.name = key
            return cls
    def __init__(self):
        super(Actor, self).__init__()
        # Give every instance its own clone of each class-level action,
        # chained (``<<``) so firing the class action also fires the
        # instance clone; the clone's source is a weak proxy to self.
        for name, cls_action in self._actions.iteritems():
            action = cls_action.clone()
            cls_action << action
            action.source = proxy(self)
            setattr(self, name, action)
class Scope(object):
    """Registry grouping entities and aspects by their concrete types."""

    def __init__(self):
        self.entities = defaultdict(list)
        self.aspects = defaultdict(list)

    def register_entity(self, entity):
        """File *entity* under its concrete type."""
        bucket = self.entities[type(entity)]
        bucket.append(entity)

    def register_aspect(self, entity, aspect):
        """File *aspect* under the (entity type, aspect type) pair."""
        key = (type(entity), type(aspect))
        self.aspects[key].append(aspect)
| [
"chernyshov.eugene@gmail.com"
] | chernyshov.eugene@gmail.com |
3e87f99793c05532a5476acb0d9b4699334dae17 | 49900ba50d4f6c979d6d433577828c8007973125 | /utils.py | 371a67f1d768d75ea6e4aa04eb4705a21502a4b3 | [] | no_license | weizhenzhao/cs224d_nlp_problem_set2 | 9661414965a58b97113f828a47932c5b9d8411df | 302f0e53cdd88147a5c1727d06f0be18270d8a2a | refs/heads/master | 2021-10-22T18:22:31.063591 | 2019-03-12T14:03:36 | 2019-03-12T14:03:36 | 104,356,708 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,768 | py | from collections import defaultdict
import numpy as np
class Vocab(object):
    """Bidirectional word<->index mapping with frequency counts.

    Index 0 is reserved for the unknown token ``<unk>``; unseen words
    encode to it.
    """

    def __init__(self):
        self.word_to_index = {}
        self.index_to_word = {}
        self.word_freq = defaultdict(int)
        self.total_words = 0
        self.unknown = '<unk>'
        # Reserve index 0 for <unk> without inflating its count.
        self.add_word(self.unknown, count=0)

    def add_word(self, word, count=1):
        """Register *word*, assigning the next free index on first sight."""
        if word not in self.word_to_index:
            idx = len(self.word_to_index)
            self.word_to_index[word] = idx
            self.index_to_word[idx] = word
        self.word_freq[word] += count

    def construct(self, words):
        """Build the vocabulary from an iterable of tokens."""
        for token in words:
            self.add_word(token)
        self.total_words = float(sum(self.word_freq.values()))
        print('{} total words with {} uniques'.format(self.total_words, len(self.word_freq)))

    def encode(self, word):
        """Map *word* to its index, falling back to the <unk> index."""
        if word not in self.word_to_index:
            word = self.unknown
        return self.word_to_index[word]

    def decode(self, index):
        """Map an index back to its word."""
        return self.index_to_word[index]

    def __len__(self):
        return len(self.word_freq)
def calculate_perplexity(log_probs):
    """Return the perplexity implied by a sequence of log-probabilities.

    See https://web.stanford.edu/class/cs124/lec/languagemodeling.pdf
    """
    neg_log_likelihood = -sum(log_probs)
    return np.exp(neg_log_likelihood / len(log_probs))
def get_ptb_dataset(dataset='train'):
    """Yield whitespace-separated tokens from a PTB split file.

    An explicit '<eos>' token is emitted after every line, matching the
    PTB LSTM preprocessing conventions.
    """
    fn = 'data/ptb/ptb.{}.txt'
    # BUGFIX: open the file through a context manager so the handle is
    # closed when the generator finishes (the original leaked it).
    with open(fn.format(dataset)) as fh:
        for line in fh:
            for word in line.split():
                yield word
            # Add token to the end of the line
            # Equivalent to <eos> in:
            # https://github.com/wojzaremba/lstm/blob/master/data.lua#L32
            # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/reader.py#L31
            yield '<eos>'
def ptb_iterator(raw_data, batch_size, num_steps):
    """Yield (input, target) minibatches for PTB language modeling.

    Targets are the inputs shifted one position to the right.
    (Adapted from the TensorFlow PTB reader.)

    Raises ValueError when the data is too short for even one step.
    """
    raw = np.array(raw_data, dtype=np.int32)
    batch_len = len(raw) // batch_size
    # Lay the token stream out as batch_size parallel rows.
    data = np.zeros([batch_size, batch_len], dtype=np.int32)
    for row in range(batch_size):
        data[row] = raw[batch_len * row:batch_len * (row + 1)]
    epoch_size = (batch_len - 1) // num_steps
    if epoch_size == 0:
        raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
    for step in range(epoch_size):
        start = step * num_steps
        x = data[:, start:start + num_steps]
        y = data[:, start + 1:start + num_steps + 1]
        yield (x, y)
def sample(a, temperature=1.0):
    """Draw an index from probability array *a* with temperature scaling.

    Lower temperatures sharpen the distribution, higher ones flatten it.
    (From the Keras LSTM text-generation example.)
    """
    scaled = np.log(a) / temperature
    weights = np.exp(scaled)
    probs = weights / np.sum(weights)
    return np.argmax(np.random.multinomial(1, probs, 1))
def data_iterator(orig_X, orig_y=None, batch_size=32, label_size=2, shuffle=False):
    """Yield (x, y) minibatches; y is one-hot encoded or None.

    BUGFIX: label presence is now tested with ``is not None`` instead of
    ``np.any(orig_y)`` — the original silently returned y=None whenever
    every label happened to be class 0, and was fragile when shuffling
    with no labels at all.
    """
    has_labels = orig_y is not None
    # Optionally shuffle the data before training
    if shuffle:
        indices = np.random.permutation(len(orig_X))
        data_X = orig_X[indices]
        data_y = orig_y[indices] if has_labels else None
    else:
        data_X = orig_X
        data_y = orig_y
    # ##
    total_processed_examples = 0
    total_steps = int(np.ceil(len(data_X) / float(batch_size)))
    for step in range(total_steps):
        # Create the batch by selecting up to batch_size elements
        batch_start = step * batch_size
        x = data_X[batch_start:batch_start + batch_size]
        # Convert our target from the class index to a one hot vector
        y = None
        if has_labels:
            y_indices = data_y[batch_start:batch_start + batch_size]
            y = np.zeros((len(x), label_size), dtype=np.int32)
            y[np.arange(len(y_indices)), y_indices] = 1
        # ##
        yield x, y
        total_processed_examples += len(x)
    # Sanity check to make sure we iterated over all the dataset as intended
    assert total_processed_examples == len(data_X), 'Expected {} and processed {}'.format(len(data_X), total_processed_examples)
| [
"958904120@qq.com"
] | 958904120@qq.com |
c15f225950aa88e2dc2917e52c329801e9be9352 | f8065e5d6f898e02f4fbe533f5b252fe82273bb8 | /master/childmanager.py | 4758fa62b1f48da1ef1b398af3d5f2ddb28e9a83 | [] | no_license | pizi06/firefly_study | f79c8e3596043beabc2b13783d46b18515617bfe | 4e85db73e4eda473180b302c16872a498d605aab | refs/heads/master | 2021-01-15T11:20:39.691679 | 2016-08-25T03:36:43 | 2016-08-25T03:36:43 | 65,810,416 | 0 | 0 | null | 2016-08-16T10:15:39 | 2016-08-16T10:15:39 | null | UTF-8 | Python | false | false | 3,221 | py | #coding:utf8
"""
Created on 2011-10-14
@author: lan (www.9miao.com)
"""
from twisted.python import log
from zope.interface import Interface
from zope.interface import implements
from child import RemoteChild
class _ChildsManager(Interface):
    """Interface for the child-node manager."""
    def __init__(self):
        """Initialize the manager."""
    def getChildById(self,childId):
        """Return the child instance with the given node id."""
    def getChildByName(self,childname):
        """Return the child instance with the given node name."""
    def addChild(self,child):
        """Register a child node.
        @param child: Child object
        """
    def dropChild(self,*arg,**kw):
        """Remove a child node."""
    def callChild(self,*args,**kw):
        """Invoke a remote interface on a child node."""
    def callChildByName(self,*args,**kw):
        """Invoke a remote interface on a child node.
        @param childname: str  name of the child node
        """
    def dropChildByID(self,childId):
        """Remove a child node.
        @param childId: Child ID
        """
class ChildsManager(object):
    """Manager for child nodes.

    NOTE(review): Python-2-only code (``except Exception,e`` syntax,
    ``dict.has_key``, zope/twisted imports) — it will not run on
    Python 3 as-is.
    """
    implements(_ChildsManager)
    def __init__(self):
        """Initialize the child-node manager."""
        # Maps child name -> Child instance (callChild treats the same
        # key as an id).
        self._childs = {}
    def getChildById(self,childId):
        """Return the child instance with the given node id, or None."""
        return self._childs.get(childId)
    def getChildByName(self,childname):
        """Return the child instance with the given node name, or None."""
        for key,child in self._childs.items():
            if child.getName() == childname:
                return self._childs[key]
        return None
    def addChild(self,child):
        """Register a child node.
        @param child: Child object
        """
        key = child._name
        if self._childs.has_key(key):
            # NOTE(review): raising a plain string is invalid — on modern
            # interpreters this raises TypeError instead of the intended
            # error; should be a real exception, e.g. KeyError.
            raise "child node %s exists"% key
        self._childs[key] = child
    def addChildByNamePeer(self,name,peer):
        # Wrap the remote peer in a RemoteChild and register it.
        child = RemoteChild(name,peer)
        self.addChild(child)
    def dropChild(self,child):
        """Remove a child node.
        @param child: Child Object
        """
        key = child._name
        try:
            del self._childs[key]
        except Exception,e:
            # Best-effort removal: a missing key is only logged.
            log.msg(str(e))
    def dropChildByID(self,childId):
        """Remove a child node by id.
        @param childId: Child ID
        """
        try:
            del self._childs[childId]
        except Exception,e:
            # Best-effort removal: a missing key is only logged.
            log.msg(str(e))
    def callChild(self,childId,*args,**kw):
        """Invoke the child's callback interface.
        @param childId: id of the child node
        """
        child = self._childs.get(childId,None)
        if not child:
            log.err("child %s doesn't exists"%childId)
            return
        return child.callbackChild(*args,**kw)
    def callChildByName(self,childname,*args,**kw):
        """Invoke the child's callback interface.
        @param childname: str  name of the child node
        """
        child = self.getChildByName(childname)
        if not child:
            log.err("child %s doesn't exists"%childname)
            return
        return child.callbackChild(*args,**kw)
| [
"chenee543216@gmail.com"
] | chenee543216@gmail.com |
7b20daa0763292ec49da5292a8b5fec8a9b0692c | 99b3a6bdf81ae69ed07c402098458635f20a75a7 | /one_student_without_allennlp/mean_teacher/modules/convert_mednli.py | 0616df65a45a0f8f77a3b1a50597d1d04f5377cd | [] | no_license | mithunpaul08/mean-teacher | e6298efac8f7d67671bc6eca19f07568afa0caee | 11a0a5b813b4a2f8b9c3524af35d3e3914d457b6 | refs/heads/master | 2021-07-16T13:26:22.343467 | 2020-05-14T23:39:35 | 2020-05-14T23:39:35 | 164,931,583 | 1 | 0 | null | 2020-05-14T23:39:36 | 2019-01-09T20:18:32 | Python | UTF-8 | Python | false | false | 537 | py | import pandas as pd
import json
import os

# Convert MedNLI test data (sentence1 / sentence2 / gold_label) into the
# claim / evidence / label JSONL format used by the RTE pipeline.
test_file="../../data/rte/mednli/mli_test_v1.jsonl"
assert os.path.exists(test_file) is True
t=pd.read_json(test_file,lines=True)
out_path="../../data/rte/mednli/mli_test_lex.jsonl"
# PERF/FIX: write every row through one handle instead of truncating the
# file and then reopening it in append mode once per row.
with open(out_path, 'w') as outfile:
    for i, row in t.iterrows():
        total = {'claim': row.sentence1,
                 'evidence': row.sentence2,
                 "label": row.gold_label}
        json.dump(total, outfile)
        outfile.write("\n")
"mithunpaul08@gmail.com"
] | mithunpaul08@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.