blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e2498c3bf7bb6bc5cb3c855e8debb0ebab026286 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_168/95.py | 8dfe2a69a12272a82854b8234769c1ae46bcb16d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | from sys import stdin, stdout
def solve(R, C, mat):
"""
. period = no arrow
^ caret = up arrow
> greater than = right arrow
v lowercase v = down arrow
< less than = left arrow
"""
rowTally = [0]*R
colTally = [0]*C
for r in range(R):
for c in range(C):
if mat[r][c] != ".":
rowTally[r] += 1
colTally[c] += 1
change = 0
for r in range(R):
for c in range(C):
# yes i have heard of functions
if mat[r][c] == "v":
if not any( mat[r_][c] != "." for r_ in range(r+1,R) ):
if rowTally[r] > 1 or colTally[c] > 1:
change += 1
else:
return "IMPOSSIBLE"
elif mat[r][c] == "^":
if not any( mat[r_][c] != "." for r_ in range(0,r) ):
if rowTally[r] > 1 or colTally[c] > 1:
change += 1
else:
return "IMPOSSIBLE"
elif mat[r][c] == ">":
if not any( mat[r][c_] != "." for c_ in range(c+1,C) ):
if rowTally[r] > 1 or colTally[c] > 1:
change += 1
else:
return "IMPOSSIBLE"
elif mat[r][c] == "<":
if not any( mat[r][c_] != "." for c_ in range(0,c) ):
if rowTally[r] > 1 or colTally[c] > 1:
change += 1
else:
return "IMPOSSIBLE"
return change
if __name__ == '__main__':
T = int(stdin.readline())
for i in range(T):
# read input for this problem
R, C = map(int, stdin.readline().strip().split())
mat = []
for j in range(R):
mat.append(stdin.readline().strip())
result = solve(R, C, mat)
print "Case #%d: %s"%(i+1, result) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
cccba4a3c3c68ac7d17083f6f91026c74ccae90d | bf12e13c0ab5ccf2fc32509b02aaae6b6a2e3327 | /benchmarks/richards_AOT.py | ed3c97c6c7e4880120a53657731d8761841680bb | [
"MIT",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | HighCWu/tpythonpp | 42b56c9eb3c77192cbda36f0e198707bb858fe38 | f1c15e1101993e4c9c7529739823b47759ea13f7 | refs/heads/master | 2023-06-30T16:17:09.409107 | 2021-03-19T04:16:12 | 2021-03-19T04:16:12 | 391,806,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,386 | py | # based on a Java version:
# Based on original version written in BCPL by Dr Martin Richards
# in 1981 at Cambridge University Computer Laboratory, England
# and a C++ version derived from a Smalltalk version written by
# L Peter Deutsch.
# Java version: Copyright (C) 1995 Sun Microsystems, Inc.
# Translation from C++, Mario Wolczko
# Outer loop added by Alex Jacoby
# TPython AOT version: raptorman 2020
#AOT begin
# Task IDs
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
BUFSIZE = 4
TASKTABSIZE = 10
class Packet(object):
def __init__(self,l,i,k):
self.plink = l
self.pident = i
self.kind = k
self.datum = 0
self.data = [0] * BUFSIZE
def append_to(self,lst):
self.plink = None
if lst is None:
return self
else:
p = lst
next = p.plink
while next is not None:
p = next
next = p.plink
p.plink = self
return lst
# Task Records
class TaskRec(object):
pass
class DeviceTaskRec(TaskRec):
def __init__(self):
self.pending = None
class IdleTaskRec(TaskRec):
def __init__(self):
self.control = 1
self.icount = 10000
class HandlerTaskRec(TaskRec):
def __init__(self):
self.work_in = None
self.device_in = None
def workInAdd(self,p):
self.work_in = p.append_to( self.work_in)
return self.work_in
def devInAdd(self,p):
self.device_in = p.append_to( self.device_in)
return self.device_in
class WorkerTaskRec(TaskRec):
def __init__(self):
self.dest = I_HANDLERA
self.wcount = 0
# Task
class TaskState(object):
def __init__(self):
self.pktpending = True
self.tskwaiting = False
self.tskholding = False
def packetPending(self):
self.pktpending = True
self.tskwaiting = False
self.tskholding = False
return self
def waiting(self):
self.pktpending = False
self.tskwaiting = True
self.tskholding = False
return self
def running(self):
self.pktpending = False
self.tskwaiting = False
self.tskholding = False
return self
def waitPacket(self):
self.pktpending = True
self.tskwaiting = True
self.tskholding = False
return self
def isPending(self):
return self.pktpending
def isTskWait(self):
return self.tskwaiting
def isTskHold(self):
return self.tskholding
def isHoldWait(self):
return self.tskholding or ((not self.pktpending) and self.tskwaiting)
def isWaitPkt(self):
return self.pktpending and self.tskwaiting and not self.tskholding
class TaskWorkArea(object):
def __init__(self):
self.taskTab = [None] * TASKTABSIZE
self.taskList = None
self.holdCount = 0
self.qpktCount = 0
WorkArea = TaskWorkArea()
class Task(TaskState):
def __init__(self,i,p,w, initialState,r):
self.link = WorkArea.taskList
self.ident = i
self.priority = p
self.input = w
self.pktpending = initialState.isPending()
self.tskwaiting = initialState.isTskWait()
self.tskholding = initialState.isTskHold()
self.handle = r
WorkArea.taskList = self
WorkArea.taskTab[i] = self
def addPacket(self,p,old):
if self.input is None:
self.input = p
self.pktpending = True
if self.priority > int(old.priority):
return self
else:
p.append_to( self.input )
return old
def waitTask(self):
self.tskwaiting = True
return self
def hold(self):
WorkArea.holdCount += 1
self.tskholding = True
return self.link
def findtcb(self,id):
t = WorkArea.taskTab[id]
if t is None:
print("Exception in findtcb")
return t
def release(self,i):
t = self.findtcb(i)
t.tskholding = False
if t.priority > self.priority:
return t
else:
return self
def qpkt(self, pkt):
t = self.findtcb( pkt.pident )
WorkArea.qpktCount += 1
pkt.plink = None
pkt.pident = self.ident
p = t.addPacket(pkt,self)
return p
def fn(self,pkt,r):
raise NotImplementedError
def runTask(self):
if self.isWaitPkt():
msg = self.input
self.input = msg.plink
if self.input is None:
self.running()
else:
self.packetPending()
return self.fn(msg,self.handle)
else:
return self.fn(None,self.handle)
# DeviceTask
class DeviceTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
d = r
if pkt is None:
pk = d.pending
if pk is None:
tsk = self.waitTask()
return tsk
else:
d.pending = None
return self.qpkt(pk)
else:
d.pending = pkt
return self.hold()
class HandlerTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
h = r
if pkt is not None:
if pkt.kind == K_WORK:
h.workInAdd(pkt)
else:
h.devInAdd(pkt)
work = h.work_in
if work is None:
return self.waitTask()
count = work.datum
if count >= BUFSIZE:
h.work_in = work.plink
return self.qpkt(work)
dev = h.device_in
if dev is None:
return self.waitTask()
h.device_in = dev.plink
dev.datum = work.data[count]
work.datum = count + 1
return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,0,None,s,r)
def fn(self,pkt,r):
i = r
i.icount -= 1
if i.icount == 0:
return self.hold()
elif (i.control & 1) == 0:
i.control = int(i.control / 2)
return self.release(I_DEVA)
else:
i.control = int(i.control / 2) ^ 0xd008
return self.release(I_DEVB)
# WorkTask
A = ord('A')
class WorkTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
w = r
dest = None
if pkt is None:
tsk = self.waitTask()
return tsk
if w.dest == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.dest = dest
pkt.pident = dest
pkt.datum = 0
for i in range(BUFSIZE):
w.wcount += 1
if int( w.wcount ) > 26:
w.wcount = 1
pkt.data[i] = A + int( w.wcount ) - 1
return self.qpkt(pkt)
def schedule():
t = WorkArea.taskList
while t is not None:
if t.isHoldWait():
t = t.link
else:
t = t.runTask()
class Richards(object):
def __init__(self):
pass
def run(self, iterations):
for i in range( int(iterations) ):
WorkArea.holdCount = 0
WorkArea.qpktCount = 0
ts1 = TaskState()
IdleTask(I_IDLE, 1, 10000, ts1.running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq , 0, K_WORK)
ts2 = TaskState()
WorkTask(I_WORK, 1000, wkq, ts2.waitPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
ts3 = TaskState()
HandlerTask(I_HANDLERA, 2000, wkq, ts3.waitPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
ts4 = TaskState()
HandlerTask(I_HANDLERB, 3000, wkq, ts4.waitPacket(), HandlerTaskRec())
ts5 = TaskState()
DeviceTask(I_DEVA, 4000, None, ts5.waiting(), DeviceTaskRec())
ts6 = TaskState()
DeviceTask(I_DEVB, 5000, None, ts6.waiting(), DeviceTaskRec())
schedule()
if WorkArea.holdCount == 9297 and WorkArea.qpktCount == 23246:
pass
else:
print("richards failed")
print( WorkArea.holdCount)
print( WorkArea.qpktCount)
return False
return True
#AOT export
def run_richards(loops):
print("enter richards...")
r = Richards()
result = r.run(loops)
if not result:
print("ERROR incorrect results!")
return None
#AOT end
def main():
print('enter main...')
iterations = 24
run_richards(iterations)
main()
| [
"goatman.py@gmail.com"
] | goatman.py@gmail.com |
8097db870e799ab5765abe37a3cd5c66098b9a29 | 8ac3fe3d861a222210912a02effea2110456d052 | /django_for_beginners/project_3_mb_app/project_3_mb_app/wsgi.py | 694273d02277daaef7beda9ccea67910d0f019e9 | [
"MIT"
] | permissive | rednafi/django-unchained | 40446960f52f0c905a6ba3e318154ca11a31188b | 0f71c8d056699496d4af3ab049f9b2f9d057486b | refs/heads/master | 2022-12-10T10:11:52.906880 | 2020-09-01T17:43:58 | 2020-09-01T17:43:58 | 282,356,752 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for project_3_mb_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_3_mb_app.settings")
application = get_wsgi_application()
| [
"redowan.nafi@gmail.com"
] | redowan.nafi@gmail.com |
c75372be898bef2f2b78e0654de40ee572e5b0c6 | 5b3bf81b22f4eb78a1d9e801b2d1d6a48509a236 | /leetcode/1029.py | 7c92c896074a20b0d420da645524ea36db209412 | [] | no_license | okoks9011/problem_solving | 42a0843cfdf58846090dff1a2762b6e02362d068 | e86d86bb5e3856fcaaa5e20fe19194871d3981ca | refs/heads/master | 2023-01-21T19:06:14.143000 | 2023-01-08T17:45:16 | 2023-01-08T17:45:16 | 141,427,667 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | class Solution:
def twoCitySchedCost(self, costs: List[List[int]]) -> int:
m = len(costs)
n = m // 2
costs.sort(key=lambda c: c[0] - c[1])
result = 0
for c in costs[:n]:
result += c[0]
for c in costs[n:]:
result += c[1]
return result
| [
"okoks9011@gmail.com"
] | okoks9011@gmail.com |
3112c122eb78775c5aff7a7f0d75a685d7133de8 | ef1f62cf4e53f856bf763ac0dee73f054518530d | /Week_09/5.Longest_Palindromic_Substring.py | f20d75493465be4876f6af1df68bed6c2e3081c0 | [] | no_license | ZHHJemotion/algorithm008-class01 | 3338af3619d8e1754a62af6a852f517b47298d95 | 5bb7d2b74110df0b5788b94c69582552d711563a | refs/heads/master | 2022-11-12T09:26:24.941738 | 2020-06-30T15:29:20 | 2020-06-30T15:29:20 | 255,102,230 | 0 | 0 | null | 2020-04-12T14:39:17 | 2020-04-12T14:39:17 | null | UTF-8 | Python | false | false | 1,361 | py | # Given a string s, find the longest palindromic substring in s. You may assume
# that the maximum length of s is 1000.
#
# Example 1:
#
#
# Input: "babad"
# Output: "bab"
# Note: "aba" is also a valid answer.
#
#
# Example 2:
#
#
# Input: "cbbd"
# Output: "bb"
#
# Related Topics String Dynamic Programming
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def longestPalindrome(self, s: str) -> str:
# 动态规划
size = len(s)
if size < 2:
return s
dp = [[False for _ in range(size)] for _ in range(size)]
max_len = 1
start = 0
for i in range(size):
dp[i][i] = True
for j in range(1, size):
# 只有下面这一行代码不一样
for i in range(j - 1, -1, -1):
if s[i] == s[j]:
if j - i < 3:
dp[i][j] = True
else:
dp[i][j] = dp[i + 1][j - 1]
else:
dp[i][j] = False
if dp[i][j]:
cur_len = j - i + 1
if cur_len > max_len:
max_len = cur_len
start = i
return s[start:start + max_len]
# leetcode submit region end(Prohibit modification and deletion)
| [
"zhhjemotion@hotmail.com"
] | zhhjemotion@hotmail.com |
0bab8e53db1d047169277c517c29a58eeb03ffaa | 10a3708ecf54c7acd302b91e816b6212afc9b4a6 | /tests/test_main.py | 64f15a0adeffdf15d8e6800e12bac9451ff0e30e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Vizzuality/timvt | 4bfa4bcc7918c5d02bae382b9e0c4e13efdd77c3 | 887a1dc81c55d3f2658ddaabbe113174b45a47b2 | refs/heads/master | 2023-06-29T00:54:30.896044 | 2021-07-26T17:06:40 | 2021-07-26T17:06:40 | 389,608,019 | 0 | 0 | MIT | 2021-07-26T17:06:41 | 2021-07-26T11:23:24 | null | UTF-8 | Python | false | false | 239 | py | """Test timvt.main.app."""
def test_health(app):
"""Test /healthz endpoint."""
response = app.get("/healthz")
assert response.status_code == 200
assert response.json() == {"message": "I wear a mask and I wash my hands!"}
| [
"vincent.sarago@gmail.com"
] | vincent.sarago@gmail.com |
6cc0162440e618521ffa11d341ef9ae24c9e0f58 | c69f0b95dd485439dfa56610b0c123b972181cae | /perm/backends.py | bdb41333f83de1b07ebb8049d70130b774d2501d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pombreda/django-perm | ec6cdf075f54c39fd402befe30bc215792fc3e2e | 7141a3095942243684cd336cfaeeb99812e77dd6 | refs/heads/master | 2021-01-09T05:46:24.311389 | 2014-05-21T08:16:59 | 2014-05-21T08:16:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | from django.utils.translation import ugettext_lazy as _
from django.db.models import Model
from .exceptions import PermAppException
from .permissions import permissions_manager
class ModelPermissionBackend(object):
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, username, password):
"""
This backend does not authenticate
"""
return None
def has_perm(self, user_obj, perm, obj=None):
# Non-existing and inactive users never get permission
if not user_obj or not user_obj.is_active:
return False
# If obj is an instance, get the model class
if not obj:
obj = None
model = None
elif isinstance(obj, Model):
model = obj.__class__
elif issubclass(obj, Model):
model = obj
obj = None
else:
raise PermAppException(_('Unexpected value for model.'))
# If permission is in dot notation,
perm_parts = perm.split('.')
if len(perm_parts) > 1:
# Keep only the last part of the permission (without application name)
perm_app, perm = perm_parts
# Make sure permission and object application are the same
if model:
model_app = model._meta.app_label
if perm_app != model_app:
raise PermAppException(
_("App mismatch, perm has '%(perm_app)s' and model has '%(model_app)s'" % {
'perm_app': perm_app,
'model_app': model_app,
})
)
# Get the ModelPermissions object
object_permissions = permissions_manager.get_permissions(model, user_obj, perm, obj)
# No ModelPermissions means no permission
if not object_permissions:
return False
# Check the permissions
return object_permissions.has_perm()
| [
"dylan@zostera.nl"
] | dylan@zostera.nl |
2a113215d8fd82dc5cb93c773b50527c3ecb670a | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/HToSS/LO_HToSSTodddd_MH125_MS40_ctauS0_13TeV.py | 787512b6f56f88bf5c9224698486ff90902d0d84 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,722 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
from Configuration.Generator.Pythia8PowhegEmissionVetoSettings_cfi import *
generator = cms.EDFilter("Pythia8HadronizerFilter",
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
pythia8PowhegEmissionVetoSettingsBlock,
processParameters = cms.vstring(
'9000006:all = sk skbar 0 0 0 40 3.9464e-12 1.0 75.0 0',
'9000006:oneChannel = 1 1.0 101 1 -1',
'9000006:mayDecay = on',
'9000006:isResonance = on',
'25:m0 = 125.0',
'25:onMode = off',
'25:addChannel = 1 0.000000001 101 9000006 -9000006',
'25:onIfMatch = 9000006 -9000006',
'9000006:onMode = off',
'9000006:onIfAny = 1',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'pythia8PowhegEmissionVetoSettings',
'processParameters'
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"sheffield@physics.rutgers.edu"
] | sheffield@physics.rutgers.edu |
1b4cdf8106a5d49679acc14e28e2691b6895d5fc | 6ed48bf3c72e61fe53144a3545ab305112c93501 | /infra/tools/testjs/test/testjs_test.py | d48e6218bbb2a4a643d6ee1b7d0548b7c49a7653 | [
"BSD-3-Clause"
] | permissive | eunchong/infra | ee5f7a9379977de8c814f90dbba3f6adbf06a75c | ce3728559112bfb3e8b32137eada517aec6d22f9 | refs/heads/master | 2022-11-27T06:26:57.415805 | 2016-04-08T12:34:36 | 2016-04-08T12:34:36 | 55,699,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for ../testjs.py"""
import argparse
import unittest
import random
import mock
import contextlib
import os
from infra.tools.testjs import testjs
from infra_libs import utils
class TestJsTest(unittest.TestCase):
def test_arguments(self):
parser = argparse.ArgumentParser()
testjs.add_argparse_options(parser)
args = parser.parse_args(['chrome'])
self.assertEqual(args.target, ['chrome'])
@mock.patch('subprocess.Popen')
@mock.patch('os.path.exists')
@mock.patch('random.choice')
def test_get_display(self, choice, exists, popen):
choice.return_value = 102
exists.side_effect = [False, True]
fake_popen = mock.MagicMock()
fake_popen.poll.return_value = None
fake_popen.pid = 1234
popen.return_value = fake_popen
with utils.temporary_directory() as tempdir:
tempfile = os.path.join(tempdir, 'pidfile')
real_tempfile = '%s102' % tempfile
with open(real_tempfile, 'w') as f:
f.write('1234')
testjs.LOCK_LOCATION = '%s%%d' % tempfile
with testjs.get_display() as display:
self.assertEquals(display, ':102')
@mock.patch('subprocess.call')
def test_karma(self, _call):
with mock.patch.dict(os.environ, {'foo': 'bar'}):
testjs.test_karma('somedir', 'stable', ':99')
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
7aa1945e1af5576de5561476842823113d0c1deb | 33b844747fe11a32fdc2a746d7f98c331d286756 | /454.py | 23d22c10cef3a64838d2981e882703a21d9a3047 | [] | no_license | michaelhuo/pcp | 1f1ccd338e68d53e3cd2e775ac7d3b3a2fe323a4 | d506e07f9ce5a5e1506fe88ddf94dc7c54d35bcd | refs/heads/master | 2023-02-16T00:32:02.448257 | 2021-01-18T18:02:00 | 2021-01-18T18:02:00 | 290,109,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | class Solution:
def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
if not A or not B or not C or not D:
return 0
A.sort()
B.sort()
C.sort()
D.sort()
AB = [i+j for i in A for j in B]
CD = [i+j for i in C for j in D]
AB.sort()
CD.sort()
hash_map = {}
for i,n in enumerate(CD):
if n in hash_map:
hash_map[n] += 1
else:
hash_map[n] = 1
length = len(A)
count = 0
for i,ab in enumerate(AB):
cd = -ab
if cd in hash_map:
count += hash_map[cd]
return count
| [
"5288085+michaelhuo@users.noreply.github.com"
] | 5288085+michaelhuo@users.noreply.github.com |
de7349c5fe952767b84df51272496c545530f5ce | a26b11bd2ee82f6ffbf82525d421ad32e2328c33 | /pentest_project/apps.py | 1c45b9056f1b24b77beb008ea891487197d20ffb | [] | no_license | Nekmo/pentest-studio | b08804fe19a5c3fb2bc14b90953922a2772623e2 | 6bac1cf8618437f034bcedc74c41c6079ef95d0b | refs/heads/master | 2023-09-06T07:51:55.333412 | 2023-08-19T21:46:44 | 2023-08-19T21:46:44 | 162,857,644 | 8 | 0 | null | 2023-03-04T03:03:04 | 2018-12-23T02:54:26 | JavaScript | UTF-8 | Python | false | false | 172 | py | from django.apps import AppConfig
class ProjectsConfig(AppConfig):
name = 'pentest_project'
def ready(self):
from pentest_project import signals # noqa
| [
"contacto@nekmo.com"
] | contacto@nekmo.com |
44c9e925bfe248186c3763308eac496173bc6788 | ac64fda7f1bfc92f7897efd60b8f3f0aeb22b4d7 | /syntactic_mutations/udacity/mutants/mutant8.py | 4ec26ac2e87ee995147927a7128110712ebad95a | [] | no_license | dlfaults/mutation_operators_evaluation | ea7f33459ba7bcf7d70092d9db8b40f9b338d516 | 7d1ff30e901931a46bf8908e9bb05cae3daa5f0f | refs/heads/master | 2020-12-27T15:45:07.262012 | 2020-02-03T12:22:01 | 2020-02-03T12:22:01 | 237,955,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten
from batch_generator import Generator
from utils import INPUT_SHAPE, batch_generator, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS
from keras import backend as K
from PIL import Image
import numpy as np
def build_model(args):
'''
Modified NVIDIA model
'''
model = Sequential()
model.add(Lambda((lambda x: ((x / 127.5) - 1.0)), input_shape=INPUT_SHAPE))
model.add(Conv2D(24, (5, 6), activation='elu', strides=(2, 2)))
model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))
model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='elu'))
model.add(Conv2D(64, (3, 3), activation='elu'))
model.add(Dropout(args.keep_prob))
model.add(Flatten())
model.add(Dense(100, activation='elu'))
model.add(Dense(50, activation='elu'))
model.add(Dense(10, activation='elu'))
model.add(Dense(1))
return model
def train_model(x_train, x_valid, y_train, y_valid, model_name, args):
'''
Train the model
'''
model = build_model(args)
model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))
train_generator = Generator(x_train, y_train, True, args)
validation_generator = Generator(x_valid, y_valid, False, args)
model.fit_generator(train_generator, validation_data=\
validation_generator, epochs=\
args.nb_epoch, use_multiprocessing=\
False, max_queue_size=\
10, workers=\
4)
model.save(model_name) | [
"gunel71@gmail.com"
] | gunel71@gmail.com |
eedab756cf34c40f4a29e36e5f9f6a4329822ddf | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/OneLogin/Events/CreateEvent.py | 079ba3e690ed53ad877f84568560303e9d959953 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,294 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# CreateEvent
# Creates a new event.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateEvent(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the CreateEvent Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/OneLogin/Events/CreateEvent')
def new_input_set(self):
return CreateEventInputSet()
def _make_result_set(self, result, path):
return CreateEventResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CreateEventChoreographyExecution(session, exec_id, path)
class CreateEventInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the CreateEvent
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by OneLogin.)
"""
InputSet._set_input(self, 'APIKey', value)
def set_EventTypeID(self, value):
"""
Set the value of the EventTypeID input for this Choreo. ((required, integer) The id for the type of event you want to create. Note that depending on the event type id specified, you may need to provide the ObjectName and ObjectID that is being affected.)
"""
InputSet._set_input(self, 'EventTypeID', value)
def set_ObjectID(self, value):
"""
Set the value of the ObjectID input for this Choreo. ((conditional, integer) The object id that is being affected. Required for certain event types. When specified, ObjectName must also be provided.)
"""
InputSet._set_input(self, 'ObjectID', value)
def set_ObjectName(self, value):
"""
Set the value of the ObjectName input for this Choreo. ((conditional, string) The object name that is being affected (i.e. user-id). Required for certain event types. When specified, ObjectID must also be provided.)
"""
InputSet._set_input(self, 'ObjectName', value)
class CreateEventResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the CreateEvent Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from OneLogin.)
"""
return self._output.get('Response', None)
class CreateEventChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CreateEventResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
f497a0be0504ad7b1ef4f0400cde1dfb2af20cde | 4b69207838e74c89fa375294a3627da93dd0fa1d | /python/pysoarlib/util/WMNode.py | f98544510bac45a872569f7a2f371a8a4319c2e1 | [
"MIT"
] | permissive | amininger/vim-soar-plugin | ad00afe245adb32bdf4b0fb1b199be60bb0eab1d | c1a4cfa8b0e4760470c5619325f522f2f5021c25 | refs/heads/master | 2021-06-08T16:02:16.405583 | 2021-05-02T16:04:02 | 2021-05-02T16:04:02 | 133,554,667 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,513 | py | import Python_sml_ClientInterface as sml
### Note: Helper class used by extract_wm_graph
class WMNode:
""" Represents a node in the working memory graph wrapping an Identifier and containing links to child wmes
node.id = root_id (Identifier)
node.symbol = string (The root_id symbol e.g. O34)
node['attr'] = WMNode # for identifiers
node['attr'] = constant # for string, double, or int value
node['attr'] = [ val1, val2, ... ] # for multi-valued attributes (values can be constants or WMNodes)
"""
def __init__(self, soar_id):
self.id = soar_id
self.symbol = soar_id.GetValueAsString()
self.children = {}
def attributes(self):
""" Returns a list of all child wme attribute strings """
return list(self.children.keys())
# Supports dictionary syntax (read only)
def __getitem__(self, attr):
""" Returns the value of the wme (node, attr, val)
where a value can be a int, double, string, WMNode,
or a list of such values for a multi-valued attribute """
return self.children.get(attr, None)
def __str__(self):
""" Returns a nicely formatted string representation of the node and all its children
(Warning: will be a lot of text for large graphs) """
return self.__str_helper__("", set())
def __str_helper__(self, indent, ignore_ids):
var = "<" + self.symbol + ">"
if self.symbol in ignore_ids or len(self.children) == 0:
return var
ignore_ids.add(self.symbol)
s = var + " {\n"
for a, v in self.children.items():
s += indent + " " + a + ": " + _wm_value_to_str(v, indent + " ", ignore_ids) + "\n"
s += indent + "}"
return s
def _extract_children(self, max_depth, node_map):
""" Internal helper method to recursively extract graph structure for a node's children """
if max_depth == 0:
return
for index in range(self.id.GetNumberChildren()):
wme = self.id.GetChild(index)
attr = wme.GetAttribute()
if wme.IsIdentifier():
child_id = wme.ConvertToIdentifier()
child_sym = child_id.GetValueAsString()
# First check if the child id is already in the node map
if child_sym in node_map:
wme_val = node_map[child_sym]
else:
# If not, recursively create and extract the children
wme_val = WMNode(child_id)
node_map[wme_val.symbol] = wme_val
wme_val._extract_children(max_depth-1, node_map)
elif wme.GetValueType() == "int":
wme_val = wme.ConvertToIntElement().GetValue()
elif wme.GetValueType() == "double":
wme_val = wme.ConvertToFloatElement().GetValue()
else:
wme_val = wme.GetValueAsString()
self._add_child_wme(attr, wme_val)
def _add_child_wme(self, attr, value):
""" Adds the child wme to the children dictionary
If there are multiple values for a given attr, move them into a list instead of replacing """
if attr in self.children:
cur_val = self.children[attr]
if isinstance(cur_val, list):
# Child is already a list, just append
cur_val.append(value)
else:
# This is the second value for the attr, replace current value with a list
self.children[attr] = [ cur_val, value ]
else:
# First time we've seen this attr, just add to dictionary
self.children[attr] = value
def _wm_value_to_str(val, indent, ignore_ids):
"""
recursive helper function which returns a string representation of any given value type
:param val: The value to convert to a string (can be str, int, float, list, WMNode)
:param indent: a string of spaces to indent the current level
:param ignore_ids: A set of Identifier symbols to not print
"""
if isinstance(val, str):
return val
if isinstance(val, int):
return str(val)
if isinstance(val, float):
return str(val)
if isinstance(val, list):
return "[ " + ", ".join(_wm_value_to_str(i, indent, ignore_ids) for i in val) + " ]"
if isinstance(val, WMNode):
return val.__str_helper__(indent, ignore_ids)
return ""
| [
"mininger@umich.edu"
] | mininger@umich.edu |
0d04604b7ac5e80c3263d95f796f0fda1d743bfa | a110805b0e0cf26d1da8e6276ec6883ed4297752 | /SOLUCIONES/SOLUCIONES/intermedio I/doc/funciones.py | 770d04cc793b6f49d201cd46bee7c2f031484724 | [] | no_license | dayes/curso_Python | a1e77725bd8ab4c287589f15e36849817bcb39e8 | 352b0505a5e3d6f3310893b5c87d1eab31a2a66d | refs/heads/master | 2020-03-22T07:37:54.487944 | 2018-07-13T10:59:40 | 2018-07-13T10:59:40 | 139,713,481 | 0 | 0 | null | 2018-07-04T11:40:03 | 2018-07-04T11:34:55 | null | UTF-8 | Python | false | false | 158 | py | """ Esto es una prueba de doc"""
def miFun():
"""Este es un ejemplo de doc"""
return 0
print (help(miFun))
print(__doc__)
| [
"david@MacBook-Air-de-David.local"
] | david@MacBook-Air-de-David.local |
bf7437e9c50d865354cd79454a2ad6c27e409ce5 | 11d5306930ce7670d63d216c4cebab416fa98404 | /alexandria/sedes.py | 293762a6b8e4e2c0f3620e58ac8b77d0ad8cf78b | [
"MIT"
] | permissive | carver/alexandria | fb58ac96ffd1a80c0f67a057086216556695c003 | 8440b7de69f2a4f828a252d450fe66ace72ecd2b | refs/heads/master | 2021-06-14T05:25:10.348460 | 2020-04-09T16:34:21 | 2020-04-09T16:34:21 | 254,472,966 | 0 | 0 | MIT | 2020-04-09T20:34:53 | 2020-04-09T20:34:52 | null | UTF-8 | Python | false | false | 363 | py | from ssz import sedes
class ByteList(sedes.List): # type: ignore
def __init__(self, max_length: int) -> None:
super().__init__(element_sedes=sedes.uint, max_length=max_length)
def serialize(self, value: bytes) -> bytes:
return value
def deserialize(self, value: bytes) -> bytes:
return value
byte_list = ByteList(2**32)
| [
"pipermerriam@gmail.com"
] | pipermerriam@gmail.com |
b9d9d12b75776a9199a9173de4341d8f98a6779b | 3b981dfc835d36eb9bb86e4dbb0b1e332285d5cf | /kuaapi/admin.py | 9af0c962649a1536b0e087ac96344adfa77afba8 | [] | no_license | richraines/nuortenideat | d9ad5ff33e4231c7f9960b9e1a54be16395173a2 | 033f63575c52ce118f0deba1168afca743de6c26 | refs/heads/master | 2020-09-01T01:39:39.137935 | 2016-10-31T14:24:59 | 2016-10-31T14:24:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from django.contrib import admin
from django.contrib import admin
from .models import ParticipatingMunicipality, KuaInitiative, KuaInitiativeStatus
class ParticipatingMunicipalityAdmin(admin.ModelAdmin):
list_display = ('municipality', 'created', )
search_fields = ('municipality__code',
'municipality__name_fi',
'municipality__name_sv', )
class KuaInitiativeStatusAdmin(admin.ModelAdmin):
list_display = ('kua_initiative', 'status', 'created',)
admin.site.register(ParticipatingMunicipality, ParticipatingMunicipalityAdmin)
admin.site.register(KuaInitiative)
admin.site.register(KuaInitiativeStatus, KuaInitiativeStatusAdmin)
| [
"erno@fns.fi"
] | erno@fns.fi |
e564d01e7388ed8f5969eacb8e26e07d0b40aa1b | a3c662a5eda4e269a8c81c99e229879b946a76f6 | /.venv/lib/python3.7/site-packages/pylint/test/extensions/data/docstring.py | 5410a9bb40a1abdb6d7f5028e765a11648dfaa3b | [
"MIT"
] | permissive | ahmadreza-smdi/ms-shop | 0c29da82c58b243507575672bbc94fb6e8068aeb | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | refs/heads/master | 2023-04-27T19:51:34.858182 | 2019-11-24T20:57:59 | 2019-11-24T20:57:59 | 223,616,552 | 6 | 2 | MIT | 2023-04-21T20:51:21 | 2019-11-23T16:09:03 | Python | UTF-8 | Python | false | false | 811 | py | """Checks of Dosctrings 'docstring-first-line-empty' 'bad-docstring-quotes'"""
def check_messages(*messages):
"""
docstring"""
return messages
def function2():
"""Test Ok"""
class FFFF:
"""
Test Docstring First Line Empty
"""
def method1(self):
'''
Test Triple Single Quotes docstring
'''
def method2(self):
"bad docstring 1"
def method3(self):
'bad docstring 2'
def method4(self):
' """bad docstring 3 '
@check_messages('bad-open-mode', 'redundant-unittest-assert',
'deprecated-module')
def method5(self):
"""Test OK 1 with decorators"""
def method6(self):
r"""Test OK 2 with raw string"""
def method7(self):
u"""Test OK 3 with unicode string"""
| [
"ahmadreza.smdi@gmail.com"
] | ahmadreza.smdi@gmail.com |
16c05d184c6bc87bc9d36e51528b7aca0ec65502 | f72fa4432e6abb742cbf1c61c580db1ed688a311 | /day26/scrapy框架/daili/daili/pipelines.py | 1c15694b848f94cf71d67119ecbccff1779b3015 | [] | no_license | huningfei/python | 7ddc9da14a3e53ad1c98fc48edd1697a6f8fc4f7 | 9ca1f57f2ef5d77e3bb52d70ac9a241b8cde54d2 | refs/heads/master | 2022-10-31T18:56:33.894302 | 2019-01-04T11:06:59 | 2019-01-04T11:06:59 | 128,178,516 | 2 | 1 | null | 2022-10-12T19:26:04 | 2018-04-05T08:25:32 | Python | UTF-8 | Python | false | false | 286 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class DailiPipeline(object):
def process_item(self, item, spider):
return item
| [
"huningfei@126.com"
] | huningfei@126.com |
75b36e830e4d2d9d11bbd198191f5bfe029b6231 | adde5784379cba18934bc32bd779959ccc8bc94f | /redash/tasks/schedule.py | e8f6f6e241b5436bdd99780c065741d11b259cb6 | [
"BSD-2-Clause"
] | permissive | YuanlvCold/mxzz-bi | 32292a8cafb4097fcb60e70917849a2f23e5511f | 7cae1b80e2f715d0af7ca912d1793668353c4b9e | refs/heads/master | 2022-12-02T04:39:06.631341 | 2020-08-17T06:46:19 | 2020-08-17T06:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,525 | py | from __future__ import absolute_import
import logging
import hashlib
import json
from datetime import datetime, timedelta
from rq.job import Job
from rq_scheduler import Scheduler
from redash import extensions, settings, rq_redis_connection, statsd_client
from redash.tasks import (
sync_user_details,
refresh_queries,
empty_schedules,
refresh_schemas,
cleanup_query_results,
purge_failed_jobs,
version_check,
send_aggregated_errors,
Queue
)
logger = logging.getLogger(__name__)
class StatsdRecordingScheduler(Scheduler):
"""
RQ Scheduler Mixin that uses Redash's custom RQ Queue class to increment/modify metrics via Statsd
"""
queue_class = Queue
rq_scheduler = StatsdRecordingScheduler(
connection=rq_redis_connection, queue_name="periodic", interval=5
)
def job_id(kwargs):
metadata = kwargs.copy()
metadata["func"] = metadata["func"].__name__
return hashlib.sha1(json.dumps(metadata, sort_keys=True).encode()).hexdigest()
def prep(kwargs):
interval = kwargs["interval"]
if isinstance(interval, timedelta):
interval = int(interval.total_seconds())
kwargs["interval"] = interval
kwargs["result_ttl"] = kwargs.get("result_ttl", interval * 2)
return kwargs
def schedule(kwargs):
rq_scheduler.schedule(scheduled_time=datetime.utcnow(), id=job_id(kwargs), **kwargs)
def periodic_job_definitions():
jobs = [
{"func": refresh_queries, "interval": 30, "result_ttl": 600},
{"func": empty_schedules, "interval": timedelta(minutes=60)},
{
"func": refresh_schemas,
"interval": timedelta(minutes=settings.SCHEMAS_REFRESH_SCHEDULE),
},
{
"func": sync_user_details,
"timeout": 60,
"interval": timedelta(minutes=1),
},
{"func": purge_failed_jobs, "interval": timedelta(days=1)},
{
"func": send_aggregated_errors,
"interval": timedelta(minutes=settings.SEND_FAILURE_EMAIL_INTERVAL),
},
]
if settings.VERSION_CHECK:
jobs.append({"func": version_check, "interval": timedelta(days=1)})
if settings.QUERY_RESULTS_CLEANUP_ENABLED:
jobs.append({"func": cleanup_query_results, "interval": timedelta(minutes=5)})
# Add your own custom periodic jobs in your dynamic_settings module.
jobs.extend(settings.dynamic_settings.periodic_jobs() or [])
# Add periodic jobs that are shipped as part of Redash extensions
extensions.load_periodic_jobs(logger)
jobs.extend(list(extensions.periodic_jobs.values()))
return jobs
def schedule_periodic_jobs(jobs):
job_definitions = [prep(job) for job in jobs]
jobs_to_clean_up = Job.fetch_many(
set([job.id for job in rq_scheduler.get_jobs()])
- set([job_id(job) for job in job_definitions]),
rq_redis_connection,
)
jobs_to_schedule = [
job for job in job_definitions if job_id(job) not in rq_scheduler
]
for job in jobs_to_clean_up:
logger.info("Removing %s (%s) from schedule.", job.id, job.func_name)
rq_scheduler.cancel(job)
job.delete()
for job in jobs_to_schedule:
logger.info(
"Scheduling %s (%s) with interval %s.",
job_id(job),
job["func"].__name__,
job.get("interval"),
)
schedule(job)
| [
"2426548297@qq.com"
] | 2426548297@qq.com |
aa064d157437c3a44927ba9e0cce4c7feabdaf2e | be365e466711ac6483ec6cfd247cb5f665145b89 | /url_shortener/users/apps.py | be902fc4dce4a2a15286ad81d6b6b1a5698ec885 | [
"MIT"
] | permissive | triump0870/url_shortener | f2fbd5d7568f621172fccb328968c7c0739d7053 | 3a603c09a7b8bd399f30883c8553879099f78a94 | refs/heads/master | 2021-01-06T01:29:24.188513 | 2020-02-17T19:27:36 | 2020-02-17T19:27:36 | 241,190,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
name = "url_shortener.users"
verbose_name = _("Users")
def ready(self):
try:
import url_shortener.users.signals # noqa F401
except ImportError:
pass
| [
"b4you0870@gmail.com"
] | b4you0870@gmail.com |
4994ac3acf737a52f5d42965de713f06fdc38294 | 6be845bf70a8efaf390da28c811c52b35bf9e475 | /windows/Resources/Python/Core/Lib/encodings/mac_turkish.py | 696924582b0e618ea61f9e9d0431e6cabee0b08e | [] | no_license | kyeremalprime/ms | 228194910bf2ed314d0492bc423cc687144bb459 | 47eea098ec735b2173ff0d4e5c493cb8f04e705d | refs/heads/master | 2020-12-30T15:54:17.843982 | 2017-05-14T07:32:01 | 2017-05-14T07:32:01 | 91,180,709 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,883 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: mac_turkish.py
""" Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py.
"""
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='mac-turkish', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7fÄÅÇÉÑÖÜáàâäãåçéèêëíìîïñóòôöõúùûü†°¢£§•¶ß®©™´¨≠ÆØ∞±≤≥¥µ∂∑∏π∫ªºΩæø¿¡¬√ƒ≈∆«»…\xa0ÀÃÕŒœ–—“”‘’÷◊ÿŸĞğİıŞş‡·‚„‰ÂÊÁËÈÍÎÏÌÓÔ\uf8ffÒÚÛÙ\uf8a0ˆ˜¯˘˙˚¸˝˛ˇ'
encoding_table = codecs.charmap_build(decoding_table) | [
"kyeremalprime@gmail.com"
] | kyeremalprime@gmail.com |
2d2f005f8e606730f861a41b8f1ab4024ca461af | fcdfe976c9ed60b18def889692a17dc18a8dd6d7 | /python/geometry/polygon/plan_spread_spline1.py | 7b0cd59f8d1f064478adfe809d9da2e4ca8c64e2 | [] | no_license | akihikoy/ay_test | 4907470889c9bda11cdc84e8231ef3156fda8bd7 | a24dfb720960bfedb94be3b4d147e37616e7f39a | refs/heads/master | 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,489 | py | #!/usr/bin/python
import sys
sys.path.append('..')
from splines.cubic_hermite_spline import TCubicHermiteSpline
from polygon_point_in_out import *
import math
import numpy as np
import copy
#x,y wave pattern generator
class TWaveGenerator:
def __init__(self,vx=0.1):
self.data= [[0.0 ,vx*0.0 , 0.0],
[0.25,vx*0.25, 1.0],
[0.75,vx*0.75,-1.0],
[1.0 ,vx*1.0 , 0.0]]
#FINITE_DIFF, CARDINAL
self.splines= [TCubicHermiteSpline() for d in range(len(self.data[0])-1)]
for d in range(len(self.splines)):
data_d= [[x[0],x[d+1]] for x in self.data]
self.splines[d].Initialize(data_d, tan_method=self.splines[d].CARDINAL, end_tan=self.splines[d].CYCLIC, c=0.0, m=0.0)
def Evaluate(self, t, m1=1.0, m2=1.0):
self.splines[1].KeyPts[1].X= self.data[1][2] * m1
self.splines[1].KeyPts[2].X= self.data[2][2] * m2
self.splines[1].Update()
return [self.splines[d].EvaluateC(t) for d in range(len(self.splines))]
def IdSampler(N):
if N==0: return []
if N==1: return [0]
if N==2: return [0,1]
src= range(N)
res= []
res.append(src.pop(0))
res.append(src.pop(-1))
d= 2
while True:
for i in range(1,d,2):
res.append(src.pop(len(src)*i/d))
if len(src)==0: return res
d*= 2
def FSampler(xmin,xmax,num_div):
data= FRange(xmin,xmax,num_div)
return [data[i] for i in IdSampler(num_div)]
#p= func(t)
def EvalWaveFunc(func, points, resolution=20):
e1= True
e2= True
for t in FSampler(0.0,0.5,resolution/2):
p= func(t)
if not PointInPolygon2D(points,p):
e1= False
break
for t in FSampler(0.5,1.0,resolution/2):
p= func(t)
if not PointInPolygon2D(points,p):
e2= False
break
return e1, e2
#p= func(t,p1,p2)
#True/False,True/False= eval_func(lambda t:func(t,p1,p2))
def OptimizeWaveFunc1(func, p1_0, p2_0, eval_func):
#Check the existence of the solution:
e1,e2= eval_func(lambda t:func(t,0.0,0.0))
if not e1 or not e2: return None,None
p1= p1_0
p2= p2_0
while True:
e1,e2= eval_func(lambda t:func(t,p1,p2))
#print p1,p2,e1,e2
if e1 and e2: return p1,p2
if not e1:
p1*= 0.9
if p1<1.0e-6:
p2*= 0.9
if not e2:
p2*= 0.9
if p2<1.0e-6:
p1*= 0.9
import cma_es.cma as cma
#Optimize spline parameters using CMA-ES
#p= func(t,p1,p2)
#True/False,True/False= eval_func(lambda t:func(t,p1,p2))
def OptimizeWaveFunc2(func, p1_0, p2_0, eval_func):
#Check the existence of the solution:
e1,e2= eval_func(lambda t:func(t,0.0,0.0))
if not e1 or not e2: return None,None
to_fmin= lambda p,e: (-2.0*p[0]**2-2.0*p[1]**2) if e[0] and e[1] else None
fobj= lambda p: to_fmin( p, eval_func(lambda t:func(t,p[0],p[1])) )
options = {'CMA_diagonal':1, 'verb_time':0}
options['bounds']= [[0.0,0.0],[2.0,2.0]]
options['tolfun']= 3.0e-1 # 1.0e-4
options['verb_log']= False
options['scaling_of_variables']= np.array([1.0,1.0])
scale0= 1.0
parameters0= [0.0, 0.0]
#res= cma.fmin(fobj, parameters0, scale0, options)
es= cma.CMAEvolutionStrategy(parameters0, scale0, options)
solutions, scores= [], []
count= 0
while not es.stop():
while len(solutions) < es.popsize:
x= es.ask(1)[0]
f= fobj(x)
if f is not None:
solutions.append(x)
scores.append(f)
#print x,f
es.tell(solutions, scores)
es.disp()
#print 'es.result()@%i:'%(count),es.result()
count+=1
solutions, scores= [], []
res= es.result()
return res[0][0], res[0][1]
if __name__=='__main__':
def PrintEq(s): print '%s= %r' % (s, eval(s))
from gen_data import *
#points= To2d(Gen3d_01())
#points= To2d2(Gen3d_02())
#points= To2d2(Gen3d_11())
points= To2d2(Gen3d_12())
#points= To2d2(Gen3d_13())
fp= file('/tmp/orig.dat','w')
for p in points.tolist()+[points[0].tolist()]:
fp.write(' '.join(map(str,p))+'\n')
fp.close()
pca= TPCA(points)
u_dir= pca.EVecs[0]
print 'direction=',u_dir
direction= math.atan2(u_dir[1],u_dir[0])
start= pca.Mean
while True:
start2= np.array(start)-0.01*u_dir
if not PointInPolygon2D(points, start2): break
start= start2
print 'start=',start
rot= np.array([[math.cos(direction),-math.sin(direction)],[math.sin(direction),math.cos(direction)]])
wave= TWaveGenerator()
##Test spreading wave (without planning)
#fp= file('/tmp/spread1.dat','w')
#n_old= 0
#for t in FRange(0.0,10.0,120):
#ti= Mod(t,1.0)
#n= (t-ti)/1.0
#if n!=n_old:
#start= np.array(start) + np.dot(rot, np.array(wave.Evaluate(1.0)))
#n_old= n
#p= np.array(start) + np.dot(rot, np.array(wave.Evaluate(ti)))
#fp.write(' '.join(map(str,p))+'\n')
#fp.close()
#Spreading wave (with planning)
fp= file('/tmp/spread2.dat','w')
while True:
func= lambda ti,p1,p2: np.array(start) + np.dot(rot, np.array(wave.Evaluate(ti,m1=p1,m2=p2)))
p1o,p2o= OptimizeWaveFunc1(func, p1_0=2.0, p2_0=2.0, eval_func=lambda f:EvalWaveFunc(f,points))
#p1o,p2o= OptimizeWaveFunc2(func, p1_0=2.0, p2_0=2.0, eval_func=lambda f:EvalWaveFunc(f,points))
if None in (p1o,p2o): break
print p1o, p2o, EvalWaveFunc(lambda t:func(t,p1o,p2o),points), func(0.0,p1o,p2o), PointInPolygon2D(points,func(0.0,p1o,p2o))
for t in FRange(0.0,1.0,100):
p= func(t,p1o,p2o)
fp.write(' '.join(map(str,p))+'\n')
start= p
fp.close()
print 'Plot by'
print "qplot -x -s 'set size ratio -1' /tmp/orig.dat w l /tmp/spread2.dat w l"
| [
"info@akihikoy.net"
] | info@akihikoy.net |
f89377c0f4f21913e8c14627b65bc29d74c65253 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/11144120.py | e781cf1012d7c6dbdfcb026eb4030ee62195d75a | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/11144120.py generated: Fri, 27 Mar 2015 16:10:04
#
# Event Type: 11144120
#
# ASCII decay Descriptor: [B0 -> (J/psi(1S) -> mu+ mu- {,gamma} {,gamma}) p+ p~-]cc
#
from Configurables import Generation
Generation().EventType = 11144120
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_Jpsippbar=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
943f0da3a075e3d81e38d43e212a3497cfa18521 | 02035d84092291ff6a691047e7a3709ea03dddd8 | /visbrain/objects/tests/test_brain_obj.py | 14a5e1042f873a1e24ad7bde7758e90f647fc246 | [
"BSD-3-Clause"
] | permissive | abhiishekpal/visbrain | a8abaf9bc4434fcf694158ac6510d9f67925b9a7 | 824724656a5d890330c086541176a539b004766d | refs/heads/master | 2020-03-30T08:17:28.823302 | 2018-07-04T01:54:59 | 2018-07-04T01:56:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,993 | py | """Test BrainObj."""
import numpy as np
from visbrain.objects import BrainObj, SourceObj
from visbrain.objects.tests._testing_objects import _TestObjects
from visbrain.io import read_stc, clean_tmp
NEEDED_FILES = dict(ANNOT_FILE_1='lh.aparc.annot',
ANNOT_FILE_2='rh.PALS_B12_Brodmann.annot',
MEG_INVERSE='meg_source_estimate-lh.stc',
OVERLAY_1='lh.sig.nii.gz',
OVERLAY_2='lh.alt_sig.nii.gz',
PARCELLATES_1='lh.aparc.a2009s.annot',
PARCELLATES_2='rh.aparc.annot'
)
# BRAIN :
b_obj = BrainObj('B1')
n_vertices, n_faces = 100, 50
vertices_x3 = 20. * np.random.rand(n_vertices, 3, 3)
vertices = 20. * np.random.rand(n_vertices, 3)
normals = (vertices >= 0).astype(float)
faces = np.random.randint(0, n_vertices, (n_faces, 3))
# SOURCES :
xyz = np.random.uniform(-20, 20, (50, 3))
mask = xyz[:, 0] > 10
s_obj = SourceObj('xyz', xyz, mask=mask)
class TestBrainObj(_TestObjects):
"""Test BrainObj."""
OBJ = b_obj
def _prepare_brain(self, name='inflated'):
b_obj.set_data(name)
b_obj.clean()
def test_get_template_list(self):
"""Test function get_template_list."""
b_obj._get_template_path()
b_obj._get_default_templates()
b_obj._get_downloadable_templates()
b_obj._add_downloadable_templates('white')
def test_rotation(self):
"""Test function rotation."""
# Test fixed rotations :
f_rot = ['sagittal_0', 'left', 'sagittal_1', 'right', 'coronal_0',
'front', 'coronal_1', 'back', 'axial_0', 'top', 'axial_1',
'bottom']
for k in f_rot:
b_obj.rotate(k)
# Test custom rotation :
for k in [(0, 90), (170, 21), (45, 65)]:
b_obj.rotate(custom=k)
def test_definition(self):
"""Test function definition."""
BrainObj('inflated', sulcus=True)
# Test default templates :
for k, i in zip(['B1', 'B2', 'B3'], ['left', 'both', 'right']):
b_obj.set_data(name=k, hemisphere=i)
def test_custom_templates(self):
"""Test passing vertices, faces and normals."""
BrainObj('Custom', vertices=vertices, faces=faces)
BrainObj('Custom', vertices=vertices, faces=faces, normals=normals)
def test_get_parcellates(self):
"""Test function get_parcellates."""
import pandas as pd
file_1 = self.need_file(NEEDED_FILES['ANNOT_FILE_1'])
file_2 = self.need_file(NEEDED_FILES['ANNOT_FILE_2'])
df_1 = b_obj.get_parcellates(file_1)
df_2 = b_obj.get_parcellates(file_2)
assert all([isinstance(k, pd.DataFrame) for k in [df_1, df_2]])
def test_overlay_from_file(self):
"""Test add_activation method."""
# Prepare the brain :
self._prepare_brain()
file_1 = self.need_file(NEEDED_FILES['OVERLAY_1'])
file_2 = self.need_file(NEEDED_FILES['OVERLAY_2'])
# Overlay :
b_obj.add_activation(file=file_1, clim=(4., 30.), hide_under=4,
cmap='Reds_r', hemisphere='left')
b_obj.add_activation(file=file_2, clim=(4., 30.), hide_under=4,
cmap='Blues_r', hemisphere='left', n_contours=10)
# Meg inverse :
file_3 = read_stc(self.need_file(NEEDED_FILES['MEG_INVERSE']))
data = file_3['data'][:, 2]
vertices = file_3['vertices']
b_obj.add_activation(data=data, vertices=vertices, smoothing_steps=3)
b_obj.add_activation(data=data, vertices=vertices, smoothing_steps=5,
clim=(13., 22.), hide_under=13., cmap='plasma')
def test_parcellize(self):
"""Test function parcellize."""
file_1 = self.need_file(NEEDED_FILES['PARCELLATES_1'])
file_2 = self.need_file(NEEDED_FILES['PARCELLATES_2'])
b_obj.parcellize(file_1, hemisphere='left')
select = ['insula', 'paracentral', 'precentral']
data = np.arange(len(select))
b_obj.parcellize(file_2, select=select, data=data, cmap='Spectral_r')
def test_projection(self):
"""Test cortical projection and repartition."""
b_obj.project_sources(s_obj, 'modulation')
b_obj.project_sources(s_obj, 'repartition')
def test_properties(self):
"""Test BrainObj properties (setter and getter)."""
self._tested_obj = b_obj
self.assert_and_test('translucent', True)
self.assert_and_test('alpha', .03)
self.assert_and_test('hemisphere', 'both')
self.assert_and_test('scale', 1.)
assert b_obj.camera is not None
assert isinstance(b_obj.vertices, np.ndarray)
# Test if getting vertices and faces depends on the selected hemisphere
b_obj.hemisphere = 'both'
n_vertices_both = b_obj.vertices.shape[0]
n_faces_both = b_obj.faces.shape[0]
n_normals_both = b_obj.normals.shape[0]
for k in ['left', 'right']:
b_obj.hemisphere = k
assert b_obj.vertices.shape[0] < n_vertices_both
assert b_obj.faces.shape[0] < n_faces_both
assert b_obj.normals.shape[0] < n_normals_both
def test_clean(self):
"""Test function clean."""
b_obj.clean()
def test_list(self):
"""Test function list."""
assert isinstance(b_obj.list(), list)
def test_save(self):
"""Test function save."""
b_cust = BrainObj('Custom', vertices=vertices, faces=faces)
b_cust.save()
b_cust_tmp = BrainObj('CustomTmp', vertices=vertices, faces=faces)
b_cust_tmp.save(tmpfile=True)
def test_reload_saved_template(self):
"""Test function reload_saved_template."""
BrainObj('Custom')
BrainObj('CustomTmp')
def test_remove(self):
"""Test function remove."""
b_cust = BrainObj('Custom')
b_cust.remove()
clean_tmp()
| [
"e.combrisson@gmail.com"
] | e.combrisson@gmail.com |
258dc7b2a47a47e5e61b09105ca39812b58d3360 | dcf9a7aeaddc876530e8f28fd17130f8859feda9 | /pymatflow/abinit/post/scripts/post-abinit-scf.py | 213833057837cb01c467221155b788270ad180e2 | [
"MIT"
] | permissive | DeqiTang/pymatflow | 3c6f4a6161a729ad17db21db9533187c04d8f5ac | 922722187e2678efbfa280b66be2624b185ecbf5 | refs/heads/master | 2022-05-25T19:41:19.187034 | 2022-03-05T03:07:08 | 2022-03-05T03:07:08 | 245,462,857 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | #!/usr/bin/evn python
# _*_ coding: utf-8 _*_
import os
import argparse
import copy
import datetime
import subprocess
import matplotlib.pyplot as plt
from pymatflow.abinit.post.scf import scf_out
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", help="previously static running directory", type=str, default="tmp-abinit-static")
parser.add_argument("--scfout", help="output file of static calculation", type=str, default="static-scf.out")
args = parser.parse_args()
#os.chdir(args.directory)
#task = scf_post(args.scfout)
#task.export()
#os.chdir("../")
os.chdir(args.directory)
scf = scf_out()
scf.get_info(file=args.scfout)
os.system("mkdir -p post-processing")
os.chdir("post-processing")
#plt.plot(self.run_info["iterations"])
#plt.title("Iterations per SCF")
#plt.xlabel("Scf cycles ")
#plt.ylabel("iterations")
#plt.tight_layout()
#plt.savefig("iterations-per-scf.png")
#plt.close()
with open("scf-info.md", 'w', encoding='utf-8') as fout:
fout.write("# 静态计算实验统计\n")
fout.write("## 计算参数\n")
for item in self.scf_params:
fout.write("- %s: %s\n" % (item, str(self.scf_params[item])))
fout.write("## 运行信息\n")
# calculate the running time and print it out
# end the time information
for item in self.run_info:
fout.write("- %s: %s\n" % (item, str(self.run_info[item])))
fout.write("## 运行信息图示\n")
fout.write("Iterations per SCF\n")
fout.write("\n")
os.chdir("../")
os.chdir("../")
# --------------------------------------------------------------------------
# print information to the terminal
# --------------------------------------------------------------------------
print("=====================================================================\n")
print(" post-abinit-scf.py\n")
print("---------------------------------------------------------------------\n")
print("\n")
| [
"deqi_tang@163.com"
] | deqi_tang@163.com |
f9bc741fcec7ef7c64d9b390a8a829888ac15e89 | f63028878311f21f73ed21f9bc88a0fd2ba8ba88 | /01.python/ch13/ex03.py | 98237fe46d86fb534209855450d8e53c6d6a1d98 | [] | no_license | nugeat23/workspace | ac12b93b0cb826206138aa2262382b0e6389977b | 221344b95daa40c3ba66d27e04cbf9dae3172edc | refs/heads/master | 2023-07-14T20:37:32.851769 | 2021-09-01T08:55:01 | 2021-09-01T08:55:01 | 383,780,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # str = '89'
str = '89점'
try:
score = int(str)
print(score)
a = str[5]
except Exception as err:
print(err)
# except ValueError as err:
# print(err)
# print('점수의 형식이 잘못되었습니다.')
# except IndexError as err:
# print(err)
# print('첨자 범위를 벗어났습니다.')
# except (ValueError, IndexError):
# print('점수의 형식이나 첨자가 잘못되었습니다.')
print('작업 완료')
| [
"nugeat23@gmail.com"
] | nugeat23@gmail.com |
b0a401a93668770021670faba297c212eeed0026 | 3eb4293c23f78e8da4c157692ba6aa54be4436e6 | /Server/routes/api/drawer/draw_doc.py | ed67a5d2d3b0f040a754f2bcd174362c143e19b8 | [
"MIT"
] | permissive | DSM-GRAM/Artist-Soongsil | 0b1753b5f4887313bfd2c24219815c4115e31024 | 4ec94190ee878b864489821b210132fc3b8588da | refs/heads/master | 2021-07-11T18:19:56.200896 | 2017-10-17T00:43:26 | 2017-10-17T00:43:26 | 106,808,686 | 0 | 0 | null | 2017-10-17T00:43:27 | 2017-10-13T10:04:47 | null | UTF-8 | Python | false | false | 1,091 | py | DRAW = {
'tags': ['Drawer'],
'description': '그리기 시작',
'parameters': [
{
'name': 'shower_id',
'description': '카테고리 선택 시 할당된 shower id',
'in': 'formData',
'type': 'str'
}
],
'responses': {
'201': {
'description': '그리기 시작 성공'
}
}
}
SCORE = {
'tags': ['Drawer'],
'description': '점수 측정(미구현)',
'parameters': [
{
'name': 'image',
'description': '사용자가 그린 이미지',
'in': 'formData',
'type': 'file'
},
{
'name': 'origin_image_name',
'description': '카테고리 선택 시 할당된 image name',
'in': 'formData',
'type': 'str'
}
],
'responses': {
'201': {
'description': '점수 측정 성공',
'examples': {
'application/json': {
'score': 123
}
}
}
}
}
| [
"city7310@naver.com"
] | city7310@naver.com |
af3047b49e45f83e914d0f01b02f54c6956e0f2d | add3e9d9976aa8463fd453a7c25874afa10f80b7 | /login_websites/run_me.py | 073cb9b3de2c013d4fb439a275460c0a0b42de04 | [] | no_license | buxuele/scrapy_roadmap | 88a83ac116f429d6c35105a25949ec501ca231c5 | ca42cf7ca1e3eea39244062179af0c52f96cd5e0 | refs/heads/master | 2023-04-08T08:13:19.309521 | 2021-03-28T14:34:31 | 2021-03-28T14:34:31 | 343,089,859 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from scrapy import cmdline
# cmdline.execute("scrapy crawl jianshu -o jianshu_ret_5W.json".split())
# cmdline.execute("scrapy crawl dou".split())
# cmdline.execute("scrapy crawl pengpai".split())
cmdline.execute("scrapy crawl zhihu".split())
| [
"baogexuxuele@163.com"
] | baogexuxuele@163.com |
bf23a18103fbc497d52186be9743206416f2b468 | b50e64cabfa1b1caae3c96ee70f8c3702b031c7e | /word2vec_ex.py | 8271d3061a4ad6941f429ed29f3f125251ee9d93 | [] | no_license | chaeonee/Semantic-correlation-based_outliers | 78908854bb03b042d653a5cca8dd35d90bad6dfb | 76cdff3330b5e4c4ce9e2e35abc5d71b246578d1 | refs/heads/main | 2023-06-01T15:24:50.834293 | 2021-06-16T07:47:54 | 2021-06-16T07:47:54 | 377,411,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 17:13:52 2018
@author: onee
"""
# imports needed and set up logging
import gzip
import gensim
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
data_file = "./cocoapi/coco2017/cocoword_all.txt.gz"#reviews_data.txt.gz"
#with gzip.open (data_file, 'rb') as f:
# for i,line in enumerate (f):
# print(line)
# break
def read_input(input_file):
"""This method reads the input file which is in gzip format"""
logging.info("reading file {0}...this may take a while".format(input_file))
with gzip.open (input_file, 'rb') as f:
for i, line in enumerate (f):
if (i%10000==0):
logging.info ("read {0} reviews".format (i))
# do some pre-processing and return a list of words for each review text
yield gensim.utils.simple_preprocess (line)
documents = list (read_input (data_file))
logging.info ("Done reading data file")
# read the tokenized reviews into a list
# each review item becomes a serries of words
# so this becomes a list of lists
#documents = read_input (data_file)
#logging.info ("Done reading data file")
# Load Google's pre-trained Word2Vec model.
#model = gensim.models.Word2Vec.load_word2vec_format('./model/GoogleNews-vectors-negative300.bin', binary=True)
model = gensim.models.Word2Vec (documents, size=150, min_count=1, workers=10)
model.train(documents,total_examples=len(documents),epochs=1000)
w1 = "sky"
model.wv.most_similar (positive=w1, topn=10)
model.wv.similarity(w1="person",w2="dog") | [
"noreply@github.com"
] | chaeonee.noreply@github.com |
691d8eef8f159267257820254e2b82e3f0b0823e | 4f56c30f1c8b181120f7d24a4d2417b7507a0692 | /manage.py | f2bdbf1504976f14427f6476bb961b84db46ef46 | [] | no_license | monicaoyugi/IT-ticketing | 8da7a0a8855214f227b5e4ffebe50a4ea8c7d8c5 | 58cbdeb1ce698d4c3a18497b0fc5eea358d636d3 | refs/heads/master | 2023-03-25T20:38:41.897677 | 2021-03-17T13:30:21 | 2021-03-17T13:30:21 | 348,715,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'IT_Ticketing.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"monicaoyugi@gmail.com"
] | monicaoyugi@gmail.com |
972bcb5664b7774508567984c8546be23b2c74a1 | 9758fa6d66df1121ff9e0b4a7da511653bc53cf1 | /Food/migrations/0012_auto_20190628_1339.py | 55c55d2c4ff51c3691f90bee3c590089c826f9cb | [] | no_license | hdforoozan/Restaurant-project | 179fb4138cb92bfd7716671c3b1e8b1949bfbaff | 2ab096cbc3ee20557b57ed97bd0d5556c5965e87 | refs/heads/master | 2020-06-12T08:50:03.067740 | 2019-09-17T18:48:43 | 2019-09-17T18:48:43 | 194,250,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # Generated by Django 2.1 on 2019-06-28 09:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Food', '0011_food_image'),
]
operations = [
migrations.AlterField(
model_name='food',
name='image',
field=models.ImageField(blank=True, height_field=200, null=True, upload_to='images/', width_field=200),
),
]
| [
"hdforoozan@gmail.com"
] | hdforoozan@gmail.com |
39fc8480fe9df0f204c34f434863175728dcb810 | eef243e450cea7e91bac2f71f0bfd45a00c6f12c | /.history/app/api_service/views_20210124234745.py | 81d505ff56de11027da67e5bcb91fec182f35ae3 | [] | no_license | hoaf13/nlp-chatbot-lol | 910ab2ea3b62d5219901050271fc1a1340e46a2f | 18cb64efa9d6b4cafe1015f1cd94f4409271ef56 | refs/heads/master | 2023-05-08T04:17:19.450718 | 2021-02-02T02:37:38 | 2021-02-02T02:37:38 | 332,535,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | from flask import jsonify, Blueprint, flash, render_template, request, session, abort, redirect, url_for
from flask.views import MethodView
import requests
import os
from app.models import Champion, Conversation
from app import app
import json
api_service = Blueprint('apis', __name__, url_prefix='/apis/')
def load_data():
with open('app/api_service/my_upload/9intents.json') as json_file:
data = json.load(json_file)
keys = data.keys()
values = data.values()
print(keys[0])
@api_service.route("/")
def hello():
load_data()
return render_template('pages/api.html')
class ChampionView(MethodView):
def get(self, id=None):
if not id:
champions = Champion.query.all()
print(champions)
res = {}
for champion in champions:
res[champion.id] = {
'name': champion.name,
}
else:
champion = Champion.query.filter_by(id=id).first()
print(champion)
if not champion:
abort(404)
res = {
'name': champion.name,
}
return jsonify(res)
champion_view = ChampionView.as_view('champion_view')
app.add_url_rule('/champions/', view_func=champion_view, methods=['GET'])
app.add_url_rule('/champions/<int:id>', view_func=champion_view, methods=['GET','PUT'])
| [
"samartcall@gmail.com"
] | samartcall@gmail.com |
a1cede54f3acb5afd26d9c096fb96884d129dccc | bc9883f9a383ff417725248d124cc79ce7f0bc83 | /questions/api/permissions.py | 2df2931152f1bf56c2f0ee08bda33cd9da454dd4 | [] | no_license | Erick-ViBe/QuestionTime | 226ffa482c41e4f7fd89419a67e31df9a21f2494 | 4bccc1a6e7f55c8251e94adf9da96a5b5de85653 | refs/heads/master | 2023-04-05T14:09:53.510287 | 2021-03-14T21:03:12 | 2021-03-14T21:03:12 | 294,256,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | from rest_framework import permissions
class IsAuthorOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.author == request.user
| [
"erickvb12@gmail.com"
] | erickvb12@gmail.com |
a7698c4ec91871c6ebb0b25630cd63930ef0ca92 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2126/60812/260890.py | 3d5094642a36be819bd131a003971034322fbd81 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | nums = sorted(int(i) for i in input().split(','))
t = []
for i in nums:
for j in t:
if i % j[0] == 0:
j.insert(0, i)
break
else:
t.append([i])
num = 0
temp = []
for i in t:
if len(i) > num:
num = len(i)
temp = i
print(sorted(temp)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
3470f6afd2e24304914333aae51c9ded5b06f19f | 01ac9e40052a468dd472a296df0003c4e629e2c9 | /news_all/spiders_old2/gsnews.py | 7a8433448a03290242f71e36eb6660a5159c0fbe | [] | no_license | Pintrue/news_all | b5cee16584ed92e6574edd825b574214df65d917 | eb8c32c79bdacd8e2f76b88f27871c3cd0118006 | refs/heads/master | 2022-03-23T13:34:10.354029 | 2019-11-22T07:40:50 | 2019-11-22T07:40:50 | 223,058,997 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | # -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from news_all.spider_models import NewsRCSpider
class GsnewsSpider(NewsRCSpider):
'''甘肃新闻网'''
name = 'gsnews'
mystart_urls = {
'http://gansu.gscn.com.cn/gsyw/': 1301167, # 甘肃新闻网 甘肃新闻-本网原创
}
rules = (
# http://gansu.gscn.com.cn/system/2019/06/25/012176563.shtml
Rule(LinkExtractor(allow=(r'gansu.gscn.com.cn/system/\d{4}/\d{2}/\d{2}/\d+\.s?html'),
deny=(r'https://live.xinhuaapp.com/xcy/reportlist.html?liveId=156092565653055&from=timeline')
), callback='parse_item',
follow=False),
)
def parse_item(self, response):
xp = response.xpath
try:
title = xp("//div[@class='a-header']/h1/text()").extract_first()
content_div = xp("//div[@class='a-container']")[0]
pubtime = xp("//span[@class='m-frt']/text()").extract_first().strip()
origin_name = xp("//div[@class='info']/span[2]/text()").extract_first()
content, media, _, _ = self.content_clean(content_div)
except:
return self.produce_debugitem(response, "xpath error")
return self.produce_item(
response=response,
title=title,
pubtime=pubtime,
origin_name=origin_name,
content=content,
media=media
) | [
"py416@ic.ac.uk"
] | py416@ic.ac.uk |
f440022ee267ad6c3be04daf205cb81b743825d9 | 9411e6835c9056ddf31ee97fd4d22495083d45d8 | /爬虫/qichacha/build/lib/qichacha/kdlRandomHandler.py | f920c7a922bdde14797620452b2d67c1866064b3 | [] | no_license | XUANXUANXU/7-PracticalTraining | e17eb15149cf50227df7e617285cf5770e9d8903 | 605fc28f896ee6ae04e3f48bd7b4b5314910a239 | refs/heads/master | 2020-04-14T11:50:10.829811 | 2019-01-02T10:01:13 | 2019-01-02T10:01:13 | 163,824,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | import pymysql
import random
import re,time
import requests
import threading
from requests.exceptions import ProxyError,ConnectionError,SSLError,ReadTimeout,ConnectTimeout
test_url = 'https://www.baidu.com/'
timeout = 5
class RandomIpHandler:
#初始化构造函数
def __init__(self):
self.client = pymysql.Connect('localhost','root','199888','kdlproxy',3306)
self.cursor = self.client.cursor(cursor=pymysql.cursors.DictCursor)
self.results = []
self.thread = threading.Thread(target=self.get_proxy_from_db)
self.get_data()
self.thread.start()
def get_proxy_from_db(self):
while True:
self.get_data()
time.sleep(60)
def get_data(self):
#查询数据库中的所有数据
select_SQL= 'select * from usedproxies'
self.cursor.execute(select_SQL)
#返回一个结果列表
self.results=self.cursor.fetchall()
def get_random_ip(self):
if len(self.results) > 0:
#从中随机获取一个Ip
result=random.choice(self.results)
# status,time = self.ipCheck(result)
# if status == True:
# print(result)
#该代理可用,返回结果
return result
# else:
# #如果获取的ip不可用,继续获取,直到可用为止
# self.get_random_ip()
# 优化在这里我们可以做一个优化处理判断当前选取的ip是否可以使用
# 优化点:如果判断当前的代理不可用,那么我们是否可以将数据库里面的这条数据删除掉?(自己实现)
def ipCheck(self, ip_item):
"""代理检测"""
proxy = ip_item['proxy']
try:
proxies = {
'https': proxy
}
start_time = time.time()
response = requests.get(test_url, timeout=timeout, proxies=proxies)
if response.status_code == requests.codes.ok:
end_time = time.time()
used_time = end_time - start_time
# print('Proxy Valid'+proxy, 'Used Time:', used_time)
return True, used_time
#出现异常则代理不可用
except (ProxyError, ConnectTimeout, SSLError, ReadTimeout, ConnectionError):
# print('Proxy Invalid:', proxy)
return False, None
| [
"302738630@qq.com"
] | 302738630@qq.com |
a3894ae68b886461d11d6ae12739758aef350820 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/otp/speedchat/SCEmoteTerminal.py | 6b5eb18d4f9f999d494adc9d46a84a7be8e12052 | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,965 | py | from direct.gui.DirectGui import *
from SCTerminal import SCTerminal
from otp.otpbase.OTPLocalizer import EmoteList, EmoteWhispers
from otp.avatar import Emote
SCEmoteMsgEvent = 'SCEmoteMsg'
SCEmoteNoAccessEvent = 'SCEmoteNoAccess'
def decodeSCEmoteWhisperMsg(emoteId, avName):
if emoteId >= len(EmoteWhispers):
return None
else:
return EmoteWhispers[emoteId] % avName
class SCEmoteTerminal(SCTerminal):
def __init__(self, emoteId):
SCTerminal.__init__(self)
self.emoteId = emoteId
if not self.__ltHasAccess():
self.text = '?'
else:
self.text = EmoteList[self.emoteId]
def __ltHasAccess(self):
try:
lt = base.localAvatar
return lt.emoteAccess[self.emoteId]
except:
return 0
def __emoteEnabled(self):
if self.isWhispering():
return 1
return Emote.globalEmote.isEnabled(self.emoteId)
def finalize(self, dbArgs={}):
if not self.isDirty():
return
else:
args = {}
if not self.__ltHasAccess() or not self.__emoteEnabled():
args.update({'rolloverColor': (0, 0, 0, 0), 'pressedColor': (0, 0, 0, 0),
'rolloverSound': None,
'text_fg': self.getColorScheme().getTextDisabledColor() + (1, )})
if not self.__ltHasAccess():
args.update({'text_align': TextNode.ACenter})
elif not self.__emoteEnabled():
args.update({'clickSound': None})
self.lastEmoteEnableState = self.__emoteEnabled()
args.update(dbArgs)
SCTerminal.finalize(self, dbArgs=args)
return
def __emoteEnableStateChanged(self):
if self.isDirty():
self.notify.info("skipping __emoteEnableStateChanged; we're marked as dirty")
return
else:
if not hasattr(self, 'button'):
self.notify.error('SCEmoteTerminal is not marked as dirty, but has no button!')
btn = self.button
if self.__emoteEnabled():
rolloverColor = self.getColorScheme().getRolloverColor() + (1, )
pressedColor = self.getColorScheme().getPressedColor() + (1, )
btn.frameStyle[DGG.BUTTON_ROLLOVER_STATE].setColor(*rolloverColor)
btn.frameStyle[DGG.BUTTON_DEPRESSED_STATE].setColor(*pressedColor)
btn.updateFrameStyle()
btn['text_fg'] = self.getColorScheme().getTextColor() + (1, )
btn['rolloverSound'] = DGG.getDefaultRolloverSound()
btn['clickSound'] = DGG.getDefaultClickSound()
else:
btn.frameStyle[DGG.BUTTON_ROLLOVER_STATE].setColor(0, 0, 0, 0)
btn.frameStyle[DGG.BUTTON_DEPRESSED_STATE].setColor(0, 0, 0, 0)
btn.updateFrameStyle()
btn['text_fg'] = self.getColorScheme().getTextDisabledColor() + (1, )
btn['rolloverSound'] = None
btn['clickSound'] = None
return
def enterVisible(self):
SCTerminal.enterVisible(self)
if self.__ltHasAccess():
if hasattr(self, 'lastEmoteEnableState'):
if self.lastEmoteEnableState != self.__emoteEnabled():
self.invalidate()
if not self.isWhispering():
self.accept(Emote.globalEmote.EmoteEnableStateChanged, self.__emoteEnableStateChanged)
def exitVisible(self):
SCTerminal.exitVisible(self)
self.ignore(Emote.globalEmote.EmoteEnableStateChanged)
def handleSelect(self, displayType=0):
if not self.__ltHasAccess():
messenger.send(self.getEventName(SCEmoteNoAccessEvent))
elif self.__emoteEnabled():
SCTerminal.handleSelect(self, displayType)
messenger.send(self.getEventName(SCEmoteMsgEvent), [self.emoteId]) | [
"s0mberdemise@protonmail.com"
] | s0mberdemise@protonmail.com |
584c2ecf007a99db9cd17c9dd6131ce417bd9a70 | b2755ce7a643ae5c55c4b0c8689d09ad51819e6b | /anuvaad-etl/anuvaad-extractor/document-processor/layout-detector/tesseract/src/errors/errors_exception.py | 19c0a831b2fb07ee8393f9dd4cc021e494f44068 | [
"MIT"
] | permissive | project-anuvaad/anuvaad | 96df31170b27467d296cee43440b6dade7b1247c | 2bfcf6b9779bf1abd41e1bc42c27007127ddbefb | refs/heads/master | 2023-08-17T01:18:25.587918 | 2023-08-14T09:53:16 | 2023-08-14T09:53:16 | 265,545,286 | 41 | 39 | MIT | 2023-09-14T05:58:27 | 2020-05-20T11:34:37 | Jupyter Notebook | UTF-8 | Python | false | false | 2,162 | py | class FormatError(Exception):
def __init__(self, code, message):
self._code = code
self._message = message
@property
def code(self):
return self._code
@property
def message(self):
return self._message
def __str__(self):
return self.__class__.__name__ + ': ' + self.message
class WorkflowkeyError(Exception):
def __init__(self, code, message):
self._code = code
self._message = message
@property
def code(self):
return self._code
@property
def message(self):
return self._message
def __str__(self):
return self.__class__.__name__ + ': ' + self.message
class FileErrors(Exception):
def __init__(self, code, message):
self._code = code
self._message = message
@property
def code(self):
return self._code
@property
def message(self):
return self._message
def __repr__(self):
return { "code" : self.code, "message" : self.__class__.__name__ + ': ' + self.message }
class ServiceError(Exception):
def __init__(self, code, message):
self._code = code
self._message = message
@property
def code(self):
return self._code
@property
def message(self):
return self._message
def __str__(self):
return self.__class__.__name__ + ': ' + self.message
class KafkaConsumerError(Exception):
def __init__(self, code, message):
self._code = code
self._message = message
@property
def code(self):
return self._code
@property
def message(self):
return self._message
def __str__(self):
return self.__class__.__name__ + ': ' + self.message
class KafkaProducerError(Exception):
def __init__(self, code, message):
self._code = code
self._message = message
@property
def code(self):
return self._code
@property
def message(self):
return self._message
def __repr__(self):
return { "code" : self.code, "message" : self.__class__.__name__ + ': ' + self.message } | [
"srihari.nagaraj@tarento.com"
] | srihari.nagaraj@tarento.com |
b827d70881c3b29b2d1c4d13f1f5a6ee811ae379 | e19264962684671eacdb02c4b477e4fa470cdf96 | /Programação-Internet/Atividade-4 (Python - Condicionais)/exercicio-6.py | 3083d80cbf80738032733e7ef9a7ca27fe181c7d | [] | no_license | cabralltech/Tecnologo-ADS | 11041fab9b0e448b6c004a45f714d4404774c32a | f5fbf8683d541ffe8ba1a2cf3d4dab3c9764f249 | refs/heads/master | 2021-06-22T14:21:03.086059 | 2017-08-21T00:33:23 | 2017-08-21T00:33:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | # -*- coding : utf-8 -*-
# Decidir se vai ficar em casa ou sair.
def ficar_em_casa_ou_sair(valor):
if valor > 20:
return "Vá ao cinema."
return "Fique em casa."
assert ficar_em_casa_ou_sair(20) == 'Fique em casa.'
assert ficar_em_casa_ou_sair(25) == 'Vá ao cinema.'
assert ficar_em_casa_ou_sair(15) == 'Fique em casa.' | [
"marlysson5@gmail.com"
] | marlysson5@gmail.com |
8341c6930b4370045a2972e6095d5ffc4336995e | 747febe786dd6b7fd6c63cfe73dbe3023354daa8 | /src/the_tale/the_tale/game/map/migrations/0002_auto_20150506_1406.py | 3b36cbad46a30cee85c0950aca677a1f6e671489 | [
"BSD-3-Clause"
] | permissive | the-tale/the-tale | 4e4b8d91dc873a5fb935fe58e9721a877baa6d3f | e8450bd2332344da805b1851e728da5a3e5bf0ef | refs/heads/develop | 2023-08-01T13:53:46.835667 | 2022-12-25T18:04:56 | 2022-12-25T18:04:56 | 1,949,167 | 98 | 52 | BSD-3-Clause | 2023-02-15T18:57:33 | 2011-06-24T18:49:48 | Python | UTF-8 | Python | false | false | 361 | py | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('map', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='mapinfo',
name='created_at',
field=models.DateTimeField(auto_now_add=True),
),
]
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
90257fc5e042ac21baf0b40844356956fc7fd4e3 | bcabd9b183bc011e1ccf7e367fbed0dcaa03eee6 | /1 PYTHON/2 COREY SCHAFER/PART 2/calc.py | 00b0197e33337452aa2535beea014159b8c2098a | [] | no_license | rajeshsvv/Lenovo_Back | 287fe4da2c696aa248ec57a4c45c4f234f6ca9ed | 7e49e38aaf934c65f9992a78404d2b81a4cd0204 | refs/heads/master | 2022-12-23T16:44:41.488128 | 2019-08-29T10:00:10 | 2019-08-29T10:00:10 | 204,859,914 | 0 | 1 | null | 2022-12-10T11:50:31 | 2019-08-28T06:05:35 | Python | UTF-8 | Python | false | false | 654 | py | # unit testing in python
# unit test module is in standard library so there is no need to insatll anythingyou can just say import unittest
# to create test case first we need to create test class that inherits from unittest.testcase
# floor division in the sense it does not give the remainder
def add(x, y):
return x + y
def sub(x, y):
return x - y
def mul(x, y):
return x * y
def div(x, y):
if y == 0:
raise ValueError("cannot devided by zero")
return x / y
# return x // y this is called floor division means it does not give remainder for example 5//2=2.5 but it gives 2 only got it
# print(mul(5, 2))
| [
"rajeshsvv01@gmail.com"
] | rajeshsvv01@gmail.com |
e73cc2621c2321196fdc0e36b579743d7ad0e1d0 | 49b9af65a2d229d2c79d1f2677c56fef926aaed4 | /codeforces/268B/py/268b.py | d8675caa85faadf7b600ac95fc8a83f5e2e73b9f | [] | no_license | guzhoudiaoke/practice | 5a6a0a0e3ab8f5c0922d08bbf0ba5549b5585378 | a93cb418530f50c8577251df593624391e92b1a5 | refs/heads/master | 2020-04-06T07:05:32.965869 | 2019-10-24T14:55:39 | 2019-10-24T14:55:39 | 63,775,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | n = int(input())
# each guess make n-i+1 mistakes
# the ith guess cost i
ret = n
for i in range (1, n):
ret += i * (n-i)
print(ret)
| [
"guzhoudiaoke@126.com"
] | guzhoudiaoke@126.com |
e1ed6150b69dfd056410276e6a6d2548c0cd7227 | 7771cb3cf1d879c4ffbf443157f12a226fd2f5be | /SGITruismo/upload/migrations/0003_auto_20191128_2217.py | 93ad44ad21bd7be7155eb2b93f73041088a1a8a9 | [] | no_license | jamen17/SGIAPI | 290295385e5f929073709e8a0d0dd6725baaffd4 | 8294f6dd32d48ad2fc3834f88be13b7a6316761d | refs/heads/master | 2020-09-14T18:38:38.463588 | 2019-11-29T15:06:29 | 2019-11-29T15:06:29 | 223,216,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # Generated by Django 2.2.7 on 2019-11-28 22:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('upload', '0002_remove_file_propietario'),
]
operations = [
migrations.AlterField(
model_name='file',
name='file',
field=models.FileField(upload_to=''),
),
]
| [
"jamen17@gmail.com"
] | jamen17@gmail.com |
31b3118ce3ce798040355e208eb2c0333a127746 | 130447dcbb954e1a42ee2645cea55d5e64321a3e | /training/utils.py | 5ddffc4068b63d32c7222b576c5c4ab9d8acb346 | [
"BSD-3-Clause"
] | permissive | bhaskarkumar1/Voice-Cloning-App | 0fff4e169b062555af86cc47d82b297a5db81e60 | 5b6b6dc4928765e50818f06c610911a3eb6e5efb | refs/heads/main | 2023-07-02T07:28:30.090064 | 2021-07-29T12:47:17 | 2021-07-29T12:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | import shutil
import torch
CHECKPOINT_SIZE_MB = 333
BATCH_SIZE_PER_GB = 2.5
LEARNING_RATE_PER_BATCH = 3.125e-5
PUNCTUATION = list("_-!'(),.:;? ")
def get_available_memory():
"""
Get available GPU memory in GB.
Returns
-------
int
Available GPU memory in GB
"""
available_memory_gb = 0
for i in range(torch.cuda.device_count()):
gpu_memory = torch.cuda.get_device_properties(i).total_memory
memory_in_use = torch.cuda.memory_allocated(i)
available_memory = gpu_memory - memory_in_use
available_memory_gb += available_memory // 1024 // 1024 // 1024
return available_memory_gb
def get_batch_size(available_memory_gb):
"""
Calulate batch size.
Parameters
----------
available_memory_gb : int
Available GPU memory in GB
Returns
-------
int
Batch size
"""
return int(available_memory_gb * BATCH_SIZE_PER_GB)
def get_learning_rate(batch_size):
"""
Calulate learning rate.
Parameters
----------
batch_size : int
Batch size
Returns
-------
float
Learning rate
"""
return batch_size * LEARNING_RATE_PER_BATCH
def check_space(num_checkpoints):
"""
Check if system has enough available storage to save all checkpoints.
Parameters
----------
num_checkpoints : int
Number of checkpoints that will be generated in training
Raises
-------
AssertionError
If system does not have sufficent storage space
"""
_, _, free = shutil.disk_usage("/")
free_mb = free // (2 ** 20)
required_mb = CHECKPOINT_SIZE_MB * num_checkpoints
assert (
free_mb >= required_mb
), f"Insufficent storage space (requires {required_mb}mb). Reduce checkpoint frequency or free up space"
def load_symbols(alphabet_file):
"""
Get alphabet and punctuation for a given alphabet file.
Parameters
----------
alphabet_file : str
Path to alphabnet file
Returns
-------
list
List of symbols (punctuation + alphabet)
"""
symbols = PUNCTUATION.copy()
with open(alphabet_file) as f:
lines = [l.strip() for l in f.readlines() if l.strip() and not l.startswith("#")]
for line in lines:
if line not in symbols:
symbols.append(line)
return symbols
| [
"bandrew01@qub.ac.uk"
] | bandrew01@qub.ac.uk |
940deae204915aa6493887d54bf29c0b1967dc3b | cd2c85500d420a67c433113cf43a734669134423 | /build/v4r_ros_wrappers/object_tracker_srv_definitions/catkin_generated/pkg.installspace.context.pc.py | 369b71dbaaf11d8cc43ab37890cb70cbcc3aefe0 | [] | no_license | 0000duck/youbot_mobile_manipulation_WS | f942974724dd19c9c92e852ccbd056e29d9c6049 | 0e966211c8d7135dc7cffedbb10b15459398ef8f | refs/heads/master | 2020-12-10T11:59:30.700737 | 2017-07-17T13:49:12 | 2017-07-17T13:49:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ros/catkin_ws/install/include".split(';') if "/home/ros/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "object_tracker_srv_definitions"
PROJECT_SPACE_DIR = "/home/ros/catkin_ws/install"
PROJECT_VERSION = "0.1.4"
| [
"mohdnaveed96@gmail.com"
] | mohdnaveed96@gmail.com |
d273fc5790d0dee3b75685ebe78c52444a9e3242 | edc1f1369794a4a1c499c6e9d5fe49a712657611 | /algorithms/leetcode_all/157.read-n-characters-given-read4/read-n-characters-given-read4.py | a10c1b9c54699a9c7a11f5490b8caad8878b2b60 | [] | no_license | williamsyb/mycookbook | 93d4aca1a539b506c8ed2797863de6da8a0ed70f | dd917b6eba48eef42f1086a54880bab6cd1fbf07 | refs/heads/master | 2023-03-07T04:16:18.384481 | 2020-11-11T14:36:54 | 2020-11-11T14:36:54 | 280,005,004 | 2 | 0 | null | 2023-03-07T02:07:46 | 2020-07-15T23:34:24 | Python | UTF-8 | Python | false | false | 613 | py | # The read4 API is already defined for you.
# @param buf, a list of characters
# @return an integer
# def read4(buf):
class Solution(object):
def read(self, buf, n):
"""
:type buf: Destination buffer (List[str])
:type n: Maximum number of characters to read (int)
:rtype: The number of characters read (int)
"""
cnt = 0
tmp = [""] * 4
while cnt < n:
r = read4(tmp)
if r == 0:
break
for i in range(min(r, n - cnt)):
buf[cnt] = tmp[i]
cnt += 1
return cnt | [
"william_sun1990@hotmail.com"
] | william_sun1990@hotmail.com |
dfdcb5e4585ba90ca35e53d4744c9c585dd6c85b | 45ca0e2ed9c3f13317b69bb1a80b523b08094000 | /pySDC/projects/node_failure/boussinesq_example.py | cc5d78ee1a27236e574de0da9d0d0d2dde987bc7 | [
"BSD-2-Clause"
] | permissive | MichaelFlec/pySDC | f325bf351adefb7495f9463c297962a193288a76 | 209e0015a46f861e3658691b7f8724cb1b36c97e | refs/heads/master | 2023-08-18T00:34:38.524816 | 2021-09-20T12:54:42 | 2021-09-20T12:54:42 | 405,362,242 | 0 | 0 | BSD-2-Clause | 2021-09-20T12:54:43 | 2021-09-11T11:34:26 | Python | UTF-8 | Python | false | false | 5,735 | py | import numpy as np
import pySDC.projects.node_failure.emulate_hard_faults as ft
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.problem_classes.Boussinesq_2D_FD_imex import boussinesq_2d_imex
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.transfer_classes.TransferMesh_NoCoarse import mesh_to_mesh
from pySDC.projects.node_failure.controller_nonMPI_hard_faults import controller_nonMPI_hard_faults
# noinspection PyShadowingNames,PyShadowingBuiltins
def main(ft_strategies):
"""
This routine generates the heatmaps showing the residual for node failures at different steps and iterations
"""
num_procs = 16
# setup parameters "in time"
t0 = 0
Tend = 960
Nsteps = 320
dt = Tend / float(Nsteps)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-06
level_params['dt'] = dt
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['finter'] = True
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 6
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = 'LU'
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
# initialize problem parameters
problem_params = dict()
# problem_params['nvars'] = [(4, 450, 30), (4, 450, 30)]
problem_params['nvars'] = [(4, 100, 10), (4, 100, 10)]
problem_params['u_adv'] = 0.02
problem_params['c_s'] = 0.3
problem_params['Nfreq'] = 0.01
problem_params['x_bounds'] = [(-150.0, 150.0)]
problem_params['z_bounds'] = [(0.0, 10.0)]
problem_params['order'] = [4, 2]
problem_params['order_upw'] = [5, 1]
problem_params['gmres_maxiter'] = [50, 50]
problem_params['gmres_restart'] = [10, 10]
problem_params['gmres_tol_limit'] = [1e-10, 1e-10]
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = boussinesq_2d_imex # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer
ft.hard_random = 0.03
controller = controller_nonMPI_hard_faults(num_procs=num_procs, controller_params=controller_params,
description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
cfl_advection = P.params.u_adv * dt / P.h[0]
cfl_acoustic_hor = P.params.c_s * dt / P.h[0]
cfl_acoustic_ver = P.params.c_s * dt / P.h[1]
print("CFL number of advection: %4.2f" % cfl_advection)
print("CFL number of acoustics (horizontal): %4.2f" % cfl_acoustic_hor)
print("CFL number of acoustics (vertical): %4.2f" % cfl_acoustic_ver)
for strategy in ft_strategies:
print('------------------------------------------ working on strategy ', strategy)
ft.strategy = strategy
# read in reference data from clean run, will provide reproducable locations for faults
if strategy != 'NOFAULT':
reffile = np.load('data/PFASST_BOUSSINESQ_stats_hf_NOFAULT_P16.npz')
ft.refdata = reffile['hard_stats']
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
P.report_log()
# get residuals of the run
extract_stats = filter_stats(stats, type='residual_post_iteration')
# find boundaries for x-,y- and c-axis as well as arrays
maxprocs = 0
maxiter = 0
minres = 0
maxres = -99
for k, v in extract_stats.items():
maxprocs = max(maxprocs, k.process)
maxiter = max(maxiter, k.iter)
minres = min(minres, np.log10(v))
maxres = max(maxres, np.log10(v))
# grep residuals and put into array
residual = np.zeros((maxiter, maxprocs + 1))
residual[:] = -99
for k, v in extract_stats.items():
step = k.process
iter = k.iter
if iter is not -1:
residual[iter - 1, step] = np.log10(v)
# stats magic: get niter (probably redundant with maxiter)
extract_stats = filter_stats(stats, level=-1, type='niter')
sortedlist_stats = sort_stats(extract_stats, sortby='process')
iter_count = np.zeros(Nsteps)
for item in sortedlist_stats:
iter_count[item[0]] = item[1]
print(iter_count)
np.savez('data/PFASST_BOUSSINESQ_stats_hf_' + ft.strategy + '_P' + str(num_procs), residual=residual,
iter_count=iter_count, hard_stats=ft.hard_stats)
if __name__ == "__main__":
# ft_strategies = ['SPREAD', 'SPREAD_PREDICT', 'INTERP', 'INTERP_PREDICT']
ft_strategies = ['NOFAULT']
main(ft_strategies=ft_strategies)
| [
"r.speck@fz-juelich.de"
] | r.speck@fz-juelich.de |
712e154222948614283e63d1afd21ccbaa7e84de | 6d4a7f3f069e68a984df61b718e39597370a1131 | /main/mpv_seek | 574243cc4b24b2a0eaf28838d53fc5635e5fbfb2 | [] | no_license | nabiuddin6/scripts-1 | d7c32a483c1ed4fcca2df3d68bf29cabf81f69c7 | 7a36fa22cfc369ccc5038332f95779370b12507c | refs/heads/master | 2022-09-01T07:14:31.211758 | 2020-05-30T19:20:02 | 2020-05-30T19:20:02 | 270,788,454 | 1 | 0 | null | 2020-06-08T18:55:19 | 2020-06-08T18:55:18 | null | UTF-8 | Python | false | false | 1,626 | #!/usr/bin/python3
"""MPV Helper for Seeking to a Specific Point"""
import argparse # noqa: F401
import datetime as dt # noqa: F401
import os # noqa: F401
from pathlib import Path # noqa: F401
import subprocess as sp # noqa: F401
import sys # noqa: F401
from typing import * # noqa: F401
from types import * # noqa: F401
import gutils
############################################################################################
# gutils library: https://github.com/bbugyi200/scripts/tree/master/modules/python/gutils #
############################################################################################
from loguru import logger as log
scriptname = os.path.basename(os.path.realpath(__file__))
@gutils.catch
def main(args: argparse.Namespace) -> None:
uinput = gutils.shell('prompt "[MPV] Goto"')
# Already Formatted
if ':' in uinput:
print(uinput)
sys.exit(0)
loc = uinput[:]
# Uses special minute syntax (i.e. ends in 'm').
if loc[-1] == 'm':
iloc = int(loc[:-1])
H = iloc // 60
M = iloc % 60
if H == 0:
loc = '{}:00'.format(M)
else:
loc = '0{}:{}:00'.format(H, M)
print(loc)
sys.exit(0)
# Add colons in all the right places.
i = 2; j = 0
length = len(loc)
while i < length:
loc = loc[:-(i + j)] + ':' + loc[-(i + j):]
i += 2; j += 1
print(loc)
if __name__ == "__main__":
parser = gutils.ArgumentParser()
args = parser.parse_args()
gutils.logging.configure(__file__, debug=args.debug, verbose=args.verbose)
main(args)
| [
"bryanbugyi34@gmail.com"
] | bryanbugyi34@gmail.com | |
5d87c501e3cccd39a8c85ec6cc73fc74adb162b3 | 881041fab1b4d05f1c5371efed2f9276037eb609 | /tasks/primary-commercial-zoning-by-lot/depositor.py | c04c226da8e60c516a9d0ee3469d3ec314664425 | [] | no_license | ResidentMario/urban-physiology-nyc-catalog | b568f3b6ee1a887a50c4df23c488f50c92e30625 | cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c | refs/heads/master | 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | import requests
r = requests.get("https://data.cityofnewyork.us/api/geospatial/pwhj-ikym?method=export&format=GeoJSON")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/primary-commercial-zoning-by-lot/data.geojson", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/primary-commercial-zoning-by-lot/data.geojson"]
| [
"aleksey.bilogur@gmail.com"
] | aleksey.bilogur@gmail.com |
624b88870a81bdcb4b900400eb320fc0566419c6 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_5/jrdemm003/question4.py | 4dbd0abe67c40d7fdfcc20db539a82e2f03d310d | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | #question 4, assignment 5
#Emma Jordi
#15 april 2014
def main():
    """Plot y = f(x) as 21x21 ASCII art on the console.

    Reads a Python expression in ``x`` from stdin, evaluates it at integer
    x in [-10, 10] for every row, and draws 'o' where the rounded function
    value matches the row's y coordinate; '|', '-' and '+' mark the axes.
    """
    import math  # imported locally so the user's expression can use math.* via eval()
    yinc=2/20  # NOTE(review): unused - looks like leftover from a scaled-step version
    #get input
    function = input("Enter a function f(x):\n")
    # y walks rows top-to-bottom, x walks columns left-to-right.
    for y in range(-10,11):
        for x in range(-10,11):
            #round off function and evaluate
            x_real= round(eval(function))  # f(x) rounded to the nearest grid row
            y_real= -y  # flip sign: the first printed row is the top of the plot
            #fill in graph
            if x_real== 0 and y_real==0:
                print("o", end="")
            elif x_real==y_real:
                print("o",end="")
            #draw axes
            elif x == 0 and y==0:
                print("+", end="")
            elif x==0 :
                print("|", end="")
            elif y==0:
                print("-", end ="")
            #fill with spaces
            else:
                print(" ", end ="")
        print()  # terminate the current row
main()
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
d769e9e09dc58d5c75811d4db8c3dbfbd43528ef | 6ab67facf12280fedf7cc47c61ae91da0bcf7339 | /service/yowsup/yowsup/env/test_env_s40.py | 38623b63f6180319ab784ba20f156e1409e29feb | [
"GPL-3.0-only",
"GPL-3.0-or-later",
"MIT"
] | permissive | PuneethReddyHC/whatsapp-rest-webservice | 2f035a08a506431c40b9ff0f333953b855f9c461 | 822dfc46b80e7a26eb553e5a10e723dda5a9f77d | refs/heads/master | 2022-09-17T14:31:17.273339 | 2017-11-27T11:16:43 | 2017-11-27T11:16:43 | 278,612,537 | 0 | 1 | MIT | 2020-07-10T11:04:42 | 2020-07-10T11:04:41 | null | UTF-8 | Python | false | false | 384 | py | import unittest
from yowsup.env import S40YowsupEnv
class S40YowsupEnvTest(unittest.TestCase):
    """Unit test for the S40 (Nokia) environment's registration-token generator."""

    def test_tokengen(self):
        # Pin the token template so the expected MD5 digest stays stable.
        S40YowsupEnv._TOKEN_STRING = (
            "PdA2DJyKoUrwLw1Bg6EIhzh502dF9noR9uFCllGk1425519315543{phone}"
        )
        computed = S40YowsupEnv().getToken("1234567")
        self.assertEqual(computed, 'e84e1f1477704159efd46f6f0781dbde')
| [
"svub@x900.svub.net"
] | svub@x900.svub.net |
83aee29a829d19494f816092d052e9bc7e2a39fc | 0d86bb399a13152cd05e3ba5684e4cb22daeb247 | /python-exercise/2-file-and-function/py108_transfer_function.py | 0388dfcb1caa73e827ef0d54aeedb514cb7c06f9 | [] | no_license | tazbingor/learning-python2.7 | abf73f59165e09fb19b5dc270b77324ea00b047e | f08c3bce60799df4f573169fcdb1a908dcb8810f | refs/heads/master | 2021-09-06T05:03:59.206563 | 2018-02-02T15:22:45 | 2018-02-02T15:22:45 | 108,609,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17/12/8 下午11:17
# @Author : Aries
# @Site :
# @File : py108_transfer_function.py
# @Software: PyCharm
'''
11–12. 传递函数。
给在这章中描述的testit()函数写一个姊妹函数。
timeit()会带一个函数对象(和参数一起)以及计算出用了多少时间来执行这个函数,而不是测试执行时的错误。
返回下面的状态:函数返回值,消耗的时间。你可以用time.clock()或者time.time(),无论哪一个给你提供了较高的精度。
(一般的共识是在POSIX 上用time.time(),在win32 系统上用time.clock())
注意:timeit()函数与timeit 模块不相关(在python2.3 中引入)
'''
import time
def timeit(func, *nkwargs, **kwargs):
    """Time a single call of *func* with the given arguments.

    Returns a status tuple:
      * on success: ``(True, return_value, elapsed_seconds)``
      * on failure: ``(False, str(exception))``
    """
    # time.clock() was removed in Python 3.8; prefer perf_counter when it
    # exists so the helper works on both Python 2.7 and modern interpreters.
    # (short-circuit keeps time.clock unevaluated on Python 3)
    timer = getattr(time, 'perf_counter', None) or time.clock
    try:
        start = timer()
        retval = func(*nkwargs, **kwargs)
        end = timer()
        result = (True, retval, end - start)
    except Exception as diag:  # 'as' form is valid on Python 2.6+ and 3.x
        result = (False, str(diag))
    return result
def main():
    """Exercise timeit() by converting sample values with int/long/float."""
    funcs = (int, long, float)  # long exists only on Python 2, matching this file's vintage
    vals = (1234, 12.34, '1234', '12.34')
    for eachFunc in funcs:
        print '-' * 80
        for eachVal in vals:
            # retval is (True, value, seconds) on success, (False, message) on error.
            retval = timeit(eachFunc, eachVal)
            if retval[0]:
                print '%s(%s)=' % (eachFunc.__name__, eachVal), retval[1],
                print 'this func cost %s secs' % retval[2]
            else:
                print '%s(%s)=FAILED: ' % (eachFunc.__name__, eachVal), retval[1]
if __name__ == '__main__':
    main()
| [
"852353298@qq.com"
] | 852353298@qq.com |
09984292247853cfbf3752745f08ba17675ddaa5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_omnibuses.py | f45a75561cfb7054b2991b2c9af1afb1662434e3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py |
from xai.brain.wordbase.nouns._omnibus import _OMNIBUS
# class header
class _OMNIBUSES(_OMNIBUS, ):
    """Plural noun entry derived from the singular _OMNIBUS word class."""

    def __init__(self,):
        _OMNIBUS.__init__(self)
        self.name = "OMNIBUSES"   # canonical (upper-case) headword
        self.basic = "omnibus"    # singular/base form
        self.specie = 'nouns'     # part-of-speech bucket
        self.jsondata = {}        # no extra dictionary payload for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4625273d7e6bd21ce3f4eebb8c43635a69250192 | 9a9fb43d866dc8fd829211d2b47328ef1f5ed428 | /PI_ROS_WORKSPACES/test/build_isolated/actionlib_msgs/catkin_generated/pkg.develspace.context.pc.py | 35418bf96535125c4d5b9139fe8d7662c67ace76 | [] | no_license | droter/auto_mow | 326df42a54676079cac61fe63c40d5d04beb049b | 3742cb2ef78bc06d2771ac4c679e5110909774f8 | refs/heads/master | 2022-05-19T20:18:33.409777 | 2020-04-29T00:42:24 | 2020-04-29T00:42:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# generated from catkin/cmake/template/pkg.context.pc.in
# Context values consumed when emitting this package's pkg-config (.pc) file.

def _split_pc_list(raw):
    # An empty template substitution means "no entries", not [''].
    return raw.split(';') if raw != "" else []

CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = _split_pc_list("/home/pi/test/devel_isolated/actionlib_msgs/include")
PROJECT_CATKIN_DEPENDS = ' '.join("message_runtime;std_msgs".split(';'))
PKG_CONFIG_LIBRARIES_WITH_PREFIX = _split_pc_list("")
PROJECT_NAME = "actionlib_msgs"
PROJECT_SPACE_DIR = "/home/pi/test/devel_isolated/actionlib_msgs"
PROJECT_VERSION = "1.12.7"
| [
"joshuatygert@gmail.com"
] | joshuatygert@gmail.com |
aebc55f03133b3213a1591102cf7e0387a693ce6 | c5d5f423cac4b09d891283b3f15d9935a466760e | /set1/program3.py | f9d2a68d8759086a85b42a43422f53f0130dafb3 | [] | no_license | agrawal-prateek/codecata-hunter | b6e1ef0d442f699e31a4de8af0a059535b6713c3 | a96843375871c579c561b9546929014ad7ff609b | refs/heads/master | 2021-07-04T17:38:45.086291 | 2020-08-16T16:50:11 | 2020-08-16T16:50:11 | 154,723,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | try:
    # Fixed-point search: collect every index i where arr[i] == i.
    n = int(input())  # declared length; read but only len(arr) is actually used
    arr = [int(x) for x in input().split()]
    ans = set()
    for i in range(len(arr)):
        if arr[i] == i:
            ans.add(i)
    # Print 0 when no fixed point exists, otherwise all fixed indices.
    print(0) if not ans.__len__() else print(*ans)
except Exception as e:
    # Any malformed input is reported uniformly.
    print('Invalid Input')
| [
"prateekagrawal89760@gmail.com"
] | prateekagrawal89760@gmail.com |
4efba8bf3c2c32141b4a5926a5b8e91894c3c716 | efb1c783e1610397af1480953c330f728256d2bd | /taxon_home/views/applications/admin/Customize/Application.py | eb8a9452dbafad14ae452ddee4c1eb8dcf078d82 | [] | no_license | nguyennk/BioDIG | ee7529954b73b70e80a55cbda30cf6bcf874d233 | c28af9cb83f6be40a005993982db9a22aca4dcfe | refs/heads/master | 2021-01-09T06:52:23.163275 | 2013-05-03T15:47:29 | 2013-05-03T15:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | '''
Application for the Logout Handler of the DOME
URL: /logout_handler
Author: Andrew Oberlin
Date: August 14, 2012
'''
from renderEngine.AdminApplicationBase import AdminApplicationBase
from taxon_home.views.pagelets.registered.NavBarPagelet import NavBarPagelet
from taxon_home.views.pagelets.admin.CustomizePagelet import CustomizePagelet
from taxon_home.views.pagelets.public.FooterPagelet import FooterPagelet
class Application(AdminApplicationBase):
    """Admin 'Customize' page: binds pagelets into the registered base layout."""

    def doProcessRender(self, request):
        # Page-level template arguments (only the browser title here).
        args = {
            'title' : 'Customize'
        }
        self.setApplicationLayout('registered/base.html', args)
        # Bind each pagelet to its named slot in the layout template
        # (slot names presumably match ids in registered/base.html - TODO confirm).
        self.addPageletBinding('navBar', NavBarPagelet())
        self.addPageletBinding('center-1', CustomizePagelet())
        self.addPageletBinding('footer', FooterPagelet())
'''
Module-level entry point referenced from urls.py.
'''
def renderAction(request):
    """Instantiate the Customize page application and render the request."""
    page = Application()
    return page.render(request)
| [
"andyoberlin@gmail.com"
] | andyoberlin@gmail.com |
6615bb023af78ee2ffd22efe695c92bec0f1bb76 | a23efcef71aee7030a906abc8fdff9b367e8262f | /pyramid_bokehserver/server_backends.py | 89c08435cd758e45306a521deeafd8afec809819 | [] | no_license | mcdonc/pyramid_bokehserver | 8f299180847215a4c877872c987c5e040a6ccc57 | d45747af188c09bc6aca322f511ad9861e7b2606 | refs/heads/master | 2021-01-10T07:27:32.873537 | 2015-07-01T19:17:54 | 2015-07-01T19:17:54 | 36,462,816 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__name__)
import json
import shelve
from bokeh.exceptions import DataIntegrityException
from bokeh.util.string import encode_utf8, decode_utf8
class AbstractServerModelStorage(object):
    """Interface for server-side model storage (documents, users, ...).

    Concrete backends (Redis, in-memory, shelve) implement this contract;
    every method here only raises NotImplementedError.
    """
    def get(self, key):
        """Return the deserialized JSON object stored under *key*."""
        raise NotImplementedError

    def set(self, key, val):
        """Store JSON-serializable *val* under *key*, overwriting any value."""
        raise NotImplementedError

    def create(self, key, val):
        """Store *val* under *key*; unlike set(), must fail when *key* exists."""
        raise NotImplementedError
class RedisServerModelStorage(object):
    """Model storage backed by a Redis connection; values are JSON strings."""

    def __init__(self, redisconn):
        # redisconn: an already-connected redis client instance.
        self.redisconn = redisconn

    def get(self, key):
        # Return the deserialized object, or None when the key is absent.
        data = self.redisconn.get(key)
        if data is None:
            return None
        attrs = json.loads(decode_utf8(data))
        return attrs

    def set(self, key, val):
        # Unconditional overwrite of whatever is stored under key.
        self.redisconn.set(key, json.dumps(val))

    def create(self, key, val):
        # Create-only write. WATCH/MULTI is redis optimistic locking: the
        # queued SET is intended to abort if *key* changes concurrently.
        with self.redisconn.pipeline() as pipe:
            pipe.watch(key)
            pipe.multi()
            if self.redisconn.exists(key):
                raise DataIntegrityException("%s already exists" % key)
            else:
                pipe.set(key, json.dumps(val))
            pipe.execute()
class InMemoryServerModelStorage(object):
    """Dict-backed model storage; values are kept as JSON strings."""

    def __init__(self, data=None):
        # Fresh dict per instance unless a backing dict is supplied
        # (never a mutable default argument).
        self._data = {} if data is None else data

    def get(self, key):
        raw = self._data.get(key, None)
        if raw is None:
            return None
        return json.loads(decode_utf8(raw))

    def set(self, key, val):
        self._data[key] = json.dumps(val)

    def create(self, key, val):
        # Create-only write: refuse to clobber an existing key.
        if key in self._data:
            raise DataIntegrityException("%s already exists" % key)
        self._data[key] = json.dumps(val)
class ShelveServerModelStorage(object):
    """Model storage persisted in a shelve database named 'bokeh.server'."""

    def __init__(self, shelve_module=shelve):
        # shelve_module overrideable for testing purposes
        self.shelve_module = shelve_module

    def _open(self):
        # Every operation opens/closes the db so writes are flushed promptly.
        return self.shelve_module.open('bokeh.server')

    def get(self, key):
        db = self._open()
        try:
            key = encode_utf8(key)
            raw = db.get(key, None)
            if raw is None:
                return None
            attrs = json.loads(decode_utf8(raw))
        finally:
            db.close()
        return attrs

    def set(self, key, val):
        db = self._open()
        try:
            db[encode_utf8(key)] = json.dumps(val)
        finally:
            db.close()

    def create(self, key, val):
        key = str(key)
        db = self._open()
        try:
            if key in db:
                raise DataIntegrityException("%s already exists" % key)
            db[key] = json.dumps(val)
        finally:
            db.close()
| [
"chrism@plope.com"
] | chrism@plope.com |
0bf6a032f9865f843f77ee5ce25a74fd1838a611 | c9a809c5ef2a6b5e7e50da548c182510d203f430 | /tests/integration/modules/test_mac_brew.py | c7085d16294e8fee5abb012e08115a54f35e4408 | [
"Apache-2.0"
] | permissive | andyyumiao/saltx | 676a44c075ce06d5ac62fc13de6dcd750b3d0d74 | a05c22a60706b5c4389adbd77581b5cf985763b5 | refs/heads/master | 2022-02-24T00:51:42.420453 | 2022-02-09T06:46:40 | 2022-02-09T06:46:40 | 231,860,568 | 1 | 5 | NOASSERTION | 2022-02-09T06:46:40 | 2020-01-05T03:10:15 | Python | UTF-8 | Python | false | false | 6,818 | py | # -*- coding: utf-8 -*-
'''
:codeauthor: Nicole Thomas <nicole@saltstack.com>
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest, skip_if_not_root
# Import Salt Libs
import salt.utils
from salt.exceptions import CommandExecutionError
# Import third party libs
import salt.ext.six as six
# Brew doesn't support local package installation - So, let's
# Grab some small packages available online for brew
ADD_PKG = 'algol68g'
DEL_PKG = 'acme'
@destructiveTest
@skip_if_not_root
@skipIf(not salt.utils.is_darwin(), 'Test only applies to macOS')
@skipIf(not salt.utils.which('brew'), 'This test requires the brew binary')
class BrewModuleTest(ModuleCase):
    '''
    Integration tests for the brew module
    '''
    # Pattern used throughout: on any assertion or execution failure the test
    # removes the package it installed before re-raising, so a failing run
    # does not leave brew state behind for later tests.
    def test_brew_install(self):
        '''
        Tests the installation of packages
        '''
        try:
            self.run_function('pkg.install', [ADD_PKG])
            pkg_list = self.run_function('pkg.list_pkgs')
            try:
                self.assertIn(ADD_PKG, pkg_list)
            except AssertionError:
                self.run_function('pkg.remove', [ADD_PKG])
                raise
        except CommandExecutionError:
            self.run_function('pkg.remove', [ADD_PKG])
            raise

    def test_remove(self):
        '''
        Tests the removal of packages
        '''
        try:
            # Install a package to delete - If unsuccessful, skip the test
            self.run_function('pkg.install', [DEL_PKG])
            pkg_list = self.run_function('pkg.list_pkgs')
            if DEL_PKG not in pkg_list:
                self.run_function('pkg.install', [DEL_PKG])
                self.skipTest('Failed to install a package to delete')
            # Now remove the installed package
            self.run_function('pkg.remove', [DEL_PKG])
            del_list = self.run_function('pkg.list_pkgs')
            try:
                self.assertNotIn(DEL_PKG, del_list)
            except AssertionError:
                raise
        except CommandExecutionError:
            self.run_function('pkg.remove', [DEL_PKG])
            raise

    def test_version(self):
        '''
        Test pkg.version for mac. Installs a package and then checks we can get
        a version for the installed package.
        '''
        try:
            self.run_function('pkg.install', [ADD_PKG])
            pkg_list = self.run_function('pkg.list_pkgs')
            version = self.run_function('pkg.version', [ADD_PKG])
            try:
                self.assertTrue(version,
                                msg=('version: {0} is empty,\
                                or other issue is present'.format(version)))
                self.assertIn(ADD_PKG, pkg_list,
                              msg=('package: {0} is not in\
                              the list of installed packages: {1}'
                              .format(ADD_PKG, pkg_list)))
                #make sure the version is accurate and is listed in the pkg_list
                self.assertIn(version, str(pkg_list[ADD_PKG]),
                              msg=('The {0} version: {1} is \
                              not listed in the pkg_list: {2}'
                              .format(ADD_PKG, version, pkg_list[ADD_PKG])))
            except AssertionError:
                self.run_function('pkg.remove', [ADD_PKG])
                raise
        except CommandExecutionError:
            self.run_function('pkg.remove', [ADD_PKG])
            raise

    def test_latest_version(self):
        '''
        Test pkg.latest_version:
          - get the latest version available
          - install the package
          - get the latest version available
          - check that the latest version is empty after installing it
        '''
        try:
            self.run_function('pkg.remove', [ADD_PKG])
            uninstalled_latest = self.run_function('pkg.latest_version', [ADD_PKG])
            self.run_function('pkg.install', [ADD_PKG])
            installed_latest = self.run_function('pkg.latest_version', [ADD_PKG])
            version = self.run_function('pkg.version', [ADD_PKG])
            try:
                self.assertTrue(isinstance(uninstalled_latest, six.string_types))
                self.assertEqual(installed_latest, version)
            except AssertionError:
                self.run_function('pkg.remove', [ADD_PKG])
                raise
        except CommandExecutionError:
            self.run_function('pkg.remove', [ADD_PKG])
            raise

    def test_refresh_db(self):
        '''
        Integration test to ensure pkg.refresh_db works with brew
        '''
        refresh_brew = self.run_function('pkg.refresh_db')
        self.assertTrue(refresh_brew)

    def test_list_upgrades(self):
        '''
        Test pkg.list_upgrades: data is in the form {'name1': 'version1',
        'name2': 'version2', ... }
        '''
        try:
            upgrades = self.run_function('pkg.list_upgrades')
            try:
                self.assertTrue(isinstance(upgrades, dict))
                # An empty dict is valid; only check entry types when present.
                if len(upgrades):
                    for name in upgrades:
                        self.assertTrue(isinstance(name, six.string_types))
                        self.assertTrue(isinstance(upgrades[name], six.string_types))
            except AssertionError:
                self.run_function('pkg.remove', [ADD_PKG])
                raise
        except CommandExecutionError:
            self.run_function('pkg.remove', [ADD_PKG])
            raise

    def test_info_installed(self):
        '''
        Test pkg.info_installed: info returned has certain fields used by
        mac_brew.latest_version
        '''
        try:
            self.run_function('pkg.install', [ADD_PKG])
            info = self.run_function('pkg.info_installed', [ADD_PKG])
            try:
                self.assertTrue(ADD_PKG in info)
                self.assertTrue('versions' in info[ADD_PKG])
                self.assertTrue('revision' in info[ADD_PKG])
                self.assertTrue('stable' in info[ADD_PKG]['versions'])
            except AssertionError:
                self.run_function('pkg.remove', [ADD_PKG])
                raise
        except CommandExecutionError:
            self.run_function('pkg.remove', [ADD_PKG])
            raise

    def tearDown(self):
        '''
        Clean up after tests
        '''
        pkg_list = self.run_function('pkg.list_pkgs')
        # Remove any installed packages
        if ADD_PKG in pkg_list:
            self.run_function('pkg.remove', [ADD_PKG])
        if DEL_PKG in pkg_list:
            self.run_function('pkg.remove', [DEL_PKG])
| [
"yumiao3@jd.com"
] | yumiao3@jd.com |
322f916f4aad856095b140e0c432697436dc74de | 14be9e4cde12392ebc202e69ae87bbbedd2fea64 | /CSC148/assignment1/rider.py | ada290ad67dfbdea2ee6638e9089c22030032bf2 | [
"LicenseRef-scancode-unknown-license-reference",
"GLWTPL"
] | permissive | zijuzhang/Courses | 948e9d0288a5f168b0d2dddfdf825b4ef4ef4e1e | 71ab333bf34d105a33860fc2857ddd8dfbc8de07 | refs/heads/master | 2022-01-23T13:31:58.695290 | 2019-08-07T03:13:09 | 2019-08-07T03:13:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | """
The rider module contains the Rider class. It also contains
constants that represent the status of the rider.
=== Constants ===
@type WAITING: str
A constant used for the waiting rider status.
@type CANCELLED: str
A constant used for the cancelled rider status.
@type SATISFIED: str
A constant used for the satisfied rider status
"""
from location import Location
WAITING = "waiting"
CANCELLED = "cancelled"
SATISFIED = "satisfied"


class Rider:
    """A rider for a ride-sharing service.

    === Attributes ===
    @type id: str
        A unique identifier for the rider.
    @type origin: Location
        Where the rider wants to be picked up.
    @type destination: Location
        Where the rider wants to be dropped off.
    @type status: str
        One of WAITING, CANCELLED or SATISFIED.
    @type patience: int
        Time units the rider waits for pickup before cancelling.
    """

    def __init__(self, identifier, origin, destination, patience):
        """Initialize a rider; status starts as WAITING.

        @param Rider self: this rider
        @param str identifier: unique identifier of this rider
        @param Location origin: this rider's origin
        @param Location destination: this rider's destination
        @param int patience: time units before the rider cancels the ride
        @rtype: None
        """
        self.id = identifier
        self.origin = origin
        self.destination = destination
        self.patience = patience
        self.status = WAITING

    def __str__(self):
        """Return a string representation.

        @type self: Rider
        @rtype: str

        >>> r = Rider('Peter', Location(0, 0), Location(1, 1), 10)
        >>> print(r)
        rider Peter -> origin: (0, 0), destination: (1, 1), patience 10, status: waiting
        """
        template = 'rider {} -> origin: {}, destination: {}, patience {}, status: {}'
        return template.format(self.id, self.origin, self.destination,
                               self.patience, self.status)

    def __eq__(self, other):
        """Return whether *other* is a Rider with the same identity, route
        and patience (status is deliberately ignored, as before).

        @param Rider self: this rider object
        @param Rider | Any other: other rider object

        >>> r1 = Rider('Peter', Location(0, 0), Location(1, 1), 10)
        >>> r2 = Rider('Peter', Location(0, 0), Location(1, 1), 10)
        >>> r1 == r2
        True
        """
        if not isinstance(other, Rider):
            return False
        return (self.id == other.id and
                self.origin == other.origin and
                self.destination == other.destination and
                self.patience == other.patience)
| [
"peiqi1122@gmail.com"
] | peiqi1122@gmail.com |
09641252efc2095e47001cb36e03f1a823e97a48 | 4acd3a014c145509e72c3f91bc6efc83778268b5 | /scripts/labtainer-student/bin/imodule | f8a7cdbba1c4342412bad230f0bd9d023664889c | [] | no_license | mfthomps/Labtainers | 66375e53f0c8aa8eecfe07fddc70007f16eaeb4f | c7e8682c08178487092dbca26e777cad1638a660 | refs/heads/master | 2023-08-19T03:24:21.644920 | 2023-08-08T18:25:12 | 2023-08-08T18:25:12 | 129,324,240 | 235 | 71 | null | 2022-12-11T18:35:04 | 2018-04-13T00:00:18 | Python | UTF-8 | Python | false | false | 5,240 | #!/usr/bin/env python3
import os
import argparse
import subprocess
import shutil
import LabtainerLogging
# Per-user registry file holding one previously-added IModule URL per line.
home = os.getenv('HOME')
imodule_path = os.path.join(home, '.local/share/labtainers/imodules.txt')
def fixLabtainerDir():
    """Ensure LABTAINER_DIR is sane: exit when unset, repair the '/trunk' glitch."""
    labtainer_dir = os.getenv('LABTAINER_DIR')
    if labtainer_dir is None:
        print('LABTAINER_DIR not defined, exiting')
        exit(1)
    if labtainer_dir == '/trunk':
        # Known bad value of the env variable: assume we are running from
        # scripts/labtainer-student and repoint two directories up instead.
        labtainer_dir = os.path.abspath('../..')
        os.putenv("LABTAINER_DIR", labtainer_dir)
        os.environ['LABTAINER_DIR'] = labtainer_dir
def _debug_lines(logger, data):
    """Debug-log one subprocess output stream (bytes), one record per line."""
    if len(data.strip()) > 0:
        for line in data.decode('utf-8').splitlines(True):
            logger.debug(line)

def update(logger, imodule_url):
    """Fetch an IModule tarball from *imodule_url* and unpack it into the CWD.

    Supports file:// URLs (local copy) and anything wget understands.
    Progress and subprocess output go to *logger*; user-facing status is
    printed to stdout.
    """
    tfile = '/tmp/imodule.tar'
    try:
        # Was os.rm(tfile): os.rm does not exist, and the bare except silently
        # swallowed the AttributeError, so a stale tarball was never removed.
        os.remove(tfile)
    except OSError:
        pass
    if imodule_url.startswith('file://'):
        # NOTE(review): [6:] keeps one of the URL's slashes, giving paths like
        # '//tmp/x'; harmless on POSIX but worth confirming for other inputs.
        shutil.copyfile(imodule_url[6:].strip(), '/tmp/imodule.tar')
        logger.debug('copied local file from %s' % imodule_url[6:])
    else:
        cmd = 'wget -L -O /tmp/imodule.tar %s' % imodule_url.strip()
        logger.debug('cmd is %s' % cmd)
        ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = ps.communicate()
        _debug_lines(logger, output[1])
        _debug_lines(logger, output[0])
    if os.path.isfile(tfile):
        logger.debug('Got tar, expand from %s' % imodule_url)
        cmd = 'tar -xf %s' % tfile
        print('Updating IModule from %s' % imodule_url.strip())
        ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = ps.communicate()
        if len(output[1].strip()) > 0:
            # Anything on tar's stderr is treated as an installation failure.
            print('Error installing IModule from %s, see $LABTAINER_DIR/logs/imodule.log' % imodule_url)
            _debug_lines(logger, output[1])
        _debug_lines(logger, output[0])
    else:
        print('Unable to retrieve IModule from %s' % imodule_url)
def doChdir():
    """Change into $LABTAINER_DIR/labs after normalizing LABTAINER_DIR.

    Returns True on success, False (after printing a message) on failure.
    """
    fixLabtainerDir()
    labdir = os.path.join(os.getenv('LABTAINER_DIR'), 'labs')
    try:
        os.chdir(labdir)
    except OSError:  # narrowed from a bare except; chdir failures are OSError
        print('could not cd to %s' % labdir)
        return False
    return True
def doUpdates(logger):
    """Re-fetch every IModule URL recorded in the user's imodules.txt."""
    logger.debug('doUpdates')
    if not doChdir():
        print('IModule update failed')
        return
    if not os.path.isfile(imodule_path):
        logger.debug('No imodules at %s to update.' % imodule_path)
        print('No imodules defined.')
        return
    # One URL per line; update() fetches and unpacks each in turn.
    with open(imodule_path) as fh:
        for imodule_url in fh:
            update(logger, imodule_url)
if __name__ == '__main__':
    fixLabtainerDir()
    config_file = os.path.join(os.getenv('LABTAINER_DIR'), 'config', 'labtainer.config')
    logger = LabtainerLogging.LabtainerLogging("/tmp/imodule.log", '', config_file)
    parser = argparse.ArgumentParser(prog='imodule', description='Add an imodule URL to extend the set of labs available to this Labtainers installation.')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('path', default='NONE', nargs='?', action='store', help='The url of the imodule to add to the local installation.')
    group.add_argument('-l', '--list', action='store_true', default=False, help='List IModules already added to this installation.')
    group.add_argument('-u', '--updates', action='store_true', default=False, help='Update IModules for this installation.')
    group.add_argument('-d', '--delete', action='store_true', default=False, help='Delete all IModule URLs (will not affect existing labs).')
    args = parser.parse_args()
    if args.list:
        # Show each recorded URL, or a notice when none exist.
        if os.path.isfile(imodule_path):
            with open(imodule_path) as fh:
                did_one = False
                for line in fh:
                    print(line)
                    did_one = True
                if not did_one:
                    print('No IModules have been added to this installation')
        else:
            print('No IModules have been added to this installation')
    elif args.updates:
        doUpdates(logger)
    elif args.delete:
        if os.path.isfile(imodule_path):
            # Truncate the registry file; installed labs are left untouched.
            with open(imodule_path, 'w') as fh:
                pass
            print('Deleted all IModule URLs.')
        else:
            print('No IModules to delete.')
    elif args.path is not None and args.path != 'NONE':
        try:
            # Was makedirs(os.dirname(...)): both names are wrong (NameError
            # swallowed by the bare except), so the registry directory was
            # never created and the open() below failed on first use.
            os.makedirs(os.path.dirname(imodule_path))
        except OSError:
            pass  # directory already exists
        with open(imodule_path, 'a') as fh:
            logger.debug('Adding imodule path %s' % args.path)
            print('Adding imodule path %s' % args.path)
            fh.write(args.path+'\n')
        if doChdir():
            update(logger, args.path)
        else:
            print('IModule failed')
    else:
        parser.print_help()
| [
"mfthomps@nps.edu"
] | mfthomps@nps.edu | |
6ada12a471dec628d07c037ae10eb975a061215c | 4dc4345cca9c5f452bf4b87263505ee6b4e960af | /functions_lab/orders.py | 446f16a6087d702c447d6f0991c5a53031e4e678 | [] | no_license | ivan-yosifov88/python_fundamentals | 88c7eb5167bbe6692b95051d1551496a84893524 | 1cfe6d18453362fc26be984f6cb871b9d7dec63d | refs/heads/master | 2023-03-29T16:46:55.363035 | 2021-04-07T10:39:44 | 2021-04-07T10:39:44 | 341,604,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | def order_price(product, quantity):
    """Return the total cost for *quantity* units of *product*.

    Unknown products fall through with price left as None (the caller's
    format string would then fail - TODO confirm intended).
    """
    price = None
    if product == "coffee":
        price = quantity * 1.50
    elif product == "water":
        price = quantity * 1.00
    elif product == "coke":
        price = quantity * 1.40
    elif product == "snacks":
        price = quantity * 2.00
    return price
# Read the product name and quantity from stdin, then print the bill
# rounded to two decimal places.
type_of_product = input()
product_quantity = int(input())
print(f"{order_price(type_of_product, product_quantity):.2f}")
| [
"ivan.yosifov88gmail.com"
] | ivan.yosifov88gmail.com |
2a96efd09743ee500d34206b25dc21c326a63484 | 6f483999d6923445bb1ef6b07158a9e748e5d504 | /20161208/fn1.py | d37287349e19f5062f3792f0652430c0a0f5ece0 | [] | no_license | SearchOldMan/python_demo | 8bec61b46ad188304e3089ef66e7822e35577519 | 4ecba350a54806cf51896af614f2d1c459793c6f | refs/heads/master | 2020-06-14T15:10:02.677325 | 2017-03-01T08:57:24 | 2017-03-01T08:57:24 | 75,167,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | #coding=utf-8
def cmpInt(*num):
    """Return a message naming the max and min of the given integers.

    Any non-integer argument yields an error message instead; an empty
    argument list (previously an IndexError) now also returns a message.
    """
    for i in num:
        if not isinstance(i, int):
            # Fixed typo in the returned message ('Interge' -> 'Integer').
            return 'type not Integer'
    if not num:
        return 'no numbers given'
    a = sorted(num)
    return 'the max num is %s,the min num is %s' % (a[-1], a[0])
# Demo call (Python 2 print statement): mixed types, so an error message prints.
print cmpInt('a',[1,3,4],1,45)
"1161938933@qq.com"
] | 1161938933@qq.com |
1afa2d1191ccf1a6997259594c8679f1dd98294d | 5bd1c53fbab45dbd9e02992523fdc5279b642181 | /examples/example_distances.py | 211b8110f25a55a08a6d972bc3085850db7d2e22 | [
"Unlicense"
] | permissive | madeyoga/Machine-Learning-Algorithms | 3e192f69bebc3ff3157e22d791a45fd2d5ba81bf | 77ef39851de5f4073c3f4bdfd707347dec3eebc3 | refs/heads/master | 2020-04-12T18:39:34.141364 | 2019-04-19T06:24:55 | 2019-04-19T06:24:55 | 162,686,220 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | from pymla.model.base.distances import euclidean_distance
# Five labeled points and a query point Q (presumably [height, weight]
# pairs - TODO confirm against the dataset this example mirrors).
A = [176, 72]
B = [154, 85]
C = [182, 75]
D = [167, 98]
E = [158, 65]
Q = [160, 78]
# Print the Euclidean distance from each labeled point to the query,
# a k-nearest-neighbour style comparison.
print(euclidean_distance(A, Q))
print(euclidean_distance(B, Q))
print(euclidean_distance(C, Q))
print(euclidean_distance(D, Q))
print(euclidean_distance(E, Q))
| [
"vngla21@gmail.com"
] | vngla21@gmail.com |
7770dfe535fa23532bf406472196284dbdd726c4 | a1bffcd8854e1843e56bb812d4d83b3161a5211e | /tests/unit/modules/network/aruba/test_aruba_command.py | 6de1b7c11eedbb45bb806b0a2b7f7503518bd2b1 | [] | no_license | goneri/ansible.community | 1a71f9d98c164b77f8ed2ed7f558b4963005ff8f | f26f612dd0a3154050d90b51a75502018c95f6e4 | refs/heads/master | 2020-12-29T07:47:35.353515 | 2020-01-22T17:43:18 | 2020-01-22T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,476 | py | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible_collections.ansible.community.tests.unit.compat.mock import patch
from ansible_collections.ansible.community.plugins.modules import aruba_command
from ansible_collections.ansible.community.tests.unit.modules.utils import set_module_args
from ..aruba_module import TestArubaModule, load_fixture
class TestArubaCommandModule(TestArubaModule):
    """Unit tests for the aruba_command Ansible module.

    run_commands is patched so every 'command' is answered from an on-disk
    fixture file instead of a real device.
    """

    module = aruba_command

    def setUp(self):
        super(TestArubaCommandModule, self).setUp()
        self.mock_run_commands = patch('ansible_collections.ansible.community.plugins.modules.aruba_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestArubaCommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for item in commands:
                # Commands may arrive JSON-encoded; fall back to the raw string.
                try:
                    obj = json.loads(item['command'])
                    command = obj['command']
                except ValueError:
                    command = item['command']
                # Fixture filename convention: spaces become underscores.
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file

    def test_aruba_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Aruba Operating System Software'))

    def test_aruba_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Aruba Operating System Software'))

    def test_aruba_command_wait_for(self):
        wait_for = 'result[0] contains "Aruba Operating System Software"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()

    def test_aruba_command_wait_for_fails(self):
        # Unsatisfiable condition: the module retries its default 10 times.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)

    def test_aruba_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_aruba_command_match_any(self):
        # match='any': one satisfied condition is enough to succeed.
        wait_for = ['result[0] contains "Aruba Operating System Software"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_aruba_command_match_all(self):
        wait_for = ['result[0] contains "Aruba Operating System Software"',
                    'result[0] contains "Aruba Networks"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_aruba_command_match_all_failure(self):
        # match='all' with one failing condition must fail the module.
        wait_for = ['result[0] contains "Aruba Operating System Software"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
f727803c61a1a40efea95935e4ba3aa85f0abef9 | 0b11a00eaa683dc1d8be07818b3b704124edca59 | /scripts/kinect_frame.py | a54b87c882e6e4f69879d616daa027695a2c926c | [
"BSD-2-Clause"
] | permissive | OMARI1988/baxter_pykdl | 1a7a98e6c019af3681c811d7a06655b64c3b13e3 | d1e77d830a39b038b1d239e0f601b06bba816f1a | refs/heads/master | 2020-12-07T05:13:25.946808 | 2016-03-11T14:57:59 | 2016-03-11T14:57:59 | 49,892,153 | 1 | 1 | null | 2017-03-08T14:44:03 | 2016-01-18T17:15:48 | Python | UTF-8 | Python | false | false | 2,349 | py | #!/usr/bin/python
import rospy
import tf
import numpy as np
import cv2
global q,xyz,q_calib1,xyz_calib1,rpy  # NOTE(review): 'global' at module level is a no-op
# Live (trackbar-driven) transform state.
xyz = [0.0, 0.0, 0.0]
q1 = [0.0, 0.0, 0.0, 1.0]
rpy = [0,0,0]
# Calibrated kinect->torso transform actually broadcast below.
xyz_calib1 = [0.272, -0.03600000000000003, 0.23399999999999999]
q_calib1 = [-0.62690571, 0.63143915, -0.31687864, 0.32842314]
rpy_calib = [2343, 10, 2701] # the values you should use
if __name__ == '__main__':
    rospy.init_node('my_tf_broadcaster')
    br1 = tf.TransformBroadcaster()
    br2 = tf.TransformBroadcaster()
    rate = rospy.Rate(100.0)
    #
    def nothing(x):
        """Trackbar callback: refresh xyz/rpy/q from the slider positions."""
        global q,xyz,rpy
        # get current positions of four trackbars
        # Translation sliders are in millimetres, offset so 0..3000 -> -1..2 m.
        xyz[0] = cv2.getTrackbarPos('x','image')/1000.0-1
        xyz[1] = cv2.getTrackbarPos('y','image')/1000.0-1
        xyz[2] = cv2.getTrackbarPos('z','image')/1000.0-1
        r = cv2.getTrackbarPos('Roll','image')
        p = cv2.getTrackbarPos('Pitch','image')
        y = cv2.getTrackbarPos('Yaw','image')
        rpy = [r,p,y]
        # Slider units are 0.1 degree: x*pi/1800 == (x/10) * (pi/180) radians.
        q = tf.transformations.quaternion_from_euler( r*np.pi/1800, p*np.pi/1800, y*np.pi/1800)
        # q = tuple(qq)
    # Create a black image, a window
    img = np.zeros((300,512,3), np.uint8)
    cv2.namedWindow('image')
    # create trackbars for color change
    cv2.createTrackbar('x','image',0,3000,nothing)
    cv2.createTrackbar('y','image',0,3000,nothing)
    cv2.createTrackbar('z','image',0,3000,nothing)
    cv2.createTrackbar('Roll','image',0,3600,nothing)
    cv2.createTrackbar('Pitch','image',0,3600,nothing)
    cv2.createTrackbar('Yaw','image',0,3600,nothing)
    # Broadcast the fixed calibrated transform at 100 Hz until shutdown.
    # (The commented block below was the interactive, slider-driven variant.)
    while not rospy.is_shutdown():
        # cv2.imshow('image',img)
        # k = cv2.waitKey(1) & 0xFF
        # print 'xyz:',xyz
        # print 'rpy:',rpy
        # print 'q:',q1
        # print '------------------'
        #
        # br2.sendTransform((xyz[0], xyz[1], xyz[2]),
        #            (q1[0], q1[1], q1[2], q1[3]),
        #            rospy.Time.now(),
        #            "kinect2_1_ir_optical_frame",
        #            "torso")
        br2.sendTransform((xyz_calib1[0], xyz_calib1[1], xyz_calib1[2]),
                    (q_calib1[0], q_calib1[1], q_calib1[2], q_calib1[3]),
                    rospy.Time.now(),
                    "kinect2_1_ir_optical_frame",
                    "torso")
        rate.sleep()
| [
"omari.1988@gmail.com"
] | omari.1988@gmail.com |
6b5de45c4314243a3c4d7be082422f0c0c8ef2c0 | 5891ffffca901a14df7e5bf6ce9d8f755b21fcc8 | /src/accounts/migrations/0004_auto_20170127_0502.py | a77b156f42e99d068923bcb0b6b1ecea64ad3b06 | [] | no_license | aminhp93/django_serverup_premium | 13d88ea62288b1e8da832e1388a020e0087f1f1d | 05e5f62023b17fd6292bb398af933107a7972389 | refs/heads/master | 2021-01-11T19:03:06.422233 | 2017-01-28T04:39:21 | 2017-01-28T04:39:21 | 79,303,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-27 05:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration that removes the Membership model.

    The ``user`` foreign key is dropped first, then the model itself is
    deleted (field removal must precede model deletion).
    """
    # Must be applied after 0003_membership, which introduced the model.
    dependencies = [
        ('accounts', '0003_membership'),
    ]
    operations = [
        # Drop the FK column before deleting the table it points from.
        migrations.RemoveField(
            model_name='membership',
            name='user',
        ),
        migrations.DeleteModel(
            name='Membership',
        ),
    ]
| [
"minhpn.org.ec@gmail.com"
] | minhpn.org.ec@gmail.com |
55b5e8fc968a1628283c426288af71ae765f78c1 | b48b47fcdbf46c43633be8ffaa9ad271892ebb07 | /Activities/Activity_11_Solutions/Car_sol.py | fd8d150f3413a7c1f6db69f3d214292e72e59e84 | [] | no_license | DavinderSohal/Python | 92e878c2648c9507b00fef6ee7d7ae67338219d9 | 728e77c1e55aaa00a642d76dce5d6cdf56eff788 | refs/heads/main | 2023-05-12T23:41:46.226139 | 2021-05-16T16:12:01 | 2021-05-16T16:12:01 | 340,810,583 | 0 | 0 | null | 2021-04-20T09:18:00 | 2021-02-21T03:42:21 | Python | UTF-8 | Python | false | false | 156 | py | """Class representing Car"""
class Car:
    """Simple car identified by a name set via :meth:`initialize`."""

    def initialize(self, name):
        """Set the car's name.

        Note: in the original code the string ``"""'"""'"""Set name"""'"""'"""``
        appeared *after* the method body at class level, where it is a no-op
        expression rather than a docstring; it now documents the method.
        """
        self.name = name


# Demonstration: create a car and give it a name.
car = Car()
car.initialize("Ford")
| [
"sohal00026@gmail.com"
] | sohal00026@gmail.com |
8d8edd1ebcb59267b640206ff5a9d911e03ee8a3 | 63f0ca44a91c1c4eed7eb2b255b9431c54ad931e | /models/loss/hinge.py | b76ac1b977acc046221586a1e0488f9964a28d3d | [
"Apache-2.0"
] | permissive | jamesoneill12/LayerFusion | e4685e2a54467d6c4dc02022b97af5da2c429aa7 | 99cba1030ed8c012a453bc7715830fc99fb980dc | refs/heads/main | 2023-08-18T04:45:16.662884 | 2021-09-26T18:12:48 | 2021-09-26T18:12:48 | 410,594,160 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import torch
from torch import nn
class HingeLoss(nn.Module):
    """Hinge-style loss: the per-column maximum of ``|0.5 - y_pred * y_target|``
    divided by the batch size.

    Bug fixed: ``torch.max(..., dim=0)`` returns a ``(values, indices)``
    named tuple; the original code divided the tuple itself by an int,
    which raises ``TypeError`` at runtime.  We divide the ``values``
    tensor instead.
    """

    def __init__(self):
        super(HingeLoss, self).__init__()

    def forward(self, y_pred, y_target):
        """Compute the loss.

        Args:
            y_pred: predicted scores; same shape as ``y_target``.
            y_target: target labels (typically +/-1).

        Returns:
            Tensor of losses reduced over dim 0 (a scalar for 1-D inputs).
        """
        margins = torch.abs(0.5 - y_pred * y_target)
        # ``.values`` extracts the max tensor from the (values, indices) pair.
        e = torch.max(margins, dim=0).values / y_target.shape[0]
        return e
"james.oneill@insight-centre.org"
] | james.oneill@insight-centre.org |
e2efcaaeef659583b5cb0f5f32557df3f12d219f | a3ad7dfd1b38e7df15b2fe08934fd024408e9c58 | /state/migrations/0003_alter_city_options.py | be0bc0e8b735e5a1cbf6491b287568529476d322 | [] | no_license | mmrubayet/dumpsterRentalSite | a74d071cebe8fecb24b694a7e0b27baca030463d | aa7e8ae995179cb769ec09ff3894d614ca18ae08 | refs/heads/main | 2023-07-04T13:40:08.285433 | 2021-07-27T03:17:40 | 2021-07-27T03:17:40 | 383,875,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | # Generated by Django 3.2.5 on 2021-07-17 13:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('state', '0002_auto_20210711_0656'),
]
operations = [
migrations.AlterModelOptions(
name='city',
options={'verbose_name_plural': 'cities'},
),
]
| [
"m.rubayet94@gmail.com"
] | m.rubayet94@gmail.com |
a7263040c2cac925769b6fcce9b5f7cdfa8e6671 | b50f43c7c8cba1c0f349870596f12d1a333e6f42 | /axonius_api_client/tests/tests_api/tests_asset_callbacks/test_callbacks_base.py | 38f40c4bd518d6e0d18f840bbd62b94a7bf030af | [
"MIT"
] | permissive | zahediss/axonius_api_client | 190ca466e5de52a98af9b527a5d1c132fd8a5020 | 8321788df279ffb7794f179a4bd8943fe1ac44c4 | refs/heads/master | 2023-08-01T14:35:17.095559 | 2021-09-13T21:04:23 | 2021-09-13T21:04:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | # -*- coding: utf-8 -*-
"""Test suite for assets."""
import copy
import pytest
from ...utils import get_rows_exist
from .test_callbacks import Callbacks
class TestCallbacksBase(Callbacks):
    """Tests for the 'base' asset export callback: rows pass through
    unchanged with default options, and are transformed when field
    processing options are enabled."""
    # Run every test once against the devices API and once against users.
    @pytest.fixture(params=["api_devices", "api_users"])
    def apiobj(self, request):
        return request.getfixturevalue(request.param)
    @pytest.fixture(scope="class")
    def cbexport(self):
        # Export callback under test.
        return "base"
    def test_row_as_is(self, cbexport, apiobj, caplog):
        """With no getargs, processed rows must equal the originals."""
        getargs = {}
        cbobj = self.get_cbobj(apiobj=apiobj, cbexport=cbexport, getargs=getargs)
        cbobj.start()
        rows_orig = get_rows_exist(apiobj=apiobj, max_rows=5)
        # Deep-copy so process_row cannot mutate the comparison baseline.
        rows = copy.deepcopy(rows_orig)
        rows_proc = []
        for row in rows:
            rows_proc += cbobj.process_row(row=row)
        assert rows_proc == rows_orig
        cbobj.stop()
    def test_row_fully_loaded(self, cbexport, apiobj, caplog):
        """With all field-processing options on, rows must be transformed."""
        getargs = {
            "field_excludes": ["adapters"],
            "field_flatten": True,
            "field_titles": True,
            "field_join": True,
            "field_null": True,
            "report_adapters_missing": True,
        }
        cbobj = self.get_cbobj(apiobj=apiobj, cbexport=cbexport, getargs=getargs)
        cbobj.start()
        rows_orig = get_rows_exist(apiobj=apiobj, max_rows=5)
        rows = copy.deepcopy(rows_orig)
        rows_proc = []
        for row in rows:
            rows_proc += cbobj.process_row(row=row)
        assert rows_proc != rows_orig
        cbobj.stop()
| [
"jimbosan@gmail.com"
] | jimbosan@gmail.com |
9a627fa2389ac90fe1a409ee46f64840d9bde7de | 54511c722e83e84516a1e100248e73879bb8fca7 | /Day_10_MOCKI/explanation/test/test_sample_patch.py | 3ab47164965f73aaaded3743ea8a45122f248bfb | [] | no_license | Oleksandr015/Python-podstawy | a453c1e6848019d859c1114836fb996ce497b2d2 | 9fa910684c6ffa981db64f0ab2e8423714269636 | refs/heads/master | 2023-06-16T04:03:43.547947 | 2021-07-13T12:56:53 | 2021-07-13T12:56:53 | 282,299,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | import unittest
from unittest.mock import patch, MagicMock
from Day_10_MOCKI.explanation.sample_patch import RemovalService
class TestRemovalService(unittest.TestCase):
    """Unit tests for RemovalService with the ``os`` module mocked out.

    Fixes over the original:

    * Both removal tests were named ``test_rm``; the second definition
      shadowed the first, so the decorator-based variant never ran.
    * The patch target pointed at ``sample_5_mocks...`` while the code
      under test is imported from ``Day_10_MOCKI...`` — mocks must patch
      the module where ``os`` is actually looked up.
    """

    @patch('Day_10_MOCKI.explanation.sample_patch.os')
    def test_rm_with_decorator(self, mock_os):
        """Patch ``os`` via the decorator and verify ``remove`` is called."""
        # instantiate our service
        reference = RemovalService()
        reference.rm("any path")
        # test that the remove call was called.
        self.assertTrue(mock_os.remove.called, "Failed to not remove the file if not present.")
        mock_os.remove.assert_called_once_with('any path')

    def test_rm_with_context_manager(self):
        """Patch ``os`` via a context manager and verify ``remove`` is called."""
        # instantiate our service
        reference = RemovalService()
        mock_os = MagicMock()
        with patch('Day_10_MOCKI.explanation.sample_patch.os', mock_os):
            reference.rm("any path")
        # test that the remove call was called.
        self.assertTrue(mock_os.remove.called, "Failed to not remove the file if not present.")
        mock_os.remove.assert_called_once_with('any path')

    def test_file_exists(self):
        """Verify ``os.path.exists`` is consulted with the given path."""
        reference = RemovalService()
        mock_os = MagicMock()
        with patch('Day_10_MOCKI.explanation.sample_patch.os', mock_os):
            reference.file_exists("new path")
        mock_os.path.exists.assert_called_once_with('new path')
| [
"iskander@kursGit.pl"
] | iskander@kursGit.pl |
11147064cf5159f780d484dbbcceb5bc6601303d | 6e13bb96879e9fe25e3adfd592aecc182fdc12ab | /plugins/action/network_device_with_snmp_v3_des_info.py | 7f42087d3b15e0083a4c2f4df9df3b31cc654327 | [
"MIT"
] | permissive | yijxiang/dnacenter-ansible | 716419123e49be97fba199fadc0d6c0ae98f7b50 | 5138e99bf9ee2b2eb64f70f6ab514c146d17ebc8 | refs/heads/main | 2023-07-08T01:41:14.773304 | 2021-08-10T17:09:34 | 2021-08-10T17:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.dnac.plugins.module_utils.dnac import (
DNACSDK,
dnac_argument_spec,
)
# Get common arguments specification
argument_spec = dnac_argument_spec()
# Add arguments specific for this module
argument_spec.update(dict(
    siteId=dict(type="str"),
    offset=dict(type="str"),
    limit=dict(type="str"),
    sortBy=dict(type="str"),
    order=dict(type="str"),
))
# This module has no inter-argument constraints, so all four conditional
# requirement lists stay empty.
required_if = []
required_one_of = []
mutually_exclusive = []
required_together = []
class ActionModule(ActionBase):
    """Read-only action plugin: fetch devices with SNMPv3 DES config via the
    DNA Center SDK and return the response under ``dnac_response``."""
    def __init__(self, *args, **kwargs):
        # ansible.utils provides the argspec validator used below; fail fast
        # with an actionable message if it is missing.
        if not ANSIBLE_UTILS_IS_INSTALLED:
            raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
        super(ActionModule, self).__init__(*args, **kwargs)
        self._supports_async = True
        self._result = None
    # Checks the supplied parameters against the argument spec for this module
    def _check_argspec(self):
        aav = AnsibleArgSpecValidator(
            data=self._task.args,
            schema=dict(argument_spec=argument_spec),
            schema_format="argspec",
            schema_conditionals=dict(
                required_if=required_if,
                required_one_of=required_one_of,
                mutually_exclusive=mutually_exclusive,
                required_together=required_together,
            ),
            name=self._task.action,
        )
        valid, errors, self._task.args = aav.validate()
        if not valid:
            raise AnsibleActionFail(errors)
    def get_object(self, params):
        """Translate the module's camelCase args to the SDK's snake_case names."""
        new_object = dict(
            site_id=params.get("siteId"),
            offset=params.get("offset"),
            limit=params.get("limit"),
            sort_by=params.get("sortBy"),
            order=params.get("order"),
        )
        return new_object
    def run(self, tmp=None, task_vars=None):
        """Validate args, query DNA Center, and return the merged result.

        This is a pure read (``changed`` is always False)."""
        self._task.diff = False
        self._result = super(ActionModule, self).run(tmp, task_vars)
        self._result["changed"] = False
        self._check_argspec()
        dnac = DNACSDK(params=self._task.args)
        response = dnac.exec(
            family="devices",
            function='get_devices_with_snmpv3_des',
            params=self.get_object(self._task.args),
        )
        self._result.update(dict(dnac_response=response))
        self._result.update(dnac.exit_json())
        return self._result
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
04447b3332fa975b8d367a3e1edaed6469108db5 | cc5b4a35e62f23aa31a31ffa603c0cc7423f5745 | /venv/Lib/site-packages/PyInstaller/hooks/hook-netCDF4.py | 01e4001fd1b1ca9249e70686984c2885b8fb251b | [
"MIT",
"GPL-2.0-or-later",
"Bootloader-exception"
] | permissive | Lyniat/AutomatedWallpaperChanger | 2cc334ce8dbe8c855b9702e24c0b2bf62ea0dc14 | 76093f4f9bd20d8fdfd497f6dfbe93d22b17feac | refs/heads/master | 2023-06-25T18:17:37.625084 | 2021-07-15T11:44:47 | 2021-07-15T11:44:47 | 387,545,342 | 0 | 0 | MIT | 2021-07-19T17:32:04 | 2021-07-19T17:32:04 | null | UTF-8 | Python | false | false | 613 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2015-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
# netCDF4 (tested with v.1.1.9) has some hidden imports
# (modules loaded at runtime that PyInstaller's static analysis misses).
hiddenimports = ['netCDF4.utils', 'netcdftime']
| [
"martenscheuck@gmail.com"
] | martenscheuck@gmail.com |
659b19079e5cdd8d3e4596eb7dc1b7d2ef1208cc | d2a181395347b6b7308cdbd9a411c79775a035c8 | /pytorch_lightning/trainer/connectors/env_vars_connector.py | 1f1c41c6eb2f0e18366561d801f5ea64a1e8d17d | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] | permissive | rephrase-ai/pytorch-lightning | d30d552288d1bf6f65a605e5c8893583ecc58862 | 8bd7b1bdd7d3f723822e78908033cf0a6743713a | refs/heads/master | 2023-06-06T11:32:41.765882 | 2021-06-23T12:09:53 | 2021-06-23T12:09:53 | 291,268,679 | 2 | 0 | Apache-2.0 | 2020-08-29T12:38:33 | 2020-08-29T12:38:32 | null | UTF-8 | Python | false | false | 1,676 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
from typing import Callable
from pytorch_lightning.utilities.argparse import get_init_arguments_and_types, parse_env_variables
def _defaults_from_env_vars(fn: Callable) -> Callable:
    """
    Decorator for :class:`~pytorch_lightning.trainer.trainer.Trainer` methods
    that fills in missing keyword arguments from environment variables before
    calling the wrapped function.

    Precedence (lowest to highest): environment variables, then explicitly
    passed arguments (explicit kwargs override the env values).
    """
    @wraps(fn)
    def insert_env_defaults(self, *args, **kwargs):
        cls = self.__class__  # get the class
        if args:  # in case any args were passed, move them to kwargs
            # parse only the argument names
            cls_arg_names = [arg[0] for arg in get_init_arguments_and_types(cls)]
            # convert args to kwargs
            kwargs.update({k: v for k, v in zip(cls_arg_names, args)})
        env_variables = vars(parse_env_variables(cls))
        # update the kwargs by env variables; later items win, so explicit
        # kwargs take priority over the environment
        kwargs = dict(list(env_variables.items()) + list(kwargs.items()))
        # all args were already moved to kwargs
        return fn(self, **kwargs)
    return insert_env_defaults
| [
"noreply@github.com"
] | rephrase-ai.noreply@github.com |
1b0e93a8c1aa0e434feb04429bad5eed50cc3c47 | 686a8d0612c5ba6177bc6556d900fbb8ba8f7e9b | /oop/sort_marks_ex.py | f9b5f6717cc30d7778de319f32ff8466c9d122ca | [] | no_license | srikanthpragada/PYTHON_10_DEC_2018_LANGDEMO | 8da367e33d94f2e6d407b9caecc24195b17f3d06 | 23ca8b81e69a4313ae1a36d0a03a37373248caaf | refs/heads/master | 2020-04-10T20:28:04.255860 | 2019-01-05T02:59:51 | 2019-01-05T02:59:51 | 161,268,725 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | # Print marks in sorted order
# Read up to five integer marks from the user and print them ascending.
marks = []
for _attempt in range(5):
    raw = input("Enter marks : ")
    try:
        marks.append(int(raw))
    except ValueError:
        print("Invalid number!")
marks.sort()
for mark in marks:
    print(mark)
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
ec64dbea053ac6ca8950f58fad929831e33817fd | 591738c175efc21b6936fd02e29ec6668c9c97ec | /models.py | 7be2182c3390051e5e37bbf5dd85a9597b732702 | [] | no_license | creimers/simple_slider | 7d2cf949afdf47944be123c367984c7d74d12230 | f00cc74f610b8aa4829b772a07010df0ee0f4920 | refs/heads/master | 2016-09-11T09:02:56.242200 | 2015-03-24T13:12:22 | 2015-03-24T13:12:22 | 28,854,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from filer.fields.image import FilerImageField
from cms.models import CMSPlugin
@python_2_unicode_compatible
class Slider(CMSPlugin):
    """CMS plugin holding a named image slider and its display options."""
    name = models.CharField(max_length=50)
    # Show navigation dots under the slider.
    dots = models.BooleanField(default=False)
    # Use a fade transition instead of sliding.
    fade = models.BooleanField(default=False)
    autoplay = models.BooleanField(default=True)
    def copy_relations(self, oldinstance):
        """Clone the related Image rows when the CMS copies this plugin;
        clearing ``pk`` makes ``save()`` insert new rows."""
        for image in oldinstance.images.all():
            image.pk = None
            image.slider = self
            image.save()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Image(models.Model):
    """A single slide: a filer image belonging to a Slider, with an
    optional caption."""
    slider = models.ForeignKey(
        Slider,
        related_name="images"
    )
    image = FilerImageField(
        null=True,
        blank=False
    )
    caption_text = models.CharField(
        null=True,
        blank=True,
        max_length=255
    )
    def __str__(self):
        # Prefer the caption; fall back to the filer image's label.
        if self.caption_text:
            return self.caption_text
        else:
            return self.image.label
"christoph@superservice-international.com"
] | christoph@superservice-international.com |
3527f08ade4276b6c56bb8d9c0f9869d9307b486 | 46201552303331f68418e67231cce5f4688b85e3 | /awesome_gans/dragan/dragan_train.py | 8492e5b652b8004c616495b2ebd10440ff515064 | [
"MIT"
] | permissive | kozistr/Awesome-GANs | 6c8d7a62eefe4f60b3b8e4261d073c74f22dd52c | 6548b49d8c05459f7b252c17d0959b5825d2fc69 | refs/heads/master | 2023-07-19T09:37:55.311749 | 2022-06-25T12:03:10 | 2022-06-25T12:03:10 | 92,664,599 | 820 | 195 | MIT | 2021-03-30T01:46:01 | 2017-05-28T14:00:08 | Python | UTF-8 | Python | false | false | 4,357 | py | import time
import numpy as np
import tensorflow as tf
import awesome_gans.dragan.dragan_model as dragan
import awesome_gans.image_utils as iu
from awesome_gans.datasets import MNISTDataSet as DataSet
# Output locations for generated samples and model checkpoints.
results = {'output': './gen_img/', 'model': './model/DRAGAN-model.ckpt'}
# Training hyper-parameters.
train_step = {
    'batch_size': 64,
    'global_step': 200001,
    'logging_interval': 2000,
}
# Fix the NumPy RNG so perturbations and noise are reproducible.
np.random.seed(777)
def get_perturbed_images(images):
    """Return *images* plus uniform [0, 1) noise scaled by half their std."""
    noise = np.random.random(images.shape)
    scale = 0.5 * images.std()
    return images + scale * noise
def main():
    """Train DRAGAN on MNIST, resuming from the latest checkpoint if one
    exists; periodically log losses, save sample grids and checkpoints."""
    start_time = time.time()  # Clocking start
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as s:
        # DRAGAN model
        model = dragan.DRAGAN(s, batch_size=train_step['batch_size'])
        # Initializing variables
        s.run(tf.global_variables_initializer())
        # Load model & Graph & Weights
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            model.saver.restore(s, ckpt.model_checkpoint_path)
            # Checkpoint paths end in "-<step>"; recover the step to resume.
            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %s" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')
        # MNIST DataSet images
        mnist = DataSet(ds_path="D:\\DataSet/mnist/").data
        for global_step in range(saved_global_step, train_step['global_step']):
            batch_x, _ = mnist.train.next_batch(model.batch_size)
            # Perturbed copy of the batch (DRAGAN gradient-penalty input).
            batch_x_p = get_perturbed_images(batch_x)
            batch_x = np.reshape(batch_x, [-1] + model.image_shape)
            batch_x_p = np.reshape(batch_x_p, [-1] + model.image_shape)
            batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
            # Update D network
            _, d_loss = s.run(
                [model.d_op, model.d_loss],
                feed_dict={
                    model.x: batch_x,
                    model.x_p: batch_x_p,
                    model.z: batch_z,
                },
            )
            # Update G network
            _, g_loss = s.run(
                [model.g_op, model.g_loss],
                feed_dict={
                    model.z: batch_z,
                },
            )
            if global_step % train_step['logging_interval'] == 0:
                batch_z = np.random.uniform(-1.0, 1.0, [model.batch_size, model.z_dim]).astype(np.float32)
                d_loss, g_loss, summary = s.run(
                    [model.d_loss, model.g_loss, model.merged],
                    feed_dict={
                        model.x: batch_x,
                        model.x_p: batch_x_p,
                        model.z: batch_z,
                    },
                )
                # Print loss
                print(
                    "[+] Global Step %05d => " % global_step,
                    " D loss : {:.8f}".format(d_loss),
                    " G loss : {:.8f}".format(g_loss),
                )
                # Training G model with sample image and noise
                sample_z = np.random.uniform(-1.0, 1.0, [model.sample_num, model.z_dim]).astype(np.float32)
                samples = s.run(
                    model.g,
                    feed_dict={
                        model.z: sample_z,
                    },
                )
                samples = np.reshape(samples, [-1] + model.image_shape)
                # Summary saver
                model.writer.add_summary(summary, global_step)
                # Export image generated by model G
                sample_image_height = model.sample_size
                sample_image_width = model.sample_size
                sample_dir = results['output'] + 'train_{0}.png'.format(global_step)
                # Generated image save
                iu.save_images(samples, size=[sample_image_height, sample_image_width], image_path=sample_dir)
                # Model save
                model.saver.save(s, results['model'], global_step)
            # NOTE(review): incrementing the for-loop variable has no effect
            # on the next iteration; `range` reassigns it.
            global_step += 1
        end_time = time.time() - start_time  # Clocking end
        # Elapsed time
        print("[+] Elapsed time {:.8f}s".format(end_time))
        # Close tf.Session
        s.close()
if __name__ == '__main__':
    main()
| [
"kozistr@gmail.com"
] | kozistr@gmail.com |
505bd36558a928b1780ed5b81313b5f056361d91 | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/2021-5-15/python_re2_test_file/regexlib_1225.py | 1c22ccb8779f7f6bcff943d316db2de4f3ed1685 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # 1225
# ^([a-zA-Z0-9]+)([\._-]?[a-zA-Z0-9]+)*@([a-zA-Z0-9]+)([\._-]?[a-zA-Z0-9]+)*([\.]{1}[a-zA-Z0-9]{2,})+$
# EXPONENT
# nums:5
# EXPONENT AttackString:"a"+"0"*32+"!1 __NQ"
import re2 as re
from time import perf_counter
# Email-validation pattern from regexlib entry 1225.  The nested quantifiers
# make it exponential on a backtracking engine; this script times it under
# the re2 binding (imported above), which does not backtrack.
regex = """^([a-zA-Z0-9]+)([\._-]?[a-zA-Z0-9]+)*@([a-zA-Z0-9]+)([\._-]?[a-zA-Z0-9]+)*([\.]{1}[a-zA-Z0-9]{2,})+$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack string: growing digit run followed by a suffix that forces a
    # match failure, maximising work for a backtracking matcher.
    ATTACK = "a" + "0" * i * 1 + "!1 __NQ"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *1}: took {DURATION} seconds!") | [
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
198a5f525873609cf2162a8ec5634de4a2c29c0e | 44cbc067afcced7fac7ad7f4584d0d16d66bf5b4 | /ansiblemetrics/playbook/num_filters.py | 74aeb55f948d05200c4552cd44b2936fd35a3668 | [
"Apache-2.0"
] | permissive | ElsevierSoftwareX/SOFTX_2020_231 | fb75820e99dbd6f2380146ecf5b8893d69942260 | e3ad95ebdc324ae308669d437ec60bd726580102 | refs/heads/master | 2023-01-18T22:53:04.785576 | 2020-11-18T11:52:39 | 2020-11-18T11:52:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | import re
from ansiblemetrics.ansible_metric import AnsibleMetric
from ansiblemetrics.utils import key_value_list
filter_regex = re.compile(r'[^\|]+(\|)[^\|]')
class NumFilters(AnsibleMetric):
    """Implements the 'Number Of Filters' metric: counts Jinja2 filter
    pipes ('|') appearing in values that contain a ``{{ ... }}`` expression."""

    def count(self):
        """Return the number of filters used in the playbook."""
        total = 0
        for item in key_value_list(self.playbook):
            value = item[1]
            if value is None:
                continue
            text = str(value)
            # Only values holding a Jinja2 expression such as {{ foobar }}
            # can contain filters.
            if re.search(r'\{\{.*\}\}', text) is None:
                continue
            total += len(filter_regex.findall(text))
        return total
| [
"stefano.dallapalma0@gmail.com"
] | stefano.dallapalma0@gmail.com |
6a7c42f5a6d31b4e0af18af7c9b263216235c5c7 | 9e207d04bb5ab7ec04ec19c83862f1afbc24d933 | /itkwidgets/_version.py | 75b5538f448ecdbf46072749134bcae5c4be3b73 | [
"Apache-2.0"
] | permissive | darrent1974/itkwidgets | 62a617606b74d0b2a321b3aa02bd051f60d405d5 | 9b914cbc847a7e9d6fb9024edb81cd8460237722 | refs/heads/master | 2020-09-08T17:26:22.947926 | 2019-11-12T11:14:17 | 2019-11-12T11:14:17 | 221,195,502 | 0 | 0 | Apache-2.0 | 2019-11-12T10:53:17 | 2019-11-12T10:53:16 | null | UTF-8 | Python | false | false | 294 | py | version_info = (0, 23, 0, 'dev', 0)
# PEP 440-style suffix for each release level; 'final' releases get none.
_specifier_ = {'dev': 'dev', 'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'final': ''}
_suffix = '' if version_info[3] == 'final' else _specifier_[version_info[3]] + str(version_info[4])
__version__ = '{}.{}.{}{}'.format(version_info[0], version_info[1], version_info[2], _suffix)
| [
"matt.mccormick@kitware.com"
] | matt.mccormick@kitware.com |
e18a567dd46af30b052673ce5e08f0af57add366 | 7882860350c714e6c08368288dab721288b8d9db | /2일차/14번.py | df00a65704f76b790c0f30c1a53b19d20b90ab85 | [] | no_license | park-seonju/Algorithm | 682fca984813a54b92a3f2ab174e4f05a95921a8 | 30e5bcb756e9388693624e8880e57bc92bfda969 | refs/heads/master | 2023-08-11T18:23:49.644259 | 2021-09-27T10:07:49 | 2021-09-27T10:07:49 | 388,741,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | dicBase = (('가','깋'), ('나','닣'), ('다','딯'), ('라','맇'), ('마','밓'), ('바','빟'), ('사','싷'),
('아','잏'), ('자','짛'), ('차','칳'), ('카','킿'), ('타','팋'), ('파','핗'), ('하','힣'))
inputWord = ['막', '부모님', '비용', '비행기', '원래', '처리', '최초', '꼴', '좀', '들다', '싶다',
'수출', '계시다', '다', '뒤', '듣다', '함께', '아이', '무척', '보이다', '가지다', '그',
'자르다', '데리다', '마리', '개', '정도', '옳다', '놀이','뜨겁다']
# Group each word under the initial-consonant range its first syllable
# falls into (one sub-list per entry in dicBase, in order).
list1 = [
    [word for word in inputWord if ord(lo) <= ord(word[0]) <= ord(hi)]
    for lo, hi in dicBase
]
print(list1) | [
"cucu9823@naver.com"
] | cucu9823@naver.com |
b7392d11918434ff4842aebfe27e26ee278338ed | 7864ab2c567f5f3a98e7ab38ff38a3bd7c816fde | /fireplace/cards/cardxml.py | 89e69e0959453d27aa72dc3e040152a876ddf732 | [] | no_license | gmagogsfm/fireplace | bfa1b57254b673317442518a997c635183bd3e61 | f16ee0659310a003d54552d0660ea3eb15c4da3f | refs/heads/master | 2021-01-09T09:06:35.035741 | 2015-02-09T14:30:24 | 2015-02-09T14:30:24 | 28,540,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,249 | py | import os
from xml.etree import ElementTree

from fireplace.enums import GameTag, PlayReq, Race
class CardXML(object):
    """Read-only wrapper around a single ``<Entity>`` element from the
    card-definition XML.

    Fix: :attr:`targetRace` referenced ``Race`` without importing it, which
    raised ``NameError`` for any card with a target-race requirement; the
    enum is now imported from ``fireplace.enums`` at the top of the module.
    """

    def __init__(self, xml):
        # ``xml`` is an ElementTree.Element for one <Entity> node.
        self.xml = xml

    @property
    def id(self):
        """The card's string ID (the ``CardID`` attribute)."""
        return self.xml.attrib["CardID"]

    @property
    def chooseCards(self):
        """IDs of the 'Choose One' option cards, if any."""
        cards = self.xml.findall("ChooseCard")
        return [tag.attrib["cardID"] for tag in cards]

    @property
    def entourage(self):
        """IDs of the entourage cards this card can produce."""
        cards = self.xml.findall("EntourageCard")
        return [tag.attrib["cardID"] for tag in cards]

    @property
    def requirements(self):
        """Mapping of ``PlayReq`` -> int parameter for the play requirements."""
        reqs = self.xml.findall("Power[PlayRequirement]/PlayRequirement")
        return {PlayReq(int(tag.attrib["reqID"])): int(tag.attrib["param"] or 0) for tag in reqs}

    @property
    def name(self):
        return self.getTag(GameTag.CARDNAME)

    @property
    def description(self):
        return self.getTag(GameTag.CARDTEXT_INHAND) or ""

    def _findTag(self, id):
        """Return all <Tag> children with the given numeric enumID."""
        return self.xml.findall('./Tag[@enumID="%i"]' % (id))

    def _getTag(self, element):
        """Decode one <Tag> element according to its declared type attribute
        (String -> text, Bool -> bool, default Int -> int)."""
        tag_type = element.attrib.get("type", "Int")
        if tag_type == "String":
            return element.text
        value = int(element.attrib["value"])
        if tag_type == "Bool":
            return bool(value)
        return value

    def getTag(self, id):
        """Return the decoded value of tag ``id``, or 0 when absent."""
        element = self._findTag(id)
        if not element:
            return 0
        return self._getTag(element[0])

    @property
    def tags(self):
        """All tags on the card as a GameTag -> value dict."""
        return {GameTag(int(e.attrib["enumID"])): self._getTag(e) for e in self.xml.findall("./Tag")}

    ##
    # Requirement properties

    def _reqParam(self, req):
        """Return the ``param`` of play-requirement ``req``, or 0 when unset."""
        tags = self.xml.findall("Power/PlayRequirement[@reqID='%i']" % (req))
        if tags:
            return int(tags[0].attrib["param"])
        return 0

    @property
    def minMinions(self):
        return self._reqParam(PlayReq.REQ_MINIMUM_TOTAL_MINIONS)

    @property
    def minTargets(self):
        return self._reqParam(PlayReq.REQ_MINIMUM_ENEMY_MINIONS)

    @property
    def targetMaxAttack(self):
        return self._reqParam(PlayReq.REQ_TARGET_MAX_ATTACK)

    @property
    def targetMinAttack(self):
        return self._reqParam(PlayReq.REQ_TARGET_MIN_ATTACK)

    @property
    def targetRace(self):
        race = self._reqParam(PlayReq.REQ_TARGET_WITH_RACE)
        if race:
            # Race is imported at module level (previously missing).
            return Race(race)
def load(path):
    """Parse the card XML file at *path*.

    Returns a tuple ``(db, tree)`` where ``db`` maps card IDs to
    :class:`CardXML` instances and ``tree`` is the parsed ElementTree.
    """
    with open(path, "r") as f:
        tree = ElementTree.parse(f)
    entities = (CardXML(node) for node in tree.findall("Entity"))
    db = {card.id: card for card in entities}
    return db, tree
| [
"jerome@leclan.ch"
] | jerome@leclan.ch |
0d8ef984ce7ea86387d1180d132d938cf7403a17 | 3faf4b9fb76145b2326446bc6bc190a5712b3b62 | /Algorithms/0069 Sqrt(x).py | f497f2d62b3169bb0db1ed835e9bf8cacdcd6731 | [] | no_license | cravo123/LeetCode | b93c18f3e4ca01ea55f4fdebceca76ccf664e55e | 4c1288c99f78823c7c3bac0ceedd532e64af1258 | refs/heads/master | 2021-07-12T11:10:26.987657 | 2020-06-02T12:24:29 | 2020-06-02T12:24:29 | 152,670,206 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # Solution 1, binary search
class Solution:
    def mySqrt(self, x: int) -> int:
        """Return the integer floor of sqrt(x) using binary search."""
        lo, hi = 0, x
        while lo <= hi:
            mid = (lo + hi) // 2
            if mid * mid <= x:
                lo = mid + 1
            else:
                hi = mid - 1
        # hi is the largest value whose square does not exceed x.
        return hi
"cc3630@columbia.edu"
] | cc3630@columbia.edu |
9c13a6343b13beab7294fdc91e61d75d0e85aa2c | 710f5c2d7147ff2670f496fd28f49959cd0995b7 | /modules/Sec 27 - Checking HTmiss tails for data in the event display/listings/CMSSW and AlphaTools modded scripts/EventDisplay/Displayer_QCD_cfg.py | 2a96e32c1bb5fc05c7c90ca09469d3716f21b0fd | [] | no_license | eshwen/lab-book | 1480e79c99b1ef3d203d9f100cc26b9d7dd7676d | 6268de336e876f60166acb0c4a8c36347cf7fba7 | refs/heads/master | 2023-01-14T15:44:37.569542 | 2020-11-20T12:43:34 | 2020-11-20T12:43:34 | 258,014,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | #!/usr/bin/env python
from Analyzers.EventDisplay.EventDisplay_cfi import *
from Skimmers.DodgyQcdFinder import DodgyQcdFinder
from Skimmers.TreeSkimmer import TreeSkimmer
#____________________________________________________________________________||
# PSet
# Base parameter set: 2016 signal selection; run every event on one core.
process.PSet = psetSignal2016
process.PSet.reweightTrees = False
process.nEvents = -1
process.nCores = 1
#____________________________________________________________________________||
# Sample
# Restrict the run to QCD HT-binned samples.
process.PSet.sampleSelection = ["QCD_HT"]
#____________________________________________________________________________||
# Per-sample output templates for the event display and event list.
eventDisplayer.outPathTemplate = "%s_QcdDisplay.pdf"
eventDisplayer.drawSetting = DefaultSetting
eventPrinter.outPathTemplate = "%s_QcdEventList.txt"
dodgyQcdFinder = DodgyQcdFinder('dodgyQcdFinder')
treeSkimmer = TreeSkimmer("treeSkimmer")
#____________________________________________________________________________||
# Output
process.outputFilename = "QcdDisplay.root"
process.outputDir = "output"
#____________________________________________________________________________||
# Selection first (dodgyQcdFinder), then the display/print modules.
process.sequence = sequence2016
#process.sequence.append(treeSkimmer)
process.sequence.append(dodgyQcdFinder)
eventDisplaySequence = ModuleSequence( [
    phyObj         ,
    eventDisplayer ,
    eventPrinter
] )
process.sequence.extend( eventDisplaySequence )
#process.sequence = eventDisplaySequence
#____________________________________________________________________________||
# Silence ROOT info messages below error level.
r.gROOT.ProcessLine( "gErrorIgnoreLevel = 1001;")
#____________________________________________________________________________||
process.Run()
"eshwen.bhal@bristol.ac.uk"
] | eshwen.bhal@bristol.ac.uk |
caf426fb6b3896ab02ef00a1530c8814ea180c7e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_loses.py | 0840e4d32556b66b83bf20f2c03d10e140845164 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
#calss header
class _LOSES():
def __init__(self,):
self.name = "LOSES"
self.definitions = lose
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['lose']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
93b6f62f740fcbfffef22180c6a16d018a08f402 | eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e | /atcoder/abc/abc101-200/abc157/c.py | 97d8fe428acb79d8525b168173b3661ed78c5255 | [] | no_license | reo11/AtCoder | 4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9 | 69c6d67f05cb9190d8fb07204488cd7ce4d0bed2 | refs/heads/master | 2023-08-28T10:54:50.859288 | 2023-08-22T18:52:47 | 2023-08-22T18:52:47 | 162,085,118 | 4 | 0 | null | 2023-07-01T14:17:28 | 2018-12-17T06:31:10 | Python | UTF-8 | Python | false | false | 531 | py | n, m = map(int, input().split())
# Build the smallest n-digit number consistent with m digit constraints
# (each constraint fixes digit s to value c); "?" marks unconstrained digits.
ans = ["?" for _ in range(n)]
f = True
for i in range(m):
    s, c = map(int, input().split())
    if ans[s - 1] != "?":
        # Conflicting constraints on the same digit -> no valid number.
        if ans[s - 1] != c:
            f = False
    else:
        ans[s - 1] = c
# A multi-digit number cannot start with 0.
if n >= 2 and ans[0] == 0:
    f = False
output = ""
if f:
    # Leading digit defaults to 1 (no leading zero); others default to 0.
    if n >= 2 and ans[0] == "?":
        ans[0] = 1
    for i in range(n):
        if ans[i] == "?":
            ans[i] = 0
        output += str(ans[i])
    print(output)
else:
    print(-1)
| [
"reohirao116@gmail.com"
] | reohirao116@gmail.com |
ddc5045dd9230e4a43e6c46425dccc2aa7fd3e20 | 8b9e9de996cedd31561c14238fe655c202692c39 | /sort/leetcode_Largest_Number.py | 9ee6de93bab9f52c6ca7ce2d617c8ebecac0d724 | [] | no_license | monkeylyf/interviewjam | 0049bc1d79e6ae88ca6d746b05d07b9e65bc9983 | 33c623f226981942780751554f0593f2c71cf458 | refs/heads/master | 2021-07-20T18:25:37.537856 | 2021-02-19T03:26:16 | 2021-02-19T03:26:16 | 6,741,986 | 59 | 31 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | """leetcode_Largest_Number.
leetcode
Given a list of non negative integers, arrange them such that they form the
largest number.
For example, given [3, 30, 34, 5, 9], the largest formed number is 9534330.
Note: The result may be very large, so you need to return a string instead of
an integer.
"""
class Solution(object):
    """Arrange numbers so their concatenation forms the largest number.

    Two approaches are provided: a hand-rolled merge sort
    (``largestNumberComplicated``) and a comparator-based sort
    (``largestNumber``). Both use the same ordering rule: digit string
    ``a`` precedes ``b`` exactly when ``a + b > b + a``.
    """

    def merge(self, left, right):
        """Merge two lists of digit strings already ordered by the
        concatenation rule, preserving that order in the result."""
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            # Compare the two possible concatenations rather than the raw
            # strings: '9' must come before '34' because '934' > '349'.
            if left[i] + right[j] > right[j] + left[i]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        # Append whatever remains on either side.
        merged.extend(left[i:])
        merged.extend(right[j:])
        return merged

    def merge_sort(self, arr):
        """Sort digit strings by the concatenation rule via merge sort."""
        if len(arr) <= 1:
            return arr
        # Floor division keeps the index an int on both Python 2 and 3;
        # the original '/' yields a float under Python 3 and breaks slicing.
        half = len(arr) // 2
        left = self.merge_sort(arr[:half])
        right = self.merge_sort(arr[half:])
        return self.merge(left, right)

    def largestNumberComplicated(self, num):
        """Return the largest formable number as an int, using the explicit
        merge sort above."""
        # A list comprehension (not a bare map()) so len()/slicing work on
        # Python 3, where map() returns an iterator.
        arr = self.merge_sort([str(i) for i in num])
        return int(''.join(arr))

    def largestNumber(self, nums):
        """Return the largest formable number as a string.

        The key is the pairwise comparator: ``x`` precedes ``y`` when
        ``x + y > y + x``. ``cmp_to_key`` replaces the Python-2-only
        ``cmp=`` keyword of sorted(). The int() round-trip collapses an
        all-zero input such as [0, 0] to '0'.
        """
        from functools import cmp_to_key
        strs = [str(i) for i in nums]
        strs.sort(key=cmp_to_key(lambda x, y: -1 if x + y > y + x else 1))
        return str(int(''.join(strs)))
def main():
    """Smoke-test the comparator-based solution on the canonical example."""
    digits = [3, 30, 34, 5, 9]
    solver = Solution()
    assert solver.largestNumber(digits) == '9534330'
if __name__ == '__main__':
    # Run the smoke test only when executed directly, not on import.
    main()
| [
"laituan1986@gmail.com"
] | laituan1986@gmail.com |
31f38fb10ff9c515fced3e082144d14a56b9e51a | 3a533d1503f9a1c767ecd3a29885add49fff4f18 | /saleor/checkout/tests/test_associate_checkout_with_account.py | 10069a85c1a8ea9829612f13ebc9e56f1bd484d8 | [
"BSD-3-Clause"
] | permissive | jonserna/saleor | 0c1e4297e10e0a0ce530b5296f6b4488f524c145 | b7d1b320e096d99567d3fa7bc4780862809d19ac | refs/heads/master | 2023-06-25T17:25:17.459739 | 2023-06-19T14:05:41 | 2023-06-19T14:05:41 | 186,167,599 | 0 | 0 | BSD-3-Clause | 2019-12-29T15:46:40 | 2019-05-11T18:21:31 | TypeScript | UTF-8 | Python | false | false | 3,472 | py | import pytest
from saleor.channel import MarkAsPaidStrategy
from saleor.checkout.complete_checkout import complete_checkout
from ...plugins.manager import get_plugins_manager
from ..fetch import fetch_checkout_info, fetch_checkout_lines
@pytest.mark.django_db
@pytest.mark.parametrize(
    "paid_strategy",
    [
        MarkAsPaidStrategy.TRANSACTION_FLOW,
        MarkAsPaidStrategy.PAYMENT_FLOW,
    ],
)
def test_associate_guest_checkout_with_account_if_exists(
    paid_strategy, app, address, checkout, customer_user
):
    # given: a guest checkout whose email matches an existing active account
    checkout.email = "test@example.com"
    checkout.billing_address = address
    checkout.save()

    user = None

    manager = get_plugins_manager()
    lines, _ = fetch_checkout_lines(checkout)
    checkout_info = fetch_checkout_info(checkout, lines, manager)
    # Bugfix: the original used ``==`` here (a no-op comparison), so the
    # parametrized strategy was never applied and both cases exercised the
    # channel's default. NOTE(review): the in-memory assignment should be
    # enough since complete_checkout reads from this checkout_info; add a
    # channel.save() if anything re-fetches the channel — confirm.
    checkout_info.channel.order_mark_as_paid_strategy = paid_strategy

    # when: the checkout is completed anonymously
    order, _, _ = complete_checkout(
        checkout_info=checkout_info,
        manager=manager,
        lines=lines,
        payment_data={},
        store_source=False,
        user=user,
        app=app,
    )

    # then: the order is associated with the matching account
    assert order.user == customer_user
@pytest.mark.django_db
@pytest.mark.parametrize(
"paid_strategy",
[
MarkAsPaidStrategy.TRANSACTION_FLOW,
MarkAsPaidStrategy.PAYMENT_FLOW,
],
)
def test_associate_guest_checkout_with_account_if_exists_with_guest_user(
paid_strategy,
app,
address,
checkout,
):
# set the checkout email
checkout.email = "guest@email.com"
checkout.billing_address = address
checkout.save()
user = None
manager = get_plugins_manager()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, manager)
checkout_info.channel.order_mark_as_paid_strategy == paid_strategy
# call the complete_checkout function with the checkout object
order, _, _ = complete_checkout(
checkout_info=checkout_info,
manager=manager,
lines=lines,
payment_data={},
store_source=False,
user=user,
app=app,
)
# assert that the order is associated with the correct user
assert order.user is None
@pytest.mark.django_db
@pytest.mark.parametrize(
    "paid_strategy",
    [
        MarkAsPaidStrategy.TRANSACTION_FLOW,
        MarkAsPaidStrategy.PAYMENT_FLOW,
    ],
)
def test_associate_guest_checkout_with_account_if_exists_with_inactive_user(
    paid_strategy, app, address, checkout, customer_user
):
    # given: a guest checkout whose email matches an account that is inactive
    checkout.email = "test@example.com"
    checkout.billing_address = address
    checkout.save()

    # deactivate the customer user
    customer_user.is_active = False
    customer_user.save()

    user = None

    manager = get_plugins_manager()
    lines, _ = fetch_checkout_lines(checkout)
    checkout_info = fetch_checkout_info(checkout, lines, manager)
    # Bugfix: the original used ``==`` (a no-op comparison), so the
    # parametrized strategy was never actually applied to the channel.
    checkout_info.channel.order_mark_as_paid_strategy = paid_strategy

    # when: the checkout is completed anonymously
    order, _, _ = complete_checkout(
        checkout_info=checkout_info,
        manager=manager,
        lines=lines,
        payment_data={},
        store_source=False,
        user=user,
        app=app,
    )

    # then: inactive accounts are never associated with the order
    assert order.user is None
| [
"noreply@github.com"
] | jonserna.noreply@github.com |
141fa3e6689914550b1d80f29b77f3383625c906 | 75ef407d2a62cf54c597874addef32e6a33cd2e2 | /shining_present/app/tests/test_views.py | ec5714c0c7d9d9d4a927ff10718512409bc9d52b | [] | no_license | Wishez/portfolio | 3bce329e93c052a1f684dd89a4dd8387d3d98d47 | 6c28d98b92cfbdfe12c3652b6125439d2b20fea2 | refs/heads/master | 2022-10-29T04:08:08.816862 | 2022-10-25T16:17:33 | 2022-10-25T18:26:35 | 77,537,307 | 0 | 0 | null | 2022-10-25T18:27:45 | 2016-12-28T14:09:24 | HTML | UTF-8 | Python | false | false | 166 | py | from django.test import TestCase
from django.urls import reverse
from words.models import User
import json
class WordsApi(TestCase):
    """Placeholder test case for the words API; no tests implemented yet."""

    def setUp(self):
        # No fixtures required yet.
        pass
"shiningfinger@list.ru"
] | shiningfinger@list.ru |
e026c5497766b23a902fc28512758abba5bedc51 | 0f79fd61dc47fcafe22f83151c4cf5f2f013a992 | /BOJ/7662.py | c3b3264955579d69721b547e2b77127267ed183c | [] | no_license | sangm1n/problem-solving | 670e119f28b0f0e293dbc98fc8a1aea74ea465ab | bc03f8ea9a6a4af5d58f8c45c41e9f6923f55c62 | refs/heads/master | 2023-04-22T17:56:21.967766 | 2021-05-05T12:34:01 | 2021-05-05T12:34:01 | 282,863,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | """
author : Lee Sang Min
github : https://github.com/sangm1n
e-mail : dltkd96als@naver.com
title : 이중 우선순위 큐
description : binary tree
"""
import sys
import bisect
from collections import deque
input = sys.stdin.readline
T = int(input())
for _ in range(T):
    N = int(input())
    # All live values are kept sorted ascending, duplicates included:
    # result[0] is the minimum, result[-1] the maximum.
    result = deque()
    for _ in range(N):
        op, raw = input().split()
        value = int(raw)
        if op == 'I':
            # O(n) insertion, but it keeps the deque sorted so both
            # extremes are readable in O(1).
            bisect.insort_left(result, value)
        else:
            # A delete on an empty queue is ignored, per the problem statement.
            if not result:
                continue
            if value == 1:
                # Bugfix: the original tracked duplicate counts in a side
                # dict and skipped popping the deque while a count was > 1,
                # leaving stale copies behind and corrupting later answers
                # (e.g. I 5, I 5, D 1, D 1 still reported "5 5" instead of
                # EMPTY). Popping the sorted deque directly is always correct.
                result.pop()
            else:
                result.popleft()
    if not result:
        print('EMPTY')
    else:
        # Bugfix: the original printed with end=' ' and no newline, gluing
        # every test case's answer onto one output line.
        print(result[-1], result[0])
| [
"dltkd96als@naver.com"
] | dltkd96als@naver.com |
944f91c1b3cd78c4f9f68c83ec19177d21c2d296 | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/core/upgrades/20170601103050_add_hidden_flag_to_admin_and_org_units/upgrade.py | 82c1d396e72dab97ecf65628cfd5c0009c66ff1a | [] | no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 1,329 | py | from opengever.core.upgrade import SchemaMigration
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy.sql.expression import column
from sqlalchemy.sql.expression import table
class AddHiddenFlagToAdminAndOrgUnits(SchemaMigration):
    """Add a non-nullable ``hidden`` flag to admin_units and org_units."""

    def migrate(self):
        """Run the migration for both unit tables.

        Tables that already carry the column are skipped so the migration
        stays re-runnable.
        """
        for name in ('admin_units', 'org_units'):
            if not self.has_column(name, 'hidden'):
                self.add_column(name)
                self.insert_default_value(name)
                self.make_column_non_nullable(name)

    def add_column(self, tablename):
        """Add ``hidden`` as a nullable column so existing rows stay valid."""
        hidden = Column('hidden', Boolean, default=False, nullable=True)
        self.op.add_column(tablename, hidden)

    def insert_default_value(self, tablename):
        """Backfill ``hidden`` with False for every existing row."""
        units = table(tablename, column('unit_id'), column('hidden'))
        self.connection.execute(units.update().values(hidden=False))

    def make_column_non_nullable(self, tablename):
        """Tighten ``hidden`` to NOT NULL once every row has a value."""
        self.op.alter_column(tablename, 'hidden',
                             existing_type=Boolean, nullable=False)

    def has_column(self, table_name, column_name):
        """Return True if ``table_name`` already defines ``column_name``."""
        reflected = self.metadata.tables.get(table_name)
        return column_name in reflected.columns
| [
"niklaus.johner@4teamwork.ch"
] | niklaus.johner@4teamwork.ch |
7ea8482e03c5eade36fb35bdf2fbf45a1262edc1 | 9a05e1e8c950b091124d805ea70f24d2837b827c | /daydayup/cema_python/threeday/myrandom.py | 573b4e0587e70c4448c3d302f58515c7ce60696b | [] | no_license | fanzongpeng/mywork | 20676a9fe0e0599461a756ad194e4bd35aad4668 | aa6d044bbab3c0288de48888b2cc7dbd7785c91b | refs/heads/master | 2022-05-31T06:03:26.826914 | 2020-04-30T09:50:22 | 2020-04-30T09:50:22 | 257,189,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | import time
import os
# The generated test report is named from the current time plus the full
# file name of this script.
report_time = time.strftime('%Y%m%d%H%M%S', time.localtime())
print(report_time+"_"+os.path.basename(__file__))

import random
import string

# Randomly pick one element of a sequence to use as test data.
# The sequence can be a list, a string, etc.
courses = ['python', 'java', 'selenium', 'appium']
random_course = random.choice(courses)
print(random_course)

# Build test data by randomly concatenating 8 characters drawn from
# uppercase letters, lowercase letters and digits.
rad_str = ''.join(random.choice(string.ascii_uppercase\
                                + string.ascii_lowercase + string.digits)\
                  for _ in range(8))
print(rad_str+"@163.com")
| [
"18210023228.com"
] | 18210023228.com |
aaf2906e9feb94f830ed4364ca9d10eabc5ba37c | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/galeone_dynamic-training-bench/dynamic-training-bench-master/models/interfaces/Classifier.py | 1158c78d87dd91fd15cfecb3b9dd1198e850887f | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,603 | py | #Copyright (C) 2016 Paolo Galeone <nessuno@nerdz.eu>
#
#This Source Code Form is subject to the terms of the Mozilla Public
#License, v. 2.0. If a copy of the MPL was not distributed with this
#file, you can obtain one at http://mozilla.org/MPL/2.0/.
#Exhibit B is not attached; this software is compatible with the
#licenses expressed under Section 1.12 of the MPL v2.
"""Define the interface to implement to work with classifiers"""
from abc import ABCMeta, abstractmethod
class Classifier(object, metaclass=ABCMeta):
"""Classifier is the interface that classifiers must implement"""
@abstractmethod
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
"""Define the model with its inputs.
Use this function to define the model in training and when exporting the model
in the protobuf format.
Args:
images: model input
num_classes: number of classes to predict
train_phase: set it to True when defining the model, during train
l2_penalty: float value, weight decay (l2) penalty
Returns:
is_training_: tf.bool placeholder enable/disable training ops at run time
logits: the model output
"""
@abstractmethod
def loss(self, logits, labels):
"""Return the loss operation between logits and labels
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
| [
"659338505@qq.com"
] | 659338505@qq.com |
423a6dd38b0588fd8027cf1f7bd285b72f3283cf | 8a5ccfbd09fdc3eb42e8240c0b7ceaf981f27814 | /astropy_stark/astropy_stark/vortexa_rli.py | 92839783200f436a3ff34386f055b77f1941328b | [] | no_license | hlabathems/pycecream | 97edfd388e32ab12b22765debab31ee8c4929ab4 | cd52937c3ff053dede0b02803933ba58789d5ff3 | refs/heads/master | 2020-06-09T22:46:14.114693 | 2019-06-19T17:42:24 | 2019-06-19T17:42:24 | 193,521,752 | 0 | 1 | null | 2019-06-24T14:30:05 | 2019-06-24T14:30:04 | null | UTF-8 | Python | false | false | 2,486 | py | import vortexa_makefake as vmf
import numpy as np
#dat ntimes x 2 matrix first column is the driver second is the response
def rli(dat):
    """Set up the matrices for a regularised linear inversion.

    Parameters
    ----------
    dat : ndarray, shape (ntimes, 2)
        Column 0 is the driving light curve; column 1 is the response
        (the response column is not used yet).

    Returns
    -------
    s : ndarray, shape (n, n)
        Second-difference smoothing operator (see thesis p. 123). The two
        rows at each edge fall back to a bare 2 on the diagonal.
    dmat : ndarray, shape (n, n)
        ``dmat[i, j] = drive[i - j]``, with negative look-back indices
        clamped to the first sample.

    Notes
    -----
    The Hessian construction sketched in the commented-out code below is
    still unimplemented. The original returned an empty tuple, discarding
    both computed matrices; returning them makes the work usable/testable.
    """
    n = len(dat[:, 0])
    npsi = n
    drive = dat[:, 0]

    # Smoothing matrix: [0.5, -2, 3, -2, 0.5] band in the interior rows,
    # a single 2 on the diagonal for the first/last two rows.
    s = np.zeros((n, npsi))
    for ik in range(n):
        if ik == 0:
            s[ik, 0] = 2
        elif ik == 1:
            s[ik, 1] = 2
        elif ik == n - 1:
            s[ik, -1] = 2
        elif ik == n - 2:
            s[ik, -2] = 2
        else:
            s[ik, ik - 2:ik + 3] = np.array([0.5, -2, 3, -2, 0.5])

    # Matrix of driver values at look-back times: row ik holds the driver
    # sampled at ik, ik-1, ..., clamped at index 0 before the series starts.
    # (The original also computed the unused driver mean `dmk`; dropped.)
    dmat = np.zeros((n, npsi))
    for ik in range(n):
        cols = np.arange(npsi)
        ilb = np.arange(ik, ik - npsi, -1)
        ilb[ilb < 0] = 0
        dmat[ik, cols] = drive[ilb]

    return s, dmat
#for ik in range(npsi):
# for ij in range(1,npsi):
#
##sum = 0.0
# for it3 in range(1,nf):
# sum = sum + (xitp(it3,ij) - dmj)*(xitp(it3,ik) - dmk)/sig2(it3)
#!write(*,*) it3,xitp(it3,ij),xitp(it3,ik),sig2(it3)
#enddo
#hesmat(ij,ik) = sum
#
#
#sum = 0.d0
#!now add constraint from smoothness function
#if ((ik .gt. 2) .and. (ik .lt. npsi-1)) then
# if ((ij .eq. ik - 2) .or. (ij .eq. ik + 2)) then
# sum = 0.25 !0.5
# else if ((ij .eq. ik -1) .or. (ij .eq. ik + 1)) then
# sum = -1. !*2
# else if (ij .eq. ik) then
# sum = 1.5 !3
# endif
#
#
# !if (ij .eq. ik - 1) then
# !sum = sum - alpha*0.5
# !else if (ij .eq. ik) then
# !sum = sum + alpha*1.
# !else if (ij .eq. ik + 1) then
# !sum = sum - alpha*0.5
# !else
# !sum = sum - alpha*0.
# !endif
#
#else if ((ik .le. 2) .or. (ik .ge. npsi - 1)) then
# if (ij .eq. ik) then
# sum = 1. !+ alpha*1.!2
# else
# sum = 0.
# endif
#endif
#
#!if (ij .eq. 1) then !! edge effects. This minimises psi^2 for the edges to pull tf down to 0 at ends
#!sum = sum - 0.0!1.e6*alpha*1.
#!else
#!sum = sum + alpha*0.
#!endif
#!else if (ik .eq. npsi) then
#!if (ij .eq. npsi) then
#!sum = sum - 0.0!1.e6*alpha*1.
#!else
#!sum = sum + alpha*0.
#!endif
#
#!endif
#!else if (ik .eq. 1) then !what to do about edge effects (ignore for now)
#!if (ij .eq. 1) then
#!sum = sum + alpha*1.
#!else if (ij .eq. 2) then
#!sum = sum + alpha*
#!endif
#
#smoothmat(ij,ik) = sum
#
#
#
#!write(*,*) ij,ik,sum
#!read(*,*)
#enddo
# Smoke test: build a fake driver/response pair and feed it to rli().
tlo = 0.0  # start time of the fake light curves
thi = 100.0  # end time
dt = 0.01  # sampling interval
# NOTE(review): makefake appears to return objects with a pandas-like
# ``.values`` array; column 1 is taken as the flux of each series — confirm.
a = vmf.makefake(tlo,thi,dt,shift = [2,3],noise=0.1)
din = np.array([a[0].values[:,1],a[1].values[:,1]]).T
rli(din)
| [
"david.starkey@vortexa.com"
] | david.starkey@vortexa.com |
34264c770d85f40717b542168a87afe11e4de806 | 28729bdabcb1c83429752bc15b14f2ac1950028f | /firmware/python_modules/ttgo/display.py | cec7ba3b5b7ef2de793eae865ba1176f33d89bf2 | [] | no_license | badgeteam/ESP32-platform-firmware | 434020769b36df164fd1719b3bcf996851d55294 | 04282f7fe84ddd0f0c3887fa948da68a9ade8126 | refs/heads/master | 2023-08-17T07:07:51.048777 | 2023-08-14T20:53:37 | 2023-08-14T20:53:37 | 194,534,857 | 31 | 49 | null | 2023-08-15T21:00:09 | 2019-06-30T15:59:30 | C | UTF-8 | Python | false | false | 196 | py | import framebuf, ssd1306
# 128x64 monochrome display: 128 * 64 / 8 = 1024 bytes of pixel data.
rawbuffer = bytearray(1024)
framebuffer = framebuf.FrameBuffer(rawbuffer, 128, 64, framebuf.MONO_VLSB)
framebuffer.fill(0)  # start with a cleared (all-off) screen
def write():
    """Flush the shared framebuffer contents to the ssd1306 driver."""
    ssd1306.write(bytes(rawbuffer))
| [
"renze@rnplus.nl"
] | renze@rnplus.nl |
46457426cc4092cec5124c9d94f0b6c5bed4816f | 177d7066f6a0326ed937a56174d7e2241653929a | /Bit Manipulation/lc342.py | dd9eb4da4e68b3189d6b9a6a91d470909dada499 | [] | no_license | jasonusaco/Leetcode-Practice | 276bcdb62b28806b3d297338882f4b1eef56cc13 | 91dc73202eb9952a6064013ef4ed20dfa4137c01 | refs/heads/master | 2020-07-06T08:29:09.419062 | 2019-10-10T01:43:03 | 2019-10-10T01:43:03 | 202,955,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | """
Power of X的万能解法,取余
如果每次将这个数除以x没有余数,直到得到数字1,
那么这个数就是x的若干次幂。
"""
class Solution:
    """Generic "power of x" check by repeated division: keep dividing by x
    while it divides evenly; the number is a power of x iff we end at 1."""

    def isPowerOfFour(self, num):
        """Return True if ``num`` is a power of four (4**0 == 1 counts)."""
        if num:
            while num % 4 == 0:
                # Bugfix: floor division keeps exact integer arithmetic.
                # The original ``num /= 4`` produced floats, whose rounding
                # made large non-powers such as 4 * (2**60 + 1) pass.
                num //= 4
            return num == 1
        return False
| [
"yangyx@raysdata.com"
] | yangyx@raysdata.com |
8cc684f45b74a98529b8ff69e60bbeb09dcfc374 | 33a50bb13812090a36257078522b798762978c66 | /top/api/rest/CrmGroupAppendRequest.py | 29f96fdca7079fb9b0b072478260fe71b7454713 | [] | no_license | aa3632840/quanlin | 52ac862073608cd5b977769c14a7f6dcfb556678 | 2890d35fa87367d77e295009f2d911d4b9b56761 | refs/heads/master | 2021-01-10T22:05:14.076949 | 2014-10-25T02:28:15 | 2014-10-25T02:28:15 | 23,178,087 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | '''
Created by auto_sdk on 2014-09-08 16:48:02
'''
from top.api.base import RestApi
class CrmGroupAppendRequest(RestApi):
    """Request object for the ``taobao.crm.group.append`` TOP API call."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; the caller fills these in before executing.
        self.from_group_id = None
        self.to_group_id = None

    def getapiname(self):
        """Return the TOP API method name this request maps to."""
        return 'taobao.crm.group.append'
| [
"262708239@qq.com"
] | 262708239@qq.com |
93baf278f43051a2a6f0d658c2e09dec5bf62932 | f8ffa8ff257266df3de9d20d95b291e393f88434 | /Python - advanced/zajecia12/alch/baza_sqla02.py | a999f1f5b033b5c8a434b71097c8f37b70ef8b7b | [] | no_license | janiszewskibartlomiej/Python_Code_Me_Gda | c0583c068ef08b6130398ddf93c3a3d1a843b487 | 7568de2a9acf80bab1429bb55bafd89daad9b729 | refs/heads/master | 2020-03-30T05:06:26.757033 | 2020-03-02T08:53:28 | 2020-03-02T08:53:28 | 150,781,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | from sqlalchemy.orm import sessionmaker
from baza_model import Produkt
from sqlalchemy import create_engine
if __name__ == '__main__':
engine = create_engine('sqlite:///baza.db')
DBSession = sessionmaker(bind=engine)
session = DBSession()
nowy_produkt = Produkt(nazwa='Mleko Świeże 2%',
cena=1.99,
jednostka='op')
# uwaga! autoincrement dostajemy "za darmo"
session.add(nowy_produkt)
session.commit()
| [
"janiszewski.bartlomiej@gmail.com"
] | janiszewski.bartlomiej@gmail.com |
a278aa6fd4ed837e85f7ce4606301bc9bbe7f072 | 49ae5436d6feccc0f2572c1d1e22a980fe0e1e36 | /pydl/goddard/misc/__init__.py | b181f27f01981d5dea0b1cf0f031e5765f658fe2 | [] | no_license | linan7788626/pydl | 4e0f290c75ab78c22f5627e8ad8ea8cb040ba7f9 | 8ab8d03972990e3645d8d62e53cd66d0780698a2 | refs/heads/master | 2020-12-28T20:42:19.938393 | 2014-10-24T20:18:39 | 2014-10-24T20:18:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
This subpackage corresponds to the misc directory in the Goddard utilities.
"""
from .cirrange import cirrange
from .readcol import readcol
| [
"benjamin.weaver@nyu.edu"
] | benjamin.weaver@nyu.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.