blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8b7d0a3296d50691cf8a0cffe573214b9f553a5d | f68afe06e4bbf3d523584852063e767e53441b2b | /Toontown/toontown/coghq/CogHQExterior.py | 07ead6fdae6591c90c12eaf8a283850ce41ffca2 | [] | no_license | DankMickey/Toontown-Offline-Squirting-Flower-Modded- | eb18908e7a35a5f7fc95871814207858b94e2600 | 384754c6d97950468bb62ddd8961c564097673a9 | refs/heads/master | 2021-01-19T17:53:36.591832 | 2017-01-15T02:00:04 | 2017-01-15T02:00:04 | 34,639,744 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,228 | py | from direct.directnotify import DirectNotifyGlobal
from toontown.battle import BattlePlace
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
from otp.distributed.TelemetryLimiter import RotationLimitToH, TLGatherAllAvs
from otp.nametag import NametagGlobals
class CogHQExterior(BattlePlace.BattlePlace):
notify = DirectNotifyGlobal.directNotify.newCategory('CogHQExterior')
def __init__(self, loader, parentFSM, doneEvent):
BattlePlace.BattlePlace.__init__(self, loader, doneEvent)
self.parentFSM = parentFSM
self.fsm = ClassicFSM.ClassicFSM('CogHQExterior', [State.State('start', self.enterStart, self.exitStart, ['walk',
'tunnelIn',
'teleportIn',
'doorIn']),
State.State('walk', self.enterWalk, self.exitWalk, ['stickerBook',
'teleportOut',
'tunnelOut',
'DFA',
'doorOut',
'died',
'stopped',
'WaitForBattle',
'battle',
'squished',
'stopped']),
State.State('stopped', self.enterStopped, self.exitStopped, ['walk', 'teleportOut', 'stickerBook']),
State.State('doorIn', self.enterDoorIn, self.exitDoorIn, ['walk', 'stopped']),
State.State('doorOut', self.enterDoorOut, self.exitDoorOut, ['walk', 'stopped']),
State.State('stickerBook', self.enterStickerBook, self.exitStickerBook, ['walk',
'DFA',
'WaitForBattle',
'battle',
'tunnelOut',
'doorOut',
'squished',
'died']),
State.State('WaitForBattle', self.enterWaitForBattle, self.exitWaitForBattle, ['battle', 'walk']),
State.State('battle', self.enterBattle, self.exitBattle, ['walk', 'teleportOut', 'died']),
State.State('DFA', self.enterDFA, self.exitDFA, ['DFAReject', 'teleportOut', 'tunnelOut']),
State.State('DFAReject', self.enterDFAReject, self.exitDFAReject, ['walk']),
State.State('squished', self.enterSquished, self.exitSquished, ['walk', 'died', 'teleportOut']),
State.State('teleportIn', self.enterTeleportIn, self.exitTeleportIn, ['walk', 'WaitForBattle', 'battle']),
State.State('teleportOut', self.enterTeleportOut, self.exitTeleportOut, ['teleportIn', 'final', 'WaitForBattle']),
State.State('died', self.enterDied, self.exitDied, ['quietZone']),
State.State('tunnelIn', self.enterTunnelIn, self.exitTunnelIn, ['walk', 'WaitForBattle', 'battle']),
State.State('tunnelOut', self.enterTunnelOut, self.exitTunnelOut, ['final']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
def load(self):
self.parentFSM.getStateNamed('cogHQExterior').addChild(self.fsm)
BattlePlace.BattlePlace.load(self)
def unload(self):
self.parentFSM.getStateNamed('cogHQExterior').removeChild(self.fsm)
del self.fsm
BattlePlace.BattlePlace.unload(self)
def enter(self, requestStatus):
self.zoneId = requestStatus['zoneId']
BattlePlace.BattlePlace.enter(self)
self.fsm.enterInitialState()
base.playMusic(self.loader.music, looping=1, volume=0.8)
self.loader.geom.reparentTo(render)
self.nodeList = [self.loader.geom]
self._telemLimiter = TLGatherAllAvs('CogHQExterior', RotationLimitToH)
self.accept('doorDoneEvent', self.handleDoorDoneEvent)
self.accept('DistributedDoor_doorTrigger', self.handleDoorTrigger)
NametagGlobals.setMasterArrowsOn(1)
self.tunnelOriginList = base.cr.hoodMgr.addLinkTunnelHooks(self, self.nodeList, self.zoneId)
how = requestStatus['how']
self.fsm.request(how, [requestStatus])
def exit(self):
self.fsm.requestFinalState()
self._telemLimiter.destroy()
del self._telemLimiter
self.loader.music.stop()
for node in self.tunnelOriginList:
node.removeNode()
del self.tunnelOriginList
if self.loader.geom:
self.loader.geom.reparentTo(hidden)
self.ignoreAll()
BattlePlace.BattlePlace.exit(self)
def enterTunnelOut(self, requestStatus):
fromZoneId = self.zoneId - self.zoneId % 100
tunnelName = base.cr.hoodMgr.makeLinkTunnelName(self.loader.hood.id, fromZoneId)
requestStatus['tunnelName'] = tunnelName
BattlePlace.BattlePlace.enterTunnelOut(self, requestStatus)
def enterTeleportIn(self, requestStatus):
x, y, z, h, p, r = base.cr.hoodMgr.getPlaygroundCenterFromId(self.loader.hood.id)
base.localAvatar.setPosHpr(render, x, y, z, h, p, r)
BattlePlace.BattlePlace.enterTeleportIn(self, requestStatus)
def enterTeleportOut(self, requestStatus, callback = None):
if requestStatus.has_key('battle'):
self.__teleportOutDone(requestStatus)
else:
BattlePlace.BattlePlace.enterTeleportOut(self, requestStatus, self.__teleportOutDone)
def __teleportOutDone(self, requestStatus):
hoodId = requestStatus['hoodId']
zoneId = requestStatus['zoneId']
avId = requestStatus['avId']
shardId = requestStatus['shardId']
if hoodId == self.loader.hood.hoodId and zoneId == self.loader.hood.hoodId and shardId == None:
self.fsm.request('teleportIn', [requestStatus])
elif hoodId == ToontownGlobals.MyEstate:
self.getEstateZoneAndGoHome(requestStatus)
else:
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
return
def exitTeleportOut(self):
BattlePlace.BattlePlace.exitTeleportOut(self)
def enterSquished(self):
base.localAvatar.laffMeter.start()
base.localAvatar.b_setAnimState('Squish')
taskMgr.doMethodLater(2.0, self.handleSquishDone, base.localAvatar.uniqueName('finishSquishTask'))
def handleSquishDone(self, extraArgs = []):
base.cr.playGame.getPlace().setState('walk')
def exitSquished(self):
taskMgr.remove(base.localAvatar.uniqueName('finishSquishTask'))
base.localAvatar.laffMeter.stop()
| [
"jareddarty96@gmail.com"
] | jareddarty96@gmail.com |
31fdae76ee89f60e923d6125f8b0c0979a77f28c | d5b48163d236ca770be8e687f92192e2971397e8 | /d2.py | f4cf2b96e37d9a200f82ba75ba05811c122464ae | [] | no_license | Kunal352000/python_program | 191f5d9c82980eb706e11457c2b5af54b0d2ae95 | 7a1c645f9eab87cc45a593955dcb61b35e2ce434 | refs/heads/main | 2023-07-12T19:06:19.121741 | 2021-08-21T11:58:41 | 2021-08-21T11:58:41 | 376,606,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | str=input("Enter your string: ").casefold()
dic={}
for ch in str:
if ch in dic:
dic[ch]=dic[ch]+1
else:
dic[ch]=1
for i in dic:
print(i,":",dic[i])
| [
"noreply@github.com"
] | Kunal352000.noreply@github.com |
d8030447090aa7eec30c37b4ae3f45b0fd8aeb50 | 60c4255fb0cf7ed817ff09d8113bf404cde8e12b | /env/lib/python2.7/site-packages/django/conf/locale/sv/formats.py | 98efdf170c00c4b092105bb360b3f70f4e8332e7 | [] | no_license | adamjberg/finna-be-octo-ninja | 83aba13f619d4fbfb5308e48336917f0ada0459d | cf16bfcb3d7bb4e878ba0b99ad701b5cda8be34c | refs/heads/master | 2021-01-10T20:19:20.849476 | 2014-01-11T05:42:23 | 2014-01-11T05:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y', # '10/25/06'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| [
"ilikecattle@gmail.com"
] | ilikecattle@gmail.com |
327ba90dccd54195e75cd71f17b040212a27108f | f5863cf378bce80d3aa459941dff79ea3c8adf5d | /Leetcode/80.Remove_Duplicates_from_Sorted_Array_II.py | ed2977219a1a145be0cf2b19d26cfb295afc768f | [] | no_license | Taeg92/Problem_solving | 815c13ae7895708948482eeb05411322be00ac12 | 15c0fe0eda4f77d974451777cb01d10882d8aaa9 | refs/heads/master | 2021-11-18T22:03:21.727840 | 2021-09-06T14:21:09 | 2021-09-06T14:21:09 | 235,335,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
idx = 0
for num in nums:
if idx < 2 or num != nums[idx-2]:
nums[idx] = num
idx = idx + 1
return idx
nums = [1, 1, 1, 2, 2, 3]
# Output : 5
sol = Solution()
print(sol.removeDuplicates(nums))
| [
"gtg92t@gmail.com"
] | gtg92t@gmail.com |
f567892b375898d2f9e2c1370a515b0984a11f34 | f0987e17aea6668158cd334c1fbacfe6286d3c77 | /NITA/lib/jnpr/toby/tmp/RLI/RLI-27K/RLI-27608/Apps/fw_ms_filter_length.py | f3ef23b3af736df13fa3dd6599bba08b7ddaf50c | [] | no_license | fengyun4623/file | 00bf21f952ea3f95ffc9fe18448b244b26b7fadb | 3966c63d48557b0b94303896eed7a767593a4832 | refs/heads/master | 2023-04-02T05:01:25.066052 | 2020-07-29T16:15:31 | 2020-07-29T16:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,259 | py | import sys,time,os,argparse
from grpc.beta import implementations
import firewall_service_pb2,jnx_addr_pb2,authentication_service_pb2
from firewall_service_pb2 import *
from jnx_addr_pb2 import *
from authentication_service_pb2 import *
from grpc.framework.interfaces.face.face import *
parser = argparse.ArgumentParser()
parser.add_argument('-d','--device', help='Input host name',required=True)
parser.add_argument('-ifl','--iflname', help='Input interface name',required=True)
args = parser.parse_args()
device1 = args.device
APP_USER = 'regress'
APP_PASSWORD = 'MaRtInI'
port = 9999
client_id = '101'
def pause():
programPause = raw_input("Enter to continue...")
print "Executing Python app"
pause()
try:
channel = implementations.insecure_channel(host=device1, port=port)
stub = authentication_service_pb2.beta_create_Login_stub(channel)
login_response = stub.LoginCheck(
authentication_service_pb2.LoginRequest(user_name=APP_USER, password=APP_PASSWORD,
client_id=client_id), 100)
if (login_response.result == 1):
print "Login to ", device1, "successful"
else:
print "Login to ", device1, "failed"
raise SystemExit()
fw = firewall_service_pb2.beta_create_AclService_stub(channel)
flag = 0
res=[]
fname = [
'12345678901234567890',
'!@!@!', 'f1', '0000000000', '-1',
'12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678',
'']
for filtname in fname:
IP1 = IpAddress(addr_string='10.1.1.2')
matchIP1 = AclMatchIpAddress(addr=IP1, prefix_len=32, match_op=ACL_MATCH_OP_EQUAL)
term1match1 = AclEntryMatchMultiService(match_addrs=[matchIP1])
t = AclEntryMultiServiceTerminatingAction(action_accept=1)
nt = AclEntryMultiServiceNonTerminatingAction(action_count=AclActionCounter(counter_name="Match1"),action_syslog=1, action_log=1, action_sample=1)
term1Action1 = AclEntryMultiServiceAction(action_t=t, actions_nt=nt)
adj=AclAdjacency(type=ACL_ADJACENCY_AFTER)
term1=AclMultiServiceEntry(ace_name="t1",ace_op=ACL_ENTRY_OPERATION_ADD,adjacency=adj,matches=term1match1,actions=term1Action1)
tlist1=AclEntry(mservice_entry=term1)
filter=AccessList(acl_name = filtname,acl_type = ACL_TYPE_CLASSIC, acl_family = ACL_FAMILY_MULTISERVICE, acl_flag = ACL_FLAGS_NONE, ace_list=[tlist1])
print filter
result = fw.AccessListAdd(filter,10)
print 'Invoking fw.AccessListAdd \nreturn = ', result
if result.status is ACL_STATUS_EOK:
print "AccessListAdd RPC Passed with filter name : %s" % filtname
res.append("AccessListAdd RPC Passed with filter name : %s and returned %s" % (filtname, result))
else:
print "AccessListAdd RPC Failed with filter name : %s" % filtname
res.append("AccessListAdd RPC Failed with filter name : %s and returned %s" % (filtname, result))
flag += 1
pause()
bind=AccessListObjBind(acl=filter,obj_type=ACL_BIND_OBJ_TYPE_INTERFACE,bind_object=AccessListBindObjPoint(intf=args.iflname + '.0'),bind_direction=ACL_BIND_DIRECTION_INPUT,bind_family=ACL_FAMILY_MULTISERVICE)
print bind
bindaddresult=fw.AccessListBindAdd(bind,10)
print 'Invoking fw.AccessListBindAdd \nreturn = ', bindaddresult
if bindaddresult.status is ACL_STATUS_EOK:
print "AccessListBindAdd RPC Passed with filter name : %s" % filtname
res.append("AccessListBindAdd RPC Passed with filter name : %s and returned %s" % (filtname, bindaddresult))
else:
print "AccessListBindAdd RPC Failed with filter name : %s" % filtname
res.append("AccessListBindAdd RPC Failed with filter name : %s and returned %s" % (filtname, bindaddresult))
flag += 1
pause()
binddelresult = fw.AccessListBindDelete(bind,10)
print 'Invoking fw.AccessListBindDelete \nreturn = ', binddelresult
if binddelresult.status is ACL_STATUS_EOK:
print "AccessListBindDelete RPC Passed with filter name : %s" % filtname
res.append("AccessListBindDelete RPC Passed with filter name : %s and returned %s" % (filtname, binddelresult))
else:
print "AccessListBindDelete RPC Failed with filter name : %s" % filtname
res.append("AccessListBindDelete RPC Failed with filter name : %s and returned %s" % (filtname, binddelresult))
flag += 1
pause()
filter = AccessList(acl_name=filtname,acl_family = ACL_FAMILY_MULTISERVICE)
print filter
acldelresult = fw.AccessListDelete(filter,10)
print 'Invoking fw.AccessListDelete \nreturn = ', acldelresult
if acldelresult.status is ACL_STATUS_EOK:
print "AccessListDelete RPC Passed with filter name : %s" % filtname
res.append("AccessListDelete RPC Passed with filter name : %s and returned %s" % (filtname, acldelresult))
else:
print "AccessListDelete RPC Failed with filter name : %s" % filtname
res.append("AccessListDelete RPC Failed with filter name : %s and returned %s" % (filtname, acldelresult))
flag += 1
pause()
print "FINAL RESULT : \n"
for i in res:
print i
if flag > 0:
print "TEST FAILED"
else:
print "TEST PASSED"
except AbortionError as e:
print "code is ", e.code
print "details is ", e.details
while True:
import signal
os.kill(os.getpid(), signal.SIGTERM)
| [
"srigupta@juniper.net"
] | srigupta@juniper.net |
8ae65a6c0f57089c39e0396a9ff9bfa5bed30b79 | 16daab90ef28ada0e3efc4d38f8c4d62df30c43f | /Speaker-Verification/dataloader.py | 590e3aacc8790eed435482c97ef137b2577d717b | [] | no_license | AntoniaLovjer/adversarial_robustness_audio | 7ba60a0bdf50ed428eecdbc3a2adbbf583129877 | 34ffecaf73332698fccb0d90d2932dde09cc77a5 | refs/heads/master | 2020-09-22T12:16:54.420697 | 2019-12-02T14:22:38 | 2019-12-02T14:22:38 | 225,189,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import os
import re
from glob import glob
import torch
import pandas as pd
WORDS = 'yes no up down left right on off stop go silence unknown'.split()
id2name = {i: name for i, name in enumerate(WORDS)}
name2id = {name: i for i, name in id2name.items()}
def load_data(data_dir):
""" Return 2 lists of tuples:
[(user_id, class_id, path), ...] for train
[(user_id, class_id, path), ...] for validation
"""
# Just a simple regexp for paths with three groups:
# prefix, label, user_id
pattern = re.compile("(.+\/)?(\w+)\/([^_]+)_.+wav")
all_files = glob(os.path.join(data_dir, 'train/audio/*/*wav'))
possible = set(WORDS)
data = []
for entry in all_files:
bl_true = True
r = re.match(pattern, entry)
if r:
label, uid = r.group(2), r.group(3)
if label == '_background_noise_':
label = 'silence'
if label not in possible:
label = 'unknown'
label_id = name2id[label]
sample = (uid, label_id, entry)
data.append(sample)
data = pd.DataFrame(data)
data.columns = ['uid', 'label_id', 'path']
return data | [
"google-dl-platform@googlegroups.com"
] | google-dl-platform@googlegroups.com |
8c757e728d9b1d70c2a7f43d7d50e4cad5895c90 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/7485132/snippet.py | eecd6e2d90eb2489e1c6d00d27db4c3719557144 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 1,920 | py | """
The Pool module provides some classes for managing a fixed-size thread-safe pool of functionally identical objects. One use for this is database connections, which tend to take awhile to create.
Pool
class that manages the pool of objects.
Constructor
class used to create new instances of items in the Pool.
For more details, use pydoc or read the docstrings in the code.
Credits : Andy Dustman
(Note: I just extracted the code from the Wayback Machine in order to find it more easily, but I didn't touch that code, all the credits goes to Andy Dustman !)
Version : 0.0.0, aka "Probably the first, last, and only release."
Released: 2002-06-30 00:00 UTC
Stability: Perfect in every way
Original source : http://web.archive.org/web/20070610080245/http://dustman.net/andy/python/Pool/0.0.0/Pool.py
"""
from Queue import Queue, Full, Empty
class Pool(Queue):
"""Manage a fixed-size pool of reusable, identical objects."""
def __init__(self, constructor, poolsize=5):
Queue.__init__(self, poolsize)
self.constructor = constructor
def get(self, block=1):
"""Get an object from the pool or a new one if empty."""
try:
return self.empty() and self.constructor() or Queue.get(self, block)
except Empty:
return self.constructor()
def put(self, obj, block=1):
"""Put an object into the pool if it is not full. The caller must
not use the object after this."""
try:
return self.full() and None or Queue.put(self, obj, block)
except Full:
pass
class Constructor:
"""Returns a constructor that returns apply(function, args, kwargs)
when called."""
def __init__(self, function, *args, **kwargs):
self.f = function
self.args = args
self.kwargs = kwargs
def __call__(self):
return apply(self.f, self.args, self.kwargs) | [
"gistshub@gmail.com"
] | gistshub@gmail.com |
cd337e0d2b894589f48c3ec1e73609dc98c7cf3a | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayCommerceKidsRelationQueryResponse.py | 2e7bc0d8e1af57db2e86b5335fa2c3b97ab35b64 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,091 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.UserInfoVO import UserInfoVO
class AlipayCommerceKidsRelationQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceKidsRelationQueryResponse, self).__init__()
self._relation_list = None
@property
def relation_list(self):
return self._relation_list
@relation_list.setter
def relation_list(self, value):
if isinstance(value, list):
self._relation_list = list()
for i in value:
if isinstance(i, UserInfoVO):
self._relation_list.append(i)
else:
self._relation_list.append(UserInfoVO.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayCommerceKidsRelationQueryResponse, self).parse_response_content(response_content)
if 'relation_list' in response:
self.relation_list = response['relation_list']
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
6d1f6f15112b1fc0a2a3b86385e1f8c6d9ccc2c6 | c94914207f9a3918d0cd09807a101e81a8e7d9a8 | /Finance/Project Alpha/ProjectAlpha/risk.py | d757e8ef74c1c99631a212d1c47fd4a172fc535c | [] | no_license | matthewmercuri/codedeposit | eb8265d6219284ee75cba2e1abf50f1b0ef17a09 | f51d3b1bdacec96af699bc8feb993f6ed60f772c | refs/heads/main | 2023-06-10T21:27:42.550236 | 2021-07-08T20:57:53 | 2021-07-08T20:57:53 | 373,534,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | import numpy as np
''' Ideally, we would want to put these metrics in a df
or database with the return series of the portfolio. How
would this translate to when we use these metrics to backtest
a portfolio that trades?
- want to figure out a way where we can just pass the history
to any one method and have a result outputted
- start with simpler methods like alpha, beta, etc.
- we want to start this class by being applied to the return
df (which would be the return series of the port)
- finally, we want a method that prints out ALL of the metrics
'''
class Risk:
def __init__(self):
pass
def _metrics(self, history):
''' Calls all of the individual methods
- Perhaps this can aggregrate and format too?
'''
def sharpe_ratio(self, returns, rf_rate=0):
returns = np.array(returns)
returns_std = np.std(returns)
sr = (np.mean(returns) - rf_rate) / returns_std
return sr
def treynor_measure(self, history):
pass
def sortino(self):
pass
def max_drawdown(self):
pass
def var(self):
pass
def cvar(self):
pass
def cagr(self):
pass
def roi(self):
pass
def pandl(self):
pass
def alpha(self):
pass
def beta(self):
pass
def std(self):
pass
def r_squared(self):
pass
def corr_matrix(self):
pass
| [
"mercurimatthew@gmail.com"
] | mercurimatthew@gmail.com |
a5cfaddec32dede10839f846563eaef3853c4601 | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/crud/gnmi/models/cisco-ios-xr/Cisco-IOS-XR-ifmgr-cfg/gn-create-xr-ifmgr-cfg-34-ydk.py | a3abe62056bc1ac1ba680e56c1d574ac69b5d763 | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 3,721 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-ifmgr-cfg.
usage: gn-create-xr-ifmgr-cfg-34-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ifmgr_cfg \
as xr_ifmgr_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
def config_interface_configurations(interface_configurations):
"""Add config data to interface_configurations object."""
# configure IPv4 interface
interface_configuration = interface_configurations.InterfaceConfiguration()
interface_configuration.active = "act"
interface_configuration.interface_name = "GigabitEthernet0/0/0/0"
interface_configuration.description = "CONNECTS TO LSR1 (g0/0/0/1)"
mtu = interface_configuration.mtus.Mtu()
mtu.owner = "GigabitEthernet"
mtu.mtu = 9192
interface_configuration.mtus.mtu.append(mtu)
primary = interface_configuration.ipv4_network.addresses.Primary()
primary.address = "172.16.1.0"
primary.netmask = "255.255.255.254"
interface_configuration.ipv4_network.addresses.primary = primary
interface_configuration.statistics.load_interval = 30
interface_configurations.interface_configuration.append(interface_configuration)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="gNMI device (http://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create gNMI provider
repository = Repository(YDK_REPO_DIR+device.hostname)
provider = gNMIServiceProvider(repo=repository,
address=device.hostname,
port=device.port,
username=device.username,
password=device.password)
# create CRUD service
crud = CRUDService()
interface_configurations = xr_ifmgr_cfg.InterfaceConfigurations() # create object
config_interface_configurations(interface_configurations) # add object configuration
# create configuration on gNMI device
crud.create(provider, interface_configurations)
exit()
# End of script
| [
"deom119@gmail.com"
] | deom119@gmail.com |
44d1bb63f6fb5cac8e01d44a916c0761f1f59078 | d58bc2475a41e7c36e22947565c099908f84cfd6 | /samples/openapi3/client/petstore/python-experimental/petstore_api/paths/user_logout/get.py | c8a52c7734766188dde2478208cb0fdeff83f8cb | [
"Apache-2.0"
] | permissive | yaronius/openapi-generator | d8390dc2cfd9330d3f05a1f517612d793e332ead | 9f3fac53c1689b82bf4c99b664e10e4a5decfb8e | refs/heads/master | 2022-11-03T02:27:44.670087 | 2022-08-17T12:17:30 | 2022-08-17T12:17:30 | 175,407,506 | 0 | 0 | Apache-2.0 | 2023-09-04T20:41:29 | 2019-03-13T11:30:05 | Java | UTF-8 | Python | false | false | 4,404 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import re # noqa: F401
import sys # noqa: F401
import typing
import urllib3
import functools # noqa: F401
from petstore_api import api_client, exceptions
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
NoneClass,
BoolClass,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
from . import path
@dataclass
class ApiResponseForDefault(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: Unset = unset
headers: Unset = unset
_response_for_default = api_client.OpenApiResponse(
response_cls=ApiResponseForDefault,
)
_status_code_to_response = {
'default': _response_for_default,
}
class BaseApi(api_client.Api):
def _logout_user(
self: api_client.Api,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseForDefault,
api_client.ApiResponseWithoutDeserialization
]:
"""
Logs out current logged in user session
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
# TODO add cookie handling
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
default_response = _status_code_to_response.get('default')
if default_response:
api_response = default_response.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class LogoutUser(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def logout_user(
self: BaseApi,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseForDefault,
api_client.ApiResponseWithoutDeserialization
]:
return self._logout_user(
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def get(
self: BaseApi,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseForDefault,
api_client.ApiResponseWithoutDeserialization
]:
return self._logout_user(
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
| [
"noreply@github.com"
] | yaronius.noreply@github.com |
6136bbbbebc5bb771fd00b992c25ef41855cd34d | c3d5dcf3c18e0e652d81cdf2edb87bdc0e3f2c72 | /user/models.py | 1978c2d4f31c9d846418bad20d246153683fb04f | [] | no_license | webclinic017/easy_ledger | bc6743e4826d6d67d2d2b7f38476760077b2c7c3 | 9e85a726000cc54fc77d368a48a828a716664c07 | refs/heads/main | 2023-04-10T09:11:15.530056 | 2021-04-23T09:35:14 | 2021-04-23T09:35:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class User(AbstractUser):
    """Custom user model that authenticates with the email address."""
    # NOTE(review): this override drops AbstractUser's unique constraint on
    # username and allows NULL, yet 'username' is still listed in
    # REQUIRED_FIELDS below -- confirm that is intended.
    username = models.CharField(max_length=200, blank=True, null=True)
    email = models.EmailField(_('email address'), unique=True)
    # Log in with the email address; createsuperuser still prompts for username.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __str__(self):
        return "{}".format(self.email)
class UserProfile(models.Model):
    """Optional per-user profile data, linked one-to-one to the auth user."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='profile')
    # All profile fields are optional (blank/null), so empty profiles are valid.
    dob = models.DateField(blank=True, null=True)
    phone = models.CharField(max_length=20, blank=True, null=True)
    address = models.CharField(max_length=255, blank=True, null=True)
    country = models.CharField(max_length=50, blank=True, null=True)
    city = models.CharField(max_length=50, blank=True, null=True)
zip = models.CharField(max_length=5, blank=True, null=True) | [
"ppark9553@gmail.com"
] | ppark9553@gmail.com |
91fc770e5872a64b5c1af1537736fd0d19afceae | 28024c936d258d56e3c02bfda44204fab8f32964 | /client/dvaclient/constants.py | a156927b6d8fd9bd73732cbd03f6105d087656ee | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | skgone/DeepVideoAnalytics | c1db730af253a2ec3acb9c7e6cce82d3264b72f2 | 6897dd92feed23af974dbcb7b5f0d2a9377f66b2 | refs/heads/master | 2021-07-01T01:36:07.086632 | 2020-09-20T10:07:26 | 2020-09-20T10:07:26 | 137,233,795 | 0 | 0 | null | 2018-06-13T15:19:50 | 2018-06-13T15:19:49 | null | UTF-8 | Python | false | false | 443 | py | SCHEDULE = 'S'
# Single-character codes shared with the server-side model choices.
# NOTE(review): meanings below are inferred from the constant names --
# confirm against the server's model definitions.
# Task / event types.
PROCESS = 'V'
QUERY = 'Q'
DETECTION = 'D'
INDEXING = 'I'
TRAINAPPROX = 'A'
CLASSIFICATION = 'C'
# Dataset kinds.
IMAGES = 'I'
VIDEOS = 'V'
INDEX = 'X'
# Deep-learning framework backends.
TENSORFLOW = 'T'
CAFFE = 'C'
PYTORCH = 'P'
OPENCV = 'O'
MXNET = 'M'
# Model roles.
INDEXER = 'I'
APPROXIMATOR = 'P'
DETECTOR = 'D'
ANALYZER = 'A'
SEGMENTER = 'S'
# Detector flavours.
YOLO = "Y"
TFD = "T"
# Retriever flavours.
EXACT = 'E'
LOPQ = 'L'
# Region / export types.
ANNOTATION = 'A'
SEGMENTATION = 'S'
TRANSFORM = 'T'
POLYGON = 'P'
VIDEO_EXPORT = 'V'
MODEL_EXPORT = 'M'
"akshayubhat@gmail.com"
] | akshayubhat@gmail.com |
a6c4d1ea49aea7407ef9c59364ed2480517c29ca | b170bd640f7259d641008a974a59ae7111788ca1 | /dcgan.py | 461d9841337d1f2a6656215a573991491876b815 | [] | no_license | kant/chainer-dcgan | b75522fd09a3e4936d68e175318dfedb6e574480 | 20062e3e6c1156857363ea0ed3cfe4abd63b2a8c | refs/heads/master | 2020-04-26T05:37:14.165650 | 2017-09-14T09:17:01 | 2017-09-14T09:17:01 | 173,340,366 | 0 | 0 | null | 2019-03-01T17:14:22 | 2019-03-01T17:14:21 | null | UTF-8 | Python | false | false | 4,667 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import chainer
from chainer import training
from chainer.training import extensions
from net import Discriminator
from net import Generator
from updater import DCGANUpdater
from visualize import out_generated_image
def main():
    """Parse CLI options, build the DCGAN models and run training.

    Bug fix: the final ``save_npz`` call used ``args.datasets``, but the
    argparse destination is ``dataset``, so every run crashed with
    AttributeError right after training finished.
    """
    parser = argparse.ArgumentParser(description='DCGAN')
    parser.add_argument('--batchsize', '-b', type=int, default=64,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=500,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dataset', '-i', default='',
                        help='Directory of image files.')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result image')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--gennum', '-v', default=10,
                        help='visualize image rows and columns number')
    parser.add_argument('--n_hidden', '-n', type=int, default=100,
                        help='Number of hidden units (z)')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed of z at visualization stage')
    parser.add_argument('--snapshot_interval', type=int, default=1000,
                        help='Interval of snapshot')
    parser.add_argument('--display_interval', type=int, default=100,
                        help='Interval of displaying log to console')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# n_hidden: {}'.format(args.n_hidden))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Build the generator and discriminator networks.
    gen = Generator(n_hidden=args.n_hidden)
    dis = Discriminator()

    if args.gpu >= 0:
        # Move both models to the selected GPU.
        chainer.cuda.get_device(args.gpu).use()
        gen.to_gpu()
        dis.to_gpu()

    # Adam optimizers with the DCGAN-paper hyper-parameters plus weight decay.
    def make_optimizer(model, alpha=0.0002, beta1=0.5):
        optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
        optimizer.setup(model)
        optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001), 'hook_dec')
        return optimizer

    opt_gen = make_optimizer(gen)
    opt_dis = make_optimizer(dis)

    if args.dataset == '':
        # No dataset directory given: fall back to CIFAR-10 (images only).
        train, _ = chainer.datasets.get_cifar10(withlabel=False, scale=255.)
    else:
        all_files = os.listdir(args.dataset)
        image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]
        print('{} contains {} image files'
              .format(args.dataset, len(image_files)))
        train = chainer.datasets\
            .ImageDataset(paths=image_files, root=args.dataset)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)

    # Trainer setup: adversarial updater plus snapshot/report extensions.
    updater = DCGANUpdater(
        models=(gen, dis),
        iterator=train_iter,
        optimizer={
            'gen': opt_gen, 'dis': opt_dis},
        device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')
    # NOTE: the snapshot file names say "iter" but the triggers fire every
    # 100 epochs; names are kept so existing tooling keeps finding the files.
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.epoch}.npz'),
        trigger=(100, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        gen, 'gen_iter_{.updater.epoch}.npz'), trigger=(100, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        dis, 'dis_iter_{.updater.epoch}.npz'), trigger=(100, 'epoch'))
    trainer.extend(extensions.LogReport(trigger=display_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'gen/loss', 'dis/loss',
    ]), trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(
        out_generated_image(
            gen, dis,
            int(args.gennum), int(args.gennum), args.seed, args.out),
        trigger=snapshot_interval)

    if args.resume:
        # Resume from a previously saved trainer snapshot.
        chainer.serializers.load_npz(args.resume, trainer)

    # Run training.
    trainer.run()
    # BUG FIX: was ``args.datasets`` (no such attribute) -> AttributeError.
    chainer.serializers.save_npz("genmodel{0}".format(args.dataset), trainer)


if __name__ == '__main__':
    main()
| [
"ytkmkw@gmail.com"
] | ytkmkw@gmail.com |
fe77dea5d4d477345bc22432f871ea702b083913 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03106/s992142201.py | 9c7161f9a8277d4dc2ac8882018659d1ef28d852 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | a,b,k=map(int,input().split())
# a, b and k were read from stdin on the previous line.  Collect the common
# divisors of a and b in descending order, then print the k-th largest one.
Min=min(a,b)
l=[]
for i in range(Min,0,-1):
    if a%i==0 and b%i==0:
        l.append(i)
#print(l)
print(l[k-1])
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e25a8b7a032e5d796003b9abe21a15a6d1a72cc6 | 3141eceaae7509301a125e10c5ba4ec4c24b6412 | /pydruid/db/sqlalchemy.py | 9ac51f822a4441fd3639443b990874982ba159e5 | [
"Apache-2.0"
] | permissive | isabella232/pydruid_DEPRECATED | a6959112709845632d51976661bb7358efc227c7 | 471bc21308f98a7f9d245e2ea76e54892bd0b805 | refs/heads/lyft | 2023-03-11T08:29:49.961290 | 2020-10-09T19:17:20 | 2020-10-09T19:17:20 | 315,906,418 | 0 | 0 | NOASSERTION | 2021-02-23T18:56:22 | 2020-11-25T10:36:33 | null | UTF-8 | Python | false | false | 7,412 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from sqlalchemy.engine import default
from sqlalchemy.sql import compiler
from sqlalchemy import types
import pydruid.db
from pydruid.db import exceptions
# Schemas that are internal to Druid and must not be reported to SQLAlchemy.
RESERVED_SCHEMAS = ['INFORMATION_SCHEMA']
# Mapping from Druid's DATA_TYPE strings (lower-cased) to SQLAlchemy types;
# used when reflecting columns in DruidDialect.get_columns.
type_map = {
    'char': types.String,
    'varchar': types.String,
    'float': types.Float,
    'decimal': types.Float,
    'real': types.Float,
    'double': types.Float,
    'boolean': types.Boolean,
    'tinyint': types.BigInteger,
    'smallint': types.BigInteger,
    'integer': types.BigInteger,
    'bigint': types.BigInteger,
    'timestamp': types.TIMESTAMP,
    'date': types.DATE,
    'other': types.BLOB,
}
class UniversalSet(object):
    """Set-like object that reports every candidate as a member.

    Assigned to ``reserved_words`` on the identifier preparer below so that
    every identifier is treated as reserved (and therefore quoted).
    """
    def __contains__(self, item):
        return True
class DruidIdentifierPreparer(compiler.IdentifierPreparer):
    # Treat every word as reserved so all identifiers get quoted.
    reserved_words = UniversalSet()
class DruidCompiler(compiler.SQLCompiler):
    # The default SQL rendering is sufficient for Druid; no overrides needed.
    pass
class DruidTypeCompiler(compiler.GenericTypeCompiler):
    """Render SQLAlchemy types as Druid type names.

    Numeric/temporal types collapse to LONG or DOUBLE and character types
    to STRING; everything this dialect cannot express raises
    NotSupportedError instead of emitting invalid DDL.
    """
    def visit_REAL(self, type_, **kwargs):
        return "DOUBLE"
    def visit_NUMERIC(self, type_, **kwargs):
        return "LONG"
    # Integral, boolean and temporal types all render as LONG.
    visit_DECIMAL = visit_NUMERIC
    visit_INTEGER = visit_NUMERIC
    visit_SMALLINT = visit_NUMERIC
    visit_BIGINT = visit_NUMERIC
    visit_BOOLEAN = visit_NUMERIC
    visit_TIMESTAMP = visit_NUMERIC
    visit_DATE = visit_NUMERIC
    def visit_CHAR(self, type_, **kwargs):
        return "STRING"
    # All character types render as STRING.
    visit_NCHAR = visit_CHAR
    visit_VARCHAR = visit_CHAR
    visit_NVARCHAR = visit_CHAR
    visit_TEXT = visit_CHAR
    def visit_DATETIME(self, type_, **kwargs):
        raise exceptions.NotSupportedError('Type DATETIME is not supported')
    def visit_TIME(self, type_, **kwargs):
        raise exceptions.NotSupportedError('Type TIME is not supported')
    def visit_BINARY(self, type_, **kwargs):
        raise exceptions.NotSupportedError('Type BINARY is not supported')
    def visit_VARBINARY(self, type_, **kwargs):
        raise exceptions.NotSupportedError('Type VARBINARY is not supported')
    def visit_BLOB(self, type_, **kwargs):
        raise exceptions.NotSupportedError('Type BLOB is not supported')
    def visit_CLOB(self, type_, **kwargs):
        raise exceptions.NotSupportedError('Type CBLOB is not supported')
    def visit_NCLOB(self, type_, **kwargs):
        raise exceptions.NotSupportedError('Type NCBLOB is not supported')
class DruidDialect(default.DefaultDialect):
    """SQLAlchemy dialect for Druid over the pydruid REST DB-API.

    Druid is read-only from SQLAlchemy's point of view: there are no primary
    keys, foreign keys, indexes, views or constraints, so the corresponding
    reflection hooks return empty results.
    """
    name = 'druid'
    scheme = 'http'
    driver = 'rest'
    user = None
    password = None
    preparer = DruidIdentifierPreparer
    statement_compiler = DruidCompiler
    type_compiler = DruidTypeCompiler
    supports_alter = False
    supports_pk_autoincrement = False
    supports_default_values = False
    supports_empty_insert = False
    supports_unicode_statements = True
    supports_unicode_binds = True
    returns_unicode_strings = True
    description_encoding = None
    supports_native_boolean = True
    def __init__(self, context=None, *args, **kwargs):
        super(DruidDialect, self).__init__(*args, **kwargs)
        # Extra query context forwarded to every pydruid connection.
        self.context = context or {}
    @classmethod
    def dbapi(cls):
        # pydruid.db implements the DB-API 2.0 interface for Druid.
        return pydruid.db
    def create_connect_args(self, url):
        # Translate the SQLAlchemy URL into pydruid.db.connect() kwargs.
        kwargs = {
            'host': url.host,
            'port': url.port or 8082,
            'user': url.username or None,
            'password': url.password or None,
            'path': url.database,
            'scheme': self.scheme,
            'context': self.context,
            'header': url.query.get('header') == 'true',
        }
        return ([], kwargs)
    def get_schema_names(self, connection, **kwargs):
        # Each Druid datasource appears as a table in the "druid" schema. This
        # is also the default schema, so Druid datasources can be referenced as
        # either druid.dataSourceName or simply dataSourceName.
        result = connection.execute(
            'SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA')
        return [
            row.SCHEMA_NAME for row in result
            if row.SCHEMA_NAME not in RESERVED_SCHEMAS
        ]
    def has_table(self, connection, table_name, schema=None):
        # NOTE(review): table_name is interpolated directly into the SQL;
        # reflection inputs are normally trusted, but confirm callers never
        # pass user-controlled names.
        query = """
            SELECT COUNT(*) > 0 AS exists_
            FROM INFORMATION_SCHEMA.TABLES
            WHERE TABLE_NAME = '{table_name}'
        """.format(table_name=table_name)
        result = connection.execute(query)
        return result.fetchone().exists_
    def get_table_names(self, connection, schema=None, **kwargs):
        query = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES"
        if schema:
            query = "{query} WHERE TABLE_SCHEMA = '{schema}'".format(
                query=query, schema=schema)
        result = connection.execute(query)
        return [row.TABLE_NAME for row in result]
    def get_view_names(self, connection, schema=None, **kwargs):
        # Druid has no views.
        return []
    def get_table_options(self, connection, table_name, schema=None, **kwargs):
        return {}
    def get_columns(self, connection, table_name, schema=None, **kwargs):
        query = """
            SELECT COLUMN_NAME,
                   DATA_TYPE,
                   IS_NULLABLE,
                   COLUMN_DEFAULT
            FROM INFORMATION_SCHEMA.COLUMNS
            WHERE TABLE_NAME = '{table_name}'
        """.format(table_name=table_name)
        if schema:
            query = "{query} AND TABLE_SCHEMA = '{schema}'".format(
                query=query, schema=schema)
        result = connection.execute(query)
        # Map Druid's metadata rows to SQLAlchemy column dicts via type_map
        # and the get_is_nullable/get_default helpers below.
        return [
            {
                'name': row.COLUMN_NAME,
                'type': type_map[row.DATA_TYPE.lower()],
                'nullable': get_is_nullable(row.IS_NULLABLE),
                'default': get_default(row.COLUMN_DEFAULT),
            }
            for row in result
        ]
    def get_pk_constraint(self, connection, table_name, schema=None, **kwargs):
        return {'constrained_columns': [], 'name': None}
    def get_foreign_keys(self, connection, table_name, schema=None, **kwargs):
        return []
    def get_check_constraints(
        self,
        connection,
        table_name,
        schema=None,
        **kwargs
    ):
        return []
    def get_table_comment(self, connection, table_name, schema=None, **kwargs):
        return {'text': ''}
    def get_indexes(self, connection, table_name, schema=None, **kwargs):
        return []
    def get_unique_constraints(
        self,
        connection,
        table_name,
        schema=None,
        **kwargs
    ):
        return []
    def get_view_definition(
        self,
        connection,
        view_name,
        schema=None,
        **kwargs
    ):
        pass
    def do_rollback(self, dbapi_connection):
        # Druid is non-transactional; rollback is a no-op.
        pass
    def _check_unicode_returns(self, connection, additional_tests=None):
        return True
    def _check_unicode_description(self, connection):
        return True
# The plain-HTTP dialect is just the default DruidDialect under another name.
DruidHTTPDialect = DruidDialect
class DruidHTTPSDialect(DruidDialect):
    # Identical behaviour, but connects to the broker over TLS.
    scheme = 'https'
def get_is_nullable(druid_is_nullable):
    """Translate Druid's IS_NULLABLE value ('YES'/'NO') into a bool."""
    # Anything other than a case-insensitive "yes" counts as NOT NULL.
    normalized = druid_is_nullable.lower()
    return normalized == 'yes'
def get_default(druid_column_default):
    """Return COLUMN_DEFAULT as a string, or None when Druid reports ''."""
    if druid_column_default == '':
        return None
    return str(druid_column_default)
| [
"maximebeauchemin@gmail.com"
] | maximebeauchemin@gmail.com |
21641b139d3431097cf789c444bfd4c467e6ebe1 | 20d1bf9505929948f3fc50040e1471baadbe0351 | /keras_frcnn/muscima_annotation_generator.py | 708bb5917465720f3738e2e50da596878874f90b | [
"MIT"
] | permissive | JuanuMusic/MusicObjectDetector | 1d1c57508196fe66f8a3a4acb4ef3ea28f652510 | 0382c36fa4b4ac29d80980204b0710a5af3c0bad | refs/heads/master | 2021-09-02T07:27:06.277597 | 2017-12-31T12:54:44 | 2017-12-31T12:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,787 | py | import os
from itertools import groupby
from lxml import etree
from typing import List, Tuple
from lxml.etree import Element, SubElement
def create_annotations_in_plain_format(exported_annotations_file_path: str, objects_appearing_in_cropped_image: List[
    Tuple[str, str, Tuple[int, int, int, int]]]):
    """Append one CSV-style line per object to the annotations file.

    Each entry is a (file_name, class_name, (top, left, bottom, right))
    tuple; it is written as "file,left,top,right,bottom,class".  The file
    is opened in append mode so repeated calls accumulate annotations.
    """
    with open(exported_annotations_file_path, "a") as annotations_file:
        for file_name, class_name, bounding_box in objects_appearing_in_cropped_image:
            # Bounding boxes arrive as (top, left, bottom, right).
            top, left, bottom, right = bounding_box
            annotations_file.write(
                "{0},{1},{2},{3},{4},{5}\n".format(file_name, left, top,
                                                   right, bottom, class_name))
def create_annotations_in_pascal_voc_format(annotations_folder: str,
                                            file_name: str,
                                            objects_appearing_in_cropped_image: List[
                                                Tuple[str, str, Tuple[int, int, int, int]]],
                                            image_width: int,
                                            image_height: int,
                                            image_depth: int):
    """Write one Pascal-VOC XML annotation file for a cropped image.

    Each object tuple is (file_name, class_name, (ymin, xmin, ymax, xmax));
    the XML is written to ``annotations_folder/<file_name stem>.xml``.
    """
    os.makedirs(annotations_folder, exist_ok=True)
    # Fixed header identifying the dataset this crop came from.
    annotation = Element("annotation")
    folder = SubElement(annotation, "folder")
    folder.text = "muscima_pp_cropped_images"
    filename = SubElement(annotation, "filename")
    filename.text = file_name
    source = SubElement(annotation, "source")
    database = SubElement(source, "database")
    database.text = "MUSCIMA++"
    source_annotation = SubElement(source, "annotation")
    source_annotation.text = "MUSCIMA++ (v0.9.1)"
    image = SubElement(source, "image")
    image.text = "CVC-MUSCIMA"
    size = SubElement(annotation, "size")
    width = SubElement(size, "width")
    width.text = str(image_width)
    height = SubElement(size, "height")
    height.text = str(image_height)
    depth = SubElement(size, "depth")
    depth.text = str(image_depth)
    # Write results to file
    for detected_object in objects_appearing_in_cropped_image:
        class_name = detected_object[1]
        translated_bounding_box = detected_object[2]
        ymin, xmin, ymax, xmax = translated_bounding_box
        # One <object> element per detection, VOC-style (pose/truncated/
        # difficult are fixed placeholder values).
        object = SubElement(annotation, "object")
        name = SubElement(object, "name")
        name.text = class_name
        pose = SubElement(object, "pose")
        pose.text = "Unspecified"
        truncated = SubElement(object, "truncated")
        truncated.text = "0"
        difficult = SubElement(object, "difficult")
        difficult.text = "0"
        bndbox = SubElement(object, "bndbox")
        bb_xmin = SubElement(bndbox, "xmin")
        bb_xmin.text = str(xmin)
        bb_ymin = SubElement(bndbox, "ymin")
        bb_ymin.text = str(ymin)
        bb_xmax = SubElement(bndbox, "xmax")
        bb_xmax.text = str(xmax)
        bb_ymax = SubElement(bndbox, "ymax")
        bb_ymax.text = str(ymax)
    xml_file_path = os.path.join(annotations_folder, os.path.splitext(file_name)[0] + ".xml")
    pretty_xml_string = etree.tostring(annotation, pretty_print=True)
    with open(xml_file_path, "wb") as xml_file:
        xml_file.write(pretty_xml_string)
| [
"alexander.pacha@gmail.com"
] | alexander.pacha@gmail.com |
f22791718a15c54fba750f4dd80a0d6972ecc52f | 077f29021738c3b577c7c3d9ef5851d76e93cbed | /demo/assignments/days_between.py | bff4dffec1b0f26912c3685b986474e7419edc8c | [] | no_license | srikanthpragada/PYTHON_10_JULY_2020 | fb410d87260eb290ebcc5ac6a88b6d6b01ee15b5 | b7a586cbcd49934d36facb4dd748c54038838334 | refs/heads/master | 2022-12-05T09:05:33.192365 | 2020-08-26T14:27:09 | 2020-08-26T14:27:09 | 279,319,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from datetime import datetime, date
# Read "start[,end]" date pairs (dd-mm-YYYY) from dates.txt.  When the end
# date is missing, "now" is used.  Collect (start, end, day-span) tuples.
file = open("dates.txt", "rt")
dates = []
for line in file:
    parts = line.strip().split(",")
    try:
        if len(parts) == 2:
            fd = datetime.strptime(parts[0], "%d-%m-%Y")
            sd = datetime.strptime(parts[1], "%d-%m-%Y")
        elif len(parts) == 1:
            fd = datetime.strptime(parts[0], "%d-%m-%Y")
            sd = datetime.now()
        days = (sd - fd).days
        dates.append((fd, sd, days)) # Add tuple with data to list
    except:
        # Malformed lines are deliberately skipped.  NOTE(review): this bare
        # except also hides any other error -- consider catching ValueError.
        # print("Invalid line :", line)
        pass
# Print the pairs sorted by the number of days between them.
for t in sorted(dates, key=lambda t: t[2]):
    print(f"{t[0].strftime('%d-%m-%Y')} - {t[1].strftime('%d-%m-%Y')} - {t[2]:4}")
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
32369f6c433f82f5061430fab8e40f62b23a08e0 | 7deda84f7a280f5a0ee69b98c6a6e7a2225dab24 | /KBL/migrations/0009_auto_20200514_1542.py | 3b32eeb87339f7dfb830c0485a45742ab4d6c936 | [] | no_license | Cornex-Inc/Coffee | 476e30f29412373fb847b2d518331e6c6b9fdbbf | fcd86f20152e2b0905f223ff0e40b1881db634cf | refs/heads/master | 2023-01-13T01:56:52.755527 | 2020-06-08T02:59:18 | 2020-06-08T02:59:18 | 240,187,025 | 0 | 0 | null | 2023-01-05T23:58:52 | 2020-02-13T05:47:41 | Python | UTF-8 | Python | false | false | 444 | py | # Generated by Django 2.1.15 on 2020-05-14 15:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: keep in sync with the model, do not edit."""
    dependencies = [
        ('KBL', '0008_customer_company_date_establishment'),
    ]
    operations = [
        # Re-declare date_establishment as a 10-char string with a
        # "0000-00-00" placeholder default.
        migrations.AlterField(
            model_name='customer_company',
            name='date_establishment',
            field=models.CharField(default='0000-00-00', max_length=10),
        ),
    ]
| [
"khm4321@naver.com"
] | khm4321@naver.com |
01de42efe5caf477e9a65304c3a50ec28a367993 | 9b8b2b867379d04bdd2568bfd211f456401ce702 | /LogisticandSVM.py | ceaef98d4cbe1b3622d090e09ed77873dda733fb | [] | no_license | bhatnagaranshika02/Machine-Learning-Practice | fb246ba5bac20aae18c58a7f58b529c63fd09bdd | 7790bb624d467c221749c7a16f4e2486668e6dbe | refs/heads/master | 2022-11-19T09:27:52.848116 | 2020-07-20T21:08:16 | 2020-07-20T21:08:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | from sklearn import datasets
# Bug fixes: the original unpacked into X_train, x_test, Y_train, y_test but
# then used the undefined names X_test/y_train (NameError), and it never
# imported train_test_split, LogisticRegression or SVC at all.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

# Load the bundled handwritten-digits dataset and hold out a test split.
digits = datasets.load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target)

# Fit logistic regression and print train/test accuracy.
lr = LogisticRegression()
lr.fit(X_train, y_train)
print(lr.score(X_train, y_train))
print(lr.score(X_test, y_test))

# Apply SVM and print scores
svm = SVC()
svm.fit(X_train, y_train)
print(svm.score(X_train, y_train))
print(svm.score(X_test, y_test))
| [
"bhatnagaranshika02@gmail.com"
] | bhatnagaranshika02@gmail.com |
6c730c4a0976bf8d2ad2c79816467ea1a0eff151 | 090324db0c04d8c30ad6688547cfea47858bf3af | /soko/struct/glue.py | 821c9fd3e606fc467d0a40edc857959287d960d6 | [] | no_license | fidlej/sokobot | b82c4c36d73e224d0d0e1635021ca04485da589e | d3d04753a5043e6a22dafd132fa633d8bc66b9ea | refs/heads/master | 2021-01-21T13:14:29.523501 | 2011-06-12T07:34:14 | 2011-06-12T07:34:14 | 32,650,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py |
from soko.env.env import Env
from soko.struct.expanders.pushexpander import PushExpander
from soko.struct.estimators import sokoestimator
from soko.struct import modeling
class EnvGlue(Env):
    """Adapts a Sokoban maze to the generic Env search interface, using the
    structural push expander and the Sokoban-specific cost estimator."""
    def __init__(self, maze):
        self.maze = maze
    def configure(self, config):
        #TODO: allow to use different classes based on the command line args
        self.expander = PushExpander()
        self.estimator = sokoestimator.SokoEnvSokoEstimator()
        self.estimator.setup_goal(self.maze)
    def init(self):
        # Initial search state extracted from the maze layout.
        return modeling.extract_state(self.maze)
    def get_actions(self, s):
        return self.expander.get_actions(s)
    def predict(self, s, a):
        return modeling.predict(s, a)
    def estim_cost(self, s, cost_limit=None):
        # cost_limit is accepted for interface compatibility but unused here.
        return self.estimator.estim_cost(s)
    def format(self, s):
        # Local import -- presumably to avoid a module-level dependency
        # cycle with soko.mazing; confirm before hoisting it.
        from soko.mazing import Maze
        return str(Maze(s))
"ivo@danihelka.net"
] | ivo@danihelka.net |
e5abebf008390550a580703fa551688a1b0449eb | 6eec2948c0907b5377de51e61014a48dff3d5ce7 | /cairo2/arcos.py | 4c1d2659d7d0e1656107dd73408a3a525d35e77a | [] | no_license | clcneogeek325/Script_python | 4da937cb2caee93a2e0eb945e77ccac8e88ec4bc | 87607c97fa738b3e64aefbe0e8c4425724ecff73 | refs/heads/master | 2021-01-17T07:44:07.124077 | 2016-06-04T03:26:44 | 2016-06-04T03:26:44 | 15,943,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #!/usr/bin/env python
import gtk, pygtk, cairo
pygtk.require('2.0')
class dibujar:
    """Small PyGTK demo window that draws a filled arc."""
    def __init__(self):
        # 400x300 centered top-level window with a drawing area inside.
        self.ventana = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.ventana.set_position(gtk.WIN_POS_CENTER)
        self.ventana.set_size_request(400,300)
        self.ventana.set_title("Dibujando un circulo")
        self.areaDibujo = gtk.DrawingArea()
        # Redraw whenever the drawing area is exposed.
        self.areaDibujo.connect("expose-event", self.dibujando)
        self.ventana.add(self.areaDibujo)
        self.ventana.connect("destroy", gtk.main_quit)
        self.ventana.show_all()
    def main(self):
        gtk.main()
    def dibujando(self, widget,areaDibujo ):
        self.style = self.areaDibujo.get_style()
        self.gc = self.style.fg_gc[gtk.STATE_NORMAL]
        # Filled arc at (100, 50), 200x200; angles are in 1/64ths of a degree.
        # NOTE(review): a full circle is 360*64 = 23040; 365*74 overshoots --
        # confirm the intended sweep.
        self.areaDibujo.window.draw_arc(self.gc, True , 100 ,50 ,200, 200, 0, 365*74)
if __name__ == "__main__":
    objeto = dibujar()
    objeto.main()
| [
"clcneogeek@gmail.com"
] | clcneogeek@gmail.com |
8e790772dc1b98f875809de6ee3c8a4febabb32f | a81d21f98dd558416f8731f001cb8151d8309f4f | /interviewbit/test/test_kmp.py | c6f61524a22f6d1a9b02a51773adcef0494da13f | [] | no_license | marquesarthur/programming_problems | 1128c38e65aade27e2435f7987d7ee2b328fda51 | 2f7df25d0d735f726b7012e4aa2417dee50526d9 | refs/heads/master | 2022-01-25T18:19:02.575634 | 2022-01-18T02:07:06 | 2022-01-18T02:07:06 | 32,213,919 | 2 | 0 | null | 2020-10-13T01:29:08 | 2015-03-14T13:44:06 | Python | UTF-8 | Python | false | false | 525 | py | import unittest
from interviewbit.stringm.kmp import KMP
class KMPTest(unittest.TestCase):
    """Unit tests for the KMP substring-search implementation."""
    def test_base_case(self):
        s = KMP()
        pattern = "ABCDABD"
        str = "ABC ABCDAB ABCDABCDABDE"
        # search() returns the start index of every occurrence of pattern.
        ret = [15]
        result = s.search(pattern, str)
        self.assertEqual(result, ret)
    def test_two_matches(self):
        s = KMP()
        pattern = "AA"
        str = "AABC AA AAD ASAA"
        ret = [0, 5, 8, 14]
        result = s.search(pattern, str)
        self.assertEqual(result, ret)
"marques.art@gmail.com"
] | marques.art@gmail.com |
a388c7647550a18d118c61a5e81bd0186660777e | c152873f28e62dbbf7100f789364cf5a7e578a38 | /sncosmo/magsystems.py | c593a6f5d5ba4252b6c47ecf478a3b8eef6aea91 | [
"BSD-3-Clause"
] | permissive | barentsen/sncosmo | d537b906e3f61098a731ff5d2fefcc251935a30f | 7276566ef3e2b7fd21beba25cc72dbcedf55e161 | refs/heads/master | 2020-12-24T12:01:00.052524 | 2016-11-06T03:28:08 | 2016-11-06T03:51:53 | 73,101,477 | 0 | 0 | null | 2016-11-07T17:03:28 | 2016-11-07T17:03:28 | null | UTF-8 | Python | false | false | 4,853 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import math
import numpy as np
import astropy.units as u
import astropy.constants as const
from ._registry import Registry
from .bandpasses import get_bandpass
__all__ = ['get_magsystem', 'MagSystem', 'SpectralMagSystem',
'ABMagSystem', 'CompositeMagSystem']
_MAGSYSTEMS = Registry()
def get_magsystem(name):
    """Get a MagSystem from the registry by name."""
    # Already-constructed MagSystem instances pass straight through.
    return name if isinstance(name, MagSystem) else _MAGSYSTEMS.retrieve(name)
class MagSystem(object):
    """An abstract base class for magnitude systems."""
    __metaclass__ = abc.ABCMeta
    def __init__(self, name=None):
        # Cache of zero-point band fluxes, keyed by Bandpass instance.
        self._zpbandflux = {}
        self._name = name
    @abc.abstractmethod
    def _refspectrum_bandflux(self, band):
        """Flux of the fundamental spectrophotometric standard."""
        pass
    @property
    def name(self):
        """Name of magnitude system."""
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
    def zpbandflux(self, band):
        """Flux of an object with magnitude zero in the given bandpass.
        Parameters
        ----------
        bandpass : `~sncosmo.spectral.Bandpass` or str
        Returns
        -------
        bandflux : float
            Flux in photons / s / cm^2.
        """
        band = get_bandpass(band)
        try:
            return self._zpbandflux[band]
        except KeyError:
            # First request for this band: compute and memoize.
            bandflux = self._refspectrum_bandflux(band)
            self._zpbandflux[band] = bandflux
        return bandflux
    def band_flux_to_mag(self, flux, band):
        """Convert flux (photons / s / cm^2) to magnitude."""
        return -2.5 * math.log10(flux / self.zpbandflux(band))
    def band_mag_to_flux(self, mag, band):
        """Convert magnitude to flux in photons / s / cm^2"""
        return self.zpbandflux(band) * 10.**(-0.4 * mag)
class CompositeMagSystem(MagSystem):
    """A magnitude system defined in a specific set of bands.
    In each band, there is a fundamental standard with a known
    (generally non-zero) magnitude.
    Parameters
    ----------
    bands: iterable of `~sncosmo.Bandpass` or str
        The filters in the magnitude system.
    standards: iterable of `~sncosmo.MagSystem` or str,
        The spectrophotometric flux standards for each band, in the
        same order as `bands`.
    offsets: list_like
        The magnitude of standard in the given band.
    """
    def __init__(self, bands, standards, offsets, name=None):
        super(CompositeMagSystem, self).__init__(name=name)
        if not len(bands) == len(offsets) == len(standards):
            raise ValueError('Lengths of bands, standards, and offsets '
                             'must match.')
        self._bands = [get_bandpass(band) for band in bands]
        self._standards = [get_magsystem(s) for s in standards]
        self._offsets = offsets
    @property
    def bands(self):
        return self._bands
    @property
    def standards(self):
        return self._standards
    @property
    def offsets(self):
        return self._offsets
    def _refspectrum_bandflux(self, band):
        if band not in self._bands:
            raise ValueError('band not in local magnitude system')
        # Scale the standard's zero-point flux by the band's magnitude offset.
        i = self._bands.index(band)
        standard = self._standards[i]
        offset = self._offsets[i]
        return 10.**(0.4 * offset) * standard.zpbandflux(band)
    def __str__(self):
        s = "CompositeMagSystem {!r}:\n".format(self.name)
        for i in range(len(self._bands)):
            s += "  {!r}: system={!r} offset={}\n".format(
                self._bands[i].name,
                self._standards[i].name,
                self._offsets[i])
        return s
class SpectralMagSystem(MagSystem):
    """A magnitude system defined by a fundamental spectrophotometric
    standard.
    Parameters
    ----------
    refspectrum : `sncosmo.Spectrum`
        The spectrum of the fundamental spectrophotometric standard.
    """
    def __init__(self, refspectrum, name=None):
        super(SpectralMagSystem, self).__init__(name)
        self._refspectrum = refspectrum
    def _refspectrum_bandflux(self, band):
        # Delegate the band integration to the reference spectrum itself.
        return self._refspectrum.bandflux(band)
class ABMagSystem(MagSystem):
    """Magnitude system where a source with F_nu = 3631 Jansky at all
    frequencies has magnitude 0 in all bands."""
    def _refspectrum_bandflux(self, band):
        bwave, btrans = band.to_unit(u.Hz)
        # AB spectrum is 3631 x 10^{-23} erg/s/cm^2/Hz
        # Get spectral values in photons/cm^2/s/Hz at bandpass wavelengths
        # by dividing by (h \nu).
        f = 3631.e-23 / const.h.cgs.value / bwave
        # Effective width of each frequency bin for the integration sum.
        binw = np.gradient(bwave)
        return np.sum(f * btrans * binw)
| [
"kylebarbary@gmail.com"
] | kylebarbary@gmail.com |
386035a852c2dc374f26af267d7ccd3a54c081a1 | 0e338d96c395950090c9252a73fc6dd2169decf0 | /p4ss/sniff_veth0.py | c5e92907b4b9af4fe982b28d76e722391d117e7b | [] | no_license | hotephen/p4-dev | 4281fbecbef406e5dfe48774d5f555235b4dfc1d | 8476734562511cde6663a4910b71dfaeed7bf34b | refs/heads/master | 2023-01-12T04:40:41.786827 | 2022-12-30T05:30:23 | 2022-12-30T05:30:23 | 130,222,370 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | #!/usr/bin/env python
import sys
import struct
from scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr, bind_layers
from scapy.all import Packet, IPOption
from scapy.all import *
from scapy.layers.inet import _IPOption_HDR
from scapy.all import IP, TCP, UDP, Raw, Ether, Padding
from time import sleep
import argparse
def _str2bool(value):
    """Parse common boolean spellings from the command line.

    Bug fix: the original used ``type=bool``, which is a well-known argparse
    trap -- ``bool("False")`` is True because any non-empty string is truthy,
    so ``--save False`` / ``--show False`` could never yield False.
    """
    return str(value).strip().lower() in ('1', 'true', 'yes', 'y')


parser = argparse.ArgumentParser(description='send entry packet')
parser.add_argument('--i', required=False, type=str, default='veth0', help='i')
parser.add_argument('--save', required=False, type=_str2bool, default=True, help='save')
parser.add_argument('--show', required=False, type=_str2bool, default=False, help='save')
args = parser.parse_args()
def handle_pkt(pkt):
    # Print "src / dst / sport / dport" for every IPv4 TCP or UDP packet;
    # all other packets are ignored.
    if(IP in pkt and (UDP in pkt or TCP in pkt)):
        if (UDP in pkt):
            print(str(pkt[IP].src) + " / " + str(pkt[IP].dst) + " / " + str(pkt[UDP].sport) + " / " + str(pkt[UDP].dport))
        else:
            print(str(pkt[IP].src) + " / " + str(pkt[IP].dst) + " / " + str(pkt[TCP].sport) + " / " + str(pkt[TCP].dport))
def main():
    """Sniff the interface given by --i and log TCP/UDP flow 4-tuples."""
    iface = args.i
    print ("sniffing on %s" % iface)
    sys.stdout.flush()
    # Block forever, dispatching each captured packet to handle_pkt.
    sniff(iface = iface,
          prn = lambda x: handle_pkt(x))
if __name__ == '__main__':
    main()
| [
"you@example.com"
] | you@example.com |
74fd5bc6912ca3e02e94ad89eefdbae7bae13d48 | 91bd58191c9a25bc92f5372d5344b808b4f5ce5e | /tf_experiments/plot_keras.py | 93125da3cda7913711ae56da77efd762450c7df8 | [] | no_license | CBMM/Generalization-Puzzles-in-Deep-Networks-1 | c25c29b05b0439cd4cf0b53ce902b1ce5d775b6c | f7fa35fb68a9badab6843689f30b67e59fd379a3 | refs/heads/master | 2023-03-13T01:39:53.479689 | 2021-03-05T00:33:32 | 2021-03-05T00:33:32 | 449,828,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | import matplotlib.pyplot as plt
import pickle
import os
import numpy as np
from pdb import set_trace as st
def plot_keras(history):
    '''
    Plot training curves from a Keras history dictionary.
    :param history: the dictionary history saved by keras, with the keys
        'acc', 'val_acc', 'loss' and 'val_loss'.
    :return: None; opens two matplotlib figures (accuracy and loss).
    '''
    nb_epochs = len(history['acc'],)
    # Plots for training and testing process: loss and accuracy
    # Figure 0: train vs validation accuracy per epoch.
    plt.figure(0)
    plt.plot(history['acc'], 'r')
    plt.plot(history['val_acc'], 'g')
    plt.xticks(np.arange(0, nb_epochs + 1, 2.0))
    plt.rcParams['figure.figsize'] = (8, 6)
    plt.xlabel("Num of Epochs")
    plt.ylabel("Accuracy")
    plt.title("Training Accuracy vs Validation Accuracy")
    plt.legend(['train', 'validation'])
    # Figure 1: train vs validation loss per epoch.
    plt.figure(1)
    plt.plot(history['loss'], 'r')
    plt.plot(history['val_loss'], 'g')
    plt.xticks(np.arange(0, nb_epochs + 1, 2.0))
    plt.rcParams['figure.figsize'] = (8, 6)
    plt.xlabel("Num of Epochs")
    plt.ylabel("Loss")
    plt.title("Training Loss vs Validation Loss")
    plt.legend(['train', 'validation'])
    plt.show()
def main():
    """Load a pickled Keras history from disk and plot it."""
    path = '../pytorch_experiments/test_runs_flatness/keras_expt'
    filename = 'chance_plateau_debug_0'
    ''' load history '''
    # NOTE(review): pickle.load runs arbitrary code; only open trusted files.
    path_2_file = os.path.join(path,filename)
    with open(path_2_file, 'rb') as keras_hist_file:
        hist_dict = pickle.load(keras_hist_file)
    ''' plot history '''
    plot_keras(hist_dict)
if __name__ == '__main__':
    main()
    print('Done')
| [
"brando90@mit.edu"
] | brando90@mit.edu |
990899041c7f51cc4a3c9a3c649736772d803b5f | 0d949e3373deb48b715080fce4ea397d656cd701 | /garfield/deterrence/tests/test_admin.py | 353d770887788d87516d26d2b46256e75be499f0 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | RobSpectre/garfield | f291d6022024a6238839db4a2dee47be1e1501c5 | ab806b7ad9221bd1b17c92daadd0a53a4f261cbe | refs/heads/master | 2022-07-27T05:40:07.533290 | 2021-06-10T17:38:46 | 2021-06-10T17:38:46 | 106,447,706 | 3 | 1 | MIT | 2022-04-22T20:51:54 | 2017-10-10T17:14:21 | Python | UTF-8 | Python | false | false | 723 | py | from django.test import RequestFactory
from django.test import TestCase
from deterrence.admin import DeterrenceMessageInline
class DeterrenceInlineTestCase(TestCase):
    """Unit tests for DeterrenceMessageInline.get_extra()."""
    def setUp(self):
        # Build a throwaway GET request for the inline to inspect.
        self.factory = RequestFactory()
        self.request = self.factory.get("/stuff")
    def test_deterrence_message_inline_get_extra(self):
        # With no existing object, one extra blank inline form is offered.
        extra = DeterrenceMessageInline.get_extra(None, self.request)
        self.assertEqual(extra, 1)
    def test_deterrence_message_inline_get_extra_obj_exists(self):
        # With an existing object, no extra blank inline forms are offered.
        extra = DeterrenceMessageInline.get_extra(
            None, self.request, obj=True)
        self.assertEqual(extra, 0)
| [
"rob@brooklynhacker.com"
] | rob@brooklynhacker.com |
ad9deeab0d64712cb21fa7a68f0bae20d525d236 | 7ab85ba79a6553659f0b324ecebb4bb39f8a8a1c | /class_method2.py | 03efa5297b7caf919e9d4354c4ec872da162c672 | [] | no_license | subinmun1997/my_python | b75db77a035fa8f531d9872bf33a1818a002206a | 634acc948e7758f5d26084536c506e7da45cd53c | refs/heads/master | 2022-12-28T21:11:40.173378 | 2020-10-16T08:02:18 | 2020-10-16T08:02:18 | 292,875,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | class Simple:
    # Class-level counter of how many Simple instances have been created.
    count = 0
    def __init__(self):
        # Every construction increments the shared class counter.
        Simple.count += 1
    @classmethod
    def get_count(cls):
        # Read the counter without needing an instance.
        return cls.count # what gets passed to cls is the Simple class itself
# Demonstrate the shared counter: prints 0, then 1 after one instance exists.
def main():
    print(Simple.get_count())
    s = Simple()
    print(Simple.get_count())
main()
"qzxy812@gmail.com"
] | qzxy812@gmail.com |
67ae26888983b46d5e6b9c5b7eaff0f630ef0073 | 1ca4f7d65ecc85f3607f4e56216875b094219e0d | /144.py | c8e7a5409b60bc2ce3e2667cb4a399e17940ef33 | [] | no_license | ZhangRui111/MyLeetCode | 4c1ac6b309b6c497956a46b1054201c06813c563 | 168fb5a720847721aad426a48a09999b59285794 | refs/heads/master | 2023-06-08T03:09:26.594980 | 2021-07-05T00:30:13 | 2021-07-05T00:30:13 | 381,576,497 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | """
tag: 栈;树
144. 二叉树的前序遍历
https://leetcode-cn.com/problems/binary-tree-preorder-traversal/
"""
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        # Store the payload and both child links; None marks a missing child.
        self.val, self.left, self.right = val, left, right
class Solution1:
    """Recursive preorder traversal (root, then left, then right)."""

    def preorderTraversal(self, root: "TreeNode") -> "List[int]":
        """Return the node values of ``root`` in preorder.

        Bug fix: the original annotations used ``List[int]`` without
        ``from typing import List``, so merely importing this module
        raised ``NameError`` when the annotation was evaluated.  The
        annotations are now strings, which are never evaluated.
        """
        def preorder(node: "TreeNode") -> None:
            # Visit the node first, then recurse into the subtrees.
            if node is None:
                return
            res.append(node.val)
            preorder(node.left)
            preorder(node.right)

        res = []
        preorder(root)
        return res
class Solution2:
    """Iterative preorder traversal using an explicit stack."""

    def preorderTraversal(self, root: "TreeNode") -> "List[int]":
        """Return the node values of ``root`` in preorder, iteratively.

        Bug fix: the original annotations used ``List[int]`` without
        ``from typing import List``, so importing this module raised
        ``NameError``.  String annotations avoid the evaluation.
        """
        res = []
        if root is None:
            return res
        stack = []
        node = root
        while stack or node:
            # Walk down the left spine, visiting each node on the way.
            while node:
                res.append(node.val)
                stack.append(node)
                node = node.left
            # Backtrack to the most recent node and take its right subtree.
            node = stack.pop()
            node = node.right
        return res
| [
"zhangruisg111@163.com"
] | zhangruisg111@163.com |
6515831fad7e7e60012980b3868f8fcee9c1ee01 | 3e50ed55208122b2f8b34e7f26f33c9ef70efce5 | /python/pygame_home/full_screen.py | 15f4ea3d3d06f15105b62e1d69837b5d38a8c3fe | [] | no_license | brady-wang/mac_home | b8343da428a4e6696b89d0e6a53ff0dfc87ffd21 | c56a739c31d3c0f62d26d8512fe1a90c036a1f96 | refs/heads/master | 2023-01-14T11:42:02.544322 | 2019-10-02T11:47:27 | 2019-10-02T11:47:27 | 193,177,718 | 0 | 0 | null | 2023-01-04T13:55:31 | 2019-06-22T01:27:10 | PHP | UTF-8 | Python | false | false | 754 | py | # *_*coding:utf-8 *_*
import pygame
from pygame.locals import *
from sys import exit
background_image = 'images/bk.jpg'
pygame.init()
# 640x480 window, default flags, 32-bit colour depth.
screen = pygame.display.set_mode((640, 480), 0, 32)
# convert() matches the image's pixel format to the display for fast blits.
background = pygame.image.load(background_image).convert()
# Current display state; toggled with the 'f' key.
Fullscreen = False
# Main event loop: runs until the window is closed (QUIT exits the process).
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            exit()
        if event.type == KEYDOWN:
            if event.key == K_f:
                # Toggle between fullscreen and windowed mode by
                # re-creating the display surface with new flags.
                Fullscreen = not Fullscreen
                if Fullscreen:
                    screen = pygame.display.set_mode((640, 480), FULLSCREEN, 32)
                else:
                    screen = pygame.display.set_mode((640, 480), 0, 32)
    # Redraw the background every frame and flip the buffer.
    screen.blit(background, (0, 0))
    pygame.display.update()
| [
"brady.wang@qq.com"
] | brady.wang@qq.com |
b58ad3496928f3809ec4c67f94811bbc30c644b9 | 3f53e38076713ab49fd03a54c7c9d3e21de5eb14 | /Pyrado/scripts/training/bob-d_dql.py | 421de7b102c0d48ccc9e952111e4b2879f71b26f | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | arlene-kuehn/SimuRLacra | 4510473789d1c8927c8d5969a9606238523d5dd7 | 15901f70f0538bce19acdda2a0018984f67cc0fe | refs/heads/master | 2023-01-28T13:10:05.607575 | 2020-12-04T14:47:01 | 2020-12-04T14:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,718 | py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Train an agent to solve the discrete Ball-on-Beam environment using Deep Q-Leaning.
.. note::
The hyper-parameters are not tuned at all!
"""
import torch as to
import pyrado
from pyrado.algorithms.step_based.dql import DQL
from pyrado.environments.pysim.ball_on_beam import BallOnBeamDiscSim
from pyrado.logger.experiment import setup_experiment, save_list_of_dicts_to_yaml
from pyrado.policies.feed_forward.fnn import DiscreteActQValPolicy, FNN
from pyrado.utils.argparser import get_argparser
if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()
    # Experiment
    ex_dir = setup_experiment(BallOnBeamDiscSim.name, f'{DQL.name}_{DiscreteActQValPolicy.name}')
    # Set seed if desired
    pyrado.set_seed(args.seed, verbose=True)
    # Environment: 100 Hz simulation, at most 500 steps per rollout.
    env_hparams = dict(dt=1/100., max_steps=500)
    env = BallOnBeamDiscSim(**env_hparams)
    # Policy: Q-network with two tanh hidden layers of 32 units each.
    policy_hparam = dict(
        hidden_sizes=[32, 32],
        hidden_nonlin=to.tanh
    )
    net = FNN(
        input_size=DiscreteActQValPolicy.get_qfcn_input_size(env.spec),
        output_size=DiscreteActQValPolicy.get_qfcn_output_size(),
        **policy_hparam
    )
    policy = DiscreteActQValPolicy(spec=env.spec, net=net)
    # Algorithm. NOTE: per the module docstring these hyper-parameters
    # are not tuned at all.
    algo_hparam = dict(
        max_iter=5000,
        memory_size=10*env.max_steps,
        eps_init=0.1286,
        eps_schedule_gamma=0.9955,
        gamma=0.998,
        target_update_intvl=5,
        num_batch_updates=20,
        max_grad_norm=0.5,
        min_steps=10,
        batch_size=256,
        num_workers=4,
        lr=7e-4,
    )
    algo = DQL(ex_dir, env, policy, **algo_hparam)
    # Save the hyper-parameters so the run can be reproduced later.
    save_list_of_dicts_to_yaml([
        dict(env=env_hparams, seed=args.seed),
        dict(policy=policy_hparam),
        dict(algo=algo_hparam, algo_name=algo.name)],
        ex_dir
    )
    # Jeeeha
    algo.train(snapshot_mode='best', seed=args.seed)
| [
"fabio.muratore@famura.net"
] | fabio.muratore@famura.net |
f1ff9eb781befbc411938d6dca7c4e91b57ac891 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=0.65_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=94/params.py | 50a976b4643a17024e6aad70bfb0d7da1bcc731c | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.056548',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.65',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 94,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
c3ec0afea7ee4fa6e33481c3719572532fe791c2 | c7f43c4cc0ee84a5fe246b67f51e30b8d726ebd5 | /keras/keras46_MC_1_fashion.py | 1a2eaf22ee6fd437d4d6962fe544b82cd65f86a1 | [] | no_license | 89Mansions/AI_STUDY | d9f8bdf206f14ba41845a082e731ea844d3d9007 | d87c93355c949c462f96e85e8d0e186b0ce49c76 | refs/heads/master | 2023-07-21T19:11:23.539693 | 2021-08-30T08:18:59 | 2021-08-30T08:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,978 | py | # CNN
# fashion_mnist
# from tensorflow.keras.callbacks import ModelCheckpoint
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import fashion_mnist
#1. DATA
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
print(x_train.shape, y_train.shape) # (60000, 28, 28) --> grayscale, the channel dim of 1 can be omitted (60000,)
print(x_test.shape, y_test.shape) # (10000, 28, 28) (10000,)
# print(x_train[0])
# print("y_train[0] : " , y_train[0]) # 9
# print(x_train[0].shape) # (28, 28)
# plt.imshow(x_train[0], 'gray') # 0 : black, ~255 : white (width, height, colour)
# # plt.imshow(x_train[0]) # renders even without specifying a colour map
# plt.show()
# x > preprocessing: add a channel axis and scale pixels to [0, 1].
# print(np.min(x_train),np.max(x_train)) # 0 ~ 255
x_train = x_train.reshape(x_train.shape[0],x_train.shape[1],x_train.shape[2],1)/255.
x_test = x_test.reshape(x_test.shape[0],x_test.shape[1],x_test.shape[2],1)/255.
print(x_train.shape) # (60000, 28, 28, 1)
print(x_test.shape) # (10000, 28, 28, 1)
print(np.min(x_train),np.max(x_train)) # 0.0 ~ 1.0
# y > preprocessing: one-hot encode the 10 class labels.
# print(y_train[:20]) # 0 ~ 9
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape) # (60000, 10)
print(y_test.shape) # (10000, 10)
#2. Modeling: Conv/MaxPool/Dropout stack followed by dense layers.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout
model = Sequential()
# First conv keeps spatial size ('same' padding); input is 28x28x1.
model.add(Conv2D(filters=112, kernel_size=(2,2),padding='same',strides=1,input_shape=(x_train.shape[1],x_train.shape[2],x_train.shape[3])))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=84,kernel_size=(2,2)))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=28,kernel_size=(2,2)))
model.add(Conv2D(filters=28,kernel_size=(2,2)))
model.add(Flatten())
model.add(Dense(20, activation='relu'))
model.add(Dense(10, activation='relu'))
# 10-way softmax output, one unit per fashion-MNIST class.
model.add(Dense(10,activation='softmax'))
# model.summary()
#3. Compile, Train
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
# File path pattern under which the checkpoint weights are saved.
modelpath='../data/modelcheckpoint/k46_1_fashion_{epoch:02d}-{val_loss:.4f}.hdf5'
# 02d : format the epoch as a two-digit integer / .4f : four decimal places.
# Example of a saved file) k45_mnist_37-0.0100.hdf5
# Among the saved files, the one created last is the best one.
es = EarlyStopping(monitor='val_loss', patience=5, mode='max')
# NOTE(review): mode='max' while monitoring 'val_loss' looks wrong — a
# loss should be minimized, i.e. mode='min' (or 'auto'); confirm.
cp = ModelCheckpoint(filepath=modelpath,monitor='val_loss', save_best_only=True, mode='auto')
# filepath : every time a new best value is hit, a file with the saved
# weights is created.
# Ultimate goal : obtaining the optimal weights.
# When predicting or evaluating, it is enough to load these weights.
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
hist = model.fit(x_train, y_train, epochs=15, batch_size=32, validation_split=0.2, callbacks=[es, cp])
#4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=32)
print("loss : ", loss)
print("acc : ", acc)
# Compare true vs predicted class indices for four held-out samples.
print("y_test : ", np.argmax(y_test[-5:-1],axis=1))
y_pred = model.predict(x_test[-5:-1])
print("y_pred : ", np.argmax(y_pred,axis=1))
# 시각화
# import matplotlib.pyplot as plt
# plt.figure(figsize=(10,6)) # 판 사이즈 (가로 10, 세로 6)
# plt.subplot(2, 1, 1) # plot : 도화지 하나에 그림을 그리겠다.
# # 2행 1열 중 첫 번째
# # 만약 (3, 1, 1) 이라면 세 개의 plot이 있어야 한다. (3, 1, 1) (3, 1, 2) (3, 1, 3)
# plt.plot(hist.history['loss'], marker='.', c='red', label='loss')
# plt.plot(hist.history['val_loss'], marker='.', c='blue', label='val_loss')
# plt.grid()
# # plt.title('손실비용') # 과제 : 한글 깨짐 오류 해결할 것
# plt.title('Cost Loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(loc='upper right')
# plt.subplot(2, 1, 2) # 2행 1열 중 두 번째
# plt.plot(hist.history['accuracy'], marker='.', c='red', label='accuracy')
# plt.plot(hist.history['val_accuracy'], marker='.', c='blue', label='val_accuracy')
# plt.grid() # 모눈종이 격자위에 그리겠다.
# # plt.title('정확도') # 과제 : 한글 깨짐 오류 해결할 것
# plt.title('Accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(loc='upper right')
# plt.show()
# CNN
# loss : 0.3053586483001709
# acc : 0.8960000276565552
# y_test : [9 1 8 1]
# y_pred : [9 1 8 1]
# ModelCheckPoint
# loss : 0.32927361130714417
# acc : 0.8809999823570251
# y_test : [9 1 8 1]
# y_pred : [9 1 8 1] | [
"hwangkei0212@gmail.com"
] | hwangkei0212@gmail.com |
66cba8cea9deee015eb75c5dcb0821fc6757469a | 1ef536d93c6616f9793e57a9ebc6b44248d50202 | /move_product_out_to_in/customer_code/models/res_partner.py | 7b6b7f6d481a13d3ea17555ec3913e2e3a386849 | [] | no_license | mohamed4185/Express | 157f21f8eba2b76042f4dbe09e4071e4411342ac | 604aa39a68bfb41165549d605d40a27b9251d742 | refs/heads/master | 2022-04-12T17:04:05.407820 | 2020-03-09T14:02:17 | 2020-03-09T14:02:17 | 246,014,712 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # -*- coding: utf-8 -*-
from odoo import api, fields ,models
from odoo.exceptions import ValidationError
import logging
_logger = logging.getLogger(__name__)
class ResPartner(models.Model):
    # Extend the core Odoo partner model with an extra field.
    _inherit="res.partner"
    # Free-form partner code, at most 10 characters.
    customer_code=fields.Char('Code',size=10)
| [
"mohamed.abdelrahman@businessborderlines.com"
] | mohamed.abdelrahman@businessborderlines.com |
7178d7fb0f535f9f31617612709df1b85386f6ef | 8c92787a518bea3d528641311939137f7f37b56c | /grab/spider/error.py | 056eaeef85e057b70d0019b7e1da520f8fcb7d46 | [
"MIT"
] | permissive | brabadu/grab | 8d973d5052bc60d06d67e1ea82814a939dea6877 | 92b1d68ceeece3087e053064520261a7aef3bd02 | refs/heads/master | 2021-01-17T22:16:16.923189 | 2013-10-02T20:28:44 | 2013-10-02T20:28:44 | 13,282,380 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | __all__ = ('SpiderError', 'SpiderMisuseError', 'FatalError',
'StopTaskProcessing', 'SpiderInternalError',
'NoTaskHandler', 'NoDataHandler')
class SpiderError(Exception):
    """Base class for all Spider framework exceptions."""
class SpiderMisuseError(SpiderError):
    """Improper usage of the Spider framework by client code."""
class FatalError(SpiderError):
    """Fatal error which should stop the parsing process."""
class StopTaskProcessing(SpiderError):
    """
    Used in middlewares to stop task processing
    """
class SpiderInternalError(SpiderError):
    """
    Used to indicate error in some internal spider services
    like spider class discovering, CLI error
    """
class NoTaskHandler(SpiderError):
    """
    Used then it is not possible to find which
    handler should be used to process network response.
    """
class NoDataHandler(SpiderError):
    """
    Used then it is not possible to find which
    handler should be used to process Data object.
    """
| [
"lorien@lorien.name"
] | lorien@lorien.name |
9668b5f349cdc67a5166ee2ddb3178f990d70225 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/Keyboard-Layout-Editor/__init__.py | 3eaf1adc8f98d6f2aa0df9bec87cf4e5246ec300 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | # addon details
# Blender add-on metadata shown in the Add-ons preferences list.
bl_info = {
    "name": "Import: KLE Raw JSON format (.json)",
    "author": "/u/kdem007 /u/jacopods",
    "version": (2, 4),
    "blender": (2, 79, 0),
    "location": "File > Import-Export > Keyboard Layout Editor Raw (.json) ",
    "description": "Import Keyboard Layouts",
    "warning": "",
    "category": "Learnbgame",
}
import bpy
# main addon class: a Blender operator that runs the KLE JSON importer
class JSONImporter(bpy.types.Operator):
    """Load Keyboard Layout data"""
    # Identifier and label under which Blender registers the operator.
    bl_idname = "import_mesh.json"
    bl_label = "Import KLE Raw JSON"
    bl_options = {'UNDO'}
    # Path chosen in the file browser (filled in by fileselect_add).
    filepath = bpy.props.StringProperty(
        subtype='FILE_PATH',
    )
    # Restrict the file browser to .json files.
    filter_glob = bpy.props.StringProperty(
        default="*.json", options={'HIDDEN'})
    def execute(self, context):
        # Imported lazily, so the importer module is only loaded when
        # the operator actually runs.
        from . import import_keyboard
        import_keyboard.read(self.filepath)
        return {'FINISHED'}
    def invoke(self, context, event):
        # Open Blender's file-select dialog; execute() runs afterwards.
        wm = context.window_manager
        wm.fileselect_add(self)
        return {'RUNNING_MODAL'}
# add the operator to the File > Import menu
def menu_import(self, context):
    self.layout.operator(JSONImporter.bl_idname, text="KLE Raw Data (.json)")
# register addon: make the operator known to Blender and extend the menu
def register():
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_import)
# unregister addon: undo both registration steps
def unregister():
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_import)
if __name__ == "__main__":
    register()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
9a4bc2cd59a6a8bf06f6eca10852f04a5b9a2621 | dde8b97eee29cd6af17082cf84773d50bea7ca42 | /WHAnalysis/Configuration/python/skimming_ett_cff.py | 9894ce4a59888484a2e68d200d588496a4f1e0a8 | [] | no_license | calabria/WHAnalysis | 557cee96fe1dfe221a3a76f99b92f59c0800a8eb | 6cdcc0b73d94261f5ff7822b8bf5e48bc08268ae | refs/heads/master | 2021-01-23T13:36:11.593683 | 2014-04-12T10:39:44 | 2014-04-12T10:39:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | import FWCore.ParameterSet.Config as cms
from electronHistos_ett_cff import *
# Electron skim: pT > 24 GeV, |eta| < 2.1, PF relative isolation tighter
# in the endcap (isEE, < 0.1) than in the barrel (isEB, < 0.15).
skimmedElectrons = cms.EDFilter("PATElectronSelector",
    src = cms.InputTag("electronVariables"),
    cut = cms.string('pt > 24. && abs(eta) < 2.1 && ((isEB && userFloat("PFRelIsoDB04") < 0.15) || (isEE && userFloat("PFRelIsoDB04") < 0.1))'),
    filter = cms.bool(True)
    )
# Tau skim: pT > 30 GeV, |eta| < 2.3, decay-mode finding plus loose
# combined isolation and loose muon/electron rejection discriminators.
skimmedTaus = cms.EDFilter("PATTauSelector",
    src = cms.InputTag("tauVariables"),
    cut = cms.string('pt > 30.0 && abs(eta) < 2.3 && tauID("decayModeFinding") > 0.5 && tauID("byLooseCombinedIsolationDeltaBetaCorr") > 0.5 && tauID("againstMuonLoose") > 0.5 && tauID("againstElectronLoose") > 0.5'),
    filter = cms.bool(True)
    )
# Run both filters; with filter=True an event must pass each of them.
skimmingSequence = cms.Sequence(
    skimmedElectrons *
    skimmedTaus
    )
| [
"cesare.calabria23@gmail.com"
] | cesare.calabria23@gmail.com |
6bc04cbe9bef936e8b03f30257dd980642088635 | e70b678712a355a0b51632728c7781b0bdcf29f4 | /Algorithms/Python/Next-Permutation.py | 25484560a873d416844a871ab406890796b55883 | [] | no_license | keyi/Leetcode_Solutions | b3e3c6835ed335d7d4ad53a1b37e59ac15fcf3af | 69e4e969b435ff2796bd7c4b5dad9284a853ab54 | refs/heads/master | 2020-05-21T23:36:20.450053 | 2018-11-11T03:45:28 | 2018-11-11T03:45:28 | 33,714,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
left, right = -1, -1
for i in range(len(nums) - 1):
if nums[i] < nums[i + 1]:
left = i
if left == -1:
nums.reverse()
return
for i in range(left + 1, len(nums)):
if nums[i] > nums[left]:
right = i
nums[left], nums[right] = nums[right], nums[left]
nums[left + 1:] = nums[:left: -1]
| [
"yike921012@gmail.com"
] | yike921012@gmail.com |
2c46f60d2fd89cab8c491fad1b4dd269924164bf | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/977.py | 29a7741a218216554f1a711e65d07a2ace5e8d80 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import math
def isPal(n):
    """Return True when n is a whole number whose decimal representation
    reads the same forwards and backwards (a palindrome)."""
    # Reject non-integral values (e.g. the sqrt of a non-square number).
    if n%1 != 0: return False
    # Python 2 code: relies on long() and xrange().
    sn = str(long(n))
    # Compare the i-th digit from the front with the i-th from the back.
    for i in xrange(0, len(sn)):
        if (sn[i] != sn[-i-1]): return False
    return True
if __name__ == "__main__":
T = int(raw_input())
for c in xrange(1,T+1):
[A, B] = map(lambda x: long(x), raw_input().split())
cnt = 0
for i in xrange(A,B+1L):
if (isPal(i) and isPal(math.sqrt(i))): cnt+=1
print 'Case #%d: %d' % (c, cnt)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
2da7102a1a8c84e25261aa94efbfe23b159ec9aa | f769e0a8e80f604502a63ae0073b8e95c771bad8 | /blog_app/urls.py | 75de32e6b39b93bb047cf5261111293302901881 | [] | no_license | Justprince234/blog | 6c318f005346086b2df28741e689a032646d1c9f | 25004f3731045384dd423f8e6375a5e609fd5548 | refs/heads/master | 2023-01-23T14:07:36.119180 | 2020-08-06T00:57:07 | 2020-08-06T00:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from django.urls import path
from blog_app import views
# Namespace used when reversing URLs, e.g. 'blog_app:post_detail'.
app_name = 'blog_app'
# Order matters: the catch-all '<slug:slug>/' pattern must stay after
# the more specific 'category/...' and 'search/' routes.
urlpatterns = [
    path('category/<slug:slug>', views.blog_category, name='blog_category'),
    path('search/', views.search, name='search'),
    # Any remaining single-segment slug is treated as a post detail page.
    path('<slug:slug>/', views.post_detail, name='post_detail'),
    # Empty path: the blog home / post list.
    path('', views.post_list, name='home'),
] | [
"princewilljackson@ymail.com"
] | princewilljackson@ymail.com |
79ace01feaaf47a4d80a5259ed902b36c4c7207c | 9028b6983685a3ace074049fccf2b8c503b77de8 | /PyStationB/libraries/GlobalPenalisation/gp/base/chain_rule.py | c6d02e92c609b00f5467ad8833e4b3110a301889 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | mebristo/station-b-libraries | 7f5517e5e77e6cdc54c03355804b8c0a4fcae65b | 40bab526af6562653c42dbb32b174524c44ce2ba | refs/heads/main | 2023-09-03T03:54:53.181082 | 2021-10-01T03:21:11 | 2021-10-01T03:21:11 | 412,871,835 | 0 | 0 | MIT | 2021-10-02T17:53:07 | 2021-10-02T17:53:06 | null | UTF-8 | Python | false | false | 3,144 | py | # -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import numpy as np
def chain_rule_means_vars(d_acq_dy: np.ndarray, dy_dx: np.ndarray) -> np.ndarray:
    """Chain rule through a one-dimensional per-candidate variable.

    Args:
        d_acq_dy: gradient of the acquisition with respect to a scalar
            variable per candidate (e.g. mean, var or std).
            Shape (n_candidates, 1)
        dy_dx: gradient of that variable with respect to the inputs.
            Shape (n_candidates, n_inputs)
    Returns:
        d_acq_dx of shape (n_candidates, n_inputs). This is only the
        contribution through ``y``; if ``acq`` depends on other
        variables as well, those terms must be added separately.
    """
    # Broadcasting (n, 1) * (n, k): each row of dy_dx is scaled by the
    # matching scalar gradient.
    return np.multiply(d_acq_dy, dy_dx)
def chain_rule_cross_covariance(d_acq_d_cov: np.ndarray, d_cov_dx: np.ndarray) -> np.ndarray:
    """Chain rule through the candidate/selected cross-covariance matrix.

    Args:
        d_acq_d_cov: gradient of the acquisition with respect to the
            covariance between candidates and selected points.
            Shape (n_candidates, n_selected)
        d_cov_dx: gradient of that covariance with respect to the inputs.
            Shape (n_candidates, n_selected, n_inputs)
    Returns:
        Shape (n_candidates, n_inputs): the contribution through the
        cross-covariance only; other dependencies are not included.
    """
    # Contract the n_selected axis: out[i, k] = sum_j dA[i, j] * dC[i, j, k].
    return (d_acq_d_cov[:, :, None] * d_cov_dx).sum(axis=1)
def chain_rule_means_from_predict_joint(d_acq_d_means: np.ndarray, d_means_dx: np.ndarray) -> np.ndarray:
    """Chain rule through the joint means vector.

    Args:
        d_acq_d_means: gradient of the acquisition with respect to the
            means vector. Shape (n_points, 1).
        d_means_dx: gradient of the means vector with respect to the
            inputs. Shape (n_points, n_points, input_dim)
    Returns:
        The part of d_acq_dx obtained through the means vector.
        Shape (n_points, input_dim)
    """
    # Contract the leading n_points axis: out[k, l] = sum_i v[i] * T[i, k, l].
    return np.tensordot(d_acq_d_means.ravel(), d_means_dx, axes=(0, 0))
def chain_rule_covariance(d_acq_d_covariance: np.ndarray, d_covariance_dx: np.ndarray) -> np.ndarray:
    """Chain rule through the full covariance matrix.

    Args:
        d_acq_d_covariance: gradients of the acquisition with respect to
            the covariance matrix. Shape (n_points, n_points)
        d_covariance_dx: gradients of the covariance entries with
            respect to the inputs.
            Shape (n_points, n_points, n_points, input_dim)
    Returns:
        The part of d_acq_dx obtained through the covariance.
        Shape (n_points, input_dim)
    """
    # Double contraction over both covariance axes:
    # out[k, l] = sum_{i,j} dA[i, j] * dC[i, j, k, l].
    return np.tensordot(d_acq_d_covariance, d_covariance_dx, axes=([0, 1], [0, 1]))
| [
"noreply@github.com"
] | mebristo.noreply@github.com |
78d810e3b5a2e9540be39cafd4404716991161ff | 119f87ff16278614dce6571a451c54b839a4bead | /catalyst/utils/meters/ppv_tpr_f1_meter.py | 3fb52f541fa221d5341855724b7597f9e2967f65 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | valyukov/catalyst | 17e6243cd2b0f9c790645647b7946ef05c1b57fa | a1d2638f22ff7bede74410baeb8bc6a7aff036df | refs/heads/master | 2020-08-04T17:01:27.565749 | 2020-02-15T13:33:44 | 2020-02-15T13:33:44 | 212,212,412 | 0 | 0 | Apache-2.0 | 2019-10-01T22:39:05 | 2019-10-01T22:39:05 | null | UTF-8 | Python | false | false | 3,662 | py | from collections import defaultdict
import torch
from . import meter
def f1score(precision_value, recall_value, eps=1e-5):
    """Harmonic mean of precision and recall (the F1 score).

    Args:
        precision_value: precision (0-1)
        recall_value: recall (0-1)
        eps: small constant guarding against division by zero
    Returns:
        F1 score (0-1)
    """
    # F1 = 2PR / (P + R); eps keeps the quotient finite when both
    # precision and recall are zero.
    return 2 * (precision_value * recall_value) / (precision_value + recall_value + eps)
def precision(tp, fp, eps=1e-5):
    """Precision (positive predictive value) for binary tasks.

    Args:
        tp: number of true positives
        fp: number of false positives
        eps: small constant guarding against division by zero
    Returns:
        precision value (0-1)
    """
    # Defined as 1 - FDR (false discovery rate) so that two empty masks
    # (tp == fp == 0) score 1 instead of 0 under the naive
    # tp / (tp + fp) formula.
    false_discovery_rate = fp / (tp + fp + eps)
    return 1 - false_discovery_rate
def recall(tp, fn, eps=1e-5):
    """Recall (true positive rate) for binary tasks.

    Args:
        tp: number of true positives
        fn: number of false negatives
        eps: small constant guarding against division by zero
    Returns:
        recall value (0-1)
    """
    # Defined as 1 - FNR (false negative rate) so that two empty masks
    # (tp == fn == 0) score 1 instead of 0 under the naive
    # tp / (tp + fn) formula.
    false_negative_rate = fn / (fn + tp + eps)
    return 1 - false_negative_rate
class PrecisionRecallF1ScoreMeter(meter.Meter):
    """
    Accumulates global true-positive, false-positive and false-negative
    counts across an epoch and derives precision, recall and F1-score
    from those totals. Binary tasks only; use one meter instance per
    label for multi-label problems.
    """
    def __init__(self, threshold=0.5):
        super(PrecisionRecallF1ScoreMeter, self).__init__()
        # Predictions above this value count as positive.
        self.threshold = threshold
        self.reset()
    def reset(self):
        """
        Resets true positive, false positive and false negative counts to 0.
        """
        # defaultdict(int) lets += work before a key has ever been set.
        self.tp_fp_fn_counts = defaultdict(int)
    def add(self, output, target):
        """
        Thresholds the predictions and accumulates tp/fp/fn against the
        binary target.

        Args:
            output (torch.Tensor): prediction after the activation
                function; any shape, as long as it matches ``target``.
            target (torch.Tensor): binary label of the same shape.
        Returns:
            None
        """
        predicted = (output > self.threshold).float()
        true_positives = torch.sum(target * predicted)
        self.tp_fp_fn_counts["tp"] += true_positives
        self.tp_fp_fn_counts["fp"] += torch.sum(predicted) - true_positives
        self.tp_fp_fn_counts["fn"] += torch.sum(target) - true_positives
    def value(self):
        """
        Computes precision/recall/f1 from the accumulated counts.

        Returns:
            tuple of floats: (precision, recall, f1)
        """
        counts = self.tp_fp_fn_counts
        ppv = precision(counts["tp"], counts["fp"])
        tpr = recall(counts["tp"], counts["fn"])
        return (float(ppv), float(tpr), float(f1score(ppv, tpr)))
| [
"scitator@gmail.com"
] | scitator@gmail.com |
59fd2d2296999f253b25fe78c0624d986b164c3f | 7911da973079f325a515cd2ee66f7590a9f32e48 | /great_divice.py | 5edc06a2eac8745bdbb62b1d2d89b3b76c151283 | [] | no_license | Ponkiruthika112/Guvi | 5d2ff3dcf55d6c52c0f09a1e577d8b11632c7a92 | 319e5b4dab5654fabc25ef15c1d528f76d833c15 | refs/heads/master | 2020-04-21T06:05:03.581658 | 2018-08-02T05:53:48 | 2018-08-02T05:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | n1=int(input("N1 value:"))
n2=int(input("N2 value:"))
maximum=1
for x in range(1,min(n1,n2)+1):
if n1%x==0 and n2%x==0:
if x>maximum:
maximum=x
print("Ans is:",maximum)
| [
"noreply@github.com"
] | Ponkiruthika112.noreply@github.com |
905ef18e702d20a1fcff2e1cadc2674abfa4e3af | 6dae31f10260e39feae9d268e3ebe6d23146575a | /galaxy/bin_eBOSS_ELG/create_stack_list_ELG_all.py | 55cc2990fa24df1a48b2ead2e16fef481eaded1d | [
"CC0-1.0"
] | permissive | JohanComparat/pySU | e55eba92f0660e733468bce618595a03dc25a3d2 | 4169e11414be661dc0c01c774e64fb8ce6242825 | refs/heads/master | 2021-12-25T11:06:04.315554 | 2021-10-11T12:03:22 | 2021-10-11T12:03:22 | 44,340,565 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | #! /usr/bin/env python
"""
This script produces the stacks for emission line luminosity limited samples.
"""
import sys
import os
from os.path import join
import glob
import numpy as n
import astropy.io.fits as fits
import SpectraStackingEBOSS as sse
from scipy.interpolate import interp1d
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115)#, Ob0=0.048206)
# create all input files :
#path_2_cat = join(os.environ['HOME'],"SDSS/lss/catalogs/3", "inputs/ELG.v5_10_10.all.fits")
path_2_cat = join(os.environ['HOME'],"SDSS/lss/catalogs/4", "inputs/ELG.v5_11_0.rrv2.all.fits")
# First FITS extension holds the ELG catalogue table.
cat = fits.open(path_2_cat)[1].data
Ngal = len(cat)
N_in_stack = 200000
N_factor = 4
#bins_2nd = n.arange(N_in_stack, N_in_stack*N_factor, N_in_stack)
print(Ngal)
#print(bins_2nd)
# Redshift histogram and its cumulative counts, used to derive bin
# edges that contain equal numbers of galaxies.
NNN,BBB=n.histogram(cat['Z'], bins=n.arange(0,4,0.001))
N_CM = n.cumsum(NNN)
N_bins = n.arange(N_in_stack*N_factor, N_CM.max(), N_in_stack*N_factor)
itp = interp1d(N_CM, BBB[:-1])
# NOTE(review): z_mins / z_maxs are computed but not used below —
# the selection uses the fixed z0, z1 instead; confirm this is intended.
z_mins = itp(N_bins)[:-1]
z_maxs = itp(N_bins)[1:]
# CREATES A few stacks as a function of [OII] EW
z0,z1 = 0.2, 1.5
# Keep galaxies with a confident redrock redshift inside (z0, z1).
selection = (cat['rr_Z']>z0) & (cat['rr_Z']<z1) & (cat['rr_ZWARN']<=4)
ids_sort = n.argsort(cat['rr_Z'][selection])
# Columns: plate, MJD, fiber id, redshift — sorted by redshift.
DATA = n.transpose([ cat['plate'], cat['MJD'], cat['FIBERID'], cat['rr_Z'] ]) [selection][ids_sort]
path_2_input = join(os.environ['HOME'],"SDSS/stacks", "eboss-elg_"+str(z0)+"_z_"+str(z1)+".asc")
print(path_2_input)
print(len(DATA))
n.savetxt(path_2_input, DATA)
| [
"johan.comparat@gmail.com"
] | johan.comparat@gmail.com |
da2c73030f131e6a6657b73707e086a447727784 | 558e979b7c6d5dc2599453392ed624265a831d0d | /glamazer/favorites/models.py | cdc9a8d82dafb91981adcda84c1f1522fd8fdf14 | [] | no_license | SpeeDly/partytask | af08ca089d518bc0d09dda61e68ce3c1d117ab2b | c9a813bc130c41995140adaa4a19344791b89968 | refs/heads/master | 2021-01-10T15:42:41.761160 | 2016-02-15T13:48:13 | 2016-02-15T13:48:13 | 51,758,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from django.db import models
from django.contrib.auth.models import User
from glamazer.listings.models import Listing
class Favorite(models.Model):
    # The user who favourited the listing.
    user = models.ForeignKey(User)
    # The listing being favourited.
    listing = models.ForeignKey(Listing)
date = models.DateTimeField(auto_now_add=True) | [
"zhuhov@gmail.com"
] | zhuhov@gmail.com |
b109cb8b121bd51d0db13746d6f51a2a11d5ce4e | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa2/sample/class_def_attr-107.py | 7f478ef28ce91a5201adf88cbe7f4d1ad105b948 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | class A(object):
x:int = 1
class B(A):
def __init__(self: "B"):
pass
class C(B):
z:bool = True
a:A = None
b:B = None
c:C = None
a = A()
b = B()
c = C()
$Statement
b.x = a.x
c.z = a.x == b.x
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
361eb11c018dc172bfabcdcd14403d0fe324b70b | 4717e299b70d658f607becacd5f202cdba904181 | /scripts/mount-image-partitions.sh | f17ca7525822a6916e1d6b07b2ee4de504e51ea7 | [
"MIT"
] | permissive | deets/yocto-pi-lap-timer | 8ea4840320675cb3c22a73fc25199fb96d379f9b | 857ea7e27ed3df979fbf22129da428430a40426c | refs/heads/master | 2023-01-24T06:36:28.649809 | 2023-01-01T18:13:37 | 2023-01-01T18:13:37 | 245,871,535 | 0 | 0 | MIT | 2023-01-20T22:30:26 | 2020-03-08T19:04:35 | C++ | UTF-8 | Python | false | false | 3,685 | sh | #!/usr/bin/env python3
# -*- mode: python -*-
# Copyright: 2020, Diez B. Roggisch, Berlin . All rights reserved.
import argparse
import json
import logging
import os
import subprocess
import sys
USAGE = """Images containing several partitions are
cumbersome to inspect. You need to figure out partition block ranges,
and mount these using the loopback device.
This script encapsulates this in a convenient package and prints out
the needed umount commands."""
def collect_partitions_dims(image):
p = subprocess.run(
[
"fdisk",
"-usectors",
"-l", image,
],
check=True,
stdout=subprocess.PIPE,
)
def safe_int(value):
try:
return int(value)
except ValueError:
return None
partitions = [
line for line in p.stdout.decode("ascii").split("\n")
if line.startswith(image)
]
dims = [
[safe_int(v) for v in p.split() if safe_int(v) is not None][0:3:2]
for p in partitions
]
return dims
def parse_args():
parser = argparse.ArgumentParser(usage=USAGE)
parser.add_argument("image", help="The disk image to mount")
parser.add_argument(
"--json",
action="store_true",
help="When given, write JSON data with mountpoints instead of umount commands",
)
parser.add_argument(
"--prefix",
help="Where to mount the partitions - defaults to /media",
default="/media",
)
parser.add_argument(
"--name",
help="Name of the partitions (will be appended with p0, p1, ..)."
" Defaults to the basename of the image.",
)
parser.add_argument(
"--sudo",
action="store_true",
help="Prepend 'sudo' to the mount commands executed by this script",
)
parser.add_argument(
"-n", "--dry-run", action="store_true", help="Don't actually mount, only pretend"
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="More verbose output."
)
return parser.parse_args()
def main():
opts = parse_args()
logging.basicConfig(
level=logging.DEBUG if opts.verbose else logging.INFO,
stream=sys.stderr,
)
if opts.name is None:
opts.name = os.path.splitext(os.path.basename(opts.image))[0]
partition_dims = collect_partitions_dims(opts.image)
mountpoints = []
pending_error = None
for i, (offset, size) in enumerate(partition_dims):
logging.debug(f"partition {i}-> offset: {offset}, size: {size}")
mountpoint = "{}/{}p{}".format(opts.prefix, opts.name, i)
if not os.path.exists(mountpoint) and not opts.dry_run:
os.mkdir(mountpoint)
cmd = [
"mount",
"-o",
"rw,loop,offset={},sizelimit={}".format(offset * 512, size * 512),
"-t",
"auto",
opts.image,
mountpoint,
]
if opts.sudo:
cmd.insert(0, "sudo")
try:
if not opts.dry_run:
subprocess.run(cmd, check=True)
mountpoints.append(mountpoint)
else:
logging.debug(" ".join(cmd))
except subprocess.CalledProcessError as e:
logging.exception("Failed to mount partition p%d", i)
pending_error = e
if opts.json:
print(json.dumps(mountpoints))
else:
for x in mountpoints:
print(f"{'sudo' if opts.sudo else ''} umount {x}")
if pending_error:
logging.error("One or more partitions failed to mount")
sys.exit(1)
if __name__ == "__main__":
main()
| [
"deets@web.de"
] | deets@web.de |
ed149966b9509c17ce4cb3de3841e639cb2c9a4b | 88ae8695987ada722184307301e221e1ba3cc2fa | /chrome/browser/ui/webui/side_panel/read_anything/DEPS | 0d08619c1aa0d97e9806133820d5e14f04bdfaf3 | [
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 230 | include_rules = [
"+chrome/common/accessibility",
]
specific_include_rules = {
"read_anything_page_handler\.*": [
"+chrome/browser/ui/views/side_panel/read_anything",
"+chrome/browser/ui/views/frame/browser_view.h",
],
}
| [
"jengelh@inai.de"
] | jengelh@inai.de | |
54f7efa4be54b7bfa4d320fc695d8ebaee3721de | d191a04a3ded41175ea84ae88ebddb4f262b7fb1 | /tree/leaf-similar_trees.py | f3e2de3f3491a207853a3eb9634c2e7fd4401330 | [] | no_license | YLyeliang/now_leet_code_practice | ae4aea945bae72ec08b11e57a8f8a3e81e704a54 | 204d770e095aec43800a9771fe88dd553463d2f7 | refs/heads/master | 2022-06-13T20:22:51.266813 | 2022-05-24T05:29:32 | 2022-05-24T05:29:32 | 205,753,056 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | # Consider all the leaves of a binary tree. From left to right order, the values of those leaves form a leaf value sequence.
#
# 3
# 5 1
# 6 2 9 8
# 7 4
# For example, in the given tree above, the leaf value sequence is (6, 7, 4, 9, 8).
#
# Two binary trees are considered leaf-similar if their leaf value sequence is the same.
#
# Return true if and only if the two given trees with head nodes root1 and root2 are leaf-similar.
#
#
#
# Constraints:
#
# Both of the given trees will have between 1 and 200 nodes.
# Both of the given trees will have values between 0 and 200
# 分析:这个问题需要解决的是给定两个二叉树,每个二叉树从左到右的叶节点构成一个数组。问两个二叉树的数组是否一致。
# 最直接的解决方法就是利用DFS遍历两棵树,并将叶节点的值保存到两个数组中,对比两个数组是否一致,该方法需要完全遍历两棵树,需要O(2N)time,O(2k)space,k为叶结点数。
# 优化:在遍历过程中,如果有任意一个有序的结点值不相等,则返回false.且可以不使用数组进行保存。考虑到两颗二叉树的叶节点位置不一定相同,找到叶节点需要迭代的次数也不一致。
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
res1=[]
res2=[]
def dfs(root,res):
if not root:return None
if not root.left and not root.right:res.append(root.val)
dfs(root.left,res)
dfs(root.right,res)
dfs(root1,res1)
dfs(root2,res2)
return True if res1==res2 else False
# 优化,time:最坏情况下O(2n); space:O(1)
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
def dfs(root):
if not root:return []
if not (root.left or root.right):return [root.val]
return dfs(root.left)+dfs(root.right)
return dfs(root1)==dfs(root2)
| [
"k87974@163.com"
] | k87974@163.com |
832551fed04256accf01a92952917bc2b13db83a | 772b0df2635b95644ea3eb370103174804024167 | /scripts/exonU.py | ae8f5252c8e3b2a19529d53f4cea5dae88284b92 | [
"MIT"
] | permissive | 4dn-dcic/clodius | ec909bda90a9df13fa1b85472951f6cf149213a5 | aa31b3d90a5a9fec883c20cab31ad4d347cd52cd | refs/heads/develop | 2020-04-17T23:31:32.114043 | 2019-04-02T14:01:46 | 2019-04-02T14:01:46 | 167,038,915 | 0 | 0 | MIT | 2019-03-28T20:10:46 | 2019-01-22T17:43:32 | Python | UTF-8 | Python | false | false | 4,095 | py | from __future__ import print_function
__author__ = "Alaleh Azhir,Peter Kerpedjiev"
#!/usr/bin/python
import collections as col
import sys
import argparse
class GeneInfo:
def __init__(self):
pass
def merge_gene_info(gene_infos, gene_info):
'''
Add a new gene_info. If it's txStart and txEnd overlap with a previous entry for this
gene, combine them.
'''
merged = False
for existing_gene_info in gene_infos[gene_info.geneId]:
if (existing_gene_info.chrName == gene_info.chrName and
existing_gene_info.txEnd > gene_info.txStart and
gene_info.txEnd > existing_gene_info.txStart):
# overlapping genes, merge the exons of the second into the first
existing_gene_info.txStart = min(existing_gene_info.txStart,
gene_info.txStart)
existing_gene_info.txEnd = max(existing_gene_info.txEnd,
gene_info.txEnd)
for (exon_start, exon_end) in gene_info.exonUnions:
existing_gene_info.exonUnions.add((exon_start, exon_end))
merged = True
if not merged:
gene_infos[gene_info.geneId].append(gene_info)
return gene_infos
def main():
parser = argparse.ArgumentParser(description="""
python ExonUnion.py Calculate the union of the exons of a list
of transcript.
chr10 27035524 27150016 ABI1 76 - NM_001178120 10006 protein-coding abl-interactor 1 27037498 27149792 10 27035524,27040526,27047990,27054146,27057780,27059173,27060003,27065993,27112066,27149675, 27037674,27040712,27048164,27054247,27057921,27059274,27060018,27066170,27112234,27150016,
""")
parser.add_argument('transcript_bed')
#parser.add_argument('-o', '--options', default='yo',
# help="Some option", type='str')
#parser.add_argument('-u', '--useless', action='store_true',
# help='Another useless option')
args = parser.parse_args()
inputFile = open(args.transcript_bed, 'r')
gene_infos = col.defaultdict(list)
for line in inputFile:
words = line.strip().split("\t")
gene_info = GeneInfo()
try:
gene_info.chrName = words[0]
gene_info.txStart = words[1]
gene_info.txEnd = words[2]
gene_info.geneName = words[3]
gene_info.score = words[4]
gene_info.strand = words[5]
gene_info.refseqId = words[6]
gene_info.geneId = words[7]
gene_info.geneType = words[8]
gene_info.geneDesc = words[9]
gene_info.cdsStart = words[10]
gene_info.cdsEnd = words[11]
gene_info.exonStarts = words[12]
gene_info.exonEnds = words[13]
except:
print("ERROR: line:", line, file=sys.stderr)
continue
# for some reason, exon starts and ends have trailing commas
gene_info.exonStartParts = gene_info.exonStarts.strip(",").split(',')
gene_info.exonEndParts = gene_info.exonEnds.strip(",").split(',')
gene_info.exonUnions = set([(int(s), int(e)) for (s,e) in zip(gene_info.exonStartParts, gene_info.exonEndParts)])
# add this gene info by checking whether it overlaps with any existing ones
gene_infos = merge_gene_info(gene_infos, gene_info)
for gene_id in gene_infos:
for contig in gene_infos[gene_id]:
output = "\t".join(map(str, [contig.chrName, contig.txStart, contig.txEnd,
contig.geneName, contig.score, contig.strand,
'union_' + gene_id, gene_id, contig.geneType, contig.geneDesc,
contig.cdsStart, contig.cdsEnd,
",".join([str(e[0]) for e in sorted(contig.exonUnions)]),
",".join([str(e[1]) for e in sorted(contig.exonUnions)])]))
print(output)
if __name__ == '__main__':
main()
| [
"pkerpedjiev@gmail.com"
] | pkerpedjiev@gmail.com |
6d26e7d8a39dea94a32fcdd63dc99c2a597f95bb | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2821/60651/234231.py | f4edee98f21804eb117fe388b3c8b273936dceec | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | n=int(input())
list1=input().split()
list1=[int(x) for x in list1]
p1=0
p2=0
while(len(list1)>1):
if list1[0]>=list1[len(list1)-1]:
p1+=list1[0]
del(list1[0])
else:
p1+=list1[len(list1)]
del(list1[len(list1)])
if list1[0]>=list1[len(list1)-1]:
p2+=list1[0]
del(list1[0])
else:
p2+=list1[len(list1)]
del(list1[len(list1)])
if len(list1)==1:
p1+=list1[0]
print(str(p1)+" "+str(p2))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
75a398246bbdcce760ca60cf2878fa345bbdca0c | 099256b28df65fb7c90c077b060dca16b8655235 | /reinforcement_learning/0x00-q_learning/2-epsilon_greedy.py | 2430cee4b9e0d6d3beea190f0e178793a8c3ec3b | [] | no_license | Immaannn2222/holbertonschool-machine_learning | 1cebb9a889b363669bed7645d102dc56ab943c08 | 80bf8d3354702f7fb9f79bbb5ed7e00fc19f788d | refs/heads/master | 2023-08-01T05:35:00.180472 | 2021-09-22T20:28:17 | 2021-09-22T20:28:17 | 317,624,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #!/usr/bin/env python3
"""Q-Learning"""
import gym
import numpy as np
def epsilon_greedy(Q, state, epsilon):
"""uses epsilon-greedy to determine the next action"""
p = np.random.uniform(0, 1)
if p < epsilon:
action = np.random.randint(Q.shape[1])
else:
action = np.argmax(Q[state, :])
return action
| [
"imennaayari@gmail.com"
] | imennaayari@gmail.com |
03677753672e492a352d7591d8b920b07ca19949 | b96f1bad8a74d31d8ff79bc955813bfcd17d7b26 | /560. Subarray Sum Equals K.py | 7388bfa4045464b95f7ef4a1835a360ec064851a | [] | no_license | brianhu0716/LeetCode-Solution | e7177af15e84e833ce8ab05027683ed4ac489643 | 158a4359c90b723545b22c4898047274cc1b80a6 | refs/heads/main | 2023-07-11T05:29:56.783795 | 2021-08-28T12:53:14 | 2021-08-28T12:53:14 | 374,991,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,527 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 4 18:21:15 2021
@author: Brian
"""
'''
前綴和(Prefix Sum)的概念,前綴和是指由index = 0至index = i(i < len(nums))所累加成的數字組成的數列,此種概念非常適合解
這類序列中有正有負,又要求必須要連續的題型,以此題為例,目標是要找到和為k的子序列個數,因此我們先建立一個由前綴和為key值,
出現次數為value值的表,每當我們計算一次前綴和後我們同時可以查表中是否有出現當前prefixSum - k的key出現,有的話代表prefixSum - k
對應的次數即為出現再prefixSum之前且與prefixSum相差k的連續字序列的個數,答案就可以再不斷更新prefixSum的同時累加prefixSum - k對應
的value後得到。如果希望看到prefixSum - k以及prefixSum出現位置可以參照註解部分的程式碼,再更新完prefixSum的字典後,依次查詢key與
key - k是否成對存在,如果都存在,檢驗key中的idx_b是否有大於key - k中的idx_f,有的話加1;這樣的寫法在初始化字典時先給出{0 : [-1]}
的值,代表在位置-1時前綴和為0
*** 關於初始化值的問題可以參考這的範例nums = [3,...],k = 3,如果不先初始化前綴和為0的位置或次數,答案一定會少算一個因為在index
為0的時候,predixSum為3,對應的prefixSum - k 等於0,如果不先初始化就查無鍵值,直接少加1次正確答案
'''
nums = [1,-1,0]
k = 0
nums = [1,1,1]
k = 2
nums = [1,2,3]
k = 3
class Solution:
def subarraySum(self, nums, k: int) -> int:
occur,prefixSum,ans = {0 : 1},0,0 # {0 : 1}的意思是再index = 0之前的前綴和 = 0,出現一次
for num in nums:
prefixSum += num
ans += occur.get(prefixSum - k,0) # 一定要先計算prefixSum - k的個數,避免k = prefixSum = 0的狀況會出現錯誤
occur[prefixSum] = occur.get(prefixSum,0) + 1
return ans
'''
occur,prefixSum,ans = {0 : [-1]},0,0
for i in range(len(nums)):
prefixSum += nums[i]
if prefixSum not in occur.keys():
occur[prefixSum] = [i]
else:
occur[prefixSum].append(i)
for key in occur.keys():
if key - k in occur.keys():
for idx_b in occur[key]:
for idx_f in occur[key - k]:
if idx_b > idx_f:
ans += 1
return ans
'''
| [
"85205343+brianhu0716@users.noreply.github.com"
] | 85205343+brianhu0716@users.noreply.github.com |
c2a8f3da8dd2bd5019bd6fc5e761b3e7657d292d | a439ca43178d38cfe6daaee50ea134ca6c52b502 | /thaniya_server_upload/src/thaniya_server_upload/__init__.py | bc23281f002d4aaf2bd5c044627665570e87f050 | [
"Apache-2.0"
] | permissive | jkpubsrc/Thaniya | 37ca727abdc6f9f605257813889fe3a033995bba | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | refs/heads/master | 2023-03-05T20:58:59.528746 | 2021-02-15T19:31:06 | 2021-02-15T19:31:06 | 331,318,787 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py |
__version__ = "0.2021.1.20"
from .UploadHttpdCfg import UploadHttpdCfg
from .IAppRuntimeUserMgr import IAppRuntimeUserMgr
from .AppRuntimeUploadHttpd import AppRuntimeUploadHttpd
| [
"pubsrc@binary-overflow.de"
] | pubsrc@binary-overflow.de |
a9ffb5601e405aada062d39d769a9c49544fc474 | 092056c026f3ef162c31bca004a596bbe78948e9 | /w261/wk3/reducer_hw32.py | 10741e7585b2d326ecaeb8047f54c062d1939548 | [] | no_license | sayantansatpati/ml | 4138bbafd216a8ad848a56e4818163649a28b6a9 | 9f1765b716f39a1ef159db98b2813761bbc14b60 | refs/heads/master | 2021-01-19T03:19:42.734130 | 2019-03-12T15:44:15 | 2019-03-12T15:44:15 | 36,243,314 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,235 | py | #!/usr/bin/python
import sys
import re
import heapq
itemset_1_cnt = 0
itemset_2_cnt = 0
itemset_1_last = None
itemset_2_last = None
'''
a1,* 1
a1,* 1
a1,b1 1
a1,b1 1
a1,b2 1
a1,b2 1
a2,* 1
'''
THRESHOLD = 100
# Store Itemsets 2
dict = {}
for line in sys.stdin:
# Remove leading & trailing chars
line = line.strip()
# Split the line by <TAB> delimeter
tokens = re.split(r'\s+', line)
# Split the key by <COMMA> delimeter
items = tokens[0].split(",")
i1 = items[0]
i2 = items[1]
if not itemset_1_last:
itemset_1_last = i1
if itemset_1_last != i1:
'''
if itemset_1_cnt >= THRESHOLD:
confidence = (itemset_2_cnt * 1.0) / itemset_1_cnt
print '[%d,%d]%s\t%f' %(itemset_1_cnt, itemset_2_cnt, tokens[0], confidence)
dict[tokens[0]] = confidence
'''
# Reset
itemset_1_last = i1
itemset_1_cnt = int(tokens[1])
itemset_2_last = None
itemset_2_cnt = 0
else:
if i2 == '*':
itemset_1_cnt += int(tokens[1])
else:
if itemset_2_last != tokens[0]:
if itemset_1_cnt >= THRESHOLD and itemset_2_cnt >= THRESHOLD:
confidence = (itemset_2_cnt * 1.0) / itemset_1_cnt
#print '[%d,%d]%s\t%f' %(itemset_1_cnt, itemset_2_cnt, itemset_2_last, confidence)
dict[itemset_2_last] = confidence
itemset_2_last = tokens[0]
itemset_2_cnt = int(tokens[1])
else:
itemset_2_cnt += int(tokens[1])
# Last Set of Counts
if itemset_1_cnt >= THRESHOLD and itemset_2_cnt >= THRESHOLD:
confidence = (itemset_2_cnt * 1.0) / itemset_1_cnt
#print '[%d,%d]%s\t%f' %(itemset_1_cnt, itemset_2_cnt, itemset_2_last, confidence)
dict[itemset_2_last] = confidence
print '=== Top 5 Confidence ==='
sorted_dict = sorted(dict.items(), key=lambda x:(-x[1], x[0]))
for j,k in sorted_dict[:5]:
print '%s\t%f' %(j,k)
| [
"sayantan.satpati.sfbay@gmail.com"
] | sayantan.satpati.sfbay@gmail.com |
dd9892018dc6b2d6514d2081c8fca5562e57d115 | 65fe8e97656d41074e25219268c7b0a78fafb398 | /camera_infer.py | 7e74deaef16d9506c93f9ad054a23474383cb7d8 | [
"Apache-2.0"
] | permissive | Wblossom/Tensorflow-FaceRecognition | c48923f9ed8695380f251b5a81bcccafae33f44b | bc567fb53dc11554bfaf612f3a21045f7ab24876 | refs/heads/master | 2022-12-07T10:07:18.290096 | 2020-08-27T02:35:25 | 2020-08-27T02:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,491 | py | import cv2
import numpy as np
import sklearn
import config
from utils import face_preprocess
from PIL import ImageFont, ImageDraw, Image
from utils.utils import feature_compare, load_mtcnn, load_faces, load_mobilefacenet, add_faces
# 人脸识别阈值
VERIFICATION_THRESHOLD = config.VERIFICATION_THRESHOLD
# 检测人脸检测模型
mtcnn_detector = load_mtcnn()
# 加载人脸识别模型
face_sess, inputs_placeholder, embeddings = load_mobilefacenet()
# 添加人脸
add_faces(mtcnn_detector)
# 加载已经注册的人脸
faces_db = load_faces(face_sess, inputs_placeholder, embeddings)
# 注册人脸
def face_register():
print("点击y确认拍照!")
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if ret:
cv2.imshow('image', frame)
if cv2.waitKey(1) & 0xFF == ord('y'):
faces, landmarks = mtcnn_detector.detect(frame)
if faces.shape[0] is not 0:
faces_sum = 0
bbox = []
points = []
for i, face in enumerate(faces):
if round(faces[i, 4], 6) > 0.95:
bbox = faces[i, 0:4]
points = landmarks[i, :].reshape((5, 2))
faces_sum += 1
if faces_sum == 1:
nimg = face_preprocess.preprocess(frame, bbox, points, image_size='112,112')
user_name = input("请输入注册名:")
cv2.imencode('.png', nimg)[1].tofile('face_db/%s.png' % user_name)
print("注册成功!")
else:
print('注册图片有错,图片中有且只有一个人脸')
else:
print('注册图片有错,图片中有且只有一个人脸')
break
# 人脸识别
def face_recognition():
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if ret:
faces, landmarks = mtcnn_detector.detect(frame)
if faces.shape[0] is not 0:
faces_sum = 0
for i, face in enumerate(faces):
if round(faces[i, 4], 6) > 0.95:
faces_sum += 1
if faces_sum == 0:
continue
# 人脸信息
info_location = np.zeros(faces_sum)
info_location[0] = 1
info_name = []
probs = []
# 提取图像中的人脸
input_images = np.zeros((faces.shape[0], 112, 112, 3))
for i, face in enumerate(faces):
if round(faces[i, 4], 6) > 0.95:
bbox = faces[i, 0:4]
points = landmarks[i, :].reshape((5, 2))
nimg = face_preprocess.preprocess(frame, bbox, points, image_size='112,112')
nimg = nimg - 127.5
nimg = nimg * 0.0078125
input_images[i, :] = nimg
# 进行人脸识别
feed_dict = {inputs_placeholder: input_images}
emb_arrays = face_sess.run(embeddings, feed_dict=feed_dict)
emb_arrays = sklearn.preprocessing.normalize(emb_arrays)
for i, embedding in enumerate(emb_arrays):
embedding = embedding.flatten()
temp_dict = {}
# 比较已经存在的人脸数据库
for com_face in faces_db:
ret, sim = feature_compare(embedding, com_face["feature"], 0.70)
temp_dict[com_face["name"]] = sim
dict = sorted(temp_dict.items(), key=lambda d: d[1], reverse=True)
if dict[0][1] > VERIFICATION_THRESHOLD:
name = dict[0][0]
probs.append(dict[0][1])
info_name.append(name)
else:
probs.append(dict[0][1])
info_name.append("unknown")
for k in range(faces_sum):
# 写上人脸信息
x1, y1, x2, y2 = faces[k][0], faces[k][1], faces[k][2], faces[k][3]
x1 = max(int(x1), 0)
y1 = max(int(y1), 0)
x2 = min(int(x2), frame.shape[1])
y2 = min(int(y2), frame.shape[0])
prob = '%.2f' % probs[k]
label = "{}, {}".format(info_name[k], prob)
cv2img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
pilimg = Image.fromarray(cv2img)
draw = ImageDraw.Draw(pilimg)
font = ImageFont.truetype('font/simfang.ttf', 18, encoding="utf-8")
draw.text((x1, y1 - 18), label, (255, 0, 0), font=font)
frame = cv2.cvtColor(np.array(pilimg), cv2.COLOR_RGB2BGR)
cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
cv2.imshow('image', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == '__main__':
i = int(input("请选择功能,1为注册人脸,2为识别人脸:"))
if i == 1:
face_register()
elif i == 2:
face_recognition()
else:
print("功能选择错误")
| [
"yeyupiaoling@foxmail.com"
] | yeyupiaoling@foxmail.com |
a5c4298eb129e99af9224110f4761b4b8ed3bd22 | 67117705720a3e3d81253ba48c1826d36737b126 | /Wk10_STRANDS/get_valid1.py | df448f30ab90cf1b517fb5526efd7d00ee5ea597 | [] | no_license | pyliut/Rokos2021 | 41f0f96bc396b6e8a5e268e31a38a4a4b288c370 | 70753ab29afc45766eb502f91b65cc455e6055e1 | refs/heads/main | 2023-08-13T17:29:30.013829 | 2021-09-26T19:01:35 | 2021-09-26T19:01:35 | 382,092,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 10 13:24:32 2021
@author: pyliu
"""
import pandas as pd
def get_valid1(df, adjacent):
df = df.loc[ ( df["origin"].isin(adjacent.keys()) & df["target"].isin(adjacent.keys()) ), :]
df = df[["_id", "status", "origin", "target", "edge_id","date_finished", "date_at_node", "date_started","_meta", "time_to_waypoint","operation_time", "final_node"]]
df = df.reset_index(drop=True)
return df | [
"noreply@github.com"
] | pyliut.noreply@github.com |
2993c86ec6a80448c6afc224530dff735ad239be | 81d635211686b1bc87af5892bd9e0fb95cc2ddb8 | /adwords api/googleads-python-lib-master/examples/dfp/v201502/custom_targeting_service/get_custom_targeting_keys_by_statement.py | 0812eb4235de6320b09005b1154ac6fbaf2ee011 | [
"Apache-2.0"
] | permissive | analyticsbot/Python-Code---Part-2 | de2f0581258b6c8b8808b4ef2884fe7e323876f0 | 12bdcfdef4472bcedc77ae61707c25a4a09cba8a | refs/heads/master | 2021-06-04T05:10:33.185766 | 2016-08-31T13:45:45 | 2016-08-31T13:45:45 | 66,679,512 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,953 | py | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all predefined custom targeting keys.
To create custom targeting keys, run create_custom_targeting_keys_and_values.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201502')
values = [{
'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'PREDEFINED'
}
}]
query = 'WHERE type = :type'
statement = dfp.FilterStatement(query, values)
# Get custom targeting keys by statement.
while True:
response = custom_targeting_service.getCustomTargetingKeysByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for key in response['results']:
print ('Custom targeting key with id \'%s\', name \'%s\', display name '
'\'%s\', and type \'%s\' was found.'
% (key['id'], key['name'], key['displayName'], key['type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| [
"ravi.shankar1788@gmail.com"
] | ravi.shankar1788@gmail.com |
52438bbc1d7accc32306a2504e0b0ac1b2143bf5 | 505343f6ace00d22f8753c1a943a5794a619e698 | /katas/Python/5 kyu/Eight ways to iterate over table 5af5c18786d075cd5e00008b.py | 24fec5621cd28f0bfe275dbea42300fdad8dd66c | [] | no_license | bullet1337/codewars | 7652e50bf768bc47976a9124dd98b93602d4d458 | ba7f13ddd766158b41e036dae5d6b15f7f08761a | refs/heads/master | 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # https://www.codewars.com/kata/5af5c18786d075cd5e00008b
DIRECTION_UP, DIRECTION_LEFT, DIRECTION_DOWN, DIRECTION_RIGHT = range(1,5)
class Table:
def __init__(self, data):
self.data = data
def get_range(self, dir):
if dir == DIRECTION_UP:
return range(len(self.data) - 1, -1, -1)
elif dir == DIRECTION_LEFT:
return range(len(self.data[0]) - 1, -1, -1)
elif dir == DIRECTION_DOWN:
return range(len(self.data))
elif dir == DIRECTION_RIGHT:
return range(len(self.data[0]))
def walk(self, dir0, dir1):
for i in self.get_range(dir1):
for j in self.get_range(dir0):
yield self.data[j][i] if dir0 % 2 else self.data[i][j]
| [
"alichek95@mail.ru"
] | alichek95@mail.ru |
10901ab549fe64751337168cade0014abf98999b | c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105 | /vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/stdlib/2/multiprocessing/util.pyi | 76f9424a6774726392f442e22dae101e2e626220 | [
"MIT",
"Apache-2.0"
] | permissive | ryangniadek/.dotfiles | ddf52cece49c33664b56f01b17d476cf0f1fafb1 | be272baf6fb7d7cd4f4db1f6812b710196511ffe | refs/heads/master | 2021-01-14T07:43:12.516127 | 2020-03-22T20:27:22 | 2020-03-22T20:27:22 | 242,632,623 | 0 | 0 | MIT | 2020-09-12T17:28:01 | 2020-02-24T02:50:06 | Python | UTF-8 | Python | false | false | 741 | pyi | from typing import Any
import threading
SUBDEBUG = ... # type: Any
SUBWARNING = ... # type: Any
def sub_debug(msg, *args): ...
def debug(msg, *args): ...
def info(msg, *args): ...
def sub_warning(msg, *args): ...
def get_logger(): ...
def log_to_stderr(level=None): ...
def get_temp_dir(): ...
def register_after_fork(obj, func): ...
class Finalize:
def __init__(self, obj, callback, args=..., kwargs=None, exitpriority=None): ...
def __call__(self, wr=None): ...
def cancel(self): ...
def still_active(self): ...
def is_exiting(): ...
class ForkAwareThreadLock:
def __init__(self): ...
class ForkAwareLocal(threading.local):
def __init__(self): ...
def __reduce__(self): ...
| [
"ryan@gniadek.net"
] | ryan@gniadek.net |
646938fdb988b0da4f7455fce4fddf228f6bd0b0 | 254e35ed13abb5670eb664c1b17cb77d6b2d6289 | /LeetCode/python/_229.MajorityElementII.py | f84f099e6907b3b8184ac96ff90db0bcae53f8a1 | [] | no_license | bobby20180331/Algorithms | 475f7b29efcab829bc97b18a088600d406850fc7 | c56967e292b34162438f86bfc4c76925329105dd | refs/heads/master | 2023-04-23T04:36:26.977179 | 2021-02-04T06:47:41 | 2021-02-04T06:47:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | #难点在于 linear time and in O(1)
#没思路,查了下,好像用Boyer-Moore Majority Vote algorithm
#这个算法是解决这样一个问题:从一个数组中找出出现半数以上的元素。 这道题是求n/3
'''
每次都找出一对不同的元素,从数组中删掉,直到数组为空或只有一种元素。
不难证明,如果存在元素e出现频率超过半数,那么数组中最后剩下的就只有e。
'''
class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
if not nums: 判断为空的写法
return []
count1, count2, candidate1, candidate2 = 0, 0, 0, 1 #设置两个初始值和计数
for n in nums:
if n == candidate1:
count1 += 1
elif n == candidate2:
count2 += 1
elif count1 == 0: #若两个值为0时,重设为n
candidate1, count1 = n, 1
elif count2 == 0:
candidate2, count2 = n, 1
else: #若两个值都存在,但当前n都不等于其中两个,则两个计数都减1
count1, count2 = count1 - 1, count2 - 1
#事实当减为0时又重新为n初始值了
return [n for n in (candidate1, candidate2)
if nums.count(n) > len(nums) // 3]
#为什么最后就一个(c1,c2)的元组了
#对了!因为要出现超过n/3次,不可能有三个数!
| [
"noreply@github.com"
] | bobby20180331.noreply@github.com |
4ca4ef13e503c3348a0195e98d2a84a5902c7db3 | 83ecabbeea8b17a3fd9b8142062f09c76198e232 | /test/test_document_2.py | 34e6255c4f1a7ac910d2259eec0bfd0bc5f5172c | [] | no_license | junetigerlee/python-wso2-apim-publisherclient | 387f581bb48645b35f256159cce0031babd493f0 | 5e1cadeab4eb37ebc93e46b45d6d1f98f4fdfde9 | refs/heads/master | 2021-01-01T16:11:45.362270 | 2017-07-25T06:20:46 | 2017-07-25T06:20:46 | 97,783,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | # coding: utf-8
"""
WSO2 API Manager - Publisher API
This specifies a **RESTful API** for WSO2 **API Manager** - Publisher. Please see [full swagger definition](https://raw.githubusercontent.com/wso2/carbon-apimgt/v6.0.4/components/apimgt/org.wso2.carbon.apimgt.rest.api.publisher/src/main/resources/publisher-api.yaml) of the API which is written using [swagger 2.0](http://swagger.io/) specification.
OpenAPI spec version: 0.11.0
Contact: architecture@wso2.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import wso2_apim_publisherclient
from wso2_apim_publisherclient.rest import ApiException
from wso2_apim_publisherclient.models.document_2 import Document2
class TestDocument2(unittest.TestCase):
""" Document2 unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testDocument2(self):
"""
Test Document2
"""
# FIXME: construct object with mandatory attributes with example values
#model = wso2_apim_publisherclient.models.document_2.Document2()
pass
if __name__ == '__main__':
unittest.main()
| [
"junetigerlee@gmail.com"
] | junetigerlee@gmail.com |
639d0089dc164b752a30ef726417d9d4557180ea | 4fcad69a9b2aec97fa29e0010d82f0f085cdc446 | /tsampi/pypy/lib_python-bak/hypothesis/utils/__init__.py | 44d438c8e378b5f27ebbb6ac0833bbf585e8de04 | [] | no_license | tsampi/tsampi-0 | b64d4457f58314343630b04232c6ecc74c7bfda1 | 5e0183e80718d5668b4b5b96631853942e344b64 | refs/heads/master | 2021-01-19T04:35:05.640785 | 2016-09-12T18:34:25 | 2016-09-12T18:34:25 | 49,612,767 | 1 | 3 | null | 2016-03-25T10:35:41 | 2016-01-14T01:02:18 | Python | UTF-8 | Python | false | false | 800 | py | # coding=utf-8
#
# This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)
#
# Most of this work is copyright (C) 2013-2015 David R. MacIver
# (david@drmaciver.com), but it contains contributions by others. See
# https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a
# full list of people who may hold copyright, and consult the git log if you
# need to determine who owns an individual contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
"""hypothesis.utils is a package for things that you can consider part of the
semi-public Hypothesis API but aren't really the core point.
"""
| [
"tim@readevalprint.com"
] | tim@readevalprint.com |
42a59f892a98e74a4c42441549d2d609eed08529 | a1b892c0f5f8c5aa2c67b555b8d1d4b7727a86a4 | /HTML/outage/test.py | feaec475507a72c66bfa111e20a713c1f88284c0 | [] | no_license | Vivekdjango/outage | 60f463ae5294d2b33544a19bda34cc2c22dd42c8 | 20cfbc07e6714f0c8c7e685ea389f1b8ef1bfd53 | refs/heads/master | 2021-01-20T04:18:40.023340 | 2017-04-28T06:46:26 | 2017-04-28T06:46:26 | 89,675,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import re
with open('/var/www/html/outage/Vivektest.html','r+') as f:
s=f.read()
d=re.search(r'Downtime</b>:([0-9]+)',s)
j='Downtime</b>:90'
x=d.group()
s=re.sub(x,j,s)
f.seek(0)
f.truncate()
with open('/var/www/html/outage/Vivektest.html','a') as g:
g.write(s)
g.close()
| [
"viveksinha@IC0532-L0.corp.inmobi.com"
] | viveksinha@IC0532-L0.corp.inmobi.com |
97005734ec6b4c93adc78cd19c609fa7f49dac2c | f83ca7d939e8b7688445f336ceedcf871da3ed21 | /sysroot/usr/lib/python3/dist-packages/blinkt.py | 3dae99458928bbdd066f8966ee0d34197bd45404 | [] | no_license | FundamentalFrequency/juce-aarch64-linux-cross-compiler | 1c432d77b8bc7dd11692a9aef34408ed8baddf1f | 3fb7539e4459228231699efbe227e50638b620e4 | refs/heads/main | 2023-03-17T07:49:31.396127 | 2022-07-15T04:52:18 | 2022-07-15T04:52:18 | 496,806,629 | 13 | 4 | null | 2022-10-21T21:07:26 | 2022-05-26T23:57:36 | Python | UTF-8 | Python | false | false | 3,619 | py | import atexit
import time
import RPi.GPIO as GPIO
__version__ = '0.1.2'
DAT = 23         # BCM pin driving the APA102 data line
CLK = 24         # BCM pin driving the APA102 clock line
NUM_PIXELS = 8
BRIGHTNESS = 7   # default 5-bit brightness level (0-31)
# Per-pixel buffer of [r, g, b, 5-bit brightness]; flushed to hardware by show().
# NOTE(review): all entries initially alias ONE list object; this is harmless
# because every in-place mutation loops over all pixels and set_pixel rebinds
# the slot, but worth confirming.
pixels = [[0, 0, 0, BRIGHTNESS]] * NUM_PIXELS
_gpio_setup = False   # True once show() has configured the GPIO pins
_clear_on_exit = True # see set_clear_on_exit()
def _exit():
    # atexit hook: optionally blank the LEDs, then release the GPIO pins.
    if _clear_on_exit:
        clear()
        show()
    GPIO.cleanup()
def set_brightness(brightness):
    """Apply one brightness value to every pixel.

    :param brightness: Brightness: 0.0 to 1.0
    :raises ValueError: if brightness is outside [0.0, 1.0]
    """
    if not 0 <= brightness <= 1:
        raise ValueError("Brightness should be between 0.0 and 1.0")
    level = int(31.0 * brightness) & 0b11111
    for index in range(NUM_PIXELS):
        pixels[index][3] = level
def clear():
    """Blank every pixel's colour (brightness values are untouched)."""
    for index in range(NUM_PIXELS):
        pixels[index][0:3] = [0, 0, 0]
def _write_byte(byte):
    # Bit-bang one byte MSB-first onto the APA102 data line, clocking each
    # bit in with a short high/low pulse on CLK.
    for x in range(8):
        GPIO.output(DAT, byte & 0b10000000)  # present the current MSB
        GPIO.output(CLK, 1)                  # data is latched on the rising edge
        time.sleep(0.0000005)
        byte <<= 1
        GPIO.output(CLK, 0)
        time.sleep(0.0000005)
# Emit exactly enough clock pulses to latch the small dark die APA102s which are weird
# for some reason it takes 36 clocks, the other IC takes just 4 (number of pixels/2)
def _eof():
    # End-of-frame latch: hold DAT low for 36 clock pulses (enough for the
    # small-die APA102 variant -- see the comment above this function).
    GPIO.output(DAT, 0)
    for x in range(36):
        GPIO.output(CLK, 1)
        time.sleep(0.0000005)
        GPIO.output(CLK, 0)
        time.sleep(0.0000005)
def _sof():
    # Start-of-frame: 32 clock pulses with DAT held low (APA102 start frame).
    GPIO.output(DAT, 0)
    for x in range(32):
        GPIO.output(CLK, 1)
        time.sleep(0.0000005)
        GPIO.output(CLK, 0)
        time.sleep(0.0000005)
def show():
    """Output the buffer to Blinkt!"""
    global _gpio_setup
    # Lazily configure the GPIO pins the first time show() is called.
    if not _gpio_setup:
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(DAT, GPIO.OUT)
        GPIO.setup(CLK, GPIO.OUT)
        atexit.register(_exit)
        _gpio_setup = True
    _sof()
    # Each APA102 LED frame is: 0b111 marker + 5-bit brightness, then B, G, R.
    for pixel in pixels:
        r, g, b, brightness = pixel
        _write_byte(0b11100000 | brightness)
        _write_byte(b)
        _write_byte(g)
        _write_byte(r)
    _eof()
def set_all(r, g, b, brightness=None):
    """Apply one RGB value (and optionally brightness) to every pixel.

    When brightness is omitted, each pixel keeps its last brightness.

    :param r: Amount of red: 0 to 255
    :param g: Amount of green: 0 to 255
    :param b: Amount of blue: 0 to 255
    :param brightness: Brightness: 0.0 to 1.0 (default around 0.2)
    """
    for index in range(NUM_PIXELS):
        set_pixel(index, r, g, b, brightness)
def get_pixel(x):
    """Return (r, g, b, brightness) for pixel *x*, brightness scaled to 0.0-1.0."""
    r, g, b, raw_level = pixels[x]
    return r, g, b, round(raw_level / 31.0, 3)
def set_pixel(x, r, g, b, brightness=None):
    """Set the RGB value, and optionally brightness, of a single pixel.

    When brightness is omitted the pixel's previous brightness is kept.

    :param x: The horizontal position of the pixel: 0 to 7
    :param r: Amount of red: 0 to 255
    :param g: Amount of green: 0 to 255
    :param b: Amount of blue: 0 to 255
    :param brightness: Brightness: 0.0 to 1.0 (default around 0.2)
    """
    if brightness is None:
        level = pixels[x][3]
    else:
        level = int(31.0 * brightness) & 0b11111
    pixels[x] = [int(r) & 0xff, int(g) & 0xff, int(b) & 0xff, level]
def set_clear_on_exit(value=True):
    """Choose whether Blinkt! is blanked when the program exits.

    By default the pixels are turned off at exit; call
    ``blinkt.set_clear_on_exit(False)`` to leave them lit.

    :param value: True or False (default True)
    """
    global _clear_on_exit
    _clear_on_exit = value
| [
"stonepreston@protonmail.com"
] | stonepreston@protonmail.com |
0aa45401d8b9d7ecc694f70ae64fad64be567afa | c00a2490947ad10582b5d675f070ccb62b70901d | /testing/vivaldi_testing_base.gypi | 17dedd737c47f8dbfc349f283963feba7c8c0596 | [
"BSD-3-Clause"
] | permissive | teotikalki/vivaldi-source | 543d0ab336fb5784eaae1904457598f95f426186 | 22a46f2c969f6a0b7ca239a05575d1ea2738768c | refs/heads/master | 2021-01-23T01:17:34.305328 | 2016-04-29T20:28:18 | 2016-04-29T20:28:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | gypi | {
'type': 'none',
'target_conditions': [
['OS=="win"', {
# Allow running and debugging browser_tests from the testing directory of the MSVS solution view
'product_name':'<(_target_name).exe',
}],
],
}
| [
"jason@theograys.com"
] | jason@theograys.com |
5057692bd4eb3599bc2347781c35514eda74b72f | bdda88c9a9141e9873f871dea6a197a3c413aad4 | /last/wsgi.py | 01e630a2b2f7ee2e8f1bb214003fbde4dfcffb09 | [] | no_license | thienkimlove/python_last | 83e890e30ef3e4dbd7e063b7f11c5ae2b65c9f84 | f6685ed71c30196f40b122b2aefc35271802d092 | refs/heads/master | 2022-12-15T21:57:24.128016 | 2018-08-20T10:57:42 | 2018-08-20T10:57:42 | 120,873,885 | 0 | 0 | null | 2022-12-08T00:54:39 | 2018-02-09T07:42:28 | JavaScript | UTF-8 | Python | false | false | 385 | py | """
WSGI config for last project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "last.settings")
application = get_wsgi_application()
| [
"quan.dm@teko.vn"
] | quan.dm@teko.vn |
376f29ecfebbf1df2ed02a33676c3f737a6e6b60 | 7022c58c3affc4a31cb261a44cb42ff07088e654 | /modify_video.py | 64aef4b9fc60e71aaf8fbed518e654834661b99e | [
"MIT"
] | permissive | reading-stiener/Audio-to-audio-alignment-research | 9298c47c139240b8b2a4b80bfeffa6db6d278c8a | 8ea2789a760e63b92a3a2f14236a87417236e533 | refs/heads/main | 2023-02-12T14:05:25.948830 | 2020-12-22T05:53:11 | 2020-12-22T05:53:11 | 303,211,401 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | from moviepy.editor import VideoFileClip, concatenate_videoclips, vfx
from moviepy.video.fx.speedx import speedx
from pprint import pprint
import os
import json
def modify_video(folder, filename, mod_dict, audio_file=None):
    """Re-time segments of a video per `mod_dict` and write the result to `folder`.

    mod_dict: iterable of dicts with keys "original_start_time",
    "original_delta_time" and "rate"; each segment is cut from the source
    clip, sped up/slowed down by `rate`, and the pieces are re-joined.
    When `audio_file` is given, its second "_"-separated token is appended
    to the output file name -- presumably an index like "violin_1"; TODO
    confirm the expected naming convention.
    """
    with VideoFileClip(filename, audio=False) as videoclip:
        cliplist = []
        for changes in mod_dict:
            # Segment boundaries in the ORIGINAL clip's time base.
            ta, tb, rate = changes["original_start_time"], changes["original_start_time"]+changes["original_delta_time"], changes["rate"]
            snippet = videoclip.subclip(ta, tb)
            # applying speed effect and appending to clip list
            cliplist.append(snippet.fx(speedx, rate))
        modified_clip = concatenate_videoclips(cliplist)
        inputfolder, inputfile = os.path.split(filename)
        if audio_file:
            modified_clip.write_videofile(os.path.join(folder, inputfile[:-4]+ "_"+ audio_file.split("_")[1]) + ".mp4")
        else:
            modified_clip.write_videofile(os.path.join(folder, inputfile))
if __name__ == "__main__":
filename = "/home/camel/Documents/Honors Thesis Research/Audio-to-audio-alignment-research/LSTM_dataset_4/violin/01_Jupiter_vn_vc/violin_1.mp4"
folder = "annotations"
with open("annotations/Jupiter_vn_vc.json") as f:
mod_dict = json.load(fp=f)
modify_video(folder, filename, mod_dict)
| [
"apradha1@conncoll.edu"
] | apradha1@conncoll.edu |
0e2abcb6c0c351b66ae5fb542aaaecce8c8f3fbf | b23b3a4cc7d4ebdf08d958af82128ba535b1402f | /Codeforces/T-primes.py | 8fc2cb522a317a237d883dfa154a807a34c8cb13 | [] | no_license | darshantak/Competitive-Programming | 8665cb1f837140275b6664464522ae942fb6ca50 | 1413d3cc9904b534178a5ac3e4dcd48733c9d26f | refs/heads/master | 2021-08-16T00:02:33.507276 | 2020-05-15T13:55:22 | 2020-05-15T13:55:22 | 180,850,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | import math
def SieveOfEratosthenes(n, prime, primesquare, a):
    """Fill the caller-supplied tables for all primes up to n.

    prime[i]       -> True iff i is prime (valid for indices 1..n)
    primesquare[i] -> True iff i is the square of a prime (0..n*n+1)
    a              -> primes in increasing order, stored from index 0
    """
    for value in range(2, n + 1):
        prime[value] = True
    for value in range(n * n + 2):
        primesquare[value] = False
    prime[1] = False
    candidate = 2
    while candidate * candidate <= n:
        if prime[candidate]:
            # Cross off every multiple of the surviving prime.
            for multiple in range(candidate * 2, n + 1, candidate):
                prime[multiple] = False
        candidate += 1
    count = 0
    for value in range(2, n + 1):
        if prime[value]:
            a[count] = value
            primesquare[value * value] = True
            count += 1
def countDivisors(n):
    """Return the number of divisors of n (n >= 1).

    Sieves primes up to n, divides out every prime p with p**3 <= n, and
    then classifies the remaining cofactor (which is 1, a prime, a prime
    squared, or a product of two distinct primes) directly.
    """
    if n == 1:
        return 1
    prime = [False]*(n + 2)
    primesquare = [False]*(n * n + 2)
    a = [0]*n
    SieveOfEratosthenes(n, prime, primesquare, a)
    ans = 1
    i = 0
    while True:
        # Only primes up to the cube root of n need explicit trial division.
        if a[i] * a[i] * a[i] > n:
            break
        cnt = 1
        while n % a[i] == 0:
            n = n / a[i]
            cnt = cnt + 1
        ans = ans * cnt
        i += 1
    # Fix: the original line was "n=int(n) dsd" -- the stray "dsd" token was a
    # syntax error that broke the whole file.  int() also restores an integer
    # index after the float division above.
    n = int(n)
    if prime[n]:
        ans = ans * 2
    elif primesquare[n]:
        ans = ans * 3
    elif n != 1:
        ans = ans * 4
    return ans
# Read the count line (unused beyond consuming input) and the candidate
# numbers, then report YES exactly for T-primes: numbers with exactly
# three divisors (i.e. squares of primes).
n=int(input())
x=list(map(int,input().split()))
for number in x:
    temp=countDivisors(number)
    if temp==3:
        print("YES")
    else:
        print("NO")
| [
"30834020+darshantak@users.noreply.github.com"
] | 30834020+darshantak@users.noreply.github.com |
3754f58c4dc34461dbba2390774e8247149a0188 | 90c2619937019bb1145edfb2d9d6a7cdea460b57 | /src/538.py | 004d39d2e9e53a8ea9413a35ac09f724706a45d1 | [
"MIT"
] | permissive | zhaoyi3264/leetcode-solutions | 2d289a7e5c74cfe7f8b019c6056ce16485ae057b | 1a3a2d441cdd07a17e80b0ea43b7b266844f530c | refs/heads/main | 2023-06-03T11:35:25.054669 | 2021-06-28T02:58:07 | 2021-06-28T02:58:07 | 349,618,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    # Running sum of node values seen so far during the reverse in-order walk.
    s = 0

    def update(self, root):
        """Traverse right -> node -> left, replacing each node's value with
        the running sum of all values greater than or equal to it."""
        if not root:
            return
        self.update(root.right)
        self.s += root.val
        root.val = self.s
        self.update(root.left)

    def convertBST(self, root: TreeNode) -> TreeNode:
        """Convert a BST into a Greater Tree in place and return its root."""
        # Fix: "s" is a class attribute, so without resetting it a second
        # call on the same Solution instance would keep accumulating the
        # previous tree's total.
        self.s = 0
        self.update(root)
        return root
| [
"zhaoyi3264@gmail.com"
] | zhaoyi3264@gmail.com |
c76ddadc6c35d5df98c2328ccbdfe66c0af64a20 | 8be967439ddf76eaad9e49fb9d8f18d832db5cf4 | /mmd/realtime.py | d2ada934da2d2003fb6abac9d7ea39210fb9fd41 | [
"BSD-3-Clause"
] | permissive | jjgoings/McMurchie-Davidson | b4acda0d0e49f96ec10bee4e8c58c0bf20d77f77 | 8c9d176204498655a358edf41698e59cf970a548 | refs/heads/master | 2023-02-07T21:13:50.285990 | 2023-02-05T04:32:59 | 2023-02-05T04:32:59 | 81,615,513 | 72 | 23 | BSD-3-Clause | 2023-02-05T04:33:00 | 2017-02-10T23:13:26 | Python | UTF-8 | Python | false | false | 7,265 | py | from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.linalg import expm
class RealTime(object):
    """Class for real-time routines"""
    def __init__(self,mol,numsteps=1000,stepsize=0.1,field=0.0001,pulse=None):
        """Set up a real-time propagation for `mol`.

        mol:      molecule object exposing SCF/Fock/density utilities
        numsteps: number of time steps to propagate
        stepsize: time step size
        field:    electric field amplitude scaling the pulse envelope
        pulse:    callable t -> envelope amplitude (defaults to zero field)
        """
        self.mol = mol
        self.field = field
        self.stepsize = stepsize
        self.numSteps = numsteps
        self.time = np.arange(0,self.numSteps)*self.stepsize
        if pulse:
            self.pulse = pulse
        else:
            # zero pulse envelope
            self.pulse = lambda t: 0.0
        self.reset()
    def reset(self):
        """Reset all time-dependent property arrays to empty, will also
        re-do the SCF in order to set the reference back to ground state.
        This will likely need to be changed in the future.
        """
        self.mol.RHF(doPrint=False)
        self.dipole = []
        self.angmom = []
        self.Energy = []
        self.shape = []
    def Magnus2(self,direction='x'):
        """Propagate in time using the second order explicit Magnus.
        See: Blanes, Sergio, and Fernando Casas. A concise introduction
        to geometric numerical integration. Vol. 23. CRC Press, 2016.
        Magnus2 is Eq (4.61), page 128.
        """
        self.reset()
        self.mol.orthoDen()
        self.mol.orthoFock()
        h = -1j*self.stepsize
        for idx,time in enumerate((self.time)):
            # Record the dipole component along the requested direction.
            if direction.lower() == 'x':
                self.mol.computeDipole()
                self.dipole.append(np.real(self.mol.mu[0]))
            elif direction.lower() == 'y':
                self.mol.computeDipole()
                self.dipole.append(np.real(self.mol.mu[1]))
            elif direction.lower() == 'z':
                self.mol.computeDipole()
                self.dipole.append(np.real(self.mol.mu[2]))
            # record pulse envelope for later plotting, etc.
            self.shape.append(self.pulse(time))
            curDen = np.copy(self.mol.PO)
            # Predictor: exponential of the Fock matrix at the step start.
            self.addField(time + 0.0*self.stepsize,direction=direction)
            k1 = h*self.mol.FO
            U = expm(k1)
            self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
            self.mol.updateFock()
            # Corrector: average the start- and end-of-step Fock matrices.
            self.addField(time + 1.0*self.stepsize,direction=direction)
            L = 0.5*(k1 + h*self.mol.FO)
            U = expm(L)
            self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
            self.mol.updateFock()
            # density and Fock are done updating, wrap things up
            self.mol.unOrthoFock()
            self.mol.unOrthoDen()
            self.mol.computeEnergy()
            self.Energy.append(np.real(self.mol.energy))
    def Magnus4(self,direction='x'):
        """Propagate in time using the fourth order explicit Magnus.
        See: Blanes, Sergio, and Fernando Casas. A concise introduction
        to geometric numerical integration. Vol. 23. CRC Press, 2016.
        Magnus4 is Eq (4.62), page 128.
        """
        self.reset()
        self.mol.orthoDen()
        self.mol.orthoFock()
        h = -1j*self.stepsize
        for idx,time in enumerate((self.time)):
            if direction.lower() == 'x':
                self.mol.computeDipole()
                self.dipole.append(np.real(self.mol.mu[0]))
            elif direction.lower() == 'y':
                self.mol.computeDipole()
                self.dipole.append(np.real(self.mol.mu[1]))
            elif direction.lower() == 'z':
                self.mol.computeDipole()
                self.dipole.append(np.real(self.mol.mu[2]))
            # record pulse envelope for later plotting, etc.
            self.shape.append(self.pulse(time))
            curDen = np.copy(self.mol.PO)
            # Six intermediate Fock evaluations (k1..k6) and their differences
            # (Q1..Q6) build up the fourth-order Magnus exponent; each stage
            # re-propagates from the SAME start-of-step density curDen.
            self.addField(time + 0.0*self.stepsize,direction=direction)
            k1 = h*self.mol.FO
            Q1 = k1
            U = expm(0.5*Q1)
            self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
            self.mol.updateFock()
            self.addField(time + 0.5*self.stepsize,direction=direction)
            k2 = h*self.mol.FO
            Q2 = k2 - k1
            U = expm(0.5*Q1 + 0.25*Q2)
            self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
            self.mol.updateFock()
            self.addField(time + 0.5*self.stepsize,direction=direction)
            k3 = h*self.mol.FO
            Q3 = k3 - k2
            U = expm(Q1 + Q2)
            self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
            self.mol.updateFock()
            self.addField(time + 1.0*self.stepsize,direction=direction)
            k4 = h*self.mol.FO
            Q4 = k4 - 2*k2 + k1
            L = 0.5*Q1 + 0.25*Q2 + (1/3.)*Q3 - (1/24.)*Q4
            L += -(1/48.)*self.mol.comm(Q1,Q2)
            U = expm(L)
            self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
            self.mol.updateFock()
            self.addField(time + 0.5*self.stepsize,direction=direction)
            k5 = h*self.mol.FO
            Q5 = k5 - k2
            L = Q1 + Q2 + (2/3.)*Q3 + (1/6.)*Q4 - (1/6.)*self.mol.comm(Q1,Q2)
            U = expm(L)
            self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
            self.mol.updateFock()
            self.addField(time + 1.0*self.stepsize,direction=direction)
            k6 = h*self.mol.FO
            Q6 = k6 -2*k2 + k1
            L = Q1 + Q2 + (2/3.)*Q5 + (1/6.)*Q6
            L += -(1/6.)*self.mol.comm(Q1, (Q2 - Q3 + Q5 + 0.5*Q6))
            U = expm(L)
            self.mol.PO = np.dot(U,np.dot(curDen,self.mol.adj(U)))
            self.mol.updateFock()
            # density and Fock are done updating, wrap things up
            self.mol.unOrthoFock()
            self.mol.unOrthoDen()
            self.mol.computeEnergy()
            self.Energy.append(np.real(self.mol.energy))
    def addField(self,time,direction='x'):
        """ Add the electric dipole contribution to the Fock matrix,
        and then orthogonalize the results. The envelope (shape) of
        the interaction with the electric field (self.pulse) needs
        to be set externally in a job, since the desired pulse is
        specific to each type of realtime simulation.
        self.pulse: function of time (t) that returns the envelope
        amplitude at a given time.
        Example:
            def gaussian(t):
                envelope = np.exp(-(t**2))
                return envelope
            rt = RealTime(molecule, pulse=gaussian, field=0.001)
        The above example would set up a realtime simulations with
        the external field to have the gaussian envelope defined above
        scaled by field=0.001.
        """
        shape = self.pulse(time)
        # Couple the field to the dipole integrals M along the chosen axis.
        if direction.lower() == 'x':
            self.mol.F += self.field*shape*(self.mol.M[0])
        elif direction.lower() == 'y':
            self.mol.F += self.field*shape*(self.mol.M[1])
        elif direction.lower() == 'z':
            self.mol.F += self.field*shape*(self.mol.M[2])
        self.mol.orthoFock()
| [
"jjgoings@gmail.com"
] | jjgoings@gmail.com |
a5da9ca40cc5a5566e5d13d5e2df167a59f0917c | 88b7c57a0d9a7a3b28ebd9d6c12ecbbebc50e8a5 | /beep/wechat_callback/routing.py | 45e3e73b84586df877f05a8f9c0b838cde257889 | [] | no_license | largerbigsuper/beep | 71438a4c2feae1afd6ecd25899e95f441bf2165b | a5d84437d79f065cec168f68210c4344a60d08d1 | refs/heads/master | 2022-09-23T02:09:37.117676 | 2020-01-03T06:21:57 | 2020-01-03T06:21:57 | 209,052,138 | 0 | 0 | null | 2022-09-13T23:03:25 | 2019-09-17T12:47:26 | Python | UTF-8 | Python | false | false | 315 | py | from django.urls import path
from . import consumers_wehub, consumers_live, consumers_wehub_task
websocket_urlpatterns = [
path('ws/wehub/', consumers_wehub.WehubConsumer),
path('ws/wehub_task/', consumers_wehub_task.WehubTaskConsumer),
path('ws/live/<str:room_name>/', consumers_live.LiveConsumer),
] | [
"zaihuazhao@163.com"
] | zaihuazhao@163.com |
57609feaa868e5d5d230added1c8394bdd894701 | ed06ef44c944707276a2fca16d61e7820596f51c | /Python/build-array-where-you-can-find-the-maximum-exactly-k-comparisons.py | 1e62106f0b746ff6dded6ba1d710508c12ac3a25 | [] | no_license | sm2774us/leetcode_interview_prep_2021 | 15842bef80637c6ff43542ed7988ec4b2d03e82c | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | refs/heads/master | 2023-05-29T14:14:49.074939 | 2021-06-12T19:52:07 | 2021-06-12T19:52:07 | 374,725,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # Time: O(n * m * k)
# Space: O(m * k)
class Solution(object):
    """
    @param n: array length
    @param m: maximum allowed value
    @param k: required search cost (number of running-maximum updates)
    @return: number of valid arrays, modulo 1e9+7
    """

    def numOfArrays(self, n, m, k):
        """
        :type n: int
        :type m: int
        :type k: int
        :rtype: int
        """
        MOD = 10**9 + 7
        # cur[p][i][j]: arrays of the current length (parity p) whose maximum
        # is exactly i and whose search cost is j.
        # run[p][i][j]: prefix sums of cur over the maximum value i.
        cur = [[[0] * (k + 1) for _ in range(m + 1)] for _ in range(2)]
        run = [[[0] * (k + 1) for _ in range(m + 1)] for _ in range(2)]
        for top in range(1, m + 1):
            cur[1][top][1] = 1
            run[1][top][1] = (run[1][top - 1][1] + cur[1][top][1]) % MOD
        for length in range(2, n + 1):
            par, prev = length % 2, (length - 1) % 2
            for top in range(1, m + 1):
                for cost in range(1, k + 1):
                    # Either append any value <= top (max and cost unchanged),
                    # or make `top` the new maximum (cost grows by one).
                    keep = top * cur[prev][top][cost] % MOD
                    bump = run[prev][top - 1][cost - 1]
                    cur[par][top][cost] = (keep + bump) % MOD
                    run[par][top][cost] = (run[par][top - 1][cost] + cur[par][top][cost]) % MOD
        return run[n % 2][m][k]
| [
"sm2774us@gmail.com"
] | sm2774us@gmail.com |
9c32666c1d023909998ab37c378f153017c92d8e | bdd40ea113fdf2f04ef7d61a096a575322928d1d | /Rupesh/DjangoTutorial/ecomarce/analytics/admin.py | ab78365d3c71a629a921a000a91bb6fc59872b46 | [] | no_license | rupesh7399/rupesh | 3eebf924d33790c29636ad59433e10444b74bc2f | 9b746acf37ab357c147cdada1de5458c5fc64f53 | refs/heads/master | 2020-12-22T05:01:29.176696 | 2020-03-03T10:32:36 | 2020-03-03T10:32:36 | 202,111,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.contrib import admin
from .models import ObjectViewed, UserSession
admin.site.register(ObjectViewed)
admin.site.register(UserSession)
| [
"rupesh7399@gmail.com"
] | rupesh7399@gmail.com |
27c774ca6c97f31bc49ca5a0c1b98c71f35dbc89 | 00df54846f8ee079785e39844329cb764c52dcd4 | /message/views.py | bf0fc4d77a173b5e6b8958456a319309963758f5 | [] | no_license | idber/devops | 1d6dcae3f5bdd173b9f38985552d40bea191f0e0 | e0371c4ae7ae552489ea376ecdb72b8847fc41a8 | refs/heads/master | 2020-04-19T15:29:31.884030 | 2019-01-30T01:45:57 | 2019-01-30T01:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | from django.shortcuts import render
from message.models import Message
from django.http import Http404
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def log_audit(request):
    """Audit-log view (superusers only).

    A plain GET renders the 300 most recent log entries; a GET with
    ``?aid=<id>`` renders the detail page for that single entry.
    Non-superusers receive a 404.
    """
    if not request.user.is_superuser:
        raise Http404
    if request.method == 'GET' and 'aid' in request.GET:
        # Fix: read the id from the parsed query string instead of splitting
        # the raw URL on '=', which returned the wrong value whenever any
        # other query parameter was present.
        aid = request.GET['aid']
        log_detail = Message.objects.filter(id=aid)
        data = {
            'log_detail': log_detail,
            'page_name': '日志明细'
        }
        return render(request, 'message/log_audit_detail.html', data)
    # QuerySets are lazy, so fetching the listing only on this path matches
    # the original behaviour (the detail path never evaluated it).
    logs = Message.objects.all()[:300]
    data = {
        'all_logs': logs,
        'page_name': '审计日志'
    }
    return render(request, 'message/log_audit.html', data)
| [
"ntuwang@126.com"
] | ntuwang@126.com |
9995f2b5def6c1301c8524c51013d6babad47b8d | 457c673c8c8d704ec150322e4eeee2fde4f827ca | /Python Fundamentals - January 2020/Basic_Syntax_Conditional_Statements_and_Loops_Exercise/03_Leonardo_DICaprio_Oscars.py | 57f953fa07bfb812d762ec8c1cb6bd4b23ec85cf | [] | no_license | xMrShadyx/SoftUni | 13c08d56108bf8b1ff56d17bb2a4b804381e0d4e | ce4adcd6e8425134d138fd8f4b6101d4eb1c520b | refs/heads/master | 2023-08-02T03:10:16.205251 | 2021-06-20T05:52:15 | 2021-06-20T05:52:15 | 276,562,926 | 5 | 1 | null | 2021-09-22T19:35:25 | 2020-07-02T06:07:35 | Python | UTF-8 | Python | false | false | 294 | py | oscar = int(input())
# Classify the ceremony number read into `oscar` above.
if oscar == 88:
    print('Leo finally won the Oscar! Leo is happy')
elif oscar == 86:
    print(f'Not even for Wolf of Wall Street?!')
elif oscar < 88:
    # Fix: the original condition "oscar < 88 and 86 or oscar <= 88" abused
    # operator precedence and truthiness (86 is truthy); every value below 88
    # not caught above belongs here, which this simple comparison expresses.
    print("When will you give Leo an Oscar?")
elif oscar > 88:
    print("Leo got one already!")
| [
"daredevil91138@gmail.com"
] | daredevil91138@gmail.com |
ebbe6bda3d528d935983968f89a4a23146afb88b | 7b02a5580ff924dce93b85c8614ae5a1468cd15e | /experiment_brain_parcellation.py | 2dc8bc780bd44e4ad11bbf888998f913c2c10f2b | [] | no_license | dr-alok-tiwari/brain_segmentation | 96638aafd891718666e391dac10492b3d2b38e90 | 0b233a78cad1cff686637c90ef04abeef6d830d3 | refs/heads/master | 2023-08-09T15:24:58.312337 | 2015-01-11T17:07:43 | 2015-01-11T17:07:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,808 | py | __author__ = 'adeb'
from shutil import copy2
import inspect
import PIL
import pickle
from spynet.utils.utilities import analyse_classes
from data_brain_parcellation import DatasetBrainParcellation
from network_brain_parcellation import *
from spynet.models.network import *
from spynet.models.neuron_type import *
from spynet.data.dataset import *
from spynet.training.trainer import *
from spynet.training.monitor import *
from spynet.training.parameters_selector import *
from spynet.training.stopping_criterion import *
from spynet.training.cost_function import *
from spynet.training.learning_update import *
from spynet.experiment import Experiment
from spynet.utils.utilities import tile_raster_images
import theano
class ExperimentBrain(Experiment):
    """
    Main experiment to train a network on a dataset
    """
    def __init__(self, exp_name, data_path):
        Experiment.__init__(self, exp_name, data_path)
    def copy_file_virtual(self):
        # Snapshot this script into the experiment folder for reproducibility.
        copy2(inspect.getfile(inspect.currentframe()), self.path)
    def run(self):
        ###### Create the datasets
        # aa = CostNegLLWeighted(np.array([0.9, 0.1]))
        # e = theano.function(inputs=[], outputs=aa.test())
        # print e()
        ## Load the data
        training_data_path = self.data_path + "train.h5"
        ds_training = DatasetBrainParcellation()
        ds_training.read(training_data_path)
        # Hold out 5% of the training set for validation.
        [ds_training, ds_validation] = ds_training.split_dataset_proportions([0.95, 0.05])
        testing_data_path = self.data_path + "test.h5"
        ds_testing = DatasetBrainParcellation()
        ds_testing.read(testing_data_path)
        ## Display data sample
        # image = PIL.Image.fromarray(tile_raster_images(X=ds_training.inputs[0:50],
        #                                                img_shape=(29, 29), tile_shape=(5, 10),
        #                                                tile_spacing=(1, 1)))
        # image.save(self.path + "filters_corruption_30.png")
        ## Few stats about the targets
        classes, proportion_class = analyse_classes(np.argmax(ds_training.outputs, axis=1), "Training data:")
        print classes
        ## Scale some part of the data
        # NOTE(review): only the last 134 input columns are scaled -- presumably
        # the non-image features; confirm against DatasetBrainParcellation.
        print "Scaling"
        s = Scaler([slice(-134, None, None)])
        s.compute_parameters(ds_training.inputs)
        s.scale(ds_training.inputs)
        s.scale(ds_validation.inputs)
        s.scale(ds_testing.inputs)
        # Persist the scaler so inference can apply identical preprocessing.
        pickle.dump(s, open(self.path + "s.scaler", "wb"))
        ###### Create the network
        net = NetworkUltimateConv()
        net.init(33, 29, 5, 134, 135)
        print net
        ###### Configure the trainer
        # Cost function
        cost_function = CostNegLL(net.ls_params)
        # Learning update
        learning_rate = 0.05
        momentum = 0.5
        lr_update = LearningUpdateGDMomentum(learning_rate, momentum)
        # Create monitors and add them to the trainer
        freq = 1
        freq2 = 0.00001
        # err_training = MonitorErrorRate(freq, "Train", ds_training)
        # err_testing = MonitorErrorRate(freq, "Test", ds_testing)
        err_validation = MonitorErrorRate(freq, "Val", ds_validation)
        # dice_training = MonitorDiceCoefficient(freq, "Train", ds_training, 135)
        dice_testing = MonitorDiceCoefficient(freq, "Test", ds_testing, 135)
        # dice_validation = MonitorDiceCoefficient(freq, "Val", ds_validation, 135)
        # Create stopping criteria and add them to the trainer
        max_epoch = MaxEpoch(300)
        early_stopping = EarlyStopping(err_validation, 10, 0.99, 5)
        # Create the network selector
        params_selector = ParamSelectorBestMonitoredValue(err_validation)
        # Create the trainer object
        batch_size = 200
        t = Trainer(net, cost_function, params_selector, [max_epoch, early_stopping],
                    lr_update, ds_training, batch_size,
                    [err_validation, dice_testing])
        ###### Train the network
        t.train()
        ###### Plot the records
        # pred = np.argmax(t.net.predict(ds_testing.inputs, 10000), axis=1)
        # d = compute_dice(pred, np.argmax(ds_testing.outputs, axis=1), 134)
        # print "Dice test: {}".format(np.mean(d))
        # print "Error rate test: {}".format(error_rate(np.argmax(ds_testing.outputs, axis=1), pred))
        save_records_plot(self.path, [err_validation], "err", t.n_train_batches, "upper right")
        # save_records_plot(self.path, [dice_testing], "dice", t.n_train_batches, "lower right")
        ###### Save the network
        net.save_parameters(self.path + "net.net")
if __name__ == '__main__':
exp_name = "paper_ultimate_conv"
data_path = "./datasets/paper_ultimate_conv/"
exp = ExperimentBrain(exp_name, data_path)
exp.run() | [
"adbrebs@gmail.com"
] | adbrebs@gmail.com |
fa626b028d0775bee0feb369374052f9523ec263 | b44a984ac8cfd183e218d56e1ec5d0d3e72d20fd | /High_Frequency/BFS/Normal/Number of Big Islands/bfs.py | 7700361d8fde7e07bf4ccef36dd2c64d3a73a234 | [] | no_license | atomextranova/leetcode-python | 61381949f2e78805dfdd0fb221f8497b94b7f12b | 5fce59e6b9c4079b49e2cfb2a6d2a61a0d729c56 | refs/heads/master | 2021-07-15T20:32:12.592607 | 2020-09-21T00:10:27 | 2020-09-21T00:10:27 | 207,622,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | import collections
# 4-connected neighbour offsets: left, right, up, down.
DIRECTIONS = [(0, -1), (0, 1), (-1, 0), (1, 0)]


class Solution:
    """
    @param grid: a 2d boolean array
    @param k: an integer
    @return: the number of Islands
    """
    def numsofIsland(self, grid, k):
        """Count 4-connected islands of 1s whose size is at least k."""
        if not grid or not grid[0]:
            return 0
        self.col_len = len(grid)
        self.row_len = len(grid[0])
        self.visited = set()
        result = 0
        for i in range(self.col_len):
            for j in range(self.row_len):
                if self.is_valid(grid, i, j):
                    if self.bfs(grid, i, j) >= k:
                        result += 1
        return result

    def bfs(self, grid, i, j):
        """Flood-fill from (i, j) with BFS and return the island's cell count.

        (Removed a `cur` list that collected visited cells but was never used.)
        """
        queue = collections.deque([(i, j)])
        self.visited.add((i, j))
        size = 0
        while queue:
            x, y = queue.popleft()
            size += 1
            for dx, dy in DIRECTIONS:
                new_x, new_y = x + dx, y + dy
                if self.is_valid(grid, new_x, new_y):
                    queue.append((new_x, new_y))
                    self.visited.add((new_x, new_y))
        return size

    def is_valid(self, grid, i, j):
        """True iff (i, j) is in bounds, not yet visited, and a land cell."""
        return -1 < i < self.col_len and -1 < j < self.row_len and (
            i, j) not in self.visited and grid[i][j] == 1
# Ad-hoc smoke run: counts islands of size >= 5 in a sample grid
# (result is discarded; kept for manual experimentation).
sol = Solution()
sol.numsofIsland([[1,1,0,1,0],[0,0,0,1,1],[1,1,0,1,0],[1,1,0,0,0],[0,0,0,0,1]], 5)
"atomextranova@gmail.com"
] | atomextranova@gmail.com |
63d0fc53f9cc76b62fdd92fc85ebfca1525ae1a9 | f07c7e3966de00005230ebe31ab0579b92b66872 | /math_utils/convolution.py | 6d414a7c9bd485bd3e9a29deee84ce7215587363 | [
"Apache-2.0"
] | permissive | Algomorph/LevelSetFusion-Python | 30d990228e3d63a40668ade58e7879ae6e581719 | 46625cd185da4413f9afaf201096203ee72d3803 | refs/heads/master | 2021-06-25T11:30:44.672555 | 2020-11-11T14:47:33 | 2020-11-11T14:47:33 | 152,263,399 | 12 | 2 | Apache-2.0 | 2019-05-30T23:12:33 | 2018-10-09T14:15:03 | Python | UTF-8 | Python | false | false | 7,026 | py | # ================================================================
# Created by Gregory Kramida on 9/18/18.
# Copyright (c) 2018 Gregory Kramida
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ================================================================
import numpy as np
from utils.sampling import get_focus_coordinates
from utils.printing import *
sobolev_kernel_1d = np.array([2.995900285895913839e-04,
4.410949535667896271e-03,
6.571318954229354858e-02,
9.956527948379516602e-01,
6.571318954229354858e-02,
4.410949535667896271e-03,
2.995900285895913839e-04])
def convolve_with_kernel_y(vector_field, kernel):
y_convolved = np.zeros_like(vector_field)
if len(vector_field.shape) == 3 and vector_field.shape[2] == 2:
for x in range(vector_field.shape[1]):
y_convolved[:, x, 0] = np.convolve(vector_field[:, x, 0], kernel, mode='same')
y_convolved[:, x, 1] = np.convolve(vector_field[:, x, 1], kernel, mode='same')
np.copyto(vector_field, y_convolved)
elif len(vector_field.shape) == 4 and vector_field.shape[3] == 3:
for z in range(vector_field.shape[2]):
for x in range(vector_field.shape[0]):
for i_val in range(3):
y_convolved[x, :, z, i_val] = np.convolve(vector_field[x, :, z, i_val], kernel, mode='same')
else:
raise ValueError("Can only process tensors with 3 dimensions (where last dimension is 2) or "
"tensors with 4 dimensions (where last dimension is 3), i.e. 2D & 3D vector fields")
return y_convolved
def convolve_with_kernel_x(vector_field, kernel):
    """Convolve every x-row of the vector field with a 1D kernel (mode='same').

    Accepts a 2D field of shape (H, W, 2) or a 3D field of shape
    (Z, Y, X, 3).  The result is written back into `vector_field` in place
    and the convolved array is returned.
    """
    result = np.zeros_like(vector_field)
    ndim = len(vector_field.shape)
    if ndim == 3 and vector_field.shape[2] == 2:
        for row in range(vector_field.shape[0]):
            for channel in range(2):
                result[row, :, channel] = np.convolve(vector_field[row, :, channel], kernel, mode='same')
    elif ndim == 4 and vector_field.shape[3] == 3:
        for depth in range(vector_field.shape[0]):
            for row in range(vector_field.shape[1]):
                for channel in range(3):
                    result[depth, row, :, channel] = np.convolve(vector_field[depth, row, :, channel], kernel, mode='same')
    else:
        raise ValueError("Can only process tensors with 3 dimensions (where last dimension is 2) or "
                         "tensors with 4 dimensions (where last dimension is 3), i.e. 2D & 3D vector fields")
    np.copyto(vector_field, result)
    return result
def convolve_with_kernel_z(vector_field, kernel):
    # NOTE(review): this only validates its input -- the actual z-axis
    # convolution is not implemented here (convolve_with_kernel performs
    # the z pass for 3D fields).
    if len(vector_field.shape) != 4 or vector_field.shape[3] != 3:
        raise ValueError("Can only process tensors with 4 dimensions (where last dimension is 3), i.e. 3D Vector field")
def convolve_with_kernel(vector_field, kernel=sobolev_kernel_1d, print_focus_coord_info=False):
    """Separable smoothing: convolve the field with `kernel` along each axis.

    Supports 2D fields of shape (H, W, 2) and 3D fields of shape
    (Z, Y, X, 3).  The input is updated in place and returned.
    """
    x_convolved = np.zeros_like(vector_field)
    y_convolved = np.zeros_like(vector_field)
    z_convolved = None
    if len(vector_field.shape) == 3 and vector_field.shape[2] == 2:
        focus_coordinates = get_focus_coordinates()
        # First pass: convolve each column (y axis), both components.
        for x in range(vector_field.shape[1]):
            y_convolved[:, x, 0] = np.convolve(vector_field[:, x, 0], kernel, mode='same')
            y_convolved[:, x, 1] = np.convolve(vector_field[:, x, 1], kernel, mode='same')
        # Second pass: convolve each row (x axis) of the y-convolved result.
        for y in range(vector_field.shape[0]):
            x_convolved[y, :, 0] = np.convolve(y_convolved[y, :, 0], kernel, mode='same')
            x_convolved[y, :, 1] = np.convolve(y_convolved[y, :, 1], kernel, mode='same')
        if print_focus_coord_info:
            # NOTE(review): this reads vector_field BEFORE the copyto below,
            # so it prints the pre-smoothing gradient -- confirm intended.
            new_gradient_at_focus = vector_field[focus_coordinates[1], focus_coordinates[0]]
            print(
                " H1 grad: {:s}[{:f} {:f}{:s}]".format(BOLD_GREEN, -new_gradient_at_focus[0], -new_gradient_at_focus[1],
                                                       RESET), sep='', end='')
        np.copyto(vector_field, x_convolved)
    elif len(vector_field.shape) == 4 and vector_field.shape[3] == 3:
        z_convolved = np.zeros_like(vector_field)
        # Three sequential 1D passes: x, then y, then z, for each component.
        for z in range(vector_field.shape[0]):
            for y in range(vector_field.shape[1]):
                for i_val in range(3):
                    x_convolved[z, y, :, i_val] = np.convolve(vector_field[z, y, :, i_val], kernel, mode='same')
        for z in range(vector_field.shape[0]):
            for x in range(vector_field.shape[2]):
                for i_val in range(3):
                    y_convolved[z, :, x, i_val] = np.convolve(x_convolved[z, :, x, i_val], kernel, mode='same')
        for y in range(vector_field.shape[1]):
            for x in range(vector_field.shape[2]):
                for i_val in range(3):
                    z_convolved[:, y, x, i_val] = np.convolve(y_convolved[:, y, x, i_val], kernel, mode='same')
        np.copyto(vector_field, z_convolved)
    else:
        raise ValueError("Can only process tensors with 3 dimensions (where last dimension is 2) or "
                         "tensors with 4 dimensions (where last dimension is 3), i.e. 2D & 3D vector fields")
    return vector_field
def convolve_with_kernel_preserve_zeros(vector_field, kernel=sobolev_kernel_1d, print_focus_coord_info=False):
    """Smooth a 2D vector field in place, forcing (near-)zero entries to stay zero.

    Same separable convolution as ``convolve_with_kernel`` (2D branch only),
    but entries whose original magnitude was below 1e-6 are reset to exactly
    0.0 after each convolution pass, so smoothing never "bleeds" into them.

    :param vector_field: numpy array of shape (H, W, 2).
    :param kernel: 1D convolution kernel (defaults to the module's Sobolev kernel).
    :param print_focus_coord_info: if True, print the negated smoothed gradient
        at the configured focus pixel for debugging.
    :return: ``vector_field`` (modified in place).
    """
    x_convolved = np.zeros_like(vector_field)
    y_convolved = np.zeros_like(vector_field)
    focus_coordinates = get_focus_coordinates()
    # Element-wise mask of entries considered zero in the *input* field.
    zero_check = np.abs(vector_field) < 1e-6
    # Pass 1: convolve every column (axis 0) for both components.
    for x in range(vector_field.shape[1]):
        y_convolved[:, x, 0] = np.convolve(vector_field[:, x, 0], kernel, mode='same')
        y_convolved[:, x, 1] = np.convolve(vector_field[:, x, 1], kernel, mode='same')
    y_convolved[zero_check] = 0.0
    # Pass 2: convolve every row (axis 1) of the first pass's output.
    for y in range(vector_field.shape[0]):
        x_convolved[y, :, 0] = np.convolve(y_convolved[y, :, 0], kernel, mode='same')
        x_convolved[y, :, 1] = np.convolve(y_convolved[y, :, 1], kernel, mode='same')
    x_convolved[zero_check] = 0.0
    np.copyto(vector_field, x_convolved)
    if print_focus_coord_info:
        # Components are negated, i.e. this prints the descent direction.
        new_gradient_at_focus = vector_field[focus_coordinates[1], focus_coordinates[0]]
        print(" H1 grad: {:s}[{:f} {:f}{:s}]".format(BOLD_GREEN, -new_gradient_at_focus[0], -new_gradient_at_focus[1],
                                                     RESET), sep='', end='')
    return vector_field
| [
"algomorph@gmail.com"
] | algomorph@gmail.com |
7e4b361f79a43152d672caa1c83ae56bd44ff673 | 5a25edcf994a760688dc7c933e8071bf4ff24df3 | /exercises/en/exc_04_11_02.py | a9511ce0f53ab920097e6454be946cf6c9569440 | [
"CC-BY-NC-4.0",
"MIT"
] | permissive | heyMP/spacy-course | 8762990ed6179011680730d9c24d5d34c0a8d954 | 3740c717f0d1090b01c1b0fe23f8e30af3bf0101 | refs/heads/master | 2022-11-07T21:52:15.479840 | 2020-06-25T18:13:44 | 2020-06-25T18:13:44 | 275,202,487 | 1 | 0 | MIT | 2020-06-26T16:39:32 | 2020-06-26T16:39:31 | null | UTF-8 | Python | false | false | 419 | py | TRAINING_DATA = [
(
"Reddit partners with Patreon to help creators build communities",
{"entities": [(0, 6, "WEBSITE"), (21, 28, "WEBSITE")]},
),
("PewDiePie smashes YouTube record", {"entities": [____, (18, 25, "WEBSITE")]}),
(
"Reddit founder Alexis Ohanian gave away two Metallica tickets to fans",
{"entities": [(0, 6, "WEBSITE"), ____]},
),
# And so on...
]
| [
"ines@ines.io"
] | ines@ines.io |
e5536d37b56d755c780c4e6d352acd6422f843bd | 04a4d89bc7915e0624abf95651e5ad21d9ed6da2 | /base/src/cloudstrype/array.py | 8ecac8685d1afb3f2bf4bec40a582be19bf1dc0f | [] | no_license | btimby/cloudstrype-too | 5af8f8a4fecb60838093aafc6de44cab5bf5da7c | 6bc600de7d181c41a9d7e7cca557025c6aea16f2 | refs/heads/master | 2021-04-29T21:49:14.585978 | 2018-02-15T22:18:14 | 2018-02-15T22:18:14 | 121,624,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | from __future__ import absolute_import
import os
from .base import sync_wrapper
from .auth import AuthClient
ARRAY_URL = os.environ.get('CLOUDSTRYPE_ARRAY_URL', 'http://array/')
class ArrayClient(AuthClient):
    """Asynchronous client for the chunk-array service.

    ``replicas`` sets the default replica count requested when storing a
    chunk; it can be overridden per ``put`` call.
    """

    base_url = ARRAY_URL

    def __init__(self, *args, replicas=1, **kwargs):
        # BUG FIX: the original signature was `(*args, **kwargs, replicas=1)`,
        # which is a SyntaxError -- `**kwargs` must be the final parameter.
        # `replicas` is now keyword-only, preserving the intended call style
        # `ArrayClient(..., replicas=n)`.
        super().__init__(*args, **kwargs)
        self.replicas = replicas

    async def put(self, chunk_id, params=None, replicas=None):
        """Store a chunk, requesting `replicas` copies (default: self.replicas).

        NOTE(review): `chunk_id` is accepted but never used in the request;
        presumably the service assigns ids on POST -- confirm against the API.
        """
        replicas = replicas or self.replicas
        params = params or {}
        # setdefault keeps an explicit 'replicas' value supplied by the caller.
        params.setdefault('replicas', replicas)
        return await self.request('POST', '/chunk/', params=params)
    put_sync = sync_wrapper(put)

    async def get(self, chunk_id, params=None):
        """Fetch the chunk with the given id."""
        return await self.request(
            'GET', '/chunk/%s/' % chunk_id, params=params)
    get_sync = sync_wrapper(get)
| [
"btimby@gmail.com"
] | btimby@gmail.com |
507a8251ec9f391f3544477ad1510d2654cb40f3 | 4c4c589f9047c60eb3d65d5a7fa86ded7c6c1d64 | /populators/create_all.py | 333b163df8a55cfe1deeb691c55c3af5c901374b | [] | no_license | navkal/el | 39a27e92283f922219cebffa3821806fe5cd8a5e | a4739dc33022fb1b4e9a6f71ef40c989896b08f5 | refs/heads/master | 2023-08-09T03:35:06.918760 | 2023-08-04T15:57:12 | 2023-08-04T15:57:12 | 237,359,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | # Copyright 2019 Energize Andover. All rights reserved.
import argparse
import os
import sys
sys.path.append( '../util' )
import util
# Main program
# Entry point: rebuilds the master database and publishes it; with -d it
# additionally builds debug variants of both.
if __name__ == '__main__':
    parser = argparse.ArgumentParser( description='Create all Andover databases' )
    parser.add_argument( '-d', dest='debug', action='store_true', help='Generate debug versions of databases?' )
    args = parser.parse_args()
    # Command prefix used to shell out to the populator scripts below.
    PYTHON = 'python '
    # Build the master database, then publish it into ../db.
    print( '\n=======> Master' )
    os.system( PYTHON + 'master.py -o ../db/master.sqlite' )
    print( '\n=======> Publish' )
    os.system( PYTHON + 'publish.py -i ../db/master.sqlite -o ../db' )
    if args.debug:
        # Same pipeline again with -d, producing the *_debug database files.
        print( '\n=======> Master Debug' )
        os.system( PYTHON + 'master.py -o ../db/master_debug.sqlite -d' )
        print( '\n=======> Publish Debug' )
        os.system( PYTHON + 'publish.py -i ../db/master_debug.sqlite -o ../db -d' )
    # Report total wall-clock time (helper from ../util).
    util.report_elapsed_time()
"navkal@hotmail.com"
] | navkal@hotmail.com |
b0d8569db3fe5071021907875f85cbb80e012dd1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03646/s658997973.py | aa705f075e79683fc7b7b695a6c8322e03dacc5a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | k = int(input())
n = 50
q, r = divmod(k, n)
A = [n-1]*n
for i in range(n):
A[i] += q
for i in range(r):
A[i] += n+1
for j in range(n):
A[j] -= 1
print(n)
print(*A)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4115eda73266412d21b877ad0115bc888ce614c4 | f9215dc23bf0ab8ef9730e78413e5844247d270b | /jarbas/core/tests/__init__.py | d5b296cc9396389d1a2b48c6d00078e0e120f390 | [
"MIT"
] | permissive | vitallan/jarbas | 6c873c98663e68e5d8c2192d0648c6870a01a4a1 | 2ffdccfec499c271bec13bf518c847d8d5210d94 | refs/heads/master | 2021-01-13T04:31:09.492131 | 2016-09-26T19:39:55 | 2016-09-26T21:27:52 | 69,503,820 | 1 | 0 | null | 2016-09-28T21:07:35 | 2016-09-28T21:07:35 | null | UTF-8 | Python | false | false | 1,606 | py | from datetime import date, datetime
sample_document_data = dict(
document_id=42,
congressperson_name='Roger That',
congressperson_id=1,
congressperson_document=2,
term=1970,
state='UF',
party='Partido',
term_id=3,
subquota_number=4,
subquota_description='Subquota description',
subquota_group_id=5,
subquota_group_description='Subquota group desc',
supplier='Acme',
cnpj_cpf='11111111111111',
document_number='6',
document_type=7,
issue_date='1970-01-01 00:00:00',
document_value=8.90,
remark_value=1.23,
net_value=4.56,
month=1,
year=1970,
installment=7,
passenger='John Doe',
leg_of_the_trip=8,
batch_number=9,
reimbursement_number=10,
reimbursement_value=11.12,
applicant_id=13
)
sample_activity_data = dict(
code='42',
description='So long, so long, and thanks for all the fish'
)
sample_supplier_data = dict(
cnpj='12.345.678/9012-34',
opening=date(1995, 9, 27),
legal_entity='42 - The answer to life, the universe, and everything',
trade_name="Don't panic",
name='Do not panic, sir',
type='BOOK',
status='OK',
situation='EXISTS',
situation_reason='Douglas Adams wrote it',
situation_date=date(2016, 9, 25),
special_situation='WE LOVE IT',
special_situation_date=date(1997, 9, 28),
responsible_federative_entity='Vogons',
address='Earth',
number='',
additional_address_details='',
neighborhood='',
zip_code='',
city='',
state='',
email='',
phone='',
last_updated=datetime.now(),
)
| [
"cuducos@gmail.com"
] | cuducos@gmail.com |
0ca467d38b50cb1b3fea439d181379282e495201 | e2cf46746537799f2584fa9bc3307c95f11768e3 | /flashsale/jimay/models/order.py | faa88c71549e4202226cdc4bc94d4af4f50fc0ef | [] | no_license | wahello/xiaolusys | 3c7801543d352a7a1b1825481982cea635ebcdd4 | 7296b9b68167001c91f4b07c1f8d441cc5653578 | refs/heads/master | 2020-03-30T03:39:26.485590 | 2018-08-21T06:23:05 | 2018-08-21T06:23:05 | 150,700,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,196 | py | # coding: utf8
from __future__ import absolute_import, unicode_literals
import datetime
import hashlib
from django.db import models, transaction
from django.dispatch import receiver
from django.utils.functional import cached_property
from signals.jimay import signal_jimay_agent_order_ensure, signal_jimay_agent_order_paid
from core.utils.unikey import uniqid
import logging
logger = logging.getLogger(__name__)
def gen_uuid_order_no():
    # Order numbers look like "<prefix><yymmdd><unique-suffix>", e.g. "ad170115...";
    # uniqueness comes from uniqid()'s generated suffix.
    return uniqid('%s%s' % (JimayAgentOrder.PREFIX_CODE, datetime.date.today().strftime('%y%m%d')))
class JimayAgentOrder(models.Model):
PREFIX_CODE = 'ad'
ST_CREATE = 0
ST_ENSURE = 1
ST_PAID = 2
ST_SEND = 3
ST_COMPLETED = 4
ST_CANCEL = 5
ST_CHOICES = (
(ST_CREATE, '已提交申请'),
(ST_ENSURE, '已确认订金'),
(ST_PAID, '已确认付款'),
(ST_SEND, '已打包出库'),
(ST_COMPLETED, '已签收完成'),
(ST_CANCEL, '已取消订货'),
)
UNSURE = 0
WEIXIN = 1
ALIPAY = 2
BANK = 3
CHANNEL_CHOICES = (
(UNSURE, '未知渠道'),
(WEIXIN, '个人微信'),
(ALIPAY, '个人支付宝'),
(BANK, '银行转账'),
)
buyer = models.ForeignKey('pay.Customer', verbose_name='原始用户')
order_no = models.CharField(max_length=24, default=gen_uuid_order_no, unique=True, verbose_name='订单编号')
title = models.CharField(max_length=64, blank=True, verbose_name='商品名称')
pic_path = models.CharField(max_length=256, blank=True, verbose_name='商品图片')
model_id = models.IntegerField(default=0, verbose_name='款式ID')
sku_id = models.IntegerField(default=0, verbose_name='SKUID')
num = models.IntegerField(default=0, verbose_name='数量')
total_fee = models.IntegerField(default=0, verbose_name='商品总价(分)', help_text='商品零售价')
payment = models.IntegerField(default=0, verbose_name='支付金额(分)', help_text='现默认由运营人员填写')
address = models.ForeignKey('pay.UserAddress', related_name='jimay_agent_manager', verbose_name='用户地址')
status = models.IntegerField(default=ST_CREATE, db_index=True, choices=ST_CHOICES, verbose_name='状态')
ensure_time = models.DateTimeField(blank=True, null=True, verbose_name='审核时间')
pay_time = models.DateTimeField(blank=True, null=True, verbose_name='付款时间')
channel = models.IntegerField(choices=CHANNEL_CHOICES, default=UNSURE, db_index=True ,verbose_name='支付渠道')
logistic = models.ForeignKey('logistics.LogisticsCompany', null=True, blank=True, verbose_name='物流公司')
logistic_no = models.CharField(max_length=32, blank=True, verbose_name='物流单号')
send_time = models.DateTimeField(blank=True, null=True, verbose_name='发货时间')
manager = models.ForeignKey('auth.user', blank=True, null=True, verbose_name='管理员')
sys_memo = models.CharField(max_length=512, blank=True, verbose_name='系统备注')
created = models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='创建日期')
modified = models.DateTimeField(auto_now=True, verbose_name='修改日期')
class Meta:
db_table = 'jimay_agentorder'
app_label = 'jimay'
verbose_name = '己美医学/订货记录'
verbose_name_plural = '己美医学/订货记录'
def __unicode__(self):
return '%s,%s' % (self.id, self.buyer)
@classmethod
def gen_unique_order_no(cls):
return gen_uuid_order_no()
@classmethod
def is_createable(cls, buyer):
return not cls.objects.filter(buyer=buyer,status=JimayAgentOrder.ST_CREATE).exists()
def save(self, *args, **kwargs):
if self.status == JimayAgentOrder.ST_ENSURE and not self.pay_time:
self.action_ensure(self.ensure_time or datetime.datetime.now())
if self.status in (JimayAgentOrder.ST_PAID, JimayAgentOrder.ST_SEND) and self.pay_time:
self.action_paid(self.pay_time or datetime.datetime.now())
resp = super(JimayAgentOrder, self).save(*args, **kwargs)
return resp
def is_cancelable(self):
return self.status == JimayAgentOrder.ST_CREATE
def set_status_canceled(self):
self.status = JimayAgentOrder.ST_CANCEL
def action_ensure(self, time_ensure):
""" 订单审核通过 """
transaction.on_commit(lambda: signal_jimay_agent_order_ensure.send_robust(
sender=JimayAgentOrder,
obj=self,
time_ensure=time_ensure
))
def action_paid(self, time_paid):
""" 订单支付通知 """
transaction.on_commit(lambda: signal_jimay_agent_order_paid.send_robust(
sender=JimayAgentOrder,
obj=self,
time_paid=time_paid
))
@receiver(signal_jimay_agent_order_ensure, sender=JimayAgentOrder)
def jimay_order_ensure_weixin_paynotify(sender, obj, time_ensure, **kwargs):
try:
from shopapp.weixin.models import WeiXinAccount
from ..tasks import task_weixin_asynchronous_send_payqrcode
from django.conf import settings
wx_account = WeiXinAccount.objects.get(app_id=settings.WX_JIMAY_APPID)
task_weixin_asynchronous_send_payqrcode.delay(
wx_account.account_id, obj.buyer.id,
'wxpub',
('您的订货单已审核通过, 需支付金额:¥%s元, 请长按识别二维码转账, '
+'转账时请备注: %s_的订货号_%s .(如果需要支付宝付款, 请点击菜单[己美医学]/[支付宝付款码])'
) % (obj.payment * 0.01, obj.buyer.mobile, obj.id)
)
except Exception, exc:
logger.error(str(exc), exc_info=True)
@receiver(signal_jimay_agent_order_paid, sender=JimayAgentOrder)
def jimay_order_paid_update_stat(sender, obj, time_paid, **kwargs):
try:
from .stat import JimayAgentStat
from .agent import JimayAgent
agent = JimayAgent.objects.filter(mobile=obj.buyer.mobile).first()
JimayAgentStat.calc_salenum_and_sales_by_agent(agent)
except Exception, exc:
logger.error(str(exc), exc_info=True)
| [
"xiuqing.mei@xiaolu.so"
] | xiuqing.mei@xiaolu.so |
c9b6c3a1c4baa68286a7c7502f3b5d1a66d14d49 | 94f4bb0f6e43b2eb2f1bdb284a580b76121fa9af | /1109.py | ba76a0936d7d3434232ab867509244588d344dbe | [] | no_license | huosan0123/leetcode-py | f1ec8226bae732369d4e1989b99ab0ba4b4061c4 | 22794e5e80f534c41ff81eb40072acaa1346a75c | refs/heads/master | 2021-01-25T11:48:17.365118 | 2019-09-12T15:45:34 | 2019-09-12T15:45:34 | 93,934,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | class Solution(object):
def corpFlightBookings(self, bookings, n):
"""
:type bookings: List[List[int]]
:type n: int
:rtype: List[int]
"""
if not bookings:
return []
ans = [0] * n
for b in bookings:
ans[b[0]-1] += b[2]
if b[1] < n:
ans[b[1]] -= b[2]
for i in range(1, n):
ans[i] += ans[i-1]
return ans
| [
"noreply@github.com"
] | huosan0123.noreply@github.com |
2f47ca3bbbcb658c413b278edb19c876e0151737 | d5be2d0dadbe7c89642eadae595b6fb739ba1f63 | /Some_python/Genome_Sequencing_Bioinformatics_II-master/18.LinearSpectrum.py | 29c66e8db5505ea9a5774efd8b3e5647cca2b6db | [] | no_license | TomaszSzyborski/Bioinformatics_Specialization | af738599713cebef9d9fdb0265ec473a125df0d1 | 2ceb9d8595904da7dd5f718e82b786e3993957a8 | refs/heads/master | 2020-12-02T19:47:54.635971 | 2017-07-16T20:31:40 | 2017-07-16T20:31:40 | 96,391,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,519 | py | '''
LinearSpectrum(Peptide, AminoAcid, AminoAcidMass)
PrefixMass(0) ? 0
for i ? 1 to |Peptide|
for j ? 1 to 20
if AminoAcid(j) = i-th amino acid in Peptide
PrefixMass(i) ? PrefixMass(i ? 1) + AminoAcidMass(j)
LinearSpectrum ? a list consisting of the single integer 0
for i ? 0 to |Peptide| ? 1
for j ? i + 1 to |Peptide|
add PrefixMass(j) ? PrefixMass(i) to LinearSpectrum
return sorted list LinearSpectrum
CODE CHALLENGE: Implement LinearSpectrum.
Input: An amino acid string Peptide.
Output: The linear spectrum of Peptide.
Sample Input:
NQEL
Sample Output:
0 113 114 128 129 242 242 257 370 371 484
https://github.com/AnnaUfliand/Bioinformatics/blob/1a38fc077eaef5cf176fecf97153ad7f78f3deab/HW5/TheoreticalSpectrumOfLinearPeptide.py
'''
# Monoisotopic integer masses of the 20 standard amino-acid residues.
masses = {'G': 57, 'A': 71, 'S': 87, 'P': 97, 'V': 99, 'T': 101, 'C': 103, 'I': 113, 'L': 113, 'N': 114, 'D': 115,
          'K': 128, 'Q': 128, 'E': 129, 'M': 131, 'H': 137, 'F': 147, 'R': 156, 'Y': 163, 'W': 186}


def linearSpectrum(peptide):
    """Return the sorted linear spectrum of *peptide*.

    The linear spectrum is the multiset of masses of every contiguous
    substring of the peptide, including the empty substring (mass 0).

    Fix over the original: the original indexed ``peptide[-1]``
    unconditionally while building the prefix masses, so an empty peptide
    raised IndexError; it now correctly yields ``[0]``.

    :param peptide: amino-acid string, e.g. ``'NQEL'``.
    :return: sorted list of integer masses.
    """
    # prefix[i] = total mass of the first i residues.
    prefix = [0]
    for residue in peptide:
        prefix.append(prefix[-1] + masses[residue])
    # Mass of substring peptide[i:j] is prefix[j] - prefix[i].
    spectrum = [0]
    for i in range(len(peptide)):
        for j in range(i + 1, len(peptide) + 1):
            spectrum.append(prefix[j] - prefix[i])
    return sorted(spectrum)
peptide = 'VAQ'
print (*linearSpectrum(peptide), sep = ' ') | [
"tomasz.szyborski@gmail.com"
] | tomasz.szyborski@gmail.com |
6c9052cd8ec268fdb89ca4c446426047f5b2e64b | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_express_route_ports_locations_operations.py | 2cc07c596adf32b9e88c46b7057b896344f9f125 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 7,901 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsLocationsOperations:
"""ExpressRoutePortsLocationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRoutePortsLocationListResult"]:
"""Retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for each
location. Available bandwidths can only be obtained when retrieving a specific peering
location.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortsLocationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.ExpressRoutePortsLocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortsLocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations'} # type: ignore
async def get(
self,
location_name: str,
**kwargs: Any
) -> "_models.ExpressRoutePortsLocation":
"""Retrieves a single ExpressRoutePort peering location, including the list of available
bandwidths available at said peering location.
:param location_name: Name of the requested ExpressRoutePort peering location.
:type location_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePortsLocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ExpressRoutePortsLocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'locationName': self._serialize.url("location_name", location_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePortsLocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}'} # type: ignore
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
085d88ce79b4c09829e7c422a1acd0f34b144625 | 7e9248f3b79b2ea6698d873189de0c3422997033 | /backend/tasker_business/models.py | cce19087ab6df3c7583760945bc91ed79d08b103 | [] | no_license | crowdbotics-apps/rpa-19128 | cf2c05d0e5e82b9989b8b9694bd48802655e5b3f | 88e35e7b6249fa167bb3dea3730141cb277a8e09 | refs/heads/master | 2022-11-20T03:08:53.659668 | 2020-07-23T18:18:07 | 2020-07-23T18:18:07 | 282,024,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | from django.conf import settings
from django.db import models
class Timeslot(models.Model):
"Generated Model"
date = models.DateField()
start_time = models.TimeField()
end_time = models.TimeField()
class TaskerAvailability(models.Model):
"Generated Model"
tasker = models.OneToOneField(
"task_profile.TaskerProfile",
on_delete=models.CASCADE,
related_name="taskeravailability_tasker",
)
timeslots = models.ManyToManyField(
"tasker_business.Timeslot", related_name="taskeravailability_timeslots",
)
class BusinessPhoto(models.Model):
"Generated Model"
tasker = models.ForeignKey(
"task_profile.TaskerProfile",
on_delete=models.CASCADE,
related_name="businessphoto_tasker",
)
photo = models.URLField()
description = models.TextField()
class TaskerSkill(models.Model):
"Generated Model"
tasker = models.ForeignKey(
"task_profile.TaskerProfile",
on_delete=models.CASCADE,
related_name="taskerskill_tasker",
)
name = models.CharField(max_length=255,)
rate = models.FloatField()
description = models.TextField()
category = models.ForeignKey(
"task_category.Category",
on_delete=models.CASCADE,
related_name="taskerskill_category",
)
subcategory = models.ForeignKey(
"task_category.Subcategory",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="taskerskill_subcategory",
)
# Create your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
b2873364981f6c89eed840b8be52ebb7fb2cd891 | 81f2d4aa3bfb216e04efec81c7f614603a8fd384 | /irekua/rest/utils/permissions.py | a579360cb99113d2a5632cf58fca0969b2adc678 | [] | no_license | CONABIO-audio/irekua | 44564020c342e8bd49a14707f206962869bc026d | 4531a6dbb8b0a0014567930a134bc4399c2c00d4 | refs/heads/master | 2022-12-10T09:43:05.866848 | 2019-10-17T16:18:21 | 2019-10-17T16:18:21 | 170,434,169 | 0 | 1 | null | 2022-12-08T01:44:04 | 2019-02-13T03:32:55 | Python | UTF-8 | Python | false | false | 1,680 | py | from rest_framework.permissions import BasePermission
from rest.permissions import IsAdmin
from rest.permissions import ReadOnly
class PermissionMapping(object):
    """Maps viewset actions to permission classes, with a configurable fallback."""

    DEFAULT_PERMISSION = IsAdmin | ReadOnly

    def __init__(self, mapping=None, default=None):
        mapping = {} if mapping is None else mapping
        assert isinstance(mapping, dict)
        self.permission_mapping = mapping
        if default is None:
            default = PermissionMapping.DEFAULT_PERMISSION
        # Normalize the fallback to a list of permission classes.
        self.default_permission = default if isinstance(default, (tuple, list)) else [default]

    def get_permissions(self, action):
        """Return the list of permission classes registered for *action*,
        falling back to the default when the action is unmapped."""
        if action not in self.permission_mapping:
            return self.default_permission
        permissions = self.permission_mapping[action]
        if isinstance(permissions, (list, tuple)):
            return permissions
        return [permissions]

    def extend(self, additional_actions=None, **kwargs):
        """Return a new mapping with extra action entries layered on top.

        Note: the new mapping uses the class default permission, not this
        instance's (mirrors the original behavior).
        """
        combined = dict(self.permission_mapping)
        combined.update(additional_actions or {})
        combined.update(kwargs)
        return PermissionMapping(combined)
class PermissionMappingMixin(object):
    """Viewset mixin that resolves permissions through a PermissionMapping.

    Subclasses must override ``permission_mapping`` with a PermissionMapping
    instance; ``get_permissions`` then instantiates the classes mapped to
    the current ``self.action``.
    """

    @property
    def permission_mapping(self):
        # BUG FIX: removed the stray debug `print(self.__name__)` -- instances
        # have no `__name__` attribute, so the print raised AttributeError and
        # masked the intended NotImplementedError for subclasses.
        raise NotImplementedError

    def get_permissions(self):
        # No action resolved (e.g. 405 handling) -> no permission checks.
        if self.action is None:
            return []
        permission_classes = self.permission_mapping.get_permissions(self.action)
        return [permission() for permission in permission_classes]
| [
"santiago.mbal@gmail.com"
] | santiago.mbal@gmail.com |
a900434d73f3b5d48843ddb0b816fc9d3d54df50 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/OpenPhish/Integrations/OpenPhish_v2/test_data/api_raw.py | 734dfd9cc498a4640f5a22d562a7bb1755d31082 | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 788 | py | RAW_DATA = 'https://cnannord.com/paypal/firebasecloud/83792/htmjrtfgdsaopjdnbhhdmmdgrhehnndnmmmbvvbnmn' \
'dmnbnnbbmnm/service/paypal\nhttp://payameghdir.ir/cxxc/Owa/\nhttps://fxsearchdesk.net/Client' \
'/tang/step4.html\nhttps://fxsearchdesk.net/Client/tang/step3.html\nhttps://fxsearchdesk.net/' \
'Client/tang/step2.html\nhttp://fxsearchdesk.net/Client/tang/step2.html\n' \
'http://fxsearchdesk.net/Client/tang/step3.html\nhttp://fxsearchdesk.net/Client/tang/step4.html\n' \
'https://fxsearchdesk.net/Client/tang\nhttp://fxsearchdesk.net/Client/tang/\n' \
'http://fxsearchdesk.net/Client/tang\nhttp://revisepayee.com/admin\n' \
'http://hmrc.resolutionfix.com/\nhttps://hmrc.resolutionfix.com/refund/details'
| [
"noreply@github.com"
] | demisto.noreply@github.com |
718fe8cfe0779e581eb25d464850b5df0c04d846 | f9f54c110fa422408e95deb077bbe594f8aec960 | /epikjjh/sort/sort.py | 63e3e33e2febd51074e1f7c02c8be30ff3a875ba | [
"MIT"
def selection_sort(arr: list):
    """Sort *arr* ascending, in place, via selection sort.

    Each pass selects the maximum of the unsorted prefix and swaps it to the
    end of that prefix.

    BUG FIX: the original compared against the undefined name ``max_elem``
    (NameError on any list with >= 2 elements); the comparison now uses
    ``arr[max_index]``.
    """
    for i in range(len(arr) - 1, 0, -1):
        max_index = i
        for j in range(i):
            if arr[max_index] < arr[j]:
                max_index = j
        arr[i], arr[max_index] = arr[max_index], arr[i]
def insertion_sort(arr: list):
    """Sort *arr* ascending, in place, via insertion sort (stable).

    Improvement: break out of the inner loop as soon as the element reaches
    its slot -- the original kept comparing all the way down to index 0, so
    even already-sorted input cost O(n^2) comparisons.
    """
    for i in range(1, len(arr)):
        for j in range(i, 0, -1):
            if arr[j] < arr[j - 1]:
                arr[j], arr[j - 1] = arr[j - 1], arr[j]
            else:
                break
def bubble_sort(arr: list):
    """Sort *arr* ascending, in place, via bubble sort (stable).

    Improvement: track whether a pass performed any swap and stop early when
    none did -- the list is already sorted at that point, making the best
    case O(n) instead of the original's unconditional O(n^2).
    """
    for i in range(len(arr) - 1, 0, -1):
        swapped = False
        for j in range(i):
            if arr[j + 1] < arr[j]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break
def merge(arr: list, left: int, mid: int, right: int):
    """Merge the adjacent sorted runs arr[left..mid] and arr[mid+1..right] in place."""
    merged = []
    i, j = left, mid + 1
    # Interleave the two runs; on ties the right run's element goes first,
    # exactly as the original's `<` comparison did.
    while i <= mid and j <= right:
        if arr[i] < arr[j]:
            merged.append(arr[i])
            i += 1
        else:
            merged.append(arr[j])
            j += 1
    # At most one of these leftovers is non-empty.
    merged.extend(arr[i:mid + 1])
    merged.extend(arr[j:right + 1])
    arr[left:right + 1] = merged
def merge_sort(arr: list, left: int, right: int):
    """Top-down merge sort of arr[left..right] (inclusive), in place via merge()."""
    if left >= right:
        return
    mid = (left + right) // 2
    merge_sort(arr, left, mid)
    merge_sort(arr, mid + 1, right)
    merge(arr, left, mid, right)
def quick_sort_outplace(arr: list, left: int, right: int):
    """Quicksort arr[left..right] (inclusive) using out-of-place three-way
    partitioning around the middle element."""
    if left >= right:
        return
    pivot = arr[(left + right) // 2]
    segment = arr[left:right + 1]
    smaller = [v for v in segment if v < pivot]
    equal = [v for v in segment if v == pivot]
    larger = [v for v in segment if v > pivot]
    # Write the partitioned segment back, then recurse on the two sides;
    # pivot-equal elements are already in final position.
    arr[left:right + 1] = smaller + equal + larger
    quick_sort_outplace(arr, left, left + len(smaller) - 1)
    quick_sort_outplace(arr, right - len(larger) + 1, right)
def quick_sort_inplace(arr: list, left: int, right: int):
    # In-place quicksort of arr[left..right] (inclusive), partitioning with
    # two converging cursors around the middle element.
    if left < right:
        low = left
        high = right
        pivot = arr[(low+high)//2]
        # Partition invariant on exit: high < low, arr[left..high] <= pivot,
        # arr[low..right] >= pivot (any gap between them holds pivot-equal values).
        while low <= high:
            # Skip elements already on the correct side of the pivot.
            while arr[low] < pivot:
                low += 1
            while arr[high] > pivot:
                high -= 1
            if low <= high:
                # Swap the out-of-place pair and advance both cursors; advancing
                # past pivot-equal elements guarantees the loop makes progress.
                arr[low], arr[high] = arr[high], arr[low]
                low, high = low + 1, high - 1
        # Recurse on the two partitions; `low` is the split point.
        quick_sort_inplace(arr, left, low-1)
        quick_sort_inplace(arr, low, right)
def heapify(tree: list, idx: int, length: int):
    """Sift tree[idx] down within the first *length* entries to restore the
    max-heap property (children of i at 2i+1 and 2i+2). Iterative form."""
    while True:
        largest = idx
        left = 2 * idx + 1
        right = 2 * idx + 2
        if left < length and tree[left] > tree[largest]:
            largest = left
        if right < length and tree[right] > tree[largest]:
            largest = right
        if largest == idx:
            # Both children (if any) are <= the node: heap property holds.
            return
        tree[idx], tree[largest] = tree[largest], tree[idx]
        idx = largest
def heap_sort(arr: list):
    """Sort *arr* ascending, in place: build a max-heap, then repeatedly move
    the current maximum (the root) just past the shrinking heap boundary."""
    size = len(arr)
    # Heapify every internal node, bottom-up.
    for root in range(size // 2 - 1, -1, -1):
        heapify(arr, root, size)
    # Extract the maximum one element at a time.
    for end in range(size - 1, 0, -1):
        arr[end], arr[0] = arr[0], arr[end]
        heapify(arr, 0, end)
"epikjjh@gmail.com"
] | epikjjh@gmail.com |
6f78f852e6aca7f1c649ae0695b2151e991cabc2 | 2bacd64bd2679bbcc19379947a7285e7ecba35c6 | /1-notebook-examples/keras-udemy-course/cnn_class2/use_pretrained_weights_resnet.py | 48c6bc23ebedeb15802fec8078bb5d46dd73b5ff | [
"MIT"
] | permissive | vicb1/deep-learning | cc6b6d50ae5083c89f22512663d06b777ff8d881 | 23d6ef672ef0b3d13cea6a99984bbc299d620a73 | refs/heads/master | 2022-12-12T15:56:55.565836 | 2020-03-06T01:55:55 | 2020-03-06T01:55:55 | 230,293,726 | 0 | 0 | MIT | 2022-12-08T05:27:43 | 2019-12-26T16:23:18 | Jupyter Notebook | UTF-8 | Python | false | false | 4,849 | py | # https://deeplearningcourses.com/c/advanced-computer-vision
# https://www.udemy.com/advanced-computer-vision
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
from keras.layers import Input, Lambda, Dense, Flatten
from keras.models import Model
from keras.applications.resnet50 import ResNet50, preprocess_input
# from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
# re-size all the images to this
IMAGE_SIZE = [224, 224] # feel free to change depending on dataset
# training config:
epochs = 16
batch_size = 32
# https://www.kaggle.com/paultimothymooney/blood-cells
train_path = '../large_files/blood_cell_images/TRAIN'
valid_path = '../large_files/blood_cell_images/TEST'
# https://www.kaggle.com/moltean/fruits
# train_path = '../large_files/fruits-360/Training'
# valid_path = '../large_files/fruits-360/Validation'
# train_path = '../large_files/fruits-360-small/Training'
# valid_path = '../large_files/fruits-360-small/Validation'
# useful for getting number of files
image_files = glob(train_path + '/*/*.jp*g')
valid_image_files = glob(valid_path + '/*/*.jp*g')
# useful for getting number of classes
folders = glob(train_path + '/*')
# look at an image for fun
plt.imshow(image.load_img(np.random.choice(image_files)))
plt.show()
# add preprocessing layer to the front of VGG
# NOTE(review): comment says VGG but the base model here is ResNet50.
res = ResNet50(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
# don't train existing weights
for layer in res.layers:
    layer.trainable = False
# our layers - you can add more if you want
x = Flatten()(res.output)
# x = Dense(1000, activation='relu')(x)
# One softmax output per class directory found under train_path.
prediction = Dense(len(folders), activation='softmax')(x)
# create a model object
model = Model(inputs=res.input, outputs=prediction)
# view the structure of the model
model.summary()
# tell the model what cost and optimization method to use
model.compile(
    loss='categorical_crossentropy',
    optimizer='rmsprop',
    metrics=['accuracy']
)
# create an instance of ImageDataGenerator
gen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    preprocessing_function=preprocess_input
)
# test generator to see how it works and some other useful things
# get label mapping for confusion matrix plot later
test_gen = gen.flow_from_directory(valid_path, target_size=IMAGE_SIZE)
print(test_gen.class_indices)
labels = [None] * len(test_gen.class_indices)
for k, v in test_gen.class_indices.items():
    labels[v] = k
# should be a strangely colored image (due to VGG weights being BGR)
for x, y in test_gen:
    print("min:", x[0].min(), "max:", x[0].max())
    plt.title(labels[np.argmax(y[0])])
    plt.imshow(x[0])
    plt.show()
    break
# create generators
train_generator = gen.flow_from_directory(
    train_path,
    target_size=IMAGE_SIZE,
    shuffle=True,
    batch_size=batch_size,
)
# NOTE(review): the validation generator reuses the augmenting `gen`
# (rotations/flips) and shuffles; validation data is normally fed
# un-augmented and unshuffled -- confirm this is intentional.
valid_generator = gen.flow_from_directory(
    valid_path,
    target_size=IMAGE_SIZE,
    shuffle=True,
    batch_size=batch_size,
)
# fit the model
r = model.fit_generator(
    train_generator,
    validation_data=valid_generator,
    epochs=epochs,
    steps_per_epoch=len(image_files) // batch_size,
    validation_steps=len(valid_image_files) // batch_size,
)
def get_confusion_matrix(data_path, N):
    """Build a confusion matrix over (at least) the first N images in data_path.

    The directory is iterated without shuffling so the prediction stream and
    the target stream stay aligned; argmax predictions are compared against
    the one-hot targets.
    """
    print("Generating confusion matrix", N)
    all_preds = np.array([])
    all_targets = np.array([])
    batches_seen = 0
    stream = gen.flow_from_directory(
        data_path, target_size=IMAGE_SIZE, shuffle=False, batch_size=batch_size * 2)
    for batch_x, batch_y in stream:
        batches_seen += 1
        if batches_seen % 50 == 0:
            # Progress marker every 50 batches.
            print(batches_seen)
        batch_preds = np.argmax(model.predict(batch_x), axis=1)
        all_preds = np.concatenate((all_preds, batch_preds))
        all_targets = np.concatenate((all_targets, np.argmax(batch_y, axis=1)))
        if len(all_targets) >= N:
            # The generator loops forever; stop once N samples are covered.
            break
    return confusion_matrix(all_targets, all_preds)
# Confusion matrices over the full training and validation sets.
cm = get_confusion_matrix(train_path, len(image_files))
print(cm)
valid_cm = get_confusion_matrix(valid_path, len(valid_image_files))
print(valid_cm)
# plot some data
# loss
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.show()
# accuracies
# NOTE(review): the 'acc'/'val_acc' history keys match older Keras releases;
# newer ones record 'accuracy'/'val_accuracy' -- confirm the installed version.
plt.plot(r.history['acc'], label='train acc')
plt.plot(r.history['val_acc'], label='val acc')
plt.legend()
plt.show()
from util import plot_confusion_matrix
plot_confusion_matrix(cm, labels, title='Train confusion matrix')
plot_confusion_matrix(valid_cm, labels, title='Validation confusion matrix') | [
"vbajenaru@gmail.com"
] | vbajenaru@gmail.com |
b46ff85b0961b21961ea9d2a07ee248a8ad4b92e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_portico.py | 71356c0776ec5f684070f1cc1c5508df8c828aeb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py |
#calss header
class _PORTICO():
def __init__(self,):
self.name = "PORTICO"
self.definitions = [u'a covered entrance to a building, usually a large and impressive building, that is supported by columns']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
23228fca858ae9f4969e14f3cfd351321ad5a08f | bc2ea53e1dbbda6818efae0e30c93498562f850a | /setup.py | 439d3a7315cf802b47906b6e1a22207c6dad985a | [
"BSD-2-Clause"
] | permissive | zmedico/gemato | fa47db3320824fca5ff5e1dfdd04391ff9c081e6 | 2a3c4354ba3c3515a86b81782ab30a34a14faea2 | refs/heads/master | 2021-10-23T13:16:10.822184 | 2019-03-16T07:51:59 | 2019-03-16T07:51:59 | 112,166,687 | 1 | 1 | null | 2017-11-27T08:09:08 | 2017-11-27T08:09:08 | null | UTF-8 | Python | false | false | 1,187 | py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# (C) 2017-2018 Michał Górny <mgorny@gentoo.org>
# Licensed under the terms of 2-clause BSD license
from setuptools import setup
# Package metadata for gemato (Gentoo Manifest Tool).
setup(
    name='gemato',
    version='14.0',
    description='Gentoo Manifest Tool -- a stand-alone utility to verify and update Gentoo Manifest files',
    author='Michał Górny',
    author_email='mgorny@gentoo.org',
    license='BSD',
    url='http://github.com/mgorny/gemato',
    # Optional backends: each extra pulls in a backport only on Python
    # versions whose standard library lacks the corresponding module.
    extras_require={
        'blake2': ['pyblake2;python_version<"3.6"'],
        'bz2': ['bz2file;python_version<"3.0"'],
        'lzma': ['backports.lzma;python_version<"3.0"'],
        'sha3': ['pysha3;python_version<"3.6"'],
    },
    packages=['gemato'],
    # Installs the `gemato` command-line entry point.
    entry_points={
        'console_scripts': [
            'gemato=gemato.cli:setuptools_main',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Security :: Cryptography',
    ]
)
| [
"mgorny@gentoo.org"
] | mgorny@gentoo.org |
1a01c08c797dcb23bf43fe33e045c0e7ca633adf | dd59a809da984f59315110aa019eabfbbf1a547d | /submissions/AlexMiller/AlexMiller-stitch_and_average.py | 38ff80363396aa0910870fb9a96217aaaa6f5d93 | [] | no_license | tranhoangkhuongvn/dc-michelin-challenge | f679b55a0d595c56cbc3c82e8673fb1c49bdeb44 | a34e8183f0c04314ee433852d3567c6b88a3aee6 | refs/heads/master | 2020-04-18T10:05:50.661201 | 2016-10-15T20:01:49 | 2016-10-15T20:01:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | import pandas as pd
import pdb
import random
# NOTE(review): pdb and random appear unused in this script.
# Hard-coded data roots; swap in the commented Windows drive letters as needed.
prefix = "/media/alex/HD/"
# prefix = "D:/"
prefix2 = "/media/alex/SSD/"
# prefix2 = "C:/"
#Read in the original DC dataset
df = pd.read_csv(prefix+"Documents/Data/Yelp/dc.csv",header=0,encoding="latin1")
#Keep only restaurant name, review date, price rating, average score, review count
df = df[["req.restaurant","date","price","avg.score","review.count"]]
#Read in the prediction vectors
pred = pd.read_csv(prefix+"git/word2vec.torch/prediction_vectors.csv",header=None)
pred.columns = ["star0","star1","star2","star3"]
#We never scrambled them, so we can keep them in the same order
# Rows align by position, so a plain index join attaches predictions to reviews.
dc = df.join(pred)
# Average every numeric column per restaurant, then label each restaurant with
# the star column holding the highest mean prediction.
dc_means = dc.groupby('req.restaurant').mean()
dc_means['max'] = dc_means[['star0','star1','star2','star3']].idxmax(axis=1)
# Write csv
dc_means.to_csv(prefix2+"git/dc-michelin-challenge/submissions/AlexMiller/dc_predictions.csv")
| [
"alex.k.miller@gmail.com"
] | alex.k.miller@gmail.com |
e0b4271157ee723d721cd2ef40b941481e4d51da | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-Quartz/Examples/Programming with Quartz/BasicDrawing/MyView.py | d13dea356b286df29b777944be4bb3413e728402 | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 4,743 | py | import AppDrawing
import Cocoa
import Quartz
import FrameworkTextDrawing
import FrameworkUtilities
import objc
import UIHandling
import PDFHandling
from objc import super
# XXX: Why are these global?
# Module-level UI state shared by all MyView instances: the currently
# selected drawing command and the most recently pasted PDF (if any).
_drawingCommand = UIHandling.kHICommandSimpleRect
_pdfDocument = None
class MyView(Cocoa.NSView):
    # Custom NSView that renders the currently selected drawing command, or a
    # pasted PDF page, and implements copy/paste/print support.  Selection
    # state lives in the module-level globals _drawingCommand/_pdfDocument.
    currentMenuItem = objc.IBOutlet()
    def initWithFrame_(self, frameRect):
        # Designated initializer; clears any previously pasted PDF.
        self = super().initWithFrame_(frameRect)
        if self is None:
            return None
        global _pdfDocument
        _pdfDocument = None
        return self
    if False:
        # Deliberately disabled: enable to flip the view's coordinate system.
        def isFlipped(self):
            return True
    def drawRect_(self, rect):
        # Draw either the pasted PDF (if one exists) or the selected command.
        context = Cocoa.NSGraphicsContext.currentContext().graphicsPort()
        if _pdfDocument is None:
            if _drawingCommand in (
                UIHandling.kHICommandDrawNSString,
                UIHandling.kHICommandDrawNSLayoutMgr,
                UIHandling.kHICommandDrawCustomNSLayoutMgr,
            ):
                # Text commands render through the Cocoa text system helpers.
                if _drawingCommand == UIHandling.kHICommandDrawNSString:
                    FrameworkTextDrawing.drawNSStringWithAttributes()
                elif _drawingCommand == UIHandling.kHICommandDrawNSLayoutMgr:
                    FrameworkTextDrawing.drawWithNSLayout()
                else:
                    FrameworkTextDrawing.drawWithCustomNSLayout()
            else:
                AppDrawing.DispatchDrawing(context, _drawingCommand)
        else:
            # Draw page 1 of the pasted PDF, anchored at the view origin.
            mediaRect = Quartz.CGPDFDocumentGetMediaBox(_pdfDocument, 1)
            mediaRect.origin.x = mediaRect.origin.y = 0
            Quartz.CGContextDrawPDFDocument(context, mediaRect, _pdfDocument, 1)
    @objc.IBAction
    def setDrawCommand_(self, sender):
        # Menu action: switch to the drawing command encoded in the sender's tag.
        global _drawingCommand, _pdfDocument
        newCommand = sender.tag()
        if _drawingCommand != newCommand:
            _drawingCommand = newCommand
            # The view needs to be redisplayed since there is a new drawing command.
            self.setNeedsDisplay_(True)
            # Disable previous menu item.
            if self.currentMenuItem is not None:
                self.currentMenuItem.setState_(Cocoa.NSOffState)
            # Update the current item.
            self.currentMenuItem = sender
            # Enable new menu item.
            self.currentMenuItem.setState_(Cocoa.NSOnState)
            # If we were showing a pasted document, let's get rid of it.
            if _pdfDocument:
                _pdfDocument = None
    def currentPrintableCommand(self):
        # The best representation for printing or exporting
        # when the current command caches using a bitmap context
        # or a layer is to not do any caching.
        if _drawingCommand in (
            UIHandling.kHICommandDrawOffScreenImage,
            UIHandling.kHICommandDrawWithLayer,
        ):
            return UIHandling.kHICommandDrawNoOffScreenImage
        return _drawingCommand
    def print_(self, sender):
        global _drawingCommand
        savedDrawingCommand = _drawingCommand
        # Set the drawing command to be one that is printable.
        _drawingCommand = self.currentPrintableCommand()
        # Do the printing operation on the view.
        Cocoa.NSPrintOperation.printOperationWithView_(self).runOperation()
        # Restore the command that was selected before printing.
        _drawingCommand = savedDrawingCommand
    def acceptsFirstResponder(self):
        # Accept first-responder status so copy/paste menu items reach us.
        return True
    @objc.IBAction
    def copy_(self, sender):
        # Export the current drawing as PDF data onto the pasteboard.
        FrameworkUtilities.addPDFDataToPasteBoard(_drawingCommand)
    @objc.IBAction
    def paste_(self, sender):
        global _pdfDocument
        newPDFDocument = PDFHandling.createNewPDFRefFromPasteBoard()
        if newPDFDocument is not None:
            _pdfDocument = newPDFDocument
            # The view needs to be redisplayed since there is
            # a new PDF document.
            self.setNeedsDisplay_(True)
    # Return the number of pages available for printing. For this
    # application it is always 1.
    def knowsPageRange_(self, aRange):
        # PyObjC out-parameter convention: return (success, page range).
        return True, Cocoa.NSRange(1, 1)
    # Return the drawing rectangle for a particular page number.
    # For this application it is always the page width and height.
    def rectForPage_(self, page):
        pi = Cocoa.NSPrintOperation.currentOperation().printInfo()
        # Calculate the page height in points.
        paperSize = pi.paperSize()
        return Cocoa.NSMakeRect(0, 0, paperSize.width, paperSize.height)
    def validateMenuItem_(self, menuItem):
        # Keep the checkmark in sync: only the active command's item is on.
        if menuItem.tag() == _drawingCommand:
            self.currentMenuItem = menuItem
            menuItem.setState_(True)
        else:
            menuItem.setState_(False)
        return True
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
9a3ca5f038df2d91274264f5c5e0e84d04919a94 | 9745e5d8acae70bcdd7011cc1f81c65d3f5eed22 | /Interview Preparation Kit/Stacks and Queues /Min Max Riddle/solutions.py | fa525d9722e71608c301d368bff05d2bbd5b24a6 | [] | no_license | rinleit/hackerrank-solutions | 82d71b562d276ec846ab9a26b3e996c80172f51e | 519a714c5316892dce6bd056b14df5e222078109 | refs/heads/master | 2022-11-10T05:08:11.185284 | 2020-07-02T01:34:35 | 2020-07-02T01:34:35 | 254,403,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | #!/bin/python3
import os
import sys
# Complete the riddle function below.
def riddle(arr):
    """For every window size w in 1..len(arr), return the maximum over all
    windows of size w of that window's minimum value.

    Replaces the original quadratic/cubic per-element scan with the
    standard O(n) technique: a monotonic stack finds, for each element,
    the nearest strictly smaller neighbor on each side, which bounds the
    widest window in which that element is the minimum.
    """
    n = len(arr)
    prev_smaller = [-1] * n   # index of nearest strictly smaller element to the left
    next_smaller = [n] * n    # index of nearest strictly smaller element to the right
    stack = []
    for i in range(n):
        while stack and arr[stack[-1]] >= arr[i]:
            stack.pop()
        prev_smaller[i] = stack[-1] if stack else -1
        stack.append(i)
    stack = []
    for i in range(n - 1, -1, -1):
        while stack and arr[stack[-1]] >= arr[i]:
            stack.pop()
        next_smaller[i] = stack[-1] if stack else n
        stack.append(i)
    maxes = [float("-inf")] * n
    for i in range(n):
        # arr[i] is the minimum of a window exactly this wide.
        width = next_smaller[i] - prev_smaller[i] - 1
        maxes[width - 1] = max(maxes[width - 1], arr[i])
    # Every window of size w contains a window of size w-1, so the answer is
    # non-increasing in w; fill smaller sizes down from larger ones.
    for w in range(n - 2, -1, -1):
        maxes[w] = max(maxes[w], maxes[w + 1])
    return maxes
if __name__ == '__main__':
    # HackerRank harness: read the array from stdin and write the
    # space-separated answers to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # Declared element count; unused since the split below sizes arr itself.
    n = int(input())
    arr = list(map(int, input().rstrip().split()))
    res = riddle(arr)
    fptr.write(' '.join(map(str, res)))
    fptr.write('\n')
    fptr.close()
| [
"rinle.it@gmail.com"
] | rinle.it@gmail.com |
4b5e7e285b8e0c0129a0dff3c8dc00f64d5ef802 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/paloaltonetworks/azure-mgmt-paloaltonetworksngfw/generated_samples/prefix_list_global_rulestack_get_maximum_set_gen.py | c8ef113d486bafe5a4bee4d324a8a7d46b6bcafb | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,652 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.paloaltonetworksngfw import PaloAltoNetworksNgfwMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-paloaltonetworksngfw
# USAGE
python prefix_list_global_rulestack_get_maximum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Credentials are resolved by DefaultAzureCredential from the AZURE_*
    # environment variables described in the module docstring.
    client = PaloAltoNetworksNgfwMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="SUBSCRIPTION_ID",
    )
    # Fetch one prefix-list entry ("armid1") from the named global rulestack.
    response = client.prefix_list_global_rulestack.get(
        global_rulestack_name="praval",
        name="armid1",
    )
    print(response)
# x-ms-original-file: specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_Get_MaximumSet_Gen.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
4948b993f20d3fd92127a96ca2711c9fa8bf6de4 | 7dbcbec8cfd75576d1be86270899b6642e0e9d70 | /testhtmlserv.py | b84d1716321ed64ef1a70b2c87ecb3637afdf4c0 | [] | no_license | huangnauh/learnpython | b72f43572cd42b962c11688c65437cca0b081f4f | e25f582a7811aa63de2f533736921529b600bcc8 | refs/heads/master | 2021-01-22T23:25:30.370990 | 2015-03-23T13:51:06 | 2015-03-23T13:51:06 | 23,954,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,619 | py | #!E:/python34/python.exe
#coding= utf-8
from http.server import BaseHTTPRequestHandler #BaseHTTPRequestHandler类细分到处理每个协议的方法,这里是‘GET’方法的例子
import urllib.parse
class GetHandler(BaseHTTPRequestHandler):
    # BaseHTTPRequestHandler dispatches each HTTP verb to a do_<VERB> method.
    # GET echoes request/server metadata back to the client; POST prints the
    # decoded body and acknowledges it.  (Comments translated from Chinese.)
    def do_GET(self):  # override the per-verb hook for 'GET'
        parsed_path = urllib.parse.urlparse(self.path)
        message_parts = [  # assemble the list of lines to return
            'CLIENT VALUES:',  # client-side information
            'client_address=%s (%s)' % (self.client_address,
                self.address_string()),  # the client's address and port
            'command=%s' % self.command,  # the request verb; necessarily 'GET' here
            'path=%s' % self.path,  # the raw requested path
            'real path=%s' % parsed_path.path,  # the path component parsed by urlparse
            'query=%s' % parsed_path.query,  # the query string parsed by urlparse
            'request_version=%s' % self.request_version,  # HTTP version of the request
            '',
            'SERVER VALUES:',  # server-side information
            'server_version=%s' % self.server_version,  # server software identifier
            'sys_version=%s' % self.sys_version,  # Python version running the server
            'protocol_version=%s' % self.protocol_version,  # HTTP version the server speaks
            '',
            'HEADERS RECEIVED:',
        ]
        for name, value in sorted(self.headers.items()):  # append request headers (user-agent, host, ...)
            message_parts.append('%s=%s' % (name, value.rstrip()))
        message_parts.append('')
        message = '\r\n'.join(message_parts)
        self.send_response(200)  # reply 200 OK (also emits the standard response headers)
        self.end_headers()  # terminate the header section
        self.wfile.write(message.encode('ascii'))  # send the response body
        return
    def do_POST(self):
        # Read exactly Content-Length bytes of the request body.
        datas = self.rfile.read(int(self.headers['content-length']))
        # NOTE(review): assumes an ASCII, URL-encoded body -- non-ASCII
        # input would raise UnicodeDecodeError here.
        datas = urllib.parse.unquote(datas.decode('ascii'))
        print(datas)
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'yes posted')
if __name__ == '__main__':
    from http.server import HTTPServer
    # Start an HTTP server on local port 8080, dispatching requests to the
    # custom GetHandler above.  (Comments translated from Chinese.)
    server = HTTPServer(('localhost', 8080), GetHandler)
    print('Starting server, use <Ctrl-C> to stop')
server.serve_forever() #保存程序一直运行 | [
"huanglibo2010@gmail.com"
] | huanglibo2010@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.