blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
daaf6b89bb892f00604e3b114f689b37985fdad8 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_34/266.py | 85d39360d2c79a5749c5b6d690f381a6e01efb74 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | #!/usr/bin/env python
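# This appears to solve Code Jam's "Alien Language" problem: each query line
# is a pattern such as (ab)d(dc) and the answer is how many dictionary words
# match it. The loop below expands the letter choices one position at a time,
# keeping only prefixes that at least one dictionary word starts with.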
import sys
input = sys.stdin.readline().strip().split(" ")
l = int(input[0])
d = int(input[1])
n = int(input[2])
words = []
for i in range(d):
words.append(sys.stdin.readline().strip())
for i in range(n):
thisWord = sys.stdin.readline().strip()
cursor = 0
possibilities = []
possibleLetters = []
for j in range(l):
oldPossibilities = possibilities
possibilities = []
possibleWords = []
for word in words:
possibleWords.append(word)
possibleLetters = ""
if thisWord[cursor] == "(":
cursor += 1
while thisWord[cursor] != ")":
possibleLetters += thisWord[cursor]
cursor += 1
cursor += 1
else:
possibleLetters = thisWord[cursor]
cursor += 1
if j == 0:
for letter in possibleLetters:
possibilities.append(letter)
else:
for possibility in oldPossibilities:
for letter in possibleLetters:
for word in possibleWords:
if word.startswith(possibility + letter):
possibilities.append(possibility + letter)
break
print "Case #%d: %d" % (i+1, len(possibilities))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
765056fd072c634af1a8a3e3a250ab7e0b791954 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/jinja2-2.6/jinja2/testsuite/tests.py | f7631ea2f6a1f560fdf62f8f91314a3294196438 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/jinja2-2.6/jinja2/testsuite/tests.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
57a2d22ba7c99cd7640e45bd7fef33b988099485 | c97b9ae1bf06757ba61f90905e4d9b9dd6498700 | /venv/Lib/site-packages/skimage/transform/pyramids.py | 848756848f446a4d459ec29ee55afc41cb49b718 | [] | no_license | Rahulk1p/image-processor | f7ceee2e3f66d10b2889b937cdfd66a118df8b5d | 385f172f7444bdbf361901108552a54979318a2d | refs/heads/main | 2023-03-27T10:09:46.080935 | 2021-03-16T13:04:02 | 2021-03-16T13:04:02 | 348,115,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ad3430ef2503a2317891465bf4bcd56b145ec8bf66b8db7ad65262fae38250f1
size 12022
| [
"rksc.k1p@gmail.com"
] | rksc.k1p@gmail.com |
153e231631f50fa7cb57eb17993ad10ce072a16e | 18f71f2e462cb76bb7969ca7b71e923d05f5f36c | /feincms3/__init__.py | e6e2e368d7bff37195ce4743c3d15ba863484950 | [
"BSD-2-Clause"
] | permissive | rorito/feincms3 | 34af890ac449fe06724217969df8ea762123b79e | fbaa8d4a3d2fde1505aedd6a36059ad4fe951dd7 | refs/heads/master | 2021-05-04T07:06:13.142248 | 2016-10-06T21:33:27 | 2016-10-06T21:33:27 | 70,556,414 | 0 | 0 | null | 2016-10-11T04:36:17 | 2016-10-11T04:36:17 | null | UTF-8 | Python | false | false | 63 | py | VERSION = (0, 11, 1)
__version__ = '.'.join(map(str, VERSION))
| [
"mk@feinheit.ch"
] | mk@feinheit.ch |
1014915b0c0eff23a236456f99303253b7dbfda4 | ea841f43100a56736651f3bf8493252babc46475 | /cl2.py | 379ed52fb56638f76d3137165920b296bf5c0c78 | [
"MIT"
] | permissive | kakoni/insulaudit | d28e627d0cf449fe09dd96ca12ba6b9d6fd0cea9 | 18fe0802bafe5764882ac4e65e472fdc840baa45 | refs/heads/master | 2020-12-25T15:51:27.497274 | 2011-09-15T02:19:10 | 2011-09-15T02:19:10 | 2,763,843 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,523 | py | #!/usr/bin/python
import user
import struct
import sys
import serial
import time
import logging
from pprint import pprint, pformat
import doctest
from insulaudit.core import Command
from insulaudit.clmm.usbstick import *
from insulaudit import lib
logging.basicConfig( stream=sys.stdout )
log = logging.getLogger( 'auditor' )
log.setLevel( logging.DEBUG )
log.info( 'hello world' )
io = logging.getLogger( 'auditor.io' )
io.setLevel( logging.DEBUG )
"""
######################
#
# ComLink2
# pseudocode analysis of critical procedures
# there is some implicit OO going on
#
execute(command):
usbcommand.execute(self)
############################
#
# USB(Pump) Command Stuff
#
packSerialNumber:
return makePackedBCD(serial)
"""
"""
######################
#
# Pump
#
# every command needs:
# code, retries, params, length, pages
initDevice:
# cmdPowerControl Command(93, "rf power on", 2)
# cmdPowerControl.params = [ 1, 1 ]
# cmdPowerControl.retries = 0
# cmdReadErrorStatus = Command(117, "read pump error status")
# cmdReadState = Command(131, "Read Pump State")
# cmdReadTempBasal = Command(152, "Read Temporary Basal")
initDevice2
iniDevice2:
detectActiveBolus = Command(76, "set temp basal rate (bolus detection only)", 3)
detectActiveBolus.params = [ 0, 0, 0 ]
detectActiveBolus.retries = 0
detectActiveBolus:
# cmdDetectBolus
shutDownPump
if suspended:
shutDownPump2()
cmdCancelSuspend()
# turn rf power off
# retries 0
cmdOff = Command(93, "rf power off", [ 0 ], 2)
cmdOff.execute
shutDownPump2:
Command(91, "keypad push (ack)", [ 2 ], 1).execute
time.sleep(.500)
Command(91, "keypad push (esc)", [ 1 ], 1).execute
time.sleep(.500)
getNAKDescription:
# pass
# 2 params
Command(code, descr)
# 5: code, descr, bytesPerRecord, maxRecords, maxRetries
return Command(code, descr, 64, 1, 0)
# 3 params
Command(code, descr, paramCount):
# 5
#
com = Command(code, descr, 0, 1, 11)
com.paramCount = paramCount
numblocks = paramCount / 64 + 1
# 4 params
Command(code, descr, params, tail)
# 5
com = Command(code, descr, 0, 1, 11)
com.params = params
#com.paramCount
# 5 params
Command(code, descr, bytesPerRecord, maxRecords, ??):
# likely decompile error
# 7
Command(code, descr, bytesPerRecord, maxRecords, 0, 0, paramCount)
dataOffset = 0
cmdLength = 2
# 7 params
Command(code, descr, bytesPerRecord, maxRecords, address, addressLength, arg8):
offset = 2
if addressLength == 1:
cmdLength = 2 + addressLength
else:
cmdLength = 2 + addressLength + 1
retries = 2
# 511
execute:
result = None
for i in xrange(maxRetries)
# reset bytes read
response = usb.execute(self)
# handle stack trace
if response: break
return result
"""
"""
"""
class Link( core.CommBuffer ):
class ID:
VENDOR = 0x0a21
PRODUCT = 0x8001
timeout = .100
def __init__( self, port, timeout=None ):
        super(Link, self).__init__(port, timeout)
def setTimeout(self, timeout):
self.serial.setTimeout(timeout)
def getTimeout(self):
return self.serial.getTimeout()
def initUSBComms(self):
self.initCommunicationsIO()
#self.initDevice()
def getSignalStrength(self):
result = self.readSignalStrength()
signal = result[0]
def readSignalStrength(self):
result = self.sendComLink2Command(6, 0)
# result[0] is signal strength
log.info('%r:readSignalStrength:%s' % (self, int(result[0])))
return result
def initCommunicationsIO(self):
# close/open serial
self.readProductInfo( )
self.readSignalStrength()
def endCommunicationsIO(self):
self.readSignalStrength()
self.readInterfaceStatistics()
# close port
self.close()
def readProductInfo(self):
result = self.sendComLink2Command(4)
# 1/0/255
log.info('readProductInfo:result')
freq = result[5]
info = self.decodeProductInfo(result)
log.info('product info: %s' % pformat(info))
# decodeInterface stats
def decodeProductInfo(self, data):
class F:
body = data
comm = USBProductInfo()
comm.reply = F()
comm.onACK()
return comm.info
def sendComLink2Command(self, msg, a2=0x00, a3=0x00):
# generally commands are 3 bytes, most often CMD, 0x00, 0x00
msg = bytearray([ msg, a2, a3 ])
io.info('sendComLink2Command:write')
self.write(msg)
return self.checkAck()
# throw local usb exception
def checkAck(self):
time.sleep(.100)
result = bytearray(self.read(64))
io.info('checkAck:read')
commStatus = result[0]
# usable response
assert commStatus == 1
status = result[1]
# status == 102 'f' NAK, look up NAK
if status == 85: # 'U'
log.info('ACK OK')
return result[3:]
assert False, "NAK!!"
def decodeIFaceStats(self, data):
class F:
body = data
comm = InterfaceStats()
comm.reply = F()
comm.onACK()
return comm.info
def readInterfaceStatistics(self):
# decode and log stats
result = self.sendComLink2Command(5, 0)
info = self.decodeIFaceStats(result)
log.info("read radio Interface Stats: %s" % pformat(info))
result = self.sendComLink2Command(5, 1)
info = self.decodeIFaceStats(result)
log.info("read stick Interface Stats: %s" % pformat(info))
#######################
#
#
#
def CRC8(data):
return lib.CRC8.compute(data)
################################
# Remote Stuff
#
class BaseCommand(object):
code = 0x00
descr = "(error)"
retries = 2
timeout = 3
params = [ ]
bytesPerRecord = 0
maxRecords = 0
effectTime = 1
def __init__(self, code, descr, *args):
self.code = code
self.descr = descr
self.params = [ ]
def format(self):
pass
def allocateRawData(self):
self.raw = self.bytesPerRecord * self.maxRecords
class Device(object):
def __init__(self, link):
self.link = link
def execute(self, command):
self.command = command
self.allocateRawData()
self.sendAndRead()
def sendAndRead(self):
self.sendDeviceCommand()
time.sleep(self.command.effectTime)
if self.expectedLength > 0:
# in original code, this modifies the length tested in the previous if
# statement
self.command.data = self.readDeviceData()
def sendDeviceCommand(self):
packet = self.buildTransmitPacket()
io.info('sendDeviceCommand:write:%r' % (self.command))
self.link.write(packet)
time.sleep(.500)
code = self.command.code
params = self.command.params
if code != 93 or params[0] != 0:
self.link.checkAck()
def allocateRawData(self):
self.command.allocateRawData()
self.expectedLength = self.command.bytesPerRecord * self.command.maxRecords
def readDeviceData(self):
self.eod = False
results = bytearray( )
while not self.eod:
data = self.readDeviceDataIO( )
results.extend(data)
return results
def readDeviceDataIO(self):
results = self.readData()
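        # Frame layout as this parser reads it: byte 5 carries the end-of-data
        # flag in its high bit plus the low 7 bits of the payload length, byte 6
        # is the high length byte, the payload starts at offset 13, and a CRC8
        # of the payload trails the frame.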
lb, hb = results[5] & 0x7F, results[6]
self.eod = (results[5] & 0x80) > 0
resLength = lib.BangInt((lb, hb))
assert resLength > 63, ("cmd low byte count:\n%s" % lib.hexdump(results))
data = results[13:13+resLength]
assert len(data) == resLength
crc = results[-1]
# crc check
log.info('readDeviceDataIO:msgCRC:%r:expectedCRC:%r:data:%r' % (crc, CRC8(data), data))
assert crc == CRC8(data)
return data
def readData(self):
bytesAvailable = self.getNumBytesAvailable()
packet = [12, 0, lib.HighByte(bytesAvailable), lib.LowByte(bytesAvailable)]
packet.append( CRC8(packet) )
response = self.writeAndRead(packet, bytesAvailable)
# assert response.length > 14
assert (int(response[0]) == 2), repr(response)
# response[1] != 0 # interface number !=0
# response[2] == 5 # timeout occurred
# response[2] == 2 # NAK
# response[2] # should be within 0..4
log.info("readData ACK")
return response
def writeAndRead(self, msg, length):
io.info("writeAndRead:")
self.link.write(bytearray(msg))
time.sleep(.300)
self.link.setTimeout(self.command.timeout)
return bytearray(self.link.read(length))
def getNumBytesAvailable(self):
result = self.readStatus( )
start = time.time()
i = 0
while result == 0 and time.time() - start < 1:
log.debug('%r:getNumBytesAvailable:attempt:%s' % (self, i))
result = self.readStatus( )
time.sleep(.100)
i += 1
log.info('getNumBytesAvailable:%s' % result)
return result
def readStatus(self):
result = self.link.sendComLink2Command(3)
commStatus = result[0] # 0 indicates success
assert commStatus == 0
status = result[2]
lb, hb = result[3], result[4]
bytesAvailable = lib.BangInt((lb, hb))
self.status = status
if (status & 0x1) > 0:
return bytesAvailable
return 0
def buildTransmitPacket(self):
return self.command.format( )
class PumpCommand(BaseCommand):
serial = '665455'
#serial = '206525'
params = [ ]
bytesPerRecord = 64
maxRecords = 1
retries = 2
__fields__ = ['maxRecords', 'code', 'descr',
'serial', 'bytesPerRecord', 'params']
def __init__(self, **kwds):
for k in self.__fields__:
value = kwds.get(k, getattr(self, k))
setattr(self, k, value)
def getData(self):
return self.data
def format(self):
params = self.params
code = self.code
maxRetries = self.retries
serial = list(bytearray(self.serial.decode('hex')))
paramsCount = len(params)
head = [ 1, 0, 167, 1 ]
# serial
packet = head + serial
# paramCount 2 bytes
packet.extend( [ (0x80 | lib.HighByte(paramsCount)),
lib.LowByte(paramsCount) ] )
# not sure what this byte means
button = 0
# special case command 93
if code == 93:
button = 85
packet.append(button)
packet.append(maxRetries)
# how many packets/frames/pages/flows will this take?
responseSize = self.calcRecordsRequired()
# really only 1 or 2?
pages = responseSize
if responseSize > 1:
pages = 2
packet.append(pages)
packet.append(0)
# command code goes here
packet.append(code)
packet.append(CRC8(packet))
packet.extend(params)
packet.append(CRC8(params))
io.info(packet)
return bytearray(packet)
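    # calcRecordsRequired rounds the expected response size up to whole
    # 64-byte frames, e.g. 64 bytes -> 1 frame, 65 -> 2, 128 -> 2.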
def calcRecordsRequired(self):
length = self.bytesPerRecord * self.maxRecords
i = length / 64
j = length % 64
if j > 0:
return i + 1
return i
class PowerControl(PumpCommand):
"""
>>> PowerControl().format() == PowerControl._test_ok
True
"""
_test_ok = bytearray( [ 0x01, 0x00, 0xA7, 0x01, 0x66, 0x54, 0x55, 0x80,
0x02, 0x55, 0x00, 0x00, 0x00, 0x5D, 0xE6, 0x01,
0x0A, 0xA2 ] )
code = 93
descr = "RF Power On"
params = [ 0x01, 0x0A ]
retries = 0
maxRecords = 0
timeout = 17
effectTime = 17
class PowerControlOff(PowerControl):
params = [ 0x00, 0x0A ]
class ReadErrorStatus(PumpCommand):
"""
>>> ReadErrorStatus().format() == ReadErrorStatus._test_ok
True
"""
_test_ok = bytearray([ 0x01, 0x00, 0xA7, 0x01, 0x66, 0x54, 0x55, 0x80,
0x00, 0x00, 0x02, 0x01, 0x00, 0x75, 0xD7, 0x00 ])
code = 117
descr = "Read Error Status any current alarms set?"
params = [ ]
retries = 2
maxRecords = 1
class ReadPumpState(PumpCommand):
"""
>>> ReadPumpState().format() == ReadPumpState._test_ok
True
"""
_test_ok = bytearray([ 0x01, 0x00, 0xA7, 0x01, 0x66, 0x54, 0x55, 0x80,
0x00, 0x00, 0x02, 0x01, 0x00, 0x83, 0x2E, 0x00 ])
code = 131
descr = "Read Pump State"
params = [ ]
retries = 2
maxRecords = 1
class ReadPumpModel(PumpCommand):
"""
>>> ReadPumpModel().format() == ReadPumpModel._test_ok
True
"""
code = 141
descr = "Read Pump Model Number"
params = [ ]
retries = 2
maxRecords = 1
_test_ok = bytearray([ 0x01, 0x00, 0xA7, 0x01, 0x66, 0x54, 0x55, 0x80,
0x00, 0x00, 0x02, 0x01, 0x00, 0x8D, 0x5B, 0x00 ])
def getData(self):
data = self.data
length = data[0]
msg = data[1:1+length]
self.model = msg
return str(msg)
def initDevice(link):
device = Device(link)
comm = PowerControl()
device.execute(comm)
log.info('comm:%s:data:%s' % (comm, getattr(comm, 'data', None)))
comm = ReadErrorStatus()
device.execute(comm)
log.info('comm:%s:data:%s' % (comm, getattr(comm, 'data', None)))
comm = ReadPumpState()
device.execute(comm)
log.info('comm:%s:data:%s' % (comm, getattr(comm, 'data', None)))
return device
def do_commands(device):
comm = ReadPumpModel( )
device.execute(comm)
    log.info('comm:%s:data:%s' % (comm, getattr(comm, 'data', None)))
log.info('REMOTE PUMP MODEL NUMBER: %s' % comm.getData( ))
def shutdownDevice(device):
comm = PowerControlOff()
device.execute(comm)
log.info('comm:%s:data:%s' % (comm, getattr(comm, 'data', None)))
if __name__ == '__main__':
io.info("hello world")
doctest.testmod( )
port = None
try:
port = sys.argv[1]
except IndexError, e:
print "usage:\n%s /dev/ttyUSB0" % sys.argv[0]
sys.exit(1)
link = Link(port)
link.initUSBComms()
device = initDevice(link)
do_commands(device)
#shutdownDevice(device)
link.endCommunicationsIO()
#pprint( carelink( USBProductInfo( ) ).info )
#####
# EOF
| [
"bewest@gmail.com"
] | bewest@gmail.com |
ee93ff8ae4035ffd35c0d86834f7c75f28445031 | 057c525d6fbff928fc0cb0cd6b2930e9494b5d4b | /training-data/py/5-analyse.py | 1f5243bdb3b48c22ec0e2f0011149bf2dc22f64b | [] | no_license | uk-gov-mirror/ukwa.text-id | 0931742d1f2df3091ac52eee6160c177ea98180d | 5f3dcc6436bc46dedb375b37e3fd51c1c0d9b45b | refs/heads/master | 2022-02-26T15:32:15.901527 | 2019-11-19T16:36:06 | 2019-11-19T16:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | a_a = {}N
a a("函数日志") a a:N
a a a a:N
a = a.a('\a', '')N
a a a a_a.a():N
a_a[a] = a_a[a] + 0N
a:N
a_a[a] = 0N
N
a(a_a)N
N
a_a = a(a_a.a(), a=(a a: a[0]), a=Aa)N
a(a_a)N
a a a a_a:N
a(a[0], a[0])N
| [
"Andrew.Jackson@bl.uk"
] | Andrew.Jackson@bl.uk |
fc0ea89cfa6218a774d0008ceae8bfa9386c3c1e | ac4b9385b7ad2063ea51237fbd8d1b74baffd016 | /.history/google/s5_getparser_20210216045725.py | 7234e21dc2a118fb6550c9727f65001cad465605 | [] | no_license | preethanpa/ssoemprep | 76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f | ce37127845253c768d01aeae85e5d0d1ade64516 | refs/heads/main | 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,072 | py | import shutil
from fonduer.parser.preprocessors import html_doc_preprocessor
from sqlalchemy import exc
import pdftotree
import re
from sen_parser_usable import *
from config import config
import json
import os
import posixpath
import http.server
import urllib.request, urllib.parse, urllib.error
import cgi
import mimetypes
from io import BytesIO
import uuid
import sys
import logging
import errno
from os import walk
from fonduer.parser.models import Document, Sentence, Table
from fonduer.parser.preprocessors import HTMLDocPreprocessor
from fonduer.parser import Parser
from pprint import pprint
from fonduer import Meta, init_logging
from fonduer.candidates import CandidateExtractor
from fonduer.candidates import MentionNgrams
from fonduer.candidates import MentionExtractor
from fonduer.candidates.models import Mention
from fonduer.candidates.models import mention_subclass
from fonduer.candidates.models import candidate_subclass
from fonduer.candidates.matchers import RegexMatchSpan, DictionaryMatch, LambdaFunctionMatcher, Intersect, Union
from fonduer.features import Featurizer
import inspect
import matchers as matchers
from extract_html import *
PII_KEYLIST = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/model/pii-keylist.json'
PARALLEL = 4 # assuming a quad-core machine
# ATTRIBUTE = "ns8s_invoice_poc_stage"
# check that the databases mentioned below already exist
getdbref = __import__('s1_2_getdbref')
# Will return <module '1_2_getdbref' from '/home/dsie/Developer/sandbox/3ray/server/backend/python/kbc_process/1_2_getdbref.py'>
# pdf_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf/'
# docs_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/html/'
# pdf_path = json.loads(sys.argv[1])['pdf_path']
# docs_path = json.loads(sys.argv[1])['html_path']
# job_id = json.loads(sys.argv[1])['job_id']
# exc_context = 'email_id'
# doc_context = 'mock'
# exc_context = json.loads(sys.argv[1])['context'] if len(sys.argv) > 0 and json.loads(sys.argv[1])['context'] is not None else None
# doc_context = json.loads(sys.argv[1])['doc_name'] if len(sys.argv) > 0 and json.loads(sys.argv[1])['doc_name'] is not None else None
# exc_context = 'phone_number'
pdf_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/drive_documents/efca2facee5f8df9/pdf/'
docs_path = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/drive_documents/efca2facee5f8df9/html/'
job_id = 'efca2facee5f8df9'
exc_context = None
doc_context = None
# Configure logging for Fonduer
init_logging(log_dir="logs", level=logging.ERROR)
max_docs = 1000
doc_preprocessor = None
execution_stack = ["1. Get session object..."]
try:
session = getdbref.get_session()
sessType = type(session) # Will return <class 'sqlalchemy.orm.session.Session'>
execution_stack.append("Done.")
execution_stack.append("2. Processing layout...")
except exc.SQLAlchemyError as sql_exception:
    logging.error(f'{execution_stack}, session = getdbref.get_session(), {sql_exception}')
except Exception as session_exception:
    logging.error(f'{execution_stack}, session = getdbref.get_session(), {session_exception}')
def do_prepare_mentions_batch(candidate_mentions, config):
# for index, data in enumerate(config):
for index, data in config.items():
mention_subclass_list = list()
max_ngrams = None
for key in data.keys():
if key == 'Candidates':
for c in data.get(key):
# if c not in candidate_mentions.keys(): #TODO verify this condition
# candidate_mentions[c] = {
# "mention_names": [],
# "mention_ngrams": [],
# "mention_matchers": [],
# "mention_subclass": [],
# "max_ngrams": [],
# "throttler_function": []
# }
candidate_mentions[c]['mention_names'].append(data['MentionName'])
candidate_mentions[c]['mention_ngrams'].append(data['MentionNGrams'])
candidate_mentions[c]['mention_matchers'].append(matchers.matcher[data.get('Context')])
if 'mention_subclass' in candidate_mentions[c].keys():
candidate_mentions[c]['mention_subclass'].append(mention_subclass(data['MentionName']))
else:
candidate_mentions[c]['mention_subclass'] = [mention_subclass(data['MentionName'])]
if 'max_ngrams' in candidate_mentions[c].keys():
candidate_mentions[c]['max_ngrams'].append(MentionNgrams(n_max=candidate_mentions[c].get('mention_ngrams')))
else:
candidate_mentions[c]['max_ngrams'] = [MentionNgrams(n_max=candidate_mentions[c].get('mention_ngrams'))]
# candidate_mentions[c]['throttler_function'] = data.get('ThrottlerFunctions')[0].get('tf')
candidate_mentions[c]['throttler_function'] = [{data.get('ThrottlerFunctions')[0].get('tf')}]
return candidate_mentions
def do_prepare_mentions(candidate_mentions, config, context):
mention_subclass_list = list()
max_ngrams = None
ctx = {
"mention_names": [],
"mention_ngrams": [],
"mention_matchers": [],
"mention_subclass": [],
"max_ngrams": [],
"throttler_function": None
}
ctx['mention_names'].append(config[context].get('MentionName'))
ctx['mention_ngrams'].append(config[context]['MentionNGrams'])
ctx['mention_matchers'].append(matchers.matcher[config[context].get('Context')])
ctx['mention_subclass'].append(mention_subclass(config[context]['MentionName']))
ctx['max_ngrams'].append(MentionNgrams(n_max=config[context].get('MaxNGrams')))
ctx['throttler_function'] = config[context].get('ThrottlerFunctions')[0].get('tf')
candidate_mentions[context] = ctx
return candidate_mentions
def do_train(candidate_mentions):
from sqlalchemy import desc
docs = session.query(Document).order_by(Document.name).all()
# docs = session.query(Document).order_by(desc(Document.id)).limit(1)
total_mentions = session.query(Mention).count()
splits = (1, 0.0, 0.0)
train_cands = []
for candidate_key in candidate_mentions.keys():
train_docs = set()
dev_docs = set()
test_docs = set()
'''print('Mention Subclass {}, Ngrams {} and Matchers {}'
.format(candidate_mentions[candidate_key]['mention_subclass'],
candidate_mentions[candidate_key]['max_ngrams'],
candidate_mentions[candidate_key]['mention_matchers']))
'''
mention_extractor = MentionExtractor(session, candidate_mentions[candidate_key]['mention_subclass'], candidate_mentions[candidate_key]['max_ngrams'], candidate_mentions[candidate_key]['mention_matchers'])
mention_extractor.apply(docs, clear=False, parallelism=PARALLEL, progress_bar=False)
# mention_extractor.apply(docs)
candidate_mentions[candidate_key]['candidate_subclass'] = candidate_subclass(candidate_key, candidate_mentions[candidate_key].get('mention_subclass'), table_name=candidate_mentions[candidate_key]['mention_names'][0])
candidate_extractor = CandidateExtractor(session, [candidate_mentions[candidate_key]['candidate_subclass']], throttlers=[candidate_mentions[candidate_key]['throttler_function']])
data = [(doc.name, doc) for doc in docs]
data.sort(key=lambda x: x[0])
for i, (doc_name, doc) in enumerate(data):
train_docs.add(doc)
for i, docs in enumerate([train_docs, dev_docs, test_docs]):
candidate_extractor.apply(docs, split=i, parallelism=PARALLEL)
train_cands = candidate_extractor.get_candidates(split = 0)
train_cands.append(candidate_extractor.get_candidates(split = 0))
candidate_mentions[candidate_key]['train_cands'] = candidate_extractor.get_candidates(split = 0)
for index, item in enumerate(candidate_mentions[candidate_key]['train_cands']):
if len(item) > 0:
featurizer = Featurizer(session, [candidate_mentions[candidate_key]['candidate_subclass']])
featurizer.apply(split=0, train=True, parallelism=PARALLEL)
F_train = featurizer.get_feature_matrices(candidate_mentions[candidate_key]['train_cands'])
# %time featurizer.apply(split=0, train=True, parallelism=PARALLEL)
# %time F_train = featurizer.get_feature_matrices(candidate_mentions[candidate_key]['train_cands'])
else:
candidate_mentions[candidate_key]['train_cands'].pop(index)
# candidate[candidate_key]['train_cands'] = train_cands
return candidate_mentions
def do_process_get_candidates(candidate_mentions=None):
train_cands = do_train(candidate_mentions)
return train_cands
def handle_return(generator, func):
contextInfoDict = yield from generator
func(contextInfoDict)
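# handle_return drains `generator` with `yield from`, then hands the
# generator's return value (its StopIteration payload) to `func`. It is itself
# a generator, so callers iterate it -- see train_and_test_experiment below.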
def get_context_async(sm, document_context='', search_context=''):
pass
# star_char_index = sm.char_start
# end_char_index = sm.char_end
# star_char_index = sm['applicant_name_context'].char_start
# end_char_index = sm['applicant_name_context'].char_end
# contextInfoDictionary = {
# 'label': {
# # 'spanMention': sm['applicant_name_context'],
# 'document': sm[search_context].sentence.document.name,
# 'documentId': sm[search_context].sentence.document.id,
# 'sentence': sm[search_context].sentence.text,
# 'contextValue': sm[search_context].sentence.text[star_char_index:end_char_index+1],
# 'startChar': star_char_index,
# 'endChar': end_char_index
# },
# 'value': {
# # 'spanMention': sm['applicant_name_context'],
# 'document': sm[search_context].sentence.document.name,
# 'documentId': sm[search_context].sentence.document.id,
# 'sentence': sm[search_context].sentence.text,
# 'contextValue': sm[search_context].sentence.text[star_char_index:end_char_index+1],
# 'startChar': star_char_index,
# 'endChar': end_char_index
# }
# }
# yield contextInfoDictionary
def print_values(value):
print('returned: {}'.format(json.dumps(value)))
def do_get_docs_values(candidates=None, document_context=None, search_context=None):
'''
"<class 'fonduer.parser.models.document.Document'>"
"<class 'fonduer.parser.models.section.Section'>"
"<class 'fonduer.parser.models.sentence.Sentence'>"
"<class 'fonduer.candidates.models.span_mention.SpanMention'>"
"<class 'fonduer.candidates.models.mention.ApplicationNameLabel'>"
'''
train_cands = None
docs_and_values = []
all_docs_and_values = []
# print(document_context, search_context)
search_types = ['all_docs_and_pii', 'all_doc_and_'+search_context, 'all_pii_for_'+document_context, search_context+'_for_'+document_context]
search_type = ''
if document_context == None and search_context == None:
'''Entire KB'''
search_type = search_types[0]
elif document_context == None and search_context is not None:
''' Send entire KB '''
search_type = search_types[1]
elif document_context is not None and search_context == None:
''' Send KB for document'''
search_type = search_types[2]
else:
''' Send KB for match in Doc'''
search_type = search_types[3]
for index, item in enumerate(candidates):
train_cands = candidates.get(item).get('train_cands')
if train_cands is not None:
for instances in train_cands:
for candidate in instances:
for key, value in enumerate(candidate):
# all_docs_and_values.append({
docs_and_values.append({
"documentName": value.context.sentence.document.name,
"page": value.context.sentence.page,
"piiFound": value.context.sentence.text
})
    for item in all_docs_and_values:
        if search_type == search_types[0]:
            docs_and_values.append(item)
        elif search_type == search_types[1]:
            '''
            search_context is already filtered, hence do not filter any document
            '''
            docs_and_values.append(item)
        elif search_type == search_types[2]:
            '''
            only filter document name
            '''
            docs_and_values.append(item) if item.get("documentName") in document_context else None
        else:
            '''
            search_type is 3
            search_context is already filtered, hence only filter document_name
            '''
            docs_and_values.append(item) if item.get("documentName") in document_context else None
# logging.info(f'docs_and_values: {docs_and_values}')
return docs_and_values
def train_and_test_experiment(document_context=None, context_label='', user=0, pdf_path=''):
'''
context_value:
context_label:
user:
pdf_path:
'''
candidate_mentions = do_prepare_mentions({}, config, context_label)
candidates = do_process_get_candidates(candidate_mentions)
results = []
if candidates is not None:
span_mention = None
span_mention_list = do_get_docs_values(candidates, document_context, context_label)
if len(span_mention_list) > 0:
span_mention = span_mention_list[0]
returned_contexts = handle_return(get_context_async(span_mention, document_context, context_label), print_values)
for x in returned_contexts:
results.append(x)
else:
# TODO
pass
return results
def train_and_test(document_context=None, context_label='', user=0, pdf_path=''):
'''
context_value:
context_label:
user:
pdf_path:
'''
candidate_mentions = do_prepare_mentions({}, config, context_label)
# candidate_mentions = do_prepare_mentions_batch({}, config)
candidates = do_process_get_candidates(candidate_mentions)
results = []
if candidates is not None:
results = do_get_docs_values(candidates, document_context, context_label)
return results
_, _, filenames = next(walk(pdf_path))
exc_context_list = config.keys()
combined_results = []
for fn in filenames:
fn = fn.split('.')[0]
for ec in exc_context_list:
combined_results.append(train_and_test(document_context=fn, context_label=ec))
print(json.dumps({"result": train_and_test(document_context=fn, context_label=ec), "job_id": job_id })) | [
"{abhi@third-ray.com}"
] | {abhi@third-ray.com} |
60f032df862af5ff958b8f9bfa750e02502b8da6 | 20a9787564f76ae0fcf2332a8655b21bae0646a3 | /GrokkingCodingInterview/Trees_BFS/level_order_traversal_reversed.py | 94a73523f8bfb2e99f8125589aa943d91a719918 | [] | no_license | nidhiatwork/Python_Coding_Practice | 3b33a40c947413c2695d3ee77728fa69430f14cd | 9d5071a8ddcda19181d3db029fb801d4e3233382 | refs/heads/master | 2023-02-08T20:50:47.522565 | 2023-02-04T10:04:10 | 2023-02-04T10:04:10 | 194,607,759 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | '''
Given a binary tree, populate an array to represent its level-by-level traversal in reverse order, i.e., the lowest level comes first. You should populate the values of all nodes in each level from left to right in separate sub-arrays.
'''
from collections import deque
class TreeNode(object):
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def level_order_traversal_reversed(root):
result = deque()
if not root:
return result
queue = deque()
queue.append(root)
while queue:
levelSize = len(queue)
currentLevel = []
for _ in range(levelSize):
currentNode = queue.popleft()
currentLevel.append(currentNode.val)
if currentNode.left:
queue.append(currentNode.left)
if currentNode.right:
queue.append(currentNode.right)
result.appendleft(currentLevel)
return result
root = TreeNode(1, TreeNode(2, TreeNode(4, TreeNode(8), TreeNode(9)), TreeNode(5, TreeNode(10), TreeNode(11))), TreeNode(3, TreeNode(6, TreeNode(12), TreeNode(13)), TreeNode(7, TreeNode(14), TreeNode(15))))
print(str(level_order_traversal_reversed(root))) | [
"“nidhi.bhushan123@gmail.com”"
] | “nidhi.bhushan123@gmail.com” |
0b34fd09a32b61b26a0e91673051e43c03f74504 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/desktopvirtualization/azure-mgmt-desktopvirtualization/generated_samples/scaling_plan_update.py | 4831a97f98926ec6612a858ab2bf88ca82161dc5 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,636 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.desktopvirtualization import DesktopVirtualizationMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-desktopvirtualization
# USAGE
python scaling_plan_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = DesktopVirtualizationMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="daefabc0-95b4-48b3-b645-8a753a63c4fa",
)
response = client.scaling_plans.update(
resource_group_name="resourceGroup1",
scaling_plan_name="scalingPlan1",
)
print(response)
# x-ms-original-file: specification/desktopvirtualization/resource-manager/Microsoft.DesktopVirtualization/stable/2022-09-09/examples/ScalingPlan_Update.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
5118a798d9ebf7c4a8473638ed1aae87c200932f | ea40d872e4d3122387f7a17400c1d2f31cf5bd6a | /Dynamic Programming/221. Maximal Square Medium.py | c2e3c879c1cfbc3c4bffa5bb587537c1006ee3c3 | [] | no_license | dongbo910220/leetcode_ | e4cf6c849986b105d4d5162c5cd2318ffc3fbb67 | e4c02084f26384cedbd87c4c60e9bdfbf77228cc | refs/heads/main | 2023-05-29T11:23:46.865259 | 2021-06-17T03:40:30 | 2021-06-17T03:40:30 | 344,785,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | '''
https://leetcode.com/problems/maximal-square/
'''
class Solution(object):
def maximalSquare(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix:
return 0
rows = len(matrix)
cols = len(matrix[0])
maxside = 0
dp = [[0] * (cols + 1) for _ in range(rows+1)]
for i in range(rows):
for j in range(cols):
if matrix[i][j] == '1':
dp[i+1][j+1] = min(dp[i][j], dp[i+1][j], dp[i][j+1]) + 1
if dp[i+1][j+1] > maxside:
maxside = dp[i+1][j+1]
return maxside * maxside
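# Illustrative check (the standard LeetCode example, not part of the original
# submission): the largest all-'1' square below is 2x2, so the result is 4.
# s = Solution()
# grid = [["1","0","1","0","0"],
#         ["1","0","1","1","1"],
#         ["1","1","1","1","1"],
#         ["1","0","0","1","0"]]
# print(s.maximalSquare(grid))  # -> 4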
'''
Success
Details
Runtime: 156 ms, faster than 94.83% of Python online submissions for Maximal Square.
Memory Usage: 20.2 MB, less than 12.50% of Python online submissions for Maximal Square.
''' | [
"1275604947@qq.com"
] | 1275604947@qq.com |
eca4ae7c383a43be9da6d68445b2e19faec9fba5 | 05d692469305dd1adb9ebc46080525bb4515b424 | /jugad1.py | 7169a71911f7fb4a3e57d3d1631de37915f9eb6b | [] | no_license | rajdharmkar/pythoncode | 979805bc0e672f123ca1460644a4bd71d7854fd5 | 15b758d373f27da5680a711bf12c07e86758c447 | refs/heads/master | 2020-08-07T18:30:55.575632 | 2019-10-14T12:46:09 | 2019-10-14T12:46:09 | 213,551,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,534 | py | def oa(o):
for at in dir(o):
print at,
'''
Sample calls and output for oa() below:
# object attributes of a dict:
oa({})
__class__ __cmp__ __contains__ __delattr__ __delitem__ __doc__ __eq__ __format__
__ge__ __getattribute__ __getitem__ __gt__ __hash__ __init__ __iter__ __le__ __len__
__lt__ __ne__ __new__ __reduce__ __reduce_ex__ __repr__ __setattr__ __setitem__
__sizeof__ __str__ __subclasshook__ clear copy fromkeys get has_key items
iteritems iterkeys itervalues keys pop popitem setdefault update values viewitems
viewkeys viewvalues
# object attributes of a list:
oa([])
__add__ __class__ __contains__ __delattr__ __delitem__ __delslice__ __doc__ __eq__
__format__ __ge__ __getattribute__ __getitem__ __getslice__ __gt__ __hash__ __iadd__
__imul__ __init__ __iter__ __le__ __len__ __lt__ __mul__ __ne__ __new__
__reduce__ __reduce_ex__ __repr__ __reversed__ __rmul__ __setattr__ __setitem__
__setslice__ __sizeof__ __str__ __subclasshook__ append count extend index insert
pop remove reverse sort
# object attributes of an int:
oa(1)
__abs__ __add__ __and__ __class__ __cmp__ __coerce__ __delattr__ __div__ __divmod__
__doc__ __float__ __floordiv__ __format__ __getattribute__ __getnewargs__ __hash__
__hex__ __index__ __init__ __int__ __invert__ __long__ __lshift__ __mod__
__mul__ __neg__ __new__ __nonzero__ __oct__ __or__ __pos__ __pow__ __radd__ __rand__
__rdiv__ __rdivmod__ __reduce__ __reduce_ex__ __repr__ __rfloordiv__ __rlshift__
__rmod__ __rmul__ __ror__ __rpow__ __rrshift__ __rshift__ __rsub__ __rtruediv__
__rxor__ __setattr__ __sizeof__ __str__ __sub__ __subclasshook__ __truediv__
__trunc__ __xor__ bit_length conjugate denominator imag numerator real
'''
def oar(o):
for at in dir(o):
if not at.startswith('__') and not at.endswith('__'):
print at,
'''
# regular (meaning non-dunder) object attributes of a dict:
oar({})
clear copy fromkeys get has_key items iteritems iterkeys itervalues keys pop popitem
setdefault update values viewitems viewkeys viewvalues
# regular object attributes of an int:
oar(1)
bit_length conjugate denominator imag numerator real
# regular object attributes of a string:
oar('')
_formatter_field_name_split _formatter_parser capitalize center count decode encode
endswith expandtabs find format index isalnum isalpha isdigit islower isspace
istitle isupper join ljust lower lstrip partition replace rfind rindex rjust rpartition
rsplit rstrip split splitlines startswith strip swapcase title translate upper zfill
''' | [
"rajdharmkar@gmail.com"
] | rajdharmkar@gmail.com |
393807b4f21d0ee4f255d31230b47c6eac204537 | 9c9dc76306f8e02b542940306d0fc781e577ea2f | /ext_agro/reports_lanta/wizard/__init__.py | 95f1953d1b810f700040a8544d7fc3e98c0876cb | [] | no_license | h3llopy/prueba_agroindustria | 13d235a7a0f6cab08eac174546a4fd8175150e56 | 22a3ebae42ad20a792f19228b4a911aef93d1d08 | refs/heads/main | 2023-02-16T18:59:31.757874 | 2021-01-19T21:10:19 | 2021-01-19T21:10:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | # -*- coding: utf-8 -*-
from . import stock_inventory_t
from . import mrp_print | [
"inmldrsolucionestecnologicas@gmail.com"
] | inmldrsolucionestecnologicas@gmail.com |
ee4a70928260425ed3cea0b9fe08e84f44207b80 | b76c08a4c33245a737fa0e139d212bb424017cd1 | /sandbox/order/utils.py | e2f60215327f7e0f361cf2b48bd1f2f84aaf5d02 | [
"ISC"
] | permissive | thelabnyc/django-oscar-cybersource | 5b09845121ef1c074335c01e86c649c36e4e51e4 | 95b33362adf8ba0217ac73c6f816b544c9faa18d | refs/heads/master | 2023-03-15T15:25:55.388795 | 2023-03-14T16:00:07 | 2023-03-14T16:00:07 | 58,149,620 | 4 | 3 | ISC | 2023-02-07T22:17:15 | 2016-05-05T17:45:52 | Python | UTF-8 | Python | false | false | 159 | py | from oscar.apps.order import utils
from oscarapicheckout.mixins import OrderCreatorMixin
class OrderCreator(OrderCreatorMixin, utils.OrderCreator):
pass
| [
"crgwbr@gmail.com"
] | crgwbr@gmail.com |
b324bb45f046ab2f4143cd271e5684defe01b32d | 7d4d6dc3c897ec7c297bb67f30c3f4e39509b250 | /Python/DailyFlash/27feb2020/MySolutions/program4.py | 4f1ba7e0d83a4b78cd942e0c584bda578e54146f | [] | no_license | kumbharswativ/Core2Web | 48a6ec0275466f4179c502097b1314d04a29e63e | 60949e5461ef103a4ad2c7c39ee9be0be101ec11 | refs/heads/master | 2022-12-24T06:11:45.096063 | 2020-08-09T12:04:07 | 2020-08-09T12:09:13 | 286,219,590 | 0 | 1 | null | 2022-12-11T10:57:50 | 2020-08-09T11:02:18 | Python | UTF-8 | Python | false | false | 199 | py | '''
write a program to print the following pattern
A B D G
G H J
J K
K
'''
a=70
for i in range(4,0,-1):
a=a-i-1
b=0
for j in range(i):
print(chr(a),end=" ")
b=b+1
a=a+b
print(" ")
| [
"“kumbharswativ@gmail.com”"
] | “kumbharswativ@gmail.com” |
641b1d430f4f0766aad0d2b37668f9c06eaf590f | f13f336c42313b9e45a9a497d5737ecff8652731 | /Python/116.Populating Next Right Pointers in Each Node .py | 08c9a38fe8fc369288a53e4552e2c4965c839beb | [] | no_license | whguo/LeetCode | 9b58bfbad07d9c3bfe8c48c74cd52fa6e019c2be | 74cc5aa3743d387213a36c7dcfd37e82ca60473a | refs/heads/master | 2020-02-26T16:02:39.172627 | 2017-07-28T01:59:20 | 2017-07-28T01:59:20 | 57,174,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | #把二叉树同一深度的节点串联起来(next)
class TreeLinkNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
self.next = None
class Solution(object):
def connect(self, root):
self.dic = {}
self.inorder(root,0)
return root
def inorder(self,p,level):
if p!=None:
if len(self.dic)<=level:
self.dic[level] = p
else:
self.dic[level].next = p
self.dic[level] = p
self.inorder(p.left,level+1) if p.left!=None else None
self.inorder(p.right,level+1) if p.right!=None else None
t1 = TreeLinkNode(1)
t2 = TreeLinkNode(2)
t3 = TreeLinkNode(3)
t4 = TreeLinkNode(4)
t5 = TreeLinkNode(5)
t6 = TreeLinkNode(6)
t7 = TreeLinkNode(7)
t8 = TreeLinkNode(8)
t9 = TreeLinkNode(9)
t1.left = t2
t1.right = t3
t2.left = t4
t2.right = t5
t3.left = t6
t3.right = t7
t4.left = t8
t4.right = t9
s = Solution()
root = s.connect(t1)
while root!=None:
p = root
while p!=None:
print(p.val)
p = p.next
print("next")
root = root.left
| [
"490216194@qq.com"
] | 490216194@qq.com |
7c44ba09652dca939859a751af24723bd6bf41cb | f0e0c1637f3b49fd914410361c3f1f3948462659 | /Python/Sets/set_add.py | 3da84d4b720396943cd15de874d9d726b08a6a21 | [] | no_license | georggoetz/hackerrank-py | 399bcd0599f3c96d456725471708068f6c0fc4b1 | a8478670fcc65ca034df8017083269cb37ebf8b0 | refs/heads/master | 2021-09-18T07:47:32.224981 | 2018-07-11T09:24:49 | 2018-07-11T09:24:49 | 111,611,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | # http://www.hackerrank.com/contests/python-tutorial/challenges/py-set-add
if __name__ == '__main__':
s = set([])
for _ in range(int(input())):
s.add(input())
print(len(s))
| [
"GeorgG@haufe.com"
] | GeorgG@haufe.com |
1a366362e73d577c2fadcb9ccc06efc6aa64d44e | f3eaf09705b9dcc92f15d9aaa25aa739bd09c161 | /pyti/money_flow.py | 28301668fcc66057d241b31d6761604305963453 | [
"MIT"
] | permissive | BernhardSchlegel/pyti | 9f7171d660d6b4e4450d3b5882132204a4d3a3d7 | bfead587fe49f7662df475a28688d3ce649e2e9b | refs/heads/master | 2021-08-19T22:14:52.721190 | 2017-11-27T14:35:59 | 2017-11-27T14:35:59 | 111,237,071 | 1 | 0 | null | 2017-11-18T20:30:59 | 2017-11-18T20:30:59 | null | UTF-8 | Python | false | false | 387 | py | from pyti import catch_errors
from pyti.typical_price import typical_price as tp
def money_flow(close_data, high_data, low_data, volume):
"""
Money Flow.
Formula:
MF = VOLUME * TYPICAL PRICE
"""
catch_errors.check_for_input_len_diff(
close_data, high_data, low_data, volume
)
mf = volume * tp(close_data, high_data, low_data)
return mf
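# Worked example (assuming the numpy-array inputs pyti indicators expect):
# with close=10, high=12, low=8 the typical price is (12 + 8 + 10) / 3 = 10,
# so a volume of 100 gives a money flow of 100 * 10 = 1000 for that bar.
#
#   import numpy as np
#   money_flow(np.array([10.]), np.array([12.]), np.array([8.]), np.array([100.]))
#   # -> array([1000.])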
| [
"kyle@collectiveidea.com"
] | kyle@collectiveidea.com |
1ace0aa1a9255b961b78176e9f318b2f0adae7a5 | 7a83e536c2ea73e9f0c61928db0f566825b60e7f | /bot/wikidata/clarkart_import.py | dbea5461e95e770004b6cc9d0c1046bf9bf3846e | [] | no_license | multichill/toollabs | 32919377ae1e1bc05608828d30d81fe672569fa5 | 99a96e49cfe6b2d3151da7ad5469792d80171be3 | refs/heads/master | 2023-08-17T19:05:59.936875 | 2023-08-16T15:41:14 | 2023-08-16T15:41:14 | 54,907,129 | 18 | 6 | null | 2021-03-04T13:23:41 | 2016-03-28T16:45:15 | Python | UTF-8 | Python | false | false | 7,012 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Bot to import paintings from the Clark Art Institute to Wikidata.
Just loop over pages like https://www.clarkart.edu/artpiece/search?limit=20&offset=0&collectionIds=1095,1096,1097,1118
This bot does uses artdatabot to upload it to Wikidata.
"""
import artdatabot
import pywikibot
import requests
import re
from html.parser import HTMLParser
import json
def getClarkArtGenerator():
"""
Generator to return Clark Art Institute paintings
"""
basesearchurl = 'https://www.clarkart.edu/artpiece/search?limit=20&offset=%s&collectionIds=1095,1096,1097,1118'
htmlparser = HTMLParser()
session = requests.Session()
# 545 (to start with), 20 per page
for i in range(1, 550,20):
searchurl = basesearchurl % (i,)
print (searchurl)
searchPage = session.get(searchurl)
for item in searchPage.json().get('results'):
# Main search contains quite a bit, but we're getting the individual objects
#itemid = '%s' % (item.get('id'),)
url = 'https://www.clarkart.edu%s' % (item.get('Url'),)
itempage = session.get(url)
metadata = {}
pywikibot.output (url)
metadata['url'] = url
metadata['collectionqid'] = 'Q1465805'
metadata['collectionshort'] = 'Clark Art'
metadata['locationqid'] = 'Q1465805'
# Search is for paintings
metadata['instanceofqid'] = 'Q3305213'
title = item.get('Title').strip()
if len(title) > 220:
title = title[0:200]
metadata['title'] = { 'en' : title,
}
creatorname = item.get('Artist').strip()
metadata['creatorname'] = creatorname
metadata['description'] = { 'nl' : '%s van %s' % ('schilderij', metadata.get('creatorname'),),
'en' : '%s by %s' % ('painting', metadata.get('creatorname'),),
'de' : '%s von %s' % ('Gemälde', metadata.get('creatorname'), ),
'fr' : '%s de %s' % ('peinture', metadata.get('creatorname'), ),
}
metadata['idpid'] = 'P217'
invregex = '\<strong\>Object Number\<\/strong\>[\r\n\s\t]*\<\/td\>[\r\n\s\t]*\<td\>[\r\n\s\t]*([^\<]+)[\r\n\s\t]*\<\/td\>'
invmatch = re.search(invregex, itempage.text)
metadata['id'] = invmatch.group(1).strip()
# Year contains the date in various variants
if item.get('Year'):
createdate = item.get('Year')
dateregex = '^(\d\d\d\d)\s*$'
datecircaregex = '^c\.\s*(\d\d\d\d)\s*$'
periodregex = '^(\d\d\d\d)\s*[-–]\s*(\d\d\d\d)\s*$'
circaperiodregex = '^c\.\s\s*(\d\d\d\d)[-\/](\d\d\d\d)\s*$'
shortperiodregex = '^(\d\d)(\d\d)[-–](\d\d)\s*$'
circashortperiodregex = '^c\.\s*(\d\d)(\d\d)[-–](\d\d)\s*$'
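                # Illustrative matches for the patterns above: "1884" (dateregex),
                # "c. 1884" (datecircaregex), "1884-1886" (periodregex),
                # "c. 1884/1886" (circaperiodregex), "1884-86" (shortperiodregex)
                # and "c. 1884-86" (circashortperiodregex).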
datematch = re.search(dateregex, createdate)
datecircamatch = re.search(datecircaregex, createdate)
periodmatch = re.search(periodregex, createdate)
circaperiodmatch = re.search(circaperiodregex, createdate)
shortperiodmatch = re.search(shortperiodregex, createdate)
circashortperiodmatch = re.search(circashortperiodregex, createdate)
if datematch:
metadata['inception'] = int(datematch.group(1).strip())
elif datecircamatch:
metadata['inception'] = int(datecircamatch.group(1).strip())
metadata['inceptioncirca'] = True
elif periodmatch:
metadata['inceptionstart'] = int(periodmatch.group(1))
metadata['inceptionend'] = int(periodmatch.group(2))
elif circaperiodmatch:
metadata['inceptionstart'] = int(circaperiodmatch.group(1))
metadata['inceptionend'] = int(circaperiodmatch.group(2))
metadata['inceptioncirca'] = True
elif shortperiodmatch:
metadata['inceptionstart'] = int('%s%s' % (shortperiodmatch.group(1),shortperiodmatch.group(2),))
metadata['inceptionend'] = int('%s%s' % (shortperiodmatch.group(1),shortperiodmatch.group(3),))
elif circashortperiodmatch:
metadata['inceptionstart'] = int('%s%s' % (circashortperiodmatch.group(1),circashortperiodmatch.group(2),))
metadata['inceptionend'] = int('%s%s' % (circashortperiodmatch.group(1),circashortperiodmatch.group(3),))
metadata['inceptioncirca'] = True
else:
print ('Could not parse date: "%s"' % (createdate,))
# acquisitiondate is available
acquisitiondateRegex = '\<strong\>Acquisition\<\/strong\>[\r\n\s\t]*\\<\/td\>[\r\n\s\t]*\<td\>[\r\n\s\t]*[^\<]+, (\d\d\d\d)[\r\n\s\t]*\<\/td\>'
acquisitiondateMatch = re.search(acquisitiondateRegex, itempage.text)
if acquisitiondateMatch:
metadata['acquisitiondate'] = int(acquisitiondateMatch.group(1))
mediumRegex = '\<strong\>Medium\<\/strong\>[\r\n\s\t]*\\<\/td\>[\r\n\s\t]*\<td\>[\r\n\s\t]*([^\<]+)[\r\n\s\t]*\<\/td\>'
mediumMatch = re.search(mediumRegex, itempage.text)
# Artdatabot will sort this out
if mediumMatch:
metadata['medium'] = mediumMatch.group(1)
# Dimensions is a mix of types and also Inches and cm
# Free images! See https://www.clarkart.edu/museum/collections/image-resources
imageRegex = '\<h6 class\=\"text-center\"\>TIFF \(up to 500 MB\)\<\/h6\>[\r\n\s\t]*\<a href\=\"#\" data-href\=\"(https\:\/\/media\.clarkart\.edu\/hires\/[^\"]+\.tif)\"'
imageMatch = re.search(imageRegex, itempage.text)
if imageMatch:
metadata['imageurl'] = imageMatch.group(1).replace(' ', '%20')
metadata['imageurlformat'] = 'Q215106' # TIFF
metadata['imageoperatedby'] = 'Q1465805'
# metadata['imageurllicense'] = 'Q6938433' # Just free use
## Use this to add suggestions everywhere
metadata['imageurlforce'] = False
yield metadata
def main(*args):
dictGen = getClarkArtGenerator()
dryrun = False
create = False
for arg in pywikibot.handle_args(args):
if arg.startswith('-dry'):
dryrun = True
elif arg.startswith('-create'):
create = True
if dryrun:
for painting in dictGen:
print (painting)
else:
artDataBot = artdatabot.ArtDataBot(dictGen, create=create)
artDataBot.run()
if __name__ == "__main__":
main()
| [
"maarten@mdammers.nl"
] | maarten@mdammers.nl |
7760597e4768fdd39a083fb0413a45cd2158f3bc | d5f75adf5603927396bdecf3e4afae292143ddf9 | /python/paddle/fluid/tests/unittests/autograd/test_orig2prim.py | 924292c4a4aed304b4cb343ea3a6804d392f7888 | [
"Apache-2.0"
] | permissive | jiweibo/Paddle | 8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4 | 605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74 | refs/heads/develop | 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 | Apache-2.0 | 2023-04-04T02:42:53 | 2019-07-11T03:51:12 | Python | UTF-8 | Python | false | false | 12,054 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.utils import flatten
from paddle.incubate.autograd.primrules import _orig2prim, _prim2orig, _jvp, _transpose
paddle.enable_static()
############################ Test orig2prim rules ############################
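# Shared pattern for the cases below: init_data registers one original op plus
# the primitive ops it should lower to; test_op in the base class appends the
# op, runs _orig2prim, compares the block's op names against all_ops, and
# checks output shapes via out_map.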
class TestElementWiseAddOrig2Prim(unittest.TestCase):
def setUp(self):
self.main_program = paddle.static.Program()
self.startup_program = paddle.static.Program()
self.layer_help = LayerHelper('TestOrig2Prim')
with paddle.static.program_guard(self.main_program,
self.startup_program):
self.init_data()
def init_data(self):
self.op_type = 'elementwise_add'
X = paddle.static.data(name='X', shape=[2, 2], dtype='float')
Y = paddle.static.data(name='Y', shape=[2, 2], dtype='float')
self.input = {'X': X, 'Y': Y}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {}
self.orig2prim_args = (X, Y)
self.all_ops = ['elementwise_add', 'add_p']
# { prim_op_output_index: orig_op_output_var }
self.out_map = {0: self.output['Out']}
def test_op(self):
with paddle.static.program_guard(self.main_program,
self.startup_program):
op = self.layer_help.append_op(type=self.op_type,
inputs=self.input,
outputs=self.output,
attrs=self.attrs)
prim_out = _orig2prim(op, *self.orig2prim_args)
all_ops = [op.type for op in self.main_program.block(0).ops]
self.assertEqual(sorted(all_ops), sorted(self.all_ops))
prim_out = flatten(prim_out)
for k, v in self.out_map.items():
self.assertEqual(prim_out[k].shape, v.shape)
class TestSqrtOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'sqrt'
X = paddle.static.data(name='X', shape=[7, 8], dtype='float64')
self.input = {
'X': X,
}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {}
self.orig2prim_args = (X, )
self.all_ops = ['sqrt', 'sqrt_p']
# { prim_op_output_index: orig_op_output_var }
self.out_map = {0: self.output['Out']}
class TestElementWiseMulOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'elementwise_mul'
X = paddle.static.data(name='X', shape=[8, 8], dtype='float')
Y = paddle.static.data(name='Y', shape=[8, 8], dtype='float')
self.input = {'X': X, 'Y': Y}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {}
self.orig2prim_args = (X, Y)
self.all_ops = ['elementwise_mul', 'mul_p']
# { prim_op_output_index: orig_op_output_var }
self.out_map = {0: self.output['Out']}
class TestMatmulV2Orig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'matmul_v2'
X = paddle.static.data(name='X', shape=[3, 4], dtype='float')
Y = paddle.static.data(name='Y', shape=[4, 3], dtype='float')
self.input = {'X': X, 'Y': Y}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {'trans_x': True, 'trans_y': True}
self.orig2prim_args = (X, Y)
self.all_ops = ['matmul_v2', 'transpose_p', 'transpose_p', 'matmul_p']
self.out_map = {0: self.output['Out']}
class TestTanhOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'tanh'
X = paddle.static.data(name='X', shape=[3, 4], dtype='float')
self.input = {
'X': X,
}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {}
self.orig2prim_args = (X, )
self.all_ops = ['tanh', 'tanh_p']
self.out_map = {0: self.output['Out']}
class TestReshape2Orig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'reshape2'
X = paddle.static.data(name='X', shape=[5, 6], dtype='int64')
self.input = {
'X': X,
}
self.output = {
'Out':
X,
'XShape':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {'shape': [6, 5]}
self.orig2prim_args = (
None,
None,
X,
)
self.all_ops = ['reshape2', 'reshape_p', 'fill_constant_p']
# Do not checke XShape
self.out_map = {0: self.output['Out']}
class TestConcatOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'concat'
X = paddle.static.data(name='X', shape=[5, 6], dtype='int64')
Y = paddle.static.data(name='Y', shape=[3, 6], dtype='int64')
self.input = {
'X': [X, Y],
}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {'axis': 0}
self.orig2prim_args = (
None,
(X, Y),
)
self.all_ops = ['concat', 'concat_p']
self.out_map = {0: self.output['Out']}
class TestSliceOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'slice'
X = paddle.static.data(name='X', shape=[5, 6], dtype='int64')
self.input = {
'Input': X,
}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {
'axes': [0],
'starts': [1],
'ends': [4],
}
self.orig2prim_args = (None, None, X, None, None)
self.all_ops = ['slice', 'slice_select_p']
self.out_map = {0: self.output['Out']}
class TestFillZerosLikeOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'fill_zeros_like'
X = paddle.static.data(name='X', shape=[5, 6], dtype='int64')
self.input = {
'X': X,
}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {}
self.orig2prim_args = (X, )
self.all_ops = ['fill_zeros_like', 'fill_constant_p']
self.out_map = {0: self.output['Out']}
class TestSumOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'sum'
X = paddle.static.data(name='X', shape=[5, 6], dtype='int64')
Y = paddle.static.data(name='Y', shape=[5, 6], dtype='int64')
self.input = {'X': X, 'Y': Y}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {}
self.orig2prim_args = ((X, Y), )
self.all_ops = ['sum', 'add_p']
self.out_map = {0: self.output['Out']}
class TestPNormOrig2Prim1(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'p_norm'
X = paddle.static.data(name='X', shape=[5, 6], dtype='int64')
self.input = {
'X': X,
}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {
'porder': 1,
'asvector': True,
}
self.orig2prim_args = (X, )
self.all_ops = ['p_norm', 'reshape_p', 'sqrt_p', 'reduce_p', 'mul_p']
self.out_map = {0: self.output['Out']}
class TestPNormOrig2Prim2(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'p_norm'
X = paddle.static.data(name='X', shape=[5, 6], dtype='int64')
self.input = {
'X': X,
}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {
'porder': 2,
'asvector': True,
}
self.orig2prim_args = (X, )
self.all_ops = ['p_norm', 'reshape_p', 'sqrt_p', 'reduce_p', 'mul_p']
self.out_map = {0: self.output['Out']}
class TestIndexSelectOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'index_select'
X = paddle.static.data(name='X', shape=[5, 6], dtype='int64')
Index = paddle.static.data(name='Index', shape=[2], dtype='int32')
self.input = {'X': X, 'Index': Index}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {
'dim': 0,
}
self.orig2prim_args = (
Index,
X,
)
self.all_ops = ['index_select', 'gather_p']
self.out_map = {0: self.output['Out']}
class TestElementwiseSubOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'elementwise_sub'
X = paddle.static.data(name='X', shape=[5, 6], dtype='int32')
Y = paddle.static.data(name='Y', shape=[6], dtype='int32')
self.input = {'X': X, 'Y': Y}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {
'dim': 0,
}
self.orig2prim_args = (
X,
Y,
)
self.all_ops = ['elementwise_sub', 'broadcast_p', 'sub_p']
self.out_map = {0: self.output['Out']}
class TestScaleOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'scale'
X = paddle.static.data(name='X', shape=[10, 7], dtype='int32')
self.input = {
'X': X,
}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {'scale': 2.0, 'bias': 1.0, 'bias_after_scale': True}
self.orig2prim_args = (
None,
X,
)
self.all_ops = [
'scale', 'fill_constant_p', 'fill_constant_p', 'mul_p', 'add_p'
]
self.out_map = {0: self.output['Out']}
class TestAssignOrig2Prim(TestElementWiseAddOrig2Prim):
def init_data(self):
self.op_type = 'assign'
X = paddle.static.data(name='X', shape=[10, 7], dtype='int32')
self.input = {
'X': X,
}
self.output = {
'Out':
self.layer_help.create_variable_for_type_inference(dtype=X.dtype)
}
self.attrs = {}
self.orig2prim_args = (X, )
self.all_ops = ['assign', 'fill_constant_p', 'add_p']
self.out_map = {0: self.output['Out']}
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | jiweibo.noreply@github.com |
e9fed6137ec3b295fda35dcfe8f083d0c3625be6 | 336f11ee8934581f05ab620c5324c601ba864b05 | /jb_adaptive_python/Problems/Step matrix/Programming/tests.py | 79d6ba9430dae06ac099ab9abf679986502ca197 | [] | no_license | ancient-clever/sandbox | 01adeee2638a23533965cf57ca873a30e7dfad3d | 87dec3bf8860a67a36154ee5d7c826d919d3111b | refs/heads/master | 2022-05-17T04:49:54.703068 | 2020-01-19T17:44:27 | 2020-01-19T17:44:27 | 206,946,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from test_helper import check_samples
if __name__ == '__main__':
check_samples(samples=[["3","1 1 1 1 1\n1 2 2 2 1\n1 2 3 2 1\n1 2 2 2 1\n1 1 1 1 1"]]) | [
"ancient-clever@outlook.com"
] | ancient-clever@outlook.com |
8ebd7be5762779f7dac2dfa4eb6fc4cd1e08545f | 5ea260271732d5cd3531665b3fefcad0b0b4d1ec | /emovie/settings.py | a675b1f23cb2c26a13fad4e443346a98a8fa82bd | [] | no_license | HettyIsIn/emovie | 1504c9552b48a657fb8bf615eef6a4b444ce4bf8 | 9bc83a6bed02b7fba748866df971ad89c3d2c14e | refs/heads/master | 2020-12-25T21:01:26.250640 | 2015-05-27T15:12:22 | 2015-05-27T15:12:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,633 | py | """
Django settings for emovie project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7xzti=v(6&bh7+$l5de0a0+p!w!+p7tblv%y5-cd%alvh4t53r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'xadmin',
'xcms',
'movie',
'movie_session',
'cinema',
'cm',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'emovie.urls'
WSGI_APPLICATION = 'emovie.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
TIME_ZONE = 'Asia/Shanghai'
LANGUAGES = (
('zh-CN', 'Simplified Chinese'),
('en', 'English'),
)
LANGUAGE_CODE = 'zh-CN'
USE_I18N = True
USE_L10N = True
USE_TZ = False
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H:i:s'
TIME_FORMAT = 'H:i:s'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
STATIC_ROOT = os.path.join(ROOT_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(ROOT_DIR, 'media')
MEDIA_URL = '/media/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# try:
# from product import *
# except ImportError:
# pass
| [
"lingnck@gmail.com"
] | lingnck@gmail.com |
adab5fb7e004978cbebf3c2330e8eac6f237a263 | d842a95213e48e30139b9a8227fb7e757f834784 | /gcloud/google-cloud-sdk/lib/surface/iot/devices/credentials/create.py | 3f061c8c10c35fbe2b555639f83327eab69ea7f8 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/JobSniperRails | f37a15edb89f54916cc272884b36dcd83cdc868a | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | refs/heads/master | 2022-11-22T18:12:37.972441 | 2019-09-20T22:43:14 | 2019-09-20T22:43:14 | 282,293,504 | 0 | 0 | MIT | 2020-07-24T18:47:35 | 2020-07-24T18:47:34 | null | UTF-8 | Python | false | false | 2,204 | py | # -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud iot credentials create` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudiot import devices
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iot import flags
from googlecloudsdk.command_lib.iot import resource_args
from googlecloudsdk.command_lib.iot import util
from googlecloudsdk.core import log
class Create(base.CreateCommand):
"""Add a new credential to a device.
A device may have at most 3 credentials.
"""
@staticmethod
def Args(parser):
resource_args.AddDeviceResourceArg(parser,
'for which to create credentials',
positional=False)
flags.AddDeviceCredentialFlagsToParser(parser, combine_flags=False)
def Run(self, args):
client = devices.DevicesClient()
device_ref = args.CONCEPTS.device.Parse()
new_credential = util.ParseCredential(
args.path, args.type, args.expiration_time, messages=client.messages)
credentials = client.Get(device_ref).credentials
if len(credentials) >= util.MAX_PUBLIC_KEY_NUM:
raise util.InvalidPublicKeySpecificationError(
'Cannot create a new public key credential for this device; '
'maximum {} keys are allowed.'.format(util.MAX_PUBLIC_KEY_NUM))
credentials.append(new_credential)
response = client.Patch(device_ref, credentials=credentials)
log.CreatedResource(device_ref.Name(), 'credentials for device')
return response
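
# Example invocation (illustrative; the resource values are placeholders, and
# --path/--type mirror the flags parsed above -- check `--help` for the
# authoritative flag set):
#
#   $ gcloud iot devices credentials create \
#       --device=my-device --registry=my-registry --region=us-central1 \
#       --path=rsa_cert.pem --type=rsa-x509-pem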
| [
"luizfper@gmail.com"
] | luizfper@gmail.com |
e2600c0fed8c5a857f10392c0665bc36c5b1364a | 216a5e05360afcda9f90a2a5154ce8ea33bf8f82 | /utils/permissions.py | 4f347907c4d70eceb2eccb3b16b89c8236a7a182 | [] | no_license | ppark9553/our-web-server | dfa6bdbdd4ced51d11b1d4951255c6618371a83f | a37ba6b27fc1973d8150fd253f6f5543be97ad1c | refs/heads/master | 2021-09-16T17:24:48.021715 | 2018-06-22T12:42:25 | 2018-06-22T12:42:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | from django.contrib.auth import get_user_model
from rest_framework import permissions
User = get_user_model()
class IsOwnerOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
req_user = request.user.username
if obj.__class__ == User:
return obj.username == req_user
else:
return obj.user == request.user
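
# Illustrative usage (added): attach the permission to a DRF view. The
# "Article" model/serializer below are placeholders, not project imports.
#
#   from rest_framework import viewsets
#
#   class ArticleViewSet(viewsets.ModelViewSet):
#       queryset = Article.objects.all()
#       serializer_class = ArticleSerializer
#       permission_classes = (IsOwnerOrReadOnly,)
#
# Safe methods (GET/HEAD/OPTIONS) are always allowed; writes require that
# the object is owned by (or is) the requesting user.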
| [
"ppark9553@gmail.com"
] | ppark9553@gmail.com |
a692f5fbc1997092e7d8ff1b9ee253f703e4b898 | 35a6f5a26ea97ebed8ab34619a8eec51719d2cc0 | /SpiderLearning/1 RequestBasic/4request_header_cookie.py | bdf907c06f27f2e4af98bfdc27634f2c7fbd9acf | [] | no_license | PandaCoding2020/pythonProject | c3644eda22d993b3b866564384ed10441786e6c5 | 26f8a1e7fbe22bab7542d441014edb595da39625 | refs/heads/master | 2023-02-25T14:52:13.542434 | 2021-02-03T13:42:41 | 2021-02-03T13:42:41 | 331,318,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | """
@Time : 2021/1/29 9:54
@Author : Steven Chen
@File : 4request_header_cookie.py
@Software: PyCharm
"""
# Goal: fetch a logged-in GitHub profile page
# Method: send a browser User-Agent plus the session cookie with requests
import requests
url = 'https://github.com/PandaCoding2020'
headers = {
'User-Agent':"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.53",
"cookie": "_octo=GH1.1.358692347.1554840020; _ga=GA1.2.59697938.1554840020; _device_id=60d9769fd3fdc2235abf6fdd29b31a97; user_session=kKzwErWaCQYaFMH0lrjSTfLNpSoXLKhCB3NBaUPYl8FejzWl; __Host-user_session_same_site=kKzwErWaCQYaFMH0lrjSTfLNpSoXLKhCB3NBaUPYl8FejzWl; logged_in=yes; dotcom_user=PandaCoding2020; has_recent_activity=1; tz=Asia%2FShanghai; _gh_sess=rlQUfViTvHnD9iR5XhwxzbrymK7xwYYHJ1vdRGB9vAonJRFKZk2duKjpGhvr4UwZqwRpZeOiDTfwMdnsPAwn6hjm4GNYxY7xzJK05u1%2FdwqhIgZBmGNgG7s4gDvqwiEqSA%2BbA14DGgqRCCYHsFloCToW0e7wLzGrtCMBgMNv8tx67QbyP4BaMyBxgHc%2FO%2F2Z--HcrZEvIzY1UFGgaL--tFLMqsWygO75tY5xHBRqYw%3D%3D"
}
response = requests.get(url, headers=headers)
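
# Optional sanity check (illustrative): confirm the session cookie actually
# authenticated us before trusting the saved page.
# print(response.status_code)                   # expect 200
# print('PandaCoding2020' in response.text)     # True only when logged in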
with open('github_without.html','wb') as f:
f.write(response.content) | [
"gzupanda@outlook.com"
] | gzupanda@outlook.com |
21d7f7d408c190688ed8f05e94c0b50134527b88 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/profile/ProfileAwards.py | 6823a9a78792cbc4ee5a3719776eef9944204c58 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,334 | py | # 2017.02.03 21:50:17 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/profile/ProfileAwards.py
from gui.Scaleform.daapi.view.meta.ProfileAwardsMeta import ProfileAwardsMeta
from gui.Scaleform.locale.PROFILE import PROFILE
from web_stubs import i18n
from gui.Scaleform.daapi.view.AchievementsUtils import AchievementsUtils
from gui.shared.utils.RareAchievementsCache import IMAGE_TYPE
from gui.shared.gui_items.dossier import dumpDossier
class ProfileAwards(ProfileAwardsMeta):
def __init__(self, *args):
super(ProfileAwards, self).__init__(*args)
self.__achievementsFilter = PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_ALL
def setFilter(self, data):
self.__achievementsFilter = data
self.invokeUpdate()
@classmethod
def _getTotalStatsBlock(cls, dossier):
return dossier.getTotalStats()
def _sendAccountData(self, targetData, accountDossier):
super(ProfileAwards, self)._sendAccountData(targetData, accountDossier)
achievements = targetData.getAchievements()
totalItemsList = []
for block in achievements:
totalItemsList.append(len(block))
if self.__achievementsFilter == PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_INPROCESS:
achievements = targetData.getAchievements(isInDossier=True)
elif self.__achievementsFilter == PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_NONE:
achievements = targetData.getAchievements(isInDossier=False)
packedList = []
for achievementBlockList in achievements:
packedList.append(AchievementsUtils.packAchievementList(achievementBlockList, accountDossier.getDossierType(), dumpDossier(accountDossier), self._userID is None))
self.as_responseDossierS(self._battlesType, {'achievementsList': packedList,
'totalItemsList': totalItemsList,
'battlesCount': targetData.getBattlesCount()}, '', '')
return
def _populate(self):
super(ProfileAwards, self)._populate()
initData = {'achievementFilter': {'dataProvider': [self.__packProviderItem(PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_ALL), self.__packProviderItem(PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_INPROCESS), self.__packProviderItem(PROFILE.SECTION_AWARDS_DROPDOWN_LABELS_NONE)],
'selectedItem': self.__achievementsFilter}}
self.as_setInitDataS(initData)
def _onRareImageReceived(self, imgType, rareID, imageData):
if imgType == IMAGE_TYPE.IT_67X71:
stats = self._getNecessaryStats()
achievement = stats.getAchievement(('rareAchievements', rareID))
if achievement is not None:
image_id = achievement.getSmallIcon()[6:]
self.as_setRareAchievementDataS(rareID, image_id)
return
def _dispose(self):
self._disposeRequester()
super(ProfileAwards, self)._dispose()
@staticmethod
def __packProviderItem(key):
return {'label': i18n.makeString(key),
'key': key}
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\profile\ProfileAwards.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:50:17 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
50f1bc82e3cd796a76f23afea6bb0124b04e54c2 | f5dbf8b9fc7a67167a966ad842999c5ec41d2363 | /app/migrations/0197_auto_20170209_1130.py | 4e84c7d80b4fb09fbfa2e9dde1a556f70fa6dff0 | [] | no_license | super0605/cogofly-v1 | 324ead9a50eaeea370bf40e6f37ef1372b8990fe | dee0f5db693eb079718b23099992fba3acf3e2dd | refs/heads/master | 2022-11-27T12:16:30.312089 | 2019-10-11T20:35:09 | 2019-10-11T20:35:09 | 214,522,983 | 0 | 0 | null | 2022-11-22T00:57:28 | 2019-10-11T20:25:01 | JavaScript | UTF-8 | Python | false | false | 1,974 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('app', '0196_auto_20170209_1047'),
]
operations = [
migrations.CreateModel(
name='PersonneBlogNewsletter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_creation', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('date_last_modif', models.DateTimeField(auto_now=True, verbose_name='Last changed')),
('date_v_debut', models.DateTimeField(default=django.utils.timezone.now, verbose_name='V. start')),
('date_v_fin', models.DateTimeField(default=None, null=True, verbose_name='V. end', blank=True)),
('date_sent', models.DateTimeField(default=None, null=True, verbose_name='Sent', blank=True)),
],
options={
'ordering': ['-date_last_modif', '-date_v_debut'],
'abstract': False,
},
),
migrations.AlterField(
model_name='blog',
name='date_envoi_newsletter',
field=models.DateField(default=None, help_text='Blank = never sent. If the date is older than now it will be sent tonight.', null=True, verbose_name='Add this blog into the newsletter', blank=True),
),
migrations.AddField(
model_name='personneblognewsletter',
name='blog',
field=models.ForeignKey(default=None, blank=True, to='app.Blog', null=True, verbose_name='Blog'),
),
migrations.AddField(
model_name='personneblognewsletter',
name='personne',
field=models.ForeignKey(default=None, blank=True, to='app.Personne', null=True, verbose_name='To'),
),
]
| [
"dream.dev1025@gmail.com"
] | dream.dev1025@gmail.com |
76bebcbd53c7a8e9ee54ffe104bf1631e3426098 | 453ca12d912f6498720152342085636ba00c28a1 | /leetcode/backtracking/python/sudoku_solver_leetcode.py | 7ab9498918f52ff1adde13c698c7938033f4934e | [] | no_license | yanbinkang/problem-bank | f9aa65d83a32b830754a353b6de0bb7861a37ec0 | bf9cdf9ec680c9cdca1357a978c3097d19e634ae | refs/heads/master | 2020-06-28T03:36:49.401092 | 2019-05-20T15:13:48 | 2019-05-20T15:13:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,652 | py | """
https://leetcode.com/problems/sudoku-solver/
Write a program to solve a Sudoku puzzle by filling the empty cells.
Empty cells are indicated by the character '.'
You may assume that there will be only one unique solution.
https://discuss.leetcode.com/topic/11327/straight-forward-java-solution-using-backtracking/18
O(9 ^ m) m represents the number of blanks to be filled in since each blank can have 9 choices. (Exponential)
"""
def solve_sudoku(board):
if not board or len(board) == 0:
return
solve(board)
def solve(board):
for i in range(len(board)): # row
for j in range(len(board[0])): # col
if board[i][j] == '.':
for c in '123456789':
if is_valid(board, i, j, c):
board[i][j] = c # put c in this cell
if solve(board):
return True # if its the solution return true
else:
board[i][j] = '.' # else go back
return False # 1..9 cannot be placed on board
return True # entire board is filled
def is_valid(board, row, col, c):
for i in range(9):
if board[i][col] == c:
return False
if board[row][i] == c:
return False
# this is also correct but results in longer runtime
# for j in range(9):
# if board[row][j] == c:
# return False
# check sub-box
for i in range(3):
for j in range(3):
if board[3 * (row / 3) + i][3 * (col / 3) + j] == c:
return False
return True
# solution for 4 x 4 board. Use for testing
def solve_sudoku_4_by_4(board):
if not board or len(board) == 0:
return
solve_4_by_4(board)
def solve_4_by_4(board):
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == '.':
for c in '1234':
if is_valid_4_by_4(board, i, j, c):
board[i][j] = c
if solve_4_by_4(board):
return True
else:
board[i][j] = '.'
return False # 1, 2, 3, 4 cannot be placed on board
return True # entire board is filled
def is_valid_4_by_4(board, row, col, c):
for i in range(4):
if board[i][col] == c:
return False
if board[row][i] == c:
return False
for i in range(2):
for j in range(2):
if board[2 * (row / 2) + i][2 * (col / 2 ) + j] == c:
return False
return True
if __name__ == '__main__':
board = [['.' for i in range(9)] for j in range(9)]
board[0] = list('53..7....')
board[1] = list('6..195...')
board[2] = list('.98....6.')
board[3] = list('8...6...3')
board[4] = list('4..8.3..1')
board[5] = list('7...2...6')
board[6] = list('.6....28.')
board[7] = list('...419..5')
board[8] = list('....8..79')
board_1 = [[None for i in range(4)] for j in range(4)]
board_1[0] = list('1.3.')
board_1[1] = list('..21')
board_1[2] = list('.1.2')
board_1[3] = list('24..')
solve_sudoku(board)
# print board
print '9 x 9 board solution'
print '\n'
for i in range(len(board)):
for j in range(len(board)):
print board[i][j],
print '\n'
print '\n'
solve_sudoku_4_by_4(board_1)
print '4 x 4 board solution'
print '\n'
for i in range(len(board_1)):
for j in range(len(board_1)):
print board_1[i][j],
print '\n'
| [
"albert.agram@gmail.com"
] | albert.agram@gmail.com |
d07657ffb4666e58c4579f7680be4286b481fc9c | a5ea878c1ab822ace8f8ba2b71c525b04dc97dad | /0x04-python-more_data_structures/4-only_diff_elements.py | a053c4f1ad16bff77eb9ad7ddff685c8f984b2bc | [] | no_license | gardenia-homsi/holbertonschool-python | 592c45e742f83695014abc318bf7269712b3a91c | fb7854835669aeffce71cf8fae7bca7d14d2e2f3 | refs/heads/master | 2023-01-22T05:50:00.447106 | 2020-12-03T21:32:55 | 2020-12-03T21:32:55 | 291,767,394 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | #!/usr/bin/python3
def only_diff_elements(set_1, set_2):
new_set = set_1.difference(set_2).union(set_2.difference(set_1))
    return new_set
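
# Quick sanity check (illustrative addition): the function computes the
# symmetric difference, equivalent to set_1 ^ set_2.
if __name__ == "__main__":
    s1 = {1, 2, 3, 4}
    s2 = {3, 4, 5}
    print(only_diff_elements(s1, s2))  # {1, 2, 5}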
| [
"noreply@github.com"
] | gardenia-homsi.noreply@github.com |
82bfae90259287144f1f2c3cddb7ab93c5f23692 | c47b68a858e01d5fe51661a8ded5138652d3082e | /src/recommender.py | 970816d03d7c4d5ca6d3f39836420dcaf2de1fe7 | [] | no_license | RitGlv/Practice_Makes_perfect | d2d50efbf810b41d0648f27d02b5710c14c3fcae | 3dcb7ff876e58ade64faed0fa5523cba7461cf8d | refs/heads/master | 2021-03-13T03:51:43.142777 | 2017-06-05T07:32:47 | 2017-06-05T07:32:47 | 91,500,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,779 | py | import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
import featurize
reload (featurize)
import decomposition
reload (decomposition)
from decomposition import decomposed
import plots
reload (plots)
from plots import plot_pca
import matplotlib.pyplot as plt
class SimilarityRecommender(object):
'''
Creates a matrix with recommendation scores based on content boosted collaborative filtering.
The final recommendation is based on user-user and item-item similarity
Currently works with static info, future: incorporate feature change over time
'''
def __init__(self,features_df,ratings_df):
#ratings_df = processed matrix of match rating per interview
self.ratings = ratings_df
self.sim_matrix = None
#features_df = processed matrix of features per user,assumes userId as index
self.features = features_df
self.baseline = None
self.recommended = []
self.false_positive_users = []
self.true_positive_users = []
self.all_recommendations = []
self.count=0
def fit(self):
self.get_ratings_matrix()
self.get_similarity_score()
def predict_one(self,user,n):
'''
Returns a list pf top N matched users
'''
self.recommended = []
n_most_similar = self.get_most_similar_users(user,n)
for similar_user in n_most_similar:
if np.asarray(self.match_matrix.iloc[similar_user]).max():
matched = np.asarray(self.match_matrix.iloc[similar_user]).argmax()
matched_id = self.match_matrix.index[matched]
most_similar = self.get_most_similar_users(matched_id,n)
for m in most_similar:
self.recommended.append(self.match_matrix.index[m])
self.recommended = set(self.recommended)
def get_most_similar_users(self,user,n):
'''
Ranked list of the most similar users to the requested user
User defined as a row in the sim_matrix
Treat users at different point of time as different users
'''
sorted_indices=np.argsort(self.sim_matrix[self.features.index==user])
n_most_similar= sorted_indices[0][1:(n+1)]
return n_most_similar
def get_ratings_matrix(self,index='userId1', columns='matched_user', values='good_match'):
'''
Get a matrix with all of the users matching scores
'''
self.match_matrix = self.ratings.pivot(index=index, columns=columns, values=values).fillna(-1)
def get_similarity_score(self,metric='euclidean'):
'''
Calculates similarity between every 2 users
'''
self.sim_matrix = pairwise_distances(self.features,metric=metric)
def model_eval(self,n):
'''
Asses model based on AUC for different n for recommendation
Predict all
n=2,3,5,10
'''
        self.eval_mat = np.full(self.sim_matrix.shape, -1.0)  # -1 marks "not recommended"
for user in self.match_matrix.index:
self.predict_one(user,n)
for predicted_match in self.recommended:
self.eval_mat[self.match_matrix.index==user][0][self.match_matrix.index==predicted_match]=1
if self.match_matrix[self.match_matrix.index==predicted_match][user][0] == 0:
self.false_positive_users.append((user,predicted_match))
elif self.match_matrix[self.match_matrix.index==predicted_match][user][0] == 1:
self.true_positive_users.append((user,predicted_match))
self.all_recommendations.append((user,self.recommended))
self.count+=1
if __name__=="__main__":
'''
Load data for all interview match rating
'''
path = 'data/full_data_one_row_swap_idsby_userwith_matched_user.csv'
df_for_rating = pd.read_csv(path)
    #create dataframe for the match-rating matrix
min_df = df_for_rating[['userId1','matched_user','totalMatch1','match1']]
with_match_type = featurize.good_match_bool(min_df)
interview_rating = featurize.dataframe_for_matrix(with_match_type)
train_path = 'data/full_data_one_row_swap_idsby_user.csv'
df = pd.read_csv(train_path).set_index('userId1')
df['experienceInYears1'] = np.sqrt(df['experienceInYears1'])
#columns to leave in the static inforamtion(pre_interview) grouped user dataframe
cols_to_leave = ['selfPrep1', 'experienceAreas1','experienceInYears1','degree1', 'status1','studyArea1']
categories = ['degree1','status1','studyArea1']
pca = decomposed(df)
pca.fit(cols_to_leave,categories,6)
df_pca = pd.DataFrame(pca.X_pca).set_index(pca.processed.index)
sim = SimilarityRecommender(df_pca,interview_rating)
sim.fit()
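
    # Possible next steps (illustrative; assumes interview_rating keeps the
    # userId1 column produced by featurize above):
    # sim.predict_one(interview_rating['userId1'].iloc[0], n=5)
    # print(sim.recommended)
    # sim.model_eval(n=5)
    # print(len(sim.true_positive_users), len(sim.false_positive_users))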
| [
"johndoe@example.com"
] | johndoe@example.com |
a209ed748eac1477a4eedfef2d1ff0311c02deee | 96ec8ea87fb2cfdd2d850a0471c9820f92152847 | /九章算法/基础班LintCode/Subarray Sum Closest.py | 1265ec062dd08f4ffd2b586b41598fd2433598bd | [] | no_license | bitterengsci/algorithm | ae0b9159fd21cc30c9865f981f9c18cf9c6898d7 | bf70d038b70c51edc6ddd6bfef1720fb5f9f2567 | refs/heads/master | 2023-08-10T10:22:18.774232 | 2023-07-31T21:04:11 | 2023-07-31T21:04:11 | 186,261,880 | 95 | 46 | null | 2023-07-31T21:04:12 | 2019-05-12T13:57:27 | Python | UTF-8 | Python | false | false | 861 | py | class Solution:
"""
@param: nums: A list of integers
@return: A list of integers includes the index of the first number and the index of the last number
"""
    # Prefix-sum optimization + sort, then a greedy scan of adjacent sums
def subarraySumClosest(self, nums):
prefix_sum = [(0, -1)] # sum, index
for i, num in enumerate(nums):
prefix_sum.append((prefix_sum[-1][0] + num, i))
prefix_sum.sort()
        closest, answer = float('inf'), []
for i in range(1, len(prefix_sum)):
if closest > prefix_sum[i][0] - prefix_sum[i - 1][0]:
closest = prefix_sum[i][0] - prefix_sum[i - 1][0]
left = min(prefix_sum[i - 1][1], prefix_sum[i][1]) + 1
right = max(prefix_sum[i - 1][1], prefix_sum[i][1])
answer = [left, right]
return answer | [
"yanran2012@gmail.com"
] | yanran2012@gmail.com |
d259fbb9ed2f76823094bdb3eca5bd6775fc1343 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/AKSHAYUBHAT_DeepVideoAnalytics/DeepVideoAnalytics-master/dvaapp/migrations/0005_auto_20170125_1807.py | 15fbaee9b504fad725f3b818bbbeb8551faa310a | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 658 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-25 18:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dvaapp', '0004_detection_metadata'),
]
operations = [
migrations.RemoveField(
model_name='frame',
name='bucket',
),
migrations.RemoveField(
model_name='frame',
name='key',
),
migrations.AddField(
model_name='frame',
name='name',
field=models.CharField(max_length=200, null=True),
),
]
| [
"659338505@qq.com"
] | 659338505@qq.com |
164f6b75d371e03cbe09103f8ec9eb0d85a4c5a1 | ac94164dd36b9d7fee5e460a5e115356059bf280 | /src/networks/classification/bert_adapter_owm.py | 8ffc8cb47e3aaa8702657cc57290c013802edf53 | [] | no_license | leducthanguet/PyContinual | 5c6014f64ccd29dc52b05ecc858b282846aa487b | 3325a1c33bfd2eab280f96f423cce59babcfcfc6 | refs/heads/main | 2023-08-30T01:23:04.765429 | 2021-10-28T03:50:24 | 2021-10-28T03:50:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,388 | py | #coding: utf-8
import sys
import torch
from transformers import BertModel, BertConfig
import utils
from torch import nn
sys.path.append("./networks/base/")
from my_transformers import MyBertModel
class Net(torch.nn.Module):
def __init__(self,taskcla,args):
super(Net,self).__init__()
config = BertConfig.from_pretrained(args.bert_model)
config.return_dict=False
self.bert = MyBertModel.from_pretrained(args.bert_model,config=config,args=args)
#BERT fixed all ===========
for param in self.bert.parameters():
# param.requires_grad = True
param.requires_grad = False
#But adapter is open
#Only adapters are trainable
if args.apply_bert_output and args.apply_bert_attention_output:
            adapters = \
[self.bert.encoder.layer[layer_id].attention.output.adapter_owm for layer_id in range(config.num_hidden_layers)] + \
[self.bert.encoder.layer[layer_id].attention.output.LayerNorm for layer_id in range(config.num_hidden_layers)] + \
[self.bert.encoder.layer[layer_id].output.adapter_owm for layer_id in range(config.num_hidden_layers)] + \
[self.bert.encoder.layer[layer_id].output.LayerNorm for layer_id in range(config.num_hidden_layers)]
elif args.apply_bert_output:
            adapters = \
[self.bert.encoder.layer[layer_id].output.adapter_owm for layer_id in range(config.num_hidden_layers)] + \
[self.bert.encoder.layer[layer_id].output.LayerNorm for layer_id in range(config.num_hidden_layers)]
elif args.apply_bert_attention_output:
            adapters = \
[self.bert.encoder.layer[layer_id].attention.output.adapter_owm for layer_id in range(config.num_hidden_layers)] + \
[self.bert.encoder.layer[layer_id].attention.output.LayerNorm for layer_id in range(config.num_hidden_layers)]
        for adapter in adapters:
for param in adapter.parameters():
param.requires_grad = True
# param.requires_grad = False
self.taskcla=taskcla
self.dropout = nn.Dropout(args.hidden_dropout_prob)
self.args = args
if 'dil' in args.scenario:
self.last=torch.nn.Linear(args.bert_hidden_size,args.nclasses)
elif 'til' in args.scenario:
self.last=torch.nn.ModuleList()
for t,n in self.taskcla:
self.last.append(torch.nn.Linear(args.bert_hidden_size,n))
print('BERT ADAPTER OWM')
return
def forward(self,input_ids, segment_ids, input_mask):
output_dict_ = {} # more flexible
output_dict = \
self.bert(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
sequence_output, pooled_output = output_dict['outputs']
x_list = output_dict['x_list']
h_list = output_dict['h_list']
pooled_output = self.dropout(pooled_output)
if 'dil' in self.args.scenario:
y=self.last(pooled_output)
elif 'til' in self.args.scenario:
y=[]
for t,i in self.taskcla:
y.append(self.last[t](pooled_output))
output_dict_['y'] = y
output_dict_['x_list'] = x_list
output_dict_['h_list'] = h_list
return output_dict_
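
# Shape sketch (illustrative, added): with (B, L) integer tensors,
#
#   net = Net(taskcla, args)
#   out = net(input_ids, segment_ids, input_mask)
#   out['y']       # 'dil': (B, nclasses) logits; 'til': one tensor per task
#   out['x_list'], out['h_list']  # intermediates collected by MyBertModel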
| [
"iscauzixuanke@gmail.com"
] | iscauzixuanke@gmail.com |
7682acc242e042c01dd0c2734bf532189b2ccb0d | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/tractive/__init__.py | dcde4b87436408e1c7bd4082fd1b7966087f1b86 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 42 | py | """Tests for the tractive integration."""
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
c9cd595e6b21f955b807c5cb083e1cfd285d16a7 | 3d729e2e5b5d486095159c6636fa832fed48bcac | /server/advert/models.py | 27a2710cad10fba08c5e94caa1a30284a002bdac | [] | no_license | UuljanAitnazarova/advert_project | 8e7a590244c930a725916f3d3d89c74549c9ab71 | fa5e054ac2c0990c3b0feadc8e81e23e7d672a0b | refs/heads/master | 2023-07-07T20:51:10.027739 | 2021-08-21T13:35:03 | 2021-08-21T13:35:03 | 398,460,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | from django.db import models
from django.contrib.auth import get_user_model
class Advert(models.Model):
CATEGORY_CHOICE = [
('ad', 'ad'),
('announcement', 'announcement'),
]
title = models.CharField(max_length=250, blank=False, null=False)
category = models.CharField(max_length=13, choices=CATEGORY_CHOICE, blank=False, null=False)
description = models.TextField(max_length=400, blank=False, null=False)
image = models.ImageField(upload_to='images', blank=True, null=True)
price = models.PositiveIntegerField(blank=True, null=True)
author = models.ForeignKey(get_user_model(),
blank=False,
null=False,
related_name='advert',
on_delete=models.CASCADE)
created_date = models.DateField(auto_now_add=True)
modified_date = models.DateTimeField(auto_now=True)
post_date = models.DateTimeField(auto_now=True)
moderated = models.BooleanField(default=False)
rejected = models.BooleanField(default=False)
def __str__(self):
return f'{self.title}: {self.author}'
class Meta:
permissions = [
            ('can_approve', 'Can approve')
] | [
"u.aitnazarova@gmail.com"
] | u.aitnazarova@gmail.com |
4b629d5f872139c59efe6c33f432c436361d632d | 58f7c634dc666f703f827c6e8fd49f414547501b | /models/InceptionResNetV2.py | f84e6042c89c11e535c0a6f8b693223683a664f6 | [
"MIT"
] | permissive | AsuradaYuci/deep-person-reid | fa3322c21e755e35eae41c8f62feeaad77140a3a | 3001c392776adb6937c2449c87272d4277476d3c | refs/heads/master | 2020-03-21T11:04:12.645987 | 2018-06-07T16:07:45 | 2018-06-07T16:07:45 | 138,487,653 | 1 | 1 | MIT | 2018-06-24T14:15:27 | 2018-06-24T14:15:26 | null | UTF-8 | Python | false | false | 12,336 | py | from __future__ import absolute_import
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.utils.model_zoo as model_zoo
import os
import sys
"""
Code imported from https://github.com/Cadene/pretrained-models.pytorch
"""
__all__ = ['InceptionResNetV2']
pretrained_settings = {
'inceptionresnetv2': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000
},
'imagenet+background': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1001
}
}
}
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=False) # verify bias false
self.bn = nn.BatchNorm2d(out_planes,
eps=0.001, # value found in tensorflow
momentum=0.1, # default pytorch value
affine=True)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Mixed_5b(nn.Module):
def __init__(self):
super(Mixed_5b, self).__init__()
self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(192, 48, kernel_size=1, stride=1),
BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2)
)
self.branch2 = nn.Sequential(
BasicConv2d(192, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(192, 64, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block35(nn.Module):
def __init__(self, scale=1.0):
super(Block35, self).__init__()
self.scale = scale
self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(320, 32, kernel_size=1, stride=1),
BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
BasicConv2d(320, 32, kernel_size=1, stride=1),
BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),
BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1)
)
self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.relu(out)
return out
class Mixed_6a(nn.Module):
def __init__(self):
super(Mixed_6a, self).__init__()
self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
BasicConv2d(320, 256, kernel_size=1, stride=1),
BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),
BasicConv2d(256, 384, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Block17(nn.Module):
def __init__(self, scale=1.0):
super(Block17, self).__init__()
self.scale = scale
self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(1088, 128, kernel_size=1, stride=1),
BasicConv2d(128, 160, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(160, 192, kernel_size=(7,1), stride=1, padding=(3,0))
)
self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.relu(out)
return out
class Mixed_7a(nn.Module):
def __init__(self):
super(Mixed_7a, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 384, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 288, kernel_size=3, stride=2)
)
self.branch2 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1),
BasicConv2d(288, 320, kernel_size=3, stride=2)
)
self.branch3 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block8(nn.Module):
def __init__(self, scale=1.0, noReLU=False):
super(Block8, self).__init__()
self.scale = scale
self.noReLU = noReLU
self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(2080, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=(1,3), stride=1, padding=(0,1)),
BasicConv2d(224, 256, kernel_size=(3,1), stride=1, padding=(1,0))
)
self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)
if not self.noReLU:
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
if not self.noReLU:
out = self.relu(out)
return out
def inceptionresnetv2(num_classes=1000, pretrained='imagenet'):
r"""InceptionResNetV2 model architecture from the
`"InceptionV4, Inception-ResNet..." <https://arxiv.org/abs/1602.07261>`_ paper.
"""
if pretrained:
settings = pretrained_settings['inceptionresnetv2'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
# both 'imagenet'&'imagenet+background' are loaded from same parameters
model = InceptionResNetV2(num_classes=1001)
model.load_state_dict(model_zoo.load_url(settings['url']))
if pretrained == 'imagenet':
new_last_linear = nn.Linear(1536, 1000)
new_last_linear.weight.data = model.last_linear.weight.data[1:]
new_last_linear.bias.data = model.last_linear.bias.data[1:]
model.last_linear = new_last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = InceptionResNetV2(num_classes=num_classes)
return model
##################### Model Definition #########################
class InceptionResNetV2(nn.Module):
def __init__(self, num_classes, loss={'xent'}, **kwargs):
super(InceptionResNetV2, self).__init__()
self.loss = loss
# Modules
self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.maxpool_3a = nn.MaxPool2d(3, stride=2)
self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
self.maxpool_5a = nn.MaxPool2d(3, stride=2)
self.mixed_5b = Mixed_5b()
self.repeat = nn.Sequential(
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17)
)
self.mixed_6a = Mixed_6a()
self.repeat_1 = nn.Sequential(
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10)
)
self.mixed_7a = Mixed_7a()
self.repeat_2 = nn.Sequential(
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20)
)
self.block8 = Block8(noReLU=True)
self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)
self.classifier = nn.Linear(1536, num_classes)
self.feat_dim = 1536
self.init_params()
def init_params(self):
"""Load ImageNet pretrained weights"""
settings = pretrained_settings['inceptionresnetv2']['imagenet']
pretrained_dict = model_zoo.load_url(settings['url'], map_location=None)
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
def features(self, input):
x = self.conv2d_1a(input)
x = self.conv2d_2a(x)
x = self.conv2d_2b(x)
x = self.maxpool_3a(x)
x = self.conv2d_3b(x)
x = self.conv2d_4a(x)
x = self.maxpool_5a(x)
x = self.mixed_5b(x)
x = self.repeat(x)
x = self.mixed_6a(x)
x = self.repeat_1(x)
x = self.mixed_7a(x)
x = self.repeat_2(x)
x = self.block8(x)
x = self.conv2d_7b(x)
x = F.avg_pool2d(x, x.size()[2:])
x = x.view(x.size(0), -1)
return x
def forward(self, input):
x = self.features(input)
if not self.training:
return x
y = self.classifier(x)
if self.loss == {'xent'}:
return y
elif self.loss == {'xent', 'htri'}:
return y, x
elif self.loss == {'cent'}:
return y, x
elif self.loss == {'ring'}:
return y, x
else:
raise KeyError("Unsupported loss: {}".format(self.loss)) | [
"k.zhou@qmul.ac.uk"
] | k.zhou@qmul.ac.uk |
451585c409af738c6b0ceaaf948a14aeda57bae1 | cc738a180b98d3a48b740a53ed7a1f30604be292 | /src/zeep/wsdl/utils.py | 1951deacf7bc472c557e1cd7d5f93f8a2ddd3221 | [
"MIT",
"BSD-3-Clause"
] | permissive | Easter-eggs/python-zeep | c0cb3e9b71d5a5dabfbeea81eef528e195e92918 | ad6e7ea22bff989b78d3a5b30ab04869626b2565 | refs/heads/master | 2021-01-16T22:32:56.513623 | 2016-05-31T17:52:18 | 2016-05-31T17:52:18 | 60,026,731 | 0 | 0 | null | 2016-05-30T17:13:10 | 2016-05-30T17:13:10 | null | UTF-8 | Python | false | false | 380 | py |
def _soap_element(xmlelement, key):
"""So soap1.1 and 1.2 namespaces can be mixed HAH!"""
namespaces = [
'http://schemas.xmlsoap.org/wsdl/soap/',
'http://schemas.xmlsoap.org/wsdl/soap12/',
]
for ns in namespaces:
retval = xmlelement.find('soap:%s' % key, namespaces={'soap': ns})
if retval is not None:
return retval
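
# Illustrative lookup (added): the helper resolves the child no matter which
# of the two SOAP namespaces the WSDL declares.
#
#   from lxml import etree
#   node = etree.fromstring(
#       b'<binding xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap12/">'
#       b'<soap:address location="http://example.com"/></binding>')
#   _soap_element(node, 'address')  # found even though it is soap12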
| [
"michaelvantellingen@gmail.com"
] | michaelvantellingen@gmail.com |
c1d04137db0e2cecbfa27de4ab64fe211339820d | 05857cd30669a914d69ce872141964a4e6b31edd | /__init__.py | 13195f8831129b1aff66bfb2dfbe0656e6b2943e | [] | no_license | EricSchles/test_naming | f61e0900835edbbd7f5054e1916e38647f460e9e | a9be0cc48c40b704c7970968458db3631c8116e2 | refs/heads/master | 2021-01-17T12:21:09.588993 | 2014-11-05T00:11:34 | 2014-11-05T00:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | from sample import Foo
| [
"ericschles@gmail.com"
] | ericschles@gmail.com |
7feed8b3a53ec0c6af7957788eabe1b4e7195e56 | 134c429df7d5c4d067d9761cb1435992b048adaf | /notes/0922/0922.py | 51d8a7cdbfebf9eb26dda9c7f5c67ef9348a1ca8 | [] | no_license | PaulGuo5/Leetcode-notes | 65c6ebb61201d6f16386062e4627291afdf2342d | 431b763bf3019bac7c08619d7ffef37e638940e8 | refs/heads/master | 2021-06-23T09:02:58.143862 | 2021-02-26T01:35:15 | 2021-02-26T01:35:15 | 177,007,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | class Solution:
    from typing import List  # added: the annotations below need this name

    def sortArrayByParityII2(self, A: List[int]) -> List[int]:
odd = []
even = []
for i in range(len(A)):
if A[i] % 2 == 0:
even.append(A[i])
else:
odd.append(A[i])
j = 0
for i in range(0, len(A), 2):
A[i] = even[j]
j += 1
j = 0
for i in range(1, len(A), 2):
A[i] = odd[j]
j += 1
return A
def sortArrayByParityII(self, A: List[int]) -> List[int]:
even = 0
odd = 1
        while even < len(A) and odd < len(A):
if A[even] % 2 != 0 and A[odd] % 2 == 0:
temp = A[even]
A[even] = A[odd]
A[odd] = temp
even += 2
odd += 2
elif A[even] % 2 != 0 and A[odd] % 2 != 0:
odd += 2
elif A[even] % 2 == 0 and A[odd] % 2 != 0:
even += 2
odd += 2
elif A[even] % 2 == 0 and A[odd] % 2 == 0:
even += 2
return A
| [
"zhg26@pitt.edu"
] | zhg26@pitt.edu |
24e82d6941781e4a93889c8938e5681d3f6fbc6f | 157d2a2f4031c58e5504bcbac5348ff53883facc | /rDj27/rDj27/wsgi.py | fef165e765fca4a3dc487b940bb1a3b33aad776e | [] | no_license | optirg-39/Django_gekSh | d78b635fd3ee88addd084b68ec35c6284adfb55c | 1129a6df35c110dfeeeaaf1a76b2ebc192a5f1ce | refs/heads/master | 2023-04-15T13:09:03.067099 | 2021-04-26T12:15:35 | 2021-04-26T12:15:35 | 352,018,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for rDj27 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rDj27.settings')
application = get_wsgi_application()
| [
"opti39rg@gmail.com"
] | opti39rg@gmail.com |
8e1737c26a20b1eeebbf9f29a33a37e8ae65e723 | e7917cf00e06331c59799a27ddb57256268941f1 | /ptm/rate_estimator/debug_stuck_particles.py | ecff4a726313711b03e4ae35e6fb20c7875d09db | [] | no_license | rustychris/csc | 75d9f36b9c5ccb614ebab17ff110a7e1c0ad4764 | b29f94be3f2c44c222f3113d5fcadf6cfbbf0df1 | refs/heads/master | 2022-05-01T11:53:50.322733 | 2022-04-13T16:25:10 | 2022-04-13T16:25:10 | 133,419,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | from stompy.grid import unstructured_grid
from stompy.model.fish_ptm import ptm_tools
##
hyd=xr.open_dataset('../../dflowfm/runs/20180807_grid98_17/ptm_hydro.nc')
g=unstructured_grid.UnstructuredGrid.from_ugrid(hyd)
##
init=ptm_tools.PtmBin('run_10days/INIT_bin.out')
sac=ptm_tools.PtmBin('run_10days/SAC_bin.out')
srv=ptm_tools.PtmBin('run_10days/SRV_bin.out')
ntimes=init.count_timesteps()
##
# zoom=(605889.6569457075, 638002.2586920519, 4217801.158715993, 4241730.226468915)
# zoom=(597913.7274775933, 648118.8262812896, 4217179.54644355, 4301202.344200377)
# zoom=(611280.377359663, 632614.9072567355, 4222938.787804629, 4248182.140275016)
zoom=(626037.7515578158, 626228.6109768279, 4232804.050163795, 4233029.878040465)
plt.figure(1).clf()
fig,ax=plt.subplots(num=1)
ti=500
init.plot(ti,ax=ax,zoom=zoom,update=False,ms=4)
sac.plot(ti,ax=ax,zoom=zoom,update=False,color='cyan',ms=4)
srv.plot(ti,ax=ax,zoom=zoom,update=False,color='g',ms=4)
g.plot_edges(color='k',lw=0.4,ax=ax,clip=zoom) # ,labeler='id')
# g.plot_cells(centers=True,labeler=lambda i,r:str(i),clip=zoom,ax=ax)
ax.axis(zoom)
##
# For example,
j=25170
c_deep=21111
c_shallow=51090
##
t=hyd.nMesh2_data_time
# Flow on this edge is 0 for all time.
Qj=hyd.h_flow_avg.isel(nMesh2_edge=j,nMesh2_layer_3d=0)
# 1 for all time
j_bot=hyd.Mesh2_edge_bottom_layer.isel(nMesh2_edge=j)
# 0 for all time.
j_top=hyd.Mesh2_edge_top_layer.isel(nMesh2_edge=j)
# 0 for all time
Aj=hyd.Mesh2_edge_wet_area.isel(nMesh2_edge=j,nMesh2_layer_3d=0)
# shallow cell 51090 is always bottom layer=1, top=0
# deep cell 21111 is always bottom=top=1
##
plt.figure(2).clf()
plt.plot(t,Qj)
| [
"rustychris@gmail.com"
] | rustychris@gmail.com |
6c5cc7b15fbdc3db75af1c48e02db2934e988e96 | 54bb9ba6d507cd25b2c2ac553665bc5fc95280d1 | /src/onegov/wtfs/layouts/invoice.py | dd78e0508c54baa33f82ff232d7890fd5e0dc050 | [
"MIT"
] | permissive | href/onegov-cloud | 9ff736d968979380edba266b6eba0e9096438397 | bb292e8e0fb60fd1cd4e11b0196fbeff1a66e079 | refs/heads/master | 2020-12-22T07:59:13.691431 | 2020-01-28T08:51:54 | 2020-01-28T08:51:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | from cached_property import cached_property
from onegov.core.elements import Link
from onegov.wtfs import _
from onegov.wtfs.collections import PaymentTypeCollection
from onegov.wtfs.layouts.default import DefaultLayout
from onegov.wtfs.security import EditModel
class InvoiceLayout(DefaultLayout):
@cached_property
def title(self):
return _("Create invoice")
@cached_property
def editbar_links(self):
result = []
model = PaymentTypeCollection(self.request.session)
if self.request.has_permission(model, EditModel):
result.append(
Link(
text=_("Manage payment types"),
url=self.request.link(model),
attrs={'class': 'payment-icon'}
)
)
return result
@cached_property
def breadcrumbs(self):
return [
Link(_("Homepage"), self.homepage_url),
Link(self.title, self.request.link(self.model))
]
@cached_property
def cancel_url(self):
return self.invoices_url
@cached_property
def success_url(self):
return self.invoices_url
| [
"denis.krienbuehl@seantis.ch"
] | denis.krienbuehl@seantis.ch |
f9a81e96bf7412c530f031c1ba97734e5ad6a5ce | 1543840cd62b4a3301ce4626e3f3dafa1fbe3715 | /parallel_wavegan/layers/pqmf.py | bb31c430d2abe0219f58f153f69d836383e095ef | [
"MIT"
] | permissive | arita37/ParallelWaveGAN | cc4dc10560595bf17e941a4c4576731169bd64ae | bb32b19f9ccb638de670f8b8d3a1dfed13ecf1c3 | refs/heads/master | 2022-11-21T16:45:00.289300 | 2020-07-20T14:09:23 | 2020-07-20T14:09:23 | 283,928,615 | 1 | 0 | MIT | 2020-07-31T03:02:01 | 2020-07-31T03:02:00 | null | UTF-8 | Python | false | false | 4,478 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Pseudo QMF modules."""
import numpy as np
import torch
import torch.nn.functional as F
from scipy.signal import kaiser
def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0):
"""Design prototype filter for PQMF.
This method is based on `A Kaiser window approach for the design of prototype
filters of cosine modulated filterbanks`_.
Args:
taps (int): The number of filter taps.
cutoff_ratio (float): Cut-off frequency ratio.
beta (float): Beta coefficient for kaiser window.
Returns:
        ndarray: Impulse response of prototype filter (taps + 1,).
.. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
https://ieeexplore.ieee.org/abstract/document/681427
"""
# check the arguments are valid
    assert taps % 2 == 0, "The number of taps must be an even number."
assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."
# make initial filter
omega_c = np.pi * cutoff_ratio
with np.errstate(invalid='ignore'):
h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) \
/ (np.pi * (np.arange(taps + 1) - 0.5 * taps))
h_i[taps // 2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form
# apply kaiser window
w = kaiser(taps + 1, beta)
h = h_i * w
return h
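
# Example (illustrative): build the default 63-tap prototype and inspect it.
#
#   h = design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0)
#   print(h.shape)                           # (63,)
#   print(np.abs(np.fft.rfft(h, 1024))[0])   # DC gain, roughly unity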
class PQMF(torch.nn.Module):
"""PQMF module.
This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.
.. _`Near-perfect-reconstruction pseudo-QMF banks`:
https://ieeexplore.ieee.org/document/258122
"""
def __init__(self, subbands=4, taps=62, cutoff_ratio=0.15, beta=9.0):
"""Initilize PQMF module.
Args:
subbands (int): The number of subbands.
taps (int): The number of filter taps.
cutoff_ratio (float): Cut-off frequency ratio.
beta (float): Beta coefficient for kaiser window.
"""
super(PQMF, self).__init__()
# define filter coefficient
h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
h_analysis = np.zeros((subbands, len(h_proto)))
h_synthesis = np.zeros((subbands, len(h_proto)))
for k in range(subbands):
h_analysis[k] = 2 * h_proto * np.cos(
(2 * k + 1) * (np.pi / (2 * subbands)) *
(np.arange(taps + 1) - ((taps - 1) / 2)) +
(-1) ** k * np.pi / 4)
h_synthesis[k] = 2 * h_proto * np.cos(
(2 * k + 1) * (np.pi / (2 * subbands)) *
(np.arange(taps + 1) - ((taps - 1) / 2)) -
(-1) ** k * np.pi / 4)
# convert to tensor
analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1)
synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0)
        # register coefficients as buffer
self.register_buffer("analysis_filter", analysis_filter)
self.register_buffer("synthesis_filter", synthesis_filter)
# filter for downsampling & upsampling
updown_filter = torch.zeros((subbands, subbands, subbands)).float()
for k in range(subbands):
updown_filter[k, k, 0] = 1.0
self.register_buffer("updown_filter", updown_filter)
self.subbands = subbands
# keep padding info
self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)
def analysis(self, x):
"""Analysis with PQMF.
Args:
x (Tensor): Input tensor (B, 1, T).
Returns:
Tensor: Output tensor (B, subbands, T // subbands).
"""
x = F.conv1d(self.pad_fn(x), self.analysis_filter)
return F.conv1d(x, self.updown_filter, stride=self.subbands)
def synthesis(self, x):
"""Synthesis with PQMF.
Args:
x (Tensor): Input tensor (B, subbands, T // subbands).
Returns:
Tensor: Output tensor (B, 1, T).
"""
        # NOTE(kan-bayashi): Power will be decreased, so multiply by #subbands here.
# Not sure this is the correct way, it is better to check again.
# TODO(kan-bayashi): Understand the reconstruction procedure
x = F.conv_transpose1d(x, self.updown_filter * self.subbands, stride=self.subbands)
return F.conv1d(self.pad_fn(x), self.synthesis_filter)
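
# Round-trip sketch (illustrative): analysis then synthesis approximately
# reconstructs the waveform (up to filter delay and reconstruction error).
#
#   pqmf = PQMF(subbands=4)
#   x = torch.randn(1, 1, 16000)
#   subbands = pqmf.analysis(x)        # (1, 4, 4000)
#   x_hat = pqmf.synthesis(subbands)   # (1, 1, 16000)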
| [
"hayashi.tomoki@g.sp.m.is.nagoya-u.ac.jp"
] | hayashi.tomoki@g.sp.m.is.nagoya-u.ac.jp |
9f437582fa091c0826b81b2906a1727f5729c925 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /xcv58/LeetCode/Maximum-Depth-of-Binary-Tree/Solution.py | f1999894091385ef5f7a20ac09a37d2cdd716a2f | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return an integer
def maxDepth(self, root):
return 0 if root is None else max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
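# A tiny usage sketch (added for illustration; not part of the original
# solution), assuming the commented-out TreeNode class above is defined:
#
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   root.left.left = TreeNode(4)
#   Solution().maxDepth(root)   # -> 3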
| [
"xenron@outlook.com"
] | xenron@outlook.com |
8450e35c6df3376e4ad940bdd8d807a8d6f6efa8 | cdd51efdcf88e46c948be8a5a0cec61423d47898 | /lenstools/contours.py | f6c361c74705c8f7279871ebdf1a5cd7b4ebefbd | [
"MIT"
] | permissive | kjemmett/LensTools | 842ced8936cc8a4b52286db875a875cdf3198bf5 | 53708032aa16a3c13887f3002f4836e55523229c | refs/heads/master | 2021-01-18T13:29:41.871230 | 2015-10-13T21:08:51 | 2015-10-13T21:08:51 | 44,204,717 | 0 | 0 | null | 2015-10-13T21:03:16 | 2015-10-13T21:03:15 | null | UTF-8 | Python | false | false | 18,715 | py | """
.. module:: contours
:platform: Unix
:synopsis: This module implements a confidence contour plotting engine
.. moduleauthor:: Andrea Petri <apetri@phys.columbia.edu>
"""
from __future__ import print_function,division,with_statement
import os
import logging
import numpy as np
from scipy import stats
from scipy import integrate
import matplotlib.pyplot as plt
from matplotlib import rc
#############################################################
############Find confidence levels in 1D likelihood##########
#############################################################
def _1d_level_values(p,l,level=0.684,quantity=2):
"""
	Find the parameter extremes that correspond to the likelihood N-sigma level
"""
#Find the maximum of the likelihood
maximum = np.where(l==l.max())[0][0]
parmax = p[maximum]
all_levels = np.zeros_like(l)
for n in range(l.shape[0]):
all_levels[n] = l[l>=l[n]].sum() / l.sum()
#Find the closest level
closest = np.argmin(np.abs(all_levels - level))
#Find the n corresponding parameter values
	ranks = stats.rankdata(np.abs(l-l[closest])).astype(int) - 1
par = list()
for n in range(quantity):
par.append(p[np.where(ranks==n)[0][0]])
#Sort from left to right
par.sort()
return par
#############################################################
###########Find confidence levels in N-dim likelihood########
#############################################################
def _nd_level_value(likelihood,level,low,high,precision=0.01):
middle = (low+high)/2
current_integral = likelihood[likelihood>middle].sum()
if np.abs((current_integral-level)/level)<precision:
return middle
#Proceed with bisection method
if current_integral>level:
return _nd_level_value(likelihood,level,middle,high,precision=precision)
else:
return _nd_level_value(likelihood,level,low,middle,precision=precision)
#############################################################
##################ContourPlot class##########################
#############################################################
class ContourPlot(object):
"""
A class handler for contour plots
"""
def __init__(self,fig=None,ax=None):
try:
if (fig is None) or (ax is None):
self.fig,self.ax = plt.subplots()
self.ax.proxy = list()
else:
self.fig = fig
self.ax = ax
if not hasattr(self.ax,"proxy"):
self.ax.proxy = list()
except:
print("Warning, no matplotlib functionalities!")
pass
self.min = dict()
self.max = dict()
self.npoints = dict()
self.unit = dict()
def savefig(self,figname):
"""
Save the plot to file
"""
self.fig.savefig(figname)
def close(self):
"""
Closes the figure
"""
plt.close(self.fig)
def window(self):
plt.ion()
plt.show()
def getUnitsFromOptions(self,options):
"""
Parse options file to get physical units of axes
"""
assert hasattr(self,"parameter_axes"),"You have to load in the likelihood first!"
parameters = self.parameter_axes.keys()
for parameter in parameters:
self.min[parameter],self.max[parameter],self.npoints[parameter] = options.getfloat(parameter,"min"),options.getfloat(parameter,"max"),options.getint(parameter,"num_points")
assert self.npoints[parameter] == self.likelihood.shape[self.parameter_axes[parameter]]
self.unit[parameter] = (self.max[parameter] - self.min[parameter]) / (self.npoints[parameter] - 1)
def setUnits(self,parameter,parameter_min,parameter_max,parameter_unit):
"""
Set manually the physical units for each of the likelihood axes
"""
assert hasattr(self,"parameter_axes"),"You have to load in the likelihood first!"
assert parameter in self.parameter_axes.keys(),"You are trying to set units for a parameter that doesn't exist!"
self.min[parameter] = parameter_min
self.max[parameter] = parameter_max
self.unit[parameter] = parameter_unit
print("Units set for {0}; min={1:.3f} max={2:.3f} unit={3:.3f}".format(parameter,parameter_min,parameter_max,parameter_unit))
def value(self,*coordinates):
"""
Compute the (un-normalized) likelihood value at the specified point in parameter space
"""
assert len(coordinates) == self.likelihood.ndim,"You must specify a coordinate (and only one) for each axis"
#Compute the physical values of the pixels
		pix = np.zeros(len(coordinates), dtype=int)  # integer pixel indices
for parameter in self.parameter_axes.keys():
assert parameter in self.unit.keys() and parameter in self.min.keys()
axis = self.parameter_axes[parameter]
pix[axis] = int((coordinates[axis] - self.min[parameter])/(self.unit[parameter]))
#Return the found likelihood value
try:
return self.likelihood[tuple(pix)]
except IndexError:
print("Out of bounds!")
return None
def getLikelihood(self,likelihood_filename,parameter_axes={"Omega_m":0,"w":1,"sigma8":2},parameter_labels={"Omega_m":r"$\Omega_m$","w":r"$w$","sigma8":r"$\sigma_8$"}):
"""
Load the likelihood function from a numpy file
"""
self.parameter_axes = parameter_axes
self.parameter_labels = parameter_labels
if type(likelihood_filename)==str:
self.likelihood = np.load(likelihood_filename)
#Construct title label
self.title_label = os.path.split(likelihood_filename)[1].lstrip("likelihood_").rstrip(".npy")
elif type(likelihood_filename)==np.ndarray:
self.likelihood = likelihood_filename
#Construct title label
self.title_label = "Default"
assert len(self.parameter_axes.keys()) == self.likelihood.ndim,"The number of parameters should be the same as the number of dimensions of the likelihood!"
#Normalize
self.likelihood /= self.likelihood.sum()
def getMaximum(self,which="full"):
"""
Find the point in parameter space on which the likelihood is maximum
"""
max_parameters = dict()
if which=="full":
max_loc = np.where(self.likelihood==self.likelihood.max())
for parameter in self.parameter_axes.keys():
max_parameters[parameter] = max_loc[self.parameter_axes[parameter]][0] * self.unit[parameter] + self.min[parameter]
elif which=="reduced":
max_loc = np.where(self.reduced_likelihood==self.reduced_likelihood.max())
for n,parameter in enumerate(self.remaining_parameters):
max_parameters[parameter] = max_loc[n][0] * self.unit[parameter] + self.min[parameter]
else:
raise ValueError("which must be either 'full' or 'reduced'")
return max_parameters
def expectationValue(self,function,**kwargs):
"""
Computes the expectation value of a function of the parameters over the current parameter likelihood
"""
assert hasattr(self,"likelihood"),"You have to load in the likelihood first!"
#Parameters
parameters = self.parameter_axes.keys()
parameters.sort(key=self.parameter_axes.__getitem__)
#Initialize the parameter mesh
mesh_axes = [ np.linspace(self.min[par],self.max[par],self.npoints[par]) for par in parameters ]
parameter_mesh = np.meshgrid(*tuple(mesh_axes),indexing="ij")
#Compute the expectation value
expectation = (function(parameter_mesh,**kwargs)*self.likelihood).sum() / self.likelihood.sum()
#Return
return expectation
def variance(self,function,**kwargs):
"""
Computes the variance of a function of the parameters over the current parameter likelihood
"""
expectation = self.expectationValue(function,**kwargs)
#Parameters
parameters = self.parameter_axes.keys()
parameters.sort(key=self.parameter_axes.__getitem__)
#Initialize the parameter mesh
mesh_axes = [ np.linspace(self.min[par],self.max[par],self.npoints[par]) for par in parameters ]
parameter_mesh = np.meshgrid(*tuple(mesh_axes),indexing="ij")
#Compute the variance
variance = (self.likelihood*(function(parameter_mesh,**kwargs) - expectation)**2).sum() / self.likelihood.sum()
#Return
return variance
def marginalize(self,parameter_name="w"):
"""
Marginalize the likelihood over the indicated parameters
"""
#Parse all the parameters to marginalize over
marginalize_parameters = parameter_name.split(",")
assert hasattr(self,"likelihood"),"You have to load in the likelihood first!"
for par in marginalize_parameters:
assert par in self.parameter_axes.keys(),"You are trying to marginalize over a parameter {0}, that does not exist!".format(par)
marginalize_indices = [ self.parameter_axes[par] for par in marginalize_parameters ]
self.reduced_likelihood = self.likelihood.sum(tuple(marginalize_indices))
#Normalize
self.reduced_likelihood /= self.reduced_likelihood.sum()
#Find the remaining parameters
self.remaining_parameters = self.parameter_axes.keys()
for par in marginalize_parameters:
self.remaining_parameters.pop(self.remaining_parameters.index(par))
#Sort the remaining parameter names so that the corresponding axes are in increasing order
self.remaining_parameters.sort(key=self.parameter_axes.get)
if len(self.remaining_parameters)==2:
self.extent = (self.min[self.remaining_parameters[0]],self.max[self.remaining_parameters[0]],self.min[self.remaining_parameters[1]],self.max[self.remaining_parameters[1]])
self.ax.set_xlim(self.extent[0],self.extent[1])
self.ax.set_ylim(self.extent[2],self.extent[3])
def marginal(self,parameter_name="w",levels=None):
"""
Marginalize the likelihood over all parameters but one
"""
assert hasattr(self,"likelihood"),"You have to load in the likelihood first!"
assert parameter_name in self.parameter_axes.keys(),"You are trying to compute a marginal likelihood of a parameter that does not exist!"
remaining_parameters = self.parameter_axes.keys()
remaining_parameters.pop(remaining_parameters.index(parameter_name))
remaining_parameter_axes = [ self.parameter_axes[par] for par in remaining_parameters ]
#Marginalize the likelihood
parameter_range = np.linspace(self.min[parameter_name],self.max[parameter_name],self.npoints[parameter_name])
marginal_likelihood = self.likelihood.sum(axis=tuple(remaining_parameter_axes))
#Compute the normalization
normalization = integrate.simps(marginal_likelihood,x=parameter_range)
marginal_likelihood /= normalization
#Compute the maximum
par_max = parameter_range[np.where(marginal_likelihood==marginal_likelihood.max())[0][0]]
#Compute also the contour extremes if levels
if levels is not None:
par_extremes = list()
for level in levels:
pL = _1d_level_values(parameter_range,marginal_likelihood,level=level,quantity=3)
par_extremes.append((pL[0],pL[-1]))
#Return the normalized single parameter likelihood, along with the contour extremes
return parameter_range,marginal_likelihood,par_max,par_extremes
else:
#Return the normalized single parameter likelihood
return parameter_range,marginal_likelihood,par_max
def slice(self,parameter_name="w",parameter_value=-1.0):
"""
Slice the likelihood cube by fixing one of the parameters
"""
assert hasattr(self,"likelihood"),"You have to load in the likelihood first!"
assert parameter_name in self.parameter_axes.keys(),"You are trying to get a slice with a parameter that does not exist!"
#Select the slice
slice_axis = self.parameter_axes[parameter_name]
slice_index = int((parameter_value - self.min[parameter_name]) / self.unit[parameter_name])
assert slice_index<self.npoints[parameter_name],"Out of bounds!"
#Get the slice
self.reduced_likelihood = np.split(self.likelihood,self.npoints[parameter_name],axis=slice_axis)[slice_index].squeeze()
#Normalize
self.reduced_likelihood /= self.reduced_likelihood.sum()
#Find the remaining parameters
self.remaining_parameters = self.parameter_axes.keys()
self.remaining_parameters.pop(self.remaining_parameters.index(parameter_name))
#Sort the remaining parameter names so that the corresponding axes are in increasing order
self.remaining_parameters.sort(key=self.parameter_axes.get)
self.extent = (self.min[self.remaining_parameters[0]],self.max[self.remaining_parameters[0]],self.min[self.remaining_parameters[1]],self.max[self.remaining_parameters[1]])
self.ax.set_xlim(self.extent[0],self.extent[1])
self.ax.set_ylim(self.extent[2],self.extent[3])
def show(self):
"""
Show the 2D marginalized likelihood
"""
assert self.reduced_likelihood.ndim == 2,"Can show only 2 dimensional likelihoods in the figure!!"
self.likelihood_image = self.ax.imshow(self.reduced_likelihood.transpose(),origin="lower",cmap=plt.cm.binary_r,extent=self.extent,aspect="auto")
self.colorbar = plt.colorbar(self.likelihood_image,ax=self.ax)
def labels(self,contour_label=None,fontsize=22,**kwargs):
"""
Put the labels on the plot
"""
if not hasattr(self,"remaining_parameters"):
self.remaining_parameters = self.parameter_axes.keys()
self.remaining_parameters.sort(key=self.parameter_axes.__getitem__)
self.ax.set_xlabel(self.parameter_labels[self.remaining_parameters[0]],fontsize=fontsize)
self.ax.set_ylabel(self.parameter_labels[self.remaining_parameters[1]],fontsize=fontsize)
self.ax.set_title(self.title_label,fontsize=fontsize)
if contour_label is not None:
self.ax.legend(self.ax.proxy,contour_label,**kwargs)
def point(self,coordinate_x,coordinate_y,color="green",marker="o"):
"""
Draws a point in parameter space at the specified physical coordinates
"""
if not hasattr(self,"remaining_parameters"):
self.remaining_parameters = self.parameter_axes.keys()
self.remaining_parameters.sort(key=self.parameter_axes.__getitem__)
#First translate the physical coordinates into pixels, to obtain the likelihood value
px = int((coordinate_x - self.min[self.remaining_parameters[0]]) / self.unit[self.remaining_parameters[0]])
py = int((coordinate_y - self.min[self.remaining_parameters[1]]) / self.unit[self.remaining_parameters[1]])
#Draw the point
self.ax.plot(coordinate_x,coordinate_y,color=color,marker=marker)
#Return the likelihood value at the specified point
if hasattr(self,"reduced_likelihood"):
return self.reduced_likelihood[px,py]
else:
return self.likelihood[px,py]
#################################################################################################
###############Find the likelihood values that correspond to the confidence contours#############
#################################################################################################
def getLikelihoodValues(self,levels,precision=0.001):
"""
Find the likelihood values that correspond to the selected p_values
"""
if hasattr(self,"reduced_likelihood"):
likelihood = self.reduced_likelihood
else:
likelihood = self.likelihood
self.original_p_values = levels
#Check sanity of input, likelihood must be normalized
np.testing.assert_approx_equal(likelihood.sum(),1.0)
#Initialize list of likelihood values
values = list()
p_values = list()
#Loop through levels to find corresponding likelihood values
for level in levels:
#Call the recursive bisection method
value = _nd_level_value(likelihood,level,likelihood.min(),likelihood.max(),precision=precision)
confidence_integral = likelihood[likelihood>value].sum()
#Append the found likelihood value to the output
values.append(value)
p_values.append(confidence_integral)
#Return
self.computed_p_values = p_values
self.likelihood_values = values
return values
######################################################################
##############Plot the contours on top of the likelihood##############
######################################################################
def plotContours(self,colors=["red","green","blue"],display_percentages=True,display_maximum=True,fill=False,**kwargs):
"""
Display the confidence likelihood contours
"""
if not hasattr(self,"likelihood_values"):
self.getLikelihoodValues(levels=[0.683,0.95,0.997])
assert len(colors) >= len(self.likelihood_values)
assert self.reduced_likelihood.ndim==2,"this routine plots 2D contours only!!"
extent = self.extent
likelihood = self.reduced_likelihood.transpose()
values = self.likelihood_values
unit_j = (extent[1] - extent[0])/(likelihood.shape[1] - 1)
unit_i = (extent[3] - extent[2])/(likelihood.shape[0] - 1)
#Build contour levels
fmt = dict()
for n,value in enumerate(values):
fmt[value] = "{0:.1f}%".format(self.computed_p_values[n]*100)
if fill:
self.contour = self.ax.contourf(likelihood,values,colors=colors,origin="lower",extent=extent,aspect="auto",**kwargs)
else:
self.contour = self.ax.contour(likelihood,values,colors=colors,origin="lower",extent=extent,aspect="auto",**kwargs)
#Contour labels
self.ax.proxy += [ plt.Rectangle((0,0),1,1,fc=color) for color in colors if color!=rc.func_globals["rcParams"]["axes.facecolor"] ]
if display_percentages:
plt.clabel(self.contour,fmt=fmt,inline=1,fontsize=9)
if display_maximum:
#Find the maximum
likelihood_max = likelihood.max()
imax,jmax = np.where(likelihood==likelihood_max)
#Plot scaling to physical values
self.ax.plot(extent[0] + np.arange(likelihood.shape[1])*unit_j,np.ones(likelihood.shape[1])*imax[0]*unit_i + extent[2],linestyle="--",color="green")
self.ax.plot(extent[0] + np.ones(likelihood.shape[0])*jmax[0]*unit_j,extent[2] + np.arange(likelihood.shape[0])*unit_i,linestyle="--",color="green")
##################################################################################################
#################Plot the likelihood marginalized over all parameters except one##################
##################################################################################################
def plotMarginal(self,parameter,levels=[0.684],colors=["red","blue","green"],alpha=0.5,fill=False):
"""
Plot the likelihood function marginalized over all parameters except one
"""
#Compute marginalized likelihood
p,l,par_max,par_extremes = self.marginal(parameter,levels=levels)
#Plot the likelihood
self.ax.plot(p,l)
#Plot the confidence contours
for n,level in enumerate(levels):
relevant_indices = np.where((p>=par_extremes[n][0])*(p<=par_extremes[n][1]))[0]
if fill:
self.ax.fill_between(p[relevant_indices],np.ones_like(relevant_indices)*l.min(),l[relevant_indices],facecolor=colors[n],alpha=alpha)
else:
self.ax.plot(np.ones(100)*p[relevant_indices[0]],np.linspace(l.min(),l[relevant_indices[0]],100),color=colors[n])
self.ax.plot(np.ones(100)*p[relevant_indices[-1]],np.linspace(l.min(),l[relevant_indices[-1]],100),color=colors[n])
#Labels
self.ax.set_xlabel(self.parameter_labels[parameter],fontsize=22)
self.ax.set_ylabel(r"$\mathcal{L}$"+"$($"+self.parameter_labels[parameter]+"$)$",fontsize=22) | [
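# A minimal usage sketch (added for illustration; not part of the original
# module). The parameter names follow the getLikelihood() defaults; "like" is
# a hypothetical (50, 50, 50) numpy likelihood cube, and the unit values are
# (max - min) / (num_points - 1) as in getUnitsFromOptions():
#
#	plot = ContourPlot()
#	plot.getLikelihood(like, parameter_axes={"Omega_m": 0, "w": 1, "sigma8": 2})
#	plot.setUnits("Omega_m", 0.1, 0.9, 0.8 / 49)
#	plot.setUnits("w", -2.0, 0.0, 2.0 / 49)
#	plot.setUnits("sigma8", 0.5, 1.5, 1.0 / 49)
#	plot.marginalize("sigma8")  # reduce to the (Omega_m, w) plane
#	plot.getLikelihoodValues(levels=[0.683, 0.95])
#	plot.plotContours()
#	plot.labels()
#	plot.savefig("contours.png")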
"apetri@phys.columbia.edu"
] | apetri@phys.columbia.edu |
419d60b4e47df8357a6fb1aee89231feae002341 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /otp/src/friends/PlayerFriendsManagerUD.py | 1e3b921c511f1af8fce11c8f5cf0509f50b862a3 | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 18,840 | py | from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
from direct.task.Task import Task
from otp.otpbase import OTPGlobals
from otp.ai import AIMsgTypes
from otp.uberdog.RejectCode import RejectCode
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.friends.FriendInfo import FriendInfo
from otp.switchboard.sbWedge import sbWedge
from otp.otpbase import OTPLocalizerEnglish as localizer
import random
#--------------------------------------------------
class PlayerFriendsManagerUD(DistributedObjectGlobalUD,sbWedge):
"""
The Player Friends Manager is a global object.
This object handles client requests on player-level (as opposed to avatar-level) friends.
See Also:
"otp/src/friends/AvatarFriendsManager.py"
"otp/src/friends/PlayerFriendsManager.py"
"pirates/src/friends/PiratesFriendsList.py"
"otp/src/configfiles/otp.dc"
"pirates/src/configfiles/pirates.dc"
"""
notify = directNotify.newCategory('PlayerFriendsManagerUD')
def __init__(self, air, sbListenPort=8888, wedgeName=None, locationName="OTP"):
assert self.notify.debugCall()
DistributedObjectGlobalUD.__init__(self, air)
self.sbName = wedgeName
self.locationName = locationName
if self.sbName is None:
self.sbName = "OTP%d" % random.randint(0,99999)
self.everyoneIsFriends = uber.config.GetBool("everyone-is-friends",0)
self.sbHost = uber.sbNSHost
self.sbPort = uber.sbNSPort
self.sbListenPort = uber.sbListenPort
self.clHost = uber.clHost
self.clPort = uber.clPort
self.allowUnfilteredChat = uber.allowUnfilteredChat
self.bwDictPath = uber.bwDictPath
#self.avatarId2FriendsList = {}
self.playerId2Invitations = {}
#self.avatarId2Name = {}
#self.avatarId2Info = {}
#self.avatarId2Account = {}
#self.isAvatarOnline = {}
#self.isAccountOnline = {}
#self.accountId2Info = {}
#self.accountId2Friends = {}
self.accept("avatarOnlinePlusAccountInfo", self.avatarOnlinePlusAccountInfo, [])
self.accept("avatarOffline", self.avatarOffline, [])
sbWedge.__init__(self,wedgeName=self.sbName,
nsHost=self.sbHost,
nsPort=self.sbPort,
listenPort=self.sbListenPort,
clHost=self.clHost,
clPort=self.clPort,
allowUnfilteredChat=self.allowUnfilteredChat,
bwDictPath=self.bwDictPath)
def CheckSBWedge(task):
self.handleRequests(0)
return Task.cont
uber.taskMgr.add(CheckSBWedge,'checkSBwedge')
def announceGenerate(self):
assert self.notify.debugCall()
DistributedObjectGlobalUD.announceGenerate(self)
self.sendUpdateToChannel(
AIMsgTypes.CHANNEL_CLIENT_BROADCAST, "online", [])
self.sendUpdateToChannel(
AIMsgTypes.OTP_CHANNEL_AI_AND_UD_BROADCAST, "online", [])
def delete(self):
assert self.notify.debugCall()
self.ignoreAll()
DistributedObjectGlobalUD.delete(self)
#----------------------------------
def avatarOnline(self,avatarId,avatarType):
pass
def avatarOnlinePlusAccountInfo(self,avatarId,accountId,playerName,
playerNameApproved,openChatEnabled,
createFriendsWithChat,chatCodeCreation):
assert self.notify.debugCall()
if accountId in [-1, 0]:
return
self.log.debug("Account online. Info: %d, %d, %s, %s, %s, %s, %s"%(avatarId,
accountId,
playerName,
playerNameApproved,
openChatEnabled,
createFriendsWithChat,
chatCodeCreation))
if playerName == "Guest":
accountInfo = FriendInfo(avatarName="%d"%avatarId,
playerName="%s%d" % (playerName,accountId),
onlineYesNo=1,
openChatEnabledYesNo=openChatEnabled,
avatarId=avatarId,
location=self.locationName,
sublocation="")
else:
accountInfo = FriendInfo(avatarName="%d"%avatarId,
playerName=playerName,
onlineYesNo=1,
openChatEnabledYesNo=openChatEnabled,
avatarId=avatarId,
location=self.locationName,
sublocation="")
# Don't have my avatar name yet, asyncrequest it
context = self.air.allocateContext()
dclassName = "DistributedAvatarUD"
self.air.contextToClassName[context] = dclassName
self.acceptOnce("doFieldResponse-%s"%context,self.recvAvatarName,[accountId,accountInfo])
self.air.queryObjectField(dclassName,"setName",avatarId,context)
def recvAvatarName(self,accountId,accountInfo,context,name):
self.notify.debug("avatarName fetched for account %d: %s" % (accountId,name[0]))
accountInfo.avatarName = name[0]
# asynchronous request to SB which will tell everyone we're here and fetch our friends
if self.sbConnected:
self.enterPlayer(accountId,accountInfo)
def recvFriendsUpdate(self,accountId,accountInfo,friends):
self.log.debug("recvFriendsUpdate on %d -> %s"%(accountId,str(friends)))
for friend in friends:
friendId = friend[0]
friendInfo = friend[1]
accountInfo.timestamp = 0
friendInfo.timestamp = 0
accountInfo.openChatFriendshipYesNo = friendInfo.openChatFriendshipYesNo
accountInfo.understandableYesNo = friendInfo.openChatFriendshipYesNo or \
(friendInfo.openChatEnabledYesNo and \
accountInfo.openChatEnabledYesNo)
friendInfo.understandableYesNo = friendInfo.openChatFriendshipYesNo or \
(friendInfo.openChatEnabledYesNo and \
accountInfo.openChatEnabledYesNo)
if accountInfo.onlineYesNo:
self.sendUpdateToChannel((3L<<32)+accountId,
"updatePlayerFriend",
[friendId,friendInfo,0])
self.sendUpdateToChannel((3L<<32)+friend[0],
"updatePlayerFriend",
[accountId,accountInfo,0])
@report(types = ['args'], dConfigParam = 'orphanedavatar')
def avatarOffline(self,avatarId):
assert self.notify.debugCall()
self.exitAvatar(avatarId)
#----------------------------------------------------------------------
# Functions called by the client
def requestInvite(self, senderId, otherPlayerId, secretYesNo=True):
assert self.notify.debugCall()
self.sendOpenInvite(senderId,otherPlayerId,secretYesNo)
def requestDecline(self, senderId, otherId):
"""
Call this function to retract an invite to or decline an invite from another player.
"""
self.sendDeclineInvite(senderId,otherId)
def requestRemove(self, senderId, otherAccountId):
"""
Call this function if you want to remove an existing friend from your friends list.
otherAccountId may be online or offline.
"""
accountId = senderId
self.air.writeServerEvent('requestFriendRemove', accountId, '%s' % otherAccountId)
# update DISL friends list through Switchboard
self.removeFriendship(accountId,otherAccountId)
def recvInviteNotice(self, inviteeId, inviterId, inviterAvName):
self.sendUpdateToChannel((3L<<32)+inviteeId, "invitationFrom", [inviterId,inviterAvName])
def recvInviteRetracted(self, inviteeId, inviterId):
self.sendUpdateToChannel((3L<<32)+inviteeId, "retractInvite", [inviterId])
def recvInviteRejected(self, inviterId, inviteeId, reason):
self.sendUpdateToChannel((3L<<32)+inviterId, "rejectInvite", [inviteeId, reason])
def recvFriendshipRemoved(self,accountId,otherAccountId):
self.notify.debug("recvFriendshipRemoved on %d,%d"%(accountId,otherAccountId))
self.sendUpdateToChannel((3L<<32)+accountId,"removePlayerFriend",[otherAccountId])
self.sendUpdateToChannel((3L<<32)+otherAccountId,"removePlayerFriend",[accountId])
# SECRETS
def requestUnlimitedSecret(self,senderId):
print "# got unlimited secret request"
self.sendSecretRequest(senderId)
def requestLimitedSecret(self,senderId,parentUsername,parentPassword):
print "# got limited secret request"
self.sendSecretRequest(senderId,parentUsername,parentPassword)
def requestUseUnlimitedSecret(self,senderId,secret):
self.sendSecretRedeem(senderId,secret)
def requestUseLimitedSecret(self,senderId,secret,parentUsername,parentPassword):
self.sendSecretRedeem(senderId,secret,parentUsername,parentPassword)
def recvAddFriendshipError(self,playerId,error):
self.sendUpdateToChannel((3L<<32)+playerId,"rejectInvite",[error])
def recvSecretGenerated(self,playerId,secret):
self.sendUpdateToChannel((3L<<32)+playerId,"secretResponse",[secret])
def recvSecretRequestError(self,playerId,error):
self.sendUpdateToChannel((3L<<32)+playerId,"rejectSecret",[error])
def recvSecretRedeemError(self,playerId,error):
self.sendUpdateToChannel((3L<<32)+playerId,"rejectUseSecret",[error])
# WHISPERS
def whisperTo(self,senderId,playerId,msg):
assert self.sbConnected
self.log.debug("PFMUD whisper - %d to %d: %s" % (senderId,playerId,msg))
if senderId == -1 or playerId == -1:
return
if self._validateChatMessage(playerId,senderId,msg):
self.sendWhisper(playerId,senderId,msg)
def whisperWLTo(self,senderId,playerId,msg):
assert self.sbConnected
self.log.debug("PFMUD WLwhisper - %d to %d: %s" % (senderId,playerId,msg))
if senderId == -1 or playerId == -1:
return
# Validation being handled by client agents, do not need
#if self._validateChatMessage(playerId,senderId,msg):
self.sendWLWhisper(playerId,senderId,msg)
def whisperSCTo(self,senderId,playerId,msgId):
assert self.sbConnected
self.log.debug("PFMUD SCwhisper - %d to %d: %s" % (senderId,playerId,msgId))
if senderId == -1 or playerId == -1:
return
msgText = self._translateWhisper(msgId)
if msgText is None:
self.log.security("Invalid SC index: %d to %d: %d" % (senderId,playerId,msgId))
return
if self._validateChatMessage(playerId,senderId,msgText):
self.sendSCWhisper(playerId,senderId,msgText)
def whisperSCCustomTo(self,senderId,playerId,msgId):
assert self.sbConnected
self.log.debug("PFMUD SCCustomwhisper - %d to %d: %s" % (senderId,playerId,msgId))
if senderId == -1:
return
msgText = self._translateWhisperCustom(msgId)
if msgText is None:
self.log.security("Invalid SC custom index: %d to %d: %d" % (senderId,playerId,msgId))
return
if self._validateChatMessage(playerId,senderId,msgText):
self.sendSCWhisper(playerId,senderId,msgText)
def whisperSCEmoteTo(self,senderId,playerId,msgId):
assert self.sbConnected
self.log.debug("PFMUD SCEmotewhisper - %d to %d: %s" % (senderId,playerId,msgId))
if senderId == -1:
return
msgText = self._translateWhisperEmote(msgId)
if msgText is None:
self.log.security("Invalid SC emote index: %d to %d: %d" % (senderId,playerId,msgId))
return
# XXX Temporarily broken--where does the avatarname come from if we're stateless?
# Stick the sender's avatar name into the emote message!
#senderInfo = self.accountId2Info.get(senderId,None)
#if senderInfo is not None:
# msgText = msgText % (senderInfo.avatarName)
if self._validateChatMessage(playerId,senderId,msgText):
self.sendSCWhisper(playerId,senderId,msgText)
def whisperSCQuestTo(self,senderId,playerId,msgData):
'''
Quest messages. Uses product-specific _translateWhisperQuest that should be overridden
'''
assert self.sbConnected
self.log.debug("PFMUD SCQuestwhisper - %d to %d: %s" % (senderId,playerId,msgData))
if senderId == -1:
return
msgText = self._translateWhisperQuest(msgData)
if msgText is None:
self.log.security("Invalid SC quest data: %d to %d: %d" % (senderId,playerId,msgData))
return
if self._validateChatMessage(playerId,senderId,msgText):
self.sendSCWhisper(playerId,senderId,msgText)
#WEDGE -> UD functions
def recvWhisper(self,recipientId,senderId,msgText):
self.log.debug("Received open whisper from %d to %d: %s" % (senderId,recipientId,msgText))
self.sendUpdateToChannel((3L<<32)+recipientId,"whisperFrom",[senderId,msgText])
def recvWLWhisper(self,recipientId,senderId,msgText):
self.log.debug("Received WLwhisper from %d to %d: %s" % (senderId,recipientId,msgText))
self.sendUpdateToChannel((3L<<32)+recipientId,"whisperWLFrom",[senderId,msgText])
def recvSCWhisper(self,recipientId,senderId,msgText):
self.log.debug("Received SCwhisper from %d to %d: %s" % (senderId,recipientId,msgText))
self.sendUpdateToChannel((3L<<32)+recipientId,"whisperSCFrom",[senderId,msgText])
def recvEnterPlayer(self,playerId,playerInfo,friendsList):
self.log.debug("Saw player %d enter."%playerId)
self.log.debug("friends list: %s"%friendsList)
for friend in friendsList:
self.notify.debug("update to %d saying that %d is online" % (friend,playerId))
friendInfo = friendsList[friend]
playerInfo.openChatFriendshipYesNo = friendInfo.openChatFriendshipYesNo
playerInfo.understandableYesNo = friendInfo.openChatFriendshipYesNo or \
(friendInfo.openChatEnabledYesNo and \
playerInfo.openChatEnabledYesNo)
self.sendUpdateToChannel((3L<<32)+friend,
"updatePlayerFriend",
[playerId,playerInfo,0])
def recvExitPlayer(self,playerId,playerInfo,friendsList):
self.log.debug("Saw player %d exit."%playerId)
self.log.debug("friends list: %s"%friendsList)
for friend in friendsList:
self.notify.debug("update to %d saying that %d is offline" % (friend,playerId))
friendInfo = friendsList[friend]
playerInfo.openChatFriendshipYesNo = friendInfo.openChatFriendshipYesNo
playerInfo.understandableYesNo = friendInfo.openChatFriendshipYesNo or \
(friendInfo.openChatEnabledYesNo and \
playerInfo.openChatEnabledYesNo)
self.sendUpdateToChannel((3L<<32)+friend,
"updatePlayerFriend",
[playerId,playerInfo,0])
# helper functions
def _getFriendView(self, viewerId, friendId, info=None):
if info is None:
info = self.accountId2Info[friendId]
if self.accountId2Friends.has_key(viewerId):
if [friendId,True] in self.accountId2Friends[viewerId]:
info.openChatFriendshipYesNo = 1
else:
info.openChatFriendshipYesNo = 0
elif self.accountId2Friends.has_key(friendId):
if [viewerId,True] in self.accountId2Friends[friendId]:
info.openChatFriendshipYesNo = 1
else:
info.openChatFriendshipYesNo = 0
else:
info.openChatFriendshipYesNo = 0
if self._whisperAllowed(viewerId,friendId):
info.understandableYesNo = 1
else:
info.understandableYesNo = 0
info.timestamp = 0
return info
def _whisperAllowed(self, fromPlayer, toPlayer):
fromFriends = self.accountId2Friends.get(fromPlayer)
if fromFriends:
if [toPlayer,True] in fromFriends:
return True
elif [toPlayer,False] in fromFriends:
fromInfo = self.accountId2Info.get(fromPlayer)
toInfo = self.accountId2Info.get(toPlayer)
if toInfo and fromInfo.openChatEnabledYesNo and toInfo.openChatEnabledYesNo:
return True
else:
return False
else:
return False
def _whisperSCAllowed(self, fromPlayer, toPlayer):
fromFriends = self.accountId2Friends.get(fromPlayer)
if fromFriends:
if [toPlayer,True] in fromFriends or [toPlayer,False] in fromFriends:
return True
else:
return False
else:
return False
def _translateWhisper(self,msgId):
return localizer.SpeedChatStaticText.get(msgId)
def _translateWhisperCustom(self,msgId):
return localizer.CustomSCStrings.get(msgId)
def _translateWhisperEmote(self,msgId):
if msgId >= len(localizer.EmoteWhispers) or msgId < 0:
return None
else:
return localizer.EmoteWhispers[msgId]
def _translateWhisperQuest(self,msgData):
'''
Translate quest SC data to a text message.
Product-specific and should be overridden!
'''
return None
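# A hypothetical product-specific override (added for illustration; not part of
# the original module). _translateWhisperQuest is the documented extension
# point, so a game-specific subclass would map its quest data to text, e.g.:
#
#   class PiratesPlayerFriendsManagerUD(PlayerFriendsManagerUD):
#       def _translateWhisperQuest(self, msgData):
#           return localizer.QuestWhispers.get(msgData)  # hypothetical table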
| [
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
55974c3062c65bc035880e96b2a781aca322528e | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/compute/url_maps/describe.py | a57dae6a7042b32f18ee5d7c134c3a8aafc00b96 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 2,596 | py | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing url maps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.url_maps import flags
from googlecloudsdk.command_lib.compute.url_maps import url_maps_utils
def _DetailedHelp():
return {
'brief':
'Describe a URL map.',
'DESCRIPTION':
"""\
*{command}* displays all data associated with a URL map in a
project.
""",
}
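# An illustrative invocation of the resulting command (added as a note; the
# exact flag set comes from flags.UrlMapArgument(), so the scope flag below is
# an assumption):
#
#   gcloud compute url-maps describe MY-URL-MAP --region=us-central1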
def _Run(args, holder, url_map_arg):
"""Issues requests necessary to describe URL maps."""
client = holder.client
url_map_ref = url_map_arg.ResolveAsResource(
args,
holder.resources,
default_scope=compute_scope.ScopeEnum.GLOBAL,
scope_lister=compute_flags.GetDefaultScopeLister(client))
if url_maps_utils.IsRegionalUrlMapRef(url_map_ref):
service = client.apitools_client.regionUrlMaps
request = client.messages.ComputeRegionUrlMapsGetRequest(
**url_map_ref.AsDict())
else:
service = client.apitools_client.urlMaps
request = client.messages.ComputeUrlMapsGetRequest(**url_map_ref.AsDict())
return client.MakeRequests([(service, 'Get', request)])[0]
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class Describe(base.DescribeCommand):
"""Describe a URL map."""
detailed_help = _DetailedHelp()
URL_MAP_ARG = None
@classmethod
def Args(cls, parser):
cls.URL_MAP_ARG = flags.UrlMapArgument()
cls.URL_MAP_ARG.AddArgument(parser, operation_type='describe')
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
return _Run(args, holder, self.URL_MAP_ARG)
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
4647b56d18f408edad9a00c6ac02a82e34d11f08 | 49fa43ae11cd06f68efb65a9f59add168b205f29 | /python/306_additive-number/additiveNumber.py | 2a5375a84a18bf7b1d4fdf22a0ced35681321de9 | [] | no_license | kfrancischen/leetcode | 634510672df826a2e2c3d7cf0b2d00f7fc003973 | 08500c39e14f3bf140db82a3dd2df4ca18705845 | refs/heads/master | 2021-01-23T13:09:02.410336 | 2019-04-17T06:01:28 | 2019-04-17T06:01:28 | 56,357,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | import itertools
class Solution(object):
def isAdditiveNumber(self, num):
"""
:type num: str
:rtype: bool
"""
n = len(num)
for i, j in itertools.combinations(range(1, n), 2):
a, b = num[:i], num[i:j]
if a != str(int(a)) or b != str(int(b)):
continue
while j < n:
c = str(int(a) + int(b))
if not num.startswith(c, j):
break
j += len(c)
a, b = b, c
if j == n:
return True
return False
mytest = Solution()
num = "0235813"
print mytest.isAdditiveNumber(num)
| [
"kfrancischen@gmail.com"
] | kfrancischen@gmail.com |
30031f16272956ac941b3b7060bb3e05b133017f | 5210993914691c70076be979aa5c57c33d5d3bc4 | /Programming101-3/Week_1/The_Final_Round/reduce_file_path.py | 5b9b39aeefb744abd25cd76f7c27dce970610b97 | [] | no_license | presian/HackBulgaria | d29f84ab7edc85a4d8dfbf055def7d0be783539e | 8bc95bb31daeb1f5a313d25b928f505013f5f0b0 | refs/heads/master | 2021-01-10T04:38:05.759005 | 2015-10-15T07:05:21 | 2015-10-15T07:05:21 | 36,889,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | def double_slash_remover(path):
return path.replace("//", "/")
def string_splitter(path):
return path.split("/")
def empty_string_remover(path_entities):
return [x for x in path_entities if x != ""]
def point_checker(path_entity):
if path_entity != ".." and path_entity != ".":
return True
return False
def result_maker(path_entities):
result = []
for i in range(0, len(path_entities) - 1):
if path_entities[i + 1] != ".." and point_checker(path_entities[i]):
result.append(path_entities[i])
if len(result) > 0:
if point_checker(path_entities[-1]):
result.append(path_entities[-1])
return "/" + "/".join(result)
def reduce_file_path(path):
path = double_slash_remover(path)
path_entities = string_splitter(path)
path_entities = empty_string_remover(path_entities)
return result_maker(path_entities)
def main():
print(reduce_file_path("/"))
print(reduce_file_path("/srv/../"))
print(reduce_file_path("/srv///www/htdocs/wtf/"))
print(reduce_file_path("/srv/www/htdocs/wtf"))
print(reduce_file_path("/srv/./././././"))
print(reduce_file_path("/etc//wtf/"))
print(reduce_file_path("/etc/../etc/../etc/../"))
print(reduce_file_path("//////////////"))
print(reduce_file_path("/../"))
print(reduce_file_path(
"/home//radorado/code/./hackbulgaria/week0/../"))
if __name__ == '__main__':
main()
| [
"presiandanailov@gmail.com"
] | presiandanailov@gmail.com |
eeb5e3da57cd6e9d5e0b1d2daa31670aea57a886 | 0856f65fdd2c1bd305860eeebd9e51b5d1d1f017 | /xinshuo_images/test/test_image_processing.py | bbecf5ef608a8fe8f08dae2d4e347546dcbd179b | [] | no_license | Fuyaoyao/xinshuo_toolbox | 8a313416b4ce82188015421558d6a89ed526b81e | 9049003d9f8e05ece4ef19ae0beb42b5f9c5731b | refs/heads/master | 2021-08-23T12:15:39.864095 | 2017-12-04T21:48:24 | 2017-12-04T21:48:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | # Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
import os, sys
import pytest
import __init__paths__
from image_processing import *
from check import *
def test_imagecoor2cartesian_center():
image_shape = (480, 640)
forward, backward = imagecoor2cartesian_center(image_shape)
assert isfunction(forward)
assert isfunction(backward)
test_pts = (0, 0)
centered_pts = forward(test_pts)
assert centered_pts == (-320, 240)
back_pts = backward(centered_pts)
assert back_pts == (0, 0)
test_pts = (639, 479)
centered_pts = forward(test_pts)
assert centered_pts == (319, -239)
back_pts = backward(centered_pts)
assert back_pts == (639, 479)
test_pts = (0, 479)
centered_pts = forward(test_pts)
assert centered_pts == (-320, -239)
back_pts = backward(centered_pts)
assert back_pts == (0, 479)
test_pts = (639, 0)
centered_pts = forward(test_pts)
assert centered_pts == (319, 240)
back_pts = backward(centered_pts)
assert back_pts == (639, 0)
if __name__ == '__main__':
pytest.main([__file__]) | [
"xinshuo.weng@gmail.com"
] | xinshuo.weng@gmail.com |
28c2112aab9a451142950150c912a0a342957118 | b4180ca553c16fb2b2549d95ce7a3397ad2910ba | /mysite/venv/bin/wheel | ae2e3d8317722113890caa3d397ddf4cfd48ca1f | [] | no_license | taixingbi/django-tatch | c5ab9d676b67c613199bf2f8daed02f27f8859c8 | 6958192260e73bf9d78c706a1309629d711dce0b | refs/heads/master | 2020-04-08T04:52:47.781822 | 2018-11-28T01:39:18 | 2018-11-28T01:39:18 | 159,035,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/Users/h/Desktop/boostrap-django-master/mysite/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"bitaihang@gmail.com"
] | bitaihang@gmail.com | |
cfeb827afdfb015cd4f2721eb31338c72a286d65 | 871690900c8da2456ca2818565b5e8c34818658e | /programmers/level3/72415.py | 481ce98547e56515643755fe9c3e7c61bbc6a4e2 | [] | no_license | kobeomseok95/codingTest | 40d692132e6aeeee32ee53ea5d4b7af8f2b2a5b2 | d628d72d9d0c1aef2b3fa63bfa9a1b50d47aaf29 | refs/heads/master | 2023-04-16T09:48:14.916659 | 2021-05-01T11:35:42 | 2021-05-01T11:35:42 | 311,012,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,103 | py | from collections import deque
from itertools import permutations
def ctrl(board, y0, x0, dir_y, dir_x):
    l = 0  # farthest reachable step; stays 0 if the first step is already off-board
    for i in range(1, 4):
if 0 <= (y1 := y0 + dir_y * i) < 4 and 0 <= (x1 := x0 + dir_x * i) < 4:
if board[y1][x1] != 0:
return (y1, x1)
l = i
return (y0 + dir_y * l, x0 + dir_x * l)
def move(board, start, end):
dy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]
dist = [[6 for _ in range(4)] for _ in range(4)]
q = deque([(start, 0)])
while q:
[y, x], d = q.popleft()
# 큐에서 나온 좌표가 최소 거리인 상황에 이어서 최단 경로를 구해주어야 한다.
# if절에서 최단 경로가 아니라면 거리를 구할 이유가 없다. 최단 경로가 아니기 때문이다.
if dist[y][x] > d:
dist[y][x] = d
for i in range(4):
ny, nx = y + dy[i], x + dx[i]
if 0 <= ny < 4 and 0 <= nx < 4:
q.append(((ny, nx), d + 1))
q.append((ctrl(board, y, x, dy[i], dx[i]), d + 1))
return dist[end[0]][end[1]]
def solution(board, r, c):
location = {k: [] for k in range(1, 7)}
for i in range(4):
for j in range(4):
if board[i][j]:
location[board[i][j]].append((i, j))
answer = int(1e9)
for per in permutations(filter(lambda v: v, location.values())):
dist = 0
cursors = [(r, c)]
stage = [[v for v in w] for w in board]
for xy1, xy2 in per:
# 해당 그림까지의 거리, 목적지
vs = [(move(stage, cursor, xy1) + move(stage, xy1, xy2), xy2) for cursor in cursors] + \
[(move(stage, cursor, xy2) + move(stage, xy2, xy1), xy1) for cursor in cursors]
# 이동처리
stage[xy1[0]][xy1[1]] = stage[xy2[0]][xy2[1]] = 0
dist += 2 + (mvn := min(vs)[0])
# 커서가 될 수 있는 위치, 최소 거리여야 한다.
cursors = [pos for d, pos in vs if d == mvn]
answer = min(answer, dist)
return answer | [
"37062337+kobeomseok95@users.noreply.github.com"
] | 37062337+kobeomseok95@users.noreply.github.com |
280e1dd2cbe2dd3cda6db54f063f203eaddd74d7 | 684f15ab9c10a2c2e378c04009afa33166e049f2 | /cifar_eval.py | 51f6ef0e2248b10c16b3971a8de0b5bfae249f9f | [
"MIT"
] | permissive | sx14/image_classification_imbalance | ac410e88164912ef6bf47a87041d60934becfc1f | 49869e12e9ca424496f7c137b7026bea79fa2f72 | refs/heads/master | 2023-01-09T11:08:30.574937 | 2020-11-05T14:48:52 | 2020-11-05T14:48:52 | 309,088,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,679 | py | import argparse
import os
import random
import time
import warnings
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
from tensorboardX import SummaryWriter
from sklearn.metrics import confusion_matrix
from utils import *
from imbalance_cifar import IMBALANCECIFAR10, IMBALANCECIFAR100
from losses import LDAMLoss, FocalLoss
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch Cifar Training')
parser.add_argument('--dataset', default='cifar10', help='dataset setting')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet32',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet32)')
parser.add_argument('--loss_type', default="CE", type=str, help='loss type')
parser.add_argument('--imb_type', default="exp", type=str, help='imbalance type')
parser.add_argument('--imb_factor', default=0.01, type=float, help='imbalance factor')
parser.add_argument('--train_rule', default='None', type=str, help='data sampling strategy for train loader')
parser.add_argument('--rand_number', default=0, type=int, help='fix random number for data sampling')
parser.add_argument('--exp_str', default='0', type=str, help='number to indicate which experiment it is')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N',
help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=2e-4, type=float,
                    metavar='W', help='weight decay (default: 2e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=0, type=int,
                    help='GPU id to use.')
parser.add_argument('--root_log',type=str, default='log')
parser.add_argument('--root_model', type=str, default='checkpoint')
best_acc1 = 0
def main():
args = parser.parse_args()
args.store_name = '_'.join([args.dataset, args.arch, args.loss_type, args.train_rule, args.imb_type, str(args.imb_factor), args.exp_str])
prepare_folders(args)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
ngpus_per_node = torch.cuda.device_count()
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for evaluating".format(args.gpu))
# create model
print("=> creating model '{}'".format(args.arch))
num_classes = 100 if args.dataset == 'cifar100' else 10
use_norm = True if args.loss_type == 'LDAM' else False
model = models.__dict__[args.arch](num_classes=num_classes, use_norm=use_norm)
load_best_checkpoint(args, model)
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
# Data loading code
transform_val = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.dataset == 'cifar10':
val_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_val)
elif args.dataset == 'cifar100':
val_dataset = datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_val)
else:
warnings.warn('Dataset is not listed')
return
# evaluate on validation set
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=100, shuffle=False,
num_workers=args.workers, pin_memory=True)
validate(val_loader, model, args)
def validate(val_loader, model, args, flag='val'):
batch_time = AverageMeter('Time', ':6.3f')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
# switch to evaluate mode
model.eval()
all_preds = []
all_targets = []
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
_, pred = torch.max(output, 1)
all_preds.extend(pred.cpu().numpy())
all_targets.extend(target.cpu().numpy())
cf = confusion_matrix(all_targets, all_preds).astype(float)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
cls_acc = cls_hit / cls_cnt
output = ('{flag} Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(flag=flag, top1=top1, top5=top5))
out_cls_acc = '%s Class Accuracy: %s'%(flag,(np.array2string(cls_acc, separator=',', formatter={'float_kind':lambda x: "%.3f" % x})))
print(output)
print(out_cls_acc)
return top1.avg
if __name__ == '__main__':
main() | [
"1059363093@qq.com"
] | 1059363093@qq.com |
99e7db6e7ff1635636e44793f849c84b4c76d03e | bf13574ef4af42b33ee931d2be0ccf862bd297d4 | /util/weather_analysis.py | ac9b30b57963557883a372fbcb09b35fa9534082 | [] | no_license | Futureword123456/WeatherRecommendationSystem | 0f032cb260415a7786b8c4a3c0b801e6377e7125 | 14489fe27eed985d950b8571a205d56612afc8a4 | refs/heads/master | 2023-04-20T23:52:27.565091 | 2021-05-16T08:52:58 | 2021-05-16T08:52:58 | 285,297,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,851 | py | # 该模块用于天气数据分析的相关脚本
from pandas import DataFrame
import pandas as pd
import util
from region.models import Region
from util.normalization import sigmoid, weather_type_normalization, wind_power_normalization
from weather_analysis1.settings import OPTIMUM_MAX_DEGREE, OPTIMUM_MIN_DEGREE, WEIGHTS_DICT
from weather_data.models import WeatherData, WeatherResult
# Fetch the next six days of weather data for a region; returns a QuerySet of dicts
def get_region_weather_data(region: Region):
return WeatherData.objects.filter(region=region).order_by('-created')[:6].values('day_weather', 'day_weather_code',
'day_wind_power', 'max_degree',
'min_degree')
# Fetch the dates corresponding to a region's weather data; returns a list of tuples
def get_region_weather_date(region: Region) -> list:
return WeatherData.objects.filter(region=region).order_by('-created')[:6].values_list('time')
# Assemble a region's weather data into a DataFrame
def get_region_weather_dataframe(region: Region) -> DataFrame:
data = get_region_weather_data(region)
date = get_region_weather_date(region)
return pd.DataFrame(data, index=date)
# Normalize a region's weather data
def normalize_weather_data(region: Region) -> DataFrame:
df = get_region_weather_dataframe(region)
new_df = pd.DataFrame()
new_df['max_degree'] = 1.5 - (df['max_degree'] - OPTIMUM_MAX_DEGREE).abs().apply(sigmoid)
new_df['min_degree'] = 1.5 - (df['min_degree'] - OPTIMUM_MIN_DEGREE).abs().apply(sigmoid)
new_df['day_weather_code'] = df['day_weather_code'].apply(weather_type_normalization)
new_df['day_wind_power'] = df['day_wind_power'].apply(wind_power_normalization)
return new_df
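# Worked intuition for the normalization above (added comment): sigmoid(0) is
# 0.5, so a forecast hitting OPTIMUM_MAX_DEGREE exactly scores 1.5 - 0.5 = 1.0
# (the best case), and the score decays toward 0.5 as the temperature gap grows.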
# Compute the recommendation score for a given city (region)
def caculate_region_result(region: Region):
try:
df = normalize_weather_data(region)
series = pd.Series(WEIGHTS_DICT)
return (df @ series).sum()
except:
return -1
# Compute and save the recommendation results for the cities to be displayed
def save_display_region_result():
region_list = Region.objects.filter(is_display=True)
for r in region_list:
WeatherResult.objects.create(region=r, result=caculate_region_result(r))
print("%s的结果保存成功!" % r.name)
if __name__ == '__main__':
region = Region.objects.get(name='贵阳市')
print(get_region_weather_data(region))
print(get_region_weather_date(region))
df = pd.DataFrame(get_region_weather_data(region))
df = get_region_weather_dataframe(region)
print(df)
# new = normalize_weather_data(region)
# print(new)
# print(caculate_region_result(region))
save_display_region_result()
| [
"2635681517@qq.com"
] | 2635681517@qq.com |
b613d93b3255137c59d4ada588585fcc82e3c204 | fa93e53a9eee6cb476b8998d62067fce2fbcea13 | /devel/.private/pal_wifi_localization_msgs/lib/python2.7/dist-packages/pal_wifi_localization_msgs/srv/_GetWifiMap.py | 3edfc46c97ca38aeb6d0d2520789e2744c3d9fda | [] | no_license | oyetripathi/ROS_conclusion_project | 2947ee2f575ddf05480dabc69cf8af3c2df53f73 | 01e71350437d57d8112b6cec298f89fc8291fb5f | refs/heads/master | 2023-06-30T00:38:29.711137 | 2021-08-05T09:17:54 | 2021-08-05T09:17:54 | 392,716,311 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,324 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pal_wifi_localization_msgs/GetWifiMapRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetWifiMapRequest(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "pal_wifi_localization_msgs/GetWifiMapRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# Get the map as a wifi_map
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetWifiMapRequest, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pal_wifi_localization_msgs/GetWifiMapResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import genpy
import geometry_msgs.msg
import nav_msgs.msg
import pal_wifi_localization_msgs.msg
import std_msgs.msg
class GetWifiMapResponse(genpy.Message):
_md5sum = "4273c0e2a4f41c0c71c07a4fee60fcee"
_type = "pal_wifi_localization_msgs/GetWifiMapResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """pal_wifi_localization_msgs/WifiSignalMap map
================================================================================
MSG: pal_wifi_localization_msgs/WifiSignalMap
# This represents a 2-D grid map, in which each cell represents the signal strenght models of detected wifi networks.
Header header
#MetaData for the map
nav_msgs/MapMetaData info
# Define the number of sectors to be used on wifi maps that include orientation info.
uint32 sectors
# The map data, in row-major order, starting with (0,0). Wifi signal strenght models
# are gaussian probability distribution functions defined by mean and standard deviation value.
WifiSignalList[] data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: nav_msgs/MapMetaData
# This hold basic information about the characterists of the OccupancyGrid
# The time at which the map was loaded
time map_load_time
# The map resolution [m/cell]
float32 resolution
# Map width [cells]
uint32 width
# Map height [cells]
uint32 height
# The origin of the map [m, m, rad]. This is the real-world pose of the
# cell (0,0) in the map.
geometry_msgs/Pose origin
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: pal_wifi_localization_msgs/WifiSignalList
#list of wifi signal models learnt in a specific place
WifiSignal[] networks
time start_time
time end_time
================================================================================
MSG: pal_wifi_localization_msgs/WifiSignal
## Contains data relative to the learnt model of a wifi signal strenght in a specific location
# network id
std_msgs/String id
#Signal is represented through a gaussian pdf.
#The signal strenght is measured in dB
float32 mean
float32 std_dev
================================================================================
MSG: std_msgs/String
string data
"""
__slots__ = ['map']
_slot_types = ['pal_wifi_localization_msgs/WifiSignalMap']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
map
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetWifiMapResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.map is None:
self.map = pal_wifi_localization_msgs.msg.WifiSignalMap()
else:
self.map = pal_wifi_localization_msgs.msg.WifiSignalMap()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.map.header.seq, _x.map.header.stamp.secs, _x.map.header.stamp.nsecs))
_x = self.map.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2If2I7dI().pack(_x.map.info.map_load_time.secs, _x.map.info.map_load_time.nsecs, _x.map.info.resolution, _x.map.info.width, _x.map.info.height, _x.map.info.origin.position.x, _x.map.info.origin.position.y, _x.map.info.origin.position.z, _x.map.info.origin.orientation.x, _x.map.info.origin.orientation.y, _x.map.info.origin.orientation.z, _x.map.info.origin.orientation.w, _x.map.sectors))
length = len(self.map.data)
buff.write(_struct_I.pack(length))
for val1 in self.map.data:
length = len(val1.networks)
buff.write(_struct_I.pack(length))
for val2 in val1.networks:
_v1 = val2.id
_x = _v1.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val2
buff.write(_get_struct_2f().pack(_x.mean, _x.std_dev))
_v2 = val1.start_time
_x = _v2
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_v3 = val1.end_time
_x = _v3
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.map is None:
self.map = pal_wifi_localization_msgs.msg.WifiSignalMap()
end = 0
_x = self
start = end
end += 12
(_x.map.header.seq, _x.map.header.stamp.secs, _x.map.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.map.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.map.header.frame_id = str[start:end]
_x = self
start = end
end += 80
(_x.map.info.map_load_time.secs, _x.map.info.map_load_time.nsecs, _x.map.info.resolution, _x.map.info.width, _x.map.info.height, _x.map.info.origin.position.x, _x.map.info.origin.position.y, _x.map.info.origin.position.z, _x.map.info.origin.orientation.x, _x.map.info.origin.orientation.y, _x.map.info.origin.orientation.z, _x.map.info.origin.orientation.w, _x.map.sectors,) = _get_struct_2If2I7dI().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.map.data = []
for i in range(0, length):
val1 = pal_wifi_localization_msgs.msg.WifiSignalList()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.networks = []
for i in range(0, length):
val2 = pal_wifi_localization_msgs.msg.WifiSignal()
_v4 = val2.id
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v4.data = str[start:end].decode('utf-8', 'rosmsg')
else:
_v4.data = str[start:end]
_x = val2
start = end
end += 8
(_x.mean, _x.std_dev,) = _get_struct_2f().unpack(str[start:end])
val1.networks.append(val2)
_v5 = val1.start_time
_x = _v5
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
_v6 = val1.end_time
_x = _v6
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
self.map.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.map.header.seq, _x.map.header.stamp.secs, _x.map.header.stamp.nsecs))
_x = self.map.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2If2I7dI().pack(_x.map.info.map_load_time.secs, _x.map.info.map_load_time.nsecs, _x.map.info.resolution, _x.map.info.width, _x.map.info.height, _x.map.info.origin.position.x, _x.map.info.origin.position.y, _x.map.info.origin.position.z, _x.map.info.origin.orientation.x, _x.map.info.origin.orientation.y, _x.map.info.origin.orientation.z, _x.map.info.origin.orientation.w, _x.map.sectors))
length = len(self.map.data)
buff.write(_struct_I.pack(length))
for val1 in self.map.data:
length = len(val1.networks)
buff.write(_struct_I.pack(length))
for val2 in val1.networks:
_v7 = val2.id
_x = _v7.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val2
buff.write(_get_struct_2f().pack(_x.mean, _x.std_dev))
_v8 = val1.start_time
_x = _v8
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_v9 = val1.end_time
_x = _v9
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.map is None:
self.map = pal_wifi_localization_msgs.msg.WifiSignalMap()
end = 0
_x = self
start = end
end += 12
(_x.map.header.seq, _x.map.header.stamp.secs, _x.map.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.map.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.map.header.frame_id = str[start:end]
_x = self
start = end
end += 80
(_x.map.info.map_load_time.secs, _x.map.info.map_load_time.nsecs, _x.map.info.resolution, _x.map.info.width, _x.map.info.height, _x.map.info.origin.position.x, _x.map.info.origin.position.y, _x.map.info.origin.position.z, _x.map.info.origin.orientation.x, _x.map.info.origin.orientation.y, _x.map.info.origin.orientation.z, _x.map.info.origin.orientation.w, _x.map.sectors,) = _get_struct_2If2I7dI().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.map.data = []
for i in range(0, length):
val1 = pal_wifi_localization_msgs.msg.WifiSignalList()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.networks = []
for i in range(0, length):
val2 = pal_wifi_localization_msgs.msg.WifiSignal()
_v10 = val2.id
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v10.data = str[start:end].decode('utf-8', 'rosmsg')
else:
_v10.data = str[start:end]
_x = val2
start = end
end += 8
(_x.mean, _x.std_dev,) = _get_struct_2f().unpack(str[start:end])
val1.networks.append(val2)
_v11 = val1.start_time
_x = _v11
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
_v12 = val1.end_time
_x = _v12
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
self.map.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_2If2I7dI = None
def _get_struct_2If2I7dI():
global _struct_2If2I7dI
if _struct_2If2I7dI is None:
_struct_2If2I7dI = struct.Struct("<2If2I7dI")
return _struct_2If2I7dI
_struct_2f = None
def _get_struct_2f():
global _struct_2f
if _struct_2f is None:
_struct_2f = struct.Struct("<2f")
return _struct_2f
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
class GetWifiMap(object):
_type = 'pal_wifi_localization_msgs/GetWifiMap'
_md5sum = '4273c0e2a4f41c0c71c07a4fee60fcee'
_request_class = GetWifiMapRequest
_response_class = GetWifiMapResponse
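# A client-side usage sketch (the service name 'get_wifi_map' is hypothetical;
# any node advertising a service of this type would work):
#   import rospy
#   from pal_wifi_localization_msgs.srv import GetWifiMap
#   rospy.wait_for_service('get_wifi_map')
#   get_map = rospy.ServiceProxy('get_wifi_map', GetWifiMap)
#   resp = get_map()   # GetWifiMapResponse; resp.map is a WifiSignalMap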
| [
"sandeepan.ghosh.ece20@itbhu.ac.in"
] | sandeepan.ghosh.ece20@itbhu.ac.in |
0b29814de6fb30538f3b17fcbc45abc6d0d378b1 | 9b663934d1bc3288d7b2de35106084f2f1dca766 | /env/Lib/site-packages/pip/_internal/commands/install.py | 66071f6e8195b67ab8010298dd7184f83b7ec4fb | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"bzip2-1.0.6",
"Python-2.0",
"TCL",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyl... | permissive | akshatvg/engagement-monitor | 28dbe63398fca0788d16f58b4bd92e2f7c124a0d | d357befb7e1a29ac0372c8547b3b86ba4a7b98b3 | refs/heads/master | 2021-06-25T12:02:51.742573 | 2021-02-02T04:48:16 | 2021-02-02T04:48:16 | 202,103,224 | 1 | 7 | MIT | 2021-04-06T22:12:53 | 2019-08-13T08:50:10 | CSS | UTF-8 | Python | false | false | 23,803 | py |
# The following comment should be removed at some point in the future.
# It's included for now because without it InstallCommand.run() has a
# couple errors where we have to know req.name is str rather than
# Optional[str] for the InstallRequirement req.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import errno
import logging
import operator
import os
import shutil
from optparse import SUPPRESS_HELP
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cache import WheelCache
from pip._internal.cli import cmdoptions
from pip._internal.cli.cmdoptions import make_target_python
from pip._internal.cli.req_command import RequirementCommand
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.exceptions import (
CommandError,
InstallationError,
PreviousBuildDirError,
)
from pip._internal.locations import distutils_scheme
from pip._internal.operations.check import check_install_conflicts
from pip._internal.req import RequirementSet, install_given_reqs
from pip._internal.req.req_tracker import RequirementTracker
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.misc import (
ensure_dir,
get_installed_version,
protect_pip_from_modification_on_windows,
write_output,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import virtualenv_no_global
from pip._internal.wheel import WheelBuilder
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Any, List, Optional
from pip._internal.models.format_control import FormatControl
from pip._internal.req.req_install import InstallRequirement
from pip._internal.wheel import BinaryAllowedPredicate
logger = logging.getLogger(__name__)
def is_wheel_installed():
"""
Return whether the wheel package is installed.
"""
try:
import wheel # noqa: F401
except ImportError:
return False
return True
def build_wheels(
builder, # type: WheelBuilder
pep517_requirements, # type: List[InstallRequirement]
legacy_requirements, # type: List[InstallRequirement]
):
# type: (...) -> List[InstallRequirement]
"""
Build wheels for requirements, depending on whether wheel is installed.
"""
# We don't build wheels for legacy requirements if wheel is not installed.
should_build_legacy = is_wheel_installed()
# Always build PEP 517 requirements
build_failures = builder.build(
pep517_requirements,
should_unpack=True,
)
if should_build_legacy:
# We don't care about failures building legacy
# requirements, as we'll fall through to a direct
# install for those.
builder.build(
legacy_requirements,
should_unpack=True,
)
return build_failures
def get_check_binary_allowed(format_control):
# type: (FormatControl) -> BinaryAllowedPredicate
def check_binary_allowed(req):
# type: (InstallRequirement) -> bool
canonical_name = canonicalize_name(req.name)
allowed_formats = format_control.get_allowed_formats(canonical_name)
return "binary" in allowed_formats
return check_binary_allowed
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.pre())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmdoptions.add_target_python_options(cmd_opts)
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)")
cmd_opts.add_option(
'--no-user',
dest='use_user_site',
action='store_false',
help=SUPPRESS_HELP)
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
'--prefix',
dest='prefix_path',
metavar='dir',
default=None,
help="Installation prefix where lib, bin and other top-level "
"folders are placed")
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. The handling of dependencies depends on the '
'upgrade-strategy used.'
)
cmd_opts.add_option(
'--upgrade-strategy',
dest='upgrade_strategy',
default='only-if-needed',
choices=['only-if-needed', 'eager'],
help='Determines how dependency upgrading should be handled '
'[default: %default]. '
'"eager" - dependencies are upgraded regardless of '
'whether the currently installed version satisfies the '
'requirements of the upgraded package(s). '
'"only-if-needed" - are upgraded only when they do not '
'satisfy the requirements of the upgraded package(s).'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='Reinstall all packages even if they are already '
'up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages, overwriting them. '
'This can break your system if the existing package '
'is of a different version or was installed '
'with a different package manager!'
)
cmd_opts.add_option(cmdoptions.ignore_requires_python())
cmd_opts.add_option(cmdoptions.no_build_isolation())
cmd_opts.add_option(cmdoptions.use_pep517())
cmd_opts.add_option(cmdoptions.no_use_pep517())
cmd_opts.add_option(cmdoptions.install_options())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile Python source files to bytecode",
)
cmd_opts.add_option(
"--no-warn-script-location",
action="store_false",
dest="warn_script_location",
default=True,
help="Do not warn when installing scripts outside PATH",
)
cmd_opts.add_option(
"--no-warn-conflicts",
action="store_false",
dest="warn_about_conflicts",
default=True,
help="Do not warn about broken dependencies",
)
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(cmdoptions.prefer_binary())
cmd_opts.add_option(cmdoptions.no_clean())
cmd_opts.add_option(cmdoptions.require_hashes())
cmd_opts.add_option(cmdoptions.progress_bar())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
# type: (Values, List[Any]) -> int
cmdoptions.check_install_build_global(options)
upgrade_strategy = "to-satisfy-only"
if options.upgrade:
upgrade_strategy = options.upgrade_strategy
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
cmdoptions.check_dist_restriction(options, check_target=True)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if options.prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
target_temp_dir = None # type: Optional[TempDirectory]
target_temp_dir_path = None # type: Optional[str]
if options.target_dir:
options.ignore_installed = True
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
# Create a target directory for using with the target option
target_temp_dir = TempDirectory(kind="target")
target_temp_dir_path = target_temp_dir.path
install_options.append('--home=' + target_temp_dir_path)
global_options = options.global_options or []
session = self.get_default_session(options)
target_python = make_target_python(options)
finder = self._build_package_finder(
options=options,
session=session,
target_python=target_python,
ignore_requires_python=options.ignore_requires_python,
)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
if options.cache_dir and not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"by the current user and caching wheels has been "
"disabled. check the permissions and owner of that "
"directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
with RequirementTracker() as req_tracker, TempDirectory(
options.build_dir, delete=build_delete, kind="install"
) as directory:
requirement_set = RequirementSet(
require_hashes=options.require_hashes,
check_supported_wheels=not options.target_dir,
)
try:
self.populate_requirement_set(
requirement_set, args, options, finder, session,
wheel_cache
)
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
req_tracker=req_tracker,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
session=session,
options=options,
wheel_cache=wheel_cache,
use_user_site=options.use_user_site,
ignore_installed=options.ignore_installed,
ignore_requires_python=options.ignore_requires_python,
force_reinstall=options.force_reinstall,
upgrade_strategy=upgrade_strategy,
use_pep517=options.use_pep517,
)
resolver.resolve(requirement_set)
try:
pip_req = requirement_set.get_requirement("pip")
except KeyError:
modifying_pip = None
else:
# If we're not replacing an already installed pip,
# we're not modifying it.
modifying_pip = pip_req.satisfied_by is None
protect_pip_from_modification_on_windows(
modifying_pip=modifying_pip
)
check_binary_allowed = get_check_binary_allowed(
finder.format_control
)
# Consider legacy and PEP517-using requirements separately
legacy_requirements = []
pep517_requirements = []
for req in requirement_set.requirements.values():
if req.use_pep517:
pep517_requirements.append(req)
else:
legacy_requirements.append(req)
wheel_builder = WheelBuilder(
preparer, wheel_cache,
build_options=[], global_options=[],
check_binary_allowed=check_binary_allowed,
)
build_failures = build_wheels(
builder=wheel_builder,
pep517_requirements=pep517_requirements,
legacy_requirements=legacy_requirements,
)
# If we're using PEP 517, we cannot do a direct install
# so we fail here.
if build_failures:
raise InstallationError(
"Could not build wheels for {} which use"
" PEP 517 and cannot be installed directly".format(
", ".join(r.name for r in build_failures)))
to_install = resolver.get_installation_order(
requirement_set
)
# Consistency Checking of the package set we're installing.
should_warn_about_conflicts = (
not options.ignore_dependencies and
options.warn_about_conflicts
)
if should_warn_about_conflicts:
self._warn_about_conflicts(to_install)
# Don't warn about script install locations if
# --target has been specified
warn_script_location = options.warn_script_location
if options.target_dir:
warn_script_location = False
installed = install_given_reqs(
to_install,
install_options,
global_options,
root=options.root_path,
home=target_temp_dir_path,
prefix=options.prefix_path,
pycompile=options.compile,
warn_script_location=warn_script_location,
use_user_site=options.use_user_site,
)
lib_locations = get_lib_location_guesses(
user=options.use_user_site,
home=target_temp_dir_path,
root=options.root_path,
prefix=options.prefix_path,
isolated=options.isolated_mode,
)
working_set = pkg_resources.WorkingSet(lib_locations)
reqs = sorted(installed, key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
installed_version = get_installed_version(
req.name, working_set=working_set
)
if installed_version:
item += '-' + installed_version
except Exception:
pass
items.append(item)
installed_desc = ' '.join(items)
if installed_desc:
write_output(
'Successfully installed %s', installed_desc,
)
except EnvironmentError as error:
show_traceback = (self.verbosity >= 1)
message = create_env_error_message(
error, show_traceback, options.use_user_site,
)
logger.error(message, exc_info=show_traceback)
return ERROR
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
wheel_cache.cleanup()
if options.target_dir:
self._handle_target_dir(
options.target_dir, target_temp_dir, options.upgrade
)
return SUCCESS
def _handle_target_dir(self, target_dir, target_temp_dir, upgrade):
ensure_dir(target_dir)
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
lib_dir_list = []
with target_temp_dir:
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
scheme = distutils_scheme('', home=target_temp_dir.path)
purelib_dir = scheme['purelib']
platlib_dir = scheme['platlib']
data_dir = scheme['data']
if os.path.exists(purelib_dir):
lib_dir_list.append(purelib_dir)
if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
lib_dir_list.append(platlib_dir)
if os.path.exists(data_dir):
lib_dir_list.append(data_dir)
for lib_dir in lib_dir_list:
for item in os.listdir(lib_dir):
if lib_dir == data_dir:
ddir = os.path.join(data_dir, item)
if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
continue
target_item_dir = os.path.join(target_dir, item)
if os.path.exists(target_item_dir):
if not upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
def _warn_about_conflicts(self, to_install):
try:
package_set, _dep_info = check_install_conflicts(to_install)
except Exception:
logger.error("Error checking for conflicts.", exc_info=True)
return
missing, conflicting = _dep_info
# NOTE: There is some duplication here from pip check
for project_name in missing:
version = package_set[project_name][0]
for dependency in missing[project_name]:
logger.critical(
"%s %s requires %s, which is not installed.",
project_name, version, dependency[1],
)
for project_name in conflicting:
version = package_set[project_name][0]
for dep_name, dep_version, req in conflicting[project_name]:
logger.critical(
"%s %s has requirement %s, but you'll have %s %s which is "
"incompatible.",
project_name, version, req, dep_name, dep_version,
)
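        # Example of the resulting log line (hypothetical package names):
        #   pkg-a 1.0 has requirement pkg-b>=2.0, but you'll have pkg-b 1.5
        #   which is incompatible.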
def get_lib_location_guesses(*args, **kwargs):
scheme = distutils_scheme('', *args, **kwargs)
return [scheme['purelib'], scheme['platlib']]
def create_env_error_message(error, show_traceback, using_user_site):
"""Format an error message for an EnvironmentError
It may occur anytime during the execution of the install command.
"""
parts = []
# Mention the error if we are not going to show a traceback
parts.append("Could not install packages due to an EnvironmentError")
if not show_traceback:
parts.append(": ")
parts.append(str(error))
else:
parts.append(".")
    # Split the error indication from a helper message (if any)
parts[-1] += "\n"
# Suggest useful actions to the user:
# (1) using user site-packages or (2) verifying the permissions
if error.errno == errno.EACCES:
user_option_part = "Consider using the `--user` option"
permissions_part = "Check the permissions"
if not using_user_site:
parts.extend([
user_option_part, " or ",
permissions_part.lower(),
])
else:
parts.append(permissions_part)
parts.append(".\n")
return "".join(parts).strip() + "\n"
| [
"rayanuthalas@gmail.com"
] | rayanuthalas@gmail.com |
ba9cab5d111516ba4e1fd9eba0a8fa8c5bc19eeb | 98d61512fdf7f8426d4634a86edd25669944ab9e | /algorithms/BestTimeToBuyAndSellStock/solution.py | 2c6f64b803cdbd44d2e66e2168c317b6104e54ea | [] | no_license | P-ppc/leetcode | 145102804320c6283fa653fc4a7ae89bf745b2fb | 0d90db3f0ca02743ee7d5e959ac7c83cdb435b92 | refs/heads/master | 2021-07-12T02:49:15.369119 | 2018-11-13T05:34:51 | 2018-11-24T12:34:07 | 132,237,265 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
max_profit = 0
        min_price = float('inf')  # cheapest price seen so far
for price in prices:
min_price = min(min_price, price)
max_profit = max(max_profit, price - min_price)
        return max_profit
| [
"ppc-user@foxmail.com"
] | ppc-user@foxmail.com |
c3a1d237da21bec7b9c7408ef0008689bbdd134d | 88ae8695987ada722184307301e221e1ba3cc2fa | /printing/backend/PRESUBMIT.py | c400aabee9f16585e38e2c421c765eb7291eb5c0 | [
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 1,984 | py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Presubmit script for the printing backend.
See https://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API.
"""
USE_PYTHON3 = True
def _CheckForStringViewFromNullableIppApi(input_api, output_api):
"""
Looks for all affected lines in CL where one constructs either
base::StringPiece or std::string_view from any ipp*() CUPS API call.
Assumes over-broadly that all ipp*() calls can return NULL.
Returns affected lines as a list of presubmit errors.
"""
# Attempts to detect source lines like:
# * base::StringPiece foo = ippDoBar();
# * base::StringPiece foo(ippDoBar());
# and the same for std::string_view.
string_view_re = input_api.re.compile(
r"^.+(base::StringPiece|std::string_view)\s+\w+( = |\()ipp[A-Z].+$")
violations = input_api.canned_checks._FindNewViolationsOfRule(
lambda extension, line:
not (extension in ("cc", "h") and string_view_re.search(line)),
input_api, None)
bulleted_violations = [" * {}".format(entry) for entry in violations]
if bulleted_violations:
return [output_api.PresubmitError(
("Possible construction of base::StringPiece or std::string_view "
"from CUPS IPP API (that can probably return NULL):\n{}").format(
"\n".join(bulleted_violations))),]
return []
def _CommonChecks(input_api, output_api):
"""Actual implementation of presubmits for the printing backend."""
results = []
results.extend(_CheckForStringViewFromNullableIppApi(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
"""Mandatory presubmit entry point."""
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
"""Mandatory presubmit entry point."""
return _CommonChecks(input_api, output_api)
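# Both hooks are run by depot_tools during code review, e.g. via
# `git cl upload` or an explicit `git cl presubmit` (standard Chromium
# workflow).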
| [
"jengelh@inai.de"
] | jengelh@inai.de |
04c5be843d18e083a0ae2860b4aa6a92a1e60c62 | 1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc | /venv/lib/python2.7/site-packages/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace_facts.py | 8f7f88260a73e8917841ab5cdddece6d119a127a | [
"MIT"
] | permissive | otus-devops-2019-02/devopscourses_infra | 1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c | e42e5deafce395af869084ede245fc6cff6d0b2c | refs/heads/master | 2020-04-29T02:41:49.985889 | 2019-05-21T06:35:19 | 2019-05-21T06:35:19 | 175,780,457 | 0 | 1 | MIT | 2019-05-21T06:35:20 | 2019-03-15T08:35:54 | HCL | UTF-8 | Python | false | false | 8,682 | py | #!/usr/bin/python
#
# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_loganalyticsworkspace_facts
version_added: "2.8"
short_description: Get facts of Azure Log Analytics workspaces.
description:
- Get, query Azure Log Analytics workspaces.
options:
resource_group:
description:
- Name of resource group.
required: True
name:
description:
- Name of the workspace.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
show_intelligence_packs:
description:
- Show the intelligence packs for a workspace.
            - Note this will incur one additional network call for each workspace, so expect a slower response.
show_management_groups:
description:
- Show the management groups for a workspace.
            - Note this will incur one additional network call for each workspace, so expect a slower response.
show_shared_keys:
description:
- Show the shared keys for a workspace.
            - Note this will incur one additional network call for each workspace, so expect a slower response.
show_usages:
description:
- Show the list of usages for a workspace.
            - Note this will incur one additional network call for each workspace, so expect a slower response.
extends_documentation_fragment:
- azure
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Query a workspace
azure_rm_loganalyticsworkspace_facts:
resource_group: myResourceGroup
name: myLogAnalyticsWorkspace
show_intelligence_packs: true
show_management_groups: true
show_shared_keys: true
show_usages: true
'''
RETURN = '''
id:
description: Workspace resource path.
type: str
returned: success
example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.OperationalInsights/workspaces/m
yLogAnalyticsWorkspace"
location:
description:
- Resource location.
type: str
returned: success
example: "eastus"
sku:
description:
- The SKU of the workspace
type: str
returned: success
example: "per_gb2018"
retention_in_days:
description:
- The workspace data retention in days.
- -1 means Unlimited retention for the C(unlimited) C(sku).
- 730 days is the maximum allowed for all other C(sku)s.
type: int
returned: success
example: 40
intelligence_packs:
description:
- Lists all the intelligence packs possible and whether they are enabled or disabled for a given workspace.
type: list
returned: success
example: ['name': 'CapacityPerformance', 'enabled': true]
management_groups:
description:
- List of management groups connected to the workspace.
type: list
returned: success
example: "{'value': []}"
shared_keys:
description:
- Shared keys for the workspace.
type: list
returned: success
example: "{
'primarySharedKey': 'BozLY1JnZbxu0jWUQSY8iRPEM8ObmpP8rW+8bUl3+HpDJI+n689SxXgTgU7k1qdxo/WugRLxechxbolAfHM5uA==',
'secondarySharedKey': '7tDt5W0JBrCQKtQA3igfFltLSzJeyr9LmuT+B/ibzd8cdC1neZ1ePOQLBx5NUzc0q2VUIK0cLhWNyFvo/hT8Ww=='
}"
usages:
description:
- List of usage metrics for the workspace.
type: list
returned: success
example: "{
'value': [
{
'name': {
'value': 'DataAnalyzed',
'localizedValue': 'Data Analyzed'
},
'unit': 'Bytes',
'currentValue': 0,
'limit': 524288000,
'nextResetTime': '2017-10-03T00:00:00Z',
'quotaPeriod': 'P1D'
}
]
}"
''' # NOQA
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMLogAnalyticsWorkspaceFact(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str'),
tags=dict(type='list'),
show_shared_keys=dict(type='bool'),
show_intelligence_packs=dict(type='bool'),
show_usages=dict(type='bool'),
show_management_groups=dict(type='bool')
)
self.results = dict(
changed=False,
workspaces=[]
)
self.resource_group = None
self.name = None
self.tags = None
self.show_intelligence_packs = None
self.show_shared_keys = None
self.show_usages = None
self.show_management_groups = None
super(AzureRMLogAnalyticsWorkspaceFact, self).__init__(self.module_arg_spec, supports_tags=False, facts_module=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
if self.name:
item = self.get_workspace()
response = [item] if item else []
else:
response = self.list_by_resource_group()
self.results['workspaces'] = [self.to_dict(x) for x in response if self.has_tags(x.tags, self.tags)]
return self.results
def get_workspace(self):
try:
return self.log_analytics_client.workspaces.get(self.resource_group, self.name)
except CloudError:
pass
return None
def list_by_resource_group(self):
try:
return self.log_analytics_client.workspaces.list_by_resource_group(self.resource_group)
except CloudError:
pass
return []
def list_intelligence_packs(self):
try:
response = self.log_analytics_client.workspaces.list_intelligence_packs(self.resource_group, self.name)
return [x.as_dict() for x in response]
except CloudError as exc:
self.fail('Error when listing intelligence packs {0}'.format(exc.message or str(exc)))
def list_management_groups(self):
result = []
try:
response = self.log_analytics_client.workspaces.list_management_groups(self.resource_group, self.name)
while True:
result.append(response.next().as_dict())
except StopIteration:
pass
except CloudError as exc:
self.fail('Error when listing management groups {0}'.format(exc.message or str(exc)))
return result
def list_usages(self):
result = []
try:
response = self.log_analytics_client.workspaces.list_usages(self.resource_group, self.name)
while True:
result.append(response.next().as_dict())
except StopIteration:
pass
except CloudError as exc:
self.fail('Error when listing usages {0}'.format(exc.message or str(exc)))
return result
def get_shared_keys(self):
try:
return self.log_analytics_client.workspaces.get_shared_keys(self.resource_group, self.name).as_dict()
except CloudError as exc:
self.fail('Error when getting shared key {0}'.format(exc.message or str(exc)))
def to_dict(self, workspace):
result = workspace.as_dict()
result['sku'] = _camel_to_snake(workspace.sku.name)
if self.show_intelligence_packs:
result['intelligence_packs'] = self.list_intelligence_packs()
if self.show_management_groups:
result['management_groups'] = self.list_management_groups()
if self.show_shared_keys:
result['shared_keys'] = self.get_shared_keys()
if self.show_usages:
result['usages'] = self.list_usages()
return result
def main():
AzureRMLogAnalyticsWorkspaceFact()
if __name__ == '__main__':
main()
| [
"skydevapp@gmail.com"
] | skydevapp@gmail.com |
3248006133f1a39fee5c6ed3e1d8983387dae879 | 17ef6c9ead83c2a2c18fe029ae3f6ba90d57b8f4 | /unsupervised_learning/0x01-clustering/8-EM.py | c0e7abe3c2039e1dca58a146d909b3d122c2d090 | [] | no_license | shincap8/holbertonschool-machine_learning | ede0c2be6df44f91c125c4497cf5ac1b90f654fe | cfc519b3290a1b8ecd6dc94f70c5220538ee7aa0 | refs/heads/master | 2023-03-26T07:00:10.238239 | 2021-03-18T04:39:01 | 2021-03-18T04:39:01 | 279,436,819 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,875 | py | #!/usr/bin/env python3
"""Function that performs the expectation maximization for a GMM"""
import numpy as np
initialize = __import__('4-initialize').initialize
expectation = __import__('6-expectation').expectation
maximization = __import__('7-maximization').maximization
def expectation_maximization(X, k, iterations=1000, tol=1e-5, verbose=False):
"""X is a numpy.ndarray of shape (n, d) containing the data set
k is a positive integer containing the number of clusters
iterations is a positive integer containing the
maximum number of iterations for the algorithm
    tol is a non-negative float containing the tolerance on the
    log likelihood, used to determine early stopping: if the
    difference between iterations is less than or equal to tol,
    the algorithm stops
verbose is a boolean that determines if you
should print information about the algorithm
If True, print Log Likelihood after {i} iterations:
{l} every 10 iterations and after the last iteration
{i} is the number of iterations of the EM algorithm
{l} is the log likelihood, rounded to 5 decimal places
You should use:
initialize = __import__('4-initialize').initialize
expectation = __import__('6-expectation').expectation
maximization = __import__('7-maximization').maximization
You may use at most 1 loop
Returns: pi, m, S, g, l, or None, None, None, None, None on failure
pi is a numpy.ndarray of shape (k,)
containing the priors for each cluster
m is a numpy.ndarray of shape (k, d)
containing the centroid means for each cluster
S is a numpy.ndarray of shape (k, d, d)
containing the covariance matrices for each cluster
g is a numpy.ndarray of shape (k, n) containing
the probabilities for each data point in each cluster
l is the log likelihood of the model"""
if type(X) is not np.ndarray or len(X.shape) != 2:
return (None, None, None, None, None)
if type(k) is not int or type(iterations) is not int:
return (None, None, None, None, None)
if k <= 0 or iterations <= 0:
return (None, None, None, None, None)
if type(tol) is not float or tol < 0:
return (None, None, None, None, None)
if type(verbose) is not bool:
return (None, None, None, None, None)
n, d = X.shape
pi, m, S = initialize(X, k)
g, ll = expectation(X, pi, m, S)
ll_old = 0
text = 'Log Likelihood after {} iterations: {}'
for i in range(iterations):
if verbose and i % 10 == 0:
print(text.format(i, ll.round(5)))
pi, m, S = maximization(X, g)
g, ll = expectation(X, pi, m, S)
if np.abs(ll_old - ll) <= tol:
break
ll_old = ll
if verbose:
print(text.format(i + 1, ll.round(5)))
return (pi, m, S, g, ll)
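# A minimal usage sketch (hypothetical data; numpy is already imported above
# as np, and the '4-initialize', '6-expectation' and '7-maximization' helpers
# are assumed to sit alongside this file):
#   X = np.random.randn(500, 2)                      # n=500 points, d=2
#   pi, m, S, g, ll = expectation_maximization(X, k=3, verbose=True)
#   # pi: (3,) priors, m: (3, 2) means, S: (3, 2, 2) covariances,
#   # g: (3, 500) posterior responsibilities, ll: final log likelihood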
| [
"shincap8@gmail.com"
] | shincap8@gmail.com |
f8bd1fc30da97b0917edc5644b68c14a92de65b1 | 3b50605ffe45c412ee33de1ad0cadce2c5a25ca2 | /python/paddle/fluid/tests/unittests/test_monitor.py | a5d5e30176fb07753a1ec9e47c4031fccf825b92 | [
"Apache-2.0"
] | permissive | Superjomn/Paddle | f5f4072cf75ac9ecb0ff528876ee264b14bbf8d1 | 7a0b0dab8e58b6a3b28b3b82c43d55c9bd3d4188 | refs/heads/develop | 2023-02-04T20:27:54.244843 | 2023-01-26T15:31:14 | 2023-01-26T15:31:14 | 66,896,049 | 4 | 1 | Apache-2.0 | 2023-04-14T02:29:52 | 2016-08-30T01:45:54 | C++ | UTF-8 | Python | false | false | 3,566 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TestCases for Monitor
"""
import paddle
paddle.enable_static()
import os
import tempfile
import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
class TestDatasetWithStat(unittest.TestCase):
"""TestCases for Dataset."""
def setUp(self):
self.use_data_loader = False
self.epoch_num = 10
self.drop_last = False
def test_dataset_run_with_stat(self):
temp_dir = tempfile.TemporaryDirectory()
path_a = os.path.join(temp_dir.name, "test_in_memory_dataset_run_a.txt")
path_b = os.path.join(temp_dir.name, "test_in_memory_dataset_run_b.txt")
with open(path_a, "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open(path_b, "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
f.write(data)
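        # Each line appears to use the plain-text MultiSlot format: for every
        # slot, a length token followed by that many feature ids (an
        # assumption inferred from the "cat" pipe command and the four
        # lod_level=1 slot vars below).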
slots = ["slot1", "slot2", "slot3", "slot4"]
slots_vars = []
for slot in slots:
var = paddle.static.data(
name=slot, shape=[-1, 1], dtype="int64", lod_level=1
)
slots_vars.append(var)
embs = []
for x in slots_vars:
emb = fluid.layers.embedding(x, is_sparse=True, size=[100001, 4])
embs.append(emb)
dataset = paddle.distributed.InMemoryDataset()
dataset._set_batch_size(32)
dataset._set_thread(3)
dataset.set_filelist([path_a, path_b])
dataset._set_pipe_command("cat")
dataset._set_use_var(slots_vars)
dataset.load_into_memory()
dataset._set_fea_eval(1, True)
dataset.slots_shuffle(["slot1"])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
if self.use_data_loader:
data_loader = fluid.io.DataLoader.from_dataset(
dataset, fluid.cpu_places(), self.drop_last
)
for i in range(self.epoch_num):
for data in data_loader():
exe.run(fluid.default_main_program(), feed=data)
else:
for i in range(self.epoch_num):
try:
exe.train_from_dataset(
fluid.default_main_program(),
dataset,
fetch_list=[embs[0], embs[1]],
fetch_info=["emb0", "emb1"],
print_period=1,
)
                except Exception as e:
                    self.fail("train_from_dataset raised: {}".format(e))
int_stat = core.get_int_stats()
# total 56 keys
print(int_stat["STAT_total_feasign_num_in_mem"])
temp_dir.cleanup()
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | Superjomn.noreply@github.com |
10ec710fb0f23d2d5f488050660929cca1494ae0 | a479a5773fd5607f96c3b84fed57733fe39c3dbb | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/__init__.py | 6834e87234c3203df2663b4ff0f91a34779f7e73 | [
"Apache-2.0"
] | permissive | napalm-automation/napalm-yang | 839c711e9294745534f5fbbe115e0100b645dbca | 9148e015b086ebe311c07deb92e168ea36fd7771 | refs/heads/develop | 2021-01-11T07:17:20.226734 | 2019-05-15T08:43:03 | 2019-05-15T08:43:03 | 69,226,025 | 65 | 64 | Apache-2.0 | 2019-05-15T08:43:24 | 2016-09-26T07:48:42 | Python | UTF-8 | Python | false | false | 18,454 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class interface_ref(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface-ref. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Reference to an interface or subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "interface-ref"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"interfaces",
"interface",
"interface-ref",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/config (container)
YANG Description: Configured reference to interface / subinterface
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configured reference to interface / subinterface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state (container)
YANG Description: Operational state for interface-ref
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state for interface-ref
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
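# A usage sketch (hypothetical leaf names; the nested config class is assumed
# to expose the usual OpenConfig 'interface' / 'subinterface' leaves):
#   ref = interface_ref()
#   ref.config.interface = "eth1/1"
#   ref.config.subinterface = 0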
from . import config
from . import state
class interface_ref(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface-ref. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Reference to an interface or subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "interface-ref"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"interfaces",
"interface",
"interface-ref",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/config (container)
YANG Description: Configured reference to interface / subinterface
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configured reference to interface / subinterface
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state (container)
YANG Description: Operational state for interface-ref
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/interface_ref/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state for interface-ref
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
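# A minimal usage sketch for the generated bindings above (``iref`` is a
# hypothetical instance name; assignment through the ``config``/``state``
# properties routes through _set_config/_set_state as their docstrings note):
#
#     iref = interface_ref()
#     iref.config = config.config()   # equivalent to iref._set_config(...)
#     elements = {e: getattr(iref, e) for e in iref._pyangbind_elements}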
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
3e1979aafdbea3e2c81383a48cfe6a7cafe0f013 | fb78fd824e904705fb1ee09db8b3c20cc3902805 | /django-guestbook/guestbook/migrations/0002_auto_20191016_1315.py | ef5f841831a597352e01e1f4517998095c6e161b | [] | no_license | Roderich25/mac | 8469833821ac49c539a744db29db5a41d755ad55 | 4f7fe281c88f0199b85d0ac99ce41ffb643d6e82 | refs/heads/master | 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 | Jupyter Notebook | UTF-8 | Python | false | false | 431 | py | # Generated by Django 2.2.6 on 2019-10-16 13:15
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('guestbook', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='date_added',
field=models.DateField(default=django.utils.timezone.now),
),
]
| [
"rodrigoavilasolis@gmail.com"
] | rodrigoavilasolis@gmail.com |
974973be60a087d297c0e8b18fb07e630f973b1c | 73ad9af21fd3584a741ee47ce646ea22b6666721 | /mitbbs/268.py | 2ac493b68b429b089eb98aa488a2a99a72abac5e | [] | no_license | zhenggang587/code | a70b5004a4afb3e2eb6b4d8f267fd1aed83a4b74 | fa13c439bcfaad543461b8a8ea397ecb95a066a5 | refs/heads/master | 2020-05-02T18:23:18.892197 | 2014-12-14T05:53:13 | 2014-12-14T05:53:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14 | py |
# see 194.py
| [
"zhenggang@meituan.com"
] | zhenggang@meituan.com |
0922c9ad0dfb9694fc3319082b15404f0dfd3dd1 | 3bb0fe3babe9f30e05a181722c476504e0ab56df | /tests/contrib/permissions/test_models.py | a264941db8418b287856b6f52bb671120629e476 | [
"MIT"
] | permissive | inonit/django-chemtrails | 6616aa121afe70da42a2a237b88b671ee2cddd74 | e8bd97dc68852902b57d314250e616b505db0e16 | refs/heads/master | 2021-01-20T10:55:30.979439 | 2017-12-11T21:32:06 | 2017-12-11T21:32:06 | 80,567,669 | 14 | 2 | MIT | 2017-12-04T05:43:13 | 2017-01-31T22:11:51 | Python | UTF-8 | Python | false | false | 907 | py | # -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from chemtrails.contrib.permissions.models import (
AccessRule,
get_node_relations_choices, get_node_permissions_choices
)
class ChoicesHelperFunctionsTestCase(TestCase):
"""
Test various functions for getting choices based on Neo4j data.
"""
def test_get_node_relations_choices(self):
choices = get_node_relations_choices()
self.assertIsInstance(choices, list)
for item in choices:
self.assertIsInstance(item, tuple)
self.assertEqual(len(item), 2)
def test_get_node_permissions_choices(self):
choices = get_node_permissions_choices()
self.assertIsInstance(choices, list)
for item in choices:
self.assertIsInstance(item, tuple)
self.assertEqual(len(item), 2)
| [
"rhblind@gmail.com"
] | rhblind@gmail.com |
1022e667a73f39aa64e68623e43a4c620ca0b8a2 | 47fb8f2ed2510a3777799e1b704bfcfce18789c3 | /challenge.py | dac506dffc3950144d67f5e7ba52f53f0399e6f0 | [] | no_license | ehiaig/learn_python | e8d476c228451fb91be40ec64b5f7700fa12db74 | 205119a15d49dd81d98fca4623e0df99963aece7 | refs/heads/master | 2022-12-25T02:00:32.428483 | 2020-09-19T19:02:32 | 2020-09-19T19:02:32 | 103,679,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | # print ("input is {}".format(input))
# ALTERNATE HUMAN AND DOG CHALLENGE
stirr = "hd...h...d..d..hd...h..d..h.d"
stir = list(stirr)
line = 0
while line < len(stir):
    try:
        if stir[line] == 'h' and not (stir[line + 1] == 'd'):
            # pull the nearest following dog into the slot right after this human
            for k in range(line + 1, len(stir)):
                if stir[k] == 'd':
                    stir[k], stir[line + 1] = stir[line + 1], stir[k]
                    break
    except IndexError:
        break
    line = line + 1
print(''.join(stir))
# HUMAN AND DOG CHALLENGE
# Put all dogs one step in front of each human
| [
"ehiagheaigg@gmail.com"
] | ehiagheaigg@gmail.com |
60bfc0644928cdd4cebc77a5c5c4d392a7b5aaeb | c68aea1de91b46ae684792123c61e84c44ea0266 | /code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Tools/freeze/freeze.py | 83aa508a46a93e79664d02fc09be9b3be0489ed4 | [
"Apache-2.0",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | Winfredemalx54/algorithm-challenger-1 | 12e23bed89ca889701db1b17ac540ce62ce86d8e | 761c2c39e041fb155f853385998d5c6318a39913 | refs/heads/master | 2022-11-22T15:03:01.548605 | 2020-07-11T12:26:31 | 2020-07-11T12:26:31 | 297,955,141 | 3 | 0 | Apache-2.0 | 2020-09-23T11:58:19 | 2020-09-23T11:58:18 | null | UTF-8 | Python | false | false | 17,174 | py | #! /usr/bin/env python3
"""Freeze a Python script into a binary.
usage: freeze [options...] script [module]...
Options:
-p prefix: This is the prefix used when you ran ``make install''
in the Python build directory.
(If you never ran this, freeze won't work.)
The default is whatever sys.prefix evaluates to.
It can also be the top directory of the Python source
tree; then -P must point to the build tree.
-P exec_prefix: Like -p but this is the 'exec_prefix', used to
install objects etc. The default is whatever sys.exec_prefix
evaluates to, or the -p argument if given.
If -p points to the Python source tree, -P must point
to the build tree, if different.
-e extension: A directory containing additional .o files that
may be used to resolve modules. This directory
should also have a Setup file describing the .o files.
On Windows, the name of a .INI file describing one
or more extensions is passed.
More than one -e option may be given.
-o dir: Directory where the output files are created; default '.'.
-m: Additional arguments are module names instead of filenames.
-a package=dir: Additional directories to be added to the package's
__path__. Used to simulate directories added by the
package at runtime (eg, by OpenGL and win32com).
More than one -a option may be given for each package.
-l file: Pass the file to the linker (windows only)
-d: Debugging mode for the module finder.
-q: Make the module finder totally quiet.
-h: Print this help message.
-x module Exclude the specified module. It will still be imported
by the frozen binary if it exists on the host system.
-X module Like -x, except the module can never be imported by
the frozen binary.
-E: Freeze will fail if any modules can't be found (that
were not excluded using -x or -X).
-i filename: Include a file with additional command line options. Used
to prevent command lines growing beyond the capabilities of
the shell/OS. All arguments specified in filename
are read and the -i option replaced with the parsed
params (note - quoting args in this file is NOT supported)
-s subsystem: Specify the subsystem (For Windows only.);
'console' (default), 'windows', 'service' or 'com_dll'
-w: Toggle Windows (NT or 95) behavior.
(For debugging only -- on a win32 platform, win32 behavior
is automatic.)
-r prefix=f: Replace path prefix.
Replace prefix with f in the source path references
contained in the resulting binary.
Arguments:
script: The Python script to be executed by the resulting binary.
module ...: Additional Python modules (referenced by pathname)
that will be included in the resulting binary. These
may be .py or .pyc files. If -m is specified, these are
module names that are search in the path instead.
NOTES:
In order to use freeze successfully, you must have built Python and
installed it ("make install").
The script should not use modules provided only as shared libraries;
if it does, the resulting binary is not self-contained.
"""
# Import standard modules
import modulefinder
import getopt
import os
import sys
# Import the freeze-private modules
import checkextensions
import makeconfig
import makefreeze
import makemakefile
import parsesetup
import bkfile
# Main program
def main():
# overridable context
prefix = None # settable with -p option
exec_prefix = None # settable with -P option
extensions = []
exclude = [] # settable with -x option
addn_link = [] # settable with -l, but only honored under Windows.
path = sys.path[:]
modargs = 0
debug = 1
odir = ''
win = sys.platform[:3] == 'win'
replace_paths = [] # settable with -r option
error_if_any_missing = 0
# default the exclude list for each platform
if win: exclude = exclude + [
'dos', 'dospath', 'mac', 'macfs', 'MACFS', 'posix', ]
fail_import = exclude[:]
# output files
frozen_c = 'frozen.c'
config_c = 'config.c'
target = 'a.out' # normally derived from script name
makefile = 'Makefile'
subsystem = 'console'
# parse command line by first replacing any "-i" options with the
# file contents.
pos = 1
while pos < len(sys.argv)-1:
# last option can not be "-i", so this ensures "pos+1" is in range!
if sys.argv[pos] == '-i':
try:
with open(sys.argv[pos+1]) as infp:
options = infp.read().split()
except IOError as why:
usage("File name '%s' specified with the -i option "
"can not be read - %s" % (sys.argv[pos+1], why) )
# Replace the '-i' and the filename with the read params.
sys.argv[pos:pos+2] = options
pos = pos + len(options) - 1 # Skip the name and the included args.
pos = pos + 1
# Now parse the command line with the extras inserted.
try:
opts, args = getopt.getopt(sys.argv[1:], 'r:a:dEe:hmo:p:P:qs:wX:x:l:')
except getopt.error as msg:
usage('getopt error: ' + str(msg))
# process option arguments
for o, a in opts:
if o == '-h':
print(__doc__)
return
if o == '-d':
debug = debug + 1
if o == '-e':
extensions.append(a)
if o == '-m':
modargs = 1
if o == '-o':
odir = a
if o == '-p':
prefix = a
if o == '-P':
exec_prefix = a
if o == '-q':
debug = 0
if o == '-w':
win = not win
if o == '-s':
if not win:
usage("-s subsystem option only on Windows")
subsystem = a
if o == '-x':
exclude.append(a)
if o == '-X':
exclude.append(a)
fail_import.append(a)
if o == '-E':
error_if_any_missing = 1
if o == '-l':
addn_link.append(a)
if o == '-a':
modulefinder.AddPackagePath(*a.split("=", 2))
if o == '-r':
f,r = a.split("=", 2)
replace_paths.append( (f,r) )
# modules that are imported by the Python runtime
implicits = []
for module in ('site', 'warnings', 'encodings.utf_8', 'encodings.latin_1'):
if module not in exclude:
implicits.append(module)
# default prefix and exec_prefix
if not exec_prefix:
if prefix:
exec_prefix = prefix
else:
exec_prefix = sys.exec_prefix
if not prefix:
prefix = sys.prefix
# determine whether -p points to the Python source tree
ishome = os.path.exists(os.path.join(prefix, 'Python', 'ceval.c'))
# locations derived from options
version = '%d.%d' % sys.version_info[:2]
if hasattr(sys, 'abiflags'):
flagged_version = version + sys.abiflags
else:
flagged_version = version
if win:
extensions_c = 'frozen_extensions.c'
if ishome:
print("(Using Python source directory)")
binlib = exec_prefix
incldir = os.path.join(prefix, 'Include')
config_h_dir = exec_prefix
config_c_in = os.path.join(prefix, 'Modules', 'config.c.in')
frozenmain_c = os.path.join(prefix, 'Python', 'frozenmain.c')
makefile_in = os.path.join(exec_prefix, 'Makefile')
if win:
frozendllmain_c = os.path.join(exec_prefix, 'Pc\\frozen_dllmain.c')
else:
binlib = os.path.join(exec_prefix,
'lib', 'python%s' % version,
'config-%s' % flagged_version)
incldir = os.path.join(prefix, 'include', 'python%s' % flagged_version)
config_h_dir = os.path.join(exec_prefix, 'include',
'python%s' % flagged_version)
config_c_in = os.path.join(binlib, 'config.c.in')
frozenmain_c = os.path.join(binlib, 'frozenmain.c')
makefile_in = os.path.join(binlib, 'Makefile')
frozendllmain_c = os.path.join(binlib, 'frozen_dllmain.c')
supp_sources = []
defines = []
includes = ['-I' + incldir, '-I' + config_h_dir]
# sanity check of directories and files
check_dirs = [prefix, exec_prefix, binlib, incldir]
if not win:
# These are not directories on Windows.
check_dirs = check_dirs + extensions
for dir in check_dirs:
if not os.path.exists(dir):
usage('needed directory %s not found' % dir)
if not os.path.isdir(dir):
usage('%s: not a directory' % dir)
if win:
files = supp_sources + extensions # extensions are files on Windows.
else:
files = [config_c_in, makefile_in] + supp_sources
for file in supp_sources:
if not os.path.exists(file):
usage('needed file %s not found' % file)
if not os.path.isfile(file):
usage('%s: not a plain file' % file)
if not win:
for dir in extensions:
setup = os.path.join(dir, 'Setup')
if not os.path.exists(setup):
usage('needed file %s not found' % setup)
if not os.path.isfile(setup):
usage('%s: not a plain file' % setup)
# check that enough arguments are passed
if not args:
usage('at least one filename argument required')
# check that file arguments exist
for arg in args:
if arg == '-m':
break
# if user specified -m on the command line before _any_
# file names, then nothing should be checked (as the
# very first file should be a module name)
if modargs:
break
if not os.path.exists(arg):
usage('argument %s not found' % arg)
if not os.path.isfile(arg):
usage('%s: not a plain file' % arg)
# process non-option arguments
scriptfile = args[0]
modules = args[1:]
# derive target name from script name
base = os.path.basename(scriptfile)
base, ext = os.path.splitext(base)
if base:
if base != scriptfile:
target = base
else:
target = base + '.bin'
# handle -o option
base_frozen_c = frozen_c
base_config_c = config_c
base_target = target
if odir and not os.path.isdir(odir):
try:
os.mkdir(odir)
print("Created output directory", odir)
except OSError as msg:
usage('%s: mkdir failed (%s)' % (odir, str(msg)))
base = ''
if odir:
base = os.path.join(odir, '')
frozen_c = os.path.join(odir, frozen_c)
config_c = os.path.join(odir, config_c)
target = os.path.join(odir, target)
makefile = os.path.join(odir, makefile)
if win: extensions_c = os.path.join(odir, extensions_c)
# Handle special entry point requirements
# (on Windows, some frozen programs do not use __main__, but
# import the module directly. Eg, DLLs, Services, etc
custom_entry_point = None # Currently only used on Windows
python_entry_is_main = 1 # Is the entry point called __main__?
# handle -s option on Windows
if win:
import winmakemakefile
try:
custom_entry_point, python_entry_is_main = \
winmakemakefile.get_custom_entry_point(subsystem)
except ValueError as why:
usage(why)
# Actual work starts here...
# collect all modules of the program
dir = os.path.dirname(scriptfile)
path[0] = dir
mf = modulefinder.ModuleFinder(path, debug, exclude, replace_paths)
if win and subsystem=='service':
# If a Windows service, then add the "built-in" module.
mod = mf.add_module("servicemanager")
mod.__file__="dummy.pyd" # really built-in to the resulting EXE
for mod in implicits:
mf.import_hook(mod)
for mod in modules:
if mod == '-m':
modargs = 1
continue
if modargs:
if mod[-2:] == '.*':
mf.import_hook(mod[:-2], None, ["*"])
else:
mf.import_hook(mod)
else:
mf.load_file(mod)
# Alias "importlib._bootstrap" to "_frozen_importlib" so that the
# import machinery can bootstrap. Do the same for
# importlib._bootstrap_external.
mf.modules["_frozen_importlib"] = mf.modules["importlib._bootstrap"]
mf.modules["_frozen_importlib_external"] = mf.modules["importlib._bootstrap_external"]
# Add the main script as either __main__, or the actual module name.
if python_entry_is_main:
mf.run_script(scriptfile)
else:
mf.load_file(scriptfile)
if debug > 0:
mf.report()
print()
dict = mf.modules
if error_if_any_missing:
missing = mf.any_missing()
if missing:
sys.exit("There are some missing modules: %r" % missing)
# generate output for frozen modules
files = makefreeze.makefreeze(base, dict, debug, custom_entry_point,
fail_import)
# look for unfrozen modules (builtin and of unknown origin)
builtins = []
unknown = []
mods = sorted(dict.keys())
for mod in mods:
if dict[mod].__code__:
continue
if not dict[mod].__file__:
builtins.append(mod)
else:
unknown.append(mod)
# search for unknown modules in extensions directories (not on Windows)
addfiles = []
frozen_extensions = [] # Windows list of modules.
if unknown or (not win and builtins):
if not win:
addfiles, addmods = \
checkextensions.checkextensions(unknown+builtins,
extensions)
for mod in addmods:
if mod in unknown:
unknown.remove(mod)
builtins.append(mod)
else:
# Do the windows thang...
import checkextensions_win32
# Get a list of CExtension instances, each describing a module
# (including its source files)
frozen_extensions = checkextensions_win32.checkextensions(
unknown, extensions, prefix)
for mod in frozen_extensions:
unknown.remove(mod.name)
# report unknown modules
if unknown:
sys.stderr.write('Warning: unknown modules remain: %s\n' %
' '.join(unknown))
# windows gets different treatment
if win:
# Taking a shortcut here...
import winmakemakefile, checkextensions_win32
checkextensions_win32.write_extension_table(extensions_c,
frozen_extensions)
# Create a module definition for the bootstrap C code.
xtras = [frozenmain_c, os.path.basename(frozen_c),
frozendllmain_c, os.path.basename(extensions_c)] + files
maindefn = checkextensions_win32.CExtension( '__main__', xtras )
frozen_extensions.append( maindefn )
with open(makefile, 'w') as outfp:
winmakemakefile.makemakefile(outfp,
locals(),
frozen_extensions,
os.path.basename(target))
return
# generate config.c and Makefile
builtins.sort()
with open(config_c_in) as infp, bkfile.open(config_c, 'w') as outfp:
makeconfig.makeconfig(infp, outfp, builtins)
cflags = ['$(OPT)']
cppflags = defines + includes
libs = [os.path.join(binlib, '$(LDLIBRARY)')]
somevars = {}
if os.path.exists(makefile_in):
makevars = parsesetup.getmakevars(makefile_in)
for key in makevars:
somevars[key] = makevars[key]
somevars['CFLAGS'] = ' '.join(cflags) # override
somevars['CPPFLAGS'] = ' '.join(cppflags) # override
files = [base_config_c, base_frozen_c] + \
files + supp_sources + addfiles + libs + \
['$(MODLIBS)', '$(LIBS)', '$(SYSLIBS)']
with bkfile.open(makefile, 'w') as outfp:
makemakefile.makemakefile(outfp, somevars, files, base_target)
# Done!
if odir:
print('Now run "make" in', odir, end=' ')
print('to build the target:', base_target)
else:
print('Now run "make" to build the target:', base_target)
# Print usage message and exit
def usage(msg):
sys.stdout = sys.stderr
print("Error:", msg)
print("Use ``%s -h'' for help" % sys.argv[0])
sys.exit(2)
main()
| [
"bater.makhabel@gmail.com"
] | bater.makhabel@gmail.com |
6d76dfb817a6e4b6b701679da6ac15531301e20f | b32121fbf9cdbb7043fe255ebc01bc367c78ac73 | /backend/react_native_hook_ex_4479/settings.py | 0e34266f386b5732b712a0a471d9bba374dc8b07 | [] | no_license | crowdbotics-apps/react-native-hook-ex-4479 | 1883330009525d866c06b3b5e8135de5fe6c18a1 | b2f5d65eccbfdf427158ce05c57fba8efc6a0914 | refs/heads/master | 2022-12-12T03:09:00.171484 | 2019-06-10T10:08:40 | 2019-06-10T10:08:40 | 191,143,962 | 0 | 0 | null | 2022-12-09T05:45:04 | 2019-06-10T10:08:22 | Python | UTF-8 | Python | false | false | 4,603 | py | """
Django settings for react_native_hook_ex_4479 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '12nhr61!$2hgc7msxz)-u!imhrof5a@zua&mm91le2@z$kn6n0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'react_native_hook_ex_4479.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'react_native_hook_ex_4479.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
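# Example (connection values illustrative): exporting
#     DATABASE_URL=postgres://user:pass@localhost:5432/appdb
# makes env.db() parse that URL and replace the sqlite3 default defined above.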
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
217e500f19ff35a819836f710b2c6bcdc325240c | 7b1a5db0a067766a9805fe04105f6c7f9ff131f3 | /pysal/explore/segregation/tests/test_multi_gini_seg.py | c1c1475ea19bacb1f18c2f5aae2607416cb87a5b | [] | permissive | ocefpaf/pysal | 2d25b9f3a8bd87a7be3f96b825995a185624e1d0 | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | refs/heads/master | 2020-06-26T17:13:06.016203 | 2019-07-31T19:54:35 | 2019-07-31T19:54:35 | 199,696,188 | 0 | 0 | BSD-3-Clause | 2019-07-30T17:17:19 | 2019-07-30T17:17:18 | null | UTF-8 | Python | false | false | 563 | py | import unittest
import pysal.lib
import geopandas as gpd
import numpy as np
from pysal.explore.segregation.aspatial import MultiGiniSeg
class Multi_Gini_Seg_Tester(unittest.TestCase):
def test_Multi_Gini_Seg(self):
s_map = gpd.read_file(pysal.lib.examples.get_path("sacramentot2.shp"))
        groups_list = ['WHITE_', 'BLACK_', 'ASIAN_', 'HISP_']
df = s_map[groups_list]
index = MultiGiniSeg(df, groups_list)
np.testing.assert_almost_equal(index.statistic, 0.5456349992598081)
if __name__ == '__main__':
unittest.main() | [
"sjsrey@gmail.com"
] | sjsrey@gmail.com |
b5b118a49469fea04673123a5d2e2352799f59f3 | 35517b6f40a0672a9c355fa42c899a03735b7c46 | /rooms/urls.py | 58db40c531cc40b09fe0e3eb62ee8b4d64f1f47f | [] | no_license | byungsujeong/airbnb-clone | 45a1bd074897f97faa5c10a85ae103301cbd9de1 | 158bcae353105c90ad2b1899367c90a67bbac6af | refs/heads/master | 2023-04-24T06:35:04.902040 | 2021-05-09T13:24:34 | 2021-05-09T13:24:34 | 355,564,701 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | from django.urls import path
from . import views
app_name = "rooms"
urlpatterns = [
# path("<int:pk>", views.room_detail, name="detail"),
path("create/", views.CreateRoomView.as_view(), name="create"),
path("<int:pk>", views.RoomDetail.as_view(), name="detail"),
path("<int:pk>/edit/", views.EditRoomView.as_view(), name="edit"),
path("<int:pk>/photos/", views.RoomPhotosView.as_view(), name="photos"),
path("<int:pk>/photos/add", views.AddPthotoView.as_view(), name="add-photo"),
path(
"<int:room_pk>/photos/<int:photo_pk>/delete",
views.delete_photo,
name="delete-photo",
),
path(
"<int:room_pk>/photos/<int:photo_pk>/edit",
views.EditPhotoView.as_view(),
name="edit-photo",
),
path("search", views.SearchView.as_view(), name="search"),
]
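# URL-reversing sketch (assumes this urlconf is included under a "rooms/"
# prefix in the project urls; pk values are placeholders):
#     from django.urls import reverse
#     reverse("rooms:detail", kwargs={"pk": 1})                          # -> "/rooms/1"
#     reverse("rooms:edit-photo", kwargs={"room_pk": 1, "photo_pk": 2})  # -> "/rooms/1/photos/2/edit"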
| [
"byungsu.jeong88@gmail.com"
] | byungsu.jeong88@gmail.com |
fe2fcd6a9850105452a96addca96d68626b41407 | 330dbbefb2e7d6283c812888c89e58498f0b4188 | /_Dist/NeuralNetworks/b_TraditionalML/SVM.py | fd8bc5c1269e79328815274d2214640054952c91 | [
"MIT"
] | permissive | leoatchina/MachineLearning | 93dd3e6c91911f5743617dde1873bf60493171a2 | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | refs/heads/master | 2023-04-23T02:36:36.809015 | 2021-04-29T15:06:00 | 2021-04-29T15:06:00 | 286,432,406 | 0 | 1 | MIT | 2020-08-10T09:30:34 | 2020-08-10T09:30:34 | null | UTF-8 | Python | false | false | 5,527 | py | import os
import sys
root_path = os.path.abspath("../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import numpy as np
import tensorflow as tf
from _Dist.NeuralNetworks.DistBase import Base, AutoBase, AutoMeta, DistMixin, DistMeta
class LinearSVM(Base):
def __init__(self, *args, **kwargs):
super(LinearSVM, self).__init__(*args, **kwargs)
self._name_appendix = "LinearSVM"
self.c = None
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
super(LinearSVM, self).init_from_data(x, y, x_test, y_test, sample_weights, names)
metric = self.model_param_settings.setdefault("metric", "binary_acc")
if metric == "acc":
self.model_param_settings["metric"] = "binary_acc"
self.n_class = 1
def init_model_param_settings(self):
self.model_param_settings.setdefault("lr", 0.01)
self.model_param_settings.setdefault("n_epoch", 10 ** 3)
self.model_param_settings.setdefault("max_epoch", 10 ** 6)
super(LinearSVM, self).init_model_param_settings()
self.c = self.model_param_settings.get("C", 1.)
def _build_model(self, net=None):
self._model_built = True
if net is None:
net = self._tfx
current_dimension = net.shape[1].value
self._output = self._fully_connected_linear(
net, [current_dimension, 1], "_final_projection"
)
def _define_loss_and_train_step(self):
self._loss = self.c * tf.reduce_sum(
tf.maximum(0., 1 - self._tfy * self._output)
) + tf.nn.l2_loss(self._ws[0])
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self._train_step = self._optimizer.minimize(self._loss)
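    # Note: the objective above is the primal soft-margin linear SVM,
    #     C * sum(max(0, 1 - y * f(x))) + (1/2) * ||w||^2,
    # where f(x) is the projection built in _build_model; tf.nn.l2_loss
    # already includes the 1/2 factor.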
def _get_feed_dict(self, x, y=None, weights=None, is_training=False):
if y is not None:
y[y == 0] = -1
return super(LinearSVM, self)._get_feed_dict(x, y, weights, is_training)
def predict_classes(self, x):
return (self._calculate(x, tensor=self._output, is_training=False) >= 0).astype(np.int32)
class SVM(LinearSVM):
def __init__(self, *args, **kwargs):
super(SVM, self).__init__(*args, **kwargs)
self._name_appendix = "SVM"
self._p = self._gamma = None
self._x = self._gram = self._kernel_name = None
@property
def kernel(self):
if self._kernel_name == "linear":
return self.linear
if self._kernel_name == "poly":
return lambda x, y: self.poly(x, y, self._p)
if self._kernel_name == "rbf":
return lambda x, y: self.rbf(x, y, self._gamma)
raise NotImplementedError("Kernel '{}' is not implemented".format(self._kernel_name))
@staticmethod
def linear(x, y):
return x.dot(y.T)
@staticmethod
def poly(x, y, p):
return (x.dot(y.T) + 1) ** p
@staticmethod
def rbf(x, y, gamma):
return np.exp(-gamma * np.sum((x[..., None, :] - y) ** 2, axis=2))
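    # Quick shape check for the kernel helpers above (arrays are illustrative):
    #     import numpy as np
    #     a, b = np.random.randn(3, 2), np.random.randn(4, 2)
    #     SVM.linear(a, b).shape          # -> (3, 4)
    #     SVM.poly(a, b, p=3).shape       # -> (3, 4)
    #     SVM.rbf(a, b, gamma=0.5).shape  # -> (3, 4)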
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
self._x, y = np.atleast_2d(x).astype(np.float32), np.asarray(y, np.float32)
self._p = self.model_param_settings.setdefault("p", 3)
self._gamma = self.model_param_settings.setdefault("gamma", 1 / self._x.shape[1])
self._kernel_name = self.model_param_settings.setdefault("kernel_name", "rbf")
self._gram, x_test = self.kernel(self._x, self._x), self.kernel(x_test, self._x)
super(SVM, self).init_from_data(self._gram, y, x_test, y_test, sample_weights, names)
def init_model_param_settings(self):
super(SVM, self).init_model_param_settings()
self._p = self.model_param_settings["p"]
self._gamma = self.model_param_settings["gamma"]
self._kernel_name = self.model_param_settings["kernel_name"]
def _define_py_collections(self):
super(SVM, self)._define_py_collections()
self.py_collections += ["_x", "_gram"]
def _define_loss_and_train_step(self):
self._loss = self.c * tf.reduce_sum(tf.maximum(0., 1 - self._tfy * self._output)) + 0.5 * tf.matmul(
self._ws[0], tf.matmul(self._gram, self._ws[0]), transpose_a=True
)[0]
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self._train_step = self._optimizer.minimize(self._loss)
def _evaluate(self, x=None, y=None, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
n_sample = self._x.shape[0]
cv_feat_dim = None if x_cv is None else x_cv.shape[1]
test_feat_dim = None if x_test is None else x_test.shape[1]
x_cv = None if x_cv is None else self.kernel(x_cv, self._x) if cv_feat_dim != n_sample else x_cv
x_test = None if x_test is None else self.kernel(x_test, self._x) if test_feat_dim != n_sample else x_test
return super(SVM, self)._evaluate(x, y, x_cv, y_cv, x_test, y_test)
def predict(self, x):
# noinspection PyTypeChecker
return self._predict(self.kernel(x, self._x))
def predict_classes(self, x):
return (self.predict(x) >= 0).astype(np.int32)
def evaluate(self, x, y, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
return self._evaluate(self.kernel(x, self._x), y, x_cv, y_cv, x_test, y_test, metric)
class AutoLinearSVM(AutoBase, LinearSVM, metaclass=AutoMeta):
pass
class DistLinearSVM(AutoLinearSVM, DistMixin, metaclass=DistMeta):
pass
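# End-to-end sketch (illustrative only: the constructor keyword and the ``fit``
# entry point are assumed to come from the Base/DistBase machinery imported at
# the top of this file and are hypothetical here):
#
#     import numpy as np
#     x = np.random.randn(200, 4).astype(np.float32)
#     y = (x[:, 0] + x[:, 1] > 0).astype(np.float32)
#     model = SVM(model_param_settings={"kernel_name": "rbf", "C": 1.0})
#     model.fit(x, y)                       # hypothetical Base method
#     acc = (model.predict_classes(x).ravel() == y).mean()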
| [
"syameimaru.saki@gmail.com"
] | syameimaru.saki@gmail.com |
21415fc4514c480effa3675acde503b0b575df78 | 8602a87e12fe19b28c2e85cfae0bbde27a62855d | /ingenico/connect/sdk/domain/hostedcheckout/create_hosted_checkout_request.py | 28949508bd2f6d00a43dfd02ad2bda2d464ce4f2 | [
"MIT"
] | permissive | king1212/connect-sdk-python2 | 6a687de7967a411fa802477069c7fc7079d059c2 | 203982559c5c10e3dbbb9dfc71123c269908ed26 | refs/heads/master | 2021-07-04T03:10:55.061416 | 2017-09-27T09:29:49 | 2017-09-27T09:29:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,790 | py | # -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.domain.definitions.fraud_fields import FraudFields
from ingenico.connect.sdk.domain.hostedcheckout.definitions.hosted_checkout_specific_input import HostedCheckoutSpecificInput
from ingenico.connect.sdk.domain.payment.definitions.bank_transfer_payment_method_specific_input_base import BankTransferPaymentMethodSpecificInputBase
from ingenico.connect.sdk.domain.payment.definitions.card_payment_method_specific_input_base import CardPaymentMethodSpecificInputBase
from ingenico.connect.sdk.domain.payment.definitions.cash_payment_method_specific_input_base import CashPaymentMethodSpecificInputBase
from ingenico.connect.sdk.domain.payment.definitions.order import Order
from ingenico.connect.sdk.domain.payment.definitions.redirect_payment_method_specific_input_base import RedirectPaymentMethodSpecificInputBase
class CreateHostedCheckoutRequest(DataObject):
__bank_transfer_payment_method_specific_input = None
__card_payment_method_specific_input = None
__cash_payment_method_specific_input = None
__fraud_fields = None
__hosted_checkout_specific_input = None
__order = None
__redirect_payment_method_specific_input = None
@property
def bank_transfer_payment_method_specific_input(self):
"""
| Object containing the specific input details for bank transfer payments
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.bank_transfer_payment_method_specific_input_base.BankTransferPaymentMethodSpecificInputBase`
"""
return self.__bank_transfer_payment_method_specific_input
@bank_transfer_payment_method_specific_input.setter
def bank_transfer_payment_method_specific_input(self, value):
self.__bank_transfer_payment_method_specific_input = value
@property
def card_payment_method_specific_input(self):
"""
| Object containing the specific input details for card payments
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.card_payment_method_specific_input_base.CardPaymentMethodSpecificInputBase`
"""
return self.__card_payment_method_specific_input
@card_payment_method_specific_input.setter
def card_payment_method_specific_input(self, value):
self.__card_payment_method_specific_input = value
@property
def cash_payment_method_specific_input(self):
"""
| Object containing the specific input details for cash payments
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.cash_payment_method_specific_input_base.CashPaymentMethodSpecificInputBase`
"""
return self.__cash_payment_method_specific_input
@cash_payment_method_specific_input.setter
def cash_payment_method_specific_input(self, value):
self.__cash_payment_method_specific_input = value
@property
def fraud_fields(self):
"""
| Object containing additional data that will be used to assess the risk of fraud
Type: :class:`ingenico.connect.sdk.domain.definitions.fraud_fields.FraudFields`
"""
return self.__fraud_fields
@fraud_fields.setter
def fraud_fields(self, value):
self.__fraud_fields = value
@property
def hosted_checkout_specific_input(self):
"""
| Object containing hosted checkout specific data
Type: :class:`ingenico.connect.sdk.domain.hostedcheckout.definitions.hosted_checkout_specific_input.HostedCheckoutSpecificInput`
"""
return self.__hosted_checkout_specific_input
@hosted_checkout_specific_input.setter
def hosted_checkout_specific_input(self, value):
self.__hosted_checkout_specific_input = value
@property
def order(self):
"""
| Order object containing order related data
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.order.Order`
"""
return self.__order
@order.setter
def order(self, value):
self.__order = value
@property
def redirect_payment_method_specific_input(self):
"""
| Object containing the specific input details for payments that involve redirects to 3rd parties to complete, like iDeal and PayPal
Type: :class:`ingenico.connect.sdk.domain.payment.definitions.redirect_payment_method_specific_input_base.RedirectPaymentMethodSpecificInputBase`
"""
return self.__redirect_payment_method_specific_input
@redirect_payment_method_specific_input.setter
def redirect_payment_method_specific_input(self, value):
self.__redirect_payment_method_specific_input = value
def to_dictionary(self):
dictionary = super(CreateHostedCheckoutRequest, self).to_dictionary()
self._add_to_dictionary(dictionary, 'bankTransferPaymentMethodSpecificInput', self.bank_transfer_payment_method_specific_input)
self._add_to_dictionary(dictionary, 'cardPaymentMethodSpecificInput', self.card_payment_method_specific_input)
self._add_to_dictionary(dictionary, 'cashPaymentMethodSpecificInput', self.cash_payment_method_specific_input)
self._add_to_dictionary(dictionary, 'fraudFields', self.fraud_fields)
self._add_to_dictionary(dictionary, 'hostedCheckoutSpecificInput', self.hosted_checkout_specific_input)
self._add_to_dictionary(dictionary, 'order', self.order)
self._add_to_dictionary(dictionary, 'redirectPaymentMethodSpecificInput', self.redirect_payment_method_specific_input)
return dictionary
def from_dictionary(self, dictionary):
super(CreateHostedCheckoutRequest, self).from_dictionary(dictionary)
if 'bankTransferPaymentMethodSpecificInput' in dictionary:
if not isinstance(dictionary['bankTransferPaymentMethodSpecificInput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['bankTransferPaymentMethodSpecificInput']))
value = BankTransferPaymentMethodSpecificInputBase()
self.bank_transfer_payment_method_specific_input = value.from_dictionary(dictionary['bankTransferPaymentMethodSpecificInput'])
if 'cardPaymentMethodSpecificInput' in dictionary:
if not isinstance(dictionary['cardPaymentMethodSpecificInput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['cardPaymentMethodSpecificInput']))
value = CardPaymentMethodSpecificInputBase()
self.card_payment_method_specific_input = value.from_dictionary(dictionary['cardPaymentMethodSpecificInput'])
if 'cashPaymentMethodSpecificInput' in dictionary:
if not isinstance(dictionary['cashPaymentMethodSpecificInput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['cashPaymentMethodSpecificInput']))
value = CashPaymentMethodSpecificInputBase()
self.cash_payment_method_specific_input = value.from_dictionary(dictionary['cashPaymentMethodSpecificInput'])
if 'fraudFields' in dictionary:
if not isinstance(dictionary['fraudFields'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['fraudFields']))
value = FraudFields()
self.fraud_fields = value.from_dictionary(dictionary['fraudFields'])
if 'hostedCheckoutSpecificInput' in dictionary:
if not isinstance(dictionary['hostedCheckoutSpecificInput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['hostedCheckoutSpecificInput']))
value = HostedCheckoutSpecificInput()
self.hosted_checkout_specific_input = value.from_dictionary(dictionary['hostedCheckoutSpecificInput'])
if 'order' in dictionary:
if not isinstance(dictionary['order'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['order']))
value = Order()
self.order = value.from_dictionary(dictionary['order'])
if 'redirectPaymentMethodSpecificInput' in dictionary:
if not isinstance(dictionary['redirectPaymentMethodSpecificInput'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['redirectPaymentMethodSpecificInput']))
value = RedirectPaymentMethodSpecificInputBase()
self.redirect_payment_method_specific_input = value.from_dictionary(dictionary['redirectPaymentMethodSpecificInput'])
return self
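# A minimal construction sketch (field values omitted for brevity; the classes
# come from the imports at the top of this module):
#     request = CreateHostedCheckoutRequest()
#     request.order = Order()
#     request.hosted_checkout_specific_input = HostedCheckoutSpecificInput()
#     payload = request.to_dictionary()    # plain dict, ready to serialize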
| [
"jenkins@isaac.nl"
] | jenkins@isaac.nl |
0ff04073a92aeec8f89f0c43ffbb6fcfa390b892 | 0d0cf0165ca108e8d94056c2bae5ad07fe9f9377 | /20_Introduction_to_Deep_Learning_in_Python/4_Fine-tuning_keras_models/experimentingWithWiderNetworks.py | 0f882da2de07c8d71246cb278cc2a850532cb36e | [] | no_license | MACHEIKH/Datacamp_Machine_Learning_For_Everyone | 550ec4038ebdb69993e16fe22d5136f00101b692 | 9fe8947f490da221430e6dccce6e2165a42470f3 | refs/heads/main | 2023-01-22T06:26:15.996504 | 2020-11-24T11:21:53 | 2020-11-24T11:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | # Experimenting with wider networks
# Now you know everything you need to begin experimenting with different models!
# A model called model_1 has been pre-loaded. You can see a summary of this model printed in the IPython Shell. This is a relatively small network, with only 10 units in each hidden layer.
# In this exercise you'll create a new model called model_2 which is similar to model_1, except it has 100 units in each hidden layer.
# After you create model_2, both models will be fitted, and a graph showing both models' loss scores at each epoch will be shown. We added the argument verbose=False in the fitting commands to print out fewer updates, since you will look at these graphically instead of as text.
# Because you are fitting two models, it will take a moment to see the outputs after you hit run, so be patient.
# Instructions
# Create model_2 to replicate model_1, but use 100 nodes instead of 10 for the first two Dense layers you add with the 'relu' activation. Use 2 nodes for the Dense output layer with 'softmax' as the activation.
# Compile model_2 as you have done with previous models: Using 'adam' as the optimizer, 'categorical_crossentropy' for the loss, and metrics=['accuracy'].
# Hit 'Submit Answer' to fit both models and visualize which one gives better results! Notice the keyword argument verbose=False in model.fit(): This prints out fewer updates, since you'll be evaluating the models graphically instead of through text.
# Define early_stopping_monitor
early_stopping_monitor = EarlyStopping(patience=2)
# Create the new model: model_2
model_2 = Sequential()
# Add the first and second layers
model_2.add(Dense(100, activation='relu', input_shape=input_shape))
model_2.add(Dense(100, activation='relu'))
# Add the output layer
model_2.add(Dense(2, activation='softmax'))
# Compile model_2
model_2.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Fit model_1
model_1_training = model_1.fit(predictors, target, epochs=15, validation_split=0.2, callbacks=[early_stopping_monitor], verbose=False)
# Fit model_2
model_2_training = model_2.fit(predictors, target, epochs=15, validation_split=0.2, callbacks=[early_stopping_monitor], verbose=False)
# Create the plot
plt.plot(model_1_training.history['val_loss'], 'r', model_2_training.history['val_loss'], 'b')
plt.xlabel('Epochs')
plt.ylabel('Validation score')
plt.show()
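# Note: EarlyStopping(patience=2) halts a model's training once its validation
# loss has failed to improve for 2 consecutive epochs, so either curve above
# may end before the 15th epoch.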
| [
"noreply@github.com"
] | MACHEIKH.noreply@github.com |
c0563b44b76353970e95bd6231b816f97c614228 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnpunctur.py | 537380e29f358fdfeef0b0c50412671a8d811c2b | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 297 | py | ii = [('RogePAV.py', 2), ('WilbRLW5.py', 1), ('GellWPT.py', 1), ('AdamWEP.py', 1), ('CoolWHM.py', 1), ('CrokTPS.py', 1), ('WestJIT2.py', 19), ('KirbWPW2.py', 4), ('WestJIT.py', 2), ('FitzRNS4.py', 1), ('CoolWHM3.py', 2), ('BentJRP.py', 1), ('FitzRNS2.py', 1), ('KeigTSS.py', 1), ('ClarGE4.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
a5951f67f2d24f9eb6ee99d86c9191910a281899 | 493e4405c421a897304c4d1227e7d91b83eb890f | /douappbook/spiders/rating.py | 354499ca355f524682ab2213dff63c86eb0add07 | [] | no_license | stipid/douappbook | 7f94d2bde5e3ce1af87acb7636d0a038a39352ba | c9fac02e6713c0781f10ebcd985aa25370389432 | refs/heads/master | 2020-12-24T07:53:57.018981 | 2015-03-29T16:06:48 | 2015-03-29T16:06:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,871 | py | # -*- coding: utf-8 -*-
import random
try:
import simplejson as json
except ImportError:
import json
import furl
from scrapy import Request
from douappbook.spiders import DoubanAppSpider
from douappbook.items import RatingItem
from douappbook.models import CrawledBook
class RatingSpider(DoubanAppSpider):
name = "rating"
allowed_domains = ["douban.com"]
def start_requests(self):
book_ids = CrawledBook.get_book_ids()
# randomize book ids
random.shuffle(book_ids)
for book_id in book_ids:
endpoint = 'book/%d/interests' % book_id
url = self.get_api_url(
endpoint,
start=0,
count=50
)
yield Request(url, callback=self.parse)
if self.settings['DEBUG']:
break
def parse(self, response):
api_url = furl.furl(response.url)
book_id = int(api_url.path.segments[3])
res = json.loads(response.body_as_unicode())
start = res['start']
count = res['count']
total = res['total']
interests = res['interests']
for item in interests:
rating = RatingItem()
rating['id'] = item['id']
rating['book_id'] = book_id
rating['user_id'] = item['user']['id']
rating['username'] = item['user']['uid']
rating['rating'] = item['rating']['value']
rating['vote'] = item['vote_count']
rating['comment'] = item['comment']
yield rating
if start + count < total and not self.settings['DEBUG']:
endpoint = 'book/%d/interests' % book_id
url = self.get_api_url(
endpoint,
start=start + count,
count=50
)
yield Request(url, callback=self.parse)
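            # Worked example of the pagination above: with total=120 and
            # count=50 the spider requests start=0, 50, 100; after that,
            # 100 + 50 >= 120, so no further page is scheduled.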
| [
"messense@icloud.com"
] | messense@icloud.com |
2af8e8f2d3a6794386959b990b732044f55ab12a | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/common/Lib/plat-irix5/GLWS.py | 307029b979ba39dee61a455577a638429f9d3b14 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 635 | py | # 2017.05.04 15:33:48 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-irix5/GLWS.py
from warnings import warnpy3k
warnpy3k('the GLWS module has been removed in Python 3.0', stacklevel=2)
del warnpy3k
NOERROR = 0
NOCONTEXT = -1
NODISPLAY = -2
NOWINDOW = -3
NOGRAPHICS = -4
NOTTOP = -5
NOVISUAL = -6
BUFSIZE = -7
BADWINDOW = -8
ALREADYBOUND = -100
BINDFAILED = -101
SETFAILED = -102
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\plat-irix5\GLWS.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:33:48 Central Europe (Daylight Saving Time)
| [
"info@webium.sk"
] | info@webium.sk |
fc7cc0349b7e668b07121c37dddfc5b443caae69 | 2c8d3e341e813c1b1b88ae824edeaadb366aec0a | /Parser/SW4/SW4/bin/Debug/smo2-24-path-80.py | b17b6673c49fbaf99f0f2de598f08c667841d5b2 | [] | no_license | kiriphorito/MoveAndTag-Manticore | 2e24a958f4941556b2d2714563718069cc5b208f | d07a3d8c0bacf34cf5f433384a6fd45170896b7a | refs/heads/master | 2021-01-20T11:40:49.232449 | 2017-02-26T14:08:48 | 2017-02-26T14:08:48 | 82,548,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,851 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
u"""
@brief: Path Planning Sample Code with Randomized Rapidly-Exploring Random Trees (RRT)
@author: AtsushiSakai
@license: MIT
"""
import shapely
from shapely.geometry import Polygon, LineString, Point, MultiPoint, GeometryCollection
import matplotlib.pyplot as plt
from ast import literal_eval
import datetime
import random
import math
import copy
def drawRobots(robots):
for (x,y) in robots:
plt.plot(x,y,"o")
def drawPolygonNoFill(points,color):
polygon = plt.Polygon(points,color=color,fill=False)
plt.gca().add_patch(polygon)
def drawPolygon(points):
polygon = plt.Polygon(points)
plt.gca().add_patch(polygon)
def drawPolygons(polygons):
try:
for xs in polygons:
drawPolygon(xs)
except ValueError:
print ("no polygons specified")
def drawPolygonsNoFill(polygons):
try:
for xs in polygons:
drawPolygonNoFill(xs,'red')
except ValueError:
print ("no polygons specified")
class RRT():
u"""
Class for RRT Planning
"""
def __init__(self, start, goal, obstacleList,randArea,expandDis=1.0,goalSampleRate=5,maxIter=500):
u"""
Setting Parameter
start:Start Position [x,y]
goal:Goal Position [x,y]
        obstacleList:obstacle Positions, given as a list of polygons [[(x,y),...],...]
        randArea:Random Sampling Area [min,max]
"""
self.start=Node(start[0],start[1])
self.end=Node(goal[0],goal[1])
self.minrand = randArea[0]
self.maxrand = randArea[1]
self.expandDis = expandDis
self.goalSampleRate = goalSampleRate
        self.maxIter = maxIter
        self.obstacleList = obstacleList
def Planning(self,animation=True):
u"""
        Path planning
animation: flag for animation on or off
"""
self.nodeList = [self.start]
while True:
# Random Sampling
if random.randint(0, 100) > self.goalSampleRate:
rnd = [random.uniform(self.minrand, self.maxrand), random.uniform(self.minrand, self.maxrand)]
else:
rnd = [self.end.x, self.end.y]
# Find nearest node
nind = self.GetNearestListIndex(self.nodeList, rnd)
# print(nind)
# expand tree
nearestNode =self.nodeList[nind]
theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
newNode = copy.deepcopy(nearestNode)
newNode.x += self.expandDis * math.cos(theta)
newNode.y += self.expandDis * math.sin(theta)
newNode.parent = nind
            if not self.__CollisionCheck(newNode, self.obstacleList, nearestNode):
continue
self.nodeList.append(newNode)
# check goal
dx = newNode.x - self.end.x
dy = newNode.y - self.end.y
d = math.sqrt(dx * dx + dy * dy)
if d <= self.expandDis:
                if not self.__CollisionCheck(newNode, self.obstacleList, self.end):
continue
else:
#print("Goal!!")
break
if animation:
self.DrawGraph(rnd)
path=[[self.end.x,self.end.y]]
lastIndex = len(self.nodeList) - 1
while self.nodeList[lastIndex].parent is not None:
node = self.nodeList[lastIndex]
path.append([node.x,node.y])
lastIndex = node.parent
path.append([self.start.x, self.start.y])
return path
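    # Sampling note: with the default goalSampleRate=5, random.randint(0, 100)
    # falls in 0..5 roughly 6% of the time, so about one sample in twenty is
    # the goal itself, which biases tree growth toward the goal.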
def DrawGraph(self,rnd=None):
u"""
Draw Graph
"""
import matplotlib.pyplot as plt
plt.clf()
if rnd is not None:
plt.plot(rnd[0], rnd[1], "^k")
for node in self.nodeList:
if node.parent is not None:
plt.plot([node.x, self.nodeList[node.parent].x], [node.y, self.nodeList[node.parent].y], "-g")
# plt.plot([ox for (ox,oy,size) in obstacleList],[oy for (ox,oy,size) in obstacleList], "ok", ms=size * 20)
        drawPolygons(self.obstacleList)
plt.plot(self.start.x, self.start.y, "xr")
plt.plot(self.end.x, self.end.y, "xr")
plt.axis()
plt.grid(True)
plt.pause(0.01)
def GetNearestListIndex(self, nodeList, rnd):
dlist = [(node.x - rnd[0]) ** 2 + (node.y - rnd[1]) ** 2 for node in nodeList]
minind = dlist.index(min(dlist))
return minind
def __CollisionCheck(self, node,obstacleList,nearestNode):
x1 = nearestNode.x
y1 = nearestNode.y
x2 = node.x
y2 = node.y
first = [x1,y1]
second = [x2,y2]
return LineCollisionCheck(first,second,obstacleList)
def LineCollisionCheck(first,second, obstacleList):
    from shapely import geometry
EPS = 1.2e-16 #======= may need to change this value depending on precision
x1 = first[0]
y1 = first[1]
x2 = second[0]
y2 = second[1]
line = geometry.LineString([(x1,y1),(x2,y2)])
#============ changed here =======
# for p1 in obstacleList:
#
# poly = geometry.Polygon(p1)
# ips = line.intersection(poly.boundary)
## print ips
# if type(ips) is Point:
## print "hello"
# if ips.distance(poly) < EPS:
## print "INTERSECT"
# return False
# elif type(ips) is MultiPoint:
# for i in ips:
# if (i.distance(poly) <EPS):
## print "INTERSECT2"
# return False
# elif type(ips) is GeometryCollection:
# continue
# else:
# print (ips,type(ips))
# return False
# return True
#============ changed here =======
for poly in obstacleList:
p1 = Polygon(poly)
if p1.buffer(EPS).intersects(line):
# print "collision"
return False
# print "safe"
return True
#============ changed here =======
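# Editor's sketch: a tiny self-check of LineCollisionCheck above. It is not
# called anywhere in this script, and the unit square below is illustrative
# data, not part of the planner's obstacle set.
def _collision_demo():
    square = [(0, 0), (1, 0), (1, 1), (0, 1)]
    assert not LineCollisionCheck((-1, 0.5), (2, 0.5), [square])  # crosses the square
    assert LineCollisionCheck((-1, 2), (2, 2), [square])          # passes clear of it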
def supersmoothie(smoothie,obstacleList):
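    # Greedy shortcutting (editor's note): for each waypoint, scan backwards
    # from the end of the path for the furthest waypoint reachable by a
    # straight collision-free segment, delete the intermediate waypoints,
    # then advance to the next remaining waypoint.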
path = smoothie
state = True
counter1 = 0
counter2 = len(path)-1
while state:
counter2 = len(path)-1
if counter1 == counter2:
state = False
break
coord1 = path[counter1]
for counter in range(counter2,0,-1):
coord2 = path[counter]
if LineCollisionCheck(coord1,coord2,obstacleList): #if no obstacle
del path[(counter1+1):(counter)]
break
counter1 += 1
return path
class Node():
u"""
RRT Node
"""
def __init__(self, x, y):
self.x = x
self.y = y
self.parent = None
def rrtpath(obstacles,startcoord,goalcoord,randAreas):
rrt = RRT(start=startcoord, goal=goalcoord,randArea = randAreas, obstacleList=obstacles)
path= rrt.Planning(animation=False)
    # rrt.DrawGraph()
# plt.plot([x for (x,y) in path], [y for (x,y) in path],'-r')
# print path
smoothiePath = supersmoothie(path,obstacles)
plt.plot([x for (x,y) in smoothiePath], [y for (x,y) in smoothiePath],'-r')
smoothiePath.reverse()
#print smoothiePath
return smoothiePath
obstacleList = [[(0.10887588040823118,0.09142047210771191),(1.1039623485842764,-0.007589228694404357),(0.905942946980044,-1.997762165046495),(-3.074402925724136,-1.601723361838031),(-3.1734126265262517,-2.596809830014076),(-1.1832396901741624,-2.794829231618308),(-1.282249390976279,-3.789915699794353),(-13.223287009088821,-2.6017992901689646),(-13.42130641069305,-4.591972226521057),(-12.426219942517006,-4.690981927323172),(-13.0202781473297,-10.66150073637944),(-12.025191679153657,-10.760510437181557),(-11.431133474340962,-4.789991628125285),(-1.4802687925805116,-5.780088636146442),(-1.5792784933826283,-6.7751751043224875),(-0.5841920252065823,-6.874184805124604),(-0.18815322199811743,-2.8938389324204237),(0.8069332461779276,-2.99284863322254),(0.5099041437715794,-5.978108037750675),(1.5049906119476242,-6.077117738552792),(1.6040003127497404,-5.082031270376746),(2.599086780925785,-5.181040971178863),(1.2129509696961562,-19.112251525643497),(-4.75756783936011,-18.518193320830797),(-4.9555872409643476,-20.50836625718289),(1.0149315680919284,-21.102424461995586),(0.8169121664876853,-23.092597398347678),(3.8021715710158395,-23.389626500754026),(4.000190972620064,-21.399453564401938),(39.82330382695769,-24.963802793278123),(40.41736203177039,-18.993283984221858),(4.594249177432759,-15.428934755345662),(5.584346185453922,-5.47807007358521),(7.574519121806013,-5.676089475189441),(7.6735288226081275,-4.681003007013399),(10.658788227136265,-4.978032109419745),(10.757797927938379,-3.9829456412437065),(7.772538523410243,-3.6859165388373536),(7.8715482242123604,-2.6908300706613093),(1.9010294151560894,-2.0967718658486114),(2.0990488167603214,-0.10659892949652063),(3.0941352849363666,-0.2056086302986369),(3.1931449857384826,0.7894778378774103),(10.158750262970798,0.09640993226260114),(10.257759963772914,1.0914964004386465),(8.267587027420824,1.2895158020428774),(8.36659672822294,2.2846022702189224),(14.33711553727921,1.6905440654062291),(14.436125238081328,2.685630533582274),(8.465606429025057,3.2796887383949676),(8.56461612982717,4.274775206571014),(24.485999620643895,2.6906199937371635),(24.684019022248126,4.680792930089255),(8.762635531431403,6.264948142923105),(8.960654933035634,8.255121079275195),(7.965568464859588,8.354130780077309),(7.767549063255357,6.3639578437252196),(6.772462595079312,6.462967544527336),(7.168501398287773,10.443313417231517),(5.178328461935683,10.641332818835748),(4.78228965872722,6.660986946131567),(3.7872031905511765,6.759996646933682),(3.6881934897490614,5.764910178757638),(7.668539362453242,5.368871375549174),(7.47051996084901,3.378698439197083),(5.480347024496919,3.576717840801315),(5.3813373236948046,2.58163137262527),(7.371510260046895,2.3836119710210384),(7.272500559244779,1.388525502844993),(3.2921546865405977,1.7845643060534553),(3.3911643873427133,2.7796507742295002),(2.3960779191666686,2.8786604750316163),(3.1881555255835923,10.839352220439979),(2.193069057407549,10.938361921242095),(1.9950496558033173,8.948188984890006),(0.004876719451224712,9.146208386494235),(0.4999252234618028,14.12164072737446),(-0.4951612447142417,14.220650428176578),(-0.9902097487248201,9.245218087296351),(-1.9852962169008654,9.344227788098467),(-1.2922283112860569,16.309833065330782),(2.688117561418126,15.913794262122323),(2.7871272622202405,16.908880730298367),(-1.1932186104839406,17.304919533506826),(-0.9951992088797089,19.29509246985892),(19.90161662281724,17.21588875301449),(20.000626323619358,18.210975221190534),(27.961318069027723,17.418897614773613),(28.060327769829833,18.4139840829
49657),(20.09963602442147,19.20606168936658),(20.19864572522359,20.201148157542626),(-0.6981701064733642,22.280351874387055),(-0.4011410040670157,25.26561127891519),(-4.3814868767711985,25.661650082123654),(-5.965642089605046,9.740266591306927),(-6.960728557781089,9.839276292109044),(-1.4161853128626316,65.56411850996759),(-9.376877058270994,66.3561961163845),(-14.921420303189453,10.63135389852597),(-18.901766175893634,11.02739270173443),(-18.208698270278827,17.99299797896674),(-19.20378473845487,18.092007679768855),(-19.89685264406968,11.126402402536549),(-21.887025580421767,11.324421804140776),(-22.28306438363023,7.344075931436599),(1.599010852594854,4.967843112185823),(1.4009914509906234,2.9776701758337323),(0.4059049828145781,3.076679876635848),(0.30689528201246247,2.0815934084598027),(-6.658709995219853,2.774661314074613),(-6.75771969602197,1.7795748458985678),(0.20788558121034684,1.0865069402837573)],[(-24.246837974034875,-28.490829719121777),(-24.743325915820897,-29.358873337646244),(-27.34745677139434,-27.869409512288186),(-27.843944713180363,-28.737453130812685),(-26.97590109465588,-29.2339410725987),(-29.458340803586015,-33.574159165221104),(-28.590297185061505,-34.07064710700715),(-26.107857476131404,-29.730429014384715),(-25.23981385760692,-30.226916956170736),(-25.736301799392958,-31.094960574695207),(-24.868258180868466,-31.591448516481243),(-25.861234064440506,-33.32753575353023),(-24.993190445916074,-33.82402369531623),(-24.000214562343995,-32.087936458267265),(-23.1321709438195,-32.584424400053294),(-22.63568300203349,-31.716380781528816),(-21.76763938350899,-32.212868723314834),(-23.75359115065313,-35.685043197412774),(-26.35772200622651,-34.1955793720547),(-26.85420994801257,-35.063622990579184),(-24.250079092439112,-36.553086815937256),(-24.74656703422513,-37.42113043446172),(-23.01047979717619,-38.41410631803377),(-22.51399185539019,-37.54606269950931),(-21.64594823686566,-38.04255064129532),(-22.63892412043774,-39.778637878344306),(-24.37501135748671,-38.785661994772234),(-24.871499299272728,-39.65370561329672),(-23.135412062223804,-40.64668149686878),(-23.63190000400979,-41.514725115393276),(-22.76385638548528,-42.01121305717929),(-20.777904618341182,-38.53903858308134),(-19.90986099981669,-39.03552652486735),(-22.392300708746866,-43.37574461748979),(-31.072736893991625,-38.41086519962956),(-32.06571277756373,-40.14695243667849),(-23.385276592318977,-45.111831854538735),(-24.378252475890957,-46.847919091587706),(-28.718470568513336,-44.3654793826576),(-29.21495851029926,-45.23352300118211),(-27.478871273250494,-46.22649888475411),(-27.975359215036338,-47.094542503278625),(-31.447533689134232,-45.10859073613457),(-31.944021630920307,-45.97663435465901),(-30.20793439387137,-46.96961023823104),(-30.704422335657462,-47.83765385675554),(-35.04464042827985,-45.35521414782542),(-34.051664544707755,-43.619126910776444),(-34.91970816323219,-43.1226389689905),(-35.912684046804266,-44.85872620603948),(-36.78072766532869,-44.362238264253435),(-37.277215607114755,-45.230281882777916),(-36.4091719885903,-45.726769824563874),(-37.402147872162224,-47.46285706161295),(-45.21454043888265,-42.994465585538705),(-45.711028380668665,-43.86250920406323),(-50.05124647329119,-41.38006949513293),(-50.54773441507729,-42.24811311365735),(-46.20751632245484,-44.73055282258757),(-46.70400426424072,-45.59859644111208),(-38.8916116975204,-50.066987917186374),(-39.38809963930646,-50.935031535710806),(-41.12418687635535,-49.942055652138826),(-41.620674818141424,-50.81009927066326),(-39.88458758109249,
-51.80307515423527),(-40.38107552287865,-52.671118772759655),(-39.51303190435404,-53.16760671454575),(-35.54112837006582,-46.2232577663499),(-34.673084751541374,-46.71974570813593),(-36.65903651868551,-50.191920182233844),(-34.92294928163646,-51.184896065805916),(-32.93699751449242,-47.712721591707975),(-31.2009102774434,-48.70569747528002),(-32.19388616101542,-50.441784712329),(-31.325842542490946,-50.93827265411502),(-29.339890775346838,-47.466098180017084),(-28.47184715682239,-47.962586121803085),(-28.968335098608424,-48.830629740327595),(-28.10029148008388,-49.327117682113595),(-26.610827654725956,-46.72298682654018),(-24.874740417676946,-47.715962710112194),(-25.867716301248908,-49.45204994716118),(-24.13162906419995,-50.44502583073324),(-23.635141122414055,-49.576982212208726),(-21.89905388536502,-50.56995809578078),(-22.892029768937146,-52.30604533282974),(-22.023986150412732,-52.8025332746158),(-21.031010266840603,-51.06644603756682),(-20.162966648316164,-51.562933979352835),(-19.169990764744092,-49.8268467423039),(-22.642165238842082,-47.8408949751598),(-18.17377376276776,-40.02850240843944),(-16.437686525718796,-41.021478292011466),(-19.41661417643492,-46.229740003158376),(-18.548570557910452,-46.72622794494441),(-15.569642907194252,-41.51796623379748),(-14.701599288669806,-42.01445417558353),(-18.673502822957982,-48.9588031237794),(-17.805459204433493,-49.45529106556539),(-16.81248332086139,-47.7192038285164),(-15.944439702336968,-48.215691770302485),(-20.412831178411377,-56.028084337022875),(-17.808700322837804,-57.51754816238086),(-13.340308846763563,-49.70515559566057),(-12.472265228239134,-50.201643537446586),(-16.444168762527234,-57.14599248564241),(-14.708081525478326,-58.13896836921445),(-12.722129758334253,-54.66679389511654),(-2.3056063360404835,-60.62464919654888),(-1.3126304524683619,-58.88856195949985),(-11.729153874762188,-52.930706658067635),(-10.736177991190118,-51.19461942101867),(-9.868134372665613,-51.691107362804615),(-9.371646430879542,-50.82306374428009),(-16.31599537907546,-46.85116020999197),(-15.323019495503349,-45.11507297294297),(-6.642583310258633,-50.07995239080324),(-5.649607426686526,-48.34386515375431),(-14.330043611931314,-43.37898573589403),(-13.833555670145318,-42.51094211736956),(-8.62529395899841,-45.48986976808571),(-7.632318075426419,-43.75378253103675),(-21.521015971818098,-35.80997546246032),(-21.0245280300321,-34.941931843935855),(-18.420397174458653,-36.43139566929394),(-17.923909232672553,-35.56335205076942),(-20.528040088246062,-34.073888225411395),(-20.031552146460037,-33.2058446068869),(-15.69133405383761,-35.688284315816986),(-15.194846112051579,-34.82024069729252),(-16.06288973057603,-34.32375275550647),(-15.069913847004027,-32.58766551845756),(-9.861652135857122,-35.566593169173736),(-10.854628019429134,-37.30268040622266),(-9.986584400904718,-37.79916834800872),(-8.993608517332634,-36.063081110959686),(-8.125564898808125,-36.559569052745694),(-7.629076957022125,-35.69152543422122),(-14.573425905217983,-31.71962189993304),(-13.58045002164594,-29.983534662884065),(-8.372188310499041,-32.96246231360016),(-7.875700368713023,-32.09441869507579),(-10.479831224286471,-30.60495486971766),(-9.486855340714438,-28.868867632668607),(-10.35489895923891,-28.37237969088267),(-11.347874842810956,-30.108466927931588),(-13.083962079859937,-29.115491044359608),(-12.090986196287885,-27.379403807310606),(-13.82707343333683,-26.386427923738584),(-13.330585491550817,-25.51838430521413),(-8.12232378040396,-28.497311955930265),(-7.129347896831874,-26.7612
2471888129),(-12.33760960797881,-23.78229706816507),(-11.841121666192787,-22.91425344964059),(1.1795326116744818,-30.361572576431005),(0.18655672810243829,-32.09765981347989),(-1.5495305089465425,-31.104683929907907),(-2.0460184507325394,-31.972727548432406),(-0.3099312136836119,-32.96570343200437),(-1.3029070972557086,-34.70179066905344),(-6.511168808402523,-31.72286301833727),(-7.0076567501885485,-32.59090663686165),(-1.7993950390416522,-35.56983428757784),(-2.295882980827667,-36.43787790610241),(-1.427839362303235,-36.934365847888465),(1.0546003466269305,-32.59414775526602),(3.658731202200382,-34.08361158062402),(4.155219143986418,-33.215567962099534),(1.5510882884129416,-31.726104136741554),(2.04757623019902,-30.858060518217),(6.387794322821403,-33.34050022714716),(6.884282264607407,-32.47245660862261),(11.224500357229893,-34.954896317552745),(11.720988299015872,-34.086852699028185),(7.380770206393517,-31.604412990098258),(7.8772581481793935,-30.736369371573765),(-10.351657840834768,-20.310122594067117),(-9.85516989904869,-19.44207897554267),(-10.723213517573202,-18.9455910337566),(-11.219701459359246,-19.813634652281053),(-47.67753343738744,1.0388589027317927),(-50.65646108810366,-4.1694028084151675),(-14.198629110075316,-25.021896363428016),(-14.695117051861338,-25.889939981952526),(-15.563160670385818,-25.393452040166505),(-16.05964861217184,-26.26149565869102),(-29.948346508563603,-18.31768859011467),(-30.941322392135604,-20.053775827163616),(-17.052624495743892,-27.997582895739992),(-19.535064204674015,-32.33780098836241),(-20.403107823198514,-31.84131304657641),(-19.41013193962644,-30.105225809527415),(-20.27817555815091,-29.60873786774138),(-21.27115144172298,-31.344825104790367),(-22.13919506024745,-30.848337163004338),(-21.642707118461438,-29.98029354447985),(-22.510750736985923,-29.48380560269384),(-21.02128691162784,-26.87967474712036),(-21.88933053015232,-26.38318680533435),(-22.385818471938343,-27.25123042385883),(-24.121905708987317,-26.25825454028678),(-24.61839365077332,-27.12629815881128),(-22.88230641372437,-28.11927404238332),(-23.378794355510404,-28.98731766090781)],[(45.20081810829839,2.198693603713543),(46.17018887941854,1.9530916735543375),(45.18778115878171,-1.9243914109262543),(46.157151929901886,-2.169993341085459),(47.13955965053867,1.7074897433951215),(48.108930421658826,1.4618878132359123),(48.354532351818044,2.431258584356081),(55.14012774965909,0.7120450732416423),(55.385729679818304,1.681415844361834),(53.446988137577996,2.1726197046802467),(54.42939585821482,6.050102789160792),(68.0005866538969,2.6116757669319846),(49.334839961797165,-71.06050283819941),(54.18169381739828,-72.28851248899544),(50.25206293485121,-87.79844482691769),(58.00702910381231,-89.7632602681915),(61.93665998635927,-74.25332793026902),(62.906030757479854,-74.49892986042826),(56.02917671302153,-101.64131145179253),(59.906659797502286,-102.62371917242933),(60.88906751813935,-98.7462360879487),(91.90893219398414,-106.60549785304322),(92.8913399146206,-102.72801476856276),(61.871475238776185,-94.868753003468),(66.78351384196029,-75.48133758106509),(67.7528846130803,-75.72693951122432),(86.41863130517979,-2.0547609060927727),(95.14296824526107,-4.265178277525488),(96.86218175637549,2.5204171203155603),(56.14860936932918,12.835698187001881),(56.88541515980677,15.74381050036238),(54.94667361756656,16.235014360680704),(51.508246595337695,2.6638235649986415),(48.60013428197725,3.400629355476238),(48.84573621213646,4.370000126596378),(47.8763654410163,4.615602056755581),(48.367569301334704,6.5
54343598995885),(50.306310843575005,6.063139738677501),(50.55191277373424,7.0325105097976),(48.613171231493894,7.523714370116037),(48.858773161653104,8.493085141236191),(47.88940239053292,8.738687071395411),(47.64380046037377,7.76931630027523),(46.67442968925359,8.01491823043444),(47.16563354957203,9.95365977267471),(46.196262778451874,10.199261702833924),(45.705058918133474,8.260520160593641),(44.73568814701327,8.506122090752836),(48.910920959719704,24.985425199795415),(60.54337021316148,22.03820203788503),(61.03457407347996,23.976943580125287),(63.94268638684039,23.240137789647708),(64.18828831699955,24.20950856076788),(61.280176003639134,24.94631435124549),(61.52577793379831,25.915685122365513),(49.89332868035652,28.862908284276024),(50.63013447083407,31.77102059763644),(49.66076369971399,32.01662252779561),(50.39756949019143,34.92473484115609),(49.428198719071446,35.1703367713153),(48.69139292859387,32.26222445795486),(47.72202215747368,32.50782638811402),(47.47642022731442,31.53845561699389),(23.242150949310854,37.678503870974),(22.014141298514563,32.8316500153731),(46.2484105765184,26.691601761393162),(45.26600285588167,22.814118676912575),(29.756070517959312,26.74374955945985),(27.79125507668565,18.988783390498536),(43.30118741460808,15.059152507951346),(42.56438162413046,12.151040194590916),(36.748156997409545,13.624651775546113),(36.50255506725035,12.655281004425955),(42.31877969397127,11.181669423470737),(41.82757583365285,9.242927881230445),(39.88883429141257,9.73413174154887),(39.64323236125336,8.764760970428723),(47.398198530214565,6.799945529155094),(46.906994669896164,4.8612039869147825),(45.937623898775996,5.1068059170739915),(45.6920219686168,4.137435145953841),(41.81453888413622,5.119842866590661),(41.568936953976994,4.150472095470501),(45.4464200384576,3.168064374833683)],[(-34.470071299470696,16.54613794841084),(-35.058908409090435,17.354389677969554),(-33.442404949972996,18.532063897209046),(-34.03124205959274,19.340315626767772),(-35.64774551871018,18.16264140752828),(-36.236582628329906,18.970893137087007),(-38.66133781700609,17.20438180822779),(-38.072500707386354,16.39613007866905),(-39.689004166503764,15.218455859429536),(-49.110397920419764,28.15048353236913),(-37.79487370659765,36.39420306704554),(-23.073945966104,16.18790982807754),(-19.0326873183104,19.132095376176387),(-33.75361505880397,39.338388615144254),(-32.137111599686655,40.516062834383746),(-32.725948709306316,41.324314563942494),(-31.10944525018884,42.50198878318193),(-31.69828235980868,43.31024051274066),(-33.31478581892613,42.1325662935012),(-33.90362292854584,42.94081802305989),(-50.876909249278924,30.575238721045288),(-51.46574635889863,31.38349045060414),(-53.082249818016166,30.205816231364473),(-57.204109585354246,35.863578338275644),(-58.012361314913115,35.274741228655756),(-56.834687095673615,33.65823776953829),(-60.067694013908444,31.302889331059422),(-64.77839089086649,37.76890316752914),(-65.58664262042525,37.18006605790926),(-60.87594574346719,30.71405222143965),(-62.49244920258464,29.536378002200134),(-61.90361209296492,28.728126272641532),(-56.24584998605381,32.84998603997967),(-55.06817576681435,31.233482580862223),(-57.49293095549058,29.466971252002843),(-56.90409384587082,28.65871952244413),(-54.479338657194624,30.425230851303468),(-53.89050154757489,29.6169791217447),(-54.69875327713352,29.028142012125006),(-42.922011084738635,12.863107420950525),(-43.73026281429737,12.274270311330774),(-43.141425704677594,11.466018581772055),(-40.716670516001486,13.232529910631323),(-36.005973639043
39,6.7665160741615615),(-34.389470179925986,7.944190293401066),(-39.100167056884025,14.410204129870817),(-37.483663597766586,15.58787834911033),(-36.89482648814685,14.779626619551598),(-36.08657475858812,15.36846372917135),(-33.73122632010911,12.13545681093646),(-32.92297459055042,12.724293920556219),(-34.100648809789924,14.340797379673642),(-30.059390161996323,17.284982927772393),(-30.648227271616072,18.09323465733113),(-34.68948591940967,15.14904910923237),(-35.278323029029416,15.957300838791099)],[(10.627782914793798,-49.8988543995859),(11.159320827873213,-50.74588890122038),(9.465251824604236,-51.8089647273792),(7.339100172286599,-48.420826720841255),(6.492065670652125,-48.95236463392063),(7.555141496810892,-50.64643363718961),(5.861072493541947,-51.709509463348496),(6.392610406621339,-52.556543964982936),(8.08667940989033,-51.49346813882412),(8.618217322969738,-52.34050264045863),(7.7711828213352545,-52.872040553538035),(8.302720734414669,-53.71907505517252),(9.14975523604913,-53.18753714209307),(10.744368975287435,-55.728640646996574),(6.509196467115033,-58.38633021239367),(7.040734380194433,-59.23336471402817),(11.275906888366897,-56.57567514863104),(11.807444801446287,-57.422709650265524),(3.337099785101419,-62.738088781059695),(4.4001756112602575,-64.43215778432868),(12.870520627605195,-59.11677865353448),(13.933596453763963,-60.81084765680346),(12.239527450494982,-61.873923482962304),(12.77106536357442,-62.720957984596794),(14.46513436684336,-61.657882158437936),(14.99667227992279,-62.50491666007247),(-5.3321557593048095,-75.26182657397844),(-4.800617846225377,-76.10886107561296),(-5.6476523478599105,-76.64039898869238),(-6.710728174018566,-74.94632998542353),(-7.557762675653134,-75.47786789850284),(-6.494686849494283,-77.17193690177166),(-8.188755852763244,-78.23501272793068),(-9.783369592001527,-75.69390922302719),(-10.630404093636024,-76.22544713610667),(-9.035790354397978,-78.76655064100991),(-11.576893859301194,-80.36116438024834),(-11.04535594622184,-81.2081988818828),(-4.269079933145939,-76.95589557724749),(-3.737542020066501,-77.80293007888193),(11.509079009354028,-68.23524764345244),(12.040616922433557,-69.08228214508685),(9.499513417530245,-70.67689588432515),(10.031051330609444,-71.52393038595963),(10.87808583224389,-70.9923924728802),(11.941161658402773,-72.68646147614916),(12.788196160037426,-72.15492356306973),(11.725120333878444,-70.46085455980082),(12.572154835513047,-69.92931664672136),(13.103692748592517,-70.77635114835587),(13.950727250226974,-70.24481323527642),(12.35611351098867,-67.70370973037298),(13.203148012623139,-67.17217181729359),(15.860837578020202,-71.40734432546598),(-15.479438982455584,-91.07424710940448),(-16.542514808614435,-89.38017810613539),(-8.072169792269815,-84.06479897534123),(-9.135245618428367,-82.37072997207241),(-11.676349123331942,-83.9653437113106),(-15.397114514887974,-78.03610219986919),(-16.24414901652243,-78.5676401129486),(-12.523383624966357,-84.49688162438997),(-13.37041812660102,-85.02841953746949),(-17.622721431236283,-78.25214352439353),(-10.846445418160403,-73.99984021975813),(-11.909521244319315,-72.30577121648932),(-18.685797257395127,-76.5580745211246),(-19.74887308355403,-74.86400551785552),(-22.28997658845745,-76.45861925709389),(-15.911521631504414,-86.62303327670764),(-17.60559063477342,-87.68610910286644),(-18.668666460932066,-85.99204009959763),(-21.209769965835616,-87.58665383883576),(-18.020542487359048,-92.66886084864277),(-23.94978399880044,-96.38962624019877),(-53.715907131248144,-48.95569414866712),(-59.6451
4864268929,-52.67645954022305),(-29.879025510241938,-100.11039163175451),(-31.573094513511123,-101.17346745791325),(-29.446942861192838,-104.56160546445153),(-34.529149871000286,-107.75083294292766),(-33.9976119579205,-108.59786744456235),(-28.91540494811376,-105.40863996608584),(-27.852329121955044,-107.10270896935472),(19.58160296957632,-77.3365858369073),(20.644678795735004,-79.03065484017638),(22.338747799003976,-77.96757901401747),(14.897217015892071,-66.10909599113474),(16.59128601916107,-65.04602016497589),(17.65436184531997,-66.74008916824482),(19.348430848588798,-65.67701334208603),(18.285355022430068,-63.982944338817134),(19.132389524064386,-63.45140642573755),(31.889299437970685,-83.78023446496515),(34.43040294287405,-82.18562072572686),(21.67349302896802,-61.85679268649932),(22.520527530602457,-61.325254773419914),(28.898982487555504,-71.48966879303374),(30.593051490824465,-70.42659296687485),(26.872286099268496,-64.4973514554335),(30.260424105806496,-62.371199803115694),(29.19734827964763,-60.677130799846815),(25.809210273109628,-62.80328245216454),(24.214596533871443,-60.26217894726105),(33.531976051850705,-54.41526190338752),(33.0004381387713,-53.568227401753),(37.23561064694367,-50.91053783635592),(36.704072733864294,-50.063503334721396),(35.857038232229826,-50.59504124780089),(33.73088657991208,-47.20690324126298),(32.883852078277684,-47.73844115434232),(35.01000373059533,-51.12657916088025),(34.16296922896085,-51.658117073959744),(30.442203837404936,-45.72887556251833),(29.595169335770443,-46.26041347559776),(33.315934727326336,-52.18965498703921),(32.46890022569187,-52.72119290011854),(31.937362312612482,-51.87415839848412),(16.6907412831918,-61.441840833913616),(15.627665457032936,-59.74777183064464),(29.18021748318467,-51.24316522137395),(27.05406583086698,-47.85502721483602),(13.501513804715252,-56.35963382410671),(10.843824239318108,-52.124461315934276),(11.690858740952606,-51.59292340285486),(12.22239665403203,-52.43995790448935),(13.06943115566651,-51.908419991409936),(14.132506981825376,-53.602488994678886),(14.979541483459844,-53.07095108159949),(13.91646565730101,-51.376882078330524),(14.763500158935507,-50.8453441652511),(14.231962245856105,-49.99830966361662),(15.926031249125057,-48.93523383745781),(18.052182901442713,-52.323371843995716),(19.746251904711695,-51.260296017836865),(19.214713991632273,-50.41326151620239),(21.755817496535748,-48.818647776964106),(21.224279583456287,-47.9716132753296),(18.683176078552805,-49.56622701456792),(17.620100252394014,-47.87215801129895),(20.16120375729747,-46.2775442720607),(19.62966584421806,-45.43050977042622),(18.782631342583592,-45.96204768350566),(17.71955551642473,-44.26797868023668),(19.413624519693684,-43.20490285407781),(22.071314085090815,-47.44007536225024),(22.918348586725273,-46.908537449170794),(22.386810673645847,-46.06150294753635),(24.92791417854935,-44.46688920829816),(26.52252791778753,-47.00799271320142),(27.369562419422017,-46.47645480012204),(25.77494868018381,-43.93535129521869),(26.621983181818237,-43.403813382139205),(27.685059007977063,-45.09788238540818),(28.53209350961162,-44.56634447232869),(27.46901768345273,-42.87227546905981),(29.163086686721766,-41.80919964290105),(28.631548773642415,-40.96216514126659),(25.24341076710428,-43.08831679358423),(20.991107462469074,-36.312040780508354),(20.144072960834606,-36.84357869358779),(24.396376265469925,-43.619854706663666),(21.85527276056645,-45.2144684459019),(21.32373484748704,-44.36743394426742),(23.017803850755957,-43.304358118108546),(22.486265937
676553,-42.45732361647409),(20.792196934407578,-43.52039944263289),(20.26065902132818,-42.673364940998404),(21.10769352296263,-42.14182702791897),(20.576155609883262,-41.29479252628457),(17.188017603345365,-43.42094417860225),(16.656479690265904,-42.573909676967716),(15.809445188631472,-43.10544759004716),(17.93559684094908,-46.493585596585056),(17.088562339314596,-47.02512350966448),(14.430872773917537,-42.7899510014921),(20.360114285358936,-39.06918560993616),(19.828576372279564,-38.22215110830167),(16.440438365741606,-40.34830276061938),(12.71967297418571,-34.41906124917794),(11.872638472551193,-34.95059916225735),(15.593403864107128,-40.87984067369881),(13.899334860838144,-41.942916499857574),(13.36779694775875,-41.09588199822308),(12.52076244612426,-41.62741991130251),(16.24152783768012,-47.55666142274392),(15.394493336045642,-48.08819933582332),(14.33141750988683,-46.39413033255436),(13.484383008252314,-46.925668245633744),(14.547458834411149,-48.61973724890274),(13.700424332776656,-49.15127516198211),(13.168886419697259,-48.30424066034767),(12.321851918062766,-48.835778573427106),(9.13262443958629,-43.75357156362015),(8.28558993795181,-44.285109476699574),(11.474817416428296,-49.36731648650649)]]
rand = (-120,107)
content = ""
starttime = datetime.datetime.now()
print "Path 80 of 111"
path = []
start = (-48.341654450813245,15.4193353577665)
goal = (-45.054596209354955,63.325874437404536)
print " Node 1 and 2 of 2"
path += rrtpath(obstacleList,start,goal,rand)
pathStr = str(path)[1:-1] + ";"
pathStr = pathStr.replace("[", "(")
pathStr = pathStr.replace("]", ")")
f = open('smo2sol-24-path-80.txt', 'a+')
f.write(pathStr)
f.close()
| [
"zcabwhy@ucl.ac.uk"
] | zcabwhy@ucl.ac.uk |
9d6fabcf0453c8517213f483a0dd28f5050d0ae6 | 0a11a15cf64e25585d28f484bb2118e8f858cfeb | /알고리즘/알고리즘문제/5097_회전.py | 945a166c127ee3c6ff9b7e8a6fcbc6b7122dddeb | [] | no_license | seoul-ssafy-class-2-studyclub/GaYoung_SSAFY | 7d9a44afd0dff13fe2ba21f76d0d99c082972116 | 23e0b491d95ffd9c7a74b7f3f74436fe71ed987d | refs/heads/master | 2021-06-30T09:09:00.646827 | 2020-11-30T14:09:03 | 2020-11-30T14:09:03 | 197,476,649 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | for t in range(int(input())):
N, M = map(int, input().split())
data = list(map(int, input().split()))
queue = [data.pop(0)]
for m in range(M):
data.append(queue.pop(0))
queue.append(data.pop(0))
result = queue.pop()
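    # Editor's note: the queue always holds exactly one element, the value
    # originally at index m after m swap steps, so the loop above is
    # equivalent to indexing the original list directly at M % N.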
print('#{} {}'.format(t+1, result)) | [
"gyyoon4u@naver.com"
] | gyyoon4u@naver.com |
2cf4092cdf82471b06620d69938f06d6c15b881c | a2cff5d93cfed6b85598992dbf8bba66f735ec65 | /lesson01/flask-todo/venv/lib/python3.7/encodings/gb18030.py | b0546fa74af6a29211e1f34c9c9bc77e9ca8eba7 | [] | no_license | UncleanlyCleric/Python_230 | 79091f68a2de3c07dabeed8b01838ba139b37d08 | 69f72d731d03b6c9e9976d623f10f1c2ab0abec7 | refs/heads/master | 2022-12-12T22:37:41.658498 | 2019-08-12T18:41:49 | 2019-08-12T18:41:49 | 192,572,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | /usr/local/Cellar/python/3.7.2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/encodings/gb18030.py | [
"junyatwin@gmail.com"
] | junyatwin@gmail.com |
585733c3996bda61a1e80b9902673d6f8d8a8733 | aa49120740b051eed9b7199340b371a9831c3050 | /sum_submatrix.py | 6b338ee7bb7f248c32506d5981c498d60b9ed51e | [] | no_license | ashutosh-narkar/LeetCode | cd8d75389e1ab730b34ecd860b317b331b1dfa97 | b62862b90886f85c33271b881ac1365871731dcc | refs/heads/master | 2021-05-07T08:37:42.536436 | 2017-11-22T05:18:23 | 2017-11-22T05:18:23 | 109,366,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | #!/usr/bin/env python
'''
Given a matrix, calculate the sum of a sub matrix given the start and end indices of the submatrix
'''
def sumSubMatrix(matrix, start_row, start_col, end_row, end_col):
if not matrix:
return 0
nrows = len(matrix)
ncols = len(matrix[0])
if start_row >= nrows or end_row < 0:
return 0
if start_col >= ncols or end_col < 0:
return 0
result = 0
for i in range(start_row, end_row + 1):
for j in range(start_col, end_col + 1):
result += matrix[i][j]
return result
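# Editor's sketch: a prefix-sum variant (not part of the original file). It
# precomputes cumulative sums once in O(rows*cols) so that each subsequent
# submatrix query runs in O(1). The names below are illustrative.
def buildPrefix(matrix):
    nrows, ncols = len(matrix), len(matrix[0])
    pre = [[0] * (ncols + 1) for _ in range(nrows + 1)]
    for i in range(nrows):
        for j in range(ncols):
            # inclusion-exclusion over the three previously filled cells
            pre[i + 1][j + 1] = (matrix[i][j] + pre[i][j + 1]
                                 + pre[i + 1][j] - pre[i][j])
    return pre
def sumFromPrefix(pre, start_row, start_col, end_row, end_col):
    # sum over the inclusive rectangle, again by inclusion-exclusion
    return (pre[end_row + 1][end_col + 1] - pre[start_row][end_col + 1]
            - pre[end_row + 1][start_col] + pre[start_row][start_col])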
if __name__ == '__main__':
input = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]]
# entire matrix
    print(sumSubMatrix(input, 0, 0, 3, 3))
    # same row
    print(sumSubMatrix(input, 1, 1, 1, 3))
    # col
    print(sumSubMatrix(input, 2, 1, 3, 1))
    # range
    print(sumSubMatrix(input, 1, 0, 2, 2))
| [
"ashutosh.narkar@one.verizon.com"
] | ashutosh.narkar@one.verizon.com |
a27505cee0418d366016211fb8ed496916c44678 | c80546df6a271db01c354d024d49a485066bb81c | /pyansys/_version.py | d7af32038ddfddcd9db097854b131e517734b9e6 | [
"MIT"
] | permissive | csutjf/pyansys | 6c5570c7b470b77ebfea7491881efd2dc08091c2 | 8b9487b72a5483d6d309832c9d48cff104d2edde | refs/heads/master | 2020-06-08T20:32:34.208307 | 2019-06-19T14:25:52 | 2019-06-19T14:25:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | # major, minor, patch
version_info = 0, 37, 2
# Nice string for the version
__version__ = '.'.join(map(str, version_info))
| [
"akascap@gmail.com"
] | akascap@gmail.com |
66b191caad4cf439c094f78b09e6827e3a792f22 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02614/s684487359.py | 15d0f66c289bf45b788f38685224357fa8c988e3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | H, W, K = map(int,input().split())
c = list(list(input()) for _ in range(H))
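# Editor's sketch: the bitmask double loop below can also be phrased as a
# standalone helper; the name is illustrative, not part of the original
# submission. A set bit means the row/column is kept (not painted over).
def count_black_kept(grid, rows_mask, cols_mask):
    # count '#' cells whose row bit and column bit are both set
    return sum(1 for k in range(len(grid)) for l in range(len(grid[0]))
               if rows_mask >> k & 1 and cols_mask >> l & 1 and grid[k][l] == "#")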
ans = 0
# bit manipulation over row/column masks
for i in range(2 ** H):
for j in range(2 ** W):
b = 0
for k in range(H):
for l in range(W):
                # count the black squares whose row and column are both left unpainted
if i >> k & 1 and j >> l & 1 and c[k][l] == "#": b += 1
if b == K: ans += 1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
aad1886a583a2c25a51b9c079489b8e629b94068 | 6fa0d5d3b61fbce01fad5a7dd50258c09298ee00 | /Web/04_django/Open_API/MYFORM/articles/models.py | a6d857d2f7321697638dc293f00c179220437d9b | [] | no_license | athletejuan/TIL | c8e6bd9f7e2c6f999dbac759adcdb6b2959de384 | 16b854928af2f27d91ba140ebc1aec0007e5eb04 | refs/heads/master | 2023-02-19T13:59:06.495110 | 2022-03-23T15:08:04 | 2022-03-23T15:08:04 | 188,750,527 | 1 | 0 | null | 2023-02-15T22:54:50 | 2019-05-27T01:27:09 | Python | UTF-8 | Python | false | false | 989 | py | from django.db import models
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
class Article(models.Model):
title = models.CharField(max_length=30)
content = models.TextField()
image = ProcessedImageField(
upload_to = 'articles/images',
processors = [ResizeToFill(200,300)],
format = 'jpeg',
options = {'quality': 90}
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-pk']
def __str__(self):
return f'No.{self.id} - {self.title}'
class Comment(models.Model):
content = models.CharField(max_length=200)
created_at = models.DateTimeField(auto_now_add=True)
article = models.ForeignKey(Article, on_delete=models.CASCADE)
class Meta:
ordering = ['-pk']
def __str__(self):
return f'<Article({self.article_id}) : Comment({self.id})> - {self.content}' | [
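# Usage sketch (editor's note; assumes the app is installed, migrations have
# been applied, and Django is configured, since ORM calls need a live setup):
#   article = Article.objects.create(title='Hello', content='...', image=some_file)
#   Comment.objects.create(content='First!', article=article)
# The ProcessedImageField above stores the upload resized to 200x300 as a
# JPEG at quality 90 under MEDIA_ROOT/articles/images.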
"vanillasky84.0627@gmail.com"
] | vanillasky84.0627@gmail.com |
fb076470f17938090d47bcac17fbac9b550f005e | 150d9e4cee92be00251625b7f9ff231cc8306e9f | /largestNumberAtLeastTwiceofOthers.py | 3318a94d71cdb4b6b3e595f6c4ced9a862df489b | [] | no_license | JerinPaulS/Python-Programs | 0d3724ce277794be597104d9e8f8becb67282cb0 | d0778178d89d39a93ddb9b95ca18706554eb7655 | refs/heads/master | 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,346 | py | '''
747. Largest Number At Least Twice of Others
You are given an integer array nums where the largest integer is unique.
Determine whether the largest element in the array is at least twice as much as every other number in the array. If it is, return the index of the largest element, or return -1 otherwise.
Example 1:
Input: nums = [3,6,1,0]
Output: 1
Explanation: 6 is the largest integer.
For every other number in the array x, 6 is at least twice as big as x.
The index of value 6 is 1, so we return 1.
Example 2:
Input: nums = [1,2,3,4]
Output: -1
Explanation: 4 is less than twice the value of 3, so we return -1.
Example 3:
Input: nums = [1]
Output: 0
Explanation: 1 is trivially at least twice the value as any other number because there are no other numbers.
Constraints:
1 <= nums.length <= 50
0 <= nums[i] <= 100
The largest element in nums is unique.
'''
class Solution(object):
def dominantIndex(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 1:
return 0
sorted_nums = sorted(nums)
max_num = sorted_nums[-1]
        print(max_num, sorted_nums)
if max_num >= sorted_nums[len(sorted_nums) - 2] * 2:
return nums.index(max_num)
else:
return -1
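# Editor's sketch of a single-pass alternative (O(n) time, no sorting). The
# function name is illustrative and not part of the original solution.
def dominant_index_one_pass(nums):
    largest, second, idx = -1, -1, 0
    for i, x in enumerate(nums):
        if x > largest:
            # new maximum: the old maximum becomes the runner-up
            largest, second, idx = x, largest, i
        elif x > second:
            second = x
    return idx if largest >= 2 * second else -1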
obj = Solution()
print(obj.dominantIndex([3,6,1,0])) | [
"jerinsprograms@gmail.com"
] | jerinsprograms@gmail.com |
bd70f4a9e1704eac28e96fd1bf95f8f4f712a2b9 | 7453e69cda5f4d331ef5b6bb437c27c24579d62d | /event/migrations/0001_initial.py | 9d6f8fcb08dc929724c9120d4e10448d7aeffe20 | [] | no_license | jerinisready/learndjangogrouppehia1-eventmgt | f4ac4c24adae9b600ab45b3b4ab9b086fd3e2e00 | 6c5950169b98c97853ae467009cd51df01b855e0 | refs/heads/master | 2023-05-28T15:08:38.408981 | 2021-06-23T16:10:17 | 2021-06-23T16:10:17 | 379,661,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | # Generated by Django 3.2.4 on 2021-06-22 15:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256)),
('description', models.TextField(blank=True, null=True)),
('event_datetime', models.DateTimeField()),
('is_registration_closed', models.BooleanField(default=False)),
('max_no_participants', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Registration',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('point', models.IntegerField(default=0)),
('position', models.CharField(choices=[('First', 'First'), ('Second', 'Second'), ('Third', 'Third')], default='Participant', max_length=12)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event.event')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"="
] | = |
c5249c0a093f2ddbb7d8c22e6df76046a65bdef9 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_4341.py | a717937493c372c932eab5d25c3319c300f4ca6f | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | # 3D scatterplots in sage
point_list=[(0.,1.,2.), (2.,2.,3.)]
point3d(point_list)
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
814857a6d92868a9798a1b515b876b382340c4cc | 585fcfd09bcc37ad73c6f301cb8b16261a93df7e | /projects/scipy-master/scipy/ndimage/tests/test_ndimage.py | 0be1331d8f3eeeb7c65960d40be8ebc15cdc59c8 | [
"BSD-3-Clause",
"MIT"
] | permissive | louisXW/Surrogate-Model | e9e8de3ab892eed2f8ed424e09b770e67126c1f3 | 65ec8a89c1b7a19d4c04c62e2c988340c96c69f8 | refs/heads/master | 2021-07-21T09:37:41.045898 | 2017-10-30T11:49:35 | 2017-10-30T11:49:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201,089 | py | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import math
import sys
import warnings
import numpy
import scipy.ndimage as ndimage
from nose import SkipTest
from numpy import fft
from numpy.testing import (assert_, assert_equal, assert_array_equal,
run_module_suite, assert_array_almost_equal, assert_almost_equal, dec)
eps = 1e-12
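# Root of the summed squared differences, i.e. the Euclidean distance between
# two arrays; used below as a cheap measure of how much a filter changed them.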
def sumsq(a, b):
return math.sqrt(((a - b) ** 2).sum())
class TestNdimage:
def setUp(self):
# list of numarray data types
self.integer_types = [numpy.int8, numpy.uint8, numpy.int16,
numpy.uint16, numpy.int32, numpy.uint32,
numpy.int64, numpy.uint64]
self.float_types = [numpy.float32, numpy.float64]
self.types = self.integer_types + self.float_types
# list of boundary modes:
self.modes = ['nearest', 'wrap', 'reflect', 'mirror', 'constant']
def test_correlate01(self):
array = numpy.array([1, 2])
weights = numpy.array([2])
expected = [2, 4]
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate02(self):
array = numpy.array([1, 2, 3])
kernel = numpy.array([1])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.correlate1d(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve1d(array, kernel)
assert_array_almost_equal(array, output)
def test_correlate03(self):
array = numpy.array([1])
weights = numpy.array([1, 1])
expected = [2]
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate04(self):
array = numpy.array([1, 2])
tcor = [2, 3]
tcov = [3, 4]
weights = numpy.array([1, 1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
def test_correlate05(self):
array = numpy.array([1, 2, 3])
tcor = [2, 3, 5]
tcov = [3, 5, 6]
kernel = numpy.array([1, 1])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(tcov, output)
output = ndimage.correlate1d(array, kernel)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve1d(array, kernel)
assert_array_almost_equal(tcov, output)
def test_correlate06(self):
array = numpy.array([1, 2, 3])
tcor = [9, 14, 17]
tcov = [7, 10, 15]
weights = numpy.array([1, 2, 3])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
def test_correlate07(self):
array = numpy.array([1, 2, 3])
expected = [5, 8, 11]
weights = numpy.array([1, 2, 1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, expected)
def test_correlate08(self):
array = numpy.array([1, 2, 3])
tcor = [1, 2, 5]
tcov = [3, 6, 7]
weights = numpy.array([1, 2, -1])
output = ndimage.correlate(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve(array, weights)
assert_array_almost_equal(output, tcov)
output = ndimage.correlate1d(array, weights)
assert_array_almost_equal(output, tcor)
output = ndimage.convolve1d(array, weights)
assert_array_almost_equal(output, tcov)
def test_correlate09(self):
array = []
kernel = numpy.array([1, 1])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.correlate1d(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve1d(array, kernel)
assert_array_almost_equal(array, output)
def test_correlate10(self):
array = [[]]
kernel = numpy.array([[1, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal(array, output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal(array, output)
def test_correlate11(self):
array = numpy.array([[1, 2, 3],
[4, 5, 6]])
kernel = numpy.array([[1, 1],
[1, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal([[12, 16, 18], [18, 22, 24]], output)
def test_correlate12(self):
array = numpy.array([[1, 2, 3],
[4, 5, 6]])
kernel = numpy.array([[1, 0],
[0, 1]])
output = ndimage.correlate(array, kernel)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
output = ndimage.convolve(array, kernel)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
def test_correlate13(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
for type2 in self.types:
output = ndimage.correlate(array, kernel,
output=type2)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, type2)
output = ndimage.convolve(array, kernel,
output=type2)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, type2)
def test_correlate14(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
for type2 in self.types:
output = numpy.zeros(array.shape, type2)
ndimage.correlate(array, kernel,
output=output)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, type2)
ndimage.convolve(array, kernel, output=output)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, type2)
def test_correlate15(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate16(self):
kernel = numpy.array([[0.5, 0],
[0, 0.5]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[1, 1.5, 2.5], [2.5, 3, 4]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32)
assert_array_almost_equal([[3, 4, 4.5], [4.5, 5.5, 6]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate17(self):
array = numpy.array([1, 2, 3])
tcor = [3, 5, 6]
tcov = [2, 3, 5]
kernel = numpy.array([1, 1])
output = ndimage.correlate(array, kernel, origin=-1)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve(array, kernel, origin=-1)
assert_array_almost_equal(tcov, output)
output = ndimage.correlate1d(array, kernel, origin=-1)
assert_array_almost_equal(tcor, output)
output = ndimage.convolve1d(array, kernel, origin=-1)
assert_array_almost_equal(tcov, output)
def test_correlate18(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32,
mode='nearest', origin=-1)
assert_array_almost_equal([[6, 8, 9], [9, 11, 12]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32,
mode='nearest', origin=-1)
assert_array_almost_equal([[2, 3, 5], [5, 6, 8]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate19(self):
kernel = numpy.array([[1, 0],
[0, 1]])
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[4, 5, 6]], type1)
output = ndimage.correlate(array, kernel,
output=numpy.float32,
mode='nearest', origin=[-1, 0])
assert_array_almost_equal([[5, 6, 8], [8, 9, 11]], output)
assert_equal(output.dtype.type, numpy.float32)
output = ndimage.convolve(array, kernel,
output=numpy.float32,
mode='nearest', origin=[-1, 0])
assert_array_almost_equal([[3, 5, 6], [6, 8, 9]], output)
assert_equal(output.dtype.type, numpy.float32)
def test_correlate20(self):
weights = numpy.array([1, 2, 1])
expected = [[5, 10, 15], [7, 14, 21]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
output=output)
assert_array_almost_equal(output, expected)
def test_correlate21(self):
array = numpy.array([[1, 2, 3],
[2, 4, 6]])
expected = [[5, 10, 15], [7, 14, 21]]
weights = numpy.array([1, 2, 1])
output = ndimage.correlate1d(array, weights, axis=0)
assert_array_almost_equal(output, expected)
output = ndimage.convolve1d(array, weights, axis=0)
assert_array_almost_equal(output, expected)
def test_correlate22(self):
weights = numpy.array([1, 2, 1])
expected = [[6, 12, 18], [6, 12, 18]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='wrap', output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
mode='wrap', output=output)
assert_array_almost_equal(output, expected)
def test_correlate23(self):
weights = numpy.array([1, 2, 1])
expected = [[5, 10, 15], [7, 14, 21]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output)
assert_array_almost_equal(output, expected)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output)
assert_array_almost_equal(output, expected)
def test_correlate24(self):
weights = numpy.array([1, 2, 1])
tcor = [[7, 14, 21], [8, 16, 24]]
tcov = [[4, 8, 12], [5, 10, 15]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output, origin=-1)
assert_array_almost_equal(output, tcor)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output, origin=-1)
assert_array_almost_equal(output, tcov)
def test_correlate25(self):
weights = numpy.array([1, 2, 1])
tcor = [[4, 8, 12], [5, 10, 15]]
tcov = [[7, 14, 21], [8, 16, 24]]
for type1 in self.types:
array = numpy.array([[1, 2, 3],
[2, 4, 6]], type1)
for type2 in self.types:
output = numpy.zeros((2, 3), type2)
ndimage.correlate1d(array, weights, axis=0,
mode='nearest', output=output, origin=1)
assert_array_almost_equal(output, tcor)
ndimage.convolve1d(array, weights, axis=0,
mode='nearest', output=output, origin=1)
assert_array_almost_equal(output, tcov)
def test_gauss01(self):
input = numpy.array([[1, 2, 3],
[2, 4, 6]], numpy.float32)
output = ndimage.gaussian_filter(input, 0)
assert_array_almost_equal(output, input)
def test_gauss02(self):
input = numpy.array([[1, 2, 3],
[2, 4, 6]], numpy.float32)
output = ndimage.gaussian_filter(input, 1.0)
assert_equal(input.dtype, output.dtype)
assert_equal(input.shape, output.shape)
def test_gauss03(self):
# single precision data"
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
output = ndimage.gaussian_filter(input, [1.0, 1.0])
assert_equal(input.dtype, output.dtype)
assert_equal(input.shape, output.shape)
# input.sum() is 49995000.0. With single precision floats, we can't
# expect more than 8 digits of accuracy, so use decimal=0 in this test.
assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'), decimal=0)
assert_(sumsq(input, output) > 1.0)
def test_gauss04(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output = ndimage.gaussian_filter(input, [1.0, 1.0],
output=otype)
assert_equal(output.dtype.type, numpy.float64)
assert_equal(input.shape, output.shape)
assert_(sumsq(input, output) > 1.0)
def test_gauss05(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output = ndimage.gaussian_filter(input, [1.0, 1.0],
order=1, output=otype)
assert_equal(output.dtype.type, numpy.float64)
assert_equal(input.shape, output.shape)
assert_(sumsq(input, output) > 1.0)
def test_gauss06(self):
input = numpy.arange(100 * 100).astype(numpy.float32)
input.shape = (100, 100)
otype = numpy.float64
output1 = ndimage.gaussian_filter(input, [1.0, 1.0],
output=otype)
output2 = ndimage.gaussian_filter(input, 1.0,
output=otype)
assert_array_almost_equal(output1, output2)
def test_prewitt01(self):
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
output = ndimage.prewitt(array, 0)
assert_array_almost_equal(t, output)
def test_prewitt02(self):
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 1)
output = numpy.zeros(array.shape, type)
ndimage.prewitt(array, 0, output)
assert_array_almost_equal(t, output)
def test_prewitt03(self):
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
t = ndimage.correlate1d(t, [1.0, 1.0, 1.0], 0)
output = ndimage.prewitt(array, 1)
assert_array_almost_equal(t, output)
def test_prewitt04(self):
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.prewitt(array, -1)
output = ndimage.prewitt(array, 1)
assert_array_almost_equal(t, output)
def test_sobel01(self):
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
output = ndimage.sobel(array, 0)
assert_array_almost_equal(t, output)
def test_sobel02(self):
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 0)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 1)
output = numpy.zeros(array.shape, type)
ndimage.sobel(array, 0, output)
assert_array_almost_equal(t, output)
def test_sobel03(self):
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.correlate1d(array, [-1.0, 0.0, 1.0], 1)
t = ndimage.correlate1d(t, [1.0, 2.0, 1.0], 0)
output = numpy.zeros(array.shape, type)
output = ndimage.sobel(array, 1)
assert_array_almost_equal(t, output)
def test_sobel04(self):
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
t = ndimage.sobel(array, -1)
output = ndimage.sobel(array, 1)
assert_array_almost_equal(t, output)
def test_laplace01(self):
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
output = ndimage.laplace(array)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_laplace02(self):
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.correlate1d(array, [1, -2, 1], 0)
tmp2 = ndimage.correlate1d(array, [1, -2, 1], 1)
output = numpy.zeros(array.shape, type)
ndimage.laplace(array, output=output)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_gaussian_laplace01(self):
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
output = ndimage.gaussian_laplace(array, 1.0)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_gaussian_laplace02(self):
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [2, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 2])
output = numpy.zeros(array.shape, type)
ndimage.gaussian_laplace(array, 1.0, output)
assert_array_almost_equal(tmp1 + tmp2, output)
def test_generic_laplace01(self):
def derivative2(input, axis, output, mode, cval, a, b):
sigma = [a, b / 2.0]
input = numpy.asarray(input)
order = [0] * input.ndim
order[axis] = 2
return ndimage.gaussian_filter(input, sigma, order,
output, mode, cval)
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
output = numpy.zeros(array.shape, type)
tmp = ndimage.generic_laplace(array, derivative2,
extra_arguments=(1.0,), extra_keywords={'b': 2.0})
ndimage.gaussian_laplace(array, 1.0, output)
assert_array_almost_equal(tmp, output)
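    # gradient magnitude: sqrt of the summed squares of first-order gaussian
    # derivatives along each axis; generic_gradient_magnitude is checked
    # against the gaussian version via a user-supplied derivative callback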
def test_gaussian_gradient_magnitude01(self):
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
output = ndimage.gaussian_gradient_magnitude(array,
1.0)
expected = tmp1 * tmp1 + tmp2 * tmp2
expected = numpy.sqrt(expected).astype(type)
assert_array_almost_equal(expected, output)
def test_gaussian_gradient_magnitude02(self):
for type in [numpy.int32, numpy.float32, numpy.float64]:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type) * 100
tmp1 = ndimage.gaussian_filter(array, 1.0, [1, 0])
tmp2 = ndimage.gaussian_filter(array, 1.0, [0, 1])
output = numpy.zeros(array.shape, type)
ndimage.gaussian_gradient_magnitude(array, 1.0,
output)
expected = tmp1 * tmp1 + tmp2 * tmp2
expected = numpy.sqrt(expected).astype(type)
assert_array_almost_equal(expected, output)
def test_generic_gradient_magnitude01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], numpy.float64)
def derivative(input, axis, output, mode, cval, a, b):
sigma = [a, b / 2.0]
input = numpy.asarray(input)
order = [0] * input.ndim
order[axis] = 1
return ndimage.gaussian_filter(input, sigma, order,
output, mode, cval)
tmp1 = ndimage.gaussian_gradient_magnitude(array, 1.0)
tmp2 = ndimage.generic_gradient_magnitude(array,
derivative, extra_arguments=(1.0,),
extra_keywords={'b': 2.0})
assert_array_almost_equal(tmp1, tmp2)
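    # uniform (box) filter tests, including degenerate sizes 0 and 1, an
    # empty input, and all input/output dtype combinations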
def test_uniform01(self):
array = numpy.array([2, 4, 6])
size = 2
output = ndimage.uniform_filter1d(array, size,
origin=-1)
assert_array_almost_equal([3, 5, 6], output)
def test_uniform02(self):
array = numpy.array([1, 2, 3])
filter_shape = [0]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal(array, output)
def test_uniform03(self):
array = numpy.array([1, 2, 3])
filter_shape = [1]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal(array, output)
def test_uniform04(self):
array = numpy.array([2, 4, 6])
filter_shape = [2]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal([2, 3, 5], output)
def test_uniform05(self):
array = []
filter_shape = [1]
output = ndimage.uniform_filter(array, filter_shape)
assert_array_almost_equal([], output)
def test_uniform06(self):
filter_shape = [2, 2]
for type1 in self.types:
array = numpy.array([[4, 8, 12],
[16, 20, 24]], type1)
for type2 in self.types:
output = ndimage.uniform_filter(array,
filter_shape, output=type2)
assert_array_almost_equal([[4, 6, 10], [10, 12, 16]], output)
assert_equal(output.dtype.type, type2)
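    # minimum and maximum filters with explicit sizes, footprints, and
    # scalar or per-axis origins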
def test_minimum_filter01(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([2])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([1, 1, 2, 3, 4], output)
def test_minimum_filter02(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([1, 1, 2, 3, 4], output)
def test_minimum_filter03(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([2])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([3, 2, 2, 1, 1], output)
def test_minimum_filter04(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([2, 2, 1, 1, 1], output)
def test_minimum_filter05(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
filter_shape = numpy.array([2, 3])
output = ndimage.minimum_filter(array, filter_shape)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 2, 1, 1, 1],
[5, 3, 3, 1, 1]], output)
def test_minimum_filter06(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 1, 1], [1, 1, 1]]
output = ndimage.minimum_filter(array,
footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 2, 1, 1, 1],
[5, 3, 3, 1, 1]], output)
def test_minimum_filter07(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.minimum_filter(array,
footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_minimum_filter08(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.minimum_filter(array,
footprint=footprint, origin=-1)
assert_array_almost_equal([[3, 1, 3, 1, 1],
[5, 3, 3, 1, 1],
[3, 3, 1, 1, 1]], output)
def test_minimum_filter09(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.minimum_filter(array,
footprint=footprint, origin=[-1, 0])
assert_array_almost_equal([[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1],
[5, 3, 3, 1, 1]], output)
def test_maximum_filter01(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([2])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([1, 2, 3, 4, 5], output)
def test_maximum_filter02(self):
array = numpy.array([1, 2, 3, 4, 5])
filter_shape = numpy.array([3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([2, 3, 4, 5, 5], output)
def test_maximum_filter03(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([2])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([3, 3, 5, 5, 4], output)
def test_maximum_filter04(self):
array = numpy.array([3, 2, 5, 1, 4])
filter_shape = numpy.array([3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([3, 5, 5, 5, 4], output)
def test_maximum_filter05(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
filter_shape = numpy.array([2, 3])
output = ndimage.maximum_filter(array, filter_shape)
assert_array_almost_equal([[3, 5, 5, 5, 4],
[7, 9, 9, 9, 5],
[8, 9, 9, 9, 7]], output)
def test_maximum_filter06(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 1, 1], [1, 1, 1]]
output = ndimage.maximum_filter(array,
footprint=footprint)
assert_array_almost_equal([[3, 5, 5, 5, 4],
[7, 9, 9, 9, 5],
[8, 9, 9, 9, 7]], output)
def test_maximum_filter07(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.maximum_filter(array,
footprint=footprint)
assert_array_almost_equal([[3, 5, 5, 5, 4],
[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7]], output)
def test_maximum_filter08(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.maximum_filter(array,
footprint=footprint, origin=-1)
assert_array_almost_equal([[7, 9, 9, 5, 5],
[9, 8, 9, 7, 5],
[8, 8, 7, 7, 7]], output)
def test_maximum_filter09(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.maximum_filter(array,
footprint=footprint, origin=[-1, 0])
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
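    # rank, percentile and median filters: whenever the parameters describe
    # the same order statistic, all three interfaces must agree (e.g. rank 1
    # of a size-3 window == 50th percentile == median)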
def test_rank01(self):
array = numpy.array([1, 2, 3, 4, 5])
output = ndimage.rank_filter(array, 1, size=2)
assert_array_almost_equal(array, output)
output = ndimage.percentile_filter(array, 100, size=2)
assert_array_almost_equal(array, output)
output = ndimage.median_filter(array, 2)
assert_array_almost_equal(array, output)
def test_rank02(self):
array = numpy.array([1, 2, 3, 4, 5])
output = ndimage.rank_filter(array, 1, size=[3])
assert_array_almost_equal(array, output)
output = ndimage.percentile_filter(array, 50, size=3)
assert_array_almost_equal(array, output)
output = ndimage.median_filter(array, (3,))
assert_array_almost_equal(array, output)
def test_rank03(self):
array = numpy.array([3, 2, 5, 1, 4])
output = ndimage.rank_filter(array, 1, size=[2])
assert_array_almost_equal([3, 3, 5, 5, 4], output)
output = ndimage.percentile_filter(array, 100, size=2)
assert_array_almost_equal([3, 3, 5, 5, 4], output)
def test_rank04(self):
array = numpy.array([3, 2, 5, 1, 4])
expected = [3, 3, 2, 4, 4]
output = ndimage.rank_filter(array, 1, size=3)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 50, size=3)
assert_array_almost_equal(expected, output)
output = ndimage.median_filter(array, size=3)
assert_array_almost_equal(expected, output)
def test_rank05(self):
array = numpy.array([3, 2, 5, 1, 4])
expected = [3, 3, 2, 4, 4]
output = ndimage.rank_filter(array, -2, size=3)
assert_array_almost_equal(expected, output)
def test_rank06(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
expected = [[2, 2, 1, 1, 1],
[3, 3, 2, 1, 1],
[5, 5, 3, 3, 1]]
output = ndimage.rank_filter(array, 1, size=[2, 3])
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 17,
size=(2, 3))
assert_array_almost_equal(expected, output)
def test_rank07(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
expected = [[3, 5, 5, 5, 4],
[5, 5, 7, 5, 4],
[6, 8, 8, 7, 5]]
output = ndimage.rank_filter(array, -2, size=[2, 3])
assert_array_almost_equal(expected, output)
def test_rank08(self):
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]])
expected = [[3, 3, 2, 4, 4],
[5, 5, 5, 4, 4],
[5, 6, 7, 5, 5]]
output = ndimage.percentile_filter(array, 50.0,
size=(2, 3))
assert_array_almost_equal(expected, output)
output = ndimage.rank_filter(array, 3, size=(2, 3))
assert_array_almost_equal(expected, output)
output = ndimage.median_filter(array, size=(2, 3))
assert_array_almost_equal(expected, output)
def test_rank09(self):
expected = [[3, 3, 2, 4, 4],
[3, 5, 2, 5, 1],
[5, 5, 8, 3, 5]]
footprint = [[1, 0, 1], [0, 1, 0]]
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
output = ndimage.rank_filter(array, 1,
footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 35,
footprint=footprint)
assert_array_almost_equal(expected, output)
def test_rank10(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
expected = [[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]]
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.rank_filter(array, 0,
footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 0.0,
footprint=footprint)
assert_array_almost_equal(expected, output)
def test_rank11(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
expected = [[3, 5, 5, 5, 4],
[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7]]
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.rank_filter(array, -1,
footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 100.0,
footprint=footprint)
assert_array_almost_equal(expected, output)
def test_rank12(self):
expected = [[3, 3, 2, 4, 4],
[3, 5, 2, 5, 1],
[5, 5, 8, 3, 5]]
footprint = [[1, 0, 1], [0, 1, 0]]
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
output = ndimage.rank_filter(array, 1,
footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.percentile_filter(array, 50.0,
footprint=footprint)
assert_array_almost_equal(expected, output)
output = ndimage.median_filter(array,
footprint=footprint)
assert_array_almost_equal(expected, output)
def test_rank13(self):
expected = [[5, 2, 5, 1, 1],
[5, 8, 3, 5, 5],
[6, 6, 5, 5, 5]]
footprint = [[1, 0, 1], [0, 1, 0]]
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
output = ndimage.rank_filter(array, 1,
footprint=footprint, origin=-1)
assert_array_almost_equal(expected, output)
def test_rank14(self):
expected = [[3, 5, 2, 5, 1],
[5, 5, 8, 3, 5],
[5, 6, 6, 5, 5]]
footprint = [[1, 0, 1], [0, 1, 0]]
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
output = ndimage.rank_filter(array, 1,
footprint=footprint, origin=[-1, 0])
assert_array_almost_equal(expected, output)
    def test_rank15(self):
expected = [[2, 3, 1, 4, 1],
[5, 3, 7, 1, 1],
[5, 5, 3, 3, 3]]
footprint = [[1, 0, 1], [0, 1, 0]]
for type in self.types:
array = numpy.array([[3, 2, 5, 1, 4],
[5, 8, 3, 7, 1],
[5, 6, 9, 3, 5]], type)
output = ndimage.rank_filter(array, 0,
footprint=footprint, origin=[-1, 0])
assert_array_almost_equal(expected, output)
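    # generic_filter1d / generic_filter: user-supplied callbacks combined
    # with extra_arguments and extra_keywords must reproduce the equivalent
    # correlate1d / correlate results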
def test_generic_filter1d01(self):
weights = numpy.array([1.1, 2.2, 3.3])
def _filter_func(input, output, fltr, total):
fltr = fltr / total
for ii in range(input.shape[0] - 2):
output[ii] = input[ii] * fltr[0]
output[ii] += input[ii + 1] * fltr[1]
output[ii] += input[ii + 2] * fltr[2]
for type in self.types:
a = numpy.arange(12, dtype=type)
a.shape = (3, 4)
r1 = ndimage.correlate1d(a, weights / weights.sum(), 0,
origin=-1)
r2 = ndimage.generic_filter1d(a, _filter_func, 3,
axis=0, origin=-1, extra_arguments=(weights,),
extra_keywords={'total': weights.sum()})
assert_array_almost_equal(r1, r2)
def test_generic_filter01(self):
filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
footprint = numpy.array([[1, 0], [0, 1]])
cf = numpy.array([1., 4.])
def _filter_func(buffer, weights, total=1.0):
            # normalize the weights passed in through extra_arguments
            weights = weights / total
return (buffer * weights).sum()
for type in self.types:
a = numpy.arange(12, dtype=type)
a.shape = (3, 4)
r1 = ndimage.correlate(a, filter_ * footprint)
if type in self.float_types:
r1 /= 5
else:
r1 //= 5
r2 = ndimage.generic_filter(a, _filter_func,
footprint=footprint, extra_arguments=(cf,),
extra_keywords={'total': cf.sum()})
assert_array_almost_equal(r1, r2)
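    # boundary handling: one-sided kernels probe each extension mode, with
    # one expected row per entry of self.modes, in order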
def test_extend01(self):
array = numpy.array([1, 2, 3])
weights = numpy.array([1, 0])
expected_values = [[1, 1, 2],
[3, 1, 2],
[1, 1, 2],
[2, 1, 2],
[0, 1, 2]]
for mode, expected_value in zip(self.modes, expected_values):
output = ndimage.correlate1d(array, weights, 0,
mode=mode, cval=0)
assert_array_equal(output, expected_value)
def test_extend02(self):
array = numpy.array([1, 2, 3])
weights = numpy.array([1, 0, 0, 0, 0, 0, 0, 0])
expected_values = [[1, 1, 1],
[3, 1, 2],
[3, 3, 2],
[1, 2, 3],
[0, 0, 0]]
for mode, expected_value in zip(self.modes, expected_values):
output = ndimage.correlate1d(array, weights, 0,
mode=mode, cval=0)
assert_array_equal(output, expected_value)
def test_extend03(self):
array = numpy.array([1, 2, 3])
weights = numpy.array([0, 0, 1])
expected_values = [[2, 3, 3],
[2, 3, 1],
[2, 3, 3],
[2, 3, 2],
[2, 3, 0]]
for mode, expected_value in zip(self.modes, expected_values):
output = ndimage.correlate1d(array, weights, 0,
mode=mode, cval=0)
assert_array_equal(output, expected_value)
def test_extend04(self):
array = numpy.array([1, 2, 3])
weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
expected_values = [[3, 3, 3],
[2, 3, 1],
[2, 1, 1],
[1, 2, 3],
[0, 0, 0]]
for mode, expected_value in zip(self.modes, expected_values):
output = ndimage.correlate1d(array, weights, 0,
mode=mode, cval=0)
assert_array_equal(output, expected_value)
def test_extend05(self):
array = numpy.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
weights = numpy.array([[1, 0], [0, 0]])
expected_values = [[[1, 1, 2], [1, 1, 2], [4, 4, 5]],
[[9, 7, 8], [3, 1, 2], [6, 4, 5]],
[[1, 1, 2], [1, 1, 2], [4, 4, 5]],
[[5, 4, 5], [2, 1, 2], [5, 4, 5]],
[[0, 0, 0], [0, 1, 2], [0, 4, 5]]]
for mode, expected_value in zip(self.modes, expected_values):
output = ndimage.correlate(array, weights,
mode=mode, cval=0)
assert_array_equal(output, expected_value)
def test_extend06(self):
array = numpy.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
weights = numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
expected_values = [[[5, 6, 6], [8, 9, 9], [8, 9, 9]],
[[5, 6, 4], [8, 9, 7], [2, 3, 1]],
[[5, 6, 6], [8, 9, 9], [8, 9, 9]],
[[5, 6, 5], [8, 9, 8], [5, 6, 5]],
[[5, 6, 0], [8, 9, 0], [0, 0, 0]]]
for mode, expected_value in zip(self.modes, expected_values):
output = ndimage.correlate(array, weights,
mode=mode, cval=0)
assert_array_equal(output, expected_value)
def test_extend07(self):
array = numpy.array([1, 2, 3])
weights = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 1])
expected_values = [[3, 3, 3],
[2, 3, 1],
[2, 1, 1],
[1, 2, 3],
[0, 0, 0]]
for mode, expected_value in zip(self.modes, expected_values):
output = ndimage.correlate(array, weights,
mode=mode, cval=0)
assert_array_equal(output, expected_value)
def test_extend08(self):
array = numpy.array([[1], [2], [3]])
weights = numpy.array([[0], [0], [0], [0], [0], [0], [0],
[0], [1]])
expected_values = [[[3], [3], [3]],
[[2], [3], [1]],
[[2], [1], [1]],
[[1], [2], [3]],
[[0], [0], [0]]]
for mode, expected_value in zip(self.modes, expected_values):
output = ndimage.correlate(array, weights,
mode=mode, cval=0)
assert_array_equal(output, expected_value)
def test_boundaries(self):
def shift(x):
return (x[0] + 0.5,)
data = numpy.array([1, 2, 3, 4.])
expected = {'constant': [1.5, 2.5, 3.5, -1, -1, -1, -1],
'wrap': [1.5, 2.5, 3.5, 1.5, 2.5, 3.5, 1.5],
'mirror': [1.5, 2.5, 3.5, 3.5, 2.5, 1.5, 1.5],
'nearest': [1.5, 2.5, 3.5, 4, 4, 4, 4]}
for mode in expected:
assert_array_equal(expected[mode],
ndimage.geometric_transform(data, shift,
cval=-1, mode=mode,
output_shape=(7,),
order=1))
def test_boundaries2(self):
def shift(x):
return (x[0] - 0.9,)
data = numpy.array([1, 2, 3, 4])
expected = {'constant': [-1, 1, 2, 3],
'wrap': [3, 1, 2, 3],
'mirror': [2, 1, 2, 3],
'nearest': [1, 1, 2, 3]}
for mode in expected:
assert_array_equal(expected[mode],
ndimage.geometric_transform(data, shift,
cval=-1, mode=mode,
output_shape=(4,)))
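    # fourier domain filters: a unit impulse is transformed, filtered with
    # fourier_gaussian/uniform/ellipsoid and transformed back, so the result
    # must still sum to 1; fourier_shift is checked by shifting a ramp by one
    # sample; both rfft (real) and fft (complex) code paths are exercised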
def test_fourier_gaussian_real01(self):
for shape in [(32, 16), (31, 15)]:
for type in [numpy.float32, numpy.float64]:
a = numpy.zeros(shape, type)
a[0, 0] = 1.0
a = fft.rfft(a, shape[0], 0)
a = fft.fft(a, shape[1], 1)
a = ndimage.fourier_gaussian(a, [5.0, 2.5],
shape[0], 0)
a = fft.ifft(a, shape[1], 1)
a = fft.irfft(a, shape[0], 0)
assert_almost_equal(ndimage.sum(a), 1)
def test_fourier_gaussian_complex01(self):
for shape in [(32, 16), (31, 15)]:
for type in [numpy.complex64, numpy.complex128]:
a = numpy.zeros(shape, type)
a[0, 0] = 1.0
a = fft.fft(a, shape[0], 0)
a = fft.fft(a, shape[1], 1)
a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1,
0)
a = fft.ifft(a, shape[1], 1)
a = fft.ifft(a, shape[0], 0)
assert_almost_equal(ndimage.sum(a.real), 1.0)
def test_fourier_uniform_real01(self):
for shape in [(32, 16), (31, 15)]:
for type in [numpy.float32, numpy.float64]:
a = numpy.zeros(shape, type)
a[0, 0] = 1.0
a = fft.rfft(a, shape[0], 0)
a = fft.fft(a, shape[1], 1)
a = ndimage.fourier_uniform(a, [5.0, 2.5],
shape[0], 0)
a = fft.ifft(a, shape[1], 1)
a = fft.irfft(a, shape[0], 0)
assert_almost_equal(ndimage.sum(a), 1.0)
def test_fourier_uniform_complex01(self):
for shape in [(32, 16), (31, 15)]:
for type in [numpy.complex64, numpy.complex128]:
a = numpy.zeros(shape, type)
a[0, 0] = 1.0
a = fft.fft(a, shape[0], 0)
a = fft.fft(a, shape[1], 1)
a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)
a = fft.ifft(a, shape[1], 1)
a = fft.ifft(a, shape[0], 0)
assert_almost_equal(ndimage.sum(a.real), 1.0)
def test_fourier_shift_real01(self):
for shape in [(32, 16), (31, 15)]:
for dtype in [numpy.float32, numpy.float64]:
expected = numpy.arange(shape[0] * shape[1], dtype=dtype)
expected.shape = shape
a = fft.rfft(expected, shape[0], 0)
a = fft.fft(a, shape[1], 1)
a = ndimage.fourier_shift(a, [1, 1], shape[0], 0)
a = fft.ifft(a, shape[1], 1)
a = fft.irfft(a, shape[0], 0)
assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1])
assert_array_almost_equal(a.imag, numpy.zeros(shape))
def test_fourier_shift_complex01(self):
for shape in [(32, 16), (31, 15)]:
for type in [numpy.complex64, numpy.complex128]:
expected = numpy.arange(shape[0] * shape[1],
dtype=type)
expected.shape = shape
a = fft.fft(expected, shape[0], 0)
a = fft.fft(a, shape[1], 1)
a = ndimage.fourier_shift(a, [1, 1], -1, 0)
a = fft.ifft(a, shape[1], 1)
a = fft.ifft(a, shape[0], 0)
assert_array_almost_equal(a.real[1:, 1:], expected[:-1, :-1])
assert_array_almost_equal(a.imag, numpy.zeros(shape))
def test_fourier_ellipsoid_real01(self):
for shape in [(32, 16), (31, 15)]:
for type in [numpy.float32, numpy.float64]:
a = numpy.zeros(shape, type)
a[0, 0] = 1.0
a = fft.rfft(a, shape[0], 0)
a = fft.fft(a, shape[1], 1)
a = ndimage.fourier_ellipsoid(a, [5.0, 2.5],
shape[0], 0)
a = fft.ifft(a, shape[1], 1)
a = fft.irfft(a, shape[0], 0)
assert_almost_equal(ndimage.sum(a), 1.0)
def test_fourier_ellipsoid_complex01(self):
for shape in [(32, 16), (31, 15)]:
for type in [numpy.complex64, numpy.complex128]:
a = numpy.zeros(shape, type)
a[0, 0] = 1.0
a = fft.fft(a, shape[0], 0)
a = fft.fft(a, shape[1], 1)
a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1,
0)
a = fft.ifft(a, shape[1], 1)
a = fft.ifft(a, shape[0], 0)
assert_almost_equal(ndimage.sum(a.real), 1.0)
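    # spline_filter must be the identity on constant inputs for all spline
    # orders from 2 to 5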
def test_spline01(self):
for type in self.types:
data = numpy.ones([], type)
for order in range(2, 6):
out = ndimage.spline_filter(data, order=order)
assert_array_almost_equal(out, 1)
def test_spline02(self):
for type in self.types:
            data = numpy.array([1], type)
for order in range(2, 6):
out = ndimage.spline_filter(data, order=order)
assert_array_almost_equal(out, [1])
def test_spline03(self):
for type in self.types:
data = numpy.ones([], type)
for order in range(2, 6):
out = ndimage.spline_filter(data, order,
output=type)
assert_array_almost_equal(out, 1)
def test_spline04(self):
for type in self.types:
data = numpy.ones([4], type)
for order in range(2, 6):
out = ndimage.spline_filter(data, order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_spline05(self):
for type in self.types:
data = numpy.ones([4, 4], type)
for order in range(2, 6):
out = ndimage.spline_filter(data, order=order)
assert_array_almost_equal(out, [[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
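    # geometric_transform: the mapping callback receives an output coordinate
    # tuple and returns the input coordinate to sample, so e.g.
    #     ndimage.geometric_transform([10, 20, 30], lambda x: (x[0] - 1,))
    # yields [0, 10, 20]; x - 1 shifts content forward, x * 2 downsamples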
def test_geometric_transform01(self):
data = numpy.array([1])
def mapping(x):
return x
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape,
order=order)
assert_array_almost_equal(out, [1])
def test_geometric_transform01_with_output_parameter(self):
data = numpy.array([1])
def mapping(x):
return x
for order in range(0, 6):
out = numpy.empty_like(data)
ndimage.geometric_transform(data, mapping,
data.shape,
output=out)
assert_array_almost_equal(out, [1])
out = numpy.empty_like(data).astype(data.dtype.newbyteorder())
ndimage.geometric_transform(data, mapping,
data.shape,
output=out)
assert_array_almost_equal(out, [1])
def test_geometric_transform02(self):
data = numpy.ones([4])
def mapping(x):
return x
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_geometric_transform03(self):
data = numpy.ones([4])
def mapping(x):
return (x[0] - 1,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
def test_geometric_transform04(self):
data = numpy.array([4, 1, 3, 2])
def mapping(x):
return (x[0] - 1,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
def test_geometric_transform05(self):
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
def mapping(x):
return (x[0], x[1] - 1)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
def test_geometric_transform06(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0], x[1] - 1)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
def test_geometric_transform07(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0] - 1, x[1])
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
def test_geometric_transform08(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0] - 1, x[1] - 1)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
data.shape, order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_geometric_transform10(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
def mapping(x):
return (x[0] - 1, x[1] - 1)
for order in range(0, 6):
            if order > 1:
filtered = ndimage.spline_filter(data,
order=order)
else:
filtered = data
out = ndimage.geometric_transform(filtered, mapping,
data.shape, order=order, prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_geometric_transform13(self):
data = numpy.ones([2], numpy.float64)
def mapping(x):
return (x[0] // 2,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
[4], order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_geometric_transform14(self):
data = [1, 5, 2, 6, 3, 7, 4, 4]
def mapping(x):
return (2 * x[0],)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
[4], order=order)
assert_array_almost_equal(out, [1, 2, 3, 4])
def test_geometric_transform15(self):
data = [1, 2, 3, 4]
def mapping(x):
return (x[0] / 2,)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
[8], order=order)
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
def test_geometric_transform16(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9.0, 10, 11, 12]]
def mapping(x):
return (x[0], x[1] * 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
(3, 2), order=order)
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
def test_geometric_transform17(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] * 2, x[1])
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
(1, 4), order=order)
assert_array_almost_equal(out, [[1, 2, 3, 4]])
def test_geometric_transform18(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] * 2, x[1] * 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
(1, 2), order=order)
assert_array_almost_equal(out, [[1, 3]])
def test_geometric_transform19(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0], x[1] / 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
(3, 8), order=order)
assert_array_almost_equal(out[..., ::2], data)
def test_geometric_transform20(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] / 2, x[1])
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
(6, 4), order=order)
assert_array_almost_equal(out[::2, ...], data)
def test_geometric_transform21(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (x[0] / 2, x[1] / 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_geometric_transform22(self):
data = numpy.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], numpy.float64)
def mapping1(x):
return (x[0] / 2, x[1] / 2)
def mapping2(x):
return (x[0] * 2, x[1] * 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping1,
(6, 8), order=order)
out = ndimage.geometric_transform(out, mapping2,
(3, 4), order=order)
assert_array_almost_equal(out, data)
def test_geometric_transform23(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x):
return (1, x[0] * 2)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
(2,), order=order)
out = out.astype(numpy.int32)
assert_array_almost_equal(out, [5, 7])
def test_geometric_transform24(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
def mapping(x, a, b):
return (a, x[0] * b)
for order in range(0, 6):
out = ndimage.geometric_transform(data, mapping,
(2,), order=order, extra_arguments=(1,),
extra_keywords={'b': 2})
assert_array_almost_equal(out, [5, 7])
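    # map_coordinates: explicit coordinate arrays must reproduce the
    # equivalent shift, also on fortran-ordered and strided inputs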
def test_map_coordinates01(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
idx = numpy.indices(data.shape)
idx -= 1
for order in range(0, 6):
out = ndimage.map_coordinates(data, idx, order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_map_coordinates01_with_output_parameter(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
idx = numpy.indices(data.shape)
idx -= 1
expected = numpy.array([[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
for order in range(0, 6):
out = numpy.empty_like(expected)
ndimage.map_coordinates(data, idx, order=order, output=out)
assert_array_almost_equal(out, expected)
out = numpy.empty_like(expected).astype(
expected.dtype.newbyteorder())
ndimage.map_coordinates(data, idx, order=order, output=out)
assert_array_almost_equal(out, expected)
def test_map_coordinates02(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
idx = numpy.indices(data.shape, numpy.float64)
idx -= 0.5
for order in range(0, 6):
out1 = ndimage.shift(data, 0.5, order=order)
out2 = ndimage.map_coordinates(data, idx,
order=order)
assert_array_almost_equal(out1, out2)
def test_map_coordinates03(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]], order='F')
idx = numpy.indices(data.shape) - 1
out = ndimage.map_coordinates(data, idx)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
assert_array_almost_equal(out, ndimage.shift(data, (1, 1)))
idx = numpy.indices(data[::2].shape) - 1
out = ndimage.map_coordinates(data[::2], idx)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3]])
assert_array_almost_equal(out, ndimage.shift(data[::2], (1, 1)))
idx = numpy.indices(data[:, ::2].shape) - 1
out = ndimage.map_coordinates(data[:, ::2], idx)
assert_array_almost_equal(out, [[0, 0], [0, 4], [0, 7]])
assert_array_almost_equal(out, ndimage.shift(data[:, ::2], (1, 1)))
# do not run on 32 bit or windows (no sparse memory)
@dec.skipif('win32' in sys.platform or numpy.intp(0).itemsize < 8)
def test_map_coordinates_large_data(self):
# check crash on large data
try:
n = 30000
a = numpy.empty(n ** 2, dtype=numpy.float32).reshape(n, n)
# fill the part we might read
a[n - 3:, n - 3:] = 0
ndimage.map_coordinates(a, [[n - 1.5], [n - 1.5]], order=1)
except MemoryError:
raise SkipTest("Not enough memory available")
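    # affine_transform: matrix/offset pairs mirroring the geometric_transform
    # cases above, plus the diagonal versus full-matrix consistency checks
    # from issue #1547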
def test_affine_transform01(self):
data = numpy.array([1])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]],
order=order)
assert_array_almost_equal(out, [1])
def test_affine_transform01_with_output_parameter(self):
data = numpy.array([1])
for order in range(0, 6):
out = numpy.empty_like(data)
ndimage.affine_transform(data, [[1]],
order=order,
output=out)
assert_array_almost_equal(out, [1])
out = numpy.empty_like(data).astype(data.dtype.newbyteorder())
ndimage.affine_transform(data, [[1]],
order=order,
output=out)
assert_array_almost_equal(out, [1])
def test_affine_transform02(self):
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]],
order=order)
assert_array_almost_equal(out, [1, 1, 1, 1])
def test_affine_transform03(self):
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]], -1,
order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
def test_affine_transform04(self):
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1]], -1,
order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
def test_affine_transform05(self):
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0],
[0, 1]],
[0, -1], order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
def test_affine_transform06(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0],
[0, 1]],
[0, -1], order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
def test_affine_transform07(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0],
[0, 1]],
[-1, 0], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
def test_affine_transform08(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0],
[0, 1]],
[-1, -1], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_affine_transform09(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
            if order > 1:
filtered = ndimage.spline_filter(data,
order=order)
else:
filtered = data
out = ndimage.affine_transform(filtered, [[1, 0],
[0, 1]],
[-1, -1], order=order, prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_affine_transform10(self):
data = numpy.ones([2], numpy.float64)
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5]],
output_shape=(4,), order=order)
assert_array_almost_equal(out, [1, 1, 1, 0])
def test_affine_transform11(self):
data = [1, 5, 2, 6, 3, 7, 4, 4]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2]], 0, (4,),
order=order)
assert_array_almost_equal(out, [1, 2, 3, 4])
def test_affine_transform12(self):
data = [1, 2, 3, 4]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5]], 0,
(8,), order=order)
assert_array_almost_equal(out[::2], [1, 2, 3, 4])
def test_affine_transform13(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9.0, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0],
[0, 2]], 0,
(3, 2), order=order)
assert_array_almost_equal(out, [[1, 3], [5, 7], [9, 11]])
def test_affine_transform14(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2, 0],
[0, 1]], 0,
(1, 4), order=order)
assert_array_almost_equal(out, [[1, 2, 3, 4]])
def test_affine_transform15(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2, 0],
[0, 2]], 0,
(1, 2), order=order)
assert_array_almost_equal(out, [[1, 3]])
def test_affine_transform16(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[1, 0.0],
[0, 0.5]], 0,
(3, 8), order=order)
assert_array_almost_equal(out[..., ::2], data)
def test_affine_transform17(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5, 0],
[0, 1]], 0,
(6, 4), order=order)
assert_array_almost_equal(out[::2, ...], data)
def test_affine_transform18(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data,
[[0.5, 0],
[0, 0.5]], 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_affine_transform19(self):
data = numpy.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], numpy.float64)
for order in range(0, 6):
out = ndimage.affine_transform(data,
[[0.5, 0],
[0, 0.5]], 0,
(6, 8), order=order)
out = ndimage.affine_transform(out,
[[2.0, 0],
[0, 2.0]], 0,
(3, 4), order=order)
assert_array_almost_equal(out, data)
def test_affine_transform20(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0], [2]], 0,
(2,), order=order)
assert_array_almost_equal(out, [1, 3])
def test_affine_transform21(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2], [0]], 0,
(2,), order=order)
assert_array_almost_equal(out, [1, 9])
def test_affine_transform22(self):
# shift and offset interaction; see issue #1547
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[2]], [-1],
(3,), order=order)
assert_array_almost_equal(out, [0, 1, 2])
def test_affine_transform23(self):
# shift and offset interaction; see issue #1547
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.affine_transform(data, [[0.5]], [-1],
(8,), order=order)
assert_array_almost_equal(out[::2], [0, 4, 1, 3])
def test_affine_transform24(self):
# consistency between diagonal and non-diagonal case; see issue #1547
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
out1 = ndimage.affine_transform(data, [2], -1,
order=order)
out2 = ndimage.affine_transform(data, [[2]], -1,
order=order)
assert_array_almost_equal(out1, out2)
def test_affine_transform25(self):
# consistency between diagonal and non-diagonal case; see issue #1547
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
out1 = ndimage.affine_transform(data, [0.5], -1,
order=order)
out2 = ndimage.affine_transform(data, [[0.5]], -1,
order=order)
assert_array_almost_equal(out1, out2)
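    # shift: whole-sample shifts pad the leading edge with zeros; the
    # prefilter=False path is verified against pre-spline-filtered input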
def test_shift01(self):
data = numpy.array([1])
for order in range(0, 6):
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0])
def test_shift02(self):
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.shift(data, [1], order=order)
assert_array_almost_equal(out, [0, 1, 1, 1])
def test_shift03(self):
data = numpy.ones([4])
for order in range(0, 6):
out = ndimage.shift(data, -1, order=order)
assert_array_almost_equal(out, [1, 1, 1, 0])
def test_shift04(self):
data = numpy.array([4, 1, 3, 2])
for order in range(0, 6):
out = ndimage.shift(data, 1, order=order)
assert_array_almost_equal(out, [0, 4, 1, 3])
def test_shift05(self):
data = numpy.array([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
for order in range(0, 6):
out = ndimage.shift(data, [0, 1], order=order)
assert_array_almost_equal(out, [[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
def test_shift06(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.shift(data, [0, 1], order=order)
assert_array_almost_equal(out, [[0, 4, 1, 3],
[0, 7, 6, 8],
[0, 3, 5, 3]])
def test_shift07(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.shift(data, [1, 0], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[4, 1, 3, 2],
[7, 6, 8, 5]])
def test_shift08(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
out = ndimage.shift(data, [1, 1], order=order)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
def test_shift09(self):
data = numpy.array([[4, 1, 3, 2],
[7, 6, 8, 5],
[3, 5, 3, 6]])
for order in range(0, 6):
            if order > 1:
filtered = ndimage.spline_filter(data,
order=order)
else:
filtered = data
out = ndimage.shift(filtered, [1, 1], order=order,
prefilter=False)
assert_array_almost_equal(out, [[0, 0, 0, 0],
[0, 4, 1, 3],
[0, 7, 6, 8]])
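    # zoom tests, including round-tripping, degenerate zoom factors and
    # output shape rounding (regression tests for tickets #1122 and #1419)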
def test_zoom1(self):
for order in range(0, 6):
for z in [2, [2, 2]]:
                arr = numpy.arange(25, dtype=float).reshape((5, 5))
arr = ndimage.zoom(arr, z, order=order)
assert_equal(arr.shape, (10, 10))
assert_(numpy.all(arr[-1, :] != 0))
assert_(numpy.all(arr[-1, :] >= (20 - eps)))
assert_(numpy.all(arr[0, :] <= (5 + eps)))
assert_(numpy.all(arr >= (0 - eps)))
assert_(numpy.all(arr <= (24 + eps)))
def test_zoom2(self):
arr = numpy.arange(12).reshape((3, 4))
out = ndimage.zoom(ndimage.zoom(arr, 2), 0.5)
assert_array_equal(out, arr)
def test_zoom3(self):
        arr = numpy.array([[1, 2]])
        with numpy.errstate(invalid='ignore'):
            out1 = ndimage.zoom(arr, (2, 1))
            out2 = ndimage.zoom(arr, (1, 2))
assert_array_almost_equal(out1, numpy.array([[1, 2], [1, 2]]))
assert_array_almost_equal(out2, numpy.array([[1, 1, 2, 2]]))
def test_zoom_affine01(self):
data = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
for order in range(0, 6):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
out = ndimage.affine_transform(data, [0.5, 0.5], 0,
(6, 8), order=order)
assert_array_almost_equal(out[::2, ::2], data)
def test_zoom_infinity(self):
# Ticket #1419 regression test
        dim = 8
        with numpy.errstate(divide='ignore'):
            ndimage.zoom(numpy.zeros((dim, dim)), 1. / dim, mode='nearest')
def test_zoom_zoomfactor_one(self):
# Ticket #1122 regression test
arr = numpy.zeros((1, 5, 5))
zoom = (1.0, 2.0, 2.0)
        with numpy.errstate(invalid='ignore'):
            out = ndimage.zoom(arr, zoom, cval=7)
ref = numpy.zeros((1, 10, 10))
assert_array_almost_equal(out, ref)
def test_zoom_output_shape_roundoff(self):
arr = numpy.zeros((3, 11, 25))
zoom = (4.0 / 3, 15.0 / 11, 29.0 / 25)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
out = ndimage.zoom(arr, zoom)
assert_array_equal(out.shape, (4, 15, 29))
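    # rotate: 0 and 90 degree rotations of small binary patterns, with and
    # without reshape, on 2-d and 3-d inputs and non-default axes; the exact
    # grid alignment should make the result independent of the spline order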
def test_rotate01(self):
data = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
            out = ndimage.rotate(data, 0, order=order)
assert_array_almost_equal(out, data)
def test_rotate02(self):
data = numpy.array([[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
            out = ndimage.rotate(data, 90, order=order)
assert_array_almost_equal(out, expected)
def test_rotate03(self):
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
            out = ndimage.rotate(data, 90, order=order)
assert_array_almost_equal(out, expected)
def test_rotate04(self):
data = numpy.array([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
            out = ndimage.rotate(data, 90, reshape=False, order=order)
assert_array_almost_equal(out, expected)
def test_rotate05(self):
data = numpy.empty((4, 3, 3))
for i in range(3):
data[:, :, i] = numpy.array([[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
            out = ndimage.rotate(data, 90, order=order)
for i in range(3):
assert_array_almost_equal(out[:, :, i], expected)
def test_rotate06(self):
data = numpy.empty((3, 4, 3))
for i in range(3):
data[:, :, i] = numpy.array([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=numpy.float64)
expected = numpy.array([[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=numpy.float64)
for order in range(0, 6):
            out = ndimage.rotate(data, 90, order=order)
for i in range(3):
assert_array_almost_equal(out[:, :, i], expected)
def test_rotate07(self):
data = numpy.array([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2,
dtype=numpy.float64)
data = data.transpose()
expected = numpy.array([[[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]] * 2, dtype=numpy.float64)
expected = expected.transpose([2, 1, 0])
for order in range(0, 6):
            out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
assert_array_almost_equal(out, expected)
def test_rotate08(self):
data = numpy.array([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2,
dtype=numpy.float64)
data = data.transpose()
expected = numpy.array([[[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2,
dtype=numpy.float64)
expected = expected.transpose()
for order in range(0, 6):
            out = ndimage.rotate(data, 90, axes=(0, 1),
                                 reshape=False, order=order)
assert_array_almost_equal(out, expected)
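    # watershed_ift: markers flood rectangular ridge images; the variants
    # cover 4- versus 8-connected structures, swapped marker labels, and an
    # explicit output array on a transposed (non-contiguous) buffer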
def test_watershed_ift01(self):
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]],
numpy.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift02(self):
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]],
numpy.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, 1, 1, 1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift03(self):
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1]],
numpy.int8)
out = ndimage.watershed_ift(data, markers)
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, 2, -1, 3, -1, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, -1, 2, -1, 3, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift04(self):
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1]],
numpy.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, 2, 2, 3, 3, 3, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift05(self):
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1]],
numpy.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, -1, -1, -1, -1, -1, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, 3, 3, 2, 2, 2, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift06(self):
data = numpy.array([[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]],
numpy.int8)
out = ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
expected = [[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
def test_watershed_ift07(self):
shape = (7, 6)
data = numpy.zeros(shape, dtype=numpy.uint8)
data = data.transpose()
data[...] = numpy.array([[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], numpy.uint8)
markers = numpy.array([[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]],
numpy.int8)
out = numpy.zeros(shape, dtype=numpy.int16)
out = out.transpose()
ndimage.watershed_ift(data, markers,
structure=[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
output=out)
expected = [[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, 1, 1, 1, 1, 1, -1],
[-1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1, -1, -1]]
assert_array_almost_equal(out, expected)
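    # distance transforms: the brute force (bf) tests below cover the
    # euclidean, cityblock and chessboard metrics, anisotropic sampling and
    # the various distances/indices output combinations, followed by the
    # chamfer type (cdt) variants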
def test_distance_transform_bf01(self):
# brute force (bf) distance transform
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
out, ft = ndimage.distance_transform_bf(data, 'euclidean',
return_indices=True)
expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 2, 4, 2, 1, 0, 0],
[0, 0, 1, 4, 8, 4, 1, 0, 0],
[0, 0, 1, 2, 4, 2, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]
assert_array_almost_equal(out * out, expected)
expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 1, 2, 2, 2, 2],
[3, 3, 3, 2, 1, 2, 3, 3, 3],
[4, 4, 4, 4, 6, 4, 4, 4, 4],
[5, 5, 6, 6, 7, 6, 6, 5, 5],
[6, 6, 6, 7, 7, 7, 6, 6, 6],
[7, 7, 7, 7, 7, 7, 7, 7, 7],
[8, 8, 8, 8, 8, 8, 8, 8, 8]],
[[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 2, 4, 6, 6, 7, 8],
[0, 1, 1, 2, 4, 6, 7, 7, 8],
[0, 1, 1, 1, 6, 7, 7, 7, 8],
[0, 1, 2, 2, 4, 6, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8]]]
assert_array_almost_equal(ft, expected)
def test_distance_transform_bf02(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
out, ft = ndimage.distance_transform_bf(data, 'cityblock',
return_indices=True)
expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 2, 2, 2, 1, 0, 0],
[0, 0, 1, 2, 3, 2, 1, 0, 0],
[0, 0, 1, 2, 2, 2, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]
assert_array_almost_equal(out, expected)
expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 1, 2, 2, 2, 2],
[3, 3, 3, 3, 1, 3, 3, 3, 3],
[4, 4, 4, 4, 7, 4, 4, 4, 4],
[5, 5, 6, 7, 7, 7, 6, 5, 5],
[6, 6, 6, 7, 7, 7, 6, 6, 6],
[7, 7, 7, 7, 7, 7, 7, 7, 7],
[8, 8, 8, 8, 8, 8, 8, 8, 8]],
[[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 2, 4, 6, 6, 7, 8],
[0, 1, 1, 1, 4, 7, 7, 7, 8],
[0, 1, 1, 1, 4, 7, 7, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8]]]
assert_array_almost_equal(expected, ft)
def test_distance_transform_bf03(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
out, ft = ndimage.distance_transform_bf(data, 'chessboard',
return_indices=True)
expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 2, 1, 1, 0, 0],
[0, 0, 1, 2, 2, 2, 1, 0, 0],
[0, 0, 1, 1, 2, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]
assert_array_almost_equal(out, expected)
expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 1, 2, 2, 2, 2],
[3, 3, 4, 2, 2, 2, 4, 3, 3],
[4, 4, 5, 6, 6, 6, 5, 4, 4],
[5, 5, 6, 6, 7, 6, 6, 5, 5],
[6, 6, 6, 7, 7, 7, 6, 6, 6],
[7, 7, 7, 7, 7, 7, 7, 7, 7],
[8, 8, 8, 8, 8, 8, 8, 8, 8]],
[[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 2, 5, 6, 6, 7, 8],
[0, 1, 1, 2, 6, 6, 7, 7, 8],
[0, 1, 1, 2, 6, 7, 7, 7, 8],
[0, 1, 2, 2, 6, 6, 7, 7, 8],
[0, 1, 2, 4, 5, 6, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8]]]
assert_array_almost_equal(ft, expected)
def test_distance_transform_bf04(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
tdt, tft = ndimage.distance_transform_bf(data,
                                                 return_indices=True)
dts = []
fts = []
dt = numpy.zeros(data.shape, dtype=numpy.float64)
ndimage.distance_transform_bf(data, distances=dt)
dts.append(dt)
ft = ndimage.distance_transform_bf(data,
                                           return_distances=False,
                                           return_indices=True)
fts.append(ft)
ft = numpy.indices(data.shape, dtype=numpy.int32)
ndimage.distance_transform_bf(data,
return_distances=False, return_indices=True, indices=ft)
fts.append(ft)
dt, ft = ndimage.distance_transform_bf(data,
return_indices=1)
dts.append(dt)
fts.append(ft)
dt = numpy.zeros(data.shape, dtype=numpy.float64)
ft = ndimage.distance_transform_bf(data, distances=dt,
return_indices=True)
dts.append(dt)
fts.append(ft)
ft = numpy.indices(data.shape, dtype=numpy.int32)
dt = ndimage.distance_transform_bf(data,
return_indices=True, indices=ft)
dts.append(dt)
fts.append(ft)
dt = numpy.zeros(data.shape, dtype=numpy.float64)
ft = numpy.indices(data.shape, dtype=numpy.int32)
ndimage.distance_transform_bf(data, distances=dt,
return_indices=True, indices=ft)
dts.append(dt)
fts.append(ft)
for dt in dts:
assert_array_almost_equal(tdt, dt)
for ft in fts:
assert_array_almost_equal(tft, ft)
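    # anisotropic pixel spacing via the sampling keyword; the expected
    # values are squared distances, so out * out is compared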
def test_distance_transform_bf05(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
out, ft = ndimage.distance_transform_bf(data,
'euclidean', return_indices=True, sampling=[2, 2])
expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 4, 4, 4, 0, 0, 0],
[0, 0, 4, 8, 16, 8, 4, 0, 0],
[0, 0, 4, 16, 32, 16, 4, 0, 0],
[0, 0, 4, 8, 16, 8, 4, 0, 0],
[0, 0, 0, 4, 4, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]
assert_array_almost_equal(out * out, expected)
expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 1, 2, 2, 2, 2],
[3, 3, 3, 2, 1, 2, 3, 3, 3],
[4, 4, 4, 4, 6, 4, 4, 4, 4],
[5, 5, 6, 6, 7, 6, 6, 5, 5],
[6, 6, 6, 7, 7, 7, 6, 6, 6],
[7, 7, 7, 7, 7, 7, 7, 7, 7],
[8, 8, 8, 8, 8, 8, 8, 8, 8]],
[[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 2, 4, 6, 6, 7, 8],
[0, 1, 1, 2, 4, 6, 7, 7, 8],
[0, 1, 1, 1, 6, 7, 7, 7, 8],
[0, 1, 2, 2, 4, 6, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8]]]
assert_array_almost_equal(ft, expected)
def test_distance_transform_bf06(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
out, ft = ndimage.distance_transform_bf(data,
'euclidean', return_indices=True, sampling=[2, 1])
expected = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 4, 1, 0, 0, 0],
[0, 0, 1, 4, 8, 4, 1, 0, 0],
[0, 0, 1, 4, 9, 4, 1, 0, 0],
[0, 0, 1, 4, 8, 4, 1, 0, 0],
[0, 0, 0, 1, 4, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]
assert_array_almost_equal(out * out, expected)
expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2, 2],
[3, 3, 3, 3, 2, 3, 3, 3, 3],
[4, 4, 4, 4, 4, 4, 4, 4, 4],
[5, 5, 5, 5, 6, 5, 5, 5, 5],
[6, 6, 6, 6, 7, 6, 6, 6, 6],
[7, 7, 7, 7, 7, 7, 7, 7, 7],
[8, 8, 8, 8, 8, 8, 8, 8, 8]],
[[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 2, 6, 6, 6, 7, 8],
[0, 1, 1, 1, 6, 7, 7, 7, 8],
[0, 1, 1, 1, 7, 7, 7, 7, 8],
[0, 1, 1, 1, 6, 7, 7, 7, 8],
[0, 1, 2, 2, 4, 6, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8]]]
assert_array_almost_equal(ft, expected)
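    # cdt results are cross-checked against the brute-force implementation
    # for the city-block and chessboard metrics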
def test_distance_transform_cdt01(self):
# chamfer type distance (cdt) transform
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
out, ft = ndimage.distance_transform_cdt(data,
'cityblock', return_indices=True)
bf = ndimage.distance_transform_bf(data, 'cityblock')
assert_array_almost_equal(bf, out)
expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 1, 1, 1, 2, 2, 2],
[3, 3, 2, 1, 1, 1, 2, 3, 3],
[4, 4, 4, 4, 1, 4, 4, 4, 4],
[5, 5, 5, 5, 7, 7, 6, 5, 5],
[6, 6, 6, 6, 7, 7, 6, 6, 6],
[7, 7, 7, 7, 7, 7, 7, 7, 7],
[8, 8, 8, 8, 8, 8, 8, 8, 8]],
[[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 1, 1, 4, 7, 7, 7, 8],
[0, 1, 1, 1, 4, 5, 6, 7, 8],
[0, 1, 2, 2, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8], ]]
assert_array_almost_equal(ft, expected)
def test_distance_transform_cdt02(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
out, ft = ndimage.distance_transform_cdt(data, 'chessboard',
return_indices=True)
bf = ndimage.distance_transform_bf(data, 'chessboard')
assert_array_almost_equal(bf, out)
expected = [[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 1, 1, 1, 2, 2, 2],
[3, 3, 2, 2, 1, 2, 2, 3, 3],
[4, 4, 3, 2, 2, 2, 3, 4, 4],
[5, 5, 4, 6, 7, 6, 4, 5, 5],
[6, 6, 6, 6, 7, 7, 6, 6, 6],
[7, 7, 7, 7, 7, 7, 7, 7, 7],
[8, 8, 8, 8, 8, 8, 8, 8, 8]],
[[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 2, 3, 4, 6, 7, 8],
[0, 1, 1, 2, 2, 6, 6, 7, 8],
[0, 1, 1, 1, 2, 6, 7, 7, 8],
[0, 1, 1, 2, 6, 6, 7, 7, 8],
[0, 1, 2, 2, 5, 6, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
[0, 1, 2, 3, 4, 5, 6, 7, 8], ]]
assert_array_almost_equal(ft, expected)
def test_distance_transform_cdt03(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
tdt, tft = ndimage.distance_transform_cdt(data,
return_indices=True)
dts = []
fts = []
dt = numpy.zeros(data.shape, dtype=numpy.int32)
ndimage.distance_transform_cdt(data, distances=dt)
dts.append(dt)
ft = ndimage.distance_transform_cdt(data,
return_distances=False, return_indices=True)
fts.append(ft)
ft = numpy.indices(data.shape, dtype=numpy.int32)
ndimage.distance_transform_cdt(data,
return_distances=False, return_indices=True, indices=ft)
fts.append(ft)
dt, ft = ndimage.distance_transform_cdt(data,
return_indices=True)
dts.append(dt)
fts.append(ft)
dt = numpy.zeros(data.shape, dtype=numpy.int32)
ft = ndimage.distance_transform_cdt(data, distances=dt,
return_indices=True)
dts.append(dt)
fts.append(ft)
ft = numpy.indices(data.shape, dtype=numpy.int32)
dt = ndimage.distance_transform_cdt(data,
return_indices=True, indices=ft)
dts.append(dt)
fts.append(ft)
dt = numpy.zeros(data.shape, dtype=numpy.int32)
ft = numpy.indices(data.shape, dtype=numpy.int32)
ndimage.distance_transform_cdt(data, distances=dt,
return_indices=True, indices=ft)
dts.append(dt)
fts.append(ft)
for dt in dts:
assert_array_almost_equal(tdt, dt)
for ft in fts:
assert_array_almost_equal(tft, ft)
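    # edt results are verified against the brute-force transform; the
    # feature transform is checked by recomputing distances from indices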
def test_distance_transform_edt01(self):
# euclidean distance transform (edt)
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
out, ft = ndimage.distance_transform_edt(data,
return_indices=True)
bf = ndimage.distance_transform_bf(data, 'euclidean')
assert_array_almost_equal(bf, out)
dt = ft - numpy.indices(ft.shape[1:], dtype=ft.dtype)
dt = dt.astype(numpy.float64)
numpy.multiply(dt, dt, dt)
dt = numpy.add.reduce(dt, axis=0)
numpy.sqrt(dt, dt)
assert_array_almost_equal(bf, dt)
def test_distance_transform_edt02(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
tdt, tft = ndimage.distance_transform_edt(data,
return_indices=True)
dts = []
fts = []
dt = numpy.zeros(data.shape, dtype=numpy.float64)
ndimage.distance_transform_edt(data, distances=dt)
dts.append(dt)
ft = ndimage.distance_transform_edt(data,
return_distances=0, return_indices=True)
fts.append(ft)
ft = numpy.indices(data.shape, dtype=numpy.int32)
ndimage.distance_transform_edt(data,
return_distances=False, return_indices=True, indices=ft)
fts.append(ft)
dt, ft = ndimage.distance_transform_edt(data,
return_indices=True)
dts.append(dt)
fts.append(ft)
dt = numpy.zeros(data.shape, dtype=numpy.float64)
ft = ndimage.distance_transform_edt(data, distances=dt,
return_indices=True)
dts.append(dt)
fts.append(ft)
ft = numpy.indices(data.shape, dtype=numpy.int32)
dt = ndimage.distance_transform_edt(data,
return_indices=True, indices=ft)
dts.append(dt)
fts.append(ft)
dt = numpy.zeros(data.shape, dtype=numpy.float64)
ft = numpy.indices(data.shape, dtype=numpy.int32)
ndimage.distance_transform_edt(data, distances=dt,
return_indices=True, indices=ft)
dts.append(dt)
fts.append(ft)
for dt in dts:
assert_array_almost_equal(tdt, dt)
for ft in fts:
assert_array_almost_equal(tft, ft)
def test_distance_transform_edt03(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
ref = ndimage.distance_transform_bf(data, 'euclidean',
sampling=[2, 2])
out = ndimage.distance_transform_edt(data,
sampling=[2, 2])
assert_array_almost_equal(ref, out)
    def test_distance_transform_edt04(self):
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], type)
ref = ndimage.distance_transform_bf(data, 'euclidean',
sampling=[2, 1])
out = ndimage.distance_transform_edt(data,
sampling=[2, 1])
assert_array_almost_equal(ref, out)
    def test_distance_transform_edt05(self):
# Ticket #954 regression test
out = ndimage.distance_transform_edt(False)
assert_array_almost_equal(out, [0.])
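    # generate_binary_structure(rank, connectivity) for ranks 0, 1 and 2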
def test_generate_structure01(self):
struct = ndimage.generate_binary_structure(0, 1)
assert_array_almost_equal(struct, 1)
def test_generate_structure02(self):
struct = ndimage.generate_binary_structure(1, 1)
assert_array_almost_equal(struct, [1, 1, 1])
def test_generate_structure03(self):
struct = ndimage.generate_binary_structure(2, 1)
assert_array_almost_equal(struct, [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
def test_generate_structure04(self):
struct = ndimage.generate_binary_structure(2, 2)
assert_array_almost_equal(struct, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
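    # iterate_structure dilates a structure with itself; with a third
    # argument it also returns the adjusted origin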
def test_iterate_structure01(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
def test_iterate_structure02(self):
struct = [[0, 1],
[1, 1],
[0, 1]]
out = ndimage.iterate_structure(struct, 2)
assert_array_almost_equal(out, [[0, 0, 1],
[0, 1, 1],
[1, 1, 1],
[0, 1, 1],
[0, 0, 1]])
def test_iterate_structure03(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
out = ndimage.iterate_structure(struct, 2, 1)
expected = [[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]]
assert_array_almost_equal(out[0], expected)
assert_equal(out[1], [2, 2])
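    # binary erosion: scalar, 1-D and 2-D inputs, custom structures,
    # origins, border_value, iterations, masks and preallocated outputs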
def test_binary_erosion01(self):
for type in self.types:
data = numpy.ones([], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, 1)
def test_binary_erosion02(self):
for type in self.types:
data = numpy.ones([], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, 1)
def test_binary_erosion03(self):
for type in self.types:
data = numpy.ones([1], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0])
def test_binary_erosion04(self):
for type in self.types:
data = numpy.ones([1], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1])
def test_binary_erosion05(self):
for type in self.types:
data = numpy.ones([3], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 1, 0])
def test_binary_erosion06(self):
for type in self.types:
data = numpy.ones([3], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 1, 1])
def test_binary_erosion07(self):
for type in self.types:
data = numpy.ones([5], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 1, 1, 1, 0])
def test_binary_erosion08(self):
for type in self.types:
data = numpy.ones([5], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
def test_binary_erosion09(self):
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [0, 0, 0, 0, 0])
def test_binary_erosion10(self):
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [1, 0, 0, 0, 1])
def test_binary_erosion11(self):
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1)
assert_array_almost_equal(out, [1, 0, 1, 0, 1])
def test_binary_erosion12(self):
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1,
origin=-1)
assert_array_almost_equal(out, [0, 1, 0, 1, 1])
def test_binary_erosion13(self):
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 0, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1,
origin=1)
assert_array_almost_equal(out, [1, 1, 0, 1, 0])
def test_binary_erosion14(self):
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1)
assert_array_almost_equal(out, [1, 1, 0, 0, 1])
def test_binary_erosion15(self):
for type in self.types:
data = numpy.ones([5], type)
data[2] = 0
struct = [1, 1]
out = ndimage.binary_erosion(data, struct,
border_value=1,
origin=-1)
assert_array_almost_equal(out, [1, 0, 0, 1, 1])
def test_binary_erosion16(self):
for type in self.types:
data = numpy.ones([1, 1], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1]])
def test_binary_erosion17(self):
for type in self.types:
data = numpy.ones([1, 1], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0]])
def test_binary_erosion18(self):
for type in self.types:
data = numpy.ones([1, 3], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0, 0, 0]])
def test_binary_erosion19(self):
for type in self.types:
data = numpy.ones([1, 3], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1, 1, 1]])
def test_binary_erosion20(self):
for type in self.types:
data = numpy.ones([3, 3], type)
out = ndimage.binary_erosion(data)
assert_array_almost_equal(out, [[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
def test_binary_erosion21(self):
for type in self.types:
data = numpy.ones([3, 3], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
def test_binary_erosion22(self):
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_erosion(data, border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_erosion23(self):
struct = ndimage.generate_binary_structure(2, 2)
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_erosion(data, struct,
border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_erosion24(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_erosion(data, struct,
border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_erosion25(self):
struct = [[0, 1, 0],
[1, 0, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_erosion(data, struct,
border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_erosion26(self):
struct = [[0, 1, 0],
[1, 0, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 1],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_erosion(data, struct,
border_value=1, origin=(-1, -1))
assert_array_almost_equal(out, expected)
def test_binary_erosion27(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_erosion(data, struct,
border_value=1, iterations=2)
assert_array_almost_equal(out, expected)
def test_binary_erosion28(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], bool)
out = numpy.zeros(data.shape, bool)
ndimage.binary_erosion(data, struct, border_value=1,
iterations=2, output=out)
assert_array_almost_equal(out, expected)
def test_binary_erosion29(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0]], bool)
out = ndimage.binary_erosion(data, struct,
border_value=1, iterations=3)
assert_array_almost_equal(out, expected)
def test_binary_erosion30(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0]], bool)
out = numpy.zeros(data.shape, bool)
ndimage.binary_erosion(data, struct, border_value=1,
iterations=3, output=out)
assert_array_almost_equal(out, expected)
def test_binary_erosion31(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 1],
[0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1]]
data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0]], bool)
out = numpy.zeros(data.shape, bool)
ndimage.binary_erosion(data, struct, border_value=1,
iterations=1, output=out, origin=(-1, -1))
assert_array_almost_equal(out, expected)
def test_binary_erosion32(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_erosion(data, struct,
border_value=1, iterations=2)
assert_array_almost_equal(out, expected)
def test_binary_erosion33(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
mask = [[1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1]]
data = numpy.array([[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0, 1],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_erosion(data, struct,
border_value=1, mask=mask, iterations=-1)
assert_array_almost_equal(out, expected)
def test_binary_erosion34(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
mask = [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_erosion(data, struct,
border_value=1, mask=mask)
assert_array_almost_equal(out, expected)
def test_binary_erosion35(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
mask = [[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0]], bool)
tmp = [[0, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 1],
[0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1]]
expected = numpy.logical_and(tmp, mask)
tmp = numpy.logical_and(data, numpy.logical_not(mask))
expected = numpy.logical_or(expected, tmp)
out = numpy.zeros(data.shape, bool)
ndimage.binary_erosion(data, struct, border_value=1,
iterations=1, output=out,
origin=(-1, -1), mask=mask)
assert_array_almost_equal(out, expected)
def test_binary_erosion36(self):
struct = [[0, 1, 0],
[1, 0, 1],
[0, 1, 0]]
mask = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
tmp = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 1],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 1, 1, 1, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
expected = numpy.logical_and(tmp, mask)
tmp = numpy.logical_and(data, numpy.logical_not(mask))
expected = numpy.logical_or(expected, tmp)
out = ndimage.binary_erosion(data, struct, mask=mask,
border_value=1, origin=(-1, -1))
assert_array_almost_equal(out, expected)
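    # binary dilation: mirrors the erosion cases above (structures,
    # origins, border values, iterations, masks)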
def test_binary_dilation01(self):
for type in self.types:
data = numpy.ones([], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, 1)
def test_binary_dilation02(self):
for type in self.types:
data = numpy.zeros([], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, 0)
def test_binary_dilation03(self):
for type in self.types:
data = numpy.ones([1], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1])
def test_binary_dilation04(self):
for type in self.types:
data = numpy.zeros([1], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [0])
def test_binary_dilation05(self):
for type in self.types:
data = numpy.ones([3], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1])
def test_binary_dilation06(self):
for type in self.types:
data = numpy.zeros([3], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [0, 0, 0])
def test_binary_dilation07(self):
for type in self.types:
data = numpy.zeros([3], type)
data[1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1])
def test_binary_dilation08(self):
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
data[3] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1, 1, 1])
def test_binary_dilation09(self):
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [1, 1, 1, 0, 0])
def test_binary_dilation10(self):
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
out = ndimage.binary_dilation(data, origin=-1)
assert_array_almost_equal(out, [0, 1, 1, 1, 0])
def test_binary_dilation11(self):
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
out = ndimage.binary_dilation(data, origin=1)
assert_array_almost_equal(out, [1, 1, 0, 0, 0])
def test_binary_dilation12(self):
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, [1, 0, 1, 0, 0])
def test_binary_dilation13(self):
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct,
border_value=1)
assert_array_almost_equal(out, [1, 0, 1, 0, 1])
def test_binary_dilation14(self):
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct,
origin=-1)
assert_array_almost_equal(out, [0, 1, 0, 1, 0])
def test_binary_dilation15(self):
for type in self.types:
data = numpy.zeros([5], type)
data[1] = 1
struct = [1, 0, 1]
out = ndimage.binary_dilation(data, struct,
origin=-1, border_value=1)
assert_array_almost_equal(out, [1, 1, 0, 1, 0])
def test_binary_dilation16(self):
for type in self.types:
data = numpy.ones([1, 1], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1]])
def test_binary_dilation17(self):
for type in self.types:
data = numpy.zeros([1, 1], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[0]])
def test_binary_dilation18(self):
for type in self.types:
data = numpy.ones([1, 3], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1, 1, 1]])
def test_binary_dilation19(self):
for type in self.types:
data = numpy.ones([3, 3], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
def test_binary_dilation20(self):
for type in self.types:
data = numpy.zeros([3, 3], type)
data[1, 1] = 1
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
def test_binary_dilation21(self):
struct = ndimage.generate_binary_structure(2, 2)
for type in self.types:
data = numpy.zeros([3, 3], type)
data[1, 1] = 1
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
def test_binary_dilation22(self):
expected = [[0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_dilation(data)
assert_array_almost_equal(out, expected)
def test_binary_dilation23(self):
expected = [[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 1, 0, 1],
[1, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 1, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 1, 0, 0, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_dilation(data, border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_dilation24(self):
expected = [[1, 1, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_dilation(data, origin=(1, 1))
assert_array_almost_equal(out, expected)
def test_binary_dilation25(self):
expected = [[1, 1, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 0, 0, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_dilation(data, origin=(1, 1),
border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_dilation26(self):
struct = ndimage.generate_binary_structure(2, 2)
expected = [[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, expected)
def test_binary_dilation27(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_dilation(data, struct)
assert_array_almost_equal(out, expected)
def test_binary_dilation28(self):
expected = [[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]]
for type in self.types:
data = numpy.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], type)
out = ndimage.binary_dilation(data, border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_dilation29(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = ndimage.binary_dilation(data, struct,
iterations=2)
assert_array_almost_equal(out, expected)
def test_binary_dilation30(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = numpy.zeros(data.shape, bool)
ndimage.binary_dilation(data, struct, iterations=2,
output=out)
assert_array_almost_equal(out, expected)
def test_binary_dilation31(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = ndimage.binary_dilation(data, struct,
iterations=3)
assert_array_almost_equal(out, expected)
def test_binary_dilation32(self):
struct = [[0, 1],
[1, 1]]
expected = [[0, 0, 0, 1, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 0]]
data = numpy.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]], bool)
out = numpy.zeros(data.shape, bool)
ndimage.binary_dilation(data, struct, iterations=3,
output=out)
assert_array_almost_equal(out, expected)
def test_binary_dilation33(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_dilation(data, struct,
iterations=-1, mask=mask, border_value=0)
assert_array_almost_equal(out, expected)
def test_binary_dilation34(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.zeros(mask.shape, bool)
out = ndimage.binary_dilation(data, struct,
iterations=-1, mask=mask, border_value=1)
assert_array_almost_equal(out, expected)
def test_binary_dilation35(self):
tmp = [[1, 1, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 0, 0, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1]]
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
mask = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
expected = numpy.logical_and(tmp, mask)
tmp = numpy.logical_and(data, numpy.logical_not(mask))
expected = numpy.logical_or(expected, tmp)
for type in self.types:
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_dilation(data, mask=mask,
origin=(1, 1), border_value=1)
assert_array_almost_equal(out, expected)
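    # binary_propagation is dilation iterated to convergence
    # (iterations=-1) restricted to a mask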
def test_binary_propagation01(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_propagation(data, struct,
mask=mask, border_value=0)
assert_array_almost_equal(out, expected)
def test_binary_propagation02(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
mask = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.zeros(mask.shape, bool)
out = ndimage.binary_propagation(data, struct,
mask=mask, border_value=1)
assert_array_almost_equal(out, expected)
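    # opening: erosion followed by dilation with the same structure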
def test_binary_opening01(self):
expected = [[0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_opening(data)
assert_array_almost_equal(out, expected)
def test_binary_opening02(self):
struct = ndimage.generate_binary_structure(2, 2)
expected = [[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_opening(data, struct)
assert_array_almost_equal(out, expected)
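    # closing: dilation followed by erosion with the same structure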
def test_binary_closing01(self):
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_closing(data)
assert_array_almost_equal(out, expected)
def test_binary_closing02(self):
struct = ndimage.generate_binary_structure(2, 2)
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_closing(data, struct)
assert_array_almost_equal(out, expected)
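    # binary_fill_holes fills background regions not connected to the border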
def test_binary_fill_holes01(self):
expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_fill_holes(data)
assert_array_almost_equal(out, expected)
def test_binary_fill_holes02(self):
expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_fill_holes(data)
assert_array_almost_equal(out, expected)
def test_binary_fill_holes03(self):
expected = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
data = numpy.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 1, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0]], bool)
out = ndimage.binary_fill_holes(data)
assert_array_almost_equal(out, expected)
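    # grey-scale morphology: a zero structure reduces to the flat
    # (footprint-only) case; a non-zero structure offsets the result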
def test_grey_erosion01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
output = ndimage.grey_erosion(array,
footprint=footprint)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_erosion(array,
footprint=footprint, structure=structure)
assert_array_almost_equal([[2, 2, 1, 1, 1],
[2, 3, 1, 3, 1],
[5, 5, 3, 3, 1]], output)
def test_grey_erosion03(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_erosion(array,
footprint=footprint, structure=structure)
assert_array_almost_equal([[1, 1, 0, 0, 0],
[1, 2, 0, 2, 0],
[4, 4, 2, 2, 0]], output)
def test_grey_dilation01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
output = ndimage.grey_dilation(array,
footprint=footprint)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[0, 0, 0], [0, 0, 0]]
output = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
assert_array_almost_equal([[7, 7, 9, 9, 5],
[7, 9, 8, 9, 7],
[8, 8, 8, 7, 7]], output)
def test_grey_dilation03(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[0, 1, 1], [1, 0, 1]]
structure = [[1, 1, 1], [1, 1, 1]]
output = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
assert_array_almost_equal([[8, 8, 10, 10, 6],
[8, 10, 9, 10, 8],
[9, 9, 9, 8, 8]], output)
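    # grey opening/closing are checked against their definitions:
    # erosion followed by dilation, and dilation followed by erosion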
def test_grey_opening01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint)
expected = ndimage.grey_dilation(tmp, footprint=footprint)
output = ndimage.grey_opening(array,
footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_opening02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_dilation(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_opening(array,
footprint=footprint, structure=structure)
assert_array_almost_equal(expected, output)
def test_grey_closing01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint)
expected = ndimage.grey_erosion(tmp, footprint=footprint)
output = ndimage.grey_closing(array,
footprint=footprint)
assert_array_almost_equal(expected, output)
def test_grey_closing02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_dilation(array, footprint=footprint,
structure=structure)
expected = ndimage.grey_erosion(tmp, footprint=footprint,
structure=structure)
output = ndimage.grey_closing(array,
footprint=footprint, structure=structure)
assert_array_almost_equal(expected, output)
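    # derived operators, each checked against its definition:
    # gradient = dilation - erosion, Laplace = dilation + erosion - 2*input,
    # white top-hat = input - opening, black top-hat = closing - input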
def test_morphological_gradient01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_gradient(array,
footprint=footprint, structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_gradient02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 - tmp2
output = ndimage.morphological_gradient(array,
footprint=footprint, structure=structure)
assert_array_almost_equal(expected, output)
def test_morphological_laplace01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = numpy.zeros(array.shape, array.dtype)
ndimage.morphological_laplace(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_morphological_laplace02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp1 = ndimage.grey_dilation(array,
footprint=footprint, structure=structure)
tmp2 = ndimage.grey_erosion(array, footprint=footprint,
structure=structure)
expected = tmp1 + tmp2 - 2 * array
output = ndimage.morphological_laplace(array,
footprint=footprint, structure=structure)
assert_array_almost_equal(expected, output)
def test_white_tophat01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = numpy.zeros(array.shape, array.dtype)
ndimage.white_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_white_tophat02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_opening(array, footprint=footprint,
structure=structure)
expected = array - tmp
output = ndimage.white_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
def test_black_tophat01(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = numpy.zeros(array.shape, array.dtype)
ndimage.black_tophat(array, footprint=footprint,
structure=structure, output=output)
assert_array_almost_equal(expected, output)
def test_black_tophat02(self):
array = numpy.array([[3, 2, 5, 1, 4],
[7, 6, 9, 3, 5],
[5, 8, 3, 7, 1]])
footprint = [[1, 0, 1], [1, 1, 0]]
structure = [[0, 0, 0], [0, 0, 0]]
tmp = ndimage.grey_closing(array, footprint=footprint,
structure=structure)
expected = tmp - array
output = ndimage.black_tophat(array, footprint=footprint,
structure=structure)
assert_array_almost_equal(expected, output)
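    # hit-or-miss transform with one and two structuring elements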
def test_hit_or_miss01(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 0, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 1],
[0, 1, 1, 1, 1],
[0, 0, 0, 0, 0]], type)
out = numpy.zeros(data.shape, bool)
ndimage.binary_hit_or_miss(data, struct,
output=out)
assert_array_almost_equal(expected, out)
def test_hit_or_miss02(self):
struct = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
expected = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_hit_or_miss(data, struct)
assert_array_almost_equal(expected, out)
def test_hit_or_miss03(self):
struct1 = [[0, 0, 0],
[1, 1, 1],
[0, 0, 0]]
struct2 = [[1, 1, 1],
[0, 0, 0],
[1, 1, 1]]
expected = [[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for type in self.types:
data = numpy.array([[0, 1, 0, 0, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0]], type)
out = ndimage.binary_hit_or_miss(data, struct1,
struct2)
assert_array_almost_equal(expected, out)
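# regression tests for grey_dilation vs. binary_dilation consistency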
class TestDilateFix:
def setUp(self):
# dilation related setup
self.array = numpy.array([[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
[0, 0, 0, 1, 0, ],
[0, 0, 1, 1, 0, ],
[0, 0, 0, 0, 0, ]], dtype=numpy.uint8)
self.sq3x3 = numpy.ones((3, 3))
dilated3x3 = ndimage.binary_dilation(self.array, structure=self.sq3x3)
self.dilated3x3 = dilated3x3.view(numpy.uint8)
def test_dilation_square_structure(self):
result = ndimage.grey_dilation(self.array, structure=self.sq3x3)
# +1 accounts for difference between grey and binary dilation
assert_array_almost_equal(result, self.dilated3x3 + 1)
def test_dilation_scalar_size(self):
result = ndimage.grey_dilation(self.array, size=3)
assert_array_almost_equal(result, self.dilated3x3)
if __name__ == "__main__":
run_module_suite()
| [
"quanpan302@hotmail.com"
] | quanpan302@hotmail.com |
9e2bc48ca7987ee41405b2bb7640150b642f890c | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/devhub/azure-mgmt-devhub/generated_samples/workflow_create_or_update_with_artifact_gen.py | ecfb1c28f4ed24b756066d1017b0909a8f5eae51 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 3,628 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.devhub import DevHubMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-devhub
# USAGE
python workflow_create_or_update_with_artifact_gen.py
    Before running the sample, set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get these values, see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = DevHubMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="subscriptionId1",
)
response = client.workflow.create_or_update(
resource_group_name="resourceGroup1",
workflow_name="workflow1",
parameters={
"location": "location1",
"properties": {
"artifactGenerationProperties": {
"appName": "my-app",
"dockerfileGenerationMode": "enabled",
"dockerfileOutputDirectory": "./",
"generationLanguage": "javascript",
"imageName": "myimage",
"imageTag": "latest",
"languageVersion": "14",
"manifestGenerationMode": "enabled",
"manifestOutputDirectory": "./",
"manifestType": "kube",
"namespace": "my-namespace",
"port": "80",
},
"githubWorkflowProfile": {
"acr": {
"acrRegistryName": "registry1",
"acrRepositoryName": "repo1",
"acrResourceGroup": "resourceGroup1",
"acrSubscriptionId": "subscriptionId1",
},
"aksResourceId": "/subscriptions/subscriptionId1/resourcegroups/resourceGroup1/providers/Microsoft.ContainerService/managedClusters/cluster1",
"branchName": "branch1",
"deploymentProperties": {
"kubeManifestLocations": ["/src/manifests/"],
"manifestType": "kube",
"overrides": {"key1": "value1"},
},
"dockerBuildContext": "repo1/src/",
"dockerfile": "repo1/images/Dockerfile",
"oidcCredentials": {
"azureClientId": "12345678-3456-7890-5678-012345678901",
"azureTenantId": "66666666-3456-7890-5678-012345678901",
},
"repositoryName": "repo1",
"repositoryOwner": "owner1",
},
},
"tags": {"appname": "testApp"},
},
)
print(response)
# x-ms-original-file: specification/developerhub/resource-manager/Microsoft.DevHub/preview/2022-10-11-preview/examples/Workflow_CreateOrUpdate_WithArtifactGen.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
9e6f9df4f4afbe859d1c4cf2103cc13eeaa21eab | 1b5546e1fede94587fd2dabee0ef2695699cbab8 | /hyperengine/impl/tensorflow/tensorflow_solver.py | d60c70b21aa3a2b44987826f248958282ca9fb84 | [
"Apache-2.0"
] | permissive | 4thepoch/hyper-engine | 1998ba3f82d23eef732d278655ce1eb293478c20 | 5b73c5fd12ce0ca3f5038b41fd98bc52a1eccb34 | refs/heads/master | 2021-05-09T11:39:42.473874 | 2018-01-25T19:18:02 | 2018-01-25T19:18:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,304 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import tensorflow as tf
from hyperengine.model import BaseSolver
from tensorflow_model_io import TensorflowModelIO
from tensorflow_runner import TensorflowRunner
from tf_util import is_gpu_available
class TensorflowSolver(BaseSolver):
def __init__(self, data, model=None, hyper_params=None, augmentation=None, model_io=None, result_metric='max', **params):
if isinstance(model, TensorflowRunner):
runner = model
else:
runner = TensorflowRunner(model)
self._session = None
self._model_io = model_io if model_io is not None else TensorflowModelIO(**params)
self._save_accuracy_limit = params.get('save_accuracy_limit', 0)
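        # eval_flexible is forced off automatically when no GPU is available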
params['eval_flexible'] = params.get('eval_flexible', True) and is_gpu_available()
super(TensorflowSolver, self).__init__(runner, data, hyper_params, augmentation, result_metric, **params)
def create_session(self):
self._session = tf.Session(graph=self._runner.graph())
return self._session
def init_session(self):
self._runner.init(session=self._session)
results = self._load(directory=self._model_io.load_dir, log_level=1)
return results.get('validation_accuracy', 0)
def terminate(self):
self._runner.terminate()
def on_best_accuracy(self, accuracy, eval_result):
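        # persist the session and results only when accuracy clears the configured save threshold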
if accuracy >= self._save_accuracy_limit:
self._model_io.save_results({'validation_accuracy': accuracy, 'model_size': self._runner.model_size()})
self._model_io.save_hyper_params(self._hyper_params)
self._model_io.save_session(self._session)
self._model_io.save_data(eval_result.get('data'))
def _evaluate_test(self):
# Load the best session if available before test evaluation
current_results = self._load(directory=self._model_io.save_dir, log_level=0)
eval_ = super(TensorflowSolver, self)._evaluate_test()
if not current_results:
return eval_
# Update the current results
current_results['test_accuracy'] = eval_.get('accuracy', 0)
self._model_io.save_results(current_results)
return eval_
def _load(self, directory, log_level):
self._model_io.load_session(self._session, directory, log_level)
results = self._model_io.load_results(directory, log_level)
return results or {}
| [
"Maxim.Podkolzine@jetbrains.com"
] | Maxim.Podkolzine@jetbrains.com |
3925aeff2a743c13635f13ac8542f6b8dbb46427 | ff60aaabe366ebd8f60b8e0e66a86896553f32a3 | /49.py | a639775083edc44ec4fc009a5395bdbfb396a086 | [] | no_license | ramyasutraye/Python-6 | cb2f55339f6b74dffe6f73551f3554703e17b673 | 1cc1c8a9f0045b72e1d55ef1bb3cf48d8df8612c | refs/heads/master | 2020-04-12T02:05:21.861754 | 2018-09-21T19:02:29 | 2018-09-21T19:02:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | a=int(input("enter first no"))
b=int(input("enter second no"))
print(a+b) | [
"noreply@github.com"
] | ramyasutraye.noreply@github.com |
cb518cf4285b1538c6a12b8b2b0d80ae2cf354a2 | fab7b6e422b74424fb59398635f74faca9ff5a58 | /waimak_extended_boundry/model_and_NSMC_build/targets/well_bulding_script.py | f2f0b033d08be8757829d48282d0a4eece8d3768 | [] | no_license | hansonmcoombs/Waimakariri-Model-Ashley-to-Selwyn | c7a56a2ebd0d421c9679cb4a16ae319dfb2041b1 | c96c2663b010975ec08d42840fbc7970f3c2b085 | refs/heads/master | 2023-05-29T10:57:33.916912 | 2020-04-23T21:32:21 | 2020-04-23T21:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # -*- coding: utf-8 -*-
"""
Author: mattH
Date Created: 25/07/2017 11:13 AM
"""
import pandas as pd
import numpy as np
import flopy_mh as flopy
#this script was passed to brioch for inclusion in the pest optimisation process
# the influx wells are presently set to 1 m3/s so the multiplier can range between 0 and 5
well_data = pd.read_csv()  # set path
multiplier = 1  # set multiplier
well_data.loc[well_data.type == 'lr_boundry_flux', 'flux'] *= multiplier
g = well_data.groupby(['layer', 'row', 'col'])
outdata = g.aggregate({'flux': np.sum}).reset_index()
outdata = outdata.rename(columns={'layer': 'k', 'row': 'i', 'col': 'j'}).to_records(False)
outdata = outdata.astype(flopy.modflow.ModflowWel.get_default_dtype())
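# The remaining step is writing outdata into a MODFLOW wel file; a minimal sketch
# (hypothetical; assumes an existing flopy model object `m` and stress period 0):
# wel = flopy.modflow.ModflowWel(m, stress_period_data={0: outdata})
# wel.write_file()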
#write into file | [
"hansonmcoombs@gmail.com"
] | hansonmcoombs@gmail.com |
2d0866fa96eb9e8fcd6369fa07e38aeb1ef99bc5 | bd40b576e3c1527826f09286ef6fd186d807c5e5 | /Model/lookalike-model/lookalike_model/pipeline/main_trainready.py | 0fc3247e0e8584b72adc8231b10f27715646a4ea | [
"Apache-2.0"
] | permissive | jimmylao/blue-marlin | 7b48406a7b5486a97e51b30915e16be836f75eb2 | 0569bf19503286a9ba091e7f07bbbe1b204ae730 | refs/heads/master | 2021-12-03T09:24:41.536640 | 2021-07-21T00:59:06 | 2021-07-21T00:59:06 | 241,382,610 | 1 | 0 | Apache-2.0 | 2020-02-18T14:29:44 | 2020-02-18T14:29:43 | null | UTF-8 | Python | false | false | 10,512 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import os
import timeit
from pyspark import SparkContext
from pyspark.sql import functions as fn
from pyspark.sql.functions import lit, col, udf, collect_list, concat_ws, first, create_map, monotonically_increasing_id, row_number
from pyspark.sql.window import Window
from pyspark.sql.types import IntegerType, ArrayType, StringType, LongType
from pyspark.sql import HiveContext
from datetime import datetime, timedelta
from util import write_to_table, write_to_table_with_partition, print_batching_info, resolve_placeholder, load_config, load_batch_config, load_df
from itertools import chain
MAX_USER_IN_BUCKET = 10**9
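# did_index = did_bucket * MAX_USER_IN_BUCKET + per-bucket row number, so every bucket owns a disjoint id range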
def date_to_timestamp(dt):
epoch = datetime.utcfromtimestamp(0)
return int((dt - epoch).total_seconds())
def generate_trainready(hive_context, batch_config,
interval_time_in_seconds,
logs_table_name, trainready_table, did_bucket_num):
def group_batched_logs(df_logs):
        # group logs by did + interval_starting_time + keyword.
# group 1: group by did + interval_starting_time + keyword
df = df_logs.groupBy('did', 'interval_starting_time', 'keyword_index').agg(
first('keyword').alias('keyword'),
first('age').alias('age'),
first('gender').alias('gender'),
first('did_bucket').alias('did_bucket'),
fn.sum(col('is_click')).alias('kw_clicks_count'),
fn.sum(fn.when(col('is_click') == 0, 1).otherwise(0)).alias('kw_shows_count'),
)
df = df.withColumn('kwi_clicks_count', concat_ws(":", col('keyword_index'), col('kw_clicks_count')))
df = df.withColumn('kwi_shows_count', concat_ws(":", col('keyword_index'), col('kw_shows_count')))
df = df.withColumn('kw_clicks_count', concat_ws(":", col('keyword'), col('kw_clicks_count')))
df = df.withColumn('kw_shows_count', concat_ws(":", col('keyword'), col('kw_shows_count')))
# group 2: group by did + interval_starting_time
df = df.groupBy('did', 'interval_starting_time').agg(
concat_ws(",", collect_list('keyword_index')).alias('kwi'),
concat_ws(",", collect_list('kwi_clicks_count')).alias('kwi_click_counts'),
concat_ws(",", collect_list('kwi_shows_count')).alias('kwi_show_counts'),
concat_ws(",", collect_list('keyword')).alias('interval_keywords'),
concat_ws(",", collect_list('kw_clicks_count')).alias('kw_click_counts'),
concat_ws(",", collect_list('kw_shows_count')).alias('kw_show_counts'),
first('age').alias('age'),
first('gender').alias('gender'),
first('did_bucket').alias('did_bucket')
)
return df
def collect_trainready(df_trainready_batched_temp):
# group 3: group by did with the temp batched did-interval rows.
df = df_trainready_batched_temp
features = ['interval_starting_time', 'interval_keywords', 'kwi', 'kwi_click_counts', 'kwi_show_counts']
agg_attr_list = list(chain(*[(lit(attr), col(attr)) for attr in df.columns if attr in features]))
df = df.withColumn('attr_map', create_map(agg_attr_list))
df = df.groupBy('did').agg(
collect_list('attr_map').alias('attr_map_list'),
first('age').alias('age'),
first('gender').alias('gender'),
first('did_bucket').alias('did_bucket')
)
return df
def build_feature_array(df):
'''
df['attr_map_list']=
[{u'kwi': u'14', u'interval_starting_time': u'1576713600', u'kwi_show_counts': u'14:2', u'kwi_click_counts': u'14:0', u'interval_keywords': u'info'},
{u'kwi': u'14,29', u'interval_starting_time': u'1576886400', u'kwi_show_counts': u'14:2,29:4', u'kwi_click_counts': u'14:0,29:0', u'interval_keywords': u'info,video'},
{u'kwi': u'14', u'interval_starting_time': u'1576800000', u'kwi_show_counts': u'14:4', u'kwi_click_counts': u'14:0', u'interval_keywords': u'info'}],
'''
def udf_function(attr_map_list):
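            # sort this user's intervals newest-first, then split the per-interval dicts into parallel lists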
tmp_list = []
for _dict in attr_map_list:
tmp_list.append((_dict['interval_starting_time'], _dict))
tmp_list.sort(reverse=True, key=lambda x: x[0])
interval_starting_time = []
interval_keywords = []
kwi = []
kwi_show_counts = []
kwi_click_counts = []
for time, _dict in tmp_list:
interval_starting_time.append(str(time))
interval_keywords.append(_dict['interval_keywords'])
kwi.append(_dict['kwi'])
kwi_show_counts.append(_dict['kwi_show_counts'])
kwi_click_counts.append(_dict['kwi_click_counts'])
return [interval_starting_time, interval_keywords, kwi, kwi_show_counts, kwi_click_counts]
df = df.withColumn('metrics_list', udf(udf_function, ArrayType(ArrayType(StringType())))(col('attr_map_list')))
return df
trainready_table_temp = trainready_table + '_temp'
timer_start = timeit.default_timer()
'''
1. Find the intervals per user did.
    2. Aggregate on time and keywords so that we have one record per user for each interval.
e.g.
interval = day
unique users per day = 100m
number of records per interval = 100m
'''
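    # Illustrative example: with interval_time_in_seconds = 86400 (one day), a log
    # stamped 2019-12-21 13:05 UTC maps to interval_starting_time = 1576886400.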
start_date, end_date, load_minutes = batch_config
starting_time = datetime.strptime(start_date, "%Y-%m-%d")
ending_time = datetime.strptime(end_date, "%Y-%m-%d")
all_intervals = set()
st = date_to_timestamp(starting_time)
et = date_to_timestamp(ending_time)
x = st
while x < et:
interval_point = x - x % interval_time_in_seconds
all_intervals.add(interval_point)
x += interval_time_in_seconds
all_intervals = list(all_intervals)
all_intervals.sort()
batched_round = 1
for did_bucket in range(did_bucket_num):
for interval_point in all_intervals:
'''
We need the days since we have days partitions.
'''
day_lower = datetime.fromtimestamp(interval_point).strftime("%Y-%m-%d")
day_upper = datetime.fromtimestamp(interval_point+interval_time_in_seconds).strftime("%Y-%m-%d")
command = """SELECT *
FROM {}
WHERE
day >= '{}' AND day <= '{}' AND
interval_starting_time = '{}' AND
did_bucket= '{}' """
df_logs = hive_context.sql(command.format(logs_table_name, day_lower, day_upper, interval_point, did_bucket))
df_trainready = group_batched_logs(df_logs)
mode = 'overwrite' if batched_round == 1 else 'append'
write_to_table_with_partition(df_trainready, trainready_table_temp, partition=('did_bucket'), mode=mode)
batched_round += 1
'''
    Now we need to aggregate per user over all days to create the whole record.
    e.g. for
        100 days
        100M unique users per day
        10 user buckets
    we need a cluster that can fit 1000M = 1G records per bucket.
    If that is not possible, we need to increase the user bucket number.
'''
batched_round = 1
for did_bucket in range(did_bucket_num):
command = """SELECT *
FROM {}
WHERE
did_bucket= '{}' """
df = hive_context.sql(command.format(trainready_table_temp, did_bucket))
df = collect_trainready(df)
df = build_feature_array(df)
'''
at this point df is like below
[Row(age=6, gender=0, did=u'773e03d2bc89d49c0c9c60270ee650e555abdf32cf5305c9fe27f081e1e64d91', metrics_list=[[u'1576800000'], [u'25'], [u'25:1'], [u'25:0']], did_bucket=u'0')]
'''
for i, feature_name in enumerate(['interval_starting_time', 'interval_keywords', 'kwi', 'kwi_show_counts', 'kwi_click_counts']):
df = df.withColumn(feature_name, col('metrics_list').getItem(i))
# Add did_index
w = Window.orderBy("did_bucket", "did")
df = df.withColumn('row_number', row_number().over(w))
df = df.withColumn('did_index', udf(lambda x: did_bucket*(MAX_USER_IN_BUCKET) + x, LongType())(col('row_number')))
df = df.select('age', 'gender', 'did', 'did_index', 'interval_starting_time', 'interval_keywords',
'kwi', 'kwi_show_counts', 'kwi_click_counts', 'did_bucket')
mode = 'overwrite' if batched_round == 1 else 'append'
write_to_table_with_partition(df, trainready_table, partition=('did_bucket'), mode=mode)
batched_round += 1
return
def run(hive_context, cfg):
cfg_logs = cfg['pipeline']['main_logs']
cfg_clean = cfg['pipeline']['main_clean']
logs_table_name = cfg_logs['logs_output_table_name']
interval_time_in_seconds = cfg_logs['interval_time_in_seconds']
cfg_train = cfg['pipeline']['main_trainready']
trainready_table = cfg_train['trainready_output_table']
did_bucket_num = cfg_clean['did_bucket_num']
batch_config = load_batch_config(cfg)
generate_trainready(hive_context, batch_config, interval_time_in_seconds, logs_table_name, trainready_table, did_bucket_num)
if __name__ == "__main__":
"""
This program performs the followings:
adds normalized data by adding index of features
groups data into time_intervals and dids (labeled by did)
"""
sc, hive_context, cfg = load_config(description="pre-processing train ready data")
resolve_placeholder(cfg)
run(hive_context=hive_context, cfg=cfg)
sc.stop()
| [
"reza_adib@yahoo.com"
] | reza_adib@yahoo.com |
5054c56725b8cb769beb33ac7ddb17a6a1e7bf3d | 7d8f344949755664850cf50c070e0442fecc3e89 | /glynt/apps/graph/tests/__init__.py | dc2587af598ddf5ea373f85898672e9821bed266 | [] | no_license | rosscdh/glynt | 7ac24c74c3ddeee2f6af584f54f548c662a13f18 | a6a083704f300ed2f9fa4cd4e666d07199f52d1e | refs/heads/master | 2020-04-15T00:33:04.930549 | 2016-06-14T08:37:30 | 2016-06-14T08:37:30 | 4,831,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | from test_linkedin_connections_service import *
from test_linkedin_profile_service import *
| [
"ross@lawpal.com"
] | ross@lawpal.com |
781488f21981ce3a35f5823fd69ba77f85484509 | f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e | /scripts/client/gui/dialogsinterface.py | 01e537573c7b9639810015d345977ad9df740966 | [] | no_license | webiumsk/WOT0.10.0 | 4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b | a84f536c73f86d9e8fab559e97f88f99f2ad7e95 | refs/heads/master | 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | # Embedded file name: scripts/client/gui/DialogsInterface.py
from gui.Scaleform.Waiting import Waiting
from gui.battle_control import g_sessionProvider
from gui.shared import events, g_eventBus
from gui.shared.utils.decorators import dialog
from gui.shared.utils.functions import showInformationDialog, showConfirmDialog
from gui.Scaleform.daapi.view.dialogs import I18nInfoDialogMeta, I18nConfirmDialogMeta, DisconnectMeta
@dialog
def showDialog(meta, callback):
g_eventBus.handleEvent(events.ShowDialogEvent(meta, callback))
@dialog
def showI18nInfoDialog(i18nKey, callback, meta = None):
if g_sessionProvider.isBattleUILoaded():
customMsg = None
if meta is not None:
            customMsg = meta.getMessage()
showInformationDialog(i18nKey, callback, customMessage=customMsg, ns='battle')
else:
showDialog(I18nInfoDialogMeta(i18nKey, meta=meta), callback)
return
@dialog
def showI18nConfirmDialog(i18nKey, callback, meta = None, focusedID = None):
if g_sessionProvider.isBattleUILoaded():
customMsg = None
if meta is not None:
            customMsg = meta.getMessage()
showConfirmDialog(i18nKey, callback, customMessage=customMsg, ns='battle')
else:
showDialog(I18nConfirmDialogMeta(i18nKey, meta=meta, focusedID=focusedID), callback)
return
__ifDisconnectDialogShown = False
def showDisconnect(reason = None, isBan = False, expiryTime = None):
global __ifDisconnectDialogShown
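    # show at most one disconnect dialog at a time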
if __ifDisconnectDialogShown:
return
Waiting.close()
def callback(_):
global __ifDisconnectDialogShown
__ifDisconnectDialogShown = False
__ifDisconnectDialogShown = True
showDialog(DisconnectMeta(reason, isBan, expiryTime), callback)
| [
"info@webium.sk"
] | info@webium.sk |
35356795d538348ce21e6b7b7d750bc012feb21d | 29da2ca6def1270be13a3096685a8e5d82828dff | /CIM14/IEC61970/Dynamics/TurbineGovernors/TurbineGovernor.py | 8395e78690c1c031b3d3c2f1fa248e1347a8fed4 | [
"MIT"
] | permissive | rimbendhaou/PyCIM | 75eb3bcd3729b2410c03f3d5c66d6f1e05e21df3 | d578bb0bf1af344342bd23344385ed9c06c2d0ee | refs/heads/master | 2022-04-28T01:16:12.673867 | 2020-04-16T02:19:09 | 2020-04-16T02:19:09 | 256,085,381 | 0 | 0 | MIT | 2020-04-16T02:15:20 | 2020-04-16T02:08:14 | null | UTF-8 | Python | false | false | 1,608 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.PowerSystemResource import PowerSystemResource
class TurbineGovernor(PowerSystemResource):
"""The turbine-governor determines the mechanical power (Pm) supplied to the generator model
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'TurbineGovernor' instance.
"""
super(TurbineGovernor, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
| [
"rwl@thinker.cable.virginmedia.net"
] | rwl@thinker.cable.virginmedia.net |
599294b469224e38ff049081aca36eb3ce3d2e2f | bc441bb06b8948288f110af63feda4e798f30225 | /pipeline_sdk/model/resource_manage/filter_condition_pb2.py | 8ca58da50e2eaaea09cd76d058b1eaa644f6a04f | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,173 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: filter_condition.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pipeline_sdk.model.resource_manage import filter_data_source_pb2 as pipeline__sdk_dot_model_dot_resource__manage_dot_filter__data__source__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='filter_condition.proto',
package='resource_manage',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manage'),
serialized_pb=_b('\n\x16\x66ilter_condition.proto\x12\x0fresource_manage\x1a;pipeline_sdk/model/resource_manage/filter_data_source.proto\"\x93\x01\n\x0f\x46ilterCondition\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63ompare\x18\x02 \x01(\t\x12/\n\x04left\x18\x03 \x01(\x0b\x32!.resource_manage.FilterDataSource\x12\x30\n\x05right\x18\x04 \x01(\x0b\x32!.resource_manage.FilterDataSourceBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manageb\x06proto3')
,
dependencies=[pipeline__sdk_dot_model_dot_resource__manage_dot_filter__data__source__pb2.DESCRIPTOR,])
_FILTERCONDITION = _descriptor.Descriptor(
name='FilterCondition',
full_name='resource_manage.FilterCondition',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='resource_manage.FilterCondition.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='compare', full_name='resource_manage.FilterCondition.compare', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='left', full_name='resource_manage.FilterCondition.left', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='right', full_name='resource_manage.FilterCondition.right', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=105,
serialized_end=252,
)
_FILTERCONDITION.fields_by_name['left'].message_type = pipeline__sdk_dot_model_dot_resource__manage_dot_filter__data__source__pb2._FILTERDATASOURCE
_FILTERCONDITION.fields_by_name['right'].message_type = pipeline__sdk_dot_model_dot_resource__manage_dot_filter__data__source__pb2._FILTERDATASOURCE
DESCRIPTOR.message_types_by_name['FilterCondition'] = _FILTERCONDITION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FilterCondition = _reflection.GeneratedProtocolMessageType('FilterCondition', (_message.Message,), {
'DESCRIPTOR' : _FILTERCONDITION,
'__module__' : 'filter_condition_pb2'
# @@protoc_insertion_point(class_scope:resource_manage.FilterCondition)
})
_sym_db.RegisterMessage(FilterCondition)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
1b6353b6528b8a595913455a59dab726fa53fa19 | 95eed88115075f7e1916a14de7497d05a12a9330 | /abc106d.py | f7b6d1f202fe22e2ff2f13477e4221a8bc71c642 | [] | no_license | ynagi2/atcoder | bdbbd030f1dd39e937b0872b028ce0f38372521e | e404f4500d837bfd6ca473aa2837f46ae71ad84a | refs/heads/master | 2022-04-29T12:48:44.229462 | 2022-04-22T15:04:50 | 2022-04-22T15:04:50 | 241,098,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | # use PyPy
def main():
n, m, Q = map(int, input().split())
lr = [[0]*(n+1) for _ in range(n+1)]
for _ in range(m):
l, r = map(int, input().split())
lr[l][r] += 1
p = []
for _ in range(Q):
_list = list(map(int, input().split()))
p.append(_list)
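    # for each left endpoint l, build a prefix sum over right endpoints so a query
    # (l, r) is answered by summing c[r] - c[l-1] over rows l..r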
sums = []
for l in lr:
csum = [0]*(n+2)
for i in range(n):
            # lr is 0-indexed up to n, so add l[i+1] here
csum[i+1] = csum[i] + l[i+1]
sums.append(csum)
for e in p:
ans = 0
l, r = e[0], e[1]
        # sum the counts over the given range of sections
for c in sums[l:r+1]:
ans += (c[r] - c[l-1])
        print(ans)
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | ynagi2.noreply@github.com |
b0df1d3b85cb9522c935158901ae2bddeaecc037 | a2e1ade428b26fd9009d1ab9e6594f06755ec5a4 | /src/saas/bkuser_shell/categories/serializers.py | 7fce74839512a4fc3584e570c807f34b56a1f0c0 | [
"MIT"
] | permissive | luyouli/bk-user | f285c5ee415cfc8769727b16e3d75ecce3b469d5 | 8ea590958a5c6dd3c71d0b72e1d4866ce327efda | refs/heads/master | 2023-08-07T20:58:36.429072 | 2021-08-24T07:02:32 | 2021-08-24T07:02:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,716 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from bkuser_shell.bkiam.serializers import AuthInfoSLZ
from bkuser_shell.categories.constants import CategoryStatus
from django.utils.translation import ugettext_lazy as _
from rest_framework.serializers import (
BooleanField,
CharField,
ChoiceField,
DateTimeField,
FileField,
IntegerField,
JSONField,
ListField,
Serializer,
SerializerMethodField,
)
class ExtraInfoSLZ(Serializer):
auth_infos = ListField(read_only=True, child=AuthInfoSLZ())
callback_url = CharField(read_only=True)
class CategoryMetaSLZ(Serializer):
"""用户目录基本信息"""
type = CharField(read_only=True)
description = CharField(read_only=True)
name = CharField(read_only=True)
authorized = BooleanField(read_only=True)
extra_info = ExtraInfoSLZ(read_only=True)
class DetailCategorySerializer(Serializer):
id = IntegerField(required=False)
domain = CharField()
display_name = CharField()
default = BooleanField()
enabled = BooleanField()
type = CharField()
description = CharField()
create_time = DateTimeField()
update_time = DateTimeField()
last_synced_time = DateTimeField()
unfilled_namespaces = JSONField()
configured = BooleanField()
activated = SerializerMethodField()
def get_activated(self, obj) -> bool:
if isinstance(obj, dict):
return obj["status"] == CategoryStatus.NORMAL.value
else:
return getattr(obj, "status") == CategoryStatus.NORMAL.value
class CreateCategorySerializer(Serializer):
domain = CharField(max_length=64, label=_("登陆域"))
display_name = CharField(max_length=64, label=_("目录名"))
activated = BooleanField(default=True)
type = ChoiceField(default="local", choices=["mad", "ldap", "local"])
class UpdateCategorySerializer(Serializer):
display_name = CharField(max_length=64, required=False)
activated = BooleanField(default=True, required=False)
description = CharField(required=False)
class ListCategorySerializer(Serializer):
only_enable = BooleanField(default=False)
class CategorySyncSerializer(Serializer):
file = FileField(required=False)
class CategoryTestConnectionSerializer(Serializer):
connection_url = CharField(required=False)
user = CharField(required=False)
password = CharField(required=False)
timeout_setting = IntegerField(required=False, default=120)
use_ssl = BooleanField(default=False, required=False)
class CategoryTestFetchDataSerializer(Serializer):
basic_pull_node = CharField(required=False)
user_filter = CharField(required=False)
organization_class = CharField(required=False)
user_group_filter = CharField(required=False)
class CategoryExportSerializer(Serializer):
department_ids = CharField()
def to_representation(self, instance):
data = super().to_representation(instance)
data["department_ids"] = data["department_ids"].split(",")
return data
| [
"bluesedenyu@gmail.com"
] | bluesedenyu@gmail.com |
a9b4edb83b779b911c53957fae262072b357d724 | c426f269e8f7598d78b0a3bcc5629bfe447d12f6 | /4_Interactive Programming Python (RICE)/MiniProjects/MiniProject3_StopWatch/MiniProject3_v02_user12_DNqZOhd30O_8.py | 322dcf356f777ff0c51ef0d4d02f84a38c735015 | [] | no_license | DmitryVakhrushev/Python | 8d05d083f63822622f43ea5d873b98ef4e8cfd15 | 9dd2f37bcdce25a5cc0146adb4513ed2e539b650 | refs/heads/master | 2022-11-13T18:39:44.131820 | 2020-06-28T02:29:17 | 2020-06-28T02:29:17 | 243,847,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | # template for "Stopwatch: The Game"
import simplegui
import time
# define global variables
curTime = 0
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
    minutes = t // 600
    seconds = (t % 600) // 10
    tenths = t % 10  # t counts tenths of a second, not milliseconds
    return "%d:%02d.%d" % (minutes, seconds, tenths)
# define event handlers for buttons; "Start", "Stop", "Reset"
def startBtn():
tm.start()
def stopBtn():
tm.stop()
def resetBtn():
tm.stop()
global curTime
curTime = 0
# define event handler for timer with 0.1 sec interval
def tick():
global curTime
curTime +=1
# define draw handler
def drawTime(canvas):
global curTime
canvas.draw_text(format(curTime), (80, 120), 60, "White")
# create frame
f = simplegui.create_frame("Stopwatch", 300, 200)
# register event handlers
tm = simplegui.create_timer(100, tick)
f.set_draw_handler(drawTime)
start = f.add_button("Start", startBtn, 150)
stop = f.add_button("Stop", stopBtn, 150)
reset = f.add_button("Reset", resetBtn, 150)
# start frame
f.start()
tm.start()
# Please remember to review the grading rubric
| [
"dm.vakhrushev@gmail.com"
] | dm.vakhrushev@gmail.com |
145f4d44a7d58355228c02ba21b610bc4e85e637 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02619/s875095596.py | 54f8a5836b379689a3036484bd7fc2bf077aa557 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | import sys
def input():
return sys.stdin.readline()[:-1]
d = int(input())
c = list(map(int, input().split()))
C = sum(c)
s = [list(map(int, input().split())) for _ in range(d)]
t = [int(input()) for _ in range(d)]
def score(t):
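    # track dissatisfaction incrementally: each day the total penalty grows by C,
    # minus the accumulated term of the contest held that day (its clock resets)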
res = 0
minus = 0
last = [0 for _ in range(26)]
for i, x in enumerate(t):
minus += C - c[x-1] * (i - last[x-1] + 1)
res -= minus
res += s[i][x-1]
last[x-1] = i+1
print(res)
return
score(t) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fbd1236ee2905d9e213d9ee829032acb53361778 | 20b4be7df5efeb8019356659c5d054f29f450aa1 | /API/gdax/__init__.py | 152d1b6d1e91f266693e8c11ceadc560286aec43 | [
"Apache-2.0",
"MIT"
] | permissive | kumars99/TradzQAI | 75c4138e30796573d67a5f08d9674c1488feb8e4 | 1551321642b6749d9cf26caf2e822051a105b1a5 | refs/heads/master | 2020-03-29T20:14:45.562143 | 2018-09-25T16:07:21 | 2018-09-25T16:07:21 | 150,302,554 | 1 | 0 | null | 2018-09-25T17:17:54 | 2018-09-25T17:17:54 | null | UTF-8 | Python | false | false | 174 | py | from .authenticated_client import AuthenticatedClient
from .public_client import PublicClient
from .websocket_client import WebsocketClient
from .order_book import OrderBook
| [
"awakeproduction@hotmail.fr"
] | awakeproduction@hotmail.fr |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.