Dataset schema (each record below lists these fields in order, separated by `|`):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (3–281 chars)
- content_id: string (length 40)
- detected_licenses: list (0–57 items)
- license_type: string (2 classes)
- repo_name: string (6–116 chars)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (313 classes)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (18.2k–668M, nullable)
- star_events_count: int64 (0–102k)
- fork_events_count: int64 (0–38.2k)
- gha_license_id: string (17 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (107 classes)
- src_encoding: string (20 classes)
- language: string (1 class)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (4–6.02M)
- extension: string (78 classes)
- content: string (2–6.02M chars)
- authors: list (1 item)
- author: string (0–175 chars)
a2f63a96b80b0d24c88cb051e4b93ed7a5134671
|
2dcf0d5cc921745bd34610162e540632066dd919
|
/library/framesize.py
|
200b812e69468c2e02fe0fd9dd27b4a7a38d54b7
|
[] |
no_license
|
ms412/pyIxia
|
0aac92cfe2239853e5e815db23816252b1eb6997
|
17913d810e859fb776882f203bea4135aec72b36
|
refs/heads/master
| 2021-06-25T14:53:45.102058
| 2017-09-11T15:09:06
| 2017-09-11T15:09:06
| 103,150,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,825
|
py
|
import time
from library.ixiaIf import TclClient
from tools.logger import Logger
class Framesize(object):
def __init__(self, *args):
self._api = TclClient()
self._log = Logger()
self._portlist=[]
self._tclportlist =''
for item in args:
self._portlist.append([self._api.chassisID(),item[0],item[1]])
for item in self._portlist:
self._tclportlist = (self._tclportlist + '[list %d %d %d] ' % (item[0], item[1], item[2]))
self._tclportlist=('[list %s]'%(self._tclportlist))
def __del__(self):
self.disconnect()
def createGroup(self):
self._api.call('set group 12')
self._api.call('portGroup create $group')
for _port in self._portlist:
self._api.call('portGroup add $group %d %d %d' % (_port[0], _port[1], _port[2]))
# self._api.call('port setFactoryDefaults %d %d %d' % (_port[0], _port[1], _port[2]))
self._api.call('portGroup write $group')
self._api.call('portGroup setCommand $group resetStatistics')
# self._api.call('portGroup write $group')
time.sleep(2)
def port(self,mode):
print('port config',self._portlist)
# self._api.call('set portlist %s'%(self._TclPortList()))
# if self._api.call_rc('ixTakeOwnership portlist force') != 0:
if self._api.call_rc('ixTakeOwnership %s force'%(self._tclportlist)) != 0:
print('EXIT')
exit()
for _port in self._portlist:
if '1Gbe-opt' in mode:
print('config port', _port)
self._api.call('port setDefault')
#optisch
self._api.call('port setPhyMode 1 %d %d %d'% (_port[0], _port[1], _port[2]))
self._api.call('port config -speed 1000')
self._api.call('port config -advertise100FullDuplex false')
self._api.call('port config -advertise100HalfDuplex false')
self._api.call('port config -advertise10FullDuplex false')
self._api.call('port config -advertise10HalfDuplex false')
self._api.call('port config -advertise1000FullDuplex true')
self._api.call('port config -speed 1000')
self._api.call('port set %d %d %d' % (_port[0], _port[1], _port[2]))
elif '1Gbe-el' in mode:
self._api.call('port setDefault')
# electrical
self._api.call('port setPhyMode 0 %d %d %d' % (_port[0], _port[1], _port[2]))
self._api.call('port config -speed 1000')
self._api.call('port config -advertise100FullDuplex false')
self._api.call('port config -advertise100HalfDuplex false')
self._api.call('port config -advertise10FullDuplex false')
self._api.call('port config -advertise10HalfDuplex false')
self._api.call('port config -advertise1000FullDuplex true')
self._api.call('port config -speed 1000')
self._api.call('port set %d %d %d' % (_port[0], _port[1], _port[2]))
else:
print('nothing')
def stat(self):
for _port in self._portlist:
self._api.call('stat setDefault')
if self._api.call_rc('stat set %d %d %d' % (_port[0], _port[1], _port[2])) != 0:
exit()
# self._api.call('stat write %d %d %d' % (_port[0], _port[1], _port[2]))
def flexibleTimestamp(self):
for _port in self._portlist:
self._api.call('flexibleTimestamp setDefault')
self._api.call('flexibleTimestamp set %d %d %d' % (_port[0], _port[1], _port[2]))
def filter(self):
for _port in self._portlist:
self._api.call('filter setDefault')
self._api.call('filter config -captureTriggerFrameSizeFrom 12')
self._api.call('filter config -captureTriggerFrameSizeTo 12')
self._api.call('filter config -captureFilterFrameSizeFrom 12')
self._api.call('filter config -captureFilterFrameSizeTo 12')
self._api.call('filter setDefault')
self._api.call('filter set %d %d %d' % (_port[0], _port[1], _port[2]))
def filterPallette(self):
for _port in self._portlist:
self._api.call('filterPallette setDefault')
self._api.call('filterPallette set %d %d %d' % (_port[0], _port[1], _port[2]))
def capture(self):
for _port in self._portlist:
self._api.call('capture setDefault')
self._api.call('capture set %d %d %d' % (_port[0], _port[1], _port[2]))
def interfaceTable(self):
# for _port in self._portlist:
self._api.call('interfaceTable setDefault')
self._api.call('interfaceTable write')
self._api.call('interfaceTable write')
self._api.call('interfaceTable clearAllInterfaces')
self._api.call('interfaceTable write')
def protocolServer(self):
for _port in self._portlist:
self._api.call('protocolServer setDefault')
self._api.call('protocolServer set %d %d %d' % (_port[0], _port[1], _port[2]))
def stream(self,framesize):
self._api.call('stream setDefault')
self._api.call('stream config -name %s'% 'TestStream')
self._api.call('stream config -framesize %d'% int(framesize))
self._api.call('stream config -ifg 96.0')
# self._api.call('stream config -ifgMIN 952.0')
#self._api.call('stream config -ifgMAX 1016.0')
# self._api.call('stream config -ibg 96.0')
self._api.call('stream config -percentPacketRate 100.0')
self._api.call('stream config -enableTimestamp true')
self._api.call('stream config -patternType patternTypeRandom')
self._api.call('stream config -dataPattern allOnes')
self._api.call('stream config -pattern "FF FF"')
self._api.call('stream config -frameType "FF FF"')
self._api.call('stream config -dma stopStream')
self._api.call('stream config -numFrames 1000')
# required for latency
# self._api.call('stream config -fir true')
for _port in self._portlist:
self._api.call('stream set %d %d %d %d'%(_port[0], _port[1], _port[2],1))
def pauseFrame(self):
self._api.call('stream setDefault')
# self._api.call('stream config -name %s'% 'PauseStream')
self._api.call('protocol setDefault')
self._api.call('protocol config -name PauseStream')
self._api.call('protocol config -ethernetType ethernetII')
self._api.call('pauseControl setDefault')
self._api.call('pauseControl config -da {01 80 C2 00 00 01}')
self._api.call('pauseControl config -pauseTime 128')
for _port in self._portlist:
self._api.call('pauseControl set %d %d %d'%(_port[0], _port[1], _port[2]))
for _port in self._portlist:
self._api.call('stream set %d %d %d %d'%(_port[0], _port[1], _port[2],1))
def protocol(self):
self._api.call('protocol setDefault')
def packetGroup(self):
self._api.call('packetGroup setDefault')
self._api.call('packetGroup config -groupId 1')
self._api.call('packetGroup config -groupOffset 16')
self._api.call('packetGroup config -sequenceNumberOffset 28')
self._api.call('packetGroup config -insertSequenceSignature true')
for _port in self._portlist:
self._api.call('packetGroup setTx %d %d %d %d'%(_port[0], _port[1], _port[2],1))
def dataIntegrity(self):
self._api.call('dataIntegrity setDefault')
self._api.call('dataIntegrity config -signatureOffset 12')
self._api.call('dataIntegrity config -signature "08 71 18 00"')
def result(self):
_result = {}
for _port in self._portlist:
_str_port = (str(_port[0])+str(_port[1])+str(_port[2]))
print(_str_port)
_result[_str_port] = {}
for _port in self._portlist:
self._api.call_rc('capture get %d %d %d' % (_port[0],_port[1], _port[2]))
self._api.call('capture cget -nPackets')
for _port in self._portlist:
self._api.call_rc('captureBuffer get %d %d %d' % (_port[0],_port[1],_port[2]))
self._api.call_rc('captureBuffer getStatistics')
print('Port %s Latency: %d' % (str(_port), int(self._api.call('captureBuffer cget -averageLatency')[0])))
for _port in self._portlist:
self._api.call('stat get statAllStats %d %d %d'% (_port[0], _port[1], _port[2]))
# print('Port %s LinkState: %d'% (str(_port), int(self._api.call('stat cget -link')[0])))
# print('Port %s txFrames: %d'% (str(_port), int(self._api.call('stat cget -framesSent')[0])))
# print('Port %s rxFrames: %d'% (str(_port), int(self._api.call('stat cget -framesReceived')[0])))
# print('Port %s txBytes: %d'% (str(_port), int(self._api.call('stat cget -bytesSent')[0])))
# print('Port %s rxBytes: %d'% (str(_port), int(self._api.call('stat cget -bytesReceived')[0])))
# print('Port %s Line Rate: %d'% (str(_port), int(self._api.call('stat cget -lineSpeed')[0])))
# _str_port = (str(_port[0]) + '-' + str(_port[1]) + '-' + str(_port[2]))
_testResult = {}
_testResult['txFrame'] = int(self._api.call('stat cget -framesSent')[0])
_testResult['rxFrame'] = int(self._api.call('stat cget -framesReceived')[0])
_testResult['txBytes'] = int(self._api.call('stat cget -bytesSent')[0])
_testResult['rxBytes'] = int(self._api.call('stat cget -bytesReceived')[0])
_str_port = (str(_port[0]) + str(_port[1]) + str(_port[2]))
_result[_str_port] = _testResult
# _testResult['PORT'] = _port
# _resultList.append(_testResult)
# print('RESULT',_result)
return _result
def framesizeTest(self,sizelist):
_framesizeTest = {}
self._api.call('set portList %s' % (self._tclportlist))
self.createGroup()
self.port('1Gbe-opt')
#self.pauseFrame()
# _result = {}
for framesize in sizelist:
self.stat()
self.flexibleTimestamp()
self.filter()
self.capture()
self.filterPallette()
self.interfaceTable()
self.protocolServer()
self.stream(framesize)
if self._api.call_rc('ixWriteConfigToHardware portList') != 0:
exit()
time.sleep(10)
if self._api.call_rc('ixCheckLinkState portList') != 0:
exit()
if self._api.call_rc('ixStartCapture portList') != 0:
exit()
if self._api.call_rc('ixStartTransmit portList') != 0:
exit()
time.sleep(10)
if self._api.call_rc('ixStopCapture portList') != 0:
exit()
if self._api.call_rc('ixStopTransmit portList') != 0:
exit()
# _resultList = self.result()
_framesizeTest[framesize] = self.result()
# for item in _resultList:
# print(item)
# _port = item.get('PORT')
# _str_port = (str(_port[0]) + '-' + str(_port[1]) + '-' + str(_port[2]))
# print(_str_port)
# _framesizeTest[_str_port]['FRAMESIZE'][framesize] = _str_port
# print(_framesizeTest)
# _testresult = self.result()
# print('TESTRESULT', _testresult)
return _framesizeTest
def disconnect(self):
if self._api.call_rc('ixClearOwnership %s' % (self._tclportlist)) != 0:
exit()
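# A minimal usage sketch, assuming an Ixia chassis reachable through the
# TclClient configured elsewhere in this library; the (card, port) pairs
# below are hypothetical placeholders.
if __name__ == '__main__':
    test = Framesize((1, 1), (1, 2))            # two ports: card 1, ports 1 and 2
    results = test.framesizeTest([64, 128, 512, 1518])
    for size, per_port in results.items():       # dict keyed by framesize
        print(size, per_port)
    test.disconnect()                            # also triggered by __del__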
|
[
"m.schiesser@gmail.com"
] |
m.schiesser@gmail.com
|
188e8503cdd257dd7cab3babad6f8510a254137d
|
633944f913050debf0764c2a29cf3e88f912670e
|
/v8/depot_tools/bootstrap-3.8.0b1.chromium.1_bin/python3/lib/python3.8/email/mime/base.py
|
132e6913d660b6b7b332267152dbc43c4eddb1af
|
[
"BSD-3-Clause",
"bzip2-1.0.6",
"SunPro",
"Apache-2.0"
] |
permissive
|
bopopescu/V8-lgtm
|
0474c2ff39baf754f556ef57619ceae93e7320fd
|
da307e2f7abfca5fa0e860a809de6cd07fd1b72b
|
refs/heads/master
| 2022-02-16T19:10:54.008520
| 2019-09-25T07:51:13
| 2019-09-25T07:51:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
../../../../../.cipd/pkgs/2/_current/lib/python3.8/email/mime/base.py
|
[
"jundong.xjd@antfin.com"
] |
jundong.xjd@antfin.com
|
921643a8d9159a227ee264036f1b48ce70509743
|
c295b7f65e424822c3909634e34ce97c64932a8e
|
/bab 2/server_udp.py
|
d691a69a64fc39f45922d460bc1b0e99c11eb95e
|
[] |
no_license
|
imamsantosa/Jaringan_Komputer
|
e67345c0d6ce012694296ba74cf49ab5fccbccd8
|
8b60176710aafaacca93a0defde8c7c03d71b233
|
refs/heads/master
| 2021-01-10T09:42:41.244174
| 2016-03-14T17:46:56
| 2016-03-14T17:46:56
| 53,878,301
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from socket import *
serverPort = 12000
serverSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket.bind(('', serverPort))
print "The server is ready to receive"
while 1:
message, clientAddress = serverSocket.recvfrom(2048)
modifiedMessage = message.upper()
serverSocket.sendto(modifiedMessage, clientAddress)
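# A minimal client sketch to exercise this server, shown here as comments so
# the server file stays runnable; the loopback address is an assumption and
# the port matches the server default above.
# from socket import *
# clientSocket = socket(AF_INET, SOCK_DGRAM)
# clientSocket.sendto('hello'.encode(), ('127.0.0.1', 12000))
# modifiedMessage, serverAddress = clientSocket.recvfrom(2048)
# print(modifiedMessage.decode())  # prints HELLO
# clientSocket.close()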
|
[
"imamsantosa123@gmail.com"
] |
imamsantosa123@gmail.com
|
fbb1e388256a6d52fa2526a9bb88fff6647c0df9
|
e2b5849d011596687672d41b2446df2c6c65ee79
|
/ex049.py
|
a8dd4c42f7da9ab8c9de77489c9fe88f68cef447
|
[] |
no_license
|
maroberto/CursoPython
|
ebdbe4b98f6246bdaafc7874ec06acf92a3887f6
|
e79af46242cb46d1189f882b0835cb9140099c4e
|
refs/heads/master
| 2023-03-28T08:14:18.878202
| 2021-04-04T16:26:34
| 2021-04-04T16:26:34
| 330,283,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
print(20 * '__')
numero = int(input('Digite um número: '))
for i in range(1, 31):
resultado = (numero * i)
print('{} x {} = {}'.format(numero, i, resultado))
|
[
"63175966+maroberto@users.noreply.github.com"
] |
63175966+maroberto@users.noreply.github.com
|
cd9815be7c9cc8ccdc4c8d46f182389f7124895a
|
0f6581b105ea7eb4b99dbff131378340a634e7ac
|
/pages/select_mall_page.py
|
a47f0ce03ea8ce69435593430a96ed74a92a928e
|
[] |
no_license
|
langdawang678/Se2PO
|
ded5e9f97a329f39a6de8ffaebe92330eb598eff
|
96d7eb6b4e1774b06b2fd9a4781f9bee7d8f5ed6
|
refs/heads/master
| 2023-03-25T10:44:23.140843
| 2021-03-23T09:41:39
| 2021-03-23T09:41:39
| 346,628,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from locations.goods_lib_locations import GoodsLibLocations
from common.base_page import BasePage
class SelectMallPage(BasePage):
# Whether the exit element exists
def get_elements_exists(self):
try:
WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(GoodsLibLocations.exit_link))
except:
return False
else:
return True
|
[
"langdawang678@sina.com"
] |
langdawang678@sina.com
|
02d981070765586c6c3e8bb8d57555ee7b1bed74
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/repvgg/src/configs/parser.py
|
ae7dbe646e625e1fa38e245603b0503abce2f6d8
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""parser function"""
USABLE_TYPES = set([float, int])
def trim_preceding_hyphens(st):
i = 0
while st[i] == "-":
i += 1
return st[i:]
def arg_to_varname(st: str):
st = trim_preceding_hyphens(st)
st = st.replace("-", "_")
return st.split("=")[0]
def argv_to_vars(argv):
var_names = []
for arg in argv:
if arg.startswith("-") and arg_to_varname(arg) != "config":
var_names.append(arg_to_varname(arg))
return var_names
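# A quick illustration of the helpers above; the CLI flags are hypothetical.
# argv_to_vars skips any "--config" argument and maps hyphens to underscores.
if __name__ == "__main__":
    sample_argv = ["--batch-size=32", "--config=cfg.yaml", "--lr=0.1"]
    print(argv_to_vars(sample_argv))  # ['batch_size', 'lr']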
|
[
"977180923@qq.com"
] |
977180923@qq.com
|
e1b448acf3b730cb600a2828622a2b86bc3e47d9
|
c9f4de7bf63df23325b477d3375a1bfb99865059
|
/main_a3.py
|
2a0057fd589f5aa522859a2167872c1f9d5d7b8e
|
[] |
no_license
|
EliHill/TextAnalysis
|
440a15dca3f467ab5d79a234582a9ca3b4c7ab10
|
44b05bd1995290bbbd7972a1f8953aa5e75be37e
|
refs/heads/master
| 2020-09-30T16:24:18.911419
| 2019-12-06T19:52:17
| 2019-12-06T19:52:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,750
|
py
|
"""main_a3.py
"""
import re
import os
import math
import nltk
from nltk.corpus import brown
from nltk.corpus import wordnet as wn
from nltk.corpus import PlaintextCorpusReader
from fsa import FSA
# NLTK stoplist with 3136 words (multilingual)
STOPLIST = set(nltk.corpus.stopwords.words())
# Vocabulary with 234,377 English words from NLTK
ENGLISH_VOCABULARY = set(w.lower() for w in nltk.corpus.words.words())
# The five categories from Brown that we are using
BROWN_CATEGORIES = ('adventure', 'fiction', 'government', 'humor', 'news')
# Global place to store Brown vocabularies so you calculate them only once
BROWN_VOCABULARIES = None
def is_content_word(word):
"""A content word is not on the stoplist and its first character is a letter."""
return word.lower() not in STOPLIST and word[0].isalpha()
class Text(object):
def __init__(self, path, name=None):
"""Takes a file path, which is assumed to point to a file or a directory,
extracts and stores the raw text and also stores an instance of nltk.text.Text."""
self.name = name
if os.path.isfile(path):
self.raw = open(path).read()
elif os.path.isdir(path):
corpus = PlaintextCorpusReader(path, '.*.mrg')
self.raw = corpus.raw()
self.text = nltk.text.Text( nltk.word_tokenize(self.raw))
def __len__(self):
return len(self.text)
def __getitem__(self, i):
return self.text[i]
def __str__(self):
name = '' if self.name is None else " '%s'" % self.name
return "<Text%s tokens=%s>" % (name, len(self))
def token_count(self):
"""Just return the length of the text."""
return len(self)
def type_count(self):
"""Returns the type count, with minimal normalization by lower casing."""
# an alternative would be to use the method nltk.text.Text.vocab()
return len(set([w.lower() for w in self.text]))
def sentence_count(self):
"""Return number of sentences, using the simplistic measure of counting period,
exclamation marks and question marks."""
# could also use nltk.sent.tokenize on self.raw
return len([t for t in self.text if t in '.!?'])
def most_frequent_content_words(self):
"""Return a list with the 25 most frequent content words and their
frequencies. The list has (word, frequency) pairs and is ordered
on the frequency."""
dist = nltk.FreqDist([w for w in self.text if is_content_word(w.lower())])
return dist.most_common(n=25)
def most_frequent_bigrams(self, n=25):
"""Return a list with the 25 most frequent bigrams that only contain
content words. The list returned should have pairs where the first
element in the pair is the bigram and the second the frequency, as in
((word1, word2), frequency), these should be ordered on frequency."""
filtered_bigrams = [b for b in list(nltk.bigrams(self.text))
if is_content_word(b[0]) and is_content_word(b[1])]
dist = nltk.FreqDist([b for b in filtered_bigrams])
return dist.most_common(n=n)
def concordance(self, word):
self.text.concordance(word)
## new methods for search part of assignment 3
def search(self, pattern):
return re.finditer(pattern, self.raw)
def find_sirs(self):
answer = set()
for match in self.search(r"\bSir \S+\b"):
answer.add(match.group())
return sorted(answer)
def find_brackets(self):
answer = set()
# use a non-greedy match on the characters between the brackets
for match in self.search(r"([\(\[\{]).+?([\)\]\}])"):
brackets = "%s%s" % (match.group(1), match.group(2))
# this tests for matching pairs
if brackets in ['[]', '{}', '()']:
answer.add(match.group())
return sorted(answer)
def find_roles(self):
answer = set()
for match in re.finditer(r"^([A-Z]{2,}[^\:]+): ", self.raw, re.MULTILINE):
answer.add(match.group(1))
return sorted(answer)
def find_repeated_words(self):
answer = set()
for match in self.search(r"(\w{3,}) \1 \1"):
answer.add(match.group())
return sorted(answer)
def apply_fsa(self, fsa):
i = 0
results = []
while i < len(self):
match = fsa.consume(self.text[i:])
if match:
results.append((i, match))
i += len(match)
else:
i += 1
return results
class Vocabulary():
"""Class to store all information on a vocabulary, where a vocabulary is created
from a text. The vocabulary includes the text, a frequency distribution over
that text, the vocabulary items themselves (as a set) and the sizes of the
vocabulary and the text. We do not store POS and gloss, for those we rely on
WordNet. The vocabulary is contrained to those words that occur in a
standard word list. Vocabulary items are not normalized, except for being in
lower case."""
def __init__(self, text):
self.text = text.text
# keeping the unfiltered list around for statistics
self.all_items = set([w.lower() for w in text])
self.items = self.all_items.intersection(ENGLISH_VOCABULARY)
# restricting the frequency dictionary to vocabulary items
self.fdist = nltk.FreqDist(t.lower() for t in text if t.lower() in self.items)
self.text_size = len(self.text)
self.vocab_size = len(self.items)
def __str__(self):
return "<Vocabulary size=%d text_size=%d>" % (self.vocab_size, self.text_size)
def __len__(self):
return self.vocab_size
def frequency(self, word):
return self.fdist[word]
def pos(self, word):
# do not volunteer the pos for words not in the vocabulary
if word not in self.items:
return None
synsets = wn.synsets(word)
# somewhat arbitrary choice to make unknown words nouns, returning None
# or 'UNKNOWN' would have been fine too.
return synsets[0].pos() if synsets else 'n'
def gloss(self, word):
# do not volunteer the gloss (definition) for words not in the vocabulary
if word not in self.items:
return None
synsets = wn.synsets(word)
# make a difference between None for words not in vocabulary and words
# in the vocabulary that do not have a gloss in WordNet
return synsets[0].definition() if synsets else 'NO DEFINITION'
def kwic(self, word):
self.text.concordance(word)
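# A hedged usage sketch: 'grail.txt' is a hypothetical input file, and the
# NLTK data used above (stopwords, words, wordnet) must already be downloaded.
if __name__ == '__main__':
    t = Text('grail.txt', name='grail')
    print(t)  # e.g. <Text 'grail' tokens=...>
    print(t.token_count(), t.type_count(), t.sentence_count())
    print(t.most_frequent_content_words()[:5])
    v = Vocabulary(t)
    print(v, v.frequency('knight'), v.pos('knight'), v.gloss('knight'))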
|
[
"marc@cs.brandeis.edu"
] |
marc@cs.brandeis.edu
|
30e9581244610151a0102ea2e8a41a7a363dc6cc
|
f4d2d1bb66ce2f371c734ab8a65f6257778fd68c
|
/Book/chap12.py
|
cc6f4de3925397eed282ceaa64e43e81d11690bb
|
[] |
no_license
|
jdiodati20/hangman
|
0f849cb6cfe6261ee310a0dc5cf14d4da3fc1ac0
|
60dfecf5b2719d4c06686639cdc44104a1f50ba5
|
refs/heads/master
| 2020-04-28T19:28:31.842677
| 2019-03-13T23:26:43
| 2019-03-13T23:26:43
| 175,512,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
import math
class apple:
def __init__(self, color, size, shape, weight):
self.c = color
self.s = size
self.sh = shape
self.w = weight
class circle:
def __init__(self, radius):
self.r = radius
def area(self):
return math.pi * self.r * self.r
class triangle:
def __init__(self, base, height):
self.b = base
self.h = height
def area(self):
return self.b * self.h * 0.5
class hexagon:
def __init__(self, sideLength):
self.l = sideLength
def calculate_perimeter(self):
return 6 * self.l
cir = circle(1)
a = cir.area()
print(a)
tri = triangle(3, 4)
a1 = tri.area()
print(a1)
hexa = hexagon(6)
a2 = hexa.calculate_perimeter()
print(a2)
|
[
"jacksonpdiodati@Jacksons-MacBook-Pro.local"
] |
jacksonpdiodati@Jacksons-MacBook-Pro.local
|
0b102b1d7919e1f87af000bac931cac71bb3410f
|
9554fddc76cfe7d95101ffe8e7ac16b7b090743e
|
/forms.py
|
2e954138dc88f4390310183beceabac3d7c8646d
|
[] |
no_license
|
levicolquitt97/STQA-assignment-2
|
88712fc345317e66e24fba471054596c751fb7ef
|
2d7b92957894d3ddda77b7dc0fec0e0501306dd8
|
refs/heads/master
| 2021-04-24T03:35:55.228329
| 2020-04-04T19:29:25
| 2020-04-04T19:29:25
| 250,069,623
| 0
| 0
| null | 2020-04-04T19:29:26
| 2020-03-25T19:16:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
# LEVI COLQUITT
# Assignment 3+4 STQA
# 3/15/2020
#FORMS.py
#This .py file creates the forms and grabs the user input, then passes it to the main.py file for further use
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, DecimalField, SubmitField
from wtforms.validators import DataRequired, Length, InputRequired, NumberRange
#Form that takes input for BMI page and also validates the input
class BMIForm(FlaskForm):
heightFeet = IntegerField('Height feet', validators=[InputRequired(), NumberRange(min=1, max=12)])
heightInches = IntegerField('Height inches', validators=[InputRequired(), NumberRange(min=0, max=12)])
weight = DecimalField('Weight in pounds', validators=[DataRequired(message='MUST BE VALID DECIMAL/INTEGER VALUE'), NumberRange(min=0, max=1000)])
submit = SubmitField('Calculate')
#Form that takes input for retirement page and also validates the input
class retirementForm(FlaskForm):
age = IntegerField('current age:', validators=[InputRequired(), NumberRange(min=0, max=99)])
salary = IntegerField('Annual salary in $:', validators=[InputRequired(), NumberRange(min=0)])
percentSaved = IntegerField('Enter annual percent of salary saved:', validators=[InputRequired(), NumberRange(min=0, max=99)])
desiredSavingsGoal = IntegerField('Enter your desired savings goal:', validators=[InputRequired(), NumberRange(min=0)])
submit = SubmitField('Calculate')
|
[
"noreply@github.com"
] |
noreply@github.com
|
c599481b7904761d4e4518acc651183692d4f2d5
|
2fd087fbc5faf43940153693823969df6c8ec665
|
/pyc_decrypted/latest/xml/etree/cElementTree.py
|
cf14d28fc01a58d8e8845641181b34f25ec71840
|
[] |
no_license
|
mickeystone/DropBoxLibrarySRC
|
ed132bbffda7f47df172056845e5f8f6c07fb5de
|
2e4a151caa88b48653f31a22cb207fff851b75f8
|
refs/heads/master
| 2021-05-27T05:02:30.255399
| 2013-08-27T13:16:55
| 2013-08-27T13:16:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
#Embedded file name: xml/etree/cElementTree.py
from _elementtree import *
|
[
"bizonix@me.com"
] |
bizonix@me.com
|
9f1d77e5153cf7759b3748a02218bb0dd1d3d019
|
7ac0ad55050bcc0d583a6a2b11be999ae7577c50
|
/project04/wsgi.py
|
d440f640cb6c5b3b56380d7127c540a810912041
|
[] |
no_license
|
baeseonghyeon/django-blog_project
|
0285935a80052799807670974722b2ba2d736b45
|
a5f89dbc23ead8f0296be05aadeb0943f7d44a86
|
refs/heads/master
| 2022-11-26T12:39:03.527254
| 2020-08-13T19:48:26
| 2020-08-13T19:48:26
| 170,460,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for project04 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project04.settings')
application = get_wsgi_application()
|
[
"tjdgusdkf@gmail.com"
] |
tjdgusdkf@gmail.com
|
2ebebca7e2e70361c06ad04d94f701dab5a3c6d3
|
a753d3bd70c4686da5bb886d6652b889ae9d2d0d
|
/test14.py
|
8a8f97c9fa4cd21075dd4b1194269ea8192c643b
|
[] |
no_license
|
sxttxkxnm/python
|
319ac8b11470976aaa14c03916ad8e3194d375e2
|
673dfc94e5c88307b179b9f35b93441fef305be6
|
refs/heads/master
| 2023-06-19T10:57:37.420321
| 2021-07-19T13:38:10
| 2021-07-19T13:38:10
| 336,190,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
import os
choice = 0
filename = ''
def menu():
global choice
print('Menu\n1.Open Calculator\n2.Open Notepad\n3.Exit')
choice = input('Select Menu : ')
def opennotepad():
filename= 'C:\\Windows\\SysWOW64\\notepad.exe'
print('Memorandum writing %s'%filename)
os.system(filename)
def opencalculator():
filename= 'C:\\Windows\\SysWOW64\\calc.exe'
print('Calculate Number %s'%filename)
os.system(filename)
while True:
menu()
if choice == '1':
opencalculator()
elif choice == '2':
opennotepad()
else:
break
|
[
"poom_1004@outlook.co.th"
] |
poom_1004@outlook.co.th
|
f2462d67b16186954cf4e1db3efde0ad37bf674f
|
4f0ecde0978d1b65ae229c855c9488fb067f4ea9
|
/kakao/kakao_2020/kakao_1/__init__.py
|
2b51456dc8302910c9386534cbde9aa2e2eca95d
|
[] |
no_license
|
LazyRichard/coding-test
|
6d84a8a5287d987c23537162b4276a71d06de216
|
c2fa6c6b307db6e0b3049a12e585c3cb7d1b8e24
|
refs/heads/master
| 2021-05-19T23:41:02.648174
| 2021-04-29T07:51:35
| 2021-04-29T07:51:35
| 252,088,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
from typing import List
def solution(test_input: str) -> int:
if len(test_input) == 1:
return 1
answer: List[int] = []
for i in range(1, (len(test_input) // 2) + 1):
temp: List[str] = []
for j in range(0, len(test_input), i):
temp.append(test_input[j:(j + i)])
answer.append(len(compress(temp)))
return min(answer)
def compress(word_list: List[str]) -> str:
answer: str = ""
test_ch: str = ""
cnt:int = 1
for ch in word_list:
if not test_ch:
test_ch = ch
else:
if test_ch == ch:
cnt = cnt + 1
elif test_ch != ch and cnt == 1:
answer = answer + test_ch
test_ch = ch
else:
answer = answer + "{}{}".format(cnt, test_ch)
test_ch = ch
cnt = 1
if cnt > 1:
answer = answer + "{}{}".format(cnt, test_ch)
else:
answer = answer + test_ch
return answer
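# A quick check of the compression logic above, using the sample string from
# the original KAKAO problem statement as an assumed example.
if __name__ == "__main__":
    print(solution("aabbaccc"))  # 7 -> the best chunking compresses to "2a2ba3c"
    print(solution("a"))         # 1 (single-character shortcut)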
|
[
"sof.midnight@live.co.kr"
] |
sof.midnight@live.co.kr
|
0223df23506f1891097b5f5e23917c56f25fef27
|
536204654bec325282191eede91def85cf8f9ecf
|
/tuia/exceptions.py
|
ed7f2543d365f392c6d2f560876676844e4cf0d8
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kiddfz/QTAF
|
0b5450f47c09f453096a139993ada1383e5c7002
|
8bb4367b3c3e43ac3499daa56f825eea0a279054
|
refs/heads/master
| 2021-04-06T05:25:10.993145
| 2017-11-15T05:19:59
| 2017-11-15T05:19:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''
Exception definitions module
'''
# 2012/03/16 pear: initial draft, created
class ControlNotFoundError(Exception):
'''Control not found
'''
pass
class ControlAmbiguousError(Exception):
'''Multiple matching controls found
'''
pass
class ControlExpiredError(Exception):
'''Control expired error
'''
pass
class TimeoutError(Exception):
'''Timeout exception
'''
pass
|
[
"eeelin@tencent.com"
] |
eeelin@tencent.com
|
7ff960b1f5fe2ab8db39e70e382084c495881cb8
|
1b12e6096c47312b67fa6ff223216945d2efb70c
|
/sandbox/vtk/selection/myinteractor.py
|
139202e49f1fe0d1418bde34dcae5a42beb929c2
|
[
"Apache-2.0"
] |
permissive
|
rboman/progs
|
6e3535bc40f78d692f1f63b1a43193deb60d8d24
|
03eea35771e37d4b3111502c002e74014ec65dc3
|
refs/heads/master
| 2023-09-02T17:12:18.272518
| 2023-08-31T15:40:04
| 2023-08-31T15:40:04
| 32,989,349
| 5
| 2
|
Apache-2.0
| 2022-06-22T10:58:38
| 2015-03-27T14:04:01
|
MATLAB
|
UTF-8
|
Python
| false
| false
| 4,329
|
py
|
# -*- coding: utf-8 -*-
import vtk
colors = vtk.vtkNamedColors()
class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):
def __init__(self, parent=None):
"""register to event listening
"""
self.AddObserver("LeftButtonPressEvent", self.leftButtonPressEvent)
self.selection = None
self.selected_mapper = vtk.vtkDataSetMapper()
self.selected_actor = vtk.vtkActor()
self.dataset = None
def select_one(self):
# get the mouse click position
clickPos = self.GetInteractor().GetEventPosition()
# create a picker and pick at that position
picker = vtk.vtkCellPicker()
picker.Pick(clickPos[0], clickPos[1], 0, self.GetDefaultRenderer())
print("pick")
print(f"\tcell id = {picker.GetCellId()}")
print(f"\t3D pick position = {picker.GetPickPosition()}")
print(f"\t2D mouse position = {picker.GetSelectionPoint()[:2]}")
# the picking could be empty
# in that case, we leave the routine
if picker.GetDataSet():
print(f"\tdataset = {picker.GetDataSet().GetClassName()}")
else:
print(f"\tdataset = None")
return
# no cell has been picked => quit
if picker.GetCellId()==-1:
return
# cell type - we can pick triangles, but also tetras
cell_type = picker.GetDataSet().GetCellType( picker.GetCellId() )
print(f"\tcell type = { vtk.vtkCellTypes.GetClassNameFromTypeId( cell_type )}")
if(cell_type != vtk.VTK_TRIANGLE ):
print("\tWRONG CELL TYPE")
return
# we can pick the wrong ugrid (the red one)
# we store the right one at the first successful picking
if self.dataset is None:
self.dataset = picker.GetDataSet()
if picker.GetDataSet() != self.dataset:
print(f"\tPICKED WRONG DATASET!")
return
# -- create a "vtkSelectionNode" (selection data + selection type)
ids = vtk.vtkIdTypeArray()
ids.SetNumberOfComponents(1)
ids.InsertNextValue(picker.GetCellId())
selectionNode = vtk.vtkSelectionNode()
selectionNode.SetFieldType(vtk.vtkSelectionNode.CELL)
# CELL,POINT,FIELD,VERTEX,EDGE,ROW
selectionNode.SetContentType(vtk.vtkSelectionNode.INDICES)
# SELECTIONS,GLOBALIDS,PEDIGREEIDS,VALUES,INDICES,FRUSTUM,
# LOCATIONS,THRESHOLDS,BLOCKS,QUERY
selectionNode.SetSelectionList(ids)
# -- create a "vtkSelection" (the selection itself);
# it is a set of "selection nodes"
if not self.selection:
self.selection = vtk.vtkSelection()
self.selection.AddNode(selectionNode)
else:
self.selection.Union(selectionNode)
print( f"\tThere are {self.selection.GetNumberOfNodes()} 'selection nodes'.")
# -- DISPLAY: cree une "vtkExtractSelection"
extractSelection = vtk.vtkExtractSelection()
extractSelection.SetInputData(0, picker.GetDataSet())
# extractSelection.SetInputConnection(0, filt.GetOutputPort()) # for the case of a filter
extractSelection.SetInputData(1, self.selection)
extractSelection.Update()
# build a ugrid for display
selected = vtk.vtkUnstructuredGrid()
selected.ShallowCopy(extractSelection.GetOutput())
print( f"\tThere are {selected.GetNumberOfPoints()} points in the selection.")
print( f"\tThere are {selected.GetNumberOfCells()} cells in the selection.")
self.selected_mapper.SetInputData(selected)
self.selected_actor.SetMapper(self.selected_mapper)
self.selected_actor.GetProperty().EdgeVisibilityOn()
self.selected_actor.GetProperty().SetColor( colors.GetColor3d('red') )
self.selected_actor.GetProperty().SetLineWidth(3)
self.GetDefaultRenderer().AddActor(self.selected_actor) # global - not added again if already present
print(f'nb of actors = {self.GetDefaultRenderer().GetActors().GetNumberOfItems()}')
def leftButtonPressEvent(self, obj, event):
"""custom event
"""
self.select_one()
self.OnLeftButtonDown() # calls vtk.vtkInteractorStyleTrackballCamera
|
[
"romain.boman@gmail.com"
] |
romain.boman@gmail.com
|
869d7648d5fa41c0f2565d64f68aac7d9ec636dc
|
8221b7f8740ebff0ae44f3fee48fd06c79483617
|
/Physics/Electron.py
|
2f1f3d9a93ffc7d82886123d8a89a7d9c94c4938
|
[] |
no_license
|
austinh2001/CyclotronHonorsProject
|
16b5e31540a85b2c01a87f6efbf4f1a663195e25
|
7544513dbc442c59a67a3aafced5aeea7f55360e
|
refs/heads/main
| 2023-04-02T05:06:26.652334
| 2021-03-25T00:31:56
| 2021-03-25T00:31:56
| 351,251,192
| 1
| 0
| null | 2021-03-25T00:18:21
| 2021-03-24T23:26:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
import pygame
from Physics.Particle import Particle
from Physics.Vector import Vector
from Physics.CoordinateSystem import CoordinateSystem
from Physics.Force import Force, CREATEFORCE, NONE, CONSTANTFORCE, EARTHGRAVITY, SQUARELAW, CUBELAW, GRAVITATIONALFORCE, ELECTRICFORCE
class Electron(Particle):
def __init__(self, coordinateSystem = None, position_vector = Vector(2,[0,0]), velocity = Vector(2,[0,0])):
if(coordinateSystem is None):
self.coordinateSystem = CoordinateSystem([pygame.display.get_surface().get_size()[0] / 2, pygame.display.get_surface().get_size()[1] / 2], 100,"PPM", pygame.display.get_surface().get_size())
else:
self.coordinateSystem = coordinateSystem
self.timer = 0
self.pathPoints = []
self.position_vector = position_vector
self.velocity = velocity
self.mass = float(9.1093837015e-31)
self.charge = (-1) * float(1.60217662e-19)
self.netForce = Vector(2, [0, 0])
self.acceleration = self.netForce * (1/self.mass)
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("Electron.png")
self.center_pixel_displacement_vector = Vector(2,self.image.get_rect().topleft) - Vector(2, self.image.get_rect().center)
|
[
"austinh2001@gmail.com"
] |
austinh2001@gmail.com
|
1cca891eba9b94cc67116cdbe896d24da3809e90
|
d67a5cc21cd8a22a707a46621a1f5bcaa89baff1
|
/imports.py
|
31c9bb3e888db5c254bf7a846aad80e5637cc11c
|
[] |
no_license
|
Evotushon/Evotushon-s-Guardian-Bot
|
c9891ce32538d6bab3013d37902d1ccc3a1844a7
|
9973690fee777c44fbcedc62def619fc2309e9d3
|
refs/heads/master
| 2023-08-19T14:55:00.874019
| 2021-10-20T14:45:17
| 2021-10-20T14:45:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
import discord
import os
import dotenv
from dotenv import load_dotenv
from discord import embeds
from discord.ext import commands
import random
from discord.player import FFmpegPCMAudio
import asyncio
import time
import traceback
import sys
from discord.ext.commands import Bot
from discord.enums import Status
from discord.utils import get
import utils
|
[
"danielspa2019@gmail.com"
] |
danielspa2019@gmail.com
|
3ed16fe01640223215e8ecb9dd68102306c1c59b
|
592498a0e22897dcc460c165b4c330b94808b714
|
/1000번/1406_에디터.py
|
a89e92eec4a01dc869414b5d997fc614f0d9d6f9
|
[] |
no_license
|
atom015/py_boj
|
abb3850469b39d0004f996e04aa7aa449b71b1d6
|
42b737c7c9d7ec59d8abedf2918e4ab4c86cb01d
|
refs/heads/master
| 2022-12-18T08:14:51.277802
| 2020-09-24T15:44:52
| 2020-09-24T15:44:52
| 179,933,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
from collections import deque
import sys
ip = sys.stdin.readline
lst = deque(list(ip().strip()))
rst = deque([])
for i in range(int(ip())):
cmd = ip().strip()
if cmd[0] == 'L':
if len(lst):
rst.appendleft(lst.pop())
elif cmd[0] == 'D':
if len(rst):
lst.append(rst.popleft())
elif cmd[0] == 'B':
if len(lst):
lst.pop()
else:
lst.append(cmd[2])
for i in lst+rst:
print(i,end='')
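# Example run, using the BOJ 1406 sample input as an assumed illustration:
#   stdin:  abcd
#           3
#           P x
#           L
#           P y
#   stdout: abcdyx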
|
[
"zeezlelove@gmail.com"
] |
zeezlelove@gmail.com
|
2fac5ac1386dcc3835fa66a4a1a7bf2e0b51315d
|
1bb50dc75cd1de2f68de7947f08965538e9b5f7b
|
/app/core/models.py
|
bb5009a28104125c3b02483dad835d6cbbcd1b50
|
[
"MIT"
] |
permissive
|
victorstevan/recipe-app-api
|
73a7e77adcc608aa873a36686f73b7e19553c781
|
be56d255e7e065478c542c57c59987105709fd0d
|
refs/heads/main
| 2023-03-04T11:58:53.377068
| 2021-02-15T01:04:51
| 2021-02-15T01:04:51
| 338,922,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new User"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
|
[
"victorstevan@discente.ufg.br"
] |
victorstevan@discente.ufg.br
|
9006babf1473dae988d40f9aa1ea76ff377e10e2
|
76c81dd2a7e65f292fc7fa46f23eebaa5a40e799
|
/luckysheet_obj/luckysheet_obj/wsgi.py
|
1670bfc804a91201a6b2a8c77b3fead8bdb2aa90
|
[] |
no_license
|
billlaw6/luckysheet_django
|
2631374d20d4cf1d7ac78213cdb339fec6158abc
|
bc9ebd89bcfb5cfbf7bcdcb7e168210b183df172
|
refs/heads/main
| 2023-05-01T14:39:34.723875
| 2021-05-15T08:20:08
| 2021-05-15T08:20:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for luckysheet_obj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'luckysheet_obj.settings')
application = get_wsgi_application()
|
[
"lq65535@163.com"
] |
lq65535@163.com
|
9adbe52821ece3b44f5dca11058f29fcf7f0c162
|
4c5de0652ef4a88dbdba8a4c338aa78a341d2938
|
/ixia52xx/src/driver.py
|
efa8989c776b9e0376631ae9c7bc4e97a928a7c5
|
[] |
no_license
|
QualiSystemsLab/Ixia52xx-Shell
|
9788aad8ae676a7cc66d5289c2e69ceeccfcd587
|
ee9226ed3ac38cb792702b91a00be7b3ed9ea27f
|
refs/heads/master
| 2021-01-20T22:31:13.741873
| 2016-08-08T19:58:30
| 2016-08-08T19:58:30
| 64,963,233
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,004
|
py
|
from cloudshell.shell.core.resource_driver_interface import ResourceDriverInterface
from cloudshell.shell.core.driver_context import InitCommandContext, ResourceCommandContext, AutoLoadResource, AutoLoadAttribute, AutoLoadDetails
from cloudshell.api.cloudshell_api import CloudShellAPISession
import requests
from requests.auth import HTTPBasicAuth
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import json
import inject
class Ixia52XxDriver (ResourceDriverInterface):
def cleanup(self):
"""
Destroy the driver session; this function is called every time a driver instance is destroyed
This is a good place to close any open sessions, finish writing to log files
"""
pass
def __init__(self):
"""
ctor must be without arguments, it is created with reflection at run time
"""
pass
def initialize(self, context):
"""
Initialize the driver session; this function is called every time a new instance of the driver is created
This is a good place to load and cache the driver configuration, initiate sessions etc.
:param InitCommandContext context: the context the command runs on
"""
pass
# The ApplyConnectivityChanges function is intended to be used for using switches as connectivity providers
# for other devices. If the Switch shell is intended to be used as a DUT only, there is no need to implement it
def ApplyConnectivityChanges(self, context, request):
"""
Configures VLANs on multiple ports or port-channels
:param ResourceCommandContext context: The context object for the command with resource and reservation info
:param str request: A JSON object with the list of requested connectivity changes
:return: a json object with the list of connectivity changes which were carried out by the switch
:rtype: str
"""
"""
:type context: drivercontext.ResourceCommandContext
:type json: str
"""
session = CloudShellAPISession(host=context.connectivity.server_address,
token_id=context.connectivity.admin_auth_token,
domain="Global")
requestJson = json.loads(request)
#Build Response
response = {"driverResponse":{"actionResults":[]}}
for actionResult in requestJson['driverRequest']['actions']:
actionResultTemplate = {"actionId":None, "type":None, "infoMessage":"", "errorMessage":"", "success":"True", "updatedInterface":"None"}
actionResultTemplate['type'] = str(actionResult['type'])
actionResultTemplate['actionId'] = str(actionResult['actionId'])
response["driverResponse"]["actionResults"].append(actionResultTemplate)
return 'command_json_result=' + str(response) + '=command_json_result_end'
def get_inventory(self, context):
"""
Discovers the resource structure and attributes.
:param AutoLoadCommandContext context: the context the command runs on
:return Attribute and sub-resource information for the Shell resource
:rtype: AutoLoadDetails
"""
session = CloudShellAPISession(host=context.connectivity.server_address,
token_id=context.connectivity.admin_auth_token,
domain="Global")
pw = session.DecryptPassword(context.resource.attributes['Password']).Value
un = context.resource.attributes["User"]
ip = context.resource.address
port = str(context.resource.attributes["API Port"])
prefix = str(context.resource.attributes["API Access"])
url = prefix+"://"+ip+":"+port+"/api"
sub_resources = []
attributes = [AutoLoadAttribute('', 'Model', 'Ixia 58xx'),AutoLoadAttribute('', 'Vendor', 'Ixia')]
# get all ports
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
portsRequest = requests.get(url+'/ports', auth=HTTPBasicAuth(un,pw), verify=False)
portsObj = json.loads(portsRequest.text)
# loop thru each port and learn more
for port in portsObj:
portRequest = requests.get(url+'/ports/'+str(port['id']), auth=HTTPBasicAuth(un,pw), verify=False)
portObj = json.loads(portRequest.text)
sub_resources.append(AutoLoadResource(model='NTO Port', name=portObj['default_name'], relative_address=str(port['id'])))
attributes.append(AutoLoadAttribute(str(port['id']), 'Port Speed', portObj['media_type']))
attributes.append(AutoLoadAttribute(str(port['id']), 'Serial Number', portObj['uuid']))
attributes.append(AutoLoadAttribute(str(port['id']), 'Port Description', str(portObj['name']) + " " + str(portObj['description'])))
return AutoLoadDetails(sub_resources,attributes)
|
[
"chris@grabosky.net"
] |
chris@grabosky.net
|
b29cd8cd90efb7cd3c3dcc4d135b53ae21c536a5
|
f8104b29a8d0dbeb407060e494a206ca69335aeb
|
/tools/datasets/buildchange/json2coco_city_trainval.py
|
dabe0ff848ca135aa66f2af774888c1dc40685b2
|
[] |
no_license
|
Sebastixian/wwtool
|
c19f665f96e8b942e94af47db590f5bb28072f06
|
2f462a3d028b766234d62a3ef706a0f08f10680a
|
refs/heads/master
| 2023-06-01T04:21:22.066639
| 2021-06-25T07:40:13
| 2021-06-25T07:40:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,213
|
py
|
import argparse
import os
import cv2
import json
import csv
import shutil
import numpy as np
import wwtool
import mmcv
class SIMPLETXT2COCO():
def __init__(self,
imgpath=None,
annopath=None,
imageset_file=None,
image_format='.jpg',
anno_format='.txt',
data_categories=None,
data_info=None,
data_licenses=None,
data_type="instances",
groundtruth=True,
small_object_area=0,
sub_anno_fold=False,
cities=None):
super(SIMPLETXT2COCO, self).__init__()
self.imgpath = imgpath
self.annopath = annopath
self.image_format = image_format
self.anno_format = anno_format
self.categories = data_categories
self.info = data_info
self.licenses = data_licenses
self.type = data_type
self.small_object_area = small_object_area
self.small_object_idx = 0
self.groundtruth = groundtruth
self.max_object_num_per_image = 0
self.sub_anno_fold = sub_anno_fold
self.imageset_file = imageset_file
self.imgpaths, self.annotpaths = [], []
for label_fn in os.listdir(annopath):
basename = wwtool.get_basename(label_fn)
self.imgpaths.append(os.path.join(imgpath, basename + '.png'))
self.annotpaths.append(os.path.join(annopath, basename + '.json'))
def get_image_annotation_pairs(self):
images = []
annotations = []
index = 0
progress_bar = mmcv.ProgressBar(len(self.imgpaths))
imId = 0
for imgfile, annofile in zip(self.imgpaths, self.annotpaths):
# imgpath = os.path.join(self.imgpath, name + self.image_format)
# annotpath = os.path.join(self.annopath, name + self.anno_format)
name = wwtool.get_basename(imgfile)
annotations_coco = self.__generate_coco_annotation__(annofile, imgfile)
# if annotation is empty, skip this annotation
if annotations_coco != [] or self.groundtruth == False:
height, width, channels = 1024, 1024, 3
images.append({"date_captured": "2019",
"file_name": name + self.image_format,
"id": imId + 1,
"license": 1,
"url": "http://jwwangchn.cn",
"height": height,
"width": width})
for annotation in annotations_coco:
index = index + 1
annotation["image_id"] = imId + 1
annotation["id"] = index
annotations.append(annotation)
imId += 1
if imId % 500 == 0:
print("\nImage ID: {}, Instance ID: {}, Small Object Counter: {}, Max Object Number: {}".format(imId, index, self.small_object_idx, self.max_object_num_per_image))
progress_bar.update()
return images, annotations
def __generate_coco_annotation__(self, annotpath, imgpath):
"""
docstring here
:param self:
:param annotpath: the path of each annotation
:param return: dict()
"""
objects = self.__simpletxt_parse__(annotpath, imgpath)
coco_annotations = []
for object_struct in objects:
bbox = object_struct['bbox']
segmentation = object_struct['segmentation']
label = object_struct['label']
roof_bbox = object_struct['roof_bbox']
building_bbox = object_struct['building_bbox']
roof_mask = object_struct['roof_mask']
footprint_mask = object_struct['footprint_mask']
ignore_flag = object_struct['ignore_flag']
offset = object_struct['offset']
iscrowd = object_struct['iscrowd']
width = bbox[2]
height = bbox[3]
area = height * width
if area <= self.small_object_area and self.groundtruth:
self.small_object_idx += 1
continue
coco_annotation = {}
coco_annotation['bbox'] = bbox
coco_annotation['segmentation'] = [segmentation]
coco_annotation['category_id'] = label
coco_annotation['area'] = float(area)  # np.float is removed in recent NumPy
coco_annotation['roof_bbox'] = roof_bbox
coco_annotation['building_bbox'] = building_bbox
coco_annotation['roof_mask'] = roof_mask
coco_annotation['footprint_mask'] = footprint_mask
coco_annotation['ignore_flag'] = ignore_flag
coco_annotation['offset'] = offset
coco_annotation['iscrowd'] = iscrowd
coco_annotations.append(coco_annotation)
return coco_annotations
def __simpletxt_parse__(self, label_file, image_file):
"""
(xmin, ymin, xmax, ymax)
"""
annotations = mmcv.load(label_file)['annotations']
# roof_mask, footprint_mask, roof_bbox, building_bbox, label, ignore, offset
objects = []
for annotation in annotations:
object_struct = {}
roof_mask = annotation['roof']
roof_polygon = wwtool.mask2polygon(roof_mask)
roof_bound = roof_polygon.bounds # xmin, ymin, xmax, ymax
footprint_mask = annotation['footprint']
footprint_polygon = wwtool.mask2polygon(footprint_mask)
footprint_bound = footprint_polygon.bounds
building_xmin = np.minimum(roof_bound[0], footprint_bound[0])
building_ymin = np.minimum(roof_bound[1], footprint_bound[1])
building_xmax = np.maximum(roof_bound[2], footprint_bound[2])
building_ymax = np.maximum(roof_bound[3], footprint_bound[3])
building_bound = [building_xmin, building_ymin, building_xmax, building_ymax]
xmin, ymin, xmax, ymax = list(roof_bound)
bbox_w = xmax - xmin
bbox_h = ymax - ymin
object_struct['bbox'] = [xmin, ymin, bbox_w, bbox_h]
object_struct['roof_bbox'] = object_struct['bbox']
xmin, ymin, xmax, ymax = list(building_bound)
bbox_w = xmax - xmin
bbox_h = ymax - ymin
object_struct['building_bbox'] = [xmin, ymin, bbox_w, bbox_h]
object_struct['roof_mask'] = roof_mask
object_struct['footprint_mask'] = footprint_mask
object_struct['ignore_flag'] = annotation['ignore']
object_struct['offset'] = annotation['offset']
object_struct['segmentation'] = roof_mask
object_struct['label'] = 1
object_struct['iscrowd'] = object_struct['ignore_flag']
objects.append(object_struct)
return objects
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument(
'--imagesets',
type=str,
nargs='+',
choices=['trainval', 'test'])
parser.add_argument(
'--release_version', default='v1', type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
# basic dataset information
info = {"year" : 2019,
"version" : "1.0",
"description" : "SIMPLETXT-Building-COCO",
"contributor" : "Jinwang Wang",
"url" : "jwwangchn.cn",
"date_created" : "2019"
}
licenses = [{"id": 1,
"name": "Attribution-NonCommercial",
"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
}]
original_simpletxt_class = {'building': 1}
converted_simpletxt_class = [{'supercategory': 'none', 'id': 1, 'name': 'building', }]
# dataset's information
image_format='.png'
anno_format='.txt'
core_dataset_name = 'buildchange'
cities = ['sampling']
# sub_city_folds = {'shanghai': ['arg']}
# cities = ['shanghai', 'beijing', 'jinan', 'haerbin', 'chengdu']
release_version = 'v2'
groundtruth = True
for idx, city in enumerate(cities):
anno_name = [core_dataset_name, release_version, 'trainval', city, 'roof_footprint']
print("Begin processing: {}".format("_".join(anno_name)))
imgpath = f'./data/{core_dataset_name}/{release_version}/{city}/images'
annopath = f'./data/{core_dataset_name}/{release_version}/{city}/labels_json'
save_path = f'./data/{core_dataset_name}/{release_version}/coco/annotations'
if not os.path.exists(save_path):
os.makedirs(save_path)
simpletxt2coco = SIMPLETXT2COCO(imgpath=imgpath,
annopath=annopath,
image_format=image_format,
anno_format=anno_format,
data_categories=converted_simpletxt_class,
data_info=info,
data_licenses=licenses,
data_type="instances",
groundtruth=groundtruth,
small_object_area=0,
cities=cities)
images, annotations = simpletxt2coco.get_image_annotation_pairs()
json_data = {"info" : simpletxt2coco.info,
"images" : images,
"licenses" : simpletxt2coco.licenses,
"type" : simpletxt2coco.type,
"annotations" : annotations,
"categories" : simpletxt2coco.categories}
with open(os.path.join(save_path, "_".join(anno_name) + ".json"), "w") as jsonfile:
json.dump(json_data, jsonfile, sort_keys=True, indent=4)
|
[
"jwwangchn@outlook.com"
] |
jwwangchn@outlook.com
|
191bcb34fb1ee3e16cc9866069b360e9209d9614
|
ee64a643bddd95ad9dfcffbe18ed7a3aa0cd3879
|
/haverifier/benchmark/scenarios/availability/attacker/baseattacker.py
|
d5fb2c358fd95727a44bf772458688896c95aea5
|
[] |
no_license
|
L-kay/HAverifier
|
75e04a4851a00da09df82b4624b0d7301ea18199
|
72e0099ee1798701a5f9e3a7d17bf5100e9aecb4
|
refs/heads/master
| 2021-01-13T11:40:12.050000
| 2016-10-24T03:46:11
| 2016-10-24T03:46:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,502
|
py
|
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import pkg_resources
import yaml
import logging
import os
import haverifier.common.utils as utils
LOG = logging.getLogger(__name__)
attacker_conf_path = pkg_resources.resource_filename(
"haverifier.benchmark.scenarios.availability",
"attacker_conf.yaml")
class AttackerMgr(object):
def __init__(self):
self._attacker_list = []
def init_attackers(self,attacker_cfgs,context):
LOG.debug("attackerMgr config: %s" % attacker_cfgs)
LOG.debug("attackerMgr context: %s" % context)
for cfg in attacker_cfgs:
attacker_cls = BaseAttacker.get_attacker_cls(cfg)
attacker_ins = attacker_cls(cfg,context)
attacker_ins.key = cfg['key']
attacker_ins.setup()
self._attacker_list.append(attacker_ins)
def __getitem__(self, item):
for obj in self._attacker_list:
if(obj.key == item):
return obj
def recover(self):
for _instance in self._attacker_list:
_instance.recover()
class BaseAttacker(object):
attacker_cfgs = {}
def __init__(self, config, context):
if not BaseAttacker.attacker_cfgs:
with open(attacker_conf_path) as stream:
BaseAttacker.attacker_cfgs = yaml.safe_load(stream)
self._config = config
self._context = context
self.setup_done = False
@staticmethod
def get_attacker_cls(attacker_cfg):
'''return attacker instance of specified type'''
attacker_type = attacker_cfg['fault_type']
for attacker_cls in utils.itersubclasses(BaseAttacker):
if attacker_type == attacker_cls.__attacker_type__:
return attacker_cls
raise RuntimeError("No such attacker type %s" % attacker_type)
def get_script_fullpath(self, path):
base_path = os.path.dirname(attacker_conf_path)
return os.path.join(base_path, path)
def recover(self):
pass
|
[
"iheyu22@gmail.com"
] |
iheyu22@gmail.com
|
e0cca15b4698cfcef55c59c32ad1ec019b327f0b
|
b576ed1ff65700d505f687961cbed86fe94b1c3f
|
/objectModel/Python/cdm/utilities/copy_data_utils.py
|
52fd4d1ee5390f942bbde1ef66b2b5cca9e4104f
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
AzureMentor/CDM
|
c80761737c92cf6561d4b982b4882b1b1c5265d3
|
84d3928995e7ab3bba0a283771e5e26639408643
|
refs/heads/master
| 2021-11-30T17:52:42.274900
| 2021-11-27T18:38:19
| 2021-11-27T18:38:19
| 217,569,642
| 1
| 0
|
NOASSERTION
| 2021-11-27T18:38:20
| 2019-10-25T16:04:16
|
Java
|
UTF-8
|
Python
| false
| false
| 870
|
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information
from typing import Union, List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from cdm.objectmodel import CdmCollection, CdmObject
from cdm.utilities import ResolveOptions, CopyOptions
def _array_copy_data(res_opt: 'ResolveOptions', source: Union['CdmCollection', List['CdmObject']], options: 'CopyOptions') -> Optional[List]:
    """Create a list containing the persisted (to_data) form of each element in the source iterable"""
if not source:
return None
casted = []
for elem in source:
if elem:
from cdm.persistence import PersistenceLayer
data = PersistenceLayer.to_data(elem, res_opt, options, PersistenceLayer.CDM_FOLDER)
casted.append(data)
return casted
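# Hedged usage sketch (names are illustrative; real instances come from the
# surrounding object model):
#
#     data = _array_copy_data(res_opt, some_cdm_collection, copy_options)
#     # -> a plain list of persisted elements, or None if the source is empty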
|
[
"nebanfic@microsoft.com"
] |
nebanfic@microsoft.com
|
e85fdd0b379d44e47a1ca606ee20c8893cb096dc
|
be4373a6ecda4f8b5a911c661ea618f6d1f48b8d
|
/venv-win-py/Scripts/futurize-script.py
|
9c91538bb267339876a19e7009611a40fe362940
|
[] |
no_license
|
zzm88/weirdinstrument_mezzanine
|
a46175c875d4e25a54e923b9845d7b7c413b6ec9
|
954b4faa717ba9dc1aa874b66ad0cbf7c60b094a
|
refs/heads/main
| 2023-07-06T21:08:01.392907
| 2021-08-11T08:34:06
| 2021-08-11T08:34:06
| 390,909,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
#!c:\users\35352\documents\localprojects\weirdinstrument_deploy\venv-win-py\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'future==0.18.2'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'futurize')())
|
[
"sea3@qq.com"
] |
sea3@qq.com
|
bde76c61c73ab8a6bf67b2eced51bcaa2646bef5
|
6bb36612a6121b0dfb32e2ec83fdff353fc0b349
|
/plot_kmeans_digits.py
|
182d6d54f94e012babe3df34cd725c898b6a970f
|
[] |
no_license
|
mandosoft/Python-Finance
|
526356ae2c5f3cee7e4552bdf8df1c80daefe3ad
|
912845d08298cadc1289593df9fbce4d342afdca
|
refs/heads/master
| 2020-04-19T06:40:36.431995
| 2019-01-28T22:56:04
| 2019-01-28T22:56:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,495
|
py
|
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
#%%
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(82 * '_')
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(82 * '_')
# #############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # mesh step; the grid spans [x_min, x_max] x [y_min, y_max]
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
|
[
"thomasdtownsley@gmail.com"
] |
thomasdtownsley@gmail.com
|
fa73fda921895cf608f6d767d3b04fd7afe93fc9
|
bccb2a4c707415133f7f0bb2459c1f7e60f0b6a4
|
/ResNet_Queue/grad-cam.py
|
b7447fe4cb03f7d380014080f44a136eb02ead85
|
[] |
no_license
|
High-East/Fight_Detection
|
99a5f257f4ebda62e8c347a9d7db73fba8d80b88
|
67668f73bc8cd717c2a903c1f006cd20d7d33390
|
refs/heads/master
| 2020-12-20T06:07:04.808486
| 2020-01-25T10:33:18
| 2020-01-25T10:33:18
| 235,983,794
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,452
|
py
|
# https://subinium.github.io/Keras-5-2/
import os
import numpy as np
import keras
from keras import models
from keras import layers
from keras import optimizers
from keras import backend as K
import matplotlib.pyplot as plt
# Code 5-40: Load the VGG16 network with pretrained weights
from keras.applications.vgg16 import VGG16
K.clear_session()
# All previous examples dropped the fully connected classifier on top; here we keep it
model = VGG16(weights='imagenet')
# Code 5-41: Preprocess an input image for VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np
# Path to the image
img_path = './img/elephant.jpg'
# Returns a 224 x 224 Python Imaging Library (PIL) image object
img = image.load_img(img_path, target_size=(224, 224))
# NumPy float32 array of shape (224, 224, 3)
x = image.img_to_array(img)
# Add a dimension to turn the array into a batch of shape (1, 224, 224, 3)
x = np.expand_dims(x, axis=0)
# Preprocess the data (channel-wise color normalization)
x = preprocess_input(x)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
# Code 5-42: The Grad-CAM algorithm
idx_ele = np.argmax(preds[0])
# The 'African elephant' entry in the prediction vector
african_elephant_output = model.output[:, idx_ele]
# Output feature map of block5_conv3, the last convolutional layer in VGG16
last_conv_layer = model.get_layer('block5_conv3')
# Gradient of the 'African elephant' class with regard to the block5_conv3 feature map
grads = K.gradients(african_elephant_output, last_conv_layer.output)[0]
# Vector of shape (512,) holding the mean gradient for each feature-map channel
pooled_grads = K.mean(grads, axis=(0, 1, 2))
# Given a sample image, returns the pooled_grads defined above and the block5_conv3 output
iterate = K.function([model.input], [pooled_grads, last_conv_layer.output[0]])
# Feed in the sample image of two elephants and get two NumPy arrays back
pooled_grads_value, conv_layer_output_value = iterate([x])
# Multiply each channel of the feature map by its importance for the 'African elephant' class
for i in range(512):
    conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
# The channel-wise mean of the resulting feature map is the class activation heatmap
heatmap = np.mean(conv_layer_output_value, axis=-1)
# Code 5-43: Post-process the heatmap
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
plt.matshow(heatmap)
plt.show()
# Code 5-44: Superimpose the heatmap on the original image
import cv2
# Load the original image with the cv2 module
img = cv2.imread(img_path)
# Resize the heatmap to match the size of the original image
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
# Convert the heatmap to 8-bit values
heatmap = np.uint8(255 * heatmap)
# Apply the jet colormap to the heatmap
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
# 0.4 is the heatmap intensity factor
superimposed_img = heatmap * 0.4 + img
# Save the blended image to disk
cv2.imwrite('./img/elephant_cam.jpg', superimposed_img)
# plt.imshow() expects an image array, not a path, so read the saved file back
plt.imshow(plt.imread('./img/elephant_cam.jpg'))
plt.show()
|
[
"rhehd127@gmail.com"
] |
rhehd127@gmail.com
|
cbaae3b29f320b1ca247854342e356e573cae8b5
|
75a56d75b6ad05444988371bd61ffdc7844ee740
|
/TopL.py
|
cf8a382c472264e3985afaa35f9a5d29d84c2157
|
[
"MIT"
] |
permissive
|
bflorentino/Contact-Book-App
|
aadfcf375a3790595280898b856bdb3b8553307e
|
1d0601a3646fde2267e2489063f54321816f7ce8
|
refs/heads/main
| 2023-07-11T04:00:27.909803
| 2021-08-14T20:51:42
| 2021-08-14T20:51:42
| 396,089,765
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,537
|
py
|
from GraphicInterface import GraphicInteface
from WorkInterface import WorkInterface
import tkinter as tk
class TopL(GraphicInteface):
    """Top-level window used to add, view, update and delete contacts.
    Inherits its basic window behaviour from GraphicInteface.
    """
def __init__(self, title, geometry, resize, bg, window):
super().__init__(title, geometry, resize, bg, window)
self.workInterface = WorkInterface()
    def setMainFrame(self):
        """Set up the top level's main frame."""
self.mainFrame = tk.Frame(self.window, bd = 3, relief="sunken")
self.mainFrame.pack(fill = "both", expand = "yes", padx=15, pady=15)
    def setDataForm(self):
        """Set up the contact form used to enter a new contact's information,
        update it, or just view it. Generates the entry widgets and labels
        the user needs.
        """
self.nameLabel = tk.Label(self.mainFrame, text = "Name:", anchor="nw")
self.lastNameLabel = tk.Label(self.mainFrame, text = "Last Name:", anchor="nw")
self.phoneNumberLabel = tk.Label(self.mainFrame, text="Phone Number:", anchor="nw")
self.emaiLabel = tk.Label(self.mainFrame, text = "Email:", anchor="nw")
self.nameLabel.grid(row = 0, column = 0, pady = 15, padx=10)
self.nameEntry = tk.Entry(self.mainFrame, width=30)
self.nameEntry.grid(row = 0, column = 1)
self.lastNameLabel.grid(row = 1, column = 0, padx=10)
self.lastnameEntry = tk.Entry(self.mainFrame, width=30)
self.lastnameEntry.grid(row = 1, column = 1)
self.phoneNumberLabel.grid(row = 2, column=0, pady = 15, padx = 10)
self.phoneNumberEntry = tk.Entry(self.mainFrame, width=30)
self.phoneNumberEntry.grid(row = 2, column = 1)
self.emaiLabel.grid(row = 3, column = 0, padx = 10)
self.emailEntry = tk.Entry(self.mainFrame, width=30)
self.emailEntry.grid(row = 3, column = 1)
    def setAddContactButtons(self, refresh):
        """Set up the buttons that let the user add a new contact
        and save it in the database.
        Args:
            refresh (function): callback passed through to the worker
                function so the contact list can be refreshed afterwards
        """
self.buttonAdd = tk.Button(self.mainFrame, text = "Add Contact", width=15, cursor="hand2",
command=lambda: self.workInterface.sendNewContactData(self.nameEntry.get(),
self.lastnameEntry.get(), self.phoneNumberEntry.get(), self.emailEntry.get(),
self.window, refresh))
self.buttonAdd.grid(row = 4, column = 0, columnspan=2 , pady = (45, 10))
self.buttonCancel = tk.Button(self.mainFrame, text = "Cancel", width=15, cursor="hand2",
command = self.window.destroy)
self.buttonCancel.grid(row = 5, columnspan=2)
    def setContactInfo(self, data):
        """Fill the form in the top level with the contact's info,
        making all contact data visible.
        Args:
            data (list): list containing all the contact data to show in the toplevel
        """
        data = [data[x] if data[x] is not None else "" for x in range(len(data))]
self.setDataForm()
self.nameEntry.insert(0, data[1])
self.nameEntry.config(state="readonly")
self.lastnameEntry.insert(0, data[2])
self.lastnameEntry.config(state="readonly")
self.phoneNumberEntry.insert(0, data[3])
self.phoneNumberEntry.config(state="readonly")
self.emailEntry.insert(0, data[4])
self.emailEntry.config(state="readonly")
def setUpdateContactButtons(self, id, refresh):
"""
        In case the user needs to update or delete any contact information, these
        buttons will be visible in the window. This sets the buttons up.
        Args:
            id (int): the contact key to update or delete
            refresh (function): callback passed through to the worker function
"""
self.updateContact = tk.Button(self.mainFrame, text = "Update Data", width=15, cursor="hand2",
command = lambda: self._setUpdateContactInfoButtons(id, refresh))
self.updateContact.grid(row = 4, column = 0, columnspan=2 , pady = (45, 10))
self.deleteContact = tk.Button(self.mainFrame, text = "Delete Contact", width=15, cursor="hand2",
command= lambda: self.workInterface.Deletecontact(id, refresh, self.window))
self.deleteContact.grid(row = 5, columnspan=2, pady = (0, 10))
self.buttonCancel = tk.Button(self.mainFrame, text = "Cancel", width=15, cursor="hand2",
command = self.window.destroy)
self.buttonCancel.grid(row = 6, columnspan=2)
    def _setUpdateContactInfoButtons(self, id, refresh):
        """Once the user has pressed the button to update the contact,
        this function makes a button visible for saving the new information
        in the database.
        Args:
            id (int): the contact key whose information is updated
            refresh (function): callback passed through to the worker function
"""
self.returned = False
self.updateContact.grid_forget()
self.deleteContact.grid_forget()
self.nameEntry.config(state="normal")
self.lastnameEntry.config(state="normal")
self.phoneNumberEntry.config(state="normal")
self.emailEntry.config(state="normal")
self.updateContactInfo = tk.Button(self.mainFrame, text = "Save new Data", width=15, cursor="hand2",
command = lambda: self.workInterface.UpdateContactInfo(["contact_Name", "last_Name", "Phone_Number", "Email"],
[self.nameEntry.get(), self.lastnameEntry.get(), self.phoneNumberEntry.get(),
self.emailEntry.get()], id, refresh, self.window))
self.updateContactInfo.grid(row = 4, column = 0, columnspan=2 , pady = (45, 10))
|
[
"bryanmontero81@gmail.com"
] |
bryanmontero81@gmail.com
|
154e62c1adac4bc07d2eb032a2e5be42fb548b37
|
397498fe98dd87abcd79df77fc6cec1c65ff16d7
|
/scripts/fix-focus
|
e55c6f921fcdf382be4351670e9fd4b09d1674ba
|
[] |
no_license
|
CADBOT/.pentadactyl
|
a52db9e2f3f2e1717f1763275ae4ea9298e355ad
|
7bb165604b361cd401b99a4fc719c5fe9afdd887
|
refs/heads/master
| 2020-02-26T16:28:54.514793
| 2015-04-19T14:01:37
| 2015-04-19T14:01:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
#!/usr/bin/python
# Automator -> Service -> Run Shell Script -> Service receives: no input -> in: Firefox ->
# Shell: /bin/bash -> content: path-to-this-script -> Save as: fix-focus
# System Preferences -> Keyboard -> Keyboard Shortcuts -> Services -> fix-focus -> bind a key
from Quartz.CoreGraphics import *
def mouseEvent(type, posx, posy):
theEvent = CGEventCreateMouseEvent(
None,
type,
(posx,posy),
kCGMouseButtonLeft)
CGEventPost(kCGHIDEventTap, theEvent)
def mousemove(posx,posy):
mouseEvent(kCGEventMouseMoved, posx,posy);
def mouseclick(posx,posy):
mouseEvent(kCGEventLeftMouseDown, posx,posy);
mouseEvent(kCGEventLeftMouseUp, posx,posy);
# Save current mouse position
ourEvent = CGEventCreate(None);
currentpos = CGEventGetLocation(ourEvent);
# Click bottom right corner
mainMonitor = CGDisplayBounds(CGMainDisplayID())
mouseclick(int(mainMonitor.size.width - 3), int(mainMonitor.size.height - 16));
# Press and release ESC (virtual key code 53); synthetic key events should be paired
keyEvent = CGEventCreateKeyboardEvent(None, 53, True)
CGEventPost(kCGHIDEventTap, keyEvent)
keyUpEvent = CGEventCreateKeyboardEvent(None, 53, False)
CGEventPost(kCGHIDEventTap, keyUpEvent)
# Restore mouse position
mousemove(int(currentpos.x),int(currentpos.y));
|
[
"azuwis@gmail.com"
] |
azuwis@gmail.com
|
|
9bbe6d6fdb9a744918ebab1c2d430323a7d02271
|
7c94bd20b7ee069dfb557f41279416aba7d8447a
|
/exchangelib/folders/roots.py
|
7b4ec7acd98342333d43a75de5373ee33c2603cf
|
[
"BSD-2-Clause"
] |
permissive
|
AnkushGupta02/exchangelib
|
63a42d70fe8254ca2edb6075ac05822a8ccaae01
|
5430e603a1b42248c6a154ae24270b63e94cc49d
|
refs/heads/master
| 2022-08-19T08:34:49.634728
| 2020-05-28T10:25:33
| 2020-05-28T10:25:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,465
|
py
|
import logging
from ..errors import ErrorAccessDenied, ErrorFolderNotFound, ErrorNoPublicFolderReplicaAvailable, ErrorItemNotFound, \
ErrorInvalidOperation
from ..fields import EffectiveRightsField
from ..properties import Fields
from ..version import EXCHANGE_2007_SP1, EXCHANGE_2010_SP1
from .collections import FolderCollection
from .base import BaseFolder
from .known_folders import MsgFolderRoot, NON_DELETEABLE_FOLDERS, WELLKNOWN_FOLDERS_IN_ROOT, \
WELLKNOWN_FOLDERS_IN_ARCHIVE_ROOT
from .queryset import SingleFolderQuerySet, SHALLOW
log = logging.getLogger(__name__)
class RootOfHierarchy(BaseFolder):
"""Base class for folders that implement the root of a folder hierarchy"""
    # A list of wellknown, or "distinguished", folders that belong in this folder hierarchy. See
# https://docs.microsoft.com/en-us/dotnet/api/microsoft.exchange.webservices.data.wellknownfoldername
# and https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/distinguishedfolderid
# 'RootOfHierarchy' subclasses must not be in this list.
WELLKNOWN_FOLDERS = []
LOCAL_FIELDS = Fields(
# This folder type also has 'folder:PermissionSet' on some server versions, but requesting it sometimes causes
        # 'ErrorAccessDenied', as reported by some users. Ignore it entirely for root folders - its usefulness is
# deemed minimal at best.
EffectiveRightsField('effective_rights', field_uri='folder:EffectiveRights', is_read_only=True,
supported_from=EXCHANGE_2007_SP1),
)
FIELDS = BaseFolder.FIELDS + LOCAL_FIELDS
__slots__ = tuple(f.name for f in LOCAL_FIELDS) + ('_account', '_subfolders')
# A special folder that acts as the top of a folder hierarchy. Finds and caches subfolders at arbitrary depth.
def __init__(self, **kwargs):
self._account = kwargs.pop('account', None) # A pointer back to the account holding the folder hierarchy
super().__init__(**kwargs)
self._subfolders = None # See self._folders_map()
@property
def account(self):
return self._account
@property
def root(self):
return self
@property
def parent(self):
return None
def refresh(self):
self._subfolders = None
super().refresh()
@classmethod
def register(cls, *args, **kwargs):
if cls is not RootOfHierarchy:
raise TypeError('For folder roots, custom fields must be registered on the RootOfHierarchy class')
return super().register(*args, **kwargs)
@classmethod
def deregister(cls, *args, **kwargs):
if cls is not RootOfHierarchy:
raise TypeError('For folder roots, custom fields must be registered on the RootOfHierarchy class')
return super().deregister(*args, **kwargs)
def get_folder(self, folder_id):
return self._folders_map.get(folder_id, None)
def add_folder(self, folder):
if not folder.id:
raise ValueError("'folder' must have an ID")
self._folders_map[folder.id] = folder
def update_folder(self, folder):
if not folder.id:
raise ValueError("'folder' must have an ID")
self._folders_map[folder.id] = folder
def remove_folder(self, folder):
if not folder.id:
raise ValueError("'folder' must have an ID")
try:
del self._folders_map[folder.id]
except KeyError:
pass
def clear_cache(self):
self._subfolders = None
def get_children(self, folder):
for f in self._folders_map.values():
if not f.parent:
continue
if f.parent.id == folder.id:
yield f
@classmethod
def get_distinguished(cls, account):
"""Gets the distinguished folder for this folder class"""
if not cls.DISTINGUISHED_FOLDER_ID:
raise ValueError('Class %s must have a DISTINGUISHED_FOLDER_ID value' % cls)
try:
return cls.resolve(
account=account,
folder=cls(account=account, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
)
except ErrorFolderNotFound:
raise ErrorFolderNotFound('Could not find distinguished folder %s' % cls.DISTINGUISHED_FOLDER_ID)
def get_default_folder(self, folder_cls):
# Returns the distinguished folder instance of type folder_cls belonging to this account. If no distinguished
# folder was found, try as best we can to return the default folder of type 'folder_cls'
if not folder_cls.DISTINGUISHED_FOLDER_ID:
raise ValueError("'folder_cls' %s must have a DISTINGUISHED_FOLDER_ID value" % folder_cls)
# Use cached distinguished folder instance, but only if cache has already been prepped. This is an optimization
# for accessing e.g. 'account.contacts' without fetching all folders of the account.
if self._subfolders:
for f in self._folders_map.values():
# Require exact class, to not match subclasses, e.g. RecipientCache instead of Contacts
if f.__class__ == folder_cls and f.is_distinguished:
log.debug('Found cached distinguished %s folder', folder_cls)
return f
try:
log.debug('Requesting distinguished %s folder explicitly', folder_cls)
return folder_cls.get_distinguished(root=self)
except ErrorAccessDenied:
# Maybe we just don't have GetFolder access? Try FindItems instead
log.debug('Testing default %s folder with FindItem', folder_cls)
fld = folder_cls(root=self, name=folder_cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
fld.test_access()
return self._folders_map.get(fld.id, fld) # Use cached instance if available
except ErrorFolderNotFound:
# The Exchange server does not return a distinguished folder of this type
pass
raise ErrorFolderNotFound('No useable default %s folders' % folder_cls)
@property
def _folders_map(self):
if self._subfolders is not None:
return self._subfolders
# Map root, and all subfolders of root, at arbitrary depth by folder ID. First get distinguished folders, so we
# are sure to apply the correct Folder class, then fetch all subfolders of this root.
folders_map = {self.id: self}
distinguished_folders = [
cls(root=self, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
for cls in self.WELLKNOWN_FOLDERS
if cls.get_folder_allowed and cls.supports_version(self.account.version)
]
for f in FolderCollection(account=self.account, folders=distinguished_folders).resolve():
if isinstance(f, (ErrorFolderNotFound, ErrorNoPublicFolderReplicaAvailable)):
# This is just a distinguished folder the server does not have
continue
if isinstance(f, ErrorInvalidOperation):
# This is probably a distinguished folder the server does not have. We previously tested the exact
# error message (f.value), but some Exchange servers return localized error messages, so that's not
# possible to do reliably.
continue
if isinstance(f, ErrorItemNotFound):
# Another way of telling us that this is a distinguished folder the server does not have
continue
if isinstance(f, ErrorAccessDenied):
# We may not have GetFolder access, either to this folder or at all
continue
if isinstance(f, Exception):
raise f
folders_map[f.id] = f
for f in SingleFolderQuerySet(account=self.account, folder=self).depth(
self.DEFAULT_FOLDER_TRAVERSAL_DEPTH
).all():
if isinstance(f, ErrorAccessDenied):
# We may not have FindFolder access, or GetFolder access, either to this folder or at all
continue
if isinstance(f, Exception):
raise f
if f.id in folders_map:
# Already exists. Probably a distinguished folder
continue
folders_map[f.id] = f
self._subfolders = folders_map
return folders_map
@classmethod
def from_xml(cls, elem, account):
kwargs = cls._kwargs_from_elem(elem=elem, account=account)
cls._clear(elem)
return cls(account=account, **kwargs)
@classmethod
def folder_cls_from_folder_name(cls, folder_name, locale):
"""Returns the folder class that matches a localized folder name.
locale is a string, e.g. 'da_DK'
"""
for folder_cls in cls.WELLKNOWN_FOLDERS + NON_DELETEABLE_FOLDERS:
if folder_name.lower() in folder_cls.localized_names(locale):
return folder_cls
raise KeyError()
def __repr__(self):
# Let's not create an infinite loop when printing self.root
return self.__class__.__name__ + \
repr((self.account, '[self]', self.name, self.total_count, self.unread_count, self.child_folder_count,
self.folder_class, self.id, self.changekey))
class Root(RootOfHierarchy):
"""The root of the standard folder hierarchy"""
DISTINGUISHED_FOLDER_ID = 'root'
WELLKNOWN_FOLDERS = WELLKNOWN_FOLDERS_IN_ROOT
__slots__ = tuple()
@property
def tois(self):
# 'Top of Information Store' is a folder available in some Exchange accounts. It usually contains the
# distinguished folders belonging to the account (inbox, calendar, trash etc.).
return self.get_default_folder(MsgFolderRoot)
def get_default_folder(self, folder_cls):
try:
return super().get_default_folder(folder_cls)
except ErrorFolderNotFound:
pass
        # Try to pick a suitable default folder. We do this by:
        # 1. Searching the full folder list for a folder with the distinguished folder name
        # 2. Searching TOIS for a direct child folder of the same type that is marked as distinguished
        # 3. Searching TOIS for a direct child folder of the same type that has a localized name
        # 4. Searching root for a direct child folder of the same type that is marked as distinguished
        # 5. Searching root for a direct child folder of the same type that has a localized name
log.debug('Searching default %s folder in full folder list', folder_cls)
for f in self._folders_map.values():
# Require exact class to not match e.g. RecipientCache instead of Contacts
if f.__class__ == folder_cls and f.has_distinguished_name:
log.debug('Found cached %s folder with default distinguished name', folder_cls)
return f
# Try direct children of TOIS first. TOIS might not exist.
try:
return self._get_candidate(folder_cls=folder_cls, folder_coll=self.tois.children)
except ErrorFolderNotFound:
            # No candidates, or TOIS does not exist
pass
# No candidates in TOIS. Try direct children of root.
return self._get_candidate(folder_cls=folder_cls, folder_coll=self.children)
def _get_candidate(self, folder_cls, folder_coll):
        # Get the single folder of the same type in folder_coll
same_type = [f for f in folder_coll if f.__class__ == folder_cls]
are_distinguished = [f for f in same_type if f.is_distinguished]
if are_distinguished:
candidates = are_distinguished
else:
candidates = [f for f in same_type if f.name.lower() in folder_cls.localized_names(self.account.locale)]
if candidates:
if len(candidates) > 1:
raise ValueError(
'Multiple possible default %s folders: %s' % (folder_cls, [f.name for f in candidates])
)
if candidates[0].is_distinguished:
log.debug('Found cached distinguished %s folder', folder_cls)
else:
log.debug('Found cached %s folder with localized name', folder_cls)
return candidates[0]
raise ErrorFolderNotFound('No useable default %s folders' % folder_cls)
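# Hedged usage sketch (not a definitive API walkthrough; Calendar stands in
# for any folder class with a DISTINGUISHED_FOLDER_ID):
#
#     calendar = account.root.get_default_folder(Calendar)
#
# The call first tries the cached distinguished folder, then falls back to
# the candidate search in _get_candidate() above.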
class PublicFoldersRoot(RootOfHierarchy):
"""The root of the public folders hierarchy. Not available on all mailboxes"""
DISTINGUISHED_FOLDER_ID = 'publicfoldersroot'
DEFAULT_FOLDER_TRAVERSAL_DEPTH = SHALLOW
supported_from = EXCHANGE_2007_SP1
__slots__ = tuple()
def get_children(self, folder):
# EWS does not allow deep traversal of public folders, so self._folders_map will only populate the top-level
# subfolders. To traverse public folders at arbitrary depth, we need to get child folders on demand.
# Let's check if this folder already has any cached children. If so, assume we can just return those.
children = list(super().get_children(folder=folder))
if children:
# Return a generator like our parent does
for f in children:
yield f
return
# Also return early if the server told us that there are no child folders.
if folder.child_folder_count == 0:
return
children_map = {}
try:
for f in SingleFolderQuerySet(account=self.account, folder=folder).depth(
self.DEFAULT_FOLDER_TRAVERSAL_DEPTH
).all():
if isinstance(f, Exception):
raise f
children_map[f.id] = f
except ErrorAccessDenied:
# No access to this folder
pass
# Let's update the cache atomically, to avoid partial reads of the cache.
self._subfolders.update(children_map)
# Child folders have been cached now. Try super().get_children() again.
for f in super().get_children(folder=folder):
yield f
class ArchiveRoot(RootOfHierarchy):
"""The root of the archive folders hierarchy. Not available on all mailboxes"""
DISTINGUISHED_FOLDER_ID = 'archiveroot'
supported_from = EXCHANGE_2010_SP1
WELLKNOWN_FOLDERS = WELLKNOWN_FOLDERS_IN_ARCHIVE_ROOT
__slots__ = tuple()
|
[
"erik@cederstrand.dk"
] |
erik@cederstrand.dk
|
6a546b976d9ed158aa20c642650b31081a6b898c
|
84ba1585d3ca5600ae9d927e25f4bad6d57eccd1
|
/venv/bin/easy_install
|
703bb8d58d8979807529523f494192020d0d024a
|
[] |
no_license
|
dbernstein1/interpreter
|
3ca5c07eadee18bc4785daa11476d5b462eb8c07
|
7adfe33598360161d886ae17ab79acf280e18efd
|
refs/heads/master
| 2021-07-09T20:03:24.360504
| 2021-04-03T08:52:11
| 2021-04-03T08:52:11
| 239,248,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
#!/Users/danielbernstein/Desktop/python/eval/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"drbernst@ucsc.edu"
] |
drbernst@ucsc.edu
|
|
75ddbccfa5f20c7a20b3ec8f5f32b5782c159153
|
2e188b60c1c7c8ab8c2bad43a7e04b257f1e4235
|
/pixels/utils/bilinear_sampler.py
|
ab7bb670e1d74be2b01421957d10839724da38f4
|
[] |
no_license
|
zhangpf/pixels
|
12410da19fc4eaee4c0c4e37783b76b0af794bc1
|
6f840218b42fe3d178259f00f8e9e45040a2c459
|
refs/heads/master
| 2021-05-06T14:49:37.726185
| 2017-12-07T12:16:59
| 2017-12-07T12:16:59
| 113,446,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,389
|
py
|
import tensorflow as tf
import numpy as np
def bilinear_sampler_1d_h(input_images, x_offset, wrap_mode='border', name='bilinear_sampler', **kwargs):
def _repeat(x, n_repeats):
with tf.variable_scope('_repeat'):
rep = tf.tile(tf.expand_dims(x, 1), [1, n_repeats])
return tf.reshape(rep, [-1])
def _interpolate(im, x, y):
with tf.variable_scope('_interpolate'):
# handle both texture border types
_edge_size = 0
if _wrap_mode == 'border':
_edge_size = 1
im = tf.pad(im, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='CONSTANT')
x = x + _edge_size
y = y + _edge_size
elif _wrap_mode == 'edge':
_edge_size = 0
else:
return None
x = tf.clip_by_value(x, 0.0, _width_f - 1 + 2 * _edge_size)
x0_f = tf.floor(x)
y0_f = tf.floor(y)
x1_f = x0_f + 1
x0 = tf.cast(x0_f, tf.int32)
y0 = tf.cast(y0_f, tf.int32)
x1 = tf.cast(tf.minimum(x1_f, _width_f - 1 + 2 * _edge_size), tf.int32)
dim2 = (_width + 2 * _edge_size)
dim1 = (_width + 2 * _edge_size) * (_height + 2 * _edge_size)
base = _repeat(tf.range(_num_batch) * dim1, _height * _width)
base_y0 = base + y0 * dim2
idx_l = base_y0 + x0
idx_r = base_y0 + x1
im_flat = tf.reshape(im, tf.stack([-1, _num_channels]))
pix_l = tf.gather(im_flat, idx_l)
pix_r = tf.gather(im_flat, idx_r)
weight_l = tf.expand_dims(x1_f - x, 1)
weight_r = tf.expand_dims(x - x0_f, 1)
return weight_l * pix_l + weight_r * pix_r
def _transform(input_images, x_offset):
with tf.variable_scope('transform'):
# grid of (x_t, y_t, 1), eq (1) in ref [1]
            x_t, y_t = tf.meshgrid(tf.linspace(0.0, _width_f - 1.0, _width),
                                   tf.linspace(0.0, _height_f - 1.0, _height))
x_t_flat = tf.reshape(x_t, (1, -1))
y_t_flat = tf.reshape(y_t, (1, -1))
x_t_flat = tf.tile(x_t_flat, tf.stack([_num_batch, 1]))
y_t_flat = tf.tile(y_t_flat, tf.stack([_num_batch, 1]))
x_t_flat = tf.reshape(x_t_flat, [-1])
y_t_flat = tf.reshape(y_t_flat, [-1])
x_t_flat = x_t_flat + tf.reshape(x_offset, [-1]) * _width_f
input_transformed = _interpolate(input_images, x_t_flat, y_t_flat)
output = tf.reshape(
input_transformed, tf.stack([_num_batch, _height, _width, _num_channels]))
return output
with tf.variable_scope(name):
_num_batch = tf.shape(input_images)[0]
_height = tf.shape(input_images)[1]
_width = tf.shape(input_images)[2]
_num_channels = tf.shape(input_images)[3]
_height_f = tf.cast(_height, tf.float32)
_width_f = tf.cast(_width, tf.float32)
_wrap_mode = wrap_mode
output = _transform(input_images, x_offset)
return output
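# Hedged illustration (a hypothetical helper, not part of this module): the
# same left/right weighting used by _interpolate() above, written for a plain
# 1-D NumPy signal. `sig` is a 1-D array and `xs` holds fractional sample
# positions assumed to be clipped to [0, len(sig) - 1].
def _np_bilinear_1d(sig, xs):
    x0 = np.floor(xs).astype(np.int32)        # left neighbour index
    x1 = np.minimum(x0 + 1, len(sig) - 1)     # right neighbour, clamped to the edge
    w_r = xs - x0                             # fractional part -> weight of the right sample
    return (1.0 - w_r) * sig[x0] + w_r * sig[x1]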
# def bilinear_sampler(x, v, resize=False, normalize=False, crop=None, out="CONSTANT"):
# """
# Args:
# x - Input tensor [N, H, W, C]
# v - Vector flow tensor [N, H, W, 2], tf.float32
# (optional)
# resize - Whether to resize v as same size as x
# normalize - Whether to normalize v from scale 1 to H (or W).
# h : [-1, 1] -> [-H/2, H/2]
# w : [-1, 1] -> [-W/2, W/2]
# crop - Setting the region to sample. 4-d list [h0, h1, w0, w1]
# out - Handling out of boundary value.
# Zero value is used if out="CONSTANT".
# Boundary values are used if out="EDGE".
# """
# def _get_grid_array(N, H, W, h, w):
# N_i = tf.range(N)
# H_i = tf.range(h+1, h+H+1)
# W_i = tf.range(w+1, w+W+1)
# n, h, w, = tf.meshgrid(N_i, H_i, W_i, indexing='ij')
# n = tf.expand_dims(n, axis=3) # [N, H, W, 1]
# h = tf.expand_dims(h, axis=3) # [N, H, W, 1]
# w = tf.expand_dims(w, axis=3) # [N, H, W, 1]
# n = tf.cast(n, tf.float32) # [N, H, W, 1]
# h = tf.cast(h, tf.float32) # [N, H, W, 1]
# w = tf.cast(w, tf.float32) # [N, H, W, 1]
# return n, h, w
# shape = tf.shape(x) # TRY : Dynamic shape
# N = shape[0]
# if crop is None:
# H_ = H = shape[1]
# W_ = W = shape[2]
# h = w = 0
# else :
# H_ = shape[1]
# W_ = shape[2]
# H = crop[1] - crop[0]
# W = crop[3] - crop[2]
# h = crop[0]
# w = crop[2]
# if resize:
# if callable(resize) :
# v = resize(v, [H, W])
# else :
# v = tf.image.resize_bilinear(v, [H, W])
# if out == "CONSTANT":
# x = tf.pad(x,
# ((0,0), (1,1), (1,1), (0,0)), mode='CONSTANT')
# elif out == "EDGE":
# x = tf.pad(x,
# ((0,0), (1,1), (1,1), (0,0)), mode='REFLECT')
# vy, vx = tf.split(v, 2, axis=3)
# if normalize :
# vy *= (H / 2)
# vx *= (W / 2)
# n, h, w = _get_grid_array(N, H, W, h, w) # [N, H, W, 3]
# vx0 = tf.floor(vx)
# vy0 = tf.floor(vy)
# vx1 = vx0 + 1
# vy1 = vy0 + 1 # [N, H, W, 1]
# H_1 = tf.cast(H_+1, tf.float32)
# W_1 = tf.cast(W_+1, tf.float32)
# iy0 = tf.clip_by_value(vy0 + h, 0., H_1)
# iy1 = tf.clip_by_value(vy1 + h, 0., H_1)
# ix0 = tf.clip_by_value(vx0 + w, 0., W_1)
# ix1 = tf.clip_by_value(vx1 + w, 0., W_1)
# i00 = tf.concat([n, iy0, ix0], 3)
# i01 = tf.concat([n, iy1, ix0], 3)
# i10 = tf.concat([n, iy0, ix1], 3)
# i11 = tf.concat([n, iy1, ix1], 3) # [N, H, W, 3]
# i00 = tf.cast(i00, tf.int32)
# i01 = tf.cast(i01, tf.int32)
# i10 = tf.cast(i10, tf.int32)
# i11 = tf.cast(i11, tf.int32)
# x00 = tf.gather_nd(x, i00)
# x01 = tf.gather_nd(x, i01)
# x10 = tf.gather_nd(x, i10)
# x11 = tf.gather_nd(x, i11)
# w00 = tf.cast((vx1 - vx) * (vy1 - vy), tf.float32)
# w01 = tf.cast((vx1 - vx) * (vy - vy0), tf.float32)
# w10 = tf.cast((vx - vx0) * (vy1 - vy), tf.float32)
# w11 = tf.cast((vx - vx0) * (vy - vy0), tf.float32)
# output = tf.add_n([w00*x00, w01*x01, w10*x10, w11*x11])
# return output
|
[
"zpfalpc23@gmail.com"
] |
zpfalpc23@gmail.com
|
677f06178ebfb81cb956127b976b669823dee70f
|
e4d33e7ca8a78cd63eda5df0fa8b705d8074954f
|
/strategy.py
|
3afa3ae97b8ed11be66431f85503ef4f9743cc19
|
[] |
no_license
|
henriquekfmaia/option-strategy-python
|
2b550118e610c09f3f319e11cacee095f8522f15
|
b27a0ada3c431552d9f9633b5859cbe6415f2a3c
|
refs/heads/master
| 2020-06-30T21:32:06.581209
| 2019-08-07T03:15:19
| 2019-08-07T03:15:19
| 200,957,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
import numpy as np
from option import Option
class Strategy:
def __init__(self, points = 100, ext_borders=0.05):
self.options = []
self.minimum = 0
self.maximum = 0
self.points = points
self.ext_borders = ext_borders
def add_option(self, side, option_type, strike, price):
option = Option(side, option_type, strike, price)
self.options.append(option)
self.set_min_max(option.minimum, option.maximum)
def set_min_max(self, new_minimum, new_maximum):
if self.minimum == 0:
self.minimum = new_minimum
minimum = min(self.minimum, new_minimum)
maximum = max(self.maximum, new_maximum)
if minimum != self.minimum or maximum != self.maximum:
self.minimum = minimum
self.maximum = maximum
for opt in self.options:
opt.set_min_max(self.minimum, self.maximum, self.ext_borders)
def get_strategy_returns(self, minimum, maximum, points):
strategy_returns = np.zeros(points)
for opt in self.options:
strategy_returns = strategy_returns + opt.get_result_full(minimum, maximum, points)
return strategy_returns
def get_strategy_returns_std(self):
return self.get_strategy_returns(self.minimum, self.maximum, self.points)
def get_market_prices_std(self):
return np.linspace(self.minimum, self.maximum, self.points)
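# Hedged usage sketch (the side/option_type values accepted by Option are
# defined in option.py, so the strings below are guesses for illustration):
#
#     s = Strategy()
#     s.add_option('buy', 'call', 100, 2.5)   # long call
#     s.add_option('sell', 'call', 110, 1.0)  # short call -> bull call spread
#     prices = s.get_market_prices_std()      # x-axis: market prices
#     payoff = s.get_strategy_returns_std()   # y-axis: combined payoff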
|
[
"henrique.maia@poli.ufrj.br"
] |
henrique.maia@poli.ufrj.br
|
5e7458e42777618ee8bed3956b956c1cfa2981cd
|
31bcb70257fa8611a493e03d2c65b5c5c680a8c3
|
/Interpolation.py
|
fe1343e1afdde82187de89c3b33fe03396f4b6fa
|
[] |
no_license
|
Matthijs-utf8/Numerical_mathematics
|
51ad073e01cdc6fb7c395f7478caf98a90f2a464
|
1326dd9eaa1ea6797db6e7fe5fe765df0de1e68a
|
refs/heads/main
| 2023-02-27T02:03:38.100595
| 2021-01-26T11:14:34
| 2021-01-26T11:14:34
| 333,059,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,970
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 16:29:45 2020
@author: Matthijs Schrage
"""
import numpy as np
import sympy as sym
x = sym.Symbol("x")
# Some functions to help us with syntax
def sin(x):
return sym.sin(x)
def cos(x):
return sym.cos(x)
def sqrt(x):
return sym.sqrt(x)
def linear(f, coordinates, target):
    # Assert that we only interpolate between two points
assert len(coordinates) == 2
#This is the formula (hard-coded) for linear interpolation
L_x = ( (target - coordinates[1]) / (coordinates[0] - coordinates[1]) ) * f(coordinates[0]) + ( (target - coordinates[0]) / (coordinates[1] - coordinates[0]) ) * f(coordinates[1])
print("Linear interpolation in " + str(target) + ": " + str(L_x) )
# This function is used in the Lagrangian interpolation to help make the code more compact and flexible
def helper(i):
    # Wrap the index around the three interpolation points
    i = i % 3
    return i
def lagrangian(f, coordinates, target):
coordinates = sorted(coordinates)
L_n = 0
for i in range(len(coordinates)):
#For every coordinate, we calculate the lagrangian interpolation
L_i_n = ( ( (x - coordinates[helper(i+1)]) * (x - coordinates[helper(i+2)] ) ) / ( (coordinates[i] - coordinates[helper(i+1)]) * (coordinates[i] - coordinates[helper(i+2)]) ) ) * f(coordinates[i])
L_n += L_i_n
    # It is not strictly necessary to make a function, but it is just neater
L_n = sym.lambdify(x, L_n)
print("Lagrangian interpolation in " + str(target) + ": " + str(L_n(target) ) )
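# For reference, the general Lagrange interpolation formula for n + 1 points is
#     L_n(x) = sum_i f(x_i) * prod_{j != i} (x - x_j) / (x_i - x_j);
# the three-point expression built above is the n = 2 case.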
fx2 = sym.lambdify(x, x*(-1 + x**2))
# linear(fx2, [2,3], 2.5)
lagrangian(fx2, [-1, 0, 1], -0.5)
def lagr(coordinates, f_values, k, n):
assert len(coordinates) == len(f_values)
assert k in coordinates
coordinates.remove(k)
L_k_n = 1
for i in coordinates:
L_k_n *= (x - i) / (k - i)
print(L_k_n)
# lagr([0,1,2,3], [0.5*np.sqrt(2), np.e, np.pi, 2*np.sqrt(3)], 2, 3)
|
[
"noreply@github.com"
] |
noreply@github.com
|
eccf91200ca22006ec27e2a110af49ed35f9e3e8
|
556db265723b0cc30ad2917442ed6dad92fd9044
|
/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py
|
8ca4e0f796ff15070fe471e1bafd0e2de2eef998
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
graphcore/tensorflow
|
c1669b489be0e045b3ec856b311b3139858de196
|
085b20a4b6287eff8c0b792425d52422ab8cbab3
|
refs/heads/r2.6/sdk-release-3.2
| 2023-07-06T06:23:53.857743
| 2023-03-14T13:04:04
| 2023-03-14T13:48:43
| 162,717,602
| 84
| 17
|
Apache-2.0
| 2023-03-25T01:13:37
| 2018-12-21T13:30:38
|
C++
|
UTF-8
|
Python
| false
| false
| 8,721
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
rng = np.random.RandomState(2016)
@test_util.run_all_in_graph_and_eager_modes
class LinearOperatorZerosTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@staticmethod
def skip_these_tests():
return [
"cholesky",
"cond",
"inverse",
"log_abs_det",
"solve",
"solve_with_broadcast"
]
@staticmethod
def operator_shapes_infos():
shapes_info = linear_operator_test_util.OperatorShapesInfo
return [
shapes_info((1, 1)),
shapes_info((1, 3, 3)),
shapes_info((3, 4, 4)),
shapes_info((2, 1, 4, 4))]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
del ensure_self_adjoint_and_pd
del use_placeholder
shape = list(build_info.shape)
assert shape[-1] == shape[-2]
batch_shape = shape[:-2]
num_rows = shape[-1]
operator = linalg_lib.LinearOperatorZeros(
num_rows, batch_shape=batch_shape, dtype=dtype)
matrix = array_ops.zeros(shape=shape, dtype=dtype)
return operator, matrix
def test_assert_positive_definite(self):
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
with self.assertRaisesOpError("non-positive definite"):
operator.assert_positive_definite()
def test_assert_non_singular(self):
with self.assertRaisesOpError("non-invertible"):
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
operator.assert_non_singular()
def test_assert_self_adjoint(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
self.evaluate(operator.assert_self_adjoint()) # Should not fail
def test_non_scalar_num_rows_raises_static(self):
with self.assertRaisesRegex(ValueError, "must be a 0-D Tensor"):
linalg_lib.LinearOperatorZeros(num_rows=[2])
with self.assertRaisesRegex(ValueError, "must be a 0-D Tensor"):
linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=[2])
def test_non_integer_num_rows_raises_static(self):
with self.assertRaisesRegex(TypeError, "must be integer"):
linalg_lib.LinearOperatorZeros(num_rows=2.)
with self.assertRaisesRegex(TypeError, "must be integer"):
linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=2.)
def test_negative_num_rows_raises_static(self):
with self.assertRaisesRegex(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorZeros(num_rows=-2)
with self.assertRaisesRegex(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=-2)
def test_non_1d_batch_shape_raises_static(self):
with self.assertRaisesRegex(ValueError, "must be a 1-D"):
linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=2)
def test_non_integer_batch_shape_raises_static(self):
with self.assertRaisesRegex(TypeError, "must be integer"):
linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[2.])
def test_negative_batch_shape_raises_static(self):
with self.assertRaisesRegex(ValueError, "must be non-negative"):
linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[-2])
def test_non_scalar_num_rows_raises_dynamic(self):
with self.cached_session():
num_rows = array_ops.placeholder_with_default([2], shape=None)
with self.assertRaisesError("must be a 0-D Tensor"):
operator = linalg_lib.LinearOperatorZeros(
num_rows, assert_proper_shapes=True)
self.evaluate(operator.to_dense())
def test_negative_num_rows_raises_dynamic(self):
with self.cached_session():
n = array_ops.placeholder_with_default(-2, shape=None)
with self.assertRaisesError("must be non-negative"):
operator = linalg_lib.LinearOperatorZeros(
num_rows=n, assert_proper_shapes=True)
self.evaluate(operator.to_dense())
def test_non_1d_batch_shape_raises_dynamic(self):
with self.cached_session():
batch_shape = array_ops.placeholder_with_default(2, shape=None)
with self.assertRaisesError("must be a 1-D"):
operator = linalg_lib.LinearOperatorZeros(
num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
self.evaluate(operator.to_dense())
def test_negative_batch_shape_raises_dynamic(self):
with self.cached_session():
batch_shape = array_ops.placeholder_with_default([-2], shape=None)
with self.assertRaisesError("must be non-negative"):
operator = linalg_lib.LinearOperatorZeros(
num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
self.evaluate(operator.to_dense())
def test_wrong_matrix_dimensions_raises_static(self):
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
x = rng.randn(3, 3).astype(np.float32)
with self.assertRaisesRegex(ValueError, "Dimensions.*not compatible"):
operator.matmul(x)
def test_wrong_matrix_dimensions_raises_dynamic(self):
num_rows = array_ops.placeholder_with_default(2, shape=None)
x = array_ops.placeholder_with_default(rng.rand(3, 3), shape=None)
with self.cached_session():
with self.assertRaisesError("Dimensions.*not.compatible"):
operator = linalg_lib.LinearOperatorZeros(
num_rows, assert_proper_shapes=True, dtype=dtypes.float64)
self.evaluate(operator.matmul(x))
def test_is_x_flags(self):
    # For the zeros operator, is_self_adjoint defaults to True while
    # is_positive_definite and is_non_singular default to False.
operator = linalg_lib.LinearOperatorZeros(num_rows=2)
self.assertFalse(operator.is_positive_definite)
self.assertFalse(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint)
def test_zeros_matmul(self):
operator1 = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator2 = linalg_lib.LinearOperatorZeros(num_rows=2)
self.assertTrue(isinstance(
operator1.matmul(operator2),
linalg_lib.LinearOperatorZeros))
self.assertTrue(isinstance(
operator2.matmul(operator1),
linalg_lib.LinearOperatorZeros))
def test_ref_type_shape_args_raises(self):
with self.assertRaisesRegex(TypeError, "num_rows.cannot.be.reference"):
linalg_lib.LinearOperatorZeros(num_rows=variables_module.Variable(2))
with self.assertRaisesRegex(TypeError, "num_columns.cannot.be.reference"):
linalg_lib.LinearOperatorZeros(
num_rows=2, num_columns=variables_module.Variable(3))
with self.assertRaisesRegex(TypeError, "batch_shape.cannot.be.reference"):
linalg_lib.LinearOperatorZeros(
num_rows=2, batch_shape=variables_module.Variable([2]))
@test_util.run_all_in_graph_and_eager_modes
class LinearOperatorZerosNotSquareTest(
linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
del use_placeholder
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
batch_shape = shape[:-2]
num_rows = shape[-2]
num_columns = shape[-1]
operator = linalg_lib.LinearOperatorZeros(
num_rows, num_columns, is_square=False, is_self_adjoint=False,
batch_shape=batch_shape, dtype=dtype)
matrix = array_ops.zeros(shape=shape, dtype=dtype)
return operator, matrix
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorZerosTest)
linear_operator_test_util.add_tests(LinearOperatorZerosNotSquareTest)
test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
b1a7480c58922ee6eaf4efd5d692c12e94936bcc
|
15114b8f23681403d052063ba6d9362ed628ce57
|
/HexToDecimal.py
|
937d4e031dbd26dbe71803f41ab94ee01dbabb77
|
[] |
no_license
|
BenMiller3/HexToDecimal
|
37aac3317ee3ab2c8dd28b4a3bb5bb4cd0a8402d
|
ccf73957da5fe240397cec11f396734893f3713e
|
refs/heads/master
| 2016-09-01T07:42:35.191098
| 2016-03-31T20:23:31
| 2016-03-31T20:23:31
| 48,472,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,521
|
py
|
def hexToDecimal(num): # Converts a hex number (base 16), given as a string, to its decimal (base 10) counterpart
result = 0 #Declaring the result variable
num = str(num) #Parses the user input to a string
ans = [0]*len(num)
for i in range(len(num)): #Checks all the cases (digits 0-9 and a-f)
if(num[i]==('0')): # There are no case and switch statements in Python so the if-else method suffices
ans[i] = 0 # For 0 it is just equal to 0 (no change)
elif(num[i]==('1')):
x = 2 ** (4*(len(num)-i-1)) # a '1' is represented as a 2 power (incremented by spot on string, and place within string)
ans[i] = x # Sends the final answer to the array ans[] which contains the final result
elif(num[i]==('2')):
x = 2 ** (4*(len(num)-i-1) + 1)
ans[i] = x
elif(num[i]==('3')):
x = (2 ** (4*(len(num)-i-1) + 1) + 2 ** (4*(len(num)-i-1)))
ans[i] = x
elif(num[i]==('4')):
x = 2 ** (4*(len(num)-i-1) + 2)
ans[i] = x
elif(num[i]==('5')):
x = 2 ** (4*(len(num)-i-1) + 2) + 2 ** (4*(len(num)-i-1))
ans[i] = x
elif(num[i]==('6')):
x = 2 ** (4*(len(num)-i-1) + 2) + 2 ** (4*(len(num)-i-1) + 1)
ans[i] = x
elif(num[i]==('7')):
x = 2 ** (4*(len(num)-i-1) + 2) + 2 ** (4*(len(num)-i-1) + 1) + 2 ** (4*(len(num)-i-1))
ans[i] = x
elif(num[i]==('8')):
x = 2 ** (4*(len(num)-i-1) + 3)
ans[i] = x
elif(num[i]==('9')):
x = 2 ** (4*(len(num)-i-1) + 3) + 2 ** (4*(len(num)-i-1))
ans[i] = x
elif(num[i]==('a')):
x = 2 ** (4*(len(num)-i-1) + 3) + 2 ** (4*(len(num)-i-1) + 1)
ans[i] = x
elif(num[i]==('b')):
x = 2 ** (4*(len(num)-i-1) + 3) + 2 ** (4*(len(num)-i-1) + 1) + 2 ** (4*(len(num)-i-1))
ans[i] = x
elif(num[i]==('c')):
x = 2 ** (4*(len(num)-i-1) + 3) + 2 ** (4*(len(num)-i-1) + 2)
ans[i] = x
elif(num[i]==('d')):
x = 2 ** (4*(len(num)-i-1) + 3) + 2 ** (4*(len(num)-i-1) + 2) + 2 ** (4*(len(num)-i-1))
ans[i] = x
elif(num[i]==('e')):
x = 2 ** (4*(len(num)-i-1) + 3) + 2 ** (4*(len(num)-i-1) + 2) + 2 ** (4*(len(num)-i-1) + 1)
ans[i] = x
elif(num[i]==('f')):
x = 2 ** (4*(len(num)-i-1) + 3) + 2 ** (4*(len(num)-i-1) + 2) + 2 ** (4*(len(num)-i-1) + 1) + 2 ** (4*(len(num)-i-1))
ans[i] = x
for i in range(len(num)): #loop through the numbers and append them to result
result = ans[i] + result
return result # Returns the decimal value of the original hex value as an integer (can be parsed)
def main(): # used for testing
run = True
print(" --- Hex to Decimal Converter ---\nType * at any time to quit")
while(run==True):
num = input("\nPlease enter a hex number to be changed to decimal (lowercase): ")
if(num=="*"):
run = False
else:
ans = hexToDecimal(num)
print("\n",num, "as a decimal is: ",ans)
main() # Run the test cases
#-----------------------------------------------------------
def hexToDecimal_2(num):
return int(num,16)
#----------------------------------------------------------
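# Worked check (illustrative): for "1f" the loop above yields 2**4 = 16 for '1'
# and 8 + 4 + 2 + 1 = 15 for 'f', so hexToDecimal("1f") == 31 == int("1f", 16).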
|
[
"benjaminmiller229@gmail.com"
] |
benjaminmiller229@gmail.com
|
e81b437caf42cb4d1816c04793e4a923ab50b9ed
|
3aa891f77e14989bf4034cff2eb55ced5876f8c8
|
/PythonProject/apps/Book/apps.py
|
93f038b480bc135ea4bdfee6e803fe65ac332e9d
|
[] |
no_license
|
shuangyulin/python_django_book
|
ca7a8ee587539f5ac4c4bc2d6bd3ef2b7b8868cd
|
644e1a602fd840149a6c4c6cdea7ef5196f19b59
|
refs/heads/master
| 2023-05-10T19:43:02.631043
| 2023-05-10T05:51:00
| 2023-05-10T05:51:00
| 217,252,803
| 28
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
from django.apps import AppConfig
class BookConfig(AppConfig):
name = 'apps.Book'
|
[
"254540457@qq.com"
] |
254540457@qq.com
|
ecb448a0548acc782921c78e0ddea60c1de6b703
|
b62ab5b2e27a116ca03c8f7fd0291bf9d48310e6
|
/gui_v3/GUI_publisher_ver3_2.py
|
9cfa9026720aaf9bb5b2ec10848d4e55947ef680
|
[] |
no_license
|
ycpiglet/AutonomousDrivingRobot
|
d6b5e0165b26ead6859dc8daf8ad22ff73319450
|
7e84329228a94f0609e9fb0cdb48e473bcc9fd54
|
refs/heads/main
| 2023-07-13T09:34:48.444305
| 2021-08-19T05:44:43
| 2021-08-19T05:44:43
| 370,374,216
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,863
|
py
|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
from std_msgs.msg import Int32
from std_msgs.msg import String
import rospy
import time
import random
import tkinter
from tkinter import messagebox
from tkinter.constants import S
import tkinter.font
from PIL import ImageTk, Image
#---------------------------------------------- ROS Pulbisher Topic ----------------------------------------------
class ROS_topic():
def __init__(self):
rospy.init_node("GUI_Publisher")
self.count = 0
self.msg = ''
def advertise(self):
self.pub1 = rospy.Publisher('message', String, queue_size=20)
self.pub2 = rospy.Publisher('counter', Int32, queue_size=10)
self.rate = rospy.Rate(2)
self.count += 1
'''
while not rospy.is_shutdown():
self.pub1 = rospy.Publisher('counter', Int32, queue_size=10)
self.rate = rospy.Rate(2)
self.pub1.publish(self.msg)
'''
print(" Start Node ")
#---------------------------------------------- ROS MASTER GUI ----------------------------------------------
class ROS_GUI():
# Interrupt
def key_input(self, event):
key = event.keysym
if key == 'space':
print("Pressed Space")
if key == 'Escape':
self.led_off()
self.buzzer_off()
self.camera_off()
self.rqt_off()
self.termPublisher()
print("Reset")
def destroy(self, event):
self.window.destroy()
# Jetson Nano
    def led_on(self):
        topic.pub1.publish('led on')
        self.btnLedOn['state'] = 'disabled'
        self.btnLedOff['state'] = 'active'
        print('LED ON')
def led_off(self):
topic.pub1.publish('led off')
self.btnLedOff['state'] = 'disabled'
self.btnLedOn['state'] = 'active'
print('LED OFF')
def buzzer_on(self):
topic.pub1.publish('buzzer on')
self.btnBuzOn['state'] = 'disabled'
self.btnBuzOff['state'] = 'active'
print('Buzzer ON')
def buzzer_off(self):
topic.pub1.publish('buzzer off')
self.btnBuzOff['state'] = 'disabled'
self.btnBuzOn['state'] = 'active'
print('Buzzer OFF')
def camera_on(self):
topic.pub1.publish('camera on')
self.btnCamOn['state'] = 'disabled'
self.btnCamOff['state'] = 'active'
print('Camera ON')
def camera_off(self):
topic.pub1.publish('camera off')
self.btnCamOff['state'] = 'disabled'
self.btnCamOn['state'] = 'active'
print('Camera OFF')
def rqt_on(self):
topic.pub1.publish('rqt on')
self.btnRqtOn['state'] = 'disabled'
self.btnRqtOff['state'] = 'active'
print('RQT ON')
def rqt_off(self):
topic.pub1.publish('rqt off')
self.btnRqtOff['state'] = 'disabled'
self.btnRqtOn['state'] = 'active'
print('RQT OFF')
def chkState(self):
if self.state == False:
self.btnLedOn['state']='disabled'
self.btnBuzOn['state']='disabled'
self.btnCamOn['state']='disabled'
self.btnRqtOn['state']='disabled'
else:
self.btnLedOn['state']='active'
self.btnBuzOn['state']='active'
self.btnCamOn['state']='active'
self.btnRqtOn['state']='active'
    def alt_on(self):  # warning signal
self.label_face2['text'] = '1'
self.window.configure(bg='red')
self.label['bg'] = 'red'
self.label1['bg'] = 'red'
self.label_face1['bg'] = 'red'
self.label_face2['bg'] = 'red'
self.btnAltOn['state'] = 'disabled'
self.btnAltOff['state'] = 'active'
print('Alert ON')
def alt_off(self):
self.label_face2['text'] = '0'
self.window.configure(bg='skyblue')
self.label['bg'] = 'skyblue'
self.label1['bg'] = 'skyblue'
self.label_face1['bg'] = 'skyblue'
self.label_face2['bg'] = 'skyblue'
self.btnAltOn['state'] = 'active'
self.btnAltOff['state'] = 'disabled'
        print('Alert OFF')
# ROS Node
def initPublisher(self):
topic.advertise()
self.state = True
self.chkState()
self.btnInitPub['state'] = 'disabled'
self.btnTermPub['state'] = 'active'
print('Initiate Publisher')
def termPublisher(self):
self.led_off()
self.buzzer_off()
self.camera_off()
self.rqt_off()
topic.pub1.unregister()
topic.pub2.unregister()
self.state = False
self.chkState()
self.btnInitPub['state'] = 'active'
self.btnTermPub['state'] = 'disabled'
print('Terminate Publisher')
def initSubscriber(self):
print('Initiate Subscriber')
def termSubscriber(self):
        print('Terminate Subscriber')
# Button Control
def pushed_buzzer_on(self):
self.buzzer_on()
def pushed_buzzer_off(self):
self.buzzer_off()
def pushed_led_on(self):
self.led_on()
def pushed_led_off(self):
self.led_off()
def pushed_btnInitPub(self):
self.initPublisher()
def pushed_btnTermPub(self):
msg_term = messagebox.askyesno(title="Warning", message="Do you want to terminate the Publisher?")
if msg_term == True:
self.termPublisher()
else:
pass
def pushed_CamOn(self):
self.camera_on()
def pushed_CamOff(self):
self.camera_off()
def pushed_RqtOn(self):
self.rqt_on()
def pushed_RqtOff(self):
self.rqt_off()
def pushed_AltOn(self):
self.alt_on()
def pushed_AltOff(self):
self.alt_off()
# GUI Setup
def setupWindow(self):
self.window = tkinter.Tk()
self.window.title("ROS GUI Ver.3.2")
self.window.geometry("600x800")
self.window.configure(bg='skyblue')
self.window.resizable(False, False)
def setupCanvas(self):
self.canvas = tkinter.Canvas(width=600, height=200, bg='white')
self.canvas.pack()
def setupFont(self):
self.fontStyle1 = tkinter.font.Font(self.window, size=24, weight='bold', family='Consoles')
self.fontStyle2 = tkinter.font.Font(self.window, size=12, weight='bold', family='Consoles')
self.fontStyle3 = tkinter.font.Font(self.window, size=12, weight='normal', family='Consoles')
def setupLabel(self):
# self.label = tkinter.Label(text="ROS GUI for Topic", font=self.fontStyle1, bg='skyblue')
# self.label.place(x=150, y=600)
self.label = tkinter.Label(self.frameBlank3, text="ROS GUI for Topic", font=self.fontStyle1, bg='skyblue')
self.label1 = tkinter.Label(self.frameBlank1, text="Message Control", font=self.fontStyle1, bg='skyblue')
self.label_face1 = tkinter.Label(self.frameBlank4, text="Face Detection : ", font=self.fontStyle2, bg='skyblue')
self.label_face2 = tkinter.Label(self.frameBlank4, text="0", font=self.fontStyle2, bg='skyblue')
self.label.pack()
self.label1.pack()
self.label_face1.pack(side='left')
self.label_face2.pack(side='left')
def setupFrame(self):
self.frame = tkinter.Frame(self.window)
self.frame1 = tkinter.Frame(self.window)
self.frame2 = tkinter.Frame(self.window)
self.frame3 = tkinter.Frame(self.window)
self.frame4 = tkinter.Frame(self.window)
self.frame5 = tkinter.Frame(self.window)
self.frame6 = tkinter.Frame(self.window)
self.frameBlank1 = tkinter.Frame(self.canvas, height=30)
self.frameBlank2 = tkinter.Frame(self.window, height=30)
self.frameBlank3 = tkinter.Frame(self.window, height=30)
self.frameBlank4 = tkinter.Frame(self.window, height=30)
self.frameBlank1.pack()
self.frame.pack()
self.frameBlank2.pack()
self.frame1.pack()
self.frame2.pack()
self.frame3.pack()
self.frame4.pack()
self.frame5.pack()
self.frameBlank3.pack()
self.frame6.pack()
self.frameBlank4.pack()
def setupText(self):
self.text = tkinter.Text(self.frame, width=52, height=5)
self.text.configure(font=self.fontStyle3)
self.text.pack()
def setupButton(self):
# frame
self.btnRead = tkinter.Button(self.frame, width=20, height=2, text="Read", command=self.getTextInput, font=self.fontStyle2, state='active')
self.btnErase = tkinter.Button(self.frame, width=20, height=2, text="Erase", command=self.eraseTextInput, font=self.fontStyle2, state='disabled')
self.btnRead.pack(side='left')
self.btnErase.pack(side='left')
# frame1
self.btnInitPub = tkinter.Button(self.frame1, width=20, height=2, text='Initiate Publisher', command=self.pushed_btnInitPub, font=self.fontStyle2, state='active')
self.btnTermPub = tkinter.Button(self.frame1, width=20, height=2, text='Terminate Publisher', command=self.pushed_btnTermPub, font=self.fontStyle2, state='disabled')
self.btnInitPub.pack(side='left')
self.btnTermPub.pack(side='left')
# frame2
self.btnLedOn = tkinter.Button(self.frame2, width=20, height=2, text='LED ON', command=self.pushed_led_on, font=self.fontStyle2, state='disabled')
self.btnLedOff = tkinter.Button(self.frame2, width=20, height=2, text='LED OFF', command=self.pushed_led_off, font=self.fontStyle2, state='disabled')
self.btnLedOn.pack(side='left')
self.btnLedOff.pack(side='left')
# frame3
self.btnBuzOn = tkinter.Button(self.frame3, width=20, height=2, text='Buzzer ON', command=self.pushed_buzzer_on, font=self.fontStyle2, state='disabled')
self.btnBuzOff = tkinter.Button(self.frame3, width=20, height=2, text='Buzzer OFF', command=self.pushed_buzzer_off, font=self.fontStyle2, state='disabled')
self.btnBuzOn.pack(side='left')
self.btnBuzOff.pack(side='left')
# frame4
self.btnCamOn = tkinter.Button(self.frame4, width = 20, height=2, text='Camera On', command=self.pushed_CamOn, font=self.fontStyle2, state='disabled')
self.btnCamOff = tkinter.Button(self.frame4, width = 20, height=2, text='Camera Off', command=self.pushed_CamOff, font=self.fontStyle2, state='disabled')
self.btnCamOn.pack(side='left')
self.btnCamOff.pack(side='left')
# frame5
        self.btnRqtOn = tkinter.Button(self.frame5, width = 20, height=2, text='RQT Graph On', command=self.pushed_RqtOn, font=self.fontStyle2, state='disabled')
self.btnRqtOff = tkinter.Button(self.frame5, width = 20, height=2, text='RQT Graph Off', command=self.pushed_RqtOff, font=self.fontStyle2, state='disabled')
self.btnRqtOn.pack(side='left')
self.btnRqtOff.pack(side='left')
        # frame6
self.btnAltOn = tkinter.Button(self.frame6, width = 20, height=2, text='Alert On', command=self.pushed_AltOn, font=self.fontStyle2, state='active')
self.btnAltOff = tkinter.Button(self.frame6, width = 20, height=2, text='Alert Off', command=self.pushed_AltOff, font=self.fontStyle2, state='disabled')
self.btnAltOn.pack(side='left')
self.btnAltOff.pack(side='left')
def setupMenuBar(self):
        # Main Frame
self.menubar = tkinter.Menu(self.window)
# File
self.filemenu = tkinter.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label="File", menu=self.filemenu)
self.filemenu.add_command(label="Open")
self.filemenu.add_command(label="Save")
self.filemenu.add_command(label="Exit")
# Info
self.info = tkinter.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label="Info", menu=self.info)
self.info.add_command(label="Who", command=self.msg_who)
self.info.add_command(label='Version', command=self.msg_version)
def msg_who(self):
messagebox.showinfo(title="Inventor", message=" 한국산업기술대학교 \n 메카트로닉스공학과 \n 정윤철")
def msg_version(self):
messagebox.showinfo(title="Version", message=" ROS GUI Version 3.2 ")
def getTextInput(self):
self.input = self.text.get("1.0", 'end-1c')
        if self.btnTermPub['state'] == 'disabled':
            messagebox.showwarning("Message Control", " Publisher Doesn't Work!! ")
        else:
            topic.pub1.publish(self.input)
            if self.input == '':
                messagebox.showwarning("Message Control", " Write Anything!! ")
self.btnErase['state'] = 'disabled'
else:
self.btnErase['state'] = 'active'
print(self.input)
def eraseTextInput(self):
self.text.delete("1.0", 'end-1c')
self.btnErase['state'] = 'disabled'
# main
def main(self):
# self.window.after(300,self.main)
print(2)
time.sleep(1)
# Class Constructor
def __init__(self):
self.state = False
# Setup GUI
self.setupWindow()
self.setupCanvas()
self.setupFrame()
self.setupFont()
self.setupLabel()
self.setupText()
self.setupButton()
self.setupMenuBar()
# Flag
self.flag_buzzer = False
self.flag_led = False
self.flag_publisher = False
self.flag_subscriber = False
self.window.config(menu=self.menubar)
self.window.bind('<Key>',self.key_input)
self.window.bind('<Control-c>', self.destroy)
self.window.mainloop()
#---------------------------------------------- main ----------------------------------------------
topic = ROS_topic()
gui = ROS_GUI()
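# Note: rospy.init_node() above requires a running ROS master (roscore); start
# one first, then press "Initiate Publisher" to create the 'message' and
# 'counter' topics before using the device buttons.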
#==== /sstb_backup.py -- repo: yihnanc/cloud_infra_hw3 ====
from memtb import Memtable
from bloom_filter import BloomFilter
import csv
import os
table_size = 10
class SSTable:
def __init__(self, memtable, level, id):
if not isinstance(memtable, Memtable):
print("Should use memtable to initialize the sstable")
self.mem = None
else:
self.mem = memtable
self.bloom = BloomFilter()
self.data_name = 'data_' + str(level) + '_' + str(id) + '.csv'
self.index_name = 'index_' + str(level) + '_' + str(id) + '.csv'
self.file = open(self.data_name, 'w+')
self.index = open(self.index_name, 'w+')
self.dataid = {}
self.size = memtable.size()
self.min = ""
self.max = ""
length = 0
length_key = ""
#print("fucking length:%d" %(memtable.size()))
header = []
for key, values in self.mem:
tmp = values.keys()
if len(tmp) > length:
length = len(tmp)
                header = list(tmp)
length_key = key
#record the place of each column
count = 0
for val in header:
self.dataid[val] = count
count = count + 1
cursor = csv.writer(self.file)
index_cursor = csv.writer(self.index)
#Write csv header
cursor.writerow(header)
for key, values in self.mem:
lst = [""] * length
self.bloom.add(key)
# put the data to the corresponding column
for kv in values.keys():
lst[self.dataid[kv]] = values[kv]
#Record the offset of data block
file_pos = []
file_pos.append(key)
file_pos.append(str(self.file.tell()))
if self.min == "":
self.min = key
self.max = key
else:
if (key < self.min):
self.min = key
elif (key > self.max):
self.max = key
index_cursor.writerow(file_pos)
cursor.writerow(lst)
self.file.close()
self.index.close()
def merge(self, merge_table):
#print("min:%s, max:%s" %(self.min, self.max))
set1 = set()
with open(merge_table.index_name, 'r') as file:
lines = [line.strip() for line in file]
merge_file = open(self.data_name, 'r+')
append = []
for line in lines:
#Get the file offset of each key
key = line.split(',')[0]
updated_idx = int(line.split(',')[1])
if (key in self.bloom):
set1.add(key)
input_file = open(merge_table.data_name, 'r')
#label name of merged file
label = input_file.readline()
label_arr = label.rstrip().split(',')
lst = [""] * len(label_arr)
input_file.seek(updated_idx)
updated_value = input_file.readline()
updated_arr = updated_value.rstrip().split(',')
#put the merged data into corresponding label(the place of the label of two sstable may be different)
for i in range(len(label_arr)):
lst[self.dataid[label_arr[i]]] = updated_arr[i]
idx = self.findIdx(self.index, key)
merge_file.seek(idx)
print(key)
csvwt = csv.writer(merge_file)
csvwt.writerow(lst)
input_file.close()
return set1
    def update(self, allset):
        keyset = []
        valueset = []
        for key, values in self.mem:
            if (key not in allset):
                keyset.append(key)
                valueset.append(values)
        return keyset, valueset
def append(self, keyset, valueset):
file = open(self.data_name, 'r')
length = len(file.readline().split(','))
file.close()
for i in range(len(valueset)):
for ky in valueset[i].keys():
if (self.dataid.get(ky) == None):
return False
for i in range(len(keyset)):
lst = [""] * length
#column name is valueset
for ky in valueset[i].keys():
lst[self.dataid[ky]] = valueset[i][ky]
file = open(self.data_name, 'a')
index = open(self.index_name, 'a')
pos = file.tell()
data_csvwt = csv.writer(file)
data_csvwt.writerow(lst)
#Write index
index_row = []
index_row.append(keyset[i])
index_row.append(str(pos))
index_csvwt = csv.writer(index)
index_csvwt.writerow(index_row)
file.close()
index.close()
self.size = self.size + len(keyset)
return True
def findIdx(self, file, key):
fd = open(self.index_name, 'r')
lst = [line.rstrip('\n') for line in fd]
start = 0
end = len(lst) - 1
while (start <= end):
mid = start + int((end - start) / 2)
arr = lst[mid].split(',')
if (arr[0] == key):
fd.close()
return int(arr[1])
elif (arr[0] < key):
start = mid + 1
else:
end = mid - 1
    def getKeyRange(self):
        return self.min, self.max
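# Minimal usage sketch (illustrative only -- assumes a Memtable that yields
# (key, {column: value}) pairs when iterated, which is what __init__ relies on;
# the put() call shown is a hypothetical Memtable API):
#   mem = Memtable()
#   mem.put('k1', {'col_a': 'v1'})
#   sst = SSTable(mem, level=0, id=0)   # writes data_0_0.csv and index_0_0.csv
#   lo, hi = sst.getKeyRange()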
#==== /www/pymonitor.py -- repo: jsqwe5656/awesome-py3-webapp ====
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os,sys,time,subprocess
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
def log(s):
print('[Monitor] %s' %s)
class MyFileSystemEventHandler(FileSystemEventHandler):
    def __init__(self, fn):
        super(MyFileSystemEventHandler, self).__init__()
self.restart = fn
def on_any_event(self, event):
if event.src_path.endswith('.py'):
log('Python source file changed: %s' % event.src_path)
self.restart()
command = ['echo', 'ok']
process = None
def kill_process():
global process
if process:
log('Kill process [%s]...' % process.pid)
process.kill()
process.wait()
log('Process ended with code %s.' % process.returncode)
process = None
def start_process():
global process, command
log('Start process %s...' % ' '.join(command))
process = subprocess.Popen(command, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
def restart_process():
kill_process()
start_process()
def start_watch(path, callback):
observer = Observer()
    observer.schedule(MyFileSystemEventHandler(restart_process), path, recursive=True)
observer.start()
log('Watching directory %s...' % path)
start_process()
try:
while True:
time.sleep(0.5)
except KeyboardInterrupt:
observer.stop()
observer.join()
if __name__ == '__main__':
argv = sys.argv[1:]
if not argv:
print('Usage: ./pymonitor your-script.py')
exit(0)
if argv[0] != 'python3':
argv.insert(0, 'python3')
command = argv
path = os.path.abspath('.')
start_watch(path, None)
#==== /contrib/userproperty_lint.py -- repo: Khan/khan-linter (Apache-2.0) ====
"""Linter that warns about using the dangerous UserProperty.
UserProperty's user_id value can change depending on whether or not Google
currently has a Google account registered w/ an email address that matches
UserProperty's email property. That means when a user changes email settings
in their Google account it can change the behavior of our queries. We don't
want that.
"""
from __future__ import absolute_import
import re
from shared.testutil import lintutil
# This captures any use of UserProperty on a db or ndb model. It will not
# capture subclasses of UserProperty, but we don't expect any of those to be
# around.
_USERPROPERTY_RE = re.compile(r'\bn?db\.UserProperty\(', re.DOTALL)
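# For example (illustrative), this pattern matches a line such as:
#     user = db.UserProperty()
# and the lint below skips the match only when '@Nolint' appears later on
# that same line.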
def lint_no_user_property(files_to_lint):
"""Enforce that nobody uses UserProperty.
...unless marked as an explicitly approved legacy usage via @Nolint.
"""
files_to_lint = lintutil.filter(files_to_lint, suffix='.py')
for filename in files_to_lint:
contents = lintutil.file_contents(filename)
for fn_match in _USERPROPERTY_RE.finditer(contents):
# Make sure there's no @Nolint anywhere around this function.
newline = contents.find('\n', fn_match.end())
newline = newline if newline > -1 else len(contents)
if '@Nolint' in contents[fn_match.start():newline]:
continue
linenum = 1 + contents.count('\n', 0, fn_match.start())
yield (filename, linenum, # filename and linenum
"Do not use UserProperty, it is not safe. Use UserData's "
"key as its foreign key, instead.")
#==== /webcam_demo.py -- repo: AksultanMukhanbet/proctoring_intellectual_part (MIT) ====
import torch
import cv2
import time
import argparse
import posenet
import requests
import datetime as dt
import mysql.connector as msql
mydb = msql.connect(host="192.168.12.3", user="netuser", passwd='987', database='proctoring')
mycursor = mydb.cursor()
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default=0)
args = parser.parse_args()
def frame_skip(cap, n_skip):
for _ in range(n_skip):
cap.grab()
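# Note: video filenames are assumed to encode a 7-digit barcode followed by a
# 5-digit quiz id, which is what the [:7] and [7:12] slices below extract
# (the naming scheme is inferred from the code, not documented upstream).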
def insert_db(video_path, log_type, video_time):
barcode, quiz_id = video_path.split("\\")[-1][:7], video_path.split("\\")[-1][7:12]
currentTime = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
sql = "INSERT INTO `video_logs` (`datetime`, `video_name`, `basr_code`, `quiz_id`, `log_type`, `log_time`) VALUES (%s,%s,%s,%s,%s,%s);"
val = (str(currentTime), str(video_path), int(barcode), int(quiz_id), str(log_type), str(video_time))
mycursor.execute(sql, val)
mydb.commit()
def post_request_json_data(video_path, log_type, video_time):
barcode, quiz_id = video_path.split("\\")[-1][:7], video_path.split("\\")[-1][7:12]
json_data = {"barcode": int(barcode), "quiz_id": int(quiz_id), "suspicious_type": int(log_type), "video_ref": str(video_path.split("\\")[-1]), "time": int(video_time)}
r = requests.post('http://192.168.12.16/proctoringadmin/api/Suspicious/insert_video_action', json=json_data)
def main(video_path):
try:
__ = (int(video_path.split("\\")[-1][:7]), int(video_path.split("\\")[-1][7:12]))
model = posenet.load_model(50)
model = model.cuda()
output_stride = model.output_stride
cap = cv2.VideoCapture(video_path)
cap.set(3, 640)
cap.set(4, 480)
video_fps = cap.get(cv2.CAP_PROP_FPS)
video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
timer = 0
n_skip = 30
frame_count = 0
start = time.time()
prev_frame_time = 0
new_frame_time = 0
no_people = 0
many_people = 0
turn_head = 0
video_time = ''
video_type = ''
while video_length > timer:
frame_skip(cap, n_skip)
input_image, display_image, output_scale = posenet.read_cap(
cap, scale_factor=0.4, output_stride=output_stride)
with torch.no_grad():
input_image = torch.Tensor(input_image).cuda()
heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = model(input_image)
pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multiple_poses(
heatmaps_result.squeeze(0),
offsets_result.squeeze(0),
displacement_fwd_result.squeeze(0),
displacement_bwd_result.squeeze(0),
output_stride=output_stride,
max_pose_detections=2,
min_pose_score=0.15)
keypoint_coords *= output_scale
# TODO this isn't particularly fast, use GL for drawing and display someday...
overlay_image = posenet.draw_skel_and_kp(
display_image, pose_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.15, min_part_score=0.1)
try:
if keypoint_scores[:][0:3].mean() == 0 and no_people == 0:
second = timer/video_fps
if second < 20:
pass
else:
print(round(second//60), ':', round(second%60), 'No people')
video_time = str(round(second//3600)) + ':' + str(round(second//60)%60) + ':' + str(round(second%60))
no_people = 1
post_request_json_data(video_path, 11, second)
elif keypoint_scores[:][0:3].mean() > 0. and ((keypoint_scores[0][3] > 0.8 and keypoint_scores[0][4] > 0.8) or (keypoint_scores[1][3] > 0.8 and keypoint_scores[1][4] > 0.8)):
no_people = 0
if keypoint_scores[1].mean() > 0.5 and many_people == 0:
second = timer/video_fps
if second < 20:
pass
else:
print(round(second//60), ':', round(second%60), 'Many people')
video_time = str(round(second//3600)) + ':' + str(round(second//60)%60) + ':' + str(round(second%60))
many_people = 1
post_request_json_data(video_path, 10, second)
elif keypoint_scores[1].sum() == 0:
many_people = 0
                if (keypoint_scores[0][3] < 0.1 or keypoint_scores[0][4] < 0.1) and turn_head == 0 and no_people == 0:
second = timer/video_fps
if second < 20:
pass
else:
print(round(second//60), ':', round(second%60), 'Turn Head')
video_time = str(round(second//3600)) + ':' + str(round(second//60)%60) + ':' + str(round(second%60))
turn_head = 1
post_request_json_data(video_path, 9, second)
elif (keypoint_scores[0][3] > 0.8 and keypoint_scores[0][4] > 0.8):
turn_head = 0
except:
pass
new_frame_time = time.time()
if frame_count%30 == 0:
fps = round(30/(new_frame_time-prev_frame_time), 2)
prev_frame_time = new_frame_time
cv2.putText(overlay_image, 'FPS: ' + str(fps), (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 100, 0), 1, cv2.LINE_AA)
cv2.imshow('posenet', overlay_image)
frame_count += 1
timer += n_skip
# id, datetime, video_name, video_log_type, video_log_time
if cv2.waitKey(1) & 0xFF == ord('q'):
break
except:
pass
try:
print('Average FPS: ', frame_count / (time.time() - start))
print('Time: ', time.time() - start)
except:
pass
if __name__ == "__main__":
main(args.path)
#==== /perturb_prem.py -- repo: samhaug/ScS_reverb_setup ====
|
#!/home/samhaug/anaconda2/bin/python
import numpy as np
def write_output(prem_array,title):
with open(title+'.tvel','a') as f:
f.write('prem_5km.tvel P\n')
f.write('prem_5km.tvel S\n')
np.savetxt(f,prem_array,fmt='%.3f')
perturb = np.round(np.linspace(-0.3,0.3,num=20),3)
print len(perturb)
for ii in perturb:
prem = np.genfromtxt('./models/prem.tvel',skip_header=2)
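    # scale the velocity/density columns (cols 1+) of layers 2..39 by (1+ii),
    # i.e. perturbations of up to +/-30% in 20 steps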
prem[2:40,1::] *= 1+ii
write_output(prem,'prem_'+str(ii))
#==== /Udacity-Intro-To-Computer-Science/Lesson 1/Lesson 1 - Quizzes/Final Quiz.py -- repo: Brian-Mascitello/UCB-Third-Party-Classes ====
# Write Python code that assigns to the
# variable url a string that is the value
# of the first URL that appears in a link
# tag in the string page.
# Your code should print http://udacity.com
# Make sure that if page were changed to
# page = '<a href="http://udacity.com">Hello world</a>'
# that your code still assigns the same value to the variable 'url',
# and therefore still prints the same thing.
# page = contents of a web page
page =('<div id="top_bin"><div id="top_content" class="width960">'
'<div class="udacity float-left"><a href="http://udacity.com">')
start_link = page.find('<a href=')
end_link = page.find('>', start_link)
start_position = start_link + len('<a href=') + 1
end_position = end_link - 1
url = page[start_position:end_position]
print(url)
#==== /StudentsRegister/Tests/ControllerTest.py -- repo: dianahas/facultate ====
|
import unittest
from Domain.Discipline import Discipline
from Domain.Exceptions import DisciplineException
from Repository.DisciplineRepository import DisciplineRepository
from Controller.DisciplineController import DisciplineController
from Domain.Student import Student
from Domain.Exceptions import StudentException
from Repository.StudentRepository import StudentRepository
from Controller.StudentController import StudentController
from Repository.GradeRepository import GradeRepository
from Controller.UndoController import UndoController
class DisciplineControllerTestCase(unittest.TestCase):
'''
unit test for DisciplineController
'''
def setUp(self):
disRepo = DisciplineRepository()
graRepo = GradeRepository()
undoCtrl = UndoController()
self.ctrl = DisciplineController(disRepo, graRepo, undoCtrl)
d1 = Discipline("1", "1")
d2 = Discipline("2", "2")
self.ctrl.addDiscipline(d1)
self.ctrl.addDiscipline(d2)
def testAddDiscipline(self):
d = Discipline("3", "3")
self.ctrl.addDiscipline(d)
self.assertEqual(len(self.ctrl), 3)
self.assertRaises(DisciplineException, self.ctrl.addDiscipline, d)
def testFindDisciplineByTeacher(self):
d = self.ctrl.findDisciplineByTeacher("2")
self.assertEqual(d, [Discipline("2", "2")])
d = self.ctrl.findDisciplineByTeacher("js")
self.assertEqual(d, [])
self.assertTrue(d == [])
def testUpdateDiscipline(self):
upD = Discipline("1", "3")
self.ctrl.updateDiscipline("1","3")
d = self.ctrl.findDisciplineByTeacher("3")
self.assertEqual(d, [upD])
def testRemoveDiscipline(self):
self.ctrl.removeDiscipline("1")
self.assertEqual(len(self.ctrl), 1)
self.assertRaises(DisciplineException, self.ctrl.removeDiscipline, "3")
def testUndoRedo(self):
d = Discipline("3", "3")
self.ctrl.addDiscipline(d)
self.assertEqual(len(self.ctrl), 3)
self.ctrl.undo()
self.assertEqual(len(self.ctrl), 2)
self.ctrl.undo()
self.assertEqual(len(self.ctrl), 1)
self.ctrl.redo()
self.assertEqual(len(self.ctrl), 2)
self.ctrl.redo()
self.assertEqual(len(self.ctrl), 3)
        # redo past the top of the stack must raise an exception
        with self.assertRaises(Exception):
            self.ctrl.redo()
class StudentControllerTestCase(unittest.TestCase):
'''
unit test for StudentController
'''
def setUp(self):
stuRepo = StudentRepository()
graRepo = GradeRepository()
undoCtrl = UndoController()
self.ctrl = StudentController(stuRepo, graRepo, undoCtrl)
s1 = Student(1, "1")
s2 = Student(2, "2")
self.ctrl.addStudent(s1)
self.ctrl.addStudent(s2)
def testAddStudent(self):
s = Student(3, "3")
self.ctrl.addStudent(s)
self.assertEqual(len(self.ctrl), 3)
self.assertRaises(StudentException, self.ctrl.addStudent, s)
def testFindStudentByName(self):
s = self.ctrl.findStudentByName("2")
self.assertEqual(s, [Student(2, "2")])
s = self.ctrl.findStudentByName("169")
self.assertEqual(s, [])
self.assertTrue(s == [])
def testUpdateStudent(self):
upS = Student(1, "3")
self.ctrl.updateStudent(1,"3")
s = self.ctrl.findStudentByName("3")
self.assertEqual(s, [upS])
def testRemoveStudent(self):
self.ctrl.removeStudent(1)
self.assertEqual(len(self.ctrl), 1)
self.assertRaises(StudentException, self.ctrl.removeStudent, 3)
def testUndoRedo(self):
s = Student(3, "3")
self.ctrl.addStudent(s)
self.assertEqual(len(self.ctrl), 3)
self.ctrl.undo()
self.assertEqual(len(self.ctrl), 2)
self.ctrl.undo()
self.assertEqual(len(self.ctrl), 1)
self.ctrl.redo()
self.assertEqual(len(self.ctrl), 2)
self.ctrl.redo()
self.assertEqual(len(self.ctrl), 3)
        # redo past the top of the stack must raise an exception
        with self.assertRaises(Exception):
            self.ctrl.redo()
if __name__ == '__main__':
unittest.main(verbosity=2)
#==== /tests_api/adminSide/test_edit_gender.py -- repo: mehalyna/CH_096_TAQC ====
import unittest
import allure
from tests_api.testHelper import User
class TestEditGender(unittest.TestCase):
def setUp(self):
self.id = "e02dfd94-a8a9-4b1a-6cfc-08d7a28d1878"
self.name = "Jesus"
self.gender = 2
self.birthday = "2001-06-04"
self.User = User(self.id, self.name, self.gender, self.birthday)
self.base_gender = self.User.get_gender()
@allure.severity(allure.severity_level.NORMAL)
@allure.link(
"http://34.65.101.58:5002/admin/users?page=1",
name='Click me')
def test_edit_g(self):
with allure.step("Edit user gender"):
self.User.edit_gender(self.gender)
self.assertEqual(
self.User.get_gender(),
self.gender,
"Gender has not been changed to:{}".format(self.gender))
def tearDown(self):
with allure.step("Back user gender"):
self.User.edit_gender(self.base_gender)
self.assertEqual(
self.User.get_gender(),
self.base_gender,
"Gender has not been changed to:{}".format(self.base_gender))
if __name__ == '__main__':
unittest.main()
#==== /python/make_ik_from_vmd.py -- repo: khanghugo/PMX-VMD-Scripting-Tools (MIT) ====
# Nuthouse01 - 08/24/2020 - v5.00
# This code is free to use and re-distribute, but I cannot be held responsible for damages that it may or may not cause.
#####################
# NOTES:
# assumes bones are using semistandard names for feet, toes, footIK, toeIK
# assumes toeIK is a child of footIK, and footIK is a child of root (either directly or through footIKparent)
# NOTE: if you are taking positions from one model and forcing them onto another model, it's not gonna be a perfect solution
# scaling or manual adjustment will probably be required, which kinda defeats the whole point of this script...
# first system imports
from typing import List
# second, wrap custom imports with a try-except to catch it if files are missing
try:
from . import nuthouse01_core as core
from . import nuthouse01_vmd_parser as vmdlib
from . import nuthouse01_vmd_struct as vmdstruct
from . import nuthouse01_pmx_parser as pmxlib
from . import nuthouse01_pmx_struct as pmxstruct
except ImportError as eee:
try:
import nuthouse01_core as core
import nuthouse01_vmd_parser as vmdlib
import nuthouse01_vmd_struct as vmdstruct
import nuthouse01_pmx_parser as pmxlib
import nuthouse01_pmx_struct as pmxstruct
except ImportError as eee:
print(eee.__class__.__name__, eee)
print("ERROR: failed to import some of the necessary files, all my scripts must be together in the same folder!")
print("...press ENTER to exit...")
input()
exit()
core = vmdlib = vmdstruct = pmxlib = pmxstruct = None
# when debug=True, disable the catchall try-except block. this means the full stack trace gets printed when it crashes,
# but if launched in a new window it exits immediately so you can't read it.
DEBUG = False
# if this is true, the IK frames are stored as footIK-position + footIK-rotation
# if this is false, the IK frames are stored as footIK-position + toeIK-position
# not sure about the pros and cons of this setting, honestly
# with this true, the footIK-rotation means the arrows on the IK bones change in a sensible way, so that's nice
STORE_IK_AS_FOOT_ONLY = True
# if this is true, an IK-disp frame will be created that enables the IK-following
# if this is false, when this VMD is loaded the IK bones will be moved but the legs won't follow them
# you will need to manually turn on IK for these bones
INCLUDE_IK_ENABLE_FRAME = True
jp_lefttoe = "左つま先"
jp_lefttoe_ik = "左つま先IK"
jp_leftfoot = "左足首"
jp_leftfoot_ik = "左足IK"
jp_righttoe = "右つま先"
jp_righttoe_ik = "右つま先IK"
jp_rightfoot = "右足首"
jp_rightfoot_ik = "右足IK"
jp_left_waistcancel = "腰キャンセル左"
jp_right_waistcancel = "腰キャンセル右"
class Bone:
def __init__(self, name, xinit, yinit, zinit):
self.name = name
self.xinit = xinit
self.yinit = yinit
self.zinit = zinit
self.xcurr = 0.0
self.ycurr = 0.0
self.zcurr = 0.0
self.xrot = 0.0
self.yrot = 0.0
self.zrot = 0.0
def reset(self):
self.xcurr = self.xinit
self.ycurr = self.yinit
self.zcurr = self.zinit
self.xrot = 0.0
self.yrot = 0.0
self.zrot = 0.0
def rotate3d(origin, angle_quat, point_in):
# "rotate around a point in 3d space"
# subtract "origin" to move the whole system to rotating around 0,0,0
point = [p - o for p, o in zip(point_in, origin)]
# might need to scale the point down to unit-length???
# i'll do it just to be safe, it couldn't hurt
length = core.my_euclidian_distance(point)
if length != 0:
point = [p / length for p in point]
# set up the math as instructed by math.stackexchange
p_vect = [0] + point
r_prime_vect = core.my_quat_conjugate(angle_quat)
# r_prime_vect = [angle_quat[0], -angle_quat[1], -angle_quat[2], -angle_quat[3]]
# P' = R * P * R'
# P' = H( H(R,P), R')
temp = core.hamilton_product(angle_quat, p_vect)
p_prime_vect = core.hamilton_product(temp, r_prime_vect)
# note that the first element of P' will always be 0
point = p_prime_vect[1:4]
# might need to undo scaling the point down to unit-length???
point = [p * length for p in point]
# re-add "origin" to move the system to where it should have been
point = [p + o for p, o in zip(point, origin)]
return point
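# (Equivalently: for a unit quaternion R and pure quaternion P = (0, v), the
# rotated vector is the vector part of R * P * R', with R' the conjugate of R;
# the two hamilton_product calls above compute exactly that composition.)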
def build_bonechain(allbones: List[pmxstruct.PmxBone], endbone: str) -> List[Bone]:
nextbone = endbone
buildme = []
while True:
r = core.my_list_search(allbones, lambda x: x.name_jp == nextbone, getitem=True)
if r is None:
core.MY_PRINT_FUNC("ERROR: unable to find '" + nextbone + "' in input file, unable to build parentage chain")
raise RuntimeError()
# 0 = bname, 5 = parent index, 234 = xyz position
nextbone = allbones[r.parent_idx].name_jp
newrow = Bone(r.name_jp, r.pos[0], r.pos[1], r.pos[2])
buildme.append(newrow)
# if parent index is -1, that means there is no parent. so we reached root. so break.
if r.parent_idx == -1:
break
buildme.reverse()
return buildme
helptext = '''=================================================
make_ik_from_vmd:
This script runs forward kinematics for the legs of a model, to calculate where the feet/toes will be and generates IK bone frames for those feet/toes.
This is only useful when the input dance does NOT already use IK frames, such as the dance Conqueror by IA.
** Specifically, if a non-IK dance works well for model X but not for model Y (feet clipping thru floor, etc), this would let you copy the foot positions from model X onto model Y.
** In practice, this isn't very useful... this file is kept around for historical reasons.
The output is a VMD that should be loaded into MMD *after* the original dance VMD is loaded.
Note: does not handle custom interpolation in the input dance VMD, assumes all interpolation is linear.
Note: does not handle models with 'hip cancellation' bones
This requires both a PMX model and a VMD motion to run.
Outputs: VMD file '[dancename]_ik_from_[modelname].vmd' that contains only the IK frames for the dance
'''
def main(moreinfo=True):
# prompt PMX name
core.MY_PRINT_FUNC("Please enter name of PMX input file:")
input_filename_pmx = core.MY_FILEPROMPT_FUNC(".pmx")
pmx = pmxlib.read_pmx(input_filename_pmx, moreinfo=moreinfo)
# get bones
realbones = pmx.bones
# then, make 2 lists: one starting from jp_righttoe, one starting from jp_lefttoe
# start from each "toe" bone (names are known), go parent-find-parent-find until reaching no-parent
bonechain_r = build_bonechain(realbones, jp_righttoe)
bonechain_l = build_bonechain(realbones, jp_lefttoe)
# assert that the bones were found, have correct names, and are in the correct positions
# also verifies that they are direct parent-child with nothing in between
try:
assert bonechain_r[-1].name == jp_righttoe
assert bonechain_r[-2].name == jp_rightfoot
assert bonechain_l[-1].name == jp_lefttoe
assert bonechain_l[-2].name == jp_leftfoot
except AssertionError:
core.MY_PRINT_FUNC("ERROR: unexpected structure found for foot/toe bones, verify semistandard names and structure")
raise RuntimeError()
# then walk down these 2 lists, add each name to a set: build union of all relevant bones
relevant_bones = set()
for b in bonechain_r + bonechain_l:
relevant_bones.add(b.name)
# check if waist-cancellation bones are in "relevant_bones", print a warning if they are
if jp_left_waistcancel in relevant_bones or jp_right_waistcancel in relevant_bones:
# TODO LOW: i probably could figure out how to support them but this whole script is useless so idgaf
core.MY_PRINT_FUNC("Warning: waist-cancellation bones found in the model! These are not supported, tool may produce bad results! Attempting to continue...")
# also need to find initial positions of ik bones (names are known)
# build a full parentage-chain for each leg
bonechain_ikr = build_bonechain(realbones, jp_righttoe_ik)
bonechain_ikl = build_bonechain(realbones, jp_lefttoe_ik)
# verify that the ik bones were found, have correct names, and are in the correct positions
try:
assert bonechain_ikr[-1].name == jp_righttoe_ik
assert bonechain_ikr[-2].name == jp_rightfoot_ik
assert bonechain_ikl[-1].name == jp_lefttoe_ik
assert bonechain_ikl[-2].name == jp_leftfoot_ik
except AssertionError:
core.MY_PRINT_FUNC("ERROR: unexpected structure found for foot/toe IK bones, verify semistandard names and structure")
raise RuntimeError()
# verify that the bonechains are symmetric in length
try:
assert len(bonechain_l) == len(bonechain_r)
assert len(bonechain_ikl) == len(bonechain_ikr)
except AssertionError:
core.MY_PRINT_FUNC("ERROR: unexpected structure found, model is not left-right symmetric")
raise RuntimeError()
# determine how many levels of parentage, this value "t" should hold the first level where they are no longer shared
t = 0
while bonechain_l[t].name == bonechain_ikl[t].name:
t += 1
# back off one level
lowest_shared_parent = t - 1
# now i am completely done with the bones CSV, all the relevant info has been distilled down to:
# !!! bonechain_r, bonechain_l, bonechain_ikr, bonechain_ikl, relevant_bones
core.MY_PRINT_FUNC("...identified " + str(len(bonechain_l)) + " bones per leg-chain, " + str(len(relevant_bones)) + " relevant bones total")
core.MY_PRINT_FUNC("...identified " + str(len(bonechain_ikl)) + " bones per IK leg-chain")
###################################################################################
# prompt VMD file name
core.MY_PRINT_FUNC("Please enter name of VMD dance input file:")
input_filename_vmd = core.MY_FILEPROMPT_FUNC(".vmd")
nicelist_in = vmdlib.read_vmd(input_filename_vmd, moreinfo=moreinfo)
# check if this VMD uses IK or not, print a warning if it does
any_ik_on = False
for ikdispframe in nicelist_in.ikdispframes:
for ik_bone in ikdispframe.ikbones:
if ik_bone.enable is True:
any_ik_on = True
break
if any_ik_on:
core.MY_PRINT_FUNC("Warning: the input VMD already has IK enabled, there is no point in running this script. Attempting to continue...")
# reduce down to only the boneframes for the relevant bones
# also build a list of each framenumber with a frame for a bone we care about
relevant_framenums = set()
boneframe_list = []
for boneframe in nicelist_in.boneframes:
if boneframe.name in relevant_bones:
boneframe_list.append(boneframe)
relevant_framenums.add(boneframe.f)
# sort the boneframes by frame number
boneframe_list.sort(key=lambda x: x.f)
# make the relevant framenumbers also an ascending list
relevant_framenums = sorted(list(relevant_framenums))
boneframe_dict = dict()
# now restructure the data from a list to a dictionary, keyed by bone name. also discard excess data when i do
for b in boneframe_list:
if b.name not in boneframe_dict:
boneframe_dict[b.name] = []
# only storing the frame#(1) + position(234) + rotation values(567)
saveme = [b.f, *b.pos, *b.rot]
boneframe_dict[b.name].append(saveme)
core.MY_PRINT_FUNC("...running interpolation to rectangularize the frames...")
has_warned = False
# now fill in the blanks by using interpolation, if needed
for key,bone in boneframe_dict.items(): # for each bone,
# start a list of frames generated by interpolation
interpframe_list = []
i=0
j=0
while j < len(relevant_framenums): # for each frame it should have,
if i == len(bone):
# if i is beyond end of bone, then copy the values from the last frame and use as a new frame
newframe = [relevant_framenums[j]] + bone[-1][1:7]
interpframe_list.append(newframe)
j += 1
elif bone[i][0] == relevant_framenums[j]: # does it have it?
i += 1
j += 1
else:
# TODO LOW: i could modify this to include my interpolation curve math now that I understand it, but i dont care
if not has_warned:
core.MY_PRINT_FUNC("Warning: interpolation is needed but interpolation curves are not fully tested! Assuming linear interpolation...")
has_warned = True
# if there is a mismatch then the target framenum is less than the boneframe framenum
# build a frame that has frame# + position(123) + rotation values(456)
newframe = [relevant_framenums[j]]
# if target is less than the current boneframe, interp between here and prev boneframe
for p in range(1,4):
# interpolate for each position offset
newframe.append(core.linear_map(bone[i][0], bone[i][p], bone[i-1][0], bone[i-1][p], relevant_framenums[j]))
# rotation interpolation must happen in the quaternion-space
quat1 = core.euler_to_quaternion(bone[i-1][4:7])
quat2 = core.euler_to_quaternion(bone[i][4:7])
# desired frame is relevant_framenums[j] = d
# available frames are bone[i-1][0] = s and bone[i][0] = e
# percentage = (d - s) / (e - s)
percentage = (relevant_framenums[j] - bone[i-1][0]) / (bone[i][0] - bone[i-1][0])
quat_slerp = core.my_slerp(quat1, quat2, percentage)
euler_slerp = core.quaternion_to_euler(quat_slerp)
newframe += euler_slerp
interpframe_list.append(newframe)
j += 1
bone += interpframe_list
bone.sort(key=core.get1st)
# the dictionary should be fully filled out and rectangular now
for bone in boneframe_dict:
assert len(boneframe_dict[bone]) == len(relevant_framenums)
# now i am completely done reading the VMD file and parsing its data! everything has been distilled down to:
# relevant_framenums, boneframe_dict
###################################################################################
# begin the actual calculations
core.MY_PRINT_FUNC("...beginning forward kinematics computation for " + str(len(relevant_framenums)) + " frames...")
# output array
ikframe_list = []
# have list of bones, parentage, initial pos
# have list of frames
# now i "run the dance" and build the ik frames
# for each relevant frame,
for I in range(len(relevant_framenums)):
# for each side,
for (thisik, this_chain) in zip([bonechain_ikr, bonechain_ikl], [bonechain_r, bonechain_l]):
# for each bone in this_chain (ordered, start with root!),
for J in range(len(this_chain)):
# reset the current to be the inital position again
this_chain[J].reset()
# for each bone in this_chain (ordered, start with toe! do children before parents!)
# also, don't read/use root! because the IK are also children of root, they inherit the same root transformations
# count backwards from end to lowest_shared_parent, not including lowest_shared_parent
for J in range(len(this_chain)-1, lowest_shared_parent, -1):
# get bone J within this_chain, translate to name
name = this_chain[J].name
# get bone [name] at index I: position & rotation
try:
xpos, ypos, zpos, xrot, yrot, zrot = boneframe_dict[name][I][1:7]
except KeyError:
continue
# apply position offset to self & children
# also resets the currposition when changing frames
for K in range(J, len(this_chain)):
# set this_chain[K].current456 = current456 + position
this_chain[K].xcurr += xpos
this_chain[K].ycurr += ypos
this_chain[K].zcurr += zpos
# apply rotation offset to all children, but not self
_origin = [this_chain[J].xcurr, this_chain[J].ycurr, this_chain[J].zcurr]
_angle = [xrot, yrot, zrot]
_angle_quat = core.euler_to_quaternion(_angle)
for K in range(J, len(this_chain)):
# set this_chain[K].current456 = current rotated around this_chain[J].current456
_point = [this_chain[K].xcurr, this_chain[K].ycurr, this_chain[K].zcurr]
_newpoint = rotate3d(_origin, _angle_quat, _point)
(this_chain[K].xcurr, this_chain[K].ycurr, this_chain[K].zcurr) = _newpoint
# also rotate the angle of this bone
curr_angle_euler = [this_chain[K].xrot, this_chain[K].yrot, this_chain[K].zrot]
curr_angle_quat = core.euler_to_quaternion(curr_angle_euler)
new_angle_quat = core.hamilton_product(_angle_quat, curr_angle_quat)
new_angle_euler = core.quaternion_to_euler(new_angle_quat)
(this_chain[K].xrot, this_chain[K].yrot, this_chain[K].zrot) = new_angle_euler
pass
pass
# now i have cascaded this frame's pose data down the this_chain
# grab foot/toe (-2 and -1) current position and calculate IK offset from that
# first, foot:
# footikend - footikinit = footikoffset
xfoot = this_chain[-2].xcurr - thisik[-2].xinit
yfoot = this_chain[-2].ycurr - thisik[-2].yinit
zfoot = this_chain[-2].zcurr - thisik[-2].zinit
# save as boneframe to be ultimately formatted for VMD:
# need bonename = (known)
# need frame# = relevantframe#s[I]
# position = calculated
# rotation = 0
# phys = not disabled
# interp = default (20/107)
            # then, foot-angle: just copy the angle that the foot has
if STORE_IK_AS_FOOT_ONLY:
ikframe = [thisik[-2].name, relevant_framenums[I], xfoot, yfoot, zfoot, this_chain[-2].xrot, this_chain[-2].yrot, this_chain[-2].zrot, False]
else:
ikframe = [thisik[-2].name, relevant_framenums[I], xfoot, yfoot, zfoot, 0.0, 0.0, 0.0, False]
ikframe += [20] * 8
ikframe += [107] * 8
# append the freshly-built frame
ikframe_list.append(ikframe)
if not STORE_IK_AS_FOOT_ONLY:
# then, toe:
# toeikend - toeikinit - footikoffset = toeikoffset
xtoe = this_chain[-1].xcurr - thisik[-1].xinit - xfoot
ytoe = this_chain[-1].ycurr - thisik[-1].yinit - yfoot
ztoe = this_chain[-1].zcurr - thisik[-1].zinit - zfoot
ikframe = [thisik[-1].name, relevant_framenums[I], xtoe, ytoe, ztoe, 0.0, 0.0, 0.0, False]
ikframe += [20] * 8
ikframe += [107] * 8
# append the freshly-built frame
ikframe_list.append(ikframe)
# now done with a timeframe for all bones on both sides
# print progress updates
core.print_progress_oneline(I / len(relevant_framenums))
core.MY_PRINT_FUNC("...done with forward kinematics computation, now writing output...")
if INCLUDE_IK_ENABLE_FRAME:
# create a single ikdispframe that enables the ik bones at frame 0
ikbones = [vmdstruct.VmdIkbone(name=jp_rightfoot_ik, enable=True),
vmdstruct.VmdIkbone(name=jp_righttoe_ik, enable=True),
vmdstruct.VmdIkbone(name=jp_leftfoot_ik, enable=True),
vmdstruct.VmdIkbone(name=jp_lefttoe_ik, enable=True)]
ikdispframe_list = [vmdstruct.VmdIkdispFrame(f=0, disp=True, ikbones=ikbones)]
else:
ikdispframe_list = []
core.MY_PRINT_FUNC("Warning: IK following will NOT be enabled when this VMD is loaded, you will need enable it manually!")
# convert old-style bonelist ikframe_list to new object format
ikframe_list = [vmdstruct.VmdBoneFrame(name=r[0], f=r[1], pos=r[2:5], rot=r[5:8], phys_off=r[8], interp=r[9:]) for r in ikframe_list]
# build actual VMD object
nicelist_out = vmdstruct.Vmd(
vmdstruct.VmdHeader(2, "SEMISTANDARD-IK-BONES--------"),
ikframe_list, # bone
[], # morph
[], # cam
[], # light
[], # shadow
ikdispframe_list # ikdisp
)
# write out
output_filename_vmd = "%s_ik_from_%s.vmd" % \
(input_filename_vmd[0:-4], core.get_clean_basename(input_filename_pmx))
output_filename_vmd = core.get_unused_file_name(output_filename_vmd)
vmdlib.write_vmd(output_filename_vmd, nicelist_out, moreinfo=moreinfo)
core.MY_PRINT_FUNC("Done!")
return None
if __name__ == '__main__':
core.MY_PRINT_FUNC("Nuthouse01 - 08/24/2020 - v5.00")
if DEBUG:
# print info to explain the purpose of this file
core.MY_PRINT_FUNC(helptext)
core.MY_PRINT_FUNC("")
main()
core.pause_and_quit("Done with everything! Goodbye!")
else:
try:
# print info to explain the purpose of this file
core.MY_PRINT_FUNC(helptext)
core.MY_PRINT_FUNC("")
main()
core.pause_and_quit("Done with everything! Goodbye!")
except (KeyboardInterrupt, SystemExit):
# this is normal and expected, do nothing and die normally
pass
except Exception as ee:
# if an unexpected error occurs, catch it and print it and call pause_and_quit so the window stays open for a bit
core.MY_PRINT_FUNC(ee.__class__.__name__, ee)
core.pause_and_quit("ERROR: something truly strange and unexpected has occurred, sorry, good luck figuring out what tho")
#==== /doubanmovieSpider/doubanmovieSpider/spiders/doubanbooks.py -- repo: zhushengl/spider ====
# -*- coding: utf-8 -*-
import scrapy
from doubanmovieSpider.items import DoubanmoviespiderItem
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class DoubanbooksSpider(scrapy.Spider):
name = 'doubanbooks'
allowed_domains = ['douban.com']
start_urls = ['http://douban.com/']
start = 0
url = 'https://book.douban.com/top250?start='
end = '&filter='
def start_requests(self):
        # visit the Douban Books Top 250 list
print self.url + str(self.start) + self.end
yield scrapy.Request(self.url + str(self.start) + self.end, callback=self.post_book)
def post_book(self, response):
if response.status == 200:
            print 'Request succeeded'
# print response.body
books = response.xpath("//div[@class='article']//table")
item1 = DoubanmoviespiderItem()
for each in books:
print each
                # use relative XPath ('.//') so each book's own <table> is
                # searched; a bare '//' would re-query the whole document and
                # return the first book's fields for every row
                title = each.xpath(".//td[2]/div/a/text()").extract()
                print title[0]
                content = each.xpath(".//p[1]/text()").extract()
                score = each.xpath(".//div/span[2]/text()").extract()
                info = each.xpath(".//p[2]/span/text()").extract()
item1['title'] = title[0]
                # join all elements of the content list into one string, separated by ';'
item1['content'] = ';'.join(content)
item1['score'] = score[0]
item1['info'] = info[0].strip()
                # yield the populated item
print item1
yield item1
if self.start <= 100:
self.start += 25
print self.url + str(self.start) + self.end
yield scrapy.Request(self.url + str(self.start) + self.end, callback=self.post_book)
else:
            print 'Request failed'
def parse(self, response):
pass
#==== /sqlqueries.py -- repo: utkranti/samdemo ====
CREATE_TABLE = ''' CREATE TABLE empinfo(
e_id integer,
e_nm varchar(50),
e_age integer,
primary key (e_id)
)
'''
INSERT_QUERY = '''insert into empinfo values({},'{}',{})'''
FETCH_SINGLE_QUERY = '''SELECT * FROM empinfo where e_id = {}'''
FETCH_ALL_QUERY = '''SELECT * FROM empinfo'''
UPDATE_QUERY = '''update empinfo set e_nm = '{}', e_age = {} where e_id = {}'''
DELETE_QUERY = '''delete from empinfo where e_id = {}'''
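# Usage sketch (illustrative; 'cursor' and the sample values are hypothetical):
#   cursor.execute(INSERT_QUERY.format(1, 'Alice', 30))
#   cursor.execute(FETCH_SINGLE_QUERY.format(1))
# Note: str.format() performs no escaping, so these templates are only safe
# with trusted values; parameterized queries are preferable for user input.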
#==== /Assignment-18 Binary Matrix.py -- repo: pkenaudekar/NPTEL_python_course_work ====
"""
Given a matrix with N rows and M columns, the task is to check if the matrix is a Binary Matrix. A binary matrix is a matrix in which all the elements are either 0 or 1.
Input Format:
The first line of the input contains two integer number N and M which represents the number of rows and the number of columns respectively, separated by a space.
From the second line, take N lines input with each line containing M integer elements with each element separated by a space.
Output Format:
Print 'YES' or 'NO' accordingly
Example:
Input:
3 3
1 0 0
0 0 1
1 1 0
Output:
YES
"""
#CODE:
N,M = input().split(' ')
binary = "YES"
arr=[]
while True:
try:
num = input().split()
except EOFError:
break
arr.append(num)
for i in range(int(N)):
    for j in range(int(M)):
        if arr[i][j] != "1" and arr[i][j] != "0":
            binary = "NO"
            break
print(binary)
#==== /daos/tools/dao_tools.py -- repo: benharvey1213/csaia_python_p ====
from mysql.connector import connect, Error
def execute(query, *args):
"""
General method for executing queries on the database. Queries can be passed with a list of tuples for multiple
INSERT statements. All non-INSERT queries should be passed preconfigured and
ready to execute.
Parameters
----------
query : Query
The query to be executed
args : list[tuple]
OPTIONAL list of tuples to be inserted
NOTE: only to be passed for INSERT statements
Returns
-------
list[tuple]
This list contains the query results, if any.
        NOTE: This will return None for all non-SELECT queries and SELECT queries that return no results.
"""
    standard_execution = len(args) == 0
try:
with connect(
host="localhost",
# Local dev settings
user="root",
password="Password1234",
database="csaia_database",
) as connection:
ids = []
with connection.cursor() as cursor:
if standard_execution:
cursor.execute(query.get_sql(quote_char=None))
result = cursor.fetchall()
if len(result) > 0:
return result
else:
for arg in args[0]:
cursor.execute(query, arg)
ids.append(cursor.lastrowid)
connection.commit()
return ids
except Error as e:
print(e)
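# Usage sketch (illustrative; assumes pypika-style query objects, since the
# standard path calls query.get_sql(); table and column names are hypothetical):
#   from pypika import Query, Table
#   users = Table('users')
#   rows = execute(Query.from_(users).select(users.star))
#   new_ids = execute("INSERT INTO users (name) VALUES (%s)", [('alice',), ('bob',)])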
#==== /HW1/hw1.py -- repo: mina-jafari/EECS545-machineLearningCourse ====
|
import numpy as np
# This matrix A contains 183 8x8 images of the handwritten digit 3.
# Each image has been reshaped into a length 64 row vector.
A = np.loadtxt("A.csv",delimiter=",")
U, s, V = np.linalg.svd(A, full_matrices=True,compute_uv=True)
print s
#print np.dot(U,U.transpose())
#print V
#print s[:3]
s[3:] = 0
s = np.diag(s)
s.resize(U.shape[0], V.shape[1])
B = np.dot(U, np.dot(s, V))
# TODO: perform SVD on A, zero out all but the top 3 singular values to obtain a
# new matrix B and compute ||A-B||^2
R = A - B
print np.power(np.linalg.norm(R), 2)
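# Sanity check: by the Eckart-Young theorem, B is the closest rank-3 matrix to A
# in the Frobenius norm, and ||A-B||^2 equals the sum of the squared singular
# values that were zeroed out above.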
# B above is the rank-3 reconstruction used for the plots below.
# OPTIONAL: You don't need to turn in the output from the code below.
# But if you're curious to see the result of this modification to
# the data using only three singular values, this code snippet will plot
# each handwritten 3 BEFORE (on the top) and AFTER (on the bottom)
# the modification.
# WARNING: You'll need to have matplotlib installed for this to work!
from matplotlib import pyplot as plt
# How many rows and cols in our figure?
NUM_IMGS_TO_SHOW = 5
NUM_PLOT_ROWS = 2
NUM_PLOT_COLS = NUM_IMGS_TO_SHOW
for ind in range(NUM_IMGS_TO_SHOW):
# The data before and after
before_vec = A[ind,:]
after_vec = B[ind,:]
    # We reshape the data into an 8x8 grid
before_img = np.reshape(before_vec, [8,8])
after_img = np.reshape(after_vec, [8,8])
    # Now let's plot the before and after into two rows:
plt.subplot(NUM_PLOT_ROWS,NUM_PLOT_COLS,ind+1)
plt.imshow(before_img, cmap=plt.cm.gray_r, interpolation='nearest')
plt.subplot(NUM_PLOT_ROWS,NUM_PLOT_COLS,ind + NUM_IMGS_TO_SHOW + 1)
plt.imshow(after_img, cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
|
[
"mjafari@paulzim-imac5.chem.lsa.umich.edu"
] |
mjafari@paulzim-imac5.chem.lsa.umich.edu
|
516da90e9ca482dbc6eff6c95b85d82b59370415
|
3079cc4b6d6c48c92e547a33a00d1bbca314ce17
|
/BipedalWalker/PiRL/ProgramGen/ParseTreeGenerator.py
|
a93f0a35be7e20907b4e5b2fa1a7bee849142ffd
|
[
"MIT"
] |
permissive
|
VAIBHAV-2303/PiRL
|
71f9a6b5b540475ed9c8e2128870bcbd6631a726
|
063aae20cce2bfc2911e7aff59f4a688740c44da
|
refs/heads/main
| 2023-05-09T11:25:34.571635
| 2021-05-20T14:28:03
| 2021-05-20T14:28:03
| 369,163,062
| 13
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,786
|
py
|
from PiRL.Utils.TreeUtils import newTree
from PiRL.DataStructures.Token import NonTerminal, Terminal, Token
from PiRL.DataStructures.RuleTable import Rule
from PiRL import rule_table
from treelib import Tree, Node
import random
__isValidDepth = {}
def checkRuleDepth(rule : Rule, depth : int):
if (rule, depth) in __isValidDepth:
return __isValidDepth[(rule, depth)]
    if depth == 1:
        # base case: a rule fits in depth 1 only if its RHS has no non-terminals
        non_terminals = list(filter(lambda x: isinstance(x, NonTerminal), rule.rhs))
        __isValidDepth[(rule, depth)] = len(non_terminals) == 0
        return __isValidDepth[(rule, depth)]
    for token in rule.rhs:
        if isinstance(token, NonTerminal):
            valid_rules = list(filter(
                lambda r: checkRuleDepth(r, depth - 1),
                rule_table.getAllRules(token)
            ))
            if len(valid_rules) == 0:
                print(rule, depth, token)
                __isValidDepth[(rule, depth)] = False
                break
    else:
        # for/else: mark the rule valid only when no non-terminal exhausted
        # the remaining depth budget
        __isValidDepth[(rule, depth)] = True
    return __isValidDepth[(rule, depth)]
def generate(tree : Tree, parent : Node, token : Token, depth : int):
node = tree.create_node(parent = parent, data = token)
if isinstance(token, Terminal):
return
valid_rules = list(filter(
lambda rule : checkRuleDepth(rule, depth),
rule_table.getAllRules(token)
))
# print(token, valid_rules)
rule = random.choice(valid_rules)
list(map(lambda next_token: generate(tree, node, next_token, depth-1), rule.rhs))
node.data.text = ' '.join(list(map(lambda leaf: leaf.data.name, tree.leaves(node.identifier))))
def getProgTree(start_token, depth):
tree = newTree()
generate(tree, parent=None, token=start_token, depth=depth)
return tree
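# A minimal usage sketch (the start-symbol name is an assumption; requires
# PiRL.rule_table to be populated with a grammar first):
#
#     start = NonTerminal('EXPR')
#     tree = getProgTree(start, depth=5)
#     tree.show(data_property='text')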
|
[
"gargvaibav@gmail.com"
] |
gargvaibav@gmail.com
|
346eeb92d6c058c58b47563e2e81d250d4fbcf8d
|
3d1dab8556b0e5e95875d174c7fa08cea24a1ca1
|
/src/utils.py
|
fe868e32bc20540a9cb16218ea7a2fbdb543ee41
|
[] |
no_license
|
ryota-ohno/interlayer_interaction3
|
8bc2151b9ede4c970e0b0964679e28aa49ef847d
|
499e6375b8a921fc6aa6e2633ea24bc3ba1e9719
|
refs/heads/main
| 2023-08-25T16:41:37.521617
| 2021-10-28T09:49:10
| 2021-10-28T09:49:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,382
|
py
|
import numpy as np
from sklearn.decomposition import PCA
def get_E(path_file):
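    # energies are parsed in Hartree and converted to kcal/mol
    # (1 Hartree = 627.510 kcal/mol)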
with open(path_file,'r') as f:
lines=f.readlines()
lines_E=[]
for line in lines:
if line.find('E(RB3LYP)')>-1 and len(line.split())>5:
lines_E.append(float(line.split()[4])*627.510)
E_list=[lines_E[5*i]-lines_E[5*i+1]-lines_E[5*i+2] for i in range(int(len(lines_E)/5))]
return E_list
def squeeze_min_df_E(df_E,columns=['A1','A2']):
ddf = df_E.groupby(columns)
df_Emin = df_E.loc[ddf['E'].idxmin(),:]
return df_Emin
def get_rot_axis_from_A2(A2,glide_mode):
A2 = np.radians(A2)
if glide_mode=='a':
rot_axis_i = np.array([-np.sin(A2),np.cos(A2),0.])
rot_axis_t = np.array([-np.sin(-A2),np.cos(-A2),0.])
else:
rot_axis_i = np.array([-np.sin(A2),np.cos(A2),0.])
rot_axis_t = np.array([-np.sin(np.pi-A2),np.cos(np.pi-A2),0.])
return rot_axis_i, rot_axis_t
#Ra,Rb,heri/2 --> R1,R2
def convertor_R(Ra,Rb,theta_):
R1=Ra*np.cos(theta_)+Rb*np.sin(theta_)
R2=-Ra*np.sin(theta_)+Rb*np.cos(theta_)
return R1,R2
# rotation matrix for a rotation of theta_in degrees about the axis n
def Rod(n,theta_in):
nx,ny,nz=n
theta_t=np.radians(theta_in)
Rod=np.array([[np.cos(theta_t)+(nx**2)*(1-np.cos(theta_t)),nx*ny*(1-np.cos(theta_t))-nz*np.sin(theta_t),nx*nz*(1-np.cos(theta_t))+ny*np.sin(theta_t)],
[nx*ny*(1-np.cos(theta_t))+nz*np.sin(theta_t),np.cos(theta_t)+(ny**2)*(1-np.cos(theta_t)),ny*nz*(1-np.cos(theta_t))-nx*np.sin(theta_t)],
[nx*nz*(1-np.cos(theta_t))-ny*np.sin(theta_t),ny*nz*(1-np.cos(theta_t))+nx*np.sin(theta_t),np.cos(theta_t)+(nz**2)*(1-np.cos(theta_t))]])
return Rod
def extract_axis(xyz_array):#shape=[n,3]
pca = PCA()
pca.fit(xyz_array)
long_axis = pca.components_[0]
short_axis = pca.components_[1]
return long_axis, short_axis
def heri_to_A3(A1,A2,heri):
N=361
A1=np.radians(A1);A2=np.radians(A2)
ax1=np.array([np.sin(A1)*np.cos(A2),np.sin(A1)*np.sin(A2),np.cos(A1)])
ax2=np.array([np.sin(A1)*np.cos(A2),-np.sin(A1)*np.sin(A2),np.cos(A1)])
heri_list=np.zeros(N,dtype='float64');error_list=np.zeros(N,dtype='float64')
A3_list=np.array([round(A3) for A3 in np.linspace(-180,180,N)])
n1 = np.array([-np.cos(A1)*np.cos(A2),-np.cos(A1)*np.sin(A2),np.sin(A1)])
n2 = np.array([-np.cos(A1)*np.cos(A2),+np.cos(A1)*np.sin(A2),np.sin(A1)])
for i,A3 in enumerate(A3_list):
ex1=np.matmul(Rod(ax1,A3),n1)
ex2=np.matmul(Rod(ax2,-A3),n2)
ex21_cross = np.cross(ex2,ex1)
exS=np.matmul(Rod(ax1,A3-90),n1)
isDirectedToB = exS[1]>0
isOpenHB = ex21_cross[2]>0
heri_abs = np.degrees(np.arccos(np.dot(ex1,ex2)))
        if isOpenHB & isDirectedToB:  # which of the two figure-eight configurations is it, and does it point upward?
heri_list[i] = heri_abs
else:
            heri_list[i] = float('inf')  # mark as an exception instead of computing the tedious case
error_list[i]=abs(heri_list[i]-heri)
idx=np.argsort(error_list);heri_sort=heri_list[idx];A3_sort=A3_list[idx]
A3_1=A3_sort[0]
return A3_1
def R2atom(R):
if R==1.8:
return 'S'
elif R==1.7:
return 'C'
elif R==1.2:
return 'H'
else:
return 'X'
def get_ab_from_params(R1,R2,heri):
A_rad=np.radians(heri/2)
a_=2*(R1*np.cos(A_rad)-R2*np.sin(A_rad))
b_=2*(R2*np.cos(A_rad)+R1*np.sin(A_rad))
return a_, b_
def getA1_from_R3t(a,R3t,glide):
assert glide=='a'
return np.rad2deg(np.arctan(R3t/(a/2)))
def check_calc_status(df_cur,A1,A2,A3,a,b):
try:
return df_cur.loc[
(df_cur['A1']==A1)&
(df_cur['A2']==A2)&
(df_cur['A3']==A3)&
(df_cur['a']==a)&
(df_cur['b']==b), 'status'].values[0] == 'Done'
except IndexError:
return False
def convert_A_df(df):
A1_array = df['A1'].values
A2_array = df['A2'].values
df['A1_new'] = np.degrees(np.arcsin(np.sin(np.radians(A1_array))*np.cos(np.radians(A2_array))))
df['A2_new'] = np.degrees(np.arctan(np.tan(np.radians(A1_array))*np.sin(np.radians(A2_array))))
return df
def convert_A(A1,A2):
A1_new = np.degrees(np.arcsin(np.sin(np.radians(A1))*np.cos(np.radians(A2))))
A2_new = np.degrees(np.arctan(np.tan(np.radians(A1))*np.sin(np.radians(A2))))
return A1_new, A2_new
def invert_A(A1,A2):
A1_old = np.degrees(np.arccos(np.cos(np.radians(A1))*np.cos(np.radians(A2))))
if A1==0:
A2_old = 90 if A2>0 else -90
else:
A2_old = np.degrees(np.arctan(np.sin(np.radians(A2))/np.tan(np.radians(A1))))
def translator_A(_A1_new, _A2_new, _A1_old, _A2_old):
if _A1_new>=0:
return _A1_old, _A2_old
elif _A2_new>0:
return _A1_old, _A2_old+180.0
elif _A2_new==0:
return _A1_old, _A2_old
elif _A2_new<0:
return _A1_old, _A2_old-180.0
A1_old, A2_old = translator_A(A1,A2,A1_old, A2_old)
return A1_old, A2_old
def phi_into_180(phi):
if phi>180:
return phi - 360
elif phi<-180:
return phi + 360
else:
return phi
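if __name__ == '__main__':
    # Quick self-check of Rod (a minimal sketch, no external data needed):
    # rotating the x axis by 90 degrees about z should give the y axis.
    z_axis = np.array([0.0, 0.0, 1.0])
    print(np.matmul(Rod(z_axis, 90.0), np.array([1.0, 0.0, 0.0])))  # ~ [0, 1, 0]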
|
[
"noreply@github.com"
] |
noreply@github.com
|
e0bd0c8393e10d70cd1d7736fc15a898d1f059dc
|
2e858717fbc3b74cc809dc5d60d337a844ae7fed
|
/codegolf/planets.py
|
a4f5b0a908013fcda517843121fbb9b541e6773d
|
[] |
no_license
|
maxbergmark/misc-scripts
|
95a1b5416c34e65b7e8ef26f5c941f9ba0ae0986
|
a1b3b889f8f6d28a452969a62af637a6866b69d3
|
refs/heads/master
| 2020-03-28T10:32:38.362737
| 2019-09-20T12:23:14
| 2019-09-20T12:23:14
| 148,118,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,108
|
py
|
def get_score(l, s):
c = 0
for i, e in enumerate(l):
c += int(e == s[i])
return c
def check_modulo(l, n):
mod = [i%n for i in l]
s = list(set(mod))
if len(s) == len(l):
return 1, get_score(sorted(s), s)
return 0, 0
def check_modulo_sq(l, n):
mod = [(i*i)%n for i in l]
s = list(set(mod))
if len(s) == len(l):
return 1, get_score(sorted(s), s)
return 0, 0
def check_modulo_cu(l, n):
mod = [(i*i*i)%n for i in l]
s = list(set(mod))
if len(s) == len(l):
return 1, get_score(sorted(s), s)
return 0, 0
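# Quick demo of what the search below looks for (a minimal sketch): n = 2
# already separates the two-element list in lt, since 53 % 2 == 1 and
# 104 % 2 == 0 are distinct residues.
print(check_modulo([53, 104], 2))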
l0 = [7238995, 32199698004604234, 121437875888467, 126948200247893, 28550423391528270, 448630251845, 495891408214, 1936875853, 7306076016364904775, 474081421652, 34184320687170893, 8031170932136239427, 28489, 1852796749, 107135534003525, 121424973492820, 478695222352, 1936290373, 107088256000328, 27418995543271764]
l1 = [358452458835, 5899229669892068223989509551434, 100801060862113215052800339, 103298841739860633878360661, 6211190611757106977975624033614, 1279847143846962159941, 1593728898723042190678, 21780717397762381, 370629223365341456449924529812037959, 1557125307789592521044, 6131964786814545525129908217165, 349859873446802695454943217443430723, 4812617, 21796097591570253, 83970509390754835569210693, 102090063924849785520616020, 1483554806647179537488, 19547570626458181, 87502894712962091220033864, 6687802272730805039891221866836]
l2 = [5469550, 20958273942611314, 91678030787182, 93949749261683, 22066581848026725, 297987634280, 371068925299, 1298231923, 5143513717239276645, 362546487662, 21785115176039033, 4855281086163547247, 18799, 1299148654, 76370733396065, 92850372243310, 345417020527, 1165126003, 79583419131233, 23759846615443809]
l3 = [474414806382, 9063409245936133368934076540274, 133522356591788631960941166, 139581022297794930405176691, 8036229759209788198835098840677, 1926852259672153551976, 2129837380648217507187, 32495384557351539, 526458259597464047712858951498687589, 2036164201638295527790, 9622030869291023328877655454329, 578706854677080430464104555890308207, 7293295, 31084771269373806, 117796765384867275302989921, 133508170257748661844078446, 2055980324755107837039, 32485561834039667, 117744782670614057051841889, 7717761131972000546125574465889]
l4 = [7695955, 33060607136195914, 129142996492627, 129138701525333, 33060598512444750, 500135649605, 504447788374, 1936875853, 8750051408287654215, 500068606292, 34187606587958093, 8391173042187362627, 28489, 1869573709, 129134373069125, 128034844600660, 504464632912, 1936877893, 129112712765768, 32772496317047124]
# lt = [l0, l1, l2, l3, l4]
lt = [[53, 104]]
c0 = 0
c1 = 0
c2 = 0
max_score = 0
for i in range(2, 10000000000):
for l in lt:
res = check_modulo(l, i)
# res_sq = check_modulo_sq(l, i)
# res_cu = check_modulo_cu(l, i)
res_sq = 0, 0
res_cu = 0, 0
c0 += res[0]
c1 += res_sq[0]
c2 += res_cu[0]
if i % 10000 == 0:
print("\r%d (%d %d %d)" % (i, c0, c1, c2), end="")
if res[1] > max_score or res_sq[1] > max_score or res_cu[1] > max_score:
print("\n%d %s %s %s %d" % (i, res, res_sq, res_cu, len(l)))
max_score = max(res[1], res_sq[1], res_cu[1])
|
[
"max.bergmark@gmail.com"
] |
max.bergmark@gmail.com
|
d3bc53e5e49699888068463a7e4ba09b13a33a91
|
4f7bb51296e7c19b10c8d5f3616f48aa3510c455
|
/src/scseirx/analysis_functions.py
|
44793661d1768c33cb7de2e60fb3c0ce4113d5ed
|
[
"MIT"
] |
permissive
|
Jaoeya/agent_based_COVID_SEIRX
|
3bf04b337ad749ad740906c278280a82fdd8ec6b
|
596a036fd3b95282ec3b6d4b27d47ecca568e750
|
refs/heads/master
| 2023-06-11T07:48:41.836646
| 2021-07-07T18:16:49
| 2021-07-07T18:16:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,323
|
py
|
import numpy as np
import pandas as pd
import networkx as nx
import os
import json
import pickle
import bz2
import _pickle as cPickle
from os.path import join
from random import shuffle
import time
from scseirx import construct_school_network as csn
def get_agent(model, ID):
for a in model.schedule.agents:
if a.ID == ID:
return a
def test_infection(a):
if a.infectious or a.recovered or a.exposed:
return 1
else:
return 0
def count_infected(model, agent_type):
infected_agents = np.asarray([test_infection(a) for a in model.schedule.agents \
if a.type == agent_type]).sum()
return infected_agents
def count_infection_endpoints(model):
endpoints = [a for a in model.schedule.agents if a.recovered and \
a.transmissions == 0]
return len(endpoints)
def count_typed_transmissions(model, source_type, target_type):
type_dict = {'t':'teacher', 's':'student', 'f':'family_member', \
'r':'resident', 'e':'employee'}
sources = [a for a in model.schedule.agents if a.type == source_type]
transmissions = 0
for source in sources:
for target, step in source.transmission_targets.items():
if type_dict[target[0]] == target_type:
transmissions += 1
return transmissions
def calculate_R0(model, agent_types):
transmissions = [a.transmissions for a in model.schedule.agents]
infected = [test_infection(a) for a in model.schedule.agents]
IDs = [a.ID for a in model.schedule.agents]
types = [a.type for a in model.schedule.agents]
df = pd.DataFrame({'ID':IDs,
'type':types,
'was_infected':infected,
'transmissions':transmissions})
df = df[df['was_infected'] == 1]
overall_R0 = df['transmissions'].mean()
R0 = {}
for agent_type in agent_types:
agent_R0 = df[df['type'] == agent_type]['transmissions'].mean()
R0.update({agent_type:agent_R0})
return R0
def calculate_finite_size_R0(model):
df = pd.DataFrame(columns=['ID', 'agent_type', 't', 'target'])
for a in model.schedule.agents:
if a.transmissions > 0:
for target in a.transmission_targets.keys():
df = df.append({'ID':a.ID, 'agent_type':a.type,
't':a.transmission_targets[target], 'target':target},
ignore_index=True)
# find first transmission(s)
# NOTE: while it is very unlikely that two first transmissions occurred
# in the same timestep, we have to account for the possibility nevertheless
first_transmitters = df[df['t'] == df['t'].min()]['ID'].values
N_transmissions = []
for ft in first_transmitters:
N_transmissions.append(len(df[df['ID'] == ft]))
if len(N_transmissions) > 0:
mean = np.mean(N_transmissions)
else:
mean = 0
return mean, df
def count_infected_by_age(model, age_brackets):
age_counts = {}
for ab in age_brackets:
lower = int(ab.split('-')[0])
upper = int(ab.split('-')[1])
infected = len([a for a in model.schedule.agents if a.type == 'student' and \
a.recovered == True and a.age >= lower and a.age <= upper])
age_counts[ab] = infected
return age_counts
def get_transmission_network(model):
transmissions = []
for a in model.schedule.agents:
if a.transmissions > 0:
for target in a.transmission_targets.keys():
transmissions.append((a.ID, target))
G = nx.Graph()
G.add_edges_from(transmissions)
return G
def get_statistics(df, col):
if len(df) == 0:
return {
'{}_mean'.format(col):np.nan,
'{}_median'.format(col):np.nan,
'{}_0.025'.format(col):np.nan,
'{}_0.10'.format(col):np.nan,
'{}_0.25'.format(col):np.nan,
'{}_0.75'.format(col):np.nan,
'{}_0.90'.format(col):np.nan,
'{}_0.975'.format(col):np.nan,
'{}_std'.format(col):np.nan
}
else:
return {
'{}_mean'.format(col):df[col].mean(),
'{}_median'.format(col):df[col].median(),
'{}_0.025'.format(col):df[col].quantile(0.025),
'{}_0.10'.format(col):df[col].quantile(0.10),
'{}_0.25'.format(col):df[col].quantile(0.25),
'{}_0.75'.format(col):df[col].quantile(0.75),
'{}_0.90'.format(col):df[col].quantile(0.90),
'{}_0.975'.format(col):df[col].quantile(0.975),
'{}_std'.format(col):df[col].std(),
}
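# Example (a minimal sketch): get_statistics(pd.DataFrame({'E': [1, 2, 3]}), 'E')
# returns a dict with keys 'E_mean', 'E_median', 'E_0.025', ..., 'E_std'.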
def get_agent_states(model, tm_events):
    if tm_events is None:
return None
# all agent states in all simulation steps. Agent states include the
# "infection state" ["susceptible", "exposed", "infectious", "recovered"]
# as well as the "quarantine state" [True, False]
state_data = model.datacollector.get_agent_vars_dataframe()
# remove susceptible states: these are the majority of states and since we
    # want to reduce the amount of data stored, we will assume all agents that
# do not have an explicit "exposed", "infectious" or "recovered" state,
# are susceptible.
state_data = state_data[state_data['infection_state'] != 'susceptible']
# we only care about state changes here, that's why we only keep the first
# new entry in the state data table for each (agent, infection_state,
# quarantine_state) triple. We need to reset the index once before we can
# apply the drop_duplicates() operation, since "step" and "AgentID" are in
# the index
state_data = state_data.reset_index()
state_data = state_data.drop_duplicates(subset=['AgentID',\
'infection_state', 'quarantine_state'])
# cleanup of index and column names to match other data output formats
state_data = state_data.rename(columns={'AgentID':'node_ID',
'Step':'day'})
state_data = state_data.reset_index(drop=True)
# for the visualization we need more fine-grained information (hours) on when
# exactly a transmission happened. This information is already stored in the
# transmission events table (tm_events) and we can take it from there and
# add it to the state table. We set all state changes to hour = 0 by default
# since most of them happen at the beginning of the day (becoming infectious,
# changing quarantine state or recovering).
state_data['hour'] = 1
exp = state_data[(state_data['infection_state'] == 'exposed')]\
.sort_values(by='day')
# we iterate over all state changes that correspond to an exposure, ignoring
# the first agent, since that one is the index case, whose exposure happens
# in hour 0. We only add the hour information for the transmission events.
# For these events, we also subtract one from the event "day", to account
# for the fact that agents have to appear "exposed" in the visualization
# from the point in time they had contact with an infectious agent onwards.
for ID in exp['node_ID'][1:]:
ID_data = exp[exp['node_ID'] == ID]
idx = ID_data.index[0]
hour = tm_events[tm_events['target_ID'] == ID]['hour'].values[0]
state_data.loc[idx, 'hour'] = hour
state_data.loc[idx, 'day'] -= 1
# re-order columns to a more sensible order and fix data types
state_data['hour'] = state_data['hour'].astype(int)
state_data = state_data[['day', 'hour', 'node_ID', 'infection_state',
'quarantine_state']]
return state_data
def get_transmission_chain(model, school_type, teacher_schedule, student_schedule):
max_hours = 9
teaching_hours = csn.get_teaching_hours(school_type)
daycare_hours = list(range(teaching_hours + 1, max_hours + 1))
teaching_hours = list(range(1, teaching_hours + 1))
weekday_map = {1:'monday', 2:'tuesday', 3:'wednesday', 4:'thursday',
5:'friday', 6:'saturday', 7:'sunday'}
if 5 in daycare_hours:
daycare_hours.remove(5)
tm_events = pd.DataFrame(columns=['day', 'weekday', 'hour',
'source_ID', 'source_type', 'target_ID', 'target_type'])
for a in model.schedule.agents:
if a.transmissions > 0:
for target, step in a.transmission_targets.items():
location = ''
hour = np.nan
weekday = (step + model.weekday_offset) % 7 + 1
G = model.weekday_connections[weekday]
target = get_agent(model, target)
n1 = a.ID
n2 = target.ID
tmp = [n1, n2]
tmp.sort()
n1, n2 = tmp
key = n1 + n2 + 'd{}'.format(weekday)
s_schedule = student_schedule.loc[weekday]
t_schedule = teacher_schedule.loc[weekday]
## determine transmission locations and times
# transmissions from students to other students, teachers or family
# members
if a.type == 'student':
student_class = G.nodes(data=True)[a.ID]['unit']
if target.type == 'student':
                        # transmissions during daycare
if G[a.ID][target.ID][key]['link_type'] == 'student_student_daycare':
classroom = s_schedule.loc[a.ID]['hour_8']
location = 'class_{}'.format(int(classroom))
hour = np.random.choice(daycare_hours)
# transmission during morning teaching
elif G[a.ID][target.ID][key]['link_type'] in \
['student_student_intra_class', 'student_student_table_neighbour']:
hour = np.random.choice(teaching_hours)
classroom = s_schedule.loc[a.ID]['hour_1']
location = 'class_{}'.format(int(classroom))
elif G[a.ID][target.ID][key]['link_type'] == 'student_household':
hour = 10
location = 'home'
else:
print('unknown student <-> student link type ',\
G[a.ID][target.ID][key]['link_type'])
# transmissions between students and teachers occur in the student's
# classroom at a time when the teacher is in that classroom
# according to the schedule
elif target.type == 'teacher':
# transmissions during daycare
if G[a.ID][target.ID][key]['link_type'] == 'daycare_supervision_teacher_student':
classroom = s_schedule.loc[a.ID]['hour_8']
location = 'class_{}'.format(int(classroom))
hour = np.random.choice(daycare_hours)
elif G[a.ID][target.ID][key]['link_type'] == 'teaching_teacher_student':
classroom = s_schedule.loc[a.ID]['hour_1']
location = 'class_{}'.format(int(classroom))
# get the hour in which the teacher is teaching in the given location
hour = int(t_schedule.loc[target.ID][t_schedule.loc[target.ID] == classroom]\
.index[0].split('_')[1])
else:
print('unknown student <-> teacher link type', \
G[a.ID][target.ID][key]['link_type'])
                    # transmissions to family members occur at home after school
elif target.type == 'family_member':
location = 'home'
hour = 10
else:
print('agent type not supported')
# transmissions from teachers to other teachers or students
elif a.type == 'teacher':
# transmissions from teachers to students occur in the student's
# classroom at a time when the teacher is in that classroom
# according to the schedule
if target.type == 'student':
# transmissions during daycare
if G[a.ID][target.ID][key]['link_type'] == 'daycare_supervision_teacher_student':
classroom = s_schedule.loc[target.ID]['hour_8']
location = 'class_{}'.format(int(classroom))
hour = np.random.choice(daycare_hours)
elif G[a.ID][target.ID][key]['link_type'] == 'teaching_teacher_student':
classroom = s_schedule.loc[target.ID]['hour_1']
location = 'class_{}'.format(int(classroom))
# get the hour in which the teacher is teaching in the given location
hour = int(t_schedule.loc[a.ID][t_schedule.loc[a.ID] == classroom]\
.index[0].split('_')[1])
else:
print('unknown teacher <-> student link type', \
G[a.ID][target.ID][key]['link_type'])
# transmissions between teachers occur during the lunch break
# in the faculty room
elif target.type == 'teacher':
location = 'faculty_room'
hour = 5
elif target.type == 'family_member':
location = 'home'
hour = 10
else:
print('agent type not supported')
# transmissions from family members to other family members
elif a.type == 'family_member':
if target.type == 'student':
location = 'home'
hour = 10
elif target.type == 'teacher':
print('this should not happen!')
# transmissions between family members occur at home after school
elif target.type == 'family_member':
location = 'home'
hour = 10
else:
print('agent type not supported')
else:
print('agent type not supported')
assert not np.isnan(hour), 'schedule messup!'
assert len(location) > 0, 'location messup!'
tm_events = tm_events.append({
'day':step,
'weekday':weekday_map[weekday],
'hour':hour,
'location':location,
'source_ID':a.ID,
'source_type':a.type,
'target_ID':target.ID,
'target_type':target.type},
ignore_index=True)
if len(tm_events) > 0:
tm_events['day'] = tm_events['day'].astype(int)
tm_events = tm_events.sort_values(by=['day', 'hour']).reset_index(drop=True)
tm_events = tm_events[['day', 'hour', 'location', 'source_ID', 'source_type',
'target_ID', 'target_type']]
return tm_events
else:
return None
def get_ensemble_observables_school(model, run):
R0, _ = calculate_finite_size_R0(model)
N_school_agents = len([a for a in model.schedule.agents if \
a.type == 'teacher' or a.type == 'student'])
N_family_members = len([a for a in model.schedule.agents if a.type == 'family_member'])
infected_students = count_infected(model, 'student')
infected_teachers = count_infected(model, 'teacher')
infected_family_members = count_infected(model, 'family_member')
infected_agents = infected_students + infected_teachers + infected_family_members
data = model.datacollector.get_model_vars_dataframe()
N_diagnostic_tests = data['N_diagnostic_tests'].max()
N_preventive_screening_tests = data['N_preventive_screening_tests'].max()
transmissions = sum([a.transmissions for a in model.schedule.agents])
infected_without_transmissions = count_infection_endpoints(model)
student_student_transmissions = count_typed_transmissions(model, 'student', 'student')
teacher_student_transmissions = count_typed_transmissions(model, 'teacher', 'student')
student_teacher_transmissions = count_typed_transmissions(model, 'student', 'teacher')
teacher_teacher_transmissions = count_typed_transmissions(model, 'teacher', 'teacher')
student_family_member_transmissions = count_typed_transmissions(model, 'student', 'family_member')
family_member_family_member_transmissions = count_typed_transmissions(model, 'family_member', 'family_member')
quarantine_days_student = model.quarantine_counters['student']
quarantine_days_teacher = model.quarantine_counters['teacher']
quarantine_days_family_member = model.quarantine_counters['family_member']
diagnostic_test_detected_infections_student = \
data['diagnostic_test_detected_infections_student'].max()
diagnostic_test_detected_infections_teacher = \
data['diagnostic_test_detected_infections_teacher'].max()
diagnostic_test_detected_infections_family_member = \
data['diagnostic_test_detected_infections_family_member'].max()
preventive_test_detected_infections_student = \
data['preventive_test_detected_infections_student'].max()
preventive_test_detected_infections_teacher = \
data['preventive_test_detected_infections_teacher'].max()
preventive_test_detected_infections_family_member = \
data['preventive_test_detected_infections_family_member'].max()
pending_test_infections = data['pending_test_infections'].max()
undetected_infections = data['undetected_infections'].max()
predetected_infections = data['predetected_infections'].max()
duration = len(data)
diagnostic_tests_per_day_per_agent = N_diagnostic_tests / duration / N_school_agents
preventive_tests_per_day_per_agent = N_preventive_screening_tests / duration / N_school_agents
tests_per_day_per_agent = (N_diagnostic_tests + N_preventive_screening_tests) / duration / N_school_agents
row = {'run':run,
'R0':R0,
'N_school_agents':N_school_agents,
'N_family_members':N_family_members,
'infected_students':infected_students,
'infected_teachers':infected_teachers,
'infected_family_members':infected_family_members,
'infected_agents':infected_agents,
'N_diagnostic_tests':N_diagnostic_tests,
'N_preventive_tests':N_preventive_screening_tests,
'transmissions':transmissions,
'infected_without_transmissions':infected_without_transmissions,
'student_student_transmissions':student_student_transmissions,
'teacher_student_transmissions':teacher_student_transmissions,
'student_teacher_transmissions':student_teacher_transmissions,
'teacher_teacher_transmissions':teacher_teacher_transmissions,
'student_family_member_transmissions':student_family_member_transmissions,
'family_member_family_member_transmissions':family_member_family_member_transmissions,
'quarantine_days_student':quarantine_days_student,
'quarantine_days_teacher':quarantine_days_teacher,
'quarantine_days_family_member':quarantine_days_family_member,
'preventive_test_detected_infections_student':\
preventive_test_detected_infections_student,
'preventive_test_detected_infections_teacher':\
preventive_test_detected_infections_teacher,
'preventive_test_detected_infections_family_member':\
preventive_test_detected_infections_family_member,
'diagnostic_test_detected_infections_student':\
diagnostic_test_detected_infections_student,
'diagnostic_test_detected_infections_teacher':\
diagnostic_test_detected_infections_teacher,
'diagnostic_test_detected_infections_family_member':\
diagnostic_test_detected_infections_family_member,
'pending_test_infections':pending_test_infections,
'undetected_infections':undetected_infections,
'predetected_infections':predetected_infections,
'duration':duration,
'diagnostic_tests_per_day_per_agent':diagnostic_tests_per_day_per_agent,
'preventive_tests_per_day_per_agent':preventive_tests_per_day_per_agent,
'tests_per_day_per_agent':tests_per_day_per_agent}
return row
def compress_pickle(fname, fpath, data):
success = False
while not success:
try:
with bz2.BZ2File(join(fpath, fname + '.pbz2'), 'w') as f:
cPickle.dump(data, f)
success = True
except OSError:
time.sleep(0.5)
print('re-trying to dump model file {} ...'.format(fname))
return
def decompress_pickle(fname, fpath):
data = bz2.BZ2File(join(fpath, fname), 'rb')
data = cPickle.load(data)
return data
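# Example (a minimal sketch): compress_pickle('model_0', '/tmp', data) writes
# /tmp/model_0.pbz2, and decompress_pickle('model_0.pbz2', '/tmp') loads it back.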
def get_representative_run(N_infected, path):
filenames = os.listdir(path)
shuffle(filenames)
medians = {int(f.split('_')[1]):int(f.split('_')[3].split('.')[0]) \
for f in filenames}
dist = np.inf
closest_run = None
for run, median in medians.items():
curr_dist = np.abs(N_infected - median)
if curr_dist < dist:
closest_run = run
dist = curr_dist
if curr_dist == 0:
break
fname = 'run_{}_N_{}.pbz2'.format(closest_run, medians[closest_run])
return decompress_pickle(fname, path)
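# Example (a minimal sketch): with files 'run_1_N_5.pbz2' and 'run_2_N_12.pbz2'
# in `path`, get_representative_run(11, path) loads run 2, whose median
# infected count (12) is closest to the requested N_infected.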
def dump_JSON(path, school,
test_type, index_case, screen_frequency_student,
screen_frequency_teacher, teacher_mask, student_mask, half_classes,
ventilation_mod, node_list, teacher_schedule, student_schedule,
rep_transmission_events, state_data, start_weekday, duration,
fname_addition='', friendship_contacts=False,
class_size_reduction=False,
m_efficiency_exhale=False, m_efficiency_inhale=False,
s_test_rate=False, t_test_rate=False,
trisk_mod = False):
student_schedule = student_schedule.reset_index()
teacher_schedule = teacher_schedule.reset_index()
school_type = school['type']
classes = school['classes']
students = school['students']
turnover, _, ttype = test_type.split('_')
turnovers = {'same':0, 'one':1, 'two':2, 'three':3}
turnover = turnovers[turnover]
bool_dict = {True:'T', False:'F'}
node_list = json.loads(node_list.to_json(orient='split'))
del node_list['index']
teacher_schedule = json.loads(teacher_schedule.to_json(orient='split'))
del teacher_schedule['index']
student_schedule = json.loads(student_schedule.to_json(orient='split'))
del student_schedule['index']
# can be empty, if there are no transmission events in the simulation
try:
rep_transmission_events = json.loads(rep_transmission_events\
.to_json(orient='split'))
del rep_transmission_events['index']
state_data = json.loads(state_data.to_json(orient='split'))
del state_data['index']
except AttributeError:
pass
data = {'school_type':school_type,
'classes':classes,
'students':students,
'test_type':ttype,
'test_turnover':turnover,
'indexcase':index_case,
'screen_frequency_teacher':screen_frequency_teacher,
'screen_frequency_student':screen_frequency_student,
'teacher_mask':teacher_mask,
'student_mask':student_mask,
'half_classes':half_classes,
'ventilation_mod':ventilation_mod,
'node_list':node_list,
'teacher_schedule':teacher_schedule,
'student_schedule':student_schedule,
'rep_trans_events':rep_transmission_events,
'agent_states':state_data,
'start_weekday':start_weekday,
'duration':duration}
fname = join(path, 'test-{}_'.format(ttype) + \
'turnover-{}_index-{}_tf-{}_'
.format(turnover, index_case[0], screen_frequency_teacher) +\
'sf-{}_tmask-{}_smask-{}'\
.format(screen_frequency_student, bool_dict[teacher_mask],\
bool_dict[student_mask]))
if friendship_contacts:
fname = fname + '_fcontacts-{}'.format(friendship_contacts)
if class_size_reduction:
fname = fname + '_csizered-{}'.format(class_size_reduction)
if m_efficiency_exhale and m_efficiency_inhale:
fname = fname + '_meffinh-{}_meffexh-{}'\
.format(m_efficiency_exhale, m_efficiency_inhale)
if s_test_rate and t_test_rate:
fname = fname + '_stestrate-{}_ttestrate-{}'\
.format(s_test_rate, t_test_rate)
if trisk_mod:
fname = fname + '_trisk-{}'.format(trisk_mod)
fname = fname + '_half-{}_vent-{}'\
.format(bool_dict[half_classes], ventilation_mod)
fname = fname + fname_addition + '.txt'
with open(fname,'w')\
as outfile:
json.dump(data, outfile)
def get_measures(measure_string, test_participation_rate=False,
reduced_class_size=False, added_friendship_contacts=False,
reduced_mask_efficiency=False, transmission_risk_modifier=False):
'''
Convenience function to get the individual measures given a string (filename)
of measures
'''
#print(measure_string)
agents = {
'student':{
'screening_interval': None,
'index_probability': 0,
'mask':False},
'teacher':{
'screening_interval': None,
'index_probability': 0,
'mask':False},
'family_member':{
'screening_interval': None,
'index_probability': 0,
'mask':False}
}
turnovers = {0:'same', 1:'one', 2:'two', 3:'three'}
bmap = {'T':True, 'F':False}
interval_map = {'0':0, '3':3, '7':7, '14':14, 'None':None}
index_map = {'s':'student', 't':'teacher'}
stype, _ = measure_string.split('_test')
rest = measure_string.split(stype + '_')[1]
if test_participation_rate and reduced_class_size and \
reduced_mask_efficiency and transmission_risk_modifier:
ttpype, turnover, index, tf, sf, tmask, smask, \
class_size_reduction, vent, meffexh, meffinh, \
s_test_rate, t_test_rate, trisk = rest.split('_')
tmp = [stype, ttpype, turnover, index, tf, sf, tmask,
smask, class_size_reduction, vent, meffexh, meffinh,
s_test_rate, t_test_rate, trisk]
elif test_participation_rate:
ttpype, turnover, index, tf, sf, tmask, smask, haf, \
s_test_rate, t_test_rate, vent = rest.split('_')
tmp = [stype, ttpype, turnover, index, tf, sf, tmask,
smask, haf, s_test_rate, t_test_rate, vent]
elif reduced_class_size:
ttpype, turnover, index, tf, sf, tmask, smask, haf, \
class_size_reduction, vent = rest.split('_')
tmp = [stype, ttpype, turnover, index, tf, sf, tmask,
smask, haf, class_size_reduction, vent]
elif added_friendship_contacts:
ttpype, turnover, index, tf, sf, tmask, smask, haf, \
friendship_contacts, vent = rest.split('_')
tmp = [stype, ttpype, turnover, index, tf, sf,
tmask, smask, haf, friendship_contacts, vent]
elif reduced_mask_efficiency:
ttpype, turnover, index, tf, sf, tmask, smask, \
meffexh, meffinh, haf, vent = rest.split('_')
tmp = [stype, ttpype, turnover, index, tf, sf,
tmask, smask, haf, meffexh, meffinh, vent]
else:
ttpype, turnover, index, tf, sf, tmask, smask, haf, vent = \
rest.split('_')
tmp = [stype, ttpype, turnover, index, tf, sf, tmask, smask, haf, vent]
tmp = [m.split('-') for m in tmp]
screening_params = {}
half = False
for m in tmp:
if len(m) == 1:
pass
elif m[0] == 'test':
ttype = '{}_day_{}'.format(turnovers[int(tmp[2][1])], tmp[1][1])
screening_params['preventive_test_type'] = ttype
elif m[0] == 'turnover':
pass
elif m[0] == 'index':
screening_params['index_case'] = index_map[m[1]]
elif m[0] == 'tf':
agents['teacher']['screening_interval'] = interval_map[m[1]]
elif m[0] == 'sf':
agents['student']['screening_interval'] = interval_map[m[1]]
elif m[0] == 'tmask':
agents['teacher']['mask'] = bmap[m[1]]
elif m[0] == 'smask':
agents['student']['mask'] = bmap[m[1]]
elif m[0] == 'half':
half = bmap[m[1]]
elif m[0] == 'vent':
screening_params['transmission_risk_ventilation_modifier'] = float(m[1])
elif m[0] == 'csizered':
screening_params['class_size_reduction'] = float(m[1])
elif m[0] == 'stestrate':
screening_params['student_test_rate'] = float(m[1])
elif m[0] == 'ttestrate':
screening_params['teacher_test_rate'] = float(m[1])
elif m[0] == 'fcontacts':
screening_params['added_friendship_contacts'] = float(m[1])
elif m[0] == 'meffexh':
screening_params['mask_efficiency_exhale'] = float(m[1])
elif m[0] == 'meffinh':
screening_params['mask_efficiency_inhale'] = float(m[1])
elif m[0] == 'trisk':
screening_params['transmission_risk_modifier'] = float(m[1])
else:
print('unknown measure type ', m[0])
return screening_params, agents, half
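# Example (a sketch with a hypothetical filename): for the measure string
# 'primary_test-antigen_turnover-0_index-s_tf-7_sf-7_tmask-T_smask-F_half-F_vent-1'
# this returns preventive_test_type 'same_day_antigen', index_case 'student',
# 7-day screening intervals for teachers and students, teacher masks on,
# student masks off, and half = False.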
def get_data(stype, src_path, test_participation_rate=False,
reduced_class_size=False, added_friendship_contacts=False,
reduced_mask_efficiency=False, transmission_risk_modifier=False):
'''
Convenience function to read all ensembles from different measures
of a given school type and return one single data frame
'''
data = pd.DataFrame()
stype_path = join(src_path, stype)
files = os.listdir(stype_path)
for f in files:
        screening_params, agents, half = get_measures(os.path.splitext(f)[0],
test_participation_rate=test_participation_rate,
reduced_class_size=reduced_class_size,
added_friendship_contacts=added_friendship_contacts,
reduced_mask_efficiency=reduced_mask_efficiency,
transmission_risk_modifier=transmission_risk_modifier)
ensmbl = pd.read_csv(join(stype_path, f))
try:
ensmbl = ensmbl.drop(columns=['Unnamed: 0'])
except KeyError:
pass
ensmbl['preventive_test_type'] = screening_params['preventive_test_type']
ensmbl['index_case'] = screening_params['index_case']
ensmbl['transmission_risk_ventilation_modifier'] = \
screening_params['transmission_risk_ventilation_modifier']
ensmbl['student_mask'] = agents['student']['mask']
ensmbl['teacher_mask'] = agents['teacher']['mask']
ensmbl['student_screening_interval'] = agents['student']['screening_interval']
ensmbl['teacher_screening_interval'] = agents['teacher']['screening_interval']
ensmbl['half_classes'] = half
if reduced_class_size:
ensmbl['class_size_reduction'] = screening_params['class_size_reduction']
if test_participation_rate:
ensmbl['student_test_rate'] = screening_params['student_test_rate']
ensmbl['teacher_test_rate'] = screening_params['teacher_test_rate']
if reduced_mask_efficiency:
ensmbl['mask_efficiency_inhale'] = screening_params['mask_efficiency_inhale']
ensmbl['mask_efficiency_exhale'] = screening_params['mask_efficiency_exhale']
if transmission_risk_modifier:
ensmbl['transmission_risk_modifier'] = screening_params['transmission_risk_modifier']
data = pd.concat([data, ensmbl])
data = data.reset_index(drop=True)
data['teacher_screening_interval'] = data['teacher_screening_interval'].replace({None:'never'})
data['student_screening_interval'] = data['student_screening_interval'].replace({None:'never'})
return data
|
[
"jana.lasser@ds.mpg.de"
] |
jana.lasser@ds.mpg.de
|
f76b2c88d9c798d959e946a73f076dcbfec551e3
|
9d2ef665fb42762279a3d82acd1ed5cfae831c65
|
/venv/bin/easy_install
|
000ab1efcc9488fbff3b240657f1f4833ca899df
|
[] |
no_license
|
thanhtung29497/tts_frontend
|
63c52c30afcb416167618d48db35d72965524518
|
72fe8e08d3dd28eed3d140b4a2f698e60c1939b9
|
refs/heads/master
| 2020-07-19T23:37:07.068984
| 2019-09-05T10:36:45
| 2019-09-05T10:36:45
| 206,532,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
#!/home/tungtt-ai-u/PycharmProjects/tts_frontend/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"thanhtung29497@gmail.com"
] |
thanhtung29497@gmail.com
|
|
b21ef021ca3d6afdf535882ef61eb49b75bf895c
|
8b7db851e13737d5c44cc00d38a46a2817c7707b
|
/tests/train.py
|
788e79cd09e75082a8dc8cf4d75b3dd063b824b5
|
[
"MIT"
] |
permissive
|
goelshivam1210/gym-novel-gridworlds
|
b6f24b38cfceb2b44461da9bb7607c56d27f4a9e
|
c8f419da02e4fd716b9e293fcf0b99ee2eb96367
|
refs/heads/master
| 2023-01-15T13:46:23.438199
| 2020-11-23T14:42:13
| 2020-11-23T14:42:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,349
|
py
|
import os
import time
import gym
import gym_novel_gridworlds
import numpy as np
from stable_baselines.common.env_checker import check_env
from stable_baselines import PPO2
from stable_baselines import DQN
from stable_baselines.gail import ExpertDataset
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common import make_vec_env
from stable_baselines.bench import Monitor
from stable_baselines.common.callbacks import BaseCallback
from stable_baselines.results_plotter import load_results, ts2xy
class RenderOnEachStep(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
"""
def __init__(self, env):
super(RenderOnEachStep, self).__init__()
self.env = env
def _on_step(self):
self.env.render()
# time.sleep(0.5)
class SaveOnBestTrainingRewardCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
"""
def __init__(self, check_freq, log_dir, model_name):
super(SaveOnBestTrainingRewardCallback, self).__init__()
self.check_freq = check_freq
self.log_dir = log_dir
self.save_path = os.path.join(log_dir, model_name)
self.best_mean_reward = -np.inf
def _on_step(self):
if self.n_calls % self.check_freq == 0:
# Retrieve training reward
x, y = ts2xy(load_results(self.log_dir), 'timesteps')
if len(x) > 0:
# Mean training reward over the last 100 episodes
mean_reward = np.mean(y[-100:])
# New best model, you could save the agent here
if mean_reward > self.best_mean_reward:
self.best_mean_reward = mean_reward
print("Saving new best model to {}".format(self.save_path))
self.model.save(self.save_path)
class RemapActionOnEachStep(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
"""
def __init__(self, env, step_num):
super(RemapActionOnEachStep, self).__init__()
self.env = env
self.step_num = step_num
def _on_step(self):
if self.n_calls % self.step_num == 0:
# self.env = remap_action(self.env)
self.env.remap_action()
if __name__ == "__main__":
env_id = 'NovelGridworld-v3'
timesteps = 200000 # 200000
experiment_dir = 'results2' # 'models', results
experiment_code1 = env_id + '_' + str(timesteps)
experiment_code2 = '_' + '8beams0filled40range3items_in_360degrees_lfd' # lfd
model_code = experiment_code1 + experiment_code2
log_dir = experiment_dir + os.sep + env_id + experiment_code2
pretrain = True
os.makedirs(log_dir, exist_ok=True)
env = gym.make(env_id)
env = Monitor(env, log_dir)
# callback = RenderOnEachStep(env)
callback = SaveOnBestTrainingRewardCallback(1000, log_dir, model_code + '_best_model')
# callback = RemapActionOnEachStep(env, 50000)
# multiprocess environment
# env = make_vec_env('NovelGridworld-v0', n_envs=4)
check_env(env, warn=True)
# Optional: PPO2 requires a vectorized environment to run
# the env is now wrapped automatically when passing it to the constructor
# env = DummyVecEnv([lambda: env])
# model = PPO2(MlpPolicy, env, verbose=1)
env = DummyVecEnv([lambda: env])
model = PPO2.load('NovelGridworld-v3_200000_8beams0filled40range3items_in_360degrees_lfd_OLD', env)
# Pretrain the model from human recored dataset
# specify `traj_limitation=-1` for using the whole dataset
if pretrain:
dataset = ExpertDataset(expert_path='expert_NovelGridworld-v3_50demos2.npz', traj_limitation=-1, batch_size=128)
model.pretrain(dataset, n_epochs=2000)
model.save(model_code)
# model.learn(total_timesteps=timesteps)
model.learn(total_timesteps=timesteps, callback=callback)
model.save(model_code + '_last_model')
|
[
"gtatiya@live.com"
] |
gtatiya@live.com
|
9ec064b2a787ac1c0338a8c2f410c4c3d54df4ef
|
164d8556c96b220b1502066e1ad9688836c117f0
|
/misc/counting_cells_load.py
|
e66320a66cc1ea0d306724415a3f94343e7709d0
|
[] |
no_license
|
pwang724/PHD_experiment
|
df7c23281b763159fe2ee61e229689d1fd01d7d0
|
9e290308353e55aa818477d3d183415d633624c9
|
refs/heads/master
| 2023-02-20T13:53:24.472923
| 2023-02-16T03:20:05
| 2023-02-16T03:20:05
| 162,485,012
| 0
| 0
| null | 2019-01-15T02:48:32
| 2018-12-19T20:03:33
|
Python
|
UTF-8
|
Python
| false
| false
| 957
|
py
|
import numpy as np
import _CONSTANTS.conditions as conditions
import _CONSTANTS.config as config
import os
import glob
from tools import file_io
import matlab.engine
import time
mouse = 2
condition = conditions.OFC_COMPOSITE
d = os.path.join(condition.paths[mouse], 'data')
mat_files = glob.glob(os.path.join(d,'*.mat'))
eng = matlab.engine.start_matlab()
rois = []
for mat_file in mat_files:
start_time = time.time()
x = eng.load(mat_file)
obj = list(x.values())[0]
obj_name = "obj"
eng.workspace[obj_name] = obj
roi_m = eng.eval(obj_name + ".roi")
roi = np.asarray(roi_m).squeeze()
print('Time taken: {}'.format(time.time() - start_time))
rois.append(roi)
rois = np.stack(rois)
print(rois.shape)
rois = rois.astype('float32')
data_directory = config.Config().LOCAL_DATA_PATH
data_directory = os.path.join(data_directory, 'registration','ROI', condition.name)
file_io.save_numpy(data_directory, str(mouse), rois)
|
[
"peterwang724@gmail.com"
] |
peterwang724@gmail.com
|
e8a1e8a89b2676c6cd1974df7292669106c5eb73
|
0d17a12333319ba830e3359d07a17dc96a79ec5b
|
/app/asgi.py
|
17d84a01f8a7a30554b58bc417955ebb6a0eecd8
|
[] |
no_license
|
dimasickx/restAPI
|
6758bff307f2be96cdba294365e91d27519daad5
|
e68a13573c3cb1c5996ca77b842ee96967724c66
|
refs/heads/master
| 2023-05-22T18:28:57.197906
| 2021-06-11T13:07:54
| 2021-06-11T13:07:54
| 375,749,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
"""
ASGI config for main project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')
application = get_asgi_application()
|
[
"dmitry.sorokin.mail@gmail.com"
] |
dmitry.sorokin.mail@gmail.com
|
9e28e0cd12e58048913b3c3764cd180e05af5636
|
9e41adf86b2c166a219f0b6d9371089c5f2d7d93
|
/Exerciciospython2/Função/e100.py
|
0b47e1bb8952e250e0f02facf33b98bfe7653f2f
|
[] |
no_license
|
Nadirlene/Exercicios-python
|
1aaead61dd0efcb5303f6294e765e9e1d54506cc
|
3fe82e166003922ef749756a249840ed1fe940b0
|
refs/heads/main
| 2022-12-25T21:35:06.172839
| 2020-09-28T15:08:37
| 2020-09-28T15:08:37
| 299,343,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
from random import randint
from time import sleep
númerosSorteados = []
def sorteio(lista):
    print('Drawing 5 values for the list:', end=' ')
for c in range(0, 5):
lista.append(randint(1, 10))
print(lista[c], end=' ')
sleep(0.3)
    print('DONE!')
def somaPar(lista):
soma = 0
for c in lista:
if c % 2 == 0:
soma += c
    print(f'Summing the even values of {lista}, we get {soma}')
sorteio(númerosSorteados)
somaPar(númerosSorteados)
|
[
"nadirleneoliveira@yahoo.com"
] |
nadirleneoliveira@yahoo.com
|
d263c302856e1bd84f621399164fb1e25c9f2f64
|
b9b9c880653eb9cc1d6a31d06da58b7a8ead126a
|
/app/slash.py
|
e65924b67afd229b39552b46590337a8019d612b
|
[] |
no_license
|
oddball/gae-flask-skeleton
|
707f6d199a8fc184e8ad6632f31aa36b23e06ee0
|
c04c803fc184820aa06e8f49aae2ba03eb5f0a46
|
refs/heads/master
| 2021-01-23T12:04:48.102155
| 2018-02-20T19:32:29
| 2018-02-20T19:32:29
| 102,644,208
| 1
| 0
| null | 2018-02-20T19:32:29
| 2017-09-06T18:31:36
|
Python
|
UTF-8
|
Python
| false
| false
| 435
|
py
|
# -*- coding: utf-8 -*-
import httplib
from flask import Blueprint, make_response
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
import flask
from flask_login import login_required
from flask_login import current_user
slash_bp = Blueprint("slash", "slash")
route = slash_bp.route
@route('/', methods=['GET'])
@ndb.synctasklet
def slash():
raise ndb.Return(flask.render_template('slash.html'))
|
[
"andreas.lindh@hiced.com"
] |
andreas.lindh@hiced.com
|
b749430cbf1df74689cf5fbf55ba4631e9e2b98b
|
0fd50b12ed2d79e31fed5ecd91efe9ae63ede429
|
/src/analysis/test_udgm_mrm_plots.py
|
16af63bb46a920a467c315cee341d87fea168b5e
|
[] |
no_license
|
riccardoscilla/scheduled-collect-LPIoT
|
f611726132d35b1f7693120ee4e065c5f17dfaca
|
84cd24d9adb154d51a3b67f382429444d2164b78
|
refs/heads/main
| 2023-05-13T06:42:41.758222
| 2021-06-07T14:26:20
| 2021-06-07T14:26:20
| 374,693,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
import matplotlib.pyplot as plt
import pandas as pd
from collections import defaultdict
import numpy as np
import math
plt.rcParams.update({'font.size': 15})
def getValues(x, p):
dc_values = []
pdr_values = []
dc_std = []
pdr_std = []
for i in x:
folder = "results/mrm/test_mrm_"+p+"_changeP/"
df = pd.read_csv(folder+"dc"+i+".csv", delimiter="\t")
dc = []
for index, row in df.iterrows():
dc.append(row['dc'])
dc_values.append(np.mean(dc))
dc_std.append(np.std(dc))
folder = "results/mrm/test_mrm_"+p+"_changeP/"
df = pd.read_csv(folder+"pdr"+i+".csv", delimiter="\t")
pdr = []
for index, row in df.iterrows():
pdr.append(row['pdr'])
pdr_values.append(np.mean(pdr))
pdr_std.append(np.std(pdr))
return dc_values, pdr_values, dc_std, pdr_std
fig, (ax1,ax2) = plt.subplots(ncols=2, figsize=(14,7), constrained_layout=True)
x = ["3","7","10","15"]
dc10, pdr10, dc10_error, pdr10_error = getValues(x,"10")
dc20, pdr20, dc20_error, pdr20_error = getValues(x,"20")
dc50, pdr50, dc50_error, pdr50_error = getValues(x,"50")
ax1.set_title('PDR per P and N value')
ax1.set_ylabel('Average PDR (%)')
ax1.set_xlabel('P value')
width = 0.25
xpos = np.arange(len(x))
ax1.bar(xpos-width, pdr10, width, yerr=pdr10_error, capsize=3, label="10")
ax1.bar(xpos, pdr20, width, yerr=pdr20_error, capsize=3, label="20")
ax1.bar(xpos+width, pdr50, width, yerr=pdr50_error, capsize=3, label="50")
ax1.set_xticks(xpos)
ax1.set_xticklabels(x)
ax1.grid(axis="y")
ax1.legend(title="N value",loc=4)
ax2.set_title('DC per P and N value')
ax2.set_ylabel('Average DC (%)')
ax2.set_xlabel('P value')
ax2.grid(axis="y")
ax2.errorbar(x,dc10,yerr=dc10_error,fmt='-o', label="10")
ax2.errorbar(x,dc20,yerr=dc20_error,fmt='-o', label="20")
ax2.errorbar(x,dc50,yerr=dc50_error,fmt='-o', label="50")
ax2.legend(title="N value",loc=4)
plt.show()
fig.savefig('images/mrm.png')
|
[
"66726419+riccardoscilla@users.noreply.github.com"
] |
66726419+riccardoscilla@users.noreply.github.com
|
3b50f9669e427ccf2598af20c57cc0bbf58daad0
|
11700a4b8207850ffc4a5c7ea46db03f0c3ab7e4
|
/.venv/bin/eralchemy
|
b6a0ff3bb3ea945183414cc783385b2bce87776a
|
[] |
no_license
|
nicolas703/exercise-instagram-data-modeling
|
1aea698098c00694732a6b1106319a5a004f78d1
|
40d3c52db05b7e1668beeff76b5582e9ebb00c40
|
refs/heads/master
| 2022-11-23T15:21:30.967254
| 2020-07-09T09:48:20
| 2020-07-09T09:48:20
| 278,325,266
| 1
| 0
| null | 2020-07-09T09:46:48
| 2020-07-09T09:46:47
| null |
UTF-8
|
Python
| false
| false
| 258
|
#!/workspace/exercise-instagram-data-modeling/.venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from eralchemy.main import cli
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli())
|
[
"nicolas8859.nd@gmail.com"
] |
nicolas8859.nd@gmail.com
|
|
843dc84990ac79c37f5be80ed069ee89cccc662f
|
387b4a53485b175d2c8c7bca7f3429ad2abbb4f0
|
/pvdet/model/bbox_head/pv_head_base.py
|
b80b845da9a2588a914b8f8e2020ba1e80618782
|
[] |
no_license
|
liangzhao123/IOU-SSD
|
50cf3a52e8b306b024d0396b76bd3931c8a15434
|
b53a1659ffe197da8eeca0f4a35a4a4571db22f4
|
refs/heads/main
| 2023-06-25T08:15:03.553378
| 2021-07-31T17:40:49
| 2021-07-31T17:40:49
| 321,982,002
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,318
|
py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pvdet.model.model_utils.proposal_target_layer import ProposalTargetLayer,class_agnostic_nms
from pvdet.dataset.utils import common_utils
from pvdet.tools.utils import loss_utils,box_coder_utils
class RoIHeadTemplate(nn.Module):
def __init__(self, num_class, model_cfg):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.box_coder = getattr(box_coder_utils, self.model_cfg.TARGET_CONFIG.BOX_CODER)()
self.proposal_target_layer = ProposalTargetLayer(roi_sampler_cfg=self.model_cfg.TARGET_CONFIG)
self.build_losses(self.model_cfg.LOSS_CONFIG)
self.forward_ret_dict = None
def build_losses(self, losses_cfg):
self.add_module(
'reg_loss_func',
loss_utils.WeightedSmoothL1Loss_v1(code_weights=losses_cfg.LOSS_WEIGHTS['code_weights'])
)
def make_fc_layers(self, input_channels, output_channels, fc_list):
fc_layers = []
pre_channel = input_channels
for k in range(0, fc_list.__len__()):
fc_layers.extend([
nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False),
nn.BatchNorm1d(fc_list[k]),
nn.ReLU()
])
pre_channel = fc_list[k]
if self.model_cfg.DP_RATIO >= 0 and k == 0:
fc_layers.append(nn.Dropout(self.model_cfg.DP_RATIO))
fc_layers.append(nn.Conv1d(pre_channel, output_channels, kernel_size=1, bias=True))
fc_layers = nn.Sequential(*fc_layers)
return fc_layers
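    # Example (a minimal sketch): make_fc_layers(128, 1, fc_list=[256, 256])
    # stacks Conv1d(128 -> 256) + BN + ReLU (plus dropout after the first block
    # when DP_RATIO >= 0), Conv1d(256 -> 256) + BN + ReLU, and a final
    # Conv1d(256 -> 1) prediction head.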
def proposal_layer(self, batch_dict, nms_config):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
nms_config:
Returns:
batch_dict:
rois: (B, num_rois, 7+C)
roi_scores: (B, num_rois)
roi_labels: (B, num_rois)
"""
batch_size = batch_dict['batch_size']
batch_box_preds = batch_dict['batch_box_preds']
batch_cls_preds = batch_dict['batch_cls_preds']
rois = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE, batch_box_preds.shape[-1]))
roi_scores = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE))
roi_labels = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE), dtype=torch.long)
for index in range(batch_size):
if batch_dict.get('batch_index', None) is not None:
assert batch_cls_preds.shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
assert batch_dict['batch_cls_preds'].shape.__len__() == 3
batch_mask = index
box_preds = batch_box_preds[batch_mask]
cls_preds = batch_cls_preds[batch_mask]
cur_roi_scores, cur_roi_labels = torch.max(cls_preds, dim=1)
if nms_config.MULTI_CLASSES_NMS:
raise NotImplementedError
else:
selected, selected_scores = class_agnostic_nms(
box_scores=cur_roi_scores, box_preds=box_preds, nms_config=nms_config
)
rois[index, :len(selected), :] = box_preds[selected]
roi_scores[index, :len(selected)] = cur_roi_scores[selected]
roi_labels[index, :len(selected)] = cur_roi_labels[selected]
batch_dict['rois'] = rois
batch_dict['roi_scores'] = roi_scores
batch_dict['roi_labels'] = roi_labels + 1
batch_dict['has_class_labels'] = True if batch_cls_preds.shape[-1] > 1 else False
return batch_dict
def assign_targets(self, batch_dict):
batch_size = batch_dict['batch_size']
with torch.no_grad():
targets_dict = self.proposal_target_layer.forward(batch_dict)
rois = targets_dict['rois'] # (B, N, 7 + C)
gt_of_rois = targets_dict['gt_of_rois'] # (B, N, 7 + C + 1)
targets_dict['gt_of_rois_src'] = gt_of_rois.clone().detach()
# canonical transformation
roi_center = rois[:, :, 0:3]
roi_ry = rois[:, :, 6] % (2 * np.pi)
gt_of_rois[:, :, 0:3] = gt_of_rois[:, :, 0:3] - roi_center
gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry
# transfer LiDAR coords to local coords
gt_of_rois = common_utils.rotate_points_along_z(
points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), angle=-roi_ry.view(-1)
).view(batch_size, -1, gt_of_rois.shape[-1])
# flip orientation if rois have opposite orientation
heading_label = gt_of_rois[:, :, 6] % (2 * np.pi) # 0 ~ 2pi
opposite_flag = (heading_label > np.pi * 0.5) & (heading_label < np.pi * 1.5)
heading_label[opposite_flag] = (heading_label[opposite_flag] + np.pi) % (2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi)
flag = heading_label > np.pi
heading_label[flag] = heading_label[flag] - np.pi * 2 # (-pi/2, pi/2)
heading_label = torch.clamp(heading_label, min=-np.pi / 2, max=np.pi / 2)
gt_of_rois[:, :, 6] = heading_label
targets_dict['gt_of_rois'] = gt_of_rois
return targets_dict
def get_box_reg_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
code_size = self.box_coder.code_size
reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1)
gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size]
gt_of_rois_src = forward_ret_dict['gt_of_rois_src'][..., 0:code_size].view(-1, code_size)
rcnn_reg = forward_ret_dict['rcnn_reg'] # (rcnn_batch_size, C)
roi_boxes3d = forward_ret_dict['rois']
rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0]
fg_mask = (reg_valid_mask > 0)
fg_sum = fg_mask.long().sum().item()
tb_dict = {}
if loss_cfgs.REG_LOSS == 'smooth-l1':
rois_anchor = roi_boxes3d.clone().detach().view(-1, code_size)
rois_anchor[:, 0:3] = 0
rois_anchor[:, 6] = 0
reg_targets = self.box_coder.encode_torch(
gt_boxes3d_ct.view(rcnn_batch_size, code_size), rois_anchor
)
rcnn_loss_reg = self.reg_loss_func(
rcnn_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0),
reg_targets.unsqueeze(dim=0),
) # [B, M, 7]
rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1)
rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']
tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item()
if loss_cfgs.CORNER_LOSS_REGULARIZATION and fg_sum > 0:
                # TODO: needs to be checked
fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask]
fg_roi_boxes3d = roi_boxes3d.view(-1, code_size)[fg_mask]
fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size)
batch_anchors = fg_roi_boxes3d.clone().detach()
roi_ry = fg_roi_boxes3d[:, :, 6].view(-1)
roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3)
batch_anchors[:, :, 0:3] = 0
rcnn_boxes3d = self.box_coder.decode_torch(
fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size), batch_anchors
).view(-1, code_size)
rcnn_boxes3d = common_utils.rotate_points_along_z(
rcnn_boxes3d.unsqueeze(dim=1), roi_ry
).squeeze(dim=1)
rcnn_boxes3d[:, 0:3] += roi_xyz
loss_corner = loss_utils.get_corner_loss_lidar_v1(
rcnn_boxes3d[:, 0:7],
gt_of_rois_src[fg_mask][:, 0:7]
)
loss_corner = loss_corner.mean()
loss_corner = loss_corner * loss_cfgs.LOSS_WEIGHTS['rcnn_corner_weight']
rcnn_loss_reg += loss_corner
tb_dict['rcnn_loss_corner'] = loss_corner.item()
else:
raise NotImplementedError
return rcnn_loss_reg, tb_dict
def get_box_cls_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
rcnn_cls = forward_ret_dict['rcnn_cls']
rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy':
rcnn_cls_flat = rcnn_cls.view(-1)
batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none')
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
elif loss_cfgs.CLS_LOSS == 'CrossEntropy':
batch_loss_cls = F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1)
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
else:
raise NotImplementedError
rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight']
tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.item()}
return rcnn_loss_cls, tb_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
rcnn_loss = 0
rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_cls
tb_dict.update(cls_tb_dict)
rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_reg
tb_dict.update(reg_tb_dict)
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss, tb_dict
def generate_predicted_boxes(self, batch_size, rois, cls_preds, box_preds):
"""
Args:
batch_size:
rois: (B, N, 7)
cls_preds: (BN, num_class)
box_preds: (BN, code_size)
        Returns:
            batch_cls_preds: (B, N, num_class or 1)
            batch_box_preds: (B, N, code_size)
        """
code_size = self.box_coder.code_size
# batch_cls_preds: (B, N, num_class or 1)
batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1])
batch_box_preds = box_preds.view(batch_size, -1, code_size)
roi_ry = rois[:, :, 6].view(-1)
roi_xyz = rois[:, :, 0:3].view(-1, 3)
local_rois = rois.clone().detach()
local_rois[:, :, 0:3] = 0
batch_box_preds = self.box_coder.decode_torch(batch_box_preds, local_rois).view(-1, code_size)
batch_box_preds = common_utils.rotate_points_along_z(
batch_box_preds.unsqueeze(dim=1), roi_ry
).squeeze(dim=1)
batch_box_preds[:, 0:3] += roi_xyz
batch_box_preds = batch_box_preds.view(batch_size, -1, code_size)
return batch_cls_preds, batch_box_preds
|
[
"1094036832@qq.com"
] |
1094036832@qq.com
|
0c8c1261d797d256dc1620389e22afee4a0ae92c
|
7c2cebd157e19714962f6e2ca930f148a08ff333
|
/instructors/urls.py
|
208f4945a13b67f6f986c444d3c29b1b9099e621
|
[] |
no_license
|
Code-Institute-Submissions/black-panther-fitness
|
1f380f9b338e0126d01ac6fa8f30025424265b60
|
ceafccad54f6eaab3deb4144d4a930f057db4fec
|
refs/heads/master
| 2023-01-22T14:28:39.399311
| 2020-12-04T15:55:33
| 2020-12-04T15:55:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.all_instructors, name='instructors'),
path('<instructor_id>', views.instructor_detail, name='instructor_detail'),
]
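# For reference, a minimal sketch of the two views these routes expect
# (hypothetical -- the real implementations live in instructors/views.py):
#
#   def all_instructors(request):
#       instructors = Instructor.objects.all()
#       return render(request, 'instructors/instructors.html',
#                     {'instructors': instructors})
#
#   def instructor_detail(request, instructor_id):
#       instructor = get_object_or_404(Instructor, pk=instructor_id)
#       return render(request, 'instructors/instructor_detail.html',
#                     {'instructor': instructor})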
|
[
"darylhyde_13@hotmail.com"
] |
darylhyde_13@hotmail.com
|
ad57f4527445a4f8ee853506abb8efae93d2a7ba
|
49a1b328cf3735d05364150940eb5f57e045edb9
|
/plot_vodV5_C.py
|
13f8959c2ac8648460e137d1b0781dabd3664bc6
|
[] |
no_license
|
amucia2/LDAS-Monde
|
465556c78982df4e4916671c475728f724d8d6d0
|
cbf596075fc2ce0b8fe77c04f77be4eacceb8994
|
refs/heads/main
| 2023-04-13T09:17:17.688228
| 2021-04-21T09:59:29
| 2021-04-21T09:59:29
| 360,107,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,502
|
py
|
import os
import sys
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import pandas as pd
from math import radians, cos, sin, asin, sqrt
from scipy.stats.stats import pearsonr
import pylab as pl
import glob, re, math, time
from tqdm import tqdm
def filter_nan(s,o):
"""
    this function removes entries from the simulated and observed data
    wherever the observed data contains NaN;
    it is used by all the other functions, which would otherwise
    produce NaN as output
"""
data = np.array([s.flatten(),o.flatten()])
data = np.transpose(data)
data = data[~np.isnan(data).any(1)]
#data = data[~np.isnan(data)]
return data[:,0],data[:,1]
def rmse(s,o):
"""
Root Mean Squared Error
input:
s: simulated
o: observed
output:
rmses: root mean squared error
"""
s,o = filter_nan(s,o)
return np.sqrt(np.mean((s-o)**2))
def correlation(s,o):
"""
correlation coefficient
input:
s: simulated
o: observed
output:
correlation: correlation coefficient
"""
s,o = filter_nan(s,o)
if s.size == 0:
corr = np.NaN
else:
#corr = np.corrcoef(o, s)[0,1]
corr = pearsonr(o, s)
return corr
def bias(s, o):
"""
Bias
input:
s: simulated
o: observed
output:
bias: bias
"""
s,o = filter_nan(s,o)
return np.mean(s-o)
def ubrmse(s,o):
"""
Unbiased root Mean Squared Error
input:
s: simulated
o: observed
output:
ubrmses: unbiased root mean squared error
"""
s,o = filter_nan(s,o)
n = len(o)
if (n!=0):
o_moy = np.mean(o)
s_moy = np.mean(s)
somme = 0.
for i in range(n):
somme = somme + ((s[i]-s_moy)-(o[i]-o_moy))**2
return np.sqrt(somme/n)
else:
return np.nan
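# Quick sanity check of the two error metrics (illustration): for
# s = [1, 2, 3] and o = [2, 3, 4] the bias is -1 everywhere, so
# rmse(s, o) = 1.0 while ubrmse(s, o) = 0.0 -- the unbiased RMSE removes the
# systematic offset and keeps only the random component of the error.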
FILL = 999.
def readFromFile(file_name,var_names,row_slice=None,col_slice=None,row_invert=False):
'''
Read values from netcdf or hdf5 file
Arguments:
file_name (str): full path to the file to read
var_names (list): names of variables to read
row_slice/col_slice (slices): indices to slice the variables to the output region
        row_invert (bool): if True, row order is inverted
Returns:
obs_values (list): values read from file
'''
# determine format
if file_name[-2:] not in ('h5','nc'):
        raise Exception('filename extension must be h5 or nc')
file_format = file_name[-2:]
if isinstance(var_names,str): var_names = [var_names]
obs_values = []
if file_format == 'nc':
fid = Dataset(file_name)
for i in range(len(var_names)):
v = fid.variables[var_names[i]]
tmp = v[:].squeeze()
if row_invert: tmp = tmp[::-1,:]
if row_slice is not None and col_slice is not None: tmp = tmp[row_slice,col_slice]
tmp = tmp.astype(float)
if not type(tmp) == pl.ma.core.MaskedArray: tmp = pl.ma.masked_array(tmp)
for a in v.ncattrs():
if a.lower() in ['missing_value','_fillvalue']: tmp.mask = pl.logical_or(tmp.mask,(tmp==float(v.getncattr(a))))
tmp = tmp.filled(FILL)
obs_values.append(tmp)
fid.close()
return obs_values
def getModelPatchFractions(mod_pgd_path):
'''
Read the patch fractions of model grid points from file.
Arguments:
mod_pgd_path (str): directory of PREP.nc
Returns:
mod_patch_frac (numpy.ndarray): patch fractions of each model grid point; shape = n_points,n_patches
'''
    # NB: the mod_pgd_path argument is ignored -- it is overridden by this hard-coded path
    mod_pgd_path = '/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US00/sfx-trip/pgd_prep/'
prep_file = mod_pgd_path+'PREP.nc'
if not os.path.isfile(prep_file):
raise Exception(prep_file+' does not exist.')
frac = readFromFile(prep_file,['PATCH'])[0]
return frac.reshape((frac.shape[0],-1))
patch_frac = getModelPatchFractions('')
patch_frac[patch_frac==999.0]=0.
def save(path, ext='png', close=True, verbose=True):
"""Save a figure from pyplot.
Parameters
----------
path : string
The path (and filename, without the extension) to save the
figure to.
ext : string (default='png')
The file extension. This must be supported by the active
matplotlib backend (see matplotlib.backends module). Most
backends support 'png', 'pdf', 'ps', 'eps', and 'svg'.
close : boolean (default=True)
Whether to close the figure after saving. If you want to save
the figure multiple times (e.g., to multiple formats), you
should NOT close it in between saves or you will have to
re-plot it.
verbose : boolean (default=True)
whether to print information about when and where the image
has been saved.
"""
# Extract the directory and filename from the given path
directory = os.path.split(path)[0]
filename = "%s.%s" % (os.path.split(path)[1], ext)
if directory == '':
directory = '.'
#If the directory does not exist, create it
if not os.path.exists(directory):
os.makedirs(directory)
# The final path to save to
savepath = os.path.join(directory, filename)
if verbose:
print("Saving figure to '%s'..." % savepath),
# Actually save the figure
plt.savefig(savepath)
# Close it
if close:
plt.close()
if verbose:
print("Done")
# silence all warnings by replacing warnings.warn with a no-op
def warn(*args, **kwargs):
    pass
import warnings
warnings.warn = warn
import pickle
#tt1 = pd.read_pickle('../CONUS_025_VODX_2003_2006.PData')
#tt2 = pd.read_pickle('../CONUS_025_VODX_2007_2011.PData')
#tt3 = pd.read_pickle('../CONUS_025_VODX_2012_2016.PData')
#tt_lai = pd.concat([tt1,tt2,tt3],axis=1)
#tt_vod = tt_lai
#sys.exit()
#'''
#tt_lai = pd.read_pickle('CONUS_025_VODX_2003_2016.PData')
#tt_vod = pd.read_pickle('CONUS_025_VODX_2003_2016.PData')
numbers = re.compile(r'(\d+)')
def numericalSort(value):
    # natural sort key: split on digit runs and compare the numeric parts as
    # integers, so that embedded numbers sort numerically, not lexically
    parts = numbers.split(value)
    parts[1::2] = map(int, parts[1::2])
    return parts
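# e.g. numericalSort('LAI_V2_2003.PData') -> ['LAI_V', 2, '_', 2003, '.PData'];
# comparing the numeric fields as ints makes ..._2010 sort after ..._2003
# (and 'file10' after 'file9', which plain string comparison gets wrong).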
### Importing all the info separately, since it is not in a Panel; using a Panel would require rewriting large amounts of code that relies on Panel selection
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US00/Panels/LAI_V2*.PData'),key=numericalSort)
df = [pd.read_pickle(f) for f in file[3:19]]
lai_cgls_ = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US00/Panels/LAI_20*.PData'),key=numericalSort)
df = [pd.read_pickle(f) for f in file[3:19]]
lai_isba = pd.concat(df)
date = pd.date_range(start="2003-01-01 09:00:00",end='2018-12-31 09:00:00',freq='D')
lai_cgls = lai_cgls_.reindex(date,fill_value=np.nan)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US00/observations/sfx-trip/VODC*.PData'),key=numericalSort)
df = [pd.read_pickle(f) for f in file[0:16]]
vodx = pd.concat(df)
#mvodx = pd.read_pickle('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US00/LAIfromVODCAX_V4.PData')
### Currently cannot import Panels, and the pickles are broken as well
#tt_lai = pd.read_pickle('CONUS_2000_2018_LAI_VOD_Panel.PData')
#tt_vod = pd.read_pickle('CONUS_2000_2018_LAI_VOD_Panel.PData')
#tt_lai = pickle.load('CONUS_2000_2018_LAI_VOD_Panel.PData',protocol=2)
'''
Model : LAI ISBA
Analysis : LAI CGLS
Obs : VOD
'''
#test = lai_cgls.mean(axis=1)[~np.isnan(lai_cgls.mean(axis=1))]
#for i in range((tt_vod.shape[2])):
# vodx[i] = pd.rolling_mean(vodx[i],30)
'''
fig, ax1 = plt.subplots()
ax1.plot(test,label='LAI obs.',color='r',linewidth=0,marker='*')
ax1.plot(lai_isba.mean(axis=1),label='LAI isba',color='red')
plt.rcParams.update({'font.size': 15})
ax1.legend(loc='best')
ax1.set_ylabel('LAI',color='red')
ax2 = ax1.twinx()
ax2.plot(vodx.mean(axis=1),label='Cband VOD',color='blue')
ax2.set_ylabel('VOD',color='blue')
plt.show()
'''
'''
corr_tmp_VOD_LAIisba = vodx.corrwith(lai_isba,axis=0)
corr_tmp_VOD_LAIobs = vodx.corrwith(lai_cgls,axis=0)
corr_tmp_LAIisba_LAIobs = lai_isba.corrwith(lai_cgls,axis=0)
corr_tmp_VOD_LAIisba.values[pl.where(pl.isinf(corr_tmp_VOD_LAIisba.values))] = pl.nan
corr_tmp_VOD_LAIobs.values[pl.where(pl.isinf(corr_tmp_VOD_LAIobs.values))] = pl.nan
corr_tmp_LAIisba_LAIobs.values[pl.where(pl.isinf(corr_tmp_LAIisba_LAIobs.values))] = pl.nan
corr_tmp_MVOD_LAIobs = mvodx.corrwith(lai_cgls,axis=0)
corr_tmp_MVOD_LAIobs.values[pl.where(pl.isinf(corr_tmp_MVOD_LAIobs.values))] = pl.nan
v = lai_cgls.mean().values.reshape((140,280))
v2 = vodx.mean().values.reshape((140,280))
v3 = mvodx.mean().values.reshape((140,280))
#v4 = corr_tmp_VOD_LAIobs.values.reshape((140,280))
v4 = corr_tmp_MVOD_LAIobs.values.reshape((140,280))
'''
'''
plt.subplot(221) ; plt.imshow(v,origin='lower',vmin=0,vmax=6,cmap="RdYlGn") ; plt.title('GEOV2 LAI') ; cbar = plt.colorbar(orientation='horizontal',fraction=0.08, pad=0.1)
plt.subplot(222) ; plt.imshow(v2,origin='lower',vmin=0,cmap="RdYlGn"); plt.title('VODCA VOD-X') ; cbar = plt.colorbar(orientation='horizontal',fraction=0.08, pad=0.1)
plt.subplot(223) ; plt.imshow(v3,origin='lower',vmin=0,vmax=6,cmap="RdYlGn"); plt.title('Matched VOD-X') ; cbar = plt.colorbar(orientation='horizontal',fraction=0.08, pad=0.1)
plt.subplot(224) ; plt.imshow(v4,origin='lower',vmin=0,vmax=1,cmap="RdYlGn"); plt.title('R : Matched VOD-X vs LAI GEOV2') ; cbar = plt.colorbar(orientation='horizontal',fraction=0.08, pad=0.1)
plt.rcParams.update({'font.size': 10})
plt.show()
'''
'''
v = corr_tmp_VOD_LAIisba.values.reshape((140,280))
v2 = corr_tmp_VOD_LAIobs.values.reshape((140,280))
v3 = corr_tmp_LAIisba_LAIobs.values.reshape((140,280))
plt.subplot(221) ; plt.imshow(v,origin='lower',vmin=0,vmax=1.,cmap='bwr') ; plt.title('R : VODCA-X vs. LAI ISBA') ; cbar = plt.colorbar(orientation='horizontal')
plt.subplot(222) ; plt.imshow(v2,origin='lower',vmin=0,vmax=1.,cmap='bwr'); plt.title('R : VODCA-X vs. LAI GEOV2') ; cbar = plt.colorbar(orientation='horizontal')
plt.subplot(223) ; plt.imshow(v3,origin='lower',vmin=0,vmax=1,cmap='bwr'); plt.title('R : LAI ISBA vs. LAI GEOV2') ; cbar = plt.colorbar(orientation='horizontal')
plt.rcParams.update({'font.size': 10})
plt.show()
'''
'''
print('R : VOD vs LAIisba (mean / median): ', corr_tmp_VOD_LAIisba.mean() ,' : ',corr_tmp_VOD_LAIisba.median())
print('R : VOD vs LAIobs (mean / median): ', corr_tmp_VOD_LAIobs.mean() ,' : ',corr_tmp_VOD_LAIobs.median())
print('R : LAIisba vs LAIobs (mean / median): ', corr_tmp_LAIisba_LAIobs.mean() ,' : ',corr_tmp_LAIisba_LAIobs.median())
'''
toto = vodx.copy()*np.nan
#sys.exit()
#'''
#for i in tqdm(range((vodx.shape[2]))):
for i in tqdm(range((vodx.shape[1]))):
# season=((vodx.index.month>=0))
# season=((vodx.index.month>=4) & (vodx.index.month<=9))
# if ((len(vodx[i].ix[season][~np.isnan(vodx[i].ix[season])]) > 0) & ((patch_frac[0,i]+patch_frac[1,i]+patch_frac[2,i]) < 0.80)):
# try:
# aa = np.polyfit(vodx[i].ix[season][~np.isnan(vodx[i].ix[season])],\
# vodx[i].ix[season][~np.isnan(vodx[i].ix[season])],2)
# toto['Obs'][i].ix[season] = aa[0]*vodx[i].ix[season]**2+aa[1]*vodx[i].ix[season]+aa[2]
# toto['Model'][i] = correlation(toto['Obs'][i].ix[season].values,vodx[i].ix[season].values)[0]
# print i, correlation(toto['Obs'][i].ix[season].values,vodx[i].ix[season].values)[0]
# except ValueError:
# print('ValueError')
#
# season2=((vodx.index.month<=3) | (vodx.index.month>=10))
# if ((len(vodx[i].ix[season2][~np.isnan(vodx[i].ix[season2])]) > 0) & (patch_frac[0,i]+patch_frac[1,i] < 0.10)):
# try:
# aa2 = np.polyfit(vodx[i].ix[season2][~np.isnan(vodx[i].ix[season2])],\
# vodx[i].ix[season2][~np.isnan(vodx[i].ix[season2])],2)
# toto['Obs'][i].ix[season2] = aa2[0]*vodx[i].ix[season2]**2+aa2[1]*vodx[i].ix[season2]+aa2[2]
# toto['Model'][i] = correlation(toto['Obs'][i].ix[season2].values,vodx[i].ix[season2].values)[0]
# print i, correlation(toto['Obs'][i].ix[season2].values,vodx[i].ix[season2].values)[0]
# except ValueError:
# print('ValueError')
# if ( (len(toto['Obs'][i][~np.isnan(toto['Obs'][i])])> 0) & (patch_frac[0,i]+patch_frac[1,i] < 0.10)):
# toto['Model'][i] = correlation(toto['Obs'][i].values,vodx[i].values)[0]
# toto['Analysis'][i] = ubrmse(toto['Obs'][i].values,vodx[i].values)
# print i, toto['Model'][i].ix[0],toto['Analysis'][i].ix[0]
    # Seasonal rescaling: for each target month m, fit a linear mapping
    # LAI = a + b*VOD whose slope and intercept match the mean and std of the
    # two series over the three-month window centred on m, then apply the fit
    # to month m only (the windows wrap around the year end).
    for m in range(1, 13):
        prev_m = 12 if m == 1 else m - 1
        next_m = 1 if m == 12 else m + 1
        season = ((vodx.index.month == prev_m) | (vodx.index.month == m) | (vodx.index.month == next_m))
        season2 = (vodx.index.month == m)
        if ((len(vodx[i].ix[season][~np.isnan(vodx[i].ix[season])]) > 0) & (patch_frac[0,i]+patch_frac[1,i] < 0.80)):
            try:
                # Earlier variant, kept for reference: 2nd-order polynomial fit
                #aa = np.polyfit(vodx[i].ix[season][~np.isnan(vodx[i].ix[season])],\
                #                vodx[i].ix[season][~np.isnan(vodx[i].ix[season])],2)
                #toto['Obs'][i].ix[season2] = aa[0]*vodx[i].ix[season2]**2+aa[1]*vodx[i].ix[season2]+aa[2]
                b = (lai_cgls[i].ix[season][~np.isnan(lai_cgls[i].ix[season])]).std()/\
                    (vodx[i].ix[season][~np.isnan(vodx[i].ix[season])]).std()
                a = (lai_cgls[i].ix[season][~np.isnan(lai_cgls[i].ix[season])]).mean() - \
                    b*(vodx[i].ix[season][~np.isnan(vodx[i].ix[season])]).mean()
                toto[i].ix[season2] = b*(vodx[i].ix[season2]) + a
            except ValueError:
                print('ValueError')
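# Worked example of the fit above (illustration): if, over a window, LAI has
# mean 2.0 / std 0.8 and VOD has mean 0.5 / std 0.2, then b = 0.8/0.2 = 4.0
# and a = 2.0 - 4.0*0.5 = 0.0, so a VOD value of 0.6 is mapped to LAI 2.4.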
toto[toto>10]=np.nan
toto[toto<0]=np.nan
'''
for i in range((vodx.shape[2])):
if ( (len(toto['Obs'][i][~np.isnan(toto['Obs'][i])])> 0) & (patch_frac[0,i]+patch_frac[1,i] < 0.80)):
#toto['Obs'][i] = pd.rolling_mean(toto['Obs'][i],30)
toto['Model'][i] = ubrmse(toto['Obs'][i].values,vodx[i].values)
#toto['Model'][i] = correlation(toto['Obs'][i].values,vodx[i].values)[0]
toto['Analysis'][i] = ubrmse(toto['Obs'][i].values,lai_isba[i].values)
#toto['Analysis'][i] = correlation(toto['Obs'][i].values,lai_isba[i].values)[0]
print(i, toto['Model'][i].ix[0],toto['Analysis'][i].ix[0])
'''
'''
v = toto['Obs'].mean(axis=0).values.reshape((140,280))
plt.subplot(221) ; plt.imshow(v,origin='lower') ; plt.title('LAI from VOD')
cbar = plt.colorbar(orientation='horizontal')
v = lai_cgls.mean(axis=0).values.reshape((140,280))
plt.subplot(222) ; plt.imshow(v,origin='lower') ; plt.title('LAI GEOV2')
cbar = plt.colorbar(orientation='horizontal')
v = toto['Model'].ix[0].values.reshape((140,280))
plt.subplot(223) ; plt.imshow(v,origin='lower') ; plt.title('ubRMSD : LAI GEOV2 vs LAI from VOD')
cbar = plt.colorbar(orientation='horizontal')
v = toto['Analysis'].ix[0].values.reshape((140,280))
plt.subplot(224) ; plt.imshow(v,origin='lower') ; plt.title('ubRMSD : LAI ISBA vs LAI from VOD')
#plt.subplot(224) ; plt.imshow(v,origin='lower') ; plt.title('R : LAI ISBA vs LAI from VOD')
cbar = plt.colorbar(orientation='horizontal')
plt.rcParams.update({'font.size': 10})
plt.show()
#save('VOD_FIT_CDF', ext="ps", close=True, verbose=True)
v = toto['Obs'].mean(axis=0).values.reshape((140,280))
plt.subplot(221) ; plt.imshow(v,origin='lower') ; plt.title('LAI from VOD')
cbar = plt.colorbar(orientation='horizontal')
v = lai_cgls.mean(axis=0).values.reshape((140,280))
plt.subplot(222) ; plt.imshow(v,origin='lower') ; plt.title('LAI GEOV2')
cbar = plt.colorbar(orientation='horizontal')
v = toto['Obs'].mean(axis=0).values.reshape((140,280))-lai_cgls.mean(axis=0).values.reshape((140,280))
plt.subplot(223) ; plt.imshow(v,origin='lower')
cbar = plt.colorbar(orientation='horizontal')
#v = toto['Model'].ix[0].reshape((140,280))
#plt.subplot(223) ; plt.imshow(v,origin='lower') ; plt.title('ubRMSD : LAI GEOV2 vs LAI from VOD')
#cbar = plt.colorbar(orientation='horizontal')
#v = toto['Analysis'].ix[0].reshape((140,280))
#plt.subplot(224) ; plt.imshow(v,origin='lower') ; plt.title('ubRMSD : LAI ISBA vs LAI from VOD')
##plt.subplot(224) ; plt.imshow(v,origin='lower') ; plt.title('R : LAI ISBA vs LAI from VOD')
#cbar = plt.colorbar(orientation='horizontal')
plt.rcParams.update({'font.size': 10})
plt.show()
'''
'''
#plt.plot(pd.rolling_mean(toto['Obs'],window=30,min_periods=5,center=True),\
# label='LAI from VOD',marker='*',linewidth=0) ; plt.plot(test, label='LAI_obs',marker='*',linewidth=0)
#plt.plot(lai_isba.mean(axis=1),label='LAI_ISBA') ; plt.legend(loc='best') ; plt.show()
#for ii in [1,2,3,4,5,6,7,8,9,10,11,12]:
# season = (vodx.index.month == ii)
#
# corr_tmp_VOD_LAIisba = vodx.ix[season,:].corrwith(lai_isba.ix[season,:],axis=0)
# corr_tmp_VOD_LAIobs = vodx.ix[season,:].corrwith(vodx.ix[season,:],axis=0)
# corr_tmp_LAIisba_LAIobs = lai_isba.ix[season,:].corrwith(vodx.ix[season,:],axis=0)
#
# corr_tmp_VOD_LAIisba.values[pl.where(pl.isinf(corr_tmp_VOD_LAIisba.values))] = pl.nan
# corr_tmp_VOD_LAIobs.values[pl.where(pl.isinf(corr_tmp_VOD_LAIobs.values))] = pl.nan
# corr_tmp_LAIisba_LAIobs.values[pl.where(pl.isinf(corr_tmp_LAIisba_LAIobs.values))] = pl.nan
#
# v = corr_tmp_VOD_LAIisba.values.reshape((140,280))
# v2 = corr_tmp_VOD_LAIobs.values.reshape((139,280))
# v3 = corr_tmp_LAIisba_LAIobs.values.reshape((140,280))
# plt.subplot(221) ; plt.imshow(v,origin='lower') ; plt.title('R : VOD vs. LAI ISBA') ; cbar = plt.colorbar(orientation='horizontal')
# plt.subplot(222) ; plt.imshow(v2,origin='lower'); plt.title('R : VOD vs. LAI OBS') ; cbar = plt.colorbar(orientation='horizontal')
# plt.subplot(223) ; plt.imshow(v3,origin='lower'); plt.title('R : LAI ISBA vs. LAI OBS') ; cbar = plt.colorbar(orientation='horizontal')
# plt.rcParams.update({'font.size': 12})
# plt.show()
# print 'R : VOD vs LAIisba (mean / median): ', corr_tmp_VOD_LAIisba.mean() ,' : ',corr_tmp_VOD_LAIisba.median()
# print 'R : VOD vs LAIobs (mean / median): ', corr_tmp_VOD_LAIobs.mean() ,' : ',corr_tmp_VOD_LAIobs.median()
# print 'R : LAIisba vs LAIobs (mean / median): ', corr_tmp_LAIisba_LAIobs.mean() ,' : ',corr_tmp_LAIisba_LAIobs.median()
'''
#A = pd.rolling_mean(toto,window=30,min_periods=5,center=True)
A = toto.rolling(window=30,min_periods=5,center=True).mean()
#A.to_pickle('LAIfromVODCAX_V5_rollingMean.PData')
#A = toto['Obs']
#toto['Obs'].to_pickle('LAIfromVODCAX_V4.PData')
#A = toto['Obs']
#sys.exit()
#A = pd.read_pickle('LAIfromVODCAX_V4.PData')
for d in tqdm(range(len(A))):
    if d < 5844:  # 5844 daily steps = 2003-01-01 through 2018-12-31 inclusive
        #print(A.index[d])
        try:
            #print(min(A.ix[d][~np.isnan(A.ix[d])]), max(A.ix[d][~np.isnan(A.ix[d])]))
            # Replace NaN values with the 999 fill value
            A.ix[d][np.isnan(A.ix[d])] = 999
# Specify date for file name
YY = str(A.index[d])[2:4]
MM = str(A.index[d])[5:7]
DD = str(A.index[d])[8:10]
# create file
f= open('VODC_2_LAI_cdf/CANARI/CANARI_NATURE_'+YY+MM+DD+'H09.DAT','a+')
# write in file
for i in range(len(A.ix[d])):
#print(A.ix[d][i])
f.write(str(A.ix[d][i])+'\n')
#time.sleep(0.01)
# close file
f.close()
except ValueError:
print('No values')
            # Replace NaN values with the 999 fill value
            A.ix[d][np.isnan(A.ix[d])] = 999
# Specify date for file name
YY = str(A.index[d])[2:4]
MM = str(A.index[d])[5:7]
DD = str(A.index[d])[8:10]
# create file
f= open('VODC_2_LAI_cdf/CANARI/CANARI_NATURE_'+YY+MM+DD+'H09.DAT','a+')
# write in file
for i in range(len(A.ix[d])):
f.write(str(A.ix[d][i])+'\n')
# close file
f.close()
#A.to_pickle('LAIfromVODCAX_V5_rollingMean.PData')
import joblib
joblib.dump(A,'LAIfromVODCAC_V7_2003-2018.PData')
'''
a= []
for d in range(len(A)):
#if d > 2191:
print(A.index[d])
try:
print(min(A.ix[d][~np.isnan(A.ix[d])]), max(A.ix[d][~np.isnan(A.ix[d])]))
# Clean nan values
print(len(A.ix[d]))
A.ix[d][np.invert(~np.isnan(A.ix[d]))] = 999
# Specify date for file name
YY = str(A.index[d])[2:4]
MM = str(A.index[d])[5:7]
DD = str(A.index[d])[8:10]
# create file
time.sleep(0.01)
f= open('VOD_2_LAI_cdf/CANARI/CANARI_NATURE_'+YY+MM+DD+'H09.DAT','w+')
print(len(A.ix[d]))
# write in file
for i in range(len(A.ix[d])):
#print(A.ix[d][i])
f.write(str(A.ix[d][i])+'\n')
# close file
time.sleep(0.01)
f.close()
except ValueError:
print('No values')
# Clean nan values
A.ix[d][np.invert(~np.isnan(A.ix[d]))] = 999
# Specify date for file name
YY = str(A.index[d])[2:4]
MM = str(A.index[d])[5:7]
DD = str(A.index[d])[8:10]
# create file
f= open('VOD_2_LAI_cdf/CANARI/CANARI_NATURE_'+YY+MM+DD+'H09.DAT','w+')
# write in file
for i in range(len(A.ix[d])):
f.write(str(A.ix[d][i])+'\n')
# close file
f.close()
'''
'''
for d in range(len(A)):
if len(A.ix[d]) != 39200:
print("Error")
for d in range(len(A)):
#if d > 2191:
print(A.index[d])
try:
print(min(A.ix[d][~np.isnan(A.ix[d])]), max(A.ix[d][~np.isnan(A.ix[d])]))
# Clean nan values
print(len(A.ix[d]))
A.ix[d][np.invert(~np.isnan(A.ix[d]))] = 999
# Specify date for file name
YY = str(A.index[d])[2:4]
MM = str(A.index[d])[5:7]
DD = str(A.index[d])[8:10]
# create file
#time.sleep(0.01)
f= open('VOD_2_LAI_cdf/CANARI/CANARI_NATURE_'+YY+MM+DD+'H09.DAT','w+')
print(len(A.ix[d]))
# write in file
w = A.ix[d]
f.write(str(w)+'\n')
# close file
#time.sleep(0.01)
f.close()
except ValueError:
print('No values')
# Clean nan values
A.ix[d][np.invert(~np.isnan(A.ix[d]))] = 999
# Specify date for file name
YY = str(A.index[d])[2:4]
MM = str(A.index[d])[5:7]
DD = str(A.index[d])[8:10]
# create file
f= open('VOD_2_LAI_cdf/CANARI/CANARI_NATURE_'+YY+MM+DD+'H09.DAT','w+')
# write in file
for i in range(len(A.ix[d])):
f.write(str(A.ix[d][i])+'\n')
# close file
f.close()
'''
|
[
"noreply@github.com"
] |
noreply@github.com
|
2c6de1d98469164b77e496a0c33bfd4a67f22e17
|
1f5420fda4359bfc21b53de3a5f6e6a93b47b996
|
/ch02/ch02_menu.py
|
5abfa489c2386f900a6c3f914341bd20f4c6a22b
|
[] |
no_license
|
fl0wjacky/wxPython
|
600f5bfccad3ef5589e11573b30cffd1e2708b83
|
50b3cd5a63750d36065684b73aab0da70ff650a7
|
refs/heads/master
| 2022-09-02T04:24:47.540157
| 2022-08-10T04:13:17
| 2022-08-10T04:13:17
| 13,976,582
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,071
|
py
|
#! /usr/bin/env python
import wx
import wx.py.images as images
class ToolbarFrame(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'Toolbars', size=(300,200))
panel = wx.Panel(self)
panel.SetBackgroundColour('White')
statusBar = self.CreateStatusBar()#1
toolbar = self.CreateToolBar()#2
toolbar.AddSimpleTool(wx.NewId(),images.getPyBitmap(),"New","Long help for 'New'")#3
toolbar.Realize()#4
menuBar = wx.MenuBar()
menu1 = wx.Menu()
menuBar.Append(menu1,"&File")
menu2 = wx.Menu()
#6
menu2.Append(wx.NewId(),"&Copy","Copy in status bar")
menu2.Append(wx.NewId(),"C&ut","")
menu2.Append(wx.NewId(),"Paste","")
menu2.AppendSeparator()
menu2.Append(wx.NewId(),"&Options...","Display Options")
menuBar.Append(menu2,"&Edit")
self.SetMenuBar(menuBar)
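        # Note (sketch, not part of the original example): the menu items above
        # are inert; wiring one up would look like
        #   item = menu2.Append(wx.NewId(), "&Copy", "Copy in status bar")
        #   self.Bind(wx.EVT_MENU, self.OnCopy, item)
        # with a matching OnCopy(self, event) handler defined on this frame.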
if __name__ == "__main__":
app = wx.PySimpleApp()
frame = ToolbarFrame(parent=None, id = -1)
frame.Show()
app.MainLoop()
|
[
"flowjacky@gmail.com"
] |
flowjacky@gmail.com
|
e9969c27708523aa763276b502fa9419d9cf3558
|
8de8185760c9d3696d7244bb04b0c48d60b73e9b
|
/crawler/tech/gizmodo.py
|
b43d2c9a2466e702dfdeaefc63f48a1c08736273
|
[] |
no_license
|
text-master/textmaster
|
8e9a36003efff020ac44016c7313df4ad0030cc3
|
ba3b9dc09f36bd42a441ed812f8ec66ec853494f
|
refs/heads/master
| 2021-01-24T04:43:11.534736
| 2018-05-12T03:06:21
| 2018-05-12T03:06:21
| 122,946,499
| 0
| 0
| null | 2018-02-26T12:25:02
| 2018-02-26T09:33:40
| null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
from bs4 import BeautifulSoup
import requests
URL = "http://gizmodo.com/tag/science?startIndex=%d"
file = open('gizmodo.txt', 'a')
# r = requests.get("https://techcrunch.com/page/2")
for i in range(6, 10):  # xrange -> range for Python 3
    index = i*20
    r = requests.get(URL % index)
    print(URL % index)
    soup = BeautifulSoup(r.text, 'lxml')
    for link in soup.select('.post-list--pe .headline a'):
        link = link.get('href')
        print(link)
        article = requests.get(link)
        article_soup = BeautifulSoup(article.text, 'lxml')
        para = article_soup.select('.post-content')[0].find_all('p')
        for p in para:
            # keep text as str: Python 3 text-mode files reject bytes
            text = p.getText().strip()
            file.write('\n')
            file.write(text)
        file.write('\n!!!@@@$$$\n')
|
[
"namhoang.td09@gmail.com"
] |
namhoang.td09@gmail.com
|
88563445332a6acdcd4a280669e4ef1ca4812a12
|
6c6cf74998bd03586c23e8881336d9d3cb279582
|
/pypecast/models/model.py
|
da368611e2b4eafb24ab7d3e210e71258b10713d
|
[
"MIT"
] |
permissive
|
guilhermecano/PypeCast
|
7583d9f54a71e25a94e70fb76671a8db3e729b45
|
0282fb9e8fa8ca88e74cbab79e92f82fa3300799
|
refs/heads/master
| 2020-04-09T09:37:52.990972
| 2018-12-14T21:01:18
| 2018-12-14T21:01:18
| 160,240,449
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,907
|
py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np  # np and pd are used below but were previously only available via the wildcard import
import pandas as pd
from keras.layers import Dense, Dropout, LSTM, Input
from keras.models import Sequential
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
from pypecast.metrics.metrics import *
class Model(object):
'''Base class for models'''
def __init__(self,
n_lag,
n_seq):
        assert n_lag > 0 and n_seq > 0, 'n_lag and n_seq cannot be zero or negative'
self._model = None
self._forecasts = None
self._actual = None
self._naive_init = None
#Must define variables
self._n_lag = n_lag
self._n_seq = n_seq
    def summary(self):
        if self._model is not None:
            self._model.summary()  # Keras models expose summary(), not show()
        else:
            print('The model was not defined yet. Please use the fit() method first.')
def fit(self, train):
raise(NotImplementedError)
def _forecast_model(self, X):
# reshape input pattern to [samples, timesteps, features]
X = X.reshape(1, 1, len(X))
# make forecast
forecast = self._model.predict(X)
# convert to array
return [x for x in forecast[0, :]]
def _design_network(self, out_shape, input_shape=None):
raise(NotImplementedError)
def forecast_series(self, test, scaler, orig_series):
assert self._model is not None, "Model must be trained first"
if isinstance(orig_series,(list,)):
orig_series = np.array(orig_series)
if isinstance(test,(list,)):
test = np.array(test)
forecasts = list()
for i in range(len(test)):
X, y = test[i, 0:self._n_lag], test[i, self._n_lag:]
# make forecast
forecast = self._forecast_model(X)
# store the forecast
forecasts.append(forecast)
self._naive_init = orig_series[orig_series.shape[0] - test.shape[0] - self._n_seq]
#inverse_transform
forecasts = self._inverse_transform(orig_series,forecasts,scaler,test.shape[0]+2)
self._forecasts = forecasts
#Actual values
actual = [row[self._n_lag:] for row in test]
self._actual = self._inverse_transform(orig_series, actual, scaler, test.shape[0]+2)
return forecasts
def get_forecast(self):
return self._forecasts
# invert differenced forecast
def _inverse_difference(self,last_ob, forecast):
# invert first forecast
inverted = list()
inverted.append(forecast[0] + last_ob)
# propagate difference forecast using inverted first value
for i in range(1, len(forecast)):
inverted.append(forecast[i] + inverted[i-1])
return inverted
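    # e.g. _inverse_difference(10, [0.5, -0.2, 0.1]) returns [10.5, 10.3, 10.4]:
    # each step adds the predicted difference onto the previously reconstructed
    # value, undoing the differencing transform one step at a time.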
# inverse data transform on forecasts
def _inverse_transform(self, series, forecasts, scaler, n_test):
inverted = list()
for i in range(len(forecasts)):
# create array from forecast
forecast = np.array(forecasts[i])
forecast = forecast.reshape(1, len(forecast))
# invert scaling
if scaler[0] is not None:
forecast = scaler[0].inverse_transform(forecast)
forecast = forecast[0, :]
if scaler[1]:
# invert differencing
index = len(series) - n_test + i - 1
last_ob = series.values[index]
forecast = self._inverse_difference(last_ob, forecast)
inverted.append(forecast)
return inverted
def _use_metrics(self, actual, predicted):
#RMSE
m1 = rmse(actual, predicted)
#MAE
m2 = mae(actual, predicted)
#MAPE
m3 = mape(actual, predicted)
#sMAPE
m4 = smape(actual, predicted)
return m1,m2,m3,m4
def evaluate_forecast(self, save_report = False, filename = '../reports/evaluation.xlsx', return_dicts = False, verbose = 1):
if verbose!=0:
print('-'*20 + 'Forecast evaluation' + '-'*20)
print('')
steps_metrics = dict()
naive_metrics = dict()
instant_metrics = {'RMSE':[], 'MAE':[], 'MAPE':[], 'sMAPE': []}
# Metrics for each timestep in future
for i in range(self._n_seq):
if verbose!=0:
print('Step t+{}'.format(i+1))
actual = [row[i] for row in self._actual]
#print(np.array(actual))
predicted = [forecast[i] for forecast in self._forecasts]
m1,m2,m3,m4 = self._use_metrics(actual,predicted)
if verbose!=0:
print('t+%d RMSE: %f' % ((i+1), m1))
print('t+%d MAE: %f' % ((i+1), m2))
print('t+%d MAPE: %f' % ((i+1), m3))
print('t+%d sMAPE: %f' % ((i+1),m4))
steps_metrics[(i+1)] = [m1,m2,m3,m4]
if verbose!=0:
print('-'*60)
# Metrics for naive_model:
if verbose!=0:
print()
print('-'*20 + 'Naive forecast evaluation' + '-'*20)
        #Get persistent-series forecasts
last_ob = [row[0] for row in self._actual]
last_ob.pop()
last_ob.insert(0,self._naive_init)
#Evaluate the persistent case
naive_forecasts = list()
for i in last_ob:
lst = [i]*self._n_seq
naive_forecasts.append(lst)
for i in range(self._n_seq):
if verbose!=0:
print('Step t+{}'.format(i+1))
actual = [row[i] for row in self._actual]
naive = [nf[i] for nf in naive_forecasts]
m1,m2,m3,m4 = self._use_metrics(actual,naive)
if verbose!=0:
print('t+%d RMSE: %f' % ((i+1), m1))
print('t+%d MAE: %f' % ((i+1), m2))
print('t+%d MAPE: %f' % ((i+1), m3))
print('t+%d sMAPE: %f' % ((i+1),m4))
naive_metrics[(i+1)] = [m1,m2,m3,m4]
if verbose!=0:
print('-'*60)
if verbose!=0:
print()
print('-'*20 + 'Evaluation for each forecast' + '-'*20)
# Metrics for each instant in time-series
for i in range(len(self._actual)):
m1,m2,m3,m4 = self._use_metrics(self._actual[i],self._forecasts[i])
if verbose!=0:
print('Index %d RMSE: %f' % ((i+1), m1))
print('Index %d MAE: %f' % ((i+1), m2))
print('Index %d MAPE: %f' % ((i+1), m3))
print('Index %d sMAPE: %f' % ((i+1),m4))
instant_metrics['RMSE'].append(m1)
instant_metrics['MAE'].append(m2)
instant_metrics['MAPE'].append(m3)
instant_metrics['sMAPE'].append(m4)
if verbose!=0:
print('-'*60)
if save_report:
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(filename, engine='xlsxwriter')
            df1 = pd.DataFrame(steps_metrics, index=['RMSE','MAE','MAPE','sMAPE'])
            df2 = pd.DataFrame(naive_metrics, index=['RMSE','MAE','MAPE','sMAPE'])  # naive metrics, not a copy of df1
df3 = pd.DataFrame(instant_metrics)
# Write each dataframe to a different worksheet.
df1.to_excel(writer, sheet_name='Metrics by forecasted step')
df2.to_excel(writer, sheet_name='Naive forecast')
df3.to_excel(writer, sheet_name='Metrics at each index')
writer.save()
if return_dicts:
return steps_metrics, instant_metrics
def plot_forecasts(self, series, forecasts, test):
n_test = test.shape[0]+2
sns.set()
# plot the entire dataset in blue
warnings.filterwarnings("ignore")
plt.figure(0,figsize=[12,6])
plt.plot(series.values, label='True time-series')
# if self._n_seq == 1:
# plot the forecasts
for i in range(len(forecasts)):
off_s = len(series) - n_test + i
off_e = off_s + len(forecasts[i])
xaxis = [x for x in range(off_s, off_e)]
if i==0:
lb = 'Forecasted time-series'
else:
lb = None
if self._n_seq>1:
sns.lineplot(x=xaxis, y=forecasts[i], label=lb,color='r',hue_order=False)
else:
sns.scatterplot(x=xaxis, y=forecasts[i], label=lb,color='r',hue_order=False)
#plt.plot(xaxis, forecasts[i], color='red',label='Forecasted time-series')
# show the plot
plt.title('Forecasting in testing set of time-series')
plt.xlabel('timestep')
plt.ylabel('Value')
plt.show()
sns.reset_defaults()
|
[
"gui.c.lopes@gmail.com"
] |
gui.c.lopes@gmail.com
|
38c1fd8760bcce05b238e280801a028e7fa6b28f
|
7abea7671cfc3037b920b7883e592c1aab82e236
|
/blog/migrations/0005_alter_post_timestamp.py
|
947b852666cae5b39968672274f7db3a44d16b24
|
[] |
no_license
|
RRM00/blog-assignment
|
ae0810e05179c9f7dac6329129af96cdc7ba665a
|
0031500e016c5d19513f27c70f2f96222b693a32
|
refs/heads/main
| 2023-04-14T16:38:49.667508
| 2021-05-03T05:22:12
| 2021-05-03T05:22:12
| 363,724,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
# Generated by Django 3.2 on 2021-05-02 12:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_post_summary'),
]
operations = [
migrations.AlterField(
model_name='post',
name='timeStamp',
field=models.DateTimeField(auto_now_add=True),
),
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
0318e0aaf209f5b5619b4a826bfaf8af77341a0c
|
57bab37cb279cc6c59262a711ccf228867e8ddc6
|
/Guia2/3.py
|
f9a346b91d48cacb58c02e6a5b659e70ee5d5dd8
|
[] |
no_license
|
gabymy/Guias-de-Estudios-PYTHON
|
d56a94b7bd7b04a25c8af2c368c976e47f3e2b51
|
6ff0eeee6ee5ab8a4057ca12a61202214d0e9065
|
refs/heads/main
| 2023-08-01T22:45:02.101972
| 2021-10-08T19:08:23
| 2021-10-08T19:08:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
# Create a function that asks for a year and prints whether or not it is a leap
# year. Remember that leap years are multiples of 4, but multiples of 100 are
# not, although multiples of 400 are. Some example answers: 2012 is a leap
# year, 2010 is not a leap year, 2000 is a leap year, 1900 is not a leap year.
print('Leap year checker')
fecha = int(input("Enter a year and I will tell you if it is a leap year: "))
def es_bisiesto(anio):
return anio % 400 == 0 or (anio % 100 != 0 and anio % 4 == 0)
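# Examples: es_bisiesto(2012) -> True (multiple of 4, not of 100);
# es_bisiesto(1900) -> False (multiple of 100 but not of 400);
# es_bisiesto(2000) -> True (multiple of 400).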
if es_bisiesto(fecha):
    print("The year", fecha, "is a leap year.")
else:
    print("The year", fecha, "is not a leap year.")
|
[
"noreply@github.com"
] |
noreply@github.com
|
d86b86de0883e30ddc235898d94df5c11ee4e183
|
92bf965fb0125da24f4503ed685c114cfb8523c0
|
/sandbox/testPlot.py
|
90e205a962cdca4a8c51308eac423f81dd27fbf4
|
[] |
no_license
|
fsbr/unh-startracker
|
c9f2f1420d3ef7f051d71e32bd7328fbe8dd4aa2
|
f023aa51733633e4ef1fd7f4c09e117f95eb721b
|
refs/heads/master
| 2021-07-16T09:27:01.757223
| 2021-06-29T22:21:50
| 2021-06-29T22:21:50
| 43,343,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
import time
import numpy as np
import matplotlib.pyplot as plt
plt.axis([0, 1000, 0, 1])
# does interactive plotting
plt.ion()
plt.show()
for i in range(1000):
y = np.random.random()
plt.scatter(i, y)
    plt.pause(0.05)  # pause() redraws and runs the GUI event loop; draw()+time.sleep() can leave the window unresponsive
|
[
"thomasckfuller@gmail.com"
] |
thomasckfuller@gmail.com
|
0c7cb64d07da4a007ef4afbd7c24b3fad8653680
|
bebe6e9195dce6f47fe2f52a06ac85519ab969ac
|
/binary_search_2d_array_slices.py
|
11f04a487ed47d95561a858cd78cd8aea22e8a26
|
[] |
no_license
|
jbobo/leetcode
|
f038ee934c4435a2f5a3e987b3d5b70b860a25e2
|
3894425256f8504f7c8f1903e47f670c6fa32a92
|
refs/heads/master
| 2022-11-05T07:19:48.897066
| 2020-06-24T01:20:25
| 2020-06-24T01:20:25
| 274,544,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,010
|
py
|
#!/usr/bin/env python3
"""
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted in ascending from left to right.
Integers in each column are sorted in ascending from top to bottom.
Example:
Consider the following matrix:
[
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]
]
Given target = 5, return true.
Given target = 20, return false.
https://leetcode.com/problems/search-a-2d-matrix-ii/
"""
from math import ceil
# - start in the middle.
# - if the middle value is smaller than the target:
#   - add the upper-right, lower-right, and lower-left quadrants to the queue.
# - if the middle value is bigger than the target:
#   - add the upper-right, lower-left, and upper-left quadrants to the queue.
# - if the current value is the target, return TRUE.
# - if the queue is empty, return FALSE.
def searchMatrix(matrix, target):
    """Perform a quadrary search for a given target on the given 2d array.
    """
    if not matrix or not matrix[0] or target is None:  # `not target` would wrongly reject target == 0
        return False
    queue = []
    row_min = 0
    row_max = len(matrix) - 1
    col_min = 0
    col_max = len(matrix[0]) - 1
    queue.append((row_min, row_max, col_min, col_max))
    while queue:
        row_min, row_max, col_min, col_max = queue.pop(0)
        # Base case, no elements: skip this submatrix
        # (returning here would abort the whole search prematurely)
        if row_max < row_min or col_max < col_min:
            continue
        # Base case, single cell
        if row_min == row_max and col_min == col_max:
            if matrix[row_min][col_min] == target:
                return True
            continue
        # Integer midpoints (true division would produce float indices)
        row_mid = row_min + ((row_max - row_min) // 2)
        col_mid = col_min + ((col_max - col_min) // 2)
        if matrix[row_mid][col_mid] == target:
            return True
        # Search Top-Right (queue entries must be tuples, not positional args)
        queue.append((row_min, row_mid, col_mid + 1, col_max))
        # Search Bottom-Left
        queue.append((row_mid + 1, row_max, col_min, col_mid))
        if target < matrix[row_mid][col_mid]:
            # Search Top-Left (the midpoint cell itself was just ruled out)
            queue.append((row_min, row_mid, col_min, col_mid))
        else:
            # Search Bottom-Right
            queue.append((row_mid + 1, row_max, col_mid + 1, col_max))
    return False
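# An alternative worth knowing (illustration, not part of the exercise): the
# classic "staircase" walk runs in O(m + n) instead of the roughly O(n^1.58)
# of the quadrant search above.
def searchMatrixStaircase(matrix, target):
    """Start at the top-right corner; step left when the value is too big,
    down when it is too small."""
    if not matrix or not matrix[0]:
        return False
    row, col = 0, len(matrix[0]) - 1
    while row < len(matrix) and col >= 0:
        value = matrix[row][col]
        if value == target:
            return True
        if value > target:
            col -= 1   # everything below in this column is even bigger
        else:
            row += 1   # everything left in this row is even smaller
    return False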
if __name__ == "__main__":
# matrix = [
# [1, 4, 7, 11, 15],
# [2, 5, 8, 12, 19],
# [3, 6, 9, 16, 22],
# [10, 13, 14, 17, 24],
# [18, 21, 23, 26, 30]
# ]
# target = 27
# matrix = [
# [1, 4, 7, 11, 15],
# [2, 5, 8, 12, 19],
# [3, 6, 9, 16, 22],
# [10, 13, 14, 17, 24],
# [18, 21, 23, 26, 30]
# ]
# target = 5
matrix = [
[1, 4],
[2, 5]
]
target = 1
print("Is %s in the matrix? %s" % (target, searchMatrix(matrix, target)))
# print(matrix)
|
[
"nedbobo@Neds-MacBook-Pro.local"
] |
nedbobo@Neds-MacBook-Pro.local
|
b6506ca0c4ad00b48fefffd880f27f6b56a0c962
|
ac14df0df265cedd09a13ef049434d00f7f4260b
|
/recoxplainer/config.py
|
96983f417577df3aedbeb673d4788069df087510
|
[] |
no_license
|
amorsun/recoxplainer
|
0e33986a1d98b0050238434467479fd27eafabd8
|
f9afc318e2c29c7519e9880fdd0ced09605cd197
|
refs/heads/master
| 2023-03-14T02:26:49.434910
| 2021-01-31T20:45:17
| 2021-01-31T20:45:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
import yaml
from box import Box
import os
with open("configs/config.yml", "r") as yml_file:
full_cfg = yaml.safe_load(yml_file)
cfg = Box({**full_cfg["base"]},
default_box=True,
default_box_attr=None)
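# Typical usage elsewhere in the package (sketch -- the key names below are
# assumptions about configs/config.yml, not taken from this file):
#
#   from recoxplainer.config import cfg
#   data_path = cfg.data.path   # missing keys return None rather than raising,
#                               # thanks to default_box=True / default_box_attr=None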
|
[
"CobaLu01@de-ber-m45dml85.fritz.box"
] |
CobaLu01@de-ber-m45dml85.fritz.box
|
c927be2460b22f079fb23a09cae7ad7bf3fa7aea
|
0e6802ed46b3be0e58396b0e0a089d9da8727be6
|
/PyPlayground/knapSack/2.py
|
68c99edfa6ccad248b4c27f8ca09989db8dcbd15
|
[] |
no_license
|
wbglaeser/cplayground
|
328825d6d37f23f7acc2d373b02f72b8afbc68cc
|
e3dc84e8198535fcd5d5dafa126d4ea345c59c2c
|
refs/heads/main
| 2023-03-25T21:43:54.998971
| 2021-02-23T09:16:11
| 2021-02-23T09:16:11
| 308,050,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
W = 30
wt = [1,2,4,6]
v = [5,3,5,6]
v = [a-b for a,b in zip(v, wt)]
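# Note: values were reduced to "net value" (value minus weight) above, so the
# DP maximises profit after paying one unit per unit of capacity consumed;
# e.g. item 0 is worth v[0] = 5 - 1 = 4 if taken. The K[k][...] terms use the
# previous row, so each item is taken at most once (0/1 knapsack).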
K = [[0 for i in range(W + 1)] for j in range(len(wt)+1)]
for w in range(1, W+1):
for k in range(len(wt)):
if wt[k] > w: K[k+1][w] = K[k][w]
else:
K[k+1][w] = max(v[k]+K[k][w-wt[k]], K[k][w])
for line in K: print(line)
print(K[-1][-1])
|
[
"ben.glaeser@tuta.io"
] |
ben.glaeser@tuta.io
|
65c8c7afd51337f9030ae030d6fd82b48e1fa74f
|
f14bbd0133937b8b433685f971f569ad4ca160c0
|
/Code/Peirce.py
|
2c63005601f166d088b7a6f8ebd68c9ba2cdac8b
|
[] |
no_license
|
bchenghi/peirce-alpha-system
|
2e43fc32d9db0c2c35e50f0b6ec480213ca3d107
|
5f7969e3dde47412455dda2931d4f429aa96c499
|
refs/heads/main
| 2023-08-18T16:16:05.466642
| 2021-09-19T16:56:06
| 2021-09-19T16:56:06
| 367,814,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,476
|
py
|
import tkinter as tk
import math
from itertools import permutations
from pyswip.prolog import Prolog
# values for horizontal and vertical padding
xpadding = 20
ypadding = 20
# default values for height and width if no children
xdefault = 50
ydefault = 50
# Graph that was copied or cut
copyorcutGraphs = []
# State and state pointers for non-proof mode and proof mode
proofMode = False
states = []
statesProof = []
pointer = -1
pointerProof = -1
graph = None
topGraphId = None
distance_between = 20
winW = 500
winH = 500
boxWidth = 10
bgColor = '#fff'
selectedColor = 'Grey'
defaultColor = '#fb0'
root = tk.Tk()
canvas = tk.Canvas(root, width=winW, height=winH, borderwidth=0, highlightthickness=0, bg=bgColor)
prolog = Prolog()
prolog.consult("peirce.pl")
# Formula entry for start command
enterBtn = None
entry = None
insertionButton = None
insertionEntry = None
displayText = None
var = tk.StringVar()
var.set("Welcome! Enter a formula in 'start' to get started")
multiSelectText = None
mSVar = tk.StringVar()
deiterationButton = None
runIteration = False
runDeiteration = False
multiSelected = []
multiSelectParent = None
firstSelected = []
def deselectMulti():
global multiSelected
for i in range(len(multiSelected)-1, -1, -1):
deselectGraph(multiSelected[i])
# Methods for printing graph
def clearAndPrint():
global graph
global entry
global enterBtn
global displayText
global multiSelectText
global var
canvas.delete("all")
displayText = tk.Label(root, font=("Helvetica",12), width=100, textvariable=var, bg=bgColor)
displayText.place(relx=0.5, rely=0.925, anchor=tk.CENTER)
multiSelectText = tk.Label(root, font=("Helvetica",11), width=100, textvariable=mSVar, bg=bgColor)
multiSelectText.place(relx=0.5, rely=0.97, anchor=tk.CENTER)
if graph != None:
graph.x1 = 2
graph.y1 = 2
graph.calculateCoord()
printGraph(graph)
def printGraph(inGraph):
printGraphRec(inGraph, True)
def printGraphRec(inGraph, firstRun):
global canvas
if (type(inGraph) == Box):
id = canvas.create_rectangle(inGraph.x1, inGraph.y1, inGraph.x2, inGraph.y2, width=boxWidth, outline=defaultColor)
inGraph.id = id
elif type(inGraph) == Atom:
id = canvas.create_text((inGraph.x1 + inGraph.x2)//2, (inGraph.y1 + inGraph.y2)//2, font=('Helvetica', 20), fill=defaultColor, text=inGraph.char)
inGraph.id = id
elif type(inGraph) == Graph:
id = canvas.create_rectangle(inGraph.x1, inGraph.y1, inGraph.x2, inGraph.y2, fill=bgColor, outline=bgColor)
canvas.lift(id)
inGraph.id = id
if firstRun:
global topGraphId
topGraphId = id
for g in inGraph.children:
printGraphRec(g, False)
# Assigns global selected variable with selected graph, updates color of selected/deselected graphs
def select(*args):
global multiSelected
global graph
ids = canvas.find_overlapping(args[0].x, args[0].y, args[0].x+1, args[0].y+1)
selectedGraph = None
if len(ids) > 0:
selectedGraph = find(ids[len(ids)-1], graph)
if len(ids) == 0:
selectedGraph = graph
if runIteration:
multiSelected = [selectedGraph]
iterationSecond()
deselectMulti()
return
# deselect previously selected graph
if selectedGraph in multiSelected:
deselectGraph(selectedGraph)
return
global multiSelectParent
if multiSelectParent == None:
parents = findParents(selectedGraph.id, graph, None)
if len(parents) > 0:
multiSelectParent = parents[len(parents)-1]
else:
# selectedGraph was top level graph, with no parent, give it a place holder multiSelectParent
if type(selectedGraph) == Graph:
multiSelectParent = Graph(1)
else:
if len(multiSelected) == 1 and type(multiSelected[0]) == Graph and selectedGraph in multiSelected[0].children:
var.set("Graph already selected by region")
return
if not selectedGraph in multiSelectParent.children:
var.set("You can only multi-select graphs in the same depth")
return
if selectedGraph == None:
selectedGraph = graph
if type(selectedGraph) == Box:
canvas.itemconfig(selectedGraph.id, outline=selectedColor)
elif type(selectedGraph) == Atom:
canvas.itemconfig(selectedGraph.id, fill=selectedColor)
elif type(selectedGraph) == Graph:
canvas.itemconfig(selectedGraph.id, fill=selectedColor, outline=selectedColor)
if selectedGraph.id == topGraphId:
canvas.config(bg=selectedColor)
displayText.config(bg=selectedColor)
multiSelectText.config(bg=selectedColor)
multiSelected.append(selectedGraph)
def deselectGraph(inGraph):
resetGraphColor(inGraph)
multiSelected.remove(inGraph)
if len(multiSelected) == 0:
global multiSelectParent
multiSelectParent = None
def resetGraphColor(inGraph):
if inGraph != None:
if type(inGraph) == Box:
canvas.itemconfig(inGraph.id, outline=defaultColor)
elif type(inGraph) == Atom:
canvas.itemconfig(inGraph.id, fill=defaultColor)
elif type(inGraph) == Graph:
canvas.itemconfig(inGraph.id, fill=bgColor, outline=bgColor)
canvas.configure(bg=bgColor)
displayText.config(bg=bgColor)
multiSelectText.config(bg=bgColor)
# returns graph with given id
def find(id, current):
if id == current.id:
return current
else:
for child in current.children:
foundGraph = find(id, child)
if foundGraph != None:
return foundGraph
return None
# Returns list of parents
def findParents(id, current, parent):
if current == None:
return []
if id == current.id:
if parent == None:
return []
return [parent]
for child in current.children:
parents = findParents(id, child, current)
if len(parents) > 0:
if parent == None:
return parents
result = [parent]
result.extend(parents)
return result
return []
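# e.g. for a top-level graph G containing a box B that contains an atom A,
# findParents(A.id, G, None) returns [G, B] -- ordered outermost first, so
# parents[-1] is always the immediate parent (as relied on in select()).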
# Removes graph of given id, returns removed graph
def cut(id, current, parent):
if current == None:
return None
if id == current.id:
# If current is entire graph, i.e. no parent
if parent == None:
global graph
graph = Graph(0)
var.set("Cut successful!")
return current
parent.removeChild(current)
var.set("Cut successful!")
return current
else:
for child in current.children:
removed = cut(id, child, current)
if removed != None:
return removed
return None
# Copies the graph and sets the same ids.
# Printing the graph with clearAndPrint, will set the ids to be unique from the original graph
def copy(inGraph):
copyGraph = None
if type(inGraph) == Box:
copyGraph = Box(inGraph.id)
elif type(inGraph) == Atom:
copyGraph = Atom(inGraph.char, inGraph.id)
else:
copyGraph = Graph(inGraph.id)
if type(inGraph) != Box:
for g in inGraph.children:
childCopy = copy(g)
copyGraph.addChild(childCopy)
else:
childCopy = copy(inGraph.children[0])
copyGraph.children[0] = childCopy
return copyGraph
def paste(inGraph, toPaste):
if type(inGraph) == Atom:
return False
# Should not happen, handled in pasteCommand
if inGraph == None:
return False
inGraph.addChild(toPaste)
return True
# Cuts selected graph and assigns it to copyorcutGraph, and reprints the graph
def cutCommand():
resetStart()
global graph
global copyorcutGraphs
if len(multiSelected) == 0:
var.set("No graph selected yet")
return
copyorcutGraphs = []
copyGraph = copy(graph)
for g in multiSelected:
cutGraph = cut(g.id, copyGraph, None)
copyorcutGraphs.append(cutGraph)
graph = copyGraph
clearAndPrint()
var.set("Cut successful!")
addState()
deselectMulti()
# Returns a copy of graph with given id
def copyCommand():
resetStart()
global copyorcutGraphs
global multiSelected
if len(multiSelected) == 0:
var.set("No graph selected yet")
return
copyorcutGraphs = []
for g in multiSelected:
newCopy = copy(g)
copyorcutGraphs.append(newCopy)
var.set("Copy successful!")
deselectMulti()
# Creates a copy of the copyorcutGraph, and pastes into selected, then reprints the graph
def pasteCommand():
resetStart()
global multiSelected
global copyorcutGraphs
global graph
if len(copyorcutGraphs) == 0:
var.set("No graph copied/cut yet")
return
if len(multiSelected) == 0:
var.set("No graph selected yet")
return
if len(multiSelected) != 1:
var.set("Only select one graph to paste into")
return
pasteGraph = copy(copyorcutGraphs[0])
copyGraph = copy(graph)
toPasteInto = find(multiSelected[0].id, copyGraph)
if len(copyorcutGraphs) > 1:
pasteGraph = Graph(1)
for g in copyorcutGraphs:
copyOfG = copy(g)
pasteGraph.addChild(copyOfG)
pasteSuccessful = paste(toPasteInto, pasteGraph)
if pasteSuccessful:
var.set("Paste successful!")
else:
var.set("Not allowed to paste into atom")
graph = copyGraph
addState()
clearAndPrint()
deselectMulti()
def clearEntry(*args):
global entry
global insertionEntry
if entry != None:
entry.delete(0,'end')
if insertionEntry != None:
insertionEntry.delete(0, 'end')
# Parses user input, then generates and prints graph
def parsePrintClearEntry(*args):
entryInput = repr(entry.get())[1:-1]
entryInput = entryInput.lower()
global var
global prolog
global graph
global states
global pointer
try:
solns = list(prolog.query('input("%s", O).'%(entryInput)))
if len(solns) == 0:
var.set("Issue with input")
return
graph = formGraph(solns[0]['O'])
clearEntry(args)
var.set("Input accepted and transformed into graph!")
addState()
# convert soln into graph, and print
except Exception as err:
var.set("Issue with input")
finally:
resetEnd()
# Forms graph from DCG parse tree, and returns graph
def formGraph(input):
if len(input) == 0:
return Graph(1)
resultGraph = Graph(0)
formGraphRec(input, resultGraph)
return resultGraph
def formGraphRec(input, parent):
if input == 'true':
parent.addChild(Graph(1))
elif input == 'false':
parent.addChild(Box())
elif len(input) == 1:
parent.addChild(Atom(input.upper()))
elif input[:3] == 'and':
terms = split(input)
newGraph = Graph(1)
formGraphRec(terms[0], newGraph)
formGraphRec(terms[1], newGraph)
parent.addChild(newGraph)
elif input[:3] == 'neg':
term = input[4:-1]
box = Box()
formGraphRec(term, box)
parent.addChild(box)
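# Example (illustrative): formGraph on the parse of "and(p,neg(q))" yields a
# top-level region containing Atom P and a Box whose inner graph holds Atom Q.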
# Splits conjunction and returns a list of two terms
def split(input):
solns = list(prolog.query('split("%s", O, O1).'%(input)))
result = []
for soln in solns:
result.append(soln['O'])
result.append(soln['O1'])
return result
def ins_double_cut():
resetStart()
global multiSelected
global graph
if len(multiSelected) == 0:
var.set("No graph selected yet")
return
box1 = Box()
for g in multiSelected:
box1.addChild(copy(g))
box2 = Box()
box2.addChild(box1)
copyGraph = copy(graph)
parentGraphs = findParents(multiSelected[0].id, copyGraph, None)
parentGraph = None
if len(parentGraphs) > 0:
parentGraph = parentGraphs[len(parentGraphs) - 1]
if len(multiSelected) == 1:
parentGraph.replaceChild(multiSelected[0].id, box2)
else:
for g in multiSelected:
parentGraph.removeChild(g)
parentGraph.addChild(box2)
graph = copyGraph
else:
# parentGraph is None, just set new graph as graph
finalGraph = Graph(0)
finalGraph.addChild(box2)
graph = finalGraph
addState()
clearAndPrint()
deselectMulti()
def rem_double_cut():
resetStart()
global multiSelected
global graph
if len(multiSelected) == 0:
var.set("No graph selected yet")
deselectMulti()
return
if len(multiSelected) != 1:
var.set("Only select a single outer cut to remove double cut")
deselectMulti()
return
if type(multiSelected[0]) != Box:
var.set("Select an outer cut to remove a double cut!")
deselectMulti()
return
# childrenInChildGraph should be a single box to do removal of double cut
childrenInChildGraph = multiSelected[0].getChildren()[0].getChildren()
if len(childrenInChildGraph) != 1 or type(childrenInChildGraph[0]) != Box:
var.set("No double cut found")
deselectMulti()
return
# innerBoxGraphChildren is the children in inner box's graph
copyGraph = copy(graph)
innerBoxGraphChildren = childrenInChildGraph[0].getChildren()[0].getChildren()
selectedsParents = findParents(multiSelected[0].id, copyGraph, None)
# If selected is a cut, will always have parent
# selectedsParent is from the copied graph
selectedsParent = selectedsParents[len(selectedsParents) - 1]
selectedsParent.removeChild(multiSelected[0])
for g in innerBoxGraphChildren:
selectedsParent.addChild(copy(g))
graph = copyGraph
addState()
clearAndPrint()
deselectMulti()
def iteration():
resetStart()
global runIteration
global firstSelected
global multiSelectParent
if len(multiSelected) == 0:
var.set('Outer graph not selected yet')
return
firstSelected = multiSelected
multiSelectParent = None
resetFlags()
runIteration = True
var.set("Select inner region or cut to iterate graph into")
def iterationSecond():
global firstSelected
global runIteration
global graph
    runIteration = False
    if len(multiSelected) == 0:
        var.set('Inner graph not selected yet')
        return
    secondSelected = multiSelected[0]
if type(secondSelected) != Box and type(secondSelected) != Graph:
var.set('Region or cut was not selected')
for g in firstSelected:
resetGraphColor(g)
firstSelected = []
return
isNested = True
for g in firstSelected:
if not nested(g, secondSelected):
isNested = False
break
if not isNested:
var.set("Second graph is not nested in first graph")
for g in firstSelected:
resetGraphColor(g)
firstSelected = []
return
toCopyGraph = None
if type(firstSelected[0]) != Graph:
newFirstSelected = Graph(1)
for g in firstSelected:
newFirstSelected.addChild(g)
toCopyGraph = newFirstSelected
else:
toCopyGraph = firstSelected[0]
copiedGraph = copy(graph)
copySecondSelected = find(secondSelected.id, copiedGraph)
copyOfFirst = copy(toCopyGraph)
addSuccess = copySecondSelected.addChild(copyOfFirst)
if addSuccess:
var.set('Iteration successful!')
graph = copiedGraph
addState()
resetEnd()
firstSelected = []
def deiteration():
resetStart()
global firstSelected
global runDeiteration
global deiterationButton
global multiSelectParent
if len(multiSelected) == 0:
var.set('Outer graph not selected yet')
return
firstSelected = []
for i in multiSelected:
firstSelected.append(i)
deselectMulti()
setupDeiteration()
multiSelectParent = None
resetFlags()
runDeiteration = True
def deiterationSecond():
global firstSelected
global runDeiteration
global deiterationButton
global graph
runDeiteration = False
if len(multiSelected) == 0:
var.set('Inner graph not selected yet')
resetEnd()
return
secondSelected = multiSelected
# Only used for checking nesting, no need for copy
graphFirstSelected = firstSelected[0]
graphSecondSelected = secondSelected[0]
isNested = nested(graphFirstSelected, graphSecondSelected)
if not isNested:
var.set("Second graph is not nested in first graph")
firstSelected = []
resetEnd()
return
# No need for copy as used only for checking equivalence
if type(firstSelected[0]) != Graph:
aFirstSelected = Graph(1)
for g in firstSelected:
aFirstSelected.addChild(g)
newFirstSelected = aFirstSelected
else:
newFirstSelected = firstSelected[0]
if type(secondSelected[0]) != Graph:
aSecondSelected = Graph(1)
for g in secondSelected:
aSecondSelected.addChild(g)
newSecondSelected = aSecondSelected
else:
newSecondSelected = secondSelected[0]
    # newFirstSelected and newSecondSelected do not need to be copies of the first and second selections, as they are only used to check equivalence
isEquivalent = equivalent(newFirstSelected, newSecondSelected)
if not isEquivalent:
var.set("Second graph is not equivalent to first graph")
firstSelected = []
resetEnd()
return
copiedGraph = copy(graph)
secondParents = None
secondParents = findParents(secondSelected[0].id, copiedGraph, None)
secondParent = None
if len(secondParents) > 0:
secondParent = secondParents[len(secondParents) - 1]
for c in secondSelected:
secondParent.removeChild(c)
var.set('Deiteration successful!')
graph = copiedGraph
resetEnd()
addState()
firstSelected = []
def deiterationButtonCommand():
deiterationSecond()
deselectMulti()
return
def setupDeiteration():
global deiterationButton
deiterationButton = tk.Button(root, text="Run deiteration", font=("Helvetica", 10), width=15, command=deiterationButtonCommand)
deiterationButton.place(relx=0.5, rely=0.85, anchor=tk.CENTER)
var.set("Select nested graph to deiterate")
def collapseDeiteration():
global deiterationButton
if deiterationButton != None:
deiterationButton.destroy()
deiterationButton = None
def erasure():
resetStart()
global multiSelected
global graph
if len(multiSelected) == 0:
var.set("No graph selected yet")
return
g = copy(multiSelected[0])
copyGraph = copy(graph)
ancestors = findParents(g.id, copyGraph, None)
numOfCuts = 0
for a in ancestors:
if type(a) == Box:
numOfCuts += 1
if numOfCuts % 2 != 0:
var.set("Graph is not evenly-enclosed")
deselectMulti()
return
else:
parent = None
if len(ancestors) == 0:
# remove entire graph if graph has no ancestors, then return
graph = Graph(0)
addState()
clearAndPrint()
deselectMulti()
return
parent = ancestors[len(ancestors) - 1]
for c in multiSelected:
parent.removeChild(c)
var.set('Erasure successful!')
graph = copyGraph
addState()
clearAndPrint()
deselectMulti()
def insertion():
resetStart()
global multiSelected
if len(multiSelected) == 0:
var.set("No graph selected yet")
return
if len(multiSelected) > 1:
var.set("Only select a single region or cut to insert into")
deselectMulti()
return
if type(multiSelected[0]) != Box and type(multiSelected[0]) != Graph:
var.set("Only select region or cut to insert into")
deselectMulti()
return
parents = findParents(multiSelected[0].id, graph, None)
num_cuts = 0
for p in parents:
if type(p) == Box:
num_cuts += 1
if num_cuts % 2 == 0:
var.set("Graph is not oddly-enclosed!")
deselectMulti()
return
setupInsertion()
def runInsertion(*args):
entryInput = repr(insertionEntry.get())[1:-1]
entryInput = entryInput.lower()
global var
global prolog
global graph
try:
solns = list(prolog.query('input("%s", O).'%(entryInput)))
if len(solns) == 0:
var.set("Issue with input")
return
newGraph = formGraph(solns[0]['O'])
copiedGraph = copy(graph)
toAddTo = find(multiSelected[0].id, copiedGraph)
toAddTo.addChild(newGraph)
graph = copiedGraph
var.set("Input accepted and inserted into graph!")
addState()
# convert soln into graph, and print
except Exception as err:
print(err)
var.set("Issue with input")
finally:
resetEnd()
def startCommand():
resetStart()
setupStart()
def setupInsertion():
global insertionEntry
global insertionButton
insertionEntry = tk.Entry(root, font=("Helvetica",10), text="Formula", width=20)
insertionEntry.place(relx=0.5, rely=0.775, anchor=tk.CENTER)
insertionButton = tk.Button(root, text="Run insertion", font=("Helvetica", 10), width=15, command=runInsertion)
insertionButton.place(relx=0.5, rely=0.85, anchor=tk.CENTER)
var.set("Type formula of graph to insert")
def collapseInsertion():
global insertionEntry
global insertionButton
if insertionEntry != None:
insertionEntry.destroy()
insertionEntry = None
if insertionButton != None:
insertionButton.destroy()
insertionButton = None
def setupStart():
global entry
global enterBtn
entry = tk.Entry(root, font=("Helvetica",10), text="Formula", width=20)
entry.place(relx=0.5, rely=0.775, anchor=tk.CENTER)
enterBtn = tk.Button(root, text="Enter formula", font=("Helvetica", 10), width=15, command=parsePrintClearEntry)
enterBtn.place(relx=0.5, rely=0.85, anchor=tk.CENTER)
var.set("Insert formula to initialise graph")
def collapseStart():
global entry
global enterBtn
if entry != None:
entry.destroy()
entry = None
if enterBtn != None:
enterBtn.destroy()
enterBtn = None
def resetEnd():
resetFlags()
collapseInsertion()
collapseDeiteration()
collapseStart()
clearAndPrint()
deselectMulti()
def resetStart():
resetFlags()
collapseInsertion()
collapseDeiteration()
collapseStart()
def nested(outerGraph, innerGraph):
isNested = False
outerParents = findParents(outerGraph.id, graph, None)
outerParent = None
if len(outerParents) > 0:
outerParent = outerParents[len(outerParents) - 1]
if outerParent == None:
return False
# Check if outer's parent is inner graph
if outerParent.id == innerGraph.id:
isNested = True
if not isNested:
for child in outerParent.children:
if child.id != outerGraph.id:
found = find(innerGraph.id, child)
if found:
isNested = True
break
return isNested
def equivalent(firstGraph, secondGraph):
if type(firstGraph) == Atom and type(secondGraph) == Atom and firstGraph.char != secondGraph.char:
return False
if type(firstGraph) != type(secondGraph):
return False
if len(firstGraph.getChildren()) != len(secondGraph.getChildren()):
return False
firstGChildrenP = permutations(firstGraph.getChildren())
numOfChildren = len(firstGraph.getChildren())
result = False
for p in firstGChildrenP:
breakEarly = False
for i in range(numOfChildren):
if not equivalent(p[i], secondGraph.getChildren()[i]):
breakEarly = True
break
if not breakEarly:
result = True
break
return result
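# Example (illustrative): graphs parsed from "and(p,q)" and "and(q,p)" are
# equivalent, since children are compared under every permutation.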
def resetFlags():
global runIteration
global runDeiteration
runIteration = False
runDeiteration = False
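# addState maintains the undo/redo history (a separate stack is used in proof
# mode): any states beyond the current pointer are truncated, discarding the
# redo tail, before the new graph snapshot is appended.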
def addState():
if not proofMode:
global states
global pointer
if len(states) > 0:
states = states[0:pointer+1]
states.append(graph)
pointer += 1
else:
global statesProof
global pointerProof
if len(statesProof) > 0:
statesProof = statesProof[0:pointerProof+1]
statesProof.append(graph)
pointerProof += 1
def undoCommand():
global graph
if not proofMode:
global pointer
if pointer == 0:
var.set('No more undo available')
else:
pointer -= 1
graph = states[pointer]
resetEnd()
var.set("Undo successful!")
else:
global pointerProof
if pointerProof == 0:
var.set('No more undo available')
else:
pointerProof -= 1
graph = statesProof[pointerProof]
resetEnd()
var.set("Undo successful!")
def redoCommand():
global graph
if not proofMode:
global pointer
if pointer == len(states) - 1:
var.set('No more redo available')
else:
pointer += 1
graph = states[pointer]
resetEnd()
var.set("Redo successful!")
else:
global pointerProof
if pointerProof == len(statesProof) - 1:
var.set('No more redo available')
else:
pointerProof += 1
graph = statesProof[pointerProof]
resetEnd()
var.set("Redo successful!")
class Graph:
def __init__(self, id):
if id == 0:
self.id = 0
self.children = []
self.x1 = 2
self.y1 = 2
self.x2 = None
self.y2 = None
else:
self.id = id
self.children = []
            # Will be set once the graph is complete and calculateCoord is called
self.x1 = None
self.y1 = None
self.x2 = None
self.y2 = None
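    # Note: addChild merges a Graph argument's children into this graph
    # (regions are flattened); Boxes and Atoms are appended as single children.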
def addChild(self, graphToAdd):
if type(graphToAdd) == Graph:
for child in graphToAdd.children:
self.children.append(child)
return True
self.children.append(graphToAdd)
return True
def getChildren(self):
return self.children
def replaceChild(self, toReplaceId, newGraph):
children = self.getChildren()
for i in range(len(children)):
if children[i].id == toReplaceId:
children.remove(children[i])
if type(newGraph) == Graph:
for g in newGraph.getChildren():
children.insert(i,g)
i += 1
return True
children.insert(i,newGraph)
return True
return False
def removeChild(self, graphToCut):
for i in range(len(self.children)):
if self.children[i].id == graphToCut.id:
del self.children[i]
return
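    # calculateCoord recursively assigns bounding boxes: each child is placed to
    # the right of its previous sibling with xpadding/ypadding margins, and a
    # node's own extent comes from its furthest child (or a default size if empty).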
def calculateCoord(self, parentGraph = None, childNum = 0):
if self == None:
return
if parentGraph != None:
if type(parentGraph) == Box:
self.x1 = parentGraph.x1
self.y1 = parentGraph.y1
else:
if childNum == 0:
self.x1 = parentGraph.x1 + xpadding
self.y1 = parentGraph.y1 + ypadding
else:
self.x1 = parentGraph.children[childNum - 1].x2 + xpadding
self.y1 = parentGraph.y1 + ypadding
# If current type is box, only one Graph child, set coord according to Graph child
if type(self) == Box:
self.children[0].calculateCoord(self, 0)
self.x2 = self.children[0].x2
self.y2 = self.children[0].y2
return
furthestChildx2 = 0
largestChildy2 = 0
for i in range(len(self.children)):
self.children[i].calculateCoord(self, i)
if i == len(self.children) - 1:
furthestChildx2 = self.children[i].x2
if largestChildy2 < self.children[i].y2:
largestChildy2 = self.children[i].y2
# self has child
if furthestChildx2 != 0:
self.x2 = furthestChildx2 + xpadding
self.y2 = largestChildy2 + ypadding
else:
self.x2 = self.x1 + xdefault
self.y2 = self.y1 + ydefault
class Atom(Graph):
def __init__(self, char, id = 1):
super().__init__(id)
self.char = char
def addChild(self, graphToAdd):
return False
class Box(Graph):
def __init__(self, id = 1, childBoxId = 1):
super().__init__(id)
self.children = [Graph(childBoxId)]
def addChild(self, graphToAdd):
self.children[0].addChild(graphToAdd)
return True
# Only for cutting the single Graph child a Box has
def removeChild(self, graphToRemove):
self.children = [Graph(1)]
def replaceChild(self, toReplaceId, newGraph):
if self.children[0].id == toReplaceId:
finalGraph = Graph(1)
finalGraph.addChild(newGraph)
self.children[0] = finalGraph
else:
print("Box's child graph does not match toReplaceId")
def printGraphId(inGraph):
if inGraph == None:
return
print(inGraph.id)
for c in inGraph.children:
printGraphId(c)
graph = Graph(0)
addState()
# ===================== Tkinter setup =======================
root.title("Peirce Alpha System")
menuBar = tk.Menu(root)
def proveCommand():
global statesProof
global pointerProof
global graph
global proofMode
resetStart()
root.config(menu=rulesMenuBar)
var.set("Proof started")
proofMode = True
statesProof = []
pointerProof = -1
addState() # adds state in proofMode
commandMenu = tk.Menu(menuBar, tearoff=0)
commandMenu.add_command(label="Undo", command=undoCommand)
commandMenu.add_command(label="Redo", command=redoCommand)
commandMenu.add_separator()
commandMenu.add_command(label="Start", command=startCommand)
commandMenu.add_command(label="Cut",command=cutCommand)
commandMenu.add_command(label="Copy", command=copyCommand)
commandMenu.add_command(label="Paste",command=pasteCommand)
commandMenu.add_separator()
commandMenu.add_command(label="Start Proof",command=proveCommand)
commandMenu.add_separator()
commandMenu.add_command(label="Quit", command=root.destroy)
menuBar.add_cascade(label="Commands", menu=commandMenu)
root.config(menu=menuBar)
rulesMenuBar = tk.Menu(root)
# Sets menu bar to edit mode, sets graph as where the proof started
def stopProveCommand():
global graph
global proofMode
resetStart()
root.config(menu=menuBar)
var.set("Proof stopped")
proofMode = False
graph = states[pointer]
resetEnd()
rulesMenu = tk.Menu(rulesMenuBar, tearoff=0)
rulesMenu.add_command(label="Undo Rule", command=undoCommand)
rulesMenu.add_command(label="Redo Rule", command=redoCommand)
rulesMenu.add_separator()
rulesMenu.add_command(label="Erase", command=erasure)
rulesMenu.add_command(label="Insert", command=insertion)
rulesMenu.add_command(label="Iterate", command=iteration)
rulesMenu.add_command(label="Deiterate", command=deiteration)
rulesMenu.add_command(label="Insert double cut", command=ins_double_cut)
rulesMenu.add_command(label="Remove double cut", command=rem_double_cut)
rulesMenu.add_separator()
rulesMenu.add_command(label="Stop Proof", command=stopProveCommand)
rulesMenuBar.add_cascade(label="Rules", menu=rulesMenu)
clearAndPrint()
canvas.bind("<Button-1>", select)
canvas.pack(fill="both",expand=True)
root.mainloop()
|
[
"bohchenghin@gmail.com"
] |
bohchenghin@gmail.com
|
5e28917d27023f41c32f425858b3b22fab756156
|
29feab13704dc487aba41fc3bc3478c768311991
|
/FigureS2.py
|
f4fed26d4cb6b276a72db05e7fef0bcc119eb4bb
|
[] |
no_license
|
vhartwick/Mars-Wind-Energy
|
f382781e8c09b89a6ea0caa02f18989367a665ec
|
9b0a1ba4cec8b8d5cd3f3f5e4b90240f6ecf5a97
|
refs/heads/main
| 2023-04-14T01:58:35.095050
| 2022-10-25T20:56:16
| 2022-10-25T20:56:16
| 441,526,116
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,232
|
py
|
# Figure S2 : Python script for base analysis and figures for Hartwick+2022
# Author: Victoria Hartwick
# Last Modified : 6/29/22
# Import Scientific Modules
import os
import matplotlib
import matplotlib.pyplot as py
import numpy as np
from matplotlib.ticker import MultipleLocator, FuncFormatter #format ticks
from numpy import sqrt, exp, max, mean, min, log, log10
import xarray as xr
from matplotlib.patches import Rectangle
import matplotlib.gridspec as gridspec
# Get Path
PATH = os.getcwd()
# Import Base Datasets
dataDIR = '/lou/la2/vhartwic/FV3/verona/fv3_2.0_windenergy/fms_mars_MY24_highres/history/MY24_highres_savefile.nc'
DDS_MY24 = xr.open_dataset(dataDIR, decode_times=False)
# -------------------------------------------------------------------------------
# FIGURE S2
# Annual average wind (top panels) and solar power density (bottom panels) [W/m2]
# in one-hour intervals of local time. All locations in each panel show the same
# local time simultaneously. Solar power exceeds 140 W/m2 during daytime hours
# (~7.5-15.5LT) while wind power maximizes at night (~21.5-8.5LT) and in the poles.
# -------------------------------------------------------------------------------
ct = 2
print('Making Figure S2')
# Find Annual Average
wpd_diurn_ave_MY24 = DDS_MY24.wpd.mean('time')
swflx_diurn_ave_MY24 = DDS_MY24.swflx.mean('time')
levels = np.arange(0,160,10)
levels_swflx = np.arange(0,500,25)
fig, axs = py.subplots(4,6, sharey=True, sharex=True, figsize=(10,8))
fig.subplots_adjust(wspace=0.05,hspace=0.35)
fig.suptitle('(a)', ha='left')
for i in range(6):
cs = axs[0,i].contourf(DDS_MY24.lon,DDS_MY24.lat,wpd_diurn_ave_MY24.sel(zagl=50,time_of_day_24=i+0.5),levels=levels,cmap=py.cm.viridis)
axs[0,i].set_title(str(i+0.5)+'LT',fontsize=10)
if i == 0:
axs[0,i].set_ylabel('Latitude',fontsize=8)
axs[0,i].xaxis.set_major_locator(MultipleLocator(60))
axs[0,i].yaxis.set_major_locator(MultipleLocator(30))
        axs[0,i].set_yticks([-90,-60,-30,0,30,60,90])
axs[0,i].set_yticklabels(['','-60','','0','','60',''])
for i in range(6):
cs = axs[1,i].contourf(DDS_MY24.lon,DDS_MY24.lat,wpd_diurn_ave_MY24.sel(zagl=50,time_of_day_24=i+6.5),levels=levels,cmap=py.cm.viridis)
axs[1,i].set_title(str(i+6.5)+'LT',fontsize=10)
if i == 0:
axs[1,i].set_ylabel('Latitude',fontsize=8)
for i in range(6):
cs = axs[2,i].contourf(DDS_MY24.lon,DDS_MY24.lat,wpd_diurn_ave_MY24.sel(zagl=50,time_of_day_24=i+12.5),levels=levels,cmap=py.cm.viridis)
axs[2,i].set_title(str(i+12.5)+'LT',fontsize=10)
if i == 0:
axs[2,i].set_ylabel('Latitude',fontsize=8)
for i in range(6):
cs = axs[3,i].contourf(DDS_MY24.lon,DDS_MY24.lat,wpd_diurn_ave_MY24.sel(zagl=50,time_of_day_24=i+18.5),levels=levels,cmap=py.cm.viridis)
axs[3,i].set_title(str(i+18.5)+'LT',fontsize=10)
axs[3,i].set_xlabel('Longitude',fontsize=8)
    axs[3,i].set_xticks([0,60,120,180,240,300,360])
axs[3,i].set_xticklabels(['','60','','180','','300',''])
if i == 0:
axs[3,i].set_ylabel('Latitude',fontsize=8)
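# Build one ScalarMappable spanning the shared contour levels so a single
# colorbar can serve all 24 panels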
norm= matplotlib.colors.Normalize(vmin=cs.cvalues.min(), vmax=cs.cvalues.max())
sm = py.cm.ScalarMappable(norm=norm, cmap = cs.cmap)
sm.set_array([])
# add axis, (left, bottom, width, height)
cbar_ax = py.gcf().add_axes([0.93, 0.11, 0.01, 0.77])
clb = fig.colorbar(sm, cax=cbar_ax, orientation='vertical')
clb.set_label(r'$\ [W/m^{2}]$')
py.savefig(f'{PATH}/WindEnergy_HighRes_FigS{ct}.eps',dpi=300)
# Now Solar
print('Now, Solar')
fig, axs = py.subplots(4,6, sharey=True, sharex=True, figsize=(10,8))
fig.subplots_adjust(wspace=0.05,hspace=0.35)
fig.suptitle('(b)', ha='left')
for i in range(6):
cs = axs[0,i].contourf(DDS_MY24.lon,DDS_MY24.lat,swflx_diurn_ave_MY24.sel(time_of_day_24=i+0.5),levels=levels_swflx,cmap=py.cm.viridis)
axs[0,i].set_title(str(i+0.5)+'LT',fontsize=10)
if i == 0:
axs[0,i].set_ylabel('Latitude',fontsize=8)
axs[0,i].xaxis.set_major_locator(MultipleLocator(60))
axs[0,i].yaxis.set_major_locator(MultipleLocator(30))
axs[0,i].set_yticklabels(['','-60','','0','','60',''])
for i in range(6):
cs = axs[1,i].contourf(DDS_MY24.lon,DDS_MY24.lat,swflx_diurn_ave_MY24.sel(time_of_day_24=i+6.5),levels=levels_swflx,cmap=py.cm.viridis)
axs[1,i].set_title(str(i+6.5)+'LT',fontsize=10)
if i == 0:
axs[1,i].set_ylabel('Latitude',fontsize=8)
for i in range(6):
cs = axs[2,i].contourf(DDS_MY24.lon,DDS_MY24.lat,swflx_diurn_ave_MY24.sel(time_of_day_24=i+12.5),levels=levels_swflx,cmap=py.cm.viridis)
axs[2,i].set_title(str(i+12.5)+'LT',fontsize=10)
if i == 0:
axs[2,i].set_ylabel('Latitude',fontsize=8)
for i in range(6):
cs = axs[3,i].contourf(DDS_MY24.lon,DDS_MY24.lat,swflx_diurn_ave_MY24.sel(time_of_day_24=i+18.5),levels=levels_swflx,cmap=py.cm.viridis)
axs[3,i].set_title(str(i+18.5)+'LT',fontsize=10)
axs[3,i].set_xlabel('Longitude',fontsize=8)
axs[3,i].set_xticklabels(['','60','','180','','300',''])
if i == 0:
axs[3,i].set_ylabel('Latitude',fontsize=8)
norm= matplotlib.colors.Normalize(vmin=cs.cvalues.min(), vmax=cs.cvalues.max())
sm = py.cm.ScalarMappable(norm=norm, cmap = cs.cmap)
sm.set_array([])
# add axis, (left, bottom, width, height)
cbar_ax = py.gcf().add_axes([0.93, 0.11, 0.01, 0.77])
clb = fig.colorbar(sm, cax=cbar_ax, orientation='vertical')
clb.set_label(r'$\ [W/m^{2}]$')
py.savefig(f'{PATH}/WindEnergy_HighRes_FigS{ct}b.eps',dpi=300)
# Send out Data
var1 = xr.DataArray(swflx_diurn_ave_MY24, coords={'lon':DDS_MY24.lon,'lat':DDS_MY24.lat,'time_of_day_24':DDS_MY24.time_of_day_24},dims=['time_of_day_24','lat','lon'])
var1.name = 'annual_ave_pow_E33'
var2 = xr.DataArray(wpd_diurn_ave_MY24, coords={'lon':DDS_MY24.lon,'lat':DDS_MY24.lat,'zagl':DDS_MY24.zagl,'time_of_day_24':DDS_MY24.time_of_day_24},dims=['time_of_day_24','zagl','lat','lon'])
var2.name = 'wpd_diurn_ave_MY24'
NEW_DF = xr.merge([var1,var2])
NEW_DF.to_netcdf(f'{PATH}/Data/FigureS{ct}.nc')
|
[
"noreply@github.com"
] |
noreply@github.com
|
6f9449c2b377200aaaafa218f8cf2583cedbf482
|
b0703ac3217134e4afceb84ab07335c97ec677ba
|
/6:Wagtail_Content_Prototype/cypmh_prototype/nhs_components/migrations/0004_auto_20210514_1117.py
|
dfc57e61fe424e62fb6197c9bcefc3f39c561983
|
[] |
no_license
|
madetech/G1-Spikes
|
390622eec7a91ea986e1debbb9a270cd9e9091aa
|
fc542216a3102d516e6c005aad0b66755c725544
|
refs/heads/main
| 2023-07-03T08:40:05.080265
| 2021-08-03T10:37:36
| 2021-08-03T10:37:36
| 360,846,858
| 0
| 0
| null | 2021-08-03T10:37:37
| 2021-04-23T10:27:48
|
Python
|
UTF-8
|
Python
| false
| false
| 10,863
|
py
|
# Generated by Django 3.1.8 on 2021-05-14 11:17
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
import wagtailnhsukfrontend.blocks
class Migration(migrations.Migration):
dependencies = [
('nhs_components', '0003_auto_20210514_1112'),
]
operations = [
migrations.AlterField(
model_name='nhspage',
name='body',
            field=wagtail.core.fields.StreamField([('action_link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link text', required=True)), ('external_url', wagtail.core.blocks.URLBlock(label='URL', required=False)), ('new_window', wagtail.core.blocks.BooleanBlock(label='Open in new window', required=False)), ('internal_page', wagtail.core.blocks.PageChooserBlock(label='Internal Page', required=False))])), ('callout', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(default='Important', required=True)), ('visually_hidden_prefix', wagtail.core.blocks.BooleanBlock(help_text='If the title doesn\'t contain the word "Important" select this to add a visually hidden "Important", to aid screen readers.', label='Visually hidden prefix', required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=6.', max_value=6, min_value=2, required=True)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('heading', wagtail.core.blocks.CharBlock(form_classname='heading')), ('inset_text', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('care_card', wagtail.core.blocks.StructBlock([('type', wagtail.core.blocks.ChoiceBlock(choices=[('primary', 'Non-urgent'), ('urgent', 'Urgent'), ('immediate', 'Immediate')])), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=4.', max_value=6, min_value=2, required=True)), ('title', wagtail.core.blocks.CharBlock(required=True)), ('body', wagtail.core.blocks.StreamBlock([('richtext', wagtail.core.blocks.RichTextBlock()), ('action_link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link text', required=True)), ('external_url', wagtail.core.blocks.URLBlock(label='URL', required=False)), ('new_window', wagtail.core.blocks.BooleanBlock(label='Open in new window', required=False)), ('internal_page', wagtail.core.blocks.PageChooserBlock(label='Internal Page', required=False))])), ('details', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('body', wagtail.core.blocks.StreamBlock([('richtext', wagtail.core.blocks.RichTextBlock()), ('action_link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link text', required=True)), ('external_url', wagtail.core.blocks.URLBlock(label='URL', required=False)), ('new_window', wagtail.core.blocks.BooleanBlock(label='Open in new window', required=False)), ('internal_page', wagtail.core.blocks.PageChooserBlock(label='Internal Page', required=False))])), ('inset_text', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('image', wagtail.core.blocks.StructBlock([('content_image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Only leave this blank if the image is decorative.', required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False))])), ('panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=6.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('feature_card', wagtail.core.blocks.StructBlock([('feature_heading', wagtail.core.blocks.CharBlock(required=True)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=6.', max_value=6, min_value=2)), ('heading_size', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('small', 'Small'), ('medium', 'Medium'), ('large', 'Large')], help_text="The heading size affects the visual size, this follows the front-end library's sizing.", required=False)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('warning_callout', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(default='Important', required=True)), ('visually_hidden_prefix', wagtail.core.blocks.BooleanBlock(help_text='If the title doesn\'t contain the word "Important" select this to add a visually hidden "Important", to aid screen readers.', label='Visually hidden prefix', required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=6.', max_value=6, min_value=2, required=True)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('summary_list', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtailnhsukfrontend.blocks.SummaryListRowBlock)), ('no_border', wagtail.core.blocks.BooleanBlock(default=False, required=False))]))], required=True))])), ('inset_text', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('image', wagtail.core.blocks.StructBlock([('content_image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Only leave this blank if the image is decorative.', required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False))])), ('grey_panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(label='heading', required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no heading. Default=3, Min=2, Max=6.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('feature_card', wagtail.core.blocks.StructBlock([('feature_heading', wagtail.core.blocks.CharBlock(required=True)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=6.', max_value=6, min_value=2)), ('heading_size', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('small', 'Small'), ('medium', 'Medium'), ('large', 'Large')], help_text="The heading size affects the visual size, this follows the front-end library's sizing.", required=False)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('warning_callout', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(default='Important', required=True)), ('visually_hidden_prefix', wagtail.core.blocks.BooleanBlock(help_text='If the title doesn\'t contain the word "Important" select this to add a visually hidden "Important", to aid screen readers.', label='Visually hidden prefix', required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=6.', max_value=6, min_value=2, required=True)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('summary_list', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtailnhsukfrontend.blocks.SummaryListRowBlock)), ('no_border', wagtail.core.blocks.BooleanBlock(default=False, required=False))]))], required=True))])), ('details', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('body', wagtail.core.blocks.StreamBlock([('richtext', wagtail.core.blocks.RichTextBlock()), ('action_link', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(label='Link text', required=True)), ('external_url', wagtail.core.blocks.URLBlock(label='URL', required=False)), ('new_window', wagtail.core.blocks.BooleanBlock(label='Open in new window', required=False)), ('internal_page', wagtail.core.blocks.PageChooserBlock(label='Internal Page', required=False))])), ('inset_text', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('image', wagtail.core.blocks.StructBlock([('content_image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('alt_text', wagtail.core.blocks.CharBlock(help_text='Only leave this blank if the image is decorative.', required=False)), ('caption', wagtail.core.blocks.CharBlock(required=False))])), ('panel', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=6.', max_value=6, min_value=2)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('feature_card', wagtail.core.blocks.StructBlock([('feature_heading', wagtail.core.blocks.CharBlock(required=True)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Ignore this if there is no label. Default=3, Min=2, Max=6.', max_value=6, min_value=2)), ('heading_size', wagtail.core.blocks.ChoiceBlock(choices=[('', 'Default'), ('small', 'Small'), ('medium', 'Medium'), ('large', 'Large')], help_text="The heading size affects the visual size, this follows the front-end library's sizing.", required=False)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('warning_callout', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(default='Important', required=True)), ('visually_hidden_prefix', wagtail.core.blocks.BooleanBlock(help_text='If the title doesn\'t contain the word "Important" select this to add a visually hidden "Important", to aid screen readers.', label='Visually hidden prefix', required=False)), ('heading_level', wagtail.core.blocks.IntegerBlock(default=3, help_text='The heading level affects users with screen readers. Default=3, Min=2, Max=6.', max_value=6, min_value=2, required=True)), ('body', wagtail.core.blocks.RichTextBlock(required=True))])), ('summary_list', wagtail.core.blocks.StructBlock([('rows', wagtail.core.blocks.ListBlock(wagtailnhsukfrontend.blocks.SummaryListRowBlock)), ('no_border', wagtail.core.blocks.BooleanBlock(default=False, required=False))]))], required=True))])), ('text_content', wagtail.core.blocks.RichTextBlock())]),
),
]
|
[
"liam.miller@madetech.com"
] |
liam.miller@madetech.com
|
20496097f5f7d9d5460ac3bc6b6e4f2aff62bfac
|
fe85b4811c93510006b666858d6029156f167f89
|
/bin/hooks/notify.py
|
6e5dda8635f63227a7b9e6e4d72ce489645fd7c9
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
9001/copyparty
|
39207421ccdc501566105da562a168996e0f9b4c
|
48a3898aa692770735a926b0c18300d7da8b021f
|
refs/heads/hovudstraum
| 2023-08-18T15:19:36.934124
| 2023-08-16T19:57:19
| 2023-08-16T19:57:19
| 188,700,274
| 273
| 21
|
MIT
| 2023-08-09T20:50:27
| 2019-05-26T15:28:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
#!/usr/bin/env python3
import os
import sys
import subprocess as sp
from plyer import notification
_ = r"""
show os notification on upload; works on windows, linux, macos, android
dependencies:
windows: python3 -m pip install --user -U plyer
linux: python3 -m pip install --user -U plyer
macos: python3 -m pip install --user -U plyer pyobjus
android: just termux and termux-api
example usages; either as global config (all volumes) or as volflag:
--xau f,bin/hooks/notify.py
-v srv/inc:inc:r:rw,ed:c,xau=f,bin/hooks/notify.py
^^^^^^^^^^^^^^^^^^^^^^^^^^^
(share filesystem-path srv/inc as volume /inc,
readable by everyone, read-write for user 'ed',
running this plugin on all uploads with the params listed below)
parameters explained,
xau = execute after upload
f = fork so it doesn't block uploads
"""
try:
from copyparty.util import humansize
except:
def humansize(n):
return n
def main():
fp = sys.argv[1]
dp, fn = os.path.split(fp)
try:
sz = humansize(os.path.getsize(fp))
except:
sz = "?"
msg = "{} ({})\n📁 {}".format(fn, sz, dp)
title = "File received"
if "com.termux" in sys.executable:
sp.run(["termux-notification", "-t", title, "-c", msg])
return
icon = "emblem-documents-symbolic" if sys.platform == "linux" else ""
notification.notify(
title=title,
message=msg,
app_icon=icon,
timeout=10,
)
if __name__ == "__main__":
main()
|
[
"s@ocv.me"
] |
s@ocv.me
|
adbdecf911aa42a3a549d55874bba69c09b8a299
|
2ffdfe188859d5be1a427ce1c4457d41331b541c
|
/message/GREEDYMOTIFSEARCH(Dna,k,t)-profile.py
|
4141b3dd1d68935dd12951302f7307f9a4edb569
|
[] |
no_license
|
Hydebutterfy/learn-python
|
18504b0c7281c2b0f94dbc73e77da87c8ac3ff38
|
883614deaf0a3cdf46d8305197fe5659fd609d60
|
refs/heads/master
| 2021-01-24T11:59:44.883949
| 2016-12-02T07:48:40
| 2016-12-02T07:48:40
| 59,213,755
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,545
|
py
|
#GREEDYMOTIFSEARCH(Dna, k, t)
#BestMotifs ← motif matrix formed by first k-mers in each string from Dna
#for each k-mer Motif in the first string from Dna
#Motif1 ← Motif
#for i = 2 to t
#form Profile from motifs Motif1, …, Motifi - 1
#Motifi ← Profile-most probable k-mer in the i-th string in Dna
#Motifs ← (Motif1, …, Motift)
#if Score(Motifs) < Score(BestMotifs)
#BestMotifs ← Motifs
#return BestMotifs
# Explanation of the algorithm: http://www.mrgraeme.co.uk/greedy-motif-search/
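# Minimal usage sketch (hypothetical data, not from the course material):
#   Dna = ["GGCGTTCAGGCA", "AAGAATCAGTCA", "CAAGGAGTTCGC"]
#   GreedyMotifSearchWithPseudocounts(Dna, k=3, t=3)
# The search scans every k-mer of Dna[0], builds a pseudocount profile from the
# motifs chosen so far, and keeps the motif matrix with the lowest score.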
def GreedyMotifSearchWithPseudocounts(Dna, k, t):
BestMotifs = []
for i in range(0, t):
BestMotifs.append(Dna[i][0:k])
#print(BestMotifs)
n = len(Dna[0])
for i in range(n - k + 1):
Motifs = []
Motifs.append(Dna[0][i:i + k])
# print(Motifs)
for j in range(1, t):
P = Profile(Motifs[0:j])
Motifs.append(ProfileMostProbablePattern(Dna[j], k, P))
if Score(Motifs) < Score(BestMotifs):
BestMotifs = Motifs
return BestMotifs
def Pr(pattern, profile):
temp_Profile=1
    for j in range(len(pattern)):  # use the pattern length rather than the global k
#print(float(profile[pattern[j]][j]))
#print(temp_Profile)
temp_Profile=temp_Profile*float(profile[pattern[j]][j])
#print(temp_Profile)
return temp_Profile
def ProfileMostProbablePattern(Text, k, Profile):
l=len(Text)
profile_list=[]
for i in range(l - k + 1):
pattern = Text[i:i + k]
profile_list.append(Pr(pattern, Profile))
max_profile = max(profile_list)
position = profile_list.index(max_profile)
return Text[position:position + k]
def Count(Motifs):
count = {}
k = len(Motifs[0])
for symbol in "ACGT":
count[symbol] = []
for j in range(k):
count[symbol].append(1)
#print(count)
t = len(Motifs)
for i in range(t):
for j in range(k):
symbol = Motifs[i][j]
count[symbol][j] += 1
#print(count)
return count
def Consensus(Motifs):
count = Count(Motifs)
k = len(Motifs[0])
consensus = ""
for j in range(k):
m=0
frequentSymbol = ""
for symbol in "ACGT":
if count[symbol][j] > m:
m = count[symbol][j]
frequentSymbol = symbol
consensus += frequentSymbol
return count,consensus
def Score(Motifs):
k = len(Motifs[0])
count,consensus_temp=Consensus(Motifs)
print(count,consensus_temp)
score = []
for i in range(k):
score.append(0)
for i in range(k):
for j in "ATCG":
if consensus_temp[i]!=j:
score[i]+=(count[j][i]-1)
allscore=int(0)
for i in range(k):
allscore=allscore+score[i]
return allscore
def Profile(Motifs):
t = len(Motifs)
k = len(Motifs[0])
profile = {}
for symbol in "ACGT":
profile[symbol] = []
for j in range(k):
profile[symbol].append(0)
Counts = Count(Motifs)
#print(Counts)
for symbol in "ACGT":
for i in range(k):
profile[symbol][i] = (Counts[symbol][i]+1)/(t+4)
#print(profile)
return profile
Dna=[]
filename = input("Enter file name: ")
fileread = open(filename,"r")
for i in fileread:
dna=i.strip()
Dna.append(dna.upper())
k=int(input("what is the k:"))
t=len(Dna)
temp=GreedyMotifSearchWithPseudocounts(Dna, k, t)
print('\n'.join(temp))
print(Score(temp))
# Improved by adding pseudocounts when building the profile, increasing accuracy
|
[
"chenhyde628@gmail.com"
] |
chenhyde628@gmail.com
|
6e7c3ec5e74550ca5f1d604f5f6a2950d3b44151
|
943b2da5e84aa1d61021a85b4f0cdbfe01dd2d74
|
/oaspec/schema/funcs.py
|
11b87886f7d4eade3a538c0c9ddfef777e2c3e26
|
[
"MIT"
] |
permissive
|
platformsh/oaspec
|
cd32a3304b14272501d317e6d5aa40f20cfbc18c
|
8816bec4ce277d3505d65ccd77485e1a0ce095f2
|
refs/heads/master
| 2021-07-13T20:18:03.194655
| 2019-06-18T00:22:44
| 2019-06-18T00:22:44
| 206,879,484
| 0
| 3
|
MIT
| 2021-06-23T13:51:02
| 2019-09-06T21:54:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,728
|
py
|
# -*- coding: utf-8 -*-
import re
from copy import deepcopy
def def_key(key):
"""Compute a definition ref from a key.
Returns:
str: The computed relative reference
"""
return f"#/definitions/{key}"
def get_all_refs(schema):
"""Get all ref links in a schema.
Traverses a schema and extracts all relative ref links from the schema,
returning a set containing the results.
Parameters:
schema: An OAS schema in the form of nested dicts to be traversed.
Returns:
set: All of the ref links found during traversal.
"""
all_refs = set()
if type(schema) is dict:
for key, val in schema.items():
if key == "$ref" and type(val) is str:
all_refs.add(val)
all_refs.update(get_all_refs(val))
elif type(schema) is list:
for item in schema:
all_refs.update(get_all_refs(item))
return all_refs
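# Example (illustrative):
#   get_all_refs({"a": {"$ref": "#/definitions/Pet"},
#                 "b": [{"$ref": "#/definitions/Tag"}]})
# returns {"#/definitions/Pet", "#/definitions/Tag"}.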
def get_def_classes(schema, def_objects, ignore_keys=None):
"""Return the definition objects represented by relative refs in a schema.
Gets all of the relative refs present in a schema object and returns a mapping
of refs to schema objects, recursively resolving references listed in the
retrieved schema definitions. This function is used to collect the referenced
definitions that will be added to each schema class's `_parsing_schema` attribute.
Parameters:
schema: The schema to parse for relative refs.
def_objects: A mapping of relative ref keys and the Schema sub-classes to which they
correspond. The `_parsing_schema` will be extracted from each referenced class.
ignore_keys: A set of keys that should be skipped when encountered during traversal,
in order to prevent infinite recursion when encountering circular references.
Returns:
        dict: A mapping of relative ref keys and their corresponding raw definitions.
"""
# Traverse the schema to get all relative ref links
all_refs = get_all_refs(schema)
if not all_refs:
return {}
def_classes = {}
for key in all_refs:
subkey = key.split("/")[-1]
if ignore_keys and subkey in ignore_keys:
continue
subschema = deepcopy(def_objects[key]._parsing_schema)
def_classes[subkey] = subschema
    # Recursively de-reference ref links found in the retrieved definition objects
def_classes.update(get_def_classes(def_classes[subkey], def_objects, def_classes.keys()))
return def_classes
def schema_hash(schema):
"""Generate a string-based hash of a schema object.
Returns:
str: Schema hash.
"""
return str(abs(hash(str(schema))))
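# Caveat: hash() of a str is salted per interpreter run in Python 3, so this
# value is only stable within a single process unless PYTHONHASHSEED is fixed.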
|
[
"nick.anderegg@platform.sh"
] |
nick.anderegg@platform.sh
|
b7f90b7cf746aca123b7602fbd4d17e1ee97ac65
|
6b533b5ef49306b0e8db2d147d837659b1a7126b
|
/homework-3/ad_engine.py
|
63b9903d99a118275784e95460fdae92733b98ad
|
[
"MIT"
] |
permissive
|
sofiaruiz/AI_class_assignments
|
35bff0d4e2fa5cdb61307e20b5af332f96e557a8
|
16fb74da92e7328163049699b74a4dd897233bcf
|
refs/heads/master
| 2022-04-14T10:46:34.487783
| 2020-04-11T23:37:57
| 2020-04-11T23:37:57
| 148,580,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,375
|
py
|
'''
ad_engine.py
CMSI 485 HW 3: Advertisement engine that selects from two
ad traits to maximize expected utility of converting a sale
for the Forney Industries Protectron 3001
'''
import itertools
import unittest
import math
import numpy as np
from pomegranate import *
class AdEngine:
def __init__(self, data_file, structure, dec_vars, util_map):
"""
Responsible for initializing the Decision Network of the
AdEngine from the structure discovered by Tetrad
:param string data_file: path to csv file containing data on which
the network's parameters are to be learned
:param tuple structure: tuple of tuples specifying parental
relationships between variables in the network; see Pomegranate docs
for the expected format. Example:
((), (0), (1)) represents nodes: [0] -> [1] -> [2]
:param list dec_vars: list of string names of variables to be
considered decision points for the agent. Example:
["Ad1", "Ad2"]
:param dict util_map: discrete, tabular, utility map whose keys
are variables in network that are parents of a utility node, and
values are dictionaries mapping that variable's values to a utility
score, e.g.
{
"X": {0: 20, 1: -10}
}
represents a utility node with single parent X whose value of 0
has a utility score of 20, and value 1 has a utility score of -10
"""
data = np.genfromtxt(data_file, names = True, dtype = None, delimiter = ',')
#print (data)
state_names = data.dtype.names
#print(state_names)
x = data.view((int, len(state_names)))
self.model = BayesianNetwork.from_structure(x, structure, state_names = state_names)
#print(self.model)
        self.dec_vars = dec_vars
        self.util_map = util_map  # kept for the expected-utility lookups in decide()
        return
def decide(self, evidence):
"""
Given some observed demographic "evidence" about a potential
consumer, selects the ad content that maximizes expected utility
and returns a dictionary over any decision variables and their
best values
:param dict evidence: dict mapping network variables to their
observed values, of the format: {"Obs1": val1, "Obs2": val2, ...}
:return: dict of format: {"DecVar1": val1, "DecVar2": val2, ...}
"""
        best_combo, best_util = None, -math.inf
        # Completion sketch (the original assignment was left unfinished); assumes
        # binary (0/1) decision variables and that predict_proba returns pomegranate
        # DiscreteDistribution marginals whose parameters[0] maps value -> probability.
        state_names = [s.name for s in self.model.states]
        for combo in itertools.product([0, 1], repeat=len(self.dec_vars)):
            query = dict(evidence, **dict(zip(self.dec_vars, combo)))
            marginals = self.model.predict_proba(query)
            expected_utility = 0
            for util_var, util_vals in self.util_map.items():
                dist = marginals[state_names.index(util_var)]
                expected_utility += sum(dist.parameters[0][v] * u for v, u in util_vals.items())
            if expected_utility > best_util:
                best_combo, best_util = dict(zip(self.dec_vars, combo)), expected_utility
        return best_combo
class AdEngineTests(unittest.TestCase):
def test_defendotron_ad_engine_t1(self):
engine = AdEngine(
data_file = '/Users/sofiaruiz/lmu-cmsi-485/homework-3/hw3_data.csv',
dec_vars = ["Ad1", "Ad2"],
# TODO: Current structure is blank; you need to fill this in using
# the results from the Tetrad analysis!
structure = ((), (), (0, 9,), (6,), (1, 0,), (1, 8,), (), (2, 5,), (), ()),
# TODO: Decide what the utility map should be for the Defendotron
# example; see format of util_map in spec and above!
util_map = {"S" : {0: 0, 1: 5000, 2: 17760}}
)
self.assertEqual(engine.decide({"G": 0}), {"Ad1": 0, "Ad2": 1})
self.assertEqual(engine.decide({"F": 1}), {"Ad1": 1, "Ad2": 0})
self.assertEqual(engine.decide({"G": 1, "T": 0}), {"Ad1": 1, "Ad2": 1})
#
# def test_defendotron_ad_engine_t2(self):
# engine = AdEngine(
# data_file = 'hw3_data.csv',
# # [!] Note: in this example, say we are only deciding upon the ad
# # video (Ad1); our engine's results should adapt accordingly (see
# # tests below)
# dec_vars = ["Ad1"],
# # TODO: Current structure is blank; you need to fill this in using
# # the results from the Tetrad analysis!
# structure = (),
# # TODO: Decide what the utility map should be for the Defendotron
# # example; see format of util_map in spec and above!
# util_map = {}
# )
# self.assertEqual(engine.decide({"A": 1}), {"Ad1": 0})
# self.assertEqual(engine.decide({"P": 1, "A": 0}), {"Ad1": 1})
# self.assertEqual(engine.decide({"A": 1, "G": 1, "T": 1}), {"Ad1": 0})
if __name__ == "__main__":
unittest.main()
|
[
"sruiz13@lion.lmu.edu"
] |
sruiz13@lion.lmu.edu
|
92a2c9a2ec7f194f864497a4a7795eeba899fab1
|
0f727fdca3f37e08ed057abcfeb068b3f4acb84b
|
/gps_helper/test/test_kalman.py
|
1d732c3f244605a4ce37104b91e1cf6df728c182
|
[
"MIT"
] |
permissive
|
Miguel-O-Matic/gps-helper
|
e7939902201f2e5b1260e2e801439660fe3f4d32
|
dae3f143281ef930de84a1b783d33aa6e4446040
|
refs/heads/master
| 2020-06-24T20:51:34.604777
| 2019-08-15T12:42:32
| 2019-08-15T12:42:32
| 199,085,652
| 0
| 0
|
MIT
| 2019-07-26T22:08:41
| 2019-07-26T22:08:41
| null |
UTF-8
|
Python
| false
| false
| 15,770
|
py
|
from .test_helper import GPSTest
from .. import kalman as kf
from .. import simulator as sim
import numpy as np
from numpy.linalg import norm
from numpy import testing as npt
class TestKalman(GPSTest):
"""
Test class for the kalman functions.
"""
_multiprocess_can_split_ = True
def test_simple_kalman_x(self):
"""
If only ten measurements are used, the tests do not cover all the way to convergence. Therefore, every tenth
sample is used for the test.
:return:
"""
dt = 0.1
t = np.arange(0, 10 + dt, dt)
x_saved = np.zeros((len(t), 2))
x_test = np.array([[13.50229134, 13.50229134],
[13.67920055, 13.67920055],
[13.35742003, 13.35742003],
[13.74166822, 13.74166822],
[13.69347514, 13.69347514],
[13.81459412, 13.81459412],
[13.83609622, 13.83609622],
[13.8665808 , 13.8665808 ],
[13.77658251, 13.77658251],
[13.85373983, 13.85373983],
[13.7394069 , 13.7394069 ]])
# Create objects for the simulation
gv = sim.GetVoltage(14.0, dt, sigma_w=2)
sk = kf.SimpleKalman(initial_state=14)
for k in range(len(t)):
z = gv.measurement()
x_saved[k, :] = sk.next_sample(z)
npt.assert_almost_equal(x_test, x_saved[::10])
def test_simple_kalman_k(self):
"""
If only ten measurements are used, the tests do not cover all the way to convergence. Therefore, every tenth
sample is used for the test.
:return:
"""
dt = 0.1
t = np.arange(0, 10 + dt, dt)
k_saved = np.zeros(len(t))
k_test = np.array([0.6, 0.08571429, 0.04615385, 0.03157895, 0.024,
0.01935484, 0.01621622, 0.01395349, 0.0122449 , 0.01090909,
0.00983607])
# Create objects for the simulation
gv = sim.GetVoltage(14.0, dt, sigma_w=2)
sk = kf.SimpleKalman(initial_state=14)
for k in range(len(t)):
z = gv.measurement()
sk.next_sample(z)
k_saved[k] = sk.K
npt.assert_almost_equal(k_test, k_saved[::10])
def test_simple_kalman_p(self):
"""
If only ten measurements are used, the tests do not cover all the way to convergence. Therefore, every tenth
sample is used for the test.
:return:
"""
dt = 0.1
t = np.arange(0, 10 + dt, dt)
p_saved = np.zeros(len(t))
p_test = np.array([2.4, 0.34285714, 0.18461538, 0.12631579, 0.096,
0.07741935, 0.06486486, 0.05581395, 0.04897959, 0.04363636,
0.03934426])
# Create objects for the simulation
gv = sim.GetVoltage(14.0, dt, sigma_w=2)
sk = kf.SimpleKalman(initial_state=14)
for k in range(len(t)):
z = gv.measurement()
sk.next_sample(z)
p_saved[k] = sk.P
npt.assert_almost_equal(p_test, p_saved[::10])
def test_pos_kalman_x_pos(self):
"""
If only ten measurements are used, the tests do not cover all the way to convergence. Therefore, every tenth
sample is used for the test.
:return:
"""
dt = 0.1
t = np.arange(0, 10 + dt, dt)
x_saved = np.zeros((2, len(t)))
x_test = np.array([2.40104478, 85.65186517, 167.37150554, 248.87502207,
328.96915024, 406.71764263, 486.88690622, 566.02913866,
645.68324187, 727.69351353, 810.20295338])
# Create objects for the simulation
Q = np.array([[1, 0], [0, 3]])
R = np.array([[10, 0], [0, 2]])
gpv = sim.GetPosVel(Q=Q, R=R, dt=dt)
pk = kf.PosKalman(Q, R, initial_state=[0, 80])
for k in range(len(t)):
# take a measurement
z = gpv.measurement()
# Update the Kalman filter
x_saved[:, k, None] = pk.next_sample(z)
npt.assert_almost_equal(x_test, x_saved[0, ::10])
def test_pos_kalman_x_vel(self):
"""
If only ten measurements are used, the tests do not cover all the way to convergence. Therefore, every tenth
sample is used for the test.
:return:
"""
dt = 0.1
t = np.arange(0, 10 + dt, dt)
x_saved = np.zeros((2, len(t)))
x_test = np.array([63.71165764, 81.26369543, 79.46747731, 79.05128724, 77.47045411,
78.00121222, 80.21543726, 80.60963484, 81.56581114, 81.90123253,
80.31546328])
# Create objects for the simulation
Q = np.array([[1, 0], [0, 3]])
R = np.array([[10, 0], [0, 2]])
gpv = sim.GetPosVel(Q=Q, R=R, dt=dt)
pk = kf.PosKalman(Q, R, initial_state=[0, 80])
for k in range(len(t)):
# take a measurement
z = gpv.measurement()
# Update the Kalman filter
x_saved[:, k, None] = pk.next_sample(z)
npt.assert_almost_equal(x_test, x_saved[1, ::10])
def test_pos_kalman_p_pos(self):
"""
If only ten measurements are used, the tests do not cover all the way to convergence. Therefore, every tenth
sample is used for the test.
:return:
"""
dt = 0.1
t = np.arange(0, 10 + dt, dt)
x_saved = np.zeros((2, len(t)))
p_diag = np.zeros((len(t),2))
p_test = np.array([3.76669267, 2.76774029, 2.76843135, 2.7685862, 2.76859725,
2.76859802, 2.76859808, 2.76859808, 2.76859808, 2.76859808,
2.76859808])
# Create objects for the simulation
Q = np.array([[1, 0], [0, 3]])
R = np.array([[10, 0], [0, 2]])
gpv = sim.GetPosVel(Q=Q, R=R, dt=dt)
pk = kf.PosKalman(Q, R, initial_state=[0, 80])
for k in range(len(t)):
# take a measurement
z = gpv.measurement()
# Update the Kalman filter
x_saved[:, k, None] = pk.next_sample(z)
p_diag[k, :] = pk.P.diagonal()
npt.assert_almost_equal(p_test, p_diag[::10, 0])
def test_pos_kalman_p_vel(self):
"""
If only ten measurements are used, the tests do not cover all the way to convergence. Therefore, every tenth
sample is used for the test.
:return:
"""
dt = 0.1
t = np.arange(0, 10 + dt, dt)
x_saved = np.zeros((2, len(t)))
p_diag = np.zeros((len(t),2))
p_test = np.array([1.59079563, 1.36849424, 1.36839693, 1.36838998, 1.3683895 ,
1.36838946, 1.36838946, 1.36838946, 1.36838946, 1.36838946,
1.36838946])
# Create objects for the simulation
Q = np.array([[1, 0], [0, 3]])
R = np.array([[10, 0], [0, 2]])
gpv = sim.GetPosVel(Q=Q, R=R, dt=dt)
pk = kf.PosKalman(Q, R, initial_state=[0, 80])
for k in range(len(t)):
# take a measurement
z = gpv.measurement()
# Update the Kalman filter
x_saved[:, k, None] = pk.next_sample(z)
p_diag[k, :] = pk.P.diagonal()
npt.assert_almost_equal(p_test, p_diag[::10, 1])
def test_dv_kalman_x_pos(self):
dt = 0.1
t = np.arange(0, 10 + dt, dt)
x_saved = np.zeros((len(t), 2))
x_test = np.array([3.00505891, 81.24273033, 159.15363574, 239.96205645,
306.83657799, 390.95995075, 482.18752904, 560.48756223,
638.8627147, 716.34708371, 796.09003321])
# Create objects for the simulation
gp = sim.GetPos()
dk = kf.DvKalman()
for k in range(len(t)):
z = gp.measurement()
x_saved[k, :] = dk.next_sample(z)
npt.assert_almost_equal(x_test, x_saved[::10, 0])
def test_dv_kalman_x_vel(self):
dt = 0.1
t = np.arange(0, 10 + dt, dt)
x_saved = np.zeros((len(t), 2))
x_test = np.array([20.08306272, 59.87779911, 72.3189212 , 73.53420293, 64.12422919,
66.87463733, 84.32133029, 79.23214373, 82.47667741, 86.30916162,
80.29449928])
# Create objects for the simulation
gp = sim.GetPos()
dk = kf.DvKalman()
for k in range(len(t)):
z = gp.measurement()
x_saved[k, :] = dk.next_sample(z)
npt.assert_almost_equal(x_test, x_saved[::10, 1])
def test_int_kalman_x_vel(self):
dt = 0.1
t = np.arange(0, 10 + dt, dt)
x_saved = np.zeros((len(t), 2))
x_test = np.array([44.82330127, 80.66266235, 72.73385292, 82.05528032, 74.8722922 ,
82.66549625, 86.53350242, 80.31837348, 80.74624911, 83.77797869,
80.57758014])
# Create objects for the simulation
gv = sim.GetVel()
ik = kf.IntKalman()
for k in range(len(t)):
z = gv.measurement()
x_saved[k, :] = ik.next_sample(z)
npt.assert_almost_equal(x_test, x_saved[::10, 1])
def test_int_kalman_x_pos(self):
dt = 0.1
t = np.arange(0, 10 + dt, dt)
x_saved = np.zeros((len(t), 2))
x_test = np.array([3.55145633, 77.7381104 , 153.44154205, 235.38042071,
313.80303816, 394.62000052, 473.96915187, 554.86351,
630.47869611, 712.59481293, 786.37162823])
# Create objects for the simulation
gv = sim.GetVel()
ik = kf.IntKalman()
for k in range(len(t)):
z = gv.measurement()
x_saved[k, :] = ik.next_sample(z)
npt.assert_almost_equal(x_test, x_saved[::10, 0])
def test_ekf_x(self):
dt = 0.05
n_samples = 500
t = np.arange(n_samples) * dt
n_samples = len(t)
x_saved = np.zeros((n_samples, 3))
x_test = np.array([[ 4.5 , 90. , 1048.35700227],
[ 340.92701179, 129.19059199, 1007.64740711],
[ 793.01850282, 153.83039049, 1003.76887912],
[1187.81309238, 156.6880102 , 1003.96499988],
[1635.12480478, 169.35387149, 1006.01391596],
[1965.96084856, 148.35186341, 1000.27827996],
[2411.82207964, 162.38496029, 1004.06328925],
[2811.82330229, 161.8190544 , 1003.97565162],
[3254.16219351, 168.74746912, 1004.5385335 ],
[3630.89648321, 154.58882883, 1004.22494179]])
gr = sim.GetRadar()
ekf = kf.RadarEKF(dt, initial_state=[0, 90, 1100])
for k in range(n_samples):
xm = gr.measurement()
x_saved[k, :] = ekf.next_sample(xm)
npt.assert_almost_equal(x_test, x_saved[::50, :])
def test_ekf_z(self):
dt = 0.05
n_samples = 500
t = np.arange(n_samples) * dt
n_samples = len(t)
x_saved = np.zeros((n_samples, 3))
z_saved = np.zeros(n_samples)
z_test = np.array([1052.22272082, 1071.57581789, 1288.44638908, 1563.13684492,
1927.27212923, 2210.78424339, 2617.51747043, 2990.06735689,
3409.85996124, 3770.3812422 ])
gr = sim.GetRadar()
ekf = kf.RadarEKF(dt, initial_state=[0, 90, 1100])
for k in range(n_samples):
xm = gr.measurement()
x_saved[k, :] = ekf.next_sample(xm)
z_saved[k] = norm(x_saved[k])
npt.assert_almost_equal(z_test, z_saved[::50])
def test_sigma_points(self):
xm = np.array([[5], [5]])
px = 9 * np.eye(2)
kappa = 2
xi, w = kf.sigma_points(xm, px, kappa) # sigma points and weights
xi_test = [[ 5., 11., 5., -1., 5.],
[ 5., 5., 11., 5., -1.]]
w_test = [0.5, 0.125, 0.125, 0.125, 0.125]
npt.assert_almost_equal(xi_test, xi)
npt.assert_almost_equal(w_test, w)
def test_ut(self):
xm = np.array([[5], [5]])
px = 9 * np.eye(2)
kappa = 2
x_avg_test = [[5.], [5.]]
x_cov_test = [[9., 0.], [0., 9.]]
xi, w = kf.sigma_points(xm, px, kappa) # sigma points and weights
xAvg, xCov = kf.ut(xi, w) # estimate mean vector and covariance matrix using sigma points
npt.assert_almost_equal(x_avg_test, xAvg)
npt.assert_almost_equal(x_cov_test, xCov)
def test_radar_ukf_x(self):
x_test = np.array([[ 4.11483289, 89.9807873 , 1006.07122023],
[ 352.3105395 , 111.00439655, 1007.21596109],
[ 760.28088421, 129.09475887, 1013.98092468],
[1146.57543567, 135.45782881, 1023.0940802 ],
[1570.56204941, 142.75711716, 1041.58425653],
[1920.57768989, 141.88223322, 1039.88681358],
[2352.56816971, 149.54007223, 1056.58613916],
[2759.93367043, 153.00065999, 1063.05210355],
[3197.83961482, 158.44281893, 1072.24895062],
[3601.64732546, 158.77005313, 1073.01176455]])
dt = 0.05
n_samples = 500
t = np.arange(n_samples) * dt
n_samples = len(t)
x_saved = np.zeros((n_samples, 3))
gr = sim.GetRadar()
r_ukf = kf.RadarUKF(dt, initial_state=[0, 90, 1100])
for k in range(n_samples):
xm = gr.measurement()
x_saved[k, :] = r_ukf.next_sample(xm)
npt.assert_almost_equal(x_test, x_saved[::50, :])
def test_radar_ukf_z(self):
z_test = np.array([1010.09542821, 1072.81344351, 1273.91121954, 1542.62942731,
1889.95833525, 2188.63286461, 2583.27572973, 2961.54061968,
3376.5367081 , 3761.43930347])
dt = 0.05
n_samples = 500
t = np.arange(n_samples) * dt
n_samples = len(t)
x_saved = np.zeros((n_samples, 3))
z_saved = np.zeros(n_samples)
gr = sim.GetRadar()
r_ukf = kf.RadarUKF(dt, initial_state=[0, 90, 1100])
for k in range(n_samples):
xm = gr.measurement()
x_saved[k, :] = r_ukf.next_sample(xm)
z_saved[k] = norm(x_saved[k])
npt.assert_almost_equal(z_test, z_saved[::50])
def test_radar_ukf_k(self):
k_test = np.array([[3.72767389e-03, 1.85941828e-04, 9.09049215e-01],
[3.61211252e-01, 7.79001494e-02, 1.78803480e-02],
[8.96156438e-02, 1.19831691e-02, 1.10461769e-02],
[4.37106654e-02, 4.44888833e-03, 8.91105184e-03],
[2.83271141e-02, 2.62417475e-03, 7.81276553e-03],
[2.25960281e-02, 2.85581248e-03, 6.27331967e-03],
[2.09285314e-02, 2.47122914e-03, 4.92643172e-03],
[1.99822813e-02, 2.31160049e-03, 4.09337298e-03],
[1.94611058e-02, 2.22562871e-03, 3.51757877e-03],
[1.91697107e-02, 2.17315151e-03, 3.11253739e-03]])
dt = 0.05
n_samples = 500
t = np.arange(n_samples) * dt
n_samples = len(t)
k_saved = np.zeros((n_samples, 3))
gr = sim.GetRadar()
r_ukf = kf.RadarUKF(dt, initial_state=[0, 90, 1100])
for k in range(n_samples):
xm = gr.measurement()
r_ukf.next_sample(xm)
k_saved[k, :] = r_ukf.K.T
npt.assert_almost_equal(k_test, k_saved[::50, :])
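The docstrings above describe the checkpointing trick these tests rely on: compare every tenth saved sample so a short expected array still spans the whole convergence transient. A minimal standalone sketch of that pattern (the trace below is a stand-in, not real filter output):

import numpy as np
import numpy.testing as npt

def convergence_checkpoints(samples, stride=10):
    # Keep every stride-th sample so a short expected array
    # covers the whole transient, not just its start.
    return np.asarray(samples)[::stride]

trace = np.linspace(0.0, 1.0, 101)  # stand-in for a saved state trace
npt.assert_almost_equal(trace[::10], convergence_checkpoints(trace))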
[author: chiranthsiddappa@gmail.com]

[file: /uploads/urls.py | repo: aymony80/myproject | license: none | Python (UTF-8) | 316 bytes]
from django.conf import settings
from django.conf.urls.static import static
from uploads import views
from django.urls import path
urlpatterns = [
path('', views.model_form_upload, name='model_form_upload'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
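The static() helper above only serves uploads when MEDIA_URL and MEDIA_ROOT are configured; a minimal sketch of the settings this file assumes (the setting names are standard Django, the values here are illustrative):

# settings.py (sketch)
from pathlib import Path

BASE_DIR = Path(__file__).resolve().parent.parent
DEBUG = True
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR / 'media'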
[author: aymony80@yahoo.com]

[file: /pixels.py | repo: jorjuato/pythonic | license: none | Python (UTF-8) | 1,259 bytes]
#!/usr/bin/env python
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import *
import pygame
from pygame.locals import *
screen = get_default_screen()
screen.set( bgcolor = (0.0,0.0,0.0) ) # black (RGB)
white_data = (Numeric.ones((100,200,3))*255).astype(Numeric.UnsignedInt8)
red_data = white_data.copy()
red_data[:,:,1:] = 0 # zero non-red channels
blue_data = white_data.copy()
blue_data[:,:,:-1] = 0 # zero non-blue channels
frame_timer = FrameTimer() # start frame counter/timer
count = 0
quit_now = 0
# This style of main loop is an alternative to using the
# VisionEgg.FlowControl module.
while not quit_now:
for event in pygame.event.get():
if event.type in (QUIT,KEYDOWN,MOUSEBUTTONDOWN):
quit_now = 1
screen.clear()
count = (count+1) % 3
if count == 0:
pixels = white_data
elif count == 1:
pixels = red_data
elif count == 2:
pixels = blue_data
screen.put_pixels(pixels=pixels,
position=(screen.size[0]/2.0,screen.size[1]/2.0),
anchor="center")
swap_buffers() # display what we've drawn
frame_timer.tick() # register frame draw with timer
frame_timer.log_histogram()
[author: yosoymicerinos@yahoo.com]

[file: /prime number between 1-100.py | repo: Gulnaaznasrin21/Loop | license: none | Python (UTF-8) | 223 bytes]
i = 1
while i <= 100:
    factor = 0              # number of divisors of i found so far
    x = 1
    while x <= i:
        if i % x == 0:
            factor += 1
        x += 1
    if factor == 2:         # exactly two divisors (1 and i) means prime
        print(i, "is a prime number")
    else:
        print(i, "is not a prime number")
    i += 1
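For contrast, a trial-division sketch that only tests divisors up to the square root and stops at the first hit; it prints the same lines as the loop above:

def is_prime(n):
    # 0 and 1 have fewer than two divisors, so they are not prime.
    if n < 2:
        return False
    for d in range(2, int(n ** 0.5) + 1):
        if n % d == 0:
            return False
    return True

for i in range(1, 101):
    verdict = "is" if is_prime(i) else "is not"
    print(i, verdict, "a prime number")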
[author: noreply@github.com]

[file: /durga/without_rest_models/testapp/models.py | repo: anilkumar0470/git_practice | license: none | Python (UTF-8) | 240 bytes]
from django.db import models
# Create your models here
class Employee(models.Model):
eno = models.IntegerField()
ename = models.CharField(max_length=64)
esal = models.FloatField()
eadd = models.CharField(max_length=64)
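For illustration, one way this model is used from a Django shell once migrations have been made and applied (the field values below are made up):

# Hypothetical shell session; requires applied migrations.
e = Employee.objects.create(eno=1, ename='Asha', esal=50000.0, eadd='Pune')
print(e.pk, e.ename)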
[author: anilkumar.0466@gmail.com]

[file: /getting_started.py | repo: tanishka1411/YoutubeAPI-Analysis | license: none | Python (UTF-8) | 309 bytes]
from googleapiclient.discovery import build
api_key = '#someAPI key'
youtube = build('youtube', 'v3', developerKey=api_key)
# service name and version
request = youtube.channels().list(
part='statistics',
forUsername='schafer5'
)
response = request.execute()
print(response)
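A sketch of pulling the statistics block out of the response; the keys follow the channels.list part='statistics' reference (the API returns these counts as strings):

stats = response['items'][0]['statistics']
print(stats.get('subscriberCount'), stats.get('viewCount'), stats.get('videoCount'))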
[author: noreply@github.com]

[file: /.history/student_olx/registration/views_20210920162648.py | repo: RiteshK555/itw-project | license: none | Python (UTF-8) | 817 bytes]
from django.shortcuts import render
from django.contrib.auth import models
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from .models import money
# Create your views here.
def register(request):
    if request.method == "POST":
        form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            # Open an empty money record for the newly created user.
            money(holder=user, amount=0).save()
            return redirect("/")
    else:
        form = UserCreationForm()
    return render(request, "registration/register.html", {"form": form})
def login(response):
# if money.holder==User:
# print("hi")
return render(response,"registration/login.html")
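A sketch of how these views might be wired into URL patterns; the routes and names below are assumptions, not taken from this repo:

# urls.py (hypothetical)
from django.urls import path
from . import views

urlpatterns = [
    path('register/', views.register, name='register'),
    path('login/', views.login, name='login'),
]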
[author: (none recorded)]

[file: /targetDirectory/lib/python3.6/ntpath.py | repo: karsheng/language-translation | license: none | Python (UTF-8) | 52 bytes]
/Users/karshenglee/anaconda3/lib/python3.6/ntpath.py
[author: karshenglee@Kars-MacBook-Air.local]

[file: /preshoneypot.py | repo: itd/cuddle-fuzz | license: none | Python (UTF-8) | 3,404 bytes]
#!/usr/bin/python
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ..__...__.........._____.
# _/ |_|__|._______/ ___\
# \ __\ |/ ___/\ __\.
# .| |.| |\___ \..| |...
# .|__|.|__/____ >.|__|...
# ..............\/.........
#
# NetCat Persistent Honey Pot
#
# This file will create a honeypot and establish it every
# time it goes down. It will save log file for the
# connection to capture.txt and will add
# banner from 'welcome.txt'
#
# Build by Yuval (tisf) Nativ from See-Security Group
# http://www.see-security.com
# https://avtacha.wordpress.com
#
# .__.................__......................_._.........
# / _\.___..___....../ _\.___..___._..._._.__(_) |_._..._.
# \ \./ _ \/ _ \_____\ \./ _ \/ __| |.| | '__| | __| |.| |
# _\ \ __/ __/_____|\ \ __/ (__| |_| | |..| | |_| |_| |
# \__/\___|\___|.....\__/\___|\___|\__,_|_|..|_|\__|\__, |
# ..................................................|___/.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import sys
import getopt
import os
def printhelp():
print ''
print ' ..__...__.........._____.'
print ' _/ |_|__|._______/ ___\ '
print ' \ __\ |/ ___/\ __\.'
print ' .| |.| |\___ \..| |...'
print ' .|__|.|__/____ >.|__|...'
print ' ..............\/.........'
print ''
print ' Created By Yuval Nativ (tisf) of See-Security'
print ' http://www.see-security.org'
print ' https://avtacha.wordpress.com'
print ''
print 'Syntax not used properly.'
print 'Use the -p or --port for the target port.'
    print 'If you use a port lower than 1024, run as root.'
print 'The banner for the honey pot is at welcome.txt .'
print 'The log will be saved to capture.txt .'
print ''
print ' ex.: ./preshoneypot.py -p 44254'
print ''
def main(argv):
portAsked = ''
rootBar = 1024
try:
        opts, args = getopt.getopt(argv, "hp:", ["port=", "help"])
except getopt.GetoptError:
printhelp()
sys.exit(2)
for opt, arg in opts:
        if opt in ('-h', '--help'):
printhelp()
sys.exit()
elif opt in ('-p', '--port'):
portAsked = arg
if portAsked=='':
printhelp()
sys.exit()
if int(portAsked) > int(65535):
        sys.exit('Port cannot be bigger than 65535...')
if int(portAsked) < int(rootBar):
if not os.geteuid() == 0:
sys.exit('For using this script under port 1024 please run as root.')
i=0
i=str("while [ 1 ]; do echo 'Got a connection'; nc -lvvv "+portAsked+" < honeywelcome.txt >> capture.txt; done")
os.system(i)
if __name__ == "__main__":
main(sys.argv[1:])
[author: YuvalN210@gmail.com@8cf99bae-c7dc-ee7b-a7b4-1828fa5c92ce]

[file: /python-project/test_isPalindrome.py | repo: madooei/playground-h2n7ay2w | license: none | Python (UTF-8) | 1,998 bytes]
from isPalindrome import isPalindrome
def send_msg(channel, msg):
print("TECHIO> message --channel \"{}\" \"{}\"".format(channel, msg))
def success():
print("TECHIO> success true")
send_msg("My personal Yoda, you are. 🙏", "* ● ¸ . ¸. :° ☾ ° ¸. ● ¸ . ¸. :. • ")
send_msg("My personal Yoda, you are. 🙏", " ★ ° ☆ ¸. ¸ ★ :. . ")
send_msg("My personal Yoda, you are. 🙏", "__.-._ ° . . . ☾ ° . * ¸ .")
send_msg("My personal Yoda, you are. 🙏", "'-._\\7' . ° ☾ ° ¸.☆ ● . ")
send_msg("My personal Yoda, you are. 🙏", " /'.-c * ● ¸. ° ° ¸. ")
send_msg("My personal Yoda, you are. 🙏", " | /T ° ° ¸. ¸ . ")
send_msg("My personal Yoda, you are. 🙏", "_)_/LI")
def fail():
print("TECHIO> success false")
def tests():
try:
assert isPalindrome("")
assert isPalindrome("a")
except AssertionError as e:
fail()
send_msg("Oops! 🐞", e)
send_msg("Hint 💡", "What is the smallest palindrome? 🤔")
        return
try:
assert isPalindrome("ali")==False, "ali is not palindrome"
assert isPalindrome("bob"), "bob is palindrom"
assert isPalindrome("hannah"), "hannah is palindrome"
assert isPalindrome("ada"), "ada is plaindrome"
assert isPalindrome("anna"), "anna is palindrome"
assert isPalindrome("nitin"), "nitin is palindrome"
assert isPalindrome("otto"), "otto is palindrome"
assert isPalindrome("madam"), "madam is palindrome"
assert isPalindrome("racecar"), "racecar is palindrome"
assert isPalindrome("xerox")==False, "xerox is not palindrome"
success()
except AssertionError as e:
fail()
send_msg("Oops! 🐞", e)
send_msg("Hint 💡", "If the first and the last character of the input are not the same then ...? 🤔")
if __name__ == "__main__":
tests()
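The function under test lives in isPalindrome.py, which is not included in this record; a minimal recursive sketch that satisfies every assertion above:

def isPalindrome(s):
    # The empty string and single characters are palindromes.
    if len(s) <= 1:
        return True
    return s[0] == s[-1] and isPalindrome(s[1:-1])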
[author: noreply@github.com]

[file: /src_py/ztf_data.py | repo: memanuel/kepler-sieve | license: none | Python (UTF-8) | 3,965 bytes]
"""
ZTF Data
Calculations to enrich ZTF data after it is loaded.
Michael S. Emanuel
24-Mar-2021
"""
# Standard libraries
import numpy as np
import pandas as pd
# Astronomy related
from astropy.units import deg
# UI
from tqdm.auto import tqdm as tqdm_auto
# MSE imports
from db_utils import df2db, sp2df, sp_run
from astro_utils import mjd_to_datetime
from ra_dec import radec2dir, calc_topos
from rebound_sim import make_sim_planets
from rebound_integrate import integrate_mjds
# *********************************************************************************************************************
def calc_ztf_detection_times():
"""Update entries in table KS.DetectionTime associated with ZTF observations"""
# Distinct ZTF detection times
df = sp2df('ZTF.GetDetectionTimes')
# Array of observations times as MJDs
mjds = df['mjd'].values
# Populate the CalendarDateTime field
df['CalendarDateTime'] = np.array([mjd_to_datetime(mjd) for mjd in mjds])
# All available data sources as a DataFrame
ds = sp2df('KS.GetDataSources')
# Populate DataSourceID and ObservatoryID fields
df['DataSourceID'] = ds.DataSourceID[ds.DataSourceCD=='ZTF'].values[0]
df['ObservatoryID'] = ds.ObservatoryID[ds.DataSourceCD=='ZTF'].values[0]
# Integrate the planets saving outputs at these observation times
print(f'Integrating planets on {df.shape[0]} distinct observation times...')
sim_epoch = make_sim_planets(epoch=mjds[0])
body_ids, body_names, q, v, elts = integrate_mjds(sim_epoch=sim_epoch, mjds=mjds, save_elements=False, progbar=True)
# Earth position at these observation times
earth_idx = np.argmax(body_names=='Earth')
q_earth = q[:,earth_idx,:]
v_earth = v[:,earth_idx,:]
# Calculate topos adjustment
dq_topos, dv_topos = calc_topos(obstime_mjd=mjds, site_name='Palomar')
# The position and velocity of the observatory
q_obs = q_earth + dq_topos.value # in AU
v_obs = v_earth + dv_topos.value # in AU / day
# Position of the Sun
sun_idx = np.argmax(body_names=='Sun')
q_sun = q[:,sun_idx,:]
# Save positions of observatory and sun to DataFrame
df[['qObs_x', 'qObs_y', 'qObs_z']] = q_obs
df[['vObs_x', 'vObs_y', 'vObs_z']] = v_obs
df[['qSun_x', 'qSun_y', 'qSun_z']] = q_sun
# Save these observation times and positions to DB
df2db(df=df, schema='KS', table='DetectionTime', verbose=False, progbar=False)
# *********************************************************************************************************************
def ztf_raw_detections(sz: int = 1000000):
"""Populate missing rows of table KS.RawDetections from ZTF.Detections"""
# Get the number of missing rows
rMax = sp2df('KS.MakeTable_RawDetection_ZTF_RowCount').RowCount[0]
# Set up a tqdm index counter to process the rows in chunks of sz at a time
iMax: int = rMax // sz + 1
idx = tqdm_auto(np.arange(iMax))
# Process the rows in chunks of sz
params={'sz':sz}
print(f'KS.RawDetections missing {rMax} rows from ZTF.Detections. Processing now...')
for i in idx:
sp_run('KS.MakeTable_RawDetection_ZTF', params=params)
# *********************************************************************************************************************
def main():
"""
Main routine for console program
"""
# Call SQL procedure to add new rows to ZTF.DetectionTime from ZTF.Detection
sp_run('ZTF.MakeTable_DetectionTime')
# Call SQL procedure to insert new records to DetectionTimeSlice
    sp_run('KS.MakeTable_DetectionTimeSlice', {'sz': 30})
# Rebuild the KS.DetectionTime entries coming from ZTF
calc_ztf_detection_times()
# Update KS.DetectionTimePair
sp_run('KS.MakeTable_DetectionTimePair', {'sz':60})
# *********************************************************************************************************************
if __name__ == '__main__':
main()
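The chunked backfill in ztf_raw_detections has a reusable shape; a generic sketch of it (run_chunk stands in for the sp_run call, the numbers are illustrative):

def backfill_in_chunks(total_rows, chunk_size, run_chunk):
    # One extra chunk covers the remainder, mirroring rMax // sz + 1 above.
    n_chunks = total_rows // chunk_size + 1
    for _ in range(n_chunks):
        run_chunk(chunk_size)

backfill_in_chunks(2_500_000, 1_000_000, lambda sz: print(f'chunk of {sz} rows'))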
[author: michael.s.emanuel@gmail.com]

[file: /searchr_app/file_analyzer/FileAnalyzer.py | repo: cebul55/Searchr_Project | license: none | Python (UTF-8) | 3,940 bytes]
import itertools
import json
from bs4 import BeautifulSoup
from searchr_app.file_analyzer.HTMLFileAnalyzer import HTMLFileAnalyzer
from searchr_app.file_analyzer.TextFileAnalyzer import TextFileAnalyzer
class FileAnalyzer(object):
search_result = None
search_phrases_combination = None
# phrases defined by user
search_phrases = None
# query defined by user
search_query = None
html_doc = None
text_doc = None
accuracy = 0.0
def __init__(self, search_result):
self.search_result = search_result
phrases_list = str(search_result.search.phrases_list)
self.search_phrases_combination = self.generate_phrase_combinations_as_text(phrases_list)
self.search_query = self.get_search_query(search_result.search)
if 'pdf' in search_result.content_type or 'word' in search_result.content_type:
self.text_doc = search_result.html_file
elif search_result.html_file is not None:
self.html_doc = search_result.html_file
self.start_analyzing()
    def start_analyzing(self):
        if self.text_doc is not None:
            print('starting doc analysis')
            self.analyze_text()
        elif self.html_doc is not None:
            self.analyze_html()
        # set status to 'analyzed' after end of analysis
        # self.search_result.set_status_to_analyzed()
def generate_phrase_combinations_as_text(self, phrases_list):
phrases_list = self.convert_literal_list_to_list(phrases_list)
self.search_phrases = phrases_list
combinations = []
for L in range(1, len(phrases_list) + 1):
for subset in itertools.combinations(phrases_list, L):
combinations.append(subset)
return combinations
@staticmethod
def convert_literal_list_to_list(literal_list):
import ast
x = u'' + literal_list
x = ast.literal_eval(x)
return_list = x
return_list = [n.strip() for n in return_list]
return return_list
def analyze_text(self):
        print('starting doc analysis')
text_analyzer = TextFileAnalyzer(self.search_result, self.search_phrases_combination, self.search_phrases, self.search_query, self.text_doc)
text_analyzer.analyze_text_file()
self.accuracy = text_analyzer.count_result_accuracy()
pass
def analyze_html(self):
html_analyzer = HTMLFileAnalyzer(self.search_result, self.search_phrases_combination, self.search_phrases, self.search_query, self.html_doc)
html_analyzer.analyze_html_file()
self.accuracy = html_analyzer.count_result_accuracy()
def get_accuracy(self):
return self.accuracy
def get_search_query(self, search):
saved_attribs = search.attributes.replace('\"', '#DOUBLEQUOTE#')
saved_attribs = saved_attribs.replace('\'', '\"')
attr_dict = json.loads(saved_attribs)
search_query = attr_dict['query']
search_query = search_query.replace('#DOUBLEQUOTE#', '\"')
return search_query
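A worked example of the quote round-trip get_search_query performs (the attribute string here is made up):

import json

attributes = '{\'query\': \'ipsum "lorem"\'}'        # stored, single-quoted form
step1 = attributes.replace('"', '#DOUBLEQUOTE#')     # protect real double quotes
step2 = step1.replace("'", '"')                      # now valid JSON
query = json.loads(step2)['query'].replace('#DOUBLEQUOTE#', '"')
print(query)                                         # ipsum "lorem"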
[author: B.Cybulski@stud.elka.pw.edu.pl]

[file: /9BJzrtpdMP8JFQg74_5.py | repo: daniel-reich/turbo-robot | license: none | Python (UTF-8) | 652 bytes]
"""
Create a function that, given a list, returns the index where, if the list is
split into two subarrays (the last element of the first array having index
(foundIndex-1)), the sums of the two subarrays are equal.
### Examples
twins([10, 20, 30, 5, 40, 50, 40, 15]) ➞ 5
# foundIndex 5 : [10+20+30+5+40]=[50+40+15]
twins([1, 2, 3, 4, 5, 5]) ➞ 4
# [1, 2, 3, 4] [5, 5]
twins([3, 3]) ➞ 1
### Notes
Return only the foundIndex, not the divided list.
"""
def twins(lst):
for i in range(1, len(lst)):
temp = []
temp.append(lst[:i])
temp.append(lst[i:])
if sum(temp[0]) == sum(temp[1]):
return i
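The loop above recomputes both sums at every split point; a single-pass sketch that keeps running totals and returns the same foundIndex for the documented examples:

def twins_linear(lst):
    left, right = 0, sum(lst)
    for i, v in enumerate(lst):
        left += v
        right -= v
        if left == right:
            return i + 1    # split point: lst[:i+1] vs lst[i+1:]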
[author: daniel.reich@danielreichs-MacBook-Pro.local]

[file: /train_evaluate_sesemi_tinyimages.py | repo: ml-lab/sesemi | license: MIT (permissive) | Python (UTF-8) | 5,376 bytes]
"""
Train and evaluate SESEMI architecture for semi-supervised learning
with self-supervised task of recognizing geometric transformations
defined as 90-degree rotations with horizontal and vertical flips.
"""
# Python package imports
import os
import argparse
import numpy as np
import pickle
# Keras package imports
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
# SESEMI package imports
from utils import geometric_transform, global_contrast_normalize
from utils import zca_whitener, gaussian_noise
from utils import LRScheduler, DenseEvaluator, open_sesemi
from utils import load_tinyimages, datagen_tinyimages
from datasets import cifar100
from models import convnet, wrn
def parse_args():
"""Parse command line input arguments."""
parser = argparse.ArgumentParser(description='Train and evaluate SESEMI.')
parser.add_argument('--model', dest='model', type=str, required=True)
parser.add_argument('--extra', dest='nb_extra', type=int, required=True)
parser.add_argument('--gpu', dest='gpu_id', type=int, default=0)
args = parser.parse_args()
return args
def main():
args = parse_args()
model = args.model
nb_extra = args.nb_extra
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
arg2var = {'convnet': convnet, 'wrn': wrn,}
# Dataset-specific parameters
hflip = True
zca = True
epochs = 50
nb_classes = 100
# Load Tiny Images
with open('./datasets/tiny-images/tiny_index.pkl', 'rb') as f:
tinyimg_index = pickle.load(f, encoding='latin1')
if nb_extra == 237203:
print("Using all classes common with CIFAR-100.")
with open('./datasets/cifar-100/meta', 'rb') as f:
cifar_labels = pickle.load(f, encoding='latin1')['fine_label_names']
cifar_to_tinyimg = {'maple_tree': 'maple', 'aquarium_fish': 'fish'}
cifar_labels = [l if l not in cifar_to_tinyimg else cifar_to_tinyimg[l]
for l in cifar_labels]
load_indices = sum([list(range(*tinyimg_index[label]))
for label in cifar_labels], [])
elif nb_extra == 500000:
print("Using %d random images." % nb_extra)
nb_tinyimages = max(e for s, e in tinyimg_index.values())
load_indices = np.arange(nb_tinyimages)
np.random.shuffle(load_indices)
load_indices = load_indices[:nb_extra]
load_indices.sort() # sorted for faster seeks.
else:
raise ValueError('`--extra` must be integer 237203 or 500000.')
nb_aux_images = len(load_indices)
print("Loading %d auxiliary unlabeled tiny images." % nb_aux_images)
z_train = load_tinyimages(load_indices)
z_train = global_contrast_normalize(z_train)
# Load CIFAR-100
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train = global_contrast_normalize(x_train)
x_test = global_contrast_normalize(x_test)
if zca:
zca_whiten = zca_whitener(np.concatenate([x_train, z_train], axis=0))
x_train = zca_whiten(x_train)
z_train = zca_whiten(z_train)
x_test = zca_whiten(x_test)
x_train = x_train.reshape((len(x_train), 32, 32, 3))
z_train = z_train.reshape((len(z_train), 32, 32, 3))
x_test = x_test.reshape((len(x_test), 32, 32, 3))
y_train = to_categorical(y_train)
# Training parameters
input_shape = (32, 32, 3)
batch_size = 12
base_lr = 0.05
lr_decay_power = 0.5
dropout_rate = 0.2
max_iter = (len(x_train) // batch_size) * epochs
sesemi_model, inference_model = open_sesemi(
arg2var[model], input_shape, nb_classes, base_lr, dropout_rate)
print(sesemi_model.summary())
super_datagen = ImageDataGenerator(
width_shift_range=3,
height_shift_range=3,
horizontal_flip=hflip,
preprocessing_function=gaussian_noise,
fill_mode='reflect',
)
self_datagen = ImageDataGenerator(
width_shift_range=3,
height_shift_range=3,
horizontal_flip=False,
preprocessing_function=gaussian_noise,
fill_mode='reflect',
)
super_data = super_datagen.flow(
x_train, y_train, shuffle=True, batch_size=1, seed=None)
self_data = self_datagen.flow(
x_train, shuffle=True, batch_size=1, seed=None)
extra_data = self_datagen.flow(
z_train, shuffle=True, batch_size=1, seed=None)
train_data_loader = datagen_tinyimages(
super_data, self_data, extra_data, batch_size)
lr_poly_decay = LRScheduler(base_lr, max_iter, lr_decay_power)
evaluate = DenseEvaluator(inference_model, (x_test, y_test), hflip)
# Fit the SESEMI model on mini-batches with data augmentation
print('Run configuration:')
print('model=%s,' % model, 'ZCA=%s,' % zca, 'nb_epochs=%d,' % epochs, \
'horizontal_flip=%s,' % hflip, 'nb_extra=%d,' % len(z_train), \
'batch_size=%d,' % batch_size, 'gpu_id=%d' % args.gpu_id)
sesemi_model.fit_generator(train_data_loader,
epochs=epochs, verbose=1,
steps_per_epoch=len(x_train) // batch_size,
callbacks=[lr_poly_decay, evaluate],)
return
if __name__ == '__main__':
main()
[author: vuptran@gmail.com]

[file: /Neural_Network/_base.py | repo: luoshao23/ML_algorithm | license: MIT (permissive) | Python (UTF-8) | 1,611 bytes]
from scipy.special import expit as logistic_sigmoid
import numpy as np
def identity(X):
return X
def logistic(X):
return logistic_sigmoid(X, out=X)
def tanh(X):
return np.tanh(X, out=X)
def relu(X):
return np.clip(X, 0, np.finfo(X.dtype).max, out=X)
def softmax(X):
tmp = X - X.max(axis=1)[:, np.newaxis]
np.exp(tmp, out=X)
X /= X.sum(axis=1)[:, np.newaxis]
return X
def deriv_identity(a, delta):
"""nothing"""
def deriv_logistic(a, delta):
delta *= a
delta *= (1.0 - a)
def deriv_tanh(a, delta):
delta *= (1.0 - a**2)
def deriv_relu(a, delta):
delta[a <= 0] = 0
def squared_loss(y_true, y_pred):
return ((y_true - y_pred) ** 2).mean() / 2
def log_loss(y_true, y_prob):
y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
if y_true.shape[1] == 1:
y_true = np.append(1 - y_true, y_true, axis=1)
return -np.sum(y_true * np.log(y_prob)) / y_prob.shape[0]
def binary_log_loss(y_true, y_prob):
y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
return -np.sum(y_true * np.log(y_prob) +
(1 - y_true) * np.log(1 - y_prob)) / y_prob.shape[0]
ACTIVATIONS = {'identity': identity, 'logistic': logistic,
'tanh': tanh, 'relu': relu, 'softmax': softmax}
DERIVATIVES = {'identity': deriv_identity, 'logistic': deriv_logistic,
'tanh': deriv_tanh, 'relu': deriv_relu}
LOSS_FUNCTIONS = {'squared_loss': squared_loss, 'log_loss': log_loss,
'binary_log_loss': binary_log_loss}
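A usage sketch for the lookup tables above; note that these activations write into their argument (out=X), so pass a copy when the input must survive:

import numpy as np

X = np.array([[-1.0, 0.5], [2.0, -3.0]])
relu_out = ACTIVATIONS['relu'](X.copy())            # negatives clipped to 0
loss = LOSS_FUNCTIONS['squared_loss'](np.ones((2, 2)), relu_out)
print(relu_out, loss)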
[author: luoshao23@gmail.com]

[file: /cs1_final/DissemblerSolver.py | repo: MeganTj/CS1-Python | license: none | Python (UTF-8) | 16,984 bytes]
# Name: Megan Tjandrasuwita
# CMS cluster login name: mtjandra
'''
Solves a Dissembler puzzle given a puzzle in the form of a one-line string.
'''
import string
from utils import *
# ----------------------------------------------------------------------
# Global data.
# ----------------------------------------------------------------------
# A list of dissembler puzzles.
# Each puzzle is represented as a single string.
# Blank squares are indicated by '.' and colored squares
# are indicated by a single lowercase letter.
# The letters have no meaning (they aren't short for a specific color).
# The blanks in the string are used to separate different rows of the
# puzzle.
puzzles = [
'aababb',
'aa. b.. abb',
'a.. aba bab ..b',
'abba a..b b..a abba',
'.aa. ..b. baab .b.. .aa.',
'a...a babab a...a',
'....a ababa b.... a.aba b.a.b a...a babab',
'aabb .ba. .ac. .cd. ddcd',
'ababc d...b ad.ec .f.c. fd.eg f...e hhghg',
'aabaa bbcbb ccdcc ddedd eeaee',
'.aabb. .c..c. ca..bc d....d cdccdc .cddc.',
'..aab .ccda .b.cb db.db da.d. cbaa. dcc..',
'abbcbc adaddc dccbcb dadaab',
'ababb b.b.a a.a.a a.b.b bbaba',
'.ab. .ba. .ab. abab a..b',
# Harder puzzles:
'...a... ..bac.. .bdbad. cca.dee .afbeb. ..afb.. ...f...',
'aaaaab cbdcdb cbeadb cabfeb cafefb cddddd',
'abcdded adb.ecd abcccad afggged agf.bad afbbgad',
'abcacc daedfe dbgfef ccbhhi gjcijh gfjffi',
'aabcbcb c.a.d.b cbcdcaa d.a.a.b adcabda d.b.d.b acadcdd'
]
# ----------------------------------------------------------------------
# Helper functions.
# ----------------------------------------------------------------------
def is_adjacent(loc1, loc2):
'''
Arguments:
loc1, loc2 -- (row, column) locations
Return value:
True if two locations are orthogonally adjacent, otherwise False.
'''
assert is_loc(loc1)
assert is_loc(loc2)
    return (abs(loc1[0] - loc2[0]) == 1 and loc1[1] == loc2[1]) or \
           (abs(loc1[1] - loc2[1]) == 1 and loc1[0] == loc2[0])
def adjacent_to_any(loc, locset):
'''
Arguments:
loc -- a (row, column) location
locset -- a set of locations
Return value:
True if `loc` is not in `locset` and at least one location
in `locset` is adjacent to `loc`, otherwise False.
The set `locset` is not altered.
'''
assert is_loc(loc)
assert is_locset(locset)
if loc not in locset:
for loc2 in locset:
if is_adjacent(loc, loc2):
return True
return False
def collect_adjacent(locset, target_set):
'''
Arguments:
locset -- a set of (row, column) locations
target_set -- another set of (row, column) locations
Return value:
A set of all the locations in `locset` that are adjacent
to any location in `target_set`.
The sets `locset` and `target_set` are not altered.
'''
assert is_locset(locset)
assert is_locset(target_set)
adj = set()
for loc in locset:
if adjacent_to_any(loc, target_set):
adj.add(loc)
return adj
def collect_connected(loc, locset):
'''
Arguments:
loc -- a (row, column) location
locset -- a set of locations
Return value:
A set of all the locations in `locset` which are connected to `loc`
via a chain of adjacent locations. Include `loc` in the resulting set.
The set `locset` is not altered.
'''
assert is_loc(loc)
assert is_locset(locset)
connected = set([loc])
orig = locset.copy()
adj = collect_adjacent(orig, connected)
adj.add(loc)
while len(adj.difference(connected)) > 0:
connected = connected.union(adj)
for i in adj:
if i in orig:
orig.remove(i)
adj = adj.union(collect_adjacent(orig, connected))
return connected
def partition_connected(locset):
'''
Partition a set of locations based on being connected via a chain of
adjacent locations. The original locset is not altered.
Return a list of subsets. The subsets must all be disjoint i.e.
the intersection of any two subsets must be the empty set.
Arguments:
locset -- a set of (row, column) locations
Return value:
The list of partitioned subsets.
The set `locset` is not altered.
'''
assert is_locset(locset)
orig = locset.copy()
lst = []
lst2 = []
while len(orig) > 0:
lst.append(orig.pop())
while len(lst) > 0:
adj = collect_connected(lst[0], locset)
lst2.append(adj)
for x in adj:
lst.remove(x)
return lst2
def filter_locset(locset):
'''
Given a locset, partition it into subsets which are connected via a
chain of adjacent locations. Compute two sets:
-- the union of all partitions whose length is < 3
-- the union of all partitions whose length is >= 3
and return them as a tuple of two sets (in that order).
Arguments:
locset -- a set of (row, column) locations
Return value:
The two sets as described above.
The set `locset` is not altered.
'''
assert is_locset(locset)
lst = partition_connected(locset)
all_less = set()
all_three = set()
for i in lst:
if len(i) >= 3:
all_three.update(i)
else:
all_less.update(i)
return (all_less, all_three)
def invert_rep(rep):
'''
Invert the board representation which maps locations to colors.
The inverted representation will map colors to sets of locations.
Arguments:
rep -- a dictionary mapping locations to one-character strings
representing colors
Return value:
a dictionary mapping one-character strings (representing colors)
to sets of locations
The input dictionary 'rep' is not altered.
'''
assert is_rep(rep)
colors = list(rep.values())
locations = list(rep.keys())
loc = []
unique = []
for i in range(len(colors)):
if colors[i] in unique:
index = unique.index(colors[i])
loc[index].add(locations[i])
else:
unique.append(colors[i])
loc.append({locations[i]})
inv = {}
for i in range(len(unique)):
inv[unique[i]] = loc[i]
return inv
def revert_rep(inverted):
'''
Invert the board representation which maps colors to sets of
locations. The new representation will map locations to colors.
Arguments:
inverted -- a dictionary mapping one-character strings
(representing colors) to sets of locations
Return value:
a dictionary mapping locations to one-character strings
representing colors
The input dictionary 'inverted' is not altered.
'''
assert is_inverted_rep(inverted)
locations = list(inverted.values())
colors = list(inverted.keys())
rev = {}
for i in range(len(locations)):
loc_set = locations[i].copy()
while len(loc_set) > 0:
rev[loc_set.pop()] = colors[i]
return rev
def swap_locations(rep, loc1, loc2):
'''
Exchange the contents of two locations.
Arguments:
rep -- a dictionary mapping locations to one-character strings
representing colors
loc1, loc2 -- adjacent locations which are in the board rep
Return value:
a new dictionary with the same structure of 'rep' with the
specified locations having each others' contents
The input dictionary 'rep' is not altered.
'''
assert is_rep(rep)
assert is_loc(loc1)
assert is_loc(loc2)
    assert is_adjacent(loc1, loc2)
assert loc1 in rep
assert loc2 in rep
new_rep = rep.copy()
color1 = new_rep[loc1]
new_rep[loc1] = new_rep[loc2]
new_rep[loc2] = color1
return new_rep
def remove_connected_groups(rep):
'''
Remove all connected color groups covering at least three squares
from a board representation.
Arguments:
rep -- a dictionary mapping locations to one-character strings
representing colors
Return value:
a tuple of two dictionaries of the same kind as the input
(i.e. a mapping between locations and color strings);
the first contains the remaining locations only,
and the second contains the removed locations only
The input dictionary 'rep' is not altered.
'''
assert is_rep(rep)
inv = invert_rep(rep)
colors = list(inv.keys())
keep = {}
discard = {}
for i in colors:
locset = inv[i]
short, connected = filter_locset(locset)
discard[i] = connected
keep[i] = short
return (revert_rep(keep), revert_rep(discard))
class DissemblerSolver:
def __init__(self, puzzle):
self.nrows = 0
self.ncols = 0
self.puzzle = puzzle
self.rep = {}
self.load(self.puzzle)
self.possible = self.possible_moves()
self.visited = [[]]
self.moves = []
self.history = []
self.level = 0
#self.visited_level = 0
self.count = []
    def solve_puzzle(self, prev):
        # Earlier recursive draft; give_solution() relies on solve_puzzle2 below.
        if len(self.rep) == 0:
            return True
        if len(self.possible) == 0:
            self.undo()
            return
        for move in list(self.possible):
            self.solve_puzzle(move)
    def solve_puzzle2(self):
        '''Depth-first traversal. Check subtrees from left to right.'''
        while self.level >= 0:
            if len(self.rep) == 0:
                return True
            if len(self.possible) == 0:
                # Dead end: back out of this branch.
                self.visited.pop()
                self.undo()
            else:
                made = False
                for move in self.possible:
                    if move not in self.visited[self.level]:
                        self.make_move(move)
                        made = True
                        break
                if not made:
                    # Every move at this level has already been tried.
                    self.visited.pop()
                    self.undo()
        return False
def give_solution(self):
if self.solve_puzzle2():
for i in self.moves:
print('({0}, {1})'.format(i[0], i[1]), end = ' ')
else:
print('No solution :(')
def load(self, puzzle):
'''
Load a puzzle from a string representation of the puzzle.
Convert the string representation into a dictionary representation
mapping (row, column) coordinates to colors.
Arguments:
puzzle -- a string representing the puzzle
Return value: none
'''
rep = {}
lines = puzzle.split()
self.nrows = len(lines)
self.ncols = len(lines[0])
for row in lines:
assert len(row) == self.ncols
for row in range(self.nrows):
for col in range(self.ncols):
color = lines[row][col]
if color == '.':
continue
rep[(row, col)] = color
self.rep = rep
def possible_moves(self):
'''
Compute and return a set of all the possible moves. A "possible move"
is a move where:
-- both locations of the move are adjacent
-- both locations on the board rep are occupied by colors
-- making the move will cause some locations to be vacated
Arguments:
rep -- a dictionary mapping locations to one-character strings
representing colors
nrows -- the number of rows on the board
ncols -- the number of columns on the board
Return value:
the set of possible moves
The input dictionary 'rep' is not altered.
'''
rep = self.rep
nrows = self.nrows
ncols = self.ncols
assert type(nrows) is int and type(ncols) is int
assert nrows > 0 and ncols > 0
adj = self.adjacent_moves(nrows, ncols)
possible = set()
for i in adj:
if i[0] in rep and i[1] in rep:
new_rep = swap_locations(rep, i[0], i[1])
kept, discarded = remove_connected_groups(new_rep)
if len(discarded) >= 3:
possible.add(i)
return possible
def adjacent_moves(self, nrows, ncols):
'''
Create and return a set of all moves on a board with 'nrows' rows and
'ncols' columns. The moves consist of two adjacent (row, column)
locations.
Arguments:
nrows -- the number of rows on the board
ncols -- the number of columns on the board
Return value:
the set of moves, where each move is a pair of adjacent locations
and each location is a (row, column) pair; also the two locations
are ordered in the tuple (the "smallest" comes first)
Note that the moves are independent of the contents of any board
representation; we aren't considering whether the moves would actually
change anything on a board or whether the locations of each move are
occupied by color squares.
'''
assert type(nrows) is int and type(ncols) is int
assert nrows > 0 and ncols > 0
moves = set()
for i in range(nrows):
for j in range(ncols):
current = (i, j)
top = (i - 1, j)
bottom = (i + 1, j)
left = (i, j - 1)
right = (i, j + 1)
if i - 1 >= 0:
if (top, current) not in moves:
moves.add((current, top))
if i + 1 < nrows:
if (bottom, current) not in moves:
moves.add((current, bottom))
if j - 1 >= 0:
if (left, current) not in moves:
moves.add((current, left))
if j + 1 < ncols:
if (right, current) not in moves:
moves.add((current, right))
return moves
def make_move(self, move):
self.history.append(self.rep.copy())
self.rep = swap_locations(self.rep, move[0], move[1])
(self.rep, removed) = remove_connected_groups(self.rep)
#self.history.append(self.rep.copy())
self.moves.append(move)
self.possible = self.possible_moves()
self.level += 1
self.visited.append([])
    def undo(self):
        if self.history != []:
            self.rep = self.history.pop()
            move = self.moves.pop()
            # Remember that this move has been tried at the parent level.
            self.visited[self.level - 1].append(move)
            self.possible = self.possible_moves()
        self.level -= 1
if __name__ == '__main__':
puzzle = input("Enter a Dissembler puzzle to be solved: ")
solver = DissemblerSolver(puzzle)
solver.give_solution()
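To exercise the solver without the input prompt, a short sketch using the bundled sample list:

# e.g. the first sample puzzle, solvable with a single swap
solver = DissemblerSolver(puzzles[0])   # 'aababb'
solver.give_solution()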
[author: 26992324+MeganTj@users.noreply.github.com]

[file: /plotLDOSAndDelta.py | repo: glaurung24/SingleMagImp | license: none | Python (UTF-8) | 3,500 bytes]
# -*- encoding: utf-8 -*-
## @package TBTKview
# @file plotLDOS.py
# @brief Plot local density of states
#
# @author Kristofer Björnson
import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.axes
import matplotlib.cm
import scipy.ndimage.filters
import mpl_toolkits.mplot3d
import sys
from scipy.signal import find_peaks
if(len(sys.argv) != 2):
print( "Error, the following parameters are needed: .hdf5-file")
exit(1)
filename = sys.argv[1]
sigma = 0.002
peak_height = 7.5
file = h5py.File(filename, 'r');
dataset = file['LDOS']
data_dimensions = dataset.shape
y_plane = int(np.floor(data_dimensions[1]/2))
physical_dimensions = len(data_dimensions) - 1 #Last dimensions are for energy.
energy_resolution = data_dimensions[physical_dimensions];
limits = dataset.attrs['UpLowLimits']
datasetDeltaReal = file['deltaReal0']
datasetDeltaImag = file['deltaImag0']
delta = abs(np.array(datasetDeltaReal) + 1j*np.array(datasetDeltaImag))
size_x = data_dimensions[0]
size_y = data_dimensions[1]
x = np.arange(0, data_dimensions[0], 1)
y = np.arange(limits[1], limits[0], (limits[0] - limits[1])/energy_resolution)
X, Y = np.meshgrid(x, y)
fig = matplotlib.pyplot.figure()
Z = dataset[:,y_plane,:]
sigma_discrete_units = sigma*energy_resolution/(limits[0] - limits[1])
for xp in range(0, size_x):
Z[xp,:] = scipy.ndimage.filters.gaussian_filter1d(Z[xp,:], sigma_discrete_units)
#Color map figure
ax = fig.gca()
im = ax.pcolormesh(X.transpose(), Y.transpose(), Z, cmap=matplotlib.cm.coolwarm)
plt.ylim([-1, 1])
fig.colorbar(im)
fig.savefig('figures/LDOS.png')
sigma = 0.001
sigma_discrete_units = sigma*energy_resolution/(limits[0] - limits[1])
Z1 = dataset[y_plane, y_plane, :]
signal = Z1[: int(data_dimensions[2]/2)]
Z2 = dataset[0, 0, :]
plt.figure()
Z1 = scipy.ndimage.filters.gaussian_filter1d(Z1, sigma_discrete_units)
peaks, _ = find_peaks(signal, height=peak_height)
Z2 = scipy.ndimage.filters.gaussian_filter1d(Z2, sigma_discrete_units)
plt.plot(y, Z1)
plt.plot(y[peaks[-1]], Z1[peaks[-1]], 'x')
plt.plot(y, Z2, '--')
plt.xlim([-1, 1])
plt.savefig('figures/LDOS_middle.png')
plt.close()
plt.figure()
x = np.arange(0, data_dimensions[0], 1)
y = np.arange(0, data_dimensions[1], 1)
X, Y = np.meshgrid(x, y)
Z = delta
plt.pcolormesh(X.transpose(), Y.transpose(), Z, cmap=matplotlib.cm.coolwarm)
plt.colorbar()
plt.savefig("figures/delta.png")
plt.close()
plt.figure()
plt.plot(x, delta[:,y_plane])
plt.savefig("figures/delta_profile.png")
plt.close()
# Find peaks in the LDOS (Eg):
Eg = np.zeros_like(delta)
ratio = np.zeros_like(delta)
for i in range(data_dimensions[0]):
for j in range(data_dimensions[1]):
signal = dataset[i, j, : int(np.floor(data_dimensions[2]/2))]
signal = scipy.ndimage.filters.gaussian_filter1d(signal, sigma_discrete_units)
peaks, _ = find_peaks(signal, height=peak_height)
Eg[i,j] = signal[peaks[-1]]
ratio[i,j] = Eg[i,j]/delta[i,j]
plt.figure()
x = np.arange(0, data_dimensions[0], 1)
y = np.arange(0, data_dimensions[1], 1)
X, Y = np.meshgrid(x, y)
Z = Eg
plt.pcolormesh(X.transpose(), Y.transpose(), Z, cmap=matplotlib.cm.coolwarm)
plt.colorbar()
plt.savefig("figures/Eg.png")
plt.close()
plt.figure()
x = np.arange(0, data_dimensions[0], 1)
y = np.arange(0, data_dimensions[1], 1)
X, Y = np.meshgrid(x, y)
Z = ratio
plt.pcolormesh(X.transpose(), Y.transpose(), Z, cmap=matplotlib.cm.coolwarm)
plt.colorbar()
plt.savefig("figures/ration.png")
plt.close()
[author: andreas.theiler@physics.uu.se]