blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79f647d28b8a1806e461603d2ce0c7ed4feb685c | 6b1b506139088aa30de9fd65cff9e3b6a3a36874 | /sofia_redux/instruments/forcast/tests/test_merge_correlation.py | bf607a0bd2a309efc56e0610b7ce6f90da3bbc00 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | SOFIA-USRA/sofia_redux | df2e6ad402b50eb014b574ea561734334d70f84d | 493700340cd34d5f319af6f3a562a82135bb30dd | refs/heads/main | 2023-08-17T11:11:50.559987 | 2023-08-13T19:52:37 | 2023-08-13T19:52:37 | 311,773,000 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,672 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io import fits
import numpy as np
import sofia_redux.instruments.forcast.configuration as dripconfig
from sofia_redux.instruments.forcast.merge_correlation \
import addhist, merge_correlation
from sofia_redux.instruments.forcast.tests.resources \
import nmc_testdata, npc_testdata
class TestMergeCorrelation(object):
    """Unit tests for sofia_redux.instruments.forcast.merge_correlation."""
    def test_addhist(self):
        """addhist prefixes HISTORY messages with 'Merge: '."""
        header = fits.header.Header()
        addhist(header, 'test history message')
        assert 'HISTORY' in header
        assert header['HISTORY'] == 'Merge: test history message'
    def test_nmc_merge_correlation(self):
        """Merge NMC test data and check normalization and variance.

        NOTE(review): the expected values assume the nmc_testdata
        resource yields a 4-fold overlap (nanmax(normmap) == 4).
        """
        test = nmc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        varval = 2.0
        variance = np.full_like(data, varval)
        normmap = np.full_like(data, np.nan)
        dripconfig.load()
        # disable the border exclusion so the full frame is considered
        dripconfig.configuration['border'] = 0
        merged, var = merge_correlation(
            data, header, variance=variance, normmap=normmap)
        # reload to restore the default configuration after the override
        dripconfig.load()
        nm = np.nanmax(normmap)
        assert nm == 4
        # expected minimum variance given nm contributing frames
        assert np.nanmin(var) == varval * (nm - 1) / (nm ** 2)
        assert np.allclose(np.nanmax(data),
                           np.nanmax(merged * nm / (nm - 1)),
                           atol=0.1)
        # merge shifts for both axes should be recorded in the header
        for key in ['MRGDX', 'MRGDY']:
            for i in range(2):
                assert '%s%i' % (key, i) in header
        assert 'MRGX2' not in header
    def test_npc_merge_correlation(self):
        """Merge NPC test data and check normalization and variance."""
        test = npc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        varval = 2.0
        variance = np.full_like(data, varval)
        normmap = np.full_like(data, np.nan)
        dripconfig.load()
        dripconfig.configuration['border'] = 0
        merged, var = merge_correlation(
            data, header, variance=variance, normmap=normmap)
        dripconfig.load()
        nm = np.nanmax(normmap)
        assert nm == 4
        # NPC scaling differs from the NMC case above
        assert np.nanmin(var) == varval * nm / (nm ** 2)
        assert np.allclose(np.nanmax(data), np.nanmax(merged), atol=0.1)
        for key in ['MRGDX', 'MRGDY']:
            for i in range(2):
                assert '%s%i' % (key, i) in header
    def test_errors(self):
        """Invalid inputs should return None instead of raising."""
        test = npc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        # bad header / bad data shapes
        assert merge_correlation(data, 'a') is None
        assert merge_correlation(np.array(10), header) is None
        dripconfig.load()
        # a border as large as the image leaves nothing to merge
        dripconfig.configuration['border'] = data.shape[0]
        merged = merge_correlation(data, header)
        assert merged is None
        dripconfig.load()
        # check bad variance
        merged = merge_correlation(data, header, variance=10)
        assert merged[0] is not None
        assert merged[1] is None
    def test_upsample(self):
        """Upsampling controls whether sub-pixel shifts are possible."""
        test = npc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        dripconfig.load()
        dripconfig.configuration['border'] = 0
        merge_correlation(data, header, upsample=100)
        dx = header['MRGDX0']
        # with high upsampling the shift should be fractional
        assert not np.allclose(dx, int(dx), atol=0.01)
        merge_correlation(data, header, upsample=1)
        dx = header['MRGDX0']
        # without upsampling the shift is a whole pixel
        assert dx == int(dx)
        dripconfig.load()
    def test_maxregister(self):
        """maxshift/maxregister bound the correlation search window."""
        test = npc_testdata()
        data = test['data'].copy()
        dmax = np.nanmax(data)
        header = test['header'].copy()
        # These settings should result in 0 chop nod so shift
        # algorithm will esentially subtract out all source
        header['CHPAMP1'] = 0
        header['NODAMP'] = 0
        dripconfig.load()
        dripconfig.configuration['border'] = 0
        merged, _ = merge_correlation(data, header, maxshift=0)
        mmax = np.nanmax(merged)
        # maximum should be close to zero
        assert np.allclose(mmax, 0, atol=0.01)
        # Now allow a search over the whole image
        # Note that this solution may be incorrect as we cannot
        # guarantee which negative source correlates with which
        # positive source... That's why we need the shift from
        # the header as an initial guess.
        merged, _ = merge_correlation(data, header, maxregister=None)
        dripconfig.load()
        # should be closer to data than 0
        mmax = np.nanmax(merged)
        assert dmax - mmax < mmax
    def test_resize(self, capsys):
        """resize=True enlarges the output; a border shrinks the input area."""
        test = npc_testdata()
        data = test['data'].copy()
        header = test['header'].copy()
        varval = 2.0
        variance = np.full_like(data, varval)
        normmap = np.full_like(data, np.nan)
        dripconfig.load()
        dripconfig.configuration['border'] = 0
        msmall, vsmall = merge_correlation(
            data, header, variance=variance, normmap=normmap,
            resize=False)
        mlarge, vlarge = merge_correlation(
            data, header, variance=variance, normmap=normmap,
            resize=True)
        # resized output must be strictly larger on every axis
        for s, l in zip(msmall.shape, mlarge.shape):
            assert s < l
        for s, l in zip(vsmall.shape, vlarge.shape):
            assert s < l
        # test border
        dripconfig.configuration['border'] = 10
        mborder, vborder = merge_correlation(
            data, header, variance=variance, normmap=normmap,
            resize=False)
        assert mborder.shape[0] == msmall.shape[0]
        assert mborder.shape[1] == msmall.shape[1]
        capt = capsys.readouterr()
        assert 'Removing 10 pixel border from consideration' in capt.out
        assert np.allclose(msmall, mborder, equal_nan=True)
| [
"melanie.j.clarke@nasa.gov"
] | melanie.j.clarke@nasa.gov |
4bc041a12e26bb826ba1c8c93b32b7804937f6ae | e22e03d9761f5c6d581b5af2e77343e8ee4b201d | /edk2/BaseTools/Tests/TestRegularExpression.py | 8b09841d5078520cc2351b9f24739b392a38c852 | [
"OpenSSL",
"BSD-2-Clause"
] | permissive | SamuelTulach/SecureFakePkg | 759975fcc84d62f05ac577da48353752e5334878 | f34080a6c0efb6ca3dd755365778d0bcdca6b991 | refs/heads/main | 2023-08-17T07:51:22.175924 | 2021-10-01T10:46:14 | 2021-10-01T10:46:14 | 410,938,306 | 94 | 14 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | ## @file
# Routines for generating Pcd Database
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
import unittest
from Common.Misc import RemoveCComments
from Workspace.BuildClassObject import ArrayIndex
class TestRe(unittest.TestCase):
    """Tests for RemoveCComments and the ArrayIndex regular expression."""
    def test_ccomments(self):
        """RemoveCComments strips C comments but leaves literals intact."""
        # Braced data and (wide-)string literals contain no comments:
        # the input must pass through unchanged.
        # Note: assertEquals is a deprecated alias (removed in Python 3.12);
        # assertEqual is used instead.
        TestStr1 = """ {0x01,0x02} """
        self.assertEqual(TestStr1, RemoveCComments(TestStr1))
        TestStr2 = """ L'TestString' """
        self.assertEqual(TestStr2, RemoveCComments(TestStr2))
        TestStr3 = """ 'TestString' """
        self.assertEqual(TestStr3, RemoveCComments(TestStr3))
        # Trailing //, block /* */ and whole-line // comments must all go.
        TestStr4 = """
          {CODE({
            {0x01, {0x02, 0x03, 0x04 }},// Data comment
            {0x01, {0x02, 0x03, 0x04 }},// Data comment
           })
          } /*
          This is multiple line comments
          The seconde line comment
          */
          // This is a comment
          """
        Expect_TestStr4 = """{CODE({
            {0x01, {0x02, 0x03, 0x04 }},
            {0x01, {0x02, 0x03, 0x04 }},
           })
          }"""
        self.assertEqual(Expect_TestStr4, RemoveCComments(TestStr4).strip())
    def test_ArrayIndex(self):
        """ArrayIndex matches decimal, hex and empty [] index tokens.

        Bug fix: this method was named Test_ArrayIndex, so unittest never
        discovered or ran it (test methods must start with 'test').
        """
        TestStr1 = """[1]"""
        self.assertEqual(['[1]'], ArrayIndex.findall(TestStr1))
        TestStr2 = """[1][2][0x1][0x01][]"""
        self.assertEqual(['[1]', '[2]', '[0x1]', '[0x01]', '[]'], ArrayIndex.findall(TestStr2))
if __name__ == '__main__':
    unittest.main()
| [
"samtulach@gmail.com"
] | samtulach@gmail.com |
f3c98c3f6c5281bef2b32f3be2008492f48a8504 | cdd693705f336a225ef82e1b20f32a2192f43b93 | /check_model_layers.py | ef67e10a05efbc10e81b2b76209dbfd7252cfe52 | [] | no_license | gombru/iMaterialistFashion | 5a2d6628729d0e2e8d9eed1c146b09e9b0c284bc | 27a278ef54856abd3415bb3eb4a66cc59e9c1d08 | refs/heads/master | 2020-03-11T09:17:48.069509 | 2018-12-14T09:36:50 | 2018-12-14T09:36:50 | 129,906,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | import caffe
# Run in GPU
caffe.set_device(0)  # use GPU id 0
caffe.set_mode_gpu()
# load net
# Network is built from the deploy prototxt plus the trained weights,
# in caffe.TEST phase (no training-only layers).
net = caffe.Net('evaluation/deploy.prototxt', '../../datasets/iMaterialistFashion/iMaterialistFashion_Inception_iter_95000.caffemodel', caffe.TEST)
# Print the blob table (layer name -> activation) to inspect the layers.
print net.blobs
| [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
507c972bd2d7d0d30352b3d624b7ae44473e0264 | bf287afda71c4360f40ecd78e74049510965db82 | /rampwf/score_types/brier_score.py | 491df244acdd5639c0e22d9e85b7a94e53aac1cf | [
"BSD-3-Clause"
] | permissive | djgagne/ramp-workflow | 26b1bb7188c734c7a46c1c8298504ab522fafda6 | cf1f53e5ef5a2b7d5ca27a21ca30098a17e9fcd7 | refs/heads/master | 2021-01-23T21:48:30.554531 | 2017-09-15T14:42:45 | 2017-09-15T14:42:45 | 102,907,436 | 1 | 0 | null | 2017-09-08T21:52:03 | 2017-09-08T21:52:03 | null | UTF-8 | Python | false | false | 6,207 | py | from .base import BaseScoreType
import numpy as np
from sklearn.metrics import brier_score_loss
class BrierScore(BaseScoreType):
    """Brier score: mean squared difference between the predicted
    probability of the positive class and the binary outcome.
    Lower is better; bounded in [0, 1].
    """
    is_lower_the_better = True
    minimum = 0.0
    maximum = 1.0

    def __init__(self, name='brier_score', precision=3):
        self.name = name
        self.precision = precision

    def score_function(self, ground_truths, predictions, valid_indexes=None):
        """A hybrid score.

        Tests the predicted _probability_ of the second class against the
        true _label index_ (0 when the first label is the ground truth,
        1 otherwise — i.e. the true probability of the second class), so
        the `Base` implementation is overridden here.
        """
        idx = slice(None, None, None) if valid_indexes is None else valid_indexes
        y_proba = predictions.y_pred[idx][:, 1]
        y_true_proba = ground_truths.y_pred_label_index[idx]
        self.check_y_pred_dimensions(y_true_proba, y_proba)
        return self(y_true_proba, y_proba)

    def __call__(self, y_true_proba, y_proba):
        # Delegate to scikit-learn's reference implementation.
        return brier_score_loss(y_true_proba, y_proba)
class BrierSkillScore(BaseScoreType):
    """Brier skill score: improvement of the forecast's Brier score over
    the climatological (base-rate) forecast. Higher is better; 1 is a
    perfect forecast, 0 matches climatology, negative is worse than it.
    """
    is_lower_the_better = False
    minimum = -1.0
    maximum = 1.0

    def __init__(self, name='brier_score', precision=3):
        self.name = name
        self.precision = precision

    def score_function(self, ground_truths, predictions, valid_indexes=None):
        """A hybrid score.

        Tests the predicted _probability_ of the second class against the
        true _label index_ (0 when the first label is the ground truth,
        1 otherwise — i.e. the true probability of the second class), so
        the `Base` implementation is overridden here.
        """
        idx = slice(None, None, None) if valid_indexes is None else valid_indexes
        y_proba = predictions.y_pred[idx][:, 1]
        y_true_proba = ground_truths.y_pred_label_index[idx]
        self.check_y_pred_dimensions(y_true_proba, y_proba)
        return self(y_true_proba, y_proba)

    def __call__(self, y_true_proba, y_proba):
        # Reference forecast: always predict the observed base rate.
        base_rate = y_true_proba.mean()
        climo = np.ones(y_true_proba.size) * base_rate
        bs = brier_score_loss(y_true_proba, y_proba)
        bs_c = brier_score_loss(y_true_proba, climo)
        return 1 - bs / bs_c
class BrierScoreReliability(BaseScoreType):
    """Reliability term of the Brier score decomposition (Murphy, 1973).

    Forecast probabilities are histogrammed into `bins`; the score is the
    frequency-weighted mean squared difference between each bin's center
    probability and the observed relative frequency of positive events in
    that bin. Lower is better.
    """
    is_lower_the_better = True
    minimum = 0.0
    maximum = 1.0

    def __init__(self, name='brier_score', precision=3, bins=np.arange(0, 1.2, 0.1)):
        self.name = name
        self.precision = precision
        self.bins = bins
        # Bug fix: bin centers are the midpoints of consecutive edges.
        # The previous expression, (bins[1:] - bins[:-1]) * 0.05, produced
        # the same tiny value (0.005 for uniform 0.1-wide bins) for every
        # bin instead of the midpoints 0.05, 0.15, ...
        self.bin_centers = (bins[1:] + bins[:-1]) * 0.5
        # clamp the representative probabilities into the valid [0, 1] range
        self.bin_centers[self.bin_centers > 1] = 1
        self.bin_centers[self.bin_centers < 0] = 0

    def score_function(self, ground_truths, predictions, valid_indexes=None):
        """A hybrid score.

        Tests the predicted _probability_ of the second class against the
        true _label index_ (0 when the first label is the ground truth,
        1 otherwise — i.e. the true probability of the second class), so
        the `Base` implementation is overridden here.
        """
        if valid_indexes is None:
            valid_indexes = slice(None, None, None)
        y_proba = predictions.y_pred[valid_indexes][:, 1]
        y_true_proba = ground_truths.y_pred_label_index[valid_indexes]
        self.check_y_pred_dimensions(y_true_proba, y_proba)
        return self.__call__(y_true_proba, y_proba)

    def __call__(self, y_true_proba, y_proba):
        # Per-bin counts of positive-event forecasts and of all forecasts.
        pos_obs_freq = np.histogram(y_proba[y_true_proba == 1], bins=self.bins)[0]
        fore_freq = np.histogram(y_proba, bins=self.bins)[0]
        pos_obs_rel_freq = np.zeros(pos_obs_freq.size)
        for p in range(pos_obs_rel_freq.size):
            if fore_freq[p] > 0:
                # float() guards against integer division of the counts
                pos_obs_rel_freq[p] = pos_obs_freq[p] / float(fore_freq[p])
            else:
                # empty bins carry zero weight; NaN is skipped by nansum
                pos_obs_rel_freq[p] = np.nan
        score = 1 / float(y_proba.size) * np.nansum(
            fore_freq * (self.bin_centers - pos_obs_rel_freq) ** 2)
        return score
class BrierScoreResolution(BaseScoreType):
    """Resolution term of the Brier score decomposition, normalized by the
    uncertainty term (Murphy, 1973). Measures how much the conditional
    event frequencies differ from climatology. Higher is better.
    """
    is_lower_the_better = False
    minimum = 0.0
    maximum = 1.0

    def __init__(self, name='brier_score', precision=3, bins=np.arange(0, 1.2, 0.1)):
        self.name = name
        self.precision = precision
        self.bins = bins
        # Bug fix: bin centers are the midpoints of consecutive edges.
        # The previous expression, (bins[1:] - bins[:-1]) * 0.05, produced
        # the same tiny value (0.005 for uniform 0.1-wide bins) for every
        # bin instead of the midpoints 0.05, 0.15, ...
        self.bin_centers = (bins[1:] + bins[:-1]) * 0.5
        # clamp the representative probabilities into the valid [0, 1] range
        self.bin_centers[self.bin_centers > 1] = 1
        self.bin_centers[self.bin_centers < 0] = 0

    def score_function(self, ground_truths, predictions, valid_indexes=None):
        """A hybrid score.

        Tests the predicted _probability_ of the second class against the
        true _label index_ (0 when the first label is the ground truth,
        1 otherwise — i.e. the true probability of the second class), so
        the `Base` implementation is overridden here.
        """
        if valid_indexes is None:
            valid_indexes = slice(None, None, None)
        y_proba = predictions.y_pred[valid_indexes][:, 1]
        y_true_proba = ground_truths.y_pred_label_index[valid_indexes]
        self.check_y_pred_dimensions(y_true_proba, y_proba)
        return self.__call__(y_true_proba, y_proba)

    def __call__(self, y_true_proba, y_proba):
        """
        See Murphy (1973) A vector partition of the probability score
        """
        # np.errstate scopes the divide-by-zero suppression to this call
        # instead of mutating numpy's global error state (np.seterr).
        with np.errstate(divide='ignore', invalid='ignore'):
            pos_obs_freq = np.histogram(y_proba[y_true_proba == 1], bins=self.bins)[0]
            fore_freq = np.histogram(y_proba, bins=self.bins)[0]
            climo = y_true_proba.mean()
            # uncertainty term: variance of the binary outcome
            unc = climo * (1 - climo)
            pos_obs_rel_freq = np.zeros(pos_obs_freq.size)
            for p in range(pos_obs_rel_freq.size):
                if fore_freq[p] > 0:
                    # float() guards against integer division of the counts
                    pos_obs_rel_freq[p] = pos_obs_freq[p] / float(fore_freq[p])
                else:
                    # empty bins carry zero weight; NaN is skipped by nansum
                    pos_obs_rel_freq[p] = np.nan
            score = 1 / float(y_proba.size) * np.nansum(
                fore_freq * (pos_obs_rel_freq - climo) ** 2)
            return score / unc
| [
"djgagne@ou.edu"
] | djgagne@ou.edu |
e70211a6287250bb4f1c24e75fbc7cbe02920446 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03610/s002378580.py | 10133c2cc30824488c99f4f6846df646f0f70405 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | s = input()
# Print the characters of s at even (0-based) indices.
# Slicing with step 2 replaces the manual index loop, join, and format.
print(s[::2])
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e225c392fed27a2f54d0b2c84bee22f46b5f8f73 | 493a36f1f8606c7ddce8fc7fe49ce4409faf80be | /.history/B073040023/client_20210614202922.py | 5127563384ff453b6516c3cabfe2a42f5794beb1 | [] | no_license | ZhangRRz/computer_network | f7c3b82e62920bc0881dff923895da8ae60fa653 | 077848a2191fdfe2516798829644c32eaeded11e | refs/heads/main | 2023-05-28T02:18:09.902165 | 2021-06-15T06:28:59 | 2021-06-15T06:28:59 | 376,568,344 | 0 | 0 | null | 2021-06-13T14:48:36 | 2021-06-13T14:48:36 | null | UTF-8 | Python | false | false | 4,670 | py | import socket
import threading
import tcppacket
import struct
from time import sleep
# socket.socket() will create a TCP socket (default)
# socket.socket(socket.AF_INET, socket.SOCK_STREAM) to explicitly define a TCP socket
# NOTE(review): this module-level socket appears unused — every request
# helper below opens its own UDP socket; confirm before removing.
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # explicitly define a UDP socket
udp_host = '127.0.0.1' # Host IP
udp_port = 12345 # specified port to connect
def init_new_calc_req(msg):
    """Send a calculation request over UDP and print the server's answer.

    Keeps reading and ACKing reply packets until one arrives with the
    FIN flag set.
    """
    request_text = msg
    conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    packet = tcppacket.TCPPacket(data=request_text.encode('utf-8'))
    packet.assemble_tcp_feilds()
    conn.sendto(packet.raw, (udp_host, udp_port))
    header_size = struct.calcsize('!HHLLBBH')
    fin = 0
    while not fin:
        data, address = conn.recvfrom(512*1024)
        conn.connect(address)
        header = struct.unpack('!HHLLBBH', data[:header_size])
        reply = data[header_size:].decode('utf-8')
        print(request_text, "is", reply)
        # the lowest bit of the flags byte marks the final (FIN) packet
        fin = 1 if header[5] % 2 else 0
        ack = tcppacket.TCPPacket(
            data="ACK".encode('utf-8'),
            flags_ack=1,
            flags_fin=fin)
        ack.assemble_tcp_feilds()
        print("ACK send to (IP,port):", address)
        conn.sendto(ack.raw, address)
def init_new_videoreq_req(i):
    """Request video number `i + 1` over UDP, reassemble the payload from
    the sequenced packets, and save it as '<i+1>received.mp4'.

    ACKs are cumulative: one is sent every third packet, or immediately
    when the FIN flag is seen.
    """
    sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    msg = str("video "+str(i+1)).encode('utf-8')
    # print("UDP target IP:", udp_host)
    # print("UDP target Port:", udp_port)
    tcp = tcppacket.TCPPacket(data=msg)
    tcp.assemble_tcp_feilds()
    sock.sendto(tcp.raw, (udp_host, udp_port)) # Sending message to UDP server
    recvdata = b''
    ack_seq = 0  # next expected server sequence number
    seq = 0  # our outgoing sequence number
    counter = 0  # packets received since the last cumulative ACK
    while True:
        data, address = sock.recvfrom(512*1024)
        s = struct.calcsize('!HHLLBBHHH')
        raw = struct.unpack('!HHLLBBHHH', data[:s])
        print("receive packet from ", address)
        # lowest bit of the flags byte marks the final (FIN) packet
        fin_flag = raw[5] % 2
        recvdata += data[s:]
        # raw[2] is the packet's seq number; raw[7] == 0 presumably means a
        # clean checksum — TODO confirm against tcppacket's field layout.
        if(raw[2] == ack_seq and raw[7] == 0):
            if(fin_flag):
                # NOTE(review): a clean FIN packet breaks here BEFORE any
                # ACK is sent; the FIN branch below only fires for packets
                # with raw[7] != 0 — confirm this is intended.
                break
        elif(raw[2] == ack_seq):
            # expected seq but nonzero raw[7]: payload kept, error reported
            print("Receive ERROR packet from ", address)
        ack_seq += 1
        counter += 1
        # --------------------------------------------
        # send ACK
        if(counter == 3 or fin_flag):
            tcp = tcppacket.TCPPacket(
                data=str("ACK").encode('utf-8'),
                seq=seq, ack_seq=ack_seq,
                flags_ack=1,
                flags_fin=fin_flag)
            tcp.assemble_tcp_feilds()
            print("ACK send to (IP,port):", address,"with ack seq:", ack_seq)
            sock.sendto(tcp.raw, address)
            if(not fin_flag):
                counter = 0
                seq += 1
        # --------------------------------------------
        if(fin_flag):
            break
    # write the reassembled stream to disk
    # NOTE(review): consider `with open(...)` so the handle is closed on error
    savename = str(i+1)+"received.mp4"
    f = open(savename, "wb")
    f.write(recvdata)
    f.close()
def init_new_dns_req(i):
    """Send a DNS lookup request ("dns google.com") over UDP and print
    the server's answer, ACKing reply packets until the FIN flag is seen.

    The `i` argument identifies the calling worker; it is not used here.
    """
    # ---------------------
    conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    query = "dns google.com"
    packet = tcppacket.TCPPacket(data=query.encode('utf-8'))
    packet.assemble_tcp_feilds()
    conn.sendto(packet.raw, (udp_host, udp_port))
    header_size = struct.calcsize('!HHLLBBH')
    fin = 0
    while not fin:
        data, address = conn.recvfrom(512*1024)
        conn.connect(address)
        header = struct.unpack('!HHLLBBH', data[:header_size])
        reply = data[header_size:].decode('utf-8')
        print(query, "is", reply)
        # the lowest bit of the flags byte marks the final (FIN) packet
        fin = 1 if header[5] % 2 else 0
        ack = tcppacket.TCPPacket(
            data="ACK".encode('utf-8'),
            flags_ack=1,
            flags_fin=fin)
        ack.assemble_tcp_feilds()
        print("ACK send to (IP,port):", address)
        conn.sendto(ack.raw, address)
    # ----------------------
# ----------------------
# def init_new
threads = []
# Calculation--------------------------------------
# Demo: one synchronous calculation request against the local server.
print("Demo calculation function")
init_new_calc_req("calc (5+5)-(10*10)+(30/6)+")
print("-"*60)
print("Demo DNS request function")
# The threaded DNS and video demos are currently disabled.
# for i in range(3):
#     threads.append(threading.Thread(target = init_new_dns_req, args = (i,)))
#     threads[-1].start()
# for i in range(2):
#     threads.append(threading.Thread(target = init_new_videoreq_req, args = (i,)))
#     threads[-1].start()
"tom95011@gmail.com"
] | tom95011@gmail.com |
5600afb59e1efe3b3404e4b6d43937db72335b2f | 40438f8fb232f8f5a213be6d9fd634fcad75f17f | /1_decisionTreeClassifier.py | 5d76aa69af68713d1e947f992f00fad0fc0c0528 | [] | no_license | LintangWisesa/ML_Sklearn_DecisionTree | 6ceaec0ef0c6191353751581d682ac5658eed1ee | b58779d08d4a39090554bdb725fd668e5f748b38 | refs/heads/master | 2020-04-28T14:46:36.064558 | 2019-07-05T02:27:05 | 2019-07-05T02:27:05 | 175,348,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | import pandas as pd
import numpy as np  # NOTE(review): np appears unused in this script
# =================================
# load csv & create dataframe
df = pd.read_csv('0_data.csv')
# print(df)
# =================================
# convert nominal data => ordinal data
from sklearn.preprocessing import LabelEncoder
# One encoder per column; codes are assigned per LabelEncoder's ordering
# (the mappings listed below assume alphabetical order — verify).
labelKantor = LabelEncoder()
df['kantorLE'] = labelKantor.fit_transform(df['kantor'])
labelJabatan = LabelEncoder()
df['jabatanLE'] = labelJabatan.fit_transform(df['jabatan'])
labelTitel = LabelEncoder()
df['titelLE'] = labelTitel.fit_transform(df['titel'])
# drop the original nominal columns, keeping only the encoded ones
df = df.drop(
    ['kantor', 'jabatan', 'titel'],
    axis = 'columns'
)
# print(df)
# ===============================
# kantor : 0 Facebook, 1 Google, 2 Tesla
# jabatan : 0 GM, 1 Manager, 2 Staf
# titel : 0 S1, 1 S2
# ===============================
# decision tree algo
from sklearn import tree
model = tree.DecisionTreeClassifier()
# train
model.fit(
    df[['kantorLE', 'jabatanLE', 'titelLE']],
    df['gaji>50']
)
# accuracy
# NOTE(review): this scores on the training data itself, so it measures
# fit, not generalization — use a held-out split for a real estimate.
acc = model.score(
    df[['kantorLE', 'jabatanLE', 'titelLE']],
    df['gaji>50']
)
print(acc * 100, '%')
# predict kantor, jabatan, titel
print(model.predict([[0, 0, 0]]))
print(model.predict([[2, 0, 0]]))
print(model.predict([[1, 3, 0]]))
| [
"lintangwisesa@ymail.com"
] | lintangwisesa@ymail.com |
c1c88a42c6e2b5f2661230d25e9771b94beb8910 | 5524e844f2cbfeebc769397c5a489183db886612 | /3_chapter/links.py | 2f10679f79c029f4c14b210d4d4aebdc376f88b6 | [] | no_license | sirajmuneer123/anand_python_problems | 7c674fbf5097f82c1bd866704bd95866fe0286db | 3c0cb960533da6362e53612cbaf19130a2d69d30 | refs/heads/master | 2021-01-04T02:36:46.064891 | 2015-09-08T05:32:46 | 2015-09-08T05:32:46 | 40,727,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | '''
Problem 8: Write a program links.py that takes URL of a webpage as argument and prints all the URLs linked from that webpage.
'''
import urllib
import re
import sys
url=sys.argv[1]
#connect to a URL
website = urllib.urlopen(url)
#read html code
html = website.read()
#use re.findall to get all the links
links = re.findall('"((http|ftp)s?://.*?)"', html)
for i in links:
print i,"\n"
| [
"sirajmuneer4@gmail.com"
] | sirajmuneer4@gmail.com |
f03bd4337a181a9592e7aef5ac212d0fed682004 | 067fb9ca4acd73a5ac92212d125607b48a3e603a | /arc/species/converter.py | dd8f43f99653acae25b001a9de8739997c2096b3 | [
"MIT"
] | permissive | dranasinghe/ARC | 730a25f820252c6aa2ea37669f573b1a68751002 | 9908484b52ef1db19bf8b2da1141d7691f3291fc | refs/heads/master | 2020-04-30T23:34:30.673944 | 2019-03-21T23:29:08 | 2019-03-21T23:29:08 | 177,147,388 | 1 | 0 | MIT | 2019-03-22T13:43:18 | 2019-03-22T13:43:18 | null | UTF-8 | Python | false | false | 16,890 | py | #!/usr/bin/env python
# encoding: utf-8
from __future__ import (absolute_import, division, print_function, unicode_literals)
import os
import numpy as np
import logging
from rdkit import Chem
import pybel
from rmgpy.species import Species
from rmgpy.molecule.molecule import Atom, Bond, Molecule
from rmgpy.molecule.element import getElement
from rmgpy.exceptions import AtomTypeError
from arkane.common import symbol_by_number
from arc.arc_exceptions import SpeciesError, SanitizationError
from arc.species.xyz_to_2d import MolGraph
##################################################################
def get_xyz_string(xyz, mol=None, number=None, symbol=None):
    """
    Convert coordinates in list-of-lists form, e.g.:
    [[0.6616514836, 0.4027481525, -0.4847382281],
     [-0.6039793084, 0.6637270105, 0.0671637135],
     ...]
    into a geometry string as read by ESS:
    C   0.66165148    0.40274815   -0.48473823
    N  -0.60397931    0.66372701    0.06716371
    ...
    Atom symbols are taken from `symbol` (a list), or derived from atomic
    numbers in `number` (a list), or from an RMG Molecule `mol`, in that
    priority order.
    This function isn't defined as a method of ARCSpecies since it is also
    used when parsing opt geometry in Scheduler.
    """
    if symbol is not None:
        elements = symbol
    elif number is not None:
        elements = [getElement(int(num)).symbol for num in number]
    elif mol is not None:
        elements = [atom.element.symbol for atom in mol.atoms]
    else:
        raise ValueError("Must have either an RMG:Molecule object input as `mol`, or atomic numbers \ symbols.")
    rows = []
    for i, coord in enumerate(xyz):
        element = elements[i]
        # pad the symbol into a fixed 4-character column
        row = element + ' ' * (4 - len(element))
        # fixed-width (14.8f) columns for x, y, z
        row += ''.join('{0:14.8f}'.format(c) for c in coord)
        rows.append(row + '\n')
    return ''.join(rows)
def get_xyz_matrix(xyz):
    """
    Convert a string xyz geometry, e.g.:
    C 0.6616514836 0.4027481525 -0.4847382281
    N -0.6039793084 0.6637270105 0.0671637135
    ...
    into a list-of-lists coordinate matrix.
    Returns the matrix along with the atom symbols and the separate
    x, y, and z coordinate lists.
    """
    xyz = standardize_xyz_string(xyz)
    symbols, x, y, z = [], [], [], []
    for row in xyz.split('\n'):
        if not row:
            continue
        atom, sx, sy, sz = row.split()
        symbols.append(atom)
        x.append(float(sx))
        y.append(float(sy))
        z.append(float(sz))
    coords = [[x[i], y[i], z[i]] for i in range(len(x))]
    return coords, symbols, x, y, z
def xyz_string_to_xyz_file_format(xyz, comment=''):
    """
    Convert the ARC xyz string format into the XYZ file format
    (atom count, comment line, then the coordinates):
    https://en.wikipedia.org/wiki/XYZ_file_format
    """
    xyz = standardize_xyz_string(xyz)
    natoms = len(xyz.split()) // 4  # four whitespace tokens per atom line
    return '{0}\n{1}\n{2}\n'.format(natoms, comment, xyz)
def standardize_xyz_string(xyz):
    """
    Correct the format of an xyz string: drop empty or space-only lines
    (usually added by the user at the beginning or the end), and convert
    Gaussian-style rows (e.g. " 1  8  0  3.132319  0.769111 -0.080869")
    into "symbol x y z" rows.
    """
    content = [line for line in xyz.splitlines()
               if line and any(char != ' ' for char in line)]
    if all(len(line.split()) == 6 for line in content):
        # Gaussian output columns: index, atomic number, type, x, y, z
        converted = []
        for line in content:
            fields = line.split()
            converted.append(' '.join([symbol_by_number[int(fields[1])],
                                       fields[3], fields[4], fields[5]]))
        content = converted
    return os.linesep.join(content)
def xyz_to_pybel_mol(xyz):
    """
    Convert an xyz string into an Open Babel molecule object.
    Returns None if Open Babel cannot read the geometry.
    """
    if not isinstance(xyz, (str, unicode)):
        raise SpeciesError('xyz must be a string format, got: {0}'.format(type(xyz)))
    try:
        return pybel.readstring('xyz', xyz_string_to_xyz_file_format(xyz))
    except IOError:
        return None
def pybel_to_inchi(pybel_mol):
    """
    Convert an Open Babel molecule object to an InChI string.
    """
    # The 'F' option requests the fixed-H layer in the InChI output.
    return pybel_mol.write('inchi', opt={'F': None}).strip()
def rmg_mol_from_inchi(inchi):
    """
    Generate an RMG Molecule object from an InChI string.
    Returns None if RMG cannot interpret the InChI.
    """
    try:
        return Molecule().fromInChI(str(inchi))
    except AtomTypeError:
        return None
def elementize(atom):
    """
    Convert the atomType of an RMG:Atom object into its general parent
    element atomType (e.g., `S4d` into `S`).
    `atom` is an RMG:Atom object.
    Written by Matt Johnson
    """
    # keep only the generic parent types, excluding wildcards and valence types
    generic_types = [at for at in atom.atomType.generic
                     if at.label != 'R' and at.label != 'R!H' and 'Val' not in at.label]
    if generic_types:
        atom.atomType = generic_types[0]
def molecules_from_xyz(xyz, multiplicity=None):
    """
    Create RMG:Molecule objects from xyz with correct atom labeling.
    `xyz` is in a string format.
    Returns `s_mol` (with only single bonds, atom order matching the xyz)
    and `b_mol` (with best guesses for bond orders).
    This function is based on the MolGraph.perceive_smiles method.
    Returns None for b_mol if it is unsuccessful to infer bond orders.
    If `multiplicity` is given, the returned species multiplicity will be set to it.
    """
    if xyz is None:
        return None, None
    if not isinstance(xyz, (str, unicode)):
        raise SpeciesError('xyz must be a string format, got: {0}'.format(type(xyz)))
    xyz = standardize_xyz_string(xyz)
    coords, symbols, _, _, _ = get_xyz_matrix(xyz)
    # first try perceiving connectivity from the 3D geometry
    mol_graph = MolGraph(symbols=symbols, coords=coords)
    infered_connections = mol_graph.infer_connections()
    if infered_connections:
        mol_s1 = mol_graph.to_rmg_mol()  # An RMG Molecule with single bonds, atom order corresponds to xyz
    else:
        # fall back to the distance-based single-bond perception
        mol_s1, _ = s_bonds_mol_from_xyz(xyz)
    if mol_s1 is None:
        logging.error('Could not create a 2D graph representation from xyz:\n{0}'.format(xyz))
        return None, None
    mol_s1_updated = update_molecule(mol_s1, to_single_bonds=True)
    # derive bond orders via Open Babel -> InChI -> RMG, if possible
    pybel_mol = xyz_to_pybel_mol(xyz)
    if pybel_mol is not None:
        inchi = pybel_to_inchi(pybel_mol)
        mol_bo = rmg_mol_from_inchi(inchi)  # An RMG Molecule with bond orders, but without preserved atom order
        if mol_bo is not None:
            if multiplicity is not None:
                set_multiplicity(mol_bo, multiplicity)
            # propagate the (possibly corrected) multiplicity and radicals
            # back onto the atom-ordered single-bond molecule
            mol_s1_updated.multiplicity = mol_bo.multiplicity
            order_atoms(ref_mol=mol_s1_updated, mol=mol_bo)
            set_multiplicity(mol_s1_updated, mol_bo.multiplicity, radical_map=mol_bo)
    else:
        mol_bo = None
    s_mol, b_mol = mol_s1_updated, mol_bo
    return s_mol, b_mol
def set_multiplicity(mol, multiplicity, radical_map=None):
    """
    Set the multiplicity of `mol` to `multiplicity` and change radicals as needed.
    If `radical_map`, an RMG Molecule object with the same atom order, is given,
    it is used to set radicals (useful if bond orders aren't known for a molecule).
    Raises SpeciesError if the radical count cannot be reconciled with the
    requested multiplicity.
    """
    mol.multiplicity = multiplicity
    if radical_map is not None:
        if not isinstance(radical_map, Molecule):
            raise TypeError('radical_map sent to set_multiplicity() has to be a Molecule object. Got {0}'.format(
                type(radical_map)))
        set_radicals_by_map(mol, radical_map)
    radicals = mol.getRadicalCount()
    if mol.multiplicity != radicals + 1:
        # this is not the trivial "multiplicity = number of radicals + 1" case
        # either the number of radicals was not identified correctly from the 3D structure (i.e., should be lone pairs),
        # or their spin isn't determined correctly
        if mol.multiplicity > radicals + 1:
            # there are sites that should have radicals, but were'nt identified as such.
            # try adding radicals according to missing valances
            add_rads_by_atom_valance(mol)
            # NOTE(review): `radicals` is not recomputed after
            # add_rads_by_atom_valance(), so this re-check uses the stale
            # count — confirm whether a mol.getRadicalCount() refresh is
            # missing here.
            if mol.multiplicity > radicals + 1:
                # still problematic, currently there's no automated solution to this case, raise an error
                raise SpeciesError('A multiplicity of {0} was given, but only {1} radicals were identified. '
                                   'Cannot infer 2D graph representation for this species.\nMore info:{2}\n{3}'.format(
                                    mol.multiplicity, radicals, mol.toSMILES(), mol.toAdjacencyList()))
    if len(mol.atoms) == 1 and mol.multiplicity == 1 and mol.atoms[0].radicalElectrons == 4:
        # This is a singlet atomic C or Si
        mol.atoms[0].radicalElectrons = 0
        mol.atoms[0].lonePairs = 2
    if mol.multiplicity < radicals + 1:
        # make sure all cabene and nitrene sites, if exist, have lone pairs rather than two unpaired electrons
        for atom in mol.atoms:
            if atom.radicalElectrons == 2:
                atom.radicalElectrons = 0
                atom.lonePairs += 1
    # final check: an even number of radicals results in an odd multiplicity, and vice versa
    if divmod(mol.multiplicity, 2)[1] == divmod(radicals, 2)[1]:
        raise SpeciesError('Number of radicals ({0}) and multiplicity ({1}) for {2} do not match.\n{3}'.format(
            radicals, mol.multiplicity, mol.toSMILES(), mol.toAdjacencyList()))
def add_rads_by_atom_valance(mol):
    """
    A helper function for assigning radicals if not identified automatically
    and missing according to the given multiplicity.

    For every heavy (non-hydrogen) atom, the number of electrons "missing"
    relative to four occupied valence orbitals is assigned as radical electrons.
    We assume here that all partial charges are already set, but this assumption
    could be wrong. This implementation might also be problematic for aromatic
    species with undefined bond orders.
    """
    for atom in mol.atoms:
        if atom.isNonHydrogen():
            atomic_orbitals = atom.lonePairs + atom.radicalElectrons + atom.getBondOrdersForAtom()
            missing_electrons = 4 - atomic_orbitals
            # Guard with `> 0`: the previous truthiness check also fired for
            # hypervalent atoms (more than four occupied orbitals) and assigned
            # a *negative* radicalElectrons count.
            if missing_electrons > 0:
                atom.radicalElectrons = missing_electrons
def set_radicals_by_map(mol, radical_map):
    """Set radicals in `mol` by `radical_map`; both are RMG Molecule objects with the same atom order.

    Raises ValueError if the two molecules' atom orderings disagree
    (compared element-by-element).
    """
    for i, atom in enumerate(mol.atoms):
        if atom.element.number != radical_map.atoms[i].element.number:
            # Consistency fix: both operands now use `.element.symbol`; the old
            # code read `radical_map.atoms[i].symbol`, spelled differently from
            # the left-hand operand.
            raise ValueError('Atom order in mol and radical_map in set_radicals_by_map() do not match. '
                             '{0} is not {1}.'.format(atom.element.symbol,
                                                      radical_map.atoms[i].element.symbol))
        atom.radicalElectrons = radical_map.atoms[i].radicalElectrons
def order_atoms_in_mol_list(ref_mol, mol_list):
    """Sort the atoms of every molecule in `mol_list` into the atom order of `ref_mol`.

    Each molecule is reordered in place via :func:`order_atoms`.
    """
    for candidate in mol_list:
        order_atoms(ref_mol=ref_mol, mol=candidate)
def order_atoms(ref_mol, mol):
    """Order the atoms in `mol` (in place) by the atom order in `ref_mol`.

    Both molecules are first reduced to single-bond skeletons (via
    update_molecule) so that resonance/bond-order differences do not break the
    isomorphism search. Raises SanitizationError when no mapping can be found.
    """
    if mol is not None:
        # Separate deep copies for the isomorphism *check* and the isomorphism
        # *search*, since both RMG calls may reorder atoms of their arguments.
        ref_mol_is_iso_copy = ref_mol.copy(deep=True)
        mol_is_iso_copy = mol.copy(deep=True)
        ref_mol_find_iso_copy = ref_mol.copy(deep=True)
        mol_find_iso_copy = mol.copy(deep=True)
        # Reduce all copies to single bonds for a bond-order-insensitive match.
        ref_mol_is_iso_copy = update_molecule(ref_mol_is_iso_copy, to_single_bonds=True)
        mol_is_iso_copy = update_molecule(mol_is_iso_copy, to_single_bonds=True)
        ref_mol_find_iso_copy = update_molecule(ref_mol_find_iso_copy, to_single_bonds=True)
        mol_find_iso_copy = update_molecule(mol_find_iso_copy, to_single_bonds=True)
        if mol_is_iso_copy.isIsomorphic(ref_mol_is_iso_copy, saveOrder=True):
            mapping = mol_find_iso_copy.findIsomorphism(ref_mol_find_iso_copy, saveOrder=True)
            if len(mapping):
                # findIsomorphism may return a list of mappings; use the first.
                if isinstance(mapping, list):
                    mapping = mapping[0]
                # index_map: position of the atom in ref_mol -> position in mol.
                index_map = {ref_mol_find_iso_copy.atoms.index(val): mol_find_iso_copy.atoms.index(key)
                             for key, val in mapping.items()}
                mol.atoms = [mol.atoms[index_map[i]] for i, _ in enumerate(mol.atoms)]
            else:
                raise SanitizationError('Could not map molecules {0}, {1}:\n\n{2}\n\n{3}'.format(
                    ref_mol.toSMILES(), mol.toSMILES(), ref_mol.toAdjacencyList(), mol.toAdjacencyList()))
        else:
            raise SanitizationError('Could not map non isomorphic molecules {0}, {1}:\n\n{2}\n\n{3}'.format(
                ref_mol.toSMILES(), mol.toSMILES(), ref_mol.toAdjacencyList(), mol.toAdjacencyList()))
def update_molecule(mol, to_single_bonds=False):
    """
    Returns a copy of the current molecule with updated atomTypes.
    If to_single_bonds is True, the returned mol contains only single bonds.
    This is useful for isomorphism comparison.

    Returns None if `mol` has no `atoms` attribute.
    """
    new_mol = Molecule()
    try:
        atoms = mol.atoms
    except AttributeError:
        # Not a Molecule-like object: signal failure with None rather than raise.
        return None
    atom_mapping = dict()
    # First pass: copy atoms, remembering old-atom -> new-atom correspondence.
    for atom1 in atoms:
        new_atom = new_mol.addAtom(Atom(atom1.element))
        atom_mapping[atom1] = new_atom
    # Second pass: copy bonds.
    # NOTE(review): each bond is visited from both of its end atoms, so
    # addBond() runs twice per bond — presumably harmless because RMG stores
    # bonds keyed by atom pair; confirm before refactoring.
    for atom1 in atoms:
        for atom2 in atom1.bonds.keys():
            bond_order = 1.0 if to_single_bonds else atom1.bonds[atom2].getOrderNum()
            bond = Bond(atom_mapping[atom1], atom_mapping[atom2], bond_order)
            new_mol.addBond(bond)
    new_mol.updateAtomTypes()
    new_mol.multiplicity = mol.multiplicity
    return new_mol
def s_bonds_mol_from_xyz(xyz):
    """Create a single bonded molecule from xyz using RMG's connectTheDots().

    `xyz` is a plain-text XYZ block, one "<symbol> <x> <y> <z>" entry per line.
    Returns (mol, coordinates) where `coordinates` is a list of [x, y, z]
    floats in the same order as mol.atoms.

    NOTE(review): the isinstance check references `unicode`, a Python-2-only
    name; under Python 3 this line raises NameError. Confirm the intended
    Python version before changing it.
    """
    mol = Molecule()
    coordinates = list()
    if not isinstance(xyz, (str, unicode)):
        raise SpeciesError('xyz must be a string format, got: {0}'.format(type(xyz)))
    for line in xyz.split('\n'):
        if line:
            atom = Atom(element=str(line.split()[0]))
            coordinates.append([float(line.split()[1]), float(line.split()[2]), float(line.split()[3])])
            atom.coords = np.array(coordinates[-1], np.float64)
            mol.addAtom(atom)
    mol.connectTheDots()  # only adds single bonds, but we don't care
    return mol, coordinates
def rdkit_conf_from_mol(mol, coordinates):
    """A helper function generating an RDKit:Conformer object from an RMG:Molecule object.

    Embeds `mol` in RDKit, then overwrites the embedded coordinates with the
    given `coordinates` (one [x, y, z] triple per atom, in mol.atoms order).
    Returns (conf, rd_mol, indx_map) where `indx_map` maps the RMG atom index
    to the corresponding RDKit atom index.
    """
    rd_mol, rd_inds = mol.toRDKitMol(removeHs=False, returnMapping=True)
    Chem.AllChem.EmbedMolecule(rd_mol)  # unfortunately, this mandatory embedding changes the coordinates
    indx_map = dict()
    for xyz_index, atom in enumerate(mol.atoms):  # generate an atom index mapping dictionary
        rd_index = rd_inds[atom]
        indx_map[xyz_index] = rd_index
    conf = rd_mol.GetConformer(id=0)
    for i in range(rd_mol.GetNumAtoms()):  # reset atom coordinates to the supplied ones
        conf.SetAtomPosition(indx_map[i], coordinates[i])
    return conf, rd_mol, indx_map
def check_isomorphism(mol1, mol2, filter_structures=True):
    """
    Converts `mol1` and `mol2`, which are RMG:Molecule objects, into RMG:Species
    objects and generates resonance structures. Then checks Species isomorphism.
    Returns True if one of the molecules in the Species derived from `mol1`
    is isomorphic to one of the molecules in the Species derived from `mol2`.

    `filter_structures` is passed through to Species.generate_resonance_structures().

    Note: deep copies of the molecules are used, since isIsomorphic() changes
    atom orders of its arguments.
    """
    mol1_copy = mol1.copy(deep=True)
    mol2_copy = mol2.copy(deep=True)
    spc1 = Species(molecule=[mol1_copy])
    spc1.generate_resonance_structures(keep_isomorphic=False, filter_structures=filter_structures)
    spc2 = Species(molecule=[mol2_copy])
    spc2.generate_resonance_structures(keep_isomorphic=False, filter_structures=filter_structures)
    return spc1.isIsomorphic(spc2)
| [
"alongd@mit.edu"
] | alongd@mit.edu |
9e454ce7dc822bc4e590a1f283e9adfdb8a1967d | 083c174d42b658b108d35b1600c88ee2fac1b187 | /mvp/config.py | 705ed800f9462b85fc0772f9da695fbc59195c4d | [
"MIT"
] | permissive | sgodanas/mvp | 5202157ad5217e5a19e9bbfe80fa7c5838e3f3ad | 1814db4a7f9966e3c066433f1864c77b8cd0ad5d | refs/heads/master | 2023-04-18T08:31:03.142712 | 2021-04-27T15:09:45 | 2021-04-27T15:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # -*- coding: utf-8 -*-
'''
Configure mvp presets and hooks
'''
import os
import sys
USER_PRESETS_PATH = os.path.expanduser('~/.mvp')
PRESETS_PATH = [USER_PRESETS_PATH]
# Add paths from MVP_PRESETS env var
for path in os.environ.get('MVP_PRESETS', '').split(os.pathsep):
if path:
PRESETS_PATH.insert(0, path)
for path in PRESETS_PATH:
if not os.path.exists(path):
os.makedirs(path)
sys.path.insert(1, path)
| [
"danielbradham@gmail.com"
] | danielbradham@gmail.com |
9249ad119efcabee5c40af68d32a6c718bedc9cd | 6909de83dd90ee1169d6c453c327ab2ce2687485 | /scheme/tests/11.py | 9cbe458b96614b51b34436060c73fa7461d32e63 | [] | no_license | dantefung/cs61a-2021-summer | 730cb0b9ab7327c32c619779d71882531bf328dd | 4f22f20fcfddfb5bf121081919310413209da1b2 | refs/heads/master | 2023-08-19T14:51:27.380738 | 2021-11-01T06:54:33 | 2021-11-01T06:54:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,528 | py | test = {
"name": "Problem 11",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
scm> (define (square x) (* x x))
square
scm> square
(lambda (x) (* x x))
scm> (square 21)
441
scm> square ; check to make sure lambda body hasn't changed
(lambda (x) (* x x))
scm> (define square (lambda (x) (* x x)))
square
scm> (square (square 21))
194481
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
scm> ((lambda (x) (list x (list (quote quote) x))) (quote (lambda (x) (list x (list (quote quote) x))))) ; if you're failing this test case and have checked your implementation of Q11, you may want to check your Q6 solution
((lambda (x) (list x (list (quote quote) x))) (quote (lambda (x) (list x (list (quote quote) x)))))
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": "",
"teardown": "",
"type": "scheme",
},
{
"cases": [
{
"code": r"""
>>> double = do_lambda_form(read_line("((n) (* 2 n))"), env) # make double a LambdaProcedure that doubles a number
>>> f1 = double.make_call_frame(Pair(10, nil), env)
>>> f1.lookup('n')
10
>>> env.define('n', 5)
>>> add_n = do_lambda_form(read_line("((x) (+ x n))"), env)
>>> f2 = add_n.make_call_frame(Pair(5, nil), f1) # pass in a different environment as env
>>> f2.lookup('x')
5
>>> f2.lookup('n') # Hint: make sure you're using self.env not env
5
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
>>> do_twice = do_lambda_form(read_line("((f x) (f (f x)))"), env) # make do_twice a LambdaProcedure that takes f, x, and returns f(f(x))
>>> double = do_lambda_form(read_line("((x) (* 2 x))"), env) # make double a LambdaProcedure that doubles a number
>>> call_frame = do_twice.make_call_frame(Pair(double, Pair(3, nil)), env) # Hint: make sure you're not evaluating args again in make_call_frame
>>> call_frame.lookup('x') # Check that x is properly defined
3
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": r"""
>>> from scheme import *
>>> env = create_global_frame()
""",
"teardown": "",
"type": "doctest",
},
{
"cases": [
{
"code": r"""
scm> (define (outer x y)
.... (define (inner z x)
.... (+ x (* y 2) (* z 3)))
.... (inner x 10))
71fe94b728b1cb1923a1c51c2533bcd8
# locked
scm> (outer 1 2)
5d3ec98dabcf5b4a06694ccc93722cfb
# locked
scm> (define (outer-func x y)
.... (define (inner z x)
.... (+ x (* y 2) (* z 3)))
.... inner)
0b6323ff730faa1f7ac702f64f4cbfcb
# locked
scm> ((outer-func 1 2) 1 10)
5d3ec98dabcf5b4a06694ccc93722cfb
# locked
""",
"hidden": False,
"locked": True,
},
{
"code": r"""
scm> (define square (lambda (x) (* x x)))
square
scm> (define (sum-of-squares x y) (+ (square x) (square y)))
sum-of-squares
scm> (sum-of-squares 3 4)
25
scm> (define double (lambda (x) (* 2 x)))
double
scm> (define compose (lambda (f g) (lambda (x) (f (g x)))))
compose
scm> (define apply-twice (lambda (f) (compose f f)))
apply-twice
scm> ((apply-twice double) 5)
20
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": "",
"teardown": "",
"type": "scheme",
},
],
}
| [
"wuyudi1109@gmail.com"
] | wuyudi1109@gmail.com |
a8d7239b51a01a59c937dba8d1195588f320b114 | 007d35a3a2def8b0801ca0eab3469e8422b9f767 | /1_ Preparation/2_Triangle.py | 49dfe65bba82534c2104ba8055903f0b773eb657 | [] | no_license | yosho-18/Programming-Contest-Challenge-Book_Ant-Book | 91fedf6f608f2e0f9d410922c847d2a2046c1b95 | 94cd41d8c7be50e2f09150ac63b86821933268f8 | refs/heads/master | 2020-06-02T10:53:09.218986 | 2019-06-10T08:55:06 | 2019-06-10T08:55:06 | 191,132,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | n = int(input())
# Stick lengths; `n` (their count) is read on the preceding line.
a = [int(i) for i in input().split()]
# A valid triangle needs its longest side strictly shorter than the sum of
# the other two; maximize the perimeter over all index triples.
ans = 0
for i in range(n):
    for j in range(i + 1, n):
        for k in range(j + 1, n):
            perimeter = a[i] + a[j] + a[k]  # was named `len`, shadowing the builtin
            ma = max(a[i], a[j], a[k])
            rest = perimeter - ma
            if ma < rest:
                ans = max(ans, perimeter)
print(ans)
"44283410+wato18@users.noreply.github.com"
] | 44283410+wato18@users.noreply.github.com |
f1bdee81fdd6d3bda77e4cfdcccb42cda93e8d97 | 9009ad47bc1d6adf8ee6d0f2f2b3125dea44c0aa | /00-MY-TEMPLATE-001.py | 84d5cda056758d652204415992f14aea2354c511 | [] | no_license | luctivud/Coding-Trash | 42e880624f39a826bcaab9b6194add2c9b3d71fc | 35422253f6169cc98e099bf83c650b1fb3acdb75 | refs/heads/master | 2022-12-12T00:20:49.630749 | 2020-09-12T17:38:30 | 2020-09-12T17:38:30 | 241,000,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | # जय श्री राम
import sys; import math; from collections import *
# sys.setrecursionlimit(10**6)
def get_ints(): return map(int, input().split())
def get_list(): return list(get_ints())
def printspx(*args): return print(*args, end="")
def printsp(*args): return print(*args, end=" ")
MODPRIME = int(1e9+7); BABYMODPR = 998244353; MAXN = int(1e5)
sys.stdin = open("input.txt","r"); sys.stdout = open("output.txt","w")
# for _testcases_ in range(int(input())):
'''
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
THE LOGIC AND APPROACH IS MINE @luctivud ( UDIT GUPTA )
Link may be copy-pasted here if it's taken from other source.
DO NOT PLAGIARISE.
>>> COMMENT THE STDIN!! CHANGE ONLINE JUDGE !!
''' | [
"luctivud@gmail.com"
] | luctivud@gmail.com |
a41b5aa994c57a14e4dc4af3ec85cdec17b93536 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/hzy46_fast-neural-style-tensorflow/fast-neural-style-tensorflow-master/preprocessing/preprocessing_factory.py | 39ee9bf7bc24744fd430f7a725f856708945b2d0 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 2,861 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from preprocessing import cifarnet_preprocessing
from preprocessing import inception_preprocessing
from preprocessing import lenet_preprocessing
from preprocessing import vgg_preprocessing
slim = tf.contrib.slim
def get_preprocessing(name, is_training=False):
    """Look up the preprocessing module registered under `name`.

    Args:
      name: The name of the preprocessing function.
      is_training: `True` if the model is being used for training and `False`
        otherwise.

    Returns:
      A `(preprocessing_fn, unprocessing_fn)` pair of closures. The first has
      the signature `preprocessing_fn(image, output_height, output_width,
      **kwargs)`; the second `unprocessing_fn(image, **kwargs)`.

    Raises:
      ValueError: If Preprocessing `name` is not recognized.
    """
    preprocessing_fn_map = {
        'cifarnet': cifarnet_preprocessing,
        'inception': inception_preprocessing,
        'inception_v1': inception_preprocessing,
        'inception_v2': inception_preprocessing,
        'inception_v3': inception_preprocessing,
        'inception_v4': inception_preprocessing,
        'inception_resnet_v2': inception_preprocessing,
        'lenet': lenet_preprocessing,
        'resnet_v1_50': vgg_preprocessing,
        'resnet_v1_101': vgg_preprocessing,
        'resnet_v1_152': vgg_preprocessing,
        'vgg': vgg_preprocessing,
        'vgg_a': vgg_preprocessing,
        'vgg_16': vgg_preprocessing,
        'vgg_19': vgg_preprocessing,
    }
    try:
        module = preprocessing_fn_map[name]
    except KeyError:
        raise ValueError('Preprocessing name [%s] was not recognized' % name)

    def preprocessing_fn(image, output_height, output_width, **kwargs):
        # Bind the resolved module; `is_training` is captured from the closure.
        return module.preprocess_image(
            image, output_height, output_width, is_training=is_training, **kwargs)

    def unprocessing_fn(image, **kwargs):
        return module.unprocess_image(image, **kwargs)

    return preprocessing_fn, unprocessing_fn
| [
"659338505@qq.com"
] | 659338505@qq.com |
729b0ad28719a30dcb5d3de097cab5678f6cb208 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /kS8tfJD2ggohQbWx7_9.py | 10d83a002c78aabd9196a850bec31aca4ebc4060 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py |
def last_name_lensort(names):
return [' '.join(a) for a in sorted([x.split() for x in names],key=lambda x: (len(x[1]), x[1]))]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
dc8aa8d63bfa7714751d7ffc7b3f4c2a3e2b6b2f | 237ff79d806953b55d9e6db8d2fa498333123949 | /app_verifications/text/text_verifications.py | f234ffd13a301804f934726714fd3d7cbafd388b | [
"MIT"
] | permissive | kskarbinski/threads-api | f86358f43a31271cd939695af200975d5c2825ba | c144c1cb51422095922310d278f80e4996c10ea0 | refs/heads/master | 2020-03-11T04:29:16.776817 | 2019-11-29T15:40:34 | 2019-11-29T15:40:34 | 129,777,543 | 0 | 0 | MIT | 2019-11-29T15:40:35 | 2018-04-16T17:00:27 | Python | UTF-8 | Python | false | false | 531 | py | from .text_checks import TextChecks
from app_errors.http_exceptions import HttpException
class TextVerifications(TextChecks):
    """Text-related verifications.

    Every method verifies one property of a text; when a verification fails,
    an HttpException is raised (via HttpException.throw_422).
    """

    def verify_text_length(self, min_l, max_l):
        """Return True when the text length is within [min_l, max_l]; otherwise 422."""
        length_ok = self.check_text_length(min_l=min_l, max_l=max_l)
        if not length_ok:
            HttpException.throw_422(
                "Text has to be between {min_l} and {max_l} characters".format(min_l=min_l, max_l=max_l)
            )
            return None
        return True
| [
"kripperr@gmail.com"
] | kripperr@gmail.com |
1fd59d3de0e61312329e973df01ed1a703dd2d0c | 2db55786df5fdf009eeec5f000937f0ec3eb6527 | /tasks/files/config/polybar/scripts/translate_lyrics | 475248a2e2c07bd0ca45147da10883c36f61e07c | [] | no_license | preachermanx/environment | 08ef7a1e558a3c9c78bbfb789584b81997b9787d | cba8277663915ce173bab79f44f890a666abbbed | refs/heads/master | 2020-07-21T13:05:45.011963 | 2019-09-05T08:40:31 | 2019-09-05T08:40:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | #!/usr/bin/env python3
import os.path
import re
from subprocess import Popen, PIPE
def convert_line(line):
    """Parse one LRC lyric line of the form ``[mm:ss.xx]text``.

    Returns a ``(seconds, text)`` tuple. Lines without a timestamp tag return
    ``(0, line)`` unchanged.
    """
    time_search = re.search(r'\[(\d+):(\d+).\d+\](.*)', line)
    if time_search is None:
        # Bug fix: the old code fell through to time_search.group(3) and
        # raised AttributeError for lines without a [mm:ss.xx] tag.
        return (0, line)
    in_seconds = int(time_search.group(1)) * 60
    in_seconds += int(time_search.group(2))
    return (in_seconds, time_search.group(3))
def lyric_exists(music, artist):
    """Return True if a cached lyric file for (music, artist) is present in /tmp."""
    cache_file = '/tmp/{}_{}.txt'.format(music, artist)
    return os.path.exists(cache_file)
def save_lyric(music, artist, output):
    """Cache `output` as the lyric text for (music, artist) under /tmp."""
    cache_file = '/tmp/{}_{}.txt'.format(music, artist)
    with open(cache_file, 'w') as cache:
        cache.write(output)
def recover_lyric(music, artist):
    """Return the cached lyric text for (music, artist) from /tmp."""
    cache_file = '/tmp/{}_{}.txt'.format(music, artist)
    with open(cache_file) as cache:
        return cache.read()
# Fetch the current lyric via the polybar `lyrics` script and pipe it through
# translate-shell into Portuguese.
result = os.popen(
    '/home/diegorubin/.newenv/polybar/scripts/lyrics | trans :pt')
# NOTE(review): assumes the translated output is always on the third line of
# trans' output; an IndexError occurs if fewer lines are produced — confirm.
result = result.readlines()[2]
print(result)
| [
"rubin.diego@gmail.com"
] | rubin.diego@gmail.com | |
865b559ab81261fddb02dd0c45bf559683654b15 | 3a18b8ba06a58231f4ecb2c1a231722cdf862e6b | /python_code/dillonsCode/websites_folder/todo_tutorial/venv/lib/python3.8/site-packages/werkzeug/sansio/request.py | 4bff19e92e68eb6402b6593f63c1ede873721f74 | [] | no_license | dillonallen92/codeStorage | 98dd7f5a8ecb062e37313a1323aacd362ffc44c7 | 23351e0b3348de922283f6494762db9f291579d6 | refs/heads/master | 2023-07-07T10:32:40.428607 | 2023-06-26T23:26:47 | 2023-06-26T23:28:13 | 141,781,205 | 0 | 1 | null | 2023-02-12T00:21:58 | 2018-07-21T04:30:51 | Mathematica | UTF-8 | Python | false | false | 20,795 | py | import typing as t
from datetime import datetime
from .._internal import _to_str
from ..datastructures import Accept
from ..datastructures import Authorization
from ..datastructures import CharsetAccept
from ..datastructures import ETags
from ..datastructures import Headers
from ..datastructures import HeaderSet
from ..datastructures import IfRange
from ..datastructures import ImmutableList
from ..datastructures import ImmutableMultiDict
from ..datastructures import LanguageAccept
from ..datastructures import MIMEAccept
from ..datastructures import MultiDict
from ..datastructures import Range
from ..datastructures import RequestCacheControl
from ..http import parse_accept_header
from ..http import parse_authorization_header
from ..http import parse_cache_control_header
from ..http import parse_cookie
from ..http import parse_date
from ..http import parse_etags
from ..http import parse_if_range_header
from ..http import parse_list_header
from ..http import parse_options_header
from ..http import parse_range_header
from ..http import parse_set_header
from ..urls import url_decode
from ..user_agent import UserAgent
from ..useragents import _UserAgent as _DeprecatedUserAgent
from ..utils import cached_property
from ..utils import header_property
from .utils import get_current_url
from .utils import get_host
class Request:
    """Represents the non-IO parts of a HTTP request, including the
    method, URL info, and headers.

    This class is not meant for general use. It should only be used when
    implementing WSGI, ASGI, or another HTTP application spec. Werkzeug
    provides a WSGI implementation at :class:`werkzeug.wrappers.Request`.

    :param method: The method the request was made with, such as
        ``GET``.
    :param scheme: The URL scheme of the protocol the request used, such
        as ``https`` or ``wss``.
    :param server: The address of the server. ``(host, port)``,
        ``(path, None)`` for unix sockets, or ``None`` if not known.
    :param root_path: The prefix that the application is mounted under.
        This is prepended to generated URLs, but is not part of route
        matching.
    :param path: The path part of the URL after ``root_path``.
    :param query_string: The part of the URL after the "?".
    :param headers: The headers received with the request.
    :param remote_addr: The address of the client sending the request.

    .. versionadded:: 2.0
    """

    #: The charset used to decode most data in the request.
    charset = "utf-8"

    #: The error-handling scheme passed to ``bytes.decode``, defaults to 'replace'.
    encoding_errors = "replace"

    #: the class to use for `args` and `form`. The default is an
    #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
    #: multiple values per key. Alternatively it makes sense to use an
    #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
    #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
    #: which is the fastest but only remembers the last key. It is also
    #: possible to use mutable structures, but this is not recommended.
    #:
    #: .. versionadded:: 0.6
    parameter_storage_class: t.Type[MultiDict] = ImmutableMultiDict

    #: The type to be used for dict values from the incoming WSGI
    #: environment. (For example for :attr:`cookies`.) By default an
    #: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.
    #:
    #: .. versionchanged:: 1.0.0
    #:     Changed to ``ImmutableMultiDict`` to support multiple values.
    #:
    #: .. versionadded:: 0.6
    dict_storage_class: t.Type[MultiDict] = ImmutableMultiDict

    #: the type to be used for list values from the incoming WSGI environment.
    #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
    #: (for example for :attr:`access_list`).
    #:
    #: .. versionadded:: 0.6
    list_storage_class: t.Type[t.List] = ImmutableList

    user_agent_class: t.Type[UserAgent] = _DeprecatedUserAgent
    """The class used and returned by the :attr:`user_agent` property to
    parse the header. Defaults to
    :class:`~werkzeug.user_agent.UserAgent`, which does no parsing. An
    extension can provide a subclass that uses a parser to provide other
    data.

    .. versionadded:: 2.0
    """

    #: Valid host names when handling requests. By default all hosts are
    #: trusted, which means that whatever the client says the host is
    #: will be accepted.
    #:
    #: Because ``Host`` and ``X-Forwarded-Host`` headers can be set to
    #: any value by a malicious client, it is recommended to either set
    #: this property or implement similar validation in the proxy (if
    #: the application is being run behind one).
    #:
    #: .. versionadded:: 0.9
    trusted_hosts: t.Optional[t.List[str]] = None
def __init__(
    self,
    method: str,
    scheme: str,
    server: t.Optional[t.Tuple[str, t.Optional[int]]],
    root_path: str,
    path: str,
    query_string: bytes,
    headers: Headers,
    remote_addr: t.Optional[str],
) -> None:
    """Store the request components, normalizing method case and path slashes."""
    #: The method the request was made with, such as ``GET``.
    self.method = method.upper()
    #: The URL scheme of the protocol the request used, such as
    #: ``https`` or ``wss``.
    self.scheme = scheme
    #: The address of the server. ``(host, port)``, ``(path, None)``
    #: for unix sockets, or ``None`` if not known.
    self.server = server
    #: The prefix that the application is mounted under, without a
    #: trailing slash. :attr:`path` comes after this.
    self.root_path = root_path.rstrip("/")
    #: The path part of the URL after :attr:`root_path`. This is the
    #: path used for routing within the application. Always begins
    #: with a single leading slash.
    self.path = "/" + path.lstrip("/")
    #: The part of the URL after the "?". This is the raw value, use
    #: :attr:`args` for the parsed values.
    self.query_string = query_string
    #: The headers received with the request.
    self.headers = headers
    #: The address of the client sending the request.
    self.remote_addr = remote_addr
def __repr__(self) -> str:
    """Debug representation showing the URL and method."""
    # Building the URL touches several properties and can fail on a
    # partially-constructed request; fall back to a diagnostic placeholder.
    try:
        url = self.url
    except Exception as e:
        url = f"(invalid URL: {e})"
    return f"<{type(self).__name__} {url!r} [{self.method}]>"
@property
def url_charset(self) -> str:
    """The charset that is assumed for URLs. Defaults to the value
    of :attr:`charset`.

    Subclasses may override :attr:`charset`; URLs follow it by default.

    .. versionadded:: 0.6
    """
    return self.charset
@cached_property
def args(self) -> "MultiDict[str, str]":
    """The parsed URL parameters (the part in the URL after the question
    mark).

    By default an
    :class:`~werkzeug.datastructures.ImmutableMultiDict`
    is returned from this function. This can be changed by setting
    :attr:`parameter_storage_class` to a different type. This might
    be necessary if the order of the form data is important.
    """
    # Decoded lazily and cached; the raw bytes stay in `query_string`.
    return url_decode(
        self.query_string,
        self.url_charset,
        errors=self.encoding_errors,
        cls=self.parameter_storage_class,
    )
@cached_property
def access_route(self) -> t.List[str]:
    """If a forwarded header exists this is a list of all ip addresses
    from the client ip to the last proxy server.

    Note: ``X-Forwarded-For`` is client-controlled and may be spoofed
    unless a trusted proxy sets it.
    """
    if "X-Forwarded-For" in self.headers:
        return self.list_storage_class(
            parse_list_header(self.headers["X-Forwarded-For"])
        )
    elif self.remote_addr is not None:
        return self.list_storage_class([self.remote_addr])
    # No forwarding info and no peer address known.
    return self.list_storage_class()
@cached_property
def full_path(self) -> str:
    """Requested path, including the query string."""
    query = _to_str(self.query_string, self.url_charset)
    return f"{self.path}?{query}"
@property
def is_secure(self) -> bool:
    """``True`` if the request was made with a secure protocol
    (HTTPS or WSS).
    """
    # Covers both HTTP (https) and WebSocket (wss) secure schemes.
    return self.scheme in {"https", "wss"}
@cached_property
def url(self) -> str:
    """The full request URL with the scheme, host, root path, path,
    and query string."""
    # All components included; see base_url/root_url/host_url for prefixes.
    return get_current_url(
        self.scheme, self.host, self.root_path, self.path, self.query_string
    )
@cached_property
def base_url(self) -> str:
    """Like :attr:`url` but without the query string."""
    # Omitting query_string drops the "?..." suffix.
    return get_current_url(self.scheme, self.host, self.root_path, self.path)
@cached_property
def root_url(self) -> str:
    """The request URL scheme, host, and root path. This is the root
    that the application is accessed from.
    """
    # Omitting path keeps only the application mount point.
    return get_current_url(self.scheme, self.host, self.root_path)
@cached_property
def host_url(self) -> str:
    """The request URL scheme and host only."""
    # Shortest URL prefix: "scheme://host/".
    return get_current_url(self.scheme, self.host)
@cached_property
def host(self) -> str:
    """The host name the request was made to, including the port if
    it's non-standard. Validated with :attr:`trusted_hosts`.
    """
    # Resolution order (inside get_host): Host header, then server address.
    return get_host(
        self.scheme, self.headers.get("host"), self.server, self.trusted_hosts
    )
@cached_property
def cookies(self) -> "ImmutableMultiDict[str, str]":
    """A :class:`dict` with the contents of all cookies transmitted with
    the request."""
    # Multiple Cookie headers are legal; join them before parsing.
    wsgi_combined_cookie = ";".join(self.headers.getlist("Cookie"))
    return parse_cookie(  # type: ignore
        wsgi_combined_cookie,
        self.charset,
        self.encoding_errors,
        cls=self.dict_storage_class,
    )
# Common Descriptors
content_type = header_property[str](
"Content-Type",
doc="""The Content-Type entity-header field indicates the media
type of the entity-body sent to the recipient or, in the case of
the HEAD method, the media type that would have been sent had
the request been a GET.""",
read_only=True,
)
@cached_property
def content_length(self) -> t.Optional[int]:
    """The Content-Length entity-header field indicates the size of the
    entity-body in bytes or, in the case of the HEAD method, the size of
    the entity-body that would have been sent had the request been a
    GET.

    Returns ``None`` for chunked transfer encoding or a missing/invalid
    header value.
    """
    # A chunked body has no meaningful Content-Length.
    if self.headers.get("Transfer-Encoding", "") == "chunked":
        return None
    raw_value = self.headers.get("Content-Length")
    if raw_value is None:
        return None
    try:
        parsed = int(raw_value)
    except (ValueError, TypeError):
        # Malformed header: treat as absent.
        return None
    return max(0, parsed)
content_encoding = header_property[str](
"Content-Encoding",
doc="""The Content-Encoding entity-header field is used as a
modifier to the media-type. When present, its value indicates
what additional content codings have been applied to the
entity-body, and thus what decoding mechanisms must be applied
in order to obtain the media-type referenced by the Content-Type
header field.
.. versionadded:: 0.9""",
read_only=True,
)
content_md5 = header_property[str](
"Content-MD5",
doc="""The Content-MD5 entity-header field, as defined in
RFC 1864, is an MD5 digest of the entity-body for the purpose of
providing an end-to-end message integrity check (MIC) of the
entity-body. (Note: a MIC is good for detecting accidental
modification of the entity-body in transit, but is not proof
against malicious attacks.)
.. versionadded:: 0.9""",
read_only=True,
)
referrer = header_property[str](
"Referer",
doc="""The Referer[sic] request-header field allows the client
to specify, for the server's benefit, the address (URI) of the
resource from which the Request-URI was obtained (the
"referrer", although the header field is misspelled).""",
read_only=True,
)
date = header_property(
"Date",
None,
parse_date,
doc="""The Date general-header field represents the date and
time at which the message was originated, having the same
semantics as orig-date in RFC 822.
.. versionchanged:: 2.0
The datetime object is timezone-aware.
""",
read_only=True,
)
max_forwards = header_property(
"Max-Forwards",
None,
int,
doc="""The Max-Forwards request-header field provides a
mechanism with the TRACE and OPTIONS methods to limit the number
of proxies or gateways that can forward the request to the next
inbound server.""",
read_only=True,
)
def _parse_content_type(self) -> None:
    """Parse and memoize the ``Content-Type`` header on first use."""
    # Cached on the instance so mimetype/mimetype_params share one parse.
    if not hasattr(self, "_parsed_content_type"):
        self._parsed_content_type = parse_options_header(
            self.headers.get("Content-Type", "")
        )
@property
def mimetype(self) -> str:
    """Like :attr:`content_type`, but without parameters (eg, without
    charset, type etc.) and always lowercase. For example if the content
    type is ``text/HTML; charset=utf-8`` the mimetype would be
    ``'text/html'``.
    """
    self._parse_content_type()
    # Index 0 of the parsed pair is the bare "type/subtype" string.
    return self._parsed_content_type[0].lower()
@property
def mimetype_params(self) -> t.Dict[str, str]:
    """The mimetype parameters as dict. For example if the content
    type is ``text/html; charset=utf-8`` the params would be
    ``{'charset': 'utf-8'}``.
    """
    self._parse_content_type()
    # Index 1 of the parsed pair is the parameter dict.
    return self._parsed_content_type[1]
@cached_property
def pragma(self) -> HeaderSet:
    """The Pragma general-header field is used to include
    implementation-specific directives that might apply to any recipient
    along the request/response chain. All pragma directives specify
    optional behavior from the viewpoint of the protocol; however, some
    systems MAY require that behavior be consistent with the directives.
    """
    # Parsed into a HeaderSet of directive tokens.
    return parse_set_header(self.headers.get("Pragma", ""))
# Accept
# Content-negotiation headers, each parsed into an Accept subclass.

@cached_property
def accept_mimetypes(self) -> MIMEAccept:
    """List of mimetypes this client supports as
    :class:`~werkzeug.datastructures.MIMEAccept` object.
    """
    return parse_accept_header(self.headers.get("Accept"), MIMEAccept)
@cached_property
def accept_charsets(self) -> CharsetAccept:
    """List of charsets this client supports as
    :class:`~werkzeug.datastructures.CharsetAccept` object.
    """
    return parse_accept_header(self.headers.get("Accept-Charset"), CharsetAccept)
@cached_property
def accept_encodings(self) -> Accept:
    """List of encodings this client accepts. Encodings in a HTTP term
    are compression encodings such as gzip. For charsets have a look at
    :attr:`accept_charset`.
    """
    # Plain Accept object; no specialized subclass needed for encodings.
    return parse_accept_header(self.headers.get("Accept-Encoding"))
@cached_property
def accept_languages(self) -> LanguageAccept:
"""List of languages this client accepts as
:class:`~werkzeug.datastructures.LanguageAccept` object.
.. versionchanged 0.5
In previous versions this was a regular
:class:`~werkzeug.datastructures.Accept` object.
"""
return parse_accept_header(self.headers.get("Accept-Language"), LanguageAccept)
# ETag
@cached_property
def cache_control(self) -> RequestCacheControl:
"""A :class:`~werkzeug.datastructures.RequestCacheControl` object
for the incoming cache control headers.
"""
cache_control = self.headers.get("Cache-Control")
return parse_cache_control_header(cache_control, None, RequestCacheControl)
@cached_property
def if_match(self) -> ETags:
"""An object containing all the etags in the `If-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.headers.get("If-Match"))
@cached_property
def if_none_match(self) -> ETags:
"""An object containing all the etags in the `If-None-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.headers.get("If-None-Match"))
@cached_property
def if_modified_since(self) -> t.Optional[datetime]:
"""The parsed `If-Modified-Since` header as a datetime object.
.. versionchanged:: 2.0
The datetime object is timezone-aware.
"""
return parse_date(self.headers.get("If-Modified-Since"))
@cached_property
def if_unmodified_since(self) -> t.Optional[datetime]:
"""The parsed `If-Unmodified-Since` header as a datetime object.
.. versionchanged:: 2.0
The datetime object is timezone-aware.
"""
return parse_date(self.headers.get("If-Unmodified-Since"))
@cached_property
def if_range(self) -> IfRange:
"""The parsed ``If-Range`` header.
.. versionchanged:: 2.0
``IfRange.date`` is timezone-aware.
.. versionadded:: 0.7
"""
return parse_if_range_header(self.headers.get("If-Range"))
@cached_property
def range(self) -> t.Optional[Range]:
"""The parsed `Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.Range`
"""
return parse_range_header(self.headers.get("Range"))
# User Agent
@cached_property
def user_agent(self) -> UserAgent:
"""The user agent. Use ``user_agent.string`` to get the header
value. Set :attr:`user_agent_class` to a subclass of
:class:`~werkzeug.user_agent.UserAgent` to provide parsing for
the other properties or other extended data.
.. versionchanged:: 2.0
The built in parser is deprecated and will be removed in
Werkzeug 2.1. A ``UserAgent`` subclass must be set to parse
data from the string.
"""
return self.user_agent_class(self.headers.get("User-Agent", ""))
# Authorization
@cached_property
def authorization(self) -> t.Optional[Authorization]:
"""The `Authorization` object in parsed form."""
return parse_authorization_header(self.headers.get("Authorization"))
# CORS
origin = header_property[str](
"Origin",
doc=(
"The host that the request originated from. Set"
" :attr:`~CORSResponseMixin.access_control_allow_origin` on"
" the response to indicate which origins are allowed."
),
read_only=True,
)
access_control_request_headers = header_property(
"Access-Control-Request-Headers",
load_func=parse_set_header,
doc=(
"Sent with a preflight request to indicate which headers"
" will be sent with the cross origin request. Set"
" :attr:`~CORSResponseMixin.access_control_allow_headers`"
" on the response to indicate which headers are allowed."
),
read_only=True,
)
access_control_request_method = header_property[str](
"Access-Control-Request-Method",
doc=(
"Sent with a preflight request to indicate which method"
" will be used for the cross origin request. Set"
" :attr:`~CORSResponseMixin.access_control_allow_methods`"
" on the response to indicate which methods are allowed."
),
read_only=True,
)
@property
def is_json(self) -> bool:
"""Check if the mimetype indicates JSON data, either
:mimetype:`application/json` or :mimetype:`application/*+json`.
"""
mt = self.mimetype
return (
mt == "application/json"
or mt.startswith("application/")
and mt.endswith("+json")
)
| [
"dillon.allen.92@gmail.com"
] | dillon.allen.92@gmail.com |
ea780519562cd70c7534e0046b792df7a61f53bb | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/scikit-image-0.12.3-np110py27_0/lib/python2.7/site-packages/skimage/segmentation/tests/test_quickshift.py | 21dbe8eaab30c11172730b52c6e9c8cdb2b3a251 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 1,760 | py | import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from nose.tools import assert_true
from skimage._shared.testing import assert_greater, test_parallel
from skimage.segmentation import quickshift
@test_parallel()
def test_grey():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21))
img[:10, 10:] = 0.2
img[10:, :10] = 0.4
img[10:, 10:] = 0.6
img += 0.1 * rnd.normal(size=img.shape)
seg = quickshift(img, kernel_size=2, max_dist=3, random_seed=0,
convert2lab=False, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
# that mostly respect the 4 regions:
for i in range(4):
hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
assert_greater(hist[i], 20)
def test_color():
rnd = np.random.RandomState(0)
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
img += 0.01 * rnd.normal(size=img.shape)
img[img > 1] = 1
img[img < 0] = 0
seg = quickshift(img, random_seed=0, max_dist=30, kernel_size=10, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 1)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 0)
assert_array_equal(seg[10:, 10:], 3)
seg2 = quickshift(img, kernel_size=1, max_dist=2, random_seed=0,
convert2lab=False, sigma=0)
# very oversegmented:
assert_equal(len(np.unique(seg2)), 7)
# still don't cross lines
assert_true((seg2[9, :] != seg2[10, :]).all())
assert_true((seg2[:, 9] != seg2[:, 10]).all())
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com |
aa4128f0e18260df3f78b97a5ee21fbabca3e628 | a29bc741d292d5f1d85f7eaf2d69148077d33d0e | /cleancoders/entities/entity.py | 1f351d5a4315cad16286ceaf4f9cc5e6e0acfcaa | [] | no_license | xstrengthofonex/cleancoders | a919692dfc81f9ed39521d4f0cb279d1ac5cde44 | ba1349f811295ef9dda6a8b5e950d834976528d8 | refs/heads/master | 2020-07-26T09:47:10.826517 | 2019-09-15T17:52:59 | 2019-09-15T17:52:59 | 208,607,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from __future__ import annotations
from dataclasses import dataclass
@dataclass
class Entity:
id: str
| [
"xstrengthofonex@gmail.com"
] | xstrengthofonex@gmail.com |
d582dbffd90140f90a648bfa5407b2d45d6eb998 | aa01560e68a07033d4b24c4770966771349e2b4f | /src/jobs/migrations/0007_jobapplication_timestamp.py | 4d5d3057c72f090b51b9eeaba9de95a481cbf144 | [] | no_license | fluffcoding/solitaireHR | a0a357e1b19b955caae8df11ca92188cad79e217 | b97a29f9accc5b45cd62986b62673a6ba802771b | refs/heads/main | 2023-04-05T11:46:41.855323 | 2021-04-26T04:57:27 | 2021-04-26T04:57:27 | 322,067,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # Generated by Django 3.1.2 on 2020-12-09 15:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0006_auto_20201209_1527'),
]
operations = [
migrations.AddField(
model_name='jobapplication',
name='timestamp',
field=models.DateTimeField(auto_now_add=True, null=True),
),
]
| [
"fluffcoding@gmail.com"
] | fluffcoding@gmail.com |
3157e2ee8eb20e93eacbac32b7d5b25a09afd0f8 | aa9f8d7b48dbe3cbecca5eaa2ad3bbea262dbf24 | /preprocess_tweets.py | afa800638256beb34c242c6a4bccc1bca1796074 | [] | no_license | ssinayoko/Pyhton_Cours | 5381a98c42cba021f34b482776933accd3442a6c | 56b391aeb673b40b564c59053295ac68e2576a1c | refs/heads/master | 2020-08-30T13:32:59.662715 | 2019-10-25T12:50:25 | 2019-10-25T12:50:25 | 218,395,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py |
import re
with open("tweets.csv", "w", encoding="utf-8") as result:
with open("stockerbot-export.csv", "r", encoding="utf-8") as f:
lines = f.readlines()
res_lines = []
for line in lines:
if ",http" in line:
line = line[:20] + line[20:].replace(",http", ',"http')\
.replace(",True",'",True')\
.replace(",False",'",False')
res_lines.append(line)
result.writelines(res_lines)
# try:
# f = open("stockerbot-export.csv", "r", encoding="utf-8")
# except:
# pass
# finally:
# f.close()
| [
"mickael.bolnet@gmail.com"
] | mickael.bolnet@gmail.com |
d43ae73fa9193326ebf2db488e5cf9323da92c95 | 1904c201d353118f319586d7ed6827b4bbefa0fe | /gcg/exceptions.py | b32edae34ed4afac8ecb74b1b3119f3fa06f2d23 | [] | no_license | byt3-m3/lab_conf_generator | aa9c023c6223869d41d5cf4d5a1d072f3cd8db3c | eb79ca8cb6f6bb45b44605660e70d0585968c598 | refs/heads/master | 2022-12-20T13:42:38.219334 | 2020-10-14T12:55:46 | 2020-10-14T12:55:46 | 225,886,211 | 0 | 1 | null | 2022-12-08T09:54:37 | 2019-12-04T14:28:38 | Python | UTF-8 | Python | false | false | 416 | py | class InvalidTypeError(BaseException):
"""
Used to represent an Invalid Type
"""
class LCGSchemaValidationError(BaseException):
"""
Used to represent an Schema Validation Error
"""
class GCGValidationError(BaseException):
"""
Used to represent an Schema Validation Error
"""
class GCGError(BaseException):
"""
Used to represent an basic GCG Error
"""
| [
"cbaxtertech@gmail.com"
] | cbaxtertech@gmail.com |
849e328890d84d7e0fef4cd4f87c9db2a4533ae8 | 4e33bfc70b0fa0a07ff00960406281ec04044fbc | /task_server/test.py | 8120b975f2822afddaf7102c04169067ffe3ca9b | [] | no_license | JX-Wang/NS_Server_Kafka | 53e22cb7858c20e2ad6fa970ba8b2a3011e7652f | 59f1270084c1cab3736bdab16f17b64cf04722fa | refs/heads/master | 2022-01-24T05:12:19.276923 | 2019-08-04T13:33:36 | 2019-08-04T13:33:36 | 197,998,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | # from confluent_kafka import Consumer, KafkaError
#
# mybroker = "10.245.146.221:9092"
#
# c = Consumer({
# 'bootstrap.servers': mybroker,
# 'group.id': '1',
# 'default.topic.config': {
# 'auto.offset.reset': 'smallest'
# }
# })
#
# c.subscribe(['test'])
#
# while True:
# msg = c.poll()
#
# if msg is None:
# print "ops"
# continue
# if msg.error():
# if msg.error().code() == KafkaError._PARTITION_EOF:
# continue
# else:
# print(msg.error())
# break
#
# print('Received message: {}'.format(msg.value().decode('utf-8')))
#
# c.close()
from confluent_kafka import Producer
p = Producer({'bootstrap.servers': '10.245.146.221:9092'})
def delivery_report(err, msg):
if err is not None:
print('Message delivery failed: {}'.format(err))
else:
print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
data = "XXXX"
p.produce('test', data.encode('utf-8'), callback=delivery_report)
p.poll(10)
p.flush()
| [
"1411349759@qq.com"
] | 1411349759@qq.com |
058851f99bfd8b50b0aac5390fb417b484e5be27 | 1b6a460652f5b43e42614d63c26a57b4e14b6360 | /tests/vm/primitives/test_mirror.py | 5630c176666b04ef700b94d7df85b77fd2a521c6 | [] | no_license | cfbolz/tinySelf | 68800b2782f998f46b7950a688a6a0ec663c661b | f20b38aaaa85b4be89cf1fc2ebc890cd1e33c62b | refs/heads/master | 2020-05-09T10:02:05.385599 | 2019-04-12T15:12:23 | 2019-04-12T15:15:50 | 181,025,576 | 0 | 0 | null | 2019-04-12T14:35:12 | 2019-04-12T14:35:11 | null | UTF-8 | Python | false | false | 619 | py | # -*- coding: utf-8 -*-
from tinySelf.vm.primitives import Mirror
from tinySelf.vm.primitives import PrimitiveIntObject
from tinySelf.vm.primitives import PrimitiveStrObject
from tinySelf.vm.object_layout import Object
def test_mirror():
o = Object()
m = Mirror(o)
assert not o.slot_lookup("v")
add_primitive = m.slot_lookup("toSlot:Add:")
assert add_primitive.map.primitive_code
result = add_primitive.map.primitive_code(
None,
m,
[PrimitiveStrObject("v"), PrimitiveIntObject(1)]
)
assert result == o
assert o.slot_lookup("v") == PrimitiveIntObject(1)
| [
"bystrousak@kitakitsune.org"
] | bystrousak@kitakitsune.org |
46b800034adec043d313b5378e4f031082b04f5b | 78d5a6e0846cb6b03544e4f717651ca59dfc620c | /treasury-admin/cashflow/migrations/0014_auto_20180509_1721.py | b65d15269a321a05a67752040eeb929833d9dfb9 | [] | no_license | bsca-bank/treasury-admin | 8952788a9a6e25a1c59aae0a35bbee357d94e685 | 5167d6c4517028856701066dd5ed6ac9534a9151 | refs/heads/master | 2023-02-05T12:45:52.945279 | 2020-12-13T08:07:41 | 2020-12-13T08:07:41 | 320,323,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-09 16:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cashflow', '0013_delete_cashflowdetailtwowayflowproxy'),
]
operations = [
migrations.AlterField(
model_name='cashflowdetail',
name='account',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tiers.AccountCorresp'),
),
]
| [
"cn.makodo@gmail.com"
] | cn.makodo@gmail.com |
8283511947571039bbb770029f9d366f2c907ae6 | f47e6aab3e58a1cc12e390492fe982ab1da9aa28 | /modoboa/core/__init__.py | cc91ec6c11ea9816f34bd1f1a699c6c4f799d4c2 | [
"ISC"
] | permissive | kaxdev/modoboa | 7e80b5496f362adf5c503588cc5680a894e365eb | d5f2652a5f957a0d68043f063b8c530dbf76ebf6 | refs/heads/master | 2021-01-12T21:45:59.003052 | 2015-04-10T07:16:58 | 2015-04-10T07:16:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | py | import os
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.lib import parameters, events
BASE_EVENTS = [
"CanCreate",
"AccountCreated",
"AccountAutoCreated",
"AccountModified",
"AccountDeleted",
"AccountExported",
"AccountImported",
"PasswordUpdated",
"ExtraAccountActions",
"RoleChanged",
"GetExtraRoles",
"GetExtraRolePermissions",
"PasswordChange",
"UserCanSetRole",
"InitialDataLoaded",
"UserMenuDisplay",
"AdminMenuDisplay",
"GetStaticContent",
"UserLogin",
"UserLogout",
"GetAnnouncement",
"TopNotifications",
"ExtraAdminContent",
"ExtraUprefsRoutes",
"ExtraUprefsJS",
"GetExtraParameters",
"ExtraFormFields",
"SaveExtraFormFields",
]
PERMISSIONS = {
"DomainAdmins": [
["core", "user", "add_user"],
["core", "user", "change_user"],
["core", "user", "delete_user"],
],
"SimpleUsers": []
}
def load_core_settings():
"""Load core settings.
This function must be manually called (see :file:`urls.py`) in
order to load base settings.
"""
from modoboa.core.app_settings import GeneralParametersForm, UserSettings
parameters.register(GeneralParametersForm, ugettext_lazy("General"))
parameters.register(UserSettings, ugettext_lazy("General"))
events.declare(BASE_EVENTS)
@events.observe("TopNotifications")
def check_for_new_version(request, include_all):
"""
Check if a new version of Modoboa is available.
"""
from modoboa.core.utils import new_version_available
if not request.user.is_superuser:
return []
if new_version_available(request) is None:
return [{"id": "newversionavailable"}] if include_all else []
return [{
"id": "newversionavailable",
"url": reverse("core:index") + "#info/",
"text": _("New Modoboa version available"),
"level": "info",
}]
| [
"tonio@ngyn.org"
] | tonio@ngyn.org |
3b630469b7e080681f9dcfbc22de9de397c47abe | 90afc972b2259054e7cc9b63ec19bf11c3153e48 | /problems/B/KanaAndDragonQuestGame.py | fa2b220b68f40f19c967dba57ce00ca8c6361ddf | [
"MIT"
] | permissive | Ahsanhabib1080/CodeForces | 88ca768ceefa409b0c10cac500148bfaf19e3c7e | 707b374f03012ec68054841f791d48b33ae4ef1b | refs/heads/master | 2023-05-26T20:58:23.180369 | 2021-06-19T02:40:15 | 2021-06-19T02:40:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | __author__ = 'Devesh Bajpai'
'''
https://codeforces.com/contest/1337/problem/B
Solution: The need is decide the ordering of spells.
Void Absorption: VA : x/2 + 10
Lightening Strikes: LS: x - 10
if we do LS and then VA: (x-10)/2 + 10 = x/2 - 5 + 10 = x/2 + 5
if we do VA and then LS: x/2 + 10 - 10 = x/2
Hence we would want to do all the VAs till doing that actually reduces x. e.g. imagine x = 2.
Then x/2 + 10 = 11. Then in second round it becomes 11/2 + 10 = 15. So it stars growing. That is when
we need to employ LS. So once the VAs are over, we need to check we have enough LS to bring x to 0 or less.
This means x <= m * 10. Return the decision accordingly.
'''
def solve(x, n, m):
while x > 0 and n > 0 and x/2 + 10 < x:
if n > 0:
x = x/2 + 10
n -= 1
return "YES" if x <= m * 10 else "NO"
if __name__ == "__main__":
t = int(raw_input())
results = list()
for _ in xrange(0, t):
x, n, m = map(int, raw_input().split(" "))
results.append(solve(x, n, m))
for result in results:
print result
| [
"devesh.bajpai19@gmail.com"
] | devesh.bajpai19@gmail.com |
e42b34cbc4a94970b7b097398f5ea03951ccfa57 | f1190541e28b38cb951c691a45d6f3a7f784ca67 | /DocumentFeatureSelection/common/data_converter.py | b87bf2892cac77d15af73c25236ceb1e8696136b | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | chandupentela72/DocumentFeatureSelection | ac5b03a6f2fbaab68ddae5d6f2b00478ab6238bf | 799493e2d92d6deca13f894737db01a0826fd14f | refs/heads/master | 2020-12-15T05:50:05.715637 | 2018-10-24T16:12:17 | 2018-10-24T16:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,749 | py | #! -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from DocumentFeatureSelection.common import utils, func_data_converter
from DocumentFeatureSelection.models import DataCsrMatrix, AvailableInputTypes, PersistentDict
from DocumentFeatureSelection.init_logger import logger
from sqlitedict import SqliteDict
import sys
import numpy
import tempfile
import json
from typing import Dict, List, Tuple, Any, Union
python_version = sys.version_info
__author__ = 'kensuke-mi'
class DataConverter(object):
"""This class is for converting data type from dict-object into DataCsrMatrix-object which saves information of matrix.
"""
def __init__(self):
# for keeping old version
self.labeledMultiDocs2TermFreqMatrix = self.convert_multi_docs2term_frequency_matrix
self.labeledMultiDocs2DocFreqMatrix = self.convert_multi_docs2document_frequency_matrix
def count_term_frequency_distribution(self, labeled_documents:AvailableInputTypes, label2id:Dict[str,int]):
"""Count term-distribution per label.
"""
assert isinstance(labeled_documents, (SqliteDict, dict))
assert isinstance(label2id, dict)
# count total term-frequency per label
term_frequency_distribution = {
label: len(list(utils.flatten(document_lists)))
for label, document_lists
in labeled_documents.items()
}
# make list of distribution
term_frequency_distribution_list = [0] * len(labeled_documents)
for label_string, n_doc in term_frequency_distribution.items():
#term_index = label2id[numpy.where(label2id['key'] == label_string.encode('utf-8'))][0]['value']
term_index = label2id[label_string]
term_frequency_distribution_list[term_index] = n_doc
return numpy.array(term_frequency_distribution_list, dtype='i8')
def count_document_distribution(self, labeled_documents:AvailableInputTypes, label2id:Dict[str,int])->numpy.ndarray:
"""This method count n(docs) per label.
"""
assert isinstance(labeled_documents, (SqliteDict, dict))
assert isinstance(label2id, dict)
# count n(docs) per label
n_doc_distribution = {
label: len(document_lists)
for label, document_lists
in labeled_documents.items()
}
# make list of distribution
n_doc_distribution_list = [0] * len(labeled_documents)
for label_string, n_doc in n_doc_distribution.items():
#docs_index = label2id[numpy.where(label2id['key'] == label_string.encode('utf-8'))][0]['value']
docs_index = label2id[label_string]
n_doc_distribution_list[docs_index] = n_doc
return numpy.array(n_doc_distribution_list, dtype='i8')
def __make_feature_object2json_string(self, seq_feature_in_doc:List[Union[str,List[str],Tuple[str,...]]])->List[str]:
"""Sub-method of make_feature_object2json_string()"""
replaced_seq_feature_in_doc = [None] * len(seq_feature_in_doc) # type: List[str]
for i, feature_object in enumerate(seq_feature_in_doc):
if isinstance(feature_object, str):
replaced_seq_feature_in_doc[i] = json.dumps(tuple([feature_object]), ensure_ascii=False)
elif isinstance(feature_object, (tuple, list)):
replaced_seq_feature_in_doc[i] = json.dumps(feature_object, ensure_ascii=False)
else:
raise Exception("feature type must be either of str,list,tuple. Detected={}".format(type(feature_object)))
else:
return replaced_seq_feature_in_doc
def make_feature_object2json_string(self, labeled_document:AvailableInputTypes)->Dict[str,AvailableInputTypes]:
"""* What u can do
- This function converts feature-object in sequence object into json string.
- This function make every object into json string.
- string object -> json array which has one string. Ex. "feature" -> '["feature"]'
- list object -> json array. Ex. ["feature", "feature"] -> '["feature", "feature"]'
- tuple object -> json array. Ex. ("feature", "feature") -> '["feature", "feature"]'
* Parameters
- labeled_document: dict object which has key of 'label-name', and value is 2-dim list of features.
"""
assert isinstance(labeled_document, (dict,PersistentDict,SqliteDict))
replaced_labeled_document = {key: [] for key in labeled_document}
for key, docs_in_label in labeled_document.items():
assert isinstance(docs_in_label, list)
replaced_docs_in_label = [None] * len(docs_in_label)
for i, doc_label in enumerate(docs_in_label):
replaced_docs_in_label[i] = self.__make_feature_object2json_string(doc_label)
else:
replaced_labeled_document[key] = replaced_docs_in_label
else:
return replaced_labeled_document
def convert_multi_docs2term_frequency_matrix(self,
labeled_documents: AvailableInputTypes,
is_use_cache: bool = False,
is_use_memmap: bool = False,
path_working_dir: str = tempfile.mkdtemp(),
cache_backend: str = 'PersistentDict',
n_jobs: int = 1):
"""* What you can do
- This function makes TERM-frequency matrix for TF-IDF calculation.
- TERM-frequency matrix is scipy.csr_matrix.
* Params
- labeled_documents: Dict object which has category-name as key, and list of features as value
- is_use_cache: boolean flag to use disk-drive for keeping objects which tends to be huge.
- path_working_dir: path to directory for saving cache files
"""
labeled_documents = self.make_feature_object2json_string(labeled_documents)
logger.debug(msg='Now pre-processing before CSR matrix')
# convert data structure
set_document_information = func_data_converter.make_multi_docs2term_freq_info(labeled_documents)
# count n(docs) per label
n_docs_distribution = self.count_document_distribution(
labeled_documents=labeled_documents,
label2id=set_document_information.label2id
)
# count term-frequency per label
term_frequency_distribution = self.count_term_frequency_distribution(
labeled_documents=labeled_documents,
label2id=set_document_information.label2id
)
return DataCsrMatrix(
csr_matrix_=set_document_information.matrix_object,
label2id_dict=set_document_information.label2id,
vocabulary=set_document_information.feature2id,
n_docs_distribution=n_docs_distribution,
n_term_freq_distribution=term_frequency_distribution,
is_use_cache=is_use_cache,
is_use_memmap=is_use_memmap,
path_working_dir=path_working_dir,
cache_backend=cache_backend
)
def convert_multi_docs2document_frequency_matrix(self,
labeled_documents:AvailableInputTypes,
is_use_cache:bool=False,
is_use_memmap:bool=False,
path_working_dir:str=None,
n_jobs:int=1)->DataCsrMatrix:
"""This function makes document-frequency matrix. Document-frequency matrix is scipy.csr_matrix.
* Input object
- "labeled_structure" is either of Dict object or shelve.DbfilenameShelf. The example format is below
>>> {"label_a": [["I", "aa", "aa", "aa", "aa", "aa"],["bb", "aa", "aa", "aa", "aa", "aa"],["I", "aa", "hero", "some", "ok", "aa"]],
>>> "label_b": [["bb", "bb", "bb"],["bb", "bb", "bb"],["hero", "ok", "bb"],["hero", "cc", "bb"],],
>>> "label_c": [["cc", "cc", "cc"],["cc", "cc", "bb"],["xx", "xx", "cc"],["aa", "xx", "cc"],]}
* Output
- DataCsrMatrix object.
"""
labeled_documents = self.make_feature_object2json_string(labeled_documents)
logger.debug(msg='Now pre-processing before CSR matrix')
# convert data structure
set_document_information = func_data_converter.make_multi_docs2doc_freq_info(labeled_documents,n_jobs=n_jobs)
assert isinstance(set_document_information, func_data_converter.SetDocumentInformation)
# count n(docs) per label
n_docs_distribution = self.count_document_distribution(
labeled_documents=labeled_documents,
label2id=set_document_information.label2id
)
# count term-frequency per label
term_frequency_distribution = self.count_term_frequency_distribution(
labeled_documents=labeled_documents,
label2id=set_document_information.label2id
)
return DataCsrMatrix(
csr_matrix_=set_document_information.matrix_object,
label2id_dict=set_document_information.label2id,
vocabulary=set_document_information.feature2id,
n_docs_distribution=n_docs_distribution,
n_term_freq_distribution=term_frequency_distribution,
is_use_cache=is_use_cache,
is_use_memmap=is_use_memmap,
path_working_dir=path_working_dir
)
| [
"kensuke.mit@gmail.com"
] | kensuke.mit@gmail.com |
851d21e0d75f1d35f412ced736f15966338ad668 | 8e138461e9ed8e36245965e215685ce978742535 | /qiskit/qpy/__init__.py | 2ede88a790eb89f245a688dbfb032e068abee1c2 | [
"Apache-2.0"
] | permissive | faraimazh/qiskit-terra | 15d8c378114ee109f7b757a7d3795b4c9079c0a8 | 11c2e3ed89452cb6487db784c17c68a8a6284a57 | refs/heads/master | 2023-03-16T11:31:27.071954 | 2022-09-27T00:33:02 | 2022-09-27T00:33:02 | 220,650,207 | 0 | 0 | Apache-2.0 | 2023-03-06T18:13:26 | 2019-11-09T13:59:40 | Python | UTF-8 | Python | false | false | 21,087 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
###########################################################
QPY serialization (:mod:`qiskit.qpy`)
###########################################################
.. currentmodule:: qiskit.qpy
*********
Using QPY
*********
Using QPY is defined to be straightforward and mirror the user API of the
serializers in Python's standard library, ``pickle`` and ``json``. There are
2 user facing functions: :func:`qiskit.circuit.qpy_serialization.dump` and
:func:`qiskit.circuit.qpy_serialization.load` which are used to dump QPY data
to a file object and load circuits from QPY data in a file object respectively.
For example::
from qiskit.circuit import QuantumCircuit
from qiskit.circuit import qpy_serialization
qc = QuantumCircuit(2, name='Bell', metadata={'test': True})
qc.h(0)
qc.cx(0, 1)
qc.measure_all()
with open('bell.qpy', 'wb') as fd:
qpy_serialization.dump(qc, fd)
with open('bell.qpy', 'rb') as fd:
new_qc = qpy_serialization.load(fd)[0]
API documentation
=================
.. autosummary::
:toctree: ../stubs/
load
dump
QPY Compatibility
=================
The QPY format is designed to be backwards compatible moving forward. This means
you should be able to load a QPY with any newer Qiskit version than the one
that generated it. However, loading a QPY file with an older Qiskit version is
not supported and may not work.
For example, if you generated a QPY file using qiskit-terra 0.18.1 you could
load that QPY file with qiskit-terra 0.19.0 and a hypothetical qiskit-terra
0.29.0. However, loading that QPY file with 0.18.0 is not supported and may not
work.
**********
QPY Format
**********
The QPY serialization format is a portable cross-platform binary
serialization format for :class:`~qiskit.circuit.QuantumCircuit` objects in Qiskit. The basic
file format is as follows:
A QPY file (or memory object) always starts with the following 7
byte UTF8 string: ``QISKIT`` which is immediately followed by the overall
file header. The contents of the file header as defined as a C struct are:
.. code-block:: c
struct {
uint8_t qpy_version;
uint8_t qiskit_major_version;
uint8_t qiskit_minor_version;
uint8_t qiskit_patch_version;
uint64_t num_circuits;
}
All values use network byte order [#f1]_ (big endian) for cross platform
compatibility.
The file header is immediately followed by the circuit payloads.
Each individual circuit is composed of the following parts:
``HEADER | METADATA | REGISTERS | CUSTOM_DEFINITIONS | INSTRUCTIONS``
There is a circuit payload for each circuit (where the total number is dictated
by ``num_circuits`` in the file header). There is no padding between the
circuits in the data.
.. _qpy_version_4:
Version 4
=========
Version 4 is identical to :ref:`qpy_version_3` except that it adds 2 new type strings
to the INSTRUCTION_PARAM struct, ``z`` to represent ``None`` (which is encoded as
no data), ``q`` to represent a :class:`.QuantumCircuit` (which is encoded as
a QPY circuit), ``r`` to represent a ``range`` of integers (which is encoded as
a :ref:`qpy_range_pack`), and ``t`` to represent a ``sequence`` (which is encoded as
defined by :ref:`qpy_sequence`). Additionally, version 4 changes the type of register
index mapping array from ``uint32_t`` to ``int64_t``. If the values of any of the
array elements are negative they represent a register bit that is not present in the
circuit.
The :ref:`qpy_registers` header format has also been updated to
.. code-block:: c
struct {
char type;
_Bool standalone;
uint32_t size;
uint16_t name_size;
_bool in_circuit;
}
which just adds the ``in_circuit`` field which represents whether the register is
part of the circuit or not.
.. _qpy_range_pack:
RANGE
-----
A RANGE is a representation of a ``range`` object. It is defined as:
.. code-block:: c
struct {
int64_t start;
int64_t stop;
int64_t step;
}
.. _qpy_sequence:
SEQUENCE
--------
A SEQUENCE is a reprentation of a arbitrary sequence object. As sequence are just fixed length
containers of arbitrary python objects their QPY can't fully represent any sequence,
but as long as the contents in a sequence are other QPY serializable types for
the INSTRUCTION_PARAM payload the ``sequence`` object can be serialized.
A sequence instruction parameter starts with a header defined as:
.. code-block:: c
struct {
uint64_t size;
}
followed by ``size`` elements that are INSTRUCTION_PARAM payloads, where each of
these define an element in the sequence. The sequence object will be typecasted
into proper type, e.g. ``tuple``, afterwards.
.. _qpy_version_3:
Version 3
=========
Version 3 of the QPY format is identical to :ref:`qpy_version_2` except that it defines
a struct format to represent a :class:`~qiskit.circuit.library.PauliEvolutionGate`
natively in QPY. To accomplish this the :ref:`qpy_custom_definition` struct now supports
a new type value ``'p'`` to represent a :class:`~qiskit.circuit.library.PauliEvolutionGate`.
Enties in the custom instructions tables have unique name generated that start with the
string ``"###PauliEvolutionGate_"`` followed by a uuid string. This gate name is reservered
in QPY and if you have a custom :class:`~qiskit.circuit.Instruction` object with a definition
set and that name prefix it will error. If it's of type ``'p'`` the data payload is defined
as follows:
.. _pauli_evo_qpy:
PAULI_EVOLUTION
---------------
This represents the high level :class:`~qiskit.circuit.library.PauliEvolutionGate`
.. code-block:: c
struct {
uint64_t operator_count;
_Bool standalone_op;
char time_type;
uint64_t time_size;
uint64_t synthesis_size;
}
This is immediately followed by ``operator_count`` elements defined by the :ref:`qpy_pauli_sum_op`
payload. Following that we have ``time_size`` bytes representing the ``time`` attribute. If
``standalone_op`` is ``True`` then there must only be a single operator. The
encoding of these bytes is determined by the value of ``time_type``. Possible values of
``time_type`` are ``'f'``, ``'p'``, and ``'e'``. If ``time_type`` is ``'f'`` it's a double,
``'p'`` defines a :class:`~qiskit.circuit.Parameter` object which is represented by a
:ref:`qpy_param_struct`, ``e`` defines a :class:`~qiskit.circuit.ParameterExpression` object
(that's not a :class:`~qiskit.circuit.Parameter`) which is represented by a :ref:`qpy_param_expr`.
Following that is ``synthesis_size`` bytes which is a utf8 encoded json payload representing
the :class:`.EvolutionSynthesis` class used by the gate.
.. _qpy_pauli_sum_op:
SPARSE_PAULI_OP_LIST_ELEM
-------------------------
This represents an instance of :class:`.PauliSumOp`.
.. code-block:: c
struct {
uint32_t pauli_op_size;
}
which is immediately followed by ``pauli_op_size`` bytes which are .npy format [#f2]_
data which represents the :class:`~qiskit.quantum_info.SparsePauliOp`.
Version 3 of the QPY format also defines a struct format to represent a
:class:`~qiskit.circuit.ParameterVectorElement` as a distinct subclass from
a :class:`~qiskit.circuit.Parameter`. This adds a new parameter type char ``'v'``
to represent a :class:`~qiskit.circuit.ParameterVectorElement` which is now
supported as a type string value for an INSTRUCTION_PARAM. The payload for these
parameters are defined below as :ref:`qpy_param_vector`.
.. _qpy_param_vector:
PARAMETER_VECTOR_ELEMENT
------------------------
A PARAMETER_VECTOR_ELEMENT represents a :class:`~qiskit.circuit.ParameterVectorElement`
object the data for a INSTRUCTION_PARAM. The contents of the PARAMETER_VECTOR_ELEMENT are
defined as:
.. code-block:: c
struct {
uint16_t vector_name_size;
uint64_t vector_size;
char uuid[16];
uint64_t index;
}
which is immediately followed by ``vector_name_size`` utf8 bytes representing
the parameter's vector name.
.. _qpy_param_expr_v3:
PARAMETER_EXPR
--------------
Additionally, since QPY format version v3 distinguishes between a
:class:`~qiskit.circuit.Parameter` and :class:`~qiskit.circuit.ParameterVectorElement`
the payload for a :class:`~qiskit.circuit.ParameterExpression` needs to be updated
to distinguish between the types. The following is the modified payload format
which is mostly identical to the format in Version 1 and :ref:`qpy_version_2` but just
modifies the ``map_elements`` struct to include a symbol type field.
A PARAMETER_EXPR represents a :class:`~qiskit.circuit.ParameterExpression`
object that the data for an INSTRUCTION_PARAM. The contents of a PARAMETER_EXPR
are defined as:
.. code-block:: c
struct {
uint64_t map_elements;
uint64_t expr_size;
}
Immediately following the header is ``expr_size`` bytes of utf8 data containing
the expression string, which is the sympy srepr of the expression for the
parameter expression. Following that is a symbol map which contains
``map_elements`` elements with the format
.. code-block:: c
struct {
char symbol_type;
char type;
uint64_t size;
}
The ``symbol_type`` key determines the payload type of the symbol representation
for the element. If it's ``p`` it represents a :class:`~qiskit.circuit.Parameter`
and if it's ``v`` it represents a :class:`~qiskit.circuit.ParameterVectorElement`.
The map element struct is immediately followed by the symbol map key payload, if
``symbol_type`` is ``p`` then it is followed immediately by a :ref:`qpy_param_struct`
object (both the struct and utf8 name bytes) and if ``symbol_type`` is ``v``
then the struct is immediately followed by :ref:`qpy_param_vector` (both the struct
and utf8 name bytes). That is followed by ``size`` bytes for the
data of the symbol. The data format is dependent on the value of ``type``. If
``type`` is ``p`` then it represents a :class:`~qiskit.circuit.Parameter` and
size will be 0, the value will just be the same as the key. Similarly if the
``type`` is ``v`` then it represents a :class:`~qiskit.circuit.ParameterVectorElement`
and size will be 0 as the value will just be the same as the key. If
``type`` is ``f`` then it represents a double precision float. If ``type`` is
``c`` it represents a double precision complex, which is represented by the
:ref:`qpy_complex`. Finally, if type is ``i`` it represents an integer which is an
``int64_t``.
.. _qpy_version_2:
Version 2
=========
Version 2 of the QPY format is identical to version 1 except for the HEADER
section is slightly different. You can refer to the :ref:`qpy_version_1` section
for the details on the rest of the payload format.
HEADER
------
The contents of HEADER as defined as a C struct are:
.. code-block:: c
struct {
uint16_t name_size;
char global_phase_type;
uint16_t global_phase_size;
uint32_t num_qubits;
uint32_t num_clbits;
uint64_t metadata_size;
uint32_t num_registers;
uint64_t num_instructions;
uint64_t num_custom_gates;
}
This is immediately followed by ``name_size`` bytes of utf8 data for the name
of the circuit. Following this is immediately ``global_phase_size`` bytes
representing the global phase. The content of that data is dictated by the
value of ``global_phase_type``. If it's ``'f'`` the data is a float and is the
size of a ``double``. If it's ``'p'`` defines a :class:`~qiskit.circuit.Parameter`
object which is represented by a PARAM struct (see below), ``e`` defines a
:class:`~qiskit.circuit.ParameterExpression` object (that's not a
:class:`~qiskit.circuit.Parameter`) which is represented by a PARAM_EXPR struct
(see below).
.. _qpy_version_1:
Version 1
=========
HEADER
------
The contents of HEADER as defined as a C struct are:
.. code-block:: c
struct {
uint16_t name_size;
double global_phase;
uint32_t num_qubits;
uint32_t num_clbits;
uint64_t metadata_size;
uint32_t num_registers;
uint64_t num_instructions;
uint64_t num_custom_gates;
}
This is immediately followed by ``name_size`` bytes of utf8 data for the name
of the circuit.
METADATA
--------
The METADATA field is a UTF8 encoded JSON string. After reading the HEADER
(which is a fixed size at the start of the QPY file) and the ``name`` string
you then read the ``metadata_size`` number of bytes and parse the JSON to get
the metadata for the circuit.
.. _qpy_registers:
REGISTERS
---------
The contents of REGISTERS is a number of REGISTER object. If num_registers is
> 0 then after reading METADATA you read that number of REGISTER structs defined
as:
.. code-block:: c
struct {
char type;
_Bool standalone;
uint32_t size;
uint16_t name_size;
}
``type`` can be ``'q'`` or ``'c'``.
Immediately following the REGISTER struct is the utf8 encoded register name of
size ``name_size``. After the ``name`` utf8 bytes there is then an array of
int64_t values of size ``size`` that contains a map of the register's index to
the circuit's qubit index. For example, array element 0's value is the index
of the ``register[0]``'s position in the containing circuit's qubits list.
.. note::
Prior to QPY :ref:`qpy_version_4` the type of array elements was uint32_t. This was changed
to enable negative values which represent bits in the array not present in the
circuit
The standalone boolean determines whether the register is constructed as a
standalone register that was added to the circuit or was created from existing
bits. A register is considered standalone if it has bits constructed solely
as part of it, for example::
qr = QuantumRegister(2)
qc = QuantumCircuit(qr)
the register ``qr`` would be a standalone register. While something like::
bits = [Qubit(), Qubit()]
qr = QuantumRegister(bits=bits)
qc = QuantumCircuit(bits=bits)
``qr`` would have ``standalone`` set to ``False``.
.. _qpy_custom_definition:
CUSTOM_DEFINITIONS
------------------
This section specifies custom definitions for any of the instructions in the circuit.
CUSTOM_DEFINITION_HEADER contents are defined as:
.. code-block:: c
struct {
uint64_t size;
}
If size is greater than 0 that means the circuit contains custom instruction(s).
Each custom instruction is defined with a CUSTOM_INSTRUCTION block defined as:
.. code-block:: c
struct {
uint16_t name_size;
char type;
_Bool custom_definition;
uint64_t size;
}
Immediately following the CUSTOM_INSTRUCTION struct is the utf8 encoded name
of size ``name_size``.
If ``custom_definition`` is ``True`` that means that the immediately following
``size`` bytes contains a QPY circuit data which can be used for the custom
definition of that gate. If ``custom_definition`` is ``False`` then the
instruction can be considered opaque (ie no definition). The ``type`` field
determines what type of object will get created with the custom definition.
If it's ``'g'`` it will be a :class:`~qiskit.circuit.Gate` object, ``'i'``
it will be a :class:`~qiskit.circuit.Instruction` object.
INSTRUCTIONS
------------
The contents of INSTRUCTIONS is a list of INSTRUCTION metadata objects
.. code-block:: c
struct {
uint16_t name_size;
uint16_t label_size;
uint16_t num_parameters;
uint32_t num_qargs;
uint32_t num_cargs;
_Bool has_conditional;
uint16_t conditional_reg_name_size;
int64_t conditional_value;
}
This metadata object is immediately followed by ``name_size`` bytes of utf8 bytes
for the ``name``. ``name`` here is the Qiskit class name for the Instruction
class if it's defined in Qiskit. Otherwise it falls back to the custom
instruction name. Following the ``name`` bytes there are ``label_size`` bytes of
utf8 data for the label if one was set on the instruction. Following the label
bytes if ``has_conditional`` is ``True`` then there are
``conditional_reg_name_size`` bytes of utf8 data for the name of the conditional
register name. In case of single classical bit conditions the register name
utf8 data will be prefixed with a null character "\\x00" and then a utf8 string
integer representing the classical bit index in the circuit that the condition
is on.
This is immediately followed by the INSTRUCTION_ARG structs for the list of
arguments of that instruction. These are in the order of all quantum arguments
(there are num_qargs of these) followed by all classical arguments (num_cargs
of these).
The contents of each INSTRUCTION_ARG is:
.. code-block:: c
struct {
char type;
uint32_t index;
}
``type`` can be ``'q'`` or ``'c'``.
After all arguments for an instruction the parameters are specified with
``num_parameters`` INSTRUCTION_PARAM structs.
The contents of each INSTRUCTION_PARAM is:
.. code-block:: c
struct {
char type;
uint64_t size;
}
After each INSTRUCTION_PARAM the next ``size`` bytes are the parameter's data.
The ``type`` field can be ``'i'``, ``'f'``, ``'p'``, ``'e'``, ``'s'``, ``'c'``
or ``'n'`` which dictate the format. For ``'i'`` it's an integer, ``'f'`` it's
a double, ``'s'`` if it's a string (encoded as utf8), ``'c'`` is a complex and
the data is represented by the struct format in the :ref:`qpy_param_expr` section.
``'p'`` defines a :class:`~qiskit.circuit.Parameter` object which is
represented by a :ref:`qpy_param_struct` struct, ``e`` defines a
:class:`~qiskit.circuit.ParameterExpression` object (that's not a
:class:`~qiskit.circuit.Parameter`) which is represented by a :ref:`qpy_param_expr`
struct (in QPY format :ref:`qpy_version_3` the format is tweaked slightly; see:
:ref:`qpy_param_expr_v3`), ``'n'`` represents an object from numpy (either an
``ndarray`` or a numpy type) which means the data is .npy format [#f2]_ data,
and in QPY :ref:`qpy_version_3` ``'v'`` represents a
:class:`~qiskit.circuit.ParameterVectorElement` which is represented by a
:ref:`qpy_param_vector` struct.
.. _qpy_param_struct:
PARAMETER
---------
A PARAMETER represents a :class:`~qiskit.circuit.Parameter` object the data for
a INSTRUCTION_PARAM. The contents of the PARAMETER are defined as:
.. code-block:: c
struct {
uint16_t name_size;
char uuid[16];
}
which is immediately followed by ``name_size`` utf8 bytes representing the
parameter name.
.. _qpy_param_expr:
PARAMETER_EXPR
--------------
A PARAMETER_EXPR represents a :class:`~qiskit.circuit.ParameterExpression`
object that the data for an INSTRUCTION_PARAM. The contents of a PARAMETER_EXPR
are defined as:
The PARAMETER_EXPR data starts with a header:
.. code-block:: c
struct {
uint64_t map_elements;
uint64_t expr_size;
}
Immediately following the header is ``expr_size`` bytes of utf8 data containing
the expression string, which is the sympy srepr of the expression for the
parameter expression. Following that is a symbol map which contains
``map_elements`` elements with the format
.. code-block:: c
struct {
char type;
uint64_t size;
}
Which is followed immediately by ``PARAMETER`` object (both the struct and utf8
name bytes) for the symbol map key. That is followed by ``size`` bytes for the
data of the symbol. The data format is dependent on the value of ``type``. If
``type`` is ``p`` then it represents a :class:`~qiskit.circuit.Parameter` and
size will be 0, the value will just be the same as the key. If
``type`` is ``f`` then it represents a double precision float. If ``type`` is
``c`` it represents a double precision complex, which is represented by :ref:`qpy_complex`.
Finally, if type is ``i`` it represents an integer which is an ``int64_t``.
.. _qpy_complex:
COMPLEX
-------
When representing a double precision complex value in QPY the following
struct is used:
.. code-block:: c
struct {
double real;
double imag;
}
this matches the internal C representation of Python's complex type. [#f3]_
.. [#f1] https://tools.ietf.org/html/rfc1700
.. [#f2] https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html
.. [#f3] https://docs.python.org/3/c-api/complex.html#c.Py_complex
"""
from .interface import dump, load
# For backward compatibility. Provide, Runtime, Experiment call these private functions.
from .binary_io import (
_write_instruction,
_read_instruction,
_write_parameter_expression,
_read_parameter_expression,
_read_parameter_expression_v3,
)
| [
"noreply@github.com"
] | faraimazh.noreply@github.com |
23c67eb0e36af3db5f176a78c5e5de976a388429 | e780a5bd72f98ca2513c993d64a85b08578166a6 | /zinstance/bin/fullrelease | 92515ab7490c40a6ef8bc78e3378398b986397a2 | [] | no_license | vedantc98/Plone-test | 023246597ffe848e2a49b9f65742ff49127b190b | 9fd520fc78481e2c0b9b7ec427821e7f961c777e | refs/heads/master | 2021-03-30T22:14:33.368739 | 2018-03-11T19:22:58 | 2018-03-11T19:22:58 | 124,671,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | #!/home/ubuntu/workspace/Plone/zinstance/bin/python
import sys
sys.path[0:0] = [
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/zest.releaser-6.13.4-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/zest.pocompile-1.4-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/python_gettext-3.0-py2.7.egg',
'/home/ubuntu/workspace/Plone/zinstance/lib/python2.7/site-packages',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/twine-1.9.1-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/six-1.10.0-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/colorama-0.3.9-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/tqdm-4.19.4-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/requests_toolbelt-0.8.0-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/requests-2.18.4-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/pkginfo-1.4.1-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/urllib3-1.22-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/idna-2.6-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/chardet-3.0.4-py2.7.egg',
'/home/ubuntu/workspace/Plone/buildout-cache/eggs/certifi-2017.11.5-py2.7.egg',
]
import zest.releaser.fullrelease
if __name__ == '__main__':
sys.exit(zest.releaser.fullrelease.main())
| [
"vedantc98@gmail.com"
] | vedantc98@gmail.com | |
222a4bf6b0ede4075937141a871684c0bf325701 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/__init___parts/FamilyManager.py | 9782230089fa487843ed9370f9d82992dd886f19 | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,040 | py | class FamilyManager(APIObject,IDisposable):
""" The family manager object to manage the family types and parameters in family document. """
def AddParameter(self,*__args):
"""
AddParameter(self: FamilyManager,familyDefinition: ExternalDefinition,parameterGroup: BuiltInParameterGroup,isInstance: bool) -> FamilyParameter
Add a new shared parameter to the family.
familyDefinition: The definition of the loaded shared parameter.
parameterGroup: The group to which the family parameter belongs.
isInstance: Indicates if the new parameter is instance or type.
Returns: If creation was successful the new shared parameter is returned,
otherwise an
exception with failure information will be thrown.
AddParameter(self: FamilyManager,parameterName: str,parameterGroup: BuiltInParameterGroup,parameterType: ParameterType,isInstance: bool) -> FamilyParameter
Add a new family parameter with a given name.
parameterName: The name of the new family parameter.
parameterGroup: The group to which the family parameter belongs.
parameterType: The type of new family parameter.
isInstance: Indicates if the new family parameter is instance or type.
Returns: If creation was successful the new parameter is returned,
otherwise an
exception with failure information will be thrown.
AddParameter(self: FamilyManager,parameterName: str,parameterGroup: BuiltInParameterGroup,familyCategory: Category,isInstance: bool) -> FamilyParameter
Add a new family type parameter to control the type of a nested family within
another family.
parameterName: The name of the new family parameter.
parameterGroup: The group to which the family parameter belongs.
familyCategory: The category to which the new family parameter binds.
isInstance: Indicates if the new family parameter is instance or type.
Returns: If creation was successful the new parameter is returned,
otherwise an
exception with failure information will be thrown.
"""
pass
def AssociateElementParameterToFamilyParameter(self,elementParameter,familyParameter):
"""
AssociateElementParameterToFamilyParameter(self: FamilyManager,elementParameter: Parameter,familyParameter: FamilyParameter)
Associates or disassociates the element parameter to an existing family
parameter.
elementParameter: The parameter of an element in family.
familyParameter: The existing family parameter. If the input to this argument is ll,
it will
disassociate the element parameter from any family parameters.
"""
pass
def CanElementParameterBeAssociated(self,elementParameter):
"""
CanElementParameterBeAssociated(self: FamilyManager,elementParameter: Parameter) -> bool
Indicates if this element parameter can be associated with a family parameter.
"""
pass
def DeleteCurrentType(self):
"""
DeleteCurrentType(self: FamilyManager)
Remove the current family type.
"""
pass
def Dispose(self):
""" Dispose(self: APIObject,A_0: bool) """
pass
def GetAssociatedFamilyParameter(self,elementParameter):
"""
GetAssociatedFamilyParameter(self: FamilyManager,elementParameter: Parameter) -> FamilyParameter
Gets the associated family parameter of an element parameter.
elementParameter: The parameter of an element in family.
Returns: The associated family parameter if there is an association between them,
returns ll if not.
"""
pass
def GetParameters(self):
"""
GetParameters(self: FamilyManager) -> IList[FamilyParameter]
Gets the parameters associated to family types in order.
Returns: A collection containing all family parameters.
"""
pass
def IsParameterLockable(self,familyParameter):
"""
IsParameterLockable(self: FamilyManager,familyParameter: FamilyParameter) -> bool
For Conceptual Mass and Curtain Panel families,
indicate whether the specified
parameter can be locked.
Returns: True if the family is a Conceptual Mass or Curtain
Panel Family and the
parameter drives one or more
dimensions; false otherwise.
"""
pass
def IsParameterLocked(self,familyParameter):
"""
IsParameterLocked(self: FamilyManager,familyParameter: FamilyParameter) -> bool
For Conceptual Mass and Curtain Panel families,
indicate whether the specified
dimension-driving
parameter is locked.
Returns: True if the parameter is lockable
and is locked; false otherwise.
"""
pass
def IsUserAssignableParameterGroup(self,parameterGroup):
"""
IsUserAssignableParameterGroup(self: FamilyManager,parameterGroup: BuiltInParameterGroup) -> bool
Checks if the given parameter group can be assigned to new parameters.
Returns: True if the parameter group can be assigned to new parameters,false otherwise.
"""
pass
def MakeInstance(self,familyParameter):
"""
MakeInstance(self: FamilyManager,familyParameter: FamilyParameter)
Set the family parameter as an instance parameter.
"""
pass
def MakeNonReporting(self,familyParameter):
"""
MakeNonReporting(self: FamilyManager,familyParameter: FamilyParameter)
Set the reporting family parameter as a regular/driving parameter.
"""
pass
def MakeReporting(self,familyParameter):
"""
MakeReporting(self: FamilyManager,familyParameter: FamilyParameter)
Set the family parameter as a reporting parameter.
"""
pass
def MakeType(self,familyParameter):
"""
MakeType(self: FamilyManager,familyParameter: FamilyParameter)
Set the family parameter as a type parameter.
"""
pass
def NewType(self,typeName):
"""
NewType(self: FamilyManager,typeName: str) -> FamilyType
Add a new family type with a given name and makes it be the current type.
typeName: The name of new family type.
"""
pass
def ReleaseManagedResources(self,*args):
""" ReleaseManagedResources(self: APIObject) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: APIObject) """
pass
def RemoveParameter(self,familyParameter):
"""
RemoveParameter(self: FamilyManager,familyParameter: FamilyParameter)
Remove an existing family parameter from the family.
familyParameter: The family parameter.
"""
pass
def RenameCurrentType(self,typeName):
"""
RenameCurrentType(self: FamilyManager,typeName: str)
Rename the current family type.
typeName: The new name of the current family type.
"""
pass
def RenameParameter(self,familyParameter,name):
"""
RenameParameter(self: FamilyManager,familyParameter: FamilyParameter,name: str)
Rename a family parameter.
familyParameter: The family parameter.
name: The new name.
"""
pass
def ReorderParameters(self,parameters):
""" ReorderParameters(self: FamilyManager,parameters: IList[FamilyParameter]) """
pass
def ReplaceParameter(self,currentParameter,*__args):
"""
ReplaceParameter(self: FamilyManager,currentParameter: FamilyParameter,familyDefinition: ExternalDefinition,parameterGroup: BuiltInParameterGroup,isInstance: bool) -> FamilyParameter
Replace a family parameter with a shared parameter.
currentParameter: The current family parameter.
familyDefinition: The definition of the loaded shared parameter.
parameterGroup: The group to which the new shared parameter belongs.
isInstance: Indicates if the new parameter is instance or type.
Returns: If replacement was successful the new shared parameter is returned,
otherwise
an exception with failure information will be thrown.
ReplaceParameter(self: FamilyManager,currentParameter: FamilyParameter,parameterName: str,parameterGroup: BuiltInParameterGroup,isInstance: bool) -> FamilyParameter
Replace a shared family parameter with a new non-shared family parameter.
currentParameter: The current family parameter.
parameterName: The name of the new family parameter.
parameterGroup: The group to which the new family parameter belongs.
isInstance: Indicates if the new parameter is instance or type.
Returns: If replacement was successful the new family parameter is returned,
otherwise
an exception with failure information will be thrown.
"""
pass
def Set(self,familyParameter,value):
"""
Set(self: FamilyManager,familyParameter: FamilyParameter,value: str)
Set the string value of a family parameter of the current family type.
familyParameter: A family parameter of the current type.
value: The new value for family parameter.
Set(self: FamilyManager,familyParameter: FamilyParameter,value: int)
Set the integer value of a family parameter of the current family type.
familyParameter: A family parameter of the current type.
value: The new value for family parameter.
Set(self: FamilyManager,familyParameter: FamilyParameter,value: ElementId)
Set the ElementId value of a family parameter of the current family type.
familyParameter: A family parameter of the current type.
value: The new value for family parameter.
Set(self: FamilyManager,familyParameter: FamilyParameter,value: float)
Set the double value of a family parameter of the current family type.
familyParameter: A family parameter of the current type.
value: The new value for family parameter.
"""
pass
def SetDescription(self,familyParameter,description):
"""
SetDescription(self: FamilyManager,familyParameter: FamilyParameter,description: str)
Set the description for an existing family parameter.
The description will be
used as tooltip in the Revit UI including in the properties palette.
familyParameter: The family parameter.
description: The description of the family parameter.
"""
pass
def SetFormula(self,familyParameter,formula):
"""
SetFormula(self: FamilyManager,familyParameter: FamilyParameter,formula: str)
Set the formula of a family parameter.
familyParameter: The family parameter.
formula: The formula string,input ll to clean the formula of the parameter.
"""
pass
def SetParameterLocked(self,familyParameter,locked):
"""
SetParameterLocked(self: FamilyManager,familyParameter: FamilyParameter,locked: bool)
For Conceptual Mass and Curtain Panel families,
lock or unlock a
dimension-driving
parameter.
"""
pass
def SetValueString(self,familyParameter,value):
"""
SetValueString(self: FamilyManager,familyParameter: FamilyParameter,value: str)
Set the string value of a family parameter of the current family type.
familyParameter: The family parameter of current type.
value: The new value string for family parameter.
"""
pass
def SortParameters(self,order):
"""
SortParameters(self: FamilyManager,order: ParametersOrder)
Sorts the family parameters according to the desired sort order.
order: The desired sort order.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
CurrentType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The current family type.
Get: CurrentType(self: FamilyManager) -> FamilyType
Set: CurrentType(self: FamilyManager)=value
"""
Parameters=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""All family parameters in this family.
Get: Parameters(self: FamilyManager) -> FamilyParameterSet
"""
Types=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""All family types in the family.
Get: Types(self: FamilyManager) -> FamilyTypeSet
"""
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
86e84e1014becbb3d10692c91931c5a9e404c2ed | 63381bdd170be82ac307cb7038f66883af219b40 | /filmweb/migrations/0005_auto_20190416_0022.py | 59ab2f8337a8792d4910ac3d1e621cb4aed62781 | [] | no_license | ofemofem/filmwebapi | 469bb0592c7c201f09a2fd39f0b027fa0537cd53 | 843d10422ab547e39d6e69febcef2ff6887a2a48 | refs/heads/master | 2020-05-10T00:01:59.815109 | 2019-05-14T21:53:07 | 2019-05-14T21:53:07 | 181,519,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # Generated by Django 2.2 on 2019-04-15 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('filmweb', '0004_auto_20190413_1557'),
]
operations = [
migrations.AlterField(
model_name='movierate',
name='rate',
field=models.FloatField(),
),
]
| [
"you@example.com"
] | you@example.com |
f99ca48d3852d4c88874fba1340faf146fe721e3 | 51a6413af4995a221bb7cf9bce20a00acedcff9d | /snakify-problems-python/ALL/07_02_even_elements.py | bf6a4977e061271a11a60c4baf349e1c72195400 | [] | no_license | famaxth/Way-to-Coding | 4dff099de31c1a5870cf72a2aaaab74fdcbfed36 | bcb2048898edf418b059ec506eb5ad1507889cfb | refs/heads/main | 2023-08-15T19:34:16.510571 | 2021-10-08T04:53:42 | 2021-10-08T04:53:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | for elem in [int(s) for s in input().split()]:
if elem % 2 == 0:
print(elem, end=' ') | [
"jobin25051999@gmail.com"
] | jobin25051999@gmail.com |
face982b4ba6173f837eb471228815ecc2215c09 | 3af6960c805e9903eb27c09d8bc7ebc77f5928fe | /problems/0086_Partition_List/__init__.py | c7c3687f27954729aac793e8ba3b85961232e9fd | [] | no_license | romain-li/leetcode | b3c8d9d4473eebd039af16ad2d4d99abc2768bdd | 5e82b69bd041c2c168d75cb9179a8cbd7bf0173e | refs/heads/master | 2020-06-04T20:05:03.592558 | 2015-06-08T18:05:03 | 2015-06-08T18:05:03 | 27,431,664 | 2 | 1 | null | 2015-06-08T18:05:04 | 2014-12-02T12:31:58 | Python | UTF-8 | Python | false | false | 455 | py | ID = '86'
TITLE = 'Partition List'
DIFFICULTY = 'Medium'
URL = 'https://oj.leetcode.com/problems/partition-list/'
BOOK = False
PROBLEM = r"""Given a linked list and a value _x_, partition it such that all nodes less
than _x_ come before nodes greater than or equal to _x_.
You should preserve the original relative order of the nodes in each of the
two partitions.
For example,
Given `1->4->3->2->5->2` and _x_ = 3,
return `1->2->2->4->3->5`.
"""
| [
"romain_li@163.com"
] | romain_li@163.com |
38cd6a598240e65f5b94b46d8558809f61b8ac38 | 43b6bffc820d26dfd223728bed71241fb3d54983 | /abc/243/b.py | ba04b3356257b7a3f024e0b783adebfaa9600184 | [] | no_license | kiccho1101/atcoder | 3a163b6a38a62c578dad6d15ccb586d0fcd1e004 | c86cb8e08b881a0a01dc2ef538f0699f3951e897 | refs/heads/master | 2023-03-02T13:27:17.747402 | 2022-05-30T13:51:00 | 2022-05-30T13:51:00 | 223,152,693 | 1 | 0 | null | 2023-02-11T01:29:47 | 2019-11-21T10:52:49 | Python | UTF-8 | Python | false | false | 283 | py | N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
count_1 = 0
count_2 = 0
A_set = set(A)
for i in range(N):
if A[i] == B[i]:
count_1 += 1
if B[i] in A_set and A[i] != B[i]:
count_2 += 1
print(count_1)
print(count_2)
| [
"youodf11khp@gmail.com"
] | youodf11khp@gmail.com |
3d4b5f3283fdc684ada01e6fe923d14cf91de7a8 | 2ca07aecfa6ff25b0baae6dc9a707a284c2d1b6d | /common/sectools/sectools/common/crypto/functions/utils/__init__.py | 77a586234824b6d3c65e3214d2a6f28fa93e4306 | [
"BSD-3-Clause",
"OpenSSL",
"MIT",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zhilangtaosha/msm8996-wp-1-0_test_device | ef05af263ba7955263ff91eb81d45b2437bc492e | 6af9b44abbc4a367a9aaae26707079974c535f08 | refs/heads/master | 2023-03-19T02:42:09.581740 | 2021-02-21T01:20:19 | 2021-02-21T01:20:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | # ===============================================================================
#
# Copyright (c) 2013-2016 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
#
# ===============================================================================
'''
Created on Oct 25, 2014
@author: hraghav
'''
import abc
import subprocess
from sectools.common.crypto.functions import FuncImplDiscovery, FUNC_UTILS
class UtilsImplDiscovery(FuncImplDiscovery):
@classmethod
def is_plugin(cls):
return True
@classmethod
def func(cls):
return FUNC_UTILS
@classmethod
def modules(cls):
modules = {}
try:
from sectools.common.crypto.discovery.defines import MOD_OPENSSL
from sectools.common.crypto.functions.utils.openssl import UtilsOpenSSLImpl
modules[MOD_OPENSSL] = UtilsOpenSSLImpl
except Exception:
pass
return modules
class UtilsBase(object):
FORMAT_DER = 'der'
FORMAT_PEM = 'pem'
FORMATS_SUPPORTED = [FORMAT_DER, FORMAT_PEM]
HASH_ALGO_SHA1 = 'sha1'
HASH_ALGO_SHA2 = 'sha2'
def __init__(self, module):
self.module = module
@abc.abstractmethod
def hash(self, hashing_algorithm, file_to_hash):
pass
| [
"lonelyjskj@gmail.com"
] | lonelyjskj@gmail.com |
a6107df3c15761e8c74b5b2a07292c4f084d3f81 | 1c14f85aa4f9c98362568d5cd30cf56f814a54a0 | /user_accounts/migrations/0006_auto_20180302_1721.py | 451d5345628601842ea74db9e9ccfdd8a41c3992 | [] | no_license | dimyG/zakanda_public | 26da2e8c70e34b97097f71e57fd0e732bbeac5a0 | 0445ad0c4f23aa40a472afb7ae3f1dd96e9e009d | refs/heads/master | 2022-11-30T14:01:28.132512 | 2020-08-07T14:54:10 | 2020-08-07T14:54:10 | 285,815,022 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,007 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('user_accounts', '0005_auto_20180226_1732'),
]
operations = [
migrations.AlterField(
model_name='basicstats',
name='bet_yield',
field=models.FloatField(default=0, null=True),
),
migrations.AlterField(
model_name='basicstats',
name='num_bet_groups',
field=models.PositiveIntegerField(default=1, null=True),
),
migrations.AlterField(
model_name='basicstats',
name='num_bets',
field=models.PositiveIntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='basicstats',
name='num_followers',
field=models.PositiveIntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='basicstats',
name='num_following',
field=models.PositiveIntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='basicstats',
name='num_losses',
field=models.PositiveIntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='basicstats',
name='num_opens',
field=models.PositiveIntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='basicstats',
name='num_wins',
field=models.PositiveIntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='basicstats',
name='roi',
field=models.FloatField(default=0, null=True),
),
migrations.AlterField(
model_name='basicstats',
name='score',
field=models.FloatField(default=0, null=True),
),
]
| [
"dimgeows@gmail.com"
] | dimgeows@gmail.com |
ca3bf979f3f95bb0227e30197227edeb4ad65fdb | 94bd1e0d8caae89e8ae7b917d78fb241df650d46 | /gfssi_e03_ssi_area.py | 4aafab0014b207a178784ba5f664388ebe853e80 | [
"MIT"
] | permissive | NingAnMe/GFSSI | 79d36449764a96482b9e2bc3224675c7160ddcd3 | 066ac3dcffe04927aa497ee8b2257bee3ec3789a | refs/heads/master | 2021-06-23T22:00:24.367544 | 2021-02-05T06:10:44 | 2021-02-05T06:10:44 | 196,914,413 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,020 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/8/12
@Author : AnNing
"""
import os
import h5py
import numpy as np
from lib.lib_read_ssi import FY4ASSI, FY3DSSI
from lib.lib_constant import FULL_VALUE
from lib.lib_get_index_by_lonlat import get_data_by_index, get_area_index
def _write_out_file(out_file, result):
out_dir = os.path.dirname(out_file)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
valid_count = 0
for key in result:
if result[key] is None:
continue
else:
valid_count += 1
if valid_count == 0:
print('没有足够的有效数据,不生成结果文件')
return
# try:
compression = 'gzip'
compression_opts = 5
shuffle = True
with h5py.File(out_file, 'w') as hdf5:
for dataset in result.keys():
data = result[dataset]
if data is not None:
data[np.isnan(data)] = FULL_VALUE
hdf5.create_dataset(dataset,
dtype=np.float32, data=result[dataset], compression=compression,
compression_opts=compression_opts,
shuffle=shuffle)
print('成功生成HDF文件 >>>:{}'.format(out_file))
# except Exception as why:
# print(why)
# print('HDF写入数据错误')
# os.remove(out_file)
def area(in_file, out_file, left_up_lon=None, left_up_lat=None, right_down_lon=None, right_down_lat=None,
resolution_type=None, resultid=None):
print('area <<< :{}'.format(in_file))
if not os.path.isfile(in_file):
print('数据不存在:{}'.format(in_file))
return
out_path = os.path.dirname(out_file)
if not os.path.isdir(out_path):
os.makedirs(out_path)
if 'fy4a' in resultid.lower() and '4km' in resolution_type.lower():
loader = FY4ASSI
lons = FY4ASSI.get_longitude_4km()
lats = FY4ASSI.get_latitude_4km()
elif 'fy4a' in resultid.lower() and '1km' in resolution_type.lower():
loader = FY4ASSI
lons = FY4ASSI.get_longitude_1km()
lats = FY4ASSI.get_latitude_1km()
elif 'fy3d' in resultid.lower() and '1km' in resolution_type.lower():
loader = FY3DSSI
lons = FY3DSSI.get_longitude_1km()
lats = FY3DSSI.get_latitude_1km()
else:
raise ValueError('不支持此分辨率: {}'.format(resolution_type))
data_all = {
'SSI': None,
'DirSSI': None,
'DifSSI': None,
'G0': None,
'Gt': None,
'DNI': None,
'Latitude': None,
'Longitude': None,
}
try:
datas = loader(in_file)
data_get = {
'SSI': datas.get_ssi,
'DirSSI': datas.get_ib,
'DifSSI': datas.get_id,
'G0': datas.get_g0,
'Gt': datas.get_gt,
'DNI': datas.get_dni,
'Latitude': lats,
'Longitude': lons,
}
(row_min, row_max), (col_min, col_max) = get_area_index(lons=lons, lats=lats, left_up_lon=left_up_lon,
left_up_lat=left_up_lat, right_down_lon=right_down_lon,
right_down_lat=right_down_lat)
for dataname in data_all:
if callable(data_get[dataname]):
data = data_get[dataname]()
else:
data = data_get[dataname]
data_all[dataname] = get_data_by_index(data=data, row_min=row_min, row_max=row_max,
col_min=col_min, col_max=col_max)
except Exception as why:
print(why)
print('选取数据过程出错,文件为:{}'.format(in_file))
return
try:
_write_out_file(out_file, data_all)
except Exception as why:
print(why)
print('输出结果文件错误')
return
return out_file
| [
"ninganme@qq.com"
] | ninganme@qq.com |
89c6129f1154222ee78183c7ae79f9ac99733671 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/heatmap/colorbar/_separatethousands.py | de1cd8c74ee61d486a9c57a33353a8d2f030d73c | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 455 | py | import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="separatethousands", parent_name="heatmap.colorbar", **kwargs
):
super(SeparatethousandsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
d810867bcfc8c9188e598edd86bf8ba9c1bd4c83 | 71aad099dc4fce3f4afebf0b38d57d11fddabeff | /fabfile.py | 98bacd1656f30e139d9b31cf99c310abd87fc151 | [] | no_license | spiritEcosse/neatapps | e683a0ca746f78f1f0e096ac10ce4f73ed3bcf58 | 623bcac577b7a4d3ea1bfea32c16cbfa39ad37a6 | refs/heads/master | 2020-04-22T09:59:05.586784 | 2015-10-07T12:40:22 | 2015-10-07T12:40:22 | 170,289,874 | 0 | 0 | null | 2019-05-21T08:44:56 | 2019-02-12T09:28:42 | CSS | UTF-8 | Python | true | false | 2,155 | py | __author__ = 'igor'
from fabric.api import local, run, cd, settings
import os
from neatapps.settings import BASE_DIR
from fabric.state import env
from neatapps.settings_local import HOSTS
env.user = 'root'
env.skip_bad_hosts = True
env.warn_only = False
env.parallel = True
env.shell = "/bin/bash -l -i -c"
REQUIREMENTS_FILE = 'requirements.txt'
def deploy():
"""
deploy project on remote server
:return:
"""
local_act()
update_requirements()
remote_act()
def remote_act():
"""
run remote acts
:return: None
"""
for host, dir_name in HOSTS:
with settings(host_string=host):
with cd(dir_name):
run("git reset --hard")
run("kill -9 $(ps -ef|grep -v grep |grep 'neatapps' | awk '{print $2}')")
run("neatapps")
def local_act():
"""
prepare deploy
:return: None
"""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "neatapps.settings")
activate_env = os.path.expanduser(os.path.join(BASE_DIR, ".env/bin/activate_this.py"))
execfile(activate_env, dict(__file__=activate_env))
local("./manage.py test")
local("./manage.py compilemessages")
local("./manage.py makemigrations")
local("./manage.py migrate")
local("%s%s" % ('pip freeze > ', REQUIREMENTS_FILE))
local("./manage.py collectstatic --noinput -c")
local("git add .")
local("git commit -a -F git_commit_message")
current_branch = local("git symbolic-ref --short -q HEAD", capture=True)
if current_branch != 'master':
local("git checkout master")
local("git merge %s" % current_branch)
local("git branch -d %s" % current_branch)
local("git push origin")
local("git push production")
local("git push my_repo_neatapps_bit")
local("git push my-production")
def update_requirements():
"""
install external requirements on remote host
:return: None
"""
for host, dir_name in HOSTS:
with settings(host_string=host):
with cd(dir_name):
run('%s && %s%s' % ('source .env/bin/activate', 'pip install -r ', REQUIREMENTS_FILE))
| [
"shevchenkcoigor@gmail.com"
] | shevchenkcoigor@gmail.com |
c474e70bdbc4fb0f290fde3f5c81eb82caf24564 | 406e59a45b07c36717073ff2a446a7d5ce8057db | /data_visual/chapter15/dice_visual.py | bc071f90bf277b590d6b6342b524b89d3a5aa4df | [] | no_license | 15032373556/mystudy | ea074802c03ac3efe2577871a39e54f0a81c7b92 | 08861c40633b5cd93122248b676adbc4748bed7d | refs/heads/master | 2022-12-05T18:33:26.058627 | 2020-08-31T10:25:20 | 2020-08-31T10:25:20 | 291,670,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | import pygal
from die import Die
from sum_num import sum_2,sum_3,multip
#创建两个D6骰子
die_1 = Die()
die_2 = Die()
# #创建两个D8骰子
# die_1 = Die(8)
# die_2 = Die(8)
# #创建三个D6骰子
# die_1 = Die()
# die_2 = Die()
# die_3 = Die()
#掷几次骰子,并将结果存储在一个列表中
results = []
for roll_num in range(1000):
result = die_1.roll() * die_2.roll()
#result = die_1.roll() + die_2.roll() + die_3.roll()
results.append(result)
#分析结果
frequencies = []
max_result = die_1.num_sides+1 * die_2.num_sides + 1
#max_result = die_1.num_sides+1 + die_2.num_sides + die_3.num_sides + 1
#for value in range(2,max_result):
for value in range(1, max_result):
frequency = results.count(value)
frequencies.append(frequency)
#对结果进行可视化
hist = pygal.Bar()
hist.title = "Results of rolling two D6 dice 1000 times."
# hist.x_labels = ['2','3','4','5','6','7','8','9','10','11','12']
#hist.x_labels = sum(6,6)
#hist.x_labels = sum_2(8,8)
#hist.x_labels = sum_3(6,6,6)
hist.x_labels = multip(6,6)
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
# hist.add('D6 + D6',frequencies)
# hist.render_to_file('dice_visual.svg')
# hist.add('D8 + D8',frequencies)
# hist.render_to_file('dice_visual_8.svg')
# hist.add('D6 + D6 + D6',frequencies)
# hist.render_to_file('dice_visual_6.svg')
hist.add('D6 * D6',frequencies)
hist.render_to_file('dice_visual_66.svg') | [
"1798549164@qq.com"
] | 1798549164@qq.com |
24aa4af8736e1ea53d2d9ba0b3343622cf855c3f | 6feaf484be6ab130f82dc2cab66684499d7be1bb | /history/perlin_1554525139.025642.py | 2a76530991e878efa09cd9f1aae1162245a90151 | [] | no_license | JacobBunker/2DArt | d3b3d4d6b4f92bc64aab95139a0d0285c9cb2be0 | b8f6bdd081016a9922dc5a0a58226b9262c2ad98 | refs/heads/master | 2020-05-05T05:44:39.844350 | 2019-12-01T06:41:41 | 2019-12-01T06:41:41 | 179,763,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,816 | py | import numpy as np
from PIL import Image
import datetime;
import subprocess
sizeX = 512
sizeY = 512
repeat = 0
permutation = np.random.randint(low=0,high=255,size=[256],dtype=np.uint16)
p = np.empty([sizeX],dtype=np.uint16)
i = 0
while(i < 512):
p[i] = permutation[i%256]
i += 1
def fade(t):
#6t^5 - 15t^4 + 10t^3
return ((t*t*t)*((t*t*6) - (t*15) + (10)))
def grad(h, x, y, z):
#print("grad x: {0} y: {1}".format(x,y))
z = h & 0xF
if(z == 0x0):
return x + y
if(z == 0x1):
return -x + y
if(z == 0x2):
return x - y
if(z == 0x3):
return -x - y
if(z == 0x4):
return x + z
if(z == 0x5):
return -x + z
if(z == 0x6):
return x - z
if(z == 0x7):
return -x - z
if(z == 0x8):
return y + z
if(z == 0x9):
return -y + z
if(z == 0xA):
return y - z
if(z == 0xB):
return -y - z
if(z == 0xC):
return y + x
if(z == 0xD):
return -y + z
if(z == 0xE):
return y - x
if(z == 0xF):
return -y - z
def lerp(a, b, x):
#print("inputs: a:{0} b:{1} x:{2}".format(a,b,x))
out = a+x*(b-a)
return out
def perlin(x, y, z=0):
xi = int(np.floor(x)) & 255
yi = int(np.floor(y)) & 255
zi = int(np.floor(z)) & 255
x = x - np.floor(x)
y = y - np.floor(y)
z = z - np.floor(z)
u = fade(x)
v = fade(y)
w = fade(z)
a = p[xi] + yi
aa = p[a] + zi
ab = p[a+1] + zi
b = p[xi+1]+ yi
ba = p[b] + zi
bb = p[b+1] + zi
l1a = lerp(grad(p[aa ], x , y , z),
grad(p[ba ], x-1, y , z), u)
l1b = lerp(grad(p[ab ], x , y-1, z),
grad(p[bb ], x-1, y-1, z), u)
l2a = lerp(grad(p[aa+1], x , y , z-1),
grad(p[ba+1], x-1, y , z-1), u)
l2b = lerp(grad(p[ab+1], x , y-1, z-1),
grad(p[bb+1], x-1, y-1, z-1), u)
l1 = lerp(l1a,l1b,v)
l2 = lerp(l2a,l2b,v)
return (lerp(l1, l2, w))
picSizeX = 1000 #1000
picSizeY = 1400 #1400
desiredMax = 300.#30.12
interval = desiredMax / picSizeX
print(interval)
#make array
#temp = np.random.randint(low=0,high=255,size=[picSizeX,picSizeY,3],dtype=np.uint16)
temp = np.empty([picSizeX,picSizeY,3])
if(False):
i = 0
while(i < picSizeX):
print(i)
ii = 0
while(ii < picSizeY):
tr = np.random.random()*0.1 #*(2*i/picSizeX)
pout = perlin((i-(picSizeX/2))*(interval*(1+tr)),(ii-(picSizeY/2))*(interval*(1+tr)))
pout = (pout*10 + 255/2)
if(pout < 150):
pout = 0
if(pout > 255):
pout = 255
#print(int(pout))
temp[i,ii,0] = 0
temp[i,ii,1] = 0
temp[i,ii,2] = pout
ii += 1
i += 1
print(temp.shape)
#clear out center
center = np.asarray([picSizeX/2,picSizeY/2])
if(False):
i = 0
while(i < picSizeX):
print(i)
ii = 0
while(ii < picSizeY):
if(np.linalg.norm(center-np.asarray([i,ii])) < 400+0.5+50+50):
temp[i,ii,0] = 0
temp[i,ii,1] = 0
temp[i,ii,2] = 0
ii += 1
i += 1
#noise to black by distance to center
if(False):
i = 0
while(i < picSizeX):
print(i)
ii = 0
while(ii < picSizeY):
dist = np.linalg.norm(center-np.asarray([i,ii]))
#normalize to 0,1
r = picSizeY - 0
a = (dist - 0) / r
if(np.random.random() > a):
temp[i,ii,0] = 0
temp[i,ii,1] = 0
temp[i,ii,2] = 0
ii += 1
i += 1
#white to black by distance to center
if(False):
i = 0
while(i < picSizeX):
print(i)
ii = 0
while(ii < picSizeY):
dist = np.linalg.norm(center-np.asarray([i,ii]))
#normalize to 0,1
r = picSizeY - 0
a = (dist - 0) / r
#scale to [0,255]
r2 = 255 - 0
a = (a * r2) + 0
temp[i,ii,0] = a
temp[i,ii,1] = a
temp[i,ii,2] = a
ii += 1
i += 1
#draw ring
def xfun(x,y,t,z):
return x+(400-z*4)*np.cos(t*30)
def yfun(x,y,t,z):
return y+(400-z*4)*np.sin(t*30)
#center the starting point
x = picSizeX/2
y = picSizeY/2
if(False):
#draw
t = 0
tMax = 800
stepSize = 0.5
while(t < tMax):
i = 0
while(i < 100):
xt = xfun(x,y,t,i)
yt = yfun(x,y,t,i)
#xt = xfun(x+((np.random.random()-0.5)*5),y,t,i)
#yt = yfun(x,y+((np.random.random()-0.5)*5),t,i)
if(xt < picSizeX and xt >= 0 and yt < picSizeY and yt >= 0):
ixt = int(xt)
iyt = int(yt)
temp[ixt,iyt,0] = (t / tMax)*255
temp[ixt,iyt,1] = ((tMax-t) / tMax)*255
temp[ixt,iyt,2] = 55#(t / tMax)*255
i += 1
print("t:{0}\n".format(t))
t += np.random.random()*stepSize
def xfun(x,y,t,z):
return x+(450.25)*np.cos(t*30)+50*np.cos(t*5)
def yfun(x,y,t,z):
return y+(450.25)*np.sin(t*30)+50*np.sin(t*5)
if(False):
#draw
t = 0
tMax = 800*4
stepSize = 0.05
while(t < tMax):
i = 0
while(i < 1):
xt = xfun(x,y,t,i)
yt = yfun(x,y,t,i)
if(xt < picSizeX and xt >= 0 and yt < picSizeY and yt >= 0):
ixt = int(xt)
iyt = int(yt)
temp[ixt,iyt,0] = (t / tMax)*255
temp[ixt,iyt,1] = 255
temp[ixt,iyt,2] = 55#(t / tMax)*255
i += 1
print("t:{0}\n".format(t))
t += np.random.random()*stepSize
def xfun(x,y,t,z):
return x+(1*z)*np.cos(np.radians(t))
def yfun(x,y,t,z):
return y+(1*z)*np.sin(np.radians(t))
def xfun2(x,y,t,z):
return x+(5+z)*np.cos(np.radians(t))
def yfun2(x,y,t,z):
return y+(5+z)*np.sin(np.radians(t))
golden_ratio = 1.61803398875
if(False):
z = 0
while(z < 1):
#draw
ringCount = 1
ringTimer = 0.
t = 0
tMax = 360*1000
stepSize = 1.
while(t < tMax):
xt = xfun(x,y,t,ringCount)
yt = yfun(x,y,t,ringCount)
if(False and xt < picSizeX and xt >= 0 and yt < picSizeY and yt >= 0):
ixt = int(xt)
iyt = int(yt)
temp[ixt,iyt,0] = 200
temp[ixt,iyt,1] = 0
temp[ixt,iyt,2] = 255#(t / tMax)*255
if(ringTimer > 360/golden_ratio):
print("ring {0}".format(ringCount))
ringCount += 1
ringTimer = 0.
i = 0
while(i < 360):
xt2 = xfun2(xt,yt,i,ringCount)
yt2 = yfun2(xt,yt,i,ringCount)
if(xt2 < picSizeX and xt2 >= 0 and yt2 < picSizeY and yt2 >= 0):
ixt2 = int(xt2)
iyt2 = int(yt2)
dist = np.linalg.norm(center-np.asarray([ixt2,iyt2]))
#normalize to 0,1
r = 300
a = (dist - 0) / r
#scale to [0,255]
r2 = 255 - 0
a = (a * r2) + 0
a2 = a
if(a > 255):
a = 255
temp[ixt2,iyt2,0] = 255 - a
temp[ixt2,iyt2,2] = 255 - a
if(255 - a2/1.5 < 0):
temp[ixt2,iyt2,0] = 255
else:
temp[ixt2,iyt2,1] = 255 - a2/1.5
i += 0.05
#print("t:{0}\n".format(t))
ringTimer += stepSize
t += stepSize
z += 1
def xfun(x,y,t,z):
return x+(450.25)*np.cos(np.radians(t/4))
def yfun(x,y,t,z):
return y+(0-(4*z))*np.sin(np.radians(t/4))
if(True):
z = 1
while(z < 100):
print ("eye {0}".format(z))
t = 0
tMax = 360*4
stepSize = 1.
while(t < tMax):
xt = xfun(x+(np.random.random()-1)*100,y,t,z)
yt = yfun(x,y+(np.random.random()-1)*100,t,z)
if(xt < picSizeX and xt >= 0 and yt < picSizeY and yt >= 0):
ixt = int(xt)
iyt = int(yt)
temp[ixt,iyt,0] = 0
temp[ixt,iyt,1] = 0
temp[ixt,iyt,2] = 255#(t / tMax)*255
t += stepSize
z += 1
#black pupil
center = np.asarray([picSizeX/2,picSizeY/2])
if(False):
i = int(picSizeX/2 - 100)
while(i < int(picSizeX/2 + 100)):
print(i)
ii = int(picSizeY/2 - 100)
while(ii < int(picSizeY/2 + 100)):
if(np.linalg.norm(center-np.asarray([i,ii])) < 10):
temp[i,ii,0] = 0
temp[i,ii,1] = 0
temp[i,ii,2] = 0
ii += 1
i += 1
# convert array to Image
img = Image.fromarray(temp.astype('uint8'))
img.save("perlin_test.png", "PNG")
# make and store timestamped copy
ts = datetime.datetime.now().timestamp()
name = "history/perlin_{0}.png".format(ts)
img.save(name, "PNG")
subprocess.run(["cp", "./perlin2.py", "./history/perlin_{0}.py".format(ts)])
#note: I'm on a mac and I usually run with:
#python art.py; open -a Preview test.png
#which opens the image in preview as soon as it's done drawing
#not sure what the windows/linux equivalent is
| [
"jacob.n.bunker@gmail.com"
] | jacob.n.bunker@gmail.com |
6548a60cc2557c2b62190d7f8081789daf34ade7 | b92adbd59161b701be466b3dbeab34e2b2aaf488 | /.c9/metadata/environment/fb_post_learning/fb_post_v2/tests/interactors/tes_get_reactions_to_post_interactor.py | 89ac666e9950eb9917135a99700dc29c9b28d5a4 | [] | no_license | R151865/cloud_9_files | 7486fede7af4db4572f1b8033990a0f07f8749e8 | a468c44e9aee4a37dea3c8c9188c6c06e91cc0c4 | refs/heads/master | 2022-11-22T10:45:39.439033 | 2020-07-23T09:31:52 | 2020-07-23T09:31:52 | 281,904,416 | 0 | 1 | null | 2022-11-20T00:47:10 | 2020-07-23T09:08:48 | Python | UTF-8 | Python | false | false | 7,237 | py | {"changed":true,"filter":false,"title":"tes_get_reactions_to_post_interactor.py","tooltip":"/fb_post_learning/fb_post_v2/tests/interactors/tes_get_reactions_to_post_interactor.py","value":"from unittest.mock import create_autospec\n\nfrom fb_post_v2.interactors.storages import StorageInterface\n\nfrom fb_post_v2.interactors.presenters import PresenterInterface\n\nfrom fb_post_v2.interactors.reply_ import ReplyToCommentInteractor\n\n\nimport pytest\nfrom django_swagger_utils.drf_server.exceptions import NotFound\n\n\ndef test_with_invalid_post_id_raise_exception():\n # Arrange\n invalid_post_id = -1\n user_id = 1\n comment_content ='New comment'\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n storage.is_valid_post_id.return_value = False\n \n interactor = CreateCommentInteractor(\n storage=storage,\n presenter=presenter\n )\n presenter.raise_invalid_post_id_exception.side_effect = NotFound\n # Act\n with pytest.raises(NotFound):\n interactor.create_comment(\n user_id=user_id,\n post_id=invalid_post_id,\n comment_content=comment_content\n )\n\n\ndef test_reply_to_comment_with_invalid_comment_id_raise_exception():\n \n # Arrange\n user_id=1\n invalid_comment_id = -1\n reply_content='Replyt Content'\n\n storage = create_autospec(StorageInterface)\n presenter = create_autospec(PresenterInterface)\n\n interactor = ReplyToCommentInteractor(\n 
storage=storage,\n presenter=presenter\n )\n storage.is_valid_comment_id.return_value = False\n presenter.raise_invalid_comment_id_exception.side_effect = NotFound\n\n # Act\n with pytest.raises(NotFound):\n interactor.reply_to_comment(\n user_id=user_id,\n comment_id=invalid_comment_id,\n reply_content=reply_content\n )\n # Assert","undoManager":{"mark":-2,"position":7,"stack":[[{"start":{"row":0,"column":0},"end":{"row":63,"column":12},"action":"insert","lines":["","from unittest.mock import create_autospec","from fb_post_v2.interactors.storages import \\"," StorageInterface","","from fb_post_v2.interactors.presenters import \\"," PresenterInterface","","from fb_post_v2.interactors.create_comment_interactor import CreateCommentInteractor","from fb_post_v2.interactors.reply_to_comment_interactor import ReplyToCommentInteractor","","","import pytest","from django_swagger_utils.drf_server.exceptions import NotFound","","","def test_with_invalid_post_id_raise_exception():"," # Arrange"," invalid_post_id = -1"," user_id = 1"," comment_content ='New comment'"," storage = create_autospec(StorageInterface)"," presenter = create_autospec(PresenterInterface)"," storage.is_valid_post_id.return_value = False"," "," interactor = CreateCommentInteractor("," storage=storage,"," presenter=presenter"," )"," presenter.raise_invalid_post_id_exception.side_effect = NotFound"," # Act"," with pytest.raises(NotFound):"," interactor.create_comment("," user_id=user_id,"," post_id=invalid_post_id,"," comment_content=comment_content"," )","","","def test_reply_to_comment_with_invalid_comment_id_raise_exception():"," "," # Arrange"," user_id=1"," invalid_comment_id = -1"," reply_content='Replyt Content'",""," storage = create_autospec(StorageInterface)"," presenter = create_autospec(PresenterInterface)",""," interactor = ReplyToCommentInteractor("," storage=storage,"," presenter=presenter"," )"," storage.is_valid_comment_id.return_value = False"," 
presenter.raise_invalid_comment_id_exception.side_effect = NotFound",""," # Act"," with pytest.raises(NotFound):"," interactor.reply_to_comment("," user_id=user_id,"," comment_id=invalid_comment_id,"," reply_content=reply_content"," )"," # Assert"],"id":1}],[{"start":{"row":3,"column":0},"end":{"row":3,"column":4},"action":"remove","lines":[" "],"id":2},{"start":{"row":2,"column":45},"end":{"row":3,"column":0},"action":"remove","lines":["",""]},{"start":{"row":2,"column":44},"end":{"row":2,"column":45},"action":"remove","lines":["\\"]}],[{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"remove","lines":[" "],"id":3},{"start":{"row":4,"column":47},"end":{"row":5,"column":0},"action":"remove","lines":["",""]},{"start":{"row":4,"column":46},"end":{"row":4,"column":47},"action":"remove","lines":["\\"]}],[{"start":{"row":6,"column":0},"end":{"row":7,"column":0},"action":"remove","lines":["from fb_post_v2.interactors.create_comment_interactor import CreateCommentInteractor",""],"id":4}],[{"start":{"row":0,"column":0},"end":{"row":1,"column":0},"action":"remove","lines":["",""],"id":5}],[{"start":{"row":0,"column":41},"end":{"row":1,"column":0},"action":"insert","lines":["",""],"id":6}],[{"start":{"row":6,"column":54},"end":{"row":6,"column":55},"action":"remove","lines":["r"],"id":7},{"start":{"row":6,"column":53},"end":{"row":6,"column":54},"action":"remove","lines":["o"]},{"start":{"row":6,"column":52},"end":{"row":6,"column":53},"action":"remove","lines":["t"]},{"start":{"row":6,"column":51},"end":{"row":6,"column":52},"action":"remove","lines":["c"]},{"start":{"row":6,"column":50},"end":{"row":6,"column":51},"action":"remove","lines":["a"]},{"start":{"row":6,"column":49},"end":{"row":6,"column":50},"action":"remove","lines":["r"]},{"start":{"row":6,"column":48},"end":{"row":6,"column":49},"action":"remove","lines":["e"]},{"start":{"row":6,"column":47},"end":{"row":6,"column":48},"action":"remove","lines":["t"]},{"start":{"row":6,"column":46},"end":{"r
ow":6,"column":47},"action":"remove","lines":["n"]},{"start":{"row":6,"column":45},"end":{"row":6,"column":46},"action":"remove","lines":["i"]},{"start":{"row":6,"column":44},"end":{"row":6,"column":45},"action":"remove","lines":["_"]},{"start":{"row":6,"column":43},"end":{"row":6,"column":44},"action":"remove","lines":["t"]},{"start":{"row":6,"column":42},"end":{"row":6,"column":43},"action":"remove","lines":["n"]},{"start":{"row":6,"column":41},"end":{"row":6,"column":42},"action":"remove","lines":["e"]},{"start":{"row":6,"column":40},"end":{"row":6,"column":41},"action":"remove","lines":["m"]},{"start":{"row":6,"column":39},"end":{"row":6,"column":40},"action":"remove","lines":["m"]},{"start":{"row":6,"column":38},"end":{"row":6,"column":39},"action":"remove","lines":["o"]}],[{"start":{"row":6,"column":37},"end":{"row":6,"column":38},"action":"remove","lines":["c"],"id":8},{"start":{"row":6,"column":36},"end":{"row":6,"column":37},"action":"remove","lines":["_"]},{"start":{"row":6,"column":35},"end":{"row":6,"column":36},"action":"remove","lines":["o"]},{"start":{"row":6,"column":34},"end":{"row":6,"column":35},"action":"remove","lines":["t"]}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":6,"column":34},"end":{"row":6,"column":34},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1589816555147} | [
"r151865@rguktrkv.ac.in"
] | r151865@rguktrkv.ac.in |
faa303ad648a6c7ff8825e4cfa35fe13c27619fb | 5dd47abf7061201d9378e73e51f08fbb314ba2fd | /envdsys/envproject/migrations/0002_auto_20200326_1744.py | 2fa88af2d3c78743c8c0cdfa293983bf102cd8ba | [
"Unlicense"
] | permissive | NOAA-PMEL/envDataSystem | 4d264ae5209015e4faee648f37608d68a4461d0a | 4db4a3569d2329658799a3eef06ce36dd5c0597d | refs/heads/master | 2023-02-23T22:33:14.334737 | 2021-07-22T01:09:16 | 2021-07-22T01:09:16 | 191,809,007 | 1 | 0 | Unlicense | 2023-02-08T00:45:54 | 2019-06-13T17:50:03 | Python | UTF-8 | Python | false | false | 940 | py | # Generated by Django 2.2.1 on 2020-03-26 17:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envproject', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='project',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to='', verbose_name='Logo Image'),
),
migrations.AddField(
model_name='project',
name='website',
field=models.URLField(blank=True, null=True, verbose_name='Project Website'),
),
migrations.AlterField(
model_name='project',
name='long_name',
field=models.CharField(blank=True, max_length=200),
),
migrations.AlterField(
model_name='project',
name='name',
field=models.CharField(max_length=100),
),
]
| [
"derek.coffman@noaa.gov"
] | derek.coffman@noaa.gov |
ddb5c9e743c62271e928590de05e16f13c7cf342 | f8db8b11ad41d246cd4a0d71b75e3a324a481b14 | /SecretPlots/assemblers/__init__.py | 039b40cfcf37663da14edd45bbf7f7bca50520be | [
"MIT"
] | permissive | secretBiology/SecretPlots | b978703497d255eb099b471fdafcea7fdf793bd3 | eca1d0e0932e605df49d1f958f98a1f41200d589 | refs/heads/master | 2022-07-07T07:00:57.625486 | 2022-01-14T05:44:54 | 2022-01-14T05:44:54 | 206,092,023 | 0 | 1 | MIT | 2022-06-21T22:45:18 | 2019-09-03T14:03:56 | Python | UTF-8 | Python | false | false | 433 | py | # SecretPlots
# Copyright (c) 2019. SecretBiology
#
# Author: Rohit Suratekar
# Organisation: SecretBiology
# Website: https://github.com/secretBiology/SecretPlots
# Licence: MIT License
# Creation: 05/10/19, 7:44 PM
#
from SecretPlots.assemblers._base import Assembler
from SecretPlots.assemblers._bars import BarAssembler, BarGroupedAssembler
from SecretPlots.assemblers._matrix import ColorMapAssembler, BooleanAssembler
| [
"rohitsuratekar@gmail.com"
] | rohitsuratekar@gmail.com |
02fc867d8ca2fded13b2e27d71b97cb8471db1c6 | 55815c281f6746bb64fc2ba46d074ca5af966441 | /medium/1261.py | c20fe47a1a06a1dbaa21b9bc1188fc7d9b490f82 | [] | no_license | brandoneng000/LeetCode | def5107b03187ad7b7b1c207d39c442b70f80fc2 | c7a42753b2b16c7b9c66b8d7c2e67b683a15e27d | refs/heads/master | 2023-08-30T23:38:04.845267 | 2023-08-30T08:42:57 | 2023-08-30T08:42:57 | 199,584,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | from typing import Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class FindElements:
def __init__(self, root: Optional[TreeNode]):
def dfs(root: TreeNode, expected_val: int):
if not root:
return
self.tree_vals.add(expected_val)
dfs(root.left, expected_val * 2 + 1)
dfs(root.right, expected_val * 2 + 2)
self.tree_vals = set()
dfs(root, 0)
def find(self, target: int) -> bool:
return target in self.tree_vals
# Your FindElements object will be instantiated and called as such:
# obj = FindElements(root)
# param_1 = obj.find(target) | [
"brandoneng000@gmail.com"
] | brandoneng000@gmail.com |
6104deeb0fc1599dd5f3e3b4bbeb0f3de85eae1e | e8b5fde61cda3752507c3d8309c9d7075047a8d0 | /desihub/specsim/specsim/config.py | 0b89a824fb9a7c7f4d98be67ac51aea1fde72277 | [] | no_license | michaelJwilson/LBGCMB | f83af41d80d1991644607d473cd00246432fe601 | fca7d0cd515b756233dfd530e9f779c637730bc4 | refs/heads/master | 2021-08-15T19:40:43.151033 | 2020-04-22T17:42:11 | 2020-04-22T17:42:11 | 163,872,551 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,243 | py | ## Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Manage simulation configuration data.
Configuration data is normally loaded from a yaml file. Some standard
configurations are included with this package and can be loaded by name,
for example:
>>> test_config = load_config('test')
Otherwise any filename with extension .yaml can be loaded::
my_config = load_config('path/my_config.yaml')
Configuration data is accessed using attribute notation to specify a
sequence of keys:
>>> test_config.name
'Test Simulation'
>>> test_config.atmosphere.airmass
1.0
Use :meth:`Configuration.get_constants` to parse values with dimensions and
:meth:`Configuration.load_table` to load and interpolate tabular data.
"""
from __future__ import print_function, division
import os
import os.path
import math
import re
import warnings
import yaml
import numpy as np
import scipy.interpolate
import astropy.units
import astropy.table
import astropy.utils.data
import astropy.coordinates
import astropy.time
import astropy.io.fits
import astropy.wcs
def is_string(x):
    """Test whether x is a string type.

    This helper is redundant when the package is installed via setup.py
    (which runs 2to3), but it is kept so the package also works straight
    from a git clone.

    We deliberately avoid binding ``basestring`` at module scope, since
    doing so confuses sphinx.

    Parameters
    ----------
    x : any
        Variable to be tested.

    Returns
    -------
    bool
        Returns true if x is a string type.
    """
    try:
        # Python 2: basestring covers both str and unicode.
        string_types = basestring
    except NameError:
        # Python 3: basestring no longer exists; str is the only text type.
        string_types = str
    return isinstance(x, string_types)
# Regular expression used by parse_quantity() to pull a leading numeric
# value (optional sign, decimal point and exponent) out of a string,
# allowing optional whitespace before and after the number.  Any text
# remaining after the match is interpreted as the units.
_float_pattern = re.compile(
    r'\s*([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*')
def parse_quantity(quantity, dimensions=None):
    """Parse a string containing a numeric value with optional units.

    The result is a :class:`Quantity <astropy.units.Quantity>` object even
    when units are not present. Optional units are interpreted by
    :class:`astropy.units.Unit`. Some valid examples::

        1.23
        1.23um
        123 um / arcsec
        1 electron/adu

    Used by :meth:`Configuration.get_constants`.

    Parameters
    ----------
    quantity : str or astropy.units.Quantity
        String to parse. If a quantity is provided, it is checked against
        the expected dimensions and passed through.
    dimensions : str or astropy.units.Unit or None
        The units of the input quantity are expected to have the same
        dimensions as these units, if not None. Raises a ValueError if
        the input quantity is not convertible to dimensions.

    Returns
    -------
    astropy.units.Quantity
        If dimensions is not None, the returned quantity will be converted
        to its units.

    Raises
    ------
    ValueError
        Unable to parse quantity.
    """
    if not isinstance(quantity, astropy.units.Quantity):
        # Split the string into a leading number and a trailing unit string.
        found_number = _float_pattern.match(quantity)
        if not found_number:
            raise ValueError('Unable to parse quantity.')
        number = float(found_number.group(1))
        unit_string = quantity[found_number.end():]
        quantity = astropy.units.Quantity(number, unit_string)
    if dimensions is None:
        return quantity
    try:
        if not isinstance(dimensions, astropy.units.Unit):
            dimensions = astropy.units.Unit(dimensions)
        return quantity.to(dimensions)
    except (ValueError, astropy.units.UnitConversionError):
        raise ValueError('Quantity "{0}" is not convertible to {1}.'
                         .format(quantity, dimensions))
class Node(object):
    """A single node of a configuration data structure.

    Wraps a dictionary so that nested keys can be read (and leaf values
    written) using attribute notation, e.g. ``node.child.leaf``.  Nested
    dictionaries are wrapped in new :class:`Node` objects on access, while
    leaf values are returned directly.

    Parameters
    ----------
    value : dict
        Dictionary of configuration values represented by this node.
    path : iterable or None
        Sequence of keys leading from the configuration root to this node.
        ``None`` (the default) denotes the root node.
    """
    def __init__(self, value, path=None):
        # Fix: the previous signature used a mutable default argument
        # (path=[]), which is shared between all calls relying on the
        # default.  Use None and create a fresh list instead.
        if path is None:
            path = []
        self._assign('_value', value)
        self._assign('_path', list(path))
    def keys(self):
        """Return the keys of the wrapped dictionary."""
        return self._value.keys()
    def _assign(self, name, value):
        # Bypass our __setattr__, which only allows writes to existing
        # leaf keys of the wrapped dictionary.
        super(Node, self).__setattr__(name, value)
    def __str__(self):
        # The dotted path from the configuration root to this node.
        return '.'.join(self._path)
    def __getattr__(self, name):
        # This method is only called when normal attribute lookup fails.
        child_path = self._path[:]
        child_path.append(name)
        if name in self._value:
            child_value = self._value[name]
            if isinstance(child_value, dict):
                # Wrap nested dictionaries so attribute access chains.
                return Node(child_value, child_path)
            else:
                # Return the actual value for leaf nodes.
                return child_value
        else:
            raise AttributeError(
                'No such config node: {0}'.format('.'.join(child_path)))
    def __setattr__(self, name, value):
        # This method is always triggered by self.name = ...
        child_path = self._path[:]
        child_path.append(name)
        if name in self._value:
            child_value = self._value[name]
            if isinstance(child_value, dict):
                raise AttributeError(
                    'Cannot assign to non-leaf config node: {0}'
                    .format('.'.join(child_path)))
            else:
                self._value[name] = value
        else:
            raise AttributeError(
                'No such config node: {0}'.format('.'.join(child_path)))
class Configuration(Node):
    """ Configuration parameters container and utilities.

    This class specifies the required top-level keys and delegates the
    interpretation and validation of their values to other functions.

    Parameters
    ----------
    config : dict
        Dictionary of configuration parameters, normally obtained by parsing
        a YAML file with :func:`load_config`.

    Raises
    ------
    ValueError
        Missing required top-level configuration key.

    Attributes
    ----------
    wavelength : astropy.units.Quantity
        Array of linearly increasing wavelength values used for all simulation
        calculations. Determined by the wavelength_grid configuration
        parameters.
    abs_base_path : str
        Absolute base path used for loading tabulated data. Determined by
        the base_path configuration parameter.
    """
    def __init__(self, config):
        Node.__init__(self, config)
        self.update()
    def update(self):
        """
        Update this configuration.

        Updates the wavelength and abs_base_path attributes based on
        the current settings of the wavelength_grid and base_path nodes.

        Raises
        ------
        ValueError
            Invalid wavelength grid, or an environment variable referenced
            by base_path is not set.
        """
        # Initialize our wavelength grid: uniform from grid.min to grid.max
        # (inclusive) with spacing grid.step, in units of grid.unit.
        grid = self.wavelength_grid
        nwave = 1 + int(math.floor(
            (grid.max - grid.min) / grid.step))
        if nwave <= 0:
            raise ValueError('Invalid wavelength grid.')
        wave_unit = astropy.units.Unit(grid.unit)
        wave = (grid.min + grid.step * np.arange(nwave)) * wave_unit
        # Use _assign to bypass Node.__setattr__, which only allows writes
        # to keys already present in the config dictionary.
        self._assign('wavelength', wave)
        # Use environment variables to interpolate {NAME} in the base path.
        base_path = self.base_path
        if base_path == '<PACKAGE_DATA>':
            # Special value: use the data/ directory shipped with the package.
            self._assign(
                'abs_base_path', astropy.utils.data._find_pkg_data_path('data'))
        else:
            try:
                self._assign('abs_base_path', base_path.format(**os.environ))
            except KeyError as e:
                raise ValueError('Environment variable not set: {0}.'.format(e))
    def get_sky(self, parent):
        """
        Create a sky coordinate from a configuration node.

        Parameters
        ----------
        parent : :class:`Node`
            Parent node in this configuration whose ``sky`` child
            will be processed.

        Returns
        -------
        astropy.coordinates.SkyCoord
            Sky coordinates object constructed from node parameters.
        """
        node = parent.sky
        # Node.__getattr__ raises AttributeError for missing keys, so
        # getattr's default kicks in when no frame is configured.
        frame = getattr(node, 'frame', None)
        return astropy.coordinates.SkyCoord(node.coordinates, frame=frame)
    def get_timestamp(self, parent):
        """
        Create a timestamp from a configuration node.

        Parameters
        ----------
        parent : :class:`Node`
            Parent node in this configuration whose ``timestamp`` child
            will be processed.

        Returns
        -------
        astropy.time.Time
            Timestamp object constructed from node parameters.
        """
        node = parent.timestamp
        # 'format' and 'scale' are optional; astropy infers them when None.
        format = getattr(node, 'format', None)
        scale = getattr(node, 'scale', None)
        return astropy.time.Time(node.when, format=format, scale=scale)
    def get_constants(self, parent, required_names=None, optional_names=None):
        """
        Interpret a constants node in this configuration.

        Constant values are parsed by :func:`parse_quantity`.

        Parameters
        ----------
        parent : :class:`Node`
            Parent node in this configuration whose ``constants`` child
            will be processed.
        required_names : iterable or None
            List of constant names that are required to be present for this
            method to succeed. If None, then no specific names are required.
            When specified, exactly these names are required and any other
            names will raise a RuntimeError.
        optional_names : iterable or None
            List of constant names that are optional for the parent node.
            When specified, all non-required names must be listed here or
            else a RuntimeError will be raised.

        Returns
        -------
        dict
            Dictionary of (name, value) pairs where each value is an
            :class:`astropy.units.Quantity`. When ``required_names`` is
            specified, they are guaranteed to be present as keys of the
            returned dictionary.

        Raises
        ------
        RuntimeError
            Constants present in the node do not match the required or
            optional names, or a value cannot be parsed.
        """
        constants = {}
        node = parent.constants
        if node is None:
            names = []
        else:
            names = sorted(node.keys())
        # All required names must be present, if specified.
        if required_names is not None:
            if not (set(required_names) <= set(names)):
                raise RuntimeError(
                    'Expected {0} for "{1}.constants"'
                    .format(required_names, parent))
            else:
                extra_names = set(names) - set(required_names)
        else:
            extra_names = set(names)
        # All non-required names must be listed in optional_names, if specified.
        if optional_names is not None:
            extra_names -= set(optional_names)
        # If either required_names or optional_names is specified, there
        # should not be any extra names.
        if required_names is not None or optional_names is not None:
            if extra_names:
                raise RuntimeError(
                    'Unexpected "{0}.constants" names: {1}.'
                    .format(parent, extra_names))
        for name in names:
            value = getattr(node, name)
            try:
                if is_string(value):
                    # Strings may carry units, e.g. "1.23 um".
                    constants[name] = parse_quantity(value)
                else:
                    # Bare numbers become dimensionless quantities.
                    constants[name] = astropy.units.Quantity(float(value))
            except ValueError:
                raise RuntimeError('Invalid value for {0}.{1}: {2}'
                                   .format(node, name, value))
        return constants
    def load_table(self, parent, column_names, interpolate=True, as_dict=False):
        """
        Load and interpolate tabular data from one or more files.

        Reads a single file if parent.table.path exists, or else reads
        multiple files if parent.table.paths exists (and returns a dictionary).
        If as_dict is True, always return a dictionary using the 'default' key
        when only a single parent.table.path is present.

        Parameters
        ----------
        parent : :class:`Node`
            Parent node in this configuration whose ``table`` child will be
            processed.
        column_names : str or list of str
            Name(s) of the columns to load.  A single string returns the
            column directly instead of a dictionary of columns.
        interpolate : bool
            When True, a 'wavelength' column is also required and all other
            columns are linearly interpolated onto this configuration's
            wavelength grid.
        as_dict : bool
            Always return a dictionary keyed by table name, even for a
            single table (under the key 'default').

        Raises
        ------
        RuntimeError
            The configured columns do not match the requested names, units
            are inconsistent, or wavelength units are missing.
        """
        node = parent.table
        # Check that the required column names are present.
        if is_string(column_names):
            return_scalar = True
            column_names = [column_names]
        else:
            return_scalar = False
        required_names = column_names[:]
        if interpolate:
            required_names.append('wavelength')
        required_names = sorted(required_names)
        columns = node.columns
        config_column_names = sorted(columns.keys())
        if required_names != config_column_names:
            raise RuntimeError('Expected {0} for "{1}"'.format(required_names, columns))
        # Prepare the arguments we will send to astropy.table.Table.read()
        read_args = {}
        keys = node.keys()
        for key in ('format', 'hdu'):
            if key in keys:
                read_args[key] = getattr(node, key)
        # Prepare a list of paths we will load tables from.
        paths = []
        path_keys = None
        try:
            paths.append(os.path.join(self.abs_base_path, node.path)) # Look for parent.table.path first.
        except AttributeError:
            # No single path: fall back to the dictionary of named paths.
            path_keys = list(node.paths.keys())
            for key in path_keys:
                path = getattr(node.paths, key)
                paths.append(os.path.join(self.abs_base_path, path))
        tables = {}
        # Loop over tables to load.
        for i, path in enumerate(paths):
            key = path_keys[i] if path_keys else 'default'
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', category=astropy.units.core.UnitsWarning)
                table = astropy.table.Table.read(path, **read_args)
            ## if self.verbose:
            # NOTE(review): the guard above was commented out, so this debug
            # message is printed unconditionally on every load.
            print('Loaded {0} rows from {1} with args {2}'.format(len(table), path, read_args))
            # Loop over columns to read.
            loaded_columns = {}
            for config_name in config_column_names:
                column = getattr(columns, config_name)
                try:
                    column_data = table.columns[column.index] # Look up the column data by index first, then by name.
                except AttributeError:
                    column_data = table[column.name]
                # NOTE(review): column_values is assigned but never used.
                column_values = column_data.data
                try:
                    column_unit = astropy.units.Unit(column.unit) # Resolve column units.
                except AttributeError:
                    column_unit = None
                try:
                    override_unit = column.override_unit
                    assert override_unit in (True, False)
                except AttributeError:
                    override_unit = False
                if override_unit or column_data.unit is None:
                    if column_unit is not None:
                        # Assign the unit specified in our config.
                        column_data.unit = column_unit
                else:
                    # The file provides a unit; it must agree with the config.
                    if ((column_unit is not None) and (column_unit != column_data.unit)):
                        raise RuntimeError('Units do not match for "{0}".'.format(column))
                if interpolate:
                    loaded_columns[config_name] = column_data
                else:
                    unit = column_data.unit
                    if unit:
                        loaded_columns[config_name] = column_data.data * unit
                    else:
                        loaded_columns[config_name] = column_data.data
            if interpolate:
                wavelength_column = loaded_columns['wavelength']
                # Convert wavelength column units if necessary.
                if wavelength_column.unit is None:
                    raise RuntimeError('Wavelength units required for "{0}"'.format(columns))
                wavelength = wavelength_column.data * wavelength_column.unit
                if wavelength.unit != self.wavelength.unit:
                    wavelength = wavelength.to(self.wavelength.unit)
                # Initialize extrapolation if requested.
                try:
                    fill_value = node.extrapolated_value
                    bounds_error = False
                except AttributeError:
                    # Without an extrapolated_value, out-of-range wavelengths
                    # raise an error during interpolation.
                    fill_value = None
                    bounds_error = True
                # Loop over other columns to interpolate onto our wavelength grid.
                for column_name in column_names:
                    interpolator = scipy.interpolate.interp1d(wavelength.value, loaded_columns[column_name].data, kind='linear', copy=False, bounds_error=bounds_error, fill_value=fill_value)
                    interpolated_values = interpolator(self.wavelength.value)
                    unit = loaded_columns[column_name].unit
                    if unit:
                        interpolated_values = interpolated_values * unit
                    loaded_columns[column_name] = interpolated_values
                # Delete the temporary wavelength column now we have finished using it for interpolation.
                del loaded_columns['wavelength']
            if return_scalar:
                # Return just the one column that was requested.
                tables[key] = loaded_columns[column_names[0]]
            else:
                # Return a dictionary of all requested columns.
                tables[key] = loaded_columns
        if path_keys is None and not as_dict:
            return tables['default']
        else:
            return tables
    def load_table2d(self, node, y_column_name, x_column_prefix):
        """
        Read values for some quantity tabulated along 2 axes.

        Parameters
        ----------
        node : :class:`Node`
            Configuration node providing ``path`` and optionally ``format``
            for :meth:`astropy.table.Table.read`.
        y_column_name : str
            Name of the column containing y coordinate values.
        x_column_prefix : str
            Prefix for column names at different values of the x coordinate. The
            remainder of the column name must be interpretable by
            :meth:`specsim.config.parse_quantity` as the x coordinate value.
            Values in each column correspond to ``data[:, x]``.

        Returns
        -------
        callable
            A 2D linear interpolator in (x, y) that handles units correctly,
            built on :class:`scipy.interpolate.RectBivariateSpline`.

        Raises
        ------
        RuntimeError
            Column units (x or data) are inconsistent across columns.
        """
        path = os.path.join(self.abs_base_path, node.path)
        fmt = getattr(node, 'format', None)
        table = astropy.table.Table.read(path, format=fmt)
        ny = len(table)
        y_col = table[y_column_name]
        y_value = np.array(y_col.data)
        if y_col.unit is not None:
            y_unit = y_col.unit
        else:
            # Sentinel meaning "dimensionless / no unit conversion needed".
            y_unit = 1
        # Look for columns whose name has the specified prefix.
        x_value, x_index = [], []
        x_unit, data_unit = 1, 1
        for i, colname in enumerate(table.colnames):
            if colname.startswith(x_column_prefix):
                # Parse the column name as a value.
                x = parse_quantity(colname[len(x_column_prefix):])
                if x_unit == 1:
                    x_unit = x.unit
                elif x_unit != x.unit:
                    raise RuntimeError('Column unit mismatch: {0} != {1}.'
                                       .format(x_unit, x.unit))
                if data_unit == 1:
                    data_unit = table[colname].unit
                elif data_unit != table[colname].unit:
                    raise RuntimeError('Data unit mismatch: {0} != {1}.'
                                       .format(data_unit, table[colname].unit))
                x_value.append(x.value)
                x_index.append(i)
        # Extract values for each x,y pair.
        nx = len(x_value)
        data = np.empty((nx, ny))
        for j, i in enumerate(x_index):
            data[j] = table.columns[i].data
        # NOTE(review): `verbose` is not defined by this class; it presumably
        # must be a top-level key of the config dictionary, otherwise this
        # raises AttributeError -- confirm against the config schema.
        if self.verbose:
            print('Loaded {0} x {1} values from {2}.'.format(nx, ny, path))
        # Build a 2D linear interpolator.
        interpolator = scipy.interpolate.RectBivariateSpline(
            x_value, y_value, data, kx=1, ky=1, s=0)
        # Return a wrapper that handles units. Note that default parameters
        # are used below to capture values (rather than references) in the
        # lambda closures.
        if x_unit != 1:
            get_x = lambda x, u=x_unit: x.to(u).value
        else:
            get_x = lambda x: np.asarray(x)
        if y_unit != 1:
            get_y = lambda y, u=y_unit: y.to(u).value
        else:
            get_y = lambda y: np.asarray(y)
        return (
            lambda x, y, f=interpolator, u=data_unit:
            f.ev(get_x(x), get_y(y)) * u)
    def load_fits2d(self, filename, xy_unit, **hdus):
        """
        Load the specified FITS file.

        The data in each image HDU is interpreted with x mapped to columns
        (NAXIS1) and y mapped to rows (NAXIS2). The x, y coordinates are
        inferred from each image HDUs basic WCS parameters.

        The returned interpolators expect parameters with units and return
        interpolated values with units. Units for x, y are specified via
        a parameter and assumed to be the same for all HDUs. Units for
        the interpolated data are taken from the BUNIT header keyword, and
        must be interpretable by astropy.

        Parameters
        ----------
        filename : str
            Name of the file to read, relative to abs_base_path.
        xy_unit : astropy.units.Unit
            Unit of x, y coordinates.
        hdus : dict
            Dictionary of name, hdu mappings where each hdu is specified by
            its integer offset or its name.

        Returns
        -------
        dict
            Dictionary of 2D linear interpolators corresponding to each hdu,
            with the same keys that appear in the hdus input parameter.

        Raises
        ------
        KeyError
            Missing BUNIT header keyword for an HDU.
        ValueError
            Invalid BUNIT value for an HDU.
        """
        path = os.path.join(self.abs_base_path, filename)
        hdu_list = astropy.io.fits.open(path, memmap=False)
        interpolators = {}
        for name in hdus:
            hdu = hdu_list[hdus[name]]
            ny, nx = hdu.data.shape
            # Use the header WCS to reconstruct the x,y grids.
            wcs = astropy.wcs.WCS(hdu.header)
            x, _ = wcs.wcs_pix2world(np.arange(nx), [0], 0)
            _, y = wcs.wcs_pix2world([0], np.arange(ny), 0)
            try:
                bunit = hdu.header['BUNIT']
                data_unit = astropy.units.Unit(bunit)
            except KeyError:
                raise KeyError('Missing BUNIT header keyword for HDU {0}.'
                               .format(hdus[name]))
            except ValueError:
                raise ValueError('Invalid BUNIT "{0}" for HDU {1}.'
                                 .format(bunit, hdus[name]))
            dimensionless_interpolator = scipy.interpolate.RectBivariateSpline(
                x, y, hdu.data, kx=1, ky=1, s=0)
            # Note that the default arg values are used to capture the
            # current values of dimensionless_interpolator and data_unit
            # in the closure of this inner function.
            def interpolator(x, y, f=dimensionless_interpolator, u=data_unit):
                return f.ev(x.to(xy_unit).value, y.to(xy_unit).value) * u
            interpolators[name] = interpolator
            # NOTE(review): see load_table2d -- `verbose` must come from the
            # config dictionary or this raises AttributeError.
            if self.verbose:
                print('Loaded {0} from HDU[{1}] of {2}.'
                      .format(name, hdus[name], path))
        hdu_list.close()
        return interpolators
def load_config(name, config_type=Configuration):
    '''
    Load configuration data from a YAML file.

    Valid configuration files are YAML files containing no custom types, no
    sequences (lists), and with all mapping (dict) keys being valid python
    identifiers.

    Parameters
    ----------
    name : str
        Name of the configuration to load, which can either be a pre-defined
        name or else the name of a yaml file (with extension .yaml) to load.
        Pre-defined names are mapped to corresponding files in this package's
        data/config/ directory.
    config_type : type
        Class used to wrap the parsed configuration; must accept the parsed
        dictionary as its only constructor argument.

    Returns
    -------
    Configuration
        Initialized configuration object.

    Raises
    ------
    ValueError
        File name has wrong extension or does not exist.
    RuntimeError
        Configuration data failed a validation test.
    '''
    # Fix: removed a leftover debug print of the resolved file name and the
    # unused `base_name` local.
    extension = os.path.splitext(name)[1]
    if extension not in ('', '.yaml'):
        raise ValueError('Config file must have .yaml extension.')
    if extension:
        file_name = name
    else:
        # Map a pre-defined name to a file shipped with the package.
        file_name = astropy.utils.data._find_pkg_data_path(
            'data/config/{0}.yaml'.format(name))
    if not os.path.isfile(file_name):
        raise ValueError('No such config file: "{0}".'.format(file_name))
    # Validate that all mapping keys are valid python identifiers and that no
    # sequences appear, since Node only supports nested dicts of scalars.
    valid_key = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*\Z')
    with open(file_name) as f:
        next_value_is_key = False
        for token in yaml.scan(f):
            if isinstance(token, (yaml.BlockSequenceStartToken,
                                  yaml.FlowSequenceStartToken)):
                raise RuntimeError('Config sequences not implemented yet.')
            if next_value_is_key:
                if not isinstance(token, yaml.ScalarToken):
                    raise RuntimeError(
                        'Invalid config key type: {0}'.format(token))
                if not valid_key.match(token.value):
                    raise RuntimeError(
                        'Invalid config key name: {0}'.format(token.value))
            # A KeyToken means the next scalar token is a mapping key.
            next_value_is_key = isinstance(token, yaml.KeyToken)
    with open(file_name) as f:
        return config_type(yaml.safe_load(f))
| [
"mjw@roe.ac.uk"
] | mjw@roe.ac.uk |
1dbfce8ff2e4ea921f1c9038ecf9b33e745429a5 | 0213dfa0195d88fd8bfa3027a5888c2b98c79562 | /firebase_login.py | e20b5747826492447e98c86b398125b5e2bcf6ef | [] | no_license | DavidArmendariz/dash-python | 9cd02a06eab46a49ec33e430d9c470263b8d06c6 | f0574c73b635929d1842dd27834e339b048516b5 | refs/heads/master | 2023-01-02T18:43:50.976001 | 2020-11-02T16:36:13 | 2020-11-02T16:40:39 | 222,872,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | import requests
# REST endpoint of the Google Identity Toolkit used by Firebase Auth to
# verify an email/password pair.
_verify_password_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword'
# NOTE(review): placeholder value -- must be replaced with the project's
# Firebase Web API key before use (better: load it from an environment
# variable instead of committing it to source control).
api_key = "your_api_key"
def sign_in_with_password(email, password):
    """Verify an email/password pair against the Firebase Auth REST API.

    Parameters
    ----------
    email : str
        Account email address.
    password : str
        Account password.

    Returns
    -------
    bool
        True if the credentials were accepted (HTTP 2xx), False otherwise.
    """
    body = {'email': email, 'password': password}
    params = {'key': api_key}
    # Fix: a bare `except:` previously swallowed *every* exception, including
    # KeyboardInterrupt and programming errors.  Catch only request-related
    # failures (network errors and non-2xx responses via raise_for_status).
    try:
        resp = requests.request(
            'post', _verify_password_url, params=params, json=body)
        resp.raise_for_status()
    except requests.exceptions.RequestException:
        return False
    return True
"darmendariz1998@outlook.com"
] | darmendariz1998@outlook.com |
c2f5082906869508409e4d0c94b359cb332319c9 | 241724e83f5c12ed9d7dd3b825dfe4e2b1b0f777 | /pde/grids/spherical.py | 473250a053f7850d04875ebd1b259c242fdf6dc5 | [
"MIT"
] | permissive | xuanxu/py-pde | d8be358ab76d4060b14afc74bc7d836591c6188e | de33d938aea8680eff872ae1b64569895662a248 | refs/heads/master | 2021-03-09T21:37:13.920717 | 2020-03-10T12:18:03 | 2020-03-10T12:18:03 | 246,382,909 | 0 | 0 | MIT | 2020-03-10T18:54:22 | 2020-03-10T18:54:22 | null | UTF-8 | Python | false | false | 26,200 | py | '''
Spherically-symmetric grids in 2 and 3 dimensions. These are grids that only
discretize the radial direction, assuming symmetry with respect to all angles.
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
'''
from abc import ABCMeta
from typing import Tuple, Dict, Any, Union, Callable, Generator, TYPE_CHECKING
import numpy as np
from scipy import interpolate
from .base import GridBase, discretize_interval, _check_shape
from .cartesian import CartesianGridBase, CartesianGrid
from ..tools.spherical import volume_from_radius
from ..tools.cache import cached_property, cached_method
if TYPE_CHECKING:
from .boundaries import Boundaries # @UnusedImport
# surface area factor of the unit sphere in 3d (4 * pi)
PI_4 = 4 * np.pi
# volume factor of the unit ball in 3d (4/3 * pi)
PI_43 = 4 / 3 * np.pi
class SphericalGridBase(GridBase, metaclass=ABCMeta):
r""" Base class for d-dimensional spherical grids with angular symmetry
The angular symmetry implies that states only depend on the radial
coordinate :math:`r`, which is discretized uniformly as
.. math::
r_i = R_\mathrm{inner} + \left(i + \frac12\right) \Delta r
\quad \text{for} \quad i = 0, \ldots, N - 1
\quad \text{with} \quad
\Delta r = \frac{R_\mathrm{outer} - R_\mathrm{inner}}{N}
where :math:`R_\mathrm{outer}` is the outer radius of the grid and
:math:`R_\mathrm{inner}` corresponds to a possible inner radius, which is
zero by default. The radial direction is discretized by :math:`N` support
points.
"""
periodic = [False] # the radial axis is not periodic
num_axes = 1 # the number of independent axes
    def __init__(self, radius: Union[float, Tuple[float, float]],
                 shape: Union[Tuple[int], int]):
        r"""
        Initialize the radial discretization of the grid.

        Args:
            radius (float or tuple of floats):
                radius :math:`R_\mathrm{outer}` in case a simple float is
                given. If a tuple is supplied it is interpreted as the inner
                and outer radius,
                :math:`(R_\mathrm{inner}, R_\mathrm{outer})`.
            shape (tuple or int): A single number setting the number
                :math:`N` of support points along the radial coordinate

        Raises:
            ValueError: If `shape` is not a single number, the inner radius
                is negative, or the outer radius does not exceed the inner
                radius.
        """
        super().__init__()
        shape_list = _check_shape(shape)
        if not len(shape_list) == 1:
            raise ValueError('`shape` must be a single number, not '
                             f'{shape_list}')
        self.shape: Tuple[int] = (int(shape_list[0]),)
        # Accept either a plain radius or an (inner, outer) pair; a plain
        # number is not iterable, which triggers the TypeError branch.
        try:
            r_inner, r_outer = radius  # type: ignore
        except TypeError:
            r_inner, r_outer = 0, float(radius)  # type: ignore
        if r_inner < 0:
            raise ValueError('Inner radius must be positive')
        if r_inner >= r_outer:
            raise ValueError('Outer radius must be larger than inner radius')
        # radial discretization: cell centers and uniform spacing dr
        rs, dr = discretize_interval(r_inner, r_outer, self.shape[0])
        self.axes_coords = (rs,)
        self.axes_bounds = ((r_inner, r_outer),)
        self.discretization = np.array((dr,))
@property
def state(self) -> Dict[str, Any]:
""" state: the state of the grid """
return {'radius': self.radius,
'shape': self.shape}
@property
def has_hole(self) -> bool:
""" returns whether the inner radius is larger than zero """
return self.axes_bounds[0][0] > 0
@classmethod
def from_state(cls, state) -> "SphericalGridBase":
""" create a field from a stored `state`.
Args:
state (dict):
The state from which the grid is reconstructed.
"""
state_copy = state.copy()
obj = cls(radius=state_copy.pop('radius'),
shape=state_copy.pop('shape'))
if state_copy:
raise ValueError(f'State items {state_copy.keys()} were not used')
return obj
@property
def radius(self) -> Union[float, Tuple[float, float]]:
""" float: radius of the sphere """
r_inner, r_outer = self.axes_bounds[0]
if r_inner == 0:
return r_outer
else:
return r_inner, r_outer
@property
def volume(self) -> float:
""" float: total volume of the grid """
r_inner, r_outer = self.axes_bounds[0]
volume = volume_from_radius(r_outer, dim=self.dim)
if r_inner > 0:
volume -= volume_from_radius(r_inner, dim=self.dim)
return volume
    @cached_property()
    def cell_volume_data(self):
        """ :class:`numpy.ndarray`: the volumes of all cells """
        dr = self.discretization[0]
        rs = self.axes_coords[0]
        # cell i spans [r_i - dr/2, r_i + dr/2]; its volume is the difference
        # between the enclosing and enclosed sphere volumes
        volumes_h = volume_from_radius(rs + 0.5 * dr, dim=self.dim)
        volumes_l = volume_from_radius(rs - 0.5 * dr, dim=self.dim)
        return (volumes_h - volumes_l).reshape(self.shape[0])
def contains_point(self, point):
""" check whether the point is contained in the grid
Args:
point (vector): Coordinates of the point
"""
point = np.atleast_1d(point)
if point.shape[-1] != self.dim:
raise ValueError('Dimension mismatch')
r = np.linalg.norm(point, axis=-1)
r_inner, r_outer = self.axes_bounds[0]
return (r_inner <= r <= r_outer)
def get_random_point(self, boundary_distance: float = 0,
cartesian: bool = True, avoid_center: bool = False):
""" return a random point within the grid
Note that these points will be uniformly distributed on the radial axis,
which implies that they are not uniformly distributed in the volume.
Args:
boundary_distance (float): The minimal distance this point needs to
have from all boundaries.
cartesian (bool): Determines whether the point is returned in
Cartesian coordinates or grid coordinates.
avoid_center (bool): Determines whether the boundary distance
should also be kept from the center, i.e., whether points close
to the center are returned.
Returns:
:class:`numpy.ndarray`: The coordinates of the point
"""
# handle the boundary distance
r_inner, r_outer = self.axes_bounds[0]
r_min = r_inner
if avoid_center:
r_min += boundary_distance
r_mag = r_outer - boundary_distance - r_min
if r_mag <= 0:
raise RuntimeError('Random points would be too close to boundary')
# create random point
r = np.array([r_mag * np.random.random() + r_min])
if cartesian:
return self.point_to_cartesian(r)
else:
return r
def get_line_data(self, data, extract: str = 'auto') -> Dict[str, Any]:
""" return a line cut along the radial axis
Args:
data (:class:`numpy.ndarray`):
The values at the grid points
extract (str):
Determines which cut is done through the grid. This parameter is
mainly supplied for a consistent interface and has no effect for
polar grids.
Returns:
A dictionary with information about the line cut, which is
convenient for plotting.
"""
if extract not in {'auto', 'r', 'radial'}:
raise ValueError(f'Unknown extraction method `{extract}`')
return {'data_x': self.axes_coords[0],
'data_y': data,
'extent_x': self.axes_bounds[0],
'label_x': self.axes[0]}
    def get_image_data(self, data, performance_goal: str = 'speed',
                       fill_value: float = 0) -> Dict[str, Any]:
        """ return a 2d-image of the data

        Args:
            data (:class:`numpy.ndarray`): The values at the grid points
            performance_goal (str): Determines the method chosen for
                interpolation. Possible options are `speed` and `quality`.
            fill_value (float): The value assigned to invalid positions (those
                inside the hole or outside the region covered by the data).

        Returns:
            A dictionary with information about the image, which is convenient
            for plotting.
        """
        r_outer = self.axes_bounds[0][1]
        r_data = self.axes_coords[0]
        # build the positive half of the image axis; for grids with a hole
        # a fresh grid from 0 to r_outer is needed so the hole is covered
        if self.has_hole:
            num = int(np.ceil(r_outer / self.discretization[0]))
            x_positive, _ = discretize_interval(0, r_outer, num)
        else:
            x_positive = r_data
        # mirror the axis to obtain a symmetric image around the origin
        x = np.r_[-x_positive[::-1], x_positive]
        xs, ys = np.meshgrid(x, x, indexing='ij')
        # radial distance of every image pixel from the origin
        r_img = np.hypot(xs, ys)
        if performance_goal == 'speed':
            # interpolate over the new coordinates using linear interpolation
            f = interpolate.interp1d(r_data, data, copy=False,
                                     bounds_error=False,
                                     fill_value=fill_value,
                                     assume_sorted=True)
            data_int = f(r_img.flat).reshape(r_img.shape)
        elif performance_goal == 'quality':
            # interpolate over the new coordinates using radial base function
            f = interpolate.Rbf(r_data, data, function='cubic')
            data_int = f(r_img)
        else:
            raise ValueError(f'Performance goal `{performance_goal}` undefined')
        return {'data': data_int,
                'x': x, 'y': x,
                'extent': (-r_outer, r_outer, -r_outer, r_outer),
                'label_x': 'x', 'label_y': 'y'}
def iter_mirror_points(self, point, with_self: bool = False,
only_periodic: bool = True) -> Generator:
""" generates all mirror points corresponding to `point`
Args:
point (:class:`numpy.ndarray`): the point within the grid
with_self (bool): whether to include the point itself
only_periodic (bool): whether to only mirror along periodic axes
Returns:
A generator yielding the coordinates that correspond to mirrors
"""
point = np.asanyarray(point, dtype=np.double)
if with_self:
yield point
def normalize_point(self, point, reduced_coords: bool = False):
""" normalize coordinates, which is a no-op for spherical coordinates.
Args:
point (:class:`numpy.ndarray`): Coordinates of a single point
reduced_coords (bool): Flag determining whether only the coordinates
corresponding to axes in this grid are given
Returns:
:class:`numpy.ndarray`: The input array
"""
point = np.asarray(point, dtype=np.double)
size = self.num_axes if reduced_coords else self.dim
if point.size == 0:
return np.zeros((0, size))
if point.shape[-1] != size:
raise ValueError('Dimension mismatch: Array of shape '
f'{point.shape} does not describe points of '
f'dimension {size}.')
return point
def point_from_cartesian(self, points):
""" convert points given in Cartesian coordinates to this grid
Args:
points (:class:`numpy.ndarray`):
Points given in Cartesian coordinates.
Returns:
:class:`numpy.ndarray`: Points given in the coordinates of the grid
"""
points = self.normalize_point(points)
return np.linalg.norm(points, axis=-1, keepdims=True)
def cell_to_point(self, cells, cartesian: bool = True):
""" convert cell coordinates to real coordinates
This function returns points restricted to the x-axis, i.e., the
y-coordinate will be zero.
Args:
cells (:class:`numpy.ndarray`):
Indices of the cells whose center coordinates are requested.
This can be float values to indicate positions relative to the
cell center.
cartesian (bool):
Determines whether the point is returned in Cartesian
coordinates or grid coordinates.
Returns:
:class:`numpy.ndarray`: The center points of the respective cells
"""
cells = np.atleast_1d(cells)
# convert from cells indices to grid coordinates
r_inner, _ = self.axes_bounds[0]
points = r_inner + (cells + 0.5) * self.discretization[0]
if cartesian:
return self.point_to_cartesian(points)
else:
return points
def point_to_cell(self, points):
""" Determine cell(s) corresponding to given point(s)
This function respects periodic boundary conditions, but it does not
throw an error when coordinates lie outside the bcs (for
non-periodic axes).
Args:
points (:class:`numpy.ndarray`): Real coordinates
Returns:
:class:`numpy.ndarray`: The indices of the respective cells
"""
# convert from grid coordinates to cells indices
r_inner, _ = self.axes_bounds[0]
r = self.point_from_cartesian(points)
cells = (r - r_inner) / self.discretization[0]
return cells.astype(np.int)
def difference_vector_real(self, p1, p2):
""" return the vector pointing from p1 to p2.
In case of periodic boundary conditions, the shortest vector is returned
Args:
p1 (:class:`numpy.ndarray`): First point(s)
p2 (:class:`numpy.ndarray`): Second point(s)
Returns:
:class:`numpy.ndarray`: The difference vectors between the points
with periodic boundary conditions applied.
"""
return np.atleast_1d(p2) - np.atleast_1d(p1)
def polar_coordinates_real(self, origin=[0, 0, 0], ret_angle: bool = False):
""" return spherical coordinates associated with the grid
Args:
origin (vector): Coordinates of the origin at which the polar
coordinate system is anchored. Note that this must be of the
form `[0, 0, z_val]`, where only `z_val` can be chosen freely.
ret_angle (bool): Determines whether angles are returned alongside
the distance. If `False` only the distance to the origin is
returned for each support point of the grid.
If `True`, the distance and angles are returned. Note that in
the case of spherical grids, this angle is zero by convention.
"""
origin = np.array(origin, dtype=np.double, ndmin=1)
if not np.array_equal(origin, np.zeros(self.dim)):
raise RuntimeError(f'Origin must be {str([0]*self.dim)}')
# the distance to the origin is exactly the radial coordinate
rs = self.axes_coords[0]
if ret_angle:
return rs, np.zeros_like(rs)
else:
return rs
    def get_boundary_conditions(self, bc='natural') -> "Boundaries":
        """ constructs boundary conditions from a flexible data format.

        Note that the inner boundary condition for grids without holes need
        not be specified: spherical symmetry implies a vanishing derivative
        at the origin.

        Args:
            bc (str or list or tuple or dict):
                Boundary conditions specified in the flexible format accepted
                by :func:`pde.grids.boundaries.Boundaries.from_data`.

        Raises:
            ValueError: If the data given in `bc` cannot be read
            PeriodicityError: If the boundaries are not compatible with the
                periodic axes of the grid.
        """
        # imports are local to avoid a circular dependency with the
        # boundaries subpackage
        from .boundaries.local import BCBase, NeumannBC
        from .boundaries.axis import BoundaryPair
        from .boundaries import Boundaries  # @Reimport
        if self.has_hole:  # grid has holes => specify two boundary conditions
            if bc == 'natural':
                b_pair = BoundaryPair(NeumannBC(self, 0, upper=False),
                                      NeumannBC(self, 0, upper=True))
            else:
                b_pair = BoundaryPair.from_data(self, 0, bc)
        else:  # grid has no hole => need only one boundary condition
            # the inner condition is fixed by symmetry to vanishing derivative
            b_inner = NeumannBC(self, 0, upper=False)
            if bc == 'natural':
                b_pair = BoundaryPair(b_inner, NeumannBC(self, 0, upper=True))
            else:
                try:
                    # first try interpreting `bc` as a single (outer)
                    # boundary condition
                    b_outer = BCBase.from_data(self, 0, upper=True, data=bc)
                except ValueError:
                    # this can happen when two boundary conditions have been
                    # supplied
                    b_pair = BoundaryPair.from_data(self, 0, bc)
                    if b_inner != b_pair.low:
                        raise ValueError(
                            f'Unsupported boundary format: `{bc}`. Note that '
                            'spherical symmetry implies vanishing derivatives '
                            'at the origin at r=0 for all fields. This '
                            'boundary condition need not be specified.')
                else:
                    b_pair = BoundaryPair(b_inner, b_outer)
        return Boundaries([b_pair])
def interpolate_to_cartesian(self, data,
                             grid: Union[CartesianGridBase, str] = 'valid',
                             ret_grid: bool = False):
    """ return the given cylindrical `data` in a 3d Cartesian grid

    Args:
        data (:class:`numpy.ndarray`):
            The actual data values on the cylindrical grid.
        grid (str or CartesianGridBase):
            determines the grid on which the data is returned. This can
            either be an instance of CartesianGridBase, the string 'valid'
            or the string 'full'. Using 'valid' only returns points that are
            fully resolved in the spherical grid using an inscribed
            sphere. Conversely, 'full' returns all data, using a
            circumscribed sphere.
        ret_grid (bool):
            Determines whether the respective Rectangular grid should be
            returned alongside the data.

    Returns:
        :class:`numpy.ndarray`: The data values on the Cartesian grid.
        If `ret_grid=True`, a tuple of these data values and the associated
        grid are returned.

    Raises:
        NotImplementedError: For grids with an inner hole.
        TypeError/ValueError: For incompatible `grid` arguments.
    """
    if self.has_hole:
        raise NotImplementedError
    assert data.shape == self.shape

    if isinstance(grid, GridBase):
        # supplied grid is already constructed
        if not isinstance(grid, CartesianGridBase):
            raise TypeError('Supplied grid needs to be of type '
                            '`CartesianGridBase`')
        if grid.dim != self.dim:
            raise ValueError('Dimension mismatch')

    else:
        # need to build the grid instance
        if grid == 'valid':
            # inscribed sphere: shrink bounds by sqrt(dim)
            bounds = self.radius / np.sqrt(self.dim)
        elif grid == 'full':
            # circumscribed sphere: keep the full radius
            bounds = self.radius
        else:
            raise ValueError(f'Do not support grid `{grid}`')
        # choose the resolution so it matches the radial discretization
        num = round(bounds / self.discretization[0])
        grid_bounds = [(-bounds, bounds)] * self.dim
        grid_shape = (2*num,) * self.dim
        grid = CartesianGrid(grid_bounds, grid_shape)

    # distance of every Cartesian support point from the origin
    coords = np.meshgrid(*grid.axes_coords, indexing='ij')
    rs = np.sqrt(sum(c**2 for c in coords))

    # interpolate over the new coordinates using a radial basis function;
    # data only depends on r, so a 1-d interpolant suffices
    f = interpolate.Rbf(*self.axes_coords, data, function='cubic')
    data_int = f(rs)

    if ret_grid:
        return data_int, grid
    else:
        return data_int
class PolarGrid(SphericalGridBase):
    r""" 2-dimensional polar grid assuming angular symmetry

    The angular symmetry implies that states only depend on the radial
    coordinate :math:`r`, which is discretized uniformly as

    .. math::
        r_i = R_\mathrm{inner} + \left(i + \frac12\right) \Delta r
        \quad \text{for} \quad i = 0, \ldots, N - 1
        \quad \text{with} \quad
        \Delta r = \frac{R_\mathrm{outer} - R_\mathrm{inner}}{N}

    where :math:`R_\mathrm{outer}` is the outer radius of the grid and
    :math:`R_\mathrm{inner}` corresponds to a possible inner radius, which is
    zero by default. The radial direction is discretized by :math:`N` support
    points.
    """

    dim = 2  # dimension of the described space
    axes = ['r']  # only the radial axis is resolved explicitly
    axes_symmetric = ['phi']  # the angular axis is implied by symmetry
    coordinate_constraints = [0, 1]  # axes not described explicitly

    def point_to_cartesian(self, points):
        """ convert coordinates of a point to Cartesian coordinates

        This function returns points along the y-coordinate, i.e, the x
        coordinates will be zero.

        Args:
            points (:class:`numpy.ndarray`):
                Points given in the coordinates of the grid (radius only)

        Returns:
            :class:`numpy.ndarray`: The Cartesian coordinates of the point
        """
        points = np.atleast_1d(points)
        if points.shape[-1] != 1:
            raise ValueError(f'Dimension mismatch: Points {points} invalid')
        # map the radius onto the positive y-axis by convention
        y = points[..., 0]
        x = np.zeros_like(y)
        return np.stack((x, y), axis=-1)

    def plot(self, **kwargs):
        r""" visualize the grid

        Draws the cell boundaries as concentric circles (and the origin as a
        single point when the innermost boundary has radius zero).

        Args:
            \**kwargs: Extra arguments are passed on the to the matplotlib
                plotting routines, e.g., to set the color of the lines
        """
        import matplotlib.pyplot as plt

        kwargs.setdefault('color', 'k')
        rb, = self.axes_bounds
        rmax = rb[1]
        ax = plt.gca()
        # one circle per cell boundary, i.e. N + 1 radii
        for r in np.linspace(*rb, self.shape[0] + 1):
            if r == 0:
                plt.plot(0, 0, '.', **kwargs)
            else:
                c = plt.Circle((0, 0), r, fill=False, **kwargs)
                ax.add_artist(c)
        plt.xlim(-rmax, rmax)
        plt.xlabel('x')
        plt.ylim(-rmax, rmax)
        plt.ylabel('y')
        ax.set_aspect(1)

    @cached_method()
    def get_operator(self, op: str, bc) -> Callable:
        """ return a discretized operator defined on this grid

        Args:
            op (str): Identifier for the operator. Some examples are 'laplace',
                'gradient', or 'divergence'.
            bc (str or list or tuple or dict):
                Boundary conditions specified in the flexible format accepted by
                :func:`pde.grids.boundaries.Boundaries.from_data`.

        Returns:
            A function that takes the discretized data as an input and returns
            the data to which the operator `op` has been applied. This function
            optionally supports a second argument, which provides allocated
            memory for the output.
        """
        from .operators import polar
        return polar.make_operator(op, bcs=self.get_boundary_conditions(bc))
class SphericalGrid(SphericalGridBase):
    r""" 3-dimensional spherical grid assuming spherical symmetry

    The symmetry implies that states only depend on the radial coordinate
    :math:`r`, which is discretized as follows:

    .. math::
        r_i = R_\mathrm{inner} + \left(i + \frac12\right) \Delta r
        \quad \text{for} \quad i = 0, \ldots, N - 1
        \quad \text{with} \quad
        \Delta r = \frac{R_\mathrm{outer} - R_\mathrm{inner}}{N}

    where :math:`R_\mathrm{outer}` is the outer radius of the grid and
    :math:`R_\mathrm{inner}` corresponds to a possible inner radius, which is
    zero by default. The radial direction is discretized by :math:`N` support
    points.
    """

    dim = 3  # dimension of the described space
    axes = ['r']  # only the radial axis is resolved explicitly
    axes_symmetric = ['theta', 'phi']  # angular axes implied by symmetry
    coordinate_constraints = [0, 1, 2]  # axes not described explicitly

    def point_to_cartesian(self, points):
        """ convert coordinates of a point to Cartesian coordinates

        This function returns points along the z-coordinate, i.e, the x and y
        coordinates will be zero.

        Args:
            points (:class:`numpy.ndarray`):
                Points given in the coordinates of the grid

        Returns:
            :class:`numpy.ndarray`: The Cartesian coordinates of the point
        """
        points = np.atleast_1d(points)
        if points.shape[-1] != 1:
            raise ValueError(f'Dimension mismatch: Points {points} invalid')
        # map the radius onto the positive z-axis by convention
        z = points[..., 0]
        x = np.zeros_like(z)
        return np.stack((x, x, z), axis=-1)

    @cached_method()
    def get_operator(self, op, bc: str) -> Callable:
        """ return a discretized operator defined on this grid

        Args:
            op (str): Identifier for the operator. Some examples are 'laplace',
                'gradient', or 'divergence'.
            bc (str or list or tuple or dict):
                Boundary conditions specified in the flexible format accepted by
                :func:`pde.grids.boundaries.Boundaries.from_data`.

        Returns:
            A function that takes the discretized data as an input and returns
            the data to which the operator `op` has been applied. This function
            optionally supports a second argument, which provides allocated
            memory for the output.
        """
        from .operators import spherical
        return spherical.make_operator(op, bcs=self.get_boundary_conditions(bc))
| [
"david.zwicker@ds.mpg.de"
] | david.zwicker@ds.mpg.de |
ff4498ec0589e7899a19c0ba51e30c0908698ad8 | 33b92f44ab665c6418742d3142104864312b6597 | /ValveBatchExport/ValveBatchExportRules/QuantificationResults.py | eb6e90791a3eb16bd6f5b6a3f43b9b2d28c20b84 | [
"BSD-3-Clause",
"CC-BY-4.0"
] | permissive | SlicerHeart/SlicerHeart | ebe46aec9fd00f9cee7907fa5ad2774ff3e25c93 | 09e440d1cc821e5b5f01146eddfcbd65edec7f32 | refs/heads/master | 2023-08-17T08:42:01.948700 | 2023-08-16T20:19:14 | 2023-08-16T20:19:14 | 38,175,829 | 78 | 33 | BSD-3-Clause | 2023-09-07T14:07:01 | 2015-06-27T21:23:46 | Python | UTF-8 | Python | false | false | 6,557 | py | import os
import logging
import slicer
from pathlib import Path
from collections import OrderedDict
from .base import ValveBatchExportRule
from HeartValveLib.helpers import getSpecificHeartValveMeasurementNodes, getAllFilesWithExtension
class QuantificationResultsExportRule(ValveBatchExportRule):
    """Batch-export rule that recomputes valve quantification metrics and
    writes them to CSV files in wide, long, and hybrid layouts, plus a
    separate table mapping every metric to its measurement unit."""

    BRIEF_USE = "Valve quantification results (.csv)"
    DETAILED_DESCRIPTION = """Export results computed in Valve quantification module. All metrics will be
    recomputed using current software version
    """

    # column layouts of the exported tables
    WIDE_COLUMNS = ['Filename', 'Phase', 'Measurement']
    LONG_COLUMNS = WIDE_COLUMNS + ['Value']
    UNIT_COLUMNS = ['Measurement', 'Unit']

    WIDE_CSV_OUTPUT_FILENAME = 'QuantificationResults_wide.csv'
    LONG_CSV_OUTPUT_FILENAME = 'QuantificationResults_long.csv'
    HYBRID_CSV_OUTPUT_FILENAME = 'QuantificationResults_hybrid.csv'
    UNITS_CSV_OUTPUT_FILENAME = 'QuantificationUnits.csv'

    OUTPUT_CSV_FILES = [
        WIDE_CSV_OUTPUT_FILENAME,
        LONG_CSV_OUTPUT_FILENAME,
        HYBRID_CSV_OUTPUT_FILENAME,
        UNITS_CSV_OUTPUT_FILENAME
    ]

    CMD_FLAG = "-qr"

    QUANTIFICATION_RESULTS_IDENTIFIER = 'Quantification results'

    def processStart(self):
        """Initialize per-run accumulators and grab the quantification logic."""
        self.unitsDictionary = OrderedDict()  # metric name -> unit string
        self.wideResultsTableNode = self.createTableNode(*self.WIDE_COLUMNS)
        self.longResultsTableNode = self.createTableNode(*self.LONG_COLUMNS)
        # metric name -> filename -> phase -> value (flattened in processEnd)
        self.hybridTempValues = dict()
        self.valveQuantificationLogic = slicer.modules.valvequantification.widgetRepresentation().self().logic

    def processScene(self, sceneFileName):
        """Recompute and collect quantification metrics for one scene file.

        Scenes whose cardiac cycle phase is not among the selected export
        phases are skipped; metric computation failures are logged and the
        offending measurement node is skipped.
        """
        for measurementNode in getSpecificHeartValveMeasurementNodes(self.QUANTIFICATION_RESULTS_IDENTIFIER):
            cardiacCyclePhaseNames = self.valveQuantificationLogic.getMeasurementCardiacCyclePhaseShortNames(measurementNode)
            cardiacCyclePhaseName = ''
            if len(cardiacCyclePhaseNames) == 1:
                cardiacCyclePhaseName = cardiacCyclePhaseNames[0]
                if cardiacCyclePhaseName not in self.EXPORT_PHASES:
                    continue
            elif len(cardiacCyclePhaseNames) > 1:
                # multi-phase comparison node; only export when every phase
                # it references was selected for export
                cardiacCyclePhaseName = "multiple"
                if not all(phaseName in self.EXPORT_PHASES for phaseName in cardiacCyclePhaseNames):
                    logging.debug("Multiple phases compare measurement node found but selected phases don't match those. Skipping")
                    continue

            # Recompute all measurements
            try:
                self.addLog(f"Computing metrics for '{cardiacCyclePhaseName}'")
                self.valveQuantificationLogic.computeMetrics(measurementNode)
            except Exception as exc:
                logging.warning(f"{sceneFileName} failed with error message: \n{exc}")
                import traceback
                traceback.print_exc()
                continue

            quantificationResultsTableNode = \
                self.getTableNode(measurementNode, self.QUANTIFICATION_RESULTS_IDENTIFIER)
            measurementPresetId = self.valveQuantificationLogic.getMeasurementPresetId(measurementNode)

            if quantificationResultsTableNode:
                filename, _ = os.path.splitext(os.path.basename(sceneFileName))

                # long data table
                self.addRowData(self.longResultsTableNode, filename, cardiacCyclePhaseName, "ValveType", measurementPresetId)

                # wide table
                resultsTableRowIndex = \
                    self.addRowData(self.wideResultsTableNode, filename, cardiacCyclePhaseName, measurementPresetId)

                numberOfMetrics = quantificationResultsTableNode.GetNumberOfRows()
                for metricIndex in range(numberOfMetrics):
                    metricName, metricValue, metricUnit = self.getColData(quantificationResultsTableNode, metricIndex, range(3))

                    # wide data table
                    self.setValueInTable(self.wideResultsTableNode, resultsTableRowIndex, metricName, metricValue)

                    # long data table
                    self.addRowData(self.longResultsTableNode, filename, cardiacCyclePhaseName, metricName, metricValue)

                    # hybrid data table (nested dicts, flattened in processEnd)
                    self.hybridTempValues.setdefault(metricName, dict())
                    self.hybridTempValues[metricName].setdefault(filename, dict())
                    self.hybridTempValues[metricName][filename][cardiacCyclePhaseName] = metricValue

                    self.unitsDictionary[metricName] = metricUnit

    def processEnd(self):
        """Write all accumulated tables (units, wide, long, hybrid) to CSV."""
        self._writeUnitsTable()
        self.writeTableNodeToCsv(self.wideResultsTableNode, self.WIDE_CSV_OUTPUT_FILENAME, useStringDelimiter=True)
        self.writeTableNodeToCsv(self.longResultsTableNode, self.LONG_CSV_OUTPUT_FILENAME, useStringDelimiter=True)

        def getPhases():
            # collect every phase name that occurs for any metric/file pair
            allPhases = set()
            for perFile in self.hybridTempValues.values():
                for phaseValues in perFile.values():
                    allPhases.update(phaseValues.keys())
            return allPhases

        # hybrid data table: one row per (metric, file), one column per phase
        phases = sorted(getPhases())
        resultsHybridTableNode = self.createTableNode('Measurement', 'Filename', *phases)
        for metricName, filenames in self.hybridTempValues.items():
            for filename, values in filenames.items():
                phaseValues = [values.get(phase, "") for phase in phases]
                self.addRowData(resultsHybridTableNode, metricName, filename, *phaseValues)
        self.writeTableNodeToCsv(resultsHybridTableNode, self.HYBRID_CSV_OUTPUT_FILENAME, useStringDelimiter=True)

    def _writeUnitsTable(self):
        """Write the metric-name -> unit lookup table to CSV."""
        unitsTableNode = self.createTableNode(*self.UNIT_COLUMNS)
        for metricName, metricUnit in self.unitsDictionary.items():
            self.addRowData(unitsTableNode, metricName, metricUnit)
        self.writeTableNodeToCsv(unitsTableNode, self.UNITS_CSV_OUTPUT_FILENAME, useStringDelimiter=True)

    def mergeTables(self, inputDirectories, outputDirectory):
        """Concatenate the per-run CSVs from several export directories into
        combined CSVs in `outputDirectory` (units deduplicated)."""
        unitCSVs = self.findCorrespondingFilesInDirectories(inputDirectories, self.UNITS_CSV_OUTPUT_FILENAME)
        self.concatCSVsAndSave(unitCSVs, Path(outputDirectory) / self.UNITS_CSV_OUTPUT_FILENAME, removeDuplicateRows=True)

        longCSVs = self.findCorrespondingFilesInDirectories(inputDirectories, self.LONG_CSV_OUTPUT_FILENAME)
        self.concatCSVsAndSave(longCSVs, Path(outputDirectory) / self.LONG_CSV_OUTPUT_FILENAME)

        wideCSVs = self.findCorrespondingFilesInDirectories(inputDirectories, self.WIDE_CSV_OUTPUT_FILENAME)
        self.concatCSVsAndSave(wideCSVs, Path(outputDirectory) / self.WIDE_CSV_OUTPUT_FILENAME)

        hybridCSVs = self.findCorrespondingFilesInDirectories(inputDirectories, self.HYBRID_CSV_OUTPUT_FILENAME)
        self.concatCSVsAndSave(hybridCSVs, Path(outputDirectory) / self.HYBRID_CSV_OUTPUT_FILENAME)
"lasso@queensu.ca"
] | lasso@queensu.ca |
362055f348f5bdd9e9815a9fd4a90b337fb7e476 | 3ced55b04ec82df5257f0e3b500fba89ddf73a8a | /tests/molecular/writers/xyz/conftest.py | 4762327c235ce62b0574093cd599d68259bead29 | [
"MIT"
] | permissive | rdguerrerom/stk | 317282d22f5c4c99a1a8452023c490fd2f711357 | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | refs/heads/master | 2023-08-23T21:04:46.854062 | 2021-10-16T14:01:38 | 2021-10-16T14:01:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | import pytest
import stk
from .case_data import CaseData
# Parametrized fixture: each param is a zero-argument factory so the (possibly
# expensive) stk objects are only constructed when the fixture is requested.
@pytest.fixture(
    params=(
        lambda: CaseData(
            molecule=stk.BuildingBlock('BrCCBr', [stk.BromoFactory()]),
            writer=stk.XyzWriter(),
            string=(
                '8\n\nBr -1.423838 1.561473 0.322335\nC -0.740543 -0.2'
                '57311 0.127980\nC 0.714791 -0.115704 -0.338259\nBr 1.'
                '626726 0.889555 1.068701\nH -1.351758 -0.807456 -0.59'
                '3854\nH -0.776931 -0.696380 1.144036\nH 0.769475 0.52'
                '7986 -1.238698\nH 1.182078 -1.102163 -0.492240\n'
            ),
        ),
    ),
)
def case_data(request) -> CaseData:
    """Return a fresh :class:`CaseData` instance for the current parameter."""
    return request.param()
| [
"noreply@github.com"
] | rdguerrerom.noreply@github.com |
d85bd4322220b2482ece10b207a43c99487b7f9c | bdb3716c644b8d031af9a5285626d7ccf0ecb903 | /code/UI/ClientExamples/Python/ExampleQuery_ARAXi__Workflow_Combo.py | 2746ad3c61839a9ddfbb3d73c805ae15c72504ac | [
"MIT",
"Apache-2.0"
] | permissive | RTXteam/RTX | 97d2a8946d233d48cc1b165f5e575af21bda4b26 | ed0693dd03149e56f7dfaf431fb8a82ace0c4ef3 | refs/heads/master | 2023-09-01T21:48:49.008407 | 2023-09-01T20:55:06 | 2023-09-01T20:55:06 | 111,240,202 | 43 | 31 | MIT | 2023-09-14T16:20:01 | 2017-11-18T21:19:13 | Python | UTF-8 | Python | false | false | 2,385 | py | """ This example sends a simple set of DSL commands to the ARAX API.
"""
# Import minimal requirements
import requests
import json
import re
# Set the base URL for the ARAX reasoner and its endpoint
endpoint_url = 'https://arax.ncats.io/api/arax/v1.1/query'
# Create a dict of the request, specifying the list of DSL commands
request = {
"message": {},
"operations": { "actions": [
"add_qnode(name=acetaminophen, key=n00)",
"add_qnode(categories=biolink:Protein, key=n01)",
"add_qedge(subject=n01, object=n00, key=e00)",
"expand()",
"overlay(action=compute_ngd, virtual_relation_label=N1, subject_qnode_key=n00, object_qnode_key=n01)",
"resultify()",
] },
"workflow": [
{ "id": "filter_results_top_n", "parameters": { "max_results": 17 } }
]
}
# Send the request to RTX and check the status
print(f"INFO: Sending ARAXi + workflow combo program to {endpoint_url}")
response_content = requests.post(endpoint_url, json=request, headers={'accept': 'application/json'})
status_code = response_content.status_code
if status_code != 200:
print("ERROR returned with status "+str(status_code))
response_dict = response_content.json()
print(json.dumps(response_dict, indent=2, sort_keys=True))
exit()
# Unpack the response content into a dict
response_dict = response_content.json()
#print(json.dumps(response_dict, indent=2, sort_keys=True))
# Display the information log
for message in response_dict['logs']:
if True or message['level'] != 'DEBUG':
print(f"{message['timestamp']}: {message['level']}: {message['message']}")
# Display the results
print(f"Results ({len(response_dict['message']['results'])}):")
for result in response_dict['message']['results']:
confidence = 0.0
if 'confidence' in result:
confidence = result['confidence']
if confidence is None:
confidence = 0.0
essence = '?'
if 'essence' in result:
essence = result['essence']
print(" -" + '{:6.3f}'.format(confidence) + f"\t{essence}")
# These URLs provide direct access to resulting data and GUI
print(f"Data: {response_dict['id']}")
if response_dict['id'] is not None:
match = re.search(r'(\d+)$', response_dict['id'])
if match:
print(f"GUI: https://arax.ncats.io/NewFmt/?r={match.group(1)}")
| [
"edeutsch@systemsbiology.org"
] | edeutsch@systemsbiology.org |
1ce57ab64e06a5bbeeefde752bdbc5f55551c62b | 020eb7ca1826df843cc294590ce55ef8b2076263 | /coursework/control-flow/using-conditional-statements/exercise3/func.py | c96fa26d8270a91a99b6487c75080eb6c2a3bddc | [] | no_license | murffious/pythonclass-cornell | d95e3a4a50653f3caee462fcd6bd6a5f476c7248 | da4abfac3af0f4706c1c4afcf1ff978aa41e765c | refs/heads/master | 2023-02-06T05:27:41.199544 | 2020-03-23T17:27:36 | 2020-03-23T17:27:36 | 227,713,413 | 0 | 0 | null | 2023-02-02T05:14:41 | 2019-12-12T23:11:32 | Python | UTF-8 | Python | false | false | 1,636 | py | """
A function to extract names from e-mail addresses.
Author: Paul Murff
Date: Jan 9 2020
"""
import introcs
def extract_name(s):
    """
    Returns the first name of the person in e-mail address s.

    We assume (see the precondition below) that the e-mail address is in one of
    three forms:

        last.first@megacorp.com
        last.first.middle@consultant.biz
        first.last@mompop.net

    where first, last, and middle correspond to the person's first, middle, and
    last name. Names are not empty, and contain only letters. Everything after the
    @ is guaranteed to be exactly as shown.

    The function preserves the capitalization of the e-mail address.

    Examples:
        extract_name('smith.john@megacorp.com') returns 'john'
        extract_name('McDougal.Raymond.Clay@consultant.biz') returns 'Raymond'
        extract_name('maggie.white@mompop.net') returns 'maggie'
        extract_name('Bob.Bird@mompop.net') returns 'Bob'

    Parameter s: The e-mail address to extract from
    Precondition: s is in one of the two address formats described above
    """
    # You must use an if-elif-else statement in this function.
    # NOTE: rewritten with built-in str methods (partition/split) instead of
    # introcs helpers, and the dead double `return first` removed.
    local, _, domain = s.partition('@')
    parts = local.split('.')
    if domain == 'megacorp.com':
        # last.first -> first is the second component
        first = parts[1]
    elif domain == 'mompop.net':
        # first.last -> first is the first component
        first = parts[0]
    elif domain == 'consultant.biz':
        # last.first.middle -> first is the second component
        first = parts[1]
    else:
        # unreachable under the precondition; mirrors the original fallback
        first = ''
    return first
| [
"titan.murff@gmail.com"
] | titan.murff@gmail.com |
7cca05a0f87a3f121f81026ce5455ef7300d7eca | 201b42947d38be70cd0985fb3fc46bd6068ca2f8 | /virtual_env/libs/mysql-connector/python2/mysql/connector/protocol.py | 693dcb3b5a0bf442c553b0fb35efa4351d3155c2 | [
"MIT",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-or-later",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rcosnita/fantastico | d4fade25a10231f50b62fff6b41d814815dd4b62 | 81c8590556baa9e1148071b7835d74b1efada561 | refs/heads/master | 2021-03-12T21:55:03.260192 | 2020-07-28T07:16:39 | 2020-07-28T07:16:39 | 9,280,607 | 3 | 1 | MIT | 2020-07-28T07:16:40 | 2013-04-07T17:52:51 | Python | UTF-8 | Python | false | false | 10,039 | py | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementing the MySQL Client/Server protocol
"""
import struct
from decimal import Decimal
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from mysql.connector.constants import (FieldFlag, ServerCmd)
from mysql.connector import (errors, utils)
class MySQLProtocol(object):
    """Implements packing and parsing of MySQL Client/Server protocol packets.

    NOTE(review): this is Python 2 source (`except Exception, err`, `long`,
    byte strings as `str`); keep it on a Python 2 interpreter.
    """

    def _scramble_password(self, passwd, seed):
        """Scramble a password ready to send to MySQL

        Implements the mysql_native_password scheme:
        SHA1(passwd) XOR SHA1(seed + SHA1(SHA1(passwd))).
        """
        hash4 = None
        try:
            hash1 = sha1(passwd).digest()
            hash2 = sha1(hash1).digest()  # Password as found in mysql.user()
            hash3 = sha1(seed + hash2).digest()
            xored = [ utils.intread(h1) ^ utils.intread(h3)
                      for (h1,h3) in zip(hash1, hash3) ]
            hash4 = struct.pack('20B', *xored)
        except Exception, err:
            raise errors.InterfaceError(
                'Failed scrambling password; %s' % err)

        return hash4

    def _prepare_auth(self, usr, pwd, db, flags, seed):
        """Prepare elements of the authentication packet

        Returns a (username, password, database) tuple of wire-encoded
        fields; empty values become a single NUL byte.
        """
        if usr is not None and len(usr) > 0:
            if isinstance(usr, unicode):
                usr = usr.encode('utf8')
            _username = usr + '\x00'
        else:
            _username = '\x00'

        if pwd is not None and len(pwd) > 0:
            if isinstance(pwd, unicode):
                pwd = pwd.encode('utf8')
            # length-prefixed (20 bytes) scrambled password
            _password = utils.int1store(20) +\
                        self._scramble_password(pwd,seed)
        else:
            _password = '\x00'

        if db is not None and len(db):
            _database = db + '\x00'
        else:
            _database = '\x00'

        return (_username, _password, _database)

    def make_auth(self, seed, username=None, password=None, database=None,
                  charset=33, client_flags=0,
                  max_allowed_packet=1073741824):
        """Make a MySQL Authentication packet"""
        if not seed:
            raise errors.ProgrammingError('Seed missing')

        auth = self._prepare_auth(username, password, database,
                                  client_flags, seed)
        # layout: flags(4) + max packet(4) + charset(1) + 23 filler bytes
        # followed by username, password and database fields
        return utils.int4store(client_flags) +\
               utils.int4store(max_allowed_packet) +\
               utils.int1store(charset) +\
               '\x00' * 23 + auth[0] + auth[1] + auth[2]

    def make_auth_ssl(self, charset=33, client_flags=0,
                      max_allowed_packet=1073741824):
        """Make a SSL authentication packet

        Same header as make_auth but without the credential fields; sent
        before switching the connection to TLS.
        """
        return utils.int4store(client_flags) +\
               utils.int4store(max_allowed_packet) +\
               utils.int1store(charset) +\
               '\x00' * 23

    def make_command(self, command, argument=None):
        """Make a MySQL packet containing a command

        `command` is a ServerCmd code; `argument` (if any) is appended raw.
        """
        data = utils.int1store(command)
        if argument is not None:
            data += str(argument)
        return data

    def make_change_user(self, seed, username=None, password=None,
                         database=None, charset=33, client_flags=0):
        """Make a MySQL packet with the Change User command"""
        if not seed:
            raise errors.ProgrammingError('Seed missing')

        auth = self._prepare_auth(username, password, database,
                                  client_flags, seed)
        data = utils.int1store(ServerCmd.CHANGE_USER) +\
               auth[0] + auth[1] + auth[2] + utils.int2store(charset)
        return data

    def parse_handshake(self, packet):
        """Parse a MySQL Handshake-packet

        Returns a dict with protocol version, server version, thread id,
        capability flags, charset, status flags and the 20-byte scramble
        (split across two packet sections).
        """
        res = {}
        (packet, res['protocol']) = utils.read_int(packet[4:], 1)
        (packet, res['server_version_original']) = utils.read_string(
            packet, end='\x00')
        (packet, res['server_threadid']) = utils.read_int(packet, 4)
        (packet, res['scramble']) = utils.read_bytes(packet, 8)
        packet = packet[1:] # Filler 1 * \x00
        (packet, res['capabilities']) = utils.read_int(packet, 2)
        (packet, res['charset']) = utils.read_int(packet, 1)
        (packet, res['server_status']) = utils.read_int(packet, 2)
        packet = packet[13:] # Filler 13 * \x00
        (packet, scramble_next) = utils.read_bytes(packet, 12)
        res['scramble'] += scramble_next
        return res

    def parse_ok(self, packet):
        """Parse a MySQL OK-packet

        Raises InterfaceError unless byte 4 is 0x00 (the OK marker).
        """
        if not packet[4] == '\x00':
            raise errors.InterfaceError("Failed parsing OK packet.")

        ok = {}
        try:
            (packet, ok['field_count']) = utils.read_int(packet[4:], 1)
            (packet, ok['affected_rows']) = utils.read_lc_int(packet)
            (packet, ok['insert_id']) = utils.read_lc_int(packet)
            (packet, ok['server_status']) = utils.read_int(packet, 2)
            (packet, ok['warning_count']) = utils.read_int(packet, 2)
            if packet:
                # optional human-readable info message
                (packet, ok['info_msg']) = utils.read_lc_string(packet)
        except ValueError:
            raise errors.InterfaceError("Failed parsing OK packet.")
        return ok

    def parse_column_count(self, packet):
        """Parse a MySQL packet with the number of columns in result set"""
        return utils.read_lc_int(packet[4:])[1]

    def parse_column(self, packet):
        """Parse a MySQL column-packet

        Returns a DB-API style 7-tuple plus the raw MySQL flags.
        """
        column = {}
        (packet, column['catalog']) = utils.read_lc_string(packet[4:])
        (packet, column['db']) = utils.read_lc_string(packet)
        (packet, column['table']) = utils.read_lc_string(packet)
        (packet, column['org_table']) = utils.read_lc_string(packet)
        (packet, column['name']) = utils.read_lc_string(packet)
        (packet, column['org_name']) = utils.read_lc_string(packet)
        packet = packet[1:] # filler 1 * \x00
        (packet, column['charset']) = utils.read_int(packet, 2)
        (packet, column['length']) = utils.read_int(packet, 4)
        (packet, column['type']) = utils.read_int(packet, 1)
        (packet, column['flags']) = utils.read_int(packet, 2)
        (packet, column['decimal']) = utils.read_int(packet, 1)
        packet = packet[2:] # filler 2 * \x00

        return (
            column['name'],
            column['type'],
            None, # display_size
            None, # internal_size
            None, # precision
            None, # scale
            ~column['flags'] & FieldFlag.NOT_NULL, # null_ok
            column['flags'], # MySQL specific
        )

    def parse_eof(self, packet):
        """Parse a MySQL EOF-packet

        EOF packets start with 0xfe and are at most 9 bytes long.
        """
        if not (packet[4] == '\xfe' and len(packet) <= 9):
            raise errors.InterfaceError("Failed parsing EOF packet.")

        res = {}
        packet = packet[5:] # disregard the first checking byte
        (packet, res['warning_count']) = utils.read_int(packet, 2)
        (packet, res['status_flag']) = utils.read_int(packet, 2)
        return res

    def parse_statistics(self, packet):
        """Parse the statistics packet

        Returns a dict of label -> number (long or Decimal).
        """
        errmsg = "Failed getting COM_STATISTICS information"
        res = {}
        # Information is separated by 2 spaces
        pairs = packet[4:].split('\x20\x20')
        for pair in pairs:
            try:
                (lbl, val) = [ v.strip() for v in pair.split(':', 2) ]
            # NOTE(review): bare except; should be `except ValueError:`
            except:
                raise errors.InterfaceError(errmsg)

            # It's either an integer or a decimal
            try:
                res[lbl] = long(val)
            # NOTE(review): bare excepts here too; narrow on upgrade
            except:
                try:
                    res[lbl] = Decimal(val)
                except:
                    raise errors.InterfaceError(
                        "%s (%s:%s)." % (errmsg, lbl, val))
        return res

    def read_text_result(self, sock, count=1):
        """Read MySQL text result

        Reads all or given number of rows from the socket.

        Returns a tuple with 2 elements: a list with all rows and
        the EOF packet.
        """
        rows = []
        eof = None
        rowdata = None
        i = 0
        while True:
            if eof is not None:
                break
            if i == count:
                break
            packet = sock.recv()
            if packet[0:3] == '\xff\xff\xff':
                # row spans multiple max-size packets: concatenate payloads
                # until a packet that is not max-size (or an EOF) arrives
                data = packet[4:]
                packet = sock.recv()
                while packet[0:3] == '\xff\xff\xff':
                    data += packet[4:]
                    packet = sock.recv()
                if packet[4] == '\xfe':
                    eof = self.parse_eof(packet)
                else:
                    data += packet[4:]
                rowdata = utils.read_lc_string_list(data)
            elif packet[4] == '\xfe':
                eof = self.parse_eof(packet)
                rowdata = None
            else:
                eof = None
                rowdata = utils.read_lc_string_list(packet[4:])
            if eof is None and rowdata is not None:
                rows.append(rowdata)
            i += 1
        return (rows, eof)
| [
"radu.cosnita@gmail.com"
] | radu.cosnita@gmail.com |
14540ef1b5e178096ca59f1de0d3ba6f681d6f22 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/fabric/rtfabricsetup.py | 21a22fba816522d0574584c791d13cd993e22106 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,544 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
# NOTE(review): auto-generated Cisco ACI object model code (see the header:
# "written by zen warriors, do not modify!"). Left byte-identical apart from
# comments; regenerate from the model instead of hand-editing.
class RtFabricSetup(Mo):
    """
    Mo doc not defined in techpub!!!
    """

    # relation *from* fabric.SetupPol *to* aaa.SecRelnHolder
    meta = TargetRelationMeta("cobra.model.fabric.RtFabricSetup", "cobra.model.aaa.SecRelnHolder")

    meta.moClassName = "fabricRtFabricSetup"
    meta.rnFormat = "rtaaaFabricSetup-[%(tDn)s]"
    meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
    meta.label = "Security Relations Holder"
    meta.writeAccessMask = 0x80000000001
    meta.readAccessMask = 0x80000000001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    meta.parentClasses.add("cobra.model.fabric.SetupPol")

    meta.superClasses.add("cobra.model.reln.From")
    meta.superClasses.add("cobra.model.reln.Inst")

    meta.rnPrefixes = [
        ('rtaaaFabricSetup-', True),
    ]

    # implicit bookkeeping property recording pending child operations
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    # distinguished name of this managed object
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    # lifecycle ownership of the object (local, policy, replica, ...)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    # last modification timestamp ("never" when untouched)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    # relative name of this managed object
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    # object status (created/deleted/modified)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # target class of the relation (aaaSecRelnHolder)
    prop = PropMeta("str", "tCl", "tCl", 22655, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1515
    prop.defaultValueStr = "aaaSecRelnHolder"
    prop._addConstant("aaaSecRelnHolder", None, 1515)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)

    # target DN of the relation; the only naming property
    prop = PropMeta("str", "tDn", "tDn", 22654, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("tDn", prop)

    meta.namingProps.append(getattr(meta.props, "tDn"))
    getattr(meta.props, "tDn").needDelimiter = True

    def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
        # tDn is the sole naming property (embedded in the RN format above)
        namingVals = [tDn]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
4b001d53c03bc9f56445647b9cb02b088ddb632e | 264ce32d9eebb594cc424ecb3b8caee6cb75c2f3 | /content/hw/04_parameter_null/ok/tests/q2_07.py | 84c54e5929d731f39069c4cd1da758bbe23ce0c3 | [] | no_license | anhnguyendepocen/psych101d | a1060210eba2849f371d754e8f79e416754890f9 | 41057ed5ef1fd91e243ab41040f71b51c6443924 | refs/heads/master | 2022-03-24T02:20:32.268048 | 2019-12-21T02:51:02 | 2019-12-21T02:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | test = {
"name": "2: Estimating p and the null distribution of t, easy task",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
>>> # Is the center of the distribution plausible?
>>> np.abs(null_samples.mean() - true_mu_t) < sem_bound
True
>>> # Is the spread of the distribution reasonable?
>>> np.abs(null_samples.var() - true_var_t) < sev_bound
True
""",
"hidden": False,
"locked": False
},
{
"code": r"""
>>> # Is the value of p reasonable?
>>> 0.5 < easy_p_pymc < 0.6
True
""",
"hidden": False,
"locked": False
}
],
"setup": """
>>> null_samples = pd.Series(pymc_null_ts_easy)
>>> n_samples = len(null_samples)
>>> df = 18
>>> true_mu_t = 0
>>> true_var_t = bound.compute_var_t(df)
>>> sem_bound = bound.get_bound_t_mean(n_samples, df=df)
>>> sev_bound = bound.get_bound_t_variance(n_samples, df=df)
""",
"teardown": "",
"type": "doctest"}]
}
| [
"charlesfrye@berkeley.edu"
] | charlesfrye@berkeley.edu |
46c94c81f3ad86f95e0553a584efa6694c53d4d2 | 5241641cba4a6cf3b87284b72dcc5b6e70504f32 | /inmemory/apps.py | 10cba8a219f2e21b4dcf2d3831d0aae29811074d | [] | no_license | sdnnet3/coocooclub | a11505b2559b199164f2d881fa37a65cf9767aac | 5b1708194386048f62aa8222ef619f854758c556 | refs/heads/master | 2020-06-11T15:37:01.437796 | 2019-08-26T05:37:48 | 2019-08-26T05:37:48 | 194,009,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | from django.apps import AppConfig
class InmemoryConfig(AppConfig):
name = 'inmemory'
| [
"clayton.hutton@gmail.com"
] | clayton.hutton@gmail.com |
7c18941abf61e4145754fe7e1e3b4d587799f0d0 | f70c83e63a1a6ae55083d879197ffeeecfb6036c | /catkin_ws/buildold/ros_arduino_bridge/ros_arduino_firmware/catkin_generated/pkg.installspace.context.pc.py | b73a6eac062336446d3f1ec5d298dac4998391e4 | [] | no_license | jdumont0201/ros-cpp-robot | ed46b004bfac890a32b76d346a3ad506624c3cda | b48e0f0a84c8720e59aebdfc3a28f014a64675c6 | refs/heads/master | 2020-03-16T16:22:15.134808 | 2018-05-23T08:24:40 | 2018-05-23T08:24:40 | 132,784,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ros_arduino_firmware"
PROJECT_SPACE_DIR = "/home/jbmdumont/catkin_ws/install"
PROJECT_VERSION = "0.2.0"
| [
"jbmdumont@localhost.localdomain"
] | jbmdumont@localhost.localdomain |
a74f3d34d7c2cdc48582c70f7525ba1946d0cca9 | dc089930c6dd627b1ca37188c9515b6b7bfc3ba3 | /conscious_consumer/store/urls.py | b3978acfece9cb1ceabcc26029a7ad4ce0f8a4a8 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | UPstartDeveloper/conscious-consumer | 923e57b6d1478b654786bfdaa905e7fde17305d7 | 98c5b8c9612b536aa8873353dd48d916287dff5d | refs/heads/master | 2022-11-29T16:16:57.726101 | 2021-04-26T19:57:59 | 2021-04-26T19:57:59 | 247,784,145 | 0 | 0 | MIT | 2022-11-22T07:39:21 | 2020-03-16T18:03:00 | JavaScript | UTF-8 | Python | false | false | 632 | py | from django.urls import path
from .views import (
ProductList,
ProductCreate,
ProductDetail,
ProductUpdate,
ProductDelete,
)
app_name = "store"
urlpatterns = [
# Product CRUD-related URLs
path("products/", ProductList.as_view(), name="product_list"),
path("products/new/", ProductCreate.as_view(), name="product_create"),
path("products/<slug:slug>/edit/", ProductUpdate.as_view(), name="product_update"),
path(
"products/<slug:slug>/delete/", ProductDelete.as_view(), name="product_delete"
),
path("products/<slug:slug>/", ProductDetail.as_view(), name="product_detail"),
]
| [
"zainr7989@gmail.com"
] | zainr7989@gmail.com |
97823ad8e8fe53a9e20e34f9ce7ce89c75df003e | 010c5fbc97731286be00028ff33fc981d943bca3 | /primal/src/code/impute/tests/cgi/count_cgi_genotypes.old.py | f36441491c351e27f58bc314722118098484e0fa | [] | no_license | orenlivne/ober | 6ce41e0f75d3a8baebc53e28d7f6ae4aeb645f30 | 810b16b2611f32c191182042240851152784edea | refs/heads/master | 2021-01-23T13:48:49.172653 | 2014-04-03T13:57:44 | 2014-04-03T13:57:44 | 6,902,212 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,419 | py | #!/usr/bin/env python
'''
============================================================
Calculate call rates in imputed CGI files.
Created on February 18, 2012
@author: Oren Livne <livne@uchicago.edu>
============================================================
'''
import sys, os, csv, itertools, numpy as np, traceback, util
from impute.phasing.examples import wgs_sample_index
from optparse import OptionParser
#---------------------------------------------
# Constants
#---------------------------------------------
# All possible genotypes
GENOTYPES = [x[0] + x[1] for x in list(itertools.product('N01', 'N01'))]
# Converts CGI allele code to our numeric code
CGI_LETTER_TO_ALLELE = {'N': 0, '0': 1, '1': 2}
def genotype_start_index(line):
'''Return the start index of g entries in the list line. If not found, returns -1.'''
index = 6
for x in line[6:]:
if x in GENOTYPES:
return index
else:
index += 1
return -1
def print_count_by_snp(lines, out, id_list):
'''Count total genotypes for each SNPs.'''
# Initialize all genotype counts at 0
# Stream lines and increment counts
for line in lines:
# Lines may start with a variable no. of items from the csv reader's perspective (e.g.,
# indel with empty substitution fields will result in consecutive spaces. Calculate the
# start of the genotype sublist
index = genotype_start_index(line)
genotype = line[index:]
# Pick out the relevant IDs
count = dict(zip(GENOTYPES, [0] * len(GENOTYPES)))
for x in (np.array(genotype)[id_list] if id_list is not None else genotype):
count[x] += 1
print_count_total(count, out)
def count_total(lines, id_list, variant_type=None, phasing_rate= 0.0):
'''Count total genotypes over the entire file.'''
# Initialize all genotype counts at 0
count = dict(zip(GENOTYPES, [0] * len(GENOTYPES)))
wgs = wgs_sample_index()
total_wgs = len(wgs)
filter_on_phasing = phasing_rate > 0.0001
filter_on_variant_type = variant_type != 'all'
fully_called = lambda x: x == '00' or x == '01' or x == '10' or x == '11'
# Stream lines and increment counts
for line in lines:
# Filter variant type
if filter_on_variant_type and line[4] != variant_type:
continue
# Lines may start with a variable no. of items from the csv reader's perspective (e.g.,
# indel with empty substitution fields will result in consecutive spaces. Calculate the
# start of the genotype sublist
genotype = line[genotype_start_index(line):]
# Filter to phasing rate >= phasing_rate
if filter_on_phasing:
rate = float(len(np.where(map(fully_called, np.array(genotype)[wgs]))[0])) / total_wgs
if rate < phasing_rate:
continue
# Pick out the relevant IDs
for x in (np.array(genotype)[id_list] if id_list is not None else genotype):
count[x] += 1
return count
def print_count_total(count, out):
'''Print total count results: (genotype count frequency) columns for all genotypes.'''
total = sum(count.itervalues())
for k in GENOTYPES:
out.write('%s %8d %.3f ' % (''.join(map(str, map(CGI_LETTER_TO_ALLELE.get, k))), count[k], (1.0 * count[k]) / total))
out.write('\n')
####################################################################################
def __parse_command_line_args():
'''Parse and validate command-line arguments.'''
PROGRAM = os.path.basename(sys.argv[0])
usage = 'Usage: %s\n' \
'Calculate call rates in a CGI imputed tab-delimited standard input.\n' \
'\nType ''%s -h'' to display full help.' % (PROGRAM, PROGRAM)
parser = OptionParser(usage=usage)
parser.add_option('-d', '--data-file', type='str' , dest='data_file',
default=None, help='If specified, reads from data file, otherwise reads from stdin')
parser.add_option('-i', '--id-index-file', type='str' , dest='id_file',
default=None, help='If specified, outputs only the IDs listed in this file (these are indices between 0 and #ids-1, if the input file has #ids genotype columns)')
parser.add_option('-s', '--snp', action='store_true' , dest='group_by_snp', default=False,
help='Group by snp')
parser.add_option('-t', '--variant-type', type='str', dest='variant_type', default='all',
help='Variant type to select (e.g. snp). ''all'' counts all variants.')
parser.add_option('-p', '--min-phasing-rate', type='float', dest='phasing_rate', default= 0.0,
help='Minimum WGS phasing rate to consider (non-negative value will disable this option)')
options, args = parser.parse_args(sys.argv[1:])
if len(args) != 0:
print usage
sys.exit(1)
return options
def __main(options):
'''Main program - accepts an options struct.'''
# If id file is specified, read into the 'id_list' array
id_list = np.loadtxt(options.id_file, dtype=np.int) if options.id_file else None
# Init dictionary of all-possible-genotypes-to-counts
try:
f = open(options.data_file, 'rb') if options.data_file else sys.stdin
lines = (line for line in csv.reader(f, delimiter='\t', skipinitialspace=True) if line)
if options.group_by_snp:
print_count_by_snp(lines, sys.stdout, id_list)
else:
count = count_total(lines, id_list, variant_type=options.variant_type,
phasing_rate=options.phasing_rate)
print_count_total(count, sys.stdout)
except (IOError, OSError):
traceback.print_exc(file=sys.stdout)
sys.exit(141)
def main(**kwargs):
'''Main program - accepts argument dictionary.'''
# Default options
options = util.Struct(data_file=None, id_file=None, group_by_snp=False, variant_type='all',
phasing_rate= 0.0)
# Override with passed arguments
options.update(**kwargs)
# (valid, options, error_msg) = __validate_options(options)
# if not valid:
# raise ValueError('Bad options: %s' % (error_msg,))
return __main(options)
if __name__ == '__main__':
'''Main program - accepts CLI arguments.'''
__main(__parse_command_line_args())
| [
"oren.livne@gmail.com"
] | oren.livne@gmail.com |
8522128d89b824867b58a7b3c2b8e336b6cb1814 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow/source/tensorflow/contrib/learn/python/learn/tests/__init__.py | f6a58889f9c89c6369708df68a903013932811c5 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 891 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn tests."""
# TODO(ptucker): Move these to the packages of the units under test.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
9acfaeaff0e83be2eca65c5a519804e3e11d64ff | 6fdbd55d5232d66a740e2893cc9939cb6e89455d | /geupshik_translator/config/storages.py | 181ae2bf99c7e69e523a9a27a78d7df2d6bd122c | [] | no_license | suhjohn/Slang-Translator-KR | f24ff76b034cc78926a30a7ee9b50b15855f5b5a | 69f524ae4c1ce8bbac481ce73221f18747144001 | refs/heads/master | 2021-08-08T18:55:02.819204 | 2017-11-10T12:23:53 | 2017-11-10T12:41:40 | 110,214,064 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from storages.backends.s3boto3 import S3Boto3Storage
from django.conf import settings
class StaticStorage(S3Boto3Storage):
location = settings.STATICFILES_LOCATION
class MediaStorage(S3Boto3Storage):
location = settings.MEDIAFILES_LOCATION
| [
"johnsuh94@gmail.com"
] | johnsuh94@gmail.com |
76b9d760319a2058057ef8578b535f1dca0e79af | 49f61714a6f78d984fd2194d6064d84e891bc5b7 | /2019-1/220/users/2023/codes/1647_2443.py | 6cbe7ef05d32e52d810ed45d557bc58163e7e18e | [] | no_license | psbarros/Variaveis3 | b5c4e1517e7d94a846ee03791d25d5821a1c651c | 3dcf6f810709ce03c78335acf9533e008a2ae125 | refs/heads/master | 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # Tanque de combustivel.
from math import*
raio = float(input("digite o raio:"))
altura = float(input("digite a altura:"))
opcao = float(input("digite 1 para volume do ar ou 2 para do combustivel:"))
calota_esferica = ((pi*(altura**2)*(3*raio-altura)))/3
volume_esfera = 4*pi*(raio**3)/3
if (opcao==1):
v = calota_esferica
else:
v = volume_esfera - calota_esferica
print(round(v,4))
| [
"psb@icomp.ufam.edu.br"
] | psb@icomp.ufam.edu.br |
42b8ceb2a472566a9eb54fe3e020f1d07b99f9a9 | 7b4f9a5937c6d390289d7252266cfdd3c62be728 | /how_to_think/chapter_3/second_set/exercise_3_4_4_7_b.py | 6029766e3231f4baef19c32fac035a8f6bb033d6 | [] | no_license | LCfP-basictrack/basictrack-2020-2021-2b | d7ea1dc651c202d9e433588c9df8cf3554fd80e8 | 268f066a9baade3c4300a72ef7a866e535a714e0 | refs/heads/master | 2023-05-05T22:32:41.738918 | 2021-05-27T15:12:40 | 2021-05-27T15:12:40 | 358,275,084 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | import turtle
screen = turtle.Screen()
pirate = turtle.Turtle()
steps = [160, -43, 270, -97, -43, 200, -940, 17, -86]
heading = 0
for step in steps:
pirate.left(step)
pirate.forward(100)
heading += step
print("The final heading is", heading % 360)
screen.exitonclick()
| [
"mail@vincentvelthuizen.com"
] | mail@vincentvelthuizen.com |
cbdbd4b45853d6a0849a51a1b17e1bae1b10a9ce | c97830c72b99f005a2024ce57f8af9a912b18c59 | /acc/migrations/0006_profile_id_image.py | ca338e9102c9f4fe6b21eb4d004a7ff4e328efc8 | [] | no_license | toluwanicareer/xenos | 77b4d9c6516c45fffef6affbcaeccdc02ec5ff3a | ba1c25c68c87aaddcf1237e2c055b055e24fa4bc | refs/heads/master | 2021-05-04T15:15:00.373122 | 2018-04-08T20:10:00 | 2018-04-08T20:10:00 | 120,223,340 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-17 17:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('acc', '0005_auto_20180217_0620'),
]
operations = [
migrations.AddField(
model_name='profile',
name='id_image',
field=models.ImageField(null=True, upload_to=b''),
),
]
| [
"abiodun.toluwanii@gmail.com"
] | abiodun.toluwanii@gmail.com |
76e9b88a8acfc25d43ab10dc0fc98798ffa21f65 | cb6461bfae8b0935b7885697dad0df60670da457 | /pychron/dashboard/tasks/server/panes.py | 1ec709a17a70d5aaec69977e3dd98bc2924d76de | [
"Apache-2.0"
] | permissive | USGSMenloPychron/pychron | 00e11910511ca053e8b18a13314da334c362695a | 172993793f25a82ad986e20e53e979324936876d | refs/heads/develop | 2021-01-12T14:09:18.983658 | 2018-02-06T14:25:05 | 2018-02-06T14:25:05 | 69,751,244 | 0 | 0 | null | 2016-10-01T16:59:46 | 2016-10-01T16:59:46 | null | UTF-8 | Python | false | false | 3,111 | py | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.tasks.traits_dock_pane import TraitsDockPane
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traitsui.api import View, UItem, VGroup, HGroup, Group, VSplit
# ============= standard library imports ========================
# ============= local library imports ==========================
from traitsui.editors import TableEditor, InstanceEditor, ListEditor
from traitsui.extras.checkbox_column import CheckboxColumn
from traitsui.table_column import ObjectColumn
from pychron.core.ui.custom_label_editor import CustomLabel
class DashboardCentralPane(TraitsTaskPane):
def traits_view(self):
url = CustomLabel('object.notifier.url', label='URL')
agrp = VGroup(UItem('devices', editor=ListEditor(mutable=False,
style='custom',
editor=InstanceEditor(
view=View(UItem('graph', style='custom'))))), label='All')
igrp = VGroup(UItem('selected_device', style='custom'), label='Individual')
tgrp = HGroup(url, UItem('clear_button', tooltip='Clear current errors'))
# v = View(
# VGroup(HGroup(url, UItem('clear_button', tooltip='Clear current errors')),
# UItem('selected_device',
# style='custom'),
#
# )))
v = View(VGroup(tgrp, Group(agrp, igrp, layout='tabbed')))
return v
class DashboardDevicePane(TraitsDockPane):
id = 'pychron.dashboard.devices'
def traits_view(self):
cols = [CheckboxColumn(name='use'),
ObjectColumn(name='name', editable=False)]
editor = TableEditor(columns=cols,
selected='selected_device')
cols = [ObjectColumn(name='name', label='Name'),
ObjectColumn(name='last_value', label='Value'),
ObjectColumn(name='last_time_str', label='Timestamp')]
veditor = TableEditor(columns=cols,
editable=False)
v = View(VSplit(UItem('devices', editor=editor),
UItem('values', editor=veditor)))
return v
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
5b86ad9f6206d82584c6a1da085a0e3e72e2b250 | 11aac6edab131293027add959b697127bf3042a4 | /busiestServers.py | 6fee9c8789a62b906f53a1556f08cd3f3b0a2b41 | [] | no_license | jdanray/leetcode | a76b3436002b31865967b757b73c85992636383b | fd736af3e79899b86dac89d4d925d5bd985944ad | refs/heads/master | 2023-08-15T01:20:05.110565 | 2023-08-14T00:25:58 | 2023-08-14T00:25:58 | 148,686,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # https://leetcode.com/problems/find-servers-that-handled-most-number-of-requests/
from sortedcontainers import SortedList
class Solution(object):
def busiestServers(self, k, arrival, load):
count = collections.Counter()
avail = SortedList(list(range(k)))
busy = []
for (i, a) in enumerate(arrival):
while busy and busy[0][0] <= a:
_, s = heapq.heappop(busy)
avail.add(s)
if avail:
j = avail.bisect_left(i % k)
if j >= len(avail):
s = avail[0]
else:
s = avail[j]
avail.remove(s)
heapq.heappush(busy, (a + load[i], s))
count[s] += 1
maxc = max(count.values())
return [s for s in range(k) if count[s] == maxc]
| [
"jdanray@users.noreply.github.com"
] | jdanray@users.noreply.github.com |
2f947ab99d89b2f0ef78907083e8ebf86d2ad25a | 9452f681ea486fc53ad88d05392aed5fc450805c | /code25_all/python/2350110.txt | cb11aa556ef4974922179fb7719a30672dd9d2db | [] | no_license | CoryCollins/src-class | 11a6df24f4bd150f6db96ad848d7bfcac152a695 | f08a2dd917f740e05864f51ff4b994c368377f97 | refs/heads/master | 2023-08-17T11:53:28.754781 | 2021-09-27T21:13:23 | 2021-09-27T21:13:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,273 | txt | #!/usr/bin/python
import timeit
import numpy
import ctypes
import random
USE_RANDOM=True
USE_STATIC_BUFFER=True
STATIC_BUFFER = numpy.empty(4096*20, dtype=numpy.float32)
def render(i):
# pretend these are different each time
if USE_RANDOM:
tex_left, tex_right, tex_top, tex_bottom = random.random(), random.random(), random.random(), random.random()
left, right, top, bottom = random.random(), random.random(), random.random(), random.random()
else:
tex_left, tex_right, tex_top, tex_bottom = 0.0, 1.0, 1.0, 0.0
left, right, top, bottom = -1.0, 1.0, 1.0, -1.0
ibuffer = (
tex_left, tex_bottom, left, bottom, 0.0, # Lower left corner
tex_right, tex_bottom, right, bottom, 0.0, # Lower right corner
tex_right, tex_top, right, top, 0.0, # Upper right corner
tex_left, tex_top, left, top, 0.0, # upper left
)
return ibuffer
# create python list.. convert to numpy array at end
def create_array_1():
ibuffer = []
for x in xrange(4096):
data = render(x)
ibuffer += data
ibuffer = numpy.array(ibuffer, dtype=numpy.float32)
return ibuffer
# numpy.array, placing individually by index
def create_array_2():
if USE_STATIC_BUFFER:
ibuffer = STATIC_BUFFER
else:
ibuffer = numpy.empty(4096*20, dtype=numpy.float32)
index = 0
for x in xrange(4096):
data = render(x)
for v in data:
ibuffer[index] = v
index += 1
return ibuffer
# using slicing
def create_array_3():
if USE_STATIC_BUFFER:
ibuffer = STATIC_BUFFER
else:
ibuffer = numpy.empty(4096*20, dtype=numpy.float32)
index = 0
for x in xrange(4096):
data = render(x)
ibuffer[index:index+20] = data
index += 20
return ibuffer
# using numpy.concat on a list of ibuffers
def create_array_4():
ibuffer_concat = []
for x in xrange(4096):
data = render(x)
# converting makes a diff!
data = numpy.array(data, dtype=numpy.float32)
ibuffer_concat.append(data)
return numpy.concatenate(ibuffer_concat)
# using numpy array.put
def create_array_5():
if USE_STATIC_BUFFER:
ibuffer = STATIC_BUFFER
else:
ibuffer = numpy.empty(4096*20, dtype=numpy.float32)
index = 0
for x in xrange(4096):
data = render(x)
ibuffer.put( xrange(index, index+20), data)
index += 20
return ibuffer
# using ctype array
CTYPES_ARRAY = ctypes.c_float*(4096*20)
def create_array_6():
ibuffer = []
for x in xrange(4096):
data = render(x)
ibuffer += data
ibuffer = CTYPES_ARRAY(*ibuffer)
return ibuffer
def equals(a, b):
for i,v in enumerate(a):
if b[i] != v:
return False
return True
if __name__ == "__main__":
number = 100
# if random, don't try and compare arrays
if not USE_RANDOM and not USE_STATIC_BUFFER:
a = create_array_1()
assert equals( a, create_array_2() )
assert equals( a, create_array_3() )
assert equals( a, create_array_4() )
assert equals( a, create_array_5() )
assert equals( a, create_array_6() )
t = timeit.Timer( "testing2.create_array_1()", "import testing2" )
print 'from list:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_2()", "import testing2" )
print 'array: indexed:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_3()", "import testing2" )
print 'array: slicing:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_4()", "import testing2" )
print 'array: concat:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_5()", "import testing2" )
print 'array: put:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_6()", "import testing2" )
print 'ctypes float array:', t.timeit(number)/number*1000.0, 'ms'
$ python testing2.py
from list: 15.0486779213 ms
array: indexed: 24.8184704781 ms
array: slicing: 50.2214789391 ms
array: concat: 44.1691994667 ms
array: put: 73.5879898071 ms
ctypes float array: 20.6674289703 ms
| [
"znsoft@163.com"
] | znsoft@163.com |
2cbc0e35a8851a5ac3b4a3a4cae1d7c9d6e26397 | 4033fed719d91ccea96bb854f499a4ff253c54d3 | /custom_features/DBTT.py | 79b3b098c08eec105c67913a76224dce5ea0fd59 | [] | no_license | H0lland/MAST-ML | 612cc195c08f612b4846b0da40f113c3b40dfc47 | 40ef2c17bb8bb26b928f6cc4623a3a46e02c671a | refs/heads/master | 2020-03-15T13:33:25.533453 | 2018-08-10T15:10:15 | 2018-08-10T15:10:15 | 132,169,522 | 0 | 0 | null | 2018-06-12T15:40:50 | 2018-05-04T17:25:49 | Jupyter Notebook | UTF-8 | Python | false | false | 1,577 | py | import numpy as np
import copy
from FeatureOperations import FeatureNormalization, FeatureIO
__author__ = "Tam Mayeshiba"
class DBTT():
"""Class for creating custom feature columns specifically for the
DBTT project.
New methods may be added.
Args:
dataframe <data object>
Returns:
Raises:
ValueError if dataframe is None
"""
def __init__(self, dataframe=None):
"""Custom data handler
Attributes:
self.original_dataframe <data object>: Dataframe
self.df <data object>: Dataframe
Each custom feature should take keyword arguments.
"""
if dataframe is None:
raise ValueError("No dataframe.")
self.original_dataframe = copy.deepcopy(dataframe)
self.df = copy.deepcopy(dataframe)
return
def calculate_EffectiveFluence(self, pvalue=0, ref_flux = 3e10, flux_feature="",fluence_feature="", scale_min = 1e17, scale_max = 1e25, **params):
"""Calculate effective fluence
"""
fluence = self.df[fluence_feature]
flux = self.df[flux_feature]
EFl = fluence * (ref_flux / flux) ** pvalue
EFl = np.log10(EFl)
fio = FeatureIO(self.df)
new_df = fio.add_custom_features(["EFl"],EFl)
fnorm = FeatureNormalization(new_df)
N_EFl = fnorm.minmax_scale_single_feature("EFl",
smin = np.log10(scale_min),
smax = np.log10(scale_max))
return N_EFl
| [
"mayeshiba@wisc.edu"
] | mayeshiba@wisc.edu |
a8880daa6f3d42715335e1ab99fa44f4a9d4f40b | 8b09c10f7ebabccf01dcf21775c7fe0550ee0d9e | /virtual/bin/confusable_homoglyphs | d477d110b1c479190e2e1f2ca41abed42a2bb72f | [
"MIT"
] | permissive | MaryMbugua/Safe | f25da88ccefcc5d54ea142b634be3493be1e24e1 | 2aaa4760cfa96aafc4d37233fe7b4df584e2ed79 | refs/heads/master | 2020-03-18T11:26:22.678249 | 2018-06-05T06:21:58 | 2018-06-05T06:21:58 | 134,671,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | #!/home/nish/Desktop/projects/neighbourhoodwatch/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from confusable_homoglyphs.cli import cli
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli())
| [
"marymbugua.nm@gmail.com"
] | marymbugua.nm@gmail.com | |
6eb1f05ced6608ae3c37f76584a08945f16a2723 | f9cc7246e323a9ce0d93a4e9722e1408e494fb4f | /paymentserver/app/urls.py | d9b4627f88e861d9f3fc7e2a54918547846fd3a8 | [] | no_license | ketsalot1/apps-payment-server | a21e3043b71f85db103e6f094e1ebb243119f491 | e571776af364ebf32dec9fbe4fb1a6554d279328 | refs/heads/master | 2021-01-13T07:09:34.186299 | 2012-03-23T04:06:53 | 2012-03-23T04:06:53 | 69,055,263 | 1 | 0 | null | 2016-09-23T19:37:14 | 2016-09-23T19:37:14 | null | UTF-8 | Python | false | false | 225 | py | from django.conf.urls.defaults import *
from django.conf import settings
urlpatterns = patterns('app.views',
(r'^$', 'home'),
url(r'^payment_succeeded$', 'payment_succeeded',
name='app.payment_succeeded'),
)
| [
"kumar.mcmillan@gmail.com"
] | kumar.mcmillan@gmail.com |
2e699869612e99e69257d64e9b23679d35d7c5b7 | f0e10b8dfabfe931e7fa9d9adda6ca4fcde6940d | /tutu/utils.py | cbfe3a94cdf25490e2330e9b42b4dfd63d60f418 | [
"MIT"
] | permissive | priestc/django-tutu | e0d4cbd977ca1d7ed97f1fb510aa655ada6d74d3 | 267a44a0dd75a1de42d05801149f36ef1ac84b57 | refs/heads/master | 2020-09-27T07:13:01.442268 | 2020-02-08T05:50:53 | 2020-02-08T05:50:53 | 226,460,887 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,217 | py | from tutu.metrics import Metric
import datetime
from django.conf import settings
def validate_metric(metric):
if isinstance(metric, Metric):
return metric
elif isinstance(metric, type) and issubclass(metric, Metric):
return metric()
else:
raise ValueError("Must be a Metric class or instance")
def get_installed_metrics():
metrics = []
for item in settings.INSTALLED_TUTU_METRICS:
metrics.append(validate_metric(item))
return metrics
def get_metric_from_name(name):
for metric in get_installed_metrics():
if name == metric.internal_name:
return metric
def get_metrics_from_names(metric_names):
metric_list = []
for metric in get_installed_metrics():
if metric.internal_name in metric_names:
metric_list.append(metric)
return metric_list
def get_column_number_and_instance():
column_numbers = {}
for i, metric in enumerate(get_installed_metrics()):
column_numbers[metric.internal_name] = [i+1, metric]
return column_numbers
######################################################
######################################################
def make_test_ticks(start, end):
from tutu.models import Tick
target = start
while(target < end):
Tick.objects.create(date=target, machine="TestMachine")
target += datetime.timedelta(minutes=5)
def make_poll_results(metrics):
import random
from tutu.models import Tick, PollResult
for tick in Tick.objects.all():
for item in metrics:
metric = validate_metric(item)
result = metric.poll()
PollResult.objects.create(
tick=tick,
metric_name=metric.internal_name,
result=result,
success=True,
seconds_to_poll=1
)
def make_nginx_ticks():
from tutu.metrics import Nginx, NginxByStatusCode, NginxPercentUniqueIP, NginxBandwidth
n = Nginx()
start = n.parse_dt("27/Jan/2020:07:35:07 -0800")
end = n.parse_dt("31/Jan/2020:13:28:15 -0800")
make_test_ticks(start, end)
make_poll_results([n, NginxByStatusCode(), NginxPercentUniqueIP(), NginxBandwidth()])
| [
"cp368202@ohiou.edu"
] | cp368202@ohiou.edu |
666fecfb2a75f1bc7777fe2d07e22f40e6d985eb | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D04B/CUSEXPD04BUN.py | e420b19fdad2bfde8e8a20be17750d9949db5fad | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,519 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD04BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'LOC', MIN: 0, MAX: 5},
{ID: 'CNT', MIN: 0, MAX: 9},
{ID: 'NAD', MIN: 1, MAX: 1, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'TDT', MIN: 1, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
]},
{ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'SEL', MIN: 0, MAX: 9},
]},
{ID: 'RFF', MIN: 0, MAX: 999, LEVEL: [
{ID: 'NAD', MIN: 0, MAX: 2},
{ID: 'CNT', MIN: 0, MAX: 1},
{ID: 'CNI', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'SGP', MIN: 0, MAX: 9},
{ID: 'CNT', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 2},
{ID: 'NAD', MIN: 0, MAX: 5},
{ID: 'GDS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'PAC', MIN: 0, MAX: 999, LEVEL: [
{ID: 'PCI', MIN: 0, MAX: 1},
]},
{ID: 'TOD', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'MOA', MIN: 0, MAX: 10, LEVEL: [
{ID: 'CUX', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
]},
{ID: 'TAX', MIN: 0, MAX: 9, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'GEI', MIN: 0, MAX: 1},
]},
{ID: 'DOC', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 1},
]},
{ID: 'CST', MIN: 0, MAX: 99, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 1, MAX: 1},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'TAX', MIN: 0, MAX: 9, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'GEI', MIN: 0, MAX: 1},
]},
]},
]},
]},
{ID: 'AUT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
50dd5ace63ee64ad2858aaefffaeb374c9271f86 | 49b1b98e9bbba2e377d8b5318fcc13776bbcb10b | /museum/spiders/exhibition17.py | e1d1505e4d08052e575979f21026047abbede4ea | [] | no_license | BUCT-CS1808-SoftwareEngineering/MusemData_Collection_System | fa4e02ec8e8aaa9a240ba92cf7be33dbc0e8e31f | 023e829c77037ba6d2183d8d64dcb20696b66931 | refs/heads/master | 2023-04-29T05:43:01.627439 | 2021-05-23T03:02:09 | 2021-05-23T03:02:09 | 360,040,880 | 0 | 0 | null | 2021-05-23T03:02:09 | 2021-04-21T05:18:41 | Python | UTF-8 | Python | false | false | 2,232 | py | import scrapy
from museum.items import exhibitionItem
import re
import json
# scrapy crawl exhibition17
class Exhibition17Spider(scrapy.Spider):
    """Spider for the permanent-exhibition listing on 3gmuseum.cn.

    The listing endpoint returns JSON and only answers POST requests, so
    ``start_requests`` is overridden to issue a POST for the single start
    URL. Each exhibition entry is printed as a
    (name, description, image URL) tuple; nothing is yielded to the item
    pipeline yet.

    Run with: ``scrapy crawl exhibition17``
    """
    name = 'exhibition17'
    start_urls = ['http://www.3gmuseum.cn/web/exhibitionHallOften/conventionalExhibitionPage.do?pageNumber=1&pageSize=12&itemno=25434353']

    def start_requests(self):
        # The endpoint rejects plain GET, hence the explicit POST request.
        yield scrapy.Request(url=self.start_urls[0], callback=self.parse, method="POST")

    def parse(self, response):
        """Print (title, tag-stripped description, image URL) per entry."""
        # NOTE: the original created an unused `exhibitionItem()` here; it was
        # never populated or yielded, so it has been removed.
        coll_list = json.loads(response.text)["list"]
        for i in coll_list:
            collectionName = ''.join(i["formattitle"])
            collectionImageUrl = ''.join(i["themeimg"])
            # Strip HTML tags from the rich-text description.
            collectionDescription = re.sub(r'<\/?.+?\/?>', '', str(i["contents"]))
            print((collectionName, collectionDescription, collectionImageUrl))
| [
"szqszq00766@163.com"
] | szqszq00766@163.com |
f67376bd7e13509194aea02f91ac23207e7fca15 | 4bd207d288c95b9f20785bb841224b914f05c280 | /code-master/lib/bitbots/modules/behaviour/head/decisions/head_duty_decider.py | 693c083124300ef7e2a84e8530d287cd73e15850 | [] | no_license | hendrikvgl/RoboCup-Spielererkennung | 435e17ee540c4b4c839e26d54db2528a60e6a110 | c41269a960f4b5ea0814a49f5a20ae17eb0a9d71 | refs/heads/master | 2021-01-10T10:39:00.586760 | 2015-10-21T12:42:27 | 2015-10-21T12:42:27 | 44,675,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,726 | py | # -*- coding:utf-8 -*-
"""
HeadDutyDecider
^^^^^^^^^^^^^^^
Entscheidet was der Kopf tun soll
History:
* 19.08.14: Created (Nils Rokita)
"""
import time
from bitbots.modules.abstract.abstract_decision_module import AbstractDecisionModule
from bitbots.modules.abstract.abstract_module import debug_m
from bitbots.modules.behaviour.head.decisions.search_and_confirm import SearchAndConfirmBall, SearchAndConfirmEnemyGoal
from bitbots.modules.behaviour.head.decisions.continious_search import ContiniousSearch
from bitbots.util import get_config
class HeadDutyDecider(AbstractDecisionModule):
    """Root decision of the head behaviour stack.

    Balances two competing duties -- tracking the ball and tracking the
    enemy goal -- with two priority counters that grow while the object is
    unseen and shrink while it is seen. Explicit tracking requests from the
    blackboard take precedence over the priority comparison.
    """
    def __init__(self, _):
        super(HeadDutyDecider, self).__init__()
        # Behaviour toggles from the global config.
        toggles = get_config()["Behaviour"]["Toggles"]["Head"]
        self.toggle_goal_vison_tracking = toggles["goalVisionTracking"]
        self.toggle_switch_ball_goal = toggles["switchBallGoalSearch"]
        config = get_config()
        # Seconds a detection must persist to count as "confirmed".
        self.confirm_time = config["Behaviour"]["Common"]["Search"]["confirmTime"]
        # Goal-confirmation bookkeeping (timestamp / failure counter).
        self.last_confirmd_goal = 0
        self.fail_goal_counter = 0
        # Priority counters: a higher value means "unseen for longer".
        self.ball_prio = 0
        self.goal_prio = 0
        self.trackjustball_aftergoal = False
    def perform(self, connector, reevaluate=False):
        """ This is the root for the head stack machine """
        # todo refactor in more decisions
        # Update priorities from the current vision frame. The counters use
        # different rates and caps (ball: -3/+5 capped at 120, goal: -2/+3
        # capped at 100), so a long-unseen ball eventually outranks the goal.
        if connector.raw_vision_capsule().ball_seen():
            self.ball_prio = max(0, self.ball_prio - 3)
        else:
            self.ball_prio = min(120, self.ball_prio + 5)
        if connector.raw_vision_capsule().any_goal_seen():
            self.goal_prio = max(0, self.goal_prio - 2)
        else:
            self.goal_prio = min(100, self.goal_prio + 3)
        debug_m(4, "GoalPrio", self.goal_prio)
        debug_m(4, "BallPrio", self.ball_prio)
        debug_m(4, "BallLastCOnfirmed", time.time() - connector.blackboard_capsule().get_confirmed_ball())
        debug_m(4, "BallLastStratedconfirm", time.time() - connector.blackboard_capsule().get_started_confirm_ball())
        # Hard override: head movement disabled entirely.
        if connector.blackboard_capsule().is_no_head_movement_at_all():
            debug_m(4, "Headdoes", "Nothing")
            return self.interrupt()
        # Explicit single-object tracking requests win over everything below.
        if connector.blackboard_capsule().is_ball_tracking_still_active():
            debug_m(4, "Headdoes", "BallTracking")
            return self.push(SearchAndConfirmBall)
        if connector.blackboard_capsule().is_enemy_goal_tracking_still_active():
            debug_m(4, "Headdoes", "GoalTracking")
            return self.push(SearchAndConfirmEnemyGoal)
        # Dual tracking: alternate between ball and goal based on how long
        # ago each was last confirmed (ball stale after 5 s, goal after 6 s).
        if connector.blackboard_capsule().is_tracking_both_still_active():  # todo to be tested
            debug_m(4, "TrackbothTime", time.time())
            if time.time() - connector.blackboard_capsule().get_confirmed_ball() > 5:
                debug_m(4, "Headdoes", "TrackBothBall")
                return self.push(SearchAndConfirmBall)
            # ball long enough seen
            elif time.time() - connector.blackboard_capsule().get_confirmed_goal() > 6:
                debug_m(4, "Headdoes", "TrackBothGoal")
                return self.push(SearchAndConfirmEnemyGoal)
            elif self.trackjustball_aftergoal:
                debug_m(4, "Headdoes", "TrackBothElse")
                return self.push(SearchAndConfirmBall)
        # No explicit request: pick the target with the higher priority
        # counter (ties go to the ball), if that toggle is enabled.
        if self.toggle_switch_ball_goal:
            debug_m(4, "Headdoes", "Priorities")
            if self.ball_prio >= self.goal_prio:
                return self.push(SearchAndConfirmBall)
            else:
                return self.push(SearchAndConfirmEnemyGoal)
        # Default Head Behaviour
        debug_m(4, "Headdoes", "Standardsearch")
        return self.push(ContiniousSearch)
    def get_reevaluate(self):
        # Always reevaluate: head duty may change on every behaviour tick.
        return True
| [
"hendrik.vgl@gmail.com"
] | hendrik.vgl@gmail.com |
b3828d96fd97bc7eba9ac11905e8647994ec276a | 24e7e0dfaaeaca8f911b40fcc2937342a0f278fd | /venv/Lib/site-packages/plotly/express/_doc.py | 3d4c5d214689f018d81ccf174782cce8575bfd25 | [
"MIT"
] | permissive | BimiLevi/Covid19 | 90e234c639192d62bb87364ef96d6a46d8268fa0 | 5f07a9a4609383c02597373d76d6b6485d47936e | refs/heads/master | 2023-08-04T13:13:44.480700 | 2023-08-01T08:36:36 | 2023-08-01T08:36:36 | 288,455,446 | 1 | 0 | MIT | 2021-01-22T19:36:26 | 2020-08-18T12:53:43 | HTML | UTF-8 | Python | false | false | 26,439 | py | import inspect
from textwrap import TextWrapper
try:
getfullargspec = inspect.getfullargspec
except AttributeError: # python 2
getfullargspec = inspect.getargspec
# Shared type/description fragments reused by many column-reference entries
# in the `docs` table below (single column vs. list-of-columns variants).
colref_type = "str or int or Series or array-like"
colref_desc = "Either a name of a column in `data_frame`, or a pandas Series or array_like object."
colref_list_type = "list of str or int, or Series or array-like"
colref_list_desc = (
    "Either names of columns in `data_frame`, or pandas Series, or array_like objects"
)
docs = dict(
data_frame=[
"DataFrame or array-like or dict",
"This argument needs to be passed for column names (and not keyword names) to be used.",
"Array-like and dict are tranformed internally to a pandas DataFrame.",
"Optional: if missing, a DataFrame gets constructed under the hood using the other arguments.",
],
x=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the x axis in cartesian coordinates.",
],
y=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the y axis in cartesian coordinates.",
],
z=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the z axis in cartesian coordinates.",
],
x_start=[
colref_type,
colref_desc,
"(required)",
"Values from this column or array_like are used to position marks along the x axis in cartesian coordinates.",
],
x_end=[
colref_type,
colref_desc,
"(required)",
"Values from this column or array_like are used to position marks along the x axis in cartesian coordinates.",
],
a=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the a axis in ternary coordinates.",
],
b=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the b axis in ternary coordinates.",
],
c=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the c axis in ternary coordinates.",
],
r=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the radial axis in polar coordinates.",
],
theta=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks along the angular axis in polar coordinates.",
],
values=[
colref_type,
colref_desc,
"Values from this column or array_like are used to set values associated to sectors.",
],
parents=[
colref_type,
colref_desc,
"Values from this column or array_like are used as parents in sunburst and treemap charts.",
],
ids=[
colref_type,
colref_desc,
"Values from this column or array_like are used to set ids of sectors",
],
path=[
colref_list_type,
colref_list_desc,
"List of columns names or columns of a rectangular dataframe defining the hierarchy of sectors, from root to leaves.",
"An error is raised if path AND ids or parents is passed",
],
lat=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks according to latitude on a map.",
],
lon=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position marks according to longitude on a map.",
],
locations=[
colref_type,
colref_desc,
"Values from this column or array_like are to be interpreted according to `locationmode` and mapped to longitude/latitude.",
],
base=[
colref_type,
colref_desc,
"Values from this column or array_like are used to position the base of the bar.",
],
dimensions=[
colref_list_type,
colref_list_desc,
"Values from these columns are used for multidimensional visualization.",
],
dimensions_max_cardinality=[
"int (default 50)",
"When `dimensions` is `None` and `data_frame` is provided, "
"columns with more than this number of unique values are excluded from the output.",
"Not used when `dimensions` is passed.",
],
error_x=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size x-axis error bars.",
"If `error_x_minus` is `None`, error bars will be symmetrical, otherwise `error_x` is used for the positive direction only.",
],
error_x_minus=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size x-axis error bars in the negative direction.",
"Ignored if `error_x` is `None`.",
],
error_y=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size y-axis error bars.",
"If `error_y_minus` is `None`, error bars will be symmetrical, otherwise `error_y` is used for the positive direction only.",
],
error_y_minus=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size y-axis error bars in the negative direction.",
"Ignored if `error_y` is `None`.",
],
error_z=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size z-axis error bars.",
"If `error_z_minus` is `None`, error bars will be symmetrical, otherwise `error_z` is used for the positive direction only.",
],
error_z_minus=[
colref_type,
colref_desc,
"Values from this column or array_like are used to size z-axis error bars in the negative direction.",
"Ignored if `error_z` is `None`.",
],
color=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign color to marks.",
],
opacity=["float", "Value between 0 and 1. Sets the opacity for markers."],
line_dash=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign dash-patterns to lines.",
],
line_group=[
colref_type,
colref_desc,
"Values from this column or array_like are used to group rows of `data_frame` into lines.",
],
symbol=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign symbols to marks.",
],
size=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign mark sizes.",
],
radius=["int (default is 30)", "Sets the radius of influence of each point."],
hover_name=[
colref_type,
colref_desc,
"Values from this column or array_like appear in bold in the hover tooltip.",
],
hover_data=[
"list of str or int, or Series or array-like, or dict",
"Either a list of names of columns in `data_frame`, or pandas Series,",
"or array_like objects",
"or a dict with column names as keys, with values True (for default formatting)",
"False (in order to remove this column from hover information),",
"or a formatting string, for example ':.3f' or '|%a'",
"or list-like data to appear in the hover tooltip",
"or tuples with a bool or formatting string as first element,",
"and list-like data to appear in hover as second element",
"Values from these columns appear as extra data in the hover tooltip.",
],
custom_data=[
colref_list_type,
colref_list_desc,
"Values from these columns are extra data, to be used in widgets or Dash callbacks for example. This data is not user-visible but is included in events emitted by the figure (lasso selection etc.)",
],
text=[
colref_type,
colref_desc,
"Values from this column or array_like appear in the figure as text labels.",
],
names=[
colref_type,
colref_desc,
"Values from this column or array_like are used as labels for sectors.",
],
locationmode=[
"str",
"One of 'ISO-3', 'USA-states', or 'country names'",
"Determines the set of locations used to match entries in `locations` to regions on the map.",
],
facet_row=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign marks to facetted subplots in the vertical direction.",
],
facet_col=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign marks to facetted subplots in the horizontal direction.",
],
facet_col_wrap=[
"int",
"Maximum number of facet columns.",
"Wraps the column variable at this width, so that the column facets span multiple rows.",
"Ignored if 0, and forced to 0 if `facet_row` or a `marginal` is set.",
],
facet_row_spacing=[
"float between 0 and 1",
"Spacing between facet rows, in paper units. Default is 0.03 or 0.0.7 when facet_col_wrap is used.",
],
facet_col_spacing=[
"float between 0 and 1",
"Spacing between facet columns, in paper units Default is 0.02.",
],
animation_frame=[
colref_type,
colref_desc,
"Values from this column or array_like are used to assign marks to animation frames.",
],
animation_group=[
colref_type,
colref_desc,
"Values from this column or array_like are used to provide object-constancy across animation frames: rows with matching `animation_group`s will be treated as if they describe the same object in each frame.",
],
symbol_sequence=[
"list of str",
"Strings should define valid plotly.js symbols.",
"When `symbol` is set, values in that column are assigned symbols by cycling through `symbol_sequence` in the order described in `category_orders`, unless the value of `symbol` is a key in `symbol_map`.",
],
symbol_map=[
"dict with str keys and str values (default `{}`)",
"String values should define plotly.js symbols",
"Used to override `symbol_sequence` to assign a specific symbols to marks corresponding with specific values.",
"Keys in `symbol_map` should be values in the column denoted by `symbol`.",
"Alternatively, if the values of `symbol` are valid symbol names, the string `'identity'` may be passed to cause them to be used directly.",
],
line_dash_map=[
"dict with str keys and str values (default `{}`)",
"Strings values define plotly.js dash-patterns.",
"Used to override `line_dash_sequences` to assign a specific dash-patterns to lines corresponding with specific values.",
"Keys in `line_dash_map` should be values in the column denoted by `line_dash`.",
"Alternatively, if the values of `line_dash` are valid line-dash names, the string `'identity'` may be passed to cause them to be used directly.",
],
line_dash_sequence=[
"list of str",
"Strings should define valid plotly.js dash-patterns.",
"When `line_dash` is set, values in that column are assigned dash-patterns by cycling through `line_dash_sequence` in the order described in `category_orders`, unless the value of `line_dash` is a key in `line_dash_map`.",
],
color_discrete_sequence=[
"list of str",
"Strings should define valid CSS-colors.",
"When `color` is set and the values in the corresponding column are not numeric, values in that column are assigned colors by cycling through `color_discrete_sequence` in the order described in `category_orders`, unless the value of `color` is a key in `color_discrete_map`.",
"Various useful color sequences are available in the `plotly.express.colors` submodules, specifically `plotly.express.colors.qualitative`.",
],
color_discrete_map=[
"dict with str keys and str values (default `{}`)",
"String values should define valid CSS-colors",
"Used to override `color_discrete_sequence` to assign a specific colors to marks corresponding with specific values.",
"Keys in `color_discrete_map` should be values in the column denoted by `color`.",
"Alternatively, if the values of `color` are valid colors, the string `'identity'` may be passed to cause them to be used directly.",
],
color_continuous_scale=[
"list of str",
"Strings should define valid CSS-colors",
"This list is used to build a continuous color scale when the column denoted by `color` contains numeric data.",
"Various useful color scales are available in the `plotly.express.colors` submodules, specifically `plotly.express.colors.sequential`, `plotly.express.colors.diverging` and `plotly.express.colors.cyclical`.",
],
color_continuous_midpoint=[
"number (default `None`)",
"If set, computes the bounds of the continuous color scale to have the desired midpoint.",
"Setting this value is recommended when using `plotly.express.colors.diverging` color scales as the inputs to `color_continuous_scale`.",
],
size_max=["int (default `20`)", "Set the maximum mark size when using `size`."],
log_x=[
"boolean (default `False`)",
"If `True`, the x-axis is log-scaled in cartesian coordinates.",
],
log_y=[
"boolean (default `False`)",
"If `True`, the y-axis is log-scaled in cartesian coordinates.",
],
log_z=[
"boolean (default `False`)",
"If `True`, the z-axis is log-scaled in cartesian coordinates.",
],
log_r=[
"boolean (default `False`)",
"If `True`, the radial axis is log-scaled in polar coordinates.",
],
range_x=[
"list of two numbers",
"If provided, overrides auto-scaling on the x-axis in cartesian coordinates.",
],
range_y=[
"list of two numbers",
"If provided, overrides auto-scaling on the y-axis in cartesian coordinates.",
],
range_z=[
"list of two numbers",
"If provided, overrides auto-scaling on the z-axis in cartesian coordinates.",
],
range_color=[
"list of two numbers",
"If provided, overrides auto-scaling on the continuous color scale.",
],
range_r=[
"list of two numbers",
"If provided, overrides auto-scaling on the radial axis in polar coordinates.",
],
range_theta=[
"list of two numbers",
"If provided, overrides auto-scaling on the angular axis in polar coordinates.",
],
title=["str", "The figure title."],
template=[
"str or dict or plotly.graph_objects.layout.Template instance",
"The figure template name (must be a key in plotly.io.templates) or definition.",
],
width=["int (default `None`)", "The figure width in pixels."],
height=["int (default `None`)", "The figure height in pixels."],
labels=[
"dict with str keys and str values (default `{}`)",
"By default, column names are used in the figure for axis titles, legend entries and hovers.",
"This parameter allows this to be overridden.",
"The keys of this dict should correspond to column names, and the values should correspond to the desired label to be displayed.",
],
category_orders=[
"dict with str keys and list of str values (default `{}`)",
"By default, in Python 3.6+, the order of categorical values in axes, legends and facets depends on the order in which these values are first encountered in `data_frame` (and no order is guaranteed by default in Python below 3.6).",
"This parameter is used to force a specific ordering of values per column.",
"The keys of this dict should correspond to column names, and the values should be lists of strings corresponding to the specific display order desired.",
],
marginal=[
"str",
"One of `'rug'`, `'box'`, `'violin'`, or `'histogram'`.",
"If set, a subplot is drawn alongside the main plot, visualizing the distribution.",
],
marginal_x=[
"str",
"One of `'rug'`, `'box'`, `'violin'`, or `'histogram'`.",
"If set, a horizontal subplot is drawn above the main plot, visualizing the x-distribution.",
],
marginal_y=[
"str",
"One of `'rug'`, `'box'`, `'violin'`, or `'histogram'`.",
"If set, a vertical subplot is drawn to the right of the main plot, visualizing the y-distribution.",
],
trendline=[
"str",
"One of `'ols'` or `'lowess'`.",
"If `'ols'`, an Ordinary Least Squares regression line will be drawn for each discrete-color/symbol group.",
"If `'lowess`', a Locally Weighted Scatterplot Smoothing line will be drawn for each discrete-color/symbol group.",
],
trendline_color_override=[
"str",
"Valid CSS color.",
"If provided, and if `trendline` is set, all trendlines will be drawn in this color.",
],
render_mode=[
"str",
"One of `'auto'`, `'svg'` or `'webgl'`, default `'auto'`",
"Controls the browser API used to draw marks.",
"`'svg`' is appropriate for figures of less than 1000 data points, and will allow for fully-vectorized output.",
"`'webgl'` is likely necessary for acceptable performance above 1000 points but rasterizes part of the output. ",
"`'auto'` uses heuristics to choose the mode.",
],
direction=[
"str",
"One of '`counterclockwise'` or `'clockwise'`. Default is `'clockwise'`",
"Sets the direction in which increasing values of the angular axis are drawn.",
],
start_angle=[
"int (default `90`)",
"Sets start angle for the angular axis, with 0 being due east and 90 being due north.",
],
histfunc=[
"str (default `'count'` if no arguments are provided, else `'sum'`)",
"One of `'count'`, `'sum'`, `'avg'`, `'min'`, or `'max'`."
"Function used to aggregate values for summarization (note: can be normalized with `histnorm`).",
],
histnorm=[
"str (default `None`)",
"One of `'percent'`, `'probability'`, `'density'`, or `'probability density'`",
"If `None`, the output of `histfunc` is used as is.",
"If `'probability'`, the output of `histfunc` for a given bin is divided by the sum of the output of `histfunc` for all bins.",
"If `'percent'`, the output of `histfunc` for a given bin is divided by the sum of the output of `histfunc` for all bins and multiplied by 100.",
"If `'density'`, the output of `histfunc` for a given bin is divided by the size of the bin.",
"If `'probability density'`, the output of `histfunc` for a given bin is normalized such that it corresponds to the probability that a random event whose distribution is described by the output of `histfunc` will fall into that bin.",
],
barnorm=[
"str (default `None`)",
"One of `'fraction'` or `'percent'`.",
"If `'fraction'`, the value of each bar is divided by the sum of all values at that location coordinate.",
"`'percent'` is the same but multiplied by 100 to show percentages.",
"`None` will stack up all values at each location coordinate.",
],
groupnorm=[
"str (default `None`)",
"One of `'fraction'` or `'percent'`.",
"If `'fraction'`, the value of each point is divided by the sum of all values at that location coordinate.",
"`'percent'` is the same but multiplied by 100 to show percentages.",
"`None` will stack up all values at each location coordinate.",
],
barmode=[
"str (default `'relative'`)",
"One of `'group'`, `'overlay'` or `'relative'`",
"In `'relative'` mode, bars are stacked above zero for positive values and below zero for negative values.",
"In `'overlay'` mode, bars are drawn on top of one another.",
"In `'group'` mode, bars are placed beside each other.",
],
boxmode=[
"str (default `'group'`)",
"One of `'group'` or `'overlay'`",
"In `'overlay'` mode, boxes are on drawn top of one another.",
"In `'group'` mode, baxes are placed beside each other.",
],
violinmode=[
"str (default `'group'`)",
"One of `'group'` or `'overlay'`",
"In `'overlay'` mode, violins are on drawn top of one another.",
"In `'group'` mode, violins are placed beside each other.",
],
stripmode=[
"str (default `'group'`)",
"One of `'group'` or `'overlay'`",
"In `'overlay'` mode, strips are on drawn top of one another.",
"In `'group'` mode, strips are placed beside each other.",
],
zoom=["int (default `8`)", "Between 0 and 20.", "Sets map zoom level."],
orientation=[
"str, one of `'h'` for horizontal or `'v'` for vertical. ",
"(default `'v'` if `x` and `y` are provided and both continous or both categorical, ",
"otherwise `'v'`(`'h'`) if `x`(`y`) is categorical and `y`(`x`) is continuous, ",
"otherwise `'v'`(`'h'`) if only `x`(`y`) is provided) ",
],
line_close=[
"boolean (default `False`)",
"If `True`, an extra line segment is drawn between the first and last point.",
],
line_shape=["str (default `'linear'`)", "One of `'linear'` or `'spline'`."],
scope=[
"str (default `'world'`).",
"One of `'world'`, `'usa'`, `'europe'`, `'asia'`, `'africa'`, `'north america'`, or `'south america'`)"
"Default is `'world'` unless `projection` is set to `'albers usa'`, which forces `'usa'`.",
],
projection=[
"str ",
"One of `'equirectangular'`, `'mercator'`, `'orthographic'`, `'natural earth'`, `'kavrayskiy7'`, `'miller'`, `'robinson'`, `'eckert4'`, `'azimuthal equal area'`, `'azimuthal equidistant'`, `'conic equal area'`, `'conic conformal'`, `'conic equidistant'`, `'gnomonic'`, `'stereographic'`, `'mollweide'`, `'hammer'`, `'transverse mercator'`, `'albers usa'`, `'winkel tripel'`, `'aitoff'`, or `'sinusoidal'`"
"Default depends on `scope`.",
],
center=[
"dict",
"Dict keys are `'lat'` and `'lon'`",
"Sets the center point of the map.",
],
mapbox_style=[
"str (default `'basic'`, needs Mapbox API token)",
"Identifier of base map style, some of which require a Mapbox API token to be set using `plotly.express.set_mapbox_access_token()`.",
"Allowed values which do not require a Mapbox API token are `'open-street-map'`, `'white-bg'`, `'carto-positron'`, `'carto-darkmatter'`, `'stamen-terrain'`, `'stamen-toner'`, `'stamen-watercolor'`.",
"Allowed values which do require a Mapbox API token are `'basic'`, `'streets'`, `'outdoors'`, `'light'`, `'dark'`, `'satellite'`, `'satellite-streets'`.",
],
points=[
"str or boolean (default `'outliers'`)",
"One of `'outliers'`, `'suspectedoutliers'`, `'all'`, or `False`.",
"If `'outliers'`, only the sample points lying outside the whiskers are shown.",
"If `'suspectedoutliers'`, all outlier points are shown and those less than 4*Q1-3*Q3 or greater than 4*Q3-3*Q1 are highlighted with the marker's `'outliercolor'`.",
"If `'outliers'`, only the sample points lying outside the whiskers are shown.",
"If `'all'`, all sample points are shown.",
"If `False`, no sample points are shown and the whiskers extend to the full range of the sample.",
],
box=["boolean (default `False`)", "If `True`, boxes are drawn inside the violins."],
notched=["boolean (default `False`)", "If `True`, boxes are drawn with notches."],
geojson=[
"GeoJSON-formatted dict",
"Must contain a Polygon feature collection, with IDs, which are references from `locations`.",
],
featureidkey=[
"str (default: `'id'`)",
"Path to field in GeoJSON feature object with which to match the values passed in to `locations`."
"The most common alternative to the default is of the form `'properties.<key>`.",
],
cumulative=[
"boolean (default `False`)",
"If `True`, histogram values are cumulative.",
],
nbins=["int", "Positive integer.", "Sets the number of bins."],
nbinsx=["int", "Positive integer.", "Sets the number of bins along the x axis."],
nbinsy=["int", "Positive integer.", "Sets the number of bins along the y axis."],
branchvalues=[
"str",
"'total' or 'remainder'",
"Determines how the items in `values` are summed. When"
"set to 'total', items in `values` are taken to be value"
"of all its descendants. When set to 'remainder', items"
"in `values` corresponding to the root and the branches"
":sectors are taken to be the extra part not part of the"
"sum of the values at their leaves.",
],
maxdepth=[
"int",
"Positive integer",
"Sets the number of rendered sectors from any given `level`. Set `maxdepth` to -1 to render all the"
"levels in the hierarchy.",
],
)
def make_docstring(fn, override_dict=None, append_dict=None):
    """Assemble a numpydoc-style docstring for a Plotly Express function.

    The parameter list is taken from ``fn``'s signature; each parameter's
    type line and wrapped description come from the module-level ``docs``
    table unless overridden or extended by the caller.

    Parameters
    ----------
    fn: callable
        Function whose positional-argument names drive the docstring.
    override_dict: dict or None (default `None`)
        Maps a parameter name to a full replacement entry: a list whose
        first element is the type string, followed by description parts.
    append_dict: dict or None (default `None`)
        Maps a parameter name to extra description parts appended after
        the base entry.

    Returns
    -------
    str
        The generated docstring, ending with a Returns section.
    """
    # Normalize here instead of using mutable default arguments (a classic
    # Python pitfall, even though these dicts were never mutated).
    override_dict = {} if override_dict is None else override_dict
    append_dict = {} if append_dict is None else append_dict
    tw = TextWrapper(width=75, initial_indent=" ", subsequent_indent=" ")
    result = (fn.__doc__ or "") + "\nParameters\n----------\n"
    # getfullargspec falls back to getargspec on Python 2; index 0 is the
    # positional-argument name list in both variants.
    for param in getfullargspec(fn)[0]:
        if override_dict.get(param):
            param_doc = list(override_dict[param])
        elif param in docs:
            param_doc = list(docs[param])
        else:
            # Previously a missing parameter raised KeyError before the
            # "(documentation missing from map)" branch below could ever
            # trigger; fall back to an empty entry so the placeholder shows.
            param_doc = [""]
        if append_dict.get(param):
            param_doc += append_dict[param]
        param_desc_list = param_doc[1:]
        param_desc = (
            tw.fill(" ".join(param_desc_list or ""))
            if param in docs or param in override_dict
            else "(documentation missing from map)"
        )
        param_type = param_doc[0]
        result += "%s: %s\n%s\n" % (param, param_type, param_desc)
    result += "\nReturns\n-------\n"
    result += " plotly.graph_objects.Figure"
    return result
| [
"50989568+BimiLevi@users.noreply.github.com"
] | 50989568+BimiLevi@users.noreply.github.com |
498cfa85a40dbce059482bbd34786e6433736369 | ff844756ad80c33c2cfcdd7f41a010e8e5674bbf | /test/app_test.py | ed87550c798181e8fababda910e263f710ed8f83 | [] | no_license | riquellopes/congratulations | f133be337413b500052639f30d6ea6b7182681da | 074df3cb2b8dfab007e7fd93f5f1786b0fd7ad95 | refs/heads/master | 2016-09-05T11:52:32.941162 | 2012-03-05T17:18:19 | 2012-03-05T17:18:19 | 3,546,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,440 | py | # coding: utf-8
import datetime
import unittest
from mock import Mock, patch
from nose.tools import assert_equals, assert_true, assert_raises, assert_false
from app import Congratulations, CongratulationsExEnd, app
class MockUrllib(Mock):
    """Stand-in for the object returned by ``urllib2.urlopen``.

    Instead of fetching a URL, ``read()`` returns the contents of a local
    HTML fixture file, so the scraping code can be exercised offline.
    """
    def __init__(self, file_test):
        # Initialise the Mock machinery before setting our own attribute;
        # skipping this breaks attribute assignment on newer mock releases.
        Mock.__init__(self)
        self.file_test = file_test

    def read(self):
        """Return the fixture file's contents as a single string."""
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(self.file_test) as handle:
            return "".join(handle)
class MockCongratulations(Congratulations):
    """Patchable subclass of Congratulations kept as a test hook."""
    pass
class CongratulationsTest(unittest.TestCase):
    def test_class_Congratulations_existe(self):
        # Sanity check: the Congratulations class exists and is an object.
        assert_true(isinstance(Congratulations, object))
    def test_objeto_recebe_nome_do_consinscrito(self):
        """
        When an instance is created, the conscript's name must be stored for the lookup.
        """
        c = Congratulations(name='leandro')
        assert_equals((c.name.upper()), 'LEANDRO')
    def test_caso_nome_nao_seja_passado_deve_haver_um_exception(self):
        """
        If the conscript's name is not given, the constructor must raise an exception.
        """
        assert_raises(Exception, Congratulations)
    @patch('app.urllib2.urlopen')
    def test_jonas_brother_no_accepted(self, sr):
        """
        Conscript Jonas Brother did not have his profile approved.
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        c = Congratulations(name='Jonas Brother', url=app.config['URL_D'])
        c.search()
        assert_equals(c.status.lower(), "no accepted")
    @patch('app.urllib2.urlopen')
    def test_leandro_accepted(self, sr):
        """
        Conscript Leandro had his profile approved.
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        c = Congratulations(name='Leandro', url=app.config['URL_D'])
        c.search()
        assert_equals(c.status.lower(), "accepted")
    @patch('app.urllib2.urlopen')
    def test_jarbas_no_processed(self, sr):
        """
        Conscript Jarbas has not had his profile processed yet.
        """
        sr.return_value = MockUrllib('teste.html')
        c = Congratulations(name='Jarbas', url=app.config['URL_S'])
        c.search()
        assert_equals(c.status.lower(), "no processed")
    @patch('app.urllib2.urlopen')
    def test_menssage_tela_jarbas(self, sr):
        """
        If Jarbas' status has not been processed yet, the system builds the "wait" message.
        """
        sr.return_value = MockUrllib('teste.html')
        c = Congratulations(name='Jarbas', url=app.config['URL_S'], name_display='@riquellopes')
        c.search()
        assert_equals(c.display_menssage.lower(), "your curriculum wasn't <span class='wait'>processed</span>")
    @patch('app.urllib2.urlopen')
    def test_menssagem_tela_jonas(self, sr):
        """
        Once Jonas' status has been processed, the system builds the rejection message.
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        c = Congratulations(name='Jonas Brother', url=app.config['URL_D'], name_display='@brother')
        c.search()
        assert_equals(c.display_menssage.lower(), "sorry your curriculum wasn't <span class='failure'>accepted</span>")
@patch('app.urllib2.urlopen')
def test_messagem_tela_leandro(self, sr):
"""
Caso situação do Leandro já tenha cido processada, sistema gera mensagem::
"""
sr.return_value = MockUrllib('teste_dentista.html')
c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro')
c.search()
assert_equals(c.display_menssage.lower(), "congratulations your curriculum was <span class='sucess'>accepted</span>")
def test_caso_search_nao_seja_chamado(self):
"""
Caso método search não seja chamado antes do display_menssage, deve haver um exception::
"""
c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro')
try:
c.display_menssage
except Exception, e:
assert_true(True)
@patch('app.urllib2.urlopen')
def test_periodo(self, sr):
"""
Caso período de liberação de resultado já tenha encerrado, search deve levantar exception::
"""
sr.return_value = MockUrllib('teste_dentista.html')
c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro', date_end='2012-02-26')
assert_raises(CongratulationsExEnd, c.search)
@patch('app.urllib2.urlopen')
def test_save(self, sr):
"""
Método save deve gravar as informações em congratulatios.json::
"""
sr.return_value = MockUrllib('teste_dentista.html')
date_end = datetime.datetime.now().strftime("%Y-%m-%d")
c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro', date_end=date_end)
assert_true(c.save())
@patch('app.urllib2.urlopen')
def test_save_none(self, sr):
"""
Caso periodo de veficação tenha encerrado, save deve retorna None::
"""
sr.return_value = MockUrllib('teste_dentista.html')
c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro', date_end='2012-02-26')
assert_true(c.save() is None)
class ViewTest(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
@patch('app.Congratulations.save')
def test_home(self, cg):
"""
Título na página home deve ser Congratulatios app::
"""
rs = self.app.get("/")
assert_true('<title>Congratulations APP</title>' in str(rs.data) )
@patch('app.urllib2.urlopen')
def test_process(self, sr):
"""
Toda vez que o index for acessado, sistema deve atualizar as informações do arquivo index.html::
"""
sr.return_value = MockUrllib('teste_sistema.html')
rs = self.app.get('/')
assert_true('Last update: <i>%s</i>' % (datetime.datetime.now().strftime("%Y %B, %d %H:%M")) in str(rs.data)) | [
"riquellopes@gmail.com"
] | riquellopes@gmail.com |
6b83013931c9e1b0bc505ae8a5906aa6c3985271 | afcb260d6f0c1d88232d2e300d26d8fb71b5ef43 | /django-app/member/views.py | 0a3ad5f76c034d58d7139ec6bc3f14240c145f63 | [] | no_license | JeongEuiJin/deploy-eb-docker | e5d10f65166ca8a1a4a5fdd32c9647c0d8f5feed | 1f5b57aa5e119f68c169f059e9bf88d5fbf76850 | refs/heads/master | 2020-12-02T17:46:19.905183 | 2017-07-13T07:32:36 | 2017-07-13T07:32:36 | 96,424,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from .forms import CustomUserCreationForm
# Create your views here.
# @login_required
def home(request):
return render(request, 'member/home.html')
def signup(request):
if request.method == 'POST':
form = CustomUserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('member:home')
else:
form = CustomUserCreationForm()
context = {
'form': form,
}
return render(request, 'member/signup.html', context)
| [
"hehar1020@gmail.com"
] | hehar1020@gmail.com |
7f028271161ec849660d7f6454b99fd19c01e985 | 2475acb15e1dcd4a42ed8849a6ae7c6f383d5b59 | /calendar_api/migrations/0008_auto_20190529_1753.py | 78acb710d43a80372e2816d2f5f91e5967eda897 | [] | no_license | amigo2/Goker | 2e66cabb32253bfe43a188241b58df962a9a1583 | 0509446382eacabdb7c006a60362891f217bcc3a | refs/heads/master | 2022-12-13T09:50:47.720250 | 2019-08-13T20:54:43 | 2019-08-13T20:54:43 | 169,242,538 | 0 | 0 | null | 2022-12-08T12:23:28 | 2019-02-05T13:06:35 | Python | UTF-8 | Python | false | false | 2,230 | py | # Generated by Django 2.1.4 on 2019-05-29 15:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calendar_api', '0007_auto_20190529_1749'),
]
operations = [
migrations.RenameField(
model_name='adquisition',
old_name='end_date',
new_name='end',
),
migrations.RenameField(
model_name='adquisition',
old_name='end_time',
new_name='start',
),
migrations.RenameField(
model_name='news',
old_name='end_date',
new_name='end',
),
migrations.RenameField(
model_name='news',
old_name='end_time',
new_name='start',
),
migrations.RenameField(
model_name='recontact',
old_name='end_date',
new_name='end',
),
migrations.RenameField(
model_name='recontact',
old_name='end_time',
new_name='start',
),
migrations.RenameField(
model_name='salesevent',
old_name='end_date',
new_name='end',
),
migrations.RenameField(
model_name='salesevent',
old_name='end_time',
new_name='start',
),
migrations.RemoveField(
model_name='adquisition',
name='start_date',
),
migrations.RemoveField(
model_name='adquisition',
name='start_time',
),
migrations.RemoveField(
model_name='news',
name='start_date',
),
migrations.RemoveField(
model_name='news',
name='start_time',
),
migrations.RemoveField(
model_name='recontact',
name='start_date',
),
migrations.RemoveField(
model_name='recontact',
name='start_time',
),
migrations.RemoveField(
model_name='salesevent',
name='start_date',
),
migrations.RemoveField(
model_name='salesevent',
name='start_time',
),
]
| [
"amigo2@hotmail.com"
] | amigo2@hotmail.com |
623bec47ef4142d1023c81692b0918123a72b98f | 2b0f7d5e7b43bb9d32ee4044a79b0fc67294c986 | /test_skewed.py | 8dcd19468398179bf0edd731c04d67b71f0f73e4 | [] | no_license | rajikalk/Scripts | 766fcc16b97dfd4f918d8efbd2ec529a2dd71639 | 8caec46a6272ff1c7aeb5e359610d7e695dd7d34 | refs/heads/master | 2023-08-30T12:53:09.220312 | 2023-08-28T15:08:23 | 2023-08-28T15:08:23 | 87,136,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,653 | py | import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import math as math
import scipy.special as sp
import pickle
import matplotlib.gridspec as gridspec
def func(x, sigma, mu, alpha, c, amp):
#normal distribution
normpdf = (1/(sigma*np.sqrt(2*math.pi)))*np.exp(-(np.power((x-mu),2)/(2*np.power(sigma,2))))
normcdf = (0.5*(1+sp.erf((alpha*((x-mu)/sigma))/(np.sqrt(2)))))
return 2*amp*normpdf*normcdf + c
files = ["Mach_0.1/multiple_folds_over_5_orbits.pkl","Mach_0.2/multiple_folds_over_5_orbits.pkl"]
max_accretion = []
base_accretion = []
strength = []
beta = []
y_fits = []
plot_e = []
for file in files:
file_open = open(file, 'rb')
multiple_folds, phase_centers, mean_eccentricity, std_eccentricity, accretion_err, n_lines, multiple_folds_normalised = pickle.load(file_open)
file_open.close()
plot_e.append(mean_eccentricity)
x_data = phase_centers[23:-15]
x = np.linspace(np.min(x_data),np.max(x_data),100)
max_accretion.append([])
base_accretion.append([])
beta.append([])
strength.append([])
y_fits.append([])
file_name = file.split('/')[0] +'/'
for orbit in range(len(multiple_folds_normalised)):
'''
if mean_eccentricity[orbit] == 0.27:
import pdb
pdb.set_trace()
'''
y_data = multiple_folds_normalised[orbit][23:-15]
plt.clf()
plt.plot(x_data,y_data,ls='steps-mid')
results = []
for tries in range(50):
sigma = np.random.random()*2*0.15
amp = np.random.random()*2*np.max(y_data)
p = np.array([sigma, x_data[np.argmax(y_data)], -5,np.min(y_data),amp])
try:
popt, pcov = curve_fit(func, x_data, y_data, p)
except:
pass
err = np.sum(np.abs(func(x_data, *popt) - y_data))
results.append((err, popt))
if err < 0.1:
break
err, popt = min(results, key=lambda x:x[0])
if mean_eccentricity[orbit] == 0.27:
popt = np.array([0.35, x_data[np.argmax(y_data)]+0.15, -5,np.median(y_data)-0.5,np.max(y_data)*0.2])
y_fit= func(x, *popt)
sigmag, mu, alpha, base, amp = popt
max = np.max(y_fit)
max_accretion[-1].append(max)
base_accretion[-1].append(np.min(y_fit))
beta[-1].append(max/np.min(y_fit))
strength[-1].append(sigmag)
plt.plot(x,y_fit)
plt.ylim([0,6])
y_fits[-1].append(y_fit)
print('---------------------------------------------')
print('eccentricity = '+str(mean_eccentricity[orbit]))
print('amplitude = '+str(amp))
print('maximum_value = '+str(np.max(y_fit)))
print('base_accretion = '+str(np.min(y_fit)))
print('strength = '+str(sigmag))
plt.savefig(file_name+'fittted_eccentricity_'+str(mean_eccentricity[orbit])+'.pdf')
#Make normalised fits plot
plt.clf()
fig = plt.figure()
fig.set_size_inches(4.0, 6.0)
gs = gridspec.GridSpec(2, 1)
gs.update(hspace=0.0)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0], sharex=ax1, sharey=ax1)
n_lines = len(y_fits[0])
c_index = np.linspace(0.0, 0.95, n_lines)
e_int = 0
for fit in y_fits[0]:
ax1.plot(x, fit, color=plt.cm.magma(c_index[e_int]), label='e='+str(plot_e[0][e_int]))
e_int = e_int + 1
ax1.legend(loc='center left', bbox_to_anchor=(0.985, 0.5))
ax1.set_ylabel("Normalised Accretion")
xticklabels = ax1.get_xticklabels()
plt.setp(xticklabels, visible=False)
ax1.tick_params(axis='x', which='major', direction="in")
e_int = 0
for fit in y_fits[1]:
ax2.plot(x, fit, color=plt.cm.magma(c_index[e_int]), label='e='+str(plot_e[0][e_int]))
e_int = e_int + 1
ax2.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax2.set_xlabel("Orbital Phase ($\phi$)")
ax2.set_ylabel("Normalised Accretion")
ax2.text(0.1, ax1.get_ylim()[1]*0.9, 'T2', va="center", ha="left", color='k', fontsize=args.text_font)
ax1.text(0.1, ax1.get_ylim()[1]*0.9, 'T1', va="center", ha="left", color='k', fontsize=args.text_font)
ax2.tick_params(axis='x', which='major', direction="in")
yticklabels = ax2.get_yticklabels()
plt.setp(yticklabels[-1], visible=False)
plt.savefig('normalised_fits.eps', bbox_inches='tight', pad_inches = 0.02)
plt.savefig('normalised_fits.pdf', bbox_inches='tight', pad_inches = 0.02)
#make beta plot
plt.clf()
plt.scatter(mean_eccentricity, beta[0], label='T1', marker='o')
plt.scatter(mean_eccentricity, beta[1], label='T2', marker='^')
plt.xlabel('eccentricity')
plt.ylabel('$\\beta$')
plt.legend(loc='best')
plt.savefig('beta.pdf')
| [
"reggie@Reggies-MBP.unicph.domain"
] | reggie@Reggies-MBP.unicph.domain |
53a6dc003f4989217077b1e3d96e789daac4ada0 | cb620e43469856c5a9b578ada5e37a3e610adbbb | /tests/orm/relations/test_morph_to_many.py | 82803862b0910b9c05d28a5de19c63b630db2bd5 | [
"MIT"
] | permissive | sxslex/orator | 5c889bab5a5f43be672275f1623135c7e1cbc98c | 0835a7c0341a5ab7e051318a52ab27f58b695916 | refs/heads/develop | 2021-01-09T07:04:54.556519 | 2015-11-25T11:42:53 | 2015-11-25T11:42:53 | 46,634,951 | 1 | 0 | null | 2015-11-21T21:37:25 | 2015-11-21T21:37:24 | null | UTF-8 | Python | false | false | 5,908 | py | # -*- coding: utf-8 -*-
import arrow
from flexmock import flexmock, flexmock_teardown
from ... import OratorTestCase
from ...utils import MockConnection
from orator.query.builder import QueryBuilder
from orator.query.grammars import QueryGrammar
from orator.query.processors import QueryProcessor
from orator.query.expression import QueryExpression
from orator.orm.builder import Builder
from orator.orm.model import Model
from orator.orm.relations import MorphToMany
from orator.orm.relations.pivot import Pivot
from orator.orm.collection import Collection
class OrmMorphToManyTestCase(OratorTestCase):
def tearDown(self):
flexmock_teardown()
def test_eager_constraints_are_properly_added(self):
relation = self._get_relation()
relation.get_query().get_query().should_receive('where_in').once().with_args('taggables.taggable_id', [1, 2])
relation.get_query().should_receive('where').once()\
.with_args('taggables.taggable_type', relation.get_parent().__class__.__name__)
model1 = OrmMorphToManyModelStub()
model1.id = 1
model2 = OrmMorphToManyModelStub()
model2.id = 2
relation.add_eager_constraints([model1, model2])
def test_attach_inserts_pivot_table_record(self):
flexmock(MorphToMany, touch_if_touching=lambda: True)
relation = self._get_relation()
query = flexmock()
query.should_receive('from_').once().with_args('taggables').and_return(query)
query.should_receive('insert').once()\
.with_args(
[{
'taggable_id': 1,
'taggable_type': relation.get_parent().__class__.__name__,
'tag_id': 2,
'foo': 'bar',
}])\
.and_return(True)
mock_query_builder = flexmock()
relation.get_query().should_receive('get_query').and_return(mock_query_builder)
mock_query_builder.should_receive('new_query').once().and_return(query)
relation.should_receive('touch_if_touching').once()
relation.attach(2, {'foo': 'bar'})
def test_detach_remove_pivot_table_record(self):
flexmock(MorphToMany, touch_if_touching=lambda: True)
relation = self._get_relation()
query = flexmock()
query.should_receive('from_').once().with_args('taggables').and_return(query)
query.should_receive('where').once().with_args('taggable_id', 1).and_return(query)
query.should_receive('where').once()\
.with_args('taggable_type', relation.get_parent().__class__.__name__).and_return(query)
query.should_receive('where_in').once().with_args('tag_id', [1, 2, 3])
query.should_receive('delete').once().and_return(True)
mock_query_builder = flexmock()
relation.get_query().should_receive('get_query').and_return(mock_query_builder)
mock_query_builder.should_receive('new_query').once().and_return(query)
relation.should_receive('touch_if_touching').once()
self.assertTrue(relation.detach([1, 2, 3]))
def test_detach_clears_all_records_when_no_ids(self):
flexmock(MorphToMany, touch_if_touching=lambda: True)
relation = self._get_relation()
query = flexmock()
query.should_receive('from_').once().with_args('taggables').and_return(query)
query.should_receive('where').once().with_args('taggable_id', 1).and_return(query)
query.should_receive('where').once()\
.with_args('taggable_type', relation.get_parent().__class__.__name__).and_return(query)
query.should_receive('where_in').never()
query.should_receive('delete').once().and_return(True)
mock_query_builder = flexmock()
relation.get_query().should_receive('get_query').and_return(mock_query_builder)
mock_query_builder.should_receive('new_query').once().and_return(query)
relation.should_receive('touch_if_touching').once()
self.assertTrue(relation.detach())
def _get_relation(self):
builder, parent = self._get_relation_arguments()[:2]
return MorphToMany(builder, parent, 'taggable', 'taggables', 'taggable_id', 'tag_id')
def _get_relation_arguments(self):
parent = flexmock(Model())
parent.should_receive('get_morph_name').and_return(parent.__class__.__name__)
parent.should_receive('get_key').and_return(1)
parent.should_receive('get_created_at_column').and_return('created_at')
parent.should_receive('get_updated_at_column').and_return('updated_at')
query = flexmock(QueryBuilder(MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()))
flexmock(Builder)
builder = Builder(query)
builder.should_receive('get_query').and_return(query)
related = flexmock(Model())
builder.set_model(related)
builder.should_receive('get_model').and_return(related)
related.should_receive('get_key_name').and_return('id')
related.should_receive('get_table').and_return('tags')
related.should_receive('get_morph_name').and_return(parent.__class__.__name__)
builder.get_query().should_receive('join').once().with_args('taggables', 'tags.id', '=', 'taggables.tag_id')
builder.should_receive('where').once().with_args('taggables.taggable_id', '=', 1)
builder.should_receive('where').once().with_args('taggables.taggable_type', parent.__class__.__name__)
return builder, parent, 'taggable', 'taggables', 'taggable_id', 'tag_id', 'relation_name', False
class OrmMorphToManyModelStub(Model):
__guarded__ = []
class OrmMorphToManyModelPivotStub(Model):
__guarded__ = []
def __init__(self):
super(OrmMorphToManyModelPivotStub, self).__init__()
self.pivot = OrmMorphToManyPivotStub()
class OrmMorphToManyPivotStub(object):
pass
| [
"sebastien.eustace@gmail.com"
] | sebastien.eustace@gmail.com |
db54103a8e02fc43379d24cc3ceb775e95ccf87f | 42e5ed5df5d8c8141691426a8f1996d955584a5e | /sprzet/urls.py | a364724a3a9177f575b8e3bc7bdc514455718412 | [] | no_license | pomidorki185ic/wypozyczalnia | 8ac71d1b5f438b8569ce756279d8a43805c1797a | c57ce4f5e48421b7562183f0b76da6920b2c0bca | refs/heads/master | 2023-05-01T16:15:19.980479 | 2021-05-19T11:00:54 | 2021-05-19T11:00:54 | 317,298,366 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from django.urls import path
from django.views.generic.base import TemplateView
from . import views
urlpatterns = [
path('spis_sprzetu', views.spis_sprzetu, name = 'spis_sprzetu'),
# path('profilKlienta/rejestracja', views.rejestracja, name = 'profilKlienta/rejestracja'),
#path('profilKlienta/profilKlienta/rejestracja', views.AboutView, name = 'home'),
]
| [
"dawidpawlowski98@wp.pl"
] | dawidpawlowski98@wp.pl |
9781044e5880f01dc60c019e6cb074879a5f0e35 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/registry_usage_py3.py | 4580c01f22c3d11f336e70cb9766cd0fb3f80ed2 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,529 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RegistryUsage(Model):
"""The quota usage for a container registry.
:param name: The name of the usage.
:type name: str
:param limit: The limit of the usage.
:type limit: long
:param current_value: The current value of the usage.
:type current_value: long
:param unit: The unit of measurement. Possible values include: 'Count',
'Bytes'
:type unit: str or
~azure.mgmt.containerregistry.v2018_09_01.models.RegistryUsageUnit
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'limit': {'key': 'limit', 'type': 'long'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'unit': {'key': 'unit', 'type': 'str'},
}
def __init__(self, *, name: str=None, limit: int=None, current_value: int=None, unit=None, **kwargs) -> None:
super(RegistryUsage, self).__init__(**kwargs)
self.name = name
self.limit = limit
self.current_value = current_value
self.unit = unit
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
7fe23b074a91fc220c26cac409543a4218fa0930 | 91d8fc56bb263296c9f3d2bae4c7929f38057319 | /nmrmint/GUI/widgets.py | 39057f9d5d1bb08003175e6c6fa4ac2d2b1a41d7 | [
"MIT"
] | permissive | sametz/nmrmint | f9f8ca63ea0573c93a38834beba33652eee3595e | 9785e09b62e581bdbb5628e85df49d41fb86ec66 | refs/heads/master | 2022-02-02T03:50:08.509722 | 2018-07-03T02:01:57 | 2018-07-03T02:01:57 | 106,969,220 | 2 | 0 | null | 2017-10-15T00:43:10 | 2017-10-14T23:08:00 | Python | UTF-8 | Python | false | false | 27,493 | py | """Custom widgets composed from standard tkinter widgets.
Provides the following classes:
* _BaseEntryFrame: a base class for a Frame that contains a Label and an Entry
widget, has custom behavior, and calls back when a change to the entry has
been committed.
* ArrayBox: a subclass of _BaseEntryFrame that reads/writes its value from/to a
2-D numpy array.
* ArraySpinBox: a subclass of ArrayBox that uses a SpinBox widget instead of
an Entry widget.
* VarBox: Similar to _BaseEntryFrame, but is not provided a data structure or
callback in its arguments. Instead, it assumes the parent has the
necessary attribute and method. TODO: refactor this out of the first-order
toolbars and use ArrayBox instead.
* IntBox: Similar to VarBox, but with the Entry restricted to integers
instead of floats. TODO: refactor so that it either subclasses ArrayBox or so
that the widgets in this module use subclasses of Entry specific to
float-only or int-only entries.
* VarButtonBox: emulates the WINDNMR-style entry boxes, like a deluxe
SpinBox. TODO: refactor so that up/down arrow behavior, methods etc are
identical to those in ArraySpinBox.
* SimpleVariableBox: A subclass of _BaseEntryFrame that takes initial and
minimum-value argument, instantiates the Entry with the initial value,
and defaults to the custom minimum value when empty.
"""
# TODO: keep implementing composition over inheritance for customizing widgets
# TODO: better names, e.g. VarBox, SimpleVariableBox
from tkinter import *
up_arrow = u"\u21e7"
down_arrow = u"\u21e9"
left_arrow = u"\u21e6"
right_arrow = u"\u21e8"
class _BaseEntryFrame(Frame):
"""A tkinter Frame that holds a labeled entry widget, and a callback for
when a change is committed to the Entry's value.
_BaseEntryFrame is intended as a new base class that will be inherited from.
Methods:
* get_value: return the contents of the Entry as a str.
* set_value: set the contents of the entry to a supplied argument
Attributes:
* current_value: the current value stored in the entry-like widget.
"""
def __init__(self, parent=None, name='', color='white',
callback=None,
**options):
"""
__init__ is broken into multiple method references, to allow
subclasses/mixins to modify as needed.
Keyword arguments:
:param parent: The parent tkinter object
:param name: (str) Optional name. Used as Label text as well as
widget identification.
:param color: (str) Default color for widget and contents.
:param callback: function to be called when change in Entry contents
committed.
:param options: (dict) Standard kwargs for a tkinter Frame
"""
Frame.__init__(self, parent, relief=RIDGE, borderwidth=0,
background=color, **options)
self._name = name
self._color = color
self._callback = callback
# The initial value type for the widget depends on subclass, so:
# Uncomment the code below to test _BaseEntryFrame
try:
assert self._initial_value is not None
except AttributeError:
self._initial_value = 0.00 # Should be overridden by subclass
self._initialize()
self._add_label()
self._add_entry()
self._bind_entry()
self._validate_entry()
def _initialize(self):
"""
Create a StringVar object; _initialize self.value with the initial
number, and _initialize StringVar with that same value.
Subclasses of BasentryFrame should overwrite this function to
accomodate
however initial values are passed into them.
"""
self._value_var = StringVar()
self.current_value = self._initial_value
self._value_var.set(self.current_value)
def _add_label(self):
"""Add self._name to a Label at the top of the frame."""
Label(self, text=self._name, bg=self._color, bd=0).pack(side=TOP)
def _add_entry(self):
"""Add an Entry widget to the _BaseEntryFrame.
Subclasses of EntryBox that use a different entry widget (e.g. SpinBox)
should overwrite this function.
"""
self._entry = Entry(self, width=7,
validate='key') # check for number on keypress)
self._entry.pack(side=TOP, fill=X)
self._entry.config(textvariable=self._value_var)
def _bind_entry(self):
"""Define behavior when the Entry widget loses focus.
_BaseEntryFrame assumes action should only be taken when a change in the
Entry widget is "committed" by hitting Return, Tab, or clicking
outside the widget.
Subclasses may overwrite/extend _bind_entry to tailor behavior.
"""
self._entry.bind('<Return>', lambda event: self._on_return(event))
self._entry.bind('<Tab>', lambda event: self._on_tab(event))
self._entry.bind('<FocusOut>', lambda event: self._refresh())
self._entry.bind('<FocusIn>',
lambda event: self._entry.select_range(0, END))
# noinspection PyUnusedLocal
def _on_return(self, event):
"""Refresh the view and shift focus when Return key is hit."""
# Previously self._refresh() had to be called here, but since
# <FocusOut> triggers refresh, omitting it here avoids double calls
# to _callback
self._find_next_entry(self._entry).focus()
def _refresh(self):
"""Save the Entry value to the data structure then request a view
refresh.
"""
if self._entry_is_changed():
self._save_entry()
self._callback()
def _entry_is_changed(self):
"""Check if the current Entry value differs from the last saved
value.
:return: True if changed, False if not.
"""
get_value = self._value_var.get() # for debugging
return str(self.current_value) != get_value
def _save_entry(self):
"""Saves widget's entry as self.stored_value , filling the entry with
0.00 if it was empty.
Subclasses should overwrite _save_entry to suit needs of their data
type and call to _callback.
"""
if not self._value_var.get(): # if entry left blank,
self._value_var.set(0.00) # fill it with zero
value = float(self._value_var.get())
self.current_value = value
def _find_next_entry(self, current_widget):
"""Return the next Entry-like widget in tkinter's widget traversal.
Used to ignore the other widgets in the GUI such as Buttons,
RadioButtons, and matplotlib widgets.
Subclasses can modify this behavior if other widget types are to be
acknowledged.
:param current_widget: the widget that needs to lose focus
:return: the next entry-like widget
"""
next_entry = current_widget.tk_focusNext()
if next_entry.widgetName in ['entry', 'spinbox']:
return next_entry
else:
return self._find_next_entry(next_entry)
def _on_tab(self, *event):
"""Refresh the view and shift focus when Tab key is hit."""
self._on_return(event)
return 'break' # override default tkinter tab behavior
def _validate_entry(self):
"""Restrict Entry inputs to a valid type"""
# check on each keypress if new result will be valid
self._entry['validatecommand'] = (self.register(self._is_valid), '%P')
# sound 'bell' if bad keypress
self._entry['invalidcommand'] = 'bell'
@staticmethod
def _is_valid(entry):
"""Test to see if entry is acceptable (either empty, or able to be
converted to the desired type.)
The _BaseEntryFrame class assumes the entry contents should be
a float, and that a blank entry should be filled with 0.00. A subclass
that wants non-float entries must override this method.
"""
if not entry:
return True # Empty string: OK if entire entry deleted
if entry == '-':
return True # OK to start entering a negative value
try:
float(entry)
return True
except ValueError:
return False
# TODO: consider using @property here
def get_value(self):
"""Returns the contents of the Entry widget as a str.
Known issue: loss of decimal places if contents a decimal number.
e.g. if set with 0.00, becomes '0.0'.
:return: (str)
"""
return self._value_var.get()
def set_value(self, val):
"""Sets the contents of the Entry widget to val, and updates
self.current_val.
"""
self._value_var.set(val)
# Tentatively, the fix to issues with toolbars detecting refreshes when
# subspectra are reloaded is to not update current_val directly here,
# but call _save_entry:
self._save_entry()
# self.current_value = val
class ArrayBox(_BaseEntryFrame):
"""
Overrides _BaseEntryFrame to accept a numpy 2D-array, and a coordinate to a
specific cell in the array to read to/write from.
Methods overridden: (public)
* set_value
"""
def __init__(self, parent=None,
array=None, coord=(0, 0),
**options):
"""Extend _BaseEntryFrame with references to a 2-D array and the
coordinate to a specific cell in the array.
:param array: a 2-D numpy array.
:param coord: (int, int) tuple for the (row, column) of the array to
associate the Entry with."""
self._array = array
self._row, self._col = coord
self._initial_value = self._array[self._row, self._col]
_BaseEntryFrame.__init__(self, parent, **options)
def _save_entry(self):
"""Record widget's current value to the array, filling the entry with
0.00 if it was empty.
Currently assumes, if the array has more than one row, that it is meant
to be a symmetric matrix, and updates the cross-diagonal element
as well.
"""
if not self._value_var.get():
self._value_var.set(0.00)
self.current_value = float(self._value_var.get())
self._array[self._row, self._col] = self.current_value
# if more than one row, assume J matrix and fill cross-diagonal element
if self._array.shape[0] > 1:
self._array[self._col, self._row] = self.current_value
def set_value(self, val):
"""Set the Entry contents to val, and save it to the associated
array.
"""
self._value_var.set(val)
self._save_entry()
class ArraySpinBox(ArrayBox):
"""A subclass of ArrayBox using a SpinBox instead of an Entry widget."""
def __init__(self, parent=None, from_=0.00, to=100.00, increment=1,
realtime=False,
**options):
"""Extend super.__init__ with kwargs for the SpinBox
initialization, and a boolean flag for desired spinbox behavior.
:param from_: (float) Minimum value for the SpinBox entry.
:param to: (float) Maximum value for the SpinBox entry.
:param increment: (float) size of increment/decrement to SpinBox
entry when a SpinBox arrow is clicked.
:param realtime: (boolean) True if view should be constantly updated
while the mouse button is held down (e.g. as an up/down widget arrow
is depressed).
"""
self._realtime = realtime
self._spinbox_kwargs = {'from_': from_,
'to': to,
'increment': increment}
ArrayBox.__init__(self, parent, **options)
def _add_entry(self):
"""Override ArrayEntry method to add a SpinBox widget rather than an
Entry widget."""
self._add_spinbox(**self._spinbox_kwargs)
def _add_spinbox(self, **kwargs):
"""Add a SpinBox widget to the ArraySpinBox frame."""
self._entry = Spinbox(self, width=7,
validate='key', # check for number on keypress
**kwargs)
self._entry.pack(side=TOP, fill=X)
self._entry.config(textvariable=self._value_var)
def _bind_entry(self):
"""Extend the ArrayFrame method to include bindings for mouse button
press/release.
"""
self._entry.bind('<Return>', lambda event: self._on_return(event))
self._entry.bind('<Tab>', lambda event: self._on_tab(event))
self._entry.bind('<FocusOut>', lambda event: self._refresh())
self._entry.bind('<FocusIn>',
lambda event: self._entry.selection('range', 0, END))
self._entry.bind('<ButtonPress-1>', lambda event: self._on_press())
self._entry.bind('<ButtonRelease-1>', lambda event: self._on_release())
def _on_press(self):
"""Trigger the 'update view' loop if 'realtime' behavior was
specified."""
if self._realtime:
self._loop_refresh()
def _loop_refresh(self):
"""Refresh the view every 50 ms until cancelled by the _on_release
method.
"""
self._refresh()
self.button_held_job = self._root().after(50, self._loop_refresh)
def _on_release(self):
"""Cancel _loop_refresh if 'realtime' behavior was specified."""
if self._realtime:
self._root().after_cancel(self.button_held_job)
# A 1-ms delay allows the StringVar to be updated prior to the
# _entry_is_changed check. See related StackOverflow question:
# https://stackoverflow.com/questions/46504930/
self.after(1, self._refresh)
class VarBox(_BaseEntryFrame):
    """An entry frame that reads its initial value from, and writes its
    current value back to, a caller-supplied dict.

    Requirements:
        The dict must use the 'name' kwarg as the key, and hold a value of
        the same type as the Entry widget.
    """

    def __init__(self, parent=None, name='', dict_=None, **options):
        """Initialize, using dict_[name] as the starting Entry value.

        :param parent: the parent tkinter object
        :param name: Label text; also the dict key and the widget's
            identifying name.
        :param dict_: the dict this widget reads from and writes to.
        :param **options: the standard optional kwargs for a Frame object
        """
        self._dict = dict_
        self._initial_value = dict_[name]
        _BaseEntryFrame.__init__(self, parent, name, **options)

    def _save_entry(self):
        """Store the entry's float value in the parent's dict; a blank
        entry is first filled with 0.00.
        """
        entry_text = self._value_var.get()
        if not entry_text:
            self._value_var.set(0.00)
            entry_text = self._value_var.get()
        self.current_value = float(entry_text)
        self._dict[self._name] = self.current_value
class IntBox(VarBox):
    """Subclass of VarBox whose Entry is restricted to integers only."""

    def __init__(self, parent=None, **options):
        VarBox.__init__(self, parent, **options)

    # The only thing keeping IntBox from just using MixinInt is that
    # _save_entry needs to save to a dict. TODO: refactor for composition

    def _save_entry(self):
        """Store the entry's int value in the parent's dict; a blank entry
        is first filled with zero.
        """
        if not self._value_var.get():
            self._value_var.set(0)
        value = int(self._value_var.get())
        self.current_value = value
        # Record the widget's state in the container's dictionary.
        self._dict[self._name] = value

    @staticmethod
    def _is_valid(entry):
        """Return True when the entry is acceptable: empty, a lone minus
        sign (start of a negative number), or parseable as an int.
        """
        if not entry or entry == '-':
            return True
        try:
            int(entry)
        except ValueError:
            return False
        return True
# TODO: decide if VarButtonBox will be useful in this project; delete if not
class VarButtonBox(VarBox):
    """
    A deluxe VarBox that is closer to WINDNMR-style entry boxes.
    ent = entry that holds the value used for calculations
    increment = the amount added to or subtracted from ent by the buttons
    minus and plus buttons subtract/add once;
    up and down buttons repeat as long as button held down.
    Arguments:
    -text: appears above the entry box
    -default: default value in entry
    """

    # To do: use inheritance to avoid repeating code for different widgets
    def __init__(self, from_=0.00, to=100.00, increment=1, realtime=False,
                 **options):
        """Extend VarBox with -/+ buttons and press-and-hold arrow buttons.

        :param from_: (float) minimum value (not enforced yet).
        :param to: (float) maximum value (not enforced yet).
        :param increment: (float) initial step applied by the buttons.
        :param realtime: (bool) kept for consistency with the SpinBox API.
        """
        VarBox.__init__(self, **options)
        # Following attributes/arguments are for consistency with SpinBox API
        # from_ and to not implemented yet here
        self.min = from_
        self.max = to
        self.increment = increment
        self.realtime = realtime
        self.add_increment_widgets()

    def add_increment_widgets(self):
        """Build the -/+ buttons, the hold-to-repeat arrow buttons, and the
        Entry that holds the increment amount."""
        increment_frame = Frame(self)
        increment_frame.rowconfigure(0,
                                     minsize=30)  # make 2 rows ~same height
        increment_frame.columnconfigure(2,
                                        weight=1)  # lets arrow buttons fill
        increment_frame.pack(side=TOP, expand=Y, fill=X)

        minus = Button(increment_frame, text='-',
                       command=lambda: self.decrease())
        plus = Button(increment_frame, text='+',
                      command=lambda: self.increase())
        up = Button(increment_frame, text=up_arrow, command=lambda: None)
        up.bind('<Button-1>', lambda event: self.zoom_up())
        up.bind('<ButtonRelease-1>', lambda event: self.stop_action())

        minus.grid(row=0, column=0, sticky=NSEW)
        plus.grid(row=0, column=1, sticky=NSEW)
        up.grid(row=0, column=2, sticky=NSEW)

        # Increment is also limited to numerical entry
        increment_entry = Entry(increment_frame, width=4, validate='key')
        increment_entry.grid(row=1, column=0, columnspan=2, sticky=NSEW)
        self.increment_var = StringVar()
        increment_entry.config(textvariable=self.increment_var)
        # BUGFIX: honor the `increment` constructor argument instead of a
        # hard-coded 1. Backward compatible: the default is still 1.
        self.increment_var.set(str(self.increment))
        increment_entry['validatecommand'] = (self.register(self._is_valid),
                                              '%P')
        increment_entry['invalidcommand'] = 'bell'

        down = Button(increment_frame, text=down_arrow, command=lambda: None)
        down.grid(row=1, column=2, sticky=NSEW)
        down.bind('<Button-1>', lambda event: self.zoom_down())
        down.bind('<ButtonRelease-1>', lambda event: self.stop_action())

        self.mouse1 = False  # Flag used to check if left button held down

    def stop_action(self):
        """ButtonRelease resets self.mouse1 flag to False"""
        self.mouse1 = False

    def increase(self):
        """Increases ent by inc"""
        current = float(self._value_var.get())
        increment = float(self.increment_var.get())
        self._value_var.set(str(current + increment))
        self._on_tab()

    def decrease(self):
        """Decreases ent by inc"""
        current = float(self._value_var.get())
        decrement = float(self.increment_var.get())
        self._value_var.set(str(current - decrement))
        self._on_tab()

    def zoom_up(self):
        """Increases ent by inc as long as button-1 held down"""
        self.mouse1 = True
        self.change_value(float(self.increment_var.get()))

    def zoom_down(self):
        """Decreases ent by inc as long as button-1 held down"""
        decrement = - float(self.increment_var.get())
        self.mouse1 = True
        self.change_value(decrement)

    def change_value(self, increment):
        """Adds increment to the value in ent, repeating every 50 ms while
        the mouse button remains held down.

        :param increment: (float) the change to be made to the float value of
        the current Entry contents."""
        if self.mouse1:
            current_float = float(self._value_var.get())
            new_float = current_float + increment
            self._value_var.set(str(new_float))
            self._on_tab()  # store value, call _callback

            # Delay was originally set to 10, but after MVC refactor this
            # caused an infinite loop (apparently a race condition where
            # stop action never fired. Testing with the two singlet DNMR
            # _callback: still loops at 30 ms; 40 works but uneven; 50 works
            # fine.
            # May want to refactor how up/down arrows work
            self.after(50, lambda: self.change_value(increment))
class SimpleVariableBox(_BaseEntryFrame):
    """A _BaseEntryFrame that keeps the entry value in its current_value
    attribute and enforces a minimum fallback for blank entries.
    """

    def __init__(self, parent=None, value=0.5, min_=0, **options):
        """Extend _BaseEntryFrame with an initial value and a minimum.

        :param value: (float) value to instantiate the Entry with.
        :param min_: (float) minimum value the Entry is allowed to hold.
        """
        self._initial_value = value
        self._min_value = min_
        _BaseEntryFrame.__init__(self, parent, **options)

    def _save_entry(self):
        """Record the entry as current_value; a blank entry is replaced
        with the configured minimum value.
        """
        raw = self._value_var.get()
        if not raw:
            self._value_var.set(self._min_value)
            raw = self._value_var.get()
        self.current_value = float(raw)
class MixinHorizontal:
    """Mixin that overrides _add_label and _add_entry so widgets are laid
    out left-to-right instead of top-to-bottom.
    """

    def _add_label(self):
        """Pack a Label holding self._name at the left edge of the frame."""
        label = Label(self, text=self._name, bg=self._color, bd=0)
        label.pack(side=LEFT)

    def _add_entry(self):
        """Pack a keypress-validated Entry to the right of the label."""
        self._entry = Entry(self, width=7, validate='key')
        self._entry.pack(side=LEFT, fill=X)
        self._entry.config(textvariable=self._value_var)
class MixinInt:
    """Mixin that overrides _save_entry and _is_valid so the Entry only
    accepts integer values."""

    def _save_entry(self):
        """Record the entry as an int current_value; a blank entry is
        treated as zero.
        """
        if not self._value_var.get():
            self._value_var.set(0)
        self.current_value = int(self._value_var.get())

    @staticmethod
    def _is_valid(entry):
        """Return True when the entry is acceptable: empty, a lone minus
        sign (start of a negative number), or parseable as an int.
        """
        if not entry or entry == '-':
            return True
        try:
            int(entry)
        except ValueError:
            return False
        return True
class MixinIntRange:
    """Similar to MixinInt, but restricts the integer to a specified
    min/max range.
    Currently hardcoded to 2-8 range."""
    # TODO: make general, with min_ and max_ args

    def _save_entry(self):
        """Saves widget's entry as current_value, filling the entry with
        0 if it was empty.
        """
        if not self._value_var.get():  # if entry left blank,
            self._value_var.set(0)  # fill it with zero
        value = int(self._value_var.get())
        self.current_value = value

    @staticmethod
    def _is_valid(entry):
        """Test to see if entry is acceptable: either empty, or an int
        within the 2-8 range (unlike MixinInt, a lone '-' is rejected).
        """
        if not entry:
            return True  # Empty string: OK if entire entry deleted
        try:
            int(entry)
            return 2 <= int(entry) <= 8
        except ValueError:
            return False
class HorizontalIntBox(MixinHorizontal, IntBox):
    """An IntBox with a horizontal layout."""

    def __init__(self, **kwargs):
        # MRO: MixinHorizontal supplies the layout; IntBox the behavior.
        super(HorizontalIntBox, self).__init__(**kwargs)
class HorizontalEntryFrame(MixinHorizontal, SimpleVariableBox):
    """A SimpleVariableBox with a horizontal layout."""

    def __init__(self, **kwargs):
        # Pure pass-through; the mixin overrides the layout methods.
        super(HorizontalEntryFrame, self).__init__(**kwargs)
class HorizontalIntEntryFrame(MixinHorizontal, MixinInt, SimpleVariableBox):
    """A SimpleVariableBox with a horizontal layout, and with Entry values
    limited to integers."""

    def __init__(self, **kwargs):
        # Pure pass-through; the mixins override layout and validation.
        super(HorizontalIntEntryFrame, self).__init__(**kwargs)
class HorizontalRangeEntryFrame(MixinHorizontal, MixinIntRange,
                                SimpleVariableBox):
    """A SimpleVariableBox with a horizontal layout, and with Entry values
    limited to integers in the 2-8 range (currently hardcoded in
    MixinIntRange).
    """

    def __init__(self, **kwargs):
        # Pure pass-through; the mixins override layout and validation.
        super(HorizontalRangeEntryFrame, self).__init__(**kwargs)
if __name__ == '__main__':
    # Manual smoke test: instantiate one of each widget in a Tk window.
    import numpy as np

    class DummyFrame(Frame):
        # Minimal stand-in for the application's main frame.
        def __init__(self, parent, **options):
            Frame.__init__(self, parent, **options)
            self.vars = {}

    def dummy_callback():
        print('callback called')

    dummy_array = np.array([[1, 42, 99]])
    dummy_dict = {'VarBox example': 11.00,
                  'IntBox example': 12,
                  'VarButtonBox example': 42.0}
    root = Tk()
    root.title('test widgets')
    mainwindow = DummyFrame(root)
    mainwindow.pack()
    widgets = {'Array Box': ArrayBox,
               'ArraySpinBox': ArraySpinBox,
               'VarBox': VarBox,
               'IntBox': IntBox}
    # Array-backed widgets take `array=`; dict-backed widgets take `dict_=`.
    widget_list = [val(parent=mainwindow, name=key, array=dummy_array,
                       callback=dummy_callback) if 'Array' in key
                   else val(parent=mainwindow, name=key + ' example',
                            dict_=dummy_dict, callback=dummy_callback)
                   for key, val in widgets.items()]
    simple_variable_box = SimpleVariableBox(parent=mainwindow,
                                            name='SimpleVariableBox example',
                                            value=20.0)
    widget_list.append(simple_variable_box)
    horizontal_test = HorizontalRangeEntryFrame(parent=mainwindow,
                                                name='horizontal test',
                                                value=18)
    widget_list.append(horizontal_test)
    for widget in widget_list:
        widget.pack(side=LEFT)
    demo_varbuttonbox = VarButtonBox(
        parent=mainwindow, name='VarButtonBox example',
        dict_=dummy_dict, callback=dummy_callback, realtime=True,
        from_=0.00, to=100.00, increment=1)
    demo_varbuttonbox.pack(side=LEFT)
    # TODO: add code to test behavior as well as instantiation
    # workaround fix for Tk problems and mac mouse/trackpad:
    while True:
        try:
            root.mainloop()
            break
        except UnicodeDecodeError:
            pass
| [
"sametz@udel.edu"
] | sametz@udel.edu |
879917b46e8c4961318c964911dd89f79325b030 | a974de6285db76b669937e9619a805c226bb11a6 | /04_The_Path_of_Python/T-resource_Python_201904/ex/ex4_3.py | 3cdbf41669af8c237a20b793e2c8e0d841b68d20 | [] | no_license | Vincent105/python | 1b3f753e6b9db711e320b53c3e0a04c9fc818c37 | 65351234310a81a85331b0f11aef42507868774d | refs/heads/master | 2021-08-03T21:06:57.977065 | 2021-07-30T09:02:56 | 2021-07-30T09:02:56 | 185,359,652 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # ex4_3.py
f = open("out.txt",mode="w")
print(" 姓名 國文 英文 總分 平均",file=f)
print("%3s %4d %4d %4d %3.1f" % ("洪冰儒", 98, 90, 188, 188/2),file=f)
print("%3s %4d %4d %4d %3.1f" % ("洪雨星", 96, 95, 191, 191/2),file=f)
print("%3s %4d %4d %4d %3.1f" % ("洪冰雨", 92, 88, 180, 180/2),file=f)
print("%3s %4d %4d %4d %3.1f" % ("洪星宇", 93, 97, 190, 190/2),file=f)
f.close()
| [
"vincent1050917@gmail.com"
] | vincent1050917@gmail.com |
c2365e9ea85baa1801be84fa21fd6de815a01787 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/147_v2/hundred_days.py | 93e78caccfce0c3ed9b677ebafe9b895680e3cc5 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 452 | py | from datetime import date
from dateutil.rrule import *
import dateutil
TODAY = date(year=2018, month=11, day=29)


def get_hundred_weekdays(start_date=TODAY):
    """Return a list of hundred date objects starting from
    start_date up till 100 weekdays later, so +100 days
    skipping Saturdays and Sundays.

    Implemented with the standard library (no dateutil needed): walk
    forward one calendar day at a time, collecting only Mon-Fri.

    :param start_date: first candidate date; included when it is a weekday.
        A datetime is normalized to its date, matching the old
        rrule-then-.date() behavior.
    :return: list of exactly 100 datetime.date objects
    """
    from datetime import timedelta

    current = start_date
    if hasattr(current, 'date'):  # datetime -> date normalization
        current = current.date()
    one_day = timedelta(days=1)
    weekdays = []
    while len(weekdays) < 100:
        if current.weekday() < 5:  # Mon=0 .. Fri=4
            weekdays.append(current)
        current += one_day
    return weekdays
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
9059dc9806200db1b678dcd1aab3278e172c9e60 | 8e1be167066e30eff91c26c0757211cf3cf8b016 | /django/full_stack/login_and_registration/apps/users/migrations/0001_initial.py | 859f377ff7d427d869acba8cbbf4213ceebab8a9 | [] | no_license | dojo-solutions/online-ft-python | 074d0ba968f5a77eaec1bca0904232f2aa29051a | b4f6941d0bba376d121a40a6429b815d5b03c32f | refs/heads/master | 2020-04-21T11:52:31.390772 | 2019-03-02T01:27:54 | 2019-03-02T01:27:54 | 169,542,448 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-02-25 17:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the users ``User`` table with name,
    email, password-hash, and auto-managed timestamp columns."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                # pw_hash is wider than the name fields to hold a salted hash.
                ('pw_hash', models.CharField(max_length=500)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"wes@tao.team"
] | wes@tao.team |
846bfc48306db4e64463137f1678d3024cae52f4 | d6952f048727add5b54a521d04f6c9b5889bcd35 | /pollination_sdk/models/new_recipe_package.py | 8be4cb90b14461dfbef3d33cd2532f98983cdceb | [] | no_license | TfedUD/python-sdk | bf719644041c2ab7b741af9c7fb8e5acfe085922 | 7ddc34611de44d2f9c5b217cf9b9e7cec27b2a27 | refs/heads/master | 2023-08-10T21:13:45.270193 | 2021-06-21T14:48:36 | 2021-06-21T14:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,401 | py | # coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.13.0
Contact: info@pollination.cloud
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pollination_sdk.configuration import Configuration
class NewRecipePackage(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'manifest': 'Recipe',
        'readme': 'str'
    }

    attribute_map = {
        'manifest': 'manifest',
        'readme': 'readme'
    }

    def __init__(self, manifest=None, readme='', local_vars_configuration=None):  # noqa: E501
        """NewRecipePackage - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._manifest = None
        self._readme = None
        self.discriminator = None

        # Property setters run validation (manifest must not be None).
        self.manifest = manifest
        if readme is not None:
            self.readme = readme

    @property
    def manifest(self):
        """Gets the manifest of this NewRecipePackage.  # noqa: E501

        The Recipe manifest to be created  # noqa: E501

        :return: The manifest of this NewRecipePackage.  # noqa: E501
        :rtype: Recipe
        """
        return self._manifest

    @manifest.setter
    def manifest(self, manifest):
        """Sets the manifest of this NewRecipePackage.

        The Recipe manifest to be created  # noqa: E501

        :param manifest: The manifest of this NewRecipePackage.  # noqa: E501
        :type manifest: Recipe
        """
        if self.local_vars_configuration.client_side_validation and manifest is None:  # noqa: E501
            raise ValueError("Invalid value for `manifest`, must not be `None`")  # noqa: E501

        self._manifest = manifest

    @property
    def readme(self):
        """Gets the readme of this NewRecipePackage.  # noqa: E501

        The README file to attach to this package  # noqa: E501

        :return: The readme of this NewRecipePackage.  # noqa: E501
        :rtype: str
        """
        return self._readme

    @readme.setter
    def readme(self, readme):
        """Sets the readme of this NewRecipePackage.

        The README file to attach to this package  # noqa: E501

        :param readme: The readme of this NewRecipePackage.  # noqa: E501
        :type readme: str
        """
        self._readme = readme

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recurse into nested models, lists, and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NewRecipePackage):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, NewRecipePackage):
            return True

        return self.to_dict() != other.to_dict()
| [
"antoinedao1@gmail.com"
] | antoinedao1@gmail.com |
78ad72f1d54511f8f597e4533e092800f77e17a0 | 99f145ac3a1b9192e54c114379f16bf992781251 | /venv/lib/python2.7/site-packages/pandas/tests/test_panel4d.py | 1b5a7b6ee1e83bfb8fc40cf51cc47fb7a05c1910 | [
"MIT"
] | permissive | dushyantRathore/Cricket-API | 0a7df84f9760090e8a24dc61689e63e123c33d1f | d28bc5e6c613052793117e3dbd9035e4540901bb | refs/heads/master | 2021-03-24T10:18:58.362716 | 2020-07-08T17:52:38 | 2020-07-08T17:52:38 | 79,565,447 | 3 | 1 | MIT | 2018-10-01T19:16:47 | 2017-01-20T14:24:08 | Python | UTF-8 | Python | false | false | 35,347 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from pandas.compat import range, lrange
import operator
import nose
import numpy as np
from pandas.types.common import is_float_dtype
from pandas import Series, Index, isnull, notnull
from pandas.core.panel import Panel
from pandas.core.panel4d import Panel4D
from pandas.core.series import remove_na
from pandas.tseries.offsets import BDay
from pandas.util.testing import (assert_panel_equal,
assert_panel4d_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
def add_nans(panel4d):
    """Insert NaN entries into every Panel of `panel4d` (test fixture
    helper, delegating to `tm.add_nans` per label)."""
    # The enumerate index in the original was unused (and named `l`,
    # which shadows readability); iterate labels directly.
    for label in panel4d.labels:
        tm.add_nans(panel4d[label])
class SafeForLongAndSparse(object):
    """Mixin of statistical-reduction tests shared by Panel4D test classes.
    Expects ``self.panel4d`` and unittest-style assert methods on self."""
    _multiprocess_can_split_ = True

    def test_repr(self):
        # Smoke test: repr must not raise.
        repr(self.panel4d)

    def test_iter(self):
        # Iterating a Panel4D yields its labels.
        tm.equalContents(list(self.panel4d), self.panel4d.labels)

    def test_count(self):
        f = lambda s: notnull(s).sum()
        self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)

    def test_sum(self):
        self._check_stat_op('sum', np.sum)

    def test_mean(self):
        self._check_stat_op('mean', np.mean)

    def test_prod(self):
        self._check_stat_op('prod', np.prod)

    def test_median(self):
        def wrapper(x):
            # np.median has no NaN handling; propagate NaN explicitly.
            if isnull(x).any():
                return np.nan
            return np.median(x)

        self._check_stat_op('median', wrapper)

    def test_min(self):
        self._check_stat_op('min', np.min)

    def test_max(self):
        self._check_stat_op('max', np.max)

    def test_skew(self):
        try:
            from scipy.stats import skew
        except ImportError:
            raise nose.SkipTest("no scipy.stats.skew")

        def this_skew(x):
            # Sample skew is undefined for fewer than 3 observations.
            if len(x) < 3:
                return np.nan
            return skew(x, bias=False)
        self._check_stat_op('skew', this_skew)

    # def test_mad(self):
    #     f = lambda x: np.abs(x - x.mean()).mean()
    #     self._check_stat_op('mad', f)

    def test_var(self):
        def alt(x):
            if len(x) < 2:
                return np.nan
            return np.var(x, ddof=1)
        self._check_stat_op('var', alt)

    def test_std(self):
        def alt(x):
            if len(x) < 2:
                return np.nan
            return np.std(x, ddof=1)
        self._check_stat_op('std', alt)

    def test_sem(self):
        def alt(x):
            if len(x) < 2:
                return np.nan
            return np.std(x, ddof=1) / np.sqrt(len(x))
        self._check_stat_op('sem', alt)

    # def test_skew(self):
    #     from scipy.stats import skew

    #     def alt(x):
    #         if len(x) < 3:
    #             return np.nan
    #         return skew(x, bias=False)
    #     self._check_stat_op('skew', alt)

    def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
        """Compare the named reduction on `obj` (default self.panel4d)
        against `alternative` applied per-axis via obj.apply, and verify
        an out-of-range axis raises."""
        if obj is None:
            obj = self.panel4d

        # # set some NAs
        # obj.ix[5:10] = np.nan
        # obj.ix[15:20, -2:] = np.nan

        f = getattr(obj, name)

        if has_skipna:

            def skipna_wrapper(x):
                nona = remove_na(x)
                if len(nona) == 0:
                    return np.nan
                return alternative(nona)

            def wrapper(x):
                return alternative(np.asarray(x))

            for i in range(obj.ndim):
                result = f(axis=i, skipna=False)
                assert_panel_equal(result, obj.apply(wrapper, axis=i))
        else:
            skipna_wrapper = alternative
            wrapper = alternative

        for i in range(obj.ndim):
            result = f(axis=i)
            # Some reductions misbehave under old bottleneck versions.
            if not tm._incompat_bottleneck_version(name):
                assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i))

        self.assertRaises(Exception, f, axis=obj.ndim)
class SafeForSparse(object):
    """Mixin of axis, arithmetic, and accessor tests shared by Panel4D test
    classes. Expects ``self.panel4d``; NOTE(review): test_get_value uses
    ``self.panel`` instead -- looks inconsistent, verify the fixture."""
    _multiprocess_can_split_ = True

    @classmethod
    def assert_panel_equal(cls, x, y):
        assert_panel_equal(x, y)

    @classmethod
    def assert_panel4d_equal(cls, x, y):
        assert_panel4d_equal(x, y)

    def test_get_axis(self):
        # Axis numbers map to labels/items/major/minor in that order.
        assert(self.panel4d._get_axis(0) is self.panel4d.labels)
        assert(self.panel4d._get_axis(1) is self.panel4d.items)
        assert(self.panel4d._get_axis(2) is self.panel4d.major_axis)
        assert(self.panel4d._get_axis(3) is self.panel4d.minor_axis)

    def test_set_axis(self):
        new_labels = Index(np.arange(len(self.panel4d.labels)))

        # TODO: unused?
        # new_items = Index(np.arange(len(self.panel4d.items)))

        new_major = Index(np.arange(len(self.panel4d.major_axis)))
        new_minor = Index(np.arange(len(self.panel4d.minor_axis)))

        # ensure propagate to potentially prior-cached items too

        # TODO: unused?
        # label = self.panel4d['l1']

        self.panel4d.labels = new_labels

        if hasattr(self.panel4d, '_item_cache'):
            self.assertNotIn('l1', self.panel4d._item_cache)
        self.assertIs(self.panel4d.labels, new_labels)

        self.panel4d.major_axis = new_major
        self.assertIs(self.panel4d[0].major_axis, new_major)
        self.assertIs(self.panel4d.major_axis, new_major)

        self.panel4d.minor_axis = new_minor
        self.assertIs(self.panel4d[0].minor_axis, new_minor)
        self.assertIs(self.panel4d.minor_axis, new_minor)

    def test_get_axis_number(self):
        self.assertEqual(self.panel4d._get_axis_number('labels'), 0)
        self.assertEqual(self.panel4d._get_axis_number('items'), 1)
        self.assertEqual(self.panel4d._get_axis_number('major'), 2)
        self.assertEqual(self.panel4d._get_axis_number('minor'), 3)

    def test_get_axis_name(self):
        self.assertEqual(self.panel4d._get_axis_name(0), 'labels')
        self.assertEqual(self.panel4d._get_axis_name(1), 'items')
        self.assertEqual(self.panel4d._get_axis_name(2), 'major_axis')
        self.assertEqual(self.panel4d._get_axis_name(3), 'minor_axis')

    def test_arith(self):
        # Panel4D arithmetic is deprecated, hence the FutureWarning guard.
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            self._test_op(self.panel4d, operator.add)
            self._test_op(self.panel4d, operator.sub)
            self._test_op(self.panel4d, operator.mul)
            self._test_op(self.panel4d, operator.truediv)
            self._test_op(self.panel4d, operator.floordiv)
            self._test_op(self.panel4d, operator.pow)

            self._test_op(self.panel4d, lambda x, y: y + x)
            self._test_op(self.panel4d, lambda x, y: y - x)
            self._test_op(self.panel4d, lambda x, y: y * x)
            self._test_op(self.panel4d, lambda x, y: y / x)
            self._test_op(self.panel4d, lambda x, y: y ** x)

            self.assertRaises(Exception, self.panel4d.__add__,
                              self.panel4d['l1'])

    @staticmethod
    def _test_op(panel4d, op):
        # Scalar op must match the same op applied to a member Panel.
        result = op(panel4d, 1)
        assert_panel_equal(result['l1'], op(panel4d['l1'], 1))

    def test_keys(self):
        tm.equalContents(list(self.panel4d.keys()), self.panel4d.labels)

    def test_iteritems(self):
        """Test panel4d.iteritems()"""
        self.assertEqual(len(list(self.panel4d.iteritems())),
                         len(self.panel4d.labels))

    def test_combinePanel4d(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = self.panel4d.add(self.panel4d)
            self.assert_panel4d_equal(result, self.panel4d * 2)

    def test_neg(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            self.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)

    def test_select(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):

            p = self.panel4d

            # select labels
            result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')
            expected = p.reindex(labels=['l1', 'l3'])
            self.assert_panel4d_equal(result, expected)

            # select items
            result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
            expected = p.reindex(items=['ItemA', 'ItemC'])
            self.assert_panel4d_equal(result, expected)

            # select major_axis
            result = p.select(lambda x: x >= datetime(2000, 1, 15),
                              axis='major')
            new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
            expected = p.reindex(major=new_major)
            self.assert_panel4d_equal(result, expected)

            # select minor_axis
            result = p.select(lambda x: x in ('D', 'A'), axis=3)
            expected = p.reindex(minor=['A', 'D'])
            self.assert_panel4d_equal(result, expected)

            # corner case, empty thing
            result = p.select(lambda x: x in ('foo',), axis='items')
            self.assert_panel4d_equal(result, p.reindex(items=[]))

    def test_get_value(self):
        # NOTE(review): uses self.panel, not self.panel4d -- confirm fixture.
        for item in self.panel.items:
            for mjr in self.panel.major_axis[::2]:
                for mnr in self.panel.minor_axis:
                    result = self.panel.get_value(item, mjr, mnr)
                    expected = self.panel[item][mnr][mjr]
                    assert_almost_equal(result, expected)

    def test_abs(self):

        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = self.panel4d.abs()
            expected = np.abs(self.panel4d)
            self.assert_panel4d_equal(result, expected)

            # abs must also work on the contained Panel and DataFrame.
            p = self.panel4d['l1']
            result = p.abs()
            expected = np.abs(p)
            assert_panel_equal(result, expected)

            df = p['ItemA']
            result = df.abs()
            expected = np.abs(df)
            assert_frame_equal(result, expected)
class CheckIndexing(object):
_multiprocess_can_split_ = True
    def test_getitem(self):
        """__getitem__ with an unknown label raises."""
        self.assertRaises(Exception, self.panel4d.__getitem__, 'ItemQ')
    def test_delitem_and_pop(self):
        """pop/del remove labels; a second delete raises; deletion removes
        exactly the targeted slice and leaves the others intact."""

        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            expected = self.panel4d['l2']
            result = self.panel4d.pop('l2')
            assert_panel_equal(expected, result)
            self.assertNotIn('l2', self.panel4d.labels)

            del self.panel4d['l3']
            self.assertNotIn('l3', self.panel4d.labels)
            self.assertRaises(Exception, self.panel4d.__delitem__, 'l3')

            values = np.empty((4, 4, 4, 4))
            values[0] = 0
            values[1] = 1
            values[2] = 2
            values[3] = 3

            panel4d = Panel4D(values, lrange(4), lrange(4),
                              lrange(4), lrange(4))

            # did we delete the right row?
            panel4dc = panel4d.copy()
            del panel4dc[0]
            assert_panel_equal(panel4dc[1], panel4d[1])
            assert_panel_equal(panel4dc[2], panel4d[2])
            assert_panel_equal(panel4dc[3], panel4d[3])

            panel4dc = panel4d.copy()
            del panel4dc[1]
            assert_panel_equal(panel4dc[0], panel4d[0])
            assert_panel_equal(panel4dc[2], panel4d[2])
            assert_panel_equal(panel4dc[3], panel4d[3])

            panel4dc = panel4d.copy()
            del panel4dc[2]
            assert_panel_equal(panel4dc[1], panel4d[1])
            assert_panel_equal(panel4dc[0], panel4d[0])
            assert_panel_equal(panel4dc[3], panel4d[3])

            panel4dc = panel4d.copy()
            del panel4dc[3]
            assert_panel_equal(panel4dc[1], panel4d[1])
            assert_panel_equal(panel4dc[2], panel4d[2])
            assert_panel_equal(panel4dc[0], panel4d[0])
    def test_setitem(self):
        """Assigning Panels, scalars, bools, and strings creates labels with
        the expected dtypes."""
        # LongPanel with one item
        # lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
        # self.assertRaises(Exception, self.panel.__setitem__,
        #                   'ItemE', lp)

        # Panel
        p = Panel(dict(
            ItemA=self.panel4d['l1']['ItemA'][2:].filter(items=['A', 'B'])))
        self.panel4d['l4'] = p
        self.panel4d['l5'] = p

        p2 = self.panel4d['l4']

        # The assigned Panel is conformed to the Panel4D's axes.
        assert_panel_equal(p, p2.reindex(items=p.items,
                                         major_axis=p.major_axis,
                                         minor_axis=p.minor_axis))

        # scalar
        self.panel4d['lG'] = 1
        self.panel4d['lE'] = True
        self.assertEqual(self.panel4d['lG'].values.dtype, np.int64)
        self.assertEqual(self.panel4d['lE'].values.dtype, np.bool_)

        # object dtype
        self.panel4d['lQ'] = 'foo'
        self.assertEqual(self.panel4d['lQ'].values.dtype, np.object_)

        # boolean dtype
        self.panel4d['lP'] = self.panel4d['l1'] > 0
        self.assertEqual(self.panel4d['lP'].values.dtype, np.bool_)
    def test_setitem_by_indexer(self):
        """iloc-based assignment of Panel/DataFrame/Series/scalar values."""

        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):

            # Panel: assigning a whole Panel through iloc is unsupported.
            panel4dc = self.panel4d.copy()
            p = panel4dc.iloc[0]

            def func():
                self.panel4d.iloc[0] = p
            self.assertRaises(NotImplementedError, func)

            # DataFrame
            panel4dc = self.panel4d.copy()
            df = panel4dc.iloc[0, 0]
            df.iloc[:] = 1
            panel4dc.iloc[0, 0] = df
            self.assertTrue((panel4dc.iloc[0, 0].values == 1).all())

            # Series
            panel4dc = self.panel4d.copy()
            s = panel4dc.iloc[0, 0, :, 0]
            s.iloc[:] = 1
            panel4dc.iloc[0, 0, :, 0] = s
            self.assertTrue((panel4dc.iloc[0, 0, :, 0].values == 1).all())

            # scalar
            panel4dc = self.panel4d.copy()
            panel4dc.iloc[0] = 1
            panel4dc.iloc[1] = True
            panel4dc.iloc[2] = 'foo'
            self.assertTrue((panel4dc.iloc[0].values == 1).all())
            self.assertTrue(panel4dc.iloc[1].values.all())
            self.assertTrue((panel4dc.iloc[2].values == 'foo').all())
    def test_setitem_by_indexer_mixed_type(self):
        """iloc scalar assignment still works on a mixed-dtype Panel4D."""

        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # GH 8702
            self.panel4d['foo'] = 'bar'

            # scalar
            panel4dc = self.panel4d.copy()
            panel4dc.iloc[0] = 1
            panel4dc.iloc[1] = True
            panel4dc.iloc[2] = 'foo'
            self.assertTrue((panel4dc.iloc[0].values == 1).all())
            self.assertTrue(panel4dc.iloc[1].values.all())
            self.assertTrue((panel4dc.iloc[2].values == 'foo').all())
    def test_comparisons(self):
        """Elementwise comparison operators match the ndarray result and
        raise for misaligned or lower-dimensional operands."""
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            p1 = tm.makePanel4D()
            p2 = tm.makePanel4D()

            tp = p1.reindex(labels=p1.labels.tolist() + ['foo'])
            p = p1[p1.labels[0]]

            def test_comp(func):
                result = func(p1, p2)
                self.assert_numpy_array_equal(result.values,
                                              func(p1.values, p2.values))

                # versus non-indexed same objs
                self.assertRaises(Exception, func, p1, tp)

                # versus different objs
                self.assertRaises(Exception, func, p1, p)

                result3 = func(self.panel4d, 0)
                self.assert_numpy_array_equal(result3.values,
                                              func(self.panel4d.values, 0))

            # NaN comparisons would warn; silence invalid-value warnings.
            with np.errstate(invalid='ignore'):
                test_comp(operator.eq)
                test_comp(operator.ne)
                test_comp(operator.lt)
                test_comp(operator.gt)
                test_comp(operator.ge)
                test_comp(operator.le)
    def test_major_xs(self):
        """major_xs returns the cross-section at a major-axis value and
        raises for a value not on the axis."""
        ref = self.panel4d['l1']['ItemA']

        idx = self.panel4d.major_axis[5]
        xs = self.panel4d.major_xs(idx)

        assert_series_equal(xs['l1'].T['ItemA'],
                            ref.xs(idx), check_names=False)

        # not contained
        idx = self.panel4d.major_axis[0] - BDay()
        self.assertRaises(Exception, self.panel4d.major_xs, idx)
    def test_major_xs_mixed(self):
        """major_xs preserves per-label dtypes on a mixed Panel4D."""
        self.panel4d['l4'] = 'foo'
        xs = self.panel4d.major_xs(self.panel4d.major_axis[0])
        self.assertEqual(xs['l1']['A'].dtype, np.float64)
        self.assertEqual(xs['l4']['A'].dtype, np.object_)
    def test_minor_xs(self):
        """minor_xs returns the cross-section at a minor-axis value and
        raises for a value not on the axis."""
        ref = self.panel4d['l1']['ItemA']

        idx = self.panel4d.minor_axis[1]
        xs = self.panel4d.minor_xs(idx)

        assert_series_equal(xs['l1'].T['ItemA'], ref[idx], check_names=False)

        # not contained
        self.assertRaises(Exception, self.panel4d.minor_xs, 'E')
    def test_minor_xs_mixed(self):
        """minor_xs preserves per-label dtypes on a mixed Panel4D."""
        self.panel4d['l4'] = 'foo'

        xs = self.panel4d.minor_xs('D')
        self.assertEqual(xs['l1'].T['ItemA'].dtype, np.float64)
        self.assertEqual(xs['l4'].T['ItemA'].dtype, np.object_)
    def test_xs(self):
        """xs on axis 0 matches __getitem__, returns a view when possible,
        and sets is_copy on mixed-type results."""
        l1 = self.panel4d.xs('l1', axis=0)
        expected = self.panel4d['l1']
        assert_panel_equal(l1, expected)

        # view if possible: mutating the xs result mutates the original
        l1_view = self.panel4d.xs('l1', axis=0)
        l1_view.values[:] = np.nan
        self.assertTrue(np.isnan(self.panel4d['l1'].values).all())

        # mixed-type
        self.panel4d['strings'] = 'foo'
        result = self.panel4d.xs('D', axis=3)
        self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
    # .ix was already deprecated when this was written, hence the
    # warning guard around the whole body.
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        panel4d = self.panel4d

        labels = panel4d.labels[[1, 0]]
        items = panel4d.items[[1, 0]]
        dates = panel4d.major_axis[::2]
        cols = ['D', 'C', 'F']

        # Every .ix label selection must be equivalent to the matching
        # reindex call, for each combination of specified axes.

        # all 4 specified
        assert_panel4d_equal(panel4d.ix[labels, items, dates, cols],
                             panel4d.reindex(labels=labels, items=items,
                                             major=dates, minor=cols))

        # 3 specified
        assert_panel4d_equal(panel4d.ix[:, items, dates, cols],
                             panel4d.reindex(items=items, major=dates,
                                             minor=cols))

        # 2 specified
        assert_panel4d_equal(panel4d.ix[:, :, dates, cols],
                             panel4d.reindex(major=dates, minor=cols))

        assert_panel4d_equal(panel4d.ix[:, items, :, cols],
                             panel4d.reindex(items=items, minor=cols))

        assert_panel4d_equal(panel4d.ix[:, items, dates, :],
                             panel4d.reindex(items=items, major=dates))

        # only 1
        assert_panel4d_equal(panel4d.ix[:, items, :, :],
                             panel4d.reindex(items=items))

        assert_panel4d_equal(panel4d.ix[:, :, dates, :],
                             panel4d.reindex(major=dates))

        assert_panel4d_equal(panel4d.ix[:, :, :, cols],
                             panel4d.reindex(minor=cols))
def test_getitem_fancy_slice(self):
    # Placeholder: slice-based fancy indexing is not covered here.
    pass
def test_getitem_fancy_ints(self):
    # Placeholder: integer-based fancy indexing is not covered here.
    pass
def test_get_value(self):
    """get_value agrees with chained [] lookups at every coordinate."""
    panel = self.panel4d
    for label in panel.labels:
        for item in panel.items:
            for mjr in panel.major_axis[::2]:
                for mnr in panel.minor_axis:
                    got = panel.get_value(label, item, mjr, mnr)
                    # [] access orders the last two axes minor-then-major.
                    want = panel[label][item][mnr][mjr]
                    assert_almost_equal(got, want)
def test_set_value(self):
    """set_value writes in place for existing keys and returns an
    enlarged, brand-new object when a key is missing.

    The ``res3`` assertion pair that previously appeared *before* the
    resize section was byte-identical to the final one and therefore
    redundant; it has been removed.
    """
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        # In-place write: every visited coordinate becomes 1.
        for label in self.panel4d.labels:
            for item in self.panel4d.items:
                for mjr in self.panel4d.major_axis[::2]:
                    for mnr in self.panel4d.minor_axis:
                        self.panel4d.set_value(label, item, mjr, mnr, 1.)
                        assert_almost_equal(
                            self.panel4d[label][item][mnr][mjr], 1.)

        # resize: unknown keys force a reindex and a fresh object.
        res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5)
        tm.assertIsInstance(res, Panel4D)
        self.assertIsNot(res, self.panel4d)
        self.assertEqual(res.get_value('l4', 'ItemE', 'foo', 'bar'), 1.5)

        # Setting an int into the enlarged (NaN-padded) block upcasts
        # the whole label to float.
        res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)
        self.assertTrue(is_float_dtype(res3['l4'].values))
class TestPanel4d(tm.TestCase, CheckIndexing, SafeForSparse,
                  SafeForLongAndSparse):
    """Tests for the long-deprecated 4-D Panel4D container.

    Most bodies are wrapped in ``tm.assert_produces_warning(FutureWarning)``
    because merely constructing a Panel4D already warned when this suite
    was written.
    """

    _multiprocess_can_split_ = True

    @classmethod
    def assert_panel4d_equal(cls, x, y):
        # Equality hook required by the shared mixin base classes.
        assert_panel4d_equal(x, y)

    def setUp(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            self.panel4d = tm.makePanel4D(nper=8)
            add_nans(self.panel4d)

    def test_constructor(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):

            # Constructing from an existing BlockManager shares it...
            panel4d = Panel4D(self.panel4d._data)
            self.assertIs(panel4d._data, self.panel4d._data)

            # ...unless copy=True is requested.
            panel4d = Panel4D(self.panel4d._data, copy=True)
            self.assertIsNot(panel4d._data, self.panel4d._data)
            assert_panel4d_equal(panel4d, self.panel4d)

            vals = self.panel4d.values

            # no copy
            panel4d = Panel4D(vals)
            self.assertIs(panel4d.values, vals)

            # copy
            panel4d = Panel4D(vals, copy=True)
            self.assertIsNot(panel4d.values, vals)

            # GH #8285, test when scalar data is used to construct a Panel4D
            # if dtype is not passed, it should be inferred
            value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
                               ('foo', np.object_)]
            for (val, dtype) in value_and_dtype:
                panel4d = Panel4D(val, labels=range(2), items=range(
                    3), major_axis=range(4), minor_axis=range(5))
                vals = np.empty((2, 3, 4, 5), dtype=dtype)
                vals.fill(val)
                expected = Panel4D(vals, dtype=dtype)
                assert_panel4d_equal(panel4d, expected)

            # test the case when dtype is passed
            panel4d = Panel4D(1, labels=range(2), items=range(
                3), major_axis=range(4), minor_axis=range(5), dtype='float32')
            vals = np.empty((2, 3, 4, 5), dtype='float32')
            vals.fill(1)
            expected = Panel4D(vals, dtype='float32')
            assert_panel4d_equal(panel4d, expected)

    def test_constructor_cast(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            zero_filled = self.panel4d.fillna(0)

            # dtype= must cast both from a BlockManager and from values.
            casted = Panel4D(zero_filled._data, dtype=int)
            casted2 = Panel4D(zero_filled.values, dtype=int)

            exp_values = zero_filled.values.astype(int)
            assert_almost_equal(casted.values, exp_values)
            assert_almost_equal(casted2.values, exp_values)

            casted = Panel4D(zero_filled._data, dtype=np.int32)
            casted2 = Panel4D(zero_filled.values, dtype=np.int32)

            exp_values = zero_filled.values.astype(np.int32)
            assert_almost_equal(casted.values, exp_values)
            assert_almost_equal(casted2.values, exp_values)

            # can't cast
            data = [[['foo', 'bar', 'baz']]]
            # NOTE(review): this constructs Panel, not Panel4D -- it looks
            # copied from the 3-D Panel suite; confirm that is intentional.
            self.assertRaises(ValueError, Panel, data, dtype=float)

    def test_consolidate(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            self.assertTrue(self.panel4d._data.is_consolidated())

            # Adding a new label creates an extra block, de-consolidating.
            self.panel4d['foo'] = 1.
            self.assertFalse(self.panel4d._data.is_consolidated())

            panel4d = self.panel4d.consolidate()
            self.assertTrue(panel4d._data.is_consolidated())

    def test_ctor_dict(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            l1 = self.panel4d['l1']
            l2 = self.panel4d['l2']

            # A dict of Panels (one reindexed to a single item) is a
            # valid constructor input.
            d = {'A': l1, 'B': l2.ix[['ItemB'], :, :]}

            panel4d = Panel4D(d)

            assert_panel_equal(panel4d['A'], self.panel4d['l1'])
            assert_frame_equal(panel4d.ix['B', 'ItemB', :, :],
                               self.panel4d.ix['l2', ['ItemB'], :, :]['ItemB'])

    def test_constructor_dict_mixed(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # Raw ndarrays: a default integer major axis is generated.
            data = dict((k, v.values) for k, v in self.panel4d.iteritems())
            result = Panel4D(data)

            exp_major = Index(np.arange(len(self.panel4d.major_axis)))
            self.assert_index_equal(result.major_axis, exp_major)

            # Supplying every axis reproduces the original exactly.
            result = Panel4D(data,
                             labels=self.panel4d.labels,
                             items=self.panel4d.items,
                             major_axis=self.panel4d.major_axis,
                             minor_axis=self.panel4d.minor_axis)
            assert_panel4d_equal(result, self.panel4d)

            # Mixing ndarrays with a real Panel is also accepted.
            data['l2'] = self.panel4d['l2']
            result = Panel4D(data)
            assert_panel4d_equal(result, self.panel4d)

            # corner, blow up
            # Wrong dimensionality for one entry must raise...
            data['l2'] = data['l2']['ItemB']
            self.assertRaises(Exception, Panel4D, data)

            # ...as must a shape mismatch.
            data['l2'] = self.panel4d['l2'].values[:, :, :-1]
            self.assertRaises(Exception, Panel4D, data)

    def test_constructor_resize(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # Passing shorter axes to the constructor must behave like
            # a reindex of the original.
            data = self.panel4d._data
            labels = self.panel4d.labels[:-1]
            items = self.panel4d.items[:-1]
            major = self.panel4d.major_axis[:-1]
            minor = self.panel4d.minor_axis[:-1]

            result = Panel4D(data, labels=labels, items=items,
                             major_axis=major, minor_axis=minor)
            expected = self.panel4d.reindex(
                labels=labels, items=items, major=major, minor=minor)
            assert_panel4d_equal(result, expected)

            result = Panel4D(data, items=items, major_axis=major)
            expected = self.panel4d.reindex(items=items, major=major)
            assert_panel4d_equal(result, expected)

            result = Panel4D(data, items=items)
            expected = self.panel4d.reindex(items=items)
            assert_panel4d_equal(result, expected)

            result = Panel4D(data, minor_axis=minor)
            expected = self.panel4d.reindex(minor=minor)
            assert_panel4d_equal(result, expected)

    def test_conform(self):
        # Conforming a 3-D Panel reindexes it to this container's axes.
        p = self.panel4d['l1'].filter(items=['ItemA', 'ItemB'])
        conformed = self.panel4d.conform(p)

        tm.assert_index_equal(conformed.items, self.panel4d.labels)
        tm.assert_index_equal(conformed.major_axis, self.panel4d.major_axis)
        tm.assert_index_equal(conformed.minor_axis, self.panel4d.minor_axis)

    def test_reindex(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            ref = self.panel4d['l2']

            # labels
            result = self.panel4d.reindex(labels=['l1', 'l2'])
            assert_panel_equal(result['l2'], ref)

            # items
            result = self.panel4d.reindex(items=['ItemA', 'ItemB'])
            assert_frame_equal(result['l2']['ItemB'], ref['ItemB'])

            # major
            new_major = list(self.panel4d.major_axis[:10])
            result = self.panel4d.reindex(major=new_major)
            assert_frame_equal(
                result['l2']['ItemB'], ref['ItemB'].reindex(index=new_major))

            # raise exception put both major and major_axis
            self.assertRaises(Exception, self.panel4d.reindex,
                              major_axis=new_major, major=new_major)

            # minor
            new_minor = list(self.panel4d.minor_axis[:2])
            result = self.panel4d.reindex(minor=new_minor)
            assert_frame_equal(
                result['l2']['ItemB'], ref['ItemB'].reindex(columns=new_minor))

            result = self.panel4d.reindex(labels=self.panel4d.labels,
                                          items=self.panel4d.items,
                                          major=self.panel4d.major_axis,
                                          minor=self.panel4d.minor_axis)

            # don't necessarily copy
            # A bare reindex() still returns a distinct object...
            result = self.panel4d.reindex()
            assert_panel4d_equal(result, self.panel4d)
            self.assertFalse(result is self.panel4d)

            # with filling
            smaller_major = self.panel4d.major_axis[::5]
            smaller = self.panel4d.reindex(major=smaller_major)
            larger = smaller.reindex(major=self.panel4d.major_axis,
                                     method='pad')
            assert_panel_equal(larger.ix[:, :, self.panel4d.major_axis[1], :],
                               smaller.ix[:, :, smaller_major[0], :])

            # don't necessarily copy
            # ...but reindex to the same axis with copy=False returns self.
            result = self.panel4d.reindex(
                major=self.panel4d.major_axis, copy=False)
            assert_panel4d_equal(result, self.panel4d)
            self.assertTrue(result is self.panel4d)

    def test_not_hashable(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            p4D_empty = Panel4D()
            self.assertRaises(TypeError, hash, p4D_empty)
            self.assertRaises(TypeError, hash, self.panel4d)

    def test_reindex_like(self):
        # reindex_like
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1],
                                           items=self.panel4d.items[:-1],
                                           major=self.panel4d.major_axis[:-1],
                                           minor=self.panel4d.minor_axis[:-1])
            smaller_like = self.panel4d.reindex_like(smaller)
            assert_panel4d_equal(smaller, smaller_like)

    def test_sort_index(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            import random

            rlabels = list(self.panel4d.labels)
            ritems = list(self.panel4d.items)
            rmajor = list(self.panel4d.major_axis)
            rminor = list(self.panel4d.minor_axis)
            random.shuffle(rlabels)
            random.shuffle(ritems)
            random.shuffle(rmajor)
            random.shuffle(rminor)

            # Sorting axis 0 of a label-shuffled copy restores the original.
            random_order = self.panel4d.reindex(labels=rlabels)
            sorted_panel4d = random_order.sort_index(axis=0)
            assert_panel4d_equal(sorted_panel4d, self.panel4d)

    def test_fillna(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # setUp's add_nans guarantees there is something to fill.
            self.assertFalse(np.isfinite(self.panel4d.values).all())
            filled = self.panel4d.fillna(0)
            self.assertTrue(np.isfinite(filled.values).all())

            # Only scalar fills are supported; method= is unimplemented.
            self.assertRaises(NotImplementedError,
                              self.panel4d.fillna, method='pad')

    def test_swapaxes(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = self.panel4d.swapaxes('labels', 'items')
            self.assertIs(result.items, self.panel4d.labels)

            result = self.panel4d.swapaxes('labels', 'minor')
            self.assertIs(result.labels, self.panel4d.minor_axis)

            result = self.panel4d.swapaxes('items', 'minor')
            self.assertIs(result.items, self.panel4d.minor_axis)

            result = self.panel4d.swapaxes('items', 'major')
            self.assertIs(result.items, self.panel4d.major_axis)

            result = self.panel4d.swapaxes('major', 'minor')
            self.assertIs(result.major_axis, self.panel4d.minor_axis)

            # this should also work
            result = self.panel4d.swapaxes(0, 1)
            self.assertIs(result.labels, self.panel4d.items)

            # this works, but return a copy
            result = self.panel4d.swapaxes('items', 'items')
            assert_panel4d_equal(self.panel4d, result)
            self.assertNotEqual(id(self.panel4d), id(result))

    def test_update(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            p4d = Panel4D([[[[1.5, np.nan, 3.],
                             [1.5, np.nan, 3.],
                             [1.5, np.nan, 3.],
                             [1.5, np.nan, 3.]],
                            [[1.5, np.nan, 3.],
                             [1.5, np.nan, 3.],
                             [1.5, np.nan, 3.],
                             [1.5, np.nan, 3.]]]])

            other = Panel4D([[[[3.6, 2., np.nan]],
                              [[np.nan, np.nan, 7]]]])

            p4d.update(other)

            # update() only overwrites where `other` is non-NaN.
            expected = Panel4D([[[[3.6, 2, 3.],
                                  [1.5, np.nan, 3.],
                                  [1.5, np.nan, 3.],
                                  [1.5, np.nan, 3.]],
                                 [[1.5, np.nan, 7],
                                  [1.5, np.nan, 3.],
                                  [1.5, np.nan, 3.],
                                  [1.5, np.nan, 3.]]]])

            assert_panel4d_equal(p4d, expected)

    def test_dtypes(self):
        # One dtype entry per label.
        result = self.panel4d.dtypes
        expected = Series(np.dtype('float64'), index=self.panel4d.labels)
        assert_series_equal(result, expected)

    def test_repr_empty(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            # repr of an empty container must not raise.
            empty = Panel4D()
            repr(empty)

    def test_rename(self):
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            mapper = {'l1': 'foo',
                      'l2': 'bar',
                      'l3': 'baz'}

            renamed = self.panel4d.rename_axis(mapper, axis=0)
            exp = Index(['foo', 'bar', 'baz'])
            self.assert_index_equal(renamed.labels, exp)

            # A callable mapper is applied element-wise to the axis.
            renamed = self.panel4d.rename_axis(str.lower, axis=3)
            exp = Index(['a', 'b', 'c', 'd'])
            self.assert_index_equal(renamed.minor_axis, exp)

            # don't copy
            # With copy=False the renamed object shares data, so writes
            # show through to the original.
            renamed_nocopy = self.panel4d.rename_axis(mapper,
                                                      axis=0,
                                                      copy=False)
            renamed_nocopy['foo'] = 3.
            self.assertTrue((self.panel4d['l1'].values == 3).all())

    def test_get_attr(self):
        # Labels are also reachable as attributes.
        assert_panel_equal(self.panel4d['l1'], self.panel4d.l1)
if __name__ == '__main__':
    # Allow running this test module directly under nose.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| [
"dushyant.bgs@gmail.com"
] | dushyant.bgs@gmail.com |
830a93a7ff43fc82e126221c9908f7a338e3214e | 42e44bfdca15da665fce8f83f57aaa08bc580dce | /tests/test_multiple_sheets.py | e2624e4339c843e005ed4e1b068c81792f2f42c0 | [
"BSD-3-Clause"
] | permissive | andrealmar/pyexcel | b6e20451047c9663ca075f73305c33fce91f319c | 350b6fbf13b9ddaa4d58fd0b65e8028774573013 | refs/heads/master | 2020-12-02T15:08:00.804201 | 2015-09-26T22:30:36 | 2015-09-26T22:30:36 | 43,714,151 | 0 | 0 | null | 2015-10-05T21:12:26 | 2015-10-05T21:12:26 | null | UTF-8 | Python | false | false | 19,842 | py | from base import PyexcelMultipleSheetBase
import pyexcel as pe
import os
from base import create_sample_file1, clean_up_files
from _compact import OrderedDict
from nose.tools import raises
class TestXlsNXlsmMultipleSheets(PyexcelMultipleSheetBase):
    """Round-trip a three-sheet book between .xls and .xlsm.

    All test methods come from PyexcelMultipleSheetBase; this class only
    supplies the fixture file names and content.
    """

    def setUp(self):
        self.testfile = "multiple1.xls"
        self.testfile2 = "multiple1.xlsm"
        # Sheet order matters, so build the OrderedDict from pairs.
        self.content = OrderedDict([
            ("Sheet1", [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]),
            ("Sheet2", [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]]),
            ("Sheet3", [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]),
        ])
        self._write_test_file(self.testfile)

    def tearDown(self):
        self._clean_up()
class TestCSVNXlsMultipleSheets:
    """Read back a three-sheet book that was written as per-sheet CSVs."""

    def setUp(self):
        # Writing a multi-sheet book to ".csv" splits it into one file
        # per sheet, named "multiple1__<sheet>__<index>.csv".
        self.testfile = "multiple1.csv"
        self.content = OrderedDict()
        self.content.update({"Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]})
        self.content.update({"Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]]})
        self.content.update({"Sheet3": [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]})
        w = pe.BookWriter(self.testfile)
        w.write_book_from_dict(self.content)
        w.close()

    def test_read_multiple_csv_into_book(self):
        book = pe.load_book(self.testfile)
        assert book.sheet_names() == ["Sheet1", "Sheet2", "Sheet3"]
        # CSV cells come back as text; coerce each sheet before comparing.
        for name in ("Sheet1", "Sheet2", "Sheet3"):
            book[name].format(int)
            assert self.content[name] == book[name].to_array()

    def tearDown(self):
        # Same cleanup helper and file list as TestCSVSingleSheet below.
        clean_up_files([
            "multiple1__Sheet1__0.csv",
            "multiple1__Sheet2__1.csv",
            "multiple1__Sheet3__2.csv",
        ])
class TestCSVzMultipleSheets:
    """Read back a three-sheet book stored as a zipped CSV (.csvz)."""

    def setUp(self):
        self.testfile = "multiple1.csvz"
        # Sheet order matters, so build the OrderedDict from pairs.
        self.content = OrderedDict([
            ("Sheet1", [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]),
            ("Sheet2", [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]]),
            ("Sheet3", [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]),
        ])
        writer = pe.BookWriter(self.testfile)
        writer.write_book_from_dict(self.content)
        writer.close()

    def test_read_multiple_csv_into_book(self):
        book = pe.load_book(self.testfile)
        assert book.sheet_names() == ["Sheet1", "Sheet2", "Sheet3"]
        # CSV cells come back as text; coerce each sheet before comparing.
        for name in ("Sheet1", "Sheet2", "Sheet3"):
            book[name].format(int)
            assert self.content[name] == book[name].to_array()

    def tearDown(self):
        if os.path.exists(self.testfile):
            os.unlink(self.testfile)
class TestSingleSheetReaderForMulitpleSheetBook:
    """Single-sheet readers pointed at one sheet of a multi-sheet book."""

    def setUp(self):
        self.testfile = "multiple1.xls"
        # Sheet order matters, so build the OrderedDict from pairs.
        self.content = OrderedDict([
            ("Sheet1", [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]),
            ("Sheet2", [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]]),
            ("Sheet3", [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]),
        ])
        writer = pe.BookWriter(self.testfile)
        writer.write_book_from_dict(self.content)
        writer.close()

    def test_non_default_sheet_as_single_sheet_reader(self):
        # Each named sheet reads back as that sheet's rows only.
        for sheet_name in ("Sheet1", "Sheet2", "Sheet3"):
            reader = pe.Reader(self.testfile, sheet_name)
            assert pe.utils.to_array(reader) == self.content[sheet_name]

    def test_non_default_sheet_as_single_sheet_reader_series(self):
        reader = pe.SeriesReader(self.testfile, "Sheet3")
        rows = pe.utils.to_array(reader.rows())
        # The first row of Sheet3 is consumed as the series header.
        assert rows == self.content["Sheet3"][1:]

    def test_non_default_sheet_as_single_sheet_plain_reader(self):
        sheet = pe.load(self.testfile, "Sheet2")
        assert pe.utils.to_array(sheet.rows()) == self.content["Sheet2"]

    def tearDown(self):
        if os.path.exists(self.testfile):
            os.unlink(self.testfile)
class TestReader:
    """BookReader treats a plain CSV file as a one-sheet book."""

    def setUp(self):
        """
        Make a test csv file as:

        a,b,c,d
        e,f,g,h
        i,j,1.1,1
        """
        self.testfile = "testcsv.csv"
        create_sample_file1(self.testfile)

    def test_csv_book_reader(self):
        reader = pe.BookReader(self.testfile)
        assert reader.number_of_sheets() == 1
        # A CSV "book" names its single sheet after the file itself.
        assert reader.sheet_names() == [self.testfile]

    def tearDown(self):
        if os.path.exists(self.testfile):
            os.unlink(self.testfile)
class TestCSVSingleSheet:
    """Loading one sheet (by name or index) from a CSV-backed book."""

    def _write_test_file(self, file, content):
        """Write *content* (sheet name -> rows) to *file* as a book; for
        .csv the writer emits one "<base>__<sheet>__<n>.csv" per sheet."""
        w = pe.BookWriter(file)
        w.write_book_from_dict(content)
        w.close()

    def setUp(self):
        self.testfile = "multiple1.csv"
        self.content = OrderedDict()
        self.content.update({"Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]})
        self.content.update({"Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]]})
        self.content.update({"Sheet3": [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]})
        self._write_test_file(self.testfile, self.content)

    def test_load_a_single_sheet(self):
        # Select by sheet name: only that sheet is loaded.
        b1 = pe.load_book(self.testfile, sheet_name="Sheet1")
        b1['Sheet1'].format(int)
        assert len(b1.sheet_names()) == 1
        assert b1['Sheet1'].to_array() == self.content['Sheet1']

    def test_load_a_single_sheet2(self):
        # Select by zero-based sheet index.
        b1 = pe.load_book(self.testfile, sheet_index=1)
        b1['Sheet2'].format(int)
        assert len(b1.sheet_names()) == 1
        assert b1['Sheet2'].to_array() == self.content['Sheet2']

    @raises(IndexError)
    def test_load_a_single_sheet3(self):
        # Out-of-range index must raise.
        pe.load_book(self.testfile, sheet_index=10000)

    @raises(ValueError)
    def test_load_a_single_sheet4(self):
        # Unknown sheet name must raise.
        pe.load_book(self.testfile, sheet_name="Not exist")

    def tearDown(self):
        clean_up_files([
            "multiple1__Sheet1__0.csv",
            "multiple1__Sheet2__1.csv",
            "multiple1__Sheet3__2.csv",
        ])
class TestCSVZSingleSheet:
    """Same single-sheet loading checks as above, against a .csvz file."""

    def _write_test_file(self, file, content):
        """Serialize the sheet mapping *content* into *file* as a book."""
        writer = pe.BookWriter(file)
        writer.write_book_from_dict(content)
        writer.close()

    def setUp(self):
        self.testfile = "multiple1.csvz"
        # Sheet order matters, so build the OrderedDict from pairs.
        self.content = OrderedDict([
            ("Sheet1", [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]),
            ("Sheet2", [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]]),
            ("Sheet3", [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]),
        ])
        self._write_test_file(self.testfile, self.content)

    def test_load_a_single_sheet(self):
        # Select by sheet name: only that sheet is loaded.
        book = pe.load_book(self.testfile, sheet_name="Sheet1")
        book['Sheet1'].format(int)
        assert len(book.sheet_names()) == 1
        assert book['Sheet1'].to_array() == self.content['Sheet1']

    def test_load_a_single_sheet2(self):
        # Select by zero-based sheet index.
        book = pe.load_book(self.testfile, sheet_index=1)
        book['Sheet2'].format(int)
        assert len(book.sheet_names()) == 1
        assert book['Sheet2'].to_array() == self.content['Sheet2']

    @raises(IndexError)
    def test_load_a_single_sheet3(self):
        # Out-of-range index must raise.
        pe.load_book(self.testfile, sheet_index=10000)

    @raises(ValueError)
    def test_load_a_single_sheet4(self):
        # Unknown sheet name must raise.
        pe.load_book(self.testfile, sheet_name="Not exist")

    def tearDown(self):
        clean_up_files([
            self.testfile
        ])
class TestAddBooks:
    """Book/sheet composition with "+" and "+=", plus sheet deletion."""

    def _write_test_file(self, file, content):
        """Write *content* (sheet name -> rows) to *file* as a book."""
        w = pe.BookWriter(file)
        w.write_book_from_dict(content)
        w.close()

    def setUp(self):
        self.testfile = "multiple1.xlsm"
        self.testfile2 = "multiple1.xls"
        # NOTE(review): testfile3 is never written by any test here; it
        # is only checked for (and never found) in tearDown.
        self.testfile3 = "multiple2.xlsx"
        self.content = {
            "Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
            "Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]],
            "Sheet3": [[u'X', u'Y', u'Z'], [1, 4, 7], [2, 5, 8], [3, 6, 9]]
        }
        self._write_test_file(self.testfile, self.content)
        self._write_test_file(self.testfile2, self.content)
        self.test_single_sheet_file = "single.xls"
        self.content1 = {
            "Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
        }
        self._write_test_file(self.test_single_sheet_file, self.content1)

    @raises(KeyError)
    def test_delete_sheets(self):
        """Can delete by sheet name"""
        b1 = pe.load_book(self.testfile)
        assert len(b1.sheet_names()) == 3
        del b1["Sheet1"]
        assert len(b1.sheet_names()) == 2
        del b1["Sheet1"]  # bang, already deleted

    @raises(IndexError)
    def test_delete_sheets2(self):
        """Can delete by index"""
        b1 = pe.load_book(self.testfile)
        assert len(b1.sheet_names()) == 3
        del b1[2]
        del b1[1]
        assert len(b1.sheet_names()) == 1
        del b1[1]  # bang, already deleted

    @raises(TypeError)
    def test_delete_sheets3(self):
        """Test float in []"""
        b1 = pe.load_book(self.testfile)
        del b1[1.1]

    def test_delete_sheets4(self):
        """repetitively delete first sheet"""
        b1 = pe.load_book(self.testfile)
        del b1[0]
        assert len(b1.sheet_names()) == 2
        del b1[0]
        assert len(b1.sheet_names()) == 1
        del b1[0]
        assert len(b1.sheet_names()) == 0

    def test_add_book1(self):
        """
        test this scenario: book3 = book1 + book2
        """
        b1 = pe.BookReader(self.testfile)
        b2 = pe.BookReader(self.testfile2)
        b3 = b1 + b2
        content = pe.utils.to_dict(b3)
        sheet_names = content.keys()
        # Merged sheet names are decorated, so match by substring.
        assert len(sheet_names) == 6
        for name in sheet_names:
            if "Sheet3" in name:
                assert content[name] == self.content["Sheet3"]
            elif "Sheet2" in name:
                assert content[name] == self.content["Sheet2"]
            elif "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]

    def test_add_book1_in_place(self):
        """
        test this scenario: book1 += book2
        """
        b1 = pe.BookReader(self.testfile)
        b2 = pe.BookReader(self.testfile2)
        b1 += b2
        content = pe.utils.to_dict(b1)
        sheet_names = content.keys()
        assert len(sheet_names) == 6
        for name in sheet_names:
            if "Sheet3" in name:
                assert content[name] == self.content["Sheet3"]
            elif "Sheet2" in name:
                assert content[name] == self.content["Sheet2"]
            elif "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]

    def test_add_book2(self):
        """
        test this scenario: book3 = book1 + sheet3
        """
        b1 = pe.BookReader(self.testfile)
        b2 = pe.BookReader(self.testfile2)
        b3 = b1 + b2["Sheet3"]
        content = pe.utils.to_dict(b3)
        sheet_names = content.keys()
        assert len(sheet_names) == 4
        for name in sheet_names:
            if "Sheet3" in name:
                assert content[name] == self.content["Sheet3"]
            elif "Sheet2" in name:
                assert content[name] == self.content["Sheet2"]
            elif "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]

    def test_add_book2_in_place(self):
        """
        test this scenario: book1 += sheet3
        """
        b1 = pe.BookReader(self.testfile)
        b2 = pe.BookReader(self.testfile2)
        b1 += b2["Sheet3"]
        content = pe.utils.to_dict(b1)
        sheet_names = content.keys()
        assert len(sheet_names) == 4
        for name in sheet_names:
            if "Sheet3" in name:
                assert content[name] == self.content["Sheet3"]
            elif "Sheet2" in name:
                assert content[name] == self.content["Sheet2"]
            elif "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]

    def test_add_book3(self):
        """
        test this scenario: book3 = sheet1 + sheet2
        """
        b1 = pe.BookReader(self.testfile)
        b2 = pe.BookReader(self.testfile2)
        b3 = b1["Sheet1"] + b2["Sheet3"]
        content = pe.utils.to_dict(b3)
        sheet_names = content.keys()
        assert len(sheet_names) == 2
        assert content["Sheet3"] == self.content["Sheet3"]
        assert content["Sheet1"] == self.content["Sheet1"]

    def test_add_book4(self):
        """
        test this scenario: book3 = sheet1 + book
        """
        b1 = pe.BookReader(self.testfile)
        b2 = pe.BookReader(self.testfile2)
        b3 = b1["Sheet1"] + b2
        content = pe.utils.to_dict(b3)
        sheet_names = content.keys()
        assert len(sheet_names) == 4
        for name in sheet_names:
            if "Sheet3" in name:
                assert content[name] == self.content["Sheet3"]
            elif "Sheet2" in name:
                assert content[name] == self.content["Sheet2"]
            elif "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]

    def test_add_book4_2(self):
        """
        test this scenario: book3 = sheet1 + book

        use . notation
        """
        b1 = pe.BookReader(self.testfile)
        b2 = pe.BookReader(self.testfile2)
        b3 = b1.Sheet1 + b2
        content = pe.utils.to_dict(b3)
        sheet_names = content.keys()
        assert len(sheet_names) == 4
        for name in sheet_names:
            if "Sheet3" in name:
                assert content[name] == self.content["Sheet3"]
            elif "Sheet2" in name:
                assert content[name] == self.content["Sheet2"]
            elif "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]

    def test_add_book5(self):
        """
        test this scenario: book3 = single_sheet_book + book
        """
        b1 = pe.BookReader(self.test_single_sheet_file)
        b2 = pe.BookReader(self.testfile2)
        b3 = b1 + b2
        content = pe.utils.to_dict(b3)
        sheet_names = content.keys()
        assert len(sheet_names) == 4
        for name in sheet_names:
            if "Sheet3" in name:
                assert content[name] == self.content["Sheet3"]
            elif "Sheet2" in name:
                assert content[name] == self.content["Sheet2"]
            elif "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]
            elif "single.xls" in name:
                # The single-sheet book's sheet is named after its file.
                assert content[name] == self.content1["Sheet1"]

    def test_add_book6(self):
        """
        test this scenario: book3 = book + single_sheet_book
        """
        b1 = pe.BookReader(self.test_single_sheet_file)
        b2 = pe.BookReader(self.testfile2)
        b3 = b2 + b1
        content = pe.utils.to_dict(b3)
        sheet_names = content.keys()
        assert len(sheet_names) == 4
        for name in sheet_names:
            if "Sheet3" in name:
                assert content[name] == self.content["Sheet3"]
            elif "Sheet2" in name:
                assert content[name] == self.content["Sheet2"]
            elif "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]
            elif "single.xls" in name:
                assert content[name] == self.content1["Sheet1"]

    def test_add_sheet(self):
        """
        test this scenario: book3 = sheet1 + single_sheet_book
        """
        b1 = pe.BookReader(self.testfile)
        b2 = pe.BookReader(self.test_single_sheet_file)
        b3 = b1["Sheet1"] + b2
        content = pe.utils.to_dict(b3)
        sheet_names = content.keys()
        assert len(sheet_names) == 2
        for name in sheet_names:
            if "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]
            elif "single.xls" in name:
                assert content[name] == self.content1["Sheet1"]

    def test_add_sheet2(self):
        """
        test this scenario: book3 = sheet1 + sheet1
        """
        b1 = pe.BookReader(self.testfile)
        b3 = b1["Sheet1"] + b1["Sheet1"]
        content = pe.utils.to_dict(b3)
        sheet_names = content.keys()
        assert len(sheet_names) == 2
        for name in sheet_names:
            if "Sheet1" in name:
                assert content[name] == self.content["Sheet1"]

    @raises(TypeError)
    def test_add_book_error(self):
        """
        test this scenario: book3 = book + integer
        """
        b1 = pe.BookReader(self.testfile)
        b1 + 12  # bang, cannot add integer

    @raises(TypeError)
    def test_add_book_error2(self):
        b1 = pe.BookReader(self.testfile)
        b1 += 12  # bang cannot iadd integer

    @raises(TypeError)
    def test_add_sheet_error(self):
        """
        test this scenario: book3 = sheet1 + integer
        """
        b1 = pe.BookReader(self.testfile)
        b1["Sheet1"] + 12  # bang, cannot add integer

    @raises(NotImplementedError)
    def test_add_sheet_error2(self):
        b1 = pe.BookReader(self.testfile)
        b1["Sheet1"] += 12  # bang, cannot iadd integer

    def tearDown(self):
        # Remove whichever fixture files exist.
        if os.path.exists(self.testfile):
            os.unlink(self.testfile)
        if os.path.exists(self.testfile2):
            os.unlink(self.testfile2)
        if os.path.exists(self.testfile3):
            os.unlink(self.testfile3)
        if os.path.exists(self.test_single_sheet_file):
            os.unlink(self.test_single_sheet_file)
class TestMergeCSVsIntoOne:
    """Concatenating several CSV files row-wise into one sheet."""

    def test_merging(self):
        """Rows of 1.csv, 2.csv and 3.csv appear, in order, in merged.csv.

        Everything reads back as text because CSV is untyped.  Cleanup
        now runs in a ``finally`` so the temp files are removed even if
        the assertion fails.  (The redundant inner ``import pyexcel``
        was dropped; ``pe`` is already imported at module level.)
        """
        sources = [
            ("1.csv", [[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
            ("2.csv", [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']]),
            ("3.csv", [[1.1, 2.2, 3.3], [4.4, 5.5, 6.6], [7.7, 8.8, 9.9]]),
        ]
        try:
            # set up: write the three source files
            for file_name, rows in sources:
                w = pe.Writer(file_name)
                w.write_rows(rows)
                w.close()
            # execute: append each file's rows onto one sheet
            merged = pe.Sheet()
            for file_name in ["1.csv", "2.csv", "3.csv"]:
                merged.row += pe.Reader(file_name)
            writer = pe.Writer("merged.csv")
            writer.write_reader(merged)
            writer.close()
            # verify
            actual = pe.utils.to_array(pe.Reader("merged.csv"))
            expected = [
                [u'1', u'2', u'3'],
                [u'4', u'5', u'6'],
                [u'7', u'8', u'9'],
                [u'a', u'b', u'c'],
                [u'd', u'e', u'f'],
                [u'g', u'h', u'i'],
                [u'1.1', u'2.2', u'3.3'],
                [u'4.4', u'5.5', u'6.6'],
                [u'7.7', u'8.8', u'9.9']
            ]
            assert expected == actual
        finally:
            # clean up even when the assertion above fails
            for file_name in ["1.csv", "2.csv", "3.csv", "merged.csv"]:
                if os.path.exists(file_name):
                    os.unlink(file_name)
| [
"wangc_2011@hotmail.com"
] | wangc_2011@hotmail.com |
b3acd8deb234d935d0bc79788d1e84b43210055d | 42685099f1e25e5c1db51b98546e0be495d2789f | /v6.0.2/log_syslogd/fortios_log_syslogd_override_filter.py | 9df76b4adb66ef98a0ba3135671867ef7fba21a6 | [
"Apache-2.0"
] | permissive | sxhdroid/ansible_fgt_modules | 02aaf9af33063d8178e7e898666ac9cdef150a00 | 58d02d80a8d0ff145bee226b345ad9738af523f6 | refs/heads/master | 2020-04-18T01:34:45.990750 | 2019-01-22T10:47:36 | 2019-01-22T10:47:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,969 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_syslogd_override_filter
short_description: Override filters for remote system server in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure log_syslogd feature and override_filter category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip adress.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
log_syslogd_override_filter:
description:
- Override filters for remote system server.
default: null
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
choices:
- enable
- disable
filter:
description:
- Syslog filter.
filter-type:
description:
- Include/exclude logs that match the filter.
choices:
- include
- exclude
forward-traffic:
description:
- Enable/disable forward traffic logging.
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
choices:
- enable
- disable
local-traffic:
description:
- Enable/disable local in or out traffic logging.
choices:
- enable
- disable
multicast-traffic:
description:
- Enable/disable multicast traffic logging.
choices:
- enable
- disable
netscan-discovery:
description:
- Enable/disable netscan discovery event logging.
choices:
netscan-vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
choices:
severity:
description:
- Lowest severity level to log.
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer-traffic:
description:
- Enable/disable sniffer traffic logging.
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Override filters for remote system server.
fortios_log_syslogd_override_filter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
log_syslogd_override_filter:
anomaly: "enable"
dns: "enable"
filter: "<your_own_value>"
filter-type: "include"
forward-traffic: "enable"
gtp: "enable"
local-traffic: "enable"
multicast-traffic: "enable"
netscan-discovery: "<your_own_value>"
netscan-vulnerability: "<your_own_value>"
severity: "emergency"
sniffer-traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: string
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: string
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: string
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: string
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: string
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: string
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: string
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: string
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: string
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: string
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: string
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open a session on the FortiGate described by *data* via the module-level ``fos`` client."""
    credentials = (data['host'], data['username'], data['password'])
    fos.debug('on')
    # HTTPS defaults to on; only an explicit https=False turns it off.
    https_mode = 'off' if ('https' in data and not data['https']) else 'on'
    fos.https(https_mode)
    fos.login(*credentials)
def filter_log_syslogd_override_filter_data(json):
    """Return a copy of *json* restricted to the keys the override-filter
    endpoint accepts, dropping any keys whose value is None."""
    option_list = ['anomaly', 'dns', 'filter',
                   'filter-type', 'forward-traffic', 'gtp',
                   'local-traffic', 'multicast-traffic', 'netscan-discovery',
                   'netscan-vulnerability', 'severity', 'sniffer-traffic',
                   'ssh', 'voip']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def log_syslogd_override_filter(data, fos):
    """Push the override-filter configuration carried in *data* to the device via *fos*."""
    payload = filter_log_syslogd_override_filter_data(
        data['log_syslogd_override_filter'])
    return fos.set('log.syslogd',
                   'override-filter',
                   data=payload,
                   vdom=data['vdom'])
def fortios_log_syslogd(data, fos):
    """Log in, dispatch the requested log.syslogd configuration call, log out.

    Returns a tuple ``(is_error, has_changed, response)`` for the Ansible
    module layer. Raises ValueError if no configuration payload was supplied.
    """
    login(data)
    # Explicit dispatch table instead of eval() on a method-name string:
    # avoids executing arbitrary names and keeps the mapping auditable.
    dispatch = {
        'log_syslogd_override_filter': log_syslogd_override_filter,
    }
    resp = None
    for method_name, handler in dispatch.items():
        if data.get(method_name):
            resp = handler(data, fos)
            break
    fos.logout()
    if resp is None:
        # The original code hit an unbound-variable NameError here; fail with
        # a clear message instead.
        raise ValueError("no log_syslogd configuration data provided")
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Ansible entry point: declare the argument spec, connect to the
    FortiGate with fortiosapi, apply the configuration and report back."""
    # Argument spec mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": "False"},
        "log_syslogd_override_filter": {
            "required": False, "type": "dict",
            "options": {
                "anomaly": {"required": False, "type": "str",
                            "choices": ["enable", "disable"]},
                "dns": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "filter": {"required": False, "type": "str"},
                "filter-type": {"required": False, "type": "str",
                                "choices": ["include", "exclude"]},
                "forward-traffic": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
                "gtp": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "local-traffic": {"required": False, "type": "str",
                                  "choices": ["enable", "disable"]},
                "multicast-traffic": {"required": False, "type": "str",
                                      "choices": ["enable", "disable"]},
                "netscan-discovery": {"required": False, "type": "str",
                                      "choices": []},
                "netscan-vulnerability": {"required": False, "type": "str",
                                          "choices": []},
                "severity": {"required": False, "type": "str",
                             "choices": ["emergency", "alert", "critical",
                                         "error", "warning", "notification",
                                         "information", "debug"]},
                "sniffer-traffic": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
                "ssh": {"required": False, "type": "str",
                        "choices": ["enable", "disable"]},
                "voip": {"required": False, "type": "str",
                         "choices": ["enable", "disable"]}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is an optional runtime dependency; fail gracefully if absent.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")
    # The module-level `fos` client is used by login()/logout() helpers above.
    global fos
    fos = FortiOSAPI()
    is_error, has_changed, result = fortios_log_syslogd(module.params, fos)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| [
"magonzalez@fortinet.com"
] | magonzalez@fortinet.com |
28ed22db5c9fb33f2e867f2a54595fb76d76c037 | 4820b6d9665a487b7bd3f91a64354110dc61d31f | /palindrome permutations 2.py | c23d54d40333fba9768cfa1a99410ea2efd0d478 | [] | no_license | iamshivamgoswami/backtracking | 4f5c84342e7bf1865556ef87ee089c249fc73aef | 7e86f7f2852c14335d1f2b4a2a6bc8e577f33c3d | refs/heads/main | 2023-07-03T07:03:27.100901 | 2021-07-27T07:27:25 | 2021-07-27T07:27:25 | 389,594,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | import collections
class Solution:
    def generatePalindromes(self, s: str):
        """Return every distinct palindromic permutation of *s*.

        A palindrome exists only if at most one character has an odd count;
        otherwise an empty list is returned. Results are built by
        backtracking over the multiset of "half" characters.
        """
        counts = collections.Counter(s)
        # Characters appearing an odd number of times; more than one means
        # no palindromic arrangement is possible.
        odd_chars = [ch for ch, cnt in counts.items() if cnt % 2]
        if len(odd_chars) > 1:
            return []
        middle = odd_chars[0] if odd_chars else ""
        # Each character contributes cnt // 2 copies to the left half.
        half_chars = [ch for ch, cnt in counts.items() for _ in range(cnt // 2)]
        target_len = len(half_chars)
        remaining = collections.Counter(half_chars)
        results = []

        def backtrack(prefix):
            if len(prefix) == target_len:
                left = "".join(prefix)
                results.append(left + middle + left[::-1])
                return
            for ch in remaining:
                if remaining[ch] > 0:
                    remaining[ch] -= 1
                    prefix.append(ch)
                    backtrack(prefix)
                    prefix.pop()
                    remaining[ch] += 1

        backtrack([])
        return results
# Smoke test: "aaa" has exactly one palindromic permutation, itself.
solver = Solution()
print(solver.generatePalindromes("aaa"))
| [
"shivamgoswami12@gmail.com"
] | shivamgoswami12@gmail.com |
483fa6de553dfe149fd491fe21befb574ca1242e | 06aa3ec3262f6dd6866ea194ed6385f8e53509bf | /manuscript_codes/AML211DiffTrack/generateNetwork.py | 1fbc726986d5530780c38878ff1d0b29d5980caf | [] | no_license | KuehLabUW/UPSIDE | 95ce078382792d1beb0574c3b19c04e467befa58 | 3c90de9677f24e258800cb95bce6cb528f4ad4ac | refs/heads/master | 2023-07-13T15:58:07.963672 | 2021-08-30T21:14:48 | 2021-08-30T21:14:48 | 329,134,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,187 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 13:04:17 2020
@author: phnguyen
"""
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from graphviz import Digraph
import pandas as pd
import os
def plotNetwork(Matrix):
    """Render a cell-state transition graph with graphviz and open the viewer.

    *Matrix* is square; entry (i, j) is the transition magnitude from state
    i to state j, and the diagonal (i, i) is the residence of state i.
    Node size scales with residence; edge thickness with transition weight.
    """
    # the matrix is set up as position i j is direction from element i to j.
    # weight of the line is the magnitude of the transition
    # size of the node is how much residence the node has
    # all is done in graphviz
    f = Digraph('cell_state_transition3')
    for nidx in range(len(Matrix)):
        # Node diameter from the residence term (diagonal), scaled x10.
        circ_size = int(Matrix[nidx,nidx]*10)
        # Hardcoded False: the named-label branch below is effectively dead.
        text_label = False
        f.attr('node',shape = 'circle',fixedsize = 'false',width = '{}'.format(circ_size),height = '{}'.format(circ_size))
        #f.attr('node',shape = 'circle')
        if text_label == True:
            # Manually named states A1-A3, S1-S4, DB (only used when
            # text_label is enabled above).
            if nidx + 1 == 1:
                f.node('A1',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 2:
                f.node('A2',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 3:
                f.node('A3',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 4:
                f.node('S1',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 5:
                f.node('S2',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 6:
                f.node('S3',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 7:
                f.node('S4',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 8:
                f.node('DB',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
        else:
            # Default: numeric node names, font size scaled x100.
            f.node('{}'.format(nidx),fontsize = '{}'.format(int(Matrix[nidx,nidx]*100)))
    # Only draw edges above this transition-probability threshold.
    threshold = 0.09
    for i in range(len(Matrix)):
        for j in range(len(Matrix)):
            if i != j and Matrix[i,j] > threshold:
                thickness = int(Matrix[i,j]*40)
                f.edge('{}'.format(i),'{}'.format(j),penwidth = '{}'.format(2+thickness))
    f.view()
#%% now load the transition matrix
# NOTE(review): absolute path below is machine-specific; the script assumes it
# runs on the original analysis workstation.
csvs_dirname = '/media/phnguyen/Data2/Imaging/CellMorph/data/AML211DiffTrack/csvs/'
os.chdir(csvs_dirname)
df = pd.read_csv('CombinedUMAPDirFluoClusterTC.csv')
df = df.reset_index()
# Fold cluster 6 into cluster 4 before downstream analysis.
df = df.replace({'cluster':6}, {'cluster': 4}, regex=True)
#%%
# Restrict to positions 1-5 and an early time window — presumably t is in
# sub-steps with 20 per frame (0*20 .. 90*20); TODO confirm units.
subdf_crit = (df['pos']>0) & (df['pos']<6) & (df['t']>0*20) & (df['t']<90*20+1)
subdf = df[subdf_crit]
subdf = subdf.reset_index()
#get group information
# Shift cluster labels to 0-based indices for matrix addressing.
subdf['cluster'] = subdf['cluster'] - 1
cluster = subdf['cluster'].values
#make an empty matrix
AM = np.zeros((len(np.unique(cluster)),len(np.unique(cluster))))
#fill out the adjacent matrix
# For each tracked cell, locate the same cell (via pcell) at the next time
# point in the same position, and count the cluster-to-cluster transition.
for c in range(0,len(cluster)):
    g_now = subdf.cluster[c]
    pos_now = subdf.pos[c]
    t_now =subdf.t[c]
    pcell = subdf.pcell[c]
    if pcell != 0 :
        df_partner = subdf[(subdf['pos'] == pos_now) & (subdf['cell'] == pcell) & (subdf['t'] == t_now+1)]
        if len(df_partner['cluster']) == 1:
            g_partner = df_partner.cluster
            AM[g_now,g_partner] = AM[g_now,g_partner] + 1
    #print(c)
# Normalize by total transitions in each state
NormF = np.sum(AM,axis = 1)
AMN = AM/NormF[:,None]
# plot the figure
plotNetwork(AMN)
#%%
#calculate distance traveled
# Euclidean displacement of each cell between consecutive frames; 0 when the
# cell has no unique partner in the next frame.
DIST =[];
for c in range(0,len(df)):
    x_now = df.Xcenter[c]
    y_now = df.Ycenter[c]
    t_now =df.t[c]
    pos_now = df.pos[c]
    pcell = df.pcell[c]
    if pcell != 0 :
        df_partner = df[(df['pos'] == pos_now) & (df['cell'] == pcell) & (df['t'] == t_now+1)]
        if len(df_partner['cluster']) == 1:
            x_partner = float(df_partner.Xcenter.values)
            y_partner = float(df_partner.Ycenter.values)
            dist = np.linalg.norm(np.array((x_now,y_now))-np.array((x_partner,y_partner)))
            DIST.append(dist)
        else:
            DIST.append(0)
    else:
        DIST.append(0)
df['distance'] = DIST
#df.to_csv('CombinedUMAPDirFluoClusterTCdist.csv')
| [
"kuehlab@uw.edu"
] | kuehlab@uw.edu |
9f7dfcce54592311239776e190afd9b0a94e515a | dc891347a3cb625064fe6f88a0cb726d80a4c485 | /backend/manage.py | 1c155b642b5ffcbc4a4e45bfe5071a0646323736 | [] | no_license | crowdbotics-apps/web3-26079 | 5302040b077526e164888b5d2f6a388a40aad460 | 74990a2af2e2debca65caf283030f69da3741a32 | refs/heads/master | 2023-04-12T17:59:31.956124 | 2021-05-03T04:19:52 | 2021-05-03T04:19:52 | 363,814,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line administrative tasks for this project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web3_26079.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
55a82d57386c5a5cf6b2d392e5d774c58f62e1d3 | 2008ff2a5e0a7790c4578d2c8ce402e0bb24bfda | /orm_demo/store/migrations/0002_product_description.py | d7f8266267093ff7c3ade5c4483586a0a25ca433 | [] | no_license | ganqzz/django_demo | 788a44a212cbf9e82b2ca84ba86147a90943756a | c880fc784e7bca4a78709ad76772c924b97dc393 | refs/heads/master | 2023-07-09T07:53:56.549362 | 2021-08-13T11:46:39 | 2021-08-13T11:46:39 | 356,131,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # Generated by Django 3.1.3 on 2020-12-20 22:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add an optional free-text ``description`` to Product."""

    dependencies = [
        ('store', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='description',
            field=models.TextField(default='', blank=True),
        ),
    ]
| [
"ganqzz@users.noreply.github.com"
] | ganqzz@users.noreply.github.com |
baea0afbd350e02e867f40df5a66499c289ce39d | c13f48423a7b9817ab64addc1fab3c1391cbd860 | /rohdeschwarz/instruments/genericinstrument.py | 03d7004c2ef3fce6cff8b56fea8f8df59a498c44 | [] | no_license | yczhao1984/rohdeschwarz | 7bb891395967e398fefb7660492113e5ff8102fe | b0a72955608280580e13c900103752517b9e7bb5 | refs/heads/master | 2020-03-17T01:35:38.644954 | 2018-03-27T05:41:15 | 2018-03-27T05:41:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,644 | py | import sys
import os
#import struct
import numpy
from rohdeschwarz.general import ConnectionMethod
from rohdeschwarz.bus.tcp import TcpBus
from rohdeschwarz.bus.visa import VisaBus
class GenericInstrument(object):
    """Base SCPI instrument driver.

    Wraps a TCP or VISA bus with transfer logging, SCPI error-queue
    queries and IEEE 488.2 definite-length block-data transfers.
    """
    # Longest payload echoed verbatim into the log before truncation.
    _MAX_PRINT = 100

    def __init__(self):
        # Open file object for the transfer log, or None when logging is off.
        self.log = None
        # Active bus (TcpBus or VisaBus), or None when disconnected.
        self.bus = None
        self.buffer_size = 1024
        self.connection_method = ''
        self.address = ''
        # Byte count of the most recent read or write.
        self.bytes_transferred = 0

    def __del__(self):
        # Best effort: release the bus when the object is garbage collected.
        self.close()

    def open(self, connection_method = ConnectionMethod.tcpip, address = '127.0.0.1'):
        """Connect through VISA with the given connection method and address."""
        self.bus = VisaBus()
        self.bus.open(connection_method, address)

    def open_tcp(self, ip_address='127.0.0.1', socket=5025):
        """Connect with a raw TCP socket (default SCPI port 5025)."""
        self.connection_method = ConnectionMethod.tcpip
        self.address = "{0}:{1}".format(ip_address, socket)
        self.bus = TcpBus()
        self.bus.open(ip_address, socket)

    def close(self):
        """Close and drop the bus connection, if any."""
        if self.bus:
            self.bus.close()
            self.bus = None

    def connected(self):
        """Return True if the instrument answers an ``*IDN?`` query."""
        if not self.bus:
            return False
        try:
            return len(self.id_string()) > 0
        except:
            return False
        # Else
        # NOTE(review): unreachable — both branches above already return.
        return True

    def _timeout_ms(self):
        return self.bus.timeout_ms

    def _set_timeout_ms(self, time):
        self.bus.timeout_ms = time

    # Bus I/O timeout, in milliseconds.
    timeout_ms = property(_timeout_ms, _set_timeout_ms)

    def open_log(self, filename):
        """Open *filename* for writing as the transfer log (None on failure)."""
        self.log = open(filename, 'w')
        if self.log.closed:
            message = "Could not open log at '{0}'\n"
            message = message.format(filename)
            sys.stderr.write(message)
            self.log = None

    def close_log(self):
        """Flush and close the transfer log, if one is open."""
        if self.log:
            self.log.flush()
            self.log.close()
            self.log = None

    def id_string(self):
        """Return the instrument's ``*IDN?`` identification string."""
        return self.query('*IDN?').strip()

    def options_string(self):
        """Return the installed-options string (``*OPT?``)."""
        return self.query("*OPT?").strip()

    def is_error(self):
        """Return True if the instrument's error queue is non-empty.

        Note: draining the queue here is a side effect — errors read by
        this call are consumed.
        """
        return bool(self._errors())

    def next_error(self):
        """Pop one entry from the SCPI error queue.

        Returns (code, message) for an error, or None when the queue is
        empty (code 0).
        """
        code = 0;
        message = '';
        result = self.query(':SYST:ERR?').strip()
        comma_index = result.find(',')
        code = int(result[:comma_index])
        # Skip ', "' and drop the trailing quote around the message.
        message = result[comma_index+2:-1]
        if (code != 0):
            return(code, message)
        else:
            return None

    def _errors(self):
        """Drain the SCPI error queue; return a list of (code, message)."""
        errors = []
        error = self.next_error()
        while error:
            errors.append(error)
            error = self.next_error()
        return errors
    errors = property(_errors)

    def clear_status(self):
        """Clear status registers and the error queue (``*CLS``)."""
        self.write("*CLS")

    def preset(self):
        """Reset the instrument to its default state (``*RST``)."""
        self.write("*RST")

    def local(self):
        """Switch the instrument to local (front-panel) control."""
        self.write("@LOC")

    def remote(self):
        """Switch the instrument to remote control."""
        self.write("@REM")

    def is_rohde_schwarz(self):
        """Return True if the ID string identifies a Rohde & Schwarz unit."""
        return ("ROHDE" in self.id_string().upper())

    def wait(self):
        """Block command processing until pending operations finish (``*WAI``)."""
        self.write('*WAI')

    def pause(self, timeout_ms=1000):
        """Wait (via ``*OPC?``) for pending operations; True on completion.

        Temporarily raises the bus timeout to at least *timeout_ms*.
        """
        # Take greater of timeout amounts
        timeout_ms = self.timeout_ms if self.timeout_ms > timeout_ms else timeout_ms
        old_timeout = self.timeout_ms
        self.timeout_ms = timeout_ms
        result = self.query('*OPC?').strip() == "1"
        self.timeout_ms = old_timeout
        return result

    def initialize_polling(self):
        """Arm the operation-complete bit for later polling (``*OPC``)."""
        self.write("*OPC")

    def is_operation_complete(self):
        """Poll the event status register's operation-complete bit."""
        opcBit = 1
        esr = int(self.query('*ESR?').strip())
        return opcBit & esr > 0

    def print_info(self):
        """Write a short instrument summary to the log.

        The log attribute is detached while writing so the id_string()
        query made here is not itself echoed into the log.
        """
        _log = self.log
        self.log = None
        _log.write('INSTRUMENT INFO\n')
        _log.write('Connection: {0}\n'.format(self.connection_method))
        _log.write('Address: {0}\n'.format(self.address))
        if self.is_rohde_schwarz():
            _log.write('Make: Rohde & Schwarz\n')
        else:
            _log.write('Make: Unknown\n')
        _log.write('Id string: {0}\n\n'.format(self.id_string()))
        self.log = _log

    def read(self):
        """Read one response from the bus; logs and counts the bytes."""
        buffer = self.bus.read()
        self.bytes_transferred = len(buffer)
        self._print_read(buffer)
        return buffer

    def write(self, buffer):
        """Write *buffer* to the bus; logs and counts the bytes."""
        self.bus.write(buffer)
        self.bytes_transferred = len(buffer)
        self._print_write(buffer)

    def query(self, buffer):
        """Write *buffer*, then read and return the response."""
        self.write(buffer)
        return self.read()

    def read_raw_no_end(self, buffer_size=102400):
        """Read up to *buffer_size* raw bytes without end-of-message handling."""
        buffer = self.bus.read_raw_no_end(buffer_size)
        self.bytes_transferred = len(buffer)
        self._print_read(buffer)
        return buffer

    def write_raw_no_end(self, buffer):
        """Write raw bytes without appending an end-of-message terminator."""
        self.bus.write_raw_no_end(buffer)
        self.bytes_transferred = len(buffer)
        self._print_write(buffer)

    def query_raw_no_end(self, buffer, buffer_size=102400):
        """Raw write followed by a raw read (no end-of-message handling)."""
        self.write_raw_no_end(buffer)
        return self.read_raw_no_end(buffer_size)

    def read_block_data(self):
        """Read one IEEE 488.2 definite-length block; return (size, payload)."""
        buffer = self.read_raw_no_end()
        size, buffer = self.parse_block_data_header(buffer)
        # Keep reading until the full payload (plus terminator) has arrived.
        while len(buffer) < size+1:
            buffer += self.read_raw_no_end()
        buffer = buffer[:size]
        return (size, buffer)

    def write_block_data(self, buffer):
        """Write *buffer* as an IEEE 488.2 definite-length block."""
        header = self.create_block_data_header(len(buffer))
        buffer = header + buffer
        self.write_raw_no_end(buffer)

    def read_block_data_to_file(self, filename, buffer_size=102400):
        """Stream an IEEE 488.2 block from the instrument into *filename*."""
        # A block header is at most 11 bytes ('#9' + nine size digits); make
        # sure the first read captures the whole header.
        if buffer_size < 11:
            buffer_size = 11
        data = self.read_raw_no_end(buffer_size)
        size, data = self.parse_block_data_header(data)
        if len(data) > size:
            data = data[:size]
        with open(filename, 'wb') as file:
            file.write(data)
            size -= len(data)
            while size > 0:
                data = self.read_raw_no_end(buffer_size)
                if len(data) > size:
                    data = data[:size]
                file.write(data)
                size -= len(data)

    def write_block_data_from_file(self, filename, buffer_size=1024*1024):
        """Stream *filename* to the instrument as an IEEE 488.2 block."""
        header = self.create_block_data_header(os.path.getsize(filename))
        self.write_raw_no_end(header)
        with open(filename, 'rb') as file:
            data = file.read(buffer_size)
            while data:
                self.write_raw_no_end(data)
                data = file.read(buffer_size)
        self.write_raw_no_end('\n') # Firmware won't move until to confirm end somehow

    def read_64_bit_vector_block_data(self):
        """Read a block and interpret the payload as a float64 array."""
        size, buffer = self.read_block_data()
        return numpy.frombuffer(buffer, 'float64')

    def write_64_bit_vector_block_data(self, data):
        """Write a numpy float64 array as an IEEE 488.2 block."""
        if not isinstance(data, numpy.ndarray):
            raise ValueError(0, 'Expected numpy.ndarray')
        if data.dtype != 'float64':
            raise ValueError(0, "Expected array values of type 'float64'")
        data = data.tobytes()
        header = self.create_block_data_header(len(data))
        data = header + data
        self.write_raw_no_end(data)

    def read_64_bit_complex_vector_block_data(self):
        """Read a block and interpret the payload as a complex128 array."""
        buffer = self.read_64_bit_vector_block_data()
        return numpy.frombuffer(buffer, 'complex128')

    def write_64_bit_complex_vector_block_data(self, data):
        """Write a numpy complex128 array as an IEEE 488.2 block."""
        if not isinstance(data, numpy.ndarray):
            raise ValueError(0, 'Expected numpy.ndarray')
        if data.dtype != 'complex128':
            # NOTE(review): message says 'float64' but the check above is for
            # 'complex128' — the message text looks wrong.
            raise ValueError(0, "Expected array values of type 'float64'")
        data = data.tobytes()
        header = self.create_block_data_header(len(data))
        data = header + data
        self.write_raw_no_end(data)

    def parse_block_data_header(self, buffer):
        """Split an IEEE 488.2 block header; return (payload_size, rest).

        Header format: '#' + one digit N + N size digits.
        """
        if buffer[0:1] != b'#':
            raise ValueError(0, 'Not bytes in IEEE 488.2 block data format')
        header_size = 2 + int(buffer[1:2])
        header = buffer[:header_size]
        buffer = buffer[header_size:]
        size = int(header[2:])
        return (size, buffer)

    def create_block_data_header(self, buffer_length):
        """Build the IEEE 488.2 block header for a payload of *buffer_length* bytes."""
        size_string = str(buffer_length)
        result = "#" + str(len(size_string)) + size_string
        return result.encode()

    def _print_read(self, buffer):
        """Echo a (possibly truncated) read transaction into the log."""
        if not self.log or self.log.closed:
            return
        buffer = buffer.strip()
        if isinstance(buffer, str):
            if len(buffer) > self._MAX_PRINT:
                buffer = buffer[:self._MAX_PRINT]
                buffer += "..."
            self.log.write('Read: "{0}"\n'.format(buffer))
        else:
            # Binary payload: log the bytes repr instead of quoting.
            if len(buffer) > self._MAX_PRINT:
                buffer = buffer[:self._MAX_PRINT]
                buffer += b"..."
            self.log.write('Read: {0}\n'.format(buffer))
        self.log.write('Bytes: {0}\n'.format(self.bytes_transferred))
        status = self.bus.status_string()
        if status:
            self.log.write('Status: {0}\n'.format(status))
        self.log.write('\n')

    def _print_write(self, buffer):
        """Echo a (possibly truncated) write transaction into the log."""
        if not self.log or self.log.closed:
            return
        buffer = buffer.strip()
        if isinstance(buffer, str):
            if len(buffer) > self._MAX_PRINT:
                buffer = buffer[:self._MAX_PRINT]
                buffer += "..."
            self.log.write('Write: "{0}"\n'.format(buffer))
        else:
            # Binary payload: log the bytes repr instead of quoting.
            if len(buffer) > self._MAX_PRINT:
                buffer = buffer[:self._MAX_PRINT]
                buffer += b"..."
            self.log.write('Write: {0}\n'.format(buffer))
        self.log.write('Bytes: {0}\n'.format(self.bytes_transferred))
        status = self.bus.status_string()
        if status:
            self.log.write('Status: {0}\n'.format(status))
        self.log.write('\n')
| [
"nick.lalic@gmail.com"
] | nick.lalic@gmail.com |
83342e6852c6e51579957f5c07ce95932a2d5367 | 87b8fc1ba1d5a5a9014cf2e69dcabd7df19e1fc2 | /src/pustakalaya_apps/document/migrations/0025_auto_20180510_1559.py | a4ca646f78a9ca9b2cfbb2a94e9d888169e4310b | [] | no_license | Becram/elibrary-olen | 5a885871b05dc9076a04575748a5bbce6b40c69c | f73772569d1a67fda4e5d44af9d1ed2ddd41166e | refs/heads/master | 2020-04-06T11:13:29.676136 | 2018-10-25T06:36:42 | 2018-10-25T06:36:42 | 157,408,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-10 10:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('document', '0024_auto_20180509_1547'),
]
operations = [
migrations.AlterField(
model_name='document',
name='publication_year_on_text',
field=models.CharField(blank=True, max_length=35, null=True, verbose_name='Publication year'),
),
migrations.AlterField(
model_name='document',
name='year_of_available_on_text',
field=models.CharField(blank=True, max_length=35, null=True, verbose_name='Year of available'),
),
]
| [
"nishantkarki305@gmail.com"
] | nishantkarki305@gmail.com |
517cf48de68a5d5a2b690755e4ee2036cd8f8b42 | 65b9a63e8c132f32aeb56961968f5e363bd9a087 | /20191107_Keras乘用车销量预测神经网络训练对比实验/code/run.py | f46def6bde3e7c0d5fea16a16fd4dd136888bbbf | [] | no_license | 346644054/examples2019 | e70f13cfb56c3478fc6e335c730e0e70e70a6226 | 5f9777e7a887e635971156354f56ce065fa3f41e | refs/heads/master | 2022-04-09T03:52:52.973414 | 2020-02-28T03:05:02 | 2020-02-28T03:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | # -*- coding: utf-8 -*-
"""
network
fun:
env:
Linux ubuntu 4.4.0-31-generic x86_64 GNU;python 2.7;tensorflow1.10.1;Keras2.2.4
pip2,matplotlib2.2.3
"""
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import keras
import time
import datetime
import proprocess
import network
from keras import models, optimizers
from keras.layers import Dense, Dropout
from keras.models import load_model,model_from_json
from keras import backend as K
from sklearn import preprocessing
import datetime
from sklearn.utils import shuffle
from keras.callbacks import EarlyStopping
from keras.layers.normalization import BatchNormalization
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
if __name__ == "__main__":
time1 = time.time()
data_path = "dataset/train_sales_data.csv"
#################### ######################
mode = "train" # train test docker
print("#################work mode", mode, "#######################")
if mode == "train":
# 数据预处理
#(trainX, trainY) = proprocess.generate_train_seq(train_images_folder_path, train_track_folder_path)
load_data = proprocess.DataSets
trainX, trainY, validX, validY = load_data.load_passenger_car(data_path)
model = network.build_network()
history = network.train_network(trainX, trainY, validX, validY, model, epochs=1000)
network.plt_result(history, "output", "history.png")
elif mode == "test":
network.helloworld()
else:
print("mode error!")
time2 = time.time()
print('time use:' + str(time2 - time1) + 's')
| [
"elesun2018@gmail.com"
] | elesun2018@gmail.com |
ec037038c62bc775b8da6c8634bf9e729c1f0797 | b2319c5e14c94edfb5a39e4c490c1ae6183651ed | /deepgoweb/apps/accounts/models.py | bc9bce7da4d76623d803e2b74b30143d7724999e | [] | no_license | coolmaksat/deepgoweb | 6d67f45059d7bdb4548d50c182a038c6f9c70a31 | fd4904b6b18dd2af06e000679f406b7353a3534f | refs/heads/master | 2021-06-12T14:42:14.513686 | 2021-04-17T10:23:39 | 2021-04-17T10:23:39 | 161,017,035 | 0 | 0 | null | 2018-12-09T07:49:26 | 2018-12-09T07:49:26 | null | UTF-8 | Python | false | false | 1,119 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save, pre_save
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
GENDER_CHOICES = (
(0, 'Male'),
(1, 'Female'))
def check_unique_email(sender, instance, **kwargs):
    """pre_save receiver: reject saving a User whose email already belongs
    to a different username (enforces email uniqueness at save time)."""
    if instance.email and sender.objects.filter(
            email=instance.email).exclude(username=instance.username).count():
        raise ValidationError(_("The email %(email)s already exists!") % {
            'email': instance.email
        })
pre_save.connect(check_unique_email, sender=User)
class UserProfile(models.Model):
    """One-to-one profile extension of Django's built-in User model."""
    user = models.OneToOneField(
        User, primary_key=True, on_delete=models.CASCADE)
    # Optional demographic data.
    birth_date = models.DateField(null=True)
    gender = models.IntegerField(choices=GENDER_CHOICES, default=0)
def create_user_profile(sender, instance, created, **kwargs):
    """post_save receiver: create an empty UserProfile for each new User."""
    if created:
        UserProfile.objects.create(
            user=instance)
post_save.connect(create_user_profile, sender=User)
| [
"coolmaksat@gmail.com"
] | coolmaksat@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.