blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f3cfb719b58e5c7c05e33d5bb548c5f0a306fa7 | 5168da0fb501135a3c86e4e95679f54a825d69d0 | /openquake/hazardlib/tests/gsim/allen_2012_test.py | 72173ea3d3ef0d060765778b3e310674efae37b0 | [
"AGPL-3.0-only",
"BSD-3-Clause"
] | permissive | GFZ-Centre-for-Early-Warning/shakyground | 266b29c05ea2cfff6d9d61f21b5114282c6fa117 | 0da9ba5a575360081715e8b90c71d4b16c6687c8 | refs/heads/master | 2023-06-01T21:41:11.127323 | 2018-10-09T10:31:48 | 2018-10-09T10:31:48 | 144,732,068 | 1 | 3 | BSD-3-Clause | 2019-11-18T07:58:49 | 2018-08-14T14:32:50 | Python | UTF-8 | Python | false | false | 1,264 | py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.allen_2012 import Allen2012
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
import numpy
# Test data generated from EQRM implementation.
class Allen2012TestCase(BaseGSIMTestCase):
    """Verification tests for the Allen (2012) GSIM against reference CSVs
    generated from the EQRM implementation."""
    # Implementation under test; consumed by BaseGSIMTestCase.check().
    GSIM_CLASS = Allen2012
    def test_mean(self):
        # Mean ground motion, allowing up to 0.4% discrepancy.
        self.check('A12/ALLEN2012_MEAN.csv',
                   max_discrep_percentage=0.4)
    def test_std_total(self):
        # Total standard deviation, tighter 0.1% tolerance.
        self.check('A12/ALLEN2012_STD_TOTAL.csv',
                   max_discrep_percentage=0.1)
| [
"mhaas@gfz-potsdam.de"
] | mhaas@gfz-potsdam.de |
2d9bba4e31f58ea55ec61a6c6c7285e3cf7e8ec9 | 868cd4895a8da17a7e3e2c8da0ec9e139f8d0c30 | /model/sample/fashion/keras_fashion_save.py | db5946f50bb4623f6d9dbc90553ff5c81a33419f | [] | no_license | inJAJA/Study | 35d4e410df7b476a4c298664bb99ce9b09bf6296 | c2fd9a1e1f3a31cb3737cbb4891d848cc802f1d4 | refs/heads/master | 2022-12-21T11:41:15.396610 | 2020-09-20T23:51:45 | 2020-09-20T23:51:45 | 263,212,524 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,958 | py | # 과제 2
# Task: build the model with the Sequential API.
# Record the resulting acc and loss in comments at the bottom.
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from keras.utils.np_utils import to_categorical
#1. data
from keras.datasets import fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
print(x_train.shape) # (60000, 28, 28)
print(x_test.shape) # (10000, 28, 28)
# x : reshape to add the single grayscale channel expected by Conv2D
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
print(x_train.shape) # (60000, 28, 28, 1)
# y : one hot encoding (10 clothing classes)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape) # (60000, 10)
print(y_test.shape) # (10000, 10)
#2. model: stacked Conv2D blocks with pooling/dropout, softmax classifier
model = Sequential()
model.add(Conv2D(100, (3, 3), input_shape = (28, 28, 1), padding = 'same', activation = 'relu'))
model.add(MaxPooling2D(pool_size = 3))
model.add(Dropout(0.2))
model.add(Conv2D(80, (3, 3), padding = 'same', activation = 'relu'))
model.add(MaxPooling2D(pool_size = 3))
model.add(Dropout(0.2))
model.add(Conv2D(60, (3, 3), padding = 'same', activation = 'relu'))
model.add(MaxPooling2D(pool_size = 3))
model.add(Dropout(0.2))
model.add(Conv2D(40, (3, 3), padding = 'same', activation = 'relu'))
model.add(Dropout(0.2))
model.add(Conv2D(20, (3, 3), padding = 'same', activation = 'relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(10, activation = 'softmax'))
""" model_save """
# Save the (untrained) architecture + initial weights.
model.save('./model/sample/fashion/fashion_model_save.h5')
# checkpoint: keep the full model with the best validation loss per epoch
from keras.callbacks import ModelCheckpoint
modelpath = ('./model/sample/fashion/fashion_checkpoint_best_{epoch:02d}-{val_loss:.4f}.hdf5')
checkpoint = ModelCheckpoint(filepath = modelpath, monitor = 'val_loss',
save_best_only = True, save_weights_only = False)
#3. fit
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['acc'])
model.fit(x_train, y_train, epochs = 50, batch_size = 64, callbacks = [checkpoint],
validation_split = 0.2, shuffle = True, verbose =2 )
""" save_weights """
model.save_weights('./model/sample/fashion/fashion_save_weights.h5')
#4. evaluate
loss, acc = model.evaluate(x_test, y_test, batch_size =64)
print('loss: ', loss)
print('acc: ', acc)
# acc: 0.9114999771118164
#3. fit
# NOTE(review): this second compile/fit/evaluate block repeats the training
# for another 50 epochs without the checkpoint callback — it looks like a
# leftover duplicate of the section above; confirm it is intentional.
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['acc'])
model.fit(x_train, y_train, epochs = 50, batch_size = 64,
validation_split = 0.2, shuffle = True, verbose =2 )
#4. evaluate
loss, acc = model.evaluate(x_test, y_test, batch_size =64)
print('loss: ', loss)
print('acc: ', acc)
# acc: 0.9114999771118164
| [
"zaiin4050@gmail.com"
] | zaiin4050@gmail.com |
b8e678aa7a01cca1f318838b5736d392d887983e | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/u4d.py | fea3d8e9b6fc7c7ead651484efac64ab7081ee84 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the whitespace-joined tokens found between a leading and a
    # trailing standalone '"' token.  (Python 2 print-statement syntax.)
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Only the two quote tokens were given: print an empty line.
            print
def main(fileName):
    # Process each line of the file; a line must start with the 'u4D'
    # keyword followed by a quoted payload, otherwise abort with ERROR.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            # NOTE(review): a blank line makes data empty, so data[0]
            # raises IndexError — confirm inputs never contain blank lines.
            if data[0] == 'u4D':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return

if __name__ == '__main__':
    # Input file path is the first command-line argument.
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
ce28bd933d914f1600d5282107cdfeb974612c7c | b823e499ce70d3470f91605c3621248b08069fd8 | /test/ci_tests/utests.py | 67affd5985a01680a567c2614236dad06d5f6cf7 | [
"Apache-2.0"
] | permissive | bbonenfant/wrf-python | 6c6e90b88f56fdbb9a82a52fb0cdab72b6058d22 | 5fe8d3f4e409e94f42bb94716450e56c8e7212d8 | refs/heads/develop | 2020-04-17T19:14:06.127323 | 2019-01-21T18:33:36 | 2019-01-21T18:33:36 | 166,858,996 | 0 | 0 | NOASSERTION | 2019-01-21T18:33:21 | 2019-01-21T18:14:03 | Python | UTF-8 | Python | false | false | 10,366 | py | import unittest as ut
import numpy.testing as nt
import numpy as np
import numpy.ma as ma
import os, sys
import subprocess
from wrf import (getvar, interplevel, interpline, vertcross, vinterp,
disable_xarray, xarray_enabled, to_np,
xy_to_ll, ll_to_xy, xy_to_ll_proj, ll_to_xy_proj,
extract_global_attrs, viewitems, CoordPair,
omp_get_num_procs, omp_set_num_threads)
from wrf.util import is_multi_file
TEST_FILE = "ci_test_file.nc"
REF_FILE = "ci_result_file.nc"
# Python 3
if sys.version_info > (3,):
xrange = range
# Using helpful information at:
# http://eli.thegreenplace.net/2014/04/02/dynamically-generating-python-test-cases
def make_test(varname, wrf_in, referent, multi=False, repeat=3, pynio=False):
    """Build a unittest method that compares getvar(*varname*) against the
    reference values stored in the *referent* NetCDF file.

    The *multi*, *repeat* and *pynio* arguments are kept for signature
    compatibility with the full (non-CI) test suite.
    """
    def test(self):
        from netCDF4 import Dataset as NetCDF
        timeidx = 0
        in_wrfnc = NetCDF(wrf_in)
        refnc = NetCDF(referent)
        # These have a left index that defines the product type
        multiproduct = varname in ("uvmet", "uvmet10", "cape_2d", "cape_3d",
                                   "cfrac")
        ref_vals = refnc.variables[varname][:]
        # Per-variable tolerances below reflect known differences between
        # this implementation and the NCL reference (constants, conversions).
        if (varname == "tc"):
            my_vals = getvar(in_wrfnc, "temp", timeidx=timeidx, units="c")
            tol = 1/100.
            atol = .1 # Note: NCL uses 273.16 as conversion for some reason
            nt.assert_allclose(to_np(my_vals), ref_vals, tol, atol)
        elif (varname == "pw"):
            my_vals = getvar(in_wrfnc, "pw", timeidx=timeidx)
            tol = .5/100.0
            atol = 0 # NCL uses different constants and doesn't use same
            # handrolled virtual temp in method
            nt.assert_allclose(to_np(my_vals), ref_vals, tol, atol)
        elif (varname == "cape_2d"):
            cape_2d = getvar(in_wrfnc, varname, timeidx=timeidx)
            tol = 0/100.
            atol = 200.0
            # Let's only compare CAPE values until the F90 changes are
            # merged back in to NCL. The modifications to the R and CP
            # changes TK enough that non-lifting parcels could lift, thus
            # causing wildly different values in LCL
            nt.assert_allclose(to_np(cape_2d[0,:]), ref_vals[0,:], tol, atol)
        elif (varname == "cape_3d"):
            cape_3d = getvar(in_wrfnc, varname, timeidx=timeidx)
            # Changing the R and CP constants, while keeping TK within
            # 2%, can lead to some big changes in CAPE. Tolerances
            # have been set wide when comparing the with the original
            # NCL. Change back when the F90 code is merged back with
            # NCL
            tol = 0/100.
            atol = 200.0
            #print np.amax(np.abs(to_np(cape_3d[0,:]) - ref_vals[0,:]))
            nt.assert_allclose(to_np(cape_3d), ref_vals, tol, atol)
        else:
            # Default: 2% relative / 0.1 absolute tolerance.
            my_vals = getvar(in_wrfnc, varname, timeidx=timeidx)
            tol = 2/100.
            atol = 0.1
            #print (np.amax(np.abs(to_np(my_vals) - ref_vals)))
            nt.assert_allclose(to_np(my_vals), ref_vals, tol, atol)
    return test
def _get_refvals(referent, varname, repeat, multi):
    """Return the reference array for *varname* from the *referent* NetCDF
    file.

    The *repeat* and *multi* parameters are accepted only for signature
    compatibility with the callers; they are not used for this
    single-file CI test data.
    """
    from netCDF4 import Dataset as NetCDF
    reference_dataset = NetCDF(referent)
    return reference_dataset.variables[varname][:]
def make_interp_test(varname, wrf_in, referent, multi=False,
                     repeat=3, pynio=False):
    """Build a unittest method exercising one interpolation routine
    (interplevel, vertcross, interpline or vinterp, selected by *varname*)
    against reference values from the *referent* NetCDF file.

    Each branch calls the routine twice — once with plain numpy arrays and
    once with the metadata-carrying variables — to check both code paths.
    """
    def test(self):
        from netCDF4 import Dataset as NetCDF
        timeidx = 0
        in_wrfnc = NetCDF(wrf_in)
        if (varname == "interplevel"):
            # Geopotential height interpolated to the 850 hPa level.
            ref_ht_850 = _get_refvals(referent, "interplevel", repeat, multi)
            hts = getvar(in_wrfnc, "z", timeidx=timeidx)
            p = getvar(in_wrfnc, "pressure", timeidx=timeidx)
            # Check that it works with numpy arrays
            hts_850 = interplevel(to_np(hts), p, 850)
            #print (hts_850)
            hts_850 = interplevel(hts, p, 850)
            nt.assert_allclose(to_np(hts_850), ref_ht_850)
        elif (varname == "vertcross"):
            # Vertical cross-section through the domain center at 90 deg.
            ref_ht_cross = _get_refvals(referent, "vertcross", repeat, multi)
            hts = getvar(in_wrfnc, "z", timeidx=timeidx)
            p = getvar(in_wrfnc, "pressure", timeidx=timeidx)
            pivot_point = CoordPair(hts.shape[-1] // 2, hts.shape[-2] // 2)
            # Check that it works with numpy arrays
            ht_cross = vertcross(to_np(hts), to_np(p),
                                 pivot_point=pivot_point, angle=90.)
            #print (ht_cross)
            ht_cross = vertcross(hts, p, pivot_point=pivot_point, angle=90.)
            nt.assert_allclose(to_np(ht_cross), ref_ht_cross, rtol=.01)
        elif (varname == "interpline"):
            # 2 m temperature sampled along a line through the center.
            ref_t2_line = _get_refvals(referent, "interpline", repeat, multi)
            t2 = getvar(in_wrfnc, "T2", timeidx=timeidx)
            pivot_point = CoordPair(t2.shape[-1] // 2, t2.shape[-2] // 2)
            # Check that it works with numpy arrays
            t2_line1 = interpline(to_np(t2), pivot_point=pivot_point,
                                  angle=90.0)
            #print (t2_line1)
            t2_line1 = interpline(t2, pivot_point=pivot_point, angle=90.0)
            nt.assert_allclose(to_np(t2_line1), ref_t2_line)
        elif (varname == "vinterp"):
            # Tk to theta
            fld_tk_theta = _get_refvals(referent, "vinterp", repeat, multi)
            fld_tk_theta = np.squeeze(fld_tk_theta)
            tk = getvar(in_wrfnc, "temp", timeidx=timeidx, units="k")
            interp_levels = [200,300,500,1000]
            # Check that it works with numpy arrays
            field = vinterp(in_wrfnc,
                            field=to_np(tk),
                            vert_coord="theta",
                            interp_levels=interp_levels,
                            extrapolate=True,
                            field_type="tk",
                            timeidx=timeidx,
                            log_p=True)
            #print (field)
            field = vinterp(in_wrfnc,
                            field=tk,
                            vert_coord="theta",
                            interp_levels=interp_levels,
                            extrapolate=True,
                            field_type="tk",
                            timeidx=timeidx,
                            log_p=True)
            # Wide 5% tolerance: vertical interpolation is sensitive to
            # small upstream differences in temperature.
            tol = 5/100.
            atol = 0.0001
            field = np.squeeze(field)
            nt.assert_allclose(to_np(field), fld_tk_theta, tol, atol)
    return test
def make_latlon_test(testid, wrf_in, referent, single, multi=False, repeat=3,
                     pynio=False):
    """Build a unittest method for the coordinate-transform routines.

    *testid* selects the direction: "xy" tests ll_to_xy (lat/lon ->
    grid indices); anything else tests xy_to_ll (grid indices ->
    lat/lon).  *single*/*multi*/*repeat*/*pynio* are kept for signature
    compatibility with the full test suite.
    """
    def test(self):
        from netCDF4 import Dataset as NetCDF
        timeidx = 0
        in_wrfnc = NetCDF(wrf_in)
        refnc = NetCDF(referent)
        if testid == "xy":
            # Since this domain is not moving, the reference values are the
            # same whether there are multiple or single files
            ref_vals = refnc.variables["xy"][:]
            # Lats/Lons taken from NCL script, just hard-coding for now
            lats = [22.0, 25.0, 27.0]
            lons = [-90.0, -87.5, -83.75]
            xy = ll_to_xy(in_wrfnc, lats[0], lons[0])
            nt.assert_allclose(to_np(xy), ref_vals)
        else:
            # Since this domain is not moving, the reference values are the
            # same whether there are multiple or single files
            ref_vals = refnc.variables["ll"][:]
            # i_s, j_s taken from NCL script, just hard-coding for now
            # NCL uses 1-based indexing for this, so need to subtract 1
            x_s = np.asarray([10, 50, 90], int)
            y_s = np.asarray([10, 50, 90], int)
            ll = xy_to_ll(in_wrfnc, x_s[0], y_s[0])
            nt.assert_allclose(to_np(ll), ref_vals)
    return test
class WRFVarsTest(ut.TestCase):
    """Container class: getvar tests are attached dynamically in __main__."""
    longMessage = True
class WRFInterpTest(ut.TestCase):
    """Container class: interpolation tests are attached dynamically in __main__."""
    longMessage = True
class WRFLatLonTest(ut.TestCase):
    """Container class: lat/lon transform tests are attached dynamically in __main__."""
    longMessage = True
if __name__ == "__main__":
    # Generate one test method per variable/method/transform and attach
    # them to the container TestCase classes before handing off to unittest.
    ignore_vars = [] # Not testable yet
    wrf_vars = ["avo", "eth", "cape_2d", "cape_3d", "ctt", "dbz", "mdbz",
                "geopt", "helicity", "lat", "lon", "omg", "p", "pressure",
                "pvo", "pw", "rh2", "rh", "slp", "ter", "td2", "td", "tc",
                "theta", "tk", "tv", "twb", "updraft_helicity", "ua", "va",
                "wa", "uvmet10", "uvmet", "z", "cfrac", "zstag"]
    interp_methods = ["interplevel", "vertcross", "interpline", "vinterp"]
    latlon_tests = ["xy", "ll"]
    import netCDF4
    # One diagnostic-variable test per entry in wrf_vars.
    for var in wrf_vars:
        if var in ignore_vars:
            continue
        test_func1 = make_test(var, TEST_FILE, REF_FILE)
        setattr(WRFVarsTest, 'test_{0}'.format(var), test_func1)
    # One test per interpolation routine.
    for method in interp_methods:
        test_interp_func1 = make_interp_test(method, TEST_FILE,
                                             REF_FILE)
        setattr(WRFInterpTest, 'test_{0}'.format(method),
                test_interp_func1)
    # Lat/lon transforms: only the single-file, non-multi combination is
    # exercised in CI; the test name encodes the combination.
    for testid in latlon_tests:
        for single in (True,):
            for multi in (False,):
                test_ll_func = make_latlon_test(testid, TEST_FILE,
                                                REF_FILE,
                                                single=single, multi=multi,
                                                repeat=3, pynio=False)
                multistr = "" if not multi else "_multi"
                singlestr = "_nosingle" if not single else "_single"
                test_name = "test_{}{}{}".format(testid, singlestr,
                                                 multistr)
                setattr(WRFLatLonTest, test_name, test_ll_func)
    ut.main()
| [
"ladwig@ucar.edu"
] | ladwig@ucar.edu |
610c90abeea73d06318c9768d6a3ccd4ee7ca167 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201029154124.py | 144d082f653b6fb9010adafaf907c6ceb91d68c2 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,711 | py | from django import forms
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
from wagtail.contrib.table_block.blocks import TableBlock
class TitleBlock(blocks.StructBlock):
    """Centered title text rendered via streams/title_block.html."""
    text = blocks.CharBlock(
        required=True,
        # BUG FIX: the keyword was misspelled ``elp_text`` — CharBlock does
        # not accept that argument, so the help text never reached the form.
        help_text='Tekst do wyświetlenia',
    )

    class Meta:
        template = 'streams/title_block.html'
        icon = 'edycja'
        label = 'Tytuł'
        help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class LinkValue(blocks.StructValue):
    """Additional logic for links: resolve a URL from either sub-field."""

    def url(self) -> str:
        """Return the internal page URL if one is chosen, otherwise the
        external link, otherwise an empty string."""
        internal_page = self.get('internal_page')
        external_link = self.get('external_link')
        if internal_page:
            return internal_page.url
        elif external_link:
            return external_link
        return ''
# BUG FIX: removed a dangling, syntactically invalid line that had been left
# here mid-edit (``from django.core.exception`` — an incomplete import that
# made the whole module fail to parse).
class Link(blocks.StructBlock):
    """A link with display text pointing at either an internal page or an
    external URL; the URL is resolved by LinkValue.url()."""
    link_text = blocks.CharBlock(
        max_length=50,
        default='Więcej szczegółów'
    )
    # Only one of the two targets is expected to be filled in.
    internal_page = blocks.PageChooserBlock(
        required=False
    )
    external_link = blocks.URLBlock(
        required=False
    )
    class Meta:
        value_class = LinkValue
class Card(blocks.StructBlock):
    """A single card: bold title, optional text, image and a link."""
    title = blocks.CharBlock(
        max_length=100,
        help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.'
    )
    text = blocks.TextBlock(
        max_length=255,
        # FIX: the help text declares this field optional ("Opcjonalny
        # tekst..."), but TextBlock is required by default.  Make the
        # behavior match the stated intent (and ImageAndTextBlock.text).
        required=False,
        help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.'
    )
    image = ImageChooserBlock(
        help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli'
    )
    link = Link(help_text = 'Wwybierz link')
class CardsBlock(blocks.StructBlock):
    """An arbitrary-length list of Card blocks rendered as a card grid."""
    cards = blocks.ListBlock(
        Card()
    )
    class Meta:
        template = 'streams/card_block.html'
        icon = 'image'
        label = 'Karty standardowe'
class RadioSelectBlock(blocks.ChoiceBlock):
    """A ChoiceBlock whose form widget is swapped for radio buttons,
    reusing the choices already configured on the default widget."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.field.widget = forms.RadioSelect(
            choices=self.field.widget.choices
        )
class ImageAndTextBlock(blocks.StructBlock):
    """Image-plus-text section; the image can sit left or right of the text."""
    image = ImageChooserBlock(help_text='Obraz automatycznie przycięty do rozmiaru 786 na 552 px.')
    # Radio choice controlling which side the image is rendered on.
    image_alignment = RadioSelectBlock(
        choices = (
            ('left','Opraz po lewej stronie'),
            ('right', 'Obraz po prawej stronie'),
        ),
        default = 'left',
        help_text = 'Obraz po lewej stronie, tekst po prawej lub obraz po prawej stronie tekst po lewej.'
    )
    title = blocks.CharBlock(
        max_length=60,
        help_text='Maksymalna długość 60 znaków.'
    )
    text = blocks.CharBlock(
        max_length = 140,
        required = False,
    )
    link = Link()
    class Meta:
        template = 'streams/image_and_text_block.html'
        icon = 'image'
        label = 'Obraz & Tekst'
class CallToActionBlock(blocks.StructBlock):
    """A title with a single link, rendered as a call-to-action banner."""
    title =blocks.CharBlock(
        max_length = 200,
        help_text = 'Maksymalnie 200 znaków.'
    )
    link = Link()
    class Meta:
        template = 'streams/call_to_action_block.html'
        icon = 'plus'
        label = 'Wezwanie do działania'
class PricingTableBlock(TableBlock):
    """Pricing table block (expects four columns; see Meta.help_text)."""
    class Meta:
        template = 'streams/pricing_table_block.html'
        label = 'Tabela cen'
        icon = 'table'
        help_text = 'Twoje tabele z cenami powinny zawierać zawsze 4 kolumny.'
'''
class RichTextWithTitleBlock(blocks.StructBlock):
title = blocks.CharBlock(max_length=50)
context = blocks.RichTextBlock(features=[])
class Meta:
template = 'streams/simple_richtext_block.html'
''' | [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
485bef81f64f6243691a9ec5a7e5c712d0c9c91b | bcaaf8535f639be14558216fb45cab6b4635895b | /list/020 Majority Element Efficient.py | 135fa26b6bac2c4e9bf494cf366e22bffaa22a8e | [] | no_license | RavinderSinghPB/Data-Structure-And-Algorithm-Python | 7f3b61216318e58eb58881d5181561d8e06b092b | 12a126803f4c6bee0e6dbd380604f703cf678de4 | refs/heads/main | 2023-01-25T03:47:16.264926 | 2020-12-02T04:45:02 | 2020-12-02T04:45:02 | 316,112,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | def findMajority(arr, n):
    # Boyer-Moore majority vote, phase 1: find the candidate index.
    # ``res`` tracks the current candidate's index, ``count`` its net votes.
    res = 0
    count = 1
    for i in range(1, n):
        if arr[res] == arr[i]:
            count += 1
        else:
            count -= 1
        if count == 0:
            # Votes cancelled out — adopt the current element as candidate.
            res = i
            count = 1
    # Phase 2: verify the candidate actually occurs more than n // 2 times.
    count = 0
    for i in range(0, n):
        if arr[res] == arr[i]:
            count += 1
    if count <= n // 2:
        # No majority element exists.
        res = -1
    # Returns an index of the majority element, or -1 if there is none.
    return res
if __name__ == "__main__":
    # Demo: 6 appears 5 times out of 8, so it is the majority element.
    arr = [8, 7, 6, 8, 6, 6, 6, 6]
    n = len(arr)
    idx = findMajority(arr, n)
    if idx != -1:
        print(arr[idx])
| [
"ravindersingh.gfg@gmail.com"
] | ravindersingh.gfg@gmail.com |
187a0a5b1b63e6fb5ecdf4a16d709fada04e53b2 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dataartsstudio/huaweicloudsdkdataartsstudio/v1/model/create_workspace_params.py | 69a6d3bae31e48ef092dcf6aae5927f4f94f6316 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,989 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateWorkspaceParams:
    """Request-body model for creating a DataArts Studio workspace
    (auto-generated Huawei Cloud SDK model).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    # Python attribute name -> OpenAPI type name.
    openapi_types = {
        'bad_record_location_name': 'str',
        'description': 'str',
        'eps_id': 'str',
        'job_log_location_name': 'str',
        'name': 'str'
    }

    # Python attribute name -> JSON key on the wire.
    attribute_map = {
        'bad_record_location_name': 'bad_record_location_name',
        'description': 'description',
        'eps_id': 'eps_id',
        'job_log_location_name': 'job_log_location_name',
        'name': 'name'
    }

    def __init__(self, bad_record_location_name=None, description=None, eps_id=None, job_log_location_name=None, name=None):
        """CreateWorkspaceParams

        The model defined in huaweicloud sdk

        :param bad_record_location_name: OBS path for DLI bad-record (dirty) data
        :type bad_record_location_name: str
        :param description: Workspace description
        :type description: str
        :param eps_id: Enterprise project ID; required on the public cloud when the user has enabled enterprise projects
        :type eps_id: str
        :param job_log_location_name: OBS path for job logs
        :type job_log_location_name: str
        :param name: Workspace name
        :type name: str
        """

        self._bad_record_location_name = None
        self._description = None
        self._eps_id = None
        self._job_log_location_name = None
        self._name = None
        self.discriminator = None

        # Optional attributes are only assigned when provided; eps_id and
        # name are always assigned.
        if bad_record_location_name is not None:
            self.bad_record_location_name = bad_record_location_name
        if description is not None:
            self.description = description
        self.eps_id = eps_id
        if job_log_location_name is not None:
            self.job_log_location_name = job_log_location_name
        self.name = name

    @property
    def bad_record_location_name(self):
        """Gets the bad_record_location_name of this CreateWorkspaceParams.

        OBS path for DLI bad-record (dirty) data

        :return: The bad_record_location_name of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._bad_record_location_name

    @bad_record_location_name.setter
    def bad_record_location_name(self, bad_record_location_name):
        """Sets the bad_record_location_name of this CreateWorkspaceParams.

        OBS path for DLI bad-record (dirty) data

        :param bad_record_location_name: The bad_record_location_name of this CreateWorkspaceParams.
        :type bad_record_location_name: str
        """
        self._bad_record_location_name = bad_record_location_name

    @property
    def description(self):
        """Gets the description of this CreateWorkspaceParams.

        Workspace description

        :return: The description of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this CreateWorkspaceParams.

        Workspace description

        :param description: The description of this CreateWorkspaceParams.
        :type description: str
        """
        self._description = description

    @property
    def eps_id(self):
        """Gets the eps_id of this CreateWorkspaceParams.

        Enterprise project ID; required on the public cloud when the user has enabled enterprise projects

        :return: The eps_id of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._eps_id

    @eps_id.setter
    def eps_id(self, eps_id):
        """Sets the eps_id of this CreateWorkspaceParams.

        Enterprise project ID; required on the public cloud when the user has enabled enterprise projects

        :param eps_id: The eps_id of this CreateWorkspaceParams.
        :type eps_id: str
        """
        self._eps_id = eps_id

    @property
    def job_log_location_name(self):
        """Gets the job_log_location_name of this CreateWorkspaceParams.

        OBS path for job logs

        :return: The job_log_location_name of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._job_log_location_name

    @job_log_location_name.setter
    def job_log_location_name(self, job_log_location_name):
        """Sets the job_log_location_name of this CreateWorkspaceParams.

        OBS path for job logs

        :param job_log_location_name: The job_log_location_name of this CreateWorkspaceParams.
        :type job_log_location_name: str
        """
        self._job_log_location_name = job_log_location_name

    @property
    def name(self):
        """Gets the name of this CreateWorkspaceParams.

        Workspace name

        :return: The name of this CreateWorkspaceParams.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this CreateWorkspaceParams.

        Workspace name

        :param name: The name of this CreateWorkspaceParams.
        :type name: str
        """
        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateWorkspaceParams):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
5a55cace1700df5eeea3d479465e72d438fb341c | 2cd86616a2d29b2a19ddb906c5216ed7a48d2208 | /biobb_vs/gromacs_wrapper/genion.py | 83452c81f210fea238442bc7e2bbbfc922723b74 | [
"Apache-2.0"
] | permissive | bioexcel/biobb_vs_alpha | e0c8ab1bad864bd3a87cafa1ee7f6eddc50ee3ae | 5a7403bad0935ee4380c377d930bd24967770501 | refs/heads/master | 2021-09-26T00:24:15.473393 | 2018-10-26T12:57:16 | 2018-10-26T12:57:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,457 | py | #!/usr/bin/env python
"""Python wrapper for the GROMACS genion module
"""
import sys
import json
import configuration.settings as settings
from command_wrapper import cmd_wrapper
from tools import file_utils as fu
class Genion(object):
    """Wrapper for the 5.1.2 version of the genion module

    Args:
        input_tpr_path (str): Path to the input portable run input TPR file.
        output_gro_path (str): Path to the input structure GRO file.
        input_top_zip_path (str): Path the input TOP topology in zip format.
        output_top_zip_path (str): Path the output topology TOP and ITP files zipball.
        properties (dic):
            output_top_path (str): Path the output topology TOP file.
            replaced_group (str): Group of molecules that will be replaced by the solvent.
            neutral (bool): Neutralize the charge of the system.
            concentration (float): Concentration of the ions in (mol/liter).
            seed (int): Seed for random number generator.
            gmx_path (str): Path to the GROMACS executable binary.
    """
    def __init__(self, input_tpr_path, output_gro_path, input_top_zip_path,
                 output_top_zip_path, properties, **kwargs):
        # Properties may arrive as a JSON string; parse it into a dict.
        # (``basestring`` — this module is Python 2 code.)
        if isinstance(properties, basestring):
            properties=json.loads(properties)
        self.input_tpr_path = input_tpr_path
        self.output_gro_path = output_gro_path
        self.input_top_zip_path = input_top_zip_path
        self.output_top_zip_path = output_top_zip_path
        # Optional properties with their defaults.
        self.output_top_path = properties.get('output_top_path','gio.top')
        self.replaced_group = properties.get('replaced_group','SOL')
        self.neutral = properties.get('neutral',False)
        self.concentration = properties.get('concentration',0.05)
        self.seed = properties.get('seed',1993)
        self.gmx_path = properties.get('gmx_path',None)
        self.mutation = properties.get('mutation',None)
        self.step = properties.get('step',None)
        self.path = properties.get('path','')
        self.mpirun = properties.get('mpirun', False)
        self.mpirun_np = properties.get('mpirun_np', None)
        self.global_log= properties.get('global_log', None)

    def launch(self):
        """Launches the execution of the GROMACS genion module.
        """
        if self.global_log is not None:
            if self.concentration:
                self.global_log.info(19*' '+'To reach up '+str(self.concentration)+' mol/litre concentration')
        out_log, err_log = fu.get_logs(path=self.path, mutation=self.mutation, step=self.step)
        self.output_top_path = fu.add_step_mutation_path_to_name(self.output_top_path, self.step, self.mutation)
        # Unzip topology to topology_out
        fu.unzip_top(zip_file=self.input_top_zip_path, top_file=self.output_top_path)
        gmx = 'gmx' if self.gmx_path is None else self.gmx_path
        cmd = [gmx, 'genion',
               '-s', self.input_tpr_path,
               '-o', self.output_gro_path,
               '-p', self.output_top_path]
        # Optional MPI prefix: "mpirun [-np N] gmx genion ...".
        if self.mpirun_np is not None:
            cmd.insert(0, str(self.mpirun_np))
            cmd.insert(0, '-np')
        if self.mpirun:
            cmd.insert(0, 'mpirun')
        if self.neutral:
            cmd.append('-neutral')
        if self.concentration:
            cmd.append('-conc')
            cmd.append(str(self.concentration))
        if self.seed is not None:
            cmd.append('-seed')
            cmd.append(str(self.seed))
        # genion reads the group to replace from stdin: feed it either via
        # a bash here-string (MPI case) or by piping echo into the command.
        if self.mpirun:
            cmd.append('<<<')
            cmd.append('\"'+self.replaced_group+'\"')
        else:
            cmd.insert(0, '|')
            cmd.insert(0, '\"'+self.replaced_group+'\"')
            cmd.insert(0, 'echo')
        command = cmd_wrapper.CmdWrapper(cmd, out_log, err_log)
        returncode = command.launch()
        # zip new_topology
        fu.zip_top(self.output_top_path, self.output_top_zip_path, remove_files=True)
        return returncode
#Creating a main function to be compatible with CWL
def main():
    # Positional args: system, step, properties YAML, then the four paths.
    system=sys.argv[1]
    step=sys.argv[2]
    properties_file=sys.argv[3]
    # Load the per-step properties dict from the YAML configuration.
    prop = settings.YamlReader(properties_file, system).get_prop_dic()[step]
    Genion(input_tpr_path = sys.argv[4],
           output_gro_path = sys.argv[5],
           input_top_zip_path = sys.argv[6],
           output_top_zip_path = sys.argv[7],
           properties=prop).launch()

if __name__ == '__main__':
    main()
| [
"andriopau@gmail.com"
] | andriopau@gmail.com |
f78acc475c7f7428db7e6c915fe5f87224ca1fd2 | 1e177ebdcb470f738c058606ac0f86a36085f661 | /Python/AdafruitIO/PublishMQTT.py | 4768892f13f10590fc92c6a35aa4d873ecebd641 | [] | no_license | robingreig/raspi-git | 5cbdd295c1048a0571aa2c2f8576438269439f07 | 7373bf94557d7a88c8f343362ba64f9cd19c8ce7 | refs/heads/master | 2023-08-31T03:16:17.286700 | 2023-08-26T11:54:23 | 2023-08-26T11:54:23 | 16,873,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | #!/usr/bin/python
import paho.mqtt.publish as publish
import time

# Toggle the LED via MQTT: publish "0" then "1" to the "ledStatus" topic
# on broker host "raspi13", one second apart.
print("Sending 0...")
publish.single("ledStatus", "0", hostname="raspi13")
time.sleep(1)
print("Sending 1...")
publish.single("ledStatus", "1", hostname="raspi13")
| [
"robin.greig@calalta.com"
] | robin.greig@calalta.com |
7b709a119701579dbcb5028cb79c513adfa97765 | f1fcd165cd8444310ce5d201e481e3982dc28110 | /easy/1901/190114/jang.py | bdbf8e0098b135c5249f6bb41fd103aac1ca0c6f | [] | no_license | JoosJuliet/algoStudy | 310a71a0fcc8f3c23281544cf3458ed999040176 | 3fc1e850f9d8b9f290f41fddd59ff403fbfffa05 | refs/heads/master | 2020-04-20T19:26:25.485875 | 2019-03-27T22:37:27 | 2019-03-27T22:37:27 | 169,049,593 | 1 | 0 | null | 2019-02-04T08:43:07 | 2019-02-04T08:43:07 | null | UTF-8 | Python | false | false | 264 | py | d, m, y = map(int, input().split())
# Second input line: the due date (day, month, year); the first line,
# read above, is the actual return date.
d2, m2, y2 = map(int, input().split())
fine = 0
# Fine scale: years late -> 10000 per year, months late (same year)
# -> 500 per month, days late (same month) -> 15 per day.
# NOTE(review): the classic "Library Fine" problem charges a FLAT 10000
# when returned in a later year, not 10000 per year — confirm which
# problem statement this solution targets.
if y - y2 > 0:
    fine += (y-y2)*10000
elif y - y2 == 0 and m - m2 > 0:
    fine += (m-m2)*500
elif y - y2 == 0 and m - m2 == 0 and d - d2 > 0:
    fine += (d-d2)*15
print(fine)
"wkdtjsgur100@naver.com"
] | wkdtjsgur100@naver.com |
bb176b48ac2cd265804ea129251c713845c40005 | 42aa9bb3d11a191c37c83674f43950569027b664 | /tests/test_log.py | c70839536656e1ada7a9347434fe085e2c17f9eb | [
"Apache-2.0",
"MIT"
] | permissive | microsoft/WAFBench | b594d6d51d3c9ae3babc9b45f8bdf1b392ccba96 | a09a0025a72d82ad880df32f5e0bbad55276fe01 | refs/heads/master | 2023-09-02T05:46:01.623055 | 2022-09-08T15:16:47 | 2022-09-08T15:16:47 | 138,645,963 | 79 | 21 | MIT | 2022-09-08T15:16:48 | 2018-06-25T20:23:14 | C | UTF-8 | Python | false | false | 7,610 | py | from ftw_compatible_tool import log
from ftw_compatible_tool import broker
from ftw_compatible_tool import context
from ftw_compatible_tool import traffic
_TEST_MODSECURITY_LOG = '''
2019/04/11 19:20:46 [error] 392#392: [client 172.17.0.1] ModSecurity: collection_store_ex_origin: Failed to access DBM file "/var/log/modsecurity//ip": No such file or directory [hostname
"0a9de2faed93"] [uri "/AppScan_fingerprint/MAC_ADDRESS_01234567890.html"] [unique_id "AFAcUcOOAcAcAcXcAcwcAcAc"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#412: [client 172.17.0.1] ModSecurity: Access denied with code 403 (phase 1). Pattern match "magic-(\\w*)" at REQUEST_HEADERS:Host. [file "/root/src/owasp-modsecurity-crs-3.1.0/modsecurity_init.conf"] [line "8"] [id "010203"] [msg "delimiter-magic-284006541951478418388062796500664128516"] [hostname "0a9de2faed93"] [uri "/"] [unique_id "AcAcAcAcAcAcAcAcAcAcCcAc"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#413: [client 172.17.0.1] ModSecurity: Access denied with code 403 (phase 1). Pattern match "magic-(\\w*)" at REQUEST_HEADERS:Host. [file "/root/src/owasp-modsecurity-crs-3.1.0/modsecurity_init.conf"] [line "8"] [id "010203"] [msg "delimiter-magic-284026009895571423421096282120140690436"] [hostname "0a9de2faed93"] [uri "/"] [unique_id "A8UcAcscMcAcAcAcAcAlAcAA"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#414: [client 172.17.0.1] ModSecurity: Warning. Matched phrase "/nessus_is_probing_you_" at REQUEST_FILENAME. [file "/root/src/owasp-modsecurity-crs-3.1.0/rules/REQUEST-913-SCANNER-DETECTION.conf"] [line "108"] [id "913120"] [msg "Found request filename/argument associated with security scanner"] [data "Matched Data: /nessus_is_probing_you_ found within REQUEST_FILENAME: /nessus_is_probing_you_"] [severity "CRITICAL"] [ver "OWASP_CRS/3.1.0"] [tag "application-multi"] [tag "language-multi"] [tag "platform-multi"] [tag "attack-reputation-scanner"] [tag "OWASP_CRS/AUTOMATION/SECURITY_SCANNER"] [tag "WASCTC/WASC-21"] [tag "OWASP_TOP_10/A7"] [tag "PCI/6.5.10"] [hostname "0a9de2faed93"] [uri "/nessus_is_probing_you_"] [unique_id "AnAcucAcacUOAUhcAcAcM5zc"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#414: [client 172.17.0.1] ModSecurity: Warning. Operator EQ matched 0 at REQUEST_HEADERS. [file "/root/src/owasp-modsecurity-crs-3.1.0/rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf"] [line "1335"] [id "920320"] [msg "Missing User Agent Header"] [severity "NOTICE"] [ver "OWASP_CRS/3.1.0"] [tag "application-multi"] [tag "language-multi"] [tag "platform-multi"] [tag "attack-protocol"] [tag "OWASP_CRS/PROTOCOL_VIOLATION/MISSING_HEADER_UA"] [tag "WASCTC/WASC-21"] [tag "OWASP_TOP_10/A7"] [tag "PCI/6.5.10"] [tag "paranoia-level/2"] [hostname "0a9de2faed93"] [uri "/nessus_is_probing_you_"] [unique_id "AnAcucAcacUOAUhcAcAcM5zc"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#414: [client 172.17.0.1] ModSecurity: Access denied with code 403 (phase 2). Operator GE matched 5 at TX:anomaly_score. [file "/root/src/owasp-modsecurity-crs-3.1.0/rules/REQUEST-949-BLOCKING-EVALUATION.conf"] [line "93"] [id "949110"] [msg "Inbound Anomaly Score Exceeded (Total Score: 7)"] [severity "CRITICAL"] [tag "application-multi"] [tag "language-multi"] [tag "platform-multi"] [tag "attack-generic"] [hostname "0a9de2faed93"] [uri "/nessus_is_probing_you_"] [unique_id "AnAcucAcacUOAUhcAcAcM5zc"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#392: [client 172.17.0.1] ModSecurity: Warning. Operator GE matched 5 at TX:inbound_anomaly_score. [file "/root/src/owasp-modsecurity-crs-3.1.0/rules/RESPONSE-980-CORRELATION.conf"] [line "86"] [id "980130"] [msg "Inbound Anomaly Score Exceeded (Total Inbound Score: 7 - SQLI=0,XSS=0,RFI=0,LFI=0,RCE=0,PHPI=0,HTTP=0,SESS=0): Missing User Agent Header; individual paranoia level scores: 5, 2, 0, 0"] [tag "event-correlation"] [hostname "0a9de2faed93"] [uri "/nessus_is_probing_you_"] [unique_id "AnAcucAcacUOAUhcAcAcM5zc"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#392: [client 172.17.0.1] ModSecurity: collection_store_ex_origin: Failed to access DBM file "/var/log/modsecurity//ip": No such file or directory [hostname
"0a9de2faed93"] [uri "/nessus_is_probing_you_"] [unique_id "AnAcucAcacUOAUhcAcAcM5zc"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#415: [client 172.17.0.1] ModSecurity: Access denied with code 403 (phase 1). Pattern match "magic-(\\w*)" at REQUEST_HEADERS:Host. [file "/root/src/owasp-modsecurity-crs-3.1.0/modsecurity_init.conf"] [line "8"] [id "010203"] [msg "delimiter-magic-284026009895571423421096282120140690436"] [hostname "0a9de2faed93"] [uri "/"] [unique_id "AcPcAUANAcQcAcAcAcmcAtAc"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#416: [client 172.17.0.1] ModSecurity: Access denied with code 403 (phase 1). Pattern match "magic-(\\w*)" at REQUEST_HEADERS:Host. [file "/root/src/owasp-modsecurity-crs-3.1.0/modsecurity_init.conf"] [line "8"] [id "010203"] [msg "delimiter-magic-282099401237516304385679803300151104516"] [hostname "0a9de2faed93"] [uri "/"] [unique_id "AcAcAcAc3cscXcAiAcAcAcAc"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#417: [client 172.17.0.1] ModSecurity: Warning. Match of "pm AppleWebKit Android" against "REQUEST_HEADERS:User-Agent" required. [file "/root/src/owasp-modsecurity-crs-3.1.0/rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf"] [line "1276"] [id "920300"] [msg "Request Missing an Accept Header"] [severity "NOTICE"] [ver "OWASP_CRS/3.1.0"] [tag "application-multi"] [tag "language-multi"] [tag "platform-multi"] [tag "attack-protocol"] [tag "OWASP_CRS/PROTOCOL_VIOLATION/MISSING_HEADER_ACCEPT"] [tag "WASCTC/WASC-21"] [tag "OWASP_TOP_10/A7"] [tag "PCI/6.5.10"] [tag "paranoia-level/2"] [hostname "0a9de2faed93"] [uri "/"] [unique_id "AcAp75AcAlAYAcAcAcXcAcAh"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#418: [client 172.17.0.1] ModSecurity: Warning. Match of "pm AppleWebKit Android" against "REQUEST_HEADERS:User-Agent" required. [file "/root/src/owasp-modsecurity-crs-3.1.0/rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf"] [line "1276"] [id "920300"] [msg "Request Missing an Accept Header"] [severity "NOTICE"] [ver "OWASP_CRS/3.1.0"] [tag "application-multi"] [tag "language-multi"] [tag "platform-multi"] [tag "attack-protocol"] [tag "OWASP_CRS/PROTOCOL_VIOLATION/MISSING_HEADER_ACCEPT"] [tag "WASCTC/WASC-21"] [tag "OWASP_TOP_10/A7"] [tag "PCI/6.5.10"] [tag "paranoia-level/2"] [hostname "0a9de2faed93"] [uri "/index.html"] [unique_id "AcAcAcpcAcAcAcAcAcANA6Ac"] [requestheaderhostname "0a9de2faed93"]
2019/04/11 19:20:46 [error] 392#419: [client 172.17.0.1] ModSecurity: Access denied with code 403 (phase 1). Pattern match "magic-(\\w*)" at REQUEST_HEADERS:Host. [file "/root/src/owasp-modsecurity-crs-3.1.0/modsecurity_init.conf"] [line "8"] [id "010203"] [msg "delimiter-magic-282099401237516304385679803300151104516"] [hostname "0a9de2faed93"] [uri "/"] [unique_id "ncAcYcAnAkAc4cAWAcAcACAc"] [requestheaderhostname "0a9de2faed93"]
'''
def test_log_extract():
    """Feed the sample ModSecurity error log through the broker line by line
    and verify that exactly two SQL_COMMAND events are emitted."""
    hits = []

    def on_sql_command(*_args):
        hits.append(1)

    ctx = context.Context(broker.Broker(), traffic.Delimiter("magic"))
    ctx.broker.subscribe(broker.TOPICS.SQL_COMMAND, on_sql_command)
    # NOTE(review): LogCollector presumably registers itself on the broker at
    # construction time; the reference is kept alive for the test's duration.
    collector = log.LogCollector(ctx)
    for raw_line in _TEST_MODSECURITY_LOG.splitlines():
        ctx.broker.publish(broker.TOPICS.RAW_LOG, raw_line + "\n")
    assert len(hits) == 2
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
8a974657debbb33dd868b65d2757c458567a3ffd | b0a162b1db3004b30cd735500971edea39e775ed | /wave1/Labs/Lab1of2.2.py | eec6582f29a7c4c13c1223c12938dc4853b6c6c6 | [] | no_license | geofferyj/WEJAPA_INTERNSHIP | 40da98c335affbbaf74d018d8a2f38fb30183f10 | 92a101d0280e0f732dc3cfd8727e436de86cdb62 | refs/heads/master | 2022-12-08T04:40:18.627904 | 2020-08-16T07:39:51 | 2020-08-16T07:39:51 | 286,264,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #Quiz: Assign and Modify Variables
#Now it's your turn to work with variables. The comments in this quiz (the lines that begin with #) have instructions for creating and modifying variables. After each comment write a line of code that implements the instruction.
#Note that this code uses scientific notation to define large numbers. 4.445e8 is equal to 4.445 * 10 ** 8 which is equal to 444500000.0.
# Write your function here. Make sure to use "population_density" as the name of the function, so the test below works.
def population_density(val1, val2):
    """Return the population density: population (val1) divided by area (val2)."""
    density = val1 / val2
    return density
# Test cases for your function. Don't change anything below this comment.
# Case 1: 10 people over 1 unit of area -> density is exactly 10.
test1 = population_density(10, 1)
expected_result1 = 10
print("expected result: {}, actual result: {}".format(expected_result1, test1))
# Case 2: non-integer density; the printed value is a float approximation.
test2 = population_density(864816, 121.4)
expected_result2 = 7123.6902801
print("expected result: {}, actual result: {}".format(expected_result2, test2))
| [
"geofferyjoseph1@gmail.com"
] | geofferyjoseph1@gmail.com |
45f625839b142e095671acdb09a4ea53a6a605a6 | 92a0977e694e49ca70adbcaaa0fd6a66576f85e6 | /blog/migrations/0001_initial.py | d46ac7e7b66d02b5a4f68a2609c3a3a894175958 | [] | no_license | Melody1992/my-first-blog | 9ca6cbf8f47257b1d7d12af98d8797cb3d3f2972 | 40e44bcc48883626e6ecc34417ded1aa7de12d08 | refs/heads/master | 2021-01-20T12:16:45.719637 | 2017-08-29T10:00:44 | 2017-08-29T10:00:44 | 101,709,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-29 09:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration for the blog app: creates the Post model.
    initial = True
    dependencies = [
        # Post.author references AUTH_USER_MODEL, which may be a swapped model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                # Standard auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # Set at creation; publishing is a separate, optional step.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting the author cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"you@example.com"
] | you@example.com |
195d63b02681ad0d2d5fb06c1b8351574c2a7ff4 | 59f8e783abe9949cf9e9aef5936d2349f7df7414 | /methyl/ma_analysis/epigenotyping-old/decodingpath.py | 026184ea4b5bc9b8fb3a55161e1cddb4b2520f1d | [] | no_license | bhofmei/analysis-scripts | c4d8eafde2834b542c71c305e66c4e6f8a6e2c57 | 189bf355f0f878c5603b09a06b3b50b61a11ad93 | refs/heads/master | 2021-01-17T17:26:30.799097 | 2019-10-27T12:49:10 | 2019-10-27T12:49:10 | 56,076,808 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | py | ### Decoding types ###
import pandas as pd
import numpy as np
import math
class DecodePath:
    """Base class for decoding algorithms over per-bin epigenotype scores.

    Parameters
    ----------
    df : pandas.DataFrame
        One row per bin, with id columns 'sample', 'bin', 'prediction'
        (optionally 'num.feat') plus one score column per state
        ('mother', 'MPV', 'father').
    transMat : array-like, m x m
        Transition probabilities as fractions (not logs).
    """

    def __init__(self, df, transMat):
        # Fixed label order; it is also the row order of self.emissions.
        self.labels = np.array(['mother', 'MPV', 'father'])
        self.data = df  # n bins x columns
        self.transitions = transMat  # m x m fraction probabilities (not log)
        self.emissions = self._getEmissions(df)  # m states x n bins
        self.states, self.size = self.emissions.shape

    def _getEmissions(self, data):
        """Return an (m states x n bins) float64 array of per-state scores.

        Melts the state columns of *data*, pivots to one row per state and
        one column per bin, then reorders the rows to match self.labels.
        """
        idVars = ['sample', 'bin', 'prediction']
        if 'num.feat' in list(data):
            idVars += ['num.feat']
        dfm = pd.melt(data, id_vars=idVars)
        dfp = dfm.pivot(index='variable', columns='bin', values='value')
        dfe = dfp.reindex(self.labels)
        return dfe.values.astype(np.float64)
class DecodeViterbi(DecodePath):
    """Viterbi decoding: annotates the data with the single most likely state
    path (columns 'vit.score.*' and 'vit.prediction')."""

    def run(self):
        """Main entry point: fill the DP tables, trace back, return the data."""
        self._initializeV()
        self._fillV()
        self._pathV()
        return self.data

    def _initializeV(self):
        # Work in log space so per-bin scores can be summed along a path.
        self.log_transitions = np.log(self.transitions)  # m x m
        # DP tables: best path log-score and back-pointer per (bin, state).
        self.probabilities = np.zeros((self.size, self.states))  # n x m
        self.traceback = np.zeros((self.size, self.states), dtype=np.int8)  # n x m

    def _fillV(self):
        # Fill bins left to right, states in label order.
        for i in range(self.size):
            for j in range(self.states):
                em = self.emissions[j, i]  # note: emissions is m x n
                maxS, maxP = self._computeScore(i, j, em)
                self.probabilities[i, j] = maxS
                self.traceback[i, j] = maxP

    def _computeScore(self, i, j, prob):
        """Return (best score, best previous state) for state j at bin i."""
        # Generalized from np.array([prob] * 3), which hard-coded three states.
        scores = np.full(self.states, prob)
        for k in range(self.states):
            if i != 0:
                scores[k] += self.probabilities[i - 1, k]
            scores[k] += self.log_transitions[k, j]
        maxS = scores.max()
        maxP = (-1 if i == 0 else scores.argmax())  # -1 marks the first bin
        return maxS, maxP

    def _pathV(self):
        """Record per-state scores and decode the best path into self.data."""
        self.data['vit.score.mother'] = self.probabilities[:, 0]
        self.data['vit.score.MPV'] = self.probabilities[:, 1]
        self.data['vit.score.father'] = self.probabilities[:, 2]
        self.data['vit.prediction'] = 'NA'
        vals = self.probabilities[self.size - 1]
        # Start at the best final state and follow back-pointers to bin 0.
        nextJ = vals.argmax()
        for i in range(self.size - 1, -1, -1):
            nextJ = self._tracebackHelper(i, nextJ)
            if nextJ == -1:
                break  # finished traceback

    def _tracebackHelper(self, i, j):
        """Write label j at bin i; return the back-pointer to follow next."""
        colI = np.nonzero(self.data.columns.values == 'vit.prediction')[0][0]
        self.data.iloc[i, colI] = self.labels[j]
        return self.traceback[i, j]
class DecodeForwardBackward(DecodePath):
    """Forward-backward decoding: annotates the data with per-bin posterior
    state probabilities (columns 'fb.score.*' and 'fb.prediction')."""

    def run(self):
        """Main entry point: run both passes and return the annotated data."""
        self._initializeF()
        self._fillF()
        self._pathF()
        return self.data

    def _initializeF(self):
        # Emissions are stored as logs; this algorithm works on probabilities.
        self.prob_emissions = np.exp(self.emissions)
        # Forward/backward tables carry one extra boundary column.
        self.forward = np.zeros((self.states, self.size + 1))  # m x n+1
        self.forward[:, 0] = 1.0 / self.states  # uniform start distribution
        self.backward = np.zeros((self.states, self.size + 1))  # m x n+1
        self.backward[:, -1] = 1.0
        # Posterior state distribution per bin.
        self.posterior = np.zeros((self.size, self.states))  # n x m

    def _fillF(self):
        # Forward pass: f[i+1] = (f[i] . T) * e[i], renormalized at every bin
        # to avoid underflow.  np.dot replaces the deprecated np.matrix algebra
        # of the original; the computed values are identical.
        for i in range(self.size):
            nxt = np.dot(self.forward[:, i], self.transitions) * self.prob_emissions[:, i]
            self.forward[:, i + 1] = nxt / np.sum(nxt)
        # Backward pass: b[i-1] = T . (e[i-1] * b[i]), renormalized likewise.
        for i in range(self.size, 0, -1):
            prev = np.dot(self.transitions, self.prob_emissions[:, i - 1] * self.backward[:, i])
            self.backward[:, i - 1] = prev / np.sum(prev)
        # Posterior is the normalized elementwise product of the two passes.
        tmpPosterior = self.forward[:, 1:] * self.backward[:, :-1]
        tmpPosterior = tmpPosterior / np.sum(tmpPosterior, 0)
        self.posterior = np.transpose(tmpPosterior)

    def _pathF(self):
        """Record posteriors and the per-bin argmax label into self.data."""
        self.data['fb.score.mother'] = self.posterior[:, 0]
        self.data['fb.score.MPV'] = self.posterior[:, 1]
        self.data['fb.score.father'] = self.posterior[:, 2]
        maxI = self.posterior.argmax(axis=1)
        self.data['fb.prediction'] = self.labels[maxI]
class DecodeAll(DecodeViterbi, DecodeForwardBackward):
    """Runs both Viterbi and forward-backward decoding over the same data."""

    def run(self):
        """Run both decoders and return the annotated data frame."""
        # Viterbi first (vit.* columns), then forward-backward (fb.* columns).
        steps = (
            self._initializeV, self._fillV, self._pathV,
            self._initializeF, self._fillF, self._pathF,
        )
        for step in steps:
            step()
        return self.data
| [
"bhofmei@gmail.com"
] | bhofmei@gmail.com |
1b371ce2d76c8b9c0dafca699c63800a51a7d093 | 4d4fcde3efaa334f7aa56beabd2aa26fbcc43650 | /server/src/uds/migrations/0037_service_token.py | 95647ca5e24fc6567d3090492d73e90580495ee2 | [] | no_license | xezpeleta/openuds | a8b11cb34eb0ef7bb2da80f67586a81b2de229ef | 840a7a02bd7c9894e8863a8a50874cdfdbf30fcd | refs/heads/master | 2023-08-21T17:55:48.914631 | 2021-10-06T10:39:06 | 2021-10-06T10:39:06 | 414,489,331 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # Generated by Django 3.0.3 on 2020-02-08 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds an optional, unique registration token to Service.
    dependencies = [
        ('uds', '0036_auto_20200131_1224'),
    ]
    operations = [
        migrations.AddField(
            model_name='service',
            name='token',
            # Nullable and unique: under typical SQL NULL semantics only
            # non-null tokens must be distinct.
            field=models.CharField(blank=True, default=None, max_length=32, null=True, unique=True),
        ),
    ]
| [
"dkmaster@dkmon.com"
] | dkmaster@dkmon.com |
f2bdcb2ebe82957a5b18e994bc9df717a42e7ea2 | 99a4d88d2004bad1e9e79f92c33a9ab1eb5644c4 | /Solution/BOJ/11286 절댓값 힙.py | 94741ae799a6f6a4b3acd56a64aeef4332d76e6b | [] | no_license | ginger-kang/Problem-Solving | cb64a4f6a0275419fe7be67fb50a9eb48e4b5869 | 1fc074d39a47a416d990e6e3b95a6c9f62a838f7 | refs/heads/master | 2023-08-14T13:54:00.706663 | 2021-09-10T11:39:16 | 2021-09-10T11:39:16 | 255,123,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | import heapq
import sys
input = sys.stdin.readline
total_queries = int(input())
abs_heap = []  # entries are (|x|, x); tuple order breaks ties toward the smaller value
for _ in range(total_queries):
    value = int(input())
    if value:
        heapq.heappush(abs_heap, (abs(value), value))
    elif abs_heap:
        # 0 with a non-empty heap: remove and print the smallest-|x| element.
        print(heapq.heappop(abs_heap)[1])
    else:
        print(0)  # 0 with an empty heap
| [
"kdhoon07@gmail.com"
] | kdhoon07@gmail.com |
4eda55367dffe239294e5f2e103ff3db01021f09 | 30fe7671b60825a909428a30e3793bdf16eaaf29 | /.metadata/.plugins/org.eclipse.core.resources/.history/18/e08b59a165fa00161174a93fd5908e78 | 4f3868130d32db869552206dcc0d483e87aa16e2 | [] | no_license | abigdream84/PythonStudy | 0fc7a3b6b4a03a293b850d0ed12d5472483c4fb1 | 059274d3ba6f34b62ff111cda3fb263bd6ca8bcb | refs/heads/master | 2021-01-13T04:42:04.306730 | 2017-03-03T14:54:16 | 2017-03-03T14:54:16 | 79,123,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,174 | #!/usr/bin/env python
#coding:UTF-8
from audit_demo.utility.MySqlHelper import MySqlHelper
class s_g_u_relation(object):
    """CRUD wrapper around the s_g_u_relation join table, which links servers
    (s_table) with groups (g_table) and users (u_table).

    Error convention (kept from the original code): on any database error the
    exception is printed and False is returned to the caller.
    """

    def __init__(self):
        self.__helper = MySqlHelper()

    # ---- internal helpers (deduplicate the original copy-pasted methods) ----

    def __select_scalar(self, sql, params):
        # First column of the first matching row, or False on error.
        try:
            return self.__helper.select(sql, params)[0][0]
        except Exception as e:
            print(e)
            return False

    def __select_column(self, sql, params):
        # First column of every matching row as a list, or False on error.
        try:
            return [row[0] for row in self.__helper.select(sql, params)]
        except Exception as e:
            print(e)
            return False

    def __delete_by_ids(self, id_list, owner):
        # Remove each relation row by primary key; id_list may be False/empty.
        if not id_list:
            print('No relations of %s in s_g_u_relation table.' % owner)
            return
        sql = 'delete from s_g_u_relation where s_g_u_id = %s'
        try:
            for rel_id in id_list:
                self.__helper.delete(sql, rel_id)
        except Exception as e:
            print(e)

    # ---- id lookups ---------------------------------------------------------

    def get_uid(self, username):
        """Return u_id for the given user name, or False on error."""
        return self.__select_scalar('select u_id from u_table where u_name = %s', username)

    def get_gid(self, gpname):
        """Return g_id for the given group name, or False on error."""
        return self.__select_scalar('select g_id from g_table where g_name = %s', gpname)

    def get_sid(self, serip):
        """Return s_id for the given server IP, or False on error."""
        return self.__select_scalar('select s_id from s_table where s_ip = %s', serip)

    # ---- relation creation --------------------------------------------------

    def add_s_g(self, serip, gpname):
        """Link server serip to group gpname; return False on error."""
        sid = str(self.get_sid(serip))
        gid = str(self.get_gid(gpname))
        sql = 'insert into s_g_u_relation(f_s_id,f_g_id) values(%s , %s)'
        try:
            self.__helper.insert_one(sql, (sid, gid))
        except Exception as e:
            print(e)
            return False

    def add_s_u(self, serip, username):
        """Link server serip to user username; return False on error."""
        sid = str(self.get_sid(serip))
        uid = str(self.get_uid(username))
        sql = 'insert into s_g_u_relation(f_s_id,f_u_id) values(%s , %s)'
        try:
            self.__helper.insert_one(sql, (sid, uid))
        except Exception as e:
            print(e)
            return False

    # ---- relation queries ---------------------------------------------------

    def get_s_u_g_id(self, serip):
        """Relation ids referencing server serip, or False on error."""
        sid = str(self.get_sid(serip))
        return self.__select_column('select s_g_u_id from s_g_u_relation where f_s_id = %s', sid)

    def get_s_g_id(self, gpname):
        """Relation ids referencing group gpname, or False on error."""
        gid = str(self.get_gid(gpname))
        return self.__select_column('select s_g_u_id from s_g_u_relation where f_g_id = %s', gid)

    def get_s_u_id(self, username):
        """Relation ids referencing user username, or False on error."""
        uid = str(self.get_uid(username))
        return self.__select_column('select s_g_u_id from s_g_u_relation where f_u_id = %s', uid)

    def get_s_u_ser(self, username):
        """Server ids linked to user username, or False on error."""
        uid = str(self.get_uid(username))
        return self.__select_column('select f_s_id from s_g_u_relation where f_u_id = %s', uid)

    # ---- relation deletion --------------------------------------------------

    def del_s_g(self, gpname):
        """Delete every relation row that references group gpname."""
        self.__delete_by_ids(self.get_s_g_id(gpname), gpname)

    def del_s_u(self, username):
        """Delete every relation row that references user username."""
        self.__delete_by_ids(self.get_s_u_id(username), username)

    def del_s_g_u(self, serip):
        """Delete every relation row that references server serip."""
        self.__delete_by_ids(self.get_s_u_g_id(serip), serip)
'''
t = s_g_u_relation()
#t.add_s_g('192.168.0.1', 'gp2')
print(t.add_s_u('192.168.0.1', 'user2'))
'''
| [
"abigdream@hotmail.com"
] | abigdream@hotmail.com | |
47f6bad4ec08b075bab3d5983b0dee9335efe10b | ab11444273824fb46eac78d7f3dd532ae65e3bf3 | /doc/conf.py | 7d97517b8acb380b0f9be389abd16b9fa6517b2e | [
"MIT"
] | permissive | firasm/sphinx-comments | 30f2d262a723ca11e7e53f153506ee926d52e3b1 | 25db7a450af426fd898c4b1f8c656c786f37ca8c | refs/heads/master | 2022-12-01T21:40:05.283906 | 2020-08-10T17:00:26 | 2020-08-10T17:00:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,365 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "Sphinx Comments"
copyright = "2018, Chris Holdgraf"
author = "Chris Holdgraf"
# The short X.Y version
version = ""
# The full version, including alpha/beta/rc tags
release = ""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx_comments", "myst_parser"]
# sphinx-comments provider configuration.  Every provider is currently
# disabled (commented out); uncomment an entry to enable it on built pages.
comments_config = {
    # "hypothesis": True,
    # "utterances": {
    #     "repo": "executablebooks/sphinx-comments",
    # },
    # "dokieli": True
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# CopyButton configuration
copybutton_prompt_text = ">>> "
# Switches for testing but shouldn't be activated in the live docs
# copybutton_only_copy_prompt_lines = False
# copybutton_remove_prompts = False
# copybutton_image_path = "test/TEST_COPYBUTTON.png"
# copybutton_selector = "div"
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "SphinxCommentsdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"SphinxComments.tex",
"Sphinx Comments Documentation",
"Chris Holdgraf",
"manual",
),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "SphinxComments", "Sphinx Comments Documentation", [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"SphinxComments",
"Sphinx Comments Documentation",
author,
"SphinxComments",
"One line description of project.",
"Miscellaneous",
),
]
| [
"choldgraf@berkeley.edu"
] | choldgraf@berkeley.edu |
b654052281e54dfedd953387d9a22ef9cbba28f0 | a2b598d8e89c1755f683d6b6fe35c3f1ef3e2cf6 | /past_archive/swexpert/3499(perfectShuffle).py | d2712767eb99f3281392018895df8ae2b11d6d43 | [
"MIT"
] | permissive | DongHyunByun/algorithm_practice | cbe82606eaa7f372d9c0b54679bdae863aab0099 | dcd595e6962c86f90f29e1d68f3ccc9bc673d837 | refs/heads/master | 2022-09-24T22:47:01.556157 | 2022-09-11T07:36:42 | 2022-09-11T07:36:42 | 231,518,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | for t in range(int(input())):
N=int(input())
L=input().split()
print(f"#{t+1}",end="")
if N%2==0:
k=int(N/2)
for i in range(k):
print("",L[i],end="")
print("",L[i+k],end="")
print("")
else:
K=int(N/2)
for i in range(K):
print("",L[i],end="")
print("",L[i+K+1],end="")
print("",L[K]) | [
"ngoodsamari@naver.com"
] | ngoodsamari@naver.com |
a2ae2488e7f512cc79d0aa7a2686667f739a5498 | f0a14b961cd74f4193c760b1b27e52dc14698de0 | /lib/python2.7/site-packages/commondata/be/oostvlaanderen.py | 694fe5bd9fd4360b585a2021ff5d3dbdfa886669 | [] | no_license | miller2082/lino_polls | d4aa81c9c066f783739b200951fcb11ef2b93382 | 511e87699c71a8980e156a549ba87e066fd61091 | refs/heads/master | 2021-01-15T18:53:38.142552 | 2017-02-01T13:25:08 | 2017-02-01T13:25:08 | 78,850,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,202 | py | # -*- coding: UTF-8 -*-
# Copyright 2014 Luc Saffre
# This file is part of the commondata library.
# The commondata library is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3 of
# the License, or (at your option) any later version.
# The commondata library is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with the commondata library; if not, see
# <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
def populate(pg):
pg.set_args('fr nl de en')
pg.province(
"Flandre de l'Est", "Oost-Vlaanderen", "Ostflandern")
pg.set_args('zip_code fr nl de en')
pg.city("9300", "Alost", "Aalst", "Aalst")
pg.village("9308", "Gijzegem", "", "", "")
pg.village("9310", "Baardegem ", "", "", "")
pg.village("9320", "Erembodegem", "", "", "")
pg.village("9310", "Herdersem", "", "", "")
pg.village("9308", "Hofstade", "", "", "")
pg.village("9310", "Meldert", "", "", "")
pg.village("9320", "Nieuwerkerken", "", "", "")
pg.village("9310", "Moorsel", "", "", "")
pg.city("9000", "Gand", "Gent", "Gent", "Gent")
pg.city("9030", "Mariakerke (Gent)", "", "", "")
pg.city("9031", "Drongen", "", "", "")
pg.city("9032", "Wondelgem", "", "", "")
pg.city("9040", "Sint-Amandsberg", "", "", "")
pg.city("9041", "Oostakker", "", "", "")
pg.city("9042", "Mendonk", "", "", "")
pg.city("9042", "Sint-Kruis-Winkel", "", "", "")
pg.city("9050", "Gentbrugge", "", "", "")
pg.city("9050", "Ledeberg (Gent)", "", "", "")
pg.city("9051", "Afsnee", "", "", "")
pg.city("9051", "Sint-Denijs-Westrem", "", "", "")
pg.city("9052", "Zwijnaarde", "", "", "")
pg.city("9060", "Zelzate", "", "", "")
pg.city("9070", "Destelbergen", "", "", "")
pg.city("9070", "Heusden (O.-Vl.)", "", "", "")
pg.city("9080", "Beervelde", "", "", "")
pg.city("9080", "Lochristi", "", "", "")
pg.city("9080", "Zaffelare", "", "", "")
pg.city("9080", "Zeveneken", "", "", "")
pg.city("9090", "Gontrode", "", "", "")
pg.city("9090", "Melle", "", "", "")
pg.city("9100", "Nieuwkerken-Waas", "", "", "")
pg.city("9100", "Saint-Nicolas", "Sint-Niklaas", "Saint-Nicolas", "Saint-Nicolas")
pg.city("9111", "Belsele", "", "", "")
pg.city("9112", "Sinaai-Waas", "", "", "")
pg.city("9120", "Beveren-Waas", "", "", "")
pg.city("9120", "Haasdonk", "", "", "")
pg.city("9120", "Kallo (Beveren-Waas)", "", "", "")
pg.city("9120", "Melsele", "", "", "")
pg.city("9120", "Vrasene", "", "", "")
pg.city("9130", "Doel", "", "", "")
pg.city("9130", "Kallo (Kieldrecht)", "", "", "")
pg.city("9130", "Kieldrecht (Beveren)", "", "", "")
pg.city("9130", "Verrebroek", "", "", "")
pg.city("9140", "Elversele", "", "", "")
pg.city("9140", "Steendorp", "", "", "")
pg.city("9140", "Temse", "", "", "")
pg.city("9140", "Tielrode", "", "", "")
pg.city("9150", "Bazel", "", "", "")
pg.city("9150", "Kruibeke", "", "", "")
pg.city("9150", "Rupelmonde", "", "", "")
pg.city("9160", "Daknam", "", "", "")
pg.city("9160", "Eksaarde", "", "", "")
pg.city("9160", "Lokeren", "", "", "")
pg.city("9170", "De Klinge", "", "", "")
pg.city("9170", "Meerdonk", "", "", "")
pg.city("9170", "Sint-Gillis-Waas", "", "", "")
pg.city("9170", "Sint-Pauwels", "", "", "")
pg.city("9180", "Moerbeke-Waas", "", "", "")
pg.city("9185", "Wachtebeke", "", "", "")
pg.city("9190", "Kemzeke", "", "", "")
pg.city("9190", "Stekene", "", "", "")
pg.city("9200", "Appels", "", "", "")
pg.city("9200", "Baasrode", "", "", "")
pg.city("9200", "Dendermonde", "", "", "")
pg.city("9200", "Grembergen", "", "", "")
pg.city("9200", "Mespelare", "", "", "")
pg.city("9200", "Oudegem", "", "", "")
pg.city("9200", "Schoonaarde", "", "", "")
pg.city("9200", "St-Gillis-bij-Dendermonde", "", "", "")
pg.city("9220", "Hamme (O.-Vl.)", "", "", "")
pg.city("9220", "Moerzeke", "", "", "")
pg.city("9230", "Massemen", "", "", "")
pg.city("9230", "Westrem", "", "", "")
pg.city("9230", "Wetteren", "", "", "")
pg.city("9240", "Zele", "", "", "")
pg.city("9250", "Waasmunster", "", "", "")
pg.city("9255", "Buggenhout", "", "", "")
pg.city("9255", "Opdorp", "", "", "")
pg.city("9260", "Schellebelle", "", "", "")
pg.city("9260", "Serskamp", "", "", "")
pg.city("9260", "Wichelen", "", "", "")
pg.city("9270", "Kalken", "", "", "")
pg.city("9270", "Laarne", "", "", "")
pg.city("9280", "Denderbelle", "", "", "")
pg.city("9280", "Lebbeke", "", "", "")
pg.city("9280", "Wieze", "", "", "")
pg.city("9290", "Berlare", "", "", "")
pg.city("9290", "Overmere", "", "", "")
pg.city("9290", "Uitbergen", "", "", "")
pg.city("9340", "Impe", "", "", "")
pg.city("9340", "Lede", "", "", "")
pg.city("9340", "Oordegem", "", "", "")
pg.city("9340", "Smetlede", "", "", "")
pg.city("9340", "Wanzele", "", "", "")
pg.city("9400", "Appelterre-Eichem", "", "", "")
pg.city("9400", "Denderwindeke", "", "", "")
pg.city("9400", "Lieferinge", "", "", "")
pg.city("9400", "Nederhasselt", "", "", "")
pg.city("9400", "Ninove", "", "", "")
pg.city("9400", "Okegem", "", "", "")
pg.city("9400", "Voorde", "", "", "")
pg.city("9401", "Pollare", "", "", "")
pg.city("9402", "Meerbeke", "", "", "")
pg.city("9403", "Neigem", "", "", "")
pg.city("9404", "Aspelare", "", "", "")
pg.city("9406", "Outer", "", "", "")
pg.city("9420", "Aaigem", "", "", "")
pg.city("9420", "Bambrugge", "", "", "")
pg.city("9420", "Burst", "", "", "")
pg.city("9420", "Erondegem", "", "", "")
pg.city("9420", "Erpe", "", "", "")
pg.city("9420", "Erpe-Mere", "", "", "")
pg.city("9420", "Mere", "", "", "")
pg.city("9420", "Ottergem", "", "", "")
pg.city("9420", "Vlekkem", "", "", "")
pg.city("9450", "Denderhoutem", "", "", "")
pg.city("9450", "Haaltert", "", "", "")
pg.city("9450", "Heldergem", "", "", "")
pg.city("9451", "Kerksken", "", "", "")
pg.city("9470", "Denderleeuw", "", "", "")
pg.city("9472", "Iddergem", "", "", "")
pg.city("9473", "Welle", "", "", "")
pg.city("9500", "Goeferdinge", "", "", "")
pg.city("9500", "Moerbeke", "", "", "")
pg.city("9500", "Nederboelare", "", "", "")
pg.city("9500", "Onkerzele", "", "", "")
pg.city("9500", "Ophasselt", "", "", "")
pg.city("9500", "Overboelare", "", "", "")
pg.city("9500", "Viane", "", "", "")
pg.city("9500", "Zarlardinge", "", "", "")
pg.city("9500", "Grammont", "Geraardsbergen", "Grammont")
pg.city("9506", "Grimminge", "", "", "")
pg.city("9506", "Idegem", "", "", "")
pg.city("9506", "Nieuwenhove", "", "", "")
pg.city("9506", "Schendelbeke", "", "", "")
pg.city("9506", "Smeerebbe-Vloerzegem", "", "", "")
pg.city("9506", "Waarbeke", "", "", "")
pg.city("9506", "Zandbergen", "", "", "")
pg.city("9520", "Bavegem", "", "", "")
pg.city("9520", "Oomb.(St-Lievens-Houtem)", "", "", "")
pg.city("9520", "Vlierzele", "", "", "")
pg.city("9520", "Zonnegem", "", "", "")
pg.city(
"9520", "Hautem-Saint-Liévin",
"Sint-Lievens-Houtem", "Sint-Lievens-Houtem")
pg.city("9521", "Letterhoutem", "", "", "")
pg.city("9550", "Herzele", "", "", "")
pg.city("9550", "Hillegem", "", "", "")
pg.city("9550", "Sint-Antelinks", "", "", "")
pg.city("9550", "Sint-Lievens-Esse", "", "", "")
pg.city("9550", "Steenhuize-Wijnhuize", "", "", "")
pg.city("9550", "Woubrechtegem", "", "", "")
pg.city("9551", "Ressegem", "", "", "")
pg.city("9552", "Borsbeke", "", "", "")
pg.city("9570", "Deftinge", "", "", "")
pg.city("9570", "Lierde", "", "", "")
pg.city("9570", "Sint-Maria-Lierde", "", "", "")
pg.city("9571", "Hemelveerdegem", "", "", "")
pg.city("9572", "Sint-Martens-Lierde", "", "", "")
pg.city("9600", "Renaix", "", "", "")
pg.city("9600", "Ronse", "", "", "")
pg.city("9620", "Elene", "", "", "")
pg.city("9620", "Erwetegem", "", "", "")
pg.city("9620", "Godveerdegem", "", "", "")
pg.city("9620", "Grotenberge", "", "", "")
pg.city("9620", "Leeuwergem", "", "", "")
pg.city("9620", "Oombergen (Zottegem)", "", "", "")
pg.city("9620", "Sint-Goriks-Oudenhove", "", "", "")
pg.city("9620", "St-Maria-Oudenhove(Zott.)", "", "", "")
pg.city("9620", "Strijpen", "", "", "")
pg.city("9620", "Velzeke-Ruddershove", "", "", "")
pg.city("9620", "Zottegem", "", "", "")
pg.city("9630", "Beerlegem", "", "", "")
pg.city("9630", "Dikkele", "", "", "")
pg.city("9630", "Hundelgem", "", "", "")
pg.city("9630", "Meilegem", "", "", "")
pg.city("9630", "Munkzwalm", "", "", "")
pg.city("9630", "Paulatem", "", "", "")
pg.city("9630", "Roborst", "", "", "")
pg.city("9630", "Rozebeke", "", "", "")
pg.city("9630", "Sint-Blasius-Boekel", "", "", "")
pg.city("9630", "Sint-Denijs-Boekel", "", "", "")
pg.city("9630", "Sint-Maria-Latem", "", "", "")
pg.city("9630", "Zwalm", "", "", "")
pg.city("9636", "Nederzwalm-Hermelgem", "", "", "")
pg.city("9660", "Brakel", "", "", "")
pg.city("9660", "Elst", "", "", "")
pg.city("9660", "Everbeek", "", "", "")
pg.city("9660", "Michelbeke", "", "", "")
pg.city("9660", "Nederbrakel", "", "", "")
pg.city("9660", "Opbrakel", "", "", "")
pg.city("9660", "St-Maria-Oudenhove(Brakel)", "", "", "")
pg.city("9660", "Zegelsem", "", "", "")
pg.city("9661", "Parike", "", "", "")
pg.city("9667", "Horebeke", "", "", "")
pg.city("9667", "Sint-Kornelis-Horebeke", "", "", "")
pg.city("9667", "Sint-Maria-Horebeke", "", "", "")
pg.city("9680", "Etikhove", "", "", "")
pg.city("9680", "Maarkedal", "", "", "")
pg.city("9680", "Maarke-Kerkem", "", "", "")
pg.city("9681", "Nukerke", "", "", "")
pg.city("9688", "Schorisse", "", "", "")
pg.city("9690", "Berchem (O.-Vl.)", "", "", "")
pg.city("9690", "Kluisbergen", "", "", "")
pg.city("9690", "Kwaremont", "", "", "")
pg.city("9690", "Ruien", "", "", "")
pg.city("9690", "Zulzeke", "", "", "")
pg.city("9700", "Bevere", "", "", "")
pg.city("9700", "Edelare", "", "", "")
pg.city("9700", "Eine", "", "", "")
pg.city("9700", "Ename", "", "", "")
pg.city("9700", "Heurne", "", "", "")
pg.city("9700", "Leupegem", "", "", "")
pg.city("9700", "Mater", "", "", "")
pg.city("9700", "Melden", "", "", "")
pg.city("9700", "Mullem", "", "", "")
pg.city("9700", "Nederename", "", "", "")
pg.city("9700", "Ooike (Oudenaarde)", "", "", "")
pg.city("9700", "Oudenaarde", "", "", "")
pg.city("9700", "Volkegem", "", "", "")
pg.city("9700", "Welden", "", "", "")
pg.city("9750", "Huise", "", "", "")
pg.city("9750", "Ouwegem", "", "", "")
pg.city("9750", "Zingem", "", "", "")
pg.city("9770", "Kruishoutem", "", "", "")
pg.city("9771", "Nokere", "", "", "")
pg.city("9772", "Wannegem-Lede", "", "", "")
pg.city("9790", "Elsegem", "", "", "")
pg.city("9790", "Moregem", "", "", "")
pg.city("9790", "Ooike (Wortegem-Petegem)", "", "", "")
pg.city("9790", "Petegem-aan-de-Schelde", "", "", "")
pg.city("9790", "Wortegem", "", "", "")
pg.city("9790", "Wortegem-Petegem", "", "", "")
pg.city("9800", "Astene", "", "", "")
pg.city("9800", "Bachte-Maria-Leerne", "", "", "")
pg.city("9800", "Deinze", "", "", "")
pg.city("9800", "Gottem", "", "", "")
pg.city("9800", "Grammene", "", "", "")
pg.city("9800", "Meigem", "", "", "")
pg.city("9800", "Petegem-aan-de-Leie", "", "", "")
pg.city("9800", "Sint-Martens-Leerne", "", "", "")
pg.city("9800", "Vinkt", "", "", "")
pg.city("9800", "Wontergem", "", "", "")
pg.city("9800", "Zeveren", "", "", "")
pg.city("9810", "Eke", "", "", "")
pg.city("9810", "Nazareth", "", "", "")
pg.city("9820", "Bottelare", "", "", "")
pg.city("9820", "Lemberge", "", "", "")
pg.city("9820", "Melsen", "", "", "")
pg.city("9820", "Merelbeke", "", "", "")
pg.city("9820", "Munte", "", "", "")
pg.city("9820", "Schelderode", "", "", "")
pg.city("9830", "Laethem-Saint-Martin",
"Sint-Martens-Latem", "Sint-Martens-Latem")
pg.city("9831", "Deurle", "", "", "")
pg.city("9840", "De Pinte", "", "", "")
pg.city("9840", "Zevergem", "", "", "")
pg.city("9850", "Hansbeke", "", "", "")
pg.city("9850", "Landegem", "", "", "")
pg.city("9850", "Merendree", "", "", "")
pg.city("9850", "Nevele", "", "", "")
pg.city("9850", "Poesele", "", "", "")
pg.city("9850", "Vosselare", "", "", "")
pg.city("9860", "Balegem", "", "", "")
pg.city("9860", "Gijzenzele", "", "", "")
pg.city("9860", "Landskouter", "", "", "")
pg.city("9860", "Moortsele", "", "", "")
pg.city("9860", "Oosterzele", "", "", "")
pg.city("9860", "Scheldewindeke", "", "", "")
pg.city("9870", "Machelen (O.-Vl.)", "", "", "")
pg.city("9870", "Olsene", "", "", "")
pg.city("9870", "Zulte", "", "", "")
pg.city("9880", "Aalter", "", "", "")
pg.city("9880", "Lotenhulle", "", "", "")
pg.city("9880", "Poeke", "", "", "")
pg.city("9881", "Bellem", "", "", "")
pg.city("9890", "Asper", "", "", "")
pg.city("9890", "Baaigem", "", "", "")
pg.city("9890", "Dikkelvenne", "", "", "")
pg.city("9890", "Gavere", "", "", "")
pg.city("9890", "Semmerzake", "", "", "")
pg.city("9890", "Vurste", "", "", "")
pg.city("9900", "Eeklo", "", "", "")
pg.city("9910", "Knesselare", "", "", "")
pg.city("9910", "Ursel", "", "", "")
pg.city("9920", "Lovendegem", "", "", "")
pg.city("9921", "Vinderhoute", "", "", "")
pg.city("9930", "Zomergem", "", "", "")
pg.city("9931", "Oostwinkel", "", "", "")
pg.city("9932", "Ronsele", "", "", "")
pg.city("9940", "Ertvelde", "", "", "")
pg.city("9940", "Evergem", "", "", "")
pg.city("9940", "Kluizen", "", "", "")
pg.city("9940", "Sleidinge", "", "", "")
pg.city("9950", "Waarschoot", "", "", "")
pg.city("9960", "Assenede", "", "", "")
pg.city("9961", "Boekhoute", "", "", "")
pg.city("9968", "Bassevelde", "", "", "")
pg.city("9968", "Oosteeklo", "", "", "")
pg.city("9970", "Kaprijke", "", "", "")
pg.city("9971", "Lembeke", "", "", "")
pg.city("9980", "Sint-Laureins", "", "", "")
pg.city("9981", "Sint-Margriete", "", "", "")
pg.city("9982", "Sint-Jan-in-Eremo", "", "", "")
pg.city("9988", "Waterland-Oudeman", "", "", "")
pg.city("9988", "Watervliet", "", "", "")
pg.city("9990", "Maldegem", "", "", "")
pg.city("9991", "Adegem", "", "", "")
pg.city("9992", "Middelburg", "", "", "")
| [
"jmillerconcom@gmail.com"
] | jmillerconcom@gmail.com |
6d5e2b8faed3400b9be9ec71c62c78e65f70c8c5 | 9b36652dafb58888b7a584806ee69a33fcb609d5 | /objutils/tests/testTek.py | d26b7c1df514a10fceac180155a4ecf70ca605da | [] | no_license | pySART/objutils | db33e4576cf68111cb4debbafec06a0204844938 | 5ba4631b2245caae80d4dbe0053db0f2706ba53f | refs/heads/master | 2020-06-29T03:35:24.485977 | 2016-11-21T14:21:56 | 2016-11-21T14:21:56 | 74,451,500 | 5 | 2 | null | 2016-11-22T08:36:10 | 2016-11-22T08:36:10 | null | UTF-8 | Python | false | false | 961 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from objutils import loads, dumps
from objutils.section import Section
from objutils.image import Image, Builder
import unittest
TEK = b"""/B000100C576F77212044696420796F7520726561A5
/B010100D6C6C7920676F207468726F7567682061C1
/B020100E6C6C20746861742074726F75626C6520AF
/B0300D1B746F207265616420746869733F8D
/B03D001B"""
S19 = b"""S113B000576F77212044696420796F7520726561D8
S113B0106C6C7920676F207468726F756768206143
S113B0206C6C20746861742074726F75626C652036
S110B030746F207265616420746869733F59
S5030004F8"""
class TestRoundtrip(unittest.TestCase):
    """Round-trip conversion between Tektronix hex and Motorola S-record."""

    def testLoadsWorks(self):
        # Tek input converted to S-record (with an S5 count record)
        # must reproduce the S19 reference byte-for-byte.
        image = loads("tek", TEK)
        self.assertEqual(dumps("srec", image, s5record=True), S19)

    def testDumpsWorks(self):
        # The reverse direction: S19 reference back to Tek format.
        image = loads("srec", S19)
        self.assertEqual(dumps("tek", image), TEK)


if __name__ == '__main__':
    unittest.main()
| [
"cpu12.gems@googlemail.com"
] | cpu12.gems@googlemail.com |
10b2e78bdd20211096522dfd8c9647defebbde56 | 78cb6dadc7599e01b078682b175f21be673ed199 | /438. Find All Anagrams in a String.py | 6cc8986e066d54440059175d3e147ddeb642285b | [] | no_license | AlexWufan/leetcode-python | 5cf5f13dbc7d1e425fde646df618e50c488fa79f | 435323a9fcea6a4d09266785e88fb78735e0cc3e | refs/heads/master | 2021-01-13T00:49:49.870468 | 2018-04-13T18:44:19 | 2018-04-13T18:44:19 | 51,347,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | class Solution(object):
def findAnagrams(self, s, p):
    """Return the start indices of all anagrams of ``p`` inside ``s``.

    Slides a fixed-size window of length ``len(p)`` over ``s`` while
    maintaining a character-count of the window, and records every
    position where that count equals the count of ``p``.  Linear in
    ``len(s)`` for a fixed alphabet; the original rebuilt the window
    with ``del window[0]``, which is O(len(p)) per step, and raised
    IndexError for an empty pattern.

    :type s: str
    :type p: str
    :rtype: List[int]
    """
    from collections import Counter

    m = len(p)
    # Guard degenerate inputs: empty pattern (crashed before) and a
    # haystack shorter than the pattern.
    if m == 0 or len(s) < m:
        return []

    target = Counter(p)        # profile an anagram window must match
    window = Counter(s[:m])    # profile of the current window of s
    output = [0] if window == target else []

    for i in range(m, len(s)):
        # Slide right by one: drop s[i-m], add s[i].
        out_ch = s[i - m]
        window[out_ch] -= 1
        if window[out_ch] == 0:
            # Remove exhausted keys so Counter equality works directly.
            del window[out_ch]
        window[s[i]] += 1
        if window == target:
            output.append(i - m + 1)
    return output
if __name__=='__main__':
    # Smoke test from LeetCode 438; expected output is [0, 6].
    asolution = Solution()
    print(asolution.findAnagrams("cbaebabacd", "abc"))
"mengnanszw@gmail.com"
] | mengnanszw@gmail.com |
13e08c57c264da4490caff5851d599862f2ad5e9 | 665d16b4042b7e3e632c9fdf227f0c875f5ec4a1 | /venv/bin/easy_install-2.7 | cb2c773053201facbdee101928d9870f82d1a1f3 | [] | no_license | mutaihillary/raspberry-pi | f6a3a77d88cfd2e744ca555d0145a6193e90d887 | 07bfc3e7322ef808fa8fa85cd288fc39bed4a4bb | refs/heads/master | 2020-12-31T07:18:56.200274 | 2016-09-16T15:09:17 | 2016-09-16T15:09:17 | 67,683,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | 7 | #!/home/kipkoech/Repos/raspberry-pi/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script wrapper for easy_install.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip a trailing "-script.pyw" or ".exe" (added by Windows script
    # launchers) so easy_install sees its canonical name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"mutaihillary@yahoo.com"
] | mutaihillary@yahoo.com |
6331faf2779685d448c3fd00e25cd3fe87609f67 | 2173909e5a0a87d72f86f2805e602c1d73e07568 | /w3af-repo/w3af/core/controllers/misc/number_generator.py | ed8d1c1c56f485f8ad42374fc8ff73a1121aadf2 | [] | no_license | ZenSecurity/w3af-module | 78d603ed076f879b8bd280c0bf3382d153aaacec | 13967bffaa211fe7f793204796802f1a5967f1d7 | refs/heads/master | 2021-01-15T13:48:24.183830 | 2016-08-05T13:09:49 | 2016-08-05T13:09:49 | 40,010,219 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | """
number_generator.py
Copyright 2009 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from threading import Lock
class number_generator(object):
    """
    Thread-safe source of consecutive integers.

    Used for assigning IDs to HTTP requests and responses.
    """
    def __init__(self):
        """
        Counter starts at zero; a lock guards every mutation.
        """
        self._lock = Lock()
        self._id = 0

    def inc(self):
        """
        Advance the counter by one, atomically.

        :return: The next number.
        """
        with self._lock:
            self._id += 1
            return self._id

    def get(self):
        """
        :return: The current number (the last value handed out).
        """
        return self._id

    def reset(self):
        """
        Reset internal counter to 0.
        """
        with self._lock:
            self._id = 0


consecutive_number_generator = number_generator()
| [
"andres.riancho@gmail.com"
] | andres.riancho@gmail.com |
33e9c5a127191634c357502c02ba4bb43a209411 | e4616ae545872442f24b35e46d76d351edab22b0 | /test/sql/test_select.py | 4c00cb53c790e5d8c3c31a9b4e420e04e1325ebc | [
"MIT"
] | permissive | StefanosChaliasos/sqlalchemy | 0915d5fb66420eaf5dbb3468ed4a2c283f8802c0 | 8c228be322023041b11691d93dafa1be090f01a0 | refs/heads/master | 2022-12-05T02:43:43.684766 | 2020-08-26T13:12:24 | 2020-08-26T13:12:24 | 290,499,121 | 0 | 1 | MIT | 2020-08-26T13:08:17 | 2020-08-26T13:08:17 | null | UTF-8 | Python | false | false | 7,070 | py | from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import tuple_
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import fixtures
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
table2 = table(
"myothertable", column("otherid", Integer), column("othername", String)
)
metadata = MetaData()
parent = Table(
"parent",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
child = Table(
"child",
metadata,
Column("id", Integer, primary_key=True),
Column("parent_id", ForeignKey("parent.id")),
Column("data", String(50)),
)
class FutureSelectTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_legacy_calling_style_kw_only(self):
stmt = select(
whereclause=table1.c.myid == table2.c.otherid
).add_columns(table1.c.myid)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable, myothertable "
"WHERE mytable.myid = myothertable.otherid",
)
def test_legacy_calling_style_col_seq_only(self):
stmt = select([table1.c.myid]).where(table1.c.myid == table2.c.otherid)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable, myothertable "
"WHERE mytable.myid = myothertable.otherid",
)
def test_new_calling_style(self):
stmt = select(table1.c.myid).where(table1.c.myid == table2.c.otherid)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable, myothertable "
"WHERE mytable.myid = myothertable.otherid",
)
def test_kw_triggers_old_style(self):
assert_raises_message(
exc.ArgumentError,
r"select\(\) construct created in legacy mode, "
"i.e. with keyword arguments",
select,
table1.c.myid,
whereclause=table1.c.myid == table2.c.otherid,
)
def test_join_nofrom_implicit_left_side_explicit_onclause(self):
stmt = select(table1).join(table2, table1.c.myid == table2.c.otherid)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_join_nofrom_explicit_left_side_explicit_onclause(self):
stmt = select(table1).join_from(
table1, table2, table1.c.myid == table2.c.otherid
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_join_nofrom_implicit_left_side_implicit_onclause(self):
stmt = select(parent).join(child)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id",
)
def test_join_nofrom_explicit_left_side_implicit_onclause(self):
stmt = select(parent).join_from(parent, child)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id",
)
def test_join_froms_implicit_left_side_explicit_onclause(self):
stmt = (
select(table1)
.select_from(table1)
.join(table2, table1.c.myid == table2.c.otherid)
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_join_froms_explicit_left_side_explicit_onclause(self):
stmt = (
select(table1)
.select_from(table1)
.join_from(table1, table2, table1.c.myid == table2.c.otherid)
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_join_froms_implicit_left_side_implicit_onclause(self):
stmt = select(parent).select_from(parent).join(child)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id",
)
def test_join_froms_explicit_left_side_implicit_onclause(self):
stmt = select(parent).select_from(parent).join_from(parent, child)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id",
)
def test_joins_w_filter_by(self):
stmt = (
select(parent)
.filter_by(data="p1")
.join(child)
.filter_by(data="c1")
.join_from(table1, table2, table1.c.myid == table2.c.otherid)
.filter_by(otherid=5)
)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id, mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid "
"WHERE parent.data = :data_1 AND child.data = :data_2 "
"AND myothertable.otherid = :otherid_1",
checkparams={"data_1": "p1", "data_2": "c1", "otherid_1": 5},
)
def test_filter_by_no_property(self):
assert_raises_message(
exc.InvalidRequestError,
'Entity namespace for "mytable" has no property "foo"',
select(table1).filter_by,
foo="bar",
)
def test_select_tuple_outer(self):
stmt = select(tuple_(table1.c.myid, table1.c.name))
assert_raises_message(
exc.CompileError,
r"Most backends don't support SELECTing from a tuple\(\) object. "
"If this is an ORM query, consider using the Bundle object.",
stmt.compile,
)
def test_select_tuple_subquery(self):
subq = select(
table1.c.name, tuple_(table1.c.myid, table1.c.name)
).subquery()
stmt = select(subq.c.name)
# if we aren't fetching it, then render it
self.assert_compile(
stmt,
"SELECT anon_1.name FROM (SELECT mytable.name AS name, "
"(mytable.myid, mytable.name) AS anon_2 FROM mytable) AS anon_1",
)
| [
"mike_mp@zzzcomputing.com"
] | mike_mp@zzzcomputing.com |
9509c638c0f00031012f7d8b3195c967ca88f329 | 2a2ce1246252ef6f59e84dfea3888c5a98503eb8 | /examples/introduction.to.programming.with.turtle/for_all/3-4-4.flower.py | 917b9cc24fd5d2ca8f82570037dad0dd7ee82e1b | [
"BSD-3-Clause"
] | permissive | royqh1979/PyEasyGraphics | c7f57c1fb5a829287e9c462418998dcc0463a772 | 842121e461be3273f845866cf1aa40c312112af3 | refs/heads/master | 2021-06-11T10:34:03.001842 | 2021-04-04T10:47:52 | 2021-04-04T10:47:52 | 161,438,503 | 8 | 4 | BSD-3-Clause | 2021-04-04T10:47:53 | 2018-12-12T05:43:31 | Python | UTF-8 | Python | false | false | 337 | py | from easygraphics.turtle import *
def main():
    """Draw a six-petal flower with turtle-style graphics.

    Each petal is two 60-degree arcs; six petals spaced 60 degrees
    apart close the full 360-degree figure.
    """
    create_world(800, 600)
    set_speed(400)

    def draw_arc():
        # Approximate a 60-degree arc with 60 short forward/turn steps,
        # then turn to aim along the petal's other edge.
        for _ in range(60):
            fd(3)
            rt(1)
        rt(120)

    for _ in range(6):
        draw_arc()  # first half of the petal outline
        draw_arc()  # second half, back to the petal's start point
        rt(60)      # rotate to where the next petal begins
    pause()
    close_world()

easy_run(main)
"royqh1979@gmail.com"
] | royqh1979@gmail.com |
4f676bf12515b70baea4496e48da040e19db6938 | 0dca74ba205f42b38c1d1a474350e57ff78352b4 | /Geometry/HGCalGeometry/test/python/testHGCalNeighbor_cfg.py | 2fa941f4241557585e83e0bb0e699de4e83df2f8 | [
"Apache-2.0"
] | permissive | jaimeleonh/cmssw | 7fd567997a244934d6c78e9087cb2843330ebe09 | b26fdc373052d67c64a1b5635399ec14525f66e8 | refs/heads/AM_106X_dev | 2023-04-06T14:42:57.263616 | 2019-08-09T09:08:29 | 2019-08-09T09:08:29 | 181,003,620 | 1 | 0 | Apache-2.0 | 2019-04-12T12:28:16 | 2019-04-12T12:28:15 | null | UTF-8 | Python | false | false | 1,732 | py | import FWCore.ParameterSet.Config as cms
# CMSSW configuration: fire single particles through the Phase-2 HGCal
# geometry and run the HGCal neighbor-finding test analyzers.
from Configuration.StandardSequences.Eras import eras
process = cms.Process("PROD",eras.Phase2C4)
# Geometry, magnetic field, detector-ID associators and the
# HGCal neighbor-test analyzer modules.
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("Configuration.Geometry.GeometryExtended2023D28Reco_cff")
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('TrackingTools.TrackAssociator.DetIdAssociatorESProducer_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Geometry.HGCalGeometry.hgcalTestNeighbor_cfi')
# Conditions: pick the Phase-2 realistic global tag automatically.
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['phase2_realistic']
if hasattr(process,'MessageLogger'):
    # Enable the HGCalGeom message category when a logger is configured.
    process.MessageLogger.categories.append('HGCalGeom')
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.source = cms.Source("EmptySource")
# Particle gun: single muon neutrinos (PDG 14) at essentially fixed
# E = 10 GeV, flat in eta over [-3.5, 3.5] and in the full phi range.
process.generator = cms.EDProducer("FlatRandomEGunProducer",
    PGunParameters = cms.PSet(
        PartID = cms.vint32(14),
        MinEta = cms.double(-3.5),
        MaxEta = cms.double(3.5),
        MinPhi = cms.double(-3.14159265359),
        MaxPhi = cms.double(3.14159265359),
        MinE = cms.double(9.99),
        MaxE = cms.double(10.01)
    ),
    AddAntiParticle = cms.bool(False),
    Verbosity = cms.untracked.int32(0),
    firstRun = cms.untracked.uint32(1)
)
# A single event is enough for the neighbor check.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
# EE-only variant kept for reference:
#process.p1 = cms.Path(process.generator*process.hgcalEETestNeighbor)
process.p1 = cms.Path(process.generator*process.hgcalEETestNeighbor*process.hgcalHEFTestNeighbor*process.hgcalHEBTestNeighbor)
| [
"sunanda.banerjee@cern.ch"
] | sunanda.banerjee@cern.ch |
11ae87bb61dbbdaf88257bb33a9cffad4e4b2702 | a38180435ac5786185c0aa48891c0aed0ab9d72b | /S4/S4 Decompiler/decompyle3/parsers/reducecheck/not_or_check.py | 91f0417f69f9a07e915508dd197ca8319a38c9a4 | [
"CC-BY-4.0"
] | permissive | NeonOcean/Environment | e190b6b09dd5dbecba0a38c497c01f84c6f9dc7d | ca658cf66e8fd6866c22a4a0136d415705b36d26 | refs/heads/master | 2022-12-03T13:17:00.100440 | 2021-01-09T23:26:55 | 2021-01-09T23:26:55 | 178,096,522 | 1 | 1 | CC-BY-4.0 | 2022-11-22T20:24:59 | 2019-03-28T00:38:17 | Python | UTF-8 | Python | false | false | 1,598 | py | # Copyright (c) 2020 Rocky Bernstein
def not_or_check(
    self, lhs: str, n: int, rule, ast, tokens: list, first: int, last: int
) -> bool:
    """Grammar reduce-check for a "not ... or ..." rule.

    Returns True to reject this candidate parse — i.e. the jump pattern
    in the bytecode indicates the condition is really an "and" chain —
    and False to accept it as "not ... or ...".
    """
    # Note (exp1 and exp2) and (not exp1 or exp2) are close, especially in
    # a control structure like an "if".
    # "exp1 and exp2":
    # exp1; POP_JUMP_IF_FALSE endif; exp2; POP_JUMP_IF_FALSE endif; then
    #
    # "not exp1 or exp2":
    # exp1; POP_JUMP_IF_FALSE then; exp2 POP_JUMP_IF_FALSE endif; then
    # The difference is whether the POP_JUMPs go to the same place or not.
    expr_pjif = ast[0]
    end_token = tokens[last-1]
    if end_token.kind.startswith("POP_JUMP_IF_FALSE"):
        # Descend through nested and_parts to the underlying expr_pjif.
        while expr_pjif == "and_parts":
            expr_pjif = expr_pjif[0]
            pass
        assert expr_pjif == "expr_pjif"
        # Different jump targets for the two POP_JUMPs => "and" pattern.
        if expr_pjif[-1].attr != end_token.attr:
            return True

        # More "and" in a condition vs. "not or":
        # Intuitively it has to do with where we go with the "and" or
        # "not or". Right now if there are loop jumps involved
        # we are saying this is "and", but this is empirical and not on
        # solid ground.
        # If test jump is a backwards jump, we have an "and", not a "not or".
        first_offset = tokens[first].off2int()
        if end_token.attr < first_offset:
            return True

        # Similarly if the test jump goes to another jump it is (probably?) an "and".
        jump_target_inst_index = self.offset2inst_index[end_token.attr]
        inst = self.insts[jump_target_inst_index-1]
        return inst.is_jump()
        pass
    return False
| [
"40919586+NeonOcean@users.noreply.github.com"
] | 40919586+NeonOcean@users.noreply.github.com |
0350c795fa887e71cffc61e9518bb61ec12bd3d0 | 4b41a76c5c366ba2daa30843acea16609b8f5da7 | /2017/21/AoC17_21_1.py | 74d5bfb689ee820e1ec5834706e68b017c97dce6 | [] | no_license | grandfoosier/AdventOfCode | c4706cfefef61e80060cca89b0433636e42bf974 | a43fdd72fe4279196252f24a4894500a4e272a5d | refs/heads/master | 2020-06-11T12:36:48.699811 | 2019-01-14T23:44:44 | 2019-01-14T23:44:44 | 75,665,958 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,995 | py | class Art(object):
def __init__(self):
fname = "AoC17_21_1.txt"
self.rules = [line.rstrip("\n").split() for line in open(fname)]
self.lkup = [i[0] for i in self.rules]
self.grid = ['.#.','..#','###']
def _twfh(self, s):
return (s[1] + s[0] + '/' + # 01 -> 10
s[4] + s[3]) # 34 -> 43
def _twfv(self, s):
return (s[3] + s[4] + '/' + # 01 -> 34
s[0] + s[1]) # 34 -> 01
def _twr1(self, s):
return (s[3] + s[0] + '/' + # 01 -> 30
s[4] + s[1]) # 34 -> 41
def _twr2(self, s):
return (s[4] + s[3] + '/' + # 01 -> 43
s[1] + s[0]) # 34 -> 10
def _twr3(self, s):
return (s[1] + s[4] + '/' + # 01 -> 14
s[0] + s[3]) # 34 -> 03
def _twf1(self, s):
return (s[4] + s[1] + '/' + # 01 -> 41
s[3] + s[0]) # 34 -> 30
def _twf3(self, s):
return (s[0] + s[3] + '/' + # 01 -> 03
s[1] + s[4]) # 34 -> 14
def _thfh(self, s):
return (s[2] + s[1] + s[0] + '/' + # 012 210
s[6] + s[5] + s[4] + '/' + # 456 -> 654
s[10] + s[9] + s[8]) # 89A A98
def _thfv(self, s):
return (s[8] + s[9] + s[10] + '/' + # 012 89A
s[4] + s[5] + s[6] + '/' + # 456 -> 456
s[0] + s[1] + s[2]) # 89A 012
def _thr1(self, s):
return (s[8] + s[4] + s[0] + '/' + # 012 840
s[9] + s[5] + s[1] + '/' + # 456 -> 951
s[10] + s[6] + s[2]) # 89A A62
def _thr2(self, s):
return (s[10] + s[9] + s[8] + '/' + # 012 A98
s[6] + s[5] + s[4] + '/' + # 456 -> 654
s[2] + s[1] + s[0]) # 89A 210
def _thr3(self, s):
return (s[2] + s[6] + s[10] + '/' + # 012 26A
s[1] + s[5] + s[9] + '/' + # 456 -> 159
s[0] + s[4] + s[8]) # 89A 048
def _thf1(self, s):
return (s[10] + s[6] + s[2] + '/' + # 012 A62
s[9] + s[5] + s[1] + '/' + # 456 -> 951
s[8] + s[4] + s[0]) # 89A 840
def _thf3(self, s):
return (s[0] + s[4] + s[8] + '/' + # 012 048
s[1] + s[5] + s[9] + '/' + # 456 -> 159
s[2] + s[6] + s[10]) # 89A 26A
def _tw2th(self):
fmd = []
for i in range(len(self.grid)/2):
fmd.append([])
for j in range(len(self.grid)/2):
fmd[i].append(self.grid[2*i][2*j:2*j+2] + '/' +
self.grid[2*i+1][2*j:2*j+2])
new = []
for i in range(len(fmd)):
new.append([])
for j in fmd[i]:
if j in self.lkup:
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twfh(j) in self.lkup:
j = self._twfh(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twfv(j) in self.lkup:
j = self._twfv(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twr1(j) in self.lkup:
j = self._twr1(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twr2(j) in self.lkup:
j = self._twr2(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twr3(j) in self.lkup:
j = self._twr3(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twf1(j) in self.lkup:
j = self._twf1(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._twf3(j) in self.lkup:
j = self._twf3(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
else:
pause = raw_input("OOPS")
self.grid = []
for i in range(len(new)):
self.grid.extend(['','',''])
for j in range(len(new)):
self.grid[3*i+0] += new[i][j][0:3]
self.grid[3*i+1] += new[i][j][4:7]
self.grid[3*i+2] += new[i][j][8:11]
print ""
for i in self.grid: print i
def _th2fo(self):
fmd = []
for i in range(len(self.grid)/3):
fmd.append([])
for j in range(len(self.grid)/3):
fmd[i].append(self.grid[3*i][3*j:3*j+3] + '/' +
self.grid[3*i+1][3*j:3*j+3] + '/' +
self.grid[3*i+2][3*j:3*j+3])
new = []
for i in range(len(fmd)):
new.append([])
for j in fmd[i]:
if j in self.lkup:
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thfh(j) in self.lkup:
j = self._thfh(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thfv(j) in self.lkup:
j = self._thfv(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thr1(j) in self.lkup:
j = self._thr1(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thr2(j) in self.lkup:
j = self._thr2(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thr3(j) in self.lkup:
j = self._thr3(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thf1(j) in self.lkup:
j = self._thf1(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
elif self._thf3(j) in self.lkup:
j = self._thf3(j)
x = self.lkup.index(j)
print self.rules[x]
new[i].append(self.rules[x][2])
else:
pause = raw_input("OOPS")
self.grid = []
for i in range(len(new)):
self.grid.extend(['','','',''])
for j in range(len(new)):
self.grid[4*i+0] += new[i][j][0:4]
self.grid[4*i+1] += new[i][j][5:9]
self.grid[4*i+2] += new[i][j][10:14]
self.grid[4*i+3] += new[i][j][15:19]
print ""
for i in self.grid: print i
def increment(self, n):
for i in self.grid: print i
print ""
for i in range(n):
if len(self.grid) % 2 == 0: self._tw2th()
else: self._th2fo()
pause = raw_input("")
def count_on(self):
c = 0
for i in self.grid: c += i.count('#')
return c
A = Art()
print ""
A.increment(5)
print A.count_on()
print "\n"
| [
"noreply@github.com"
] | grandfoosier.noreply@github.com |
fad51eb5f3e5f98fc3c8c6f2df4c0bf604c80a66 | e0ef688e339e6f4a68382d821d159185e4297628 | /rhodopsin/experiment_base.py | bcbc827fdccc8fee442bc7d768722f86188783e1 | [
"MIT"
] | permissive | djpetti/rhodopsin | bc11befcc5e90e29705d74ab59e1405586df998b | 97bdb9a6ba3c29b1fe1dd1e60b0b41e5a247ccf1 | refs/heads/master | 2021-07-01T08:00:21.414567 | 2019-05-18T21:39:36 | 2019-05-18T21:39:36 | 147,679,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,528 | py | import abc
import os
import signal
from . import menu
from . import params
class ExperimentBase(abc.ABC):
""" Base class for experiments that defines the API. """
def __init__(self, save_file="experiment.rhp", hyperparams=None,
status=None):
"""
Args:
save_file: File in which to save the model data.
hyperparams: Optional custom hyperparameters to use.
status: Optional custom status parameters to use. """
self.__save_file = save_file
# Create hyperparameters.
self.__params = hyperparams
if self.__params is None:
self.__params = params.HyperParams()
# Create status parameters.
self.__status = status
if self.__status is None:
self.__status = params.Status()
# Add default status parameters.
self.__status.add_if_not_set("iterations", 0)
# Register the signal handler.
signal.signal(signal.SIGINT, self._handle_signal)
# Create the menu tree.
self.__menus = menu.MenuTree()
main_menu = menu.MainMenu(self.__params, self.__status)
adjust_menu = menu.AdjustMenu(self.__params, self.__status)
status_menu = menu.StatusMenu(self.__params, self.__status)
self.__menus.add_menu(main_menu)
self.__menus.add_menu(adjust_menu)
self.__menus.add_menu(status_menu)
# Run custom initialization code.
self._init_experiment()
# Check for an existing model.
if self._model_exists(self.__save_file):
load_menu = menu.LoadModelMenu(self.__params, self.__status,
self.__save_file)
load_menu.show()
# Check what was selected.
if load_menu.should_load():
# Load the model.
self._load_model(self.__save_file)
@abc.abstractmethod
def _handle_signal(self, signum, frame):
""" Handles the user hitting Ctrl+C. This is supposed to bring up the
menu.
Args:
signum: The signal number that triggered this.
frame: Current stack frame. """
pass
def _show_main_menu(self):
""" Show the main menu. """
self.__menus.show("main")
def _checkpoint(self):
"""
Saves the model at this point.
"""
self._save_model(self.__save_file)
def _init_experiment(self):
""" Runs any custom initialization code for the experiment. This will be
run right after we've configured parameters and hyperparameters, and
before we've attempted to load the model. By default, it does nothing.
"""
pass
@abc.abstractmethod
def _run_training_step(self):
""" Runs a single training iteration. This is meant to be overidden by a
subclass. """
pass
@abc.abstractmethod
def _run_testing_step(self):
""" Runs a single testing iteration. This is meant to be overidden by a
subclass. """
pass
def _save_model(self, save_file):
""" Saves the model. By default, it does nothing. It should be
implemented by a subclass.
Args:
save_file: The path at which to save the model. """
pass
def _load_model(self, save_file):
""" Loads a model from disk. If _save_model() is used, this must be
implemented by a subclass. Note that this is not an abstract method,
because if save_model is not used, it need not be implemented either.
Args:
save_file: The path from which to load the model. """
raise NotImplementedError(
"_load_model() must be implemented by subclass.")
@classmethod
def _model_exists(cls, save_file):
""" Checks if a saved model exists. By default, it just checks if
save_path exists, but it can be overridden to allow for more
sophisticated functionality.
Args:
save_file: The possible path to the saved model. """
return os.path.exists(save_file)
@abc.abstractmethod
def train(self):
""" Runs the training procedure to completion. """
pass
def get_params(self):
"""
Returns:
The hyperparameters being used for this experiment. """
return self.__params
def get_status(self):
"""
Returns:
The status parameters being used for this experiment. """
return self.__status
| [
"djpetti@gmail.com"
] | djpetti@gmail.com |
507c7a2c804bb7e49d3d43b11b73884c2d80ed71 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/network/v20180401/traffic_manager_user_metrics_key.py | 6a8b7be397ddbe4a0f2f1fb0d12eca8f2972edca | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,274 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['TrafficManagerUserMetricsKeyArgs', 'TrafficManagerUserMetricsKey']
@pulumi.input_type
class TrafficManagerUserMetricsKeyArgs:
def __init__(__self__):
"""
The set of arguments for constructing a TrafficManagerUserMetricsKey resource.
"""
pass
class TrafficManagerUserMetricsKey(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
__props__=None):
"""
Class representing Traffic Manager User Metrics.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[TrafficManagerUserMetricsKeyArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Class representing Traffic Manager User Metrics.
:param str resource_name: The name of the resource.
:param TrafficManagerUserMetricsKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TrafficManagerUserMetricsKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TrafficManagerUserMetricsKeyArgs.__new__(TrafficManagerUserMetricsKeyArgs)
__props__.__dict__["key"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20180401:TrafficManagerUserMetricsKey"), pulumi.Alias(type_="azure-native:network:TrafficManagerUserMetricsKey"), pulumi.Alias(type_="azure-nextgen:network:TrafficManagerUserMetricsKey"), pulumi.Alias(type_="azure-native:network/v20180801:TrafficManagerUserMetricsKey"), pulumi.Alias(type_="azure-nextgen:network/v20180801:TrafficManagerUserMetricsKey")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(TrafficManagerUserMetricsKey, __self__).__init__(
'azure-native:network/v20180401:TrafficManagerUserMetricsKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TrafficManagerUserMetricsKey':
"""
Get an existing TrafficManagerUserMetricsKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TrafficManagerUserMetricsKeyArgs.__new__(TrafficManagerUserMetricsKeyArgs)
__props__.__dict__["key"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return TrafficManagerUserMetricsKey(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def key(self) -> pulumi.Output[Optional[str]]:
"""
The key returned by the User Metrics operation.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
The type of the resource. Ex- Microsoft.Network/trafficManagerProfiles.
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | morrell.noreply@github.com |
ae4858ddf9b7ff0bb77dffc2a48b39cb7643782b | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/domain/KoubeiQualityTestCloudacptItemQueryModel.py | bbc02ad96b03b678a8fa106974b1af53977ddacb | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 2,209 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiQualityTestCloudacptItemQueryModel(object):
def __init__(self):
self._activity_id = None
self._batch_id = None
self._pid = None
self._uid = None
@property
def activity_id(self):
return self._activity_id
@activity_id.setter
def activity_id(self, value):
self._activity_id = value
@property
def batch_id(self):
return self._batch_id
@batch_id.setter
def batch_id(self, value):
self._batch_id = value
@property
def pid(self):
return self._pid
@pid.setter
def pid(self, value):
self._pid = value
@property
def uid(self):
return self._uid
@uid.setter
def uid(self, value):
self._uid = value
def to_alipay_dict(self):
params = dict()
if self.activity_id:
if hasattr(self.activity_id, 'to_alipay_dict'):
params['activity_id'] = self.activity_id.to_alipay_dict()
else:
params['activity_id'] = self.activity_id
if self.batch_id:
if hasattr(self.batch_id, 'to_alipay_dict'):
params['batch_id'] = self.batch_id.to_alipay_dict()
else:
params['batch_id'] = self.batch_id
if self.pid:
if hasattr(self.pid, 'to_alipay_dict'):
params['pid'] = self.pid.to_alipay_dict()
else:
params['pid'] = self.pid
if self.uid:
if hasattr(self.uid, 'to_alipay_dict'):
params['uid'] = self.uid.to_alipay_dict()
else:
params['uid'] = self.uid
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiQualityTestCloudacptItemQueryModel()
if 'activity_id' in d:
o.activity_id = d['activity_id']
if 'batch_id' in d:
o.batch_id = d['batch_id']
if 'pid' in d:
o.pid = d['pid']
if 'uid' in d:
o.uid = d['uid']
return o
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
2aa1abfe9c766dd05c3b82d22bb3d5e20a3d7ec2 | 039c2e60b859d88bb686c0e66bc6dab2ab723b8e | /apps/door_limits/migrations/0003_auto_20191024_1554.py | ec0156b7c63c8b53f585a6b765b18fe2dccfcca7 | [] | no_license | ccc-0/ECS | 850613971e4c6fd9cbb6ddcbe2c51b5285d622ac | ef4d69cb4c6fd1b1bbd40ba9c754c8e50c56d8ee | refs/heads/master | 2020-09-13T21:50:42.033517 | 2020-02-13T03:47:10 | 2020-02-13T03:47:10 | 222,913,137 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # Generated by Django 2.2.6 on 2019-10-24 15:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('door_limits', '0002_door_approval_user_id'),
]
operations = [
migrations.AlterField(
model_name='door_approval',
name='door_audittime',
field=models.DateTimeField(null=True, verbose_name='审批时间'),
),
]
| [
"1056179315@qq.com"
] | 1056179315@qq.com |
e287619a2a0981b72f43ac0898537ce82c00e73f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /BDcaZaqCuBCczeKZL_3.py | 8357b7a3a4aac0e77ac9886cc061630253f03962 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py |
def arrow(num):
a = [">" * i for i in range(1, num + 1)]
return a + a[-1 - num % 2::-1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
394692ee3fefc7b275c194eaa0d52e5d4fbe8196 | a35481b94be5a95a6cb3edc69e23180884a40e5c | /docs/source/conf.py | 348a175a050c6dc0c399465212ec1d860f815905 | [] | no_license | damian-codematic/python3-boilerplate | 1b9452662878f1b1039a4993268b2753b65cff10 | b502f6b65dfd2576ef1830ca7b5061d3d9205e1a | refs/heads/master | 2022-11-05T08:22:13.597150 | 2018-05-02T07:47:11 | 2018-05-02T07:47:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,282 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# package_name documentation build configuration file, created by
# sphinx-quickstart on Wed May 2 15:02:32 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'package_name'
copyright = '2018, Seung Jae (Ryan) Lee'
author = 'Seung Jae (Ryan) Lee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'package_namedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'package_name.tex', 'package_name Documentation',
'Seung Jae (Ryan) Lee', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'package_name', 'package_name Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'package_name', 'package_name Documentation',
author, 'package_name', 'One line description of project.',
'Miscellaneous'),
]
| [
"seungjaeryanlee@gmail.com"
] | seungjaeryanlee@gmail.com |
99ddc35394b2c713a9f3db7196b74ee57aae18a7 | 8dc64db8a0d7ddb8778c8eae2dac9075b9a90e2b | /env/Lib/site-packages/google/protobuf/__init__.py | 22a6072ffd23516dc785ef07d521ab6ec4a616e9 | [
"MIT"
] | permissive | theXtroyer1221/Cloud-buffer | c3992d1b543a1f11fde180f6f7d988d28b8f9684 | 37eabdd78c15172ea980b59d1aff65d8628cb845 | refs/heads/master | 2022-11-22T22:37:10.453923 | 2022-02-25T01:15:57 | 2022-02-25T01:15:57 | 240,901,269 | 1 | 1 | MIT | 2022-09-04T14:48:02 | 2020-02-16T14:00:32 | HTML | UTF-8 | Python | false | false | 1,705 | py | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright 2007 Google Inc. All Rights Reserved.
__version__ = '3.15.1'
| [
"jaddou2005@gmail.com"
] | jaddou2005@gmail.com |
99917f155da4e2422ceaad95199050b2dcce42ba | 2052a12f0ab7a827d6427b5533b6ae29847dcc3b | /auto_commit.py | 62ed14d9722982e710ac10eb7f581bae2986a7e1 | [
"MIT"
] | permissive | cleiveliu/leetcodecn | 7db7af4da18e62bd592afc9f81dfa4aab46adced | 618a4b63a9cd055f1782903e860e9a93dfd30fc9 | refs/heads/master | 2020-08-28T23:44:10.102451 | 2020-07-19T15:26:45 | 2020-07-19T15:26:45 | 217,856,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | """
auto commit git and push to master with a git path and a optional commit message.
usage:
put this file in your git project dir and run it or \
script.py [-p|-pathname] filename [-m|-message] message
"""
import sys
import os
class Args:
def __init__(
self, pathname=os.path.dirname(__file__), commit_message="auto commit"
):
self.pathname = pathname
self.commit_message = commit_message
def __repr__(self):
return "Args(pathname={}, commit_message={})".format(
self.pathname, self.commit_message
)
def _exit():
print(__doc__)
sys.exit(1)
def perse_args():
args = sys.argv[1:]
args = list(map(lambda x: x.lower(), args))
theArgs = Args()
index = 0
if index < len(args):
if args[index] in ("-p", "-pathname"):
if index + 1 < len(args):
theArgs.pathname = args[index + 1]
index += 2
else:
_exit()
if index < len(args):
if args[index] in ("-m", "-message", "--m"):
if index + 1 < len(args):
theArgs.commit_massage = args[index + 1]
index += 2
else:
_exit()
else:
_exit()
if index < len(args):
_exit()
return theArgs
def execute(args: Args):
os.chdir(args.pathname)
os.system("git add .")
os.system('git commit -m "{}"'.format(args.commit_massage))
os.system("git push")
if __name__ == "__main__":
args = perse_args()
print(f"args:\n{args}")
execute(args)
| [
"cleiveliu1@gmail.com"
] | cleiveliu1@gmail.com |
7c7528acff2e58560608a00d39c207443eb6d648 | be37e5a350ef9cd4e1742c321cde206434593c27 | /test/integration/query_block.py | ecbfe177a112065b729795a188b1eb0c09390e2d | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | points-org/fabric-sdk-py | 9b8e763dc9e3e8e3710fb35fedd2e44ddc682126 | d689032772873027938599b8e54a676bf083be1f | refs/heads/master | 2021-07-01T15:13:11.688343 | 2020-09-08T11:28:05 | 2020-09-14T03:53:33 | 162,869,379 | 0 | 0 | Apache-2.0 | 2019-08-14T06:12:00 | 2018-12-23T06:58:31 | Python | UTF-8 | Python | false | false | 4,789 | py | # Copyright IBM ALL Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
import logging
from time import sleep
from hfc.fabric.peer import create_peer
from hfc.fabric.transaction.tx_context import create_tx_context
from hfc.fabric.transaction.tx_proposal_request import create_tx_prop_req, \
CC_INVOKE, CC_TYPE_GOLANG, CC_INSTANTIATE, CC_INSTALL, TXProposalRequest
from hfc.util.crypto.crypto import ecies
from hfc.util.utils import build_tx_req, send_transaction
from test.integration.utils import get_peer_org_user,\
BaseTestCase
from test.integration.config import E2E_CONFIG
from test.integration.e2e_utils import build_channel_request,\
build_join_channel_req
from queue import Queue
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
test_network = E2E_CONFIG['test-network']
CC_PATH = 'github.com/example_cc'
CC_NAME = 'example_cc'
CC_VERSION = '1.0'
class QueryBlockTest(BaseTestCase):
def invoke_chaincode(self):
self.channel = self.client.new_channel(self.channel_name)
org1 = "org1.example.com"
peer_config = test_network['org1.example.com']['peers']['peer0']
tls_cacerts = peer_config['tls_cacerts']
opts = (('grpc.ssl_target_name_override',
peer_config['server_hostname']),)
endpoint = peer_config['grpc_request_endpoint']
self.org1_peer = create_peer(endpoint=endpoint,
tls_cacerts=tls_cacerts,
opts=opts)
self.org1_admin = get_peer_org_user(org1,
"Admin",
self.client.state_store)
crypto = ecies()
tran_prop_req_install = create_tx_prop_req(
prop_type=CC_INSTALL,
cc_path=CC_PATH,
cc_type=CC_TYPE_GOLANG,
cc_name=CC_NAME,
cc_version=CC_VERSION)
tx_context_install = create_tx_context(
self.org1_admin,
crypto,
tran_prop_req_install)
args_dep = ['a', '200', 'b', '300']
tran_prop_req_dep = create_tx_prop_req(
prop_type=CC_INSTANTIATE,
cc_type=CC_TYPE_GOLANG,
cc_name=CC_NAME,
cc_version=CC_VERSION,
args=args_dep,
fcn='init')
tx_context_dep = create_tx_context(self.org1_admin,
crypto,
tran_prop_req_dep)
args = ['a', 'b', '100']
tran_prop_req = create_tx_prop_req(prop_type=CC_INVOKE,
cc_type=CC_TYPE_GOLANG,
cc_name=CC_NAME,
cc_version=CC_VERSION,
fcn='invoke',
args=args)
tx_context = create_tx_context(self.org1_admin, crypto, tran_prop_req)
request = build_channel_request(self.client,
self.channel_tx,
self.channel_name)
self.client._create_channel(request)
sleep(5)
join_req = build_join_channel_req(org1, self.channel, self.client)
self.channel.join_channel(join_req)
sleep(5)
self.client.send_install_proposal(tx_context_install, [self.org1_peer])
sleep(5)
res = self.channel.send_instantiate_proposal(tx_context_dep,
[self.org1_peer])
sleep(5)
tran_req = build_tx_req(res)
send_transaction(self.channel.orderers, tran_req, tx_context)
sleep(5)
tx_context_tx = create_tx_context(self.org1_admin,
crypto,
TXProposalRequest())
res = self.channel.send_tx_proposal(tx_context, [self.org1_peer])
tran_req = build_tx_req(res)
sleep(5)
send_transaction(self.channel.orderers, tran_req, tx_context_tx)
    def test_query_block_success(self):
        """Query block "1" from peer0 and expect an HTTP-200 style response.

        Runs the full invoke fixture first so the channel actually contains
        a committed block, then subscribes to the rx-style query response
        and blocks (up to 10 s) until either a result or an error arrives.
        """
        self.invoke_chaincode()
        # Signing context for the query proposal (no chaincode args needed).
        tx_context = create_tx_context(self.org1_admin,
                                       ecies(),
                                       TXProposalRequest())
        # Block number is passed as a string per the query_block API.
        response = self.channel.query_block(tx_context,
                                            [self.org1_peer],
                                            "1")
        # Single-slot queue: both success and error paths push into it, so
        # q.get() below wakes on whichever fires first.
        q = Queue(1)
        response.subscribe(on_next=lambda x: q.put(x),
                           on_error=lambda x: q.put(x))
        res = q.get(timeout=10)
        logger.debug(res[0][0][0])
        # The first proposal response from the first peer must report 200.
        self.assertEqual(res[0][0][0].response.status, 200)
| [
"dixingxu@gmail.com"
] | dixingxu@gmail.com |
e882965dc976548eb945b75960f6b5fca4d2bc1f | 7769cb512623c8d3ba96c68556b2cea5547df5fd | /configs/carafe/faster_rcnn_r50_fpn_carafe_1x.py | 94c8a0fc1a56bc1a6601421de7e9a46df277b5d6 | [
"MIT"
] | permissive | JialeCao001/D2Det | 0e49f4c76e539d574e46b02f278242ca912c31ea | a76781ab624a1304f9c15679852a73b4b6770950 | refs/heads/master | 2022-12-05T01:00:08.498629 | 2020-09-04T11:33:26 | 2020-09-04T11:33:26 | 270,723,372 | 312 | 88 | MIT | 2020-07-08T23:53:23 | 2020-06-08T15:37:35 | Python | UTF-8 | Python | false | false | 5,751 | py | # model settings
# model settings
# Faster R-CNN with a ResNet-50 backbone and a CARAFE-upsampling FPN neck,
# trained with the standard 1x (12-epoch) COCO schedule.
model = dict(
    type='FasterRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        style='pytorch'),
    neck=dict(
        type='FPN_CARAFE',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        start_level=0,
        end_level=-1,
        norm_cfg=None,
        activation=None,
        order=('conv', 'norm', 'act'),
        upsample_cfg=dict(
            type='carafe',
            up_kernel=5,
            up_group=1,
            encoder_kernel=3,
            encoder_dilation=1,
            compressed_channels=64)),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=81,
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
    # soft-nms is also supported for rcnn testing
    # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
    )
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=64),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# Fix: `evaluation` used to be assigned twice; the later plain
# `dict(interval=1)` silently overwrote this one and dropped the
# `metric='bbox'` setting. Keep a single, complete definition.
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_carafe_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"connor@tju.edu.cn"
] | connor@tju.edu.cn |
051cdb1c37fae845be8313b348917477fe0c38b2 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc026/A/4781959.py | 05338ad4307124120e6d44e3642dc59bfc7ff9e1 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | def main():
n, a, b = map(int, input().split())
nb = min(n, 5)
na = n - nb
r = b * nb + a * na
print(r)
main() | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
a4b2b2d69ee2f1995a73f520d25aa4cd34320ae2 | 3db2fcd1a34ae7b22225029587369f49424457dd | /classifier_alignment/AnnotationLoader.py | ce50dc114c4bb171a9b8ded4eeaff6235048fe87 | [] | no_license | pombredanne/realigner | 7f0fdfdf42f757fead45cdeb5ea2901c4965e944 | b0c32cace20dd720c7609f009d86846d9ecb750f | refs/heads/master | 2021-01-18T03:57:30.977009 | 2014-05-06T09:35:46 | 2014-05-06T09:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,277 | py | import re
__author__ = 'michal'
from hmm.HMMLoader import HMMLoader
import track
from tools.intervalmap import intervalmap
from classifier_alignment.AnnotationConfig import register as register_annotations
import constants
class AnnotationLoader:
    """Loads per-sequence BED annotations referenced by an HMM model file.

    Configured with a pair of regular expressions that select the "x" and
    "y" sequences out of a model's sequence names.
    """

    def __init__(self, sequence_regexp, loader=None):
        """
        @param sequence_regexp: pair of regexps matching the x and y
            sequence names of a model
        @param loader: optional model loader; when omitted an HMMLoader with
            the annotation types registered is created
        """
        if loader is None:
            loader = HMMLoader()
            register_annotations(loader)
        # Bug fix: a caller-supplied loader used to be silently ignored,
        # leaving self.loader unset and get_annotations() broken.
        self.loader = loader
        self.x_regexp = sequence_regexp[0]
        self.y_regexp = sequence_regexp[1]

    @staticmethod
    def get_annotation_at(annotations, i):
        """Return a dict with the value of every annotation at position i.

        @param annotations: mapping of annotation name -> indexable values,
            or None (yields an empty dict)
        @param i: position to sample
        """
        if annotations is None:
            return {}
        return {key: annotations[key][i] for key in annotations}

    def _intervals_to_interval_map(self, intervals, offset):
        """
        Converts intervals from track to intervalmap, for searching.
        Currently supports binary annotations only: positions covered by an
        interval map to 1, everything else to 0.
        """
        m = intervalmap()
        m[:] = 0
        for interval in intervals:
            # NOTE(review): fields 1 and 2 of each row are used as
            # start/end — confirm against the track row layout.
            m[interval[1] + offset:interval[2] + offset] = 1
        return m

    def _get_annotation_from_bed(self, fname, offset):
        """Read intervals from a BED file into an intervalmap."""
        try:
            with track.load(fname) as ann:
                ann = ann.read(fields=['start', 'end'])
                intervals = self._intervals_to_interval_map(ann, offset)
        except Exception:
            # Best effort: a missing or unreadable BED file yields an
            # all-zero annotation instead of failing the whole load.
            intervals = self._intervals_to_interval_map([], 0)
        return intervals

    def _get_sequence_annotations(
        self,
        annotations,
        sequence_annotations_config
    ):
        """Return the intervalmap of every annotation for one sequence."""
        return {
            annotation: self._get_annotation_from_bed(
                *sequence_annotations_config[annotation]
            )
            for annotation in annotations
        }

    def _get_seq_name(self, names, regexp):
        """Return the single name in `names` that matches `regexp`.

        Raises RuntimeError when zero or multiple names match.
        """
        r = re.compile(regexp)
        matches = [name for name in names if r.match(name)]
        if len(matches) != 1:
            raise RuntimeError(
                'Cannot get name for regexp', regexp, '. Found', len(matches), 'matches.'
            )
        return matches[0]

    def get_annotations_from_model(self, model):
        """Return (annotations, annotations_x, annotations_y) for a model.

        Returns (None, None, None) when annotations are globally disabled.
        """
        if not constants.annotations_enabled:
            return None, None, None
        if model is None:
            raise RuntimeError('No annotation model!')
        names = model.sequences.keys()
        x_name = self._get_seq_name(names, self.x_regexp)
        y_name = self._get_seq_name(names, self.y_regexp)
        annotations = model.annotations
        annotations_x = self._get_sequence_annotations(
            annotations, model.sequences[x_name]
        )
        annotations_y = self._get_sequence_annotations(
            annotations, model.sequences[y_name]
        )
        return annotations, annotations_x, annotations_y

    def get_annotations(self, fname):
        """Load a model file with self.loader and return its annotations."""
        model = self.loader.load(fname)
        return self.get_annotations_from_model(model)
| [
"mhozza@gmail.com"
] | mhozza@gmail.com |
5debebbb9c744aa5a364da2377b51f878320f1b7 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/amount_infomation_v2.py | 32205b516525da648718474ca956d06fdb23c7ca | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 8,573 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AmountInfomationV2:
    """Billing amount breakdown for an order.

    Generated-model conventions: attribute metadata lives in
    ``openapi_types`` / ``attribute_map`` and every field is exposed as a
    read/write property backed by a private ``_``-prefixed slot.
    """

    sensitive_list = []

    openapi_types = {
        'discounts': 'list[DiscountItemV2]',
        'flexipurchase_coupon_amount': 'float',
        'coupon_amount': 'float',
        'stored_card_amount': 'float',
        'commission_amount': 'float',
        'consumed_amount': 'float'
    }

    attribute_map = {
        'discounts': 'discounts',
        'flexipurchase_coupon_amount': 'flexipurchase_coupon_amount',
        'coupon_amount': 'coupon_amount',
        'stored_card_amount': 'stored_card_amount',
        'commission_amount': 'commission_amount',
        'consumed_amount': 'consumed_amount'
    }

    def __init__(self, discounts=None, flexipurchase_coupon_amount=None, coupon_amount=None, stored_card_amount=None, commission_amount=None, consumed_amount=None):
        """AmountInfomationV2

        :param discounts: fee items (list of DiscountItemV2)
        :param flexipurchase_coupon_amount: cash-coupon amount (reserved)
        :param coupon_amount: voucher amount
        :param stored_card_amount: stored-value-card amount (reserved)
        :param commission_amount: handling fee (refund orders only)
        :param consumed_amount: consumed amount (refund orders only)
        """
        supplied = (
            ('discounts', discounts),
            ('flexipurchase_coupon_amount', flexipurchase_coupon_amount),
            ('coupon_amount', coupon_amount),
            ('stored_card_amount', stored_card_amount),
            ('commission_amount', commission_amount),
            ('consumed_amount', consumed_amount),
        )
        # Every field starts out as None in its private slot ...
        for attr_name, _ in supplied:
            setattr(self, '_' + attr_name, None)
        self.discriminator = None
        # ... and only values that were actually supplied are routed
        # through the public property setters.
        for attr_name, attr_value in supplied:
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def discounts(self):
        """Fee items of the order (list of DiscountItemV2)."""
        return self._discounts

    @discounts.setter
    def discounts(self, value):
        self._discounts = value

    @property
    def flexipurchase_coupon_amount(self):
        """Cash-coupon amount (reserved)."""
        return self._flexipurchase_coupon_amount

    @flexipurchase_coupon_amount.setter
    def flexipurchase_coupon_amount(self, value):
        self._flexipurchase_coupon_amount = value

    @property
    def coupon_amount(self):
        """Voucher amount."""
        return self._coupon_amount

    @coupon_amount.setter
    def coupon_amount(self, value):
        self._coupon_amount = value

    @property
    def stored_card_amount(self):
        """Stored-value-card amount (reserved)."""
        return self._stored_card_amount

    @stored_card_amount.setter
    def stored_card_amount(self, value):
        self._stored_card_amount = value

    @property
    def commission_amount(self):
        """Handling fee (present on refund orders only)."""
        return self._commission_amount

    @commission_amount.setter
    def commission_amount(self, value):
        self._commission_amount = value

    @property
    def consumed_amount(self):
        """Consumed amount (present on refund orders only)."""
        return self._consumed_amount

    @consumed_amount.setter
    def consumed_amount(self, value):
        self._consumed_amount = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            elif attr in self.sensitive_list:
                # Mask fields flagged as sensitive.
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when they are of the same type and all
        their attributes match."""
        return isinstance(other, AmountInfomationV2) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Negation of __eq__."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
dad547f66f869ac30955e1734c0fdf2097890e2c | bd2a3d466869e0f8cb72075db7daec6c09bbbda1 | /sdk/network/azure-mgmt-network/azure/mgmt/network/_operations_mixin.py | 264c78dbcd0d5ffb95a36599de146d425e3d3237 | [
"MIT"
] | permissive | samvaity/azure-sdk-for-python | 7e8dcb2d3602d81e04c95e28306d3e2e7d33b03d | f2b072688d3dc688fed3905c558cff1fa0849b91 | refs/heads/master | 2021-08-11T21:14:29.433269 | 2019-07-19T17:40:10 | 2019-07-19T17:40:10 | 179,733,339 | 0 | 1 | MIT | 2019-04-05T18:17:43 | 2019-04-05T18:17:42 | null | UTF-8 | Python | false | false | 7,825 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest import Serializer, Deserializer
class NetworkManagementClientOperationsMixin(object):
    """Multi-api-version dispatch mixin.

    Each operation resolves the api version configured for itself (via
    ``self._get_api_version``), imports the matching versioned mixin
    module, clones this client's plumbing onto a fresh instance of it and
    delegates the call. Unknown versions raise NotImplementedError.
    """

    def check_dns_name_availability(self, location, domain_name_label, custom_headers=None, raw=False, **operation_config):
        """Checks whether a domain name in the cloudapp.azure.com zone is
        available for use.

        :param location: The location of the domain name.
        :type location: str
        :param domain_name_label: The domain name to be verified. It must
         conform to the following regular expression:
         ^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
        :type domain_name_label: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: DnsNameAvailabilityResult or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2019_04_01.models.DnsNameAvailabilityResult or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Map the configured api version onto its versioned operations module.
        api_version = self._get_api_version('check_dns_name_availability')
        if api_version == '2015-06-15':
            from .v2015_06_15.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2016-09-01':
            from .v2016_09_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2016-12-01':
            from .v2016_12_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2017-03-01':
            from .v2017_03_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2017-06-01':
            from .v2017_06_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2017-08-01':
            from .v2017_08_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2017-09-01':
            from .v2017_09_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2017-10-01':
            from .v2017_10_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2017-11-01':
            from .v2017_11_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-01-01':
            from .v2018_01_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-02-01':
            from .v2018_02_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-04-01':
            from .v2018_04_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-06-01':
            from .v2018_06_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-07-01':
            from .v2018_07_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-08-01':
            from .v2018_08_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-10-01':
            from .v2018_10_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-11-01':
            from .v2018_11_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-12-01':
            from .v2018_12_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-02-01':
            from .v2019_02_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-04-01':
            from .v2019_04_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        else:
            raise NotImplementedError("APIVersion {} is not available".format(api_version))
        # Clone this client's transport/config and the version-specific
        # serializers onto a fresh mixin instance, then delegate.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance.config = self.config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.check_dns_name_availability(location, domain_name_label, custom_headers, raw, **operation_config)

    def supported_security_providers(self, resource_group_name, virtual_wan_name, custom_headers=None, raw=False, **operation_config):
        """Gives the supported security providers for the virtual wan.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN for which
         supported security providers are needed.
        :type virtual_wan_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualWanSecurityProviders or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2019_04_01.models.VirtualWanSecurityProviders or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorException<azure.mgmt.network.v2019_04_01.models.ErrorException>`
        """
        # This operation only exists from api version 2018-08-01 onwards.
        api_version = self._get_api_version('supported_security_providers')
        if api_version == '2018-08-01':
            from .v2018_08_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-10-01':
            from .v2018_10_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-11-01':
            from .v2018_11_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-12-01':
            from .v2018_12_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-02-01':
            from .v2019_02_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-04-01':
            from .v2019_04_01.operations import NetworkManagementClientOperationsMixin as OperationClass
        else:
            raise NotImplementedError("APIVersion {} is not available".format(api_version))
        # Clone client plumbing and delegate (same pattern as above).
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance.config = self.config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.supported_security_providers(resource_group_name, virtual_wan_name, custom_headers, raw, **operation_config)
| [
"noreply@github.com"
] | samvaity.noreply@github.com |
c8d9acb81ae074a09b5bba7f60d7cb919bfd6a0b | 5c099927aedc6fdbc515f40ff543c65b3bf4ec67 | /algorithms/path-sum-iii/src/Solution.py | ad4282344a76d00167c30841afc494b03849c924 | [] | no_license | bingzhong-project/leetcode | 7a99cb6af1adfbd9bb1996a7f66a65679053c478 | ba82e7d94840b3fec272e4c5f82e3a2cfe4b0505 | refs/heads/master | 2020-04-15T09:27:33.979519 | 2020-03-10T03:43:07 | 2020-03-10T03:43:07 | 164,550,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # Definition for a binary tree node.
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        # Children start out detached; callers link them explicitly.
        self.val, self.left, self.right = x, None, None
class Solution:
    def pathSum(self, root: 'TreeNode', sum: 'int') -> 'int':
        """Count downward paths whose node values add up to ``sum``.

        A path may start and end at any node but must always go from a
        parent towards a child. Uses the prefix-sum technique: while
        walking root-to-leaf, the number of paths ending at the current
        node equals the number of ancestors whose running prefix sum is
        ``running - sum``. This is O(n) time / O(h) extra space instead of
        the quadratic "collect every suffix sum per subtree" approach.
        """
        from collections import defaultdict

        prefix_counts = defaultdict(int)
        prefix_counts[0] = 1  # the empty prefix: paths starting at the root

        def dfs(node, running):
            if node is None:
                return 0
            running += node.val
            # Paths ending at `node`: ancestors with prefix running - sum.
            count = prefix_counts[running - sum]
            prefix_counts[running] += 1
            count += dfs(node.left, running)
            count += dfs(node.right, running)
            # Backtrack so siblings don't see this branch's prefix.
            prefix_counts[running] -= 1
            return count

        return dfs(root, 0)
| [
"zhongyongbin@foxmail.com"
] | zhongyongbin@foxmail.com |
98c96ab2dc8528ebf464d9dbbcfed74f52d757fc | 6928300db08139784e1fe27d1b6f2087c0cb89b7 | /cristianoronaldoyopmailcom_228/urls.py | e01cffb873469ce075991a534e642f1549dbd762 | [] | no_license | payush/cristianoronaldoyopmailcom-228 | 6a6df334d4402bc08294de8a1d340cee1af56726 | 8462bdc537a14e62d010b8350fa2cbad6a913674 | refs/heads/master | 2020-03-23T12:31:58.272363 | 2018-07-19T10:37:13 | 2018-07-19T10:37:13 | 141,564,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | """cristianoronaldoyopmailcom_228 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Root URL routes; Django tries patterns in order, top to bottom.
urlpatterns = [
    url('', include('home.urls')),  # site pages served by the "home" app
    url(r'^accounts/', include('allauth.urls')),  # django-allauth account flows
    url(r'^api/v1/', include('home.api.v1.urls')),  # versioned API endpoints
    url(r'^admin/', admin.site.urls),  # built-in Django admin
]
| [
"ayushpuroheet@gmail.com"
] | ayushpuroheet@gmail.com |
590ee220a2c115301a03c113221b088ef9621b15 | 753cd066a9bd26b6c37c8d53a86c7a9c659ec18c | /tutorials/tutorials/popxl/2_custom_optimiser/mnist_template.py | 7e03ec57cf9d9988438c713cb22828ce52192172 | [
"MIT"
] | permissive | graphcore/examples | ac872015808ed2a913d4d7bf0d63202ce15ebbae | e2f834dd60e7939672c1795b4ac62e89ad0bca49 | refs/heads/master | 2023-08-05T02:08:12.341836 | 2023-07-27T11:13:10 | 2023-07-27T11:13:10 | 143,977,106 | 311 | 80 | MIT | 2023-09-11T16:42:56 | 2018-08-08T07:29:17 | Python | UTF-8 | Python | false | false | 11,250 | py | # Copyright (c) 2022 Graphcore Ltd. All rights reserved.
from typing import Dict, Mapping, Optional, Union
import argparse
from functools import partial
import numpy as np
import torch
import torchvision
from tqdm import tqdm
import popxl
import popxl_addons as addons
import popxl.ops as ops
np.random.seed(42)
def get_mnist_data(test_batch_size: int, batch_size: int):
    """Return (training, validation) MNIST DataLoaders.

    Both splits share the same preprocessing and are shuffled with
    incomplete final batches dropped.
    """
    transform = torchvision.transforms.Compose(
        [
            torchvision.transforms.ToTensor(),
            # mean and std computed on the training set.
            torchvision.transforms.Normalize((0.1307,), (0.3081,)),
        ]
    )

    def make_loader(train: bool, size: int):
        dataset = torchvision.datasets.MNIST(
            "~/.torch/datasets",
            train=train,
            download=True,
            transform=transform,
        )
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=size,
            shuffle=True,
            drop_last=True,
        )

    return make_loader(True, batch_size), make_loader(False, test_batch_size)
def accuracy(predictions: np.ndarray, labels: np.ndarray):
    """Percentage of rows where argmax(predictions) equals the label.

    `labels` is a torch tensor (it is detached and converted to numpy).
    """
    predicted_classes = np.argmax(predictions, axis=-1).flatten()
    target_classes = labels.detach().numpy().flatten()
    return 100.0 * np.mean(predicted_classes == target_classes)
class Linear(addons.Module):
    """Affine layer computing ``x @ W`` plus an optional bias."""

    def __init__(self, out_features: int, bias: bool = True):
        super().__init__()
        self.out_features = out_features
        self.bias = bias

    def build(self, x: popxl.Tensor) -> popxl.Tensor:
        # Weight state variable, initialised from N(0, 0.02) with shape
        # (in_features, out_features).
        weight = self.add_variable_input(
            "weight",
            partial(np.random.normal, 0, 0.02, (x.shape[-1], self.out_features)),
            x.dtype,
        )
        out = x @ weight
        if not self.bias:
            return out
        # Zero-initialised bias state variable.
        bias = self.add_variable_input("bias", partial(np.zeros, out.shape[-1]), x.dtype)
        return out + bias
class Net(addons.Module):
    """Fully-connected MNIST classifier: three 512-wide GELU layers + logits."""

    def __init__(self, cache: Optional[addons.GraphCache] = None):
        super().__init__(cache=cache)
        self.fc1 = Linear(512)
        self.fc2 = Linear(512)
        self.fc3 = Linear(512)
        self.fc4 = Linear(10)

    def build(self, x: popxl.Tensor):
        # Flatten 28x28 images to vectors, then apply the GELU stack.
        hidden = x.reshape((-1, 28 * 28))
        for layer in (self.fc1, self.fc2, self.fc3):
            hidden = ops.gelu(layer(hidden))
        # Final layer produces raw class logits (no activation).
        return self.fc4(hidden)
"""
Adam optimiser.
Defines Adam update step for a single variable
"""
class Adam(addons.Module):
    """Adam optimiser: defines the update step for a single variable.

    The graph mutates `weight` (and its optimiser state) in place.
    """

    # We need to specify `in_sequence` because a lot of operations are in-place
    # and their order shouldn't be rearranged
    @popxl.in_sequence()
    def build(
        self,
        weight: popxl.TensorByRef,
        grad: popxl.Tensor,
        *,
        lr: Union[float, popxl.Tensor],
        beta1: Union[float, popxl.Tensor] = 0.9,
        beta2: Union[float, popxl.Tensor] = 0.999,
        eps: Union[float, popxl.Tensor] = 1e-5,
        weight_decay: Union[float, popxl.Tensor] = 0.0,
        first_order_dtype: popxl.dtype = popxl.float16,
        bias_correction: bool = True,
    ):
        # Gradient estimator for the variable `weight` - same shape as the variable.
        # Exponential moving average of the gradient, decayed by beta1.
        first_order = self.add_variable_input(
            "first_order",
            partial(np.zeros, weight.shape),
            first_order_dtype,
            by_ref=True,
        )
        ops.var_updates.accumulate_moving_average_(first_order, grad, f=beta1)
        # Variance estimator for the variable `weight` - same shape as the variable.
        # Exponential moving average of grad**2, decayed by beta2, kept in fp32.
        second_order = self.add_variable_input(
            "second_order", partial(np.zeros, weight.shape), popxl.float32, by_ref=True
        )
        ops.var_updates.accumulate_moving_average_square_(second_order, grad, f=beta2)
        # Adam is a biased estimator: provide the step to correct bias.
        step = None
        if bias_correction:
            step = self.add_variable_input("step", partial(np.zeros, ()), popxl.float32, by_ref=True)
        # Calculate the weight increment with Adam heuristic.
        # Here we use the built-in `adam_updater`, but you can write your own.
        dw = ops.var_updates.adam_updater(
            first_order,
            second_order,
            weight=weight,
            weight_decay=weight_decay,
            time_step=step,
            beta1=beta1,
            beta2=beta2,
            epsilon=eps,
        )
        # in-place weight update: weight += (-lr)*dw
        ops.scaled_add_(weight, dw, b=-lr)
"""
Update all variables creating per-variable optimisers.
"""
def optimiser_step(
    variables,
    grads: Dict[popxl.Tensor, popxl.Tensor],
    optimiser: addons.Module,
    lr: popxl.float32 = 1e-3,
):
    """Apply one optimiser update, in place, to every variable.

    A per-variable optimiser graph (with its own state) is created and
    called with that variable and its gradient.
    """
    for name, variable in variables.named_tensors.items():
        # Build the optimiser graph and the factories for its state.
        opt_facts, opt_graph = optimiser.create_graph(
            variable, variable.spec, lr=lr, weight_decay=0.0, bias_correction=True
        )
        # Bind the freshly initialised state to the graph and run the
        # update; state and variable are passed by ref and mutated in place.
        opt_graph.bind(opt_facts.init()).call(variable, grads[variable])
def train(train_session, training_data, opts, input_streams, loss_stream):
    """Run the training loop for ``opts.epochs`` epochs, reporting loss."""
    batches_per_epoch = len(training_data)
    with train_session:
        for epoch in range(1, opts.epochs + 1):
            print(f"Epoch {epoch}/{opts.epochs}")
            progress = tqdm(training_data, total=batches_per_epoch)
            for data, labels in progress:
                # Map each host-to-device stream to its batch of host data.
                host_inputs: Mapping[popxl.HostToDeviceStream, np.ndarray] = dict(
                    zip(input_streams, [data.squeeze().float(), labels.int()])
                )
                loss = train_session.run(host_inputs)
                progress.set_description(f"Loss:{loss[loss_stream]:0.4f}")
def test(test_session, test_data, input_streams, out_stream):
    """Evaluate on the test set and print the mean batch accuracy (percent).

    Accuracy is computed on the host from the model outputs fetched through
    `out_stream`, using the `accuracy` helper.
    """
    nr_batches = len(test_data)
    sum_acc = 0.0
    with test_session:
        for data, labels in tqdm(test_data, total=nr_batches):
            # Same stream->data mapping as in train().
            inputs: Mapping[popxl.HostToDeviceStream, np.ndarray] = dict(
                zip(input_streams, [data.squeeze().float(), labels.int()])
            )
            output = test_session.run(inputs)
            sum_acc += accuracy(output[out_stream], labels)
    test_set_accuracy = sum_acc / len(test_data)
    print(f"Accuracy on test set: {test_set_accuracy:0.2f}%")
def train_program(opts):
    """Build the training IR (forward + autodiff backward + Adam update).

    Returns a tuple of (session, [input streams], variables, loss stream).
    The graph-construction order below matters: streams are declared before
    loads, the forward call info is needed both for the loss and for the
    backward graph's activations.
    """
    ir = popxl.Ir(replication=1)
    with ir.main_graph:
        # Create input streams from host to device
        img_stream = popxl.h2d_stream((opts.batch_size, 28, 28), popxl.float32, "image")
        img_t = ops.host_load(img_stream)  # load data
        label_stream = popxl.h2d_stream((opts.batch_size,), popxl.int32, "labels")
        labels = ops.host_load(label_stream, "labels")
        # Create forward graph
        facts, fwd_graph = Net().create_graph(img_t)
        # Create backward graph via autodiff transform
        bwd_graph = addons.autodiff(fwd_graph)
        # Initialise variables (weights)
        variables = facts.init()
        # Call the forward graph with call_with_info because we want to retrieve
        # information from the call site
        fwd_info = fwd_graph.bind(variables).call_with_info(img_t)
        x = fwd_info.outputs[0]  # forward output
        # Compute loss and starting gradient for backpropagation
        loss, dx = addons.ops.cross_entropy_with_grad(x, labels)
        # Setup a stream to retrieve loss values from the host
        loss_stream = popxl.d2h_stream(loss.shape, loss.dtype, "loss")
        ops.host_store(loss_stream, loss)
        # Retrieve activations from the forward graph
        activations = bwd_graph.grad_graph_info.inputs_dict(fwd_info)
        # Call the backward graph providing the starting value for
        # backpropagation and activations
        bwd_info = bwd_graph.call_with_info(dx, args=activations)
        # Adam optimiser, with cache
        grads_dict = bwd_graph.grad_graph_info.fwd_parent_ins_to_grad_parent_outs(fwd_info, bwd_info)
        optimiser = Adam(cache=True)
        optimiser_step(variables, grads_dict, optimiser, opts.lr)
    ir.num_host_transfers = 1
    return (
        popxl.Session(ir, "ipu_hw"),
        [img_stream, label_stream],
        variables,
        loss_stream,
    )
def test_program(opts):
    """Build the inference-only IR (forward pass, no gradients).

    Returns (session, [input stream], variables, output stream). Variables
    are freshly initialised here; the trained weights are copied in later
    by the caller.
    """
    ir = popxl.Ir()
    ir.replication_factor = 1
    with ir.main_graph:
        # Inputs
        in_stream = popxl.h2d_stream((opts.test_batch_size, 28, 28), popxl.float32, "image")
        in_t = ops.host_load(in_stream)
        # Create graphs
        facts, graph = Net().create_graph(in_t)
        # Initialise variables
        variables = facts.init()
        # Forward
        (outputs,) = graph.bind(variables).call(in_t)
        out_stream = popxl.d2h_stream(outputs.shape, outputs.dtype, "outputs")
        ops.host_store(out_stream, outputs)
    ir.num_host_transfers = 1
    return popxl.Session(ir, "ipu_hw"), [in_stream], variables, out_stream
def main():
    """Parse CLI options, train on MNIST, then evaluate on the test set.

    Trained weights are transferred from the training session to the test
    session via numpy arrays on the host.
    """
    parser = argparse.ArgumentParser(description="MNIST training in popxl.addons")
    parser.add_argument("--batch-size", type=int, default=8, help="batch size for training (default: 8)")
    parser.add_argument(
        "--test-batch-size",
        type=int,
        default=80,
        help="batch size for testing (default: 80)",
    )
    parser.add_argument("--epochs", type=int, default=1, help="number of epochs to train (default: 1)")
    parser.add_argument("--lr", type=float, default=1e-3, help="learning rate (default: 1e-3)")
    opts = parser.parse_args()
    training_data, test_data = get_mnist_data(opts.test_batch_size, opts.batch_size)
    train_session, train_input_streams, train_variables, loss_stream = train_program(opts)
    train(train_session, training_data, opts, train_input_streams, loss_stream)
    # get weights data : dictionary { train_session variables : tensor data (numpy) }
    train_vars_to_data = train_session.get_tensors_data(train_variables.tensors)
    # create test session
    test_session, test_input_streams, test_variables, out_stream = test_program(opts)
    # dictionary { train_session variables : test_session variables }
    train_vars_to_test_vars = train_variables.to_mapping(test_variables)
    # Create a dictionary { test_session variables : tensor data (numpy) }
    test_vars_to_data = {
        test_var: train_vars_to_data[train_var].copy() for train_var, test_var in train_vars_to_test_vars.items()
    }
    # Copy trained weights to the program, with a single host to device transfer at the end
    test_session.write_variables_data(test_vars_to_data)
    test(test_session, test_data, test_input_streams, out_stream)
if __name__ == "__main__":
    main()
| [
"adams@graphcore.ai"
] | adams@graphcore.ai |
b2456060afc71d8ae1bafe6a039a40981cd94970 | b8ddb0028579ba735bfde8de5e615884e05b012f | /jamaica/v1/lists/serializers.py | 8cf34ebb1a3a5f0804777c537e6d465b456aaf4d | [] | no_license | cohoe/jamaica | f4636eacd6a900de769641e3c3f60fe197be1999 | 0bf053e7b6db291b5aeb53fbd6f7f45082c9df9b | refs/heads/master | 2021-08-10T08:10:36.803415 | 2021-07-18T16:49:45 | 2021-07-18T16:49:45 | 235,926,691 | 2 | 0 | null | 2021-03-13T02:23:06 | 2020-01-24T02:24:22 | Python | UTF-8 | Python | false | false | 1,059 | py | from flask_restx import fields
from jamaica.v1.restx import api
from jamaica.v1.serializers import SearchResultBase
# Serializer for one entry of a list: which cocktail it refers to, an
# optional specific spec of that cocktail, and a highlight flag.
ListItemObject = api.model('ListItemObject', {
    'cocktail_slug': fields.String(attribute='cocktail_slug', description='Slug of the cocktail.'),
    'spec_slug': fields.String(attribute='spec_slug', description='Optional slug of the specific spec.', required=False),
    'highlight': fields.Boolean(attribute='highlight', description='Boolean of whether this is highlighted or not.')
})
# Serializer for a whole list: identifier, display name, and its items
# nested with the ListItemObject model above.
ListObject = api.model('ListObject', {
    'id': fields.String(attribute='id', description='ID of this list.'),
    'display_name': fields.String(attribute='display_name', description='Display name of this list.'),
    'items': fields.List(fields.Nested(ListItemObject), attribute='items'),
})
# Search-result serializer: extends SearchResultBase, reading the slug and
# display name from the search engine hit object ('hit.*' attributes).
ListSearchItem = api.inherit('ListSearchItem', SearchResultBase, {
    'slug': fields.String(attribute='hit.slug', description='This items slug.'),
    'display_name': fields.String(attribute='hit.display_name', description='This items display name.'),
})
| [
"grant@grantcohoe.com"
] | grant@grantcohoe.com |
a4d310d2b5b8002735888fb0537e58489cea744e | 99094cc79bdbb69bb24516e473f17b385847cb3a | /33.Search in Rotated Sorted Array/Solution.py | ca44fc1c7fe4e25beda59fa0010124fad406e966 | [] | no_license | simonxu14/LeetCode_Simon | 7d389bbfafd3906876a3f796195bb14db3a1aeb3 | 13f4595374f30b482c4da76e466037516ca3a420 | refs/heads/master | 2020-04-06T03:33:25.846686 | 2016-09-10T00:23:11 | 2016-09-10T00:23:11 | 40,810,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | __author__ = 'Simon'
class Solution(object):
    def search(self, nums, target):
        """
        Find `target` in a rotated sorted array in O(log n).

        At every step at least one half of [l, r] is sorted; check whether
        the target lies inside that sorted half and discard the other.

        :type nums: List[int]
        :type target: int
        :rtype: int  -- index of target, or -1 if not present
        """
        l = 0
        r = len(nums) - 1
        while l <= r:
            # Floor division keeps mid an int on both Python 2 and 3;
            # plain `/` yields a float on Python 3 and breaks indexing.
            mid = (r + l) // 2
            if nums[mid] == target:
                return mid
            if nums[l] <= nums[mid]:
                # Left half [l, mid] is sorted.
                if nums[l] <= target < nums[mid]:
                    r = mid - 1
                else:
                    l = mid + 1
            else:
                # Right half [mid, r] is sorted.
                if nums[mid] < target <= nums[r]:
                    l = mid + 1
                else:
                    r = mid - 1
        return -1
"simonxu14@gmail.com"
] | simonxu14@gmail.com |
1ba91cb7cb377d72057feb853079b30e95e00789 | b052410ce0cd5c7ae54af34e7f7f4a623f3f7b32 | /jlu/papers/prop_2012a.py | 8a689bb3cc2359211757795b8682c4310008e291 | [] | no_license | jluastro/JLU-python-code | b847936410d46cf73071300c217d2715cdf286de | db60fd0589aa7694101812224f4bdcdde0deb3ff | refs/heads/master | 2023-04-10T11:24:52.930029 | 2023-03-29T16:09:16 | 2023-03-29T16:09:16 | 3,509,766 | 9 | 7 | null | 2020-07-29T18:52:32 | 2012-02-22T00:08:20 | Python | UTF-8 | Python | false | false | 36,955 | py | import pylab as py
import numpy as np
import atpy
import asciidata
import math
import pdb
import pyfits
import pycurl
import coords
import os
from gcwork import objects
from pyraf import iraf
##################################################
#
# Upper Sco
#
##################################################
def upper_sco():
    """
    Characterize the Upper Sco target sample for the IRTF 2012A proposal.

    Reads a spectral-type-to-Teff lookup table and the Upper Sco sample
    (SIMBAD cross-matched), assigns each star an effective temperature,
    trims stars lacking spectral types or J/K photometry, and saves three
    figures (spectral type histogram, Teff vs. J "HR" diagram, and a
    J vs. J-K CMD) to rootDir. Returns the trimmed atpy table.
    """
    rootDir = '/u/jlu/doc/proposals/irtf/2012A/'
    # Read in a reference table for converting between
    # spectral types and effective temperatures.
    ref = atpy.Table(rootDir + 'Teff_SpT_table.txt', type='ascii')
    sp_type = np.array([ii[0] for ii in ref.col1])
    sp_class = np.array([float(ii[1:4]) for ii in ref.col1])
    sp_teff = ref.col2
    # Read in the upper sco table
    us = atpy.Table(rootDir + 'upper_sco_sample_simbad.txt', type='ascii')
    us_sp_type = np.array([ii[0] for ii in us.spectype])
    us_sp_class = np.zeros(len(us_sp_type), dtype=int)
    us_sp_teff = np.zeros(len(us_sp_type), dtype=int)
    for ii in range(len(us_sp_class)):
        # "~" marks a missing spectral type in the SIMBAD output.
        if (us_sp_type[ii] == "~"):
            us_sp_class[ii] = -1
        else:
            if ((len(us.spectype[ii]) < 2) or
                (us.spectype[ii][1].isdigit() == False)):
                us_sp_class[ii] = 5 # Arbitrarily assigned
            else:
                us_sp_class[ii] = us.spectype[ii][1]
            # Assign effective temperature from the closest subclass entry
            # of the same letter type in the lookup table.
            idx = np.where(us_sp_type[ii] == sp_type)[0]
            tdx = np.abs(us_sp_class[ii] - sp_class[idx]).argmin()
            us_sp_teff[ii] = sp_teff[idx[tdx]]
    # Trim out the ones that don't have spectral types and K-band
    # magnitudes for plotting purposes.
    idx = np.where((us_sp_type != "~") & (us.K != "~") & (us.J != "~"))[0]
    print 'Keeping %d of %d with spectral types and K mags.' % \
        (len(idx), len(us_sp_type))
    us.add_column('sp_type', us_sp_type)
    us.add_column('sp_class', us_sp_class)
    us.add_column('sp_teff', us_sp_teff)
    us = us.rows([idx])
    J = np.array(us.J[0], dtype=float)
    H = np.array(us.H[0], dtype=float)
    K = np.array(us.K[0], dtype=float)
    JKcolor = J - K
    # Get the unique spectral classes and count how many of each
    # we have in the sample.
    sp_type_uniq = np.array(['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'T'])
    sp_type_count = np.zeros(len(sp_type_uniq), dtype=int)
    sp_type_idx = []
    sp_type_J = np.zeros(len(sp_type_uniq), dtype=float)
    sp_type_JK = np.zeros(len(sp_type_uniq), dtype=float)
    for ii in range(len(sp_type_uniq)):
        idx = np.where(us.sp_type[0] == sp_type_uniq[ii])[0]
        sp_type_count[ii] = len(idx)
        sp_type_idx.append(idx)
        # Calc the mean J and J-K color for each spectral type
        # (only when there are at least 3 stars of that type).
        if len(idx) > 2:
            sp_type_J[ii] = J[idx].mean()
            sp_type_JK[ii] = JKcolor[idx].mean()
        print '%s %3d J = %4.1f J-K = %4.1f' % \
            (sp_type_uniq[ii], sp_type_count[ii],
             sp_type_J[ii], sp_type_JK[ii])
    # Plot up the distribution of spectral types
    xloc = np.arange(len(sp_type_uniq)) + 1
    py.figure(2, figsize=(10,6))
    py.clf()
    py.bar(xloc, sp_type_count, width=0.5)
    py.xticks(xloc+0.25, sp_type_uniq)
    py.xlim(0.5, xloc.max()+0.5)
    py.xlabel('Spectral Type')
    py.ylabel('Upper Sco Sample')
    py.savefig(rootDir + 'USco_spec_type_hist.png')
    # Plot Teff vs. J-band mag (axes flipped to HR-diagram orientation)
    py.figure(1)
    py.clf()
    py.semilogx(us.sp_teff[0], J, 'k.')
    rng = py.axis()
    py.axis([rng[1], rng[0], rng[3], rng[2]])
    py.xlabel('Teff (K, log scale)')
    py.ylabel('J Magnitude')
    py.xlim(40000, 1000)
    py.savefig(rootDir + 'USco_HR.png')
    # Color-magnitude diagram, with mean points per spectral type labeled.
    py.clf()
    py.plot(JKcolor, J, 'kx')
    idx = np.where(sp_type_J != 0)[0]
    py.plot(sp_type_JK[idx], sp_type_J[idx], 'bs')
    for ii in idx:
        py.text(sp_type_JK[ii]+0.05, sp_type_J[ii]-0.5, sp_type_uniq[ii], color='blue')
    rng = py.axis()
    py.axis([rng[0], rng[1], rng[3], rng[2]])
    py.xlabel('J - K (mag)')
    py.ylabel('J (mag)')
    py.xlim(-0.25, 1.75)
    py.savefig(rootDir + 'USco_CMD.png')
    idx = np.where(J < 11)[0]
    print '%d stars with J<11 and Teff = [%3d - %4d] K' % \
        (len(idx), us.sp_teff[0][idx].min(), us.sp_teff[0][idx].max())
    return us
def spex_snr(tint, mag, band):
    """
    Scale the fiducial SpeX signal-to-noise to a given magnitude and
    integration time.

    Reference point: SNR = 10 in 60 minutes at J=14.6, H=14.1, or K=13.7.
    SNR scales linearly with source flux (10**(dmag/2.5)) and with the
    square root of the integration time.

    tint -- integration time in minutes
    mag  -- source magnitude in the given band
    band -- one of 'J', 'H', 'K'
    """
    # Fiducial sensitivities: 10 sigma in 60 minutes at these magnitudes.
    ref_snr = 10.0
    ref_tint = 60.0
    ref_mag = {'J': 14.6, 'H': 14.1, 'K': 13.7}

    flux_ratio = 10**((ref_mag[band] - mag) / 2.5)
    time_factor = math.sqrt(tint / ref_tint)

    return ref_snr * flux_ratio * time_factor
##################################################
#
# CEMP-no origin
#
##################################################
# Elements whose [X/Fe] abundances appear as columns of the 'XFE' array in
# the Frebel (2010) compilation; used to index that array by element, e.g.
# frebel_elements.index('C') in load_frebel_table().
frebel_elements = ['Li', 'C', 'N', 'O', 'Na', 'Mg', 'Al', 'Si', 'Ca', 'Sc',
                   'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Zn', 'Ga',
                   'Ge', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Ru', 'Rh', 'Pd',
                   'Ag', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Sm', 'Eu', 'Gd',
                   'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Os',
                   'Ir', 'Pt', 'Au', 'Pb', 'Th', 'U']
# Abbreviations for dwarf spheroidal galaxies used in the catalog.
dwarf_spheroidals = ['Boo', 'Car', 'Com', 'Dra', 'Fnx', 'Her', 'Leo', 'S10',
                     'S11', 'S12', 'S14', 'S15', 'Sci', 'Sex', 'Sta', 'Uma',
                     'UMi']
# Working directory for the Keck 2011B proposal data and figures.
# NOTE(review): the name 'dir' shadows the Python builtin of the same name.
dir = '/u/jlu/doc/proposals/keck/caltech/11B/'
def cemp_no_properties():
    """
    Select CEMP sub-samples from the Frebel (2010) compilation, plot their
    properties, and print target lists.

    Sub-sample criteria follow Beers (2008): CEMP is [C/Fe] >= 0.9;
    CEMP-s additionally has [Ba/Fe] > 1 and [Ba/Eu] > 0.5; CEMP-no has
    [Ba/Fe] < 0. Coordinates for the CEMP-s/no stars are refreshed from
    SIMBAD. Saves an [Fe/H] histogram plus V/[Fe/H], RA/Dec, and RA/V
    figures, and prints formatted tables of the EMP-r, CEMP-s, and
    CEMP-no targets. Writes plot files as a side effect.
    """
    d = load_frebel_table()
    # Lets identify some sub-samples. Criteria from Beers 2008.
    emp = np.where((d.CFe < 0.9) & (d.ra != -999) & (d.V > 0) &
                   (d.FeH != -999) & (d.CFe != -999) & (d.BaFe != -999))[0]
    empr = np.where((d.CFe < 0.9) & (d.ra != -999) & (d.V > 0) &
                    (d.FeH != -999) & (d.CFe != -999) & (d.BaFe != -999) &
                    (d.BaEu < 0))[0]
    cemp = np.where(d.CFe >= 0.9)[0]
    cempr = np.where((d.CFe >= 0.9) & (d.EuFe > 1))[0]
    cemps = np.where((d.CFe >= 0.9) & (d.BaFe > 1) &
                     (d.EuFe > -999) & (d.BaEu > 0.5))[0]
    cempno = np.where((d.CFe >= 0.9) & (d.BaFe > -999) & (d.BaFe < 0))[0]
    # Plot up the histogram of Iron abundances:
    bins_FeH = np.arange(-7, 1, 0.5)
    py.clf()
    py.hist(d.FeH, histtype='step', bins=bins_FeH,
            label='%d stars' % len(d.name))
    py.hist(d.FeH[cemp], histtype='step', bins=bins_FeH,
            label='%d CEMP' % len(cemp))
    py.hist(d.FeH[cemps], histtype='step', bins=bins_FeH,
            label='%d CEMP-s' % len(cemps))
    py.hist(d.FeH[cempno], histtype='step', bins=bins_FeH,
            label='%d CEMP-no' % len(cempno))
    py.xlabel('[Fe/H]')
    py.ylabel('Number')
    py.legend(loc='upper left')
    py.ylim(0, 100)
    py.savefig(dir + 'hist_FeH.png')
    # Fix the ones with no V-band magnitudes
    bad_s = np.where(d.V[cemps] < 0)[0]
    bad_no = np.where(d.V[cempno] < 0)[0]
    # Bad CEMP-s stars
    print ''
    print '## Bad CEMP-s'
    for ii in bad_s:
        print '%20s %10.5f %10.5f %5.2f' % \
            (d.name[cemps[ii]], d.ra[cemps[ii]],
             d.dec[cemps[ii]], d.V[cemps[ii]])
    print '## Bad CEMP-no'
    for ii in bad_no:
        print '%20s %10.5f %10.5f %5.2f' % \
            (d.name[cempno[ii]], d.ra[cempno[ii]],
             d.dec[cempno[ii]], d.V[cempno[ii]])
    # Get rid of the stars without info.
    cemps = np.delete(cemps, bad_s)
    cempno = np.delete(cempno, bad_no)
    # Fix the RA and Dec for CEMP-s and CEMP-no stars
    d.ra[cemps], d.dec[cemps] = getCoordsFromSimbad(d.simbad[cemps])
    d.ra[cempno], d.dec[cempno] = getCoordsFromSimbad(d.simbad[cempno])
    py.clf()
    py.plot(d.V[cemps], d.FeH[cemps], 'rs', label='CEMP-s')
    py.plot(d.V[cempno], d.FeH[cempno], 'bo', label='CEMP-no')
    py.legend(loc='upper left', numpoints=1)
    py.xlabel('V magnitude')
    py.ylabel('[Fe/H]')
    py.savefig('v_vs_feh_cemp_s_no.png')
    # Now lets figure out what is observable this semester.
    py.clf()
    py.plot(d.ra[cemps], d.dec[cemps], 'rs', label='CEMP-s')
    py.plot(d.ra[cempno], d.dec[cempno], 'bo', label='CEMP-no')
    py.xlabel('R.A. (degrees)')
    py.ylabel('Dec. (degrees)')
    py.legend(loc='upper right', numpoints=1)
    # Observable RA windows (degrees) at three representative dates.
    lim_RA_mar01 = [ 90, 237]
    lim_RA_may01 = [156, 284]
    lim_RA_jul01 = [223, 342]
    py.plot(lim_RA_mar01, [10, 10], 'm-', linewidth=3)
    py.plot(lim_RA_may01, [20, 20], 'k-', linewidth=3, color='cyan')
    py.plot(lim_RA_jul01, [30, 30], 'g-', linewidth=3)
    py.text(95, 12, 'Mar 01, 2012', color='magenta')
    py.text(160, 22, 'May 01, 2012', color='cyan')
    # NOTE(review): label says 2011 while the other labels are 2012 --
    # likely a typo in the original plot annotation.
    py.text(235, 32, 'Jul 01, 2011', color='green')
    py.xlim(0, 360)
    py.ylim(-30, 70)
    py.savefig('ra_dec_cemp_s_no.png')
    # RA vs. V-band
    py.clf()
    py.plot(d.ra[cemps], d.V[cemps], 'rs', label='CEMP-s')
    py.plot(d.ra[cempno], d.V[cempno], 'bo', label='CEMP-no')
    py.xlabel('R.A. (degrees)')
    py.ylabel('V Magnitude')
    py.gca().set_ylim(py.gca().get_ylim()[::-1])
    py.legend(loc='upper right', numpoints=1)
    py.plot(lim_RA_mar01, [12, 12], 'm-', linewidth=3)
    py.plot(lim_RA_may01, [11, 11], 'k-', linewidth=3, color='cyan')
    py.plot(lim_RA_jul01, [10, 10], 'g-', linewidth=3)
    py.text(95, 11.9, 'Mar 01, 2012', color='magenta')
    py.text(160, 10.9, 'May 01, 2012', color='cyan')
    py.text(235, 9.9, 'Jul 01, 2011', color='green')
    py.xlim(0, 360)
    py.ylim(17, 6)
    py.savefig('ra_v_cemp_s_no.png')
    print('')
    print 'After removing stars without info:'
    hdrfmt = '{:16s} {:^15s} {:^15s} {:^5s} {:^5s} {:^5s} {:^5s}'
    starfmt = '{:<16s} {:15s} {:15s} {:5.2f} {:5.2f} {:5.2f} {:5.2f}'
    # Print out all the emp-r stars
    print('')
    print(' {:d} EMP-r stars (non-Carbon enhanced)'.format(len(empr)))
    print(hdrfmt.format('Name', 'RA', 'Dec', 'Vmag',
                        'Fe/H', 'C/Fe', 'Ba/Fe'))
    for ii in empr:
        hmsdms = coords.Position((d.ra[ii], d.dec[ii])).hmsdms().split()
        ra_hex = hmsdms[0].replace(':', ' ')
        dec_hex = hmsdms[1].replace(':', ' ')
        print(starfmt.format(d.name[ii], ra_hex, dec_hex, d.V[ii],
                             d.FeH[ii], d.CFe[ii], d.BaFe[ii]))
    print('')
    print(' {:d} CEMP-s stars'.format(len(cemps)))
    print(hdrfmt.format('Name', 'RA', 'Dec', 'Vmag',
                        'Fe/H', 'C/Fe', 'Ba/Fe'))
    for ii in cemps:
        hmsdms = coords.Position((d.ra[ii], d.dec[ii])).hmsdms().split()
        ra_hex = hmsdms[0].replace(':', ' ')
        dec_hex = hmsdms[1].replace(':', ' ')
        print(starfmt.format(d.name[ii], ra_hex, dec_hex, d.V[ii],
                             d.FeH[ii], d.CFe[ii], d.BaFe[ii]))
    # Print out all the cemp-no stars
    print('')
    print(' {:d} CEMP-no stars'.format(len(cempno)))
    print(hdrfmt.format('Name', 'RA', 'Dec', 'Vmag',
                        'Fe/H', 'C/Fe', 'Ba/Fe'))
    for ii in cempno:
        print ii, d.ra[ii], d.dec[ii]
        hmsdms = coords.Position((d.ra[ii], d.dec[ii])).hmsdms().split()
        ra_hex = hmsdms[0].replace(':', ' ')
        dec_hex = hmsdms[1].replace(':', ' ')
        print(starfmt.format(d.name[ii], ra_hex, dec_hex, d.V[ii],
                             d.FeH[ii], d.CFe[ii], d.BaFe[ii]))
def load_frebel_table(verbose=False):
    """
    Load the Frebel (2010) metal-poor star compilation and resolve
    duplicate entries.

    For stars appearing more than once, each of [Fe/H], [C/Fe], [Ba/Fe],
    and [Eu/Fe] is resolved independently: a single valid measurement is
    taken as-is, multiple measurements are resolved to the most recent
    reference (by the year encoded in the reference string). Duplicate
    rows (and stars with no carbon measurement at all) are then removed.

    Missing values are flagged with -999 (and FeH == 0 means missing for
    the Fe resolution step, per the `FeH[idx] != 0` test below).

    Returns a DataHolder with name, simbad, ra/dec, BVRI photometry,
    FeH, abundances, rv, ref, refYear, and derived CFe/BaFe/EuFe/BaEu.
    """
    d = objects.DataHolder()
    # Load fits table.
    data = pyfits.getdata(dir + 'frebel2010_fixed.fits')
    name = data.field('NAME')
    simbad = data.field('SIMBAD')
    ra = data.field('RA')
    dec = data.field('DEC')
    B = data.field('B')
    V = data.field('V')
    R = data.field('R')
    I = data.field('I')
    FeH = data.field('FEH') # Iron abundance
    abundances = data.field('XFE') # All [X/Fe] abundances, carbon is idx=1
    upperlim = data.field('UPPERLIM')
    rv = data.field('RV')
    ref = data.field('ABUNDREF')
    # Decode a 4-digit year from characters 3:5 of the reference code
    # (two-digit years starting with '0' are 20xx, otherwise 19xx).
    refYear = np.zeros(len(ref), dtype=int)
    for rr in range(len(ref)):
        tmp = ref[rr][3:5]
        if tmp.startswith('0'):
            refYear[rr] = float('20%s' % tmp)
        else:
            refYear[rr] = float('19%s' % tmp)
    # Report how many stars don't have info:
    idx = np.where(ra < 0)[0]
    if verbose:
        print 'Found %d out of %d stars without information\n' % \
            (len(idx), len(name))
    idx_C = frebel_elements.index('C')
    idx_Ba = frebel_elements.index('Ba')
    idx_Eu = frebel_elements.index('Eu')
    # Pull out some elements of interest
    CFe = abundances[:, idx_C]
    BaFe = abundances[:, idx_Ba]
    EuFe = abundances[:, idx_Eu]
    BaEu = BaFe - EuFe
    # First we need to deal with duplicates
    if verbose:
        print '## Duplicates'
    duplicates = np.array([], dtype=int)
    for ii in range(len(name)):
        if ii in duplicates:
            # Already addressed this duplicate star, skip it
            continue
        idx = np.where(name == name[ii])[0]
        if len(idx) > 1:
            if verbose:
                print '%20s %10.5f %10.5f %5.2f %7.2f %7.2f %7.2f %7.2f' % \
                    (name[ii], ra[ii], dec[ii], V[ii], FeH[ii], CFe[ii],
                     BaFe[ii], EuFe[ii])
            # The easy case is where there is only one measurement
            # for a given element. Then we just take the one.
            good = np.where(FeH[idx] != 0)[0]
            if len(good) == 1:
                FeH[idx[0]] = FeH[idx][good[0]]
            if len(good) > 1:
                # otherwise take the most recent one
                recent = refYear[idx][good].argmax()
                FeH[idx[0]] = FeH[idx][good][recent]
            # The easy case is where there is only one measurement
            # for a given element. Then we just take the one.
            good = np.where(CFe[idx] > -999)[0]
            if len(good) == 1:
                CFe[idx[0]] = CFe[idx][good[0]]
            if len(good) > 1:
                # otherwise take the most recent one
                recent = refYear[idx][good].argmax()
                CFe[idx[0]] = CFe[idx][good][recent]
            if len(good) == 0:
                # No carbon measurements, get rid of star all together.
                if verbose:
                    print 'No C measurements for %s' % name[ii]
                duplicates = np.append(duplicates, idx)
            # The easy case is where there is only one measurement
            # for a given element. Then we just take the one.
            good = np.where(BaFe[idx] > -999)[0]
            if len(good) == 1:
                BaFe[idx[0]] = BaFe[idx][good[0]]
            if len(good) > 1:
                # otherwise take the most recent one
                recent = refYear[idx][good].argmax()
                BaFe[idx[0]] = BaFe[idx][good][recent]
            # The easy case is where there is only one measurement
            # for a given element. Then we just take the one.
            good = np.where(EuFe[idx] > -999)[0]
            if len(good) == 1:
                EuFe[idx[0]] = EuFe[idx][good[0]]
            if len(good) > 1:
                # otherwise take the most recent one
                recent = refYear[idx][good].argmax()
                EuFe[idx[0]] = EuFe[idx][good][recent]
            if verbose:
                print '%20s %10.5f %10.5f %5.2f %7.2f %7.2f %7.2f %7.2f' % \
                    (name[ii], ra[ii], dec[ii], V[ii], FeH[ii], CFe[ii],
                     BaFe[ii], EuFe[ii])
            # Delete the other ones (keep only the first occurrence, which
            # now carries the resolved values).
            duplicates = np.append(duplicates, idx[1:])
    # Now delete the duplicates from the tables.
    d.name = np.delete(name, duplicates)
    d.simbad = np.delete(simbad, duplicates)
    d.ra = np.array(np.delete(ra, duplicates), dtype=float)
    d.dec = np.array(np.delete(dec, duplicates), dtype=float)
    d.B = np.array(np.delete(B, duplicates), dtype=float)
    d.V = np.array(np.delete(V, duplicates), dtype=float)
    d.R = np.array(np.delete(R, duplicates), dtype=float)
    d.I = np.array(np.delete(I, duplicates), dtype=float)
    d.FeH = np.array(np.delete(FeH, duplicates), dtype=float)
    d.abundances = np.array(np.delete(abundances, duplicates), dtype=float)
    d.rv = np.array(np.delete(rv, duplicates), dtype=float)
    d.ref = np.delete(ref, duplicates)
    d.refYear = np.delete(refYear, duplicates)
    d.CFe = np.array(np.delete(CFe, duplicates), dtype=float)
    d.BaFe = np.array(np.delete(BaFe, duplicates), dtype=float)
    d.EuFe = np.array(np.delete(EuFe, duplicates), dtype=float)
    d.BaEu = np.array(np.delete(BaEu, duplicates), dtype=float)
    if verbose:
        print '## Removed %d duplicates, %d stars left' % \
            (len(duplicates), len(name))
    return d
def load_masseron_table():
    """
    Load the Masseron et al. (2010) CEMP compilation from
    'masseron2010.fits' into a DataHolder.

    Also derives the [Ba/Eu] ratio and fetches V magnitudes from SIMBAD
    (the catalog itself has no V column).
    """
    table = pyfits.getdata('masseron2010.fits')

    d = objects.DataHolder()
    # Map holder attribute name -> FITS column name.
    column_map = [('name', 'Name'),
                  ('type', 'Type'),
                  ('ra', '_RA'),
                  ('dec', '_DE'),
                  ('simbadName', 'SimbadName'),
                  ('FeH', '[Fe/H]'),
                  ('CFe', '[C/Fe]'),
                  ('BaFe', '[Ba/Fe]'),
                  ('EuFe', '[Eu/Fe]')]
    for attr, column in column_map:
        setattr(d, attr, table.field(column))

    # [Ba/Eu] separates s-process from r-process enhanced stars.
    d.BaEu = d.BaFe - d.EuFe

    # V magnitudes come from a SIMBAD query, one per star.
    d.V = getVbandFromSimbad(d.simbadName)

    return d
def getVbandFromSimbad(starNames):
"""
Pull the star name (strip off any "*_" prefixes) and submit
a query to simbad to find the radial velocity. If there is no
entry, return 0.
"""
# Write the simbad query
queryFile = 'simbad_query.txt'
_query = open(queryFile, 'w')
_query.write('output console=off\n')
_query.write('output script=off\n')
_query.write('output error=off\n')
_query.write('set limit 1\n')
_query.write('format object fmt1 "%IDLIST(1), %FLUXLIST(V,R)[%*(F),]"\n')
_query.write('result full\n')
for ii in range(len(starNames)):
_query.write('query id %s\n' % starNames[ii])
_query.close()
# Connect to simbad and submit the query. Save to a buffer.
replyFile = 'simbad_results.txt'
_reply = open(replyFile, 'w')
curl = pycurl.Curl()
curl.setopt(pycurl.POST, 1)
curl.setopt(pycurl.URL, 'http://simbad.harvard.edu/simbad/sim-script')
curl.setopt(pycurl.WRITEFUNCTION, _reply.write)
curlform = [("scriptFile", (pycurl.FORM_FILE, queryFile))]
curl.setopt(pycurl.HTTPPOST, curlform)
curl.perform()
_reply.close()
# Search reply for flux info.
_reply = open(replyFile)
foo = _reply.readlines()
# Find the start of the data
dataLine = 0
simbadError = False
for ff in range(len(foo)):
if foo[ff].startswith('::data'):
dataLine = ff
break
if (foo[ff].startswith('::error')):
simbadError = True
break
vmag = np.zeros(len(starNames), dtype=float)
if not simbadError:
for ff in range(len(starNames)):
fields = foo[dataLine+2+ff].split(',')
msg = ''
if (len(fields) > 1):
# Found a valid entry
vmagTmp = fields[1] # last entry string in km/s
if vmagTmp == '~':
vmagTmp = fields[2]
msg = '(using R-mag)'
if vmagTmp == '~':
vmag[ff] = -999.0
msg = '(no data found)'
break
if vmagTmp[0] is '-':
vmag[ff] = -1.0 * float(vmagTmp[1:])
else:
vmag[ff] = float(vmagTmp)
print 'SIMBAD: Found %s with V = %5.1f mag %s' % \
(starNames[ff], vmag[ff], msg)
else:
print 'SIMBAD: Error on query'
return vmag
def getCoordsFromSimbad(starNames):
    """
    Query SIMBAD (sim-script interface) for the J2000 coordinates, in
    decimal degrees, of each star in starNames.

    Returns (ra, dec) float arrays, one entry per star, in input order.
    Stars without a valid reply (or a failed query) stay at 0.0. Writes
    'simbad_query.txt' and 'simbad_results.txt' in the current directory
    as a side effect.
    """
    # Write the simbad query (one 'query id' line per star; the format
    # string requests the identifier plus RA and Dec in degrees).
    queryFile = 'simbad_query.txt'
    _query = open(queryFile, 'w')
    _query.write('output console=off\n')
    _query.write('output script=off\n')
    _query.write('output error=off\n')
    _query.write('set limit 1\n')
    _query.write('format object fmt1 "%IDLIST(1),%COO(d;A),%COO(d;D)"\n')
    _query.write('result full\n')
    for ii in range(len(starNames)):
        _query.write('query id %s\n' % starNames[ii])
    _query.close()
    # Connect to simbad and submit the query. Save to a buffer.
    replyFile = 'simbad_results.txt'
    _reply = open(replyFile, 'w')
    curl = pycurl.Curl()
    curl.setopt(pycurl.POST, 1)
    curl.setopt(pycurl.URL, 'http://simbad.harvard.edu/simbad/sim-script')
    curl.setopt(pycurl.WRITEFUNCTION, _reply.write)
    curlform = [("scriptFile", (pycurl.FORM_FILE, queryFile))]
    curl.setopt(pycurl.HTTPPOST, curlform)
    curl.perform()
    _reply.close()
    # Search reply for flux info.
    _reply = open(replyFile)
    foo = _reply.readlines()
    # Find the start of the data section ('::data'); an '::error' marker
    # means the whole query failed.
    dataLine = 0
    simbadError = False
    for ff in range(len(foo)):
        if foo[ff].startswith('::data'):
            dataLine = ff
            break
        if (foo[ff].startswith('::error')):
            simbadError = True
            break
    ra = np.zeros(len(starNames), dtype=float)
    dec = np.zeros(len(starNames), dtype=float)
    if not simbadError:
        for ff in range(len(starNames)):
            # Replies start two lines after the '::data' marker, one line
            # per queried star, comma separated: name, RA, Dec.
            fields = foo[dataLine+2+ff].split(',')
            msg = ''
            if (len(fields) > 2):
                # Found a valid entry
                ra[ff] = float(fields[1])
                dec[ff] = float(fields[2])
                print 'SIMBAD: Found %s at RA = %10.5f, DEC = %10.5f' % \
                    (starNames[ff], ra[ff], dec[ff])
    else:
        print 'SIMBAD: Error on query'
    return ra, dec
def cempno_table():
    """
    Write a LaTeX (deluxetable) target table for the CEMP-no sample and
    plot the LST ranges over which each target is above airmass 2 at Keck,
    overlaid with the night (twilight-to-twilight) LST windows for three
    dates in 2012.

    Side effects: writes 'table_cemp_no.tex' and
    'obs_ra_v_cemp_s_no.png'; queries SIMBAD for coordinates.
    """
    names = ['CD-38_245', 'CS22942-019', 'HD6755', 'CS22958-042',
             'BD+44_493', 'BS16545-089', 'HE1150-0428', 'BS16920-005',
             'HE1300+0157', 'BS16929-005', 'HE1300-0641', 'CS22877-001',
             'CS22880-074', 'CS29498-043', 'CS29502-092', 'CS22949-037',
             'CS22957-027', 'HE2356-0410', 'HE1012-1540', 'HE1330-0354']
    d = load_frebel_table()
    idx = np.array([], dtype=int)
    for ii in range(len(names)):
        foo = np.where(d.name == names[ii])[0]
        idx = np.append(idx, foo)
    # Sort targets by right ascension.
    sdx = d.ra[idx].argsort()
    idx = idx[sdx]
    # Trim out what we need
    ra = d.ra[idx]
    dec = d.dec[idx]
    V = d.V[idx]
    B = d.B[idx]
    R = d.R[idx]
    FeH = d.FeH[idx]
    CFe = d.CFe[idx]
    BaFe = d.BaFe[idx]
    # Fix the RA and Dec for CEMP-s and CEMP-no stars
    ra, dec = getCoordsFromSimbad(d.simbad[idx])
    # Make a LaTeX table of the targets we want to observe
    _out = open('table_cemp_no.tex', 'w')
    _out.write('\\begin{deluxetable}{lrrrrrr}\n')
    _out.write('\\tablewidth{0pt}\n')
    _out.write('\\tablecaption{CEMP-no Stars}\n')
    _out.write('\\tablehead{\n')
    _out.write(' \\colhead{Name} &\n')
    _out.write(' \\colhead{R.A. (J2000)} &\n')
    _out.write(' \\colhead{Dec. (J2000)} &\n')
    _out.write(' \\colhead{V} &\n')
    _out.write(' \\colhead{[Fe/H]} &\n')
    _out.write(' \\colhead{[C/Fe]} &\n')
    _out.write(' \\colhead{[Ba/Fe]}\n')
    _out.write('}\n')
    _out.write('\\startdata\n')
    for ii in range(len(names)):
        hmsdms = coords.Position((ra[ii], dec[ii])).hmsdms().split()
        ra_hex = hmsdms[0]
        dec_hex = hmsdms[1]
        _out.write('%25s & %15s & %15s & ' % (names[ii].replace('_', '\_'),
                                              ra_hex, dec_hex))
        _out.write('%5.2f & %5.2f & %5.2f & %5.2f \\\\ \n' %
                   (V[ii], FeH[ii], CFe[ii], BaFe[ii]))
    _out.write('\\enddata\n')
    _out.write('\\end{deluxetable}\n')
    _out.close()
    # Now lets loop through and plot when these targets are observable.
    py.clf()
    py.subplots_adjust(left=0.05, right=0.95, top=0.95)
    # Calculate the hour angle at which the object goes above airmass = 2.
    # Relevant equations are:
    # airmass = 1 / cos(z)
    # hour angle = sidereal time - RA
    # cos z = sin L sin Dec + cos L cos Dec cos HA
    # We will solve for HA.
    iraf.noao()
    obs = iraf.noao.observatory
    obs(command="set", obsid="keck")
    airmassLim = 2.0
    latRad = np.radians(obs.latitude)
    decRad = np.radians(dec)
    top = (1.0 / airmassLim) - np.sin(latRad) * np.sin(decRad)
    bot = np.cos(latRad) * np.cos(decRad)
    hangle = np.degrees( np.arccos(top / bot) )
    madeLegend = False
    for ii in range(len(names)):
        # Observable LST window (hours): transit RA +/- the hour angle,
        # wrapped around the 24-hour boundary below.
        minLST = (ra[ii] - hangle[ii]) / 15.
        maxLST = (ra[ii] + hangle[ii]) / 15.
        hix = 360.0 / 15.0
        if (minLST >= 0) and (maxLST < hix):
            if madeLegend == True:
                py.plot([minLST, maxLST], [ii+1, ii+1], linewidth=5,
                        color='black')
            else:
                py.plot([minLST, maxLST], [ii+1, ii+1], linewidth=5,
                        color='black', label='CEMP-no stars')
                madeLegend = True
        if minLST < 0:
            py.plot([0, maxLST], [ii+1, ii+1], linewidth=5, color='black')
            py.plot([minLST + hix, hix], [ii+1, ii+1], linewidth=5, color='black')
        if maxLST > hix:
            py.plot([minLST, hix], [ii+1, ii+1], linewidth=5, color='black')
            py.plot([0, maxLST - hix], [ii+1, ii+1], linewidth=5, color='black')
    py.xlim(0, hix)
    # Get the LST ranges for March, May, July
    months = np.array([3, 5, 7])
    days = np.array([1, 1, 1])
    years = np.array([2012, 2012, 2012])
    colors = ['red', 'green', 'blue']
    labels = ['Mar 1, 2012 (HST)', 'May 1, 2012 (HST)', 'Jul 1, 2012 (HST)']
    rng = py.axis()
    for ii in range(len(months)):
        twi = get_twilight_lst(years[ii], months[ii], days[ii])
        minLST = twi[0] * 15.0
        maxLST = twi[1] * 15.0
        ypos = rng[3] + 2*(len(months) - ii)
        if minLST < 0:
            minLST += 360.0
        if maxLST > 360:
            maxLST -= 360.0
        x1 = np.array([minLST, maxLST]) / 15.0
        x2 = None
        y = np.array([ypos, ypos])
        # Windows straddling 0h LST are drawn as two segments.
        if minLST > 0 and maxLST < 360 and minLST > maxLST:
            x1 = np.array([0, maxLST]) / 15.0
            x2 = np.array([minLST, 360]) / 15.0
        py.plot(x1, y, linewidth=10, color=colors[ii], label=labels[ii])
        if x2 != None:
            py.plot(x2, y, linewidth=10, color=colors[ii])
    py.ylim(0, rng[3] + 7*len(months))
    py.legend(loc='upper left')
    # py.xlim(0, 360)
    py.xlim(0, 24)
    py.xlabel('LST (hours)')
    py.gca().yaxis.set_visible(False)
    py.savefig('obs_ra_v_cemp_s_no.png')
def cempno_starlist():
    """
    Write a Keck-format starlist (and a file of SIMBAD names) for the
    CEMP-no sample, then plot the LST ranges over which each target is
    above airmass 2 at Keck together with the night windows for three
    dates in 2012.

    Same targets and plotting logic as cempno_table(), but without the RA
    sort (commented out below) and writing telescope files instead of a
    LaTeX table. Side effects: writes 'cemp_no_starlist.tel',
    'cemp_no_simbadnames.tel', and 'obs_ra_v_cemp_s_no_new.png'; queries
    SIMBAD for coordinates.
    """
    names = np.array(['CD-38_245', 'CS22942-019', 'HD6755', 'CS22958-042',
                      'BD+44_493', 'BS16545-089', 'HE1150-0428', 'BS16920-005',
                      'HE1300+0157', 'BS16929-005', 'HE1300-0641', 'CS22877-001',
                      'CS22880-074', 'CS29498-043', 'CS29502-092', 'CS22949-037',
                      'CS22957-027', 'HE2356-0410', 'HE1012-1540', 'HE1330-0354'])
    d = load_frebel_table()
    idx = np.array([], dtype=int)
    for ii in range(len(names)):
        foo = np.where(d.name == names[ii])[0]
        idx = np.append(idx, foo)
    # sdx = d.ra[idx].argsort()
    # idx = idx[sdx]
    # Trim out what we need
    ra = d.ra[idx]
    dec = d.dec[idx]
    V = d.V[idx]
    B = d.B[idx]
    R = d.R[idx]
    FeH = d.FeH[idx]
    CFe = d.CFe[idx]
    BaFe = d.BaFe[idx]
    simbad = d.simbad[idx]
    # Fix the RA and Dec for CEMP-s and CEMP-no stars
    ra, dec = getCoordsFromSimbad(d.simbad[idx])
    # Make a LaTeX table of the targets we want to observe
    _out = open('cemp_no_starlist.tel', 'w')
    _out2 = open('cemp_no_simbadnames.tel', 'w')
    for ii in range(len(names)):
        hmsdms = coords.Position((ra[ii], dec[ii])).hmsdms().split()
        ra_hex = hmsdms[0].replace(':', ' ')
        dec_hex = hmsdms[1].replace(':', ' ')
        print ii, names[ii], simbad[ii]
        _out.write('%-16s %12s %12s 2000.0 vmag=%5.2f\n' % (names[ii], ra_hex, dec_hex, V[ii]))
        _out2.write('%s\n' % simbad[ii])
    _out.close()
    _out2.close()
    # Now lets loop through and plot when these targets are observable.
    py.clf()
    py.subplots_adjust(left=0.05, right=0.95, top=0.95)
    # Calculate the hour angle at which the object goes above airmass = 2.
    # Relevant equations are:
    # airmass = 1 / cos(z)
    # hour angle = sidereal time - RA
    # cos z = sin L sin Dec + cos L cos Dec cos HA
    # We will solve for HA.
    iraf.noao()
    obs = iraf.noao.observatory
    obs(command="set", obsid="keck")
    airmassLim = 2.0
    latRad = np.radians(obs.latitude)
    decRad = np.radians(dec)
    top = (1.0 / airmassLim) - np.sin(latRad) * np.sin(decRad)
    bot = np.cos(latRad) * np.cos(decRad)
    print 'TOP: ', top, 'BOT: ', bot
    hangle = np.degrees( np.arccos(top / bot) )
    madeLegend = False
    for ii in range(len(names)):
        # Observable LST window (hours), wrapped around 24h below.
        minLST = (ra[ii] - hangle[ii]) / 15.
        maxLST = (ra[ii] + hangle[ii]) / 15.
        hix = 360.0 / 15.0
        if (minLST >= 0) and (maxLST < hix):
            if madeLegend == True:
                py.plot([minLST, maxLST], [ii+1, ii+1], linewidth=5,
                        color='black')
            else:
                py.plot([minLST, maxLST], [ii+1, ii+1], linewidth=5,
                        color='black', label='CEMP-no stars')
                madeLegend = True
        if minLST < 0:
            py.plot([0, maxLST], [ii+1, ii+1], linewidth=5, color='black')
            py.plot([minLST + hix, hix], [ii+1, ii+1], linewidth=5, color='black')
        if maxLST > hix:
            py.plot([minLST, hix], [ii+1, ii+1], linewidth=5, color='black')
            py.plot([0, maxLST - hix], [ii+1, ii+1], linewidth=5, color='black')
    py.xlim(0, hix)
    # Get the LST ranges for March, May, July
    months = np.array([3, 5, 7])
    days = np.array([1, 1, 1])
    years = np.array([2012, 2012, 2012])
    colors = ['red', 'green', 'blue']
    labels = ['Mar 1, 2012 (HST)', 'May 1, 2012 (HST)', 'Jul 1, 2012 (HST)']
    rng = py.axis()
    for ii in range(len(months)):
        twi = get_twilight_lst(years[ii], months[ii], days[ii])
        minLST = twi[0] * 15.0
        maxLST = twi[1] * 15.0
        ypos = rng[3] + 2*(len(months) - ii)
        if minLST < 0:
            minLST += 360.0
        if maxLST > 360:
            maxLST -= 360.0
        x1 = np.array([minLST, maxLST]) / 15.0
        x2 = None
        y = np.array([ypos, ypos])
        # Windows straddling 0h LST are drawn as two segments.
        if minLST > 0 and maxLST < 360 and minLST > maxLST:
            x1 = np.array([0, maxLST]) / 15.0
            x2 = np.array([minLST, 360]) / 15.0
        py.plot(x1, y, linewidth=10, color=colors[ii], label=labels[ii])
        if x2 != None:
            py.plot(x2, y, linewidth=10, color=colors[ii])
    py.ylim(0, rng[3] + 7*len(months))
    py.legend(loc='upper left')
    # py.xlim(0, 360)
    py.xlim(0, 24)
    py.xlabel('LST (hours)')
    py.gca().yaxis.set_visible(False)
    py.savefig('obs_ra_v_cemp_s_no_new.png')
def cempno_control_starlist():
    """Build a Keck starlist for the control sample and plot its observability.

    Writes 'control_starlist.tel' (Keck starlist format) and
    'control_simbadnames.tel', then plots, per target, the LST window over
    which the star stays below airmass 2.0 at Keck, overlaid with the
    12-degree twilight LST ranges for Mar/May/Jul 2012.  The figure is saved
    to 'obs_ra_v_control_new.png'.
    """
    # Control-sample target names (matched against the Frebel table below).
    names = np.array(['HE0132-2429', 'HE1347-1025', 'HE1356-0622', 'HE1424-0241', 'BS16467-062',
                      'G64-12', 'CS29518-051', 'CS29502-042', 'CS22953-003',
                      'CS22896-154', 'CS22183-031', 'CS29491-069', 'CS29497-004', 'CS31082-001',
                      'HE0430-4901', 'HE0432-0923', 'HE1127-1143', 'HE1219-0312', 'HE2224+0143',
                      'HE2327-5642'])

    d = load_frebel_table()

    # Find each requested star in the table; warn about any that are missing.
    idx = np.array([], dtype=int)
    for ii in range(len(names)):
        foo = np.where(d.name == names[ii])[0]
        if len(foo) == 0:
            # Same output as the original py2 "print 'No star %s' % ..." form.
            print('No star %s' % names[ii])
        idx = np.append(idx, foo)

    # Sort the selected targets by right ascension.
    sdx = d.ra[idx].argsort()
    idx = idx[sdx]

    # Trim out what we need
    ra = d.ra[idx]
    dec = d.dec[idx]
    V = d.V[idx]
    B = d.B[idx]
    R = d.R[idx]
    FeH = d.FeH[idx]
    CFe = d.CFe[idx]
    BaFe = d.BaFe[idx]
    simbad = d.simbad[idx]

    # Fix the RA and Dec for CEMP-s and CEMP-no stars
    ra, dec = getCoordsFromSimbad(d.simbad[idx])

    # Make a LaTeX table of the targets we want to observe
    _out = open('control_starlist.tel', 'w')
    _out2 = open('control_simbadnames.tel', 'w')

    for ii in range(len(names)):
        hmsdms = coords.Position((ra[ii], dec[ii])).hmsdms().split()
        ra_hex = hmsdms[0].replace(':', ' ')
        dec_hex = hmsdms[1].replace(':', ' ')
        # Same space-separated output as the py2 "print ii, names[ii], ..." form.
        print('%s %s %s' % (ii, names[ii], simbad[ii]))
        _out.write('%-16s %12s %12s 2000.0 vmag=%5.2f\n' % (names[ii], ra_hex, dec_hex, V[ii]))
        _out2.write('%s\n' % simbad[ii])
    _out.close()
    _out2.close()

    # Now lets loop through and plot when these targets are observable.
    py.clf()
    py.subplots_adjust(left=0.05, right=0.95, top=0.95)

    # Calculate the hour angle at which the object goes above airmass = 2.
    # Relevant equations are:
    #   airmass = 1 / cos(z)
    #   hour angle = sidereal time - RA
    #   cos z = sin L sin Dec + cos L cos Dec cos HA
    # We will solve for HA.
    iraf.noao()
    obs = iraf.noao.observatory
    obs(command="set", obsid="keck")
    airmassLim = 2.0
    latRad = np.radians(obs.latitude)
    decRad = np.radians(dec)
    top = (1.0 / airmassLim) - np.sin(latRad) * np.sin(decRad)
    bot = np.cos(latRad) * np.cos(decRad)
    hangle = np.degrees( np.arccos(top / bot) )

    madeLegend = False
    for ii in range(len(names)):
        # Observable LST window in hours (15 deg per hour).
        minLST = (ra[ii] - hangle[ii]) / 15.
        maxLST = (ra[ii] + hangle[ii]) / 15.
        hix = 360.0 / 15.0

        if (minLST >= 0) and (maxLST < hix):
            # NOTE(review): legend label says 'CEMP-no stars' even though this
            # is the control sample -- confirm the label is intentional.
            if madeLegend:
                py.plot([minLST, maxLST], [ii+1, ii+1], linewidth=5,
                        color='black')
            else:
                py.plot([minLST, maxLST], [ii+1, ii+1], linewidth=5,
                        color='black', label='CEMP-no stars')
                madeLegend = True
        if minLST < 0:
            # Window wraps through LST = 0h; draw the two pieces separately.
            py.plot([0, maxLST], [ii+1, ii+1], linewidth=5, color='black')
            py.plot([minLST + hix, hix], [ii+1, ii+1], linewidth=5, color='black')
        if maxLST > hix:
            # Window wraps past LST = 24h.
            py.plot([minLST, hix], [ii+1, ii+1], linewidth=5, color='black')
            py.plot([0, maxLST - hix], [ii+1, ii+1], linewidth=5, color='black')
    py.xlim(0, hix)

    # Get the LST ranges for March, May, July
    months = np.array([3, 5, 7])
    days = np.array([1, 1, 1])
    years = np.array([2012, 2012, 2012])
    colors = ['red', 'green', 'blue']
    labels = ['Mar 1, 2012 (HST)', 'May 1, 2012 (HST)', 'Jul 1, 2012 (HST)']

    rng = py.axis()

    for ii in range(len(months)):
        twi = get_twilight_lst(years[ii], months[ii], days[ii])
        minLST = twi[0] * 15.0
        maxLST = twi[1] * 15.0
        ypos = rng[3] + 2*(len(months) - ii)

        if minLST < 0:
            minLST += 360.0
        if maxLST > 360:
            maxLST -= 360.0

        x1 = np.array([minLST, maxLST]) / 15.0
        x2 = None
        y = np.array([ypos, ypos])

        if minLST > 0 and maxLST < 360 and minLST > maxLST:
            # Twilight window wraps through LST = 0; split into two segments.
            x1 = np.array([0, maxLST]) / 15.0
            x2 = np.array([minLST, 360]) / 15.0

        py.plot(x1, y, linewidth=10, color=colors[ii], label=labels[ii])
        # BUGFIX: was 'if x2 != None:' -- comparing a numpy array with '!='
        # is elementwise and its truth value is ambiguous under modern numpy;
        # identity comparison is the correct test for the None sentinel.
        if x2 is not None:
            py.plot(x2, y, linewidth=10, color=colors[ii])

    py.ylim(0, rng[3] + 7*len(months))
    py.legend(loc='upper left')
    # py.xlim(0, 360)
    py.xlim(0, 24)
    py.xlabel('LST (hours)')
    py.gca().yaxis.set_visible(False)

    py.savefig('obs_ra_v_control_new.png')
def get_twilight_lst(year, month, day):
    """
    Get the 12-degree evening and morning twilight
    times (in LST) for the specified date.

    Drives the external 'skycalc' program: writes a small command file,
    runs skycalc over it with shell redirection, then parses the almanac
    output.  Returns a numpy array [evening twilight LST, morning twilight
    LST] in hours.

    NOTE(review): the returned variables are unbound (NameError) if the
    skycalc output lacks the 'Evening'/'Morning' lines, e.g. when skycalc
    is not installed.
    """
    # Get sunset and sunrise times on the first day
    scinName = 'skycalc.input'
    scoutName = 'skycalc.output'

    with open(scinName, 'w') as scin:
        scin.write('m\n')
        # NOTE(review): there is no newline after this command, so the 'Q'
        # below is appended to the same line ('... aQ'); confirm skycalc
        # accepts this concatenated form as intended.
        scin.write('y %4d %2d %2d a' % (year, month, day))
        scin.write('Q\n')

    # Spawn skycalc
    os.system('skycalc < %s > %s' % (scinName, scoutName))

    # Now read in skycalc data.  FIX: the original never closed this file
    # handle; the context manager releases it deterministically.
    with open(scoutName, 'r') as scout:
        lines = scout.readlines()

    for line in lines:
        fields = line.split()

        if (len(fields) < 3):
            continue

        if (fields[0] == 'Sunset'):
            sunset = float(fields[5]) + float(fields[6]) / 60.0
            sunset -= 24.0
            sunrise = float(fields[9]) + float(fields[10]) / 60.0

        if (fields[0] == '12-degr'):
            twilite1 = float(fields[2]) + float(fields[3]) / 60.0
            twilite1 -= 24.0
            twilite2 = float(fields[6]) + float(fields[7]) / 60.0
            # Same space-separated output as the py2 "print a, b" form.
            print('%s %s' % (twilite1, twilite2))

        if (fields[0] == 'Evening'):
            twilite1LST = float(fields[9]) + float(fields[10]) / 60.0

        if (fields[0] == 'Morning'):
            twilite2LST = float(fields[9]) + float(fields[10]) / 60.0

        if ((fields[0] == 'The') and (fields[1] == 'sun')):
            # splittime marks the midpoint of the usable dark time; it is
            # computed but not used by this function (kept for parity).
            darkTime = (twilite2 - twilite1) - 0.5 # 0.5=LGS checkout
            splittime = twilite1 + 0.5 + darkTime/2
            if (splittime > 24.0):
                splittime -= 24.0

    return np.array([twilite1LST, twilite2LST])
| [
"jlu@ifa.hawaii.edu"
] | jlu@ifa.hawaii.edu |
77c6ff64d70bf43e677e3a109dec9871b6a399d4 | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-bindings/bin/custom/R/gen_svd.py | 29b777c6fd2ccee93d258de42738e4edb898a7cc | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 1,431 | py | rest_api_version = 99
extensions = dict(
required_params=['training_frame', 'x', 'destination_key'],
validate_required_params="",
set_required_params="""
parms$training_frame <- training_frame
if(!missing(x))
parms$ignored_columns <- .verify_datacols(training_frame, x)$cols_ignore
if(!missing(destination_key)) {
warning("'destination_key' is deprecated; please use 'model_id' instead.")
if(missing(model_id)) {
parms$model_id <- destination_key
}
}
""",
)
doc = dict(
preamble="""
Singular value decomposition of an H2O data frame using the power method
""",
params=dict(
x="""
A vector containing the \code{character} names of the predictors in the model.
""",
destination_key="""
(Optional) The unique key assigned to the resulting model.
Automatically generated if none is provided.
""",
),
returns="""
an object of class \linkS4class{H2ODimReductionModel}.
""",
references="""
N. Halko, P.G. Martinsson, J.A. Tropp. {Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions}[http://arxiv.org/abs/0909.4061]. SIAM Rev., Survey and Review section, Vol. 53, num. 2, pp. 217-288, June 2011.
""",
examples="""
library(h2o)
h2o.init()
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.uploadFile(path = australia_path)
h2o.svd(training_frame = australia, nv = 8)
"""
)
| [
"noreply@github.com"
] | Winfredemalx54.noreply@github.com |
ab940f2287f99c5c3edf5a43baede27081603152 | e13f6678fb1be916f4746d8663edfbd841531f7e | /ebc/pauta/interfaces/servico.py | e23c1b36241754dee8b39345394ee8fba1e7813b | [
"Unlicense"
] | permissive | lflrocha/ebc.pauta | 67cf41ad0c751c70a53f609204f913c441de0ab3 | 1a77e9f47e22b60af88cf23f492a8b47ddfd27b6 | refs/heads/master | 2021-01-10T08:38:43.924935 | 2015-05-30T19:24:00 | 2015-05-30T19:24:00 | 36,572,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | from zope import schema
from zope.interface import Interface
from zope.app.container.constraints import contains
from zope.app.container.constraints import containers
from ebc.pauta import pautaMessageFactory as _
class IServico(Interface):
    """Zope interface for the Servico (service) content type.

    Schema fields for the content type are intended to be declared in the
    class body (see the generator placeholder below); none are defined yet.
    """
    # -*- schema definition goes here -*-
| [
"lflrocha@gmail.com"
] | lflrocha@gmail.com |
802ec2a72d6779bc336c31b7a68a721565858c5a | 7a39aed5ceff9070864afea30d3369ec70da093d | /tests/conftest.py | e1e05dcb51c92af9c1b689352a91423da84af5bb | [
"BSD-3-Clause"
] | permissive | Omulosi/reader | a0f3fc2c787481c14254edd7bfcd81f715f51a5d | 12759bb9441846eb5fce618137a6e70e2ec3c286 | refs/heads/master | 2023-07-15T12:02:10.365563 | 2021-08-30T11:37:35 | 2021-08-30T11:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | import sqlite3
import sys
from contextlib import closing
from functools import wraps
import py.path
import pytest
from utils import reload_module
from reader import make_reader as original_make_reader
from reader._storage import Storage
def pytest_addoption(parser):
    """Register the ``--runslow`` command line flag (opt-in to slow tests)."""
    runslow_opts = {
        "action": "store_true",
        "default": False,
        "help": "run slow tests",
    }
    parser.addoption("--runslow", **runslow_opts)
def pytest_collection_modifyitems(config, items):  # pragma: no cover
    """Pytest hook: post-process collected items (slow-skip + pypy flaky)."""
    apply_runslow(config, items)
    apply_flaky_pypy_sqlite3(items)


def apply_runslow(config, items):  # pragma: no cover
    """Mark 'slow' tests as skipped unless --runslow was passed."""
    if config.getoption("--runslow"):
        # --runslow given in cli: do not skip slow tests
        return
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)


def apply_flaky_pypy_sqlite3(items):  # pragma: no cover
    """On PyPy only, mark every test flaky, rerunning on sqlite3.InterfaceError."""
    # getting intermittent sqlite3 errors on pypy;
    # https://github.com/lemon24/reader/issues/199#issuecomment-716475686
    if sys.implementation.name != 'pypy':
        return

    def rerun_filter(err, *args):
        # err[0] is an exception type (it is passed to issubclass below);
        # rerun only for sqlite3.InterfaceError.
        return issubclass(err[0], sqlite3.InterfaceError)

    sqlite3_flaky = pytest.mark.flaky(rerun_filter=rerun_filter, max_runs=10)
    for item in items:
        item.add_marker(sqlite3_flaky)
@pytest.fixture
def make_reader(request):
    """Fixture: a make_reader() factory whose readers are closed on teardown."""
    @wraps(original_make_reader)
    def make_reader(*args, **kwargs):
        reader = original_make_reader(*args, **kwargs)
        # Every reader created through the factory is closed automatically
        # when the requesting test finishes.
        request.addfinalizer(reader.close)
        return reader

    return make_reader


@pytest.fixture
def reader():
    """Fixture: an in-memory Reader, closed automatically after the test."""
    with closing(original_make_reader(':memory:', feed_root='')) as reader:
        yield reader


@pytest.fixture
def storage():
    """Fixture: an in-memory Storage, closed automatically after the test."""
    with closing(Storage(':memory:')) as storage:
        yield storage
# Thin wrappers over the various feed-update entry points.  All take
# (reader, url) so tests can be parametrized uniformly over them; the
# update-all variants ignore the second argument.

def call_update_feeds(reader, _):
    reader.update_feeds()


def call_update_feeds_workers(reader, _):
    reader.update_feeds(workers=2)


def call_update_feeds_iter(reader, _):
    # Drain the iterator so the updates actually execute.
    for _ in reader.update_feeds_iter():
        pass


def call_update_feeds_iter_workers(reader, _):
    for _ in reader.update_feeds_iter(workers=2):
        pass


def call_update_feed(reader, url):
    reader.update_feed(url)


@pytest.fixture(
    params=[
        call_update_feeds,
        pytest.param(call_update_feeds_workers, marks=pytest.mark.slow),
        call_update_feeds_iter,
        pytest.param(call_update_feeds_iter_workers, marks=pytest.mark.slow),
        call_update_feed,
    ]
)
def call_update_method(request):
    """Fixture: parametrizes a test over every update entry point above."""
    return request.param
def feed_arg_as_str(feed):
    """Normalize a feed argument to its URL-string form."""
    url = getattr(feed, "url")
    return url
def feed_arg_as_feed(feed):
    # Identity: pass the feed object through unchanged.
    return feed


@pytest.fixture(params=[feed_arg_as_str, feed_arg_as_feed])
def feed_arg(request):
    """Fixture: parametrizes over passing a feed as URL string vs. object."""
    return request.param


def entry_arg_as_tuple(entry):
    # Entry identified by its (feed URL, entry id) pair.
    return entry.feed.url, entry.id


def entry_arg_as_entry(entry):
    # Identity: pass the entry object through unchanged.
    return entry


@pytest.fixture(params=[entry_arg_as_tuple, entry_arg_as_entry])
def entry_arg(request):
    """Fixture: parametrizes over passing an entry as tuple vs. object."""
    return request.param


@pytest.fixture
def db_path(tmpdir):
    """Fixture: path to a per-test SQLite database file (not yet created)."""
    return str(tmpdir.join('db.sqlite'))


@pytest.fixture
def data_dir():
    """Fixture: py.path handle to this test suite's data/ directory."""
    return py.path.local(__file__).dirpath().join('data')
| [
"damian.adrian24@gmail.com"
] | damian.adrian24@gmail.com |
04d36b8214eca85f0cc1b1a99b63e644c6978f84 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-1333.py | a47f1e663eb94c1415272a2dacb4669d2651711d | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,350 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: $Type, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
e68f8e37594831072a4f8e00d262136ca8794866 | 5d902e2565b08dc6b8bb2f06231a4319d9715513 | /polyaxon/runner/spawners/notebook_spawner.py | 72319bf6c8f57f367669104d98e7564e0899ea9e | [
"MIT"
] | permissive | rohansaphal97/polyaxon | bd4febfc94b7d1aa95ef8152472c3dcba725f6b2 | ee42a05e40c4d400a281b3b2c5d26f5b46bd785c | refs/heads/master | 2020-03-13T19:51:44.783780 | 2018-04-26T17:52:35 | 2018-04-26T18:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,763 | py | import json
import logging
import random
from django.conf import settings
from libs.utils import get_hmac
from projects.paths import get_project_repos_path
from runner.spawners.base import get_pod_volumes
from runner.spawners.project_spawner import ProjectSpawner
from runner.spawners.templates import constants, deployments, ingresses, pods, services
logger = logging.getLogger('polyaxon.spawners.notebook')
class NotebookSpawner(ProjectSpawner):
    """Spawner managing a per-project Jupyter notebook on Kubernetes.

    Creates and deletes the deployment, service and (when ingress is
    enabled) ingress resources backing the notebook for one project.
    """

    NOTEBOOK_JOB_NAME = 'notebook'
    # Port the notebook server listens on inside the pod.
    PORT = 8888

    def get_notebook_url(self):
        """Return the service URL for the notebook job."""
        return self._get_service_url(self.NOTEBOOK_JOB_NAME)

    def get_notebook_token(self):
        """Return a deterministic per-project HMAC token for notebook auth."""
        return get_hmac(settings.APP_LABELS_NOTEBOOK, self.project_uuid)

    @staticmethod
    def get_notebook_code_volume():
        """Return (volume, volume_mount) exposing the repos claim to the pod."""
        volume = pods.get_volume(volume=constants.REPOS_VOLUME,
                                 claim_name=settings.REPOS_CLAIM_NAME,
                                 volume_mount=settings.REPOS_ROOT)
        volume_mount = pods.get_volume_mount(volume=constants.REPOS_VOLUME,
                                             volume_mount=settings.REPOS_ROOT)
        return volume, volume_mount

    def request_notebook_port(self):
        """Pick a service port.

        Without ingress the fixed PORT is used; with ingress a random free
        port in NOTEBOOK_PORT_RANGE is chosen, avoiding ports already used
        by other notebook services.

        NOTE(review): the used-port scan is not atomic, so two concurrent
        spawns could still race for the same port.
        """
        if not self._use_ingress():
            return self.PORT
        labels = 'app={},role={}'.format(settings.APP_LABELS_NOTEBOOK,
                                         settings.ROLE_LABELS_DASHBOARD)
        ports = [service.spec.ports[0].port for service in self.list_services(labels)]
        port = random.randint(*settings.NOTEBOOK_PORT_RANGE)
        while port in ports:
            port = random.randint(*settings.NOTEBOOK_PORT_RANGE)
        return port

    def start_notebook(self, image, resources=None):
        """Create the deployment/service (and optional ingress) for a notebook.

        image: docker image to run; resources: optional k8s resource spec
        forwarded to the deployment template.
        """
        ports = [self.request_notebook_port()]
        target_ports = [self.PORT]
        volumes, volume_mounts = get_pod_volumes()
        code_volume, code_volume_mount = self.get_notebook_code_volume()
        volumes.append(code_volume)
        volume_mounts.append(code_volume_mount)
        deployment_name = constants.DEPLOYMENT_NAME.format(
            project_uuid=self.project_uuid, name=self.NOTEBOOK_JOB_NAME)
        notebook_token = self.get_notebook_token()
        notebook_url = self._get_proxy_url(
            namespace=self.namespace,
            job_name=self.NOTEBOOK_JOB_NAME,
            deployment_name=deployment_name,
            port=ports[0])
        # Notebook root: <repos path>/<repo dir named after its last segment>.
        notebook_dir = get_project_repos_path(self.project_name)
        notebook_dir = '{}/{}'.format(notebook_dir, notebook_dir.split('/')[-1])
        deployment = deployments.get_deployment(
            namespace=self.namespace,
            app=settings.APP_LABELS_NOTEBOOK,
            name=self.NOTEBOOK_JOB_NAME,
            project_name=self.project_name,
            project_uuid=self.project_uuid,
            volume_mounts=volume_mounts,
            volumes=volumes,
            image=image,
            command=["/bin/sh", "-c"],
            args=[
                "jupyter notebook "
                "--no-browser "
                "--port={port} "
                "--ip=0.0.0.0 "
                "--allow-root "
                "--NotebookApp.token={token} "
                "--NotebookApp.trust_xheaders=True "
                "--NotebookApp.base_url={base_url} "
                "--NotebookApp.notebook_dir={notebook_dir} ".format(
                    port=self.PORT,
                    token=notebook_token,
                    base_url=notebook_url,
                    notebook_dir=notebook_dir)],
            ports=target_ports,
            container_name=settings.CONTAINER_NAME_PLUGIN_JOB,
            resources=resources,
            role=settings.ROLE_LABELS_DASHBOARD,
            type=settings.TYPE_LABELS_EXPERIMENT)
        deployment_labels = deployments.get_labels(app=settings.APP_LABELS_NOTEBOOK,
                                                   project_name=self.project_name,
                                                   project_uuid=self.project_uuid,
                                                   role=settings.ROLE_LABELS_DASHBOARD,
                                                   type=settings.TYPE_LABELS_EXPERIMENT)
        self.create_or_update_deployment(name=deployment_name, data=deployment)
        service = services.get_service(
            namespace=self.namespace,
            name=deployment_name,
            labels=deployment_labels,
            ports=ports,
            target_ports=target_ports,
            service_type=self._get_service_type())
        self.create_or_update_service(name=deployment_name, data=service)

        if self._use_ingress():
            # Route /notebook/<project path> through the ingress to the service.
            annotations = json.loads(settings.K8S_INGRESS_ANNOTATIONS)
            paths = [{
                'path': '/notebook/{}'.format(self.project_name.replace('.', '/')),
                'backend': {
                    'serviceName': deployment_name,
                    'servicePort': ports[0]
                }
            }]
            ingress = ingresses.get_ingress(namespace=self.namespace,
                                            name=deployment_name,
                                            labels=deployment_labels,
                                            annotations=annotations,
                                            paths=paths)
            self.create_or_update_ingress(name=deployment_name, data=ingress)

    def stop_notebook(self):
        """Tear down the notebook's deployment, service and (optional) ingress."""
        deployment_name = constants.DEPLOYMENT_NAME.format(project_uuid=self.project_uuid,
                                                           name=self.NOTEBOOK_JOB_NAME)
        self.delete_deployment(name=deployment_name)
        self.delete_service(name=deployment_name)
        if self._use_ingress():
            self.delete_ingress(name=deployment_name)
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
ef03b435fce9dbb91ba88d54ee8e945bdd417016 | 9d03d3e8b739a0a1aae7eca09fce6a6e3cd7fd9d | /model/position/__init__.py | 2d890d9eb6b9146c76025d13d2280bad7b01502c | [] | no_license | CallingWisdom/trade | c30954c8be17d7b140ad376011486caede69fd68 | a231ade6dbe99288a4ada2eec0e187b1e28594da | refs/heads/master | 2021-05-06T18:12:46.058972 | 2017-06-09T09:21:09 | 2017-06-09T09:21:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model.position.base_position import BasePosition
from model.position.future_position import FuturePosition
from model.position.stock_position import StockPosition
class Positions(dict):
    """Dict of positions keyed by instrument id.

    Reading a missing key returns a lazily-built position from an internal
    cache WITHOUT inserting it into the dict itself; ``get_or_create`` is
    the variant that does insert the position into the dict.
    """

    def __init__(self, position_cls):
        super(Positions, self).__init__()
        # Factory used to build a position for a previously unseen key.
        self._position_cls = position_cls
        # Positions handed out via __missing__, kept out of the dict proper.
        self._cached_positions = {}

    def __missing__(self, key):
        cache = self._cached_positions
        try:
            return cache[key]
        except KeyError:
            position = self._position_cls(key)
            cache[key] = position
            return position

    def get_or_create(self, key):
        if key in self:
            return self[key]
        position = self._position_cls(key)
        self[key] = position
        return position
| [
"511735184@qq.com"
] | 511735184@qq.com |
2cc9599e40b6bdaa83c3872fc6617694066af3ab | f1c3a21c820fc1b0d182c859486cc6461f299bb9 | /TCN-TF/tcn.py | 4331af07f57e7fee40cbfa8036ae0ad6083a1730 | [] | no_license | JZDBB/AVEC | 85ee92a90ca9517780e4cc59d250d0b82c12cdeb | 79bd55b80be4e2ebd08c376f91900dbbb60e6dca | refs/heads/master | 2020-04-17T03:14:04.481303 | 2019-01-15T11:34:48 | 2019-01-27T11:35:03 | 166,172,956 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,691 | py | #coding: utf-8
'''
Author: Weiping Song
Time: April 24, 2018
'''
import tensorflow as tf
from wnconv1d import wnconv1d
class TemporalConvNet(object):
    """Temporal Convolutional Network: a stack of dilated causal convolution
    residual blocks, written in TensorFlow 1.x graph style."""

    def __init__(self, num_channels, stride=1, kernel_size=2, dropout=0.2):
        # num_channels: per-level output channel counts; its length sets the
        # network depth (dilation doubles at every level).
        self.kernel_size=kernel_size
        self.stride = stride
        self.num_levels = len(num_channels)
        self.num_channels = num_channels
        self.dropout = dropout
        # Scalar bool placeholder; feed True/False at run time to toggle dropout.
        self.is_training = tf.placeholder(shape=[], dtype=tf.bool)

    def __call__(self, inputs):
        # inputs: rank-3 tensor whose last (static) dim is the channel count
        # -- presumably (batch, time, channels); TODO confirm with callers.
        inputs_shape = inputs.get_shape().as_list()
        outputs = [inputs]
        for i in range(self.num_levels):
            # Dilation grows exponentially per level (receptive field 2**i).
            dilation_size = 2 ** i
            in_channels = inputs_shape[-1] if i == 0 else self.num_channels[i-1]
            out_channels = self.num_channels[i]
            # Left padding (kernel_size-1)*dilation keeps each conv causal.
            output = self._TemporalBlock(outputs[-1], in_channels, out_channels, self.kernel_size,
                                         self.stride, dilation=dilation_size, padding=(self.kernel_size-1)*dilation_size,
                                         dropout=self.dropout, level=i)
            outputs.append(output)
            tf.summary.histogram('%d'%i, output)
        return outputs[-1]

    def _TemporalBlock(self, value, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2, level=0):
        """One residual block: two weight-normalized dilated causal convs
        (each followed by ELU + dropout) plus a 1x1-projected skip path."""
        # Pad only on the left (past) side of the time axis: causal conv.
        padded_value1 = tf.pad(value, [[0,0], [padding,0], [0,0]])
        self.conv1 = wnconv1d(inputs=padded_value1,
                              filters=n_outputs,
                              kernel_size=kernel_size,
                              strides=stride,
                              padding='valid',
                              dilation_rate=dilation,
                              activation=None,
                              weight_norm=True, #default is false.
                              kernel_initializer=tf.random_normal_initializer(0, 0.01),
                              bias_initializer=tf.zeros_initializer(),
                              name='layer'+str(level)+'_conv1')
        self.output1 = tf.contrib.layers.dropout(tf.nn.elu(self.conv1), keep_prob=1-dropout, is_training=self.is_training)
        padded_value2 = tf.pad(self.output1, [[0,0], [padding,0], [0,0]])
        self.conv2 = wnconv1d(inputs=padded_value2,
                              filters=n_outputs,
                              kernel_size=kernel_size,
                              strides=stride,
                              padding='valid',
                              dilation_rate=dilation,
                              activation=None,
                              weight_norm=True, #default is False.
                              kernel_initializer=tf.random_normal_initializer(0, 0.01),
                              bias_initializer=tf.zeros_initializer(),
                              name='layer'+str(level)+'_conv2')
        self.output2 = tf.contrib.layers.dropout(tf.nn.elu(self.conv2), keep_prob=1-dropout, is_training=self.is_training)
        if n_inputs != n_outputs:
            # 1x1 conv projects the residual path when channel counts differ.
            res_x = tf.layers.conv1d(inputs=value,
                                     filters=n_outputs,
                                     kernel_size=1,
                                     activation=None,
                                     kernel_initializer=tf.random_normal_initializer(0, 0.01),
                                     bias_initializer=tf.zeros_initializer(),
                                     name='layer'+str(level)+'_conv')
        else:
            res_x = value
        # NOTE(review): the slice drops the first 2*padding timesteps of the
        # block output, shortening the sequence at every level -- confirm
        # this truncation is intentional.
        return tf.nn.elu(res_x + self.output2)[:,2*padding:,:]
| [
"oxuyining@gmail.com"
] | oxuyining@gmail.com |
182058774046558c47942649a24c9481da11c275 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_4/vhrjoc001/question1.py | 7ecb00ea83049b6f3e69ecfb564bccfff7664dc8 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # test program for box drawer
import boxes
# Read the test selector; expected formats: "a", "b <width> <height>",
# or "c <width> <height>".
choice = input ("Choose test:\n")
action = choice[:1]
if action == 'a':
    boxes.print_square ()
elif action == 'b':
    width, height = map (int, choice[2:].split(" "))
    print ("calling function")
    # BUGFIX: was 'boxes.print_rectangle (width, lll)' -- 'lll' is undefined
    # (NameError at runtime); the parsed 'height' was clearly intended.
    boxes.print_rectangle (width, height)
    print ("called function")
elif action == 'c':
    width, height = map (int, choice[2:].split(" "))
    print ("calling function")
    figure = boxes.get_rectangle (width, height)
    print ("called function")
    print (figure)
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
7e104bc331c9f6d2a933011e4a39b82e9eadc828 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_14465-1033/sdB_EC_14465-1033_coadd.py | fd7c867f87c741b45aa844001303b9f6bd4ad19b | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from gPhoton.gMap import gMap
def main():
    """Build NUV count maps for sdB_EC_14465-1033 with gPhoton's gMap.

    Fixed sky position and a 2' x 2' sky range, 30 s movie steps; existing
    output files are overwritten.
    NOTE(review): cntfile lives under .../LIGHTCURVES/sdBs/... while
    cntcoaddfile uses .../LIGHTCURVES/sdB/... -- confirm the differing
    parent directory is intentional.
    """
    gMap(band="NUV", skypos=[222.304042,-10.760042], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_EC_14465-1033/sdB_EC_14465-1033_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_EC_14465-1033/sdB_EC_14465-1033_count_coadd.fits", overwrite=True, verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
0a817a5866d268b5b73ed64c8d8315ad39e5aadb | dca61d9b7cb3d746cd6354ebb0ff874d70242444 | /examples/e712.py | 02543ed08b945f374cf49fd1b7ff35918c69bcde | [] | no_license | patrickod/peppy | 5224be76148dc27e23376d7319009b9941eed690 | b70b266e4735f9ffe31b7b3cdac74c5f5d53ce7a | refs/heads/master | 2021-01-15T16:37:10.528218 | 2015-03-27T23:10:42 | 2015-03-27T23:10:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12 | py | if r==True:p | [
"david@drmaciver.com"
] | david@drmaciver.com |
53c5f847e2583a782f9bd1c46700bb9d009aaef4 | 0910e259a9bd252300f19b2ff22049d640f19b1a | /ml/m16_pipeline_RS3_wine.py | 7b06a57c93e8fc7ce2d3be0c4654e2fd695ad072 | [] | no_license | kimtaeuk-AI/Study | c7259a0ed1770f249b78f096ad853be7424a1c8e | bad5a0ea72a0117035b5e45652819a3f7206c66f | refs/heads/master | 2023-05-05T12:34:52.471831 | 2021-05-22T16:16:12 | 2021-05-22T16:16:12 | 368,745,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | #전처리 하나와 모델을 합침
import numpy as np
import tensorflow as tf
import pandas as pd
from sklearn.datasets import load_wine
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline, make_pipeline
import timeit
start_time = timeit.default_timer()
import warnings
warnings.filterwarnings('ignore')
dataset = load_wine()
x = dataset.data
y = dataset.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=44)
# Pipeline은 전처리 + 모델해줘서 MinMaxScaler문 생략 가능
# from sklearn.preprocessing import MinMaxScaler
# scale = MinMaxScaler()
# scale.fit(x_train)
# x_train = scale.transform(x_train)
# x_test = scale.transform(x_test)
parameters = [
{"svc__C" :[1,10,100,1000], "svc__kernel":["linear"]}, # 1주고 linear, 10주고 linear, ... 4번
{"svc__C" :[1,10,100], "svc__kernel":["rbf"], "svc__gamma":[0.001, 0.0001]}, #3x2 6번
{"svc__C" :[1,10,100,1000], "svc__kernel":["sigmoid"],"svc__gamma":[0.001, 0.0001]}] #4x2 8번
parameters = [
{"mal__C" :[1,10,100,1000], "mal__kernel":["linear"]}, # 1주고 linear, 10주고 linear, ... 4번
{"mal__C" :[1,10,100], "mal__kernel":["rbf"], "mal__gamma":[0.001, 0.0001]}, #3x2 6번
{"mal__C" :[1,10,100,1000], "mal__kernel":["sigmoid"],"mal__gamma":[0.001, 0.0001]}] #4x2 8번
# 언더바 (_) 두개 써줘야한다
# 2. 모델
Pipe = Pipeline([('scale', MinMaxScaler()), ('mal', SVC())]) #SVC모델과 MinMax 를합친다 , 괄호 조심
# pipe = make_pipeline(StandardScaler(), SVC()) # 두가지 방법이 있다.
# Pipeline 써주는 이유 : 트레인만 하는게 효과적, cv만큼 스케일링, 과적합 방지, 모델에 적합해서 성능이 강화 .....
model = GridSearchCV(Pipe, parameters, cv=5)
model.fit(x_train, y_train)
results = model.score(x_test, y_test)
print('results : ', results)
# MinMaxScaler
# results : 0.9666666666666667
# StandardScaler
# results : 0.9666666666666667
| [
"ki3123.93123@gmail.com"
] | ki3123.93123@gmail.com |
0d389ad98371546fa8ff77dbfb4acf4c1ea82b87 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_pyres.py | 451934aa41c0b1e439c98c40e31627b1188cf4eb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
from xai.brain.wordbase.nouns._pyre import _PYRE
#calss header
class _PYRES(_PYRE, ):
def __init__(self,):
_PYRE.__init__(self)
self.name = "PYRES"
self.specie = 'nouns'
self.basic = "pyre"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
0214d99b83874837d35712c1e0bd2eb3d8662662 | ab9a0e787695b9d04483cac5b710931287ed3e54 | /ruia_cache/cache_patch/__init__.py | 0172eabb8a618b63f96a73bc4c75f3327546d912 | [
"Apache-2.0"
] | permissive | python-ruia/ruia-cache | 42a529b17192b31237bc18ad2126400ec20ce9dd | a18609b29e76ad11c81aa1254e6b2d8a49454abd | refs/heads/main | 2023-02-15T08:38:31.565745 | 2021-01-03T13:44:36 | 2021-01-03T13:44:36 | 325,297,883 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | #!/usr/bin/env python
"""
Created by howie.hu at 2021/1/3.
"""
from .req_cache import req_cache
from .resp_cache import resp_cache
| [
"howie6879@gmail.com"
] | howie6879@gmail.com |
73e7b9a82f081a1016e159620287c4a2a6122cfc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03591/s884764547.py | 1815b3208c1d9b6c990814ebd5e42c8730365a3d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | S = str(input())
if S[0:4] == "YAKI":
print("Yes")
else:
print("No")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c6a56fe501ce17f3f011512f6b4d3bee67f1950e | 9c228dfa2033689acbc480dea99181c13e9b0dfc | /manage.py | 38409518ac0027dd2c3d985c8a507e1a31a14dc5 | [] | no_license | crowdbotics-apps/test-dj-app-11-dev-1596 | 3c4dae8cf0bda00501dcc46a65c4efd40e22beb4 | baa41a3ef71a28fc395a4b53b1dda0d70c6764e6 | refs/heads/master | 2022-04-04T13:09:52.739143 | 2020-02-04T17:35:58 | 2020-02-04T17:35:58 | 238,265,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_dj_app_11_dev_1596.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
a7c4d6b4e6bd2511795d7fcb758420f5aa2a213c | db58ec54f85fd8d4ef6904529c5b17393ee041d8 | /elements-of-programming-interviews/6-strings/6.0-is-palindromic/is_palindromic.py | 51af6cba620333b2d1bb581fc6a5ee4538ae64b0 | [
"MIT"
] | permissive | washimimizuku/python-data-structures-and-algorithms | 90ae934fc7d2bac5f50c18e7fbc463ba0c026fa4 | 537f4eabaf31888ae48004d153088fb28bb684ab | refs/heads/main | 2023-08-28T07:45:19.603594 | 2021-11-08T07:53:52 | 2021-11-08T07:53:52 | 334,213,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | '''
A palindromic string is one which reads
the same when it is reversed.
'''
def is_palindromic(s): # Time: O(n) | Space: O(1)
# Note that s[~i] for i in [0, len(s) - 1] is s[-(i + 1)].
return all(s[i] == s[~i] for i in range(len(s) // 2))
assert(is_palindromic('abba') == True)
assert(is_palindromic('abcba') == True)
assert(is_palindromic('abcdef') == False)
| [
"nuno.barreto@inventsys.ch"
] | nuno.barreto@inventsys.ch |
64725aaf76e56daf87bc258c20f88508543ed1e4 | f632abaa923aa5be0cd53e1afbf370487467cd4a | /libstasis/interfaces.py | c80b7ddaeaea0c8d8d81d058a6df536def5eb8b1 | [] | no_license | fschulze/libstasis | a8706d12b8dab0657a6c178d5eaac3c99357c7b9 | e5fd7bafd918b1e315c105a55fe68fd3cee1af7f | refs/heads/master | 2016-08-04T19:21:25.703387 | 2014-03-16T09:36:39 | 2014-03-16T09:36:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | from zope.interface import Interface
class IAspects(Interface):
pass
| [
"florian.schulze@gmx.net"
] | florian.schulze@gmx.net |
c410f87411390859475a290062a07792f214d010 | e21599d08d2df9dac2dee21643001c0f7c73b24f | /Others/Modules/pandas/t.py | 5f8e000908eb234c7fd0d1dc48139513a8afcbf0 | [] | no_license | herolibra/PyCodeComplete | c7bf2fb4ce395737f8c67749148de98a36a71035 | 4ef7d2c3aec6d28a53eed0e649cdeb74df3d783b | refs/heads/master | 2022-07-17T05:39:03.554760 | 2020-05-03T07:00:14 | 2020-05-03T07:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | #!/usr/bin/env python
# coding=utf-8
import pandas as pd
import numpy as np
dates = pd.date_range('20170101', periods=6)
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))
print(df)
print(df.T) | [
"ijumper@163.com"
] | ijumper@163.com |
59ef791b08647f7d518ef7a5725b686527f30927 | fe6d422ab3f5c6f922d22cd3db2a7d539128620e | /apps/course/migrations/0001_initial.py | 013263198febe2f303ce675ec9670b827b4a4523 | [] | no_license | syongyulin/syl_online | 521e81199bf07c2d83371d486a00842716cfa99e | ffb8da789059c115e9ac7597a547d50e6d69603e | refs/heads/master | 2023-01-10T00:35:07.120987 | 2020-11-13T00:14:34 | 2020-11-13T00:14:34 | 312,281,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,838 | py | # Generated by Django 2.2.16 on 2020-11-03 11:13
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='课程名')),
('desc', models.CharField(max_length=300, verbose_name='课程描述')),
('detail', models.TextField(verbose_name='课程详情')),
('degree', models.CharField(choices=[('cj', '初级'), ('zj', '中级'), ('gj', '高级')], max_length=2, verbose_name='难度')),
('learn_times', models.IntegerField(default=0, verbose_name='学习时长(分钟数)')),
('students', models.IntegerField(default=0, verbose_name='学习人数')),
('fav_nums', models.IntegerField(default=0, verbose_name='收藏人数')),
('image', models.ImageField(upload_to='courses/%Y/%m', verbose_name='封面图')),
('click_nums', models.IntegerField(default=0, verbose_name='点击数')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
],
options={
'verbose_name': '课程',
'verbose_name_plural': '课程',
},
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='章节名')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Course', verbose_name='课程')),
],
options={
'verbose_name': '章节',
'verbose_name_plural': '章节',
},
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='视频名')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
('lesson', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Lesson', verbose_name='章节')),
],
options={
'verbose_name': '视频',
'verbose_name_plural': '视频',
},
),
migrations.CreateModel(
name='CourseResource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='名称')),
('download', models.FileField(upload_to='course/resource/%Y/%m', verbose_name='资源文件')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Course', verbose_name='课程')),
],
options={
'verbose_name': '课程资源',
'verbose_name_plural': '课程资源',
},
),
]
| [
"1030918477@qq.com"
] | 1030918477@qq.com |
7f5bb23c3917710fc33198ccef0a8d813bb00a59 | cf332f2c6f4d1e1b6c650bdb803fd6bc2966858b | /apps/commission/migrations/0002_initial.py | 9008340c0d3e76449ffa45469b2fe4bc1250c03b | [] | no_license | drc-ima/rabito-crm-sample | 7b873df8e8b6cc4722dfe730c82644943b41f871 | 26ed884b445e8a03b04fd5ea2b4d5402aa66b659 | refs/heads/main | 2023-08-23T01:30:28.147646 | 2021-09-20T22:56:55 | 2021-09-20T22:56:55 | 404,700,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | # Generated by Django 3.2.6 on 2021-08-23 09:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('commission', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='commissionsetup',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='commission_setups', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='commissionsetup',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='modified_commission_setups', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='commission',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='commissions', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='commission',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='modified_commissions', to=settings.AUTH_USER_MODEL),
),
]
| [
"emmanuelofosu472@gmail.com"
] | emmanuelofosu472@gmail.com |
40f9d36ee88b4c5c002c4b400c15077afd21d657 | 0df5d17a8c359a4b6f02a29d444a60be946f12e3 | /sem título0.py | 65b5c48272e8bdf02a3f6d6f6f8132fac627aec4 | [] | no_license | andre23arruda/dicom3d-viewer-python | 1ef10d4c7c76b0afb93016bcb7f000fe00f2f8ca | 87423548aafabe5e0067437037c927fbd4da7467 | refs/heads/master | 2020-09-22T07:33:44.066649 | 2019-12-04T02:53:00 | 2019-12-04T02:53:00 | 225,106,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 23:29:42 2019
@author: alca0
"""
#%%
import pydicom
dicom_obj = pydicom.dcmread(r"C:\Users\alca0\Downloads\img_example\136_BIOIMAGEM_ARC_45_20190713_CC_L_2.dcm")
dicom_obj.add_new(0x999999,'HUE', 'HARPIA' )
dicom_obj | [
"andre23arruda@gmail.com"
] | andre23arruda@gmail.com |
0bee84389eb15d39e82917e0c96f63e9af076257 | 710816603a6a4988ca8f162da6e70cb6127f3595 | /utilipy/extern/doc_parse_tools/tests/test_napolean_parse_tools.py | 98e6058c5e1029c0a59cc3c71cfc587cde060688 | [
"BSD-3-Clause"
] | permissive | nstarman/utilipy | 8b1e51d5a73f4e0f3226c0905101cb6d02cc9bf0 | 17984942145d31126724df23500bafba18fb7516 | refs/heads/master | 2023-07-25T19:17:03.925690 | 2020-12-21T05:03:10 | 2020-12-21T05:03:10 | 192,425,953 | 2 | 1 | BSD-3-Clause | 2022-09-12T18:57:44 | 2019-06-17T22:20:57 | Python | UTF-8 | Python | false | false | 841 | py | # -*- coding: utf-8 -*-
"""Tests for :mod:`~utilipy.utils.doc_parse_tools.napoleon_parse_tools`."""
__all__ = [
"test_napoleon_parse_tools",
]
##############################################################################
# IMPORTS
# THIRD PARTY
import pytest
##############################################################################
# PARAMETERS
##############################################################################
# CODE
##############################################################################
@pytest.mark.skip(reason="TODO")
def test_napoleon_parse_tools():
"""Test :mod:`~utilipy.utils.doc_parse_tools.napoleon_parse_tools`."""
# /def
# -------------------------------------------------------------------
##############################################################################
# END
| [
"nstarkman@protonmail.com"
] | nstarkman@protonmail.com |
b3c660de08f22bb4f340e969ba674dd9deb04317 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_MovingMedian_NoCycle_NoAR.py | fa39c3df3ad94b5d3666ad67652be37c9f374d52 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 151 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['MovingMedian'] , ['NoCycle'] , ['NoAR'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
4a284415b37d849bfb8c4d03e088073e324cb5d3 | 44fd3b7a2b1f1e382ceffa72afa8a173c509d278 | /test/test_helpers.py | 9a9c81bed74d4eb5d7cba089964c5cb3ddc3284f | [
"Apache-2.0"
] | permissive | flit/cmdis | 1692aec69ea3b53e18efbb2ba85c3f6ea407e3d2 | 91b7c7430114f3cf260206abc926b86c6e81c51b | refs/heads/master | 2020-12-24T19:05:14.820761 | 2019-06-09T00:21:56 | 2019-06-09T00:21:56 | 56,406,698 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,942 | py | # Copyright (c) 2016-2019 Chris Reed
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cmdis.bitstring import *
from cmdis.helpers import *
class TestAlign:
def test_0(self):
assert Align(0x1001, 4) == 0x1000
def test_1(self):
assert Align(0x1003, 4) == 0x1000
def test_2(self):
assert Align(0x1003, 2) == 0x1002
def test_3(self):
assert Align(0x1007, 16) == 0x1000
def test_4(self):
assert Align(bitstring(0x1001), 4) == bitstring(0x1000)
def test_5(self):
assert Align(bitstring(0x1003), 4) == bitstring(0x1000)
def test_6(self):
assert Align(bitstring(0x1003), 2) == bitstring(0x1002)
def test_7(self):
assert Align(bitstring(0x1007), 16) == bitstring(0x1000)
class TestAddWithCarry:
def test_0(self):
x = bitstring('00000')
y = bitstring('00000')
assert AddWithCarry(x, y, bit0) == (0, 0, 0)
def test_1(self):
x = bitstring('00001')
y = bitstring('00000')
assert AddWithCarry(x, y, bit0) == (1, 0, 0)
def test_1p1(self):
x = bitstring('00001')
y = bitstring('00001')
assert AddWithCarry(x, y, bit0) == (2, 0, 0)
def test_a(self):
x = bitstring('00101')
y = bitstring('00011')
assert AddWithCarry(x, y, bit0) == ('01000', 0, 0)
def test_carry_0(self):
x = bitstring('00000')
y = bitstring('00000')
assert AddWithCarry(x, y, bit1) == ('00001', 0, 0)
def test_b(self):
x = bitstring(5432)
y = bitstring(143223)
assert AddWithCarry(x, y, bit0) == (148655, 0, 0)
def test_c(self):
# x = bitstring()
pass
class TestLSL:
def test_0(self):
assert LSL_C(bitstring('1001'), 1) == ('0010', '1')
def test_1(self):
assert LSL_C(bitstring('0001'), 1) == ('0010', '0')
class TestLSR:
def test_0(self):
assert LSR_C(bitstring('1001'), 1) == ('0100', '1')
def test_1(self):
assert LSR_C(bitstring('0100'), 1) == ('0010', '0')
class TestASR:
def test_0(self):
assert ASR_C(bitstring('1001000'), 1) == ('1100100', '0')
def test_1(self):
assert ASR_C(bitstring('0100000'), 1) == ('0010000', '0')
def test_2(self):
assert ASR_C(bitstring('0100001'), 1) == ('0010000', '1')
def test_3(self):
assert ASR_C(bitstring('1001001'), 1) == ('1100100', '1')
def test_4(self):
assert ASR_C(bitstring('1001001'), 4) == ('1111100', '1')
class TestROR:
def test_0(self):
assert ROR_C(bitstring('1001'), 1) == ('1100', '1')
def test_1(self):
assert ROR_C(bitstring('0100'), 1) == ('0010', '0')
class TestRRX:
def test_0(self):
assert RRX_C(bitstring('1001'), bit0) == ('0100', '1')
def test_1(self):
assert RRX_C(bitstring('0100'), bit1) == ('1010', '0')
def test_2(self):
assert RRX_C(bitstring('0111'), bit1) == ('1011', '1')
def test_3(self):
assert RRX_C(bitstring('0110'), bit0) == ('0011', '0')
class TestDecodeImmShift:
def test_lsl(self):
assert DecodeImmShift(bitstring('00'), bitstring('00000')) == (SRType.SRType_LSL, 0)
assert DecodeImmShift(bitstring('00'), bitstring('00001')) == (SRType.SRType_LSL, 1)
assert DecodeImmShift(bitstring('00'), bitstring('11111')) == (SRType.SRType_LSL, 31)
def test_lsr(self):
assert DecodeImmShift(bitstring('01'), bitstring('00000')) == (SRType.SRType_LSR, 32)
assert DecodeImmShift(bitstring('01'), bitstring('00001')) == (SRType.SRType_LSR, 1)
assert DecodeImmShift(bitstring('01'), bitstring('11111')) == (SRType.SRType_LSR, 31)
def test_asr(self):
assert DecodeImmShift(bitstring('10'), bitstring('00000')) == (SRType.SRType_ASR, 32)
assert DecodeImmShift(bitstring('10'), bitstring('00001')) == (SRType.SRType_ASR, 1)
assert DecodeImmShift(bitstring('10'), bitstring('11111')) == (SRType.SRType_ASR, 31)
def test_rrx(self):
assert DecodeImmShift(bitstring('11'), bitstring('00000')) == (SRType.SRType_RRX, 1)
def test_ror(self):
assert DecodeImmShift(bitstring('11'), bitstring('00001')) == (SRType.SRType_ROR, 1)
assert DecodeImmShift(bitstring('11'), bitstring('11111')) == (SRType.SRType_ROR, 31)
class TestThumbExpandImm:
def test_a(self):
pass
| [
"flit@me.com"
] | flit@me.com |
f31310cb2b2ed1f2ba6a0021484a25f897389644 | fd18ce27b66746f932a65488aad04494202e2e0d | /d16_plot_linalg/codes_plt/mpl14_Axes_Legend_mix.py | c89af939d9ab40b8d34855eb7c6724018b3c3b4e | [] | no_license | daofeng123/ClassCodes | 1acbd843836e550c9cebf67ef21dfca9f6b9fc87 | fbcd1f24d79b8bb56ad0669b07ad118064609612 | refs/heads/master | 2020-06-24T12:34:28.148197 | 2019-08-15T03:56:40 | 2019-08-15T03:56:40 | 198,963,469 | 3 | 0 | null | 2019-07-26T06:53:45 | 2019-07-26T06:53:44 | null | UTF-8 | Python | false | false | 2,760 | py | #!/usr/bin/python
# coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
'''
操作并管理Axes中的Legend。
1、绘制主题:
|-Axes.legend(*args, **kwargs)
返回matplotlib.legend.Legend对象
|-handles : sequence of Artist, optional
|-labels : sequence of strings, optional
|-loc : int or string or pair of floats, default: rcParams["legend.loc"] ('best' for axes, 'upper right' for figures)
|-bbox_to_anchor : BboxBase, 2-tuple, or 4-tuple of floats
|-ncol : integer
|-prop : None or matplotlib.font_manager.FontProperties or dict
|-fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}
|-numpoints : None or int
|-scatterpoints : None or int
|-scatteryoffsets : iterable of floats
|-markerscale : None or int or float
|-markerfirst : bool
|-frameon : None or bool
|-fancybox : None or bool
|-shadow : None or bool
|-framealpha : None or float
|-facecolor : None or "inherit" or a color spec
|-edgecolor : None or "inherit" or a color spec
|-mode : {"expand", None}
|-bbox_transform : None or matplotlib.transforms.Transform
|-title : str or None
|-title_fontsize: str or None
|-borderpad : float or None
|-labelspacing : float or None
|-handlelength : float or None
|-handletextpad : float or None
|-borderaxespad : float or None
|-columnspacing : float or None
|-handler_map : dict or None
2、返回主题:
|-Axes.get_legend()
|-返回Legend对象
|-Axes.get_legend_handles_labels(legend_handler_map=None)
|-返回Legend句柄与标签
'''
figure = plt.figure('Legend使用', figsize=(5, 4))
# 可以直接在add_axes函数中设置
ax = figure.add_axes([0.1, 0.1, 0.8, 0.8])
ax.set_ylim(-1, 1)
ax.set_xlim(-1, 1)
line1 = plt.Line2D(
xdata=np.linspace(-1, 1, 20),
ydata=np.random.uniform(-1, 1, size=20),
label='Line1',
color=(0, 1, 0, 1)
)
line2 = plt.Line2D(
xdata=np.linspace(-1, 1, 20),
ydata=np.random.uniform(-1, 1, size=20),
label='Line2',
color=(1, 0, 0, 1)
)
line3 = ax.plot(
np.linspace(-1, 1, 20),
np.random.uniform(-1, 1, size=20),
'#0000FF',
label='Line3'
)
# ax.add_artist(line1)
# add_artist不会自动显示label。
ax.add_line(line1)
ax.add_line(line2)
pie = ax.pie([2, 3, 4], labels=['X', 'Y', 'Z'], frame=True)
# 注意上面的添加顺序
# ---------------------------------------------
lg = ax.legend()
# ax.set_axis_on()
# ax.set_frame_on(b=True) # 控制可见
# ---------------------------------------------
figure.show(warn=False)
plt.show()
| [
"38395870@qq.com"
] | 38395870@qq.com |
7ed11620705eef521c64e9e2c2acf8e3d8abf122 | ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86 | /pychron/dvc/fix_identifier.py | abe5d9947d53c7e593ce83ee2e9fa2da1f6f2930 | [
"Apache-2.0"
] | permissive | UManPychron/pychron | 2fb7e479a9f492423c0f458c70102c499e1062c4 | b84c9fd70072f9cbda30abe2c471e64fe3dd75d8 | refs/heads/develop | 2022-12-03T23:32:45.579326 | 2020-01-29T19:02:20 | 2020-01-29T19:02:20 | 36,100,637 | 0 | 0 | null | 2015-05-23T00:10:06 | 2015-05-23T00:10:05 | null | UTF-8 | Python | false | false | 3,816 | py | # ===============================================================================
# Copyright 2018 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import shutil
from pychron.dvc import analysis_path, dvc_load, dvc_dump
def fix_identifier(source_id, destination_id, root, repo, aliquots, steps, dest_steps=None, dest_aliquots=None):
if dest_aliquots is None:
dest_aliquots = aliquots
if dest_steps is None:
dest_steps = steps
# for a, da, step, dstep in zip(aliquots, dest_aliquots, steps, dest_steps):
# src_id = '{}-{:02n}{}'.format(source_id, a, step)
# dest_id = '{}-{:02n}{}'.format(destination_id, da, dstep)
def _fix_id(src_id, dest_id, identifier, root, repo, new_aliquot=None):
sp = analysis_path(src_id, repo, root=root)
dp = analysis_path(dest_id, repo, root=root, mode='w')
print(sp, dp)
if not os.path.isfile(sp):
print('not a file', sp)
return
jd = dvc_load(sp)
jd['identifier'] = identifier
if new_aliquot:
jd['aliquot']= new_aliquot
dvc_dump(jd, dp)
print('{}>>{}'.format(sp, dp))
for modifier in ('baselines', 'blanks', 'extraction',
'intercepts', 'icfactors', 'peakcenter', '.data'):
sp = analysis_path(src_id, repo, modifier=modifier, root=root)
dp = analysis_path(dest_id, repo, modifier=modifier, root=root, mode='w')
print('{}>>{}'.format(sp,dp))
if sp and os.path.isfile(sp):
# shutil.copy(sp, dp)
shutil.move(sp,dp)
def swap_identifier(a, a_id, b, b_id, c_id, root, repo):
'''
a -> c
replace a with b
replace b with c
'''
_fix_id(a_id, c_id, a, root, repo)
_fix_id(b_id, a_id, a, root, repo)
_fix_id(c_id, b_id, b, root, repo)
if __name__ == '__main__':
root = '/Users/ross/PychronDev/data/.dvc/repositories/'
repo = 'Henry01104'
_fix_id('66151-01', '66150-21', '66150', root, repo, new_aliquot=21)
_fix_id('66151-12', '66150-22', '66150', root, repo, new_aliquot=22)
_fix_id('66149-20', '66150-23', '66150', root, repo, new_aliquot=23)
_fix_id('66150-06', '66149-29', '66149', root, repo, new_aliquot=29)
# repo = 'FCTest'
#
# a_id = '26389-03'
# a = '26389'
# b_id = '26381-03'
# b = '26381'
# c_id = '26389-03X'
#
# swap_identifier(a,a_id, b, b_id, c_id, root, repo)
# repo = 'Saifuddeen01097'
# fix_identifier('66340', '66431', '/Users/ross/PychronDev/data/.dvc/repositories/',
# 'Saifuddeen01097',
# aliquots=[2],
# steps=['A','B']
# dest_aliquots=[1]
# # aliquots=[1,]#2,3,4,5,6]
# )
# _fix_id('66340-02A', '66341-01A', '66341', root, repo)
# _fix_id('66340-02B', '66341-01B', '66341', root, repo)
# identifier = '66550'
# source_identifier = '66560'
# for step in 'ABCDEFGHIJL':
# _fix_id('{}-01{}'.format(source_identifier, step), '{}-01{}'.format(identifier, step), identifier, root, repo)
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
b616ac2d64848df30978df2bd3675649d66d0a24 | 466607c14d76c8d798e08f05dde2d79a07f6e069 | /src/stk/_internal/topology_graphs/metal_complex/porphyrin.py | f08b0ce0358bd563c93fd9a87d2a2c8a85a7cee8 | [
"MIT"
] | permissive | andrewtarzia/stk | 7c77006bacd4d3d45838ffb3b3b4c590f1bce336 | 9242c29dd4b9eb6927c202611d1326c19d73caea | refs/heads/main | 2023-08-03T12:29:21.096641 | 2023-07-27T09:45:25 | 2023-07-27T09:45:25 | 191,198,174 | 0 | 1 | MIT | 2023-09-04T16:53:05 | 2019-06-10T15:49:25 | Python | UTF-8 | Python | false | false | 3,290 | py | """
Porphyrin
=========
"""
from stk._internal.topology_graphs.edge import Edge
from .metal_complex import MetalComplex
from .vertices import MetalVertex, UnaligningVertex
class Porphyrin(MetalComplex):
"""
Represents a metal complex topology graph.
.. moldoc::
import moldoc.molecule as molecule
import stk
bb1 = stk.BuildingBlock(
smiles='[Fe+2]',
functional_groups=(
stk.SingleAtom(stk.Fe(0, charge=2))
for i in range(6)
),
position_matrix=[[0, 0, 0]],
)
bb2 = stk.BuildingBlock(
smiles=(
'C1=CC2=CC3=CC=C([N]3)C=C4C=CC'
'(=N4)C=C5C=CC(=N5)C=C1[N]2'
),
functional_groups=[
stk.SmartsFunctionalGroupFactory(
smarts='[#6]~[#7]~[#6]',
bonders=(1, ),
deleters=(),
),
],
)
complex = stk.ConstructedMolecule(
topology_graph=stk.metal_complex.Porphyrin(
metals=bb1,
ligands=bb2,
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
complex.get_atoms(),
complex.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=(
1
if bond.get_order() == 9
else bond.get_order()
),
) for bond in complex.get_bonds()
),
)
Metal building blocks with at least four functional groups are
required for this topology.
Ligand building blocks with at least four functional group are
required for this topology graph.
When using a :class:`dict` for initialization, a
:class:`.BuildingBlock` needs to be assigned to each of the
following numbers:
| metals: (0, )
| ligands: (0, )
See :class:`.MetalComplex` for more details and examples.
"""
_metal_vertex_prototypes = (MetalVertex(0, (0, 0, 0)),)
_ligand_vertex_prototypes = (UnaligningVertex(1, (0, 0, 0)),)
_edge_prototypes = (
Edge(
id=0,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[0],
position=(0.1, 0, 0),
),
Edge(
id=1,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[0],
position=(0, 0.1, 0),
),
Edge(
id=2,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[0],
position=(-0.1, 0, 0),
),
Edge(
id=3,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[0],
position=(0, -0.1, 0),
),
)
| [
"noreply@github.com"
] | andrewtarzia.noreply@github.com |
0666fd08858744b6c502b012d302740cac401fd8 | 466e5e56d2f350bcea90683af67e160138af836c | /Onsite/Week-1/Wednesday/Latency.py | 471e936cb47495a806c37af5f5adf64c0b45ab64 | [] | no_license | max180643/Pre-Programming-61 | bafbb7ed3069cda5c2e64cf1de590dfb4a542273 | e68d4a69ffeedd4269fffc64b9b81e845a10da4d | refs/heads/master | 2021-06-17T14:10:44.889814 | 2019-08-01T09:35:42 | 2019-08-01T09:35:42 | 135,553,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | """Dorm EP.2 - Latency"""
def main():
"""Main Function"""
latency1 = int(input())
latency2 = int(input())
latency3 = int(input())
latency4 = int(input())
latency5 = int(input())
latency6 = int(input())
print("%i ms"%(min(latency1, latency2, latency3, latency4, latency5, latency6)))
main()
| [
"noreply@github.com"
] | max180643.noreply@github.com |
02f6daeee451eb6708e5ba187eab3cbf161a4536 | d6be053915c065fe6da71afddd28429d144fee68 | /realpython_tutorials/python_type_checking/game_annotated.py | f0c26e340af003ff168f45e93fb238c2911132af | [] | no_license | DrShushen/practice_py | 61bc28f52783f8304cce1d834def4934ba6ee8e1 | cf40ec43ccd73aa835c4e65e6a4b41408b90a3ea | refs/heads/master | 2023-01-08T06:57:10.852157 | 2023-01-03T22:58:11 | 2023-01-03T22:58:11 | 211,668,464 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | # game.py
import random
from typing import Dict, List, Tuple
SUITS = "♠ ♡ ♢ ♣".split()
RANKS = "2 3 4 5 6 7 8 9 10 J Q K A".split()
Card = Tuple[str, str]
Deck = List[Card]
def create_deck(shuffle: bool = False) -> Deck:
"""Create a new deck of 52 cards"""
deck = [(s, r) for r in RANKS for s in SUITS]
if shuffle:
random.shuffle(deck)
return deck
def deal_hands(deck: Deck) -> Tuple[Deck, Deck, Deck, Deck]:
"""Deal the cards in the deck into four hands"""
return (deck[0::4], deck[1::4], deck[2::4], deck[3::4])
def play():
    """Deal a shuffled deck to four players and print each player's hand."""
    shuffled = create_deck(shuffle=True)
    player_names = "P1 P2 P3 P4".split()
    hands = dict(zip(player_names, deal_hands(shuffled)))
    for player, cards in hands.items():
        formatted = " ".join(f"{s}{r}" for (s, r) in cards)
        print(f"{player}: {formatted}")


if __name__ == "__main__":
    play()
| [
"e.s.saveliev@gmail.com"
] | e.s.saveliev@gmail.com |
4fd702bbbd12443a824253f0216192ae4fc30f7c | 62855ed774f7e0b45e1810dde659a8ce0320fe32 | /demxf/viz.py | cf14601da1843a7ead6e617adaf4b5ed51d4261d | [
"MIT"
] | permissive | dhockaday/demxf | d55323cf974310c47b98e3ae2e871f02dc95ff25 | c45d06ce88dbd173a13ec6da35869d2117e77fee | refs/heads/master | 2023-03-17T05:10:12.574869 | 2017-11-16T12:28:57 | 2017-11-16T12:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | import graphviz
from demxf.decompiler import (
BasePatchDecompiler,
filter_keys,
get_box_printable_class,
nonprinted_keys,
remove_aesthetics,
)
class PatchGrapher(BasePatchDecompiler):
    """Decompiler variant that renders a patch as a graphviz digraph.

    Boxes become graph nodes and patch lines become labelled edges.
    The box dicts read here carry at least the keys: 'id', 'maxclass',
    'numoutlets', 'numinlets', plus optional 'hidden', 'outlettype', 'text'
    (presumably the Max/MSP .maxpat schema — confirm against the decompiler).
    """
    def initialize(self, content):
        """Let the base class parse *content*, then start an empty digraph."""
        super(PatchGrapher, self).initialize(content)
        self.graph = graphviz.Digraph()
    def process_box(self, box):
        """Emit a node for *box* and one edge per connection leaving it."""
        # NOTE(review): 'id' shadows the builtin; kept unchanged here.
        id = box['id']
        # Copy so the key filtering below cannot mutate the caller's dict.
        box = box.copy()
        if box['maxclass'] == 'comment':
            # Comment boxes carry no signal flow, so they are not drawn.
            return
        d_id = self.divined_id_map[id]
        # Strip aesthetic-only keys, then keys that should never be printed.
        filtered_box = filter_keys(remove_aesthetics(box), nonprinted_keys)
        formatted_keys = ['{}={}'.format(key, repr(value)) for (key, value) in sorted(filtered_box.items())]
        # A plain 'newobj' whose only surviving key is its text is labelled by that text alone.
        if box['maxclass'] == 'newobj' and len(filtered_box) == 1:
            label = filtered_box['text']
        else:
            # Otherwise: printable class name plus key=value lines, each capped at 200 chars.
            label = '{name}\n{keys}'.format(
                name=get_box_printable_class(box),
                keys='\n'.join(l[:200] for l in formatted_keys),
            )
        # Hidden boxes are drawn with a dotted outline.
        self.graph.node(d_id, label, shape='box', style=('dotted' if box.get('hidden') else ''))
        # Map outlet index -> declared outlet type (may be missing entirely).
        outlet_types = dict(enumerate(box.get('outlettype', [])))
        lines_from_here = self.lines_by_source_id[id]
        for line in sorted(lines_from_here):
            (source_id, source_pin), (dest_id, dest_pin) = line
            # NOTE(review): 'type' shadows the builtin; falls back to '' when unknown.
            type = outlet_types.get(source_pin) or ''
            source_outlets = box['numoutlets']
            dest_inlets = self.boxes[dest_id]['numinlets']
            # Pin numbers are shown only when either end has more than one pin
            # (displayed 1-based, hence the +1).
            if source_outlets > 1 or dest_inlets > 1:
                label = '%s %d:%d' % (type, source_pin + 1, dest_pin + 1)
            else:
                label = type
            self.graph.edge(
                tail_name='{id}'.format(id=self.divined_id_map[source_id], pin=source_pin),
                head_name='{id}'.format(id=self.divined_id_map[dest_id], pin=dest_pin),
                label=label,
            )
    def dump(self):
        """Run the base-class dump, then return the assembled digraph."""
        super(PatchGrapher, self).dump()
        return self.graph
| [
"akx@iki.fi"
] | akx@iki.fi |
c6f54efd2d6d69af5e8998d81812b49e130f86e0 | 94e06376dc265c7bf1a2e51acb9714d02b21503a | /python打卡/day1_zhihu.py | f6af91e41527a127672468ee695a24ed2082e28a | [] | no_license | zhangquanliang/python | 4b2db32bed4e4746c8c49c309563f456dc41c6be | f45ef96e385b1cd6c5dfb53bf81042d953a9ec46 | refs/heads/master | 2021-04-26T23:30:12.217397 | 2019-03-20T06:18:14 | 2019-03-20T06:18:14 | 124,005,916 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,869 | py | # -*- coding: utf-8 -*-
import requests
import re
import urllib3
urllib3.disable_warnings()
"""
Title = 知乎关注粉丝
Date = 2018-03-27
"""
class ZhiHu:
    """Scrape follower information of a Zhihu column and append it to test.txt."""
    def __init__(self):
        # Landing page of the column; only used to read the total follower count.
        self.url = 'https://zhuanlan.zhihu.com/wajuejiprince'
        self.headers = {
            "user-agent": "Mozilla/1.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)"
                          " Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0"
        }
    # Follower information of the column
    def zh_fans(self):
        """Fetch every follower page by page and append 'name bio uid' lines to test.txt."""
        r = requests.session()
        response = r.get(self.url, headers=self.headers, verify=False)
        # The page advertises "<n>人关注" (n followers); parse n out of the HTML.
        reg = re.findall('target="_blank">(.*?)人关注</a>', response.text)
        fans_number = int(reg[0].strip())
        num = fans_number // 20  # the API serves followers 20 per page
        # 'with' guarantees the file is closed even when a request or parse
        # fails, unlike the original manual open()/close() pair.
        with open('test.txt', 'a', encoding='utf-8') as f:
            for i in range(num + 1):
                if i == 0:
                    fans_url = 'https://zhuanlan.zhihu.com/api/columns/wajuejiprince/followers?limit=20'
                else:
                    fans_url = 'https://zhuanlan.zhihu.com/api/columns/wajuejiprince/followers?limit=20&offset={}'\
                        .format(i * 20)
                response = r.get(fans_url, headers=self.headers, verify=False)
                for fans_list in response.json():
                    # 'bio' may be null in the API payload; write it as "".
                    job_name = fans_list['bio'] or ""
                    name = fans_list['name']
                    uid = str(fans_list['uid'])
                    f.write('{} {} {}\n'.format(name, job_name, uid))
                    f.flush()
if __name__ == '__main__':
zhihu = ZhiHu()
zhihu.zh_fans() | [
"1007228376@qq.com"
] | 1007228376@qq.com |
badacbed26551cc4a429b5598c7e25ef3836be2d | a47ac7c64cb6bb1f181eadff8e4b24735c19080a | /PythonStudy/9-Tkinter/11-Menu菜单.py | 02c687ce0e7cf07976ffc9e57f3d9cd58f84776b | [
"MIT"
] | permissive | CoderTitan/PythonDemo | 6dcc88496b181df959a9d43b963fe43a6e4cb032 | feb5ef8be91451b4622764027ac684972c64f2e0 | refs/heads/master | 2020-03-09T09:15:28.299827 | 2018-08-21T03:43:25 | 2018-08-21T03:43:25 | 128,708,650 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | # 主窗口
from tkinter import *
# Create the main window
window = Tk()
# Set the window title
window.title('Titanjun')
# Set the window size
window.geometry('400x400')
'''
def menuAction1():
    print('menubar')
# 菜单条
menubar = Menu(window)
window.configure(menu=menubar)
# 创建一个菜单选项
menu1 = Menu(menubar, tearoff=False)
# 菜单选项添加内容
for item in ['Python', 'PHP', 'CPP', 'C', 'Java', 'JavaScript', 'VBScript', 'Exit']:
    if item == 'Exit':
        # 添加分割线
        menu1.add_separator()
        menu1.add_command(label=item, command=window.quit)
    else:
        menu1.add_command(label=item, command=menuAction1)
# 想菜单条上添加菜单选项
menubar.add_cascade(label='语言', menu=menu1)
# 菜单2的事件处理
def menuAction2():
    print(menuStr.get())
menuStr = StringVar()
menu2 = Menu(menubar, tearoff=True)
for item in ['red', 'orange', 'blue', 'gray']:
    menu2.add_radiobutton(label=item, variable=menuStr, command=menuAction2)
# 添加到菜单列表
menubar.add_cascade(label='颜色', menu=menu2)
'''
# Right-click (context) menu
menubar2 = Menu(window)
menu3 = Menu(menubar2, tearoff=False)
for item in ['Python', 'PHP', 'CPP', 'C', 'Java', 'JavaScript', 'VBScript', 'Exit']:
    menu3.add_command(label=item)
menubar2.add_cascade(label='开发语言', menu=menu3)
# Adding / removing menu entries
def menuClick():
    print("menu3")
# Insert a command entry
menu3.insert_command(1, label='command', command=menuClick)
# Insert a radiobutton entry
menu3.insert_radiobutton(3, label='radiobutton', command=menuClick)
# Insert a checkbutton entry
menu3.insert_checkbutton(5, label='checkbutton', command=menuClick)
# Insert a separator line
menu3.insert_separator(4)
# menu3.insert_separator(0)
# Deletion
# Two arguments: arg1 is the start index, arg2 the end index; with arg2 omitted only the entry at the first index is removed
menu3.delete(2, 4)
menu3.delete(0)
# Callback that pops up the context menu
def showMenu(event):
    print('window')
    # Post the menu at the mouse-click coordinates
    menubar2.post(event.x_root, event.y_root)
# Bind the mouse event on the window
window.bind("<Button-2>", showMenu)
# Enter the Tk event loop
window.mainloop()
| [
"quanjunt@163.com"
] | quanjunt@163.com |
e6d3436e3d161e1e676e909ca9d50025702b66c8 | fc9bd84a2e560309dd7ddb2509b4bfcfbfe37e6b | /timesketch/ui/views/user_test.py | 20addf67538f17ff539c078accb72cde0a9d5567 | [
"Apache-2.0"
] | permissive | oi-buhtig/timesketch | 37ad823c08f24fc9021764a8843ede7d1693ab4b | bb2dccc041dff907ae428155ae45dbf5f26f19a3 | refs/heads/master | 2020-05-29T09:52:24.480339 | 2015-10-14T13:20:01 | 2015-10-14T13:20:01 | 44,555,537 | 1 | 0 | null | 2015-10-19T18:36:37 | 2015-10-19T18:36:37 | null | UTF-8 | Python | false | false | 2,071 | py | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the user views."""
from flask_login import current_app
from flask_login import current_user
from timesketch.lib.definitions import HTTP_STATUS_CODE_REDIRECT
from timesketch.lib.testlib import BaseTest
class UserViewTest(BaseTest):
    """Test the user view."""

    def test_login_view_unauthenticated(self):
        """Test the login view handler with an unauthenticated session."""
        response = self.client.get(u'/login/')
        self.assert200(response)
        self.assert_template_used(u'user/login.html')

    def test_login_view_form_authenticated(self):
        """Test the login view handler with an authenticated session."""
        self.login()
        # An already-authenticated user is redirected away from the login page.
        # assertEquals is a deprecated alias; use assertEqual consistently
        # (test_login_view_sso_authenticated already did).
        response = self.client.get(u'/login/')
        self.assertEqual(response.status_code, HTTP_STATUS_CODE_REDIRECT)

    def test_login_view_sso_authenticated(self):
        """Test the login view handler with an SSO authenticated session."""
        current_app.config[u'SSO_ENABLED'] = True
        with self.client:
            # REMOTE_USER simulates the SSO front-end having authenticated "test1".
            response = self.client.get(
                u'/login/', environ_base={u'REMOTE_USER': u'test1'})
            self.assertEqual(current_user.username, u'test1')
            self.assertEqual(response.status_code, HTTP_STATUS_CODE_REDIRECT)

    def test_logout_view(self):
        """Test the logout view handler."""
        self.login()
        response = self.client.get(u'/logout/')
        self.assertEqual(response.status_code, HTTP_STATUS_CODE_REDIRECT)
| [
"jberggren@gmail.com"
] | jberggren@gmail.com |
f97b66198afcbad41be07d665f2c5589fda3d6e7 | 3992ae714b747b37341d8eb49a5f907ecb6e8d2a | /akshare/futures_derivative/jyfm_tools_func.py | 02d7da2d36f2ea1b534fb8c51bac90591d64180f | [
"MIT"
] | permissive | rhkzleek/akshare | da0c6453e2bcd9ac0cc414b648ab5b2aee73ab30 | 804642122a29b58eb8c43ac825c9eedfb8d2f6d9 | refs/heads/master | 2021-05-20T01:13:56.644648 | 2020-04-01T07:49:44 | 2020-04-01T07:49:44 | 252,123,197 | 1 | 0 | MIT | 2020-04-01T08:58:58 | 2020-04-01T08:58:57 | null | UTF-8 | Python | false | false | 37,686 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2020/01/02 17:37
contact: jindaxiang@163.com
desc: 获取交易法门-工具: https://www.jiaoyifamen.com/tools/
交易法门首页: https://www.jiaoyifamen.com/
# 交易法门-工具-套利分析
交易法门-工具-套利分析-跨期价差(自由价差)
交易法门-工具-套利分析-自由价比
交易法门-工具-套利分析-多腿组合
交易法门-工具-套利分析-FullCarry
交易法门-工具-套利分析-套利价差矩阵*
# 交易法门-工具-资讯汇总
交易法门-工具-资讯汇总-研报查询
交易法门-工具-资讯汇总-交易日历
# 交易法门-工具-持仓分析
交易法门-工具-持仓分析-期货持仓
交易法门-工具-持仓分析-席位持仓
交易法门-工具-持仓分析-持仓季节性
# 交易法门-工具-资金分析
交易法门-工具-资金分析-资金流向
交易法门-工具-资金分析-沉淀资金
交易法门-工具-资金分析-资金季节性
交易法门-工具-资金分析-成交排名
# 交易法门-工具-席位分析
交易法门-工具-席位分析-持仓结构
# 交易法门-工具-仓单分析
交易法门-工具-仓单分析-仓单日报
交易法门-工具-仓单分析-仓单查询
交易法门-工具-仓单分析-虚实盘比日报
交易法门-工具-仓单分析-虚实盘比查询
# 交易法门-工具-期限分析
交易法门-工具-期限分析-基差日报
交易法门-工具-期限分析-基差分析
交易法门-工具-期限分析-期限结构
交易法门-工具-期限分析-价格季节性
# 交易法门-工具-交易规则
交易法门-工具-交易规则-限仓规定
交易法门-工具-交易规则-仓单有效期
交易法门-工具-交易规则-品种手册
"""
import time
import matplotlib.pyplot as plt
import pandas as pd
import requests
from akshare.futures_derivative.cons import (
csa_payload,
csa_url_spread,
csa_url_ratio,
csa_url_customize,
)
from akshare.futures_derivative.jyfm_login_func import jyfm_login
# pd.set_option('display.max_columns', None)
# 交易法门-工具-套利分析
def jyfm_tools_futures_spread(
    type_1="RB", type_2="RB", code_1="01", code_2="05", headers="", plot=True
):
    """
    交易法门-工具-套利分析-跨期价差(自由价差): calendar/free spread between two legs.

    :param type_1: symbol of the first leg, e.g. "RB"
    :type type_1: str
    :param type_2: symbol of the second leg
    :type type_2: str
    :param code_1: delivery month of the first leg, e.g. "01"
    :type code_1: str
    :param code_2: delivery month of the second leg
    :type code_2: str
    :param headers: headers with login cookies
    :type headers: dict
    :param plot: when True, also draw the series with matplotlib
    :type plot: bool
    :return: spread time series indexed by date, named "value"
    :rtype: pandas.Series
    """
    csa_payload_his = csa_payload.copy()
    csa_payload_his.update(
        {"type1": type_1, "type2": type_2, "code1": code_1, "code2": code_2}
    )
    res = requests.get(csa_url_spread, params=csa_payload_his, headers=headers)
    data_json = res.json()
    data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
    data_df.index = pd.to_datetime(data_df.iloc[:, 0])
    data_df = data_df.iloc[:, 1]
    data_df.name = "value"
    # Plotting is purely a side effect: the series is returned either way, so
    # the original duplicated if/else return has been collapsed into one return.
    if plot:
        data_df.plot()
        plt.legend(loc="best")
        plt.xlabel("date")
        plt.ylabel("value")
        plt.show()
    return data_df
def jyfm_tools_futures_ratio(
    type_1="RB", type_2="RB", code_1="01", code_2="05", headers="", plot=True
):
    """
    交易法门-工具-套利分析-自由价比: free price ratio between two legs.

    :param type_1: symbol of the first leg, e.g. "RB"
    :type type_1: str
    :param type_2: symbol of the second leg
    :type type_2: str
    :param code_1: delivery month of the first leg, e.g. "01"
    :type code_1: str
    :param code_2: delivery month of the second leg
    :type code_2: str
    :param headers: headers with login cookies
    :type headers: dict
    :param plot: when True, also draw the series with matplotlib
    :type plot: bool
    :return: ratio time series indexed by date, named "value", e.g.
             2013-01-04    -121
             ...
             2019-10-25     134
    :rtype: pandas.Series
    """
    csa_payload_his = csa_payload.copy()
    csa_payload_his.update(
        {"type1": type_1, "type2": type_2, "code1": code_1, "code2": code_2}
    )
    res = requests.get(csa_url_ratio, params=csa_payload_his, headers=headers)
    data_json = res.json()
    data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
    data_df.index = pd.to_datetime(data_df.iloc[:, 0])
    data_df = data_df.iloc[:, 1]
    data_df.name = "value"
    # Plotting is purely a side effect; one return replaces the original
    # duplicated if/else branches that both returned data_df.
    if plot:
        data_df.plot()
        plt.legend(loc="best")
        plt.xlabel("date")
        plt.ylabel("value")
        plt.show()
    return data_df
def jyfm_tools_futures_customize(
    formula="RB01-1.6*I01-0.5*J01-1200", headers="", plot=True
):
    """
    交易法门-工具-套利分析-多腿组合: multi-leg combination evaluated from a formula.

    :param formula: combination formula, e.g. "RB01-1.6*I01-0.5*J01-1200"
    :type formula: str
    :param headers: headers with login cookies
    :type headers: dict
    :param plot: when True, also draw the series with matplotlib
    :type plot: bool
    :return: combination value time series indexed by date, named "value"
    :rtype: pandas.Series
    """
    params = {"formula": formula}
    res = requests.get(csa_url_customize, params=params, headers=headers)
    data_json = res.json()
    data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
    data_df.index = pd.to_datetime(data_df.iloc[:, 0])
    data_df = data_df.iloc[:, 1]
    data_df.name = "value"
    # Plotting is purely a side effect; one return replaces the original
    # duplicated if/else branches that both returned data_df.
    if plot:
        data_df.plot()
        plt.legend(loc="best")
        plt.xlabel("date")
        plt.ylabel("value")
        plt.show()
    return data_df
def jyfm_tools_futures_full_carry(
    begin_code="05", end_code="09", ratio="4", headers=""
):
    """
    交易法门-工具-套利分析-FullCarry: estimated full-carry cost of a forward market.
    https://www.jiaoyifamen.com/tools/future/full/carry?beginCode=05&endCode=09&ratio=4

    Note: the site's estimate covers storage fees and funding cost only;
    exchange fees are negligible and VAT is excluded, so real full-carry
    levels run somewhat above the returned figures. Mind warehouse-receipt
    expiry, premiums/discounts and the higher costs of perishable varieties.

    :param begin_code: delivery month of the near contract, e.g. "05"
    :type begin_code: str
    :param end_code: delivery month of the far contract, e.g. "09"
    :type end_code: str
    :param ratio: percentage given as an absolute number, e.g. "4"
    :type ratio: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: full-carry cost table
    :rtype: pandas.DataFrame
    """
    query = {"beginCode": begin_code, "endCode": end_code, "ratio": ratio}
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/future/full/carry",
        params=query,
        headers=headers,
    )
    return pd.DataFrame(response.json()["table_data"])
def jyfm_tools_futures_arbitrage_matrix(
    category="1", type1="RB", type2="RB", headers=""
):
    """
    交易法门-工具-套利分析-跨期价差矩阵: spread/ratio matrix for two varieties.
    https://www.jiaoyifamen.com/tools/future/arbitrage/matrix

    :param category: "1" calendar spread, "2" free spread, "3" free ratio
    :type category: str
    :param type1: first variety code
    :type type1: str
    :param type2: second variety code
    :type type2: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: the requested matrix
    :rtype: pandas.DataFrame
    """
    query = {
        "category": category,
        "type1": type1,
        "type2": type2,
        "_": "1583846468579",
    }
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/future/arbitrage/matrix",
        params=query,
        headers=headers,
    )
    return pd.DataFrame(response.json()["data"])
# Mapping: exchange name -> {futures symbol code: Chinese short name of the variety}.
jyfm_exchange_symbol_dict = {
    "中国金融期货交易所": {
        "TF": "五债",
        "T": "十债",
        "IC": "中证500",
        "IF": "沪深300",
        "IH": "上证50",
        "TS": "二债",
    },
    "郑州商品交易所": {
        "FG": "玻璃",
        "RS": "菜籽",
        "CF": "棉花",
        "LR": "晚稻",
        "CJ": "红枣",
        "JR": "粳稻",
        "ZC": "动力煤",
        "TA": "PTA",
        "SA": "纯碱",
        "AP": "苹果",
        "WH": "强麦",
        "SF": "硅铁",
        "MA": "甲醇",
        "CY": "棉纱",
        "RI": "早稻",
        "OI": "菜油",
        "SM": "硅锰",
        "RM": "菜粕",
        "UR": "尿素",
        "PM": "普麦",
        "SR": "白糖",
    },
    "大连商品交易所": {
        "PP": "PP",
        "RR": "粳米",
        "BB": "纤板",
        "A": "豆一",
        "EG": "乙二醇",
        "B": "豆二",
        "C": "玉米",
        "JM": "焦煤",
        "I": "铁矿",
        "J": "焦炭",
        "L": "塑料",
        "M": "豆粕",
        "P": "棕榈",
        "CS": "淀粉",
        "V": "PVC",
        "Y": "豆油",
        "JD": "鸡蛋",
        "FB": "胶板",
        "EB": "苯乙烯",
    },
    "上海期货交易所": {
        "SS": "不锈钢",
        "RU": "橡胶",
        "AG": "沪银",
        "AL": "沪铝",
        "FU": "燃油",
        "RB": "螺纹",
        "CU": "沪铜",
        "PB": "沪铅",
        "BU": "沥青",
        "AU": "沪金",
        "ZN": "沪锌",
        "SN": "沪锡",
        "HC": "热卷",
        "NI": "沪镍",
        "WR": "线材",
        "SP": "纸浆",
    },
    "上海国际能源交易中心": {"SC": "原油", "NR": "20号胶"},
}
# 交易法门-工具-资讯汇总
def jyfm_tools_research_query(limit="100", headers=""):
    """
    交易法门-工具-资讯汇总-研报查询: research-report listing.
    https://www.jiaoyifamen.com/tools/research/qryPageList

    :param limit: maximum number of records to return
    :type limit: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: research-report records
    :rtype: pandas.DataFrame
    """
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/research/qryPageList",
        params={"page": "1", "limit": limit},
        headers=headers,
    )
    return pd.DataFrame(response.json()["data"])
def jyfm_tools_trade_calendar(trade_date="2020-01-03", headers=""):
    """
    交易法门-工具-资讯汇总-交易日历: event calendar; also works for future dates.
    https://www.jiaoyifamen.com/tools/trade-calendar/events

    :param trade_date: trading day, e.g. "2020-01-03"
    :type trade_date: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: calendar events for the given day
    :rtype: pandas.DataFrame
    """
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/trade-calendar/events",
        params={"page": "1", "limit": "1000", "day": trade_date},
        headers=headers,
    )
    return pd.DataFrame(response.json()["data"])
# 交易法门-工具-持仓分析
def jyfm_tools_position_detail(
    symbol="JM", code="jm2005", trade_date="2020-01-03", headers=""
):
    """
    交易法门-工具-持仓分析-期货持仓: per-contract position detail.

    :param symbol: variety code, e.g. "JM"
    :type symbol: str
    :param code: contract code, e.g. "jm2005"
    :type code: str
    :param trade_date: trading day, e.g. "2020-01-03"
    :type trade_date: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: short-side ranking table for the contract on that day
    :rtype: pandas.DataFrame
    """
    url = f"https://www.jiaoyifamen.com/tools/position/details/{symbol}"
    # Build the query via params (consistent with the sibling functions) instead
    # of the original hand-concatenated string that froze "_" to a stale value;
    # "_" is the site's cache-buster, generated fresh like in
    # jyfm_tools_position_structure.
    params = {
        "code": code,
        "day": trade_date,
        "_": str(int(time.time() * 1000)),
    }
    res = requests.get(url, params=params, headers=headers)
    return pd.DataFrame(res.json()["short_rank_table"])
def jyfm_tools_position_seat(seat="永安期货", trade_date="2020-01-03", headers=""):
    """
    交易法门-工具-持仓分析-席位持仓: positions held by one broker seat.

    :param seat: broker name, e.g. "永安期货"
    :type seat: str
    :param trade_date: trading day, e.g. "2020-01-03"
    :type trade_date: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: seat positions for the given broker and day
    :rtype: pandas.DataFrame
    """
    query = {
        "seat": seat,
        "day": trade_date,
        "type": "",
        "_": "1578040989932",
    }
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/position/seat",
        params=query,
        headers=headers,
    )
    return pd.DataFrame(response.json()["data"])
def jyfm_tools_position_season(symbol="RB", code="05", headers=""):
    """
    交易法门-工具-持仓分析-持仓季节性: seasonal open-interest pattern of a contract.
    https://www.jiaoyifamen.com/tools/position/season

    :param symbol: variety code, e.g. "RB"
    :type symbol: str
    :param code: contract delivery month, e.g. "05"
    :type code: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: one column per year (2013-2020), indexed by the calendar category
    :rtype: pandas.DataFrame
    """
    url = "https://www.jiaoyifamen.com/tools/position/season"
    params = {
        "type": symbol,
        "code": code,
    }
    res = requests.get(url, params=params, headers=headers)
    data_json = res.json()
    # Rows are per-year series aligned to "dataCategory"; transpose so each
    # year becomes a column.
    temp_df = pd.DataFrame(
        [
            data_json["year2013"],
            data_json["year2014"],
            data_json["year2015"],
            data_json["year2016"],
            data_json["year2017"],
            data_json["year2018"],
            data_json["year2019"],
            data_json["year2020"],
        ],
        columns=data_json["dataCategory"],
    ).T
    temp_df.columns = ["2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"]
    return temp_df
# 交易法门-工具-资金分析
def jyfm_tools_position_fund_direction(
    trade_date="2020-02-24", indicator="期货品种资金流向排名", headers=""
):
    """
    交易法门-工具-资金分析-资金流向: fund-flow ranking for one trading day.
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08

    :param trade_date: trading day, e.g. "2020-02-24"
    :type trade_date: str
    :param indicator: "期货品种资金流向排名" (by variety) or
                      "期货主力合约资金流向排名" (by dominant contract)
    :type indicator: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: columns ["date", "symbol", "fund"]
    :rtype: pandas.DataFrame
    """
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/position/fund/",
        params={"day": trade_date},
        headers=headers,
    )
    payload = response.json()
    # Pick the per-variety or per-dominant-contract key pair, then build the
    # frame once instead of duplicating the construction in both branches.
    if indicator == "期货品种资金流向排名":
        category_key, value_key = "flowCategory", "flowValue"
    else:
        category_key, value_key = "dominantFlowCategory", "dominantFlowValue"
    categories = payload[category_key]
    return pd.DataFrame(
        [
            [payload["tradingDay"]] * len(categories),
            categories,
            payload[value_key],
        ],
        index=["date", "symbol", "fund"],
    ).T
def jyfm_tools_position_fund_down(
    trade_date="2020-02-24", indicator="期货品种沉淀资金排名", headers=""
):
    """
    交易法门-工具-资金分析-沉淀资金: precipitated-fund ranking for one trading day.
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08

    :param trade_date: trading day, e.g. "2020-02-24"
    :type trade_date: str
    :param indicator: "期货品种沉淀资金排名" (by variety) or
                      "期货主力合约沉淀资金排名" (by dominant contract)
    :type indicator: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: columns ["date", "symbol", "fund"]
    :rtype: pandas.DataFrame
    """
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/position/fund/",
        params={"day": trade_date},
        headers=headers,
    )
    payload = response.json()
    # Select the key pair for the requested view, then build the frame once.
    if indicator == "期货品种沉淀资金排名":
        category_key, value_key = "precipitationCategory", "precipitationValue"
    else:
        category_key, value_key = (
            "dominantPrecipitationCategory",
            "dominantPrecipitationValue",
        )
    categories = payload[category_key]
    return pd.DataFrame(
        [
            [payload["tradingDay"]] * len(categories),
            categories,
            payload[value_key],
        ],
        index=["date", "symbol", "fund"],
    ).T
def jyfm_tools_position_fund_season(symbol="RB", code="05", headers=""):
    """
    交易法门-工具-资金分析-资金季节性: seasonal fund pattern of a contract.
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08

    :param symbol: variety code, e.g. "RB"
    :type symbol: str
    :param code: contract delivery month, e.g. "05"
    :type code: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: columns ["date", "2013", ..., "2020"], one fund series per year
    :rtype: pandas.DataFrame
    """
    params = {
        "type": symbol,
        "code": code,
    }
    url = "https://www.jiaoyifamen.com/tools/position/fund/season"
    r = requests.get(url, params=params, headers=headers)
    data_json = r.json()
    # Rows are the shared calendar plus one series per year; transpose so each
    # becomes a column.
    data_df = pd.DataFrame(
        [
            data_json["dataCategory"],
            data_json["year2013"],
            data_json["year2014"],
            data_json["year2015"],
            data_json["year2016"],
            data_json["year2017"],
            data_json["year2018"],
            data_json["year2019"],
            data_json["year2020"],
        ],
        index=["date", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"],
    ).T
    return data_df
def jyfm_tools_position_fund_deal(
    trade_date="2020-02-24", indicator="期货品种成交量排名", headers=""
):
    """
    交易法门-工具-资金分析-成交排名: turnover ranking for one trading day.
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08

    :param trade_date: trading day, e.g. "2020-02-24"
    :type trade_date: str
    :param indicator: "期货品种成交量排名" (by variety) or
                      "期货主力合约成交量排名" (by dominant contract)
    :type indicator: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: columns ["date", "symbol", "fund"]
    :rtype: pandas.DataFrame
    """
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/position/fund/",
        params={"day": trade_date},
        headers=headers,
    )
    payload = response.json()
    # Select the key pair for the requested view, then build the frame once.
    if indicator == "期货品种成交量排名":
        category_key, value_key = "turnOverCategory", "turnOverValue"
    else:
        category_key, value_key = "dominantTurnOverCategory", "dominantTurnOverValue"
    categories = payload[category_key]
    return pd.DataFrame(
        [
            [payload["tradingDay"]] * len(categories),
            categories,
            payload[value_key],
        ],
        index=["date", "symbol", "fund"],
    ).T
# 交易法门-工具-席位分析-持仓结构
def jyfm_tools_position_structure(
    trade_date="2020-03-02", seat="永安期货", indicator="long", headers=""
):
    """
    交易法门-工具-席位分析-持仓结构: position structure of one broker seat.
    https://www.jiaoyifamen.com/tools/position/seat

    :param trade_date: trading day, e.g. "2020-03-02"
    :type trade_date: str
    :param seat: broker name, e.g. "永安期货"
    :type seat: str
    :param indicator: "long", "short" or "pure"; anything else yields None
    :type indicator: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: position structure for the seat on that day
    :rtype: pandas.DataFrame
    """
    query = {
        "seat": seat,
        "day": trade_date,
        "structure": "structure",
        "_": int(time.time() * 1000),  # cache buster
    }
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/position/seat",
        params=query,
        headers=headers,
    )
    payload = response.json()
    # Dispatch table instead of the original chain of ifs; an unknown
    # indicator still falls through and returns None, as before.
    payload_key = {
        "short": "shortPosition",
        "long": "longPosition",
        "pure": "purePosition",
    }.get(indicator)
    if payload_key is not None:
        return pd.DataFrame(payload[payload_key])
# 交易法门-工具-仓单分析
def jyfm_tools_warehouse_receipt_daily(trade_date="2020-01-02", headers=""):
    """
    交易法门-工具-仓单分析-仓单日报: daily warehouse-receipt report.
    https://www.jiaoyifamen.com/tools/warehouse-receipt/daily

    :param trade_date: trading day, e.g. "2020-01-02"
    :type trade_date: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: warehouse-receipt report for the given day
    :rtype: pandas.DataFrame
    """
    query = {
        "day": trade_date,
        "_": "1578555328412",
    }
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/warehouse-receipt/daily",
        params=query,
        headers=headers,
    )
    return pd.DataFrame(response.json()["data"])
def jyfm_tools_warehouse_receipt_query(symbol="AL", indicator="仓单数据走势图", headers=""):
    """
    交易法门-工具-仓单分析-仓单查询: warehouse-receipt trend or seasonal chart data.
    https://www.jiaoyifamen.com/tools/warehouse-receipt/query

    :param symbol: variety code, e.g. "AL"
    :type symbol: str
    :param indicator: one of ["仓单数据走势图", "仓单数据季节图"]
    :type indicator: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: trend data (3 unnamed columns) or seasonal data (date + year columns)
    :rtype: pandas.DataFrame
    """
    params = {
        "type": symbol,
    }
    url = "https://www.jiaoyifamen.com/tools/warehouse-receipt/query"
    res = requests.get(url, params=params, headers=headers)
    data_json = res.json()
    if indicator == "仓单数据走势图":
        # Trend view: category plus two value series, transposed to columns.
        return pd.DataFrame(
            [data_json["category"], data_json["value"], data_json["value2"]]
        ).T
    # Seasonal view: the shared calendar plus one series per year (2013-2020).
    return pd.DataFrame(
        [
            data_json["dataCategory"],
            data_json["year2013"],
            data_json["year2014"],
            data_json["year2015"],
            data_json["year2016"],
            data_json["year2017"],
            data_json["year2018"],
            data_json["year2019"],
            data_json["year2020"],
        ],
        index=[
            "date",
            "year2013",
            "year2014",
            "year2015",
            "year2016",
            "year2017",
            "year2018",
            "year2019",
            "year2020",
        ],
    ).T
def jyfm_tools_warehouse_virtual_fact_daily(trade_date="2020-01-20", headers=""):
    """
    交易法门-工具-仓单分析-虚实盘比日报: daily virtual/actual position ratio report.
    https://www.jiaoyifamen.com/tools/warehouse-receipt/virtualfact/daily

    :param trade_date: trading day, e.g. "2020-01-20"
    :type trade_date: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: virtual/actual ratio report for the given day
    :rtype: pandas.DataFrame
    """
    query = {
        "day": trade_date,
        "_": "1579532255370",
    }
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/warehouse-receipt/virtualfact/daily",
        params=query,
        headers=headers,
    )
    return pd.DataFrame(response.json()["data"])
def jyfm_tools_warehouse_virtual_fact_ratio(symbol="AL", code="05", headers=""):
    """
    交易法门-工具-仓单分析-虚实盘比查询: virtual/actual ratio history of a contract.
    https://www.jiaoyifamen.com/tools/warehouse-receipt/ratio

    :param symbol: variety code, e.g. "AL"
    :type symbol: str
    :param code: contract delivery month, e.g. "05"
    :type code: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: columns ["date", "year2013", ..., "year2020"], one ratio series per year
    :rtype: pandas.DataFrame
    """
    params = {
        "type": symbol,
        "code": code,
    }
    url = "https://www.jiaoyifamen.com/tools/warehouse-receipt/ratio"
    res = requests.get(url, params=params, headers=headers)
    data_json = res.json()
    # Rows are the shared calendar plus one series per year; transpose so each
    # becomes a column.
    return pd.DataFrame(
        [
            data_json["dataCategory"],
            data_json["year2013"],
            data_json["year2014"],
            data_json["year2015"],
            data_json["year2016"],
            data_json["year2017"],
            data_json["year2018"],
            data_json["year2019"],
            data_json["year2020"],
        ],
        index=[
            "date",
            "year2013",
            "year2014",
            "year2015",
            "year2016",
            "year2017",
            "year2018",
            "year2019",
            "year2020",
        ],
    ).T
# 交易法门-工具-期限分析-基差日报
def jyfm_tools_futures_basis_daily(
    trade_date="2020-02-05", indicator="基差率", headers=""
):
    """
    交易法门-工具-期限分析-基差日报: daily basis report.

    :param trade_date: trading day in "YYYY-MM-DD" form, e.g. "2020-01-02"
    :type trade_date: str
    :param indicator: "基差率" (basis-ratio chart data) or "基差日报" (report table);
                      anything else yields None
    :type indicator: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: basis data for the given day
    :rtype: pandas.DataFrame
    """
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/future/basis/daily",
        params={"day": trade_date},
        headers=headers,
    )
    payload = response.json()
    if indicator == "基差率":
        # Chart data: x = category, y = basis ratio.
        return pd.DataFrame([payload["category"], payload["value"]], index=["x", "y"]).T
    if indicator == "基差日报":
        return pd.DataFrame(payload["table_data"])
# 交易法门-工具-期限分析-基差日报-地区选取
def jyfm_tools_futures_basis_daily_area(symbol="Y", headers=""):
    """
    交易法门-工具-期限分析-基差日报-地区选取: selectable spot-price regions of a variety.

    :param symbol: variety code, e.g. "Y"
    :type symbol: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: region names available for the variety
    :rtype: list
    """
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/future/area",
        params={"type": symbol},
        headers=headers,
    )
    return response.json()["areas"]
def jyfm_tools_futures_basis_analysis(
    symbol="RB", area="上海", indicator="基差率分布图", headers=""
):
    """
    交易法门-工具-期限分析-基差分析: basis analysis for a variety and region.

    :param symbol: variety code, e.g. "RB"
    :type symbol: str
    :param area: region, e.g. "上海"; valid values come from
                 jyfm_tools_futures_basis_daily_area and vary by variety/date
    :type area: str
    :param indicator: one of ["基差走势图", "基差率季节图", "基差率分布图"];
                      anything else yields None
    :type indicator: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: basis data in the layout of the chosen chart
    :rtype: pandas.DataFrame
    """
    url = "https://www.jiaoyifamen.com/tools/future/basis/analysis"
    params = {
        "type": symbol,
        "area": area,
    }
    res = requests.get(url, params=params, headers=headers)
    json_data = res.json()
    if indicator == "基差走势图":
        # Trend view: spot, futures and basis series, dated by "category".
        return pd.DataFrame(
            [json_data["cashValue"], json_data["futureValue"], json_data["basisValue"]],
            columns=json_data["category"],
            index=["现货", "期货", "基差"],
        ).T
    if indicator == "基差率季节图":
        # Seasonal view: one basis-ratio series per year (2013-2020).
        return pd.DataFrame(
            [
                json_data["year2013"],
                json_data["year2014"],
                json_data["year2015"],
                json_data["year2016"],
                json_data["year2017"],
                json_data["year2018"],
                json_data["year2019"],
                json_data["year2020"],
            ],
            index=[
                "year2013",
                "year2014",
                "year2015",
                "year2016",
                "year2017",
                "year2018",
                "year2019",
                "year2020",
            ],
            columns=json_data["dataCategory"],
        ).T
    if indicator == "基差率分布图":
        # Distribution view: x = bucket, y = count/value.
        return pd.DataFrame(
            [json_data["limitCategory"], json_data["limitValue"]], index=["x", "y"]
        ).T
def jyfm_tools_futures_basis_structure(symbol="RB", headers=""):
    """
    交易法门-工具-期限分析-期限结构: term structure of a variety's contracts.

    :param symbol: variety code, e.g. "RB"
    :type symbol: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: term-structure data for the variety
    :rtype: pandas.DataFrame
    """
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/future/basis/structure",
        params={"type": symbol},
        headers=headers,
    )
    return pd.DataFrame(response.json())
def jyfm_tools_futures_basis_rule(
    symbol="RB", code="05", indicator="期货涨跌统计", headers=""
):
    """
    交易法门-工具-期限分析-价格季节性: price seasonality of a contract.

    :param symbol: variety code, e.g. "RB"
    :type symbol: str
    :param code: contract delivery month, e.g. "05"
    :type code: str
    :param indicator: "期货涨跌统计" (up/down statistics, default) or
                      "季节性走势图" (seasonal chart); anything else yields None
    :type indicator: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: statistics table or seasonal chart data
    :rtype: pandas.DataFrame
    """
    params = {
        "type": symbol,
        "code": code,
    }
    url = "https://www.jiaoyifamen.com/tools/future/basis/rule"
    res = requests.get(url, params=params, headers=headers)
    data_json = res.json()
    if indicator == "期货涨跌统计":
        return pd.DataFrame(data_json["ratioData"])
    if indicator == "季节性走势图":
        # Seasonal view: the shared calendar plus one price series per year.
        return pd.DataFrame(
            [
                data_json["dataCategory"],
                data_json["year2013"],
                data_json["year2014"],
                data_json["year2015"],
                data_json["year2016"],
                data_json["year2017"],
                data_json["year2018"],
                data_json["year2019"],
                data_json["year2020"],
            ],
            index=[
                "date",
                "2013",
                "2014",
                "2015",
                "2016",
                "2017",
                "2018",
                "2019",
                "2020",
            ],
        ).T
# 交易法门-工具-交易规则
def jyfm_tools_position_limit_info(exchange="CFFEX", headers=""):
    """
    交易法门-工具-交易规则-限仓规定: position-limit rules of one exchange.

    :param exchange: one of ["INE", "DCE", "CZCE", "SHFE", "CFFEX"]
    :type exchange: str
    :param headers: headers with login cookies
    :type headers: dict
    :return: position-limit rules
    :rtype: pandas.DataFrame
    """
    query = {"page": "1", "limit": "10", "exchange": exchange}
    response = requests.get(
        "https://www.jiaoyifamen.com/tools/position-limit/query",
        params=query,
        headers=headers,
    )
    return pd.DataFrame(response.json()["data"])
def jyfm_tools_receipt_expire_info(headers=""):
    """
    交易法门-工具-交易规则-仓单有效期: warehouse-receipt validity periods.

    :param headers: headers with login cookies
    :type headers: dict
    :return: all history data
    :rtype: pandas.DataFrame
    """
    frames = []
    # The endpoint pages its data; 3 pages x 20 rows covers the full table.
    for page in range(1, 4):
        params = {
            "page": str(page),
            "limit": "20",
        }
        res = requests.get(
            "https://www.jiaoyifamen.com/tools/receipt-expire-info/all",
            params=params,
            headers=headers,
        )
        frames.append(pd.DataFrame(res.json()["data"]))
    # DataFrame.append is deprecated (removed in pandas 2.0) and re-copies the
    # frame on every page; collect the pages and concatenate once instead.
    return pd.concat(frames, ignore_index=True)
def jyfm_tools_symbol_handbook(headers=""):
    """
    jiaoyifamen.com tools - trading rules - commodity handbook.

    :param headers: headers carrying the login cookies
    :type headers: dict
    :return: all history data
    :rtype: pandas.DataFrame
    """
    url = "https://www.jiaoyifamen.com/tools/receipt-expire-info/variety"
    payload = requests.get(url, headers=headers).json()
    return pd.DataFrame(payload["data"])
if __name__ == "__main__":
    # To try these functions, first register an account at
    # https://www.jiaoyifamen.com/ and fill in the account/password below
    # before running jyfm_login!
    headers = jyfm_login(account="", password="")
    # tools - arbitrage analysis
    jyfm_tools_futures_spread_df = jyfm_tools_futures_spread(
        type_1="RB", type_2="RB", code_1="01", code_2="05", headers=headers, plot=True
    )
    print(jyfm_tools_futures_spread_df)
    jyfm_tools_futures_ratio_df = jyfm_tools_futures_ratio(
        type_1="RB", type_2="RB", code_1="01", code_2="05", headers=headers, plot=True
    )
    print(jyfm_tools_futures_ratio_df)
    jyfm_tools_futures_customize_df = jyfm_tools_futures_customize(
        formula="RB01-1.6*I01-0.5*J01-1200", headers=headers, plot=True
    )
    print(jyfm_tools_futures_customize_df)
    jyfm_tools_futures_full_carry_df = jyfm_tools_futures_full_carry(
        begin_code="05", end_code="09", ratio="4", headers=headers
    )
    print(jyfm_tools_futures_full_carry_df)
    jyfm_tools_futures_arbitrage_matrix_df = jyfm_tools_futures_arbitrage_matrix(
        category="1", type1="RB", type2="RB", headers=headers
    )
    print(jyfm_tools_futures_arbitrage_matrix_df)
    # tools - news roundup
    jyfm_tools_research_query_df = jyfm_tools_research_query(
        limit="100", headers=headers
    )
    print(jyfm_tools_research_query_df)
    jyfm_tools_trade_calendar_df = jyfm_tools_trade_calendar(
        trade_date="2020-01-03", headers=headers
    )
    print(jyfm_tools_trade_calendar_df)
    # tools - position analysis
    jyfm_tools_position_detail_df = jyfm_tools_position_detail(
        symbol="JM", code="jm2005", trade_date="2020-01-03", headers=headers
    )
    print(jyfm_tools_position_detail_df)
    jyfm_tools_position_seat_df = jyfm_tools_position_seat(
        seat="永安期货", trade_date="2020-01-03", headers=headers
    )
    print(jyfm_tools_position_seat_df)
    jyfm_tools_position_season_df = jyfm_tools_position_season(
        symbol="RB", code="05", headers=headers
    )
    print(jyfm_tools_position_season_df)
    # tools - fund analysis
    # fund flow ranking
    jyfm_tools_position_fund_direction_df = jyfm_tools_position_fund_direction(
        trade_date="2020-02-24", indicator="期货主力合约资金流向排名", headers=headers
    )
    print(jyfm_tools_position_fund_direction_df)
    # resident (parked) funds ranking
    jyfm_tools_position_fund_down_df = jyfm_tools_position_fund_down(
        trade_date="2020-02-24", indicator="期货主力合约沉淀资金排名", headers=headers
    )
    print(jyfm_tools_position_fund_down_df)
    # fund seasonality
    jyfm_tools_position_fund_season_df = jyfm_tools_position_fund_season(
        symbol="RB", code="05", headers=headers
    )
    print(jyfm_tools_position_fund_season_df)
    # turnover ranking
    jyfm_tools_position_fund_deal_df = jyfm_tools_position_fund_deal(
        trade_date="2020-02-24", indicator="期货主力合约成交量排名", headers=headers
    )
    print(jyfm_tools_position_fund_deal_df)
    # tools - seat analysis
    # position structure per seat
    jyfm_tools_position_structure_df = jyfm_tools_position_structure(
        trade_date="2020-03-02", seat="永安期货", indicator="long", headers=headers
    )
    print(jyfm_tools_position_structure_df)
    # tools - warehouse-receipt analysis
    # receipt daily report
    jyfm_tools_warehouse_receipt_daily_df = jyfm_tools_warehouse_receipt_daily(
        trade_date="2020-01-02", headers=headers
    )
    print(jyfm_tools_warehouse_receipt_daily_df)
    # receipt query
    jyfm_tools_warehouse_receipt_query_df = jyfm_tools_warehouse_receipt_query(
        symbol="AL", indicator="仓单数据走势图", headers=headers
    )
    print(jyfm_tools_warehouse_receipt_query_df)
    # virtual/physical ratio daily report
    jyfm_tools_warehouse_virtual_fact_daily_df = jyfm_tools_warehouse_virtual_fact_daily(
        trade_date="2020-01-20", headers=headers
    )
    print(jyfm_tools_warehouse_virtual_fact_daily_df)
    # virtual/physical ratio query
    jyfm_tools_warehouse_receipt_ratio_df = jyfm_tools_warehouse_virtual_fact_ratio(
        symbol="AL", code="05", headers=headers
    )
    print(jyfm_tools_warehouse_receipt_ratio_df)
    # tools - term-structure analysis
    jyfm_tools_futures_basis_daily_df = jyfm_tools_futures_basis_daily(
        trade_date="2020-01-02", indicator="基差率", headers=headers
    )
    print(jyfm_tools_futures_basis_daily_df)
    jyfm_tools_futures_basis_analysis_area_df = jyfm_tools_futures_basis_daily_area(
        symbol="Y", headers=headers
    )
    print(jyfm_tools_futures_basis_analysis_area_df)
    jyfm_tools_futures_basis_analysis_df = jyfm_tools_futures_basis_analysis(
        symbol="RB", area="上海", indicator="基差率分布图", headers=headers
    )
    print(jyfm_tools_futures_basis_analysis_df)
    jyfm_tools_futures_basis_structure_df = jyfm_tools_futures_basis_structure(
        symbol="RB", headers=headers
    )
    print(jyfm_tools_futures_basis_structure_df)
    jyfm_tools_futures_basis_rule_df = jyfm_tools_futures_basis_rule(
        symbol="RB", code="05", indicator="期货涨跌统计", headers=headers
    )
    print(jyfm_tools_futures_basis_rule_df)
    # tools - trading rules
    # warehouse-receipt expiry dates
    jyfm_tools_receipt_expire_info_df = jyfm_tools_receipt_expire_info(headers=headers)
    print(jyfm_tools_receipt_expire_info_df)
    # position limits
    jyfm_tools_position_limit_info_df = jyfm_tools_position_limit_info(
        exchange="CFFEX", headers=headers
    )
    print(jyfm_tools_position_limit_info_df)
    # commodity handbook
    jyfm_tools_symbol_handbook_df = jyfm_tools_symbol_handbook(headers=headers)
    print(jyfm_tools_symbol_handbook_df)
| [
"jindaxiang@163.com"
] | jindaxiang@163.com |
4be91b40e73d1f9dfd744a278f9275e198d7b882 | 8f2c55a2530c3e59dab5907c0044c618b88dd09b | /tests_python/resources/_debugger_case_generator2.py | 59c6ee88d1d2b503bcdb97c56156fb71084af719 | [
"Apache-2.0",
"EPL-1.0"
] | permissive | fabioz/PyDev.Debugger | 5a9c6d4c09be85a0e2d9fb93567fd65faf04c81d | 26864816cbfcf002a99913bcc31ebef48042a4ac | refs/heads/main | 2023-08-18T01:08:34.323363 | 2023-04-15T11:15:47 | 2023-04-15T11:15:47 | 21,870,144 | 363 | 126 | Apache-2.0 | 2023-07-30T23:03:31 | 2014-07-15T18:01:12 | Python | UTF-8 | Python | false | false | 287 | py | def get_return():
return 10
def generator():
print('start') # break here
yield 10 # step 1
return \
get_return() # step 2
if __name__ == '__main__':
for i in generator(): # generator return
print(i)
print('TEST SUCEEDED!') # step 3
| [
"fabiofz@gmail.com"
] | fabiofz@gmail.com |
5327fc8f8debafd9cb4c83a7e844fc228e595b5c | 457776337d8dcaa75df1c48869e9be1ab58be253 | /py3/pachong/2017_07_18a.py | d922f93beebcd18c966ae80f17e495c7829dfb0b | [] | no_license | tiankangbo/dashboard | c63d0df6e2eb60a6e17e4d7fde19e8e12a7958a9 | 8f6c6f806f3f8a8fe4ab8937d5c5a00986522c40 | refs/heads/master | 2021-07-14T20:18:18.843903 | 2017-10-22T06:16:15 | 2017-10-22T06:16:15 | 103,809,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | # coding:utf8
__author__ = 'tiankangbo'
from multiprocessing import Pipe, Process
import random
import time, os
def proc_send(pipe, urls):
    """
    Sending end of the pipe: pushes each url through the connection,
    pausing a random sub-second interval between sends.

    :param pipe: multiprocessing connection object (write end)
    :param urls: iterable of url strings to send
    :return: None
    """
    pid = os.getpid()
    for url in urls:
        print("process %s --send %s " % (pid, url))
        pipe.send(url)
        time.sleep(random.random())
def proc_recv(pipe):
    """
    Receiving end of the pipe: loops forever, printing whatever arrives.

    :param pipe: multiprocessing connection object (read end)
    :return: never returns normally; the process is terminated externally
    """
    while True:
        msg = pipe.recv()
        print('process %s >>>rev%s ' % (os.getpid(), msg))
        time.sleep(random.random())
if __name__ == '__main__':
    # Pipe() is duplex: hand one end to the sender process, the other to
    # the receiver process.
    pipe = Pipe()
    p1 = Process(target=proc_send, args=(pipe[0], ['url_'+str(i) for i in range(10)],))
    p2 = Process(target=proc_recv, args=(pipe[1],))
    # start the processes
    p1.start()
    p2.start()
    p1.join()
    # the receiver loops forever, so kill it once the sender has finished
    p2.terminate()
"tiankangbo@gmail.com"
] | tiankangbo@gmail.com |
d3dbf1b61ef1ea1dea9fe204714c8749ba0135f7 | 17381d148b86fc4354d1ac0e4062a35215eafd09 | /paiza/D035.py | f0f47caeb734b128220958b867c5cda906e30d2e | [] | no_license | worldyone/workspace | 027a93e1f227eb1c10485f6f2082a92bd98710d5 | 1e3fa21e23d6e91268882c9e480b76c4a3c4920f | refs/heads/master | 2023-03-09T01:21:53.402440 | 2022-09-20T14:47:49 | 2022-09-20T14:47:49 | 174,147,113 | 0 | 1 | null | 2023-02-10T22:53:26 | 2019-03-06T13:07:08 | Python | UTF-8 | Python | false | false | 34 | py | print("/".join(input().split()))
| [
"amanohikari142857@gmail.com"
] | amanohikari142857@gmail.com |
bd1ac0ef7ac86f79c795a06aa2ed66928a0b9339 | 9afeff62ff369bcf0a370b257e2d5c82ea1e27fb | /map/test_layers.py | 96281ae3e9ec72935297b79c011c5d563faea87f | [] | no_license | raphaelshirley/decals-web | d5404765c29f0e538193c70a13737cb400eb92b0 | 4897e21fa6844a3179667910c4b0a99c7b4883ad | refs/heads/master | 2020-06-04T16:32:15.841911 | 2019-06-11T14:41:31 | 2019-06-11T14:41:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,511 | py | test_layers = []
# Registry of experimental DR8 map layers: each entry is a
# (layer-id, human-readable label) pair.  test_cats holds catalog overlays
# and test_ccds holds CCD-outline overlays; the commented-out block below
# is the retired dr8-test1..test14 history, kept for reference.
test_cats = []
test_ccds = []
# test_layers.append(("dr8-test1", "DR8 test1 images"))
# test_layers.append(("dr8-test1-model", "DR8 test1 models"))
# test_layers.append(("dr8-test1-resid", "DR8 test1 residuals"))
# test_layers.append(("dr8-test2", "DR8 test2 (outliers) images"))
# test_layers.append(("dr8-test2-model", "DR8 test2 (outliers) models"))
# test_layers.append(("dr8-test2-resid", "DR8 test2 (outliers) residuals"))
# test_layers.append(("dr8-test3", "DR8 test3 (outliers) images"))
# test_layers.append(("dr8-test3-model", "DR8 test3 (outliers) models"))
# test_layers.append(("dr8-test3-resid", "DR8 test3 (outliers) residuals"))
# test_layers.append(("dr8-test4", "DR8 test4 (large-galaxies) images"))
# test_layers.append(("dr8-test4-model", "DR8 test4 (large-galaxies) models"))
# test_layers.append(("dr8-test4-resid", "DR8 test4 (large-galaxies) residuals"))
# test_layers.append(("dr8-test5", "DR8 test5 (trident) images"))
# test_layers.append(("dr8-test5-model", "DR8 test5 (trident) models"))
# test_layers.append(("dr8-test5-resid", "DR8 test5 (trident) residuals"))
#
# test_layers.append(("dr8-test6", "DR8 test6 (sky) images"))
#
# test_layers.append(("dr8-test7", "DR8 test7 (outliers) images"))
#
# test_layers.append(("dr8-test10", "DR8 test10 (rc) images"))
# test_layers.append(("dr8-test10-model", "DR8 test10 (rc) models"))
# test_layers.append(("dr8-test10-resid", "DR8 test10 (rc) residuals"))
#
# test_layers.append(("dr8-test14", "DR8 test14 (rc) images"))
# test_layers.append(("dr8-test14-model", "DR8 test14 (rc) models"))
# test_layers.append(("dr8-test14-resid", "DR8 test14 (rc) residuals"))
# DR8a release-candidate layers
test_layers.append(("dr8a", "DR8a (rc) images"))
test_layers.append(("dr8a-model", "DR8a (rc) models"))
test_layers.append(("dr8a-resid", "DR8a (rc) residuals"))
# DR8b: separate DECam and BASS+MzLS reductions
test_layers.append(("dr8b-decam", "DR8b DECam images"))
test_layers.append(("dr8b-decam-model", "DR8b DECam models"))
test_layers.append(("dr8b-decam-resid", "DR8b DECam residuals"))
test_layers.append(("dr8b-90p-mos", "DR8b BASS+MzLS images"))
test_layers.append(("dr8b-90p-mos-model", "DR8b BASS+MzLS models"))
test_layers.append(("dr8b-90p-mos-resid", "DR8b BASS+MzLS residuals"))
test_cats.append(("dr8b-decam", "Catalog: DR8b DECam"))
test_cats.append(("dr8b-90p-mos", "Catalog: DR8b BASS+MzLS"))
# DR8c: adds catalog and CCD overlays for DECam
test_layers.append(("dr8c-90p-mos", "DR8c BASS+MzLS images"))
test_layers.append(("dr8c-90p-mos-model", "DR8c BASS+MzLS models"))
test_layers.append(("dr8c-90p-mos-resid", "DR8c BASS+MzLS residuals"))
test_cats.append(("dr8c-90p-mos", "Catalog: DR8c BASS+MzLS"))
test_layers.append(("dr8c-decam", "DR8c DECam images"))
test_layers.append(("dr8c-decam-model", "DR8c DECam models"))
test_layers.append(("dr8c-decam-resid", "DR8c DECam residuals"))
test_cats.append(("dr8c-decam", "Catalog: DR8c DECam"))
test_ccds.append(("dr8c-decam", "CCDs: DR8c DECam"))
# DR8i: both cameras with catalogs and CCD overlays
test_layers.append(("dr8i-decam", "DR8i DECam images"))
test_layers.append(("dr8i-decam-model", "DR8i DECam models"))
test_layers.append(("dr8i-decam-resid", "DR8i DECam residuals"))
test_layers.append(("dr8i-90p-mos", "DR8i MzLS+BASS images"))
test_layers.append(("dr8i-90p-mos-model", "DR8i MzLS+BASS models"))
test_layers.append(("dr8i-90p-mos-resid", "DR8i MzLS+BASS residuals"))
test_cats.append(("dr8i-decam", "Catalog: DR8i DECam"))
test_cats.append(("dr8i-90p-mos", "Catalog: DR8i MzLS+BASS"))
test_ccds.append(("dr8i-decam", "CCDs: DR8i DECam"))
test_ccds.append(("dr8i-90p-mos", "CCDs: DR8i MzLS+BASS"))
| [
"dstndstn@gmail.com"
] | dstndstn@gmail.com |
5177b8f64631bc86748170335a481b8931d1265a | 84d891b6cb6e1e0d8c5f3e285933bf390e808946 | /Demo/python_MOOC/Python基础/Unit_5_函数和代码复用/KochDrawV1.py | df7942f10207b2f26d0486d9b5de3038bf80277b | [] | no_license | zzlzy1989/web_auto_test | 4df71a274eb781e609de1067664264402c49737e | 3e20a55836144e806496e99870f5e8e13a85bb93 | refs/heads/master | 2020-05-24T10:37:29.709375 | 2019-10-28T06:14:31 | 2019-10-28T06:14:31 | 187,230,775 | 2 | 0 | null | 2019-06-20T11:06:32 | 2019-05-17T14:29:11 | null | UTF-8 | Python | false | false | 554 | py | # -*- coding:utf-8 -*-
# @Author : GaoXu
# @Time : 2019/8/24 16:30
# @File : KochDrawV1.py
# @Software : web_auto_test
# Koch-curve drawing source code
import turtle
def koch(size, n):
    """Draw an order-``n`` Koch curve of total width ``size`` with turtle.

    Order 0 is a straight segment; otherwise the curve is four
    order-(n-1) copies joined by left turns of 0, 60, -120 and 60 degrees.
    """
    if n == 0:
        turtle.fd(size)
        return
    for turn in (0, 60, -120, 60):
        turtle.left(turn)
        koch(size / 3, n - 1)
def main():
    """Set up the turtle window and draw an order-3 Koch curve."""
    turtle.setup(800,400)
    turtle.penup()
    turtle.goto(-300, -50)
    turtle.pendown()
    turtle.pensize(2)
    koch(600,3)  # width of the order-0 segment, recursion depth
    turtle.hideturtle()
main()
"394845369@qq.com"
] | 394845369@qq.com |
cfa73bc6fadbdcbbdfc5cf8dd20e5d89e3cd7bb1 | 2c95e0f7bb3f977306f479d5c99601ab1d5c61f2 | /tests/setup_nodes.py | 5817f6c9a5620296c7b06fc358b7c88d2766b7d1 | [
"Apache-2.0"
] | permissive | Olive-blockchain/Olive-blockchain-CLI | d62444f8456467f8105531178d2ae53d6e92087d | 8c4a9a382d68fc1d71c5b6c1da858922a8bb8808 | refs/heads/main | 2023-07-19T03:51:08.700834 | 2021-09-19T16:05:10 | 2021-09-19T16:05:10 | 406,045,499 | 0 | 0 | Apache-2.0 | 2021-09-19T16:05:10 | 2021-09-13T16:20:38 | Python | UTF-8 | Python | false | false | 15,434 | py | import asyncio
import signal
from secrets import token_bytes
from typing import Dict, List, Optional
from olive.consensus.constants import ConsensusConstants
from olive.daemon.server import WebSocketServer, create_server_for_daemon, daemon_launch_lock_path, singleton
from olive.full_node.full_node_api import FullNodeAPI
from olive.server.start_farmer import service_kwargs_for_farmer
from olive.server.start_full_node import service_kwargs_for_full_node
from olive.server.start_harvester import service_kwargs_for_harvester
from olive.server.start_introducer import service_kwargs_for_introducer
from olive.server.start_service import Service
from olive.server.start_timelord import service_kwargs_for_timelord
from olive.server.start_wallet import service_kwargs_for_wallet
from olive.simulator.start_simulator import service_kwargs_for_full_node_simulator
from olive.timelord.timelord_launcher import kill_processes, spawn_process
from olive.types.peer_info import PeerInfo
from olive.util.bech32m import encode_puzzle_hash
from olive.util.block_tools import BlockTools, test_constants
from olive.util.hash import std_hash
from olive.util.ints import uint16, uint32
from olive.util.keychain import Keychain, bytes_to_mnemonic
from tests.time_out_assert import time_out_assert_custom_interval
bt = BlockTools(constants=test_constants)
self_hostname = bt.config["self_hostname"]
def constants_for_dic(dic):
    """Return the test consensus constants with the overrides in ``dic`` applied."""
    return test_constants.replace(**dic)
async def _teardown_nodes(node_aiters: List) -> None:
    """Resume every node async-generator once so its teardown code runs.

    Each setup_* helper is an async generator that performs cleanup after
    its ``yield``; calling ``__anext__`` resumes it, and the expected
    StopAsyncIteration (generator exhausted) is swallowed.
    """
    pending = [aiter.__anext__() for aiter in node_aiters]
    for finished in asyncio.as_completed(pending):
        try:
            await finished
        except StopAsyncIteration:
            continue
async def setup_daemon(btools):
    """Async-generator fixture: start the daemon WebSocket server.

    Yields the running WebSocketServer; resuming the generator stops it.
    The daemon singleton lock must be acquired (non-None) before start-up.
    """
    root_path = btools.root_path
    config = btools.config
    lockfile = singleton(daemon_launch_lock_path(root_path))
    crt_path = root_path / config["daemon_ssl"]["private_crt"]
    key_path = root_path / config["daemon_ssl"]["private_key"]
    ca_crt_path = root_path / config["private_ssl_ca"]["crt"]
    ca_key_path = root_path / config["private_ssl_ca"]["key"]
    assert lockfile is not None
    create_server_for_daemon(btools.root_path)
    ws_server = WebSocketServer(root_path, ca_crt_path, ca_key_path, crt_path, key_path)
    await ws_server.start()
    yield ws_server
    await ws_server.stop()
async def setup_full_node(
    consensus_constants: ConsensusConstants,
    db_name,
    port,
    local_bt,
    introducer_port=None,
    simulator=False,
    send_uncompact_interval=0,
    sanitize_weight_proof_only=False,
    connect_to_daemon=False,
):
    """Async-generator fixture: start one full node (or simulator) on ``port``.

    Yields the node's API object; resuming the generator stops the service.
    The blockchain DB named ``db_name`` is deleted both before start-up and
    after teardown so every run starts fresh.
    """
    db_path = local_bt.root_path / f"{db_name}"
    if db_path.exists():
        db_path.unlink()
    config = local_bt.config["full_node"]
    config["database_path"] = db_name
    config["send_uncompact_interval"] = send_uncompact_interval
    config["target_uncompact_proofs"] = 30
    config["peer_connect_interval"] = 50
    config["sanitize_weight_proof_only"] = sanitize_weight_proof_only
    if introducer_port is not None:
        config["introducer_peer"]["host"] = self_hostname
        config["introducer_peer"]["port"] = introducer_port
    else:
        config["introducer_peer"] = None
    config["dns_servers"] = []
    config["port"] = port
    # RPC listens at a fixed offset from the node port.
    config["rpc_port"] = port + 1000
    overrides = config["network_overrides"]["constants"][config["selected_network"]]
    updated_constants = consensus_constants.replace_str_to_bytes(**overrides)
    if simulator:
        kwargs = service_kwargs_for_full_node_simulator(local_bt.root_path, config, local_bt)
    else:
        kwargs = service_kwargs_for_full_node(local_bt.root_path, config, updated_constants)
    kwargs.update(
        parse_cli_args=False,
        connect_to_daemon=connect_to_daemon,
    )
    service = Service(**kwargs)
    await service.start()
    yield service._api
    service.stop()
    await service.wait_closed()
    if db_path.exists():
        db_path.unlink()
async def setup_wallet_node(
    port,
    consensus_constants: ConsensusConstants,
    local_bt,
    full_node_port=None,
    introducer_port=None,
    key_seed=None,
    starting_height=None,
):
    """Async-generator fixture: start a wallet node with a throw-away key.

    A fresh random keychain entry (seeded by ``key_seed`` when given) and a
    per-key wallet DB are created, and both are removed again on teardown.
    Yields ``(wallet_node, wallet_server)``.

    NOTE(review): the config, DB path and keychain come from the
    module-level ``bt`` while the service uses ``local_bt.root_path`` —
    looks intentional for shared test config, but confirm before changing.
    """
    config = bt.config["wallet"]
    config["port"] = port
    config["rpc_port"] = port + 1000
    if starting_height is not None:
        config["starting_height"] = starting_height
    config["initial_num_public_keys"] = 5
    entropy = token_bytes(32)
    keychain = Keychain(entropy.hex(), True)
    if key_seed is None:
        key_seed = entropy
    keychain.add_private_key(bytes_to_mnemonic(key_seed), "")
    first_pk = keychain.get_first_public_key()
    assert first_pk is not None
    db_path_key_suffix = str(first_pk.get_fingerprint())
    db_name = f"test-wallet-db-{port}-KEY.sqlite"
    db_path_replaced: str = db_name.replace("KEY", db_path_key_suffix)
    db_path = bt.root_path / db_path_replaced
    if db_path.exists():
        db_path.unlink()
    config["database_path"] = str(db_name)
    config["testing"] = True
    config["introducer_peer"]["host"] = self_hostname
    if introducer_port is not None:
        config["introducer_peer"]["port"] = introducer_port
        config["peer_connect_interval"] = 10
    else:
        config["introducer_peer"] = None
    if full_node_port is not None:
        config["full_node_peer"] = {}
        config["full_node_peer"]["host"] = self_hostname
        config["full_node_peer"]["port"] = full_node_port
    else:
        del config["full_node_peer"]
    kwargs = service_kwargs_for_wallet(local_bt.root_path, config, consensus_constants, keychain)
    kwargs.update(
        parse_cli_args=False,
        connect_to_daemon=False,
    )
    service = Service(**kwargs)
    await service.start(new_wallet=True)
    yield service._node, service._node.server
    service.stop()
    await service.wait_closed()
    if db_path.exists():
        db_path.unlink()
    keychain.delete_all_keys()
async def setup_harvester(port, farmer_port, consensus_constants: ConsensusConstants, b_tools):
    """Async-generator fixture: start a harvester connected to a farmer.

    Yields ``(harvester_node, harvester_server)``; resuming the generator
    stops the service and waits for shutdown.
    """
    service_kwargs = service_kwargs_for_harvester(
        b_tools.root_path, b_tools.config["harvester"], consensus_constants
    )
    service_kwargs["server_listen_ports"] = [port]
    service_kwargs["advertised_port"] = port
    service_kwargs["connect_peers"] = [PeerInfo(self_hostname, farmer_port)]
    service_kwargs["parse_cli_args"] = False
    service_kwargs["connect_to_daemon"] = False
    harvester_service = Service(**service_kwargs)
    await harvester_service.start()
    yield harvester_service._node, harvester_service._node.server
    harvester_service.stop()
    await harvester_service.wait_closed()
async def setup_farmer(
    port,
    consensus_constants: ConsensusConstants,
    b_tools,
    full_node_port: Optional[uint16] = None,
):
    """Async-generator fixture: start a farmer paying out to test addresses.

    Reward addresses and pool keys are taken from ``b_tools``; when
    ``full_node_port`` is given the farmer connects to that full node.
    Yields ``(farmer_api, farmer_server)``.

    NOTE(review): ``config``/``config_pool`` are read from the module-level
    ``bt.config`` rather than ``b_tools.config`` — confirm that mutation of
    the shared config is intended before changing.
    """
    config = bt.config["farmer"]
    config_pool = bt.config["pool"]
    config["xol_target_address"] = encode_puzzle_hash(b_tools.farmer_ph, "xol")
    config["pool_public_keys"] = [bytes(pk).hex() for pk in b_tools.pool_pubkeys]
    config["port"] = port
    config_pool["xol_target_address"] = encode_puzzle_hash(b_tools.pool_ph, "xol")
    if full_node_port:
        config["full_node_peer"]["host"] = self_hostname
        config["full_node_peer"]["port"] = full_node_port
    else:
        del config["full_node_peer"]
    kwargs = service_kwargs_for_farmer(b_tools.root_path, config, config_pool, b_tools.keychain, consensus_constants)
    kwargs.update(
        parse_cli_args=False,
        connect_to_daemon=False,
    )
    service = Service(**kwargs)
    await service.start()
    yield service._api, service._node.server
    service.stop()
    await service.wait_closed()
async def setup_introducer(port):
    """Async-generator fixture: start an introducer service on ``port``.

    Yields ``(introducer_api, introducer_server)``; resuming the generator
    stops the service and waits for shutdown.
    """
    service_kwargs = service_kwargs_for_introducer(
        bt.root_path,
        bt.config["introducer"],
    )
    service_kwargs["advertised_port"] = port
    service_kwargs["parse_cli_args"] = False
    service_kwargs["connect_to_daemon"] = False
    introducer_service = Service(**service_kwargs)
    await introducer_service.start()
    yield introducer_service._api, introducer_service._node.server
    introducer_service.stop()
    await introducer_service.wait_closed()
async def setup_vdf_client(port):
    """Spawn one external VDF client process; yields its asyncio task.

    SIGTERM/SIGINT handlers kill the spawned processes early; resuming the
    generator performs the same cleanup via kill_processes().
    """
    vdf_task_1 = asyncio.create_task(spawn_process(self_hostname, port, 1))
    def stop():
        asyncio.create_task(kill_processes())
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, stop)
    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, stop)
    yield vdf_task_1
    await kill_processes()
async def setup_vdf_clients(port):
    """Spawn three external VDF client processes; yields their asyncio tasks.

    SIGTERM/SIGINT handlers kill the spawned processes early; resuming the
    generator performs the same cleanup via kill_processes().
    """
    vdf_task_1 = asyncio.create_task(spawn_process(self_hostname, port, 1))
    vdf_task_2 = asyncio.create_task(spawn_process(self_hostname, port, 2))
    vdf_task_3 = asyncio.create_task(spawn_process(self_hostname, port, 3))
    def stop():
        asyncio.create_task(kill_processes())
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, stop)
    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, stop)
    yield vdf_task_1, vdf_task_2, vdf_task_3
    await kill_processes()
async def setup_timelord(port, full_node_port, sanitizer, consensus_constants: ConsensusConstants, b_tools):
    """Async-generator fixture: start a timelord wired to a full node.

    When ``sanitizer`` is True the timelord runs in sanitizer mode and its
    VDF server is pinned to port 7999 (matching setup_vdf_client(7999)).
    Yields ``(timelord_api, timelord_server)``.
    """
    config = b_tools.config["timelord"]
    config["port"] = port
    config["full_node_peer"]["port"] = full_node_port
    config["sanitizer_mode"] = sanitizer
    config["fast_algorithm"] = False
    if sanitizer:
        config["vdf_server"]["port"] = 7999
    kwargs = service_kwargs_for_timelord(b_tools.root_path, config, consensus_constants)
    kwargs.update(
        parse_cli_args=False,
        connect_to_daemon=False,
    )
    service = Service(**kwargs)
    await service.start()
    yield service._api, service._node.server
    service.stop()
    await service.wait_closed()
async def setup_two_nodes(consensus_constants: ConsensusConstants):
    """
    Setup and teardown of two full nodes, with blockchains and separate DBs.
    """
    first = setup_full_node(
        consensus_constants, "blockchain_test.db", 21234, BlockTools(constants=test_constants), simulator=False
    )
    second = setup_full_node(
        consensus_constants, "blockchain_test_2.db", 21235, BlockTools(constants=test_constants), simulator=False
    )
    fn1 = await first.__anext__()
    fn2 = await second.__anext__()
    yield fn1, fn2, fn1.full_node.server, fn2.full_node.server
    await _teardown_nodes([first, second])
async def setup_n_nodes(consensus_constants: ConsensusConstants, n: int):
    """
    Setup and teardown of ``n`` full nodes, each with its own blockchain DB
    and consecutive port starting at 21244.
    """
    first_port = 21244
    node_iters = [
        setup_full_node(
            consensus_constants,
            f"blockchain_test_{i}.db",
            first_port + i,
            BlockTools(constants=test_constants),
            simulator=False,
        )
        for i in range(n)
    ]
    nodes = [await it.__anext__() for it in node_iters]
    yield nodes
    await _teardown_nodes(node_iters)
async def setup_node_and_wallet(consensus_constants: ConsensusConstants, starting_height=None, key_seed=None):
    """Start one full node plus one wallet (full_node_port=None, so the
    wallet does not auto-connect to the node).

    Yields ``(full_node_api, wallet_node, full_node_server, wallet_server)``.
    """
    btools = BlockTools(constants=test_constants)
    node_iters = [
        setup_full_node(consensus_constants, "blockchain_test.db", 21234, btools, simulator=False),
        setup_wallet_node(21235, consensus_constants, btools, None, starting_height=starting_height, key_seed=key_seed),
    ]
    full_node_api = await node_iters[0].__anext__()
    wallet, s2 = await node_iters[1].__anext__()
    yield full_node_api, wallet, full_node_api.full_node.server, s2
    await _teardown_nodes(node_iters)
async def setup_simulators_and_wallets(
    simulator_count: int,
    wallet_count: int,
    dic: Dict,
    starting_height=None,
    key_seed=None,
    starting_port=50000,
):
    """Start ``simulator_count`` full-node simulators and ``wallet_count``
    wallets, with consensus constants overridden by ``dic``.

    Simulators take ports starting_port+i; wallets take starting_port+5000+i.
    Yields ``(simulators, wallets)``; resuming tears everything down.
    """
    simulators: List[FullNodeAPI] = []
    wallets = []
    node_iters = []
    consensus_constants = constants_for_dic(dic)
    for index in range(0, simulator_count):
        port = starting_port + index
        db_name = f"blockchain_test_{port}.db"
        bt_tools = BlockTools(consensus_constants, const_dict=dic)  # block tools modifies constants
        sim = setup_full_node(
            bt_tools.constants,
            db_name,
            port,
            bt_tools,
            simulator=True,
        )
        simulators.append(await sim.__anext__())
        node_iters.append(sim)
    for index in range(0, wallet_count):
        # Without an explicit key_seed, derive a distinct deterministic seed
        # per wallet from its index.
        if key_seed is None:
            seed = std_hash(uint32(index))
        else:
            seed = key_seed
        port = starting_port + 5000 + index
        bt_tools = BlockTools(consensus_constants, const_dict=dic)  # block tools modifies constants
        wlt = setup_wallet_node(
            port,
            bt_tools.constants,
            bt_tools,
            None,
            key_seed=seed,
            starting_height=starting_height,
        )
        wallets.append(await wlt.__anext__())
        node_iters.append(wlt)
    yield simulators, wallets
    await _teardown_nodes(node_iters)
async def setup_farmer_harvester(consensus_constants: ConsensusConstants):
    """Start a harvester/farmer pair on fixed test ports; yields the two nodes."""
    harvester_iter = setup_harvester(21234, 21235, consensus_constants, bt)
    farmer_iter = setup_farmer(21235, consensus_constants, bt)
    harvester, _harvester_server = await harvester_iter.__anext__()
    farmer, _farmer_server = await farmer_iter.__anext__()
    yield harvester, farmer
    await _teardown_nodes([harvester_iter, farmer_iter])
async def setup_full_system(
    consensus_constants: ConsensusConstants, b_tools=None, b_tools_1=None, connect_to_daemon=False
):
    """Start a complete test network: introducer, harvester, farmer, VDF
    clients, timelord, two full nodes, and a sanitizing timelord.

    Yields a 10-tuple of the started components (ending with the first
    node's server); resuming the generator tears everything down.
    """
    if b_tools is None:
        b_tools = BlockTools(constants=test_constants)
    if b_tools_1 is None:
        b_tools_1 = BlockTools(constants=test_constants)
    node_iters = [
        setup_introducer(21233),
        setup_harvester(21234, 21235, consensus_constants, b_tools),
        setup_farmer(21235, consensus_constants, b_tools, uint16(21237)),
        setup_vdf_clients(7800),
        setup_timelord(21236, 21237, False, consensus_constants, b_tools),
        setup_full_node(
            consensus_constants, "blockchain_test.db", 21237, b_tools, 21233, False, 10, True, connect_to_daemon
        ),
        setup_full_node(
            consensus_constants, "blockchain_test_2.db", 21238, b_tools_1, 21233, False, 10, True, connect_to_daemon
        ),
        setup_vdf_client(7999),
        setup_timelord(21239, 21238, True, consensus_constants, b_tools_1),
    ]
    introducer, introducer_server = await node_iters[0].__anext__()
    harvester, harvester_server = await node_iters[1].__anext__()
    farmer, farmer_server = await node_iters[2].__anext__()
    # Wait (up to 10s, polling every 3s) for the harvester to connect to
    # the farmer before bringing up the rest of the system.
    async def num_connections():
        count = len(harvester.server.all_connections.items())
        return count
    await time_out_assert_custom_interval(10, 3, num_connections, 1)
    vdf_clients = await node_iters[3].__anext__()
    timelord, timelord_server = await node_iters[4].__anext__()
    node_api_1 = await node_iters[5].__anext__()
    node_api_2 = await node_iters[6].__anext__()
    vdf_sanitizer = await node_iters[7].__anext__()
    sanitizer, sanitizer_server = await node_iters[8].__anext__()
    yield (
        node_api_1,
        node_api_2,
        harvester,
        farmer,
        introducer,
        timelord,
        vdf_clients,
        vdf_sanitizer,
        sanitizer,
        node_api_1.full_node.server,
    )
    await _teardown_nodes(node_iters)
| [
"87711356+Olive-blockchain@users.noreply.github.com"
] | 87711356+Olive-blockchain@users.noreply.github.com |
d9507018888a7618e9bea33aa3984633483a1f65 | 15514b8cdb9ef2bb25a33e44a2abe79e5eb86439 | /analyze_in_vivo/analyze_domnisoru/plots_for_thesis/fraction_burst.py | 29db8337758722bb7be32ad824a545b4741947a8 | [] | no_license | cafischer/analyze_in_vivo | 389ce0d51c6cbeb3e39648aaff13263f0c99060a | e38e1057420b5329504f7095f1ee89e2a293df23 | refs/heads/master | 2021-06-10T00:18:47.741793 | 2019-09-14T08:47:53 | 2019-09-14T08:47:53 | 100,512,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | import matplotlib.pyplot as pl
import numpy as np
import os
from analyze_in_vivo.load.load_domnisoru import get_cell_ids_DAP_cells, get_celltype_dict, load_cell_ids
pl.style.use('paper')
if __name__ == '__main__':
save_dir_img = '/home/cf/Dropbox/thesis/figures_results'
save_dir = '/home/cf/Phd/programming/projects/analyze_in_vivo/analyze_in_vivo/data/domnisoru'
save_dir_ISI_hist = '/home/cf/Phd/programming/projects/analyze_in_vivo/analyze_in_vivo/results/domnisoru/whole_trace/ISI_hist'
cell_type_dict = get_celltype_dict(save_dir)
grid_cells = np.array(load_cell_ids(save_dir, 'grid_cells'))
stellate_cells = load_cell_ids(save_dir, 'stellate_cells')
pyramidal_cells = load_cell_ids(save_dir, 'pyramidal_cells')
stellate_idxs = np.array([np.where(cell_id == grid_cells)[0][0] for cell_id in stellate_cells])
pyramidal_idxs = np.array([np.where(cell_id == grid_cells)[0][0] for cell_id in pyramidal_cells])
# load
fraction_burst = np.load(os.path.join(save_dir_ISI_hist, 'cut_ISIs_at_200', 'grid_cells', 'fraction_burst.npy'))
# plot
fig, ax = pl.subplots(figsize=(4, 6))
ax.plot(np.zeros(len(stellate_cells)), fraction_burst[stellate_idxs], 'ok')
ax.errorbar(0.2, np.mean(fraction_burst[stellate_idxs]), yerr=np.std(fraction_burst[stellate_idxs]),
marker='o', color='k', capsize=3)
ax.plot(np.ones(len(pyramidal_cells))*0.6, fraction_burst[pyramidal_idxs], 'ok')
ax.errorbar(0.8, np.mean(fraction_burst[pyramidal_idxs]), yerr=np.std(fraction_burst[pyramidal_idxs]),
marker='o', color='k', capsize=3)
ax.set_xlim(-0.2, 1.0)
ax.set_xticks([0, 0.6])
ax.set_xticklabels(['Stellate', 'Pyramidal'])
ax.set_ylabel('Fraction burst')
pl.tight_layout()
pl.savefig(os.path.join(save_dir_img, 'fraction_burst.png'))
pl.show()
| [
"coralinefischer@gmail.com"
] | coralinefischer@gmail.com |
7c64a40cefe9184f4e1d25be41ad8c3dbe5a006a | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20191201/route_filter.py | 5b32788416bb2d4e7928a5e77daf9939b8ca1619 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,170 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteFilterArgs', 'RouteFilter']
@pulumi.input_type
class RouteFilterArgs:
    """Input properties for a RouteFilter resource (auto-generated by the
    Pulumi SDK Generator; do not edit by hand)."""
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 route_filter_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a RouteFilter resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] route_filter_name: The name of the route filter.
        :param pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]] rules: Collection of RouteFilterRules contained within a route filter.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if route_filter_name is not None:
            pulumi.set(__self__, "route_filter_name", route_filter_name)
        if rules is not None:
            pulumi.set(__self__, "rules", rules)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="routeFilterName")
    def route_filter_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the route filter.
        """
        return pulumi.get(self, "route_filter_name")
    @route_filter_name.setter
    def route_filter_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "route_filter_name", value)
    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]:
        """
        Collection of RouteFilterRules contained within a route filter.
        """
        return pulumi.get(self, "rules")
    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]):
        pulumi.set(self, "rules", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class RouteFilter(pulumi.CustomResource):
    """Route Filter Resource (API version 2019-12-01)."""

    # API versions whose resource-type tokens alias this one.  The
    # unversioned "azure-native:network:RouteFilter" token is added
    # separately below, and this module's own version (v20191201) is
    # intentionally absent from the list, matching the generated aliases.
    _ALIAS_API_VERSIONS = (
        "v20161201", "v20170301", "v20170601", "v20170801", "v20170901",
        "v20171001", "v20171101", "v20180101", "v20180201", "v20180401",
        "v20180601", "v20180701", "v20180801", "v20181001", "v20181101",
        "v20181201", "v20190201", "v20190401", "v20190601", "v20190701",
        "v20190801", "v20190901", "v20191101", "v20200301", "v20200401",
        "v20200501", "v20200601", "v20200701", "v20200801", "v20201101",
        "v20210201", "v20210301", "v20210501",
    )

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_filter_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Route Filter Resource.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] route_filter_name: The name of the route filter.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]] rules: Collection of RouteFilterRules contained within a route filter.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RouteFilterArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Route Filter Resource.
        :param str resource_name: The name of the resource.
        :param RouteFilterArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a RouteFilterArgs
        # bag or loose keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(RouteFilterArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       id: Optional[pulumi.Input[str]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       route_filter_name: Optional[pulumi.Input[str]] = None,
                       rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the input property bag.
            # (With a valid opts.id we are instead adopting an existing
            # resource and __props__ is supplied by get() below.)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RouteFilterArgs.__new__(RouteFilterArgs)

            __props__.__dict__["id"] = id
            __props__.__dict__["location"] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["route_filter_name"] = route_filter_name
            __props__.__dict__["rules"] = rules
            __props__.__dict__["tags"] = tags
            # Output-only properties start out unset; the engine fills them in.
            __props__.__dict__["etag"] = None
            __props__.__dict__["ipv6_peerings"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["peerings"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
        # Register the unversioned token plus every historical API version as
        # aliases so the engine does not see version bumps as replacements.
        aliases = [pulumi.Alias(type_="azure-native:network:RouteFilter")]
        aliases += [pulumi.Alias(type_="azure-native:network/{}:RouteFilter".format(v))
                    for v in RouteFilter._ALIAS_API_VERSIONS]
        alias_opts = pulumi.ResourceOptions(aliases=aliases)
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(RouteFilter, __self__).__init__(
            'azure-native:network/v20191201:RouteFilter',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteFilter':
        """
        Get an existing RouteFilter resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties are read back from the provider; none are set here.
        __props__ = RouteFilterArgs.__new__(RouteFilterArgs)

        __props__.__dict__["etag"] = None
        __props__.__dict__["ipv6_peerings"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["peerings"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["rules"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return RouteFilter(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="ipv6Peerings")
    def ipv6_peerings(self) -> pulumi.Output[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
        """
        A collection of references to express route circuit ipv6 peerings.
        """
        return pulumi.get(self, "ipv6_peerings")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def peerings(self) -> pulumi.Output[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]:
        """
        A collection of references to express route circuit peerings.
        """
        return pulumi.get(self, "peerings")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the route filter resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Output[Optional[Sequence['outputs.RouteFilterRuleResponse']]]:
        """
        Collection of RouteFilterRules contained within a route filter.
        """
        return pulumi.get(self, "rules")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
8b94f7c4a36aaa24745fd041bcb686096c08b3a8 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_15633.py | f7aa235c9d624fa3adcab2a0b41cfee1e342232d | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,839 | py | import _surface
import chimera
try:
    import chimera.runCommand
except ImportError:  # older Chimera builds may lack the module
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except ImportError:
    # Fall back to creating marker sets through the Volume Path dialog.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set

marker_sets = {}
surf_sets = {}

# One entry per optimized bead: (marker-set name, (x, y, z) centre,
# (r, g, b) colour, radius).  Replaces 56 copy-pasted if/place_marker
# stanzas with a single data table driven by the loop below.
_MARKERS = [
    ('Cog2_GFPN', (493.911, 488.674, 431.968), (0.89, 0.1, 0.1), 18.4716),
    ('Cog2_0', (474.089, 456.501, 375.67), (0.89, 0.1, 0.1), 17.1475),
    ('Cog2_1', (449.084, 429.403, 301.651), (0.89, 0.1, 0.1), 17.1475),
    ('Cog2_GFPC', (511.721, 553.784, 312.087), (0.89, 0.1, 0.1), 18.4716),
    ('Cog2_Anch', (370.467, 331.485, 153.352), (0.89, 0.1, 0.1), 18.4716),
    ('Cog3_GFPN', (470.44, 472.737, 395.274), (1, 1, 0), 18.4716),
    ('Cog3_0', (469.849, 473.373, 396.164), (1, 1, 0.2), 17.1475),
    ('Cog3_1', (473.717, 500.696, 389.919), (1, 1, 0.2), 17.1475),
    ('Cog3_2', (449.847, 514.957, 391.68), (1, 1, 0.2), 17.1475),
    ('Cog3_3', (422.178, 516.391, 395.378), (1, 1, 0.2), 17.1475),
    ('Cog3_4', (400.372, 533.376, 390.149), (1, 1, 0.2), 17.1475),
    ('Cog3_5', (397.345, 526.683, 417.311), (1, 1, 0.2), 17.1475),
    ('Cog3_GFPC', (486.38, 466.908, 418.183), (1, 1, 0.4), 18.4716),
    ('Cog3_Anch', (305.393, 579.582, 418.162), (1, 1, 0.4), 18.4716),
    ('Cog4_GFPN', (280.558, 440.831, 273.306), (0, 0, 0.8), 18.4716),
    ('Cog4_0', (280.558, 440.831, 273.306), (0, 0, 0.8), 17.1475),
    ('Cog4_1', (308.384, 449.53, 279.589), (0, 0, 0.8), 17.1475),
    ('Cog4_2', (334.894, 458.045, 288.983), (0, 0, 0.8), 17.1475),
    ('Cog4_3', (361.401, 464.487, 300.637), (0, 0, 0.8), 17.1475),
    ('Cog4_4', (387.104, 467.79, 314.734), (0, 0, 0.8), 17.1475),
    ('Cog4_5', (408.756, 463.003, 334.411), (0, 0, 0.8), 17.1475),
    ('Cog4_6', (424.286, 452.72, 357.826), (0, 0, 0.8), 17.1475),
    ('Cog4_GFPC', (214.743, 581.915, 287.281), (0, 0, 0.8), 18.4716),
    ('Cog4_Anch', (628.359, 312.906, 444.516), (0, 0, 0.8), 18.4716),
    ('Cog5_GFPN', (426.157, 410.539, 339.96), (0.3, 0.3, 0.3), 18.4716),
    ('Cog5_0', (426.157, 410.539, 339.96), (0.3, 0.3, 0.3), 17.1475),
    ('Cog5_1', (432.982, 436.439, 330.003), (0.3, 0.3, 0.3), 17.1475),
    ('Cog5_2', (447.533, 458.144, 318.083), (0.3, 0.3, 0.3), 17.1475),
    ('Cog5_3', (466.474, 458.16, 296.327), (0.3, 0.3, 0.3), 17.1475),
    ('Cog5_GFPC', (527.358, 512.25, 390.319), (0.3, 0.3, 0.3), 18.4716),
    ('Cog5_Anch', (411.601, 402.62, 198.132), (0.3, 0.3, 0.3), 18.4716),
    ('Cog6_GFPN', (490.734, 478.286, 371.063), (0.21, 0.49, 0.72), 18.4716),
    ('Cog6_0', (491.105, 478.634, 370.941), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_1', (501.791, 481.534, 396.792), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_2', (490.474, 487.828, 421.85), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_3', (465.142, 494.316, 432.096), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_4', (444.409, 505.837, 447.393), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_5', (416.86, 507.124, 441.766), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_6', (392.755, 497.122, 431.181), (0.21, 0.49, 0.72), 17.1475),
    ('Cog6_GFPC', (425.108, 418.691, 419.148), (0.21, 0.49, 0.72), 18.4716),
    ('Cog6_Anch', (361.001, 578.114, 440.176), (0.21, 0.49, 0.72), 18.4716),
    ('Cog7_GFPN', (458.594, 399.335, 393.917), (0.7, 0.7, 0.7), 18.4716),
    ('Cog7_0', (464.417, 416.309, 374.422), (0.7, 0.7, 0.7), 17.1475),
    ('Cog7_1', (478.828, 453.215, 332.71), (0.7, 0.7, 0.7), 17.1475),
    ('Cog7_2', (493.577, 489.918, 291.262), (0.7, 0.7, 0.7), 17.1475),
    ('Cog7_GFPC', (560.619, 500.107, 336.636), (0.7, 0.7, 0.7), 18.4716),
    ('Cog7_Anch', (464.39, 530.715, 199.96), (0.7, 0.7, 0.7), 18.4716),
    ('Cog8_0', (534.817, 487.324, 399.606), (1, 0.5, 0), 17.1475),
    ('Cog8_1', (529.768, 473.166, 375.265), (1, 0.5, 0), 17.1475),
    ('Cog8_2', (518.88, 455.843, 354.904), (1, 0.5, 0), 17.1475),
    ('Cog8_3', (509.294, 437.877, 333.736), (1, 0.5, 0), 17.1475),
    ('Cog8_4', (496.817, 419.925, 313.596), (1, 0.5, 0), 17.1475),
    ('Cog8_5', (484.963, 400.059, 294.626), (1, 0.5, 0), 17.1475),
    ('Cog8_GFPC', (483.226, 433.15, 365.78), (1, 0.6, 0.1), 18.4716),
    ('Cog8_Anch', (483.688, 360.18, 220.403), (1, 0.6, 0.1), 18.4716),
]

for name, xyz, rgb, radius in _MARKERS:
    # Create each marker set on first use, exactly as the unrolled original.
    if name not in marker_sets:
        s = new_marker_set(name)
        marker_sets[name] = s
    s = marker_sets[name]
    s.place_marker(xyz, rgb, radius)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
"batxes@gmail.com"
] | batxes@gmail.com |
8ffe7071c683152556da838ec3a17236daa37678 | c92a60d7968130cf21b20a943976c0e1929b6eb8 | /apps/dashboard/forms/courses_form.py | 42fc57a32e986e0c4b09b13a1a6744fad9bb1d16 | [] | no_license | BeAnhTran/yoga-center-website | efb40103f343b9be627ce926156e66fe9f985435 | 8b7532f103a73a467cd903923f7cd2ccfc09d949 | refs/heads/master | 2022-11-25T09:51:27.000385 | 2020-08-03T01:44:22 | 2020-08-03T01:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,480 | py | from django import forms
from django.db import transaction
from apps.courses.models import Course
from apps.lectures.models import Lecture
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Row, Column, Fieldset
from apps.cards.models import CardType
from django.utils.translation import ugettext_lazy as _
from ..forms.lectures_form import LectureInlineForm
from django.forms.models import inlineformset_factory
from apps.dashboard.custom_layout_object import Formset
# Inline formset tying Lecture rows (name only) to their parent Course;
# shows one blank extra row and allows row deletion.  Rendered inside the
# course form's crispy layout via the "lectures" Formset placeholder.
LectureFormSet = inlineformset_factory(
    Course, Lecture, form=LectureInlineForm,
    fields=['name'], extra=1, can_delete=True
)
class CourseForm(forms.ModelForm):
    """Dashboard form for creating a Course.

    Builds a crispy-forms layout with pricing fields, card-type checkboxes
    and an inline "lectures" formset, and rejects names whose slug collides
    with an existing course.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Placeholder / autofocus hints for the rendered widgets.
        self.fields['name'].widget.attrs.update(
            {'autofocus': 'autofocus', 'placeholder': 'Yoga cơ bản'})
        self.fields['description'].widget.attrs.update(
            {'placeholder': 'Yoga cho người mới bắt đầu với động tác cơ bản'})
        self.fields['price_per_lesson'].widget.attrs.update({
            'placeholder': '50.000'
        })
        self.fields['price_per_month'].widget.attrs.update({
            'placeholder': '600.000'
        })
        self.fields['price_for_training_class'].widget.attrs.update({
            'placeholder': '10.000.000'
        })
        # Replace the default widget with checkboxes over all card types.
        self.fields['card_types'] = forms.ModelMultipleChoiceField(label=_(
            'Card types'), widget=forms.CheckboxSelectMultiple(), queryset=CardType.objects.all())
        self.fields['level'].required = False
        self.helper = FormHelper()
        self.helper.layout = Layout(
            'name',
            'description',
            Row(
                Column('course_type', css_class='form-group col-md-4 mb-0'),
                Column('level', css_class='form-group col-md-4 mb-0'),
                css_class='form-row'
            ),
            'card_types',
            'content',
            'image',
            'wages_per_lesson',
            Row(
                Column('price_per_lesson', css_class='form-group col-md-4 mb-0'),
                Column('price_per_month', css_class='form-group col-md-4 mb-0'),
                Column('price_for_training_class',
                       css_class='form-group col-md-4 mb-0'),
                css_class='form-row'
            ),
            Fieldset(_('Lectures'), Formset('lectures')),
            Submit('submit', 'Save', css_class='btn btn-success')
        )

    class Meta:
        model = Course
        exclude = ['slug', 'created_at', 'updated_at']

    def clean_name(self):
        """Reject a name whose slug duplicates an existing course's slug.

        :raises ValidationError: if another course already uses the slug.
        """
        from django.utils.text import slugify
        from django.core.exceptions import ValidationError
        name = self.cleaned_data['name']
        slug = slugify(name)
        if Course.objects.filter(slug=slug).exists():
            # Wrapped in _() for translation, consistent with CourseEditForm.
            raise ValidationError(_('A course with this name already exists.'))
        return name
class CourseEditForm(CourseForm):
    """Variant of :class:`CourseForm` used when editing an existing course."""

    def clean_name(self):
        """Validate the (possibly changed) name's slug for uniqueness.

        The lookup excludes the course being edited, so renaming a course to
        a name that slugifies to its own slug (e.g. a capitalisation change)
        is still accepted.  Unchanged names are accepted without a query.
        """
        name = self.cleaned_data['name']
        if 'name' in self.changed_data:
            from django.utils.text import slugify
            from django.core.exceptions import ValidationError
            qs = Course.objects.filter(slug=slugify(name))
            if self.instance.pk is not None:
                # Don't let the course collide with itself.
                qs = qs.exclude(pk=self.instance.pk)
            if qs.exists():
                raise ValidationError(
                    _('A course with this name already exists.'))
        return name
| [
"giatruongtran27@gmail.com"
] | giatruongtran27@gmail.com |
8f16e1b5e64421ec6bc8d889c797ff5980fe3a0a | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/aio/operations/_application_gateways_operations.py | 01d689ece54c34313dc39c75573bb130fb16761a | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 55,284 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewaysOperations:
"""ApplicationGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Wire the shared pipeline client, client configuration and
    # (de)serializers into this operation group; created by the service
    # client, not directly by users (see class docstring).
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
async def _delete_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs
) -> None:
    # Sends the raw DELETE request that starts the long-running delete;
    # ``begin_delete`` wraps this with LRO polling.
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200, 202 and 204 are all accepted outcomes for the initial DELETE.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Deletes the specified application gateway.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial DELETE before polling begins.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete returns no body; only the optional cls hook sees the response.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs
) -> "models.ApplicationGateway":
    """Gets the specified application gateway.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ApplicationGateway, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_10_01.models.ApplicationGateway
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGateway"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ApplicationGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    parameters: "models.ApplicationGateway",
    **kwargs
) -> "models.ApplicationGateway":
    """Send the initial PUT request of the create-or-update long-running operation.

    Returns the deserialized ApplicationGateway from the 200/201 response, or
    the result of the ``cls`` callback when one is supplied via kwargs.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGateway"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL path template with serialized, validated arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._create_or_update_initial.metadata['url'], **path_format_arguments)

    # Query string and headers.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the request body and send the PUT through the pipeline.
    body_content_kwargs = {'content': self._serialize.body(parameters, 'ApplicationGateway')}  # type: Dict[str, Any]
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both 200 and 201 carry an ApplicationGateway payload.
    deserialized = self._deserialize('ApplicationGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    parameters: "models.ApplicationGateway",
    **kwargs
) -> AsyncLROPoller["models.ApplicationGateway"]:
    """Creates or updates the specified application gateway.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :param parameters: Parameters supplied to the create or update application gateway operation.
    :type parameters: ~azure.mgmt.network.v2018_10_01.models.ApplicationGateway
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ApplicationGateway or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ApplicationGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGateway"]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fresh operation: fire the initial PUT, keeping the raw pipeline
        # response so the poller can drive the LRO from it.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            parameters=parameters,
            cls=lambda pipeline_resp, deserialized, headers: pipeline_resp,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response of the long-running operation.
        deserialized = self._deserialize('ApplicationGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling

    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
async def _update_tags_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    parameters: "models.TagsObject",
    **kwargs
) -> "models.ApplicationGateway":
    """Send the initial PATCH request of the update-tags long-running operation.

    Returns the deserialized ApplicationGateway from the 200 response, or the
    result of the ``cls`` callback when one is supplied via kwargs.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGateway"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL path template with serialized, validated arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._update_tags_initial.metadata['url'], **path_format_arguments)

    # Query string and headers.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the tags body and send the PATCH through the pipeline.
    body_content_kwargs = {'content': self._serialize.body(parameters, 'TagsObject')}  # type: Dict[str, Any]
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ApplicationGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
async def begin_update_tags(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    parameters: "models.TagsObject",
    **kwargs
) -> AsyncLROPoller["models.ApplicationGateway"]:
    """Updates the specified application gateway tags.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :param parameters: Parameters supplied to update application gateway tags.
    :type parameters: ~azure.mgmt.network.v2018_10_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ApplicationGateway or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ApplicationGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGateway"]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fresh operation: fire the initial PATCH, keeping the raw pipeline
        # response so the poller can drive the LRO from it.
        raw_result = await self._update_tags_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            parameters=parameters,
            cls=lambda pipeline_resp, deserialized, headers: pipeline_resp,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response of the long-running operation.
        deserialized = self._deserialize('ApplicationGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling

    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}'}  # type: ignore
def list(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["models.ApplicationGatewayListResult"]:
    """Lists all application gateways in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.ApplicationGatewayListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewayListResult"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if not next_link:
            # First page: expand the path template and add the api-version.
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(self.list.metadata['url'], **path_format_arguments)
            query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        else:
            # Subsequent pages: the service returns a fully-formed link.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
        return self._client.get(url, query_parameters, header_parameters)

    async def extract_data(pipeline_response):
        # Pull the current page's items and the link to the next page.
        deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)

list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways'}  # type: ignore
def list_all(
    self,
    **kwargs
) -> AsyncIterable["models.ApplicationGatewayListResult"]:
    """Gets all the application gateways in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationGatewayListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.ApplicationGatewayListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewayListResult"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if not next_link:
            # First page: expand the path template and add the api-version.
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(self.list_all.metadata['url'], **path_format_arguments)
            query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        else:
            # Subsequent pages: the service returns a fully-formed link.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
        return self._client.get(url, query_parameters, header_parameters)

    async def extract_data(pipeline_response):
        # Pull the current page's items and the link to the next page.
        deserialized = self._deserialize('ApplicationGatewayListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)

list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways'}  # type: ignore
async def _start_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs
) -> None:
    """Send the initial POST of the start long-running operation (no body expected)."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"

    # Expand the URL path template with serialized, validated arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._start_initial.metadata['url'], **path_format_arguments)
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'}  # type: ignore
async def begin_start(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Starts the specified application gateway.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fresh operation: fire the initial POST, keeping the raw pipeline
        # response so the poller can drive the LRO from it.
        raw_result = await self._start_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            cls=lambda pipeline_resp, deserialized, headers: pipeline_resp,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # No response body for this operation; only the cls hook sees the response.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling

    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start'}  # type: ignore
async def _stop_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs
) -> None:
    """Send the initial POST of the stop long-running operation (no body expected)."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"

    # Expand the URL path template with serialized, validated arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._stop_initial.metadata['url'], **path_format_arguments)
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'}  # type: ignore
async def begin_stop(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Stops the specified application gateway in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fresh operation: fire the initial POST, keeping the raw pipeline
        # response so the poller can drive the LRO from it.
        raw_result = await self._stop_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            cls=lambda pipeline_resp, deserialized, headers: pipeline_resp,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # No response body for this operation; only the cls hook sees the response.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling

    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop'}  # type: ignore
async def _backend_health_initial(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    expand: Optional[str] = None,
    **kwargs
) -> Optional["models.ApplicationGatewayBackendHealth"]:
    """Send the initial POST of the backend-health long-running operation.

    A 200 response carries an ApplicationGatewayBackendHealth body; a 202
    (accepted, still running) carries none, hence the Optional return.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.ApplicationGatewayBackendHealth"]]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    accept = "application/json"

    # Expand the URL path template with serialized, validated arguments.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._backend_health_initial.metadata['url'], **path_format_arguments)

    # Query string; $expand is optional.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

_backend_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'}  # type: ignore
async def begin_backend_health(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    expand: Optional[str] = None,
    **kwargs
) -> AsyncLROPoller["models.ApplicationGatewayBackendHealth"]:
    """Gets the backend health of the specified application gateway in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :param expand: Expands BackendAddressPool and BackendHttpSettings referenced in backend health.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ApplicationGatewayBackendHealth or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ApplicationGatewayBackendHealth]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewayBackendHealth"]
    lro_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # Fresh operation: fire the initial POST, keeping the raw pipeline
        # response so the poller can drive the LRO from it.
        raw_result = await self._backend_health_initial(
            resource_group_name=resource_group_name,
            application_gateway_name=application_gateway_name,
            expand=expand,
            cls=lambda pipeline_resp, deserialized, headers: pipeline_resp,
            **kwargs
        )
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response of the long-running operation.
        deserialized = self._deserialize('ApplicationGatewayBackendHealth', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling

    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_backend_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth'}  # type: ignore
async def list_available_waf_rule_sets(
    self,
    **kwargs
) -> "models.ApplicationGatewayAvailableWafRuleSetsResult":
    """Lists all available web application firewall rule sets.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ApplicationGatewayAvailableWafRuleSetsResult, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_10_01.models.ApplicationGatewayAvailableWafRuleSetsResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewayAvailableWafRuleSetsResult"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    accept = "application/json"

    # Subscription-scoped GET: only the subscription id appears in the path.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.list_available_waf_rule_sets.metadata['url'], **path_format_arguments)
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ApplicationGatewayAvailableWafRuleSetsResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

list_available_waf_rule_sets.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableWafRuleSets'}  # type: ignore
async def list_available_ssl_options(
    self,
    **kwargs
) -> "models.ApplicationGatewayAvailableSslOptions":
    """Lists available Ssl options for configuring Ssl policy.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ApplicationGatewayAvailableSslOptions, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_10_01.models.ApplicationGatewayAvailableSslOptions
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewayAvailableSslOptions"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-10-01"
    accept = "application/json"

    # Subscription-scoped GET: only the subscription id appears in the path.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self.list_available_ssl_options.metadata['url'], **path_format_arguments)
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ApplicationGatewayAvailableSslOptions', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

list_available_ssl_options.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default'}  # type: ignore
def list_available_ssl_predefined_policies(
    self,
    **kwargs
) -> AsyncIterable["models.ApplicationGatewayAvailableSslPredefinedPolicies"]:
    """Lists all SSL predefined policies for configuring Ssl policy.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationGatewayAvailableSslPredefinedPolicies or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.ApplicationGatewayAvailableSslPredefinedPolicies]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewayAvailableSslPredefinedPolicies"]
    # Map HTTP status codes to azure-core exception types; callers may
    # extend/override the mapping through the 'error_map' keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    api_version = "2018-10-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for either the first page or a continuation page.
        header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation link already encodes all query parameters.
            return self._client.get(next_link, {}, header_parameters)
        url = self.list_available_ssl_predefined_policies.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(url, query_parameters, header_parameters)

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation token, page items).
        deserialized = self._deserialize('ApplicationGatewayAvailableSslPredefinedPolicies', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a page, raising the mapped exception on any non-200 response.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
list_available_ssl_predefined_policies.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies'}  # type: ignore
async def get_ssl_predefined_policy(
    self,
    predefined_policy_name: str,
    **kwargs
) -> "models.ApplicationGatewaySslPredefinedPolicy":
    """Gets Ssl predefined policy with the specified policy name.

    :param predefined_policy_name: Name of Ssl predefined policy.
    :type predefined_policy_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ApplicationGatewaySslPredefinedPolicy, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_10_01.models.ApplicationGatewaySslPredefinedPolicy
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewaySslPredefinedPolicy"]
    # Map HTTP status codes to azure-core exception types; callers may
    # extend/override the mapping through the 'error_map' keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    api_version = "2018-10-01"
    accept = "application/json"

    # Expand the URL template from the operation metadata with the
    # subscription id and the requested policy name.
    url = self.get_ssl_predefined_policy.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'predefinedPolicyName': self._serialize.url("predefined_policy_name", predefined_policy_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ApplicationGatewaySslPredefinedPolicy', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_ssl_predefined_policy.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableSslOptions/default/predefinedPolicies/{predefinedPolicyName}'}  # type: ignore
| [
"noreply@github.com"
] | paultaiton.noreply@github.com |
cafdb03b3c85c33fab56156890ff9c9e6e356031 | 7fba01da6426480612d7cef9ceb2e15f3df6d01c | /PYTHON/pythonDesafios/desafio026.py | 09a9abe13c925116c934a1eaefb7fb7274226769 | [
"MIT"
] | permissive | Santos1000/Curso-Python | f320fec1e7ced4c133ade69acaa798d431e14113 | 549223a1633f6f619c87554dd8078cf7841bb1df | refs/heads/main | 2023-05-26T12:01:23.868814 | 2021-05-26T13:22:58 | 2021-05-26T13:22:58 | 371,039,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | frase = str(input('Digite a frase:')).upper().strip()
print('A primeira letra A aparece: na {} posicao.'.format(frase.find('A')+1))
print('A letra A aparece: {} vezes.'.format(frase.count('A')))
print('A ultima letra A aparece: na {} posicao.'.format(frase.rfind('A')+1))
| [
"83990871+Santos1000@users.noreply.github.com"
] | 83990871+Santos1000@users.noreply.github.com |
e1d69df5b96a8284caf0e616b10529f32b2b0641 | 3775102a3f59bc8aac9b8121ba2aef87409724ee | /Medium/pass_triangle.py | 7fecb020cf0b692b29563050feb428790dc7045e | [] | no_license | csikosdiana/CodeEval | a446ec6673e9f97439662bfccbd7454e5740d509 | 15cdd9ca454939e93c77d5ed5076595ecc7e4301 | refs/heads/master | 2016-08-11T14:49:27.565799 | 2016-03-22T17:48:20 | 2016-03-22T17:48:20 | 46,176,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | data = ["5", "9 6", "4 6 8", "0 7 1 5"]
#import sys
#test_cases = open(sys.argv[1], 'r')
#data = test_cases.readlines()
S = []
for test in data:
test = test.rstrip()
nums = test.split(" ")
nums = map(int, nums)
if S == []:
S = nums
continue
else:
T = nums
E = []
for i in range(0, len(T)):
if i == 0:
E.append((T[i] + S[i]))
elif i == len(T) - 1:
E.append((T[i] + S[len(S)-1]))
else:
m = max(S[i-1], S[i])
E.append((T[i] + m))
S = E
print max(S)
#test_cases.close()
| [
"csikosdiana@gmail.com"
] | csikosdiana@gmail.com |
f840e829bc9dfe916b409569d8d751b1d3de80fc | 8c6867a4019ca8e622df216dc0e2566c51b3d396 | /ashesiundergraduate/migrations/0001_initial.py | bd2a3c50c5fc4bdce49570d8e213249771eaf334 | [] | no_license | deone/apply | 3cd36233bc191393b05ef32073fdaa4a3b56fb2e | 724e024c1455cd193901e2f7f5a8377806ffe974 | refs/heads/master | 2021-01-21T04:41:33.605076 | 2019-05-22T12:29:13 | 2019-05-22T12:29:13 | 54,662,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-03 09:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: creates the PersonalInformation model for
# the ashesiundergraduate app. Generated code — do not edit by hand.
class Migration(migrations.Migration):
# First migration of this app.
initial = True
# Requires the 'setup' app's Application model (migration 0011) to exist,
# since PersonalInformation links to it one-to-one below.
dependencies = [
('setup', '0011_auto_20160402_2153'),
]
operations = [
migrations.CreateModel(
name='PersonalInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('middle_name', models.CharField(max_length=30, verbose_name='middle name')),
('date_of_birth', models.DateField(null=True, verbose_name='date of birth')),
# NullBooleanField renders True/False as Yes/No; None means unanswered.
('applied_before', models.NullBooleanField(choices=[(True, 'Yes'), (False, 'No')], verbose_name='applied before')),
# Stored as a 4-char string (e.g. "2016"), not an integer field.
('year_applied', models.CharField(max_length=4, null=True, verbose_name='year applied')),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1, null=True, verbose_name='gender')),
# One application record per PersonalInformation; deleting the
# Application cascades to this row.
('application', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='setup.Application')),
],
),
]
| [
"alwaysdeone@gmail.com"
] | alwaysdeone@gmail.com |
6bb6754a843f836266657cf3d64f58fec17a3cc7 | b0a1008bd20b8325328f62473acab216216fd72f | /static/playdrone/Reference_ref/com.google.android.apps.books-30149/constraints1_0.py | bb63b21546dc9ee9d3c67a7ce311cf4cb20ebbd6 | [] | no_license | zhuowei/IntelliDroidUiEvaluationCode | 14bffcbf707455a83bdcb7fc7e75f7dfcf461c11 | eab8bed4490739e2f5b2633e2817b46500e6d201 | refs/heads/master | 2020-03-09T07:33:00.053786 | 2018-04-09T18:26:45 | 2018-04-09T18:26:45 | 128,666,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # Entrypoint: com.google.android.apps.books.app.AccountPickerFragment.onDismiss(Landroid/content/DialogInterface;)V
# Target: invokevirtual < Application, Landroid/app/Activity, startActivity(Landroid/content/Intent;)V >@38
# Auto-generated path-constraint snippet (IntelliDroid-style): encodes the
# branch condition that must hold for the entrypoint above to reach the
# startActivity call. `Int` and `s` are presumably the Z3 integer sort and a
# z3.Solver created by the harness that loads this file — TODO confirm.
IAAv0 = Int('IAAv0') # Pointer<972229980>.equal(Pointer<-570473851>.getCurrentAccount())
# The equality check above must evaluate to false (0) on the feasible path.
s.add((IAAv0 == 0))
| [
"zhuoweizhang@yahoo.com"
] | zhuoweizhang@yahoo.com |
a7f5240b5cde3a626f32d2dca6f20e950e24b9b6 | 43598dd1e251b1733ed16981a148834bd9faca9b | /main.py | a0dfa3259f49652c346ab01ac8a5adc5a0e0ea8b | [] | no_license | SamL98/PhysicsEngine | 86e6f38a34d7261c13cc76e78f2702e72c4c0c3b | 440e9042cc999277bbc1961cbb4b8f2300f28fde | refs/heads/master | 2020-03-23T18:51:45.246905 | 2018-07-22T22:50:14 | 2018-07-22T22:50:14 | 141,936,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import cv2 as cv
import numpy as np
from time import sleep
from draw_util import clear
from draw import draw_shape
from line import create_line
from circle import create_ball
from collider import collide_inelastic
# Gravitational acceleration applied to the ball (units are per-frame pixels;
# exact scaling depends on the project's Shape.nextPos implementation — TODO confirm).
g = 9.8
# Canvas dimensions: width x height in pixels.
w, h = 250, 500
# OpenCV key code for ESC, used to exit the render loop.
kexit = 27
# White canvas (np.ones) of shape (rows=h, cols=w).
canvas = np.ones((h, w))
# Ball starts at the top, horizontally near the centre (row, col order —
# presumably matching the image coordinate convention; verify against circle.py).
ball = create_ball(pos=[0, w//2-10])
# Constant acceleration vector [vertical, horizontal]: gravity only.
ball.a = [g, 0]
ball.mass = 10.0
#line = create_line((h-2, 0), (h//2, w-1))
# Static ramp from mid-left down to the bottom-right corner.
line = create_line((h//2, 0), (h-2, w-1))
# Restitution used by collide_inelastic for the bounce response.
line.restitution = 10.0
# All drawable/updatable objects consulted by shdUpdate().
shapes = [ball, line]
def shdUpdate(shape_list=None):
    """Return True if any shape is flagged as needing a redraw/update.

    :param shape_list: optional iterable of shapes to check; defaults to the
        module-level ``shapes`` list, preserving the original zero-argument
        call signature.
    :return: bool — True as soon as one shape has ``needsUpdate`` set.
    """
    if shape_list is None:
        shape_list = shapes
    # any() short-circuits on the first shape that needs an update,
    # replacing the original manual index loop.
    return any(shape.needsUpdate for shape in shape_list)
init_timestep = True
while(1):
if init_timestep or shdUpdate():
init_timestep = False
clear(canvas)
draw_shape(canvas, ball)
draw_shape(canvas, line)
collide_inelastic(ball, line)
for shape in shapes:
shape.nextPos()
cv.imshow('canvas', canvas)
k = cv.waitKey(20)
if k == kexit:
break
cv.destroyAllWindows() | [
"lerner98@gmail.com"
] | lerner98@gmail.com |
e1d31a086d8d654f5f697c298c2e6e14355fe901 | 02ce6d29fec0d68ca2a2a778d37d2f2cff1a590e | /Old/PythonOne/20.3.1.py | bb4f66a90f65ad532544ff1f65ce40b1439c06cf | [] | no_license | CalvinCheungCoder/Python-100-Days | 605045122e40c119abc32466c32479559a4d4b9b | 0f9bec8893954d4afbe2037dad92885c7d4d31f8 | refs/heads/master | 2020-04-17T11:49:42.148478 | 2019-09-19T10:22:37 | 2019-09-19T10:22:37 | 166,556,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | import threading
import time
def thread_body():
    """Worker routine: announce five one-second iterations, then report completion.

    All messages include the executing thread's name so interleaved output
    from concurrent threads can be told apart.
    """
    current = threading.current_thread()
    n = 0
    while n < 5:
        print('第 {0} 次执行线程 {1}'.format(n, current.name))
        time.sleep(1)
        n += 1
    print('线程 {0} 执行完成!'.format(current.name))
def main():
    """Start two threads that each run thread_body concurrently.

    Bug fix: the original passed ``target=thread_body()``, which *called*
    the function immediately (running it synchronously in the main thread)
    and handed ``Thread`` its return value, ``None``, as the target — so the
    started threads did nothing. ``Thread`` needs the callable itself.
    """
    t1 = threading.Thread(target=thread_body)  # auto-named, e.g. "Thread-1"
    t1.start()
    t2 = threading.Thread(target=thread_body, name='MyThread')
    t2.start()
if __name__ == '__main__':
main() | [
"984382258@qq.com"
] | 984382258@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.