blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1d7f66d5f56705e13cc4e005bef0389f0354b73 | 7f4fae8e0a9e29fc3abee784a2d7d0beb8252bd5 | /wulifang/_util/_assert_isinstance.py | c9453ea2fc556fa8c84e22a3203df7402c3e6723 | [] | no_license | WuLiFang/Nuke | a303646e927c9745f2eaf8dad4e5e1ccc09a30e7 | 49df48ded0985771147b1a40707b5454291eab19 | refs/heads/master | 2023-07-21T13:36:27.423572 | 2023-07-17T10:34:04 | 2023-07-17T10:34:04 | 100,696,180 | 16 | 5 | null | 2020-03-08T11:50:16 | 2017-08-18T09:28:26 | Python | UTF-8 | Python | false | false | 626 | py | # -*- coding=UTF-8 -*-
# pyright: ignore
from __future__ import absolute_import, division, print_function, unicode_literals
def assert_isinstance(object, class_or_tuple):
    """Return *object* unchanged, raising when it is not an instance of the given class.

    Useful as an inline "checked cast":  ``x = assert_isinstance(get(), Foo)``.

    Args:
        object: object to test.
        class_or_tuple: second argument of `isinstance` (class or tuple of classes).

    Raises:
        AssertionError: when the object does not match.  (The previous
            docstring claimed a ``CastError``, which does not exist here.)

    Returns:
        object, unchanged.
    """
    # Raise explicitly instead of via the `assert` statement so the check
    # also runs under `python -O`, where asserts are stripped.
    if not isinstance(object, class_or_tuple):
        raise AssertionError(
            "unexpected instance type, expected=%s, actual=%s"
            % (class_or_tuple, type(object))
        )
    return object
| [
"NateScarlet@Gmail.com"
] | NateScarlet@Gmail.com |
69c935a05e1c18dc5da6afcea69b5e0e22e168f4 | b92d3221a9301db2d53a4b085f3224cef52964b8 | /showmethesnps/test.py | a1a4a9e9bef300ca9a8bde308b37983c34e0dbea | [] | no_license | ypchan/misc-genomics-tools | 472ad4a7b01dc0aef5f6f18935a8e5ddbb8abf56 | 7b1e1b24dae88dc3c81386898a35f91b1fa03d34 | refs/heads/master | 2020-04-24T15:36:51.647274 | 2015-11-23T12:09:42 | 2015-11-23T12:09:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | import subprocess
import vcf
import random
import sys
from reportlab.platypus import BaseDocTemplate, SimpleDocTemplate, Paragraph, Spacer, Preformatted, Frame, PageTemplate, KeepTogether
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.pagesizes import letter, portrait, landscape, A4
from reportlab.lib.units import inch
PAGE_HEIGHT=defaultPageSize[0]; PAGE_WIDTH=defaultPageSize[1]
styles = getSampleStyleSheet()
def get_vcf(stream):
    """Parse a VCF stream and return one (chromosome, position) tuple per variant."""
    return [(v.CHROM, v.POS) for v in vcf.Reader(stream)]
# Randomly pick 18 variant sites from the VCF piped in on stdin.
monkeys = random.sample(get_vcf(sys.stdin), 18)
def get_ttview(chrom, pos, rows):
    """Return the first *rows* lines of a ttview text pileup centred on chrom:pos."""
    # View width in bases; the window starts half a width left of pos.
    X = 100
    # NOTE(review): the ttview binary and the BAM/FASTA paths are hard-coded
    # relative to the working directory -- confirm before running elsewhere.
    args = ["../samtools-utilities/bin/ttview",
            "-X", str(X),
            "-g", "%s:%s" % (chrom, max(0, pos - (X/2))),
            "../demodata/sample_906_sorted.bam",
            "../demodata/Pseudomonas_aeruginosa_NCGM2S1.fasta"]
    p = subprocess.Popen(args, stdout=subprocess.PIPE)
    out, err = p.communicate()
    # Keep only the top `rows` lines of the rendered alignment text.
    return "\n".join(out.split("\n")[0:rows])
Title = "Hello world"
pageinfo = "platypus example"
def myFirstPage(canvas, doc):
    # Intentionally empty page-decoration callback; the onFirstPage hook that
    # would use it is commented out at the bottom of go().
    pass
def go():
    """Render the sampled variant pileups into a two-column landscape PDF.

    Note: this module uses Python 2 print statements.
    """
    #create the basic page and frames
    doc = BaseDocTemplate("phello.pdf", leftMargin = 10, rightMargin = 0, topMargin = 0, bottomMargin = 0)
    doc.pagesize = landscape(A4)
    frameCount = 2
    # Frames are laid out side by side; width/height are derived from the
    # document's height/width because the page is rotated to landscape.
    frameWidth = doc.height/frameCount
    frameHeight = doc.width-.05*inch
    frames = []
    #construct a frame for each column
    for frame in range(frameCount):
        leftMargin = doc.leftMargin + frame*frameWidth
        column = Frame(leftMargin, doc.bottomMargin, frameWidth, frameHeight)
        print leftMargin, doc.bottomMargin, frameWidth, frameHeight
        frames.append(column)
    template = PageTemplate(frames=frames)
    doc.addPageTemplates(template)
    # doc = SimpleDocTemplate("phello.pdf", id='TwoColumns')
    # doc.pagesize = landscape(A4) ## TODO: make configurable
    Story = []
    # Small monospace style so the ASCII pileups line up.
    style = styles["Normal"]
    style.fontName = 'Courier'
    style.fontSize = 6
    for monkey in monkeys:
        # Keep each 15-row pileup on a single column (no mid-block breaks).
        p = Preformatted(get_ttview(monkey[0], monkey[1], 15), style)
        Story.append(KeepTogether(p))
        Story.append(Spacer(1,0.05*inch))
    doc.build(Story)
    #, onFirstPage=myFirstPage)
go()
| [
"n.j.loman@bham.ac.uk"
] | n.j.loman@bham.ac.uk |
fe1de5211cea63949bb807de5ad40eaad40e2b47 | 7983657a020df84754276d8da9ea8a2c0b7fea6b | /Method_Inheritance_Example2.py | a9fa6f3ef8546289743076fd59fa5ee3e325185b | [] | no_license | gsudarshan1990/PythonClassesExample | fd65d72a68b44f3db2a64c73a0b5d6a17bd46676 | 56f155e2200cd0147d5c0767e8a63f891c7d4183 | refs/heads/master | 2022-11-08T00:23:00.077377 | 2020-06-29T08:37:02 | 2020-06-29T08:37:02 | 273,926,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | class Teacher:
    def __init__(self,name,age,code):
        # Basic teacher identity: display name, age, and subject code.
        self.name = name
        self.age = age
        self.code = code
    def welcome_students(self):
        # Shared greeting that each subclass's welcome() calls first.
        print("Welcome students")
class PhysicsTeacher(Teacher):
    """Teacher subclass demonstrating an explicit base-class method call."""
    def welcome(self):
        # Call the inherited greeting via the base class, then specialize.
        Teacher.welcome_students(self)
        print('This is physics class')
class BiologyTeacher(Teacher):
    """Teacher subclass demonstrating an explicit base-class method call."""
    def welcome(self):
        # Call the inherited greeting via the base class, then specialize.
        Teacher.welcome_students(self)
        print('This is biology class')
# Demo: each subclass prints the shared greeting then its own message.
pt=PhysicsTeacher('sonu',29,'phy')
pt.welcome()
bt=BiologyTeacher('deepu',29,'BIO')
bt.welcome()
"sudarshan2009@live.in"
] | sudarshan2009@live.in |
8f5296e6acb0a9b2c619b54ddecb8c2906c39f1d | 698cdadbcfd878f6a607337dd37ad7bf974f9709 | /sigpy/mri/precond_test.py | 0b182b661a17dd41f010e30ac41ccce8cccd40a6 | [
"BSD-3-Clause"
] | permissive | jtamir/sigpy | 654916486babc45fc769bf8be98da410a2ea46df | cd64f3f30819a1f36e3b7f7811b17815a97a2e08 | refs/heads/master | 2020-03-30T00:30:28.881622 | 2018-10-29T05:12:11 | 2018-10-29T05:12:11 | 150,527,263 | 0 | 0 | BSD-3-Clause | 2018-09-27T04:08:06 | 2018-09-27T04:08:05 | null | UTF-8 | Python | false | false | 5,287 | py | import unittest
import numpy as np
import sigpy as sp
import numpy.testing as npt
from sigpy.mri import linop, precond
if __name__ == '__main__':
unittest.main()
class TestPrecond(unittest.TestCase):
    """Tests for sigpy.mri k-space and circulant preconditioner construction.

    Each test builds the dense A A^H operator matrix explicitly (one unit
    vector at a time) and checks the preconditioner against it.

    NOTE: ``np.complex`` / ``np.float`` were deprecated in NumPy 1.20 and
    removed in 1.24; they are replaced below with the builtin ``complex`` /
    ``float``, which are exactly what those aliases referred to.
    """

    def test_kspace_precond_cart(self):
        nc = 4
        n = 10
        shape = (nc, n)
        mps = sp.randn(shape)
        mps /= np.linalg.norm(mps, axis=0, keepdims=True)
        weights = sp.randn([n]) >= 0

        A = sp.linop.Multiply(shape, weights**0.5) * linop.Sense(mps)

        # Dense A A^H, built column by column from unit vectors.
        AAH = np.zeros((nc, n, nc, n), complex)
        for d in range(nc):
            for j in range(n):
                x = np.zeros((nc, n), complex)
                x[d, j] = 1.0
                AAHx = A(A.H(x))
                for c in range(nc):
                    for i in range(n):
                        AAH[c, i, d, j] = AAHx[c, i]

        p_expected = np.ones((nc, n), complex)
        for c in range(nc):
            for i in range(n):
                if weights[i]:
                    p_expected_inv_ic = 0
                    for d in range(nc):
                        for j in range(n):
                            p_expected_inv_ic += abs(AAH[c, i, d, j])**2 / abs(AAH[c, i, c, i])

                    p_expected[c, i] = 1 / p_expected_inv_ic

        p = precond.kspace_precond(mps, weights=weights)
        # Only sampled locations (weights == 1) are meaningful.
        npt.assert_allclose(p[:, weights == 1], p_expected[:, weights == 1])

    def test_kspace_precond_noncart(self):
        n = 10
        nc = 3
        shape = [nc, n]
        mps = sp.randn(shape)
        mps /= np.linalg.norm(mps, axis=0, keepdims=True)
        coord = sp.randn([n, 1], dtype=float)

        A = linop.Sense(mps, coord=coord)

        AAH = np.zeros((nc, n, nc, n), complex)
        for d in range(nc):
            for j in range(n):
                x = np.zeros(shape, complex)
                x[d, j] = 1.0
                AAHx = A(A.H(x))
                for c in range(nc):
                    for i in range(n):
                        AAH[c, i, d, j] = AAHx[c, i]

        p_expected = np.zeros([nc, n], complex)
        for c in range(nc):
            for i in range(n):
                p_expected_inv_ic = 0
                for d in range(nc):
                    for j in range(n):
                        p_expected_inv_ic += abs(AAH[c, i, d, j])**2 / abs(AAH[c, i, c, i])

                p_expected[c, i] = 1 / p_expected_inv_ic

        p = precond.kspace_precond(mps, coord=coord)
        npt.assert_allclose(p, p_expected, atol=1e-2, rtol=1e-2)

    def test_kspace_precond_simple_cart(self):
        # Check identity
        mps_shape = [1, 1]
        mps = np.ones(mps_shape, dtype=complex)

        p = precond.kspace_precond(mps)
        npt.assert_allclose(p, np.ones(mps_shape))

        # Check scaling
        mps_shape = [1, 3]
        mps = np.ones(mps_shape, dtype=complex)

        p = precond.kspace_precond(mps)
        npt.assert_allclose(p, np.ones(mps_shape))

        # Check 2d
        mps_shape = [1, 3, 3]
        mps = np.ones(mps_shape, dtype=complex)

        p = precond.kspace_precond(mps)
        npt.assert_allclose(p, np.ones(mps_shape))

        # Check weights
        mps_shape = [1, 3]
        mps = np.ones(mps_shape, dtype=complex)
        weights = np.array([1, 0, 1], dtype=complex)

        p = precond.kspace_precond(mps, weights=weights)
        npt.assert_allclose(p, [[1, 1, 1]])

    def test_kspace_precond_simple_noncart(self):
        # Check identity
        mps_shape = [1, 1]
        mps = np.ones(mps_shape, dtype=complex)
        coord = np.array([[0.0]])

        p = precond.kspace_precond(mps, coord=coord)
        npt.assert_allclose(p, [[1.0]], atol=1, rtol=1e-1)

        mps_shape = [1, 3]
        mps = np.ones(mps_shape, dtype=complex)
        coord = np.array([[0.0], [-1], [1]])

        p = precond.kspace_precond(mps, coord=coord)
        npt.assert_allclose(p, [[1.0, 1.0, 1.0]], atol=1, rtol=1e-1)

    def test_circulant_precond_cart(self):
        nc = 4
        n = 10
        shape = (nc, n)
        mps = sp.randn(shape)
        mps /= np.linalg.norm(mps, axis=0, keepdims=True)
        weights = sp.randn([n]) >= 0

        A = sp.linop.Multiply(shape, weights**0.5) * linop.Sense(mps)
        F = sp.linop.FFT([n])

        # Expected diagonal in Fourier space: 1 / (F A^H A F^H) e_i.
        p_expected = np.zeros(n, complex)
        for i in range(n):
            if weights[i]:
                x = np.zeros(n, complex)
                x[i] = 1.0
                p_expected[i] = 1 / F(A.H(A(F.H(x))))[i]

        p = precond.circulant_precond(mps, weights=weights)
        npt.assert_allclose(p[weights == 1], p_expected[weights == 1])

    def test_circulant_precond_noncart(self):
        nc = 4
        n = 10
        shape = [nc, n]
        mps = np.ones(shape, dtype=complex)
        mps /= np.linalg.norm(mps, axis=0, keepdims=True)
        coord = sp.randn([n, 1], dtype=float)

        A = linop.Sense(mps, coord=coord)
        F = sp.linop.FFT([n])

        p_expected = np.zeros(n, complex)
        for i in range(n):
            x = np.zeros(n, complex)
            x[i] = 1.0
            p_expected[i] = 1 / F(A.H(A(F.H(x))))[i]

        p = precond.circulant_precond(mps, coord=coord)
        npt.assert_allclose(p, p_expected, atol=1e-1, rtol=1e-1)
| [
"frankong@berkeley.edu"
] | frankong@berkeley.edu |
ee2e612b46243311c27550523c0fc7de8bee7c10 | 597b5658208f34987fc05584e7fa9c20867e08d2 | /data_/wsgi.py | ddfeba1fdda7bc73c31a8f076dc197e93461f9d4 | [] | no_license | OmiGitProjects/django_class_based_view_1 | 40170112ffdefe5a3ef4676d9062583bdc0c3072 | 2ac6d1c3cdf974841235a3deea33c0eb16a97df5 | refs/heads/master | 2022-12-21T21:12:59.760694 | 2020-09-28T07:12:01 | 2020-09-28T07:12:01 | 299,220,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for data_ project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'data_.settings')

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"onkarnardekar2407@gmail.com"
] | onkarnardekar2407@gmail.com |
98cd99a96f2908d7120ef87419595277cc8cc979 | 3bdafea71f3412bb9116d9dbc8e78a14a6117702 | /python/hamming/hamming.py | 4277c03b087bc8c8acd15fe5733d9d8f8bec95b7 | [] | no_license | michaelrbock/exercism | 4b8490ae1851f1550c9118658eb62ff7e7ff59d5 | 505f944b696395041101f20a819ecb6ae063e9de | refs/heads/master | 2021-06-25T05:24:41.015749 | 2020-12-20T02:48:40 | 2020-12-20T02:48:40 | 147,027,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | def distance(strand_a, strand_b):
if len(strand_a) != len(strand_b):
raise ValueError('Strands must be the same length.')
# Idea:
# Go thru strings, comparing char at each index & adding to count of diffs.
# O(n)
hamming = 0
for i in range(len(strand_a)):
if strand_a[i] != strand_b[i]:
hamming += 1
return hamming
| [
"mykel.bock@gmail.com"
] | mykel.bock@gmail.com |
4906e5dc912425b12badd64c3784e45676adee61 | 3e30e334d759a3606be1dce6dca1154568808d68 | /xlsxwriter/test/utility/test_xl_range.py | 65cd873dcadf64b88b9afdb73d16e52db2466fab | [
"BSD-2-Clause-Views"
] | permissive | cimarronm/XlsxWriter | 59668816c4faf5b2e5fc7b96a4ab98d5797891b7 | b440055bebfcc08339bc3e43cc2ce9819a142004 | refs/heads/master | 2021-01-18T10:07:21.729676 | 2014-08-12T23:17:43 | 2014-08-12T23:17:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,109 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...utility import xl_range
from ...utility import xl_range_abs
class TestUtility(unittest.TestCase):
    """
    Test xl_range() utility function.
    """

    def test_xl_range(self):
        """Test xl_range()"""
        # Each row: zero-based (first_row, first_col, last_row, last_col)
        # plus the expected relative A1-style range string.
        tests = [
            # first_row, first_col, last_row, last_col, Range
            (0, 0, 9, 0, 'A1:A10'),
            (1, 2, 8, 2, 'C2:C9'),
            (0, 0, 3, 4, 'A1:E4'),
            (0, 0, 0, 0, 'A1:A1'),
            (0, 0, 0, 1, 'A1:B1'),
            (0, 2, 0, 9, 'C1:J1'),
            (1, 0, 2, 0, 'A2:A3'),
            (9, 0, 1, 24, 'A10:Y2'),
            (7, 25, 9, 26, 'Z8:AA10'),
            (1, 254, 1, 255, 'IU2:IV2'),
            (1, 256, 0, 16383, 'IW2:XFD1'),
            (0, 0, 1048576, 16384, 'A1:XFE1048577'),
        ]

        for first_row, first_col, last_row, last_col, cell_range in tests:
            exp = cell_range
            got = xl_range(first_row, first_col, last_row, last_col)

            self.assertEqual(got, exp)

    def test_xl_range_abs(self):
        """Test xl_range_abs()"""
        # Same cases as above, but expecting absolute ($-anchored) ranges.
        tests = [
            # first_row, first_col, last_row, last_col, Range
            (0, 0, 9, 0, '$A$1:$A$10'),
            (1, 2, 8, 2, '$C$2:$C$9'),
            (0, 0, 3, 4, '$A$1:$E$4'),
            (0, 0, 0, 0, '$A$1:$A$1'),
            (0, 0, 0, 1, '$A$1:$B$1'),
            (0, 2, 0, 9, '$C$1:$J$1'),
            (1, 0, 2, 0, '$A$2:$A$3'),
            (9, 0, 1, 24, '$A$10:$Y$2'),
            (7, 25, 9, 26, '$Z$8:$AA$10'),
            (1, 254, 1, 255, '$IU$2:$IV$2'),
            (1, 256, 0, 16383, '$IW$2:$XFD$1'),
            (0, 0, 1048576, 16384, '$A$1:$XFE$1048577'),
        ]

        for first_row, first_col, last_row, last_col, cell_range in tests:
            exp = cell_range
            got = xl_range_abs(first_row, first_col, last_row, last_col)

            self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
5813565bd0e51f5432aad74d968342ced3098a3f | 26e4bea46942b9afa5a00b9cde9a84f2cc58e3c9 | /wikidocs.net/dscriptor.py | ab2dac7cd991edf939b8978e72461e8927e8420f | [] | no_license | MeetLuck/works | 46da692138cb9741a913d84eff6822f107510dc7 | ab61175bb7e2ed5c5113bf150e0541ae18eb04c4 | refs/heads/master | 2020-04-12T05:40:25.143075 | 2017-08-21T17:01:06 | 2017-08-21T17:01:06 | 62,373,576 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | class Meter(object):
'''Descriptor for a meter.'''
    def __init__(self, value=0.0):
        # Store the length, coerced to float, on the descriptor itself.
        # Note: descriptor-level storage means all owner instances share it.
        self.value = float(value)
    def __get__(self, instance, owner):
        # Reads (on instance or class) return the stored float.
        return self.value
    def __set__(self, instance, value):
        # Writes re-coerce to float; `instance` is deliberately unused.
        self.value = float(value)
class Foot(object):
    '''Descriptor for a foot.

    Computed view over the owner's ``meter`` attribute (1 m = 3.2808 ft);
    nothing is stored on this descriptor itself.  The bare ``print``
    statements are Python-2 debug output.
    '''
    def __get__(self, instance, owner):
        print instance
        # Derive feet on the fly from the meter value.
        return instance.meter * 3.2808
    def __set__(self, instance, value):
        print instance
        # Setting feet writes back through the meter descriptor.
        instance.meter = float(value) / 3.2808
class Distance(object):
    '''Class to represent distance holding two descriptors for feet and
    meters.'''
    # Both descriptors keep state at the class level, so every Distance
    # instance effectively shares one underlying value.
    meter = Meter()
    foot = Foot()
if __name__ == '__main__':
    # Demo (Python 2): writing meters updates feet and vice versa.
    d = Distance()
    'd is instance, owner is Distance'
    d.meter = 1
    print d.meter, d.foot
    d.foot = 1
    print d.meter, d.foot
| [
"withpig1994@hanmail.net"
] | withpig1994@hanmail.net |
50fadb9983f241b8f7abe275c8b3d961f71ec44e | d23a1e18aaa379e636bfef6df5544330ae246fdd | /tconfig_nmm.py | bbea716ace3a7dd325b6df60122a3f2229e04800 | [
"MIT"
] | permissive | richteague/csalt | 2d8999e630a54c3723ac16b0148a7d96b6e6404a | c44fa7a691cc8057abe9ab7a458fe69334cb5393 | refs/heads/main | 2023-07-01T14:44:17.805761 | 2021-08-11T20:17:22 | 2021-08-11T20:17:22 | 395,091,765 | 0 | 0 | MIT | 2021-08-11T19:07:23 | 2021-08-11T19:07:22 | null | UTF-8 | Python | false | false | 1,102 | py | """
This is the main control file, used to generate synthetic data or to model
real (or synthetic) datasets.
"""
import numpy as np
# naming
template_dir = 'obs_templates/'
simobs_dir = '/pool/asha0/casa-release-5.7.2-4.el7/data/alma/simmos/'

# spectral settings
dfreq0 = 122.0703125 * 1e3      # native channel spacing (Hz)
restfreq = 230.538e9            # rest frequency (Hz)
vtune = 4.0e3                   # LSRK velocity tuning for central channel (m/s)
vspan = 15.0e3                  # +/- velocity width around vtune for simulation (m/s)

# spatial settings
RA = '16:00:00.00'              # phase center RA
DEC = '-40:00:00.00'            # phase center DEC
HA = '0.0h'                     # hour angle at start of EB
date = '2022/05/20'             # UTC date for start of EB

# Convert sexagesimal 'h:m:s' / 'd:m:s' strings into decimal degrees.
# (``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin ``float`` is its documented drop-in replacement.)
RA_pieces = [float(RA.split(':')[i]) for i in np.arange(3)]
RAdeg = 15 * np.sum(np.array(RA_pieces) / [1., 60., 3600.])
DEC_pieces = [float(DEC.split(':')[i]) for i in np.arange(3)]
DECdeg = np.sum(np.array(DEC_pieces) / [1., 60., 3600.])

# observation settings
integ = '6s'                    # integration time
ttotal = '20min'                # total on-source EB time
config = '6'                    # antenna configuration index
| [
"sandrews@cfa.harvard.edu"
] | sandrews@cfa.harvard.edu |
ef290bb1ea9c1591c883c5c84e577ab5318b4357 | 82aace1431e0af949b1294d979a16f8dc18f48c2 | /Python-Web-Framework-Softuni/templates_advanced/templates_advanced/settings.py | f303fd8078b08cb6e6f835ebc1dcb329ba65decc | [
"MIT"
] | permissive | borisboychev/SoftUni | 6778450417f889f8e89c709897b9e26c7129dbf6 | 22062312f08e29a1d85377a6d41ef74966d37e99 | refs/heads/master | 2023-03-27T13:11:17.378197 | 2021-03-26T09:14:25 | 2021-03-26T09:14:25 | 295,463,442 | 1 | 0 | null | 2020-10-12T12:54:13 | 2020-09-14T15:46:12 | Python | UTF-8 | Python | false | false | 3,406 | py | """
Django settings for templates_advanced project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from os.path import join
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment (e.g. os.environ) before any real deployment.
SECRET_KEY = 't+csd-v4wvmxou7t^&2kb3t_3*k+u!53ele6wb_bt#z$f3xnb+'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is acceptable while DEBUG is True; production requires
# explicit host names here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'pythons_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pythons_auth'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'templates_advanced.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'templates_advanced.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'python_wiki_db',
        'USER': 'postgres',
        # NOTE(review): database credentials are hard-coded in source control;
        # prefer environment variables.  With no HOST/PORT keys, Django
        # connects to the local default PostgreSQL socket/port.
        'PASSWORD': 'postgres951753'
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
join(STATIC_URL, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = join(BASE_DIR, 'media')
| [
"borisboychev007@Gmail.com"
] | borisboychev007@Gmail.com |
5db57f8a71168cc469ecc6b15fef5c0140e1a12c | dc6115d100c1f784f6a352580b2b28c46dedb66e | /models/psmc/make_psmc_from_twodof.py | 4632dd0ba30ba6bb2f9d5434ae2b719144e5520d | [
"BSD-3-Clause"
] | permissive | sot/xija | f0efbfec774fb661fb62956148e49003725e720d | 5430f1c4f182dda9f715b4c514f67d9f0501db90 | refs/heads/master | 2023-08-16T18:09:33.669660 | 2023-08-13T14:58:51 | 2023-08-13T14:58:51 | 1,733,443 | 7 | 3 | BSD-3-Clause | 2023-08-13T14:58:53 | 2011-05-11T14:10:52 | Python | UTF-8 | Python | false | false | 2,471 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import xija
import numpy as np
import asciitable
from Chandra.Time import DateTime
from Ska.Matplotlib import plot_cxctime
pars = dict(acis150 = 28.029,
acis50 = 54.192,
acis90 = 26.975,
c1 = 114.609,
c2 = 11.362,
hrci150 = 32.977,
hrci50 = 38.543,
hrci90 = 28.053,
hrcs150 = 37.265,
hrcs50 = 30.715,
hrcs90 = 30.013,
u01 = 6.036,
u01quad = -0.599,
u12 = 8.451,
)
u01 = pars['u01']
u12 = pars['u12']
c1 = pars['c1']
c2 = pars['c2']
P_pitches = [50, 90, 150]
P_vals = []
for instr in ('hrcs', 'hrci', 'acis'):
for pitch in P_pitches:
P_vals.append(pars['{0}{1}'.format(instr, pitch)])
P_vals = np.array(P_vals).reshape(3,3) * u01 / c1
P_vals = P_vals.tolist()
tau_e = c1 / u01
T_e = -128.0 * (1. / u01 + 1. / u12)
k = 1. / c2
tau12 = c1 / u12
tau21 = c2 / u12
mdl = xija.ThermalModel('psmc', start='2011:103:00:00:00.00', stop='2011:124:00:00:00')
pin1at = mdl.add(xija.Node, '1pin1at')
pdeaat = mdl.add(xija.Node, '1pdeaat')
pitch = mdl.add(xija.Pitch)
sim_z = mdl.add(xija.SimZ)
coup12 = mdl.add(xija.Coupling, pin1at, pdeaat, tau=tau12)
coup21 = mdl.add(xija.Coupling, pdeaat, pin1at, tau=tau21)
sol = mdl.add(xija.AcisPsmcSolarHeat, pin1at, pitch, sim_z, P_pitches=P_pitches, P_vals=P_vals)
heat = mdl.add(xija.HeatSink, pin1at, T=T_e, tau=tau_e)
# pow = mdl.add(xija.AcisPsmcPower, pdeaat, k=k)
fep_count = mdl.add(xija.CmdStatesData,
u'fep_count')
ccd_count = mdl.add(xija.CmdStatesData,
u'ccd_count')
vid_board = mdl.add(xija.CmdStatesData,
u'vid_board')
clocking = mdl.add(xija.CmdStatesData,
u'clocking')
pow = mdl.add(xija.AcisDpaStatePower, pdeaat, fep_count=fep_count,
ccd_count=ccd_count, vid_board=vid_board, clocking=clocking)
mdl.make()
mdl.calc()
mdl.write('psmc_classic.json')
# Compare this xija model against the classic PSMC model output.
psmc = asciitable.read('models_dev/psmc/out_2010103_2010124/temperatures.dat')
# NOTE(review): figure() and clf() are never imported in this file -- the
# script appears to assume an interactive pylab session; confirm how it runs.
figure(1)
clf()
# Presumably mvals = model-predicted values and dvals = telemetry data,
# per the node attributes used -- confirm against xija's Node API.
plot_cxctime(pdeaat.times, pdeaat.mvals, 'b')
plot_cxctime(pdeaat.times, pdeaat.dvals, 'r')
plot_cxctime(psmc['time'], psmc['1pdeaat'], 'g')
figure(2)
plot_cxctime(pin1at.times, pin1at.mvals, 'b')
plot_cxctime(pin1at.times, pin1at.dvals, 'r')
plot_cxctime(psmc['time'], psmc['1pin1at'], 'g')
| [
"taldcroft@gmail.com"
] | taldcroft@gmail.com |
539d2377aa8278873892049835a4ee0836416dbf | 593caafc362112c4769fb40be8b6dffc3685a53c | /noladata/neighborhoodgroups/load.py | 709e7f090b411ca4f74c3a9baa0a9b7a7fe6fd3d | [] | no_license | 596acres/noladata | 095b69e9124258c44835c439257fd46593883b28 | 5ba607eb85b63db3b7eb1054601feeb2b0a4cd8c | refs/heads/master | 2021-01-22T14:01:55.099289 | 2015-09-05T20:43:41 | 2015-09-05T20:43:41 | 13,478,710 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | import os
import re
from fastkml import kml
import lxml
from django.contrib.gis import geos
from ..load import get_processed_data_file
from .models import NeighborhoodGroup
def _clean_description(description):
    """Try to wipe HTML from the given description."""
    # NOTE(review): this replace looks like a no-op as written; it was
    # presumably meant to swap a non-breaking space (&nbsp; / U+00A0) for a
    # plain space and got mangled in an encoding pass -- confirm upstream.
    description = description.replace(' ', ' ')
    # Strip markup, keeping only the visible text.
    description = lxml.html.fromstring(description).text_content()
    # Collapse runs of whitespace into single spaces.
    (description, n) = re.subn('\s+', ' ', description)
    return description
def _get_full_name(description):
"""
Attempt to parse the full name of the neighborhood group from its
description.
"""
description = re.sub('Extent of( the)? ', '', description)
description = re.sub('\.? ?(Contact|Email|See|Visit).*', '', description)
description = description.strip()
description = re.sub('\.$', '', description)
return description
def _get_url(description):
"""Try to get a linked-to URL from the given description."""
match = re.match('.*href="([^"]+)".*', description)
if match:
return match.group(1)
return None
def _get_features(filename):
    """Parse the KML file and return the placemarks inside its first top-level feature."""
    # NOTE(review): the file handle from open() is never explicitly closed.
    k = kml.KML()
    k.from_string(open(filename, 'r').read())
    return list(k.features())[0].features()
def _get_filename():
    # Path to the processed neighborhood-groups KML shipped with the package.
    return get_processed_data_file(os.path.join('neighborhoodgroups',
                                                'NOLANeighborhoodGroups.kml'))
def _get_geometry(group):
    """Convert a KML placemark's geometry into a GEOS MultiPolygon, or None."""
    try:
        # Strip the KML altitude component (" 0.0") from each coordinate so
        # the WKT is 2D, then force the result into a MultiPolygon.
        wkt, n = re.subn(' 0.0', '', group.geometry.wkt)
        return geos.MultiPolygon(geos.fromstr(wkt))
    except Exception:
        # Deliberate best-effort: unparsable geometry yields None and the
        # caller skips the group.
        return None
def from_kml(strict=False, progress=True, verbose=False, **kwargs):
    """
    Load neighborhood group data into the database from the KML shapefile.

    NOTE(review): the strict/progress/verbose flags are currently unused.
    This module uses Python 2 print statements.
    """
    for group in _get_features(_get_filename()):
        # Skip if we have a group with this name already
        if NeighborhoodGroup.objects.filter(name=group.name).exists():
            continue

        # Try to get geometry and move on if we can't
        geometry = _get_geometry(group)
        if not geometry:
            print 'Did not get geometry from %s' % group.name
            continue
        else:
            print geometry

        description = _clean_description(group.description)
        saved_group = NeighborhoodGroup(
            description=description,
            full_name=_get_full_name(description),
            geometry=geometry,
            label=group.name,
            name=group.name,
            url=_get_url(group.description),
        )
        saved_group.save()
def load(**kwargs):
    # Thin entry point kept for the loaders' common interface; delegates
    # straight to from_kml().
    from_kml(**kwargs)
| [
"ebrelsford@gmail.com"
] | ebrelsford@gmail.com |
4287f92f32335249b79d13573e221e1d3464cbd7 | 57fa7d6820ca63d14f6568adb9185b8a8ea47589 | /reviseClassView/reviseApp/migrations/0002_auto_20200417_1627.py | 039da033464041a081c9407a1d5c3716a5b45488 | [] | no_license | kunjabijukchhe/Web-Development | 0dd0a5f415adb863f96c1552d90b6b7a282b6945 | e9bd5c5cc4b0f12f2f1714986c612494be9ab8ea | refs/heads/master | 2020-09-23T04:43:39.118376 | 2020-07-21T08:21:30 | 2020-07-21T08:21:30 | 225,405,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # Generated by Django 3.0.5 on 2020-04-17 16:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines Student.school as a
    # CASCADE-deleting foreign key exposed on School as ``students``.

    dependencies = [
        ('reviseApp', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='student',
            name='school',
            # related_name='students' enables school.students reverse lookups;
            # CASCADE removes students when their school row is deleted.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='students', to='reviseApp.School'),
        ),
    ]
"bijukchhekunja@gamil.com"
] | bijukchhekunja@gamil.com |
fa84fa2c26d7ffbe7fe287437c29f822bb29f124 | 3db261fac5a9e46eeda05240b8341048f0d96e11 | /scripts/haven | 0eaa7937014197722d2dfa5c4261383fb30349c6 | [
"Apache-2.0"
] | permissive | logichen/haven-ai | 684fac2415ba894ce20abbc906a940fefe68f868 | a12142c6711b818871fa7b4845c28013d6973718 | refs/heads/master | 2023-03-03T12:08:42.507761 | 2021-02-17T00:39:12 | 2021-02-17T00:39:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,645 | #!/usr/bin/env python
import haven_cli, sys, argparse, os
from haven import haven_utils as hu
from haven_jobs import toolkit_manager as ho
def form(string):
    """Coerce a CLI token: '1.5' -> float, '3' -> int, anything else unchanged."""
    caster = float if '.' in string else int
    try:
        return caster(string)
    except ValueError:
        # Not numeric -- keep the raw string.
        return string
# print(sys.argv)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--fname')
    parser.add_argument('-m', '--method', required=True)
    parser.add_argument('-ei', '--exp_id', required=True)
    parser.add_argument('-sb', '--savedir_base', required=True)
    parser.add_argument('-j', '--run_jobs', default=0, type=int)
    parser.add_argument('-v', '--visualize', default=0, type=int)

    args, rest = parser.parse_known_args()
    print(args)
    print(rest)
    # assert()
    # The experiment file must live in the current directory (no path part).
    assert os.path.dirname(args.fname) == ''

    from importlib import import_module
    fname = args.fname
    if not os.path.exists(fname):
        raise ValueError('%s not found...' % fname)
    sys.path.append(os.path.dirname(fname))
    name = os.path.split(fname)[-1].replace('.py','')
    module = import_module(name)

    # Leftover "--key v1,v2,..." pairs become the hyperparameter grid.
    exp_group = {}
    for i in range(0, len(rest), 2):
        h_key, h_val = rest[i], rest[i+1]
        exp_group[h_key.replace('--','')] = [form(v) for v in h_val.split(',')]
    exp_list = hu.cartesian_exp_group(exp_group)
    print(exp_group)

    # NOTE(review): --exp_id is declared required=True above, so this branch
    # always runs and the cartesian exp_list is always overwritten.
    if args.exp_id is not None:
        # select one experiment
        savedir = os.path.join(args.savedir_base, args.exp_id)
        exp_dict = hu.load_json(os.path.join(savedir, "exp_dict.json"))
        exp_list = [exp_dict]

    if args.visualize:
        pass
    else:
        if args.run_jobs:
            api = ho.get_api(token=None)
            command_list = []
            for exp_dict in exp_list:
                exp_id, savedir = hu.create_experiment(exp_dict,
                                args.savedir_base,
                                reset=0, verbose=True, copy_code=1)
                # NOTE(review): `hj` is never imported (only `ho` is), so this
                # line raises NameError as written.
                if hj.skip_job(reset=True):
                    continue
                # NOTE(review): 'python %s' is never formatted with a script
                # path, and `command` below is undefined -- command_list is
                # the variable actually populated.
                command_list += ['python %s']
                ho.submit_job_v2(api=api,
                                account_id='',
                                command=command,
                                job_config={},
                                savedir=savedir)
        else:
            for exp_dict in exp_list:
                hu.create_experiment(exp_dict, args.savedir_base, reset=True,
                                verbose=True, copy_code=1)
            # NOTE(review): relies on `exp_dict` leaking from the loop above,
            # so only the last experiment is actually run.
            getattr(module, args.method)(exp_dict, args.savedir_base)
| [
"issam.laradji@gmail.com"
] | issam.laradji@gmail.com | |
b3257a02fdedde18d2ee57e46c39cb25c3a94ca4 | 48aa9436bdd738cbbd09cfa381b6a5f60d18525d | /bricks/api/controllers/v1/collection.py | 9df7cc8d3f68106278f3d18874acaedc040f0e01 | [
"Apache-2.0"
] | permissive | CloudBrewery/bricks-service | b41b302d88b7dead01440e39c6e6765a169c1140 | e905c324b062bfd8a8b3917c788849ea0d497e54 | refs/heads/master | 2020-05-17T20:18:40.431719 | 2014-11-06T20:59:50 | 2014-11-06T20:59:50 | 33,747,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | import pecan
from wsme import types as wtypes
from bricks.api.controllers.v1 import base
from bricks.api.controllers.v1 import link
class Collection(base.APIBase):
    """Base class for paginated REST collections: a list plus a `next` link."""

    next = wtypes.text
    "A link to retrieve the next subset of the collection"

    @property
    def collection(self):
        # Subclasses set ``_type`` to the attribute name holding the items.
        return getattr(self, self._type)

    def has_next(self, limit):
        """Return whether collection has more items."""
        # A full page (len == limit) implies another page may exist.
        # NOTE(review): returns 0 (not False) for an empty collection because
        # of the ``len(...) and ...`` short-circuit.
        return len(self.collection) and len(self.collection) == limit

    def get_next(self, limit, url=None, **kwargs):
        """Return a link to the next subset of the collection."""
        if not self.has_next(limit):
            return wtypes.Unset

        resource_url = url or self._type
        # Serialize extra filter args; each ends with '&' so limit can follow.
        q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
        # Keyset pagination: the marker is the uuid of the last item returned.
        next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
            'args': q_args,
            'limit': limit,
            'marker': self.collection[-1].uuid}

        return link.Link.make_link('next', pecan.request.host_url,
                                   resource_url, next_args).href
| [
"thurloat@gmail.com"
] | thurloat@gmail.com |
311dbec004401cec15cad17549d8de3f2ad7eb49 | d992fcabff6508e4f53fb3f688d0ea67fd2047a5 | /similarity/similarity.py | e1e809c042df02db0d50b63240a63b8c880f9c0a | [] | no_license | turgunyusuf/car-datascience-toolkit | 5924fe9e9d930e864ce72a7f3b3090e042efa7ea | 0e62c55ddf24affc7a7ba406081bb78bac0a9da0 | refs/heads/master | 2021-01-23T04:53:27.051286 | 2012-06-05T00:05:08 | 2012-06-05T00:05:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,460 | py | """
similarity.py
Below are implementations of five common similarity metrics used in data and text
mining applications. Each of the metrics takes two vectors as input, represented
by Python lists.
Most of the similarity metrics in this package are most easily conceptualized in
geometric spaces. As such, each vector in this context typically represents a point
in n-dimensional space. For example:
>> print euclidean([1,2], [2,1])
More information on each of these metrics can be found here:
Euclidean distance: http://en.wikipedia.org/wiki/Euclidean_distance
Jaccard similarity: http://en.wikipedia.org/wiki/Jaccard_index
Hamming distance: http://en.wikipedia.org/wiki/Hamming_distance
Pearson correlation: http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
Cosine distance: http://en.wikipedia.org/wiki/Cosine_similarity
"""
import math
import operator
from itertools import imap
def euclidean(v1, v2):
    """
    Think of Euclidean distance as "as the crow flies" distance between two
    n-dimensional vectors. It is often used in things like clustering algorithms
    to determine whether points are close enough together to fit into a cluster,
    but it can also be used to determine similarity for tasks like recommendation.

    One warning: The more dimensions your vector has, the less useful Euclidean
    distance is. This is known as the "Curse of Dimensionality." More on that here:
    http://en.wikipedia.org/wiki/Curse_of_dimensionality

    :param v1: first vector (sequence of numbers)
    :param v2: second vector, same length as v1
    :raises AssertionError: if the vectors differ in length
    :returns: similarity score in (0, 1], 1.0 meaning identical points
    """
    # Explicit check instead of try/assert/except; same exception type as
    # before, but uses `except ... as ...`-free code that is valid on both
    # Python 2.6+ and Python 3 (the old `except X, e` form is Py2-only).
    if len(v1) != len(v2):
        raise AssertionError("Vectors must be same length!")
    squared_sum = sum((a - b) ** 2 for a, b in zip(v1, v2))
    return 1 / (1 + (math.sqrt(squared_sum)))  # Normalize as a score between 0 and 1
def jaccard(v1, v2):
"""
Jaccard takes two vectors and computes a score based on the number of items
that overlap. Specifically, it is defined as the number of items contained
in both sets (the intersection) divided by the total number of items in both
sets combined (the union).
This metric can be useful for calculating things like string similarity. A
variation on this metric, described on its Wikipedia page, is especially helpful
for measuring binary "market basket" similarity.
"""
intersection = list(set(v1) & set(v2))
union = list(set(v1) | set(v2))
# Subtracting from 1.0 converts the measure into a distance
return 1.0 - float(len(intersection)) / float(len(union))
def hamming(v1, v2):
    """
    Hamming distance is a measure of similarity that takes into account the order
    of items in a sequence. Looked at another way, it represents the number of
    changes that would need to be made for two sequences (or strings) to be made
    identical.

    It can be useful for applications such as comparing categorical data over a time
    series. It is especially useful for binary data and can be used for tasks like
    anomaly detection.

    :param v1: first sequence
    :param v2: second sequence, same length as v1
    :raises AssertionError: if the sequences differ in length
    :returns: number of positions at which the sequences differ
    """
    # Bug fix: the old `except AssertionError, e` clause is Python-2-only
    # syntax (SyntaxError under Python 3); a plain length check raises the
    # same exception with the same message.
    if len(v1) != len(v2):
        raise AssertionError("Vectors must be same length!")
    # Count positions where the paired items differ.
    return sum(i != j for i, j in zip(v1, v2))
def pearson(v1, v2):
    """
    Pearson distance measures the degree to which two vectors are linearly related.
    Journalists might also know it as simple correlation. We use it to determine
    whether, for example, low test scores are related to income; or large sums
    of campaign contribution are related to years of incumbency.

    In data mining, Pearson can be useful in determining similarity for the
    purposes of recommendation. It can also be more useful than Euclidean distance
    in cases where data is not well normalized. The value will always be between
    -1 and 1, with 0 indicating no correlation, -1 indicating a perfect negative
    correlation and 1 indicating a perfect positive correlation.

    This particularly concise implementation comes from Cloudera chief scientist Jeff
    Hammerbacher: http://bit.ly/wNIgqu

    :param v1: first vector of numbers
    :param v2: second vector, same length as v1
    :raises AssertionError: if the vectors differ in length
    :returns: correlation coefficient in [-1, 1]; 0 when either variance is 0
    """
    # Bug fix: drop the Py2-only `except AssertionError, e` syntax and the
    # Py2-only `itertools.imap` call; a zip generator computes the identical
    # dot product and works on both Python 2 and 3.
    if len(v1) != len(v2):
        raise AssertionError("Vectors must be same length!")
    n = len(v1)                  # Length of both vectors (they must match)
    sum_x = float(sum(v1))       # Sum of all items in vector v1
    sum_y = float(sum(v2))       # Sum of all items in vector v2
    sum_x_sq = sum(x ** 2 for x in v1)   # Sum of squares in v1
    sum_y_sq = sum(y ** 2 for y in v2)   # Sum of squares in v2
    psum = sum(x * y for x, y in zip(v1, v2))  # Dot product of v1 and v2
    num = psum - (sum_x * sum_y / n)
    den = pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2) / n), 0.5)
    if den == 0:
        # Either vector is constant: correlation is undefined, report 0.
        return 0
    return num / den
def cosine(v1, v2):
    """
    Cosine distance measures the similarity of two vectors by taking the cosine
    of the angle between them. It returns a value between -1 and 1, with -1 meaning
    the two vectors are exactly the opposite and 1 meaning they are exactly the same.

    Cosine distance is commonly used in text mining to compare document similiarities,
    typically by being applied to TF-IDF vector outputs. Like the other metrics
    in this library, it has many other applications as well.

    Note that cosine distance doesn't take magnitude into account, meaning it doesn't
    pay attention to the number of times a given word is listed in a document.

    :param v1: first vector of numbers
    :param v2: second vector, same length as v1
    :raises AssertionError: if the vectors differ in length
    :raises ZeroDivisionError: if either vector is all zeros (as before)
    :returns: cosine of the angle between the vectors, in [-1, 1]
    """
    # Bug fix: the old `except AssertionError, e` clause is Python-2-only
    # syntax; the explicit check raises the same exception either way.
    if len(v1) != len(v2):
        raise AssertionError("Vectors must be same length!")
    # Dot product and per-vector norms, computed with zip instead of indexing.
    dot = sum(a * b for a, b in zip(v1, v2))
    norm1 = math.sqrt(sum(a * a for a in v1))
    norm2 = math.sqrt(sum(b * b for b in v2))
    return dot / (norm1 * norm2)  # Return the cosine of the angle
if __name__ == '__main__':
    # Quick smoke check: prints the Jaccard distance of two sample vectors.
    # `print(...)` (call form) is valid on both Python 2 and 3 for a single
    # argument, unlike the old `print x` statement.
    print(jaccard([4, 12, 31, 6], [4, 5, 9, 4]))
    # print(tanimoto([1, 0, 1, 1, 0], [1, 1, 0, 0, 1]))
"chase.davis@gmail.com"
] | chase.davis@gmail.com |
34a73aed387f0db4ed22acfd612bae35e1b641b8 | 6d79783fa490d209e19639276051fc9e7e470fba | /integration-test/1716-reduce-surface-tag-precision.py | 8bb7a4dc0d16043b38e6f80507865999e25ce2c7 | [] | permissive | va2ron1/vector-datasource | d9007c2558d6866f75679d1a711a62044fdc81ab | 0145844e27d1e3cb3256b449040511b6791febd7 | refs/heads/master | 2021-05-10T15:30:59.456868 | 2019-06-18T00:36:58 | 2019-06-18T00:36:58 | 118,549,672 | 0 | 0 | MIT | 2019-06-18T00:36:59 | 2018-01-23T03:03:00 | Python | UTF-8 | Python | false | false | 2,369 | py | # -*- encoding: utf-8 -*-
from . import FixtureTest
class ResidentialTest(FixtureTest):
    """Check how a residential road's `surface` tag degrades with zoom.

    The fine-grained value survives at high zooms and collapses to the
    coarse 'unpaved' bucket at z14 and below.
    """

    def setUp(self):
        FixtureTest.setUp(self)

        import dsl

        tile = (16, 0, 0)
        props = {
            'source': 'openstreetmap.org',
            'highway': 'residential',
            'surface': 'fine_gravel',
        }

        self.generate_fixtures(
            dsl.way(1, dsl.tile_diagonal(*tile), props),
        )

    def _check(self, zoom=16, expect_surface=None):
        """Assert the fixture way shows the expected surface at tile zoom/0/0."""
        expected = {
            'id': 1,
            'surface': expect_surface,
        }
        self.assert_has_feature(zoom, 0, 0, 'roads', expected)

    def test_z16(self):
        self._check(zoom=16, expect_surface='fine_gravel')

    def test_z15(self):
        self._check(zoom=15, expect_surface='fine_gravel')

    def test_z14(self):
        self._check(zoom=14, expect_surface='unpaved')

    def test_z13(self):
        self._check(zoom=13, expect_surface='unpaved')

    def test_z12(self):
        self._check(zoom=12, expect_surface='unpaved')
class HighwayTest(FixtureTest):
    """Check how a motorway's `surface` tag degrades with zoom.

    'asphalt' is kept down to z11, then collapses to the coarse 'paved'
    bucket for z10 and below.
    """

    def setUp(self):
        FixtureTest.setUp(self)

        import dsl

        tile = (16, 0, 0)
        props = {
            'source': 'openstreetmap.org',
            'highway': 'motorway',
            'surface': 'asphalt',
        }

        self.generate_fixtures(
            dsl.way(1, dsl.tile_diagonal(*tile), props),
        )

    def _check(self, zoom=16, expect_surface=None):
        """Assert the fixture way shows the expected surface at tile zoom/0/0."""
        expected = {
            'id': 1,
            'surface': expect_surface,
        }
        self.assert_has_feature(zoom, 0, 0, 'roads', expected)

    def test_z16(self):
        self._check(zoom=16, expect_surface='asphalt')

    def test_z15(self):
        self._check(zoom=15, expect_surface='asphalt')

    def test_z14(self):
        self._check(zoom=14, expect_surface='asphalt')

    def test_z13(self):
        self._check(zoom=13, expect_surface='asphalt')

    def test_z12(self):
        self._check(zoom=12, expect_surface='asphalt')

    def test_z11(self):
        self._check(zoom=11, expect_surface='asphalt')

    def test_z10(self):
        self._check(zoom=10, expect_surface='paved')

    def test_z09(self):
        self._check(zoom=9, expect_surface='paved')

    def test_z08(self):
        self._check(zoom=8, expect_surface='paved')
| [
"zerebubuth@gmail.com"
] | zerebubuth@gmail.com |
f01d5f25df457c208e4a9f78f2e18acd215147e4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/95/usersdata/247/55410/submittedfiles/desvpad.py | 63111cc38a63a095f561ae87d03e3f43789c25ed | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | # -*- coding: utf-8 -*-
import math


def media(a):
    """Return the arithmetic mean of the numbers in *a*."""
    # Bug fix: the original divided by a module-level global `n`; using
    # len(a) makes the function self-contained with identical results.
    return sum(a) / len(a)


def desvio(a):
    """Return the sample standard deviation of *a* (n - 1 divisor)."""
    # Bug fixes from the original: unbalanced parentheses, the exponent
    # applied to media(a) instead of the whole difference, and the divisor
    # parsed as (soma / n) - 1 instead of soma / (n - 1).
    m = media(a)
    soma = sum((x - m) ** 2 for x in a)
    return math.sqrt(soma / (len(a) - 1))


if __name__ == '__main__':
    # Guarded so importing this module no longer blocks on interactive input.
    n = int(input('digite: '))
    a = []
    for i in range(1, n + 1, 1):
        # Bug fix: the original prompt was missing its opening quote
        # (SyntaxError).
        valor = float(input('digite: '))
        a.append(valor)

    print('%.2f' % a[0])
    print('%.2f' % a[n - 1])
    print('%.2f' % media(a))
    print('%.2f' % desvio(a))
    print(a)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
20687736946c26be718d24ed3116934822fa70fe | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/eager/memory_tests/memory_test_util.py | cb832711a91cac223f68426854d04e9f0512ffcf | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 2,061 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for memory tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.python.eager import context
# memory_profiler might not be available in the OSS version of TensorFlow.
try:
import memory_profiler # pylint:disable=g-import-not-at-top
except ImportError:
memory_profiler = None
def assert_no_leak(f, num_iters=100000, increase_threshold_absolute_mb=10):
  """Assert memory usage doesn't increase beyond given threshold for f."""
  with context.eager_mode():
    # Run once first so one-time allocations don't count against the budget.
    f()

    # Wait for background threads to start up and take over memory.
    # FIXME: The nature of this test leaves few other options. Maybe there
    # is a better way to do this.
    time.sleep(4)
    baseline_mb = memory_profiler.memory_usage(-1)[0]

    for _ in six.moves.range(num_iters):
      f()

    growth_mb = memory_profiler.memory_usage(-1)[0] - baseline_mb
    message = (
        "Increase is too high. Initial memory usage: %f MB. Increase: %f MB. "
        "Maximum allowed increase: %f") % (baseline_mb, growth_mb,
                                           increase_threshold_absolute_mb)
    assert growth_mb < increase_threshold_absolute_mb, message
def memory_profiler_is_available():
  """Whether the optional ``memory_profiler`` dependency was importable."""
  if memory_profiler is None:
    return False
  return True
| [
"v-grniki@microsoft.com"
] | v-grniki@microsoft.com |
3a66fa87dd85dd96fde2365bf84703081018a24e | c8b39acfd4a857dc15ed3375e0d93e75fa3f1f64 | /Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/idlelib/ReplaceDialog.py | 54c270df23c5fbeac5a15d60957aa9887164f042 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"Python-2.0",
"TCL",
"OpenSSL",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"Lice... | permissive | windystrife/UnrealEngine_NVIDIAGameWorks | c3c7863083653caf1bc67d3ef104fb4b9f302e2a | b50e6338a7c5b26374d66306ebc7807541ff815e | refs/heads/4.18-GameWorks | 2023-03-11T02:50:08.471040 | 2022-01-13T20:50:29 | 2022-01-13T20:50:29 | 124,100,479 | 262 | 179 | MIT | 2022-12-16T05:36:38 | 2018-03-06T15:44:09 | C++ | UTF-8 | Python | false | false | 5,828 | py | from Tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
import re
def replace(text):
    """Open the per-root ReplaceDialog for *text*, creating it on first use."""
    root = text._root()
    engine = SearchEngine.get(root)
    # Cache one dialog per search engine (i.e. per Tk root).
    if not hasattr(engine, "_replacedialog"):
        engine._replacedialog = ReplaceDialog(root, engine)
    engine._replacedialog.open(text)
class ReplaceDialog(SearchDialogBase):
    """Search-and-replace dialog; one instance is shared per Tk root (see replace())."""

    title = "Replace Dialog"
    icon = "Replace"

    def __init__(self, root, engine):
        """Create the dialog; replvar holds the replacement text entered by the user."""
        SearchDialogBase.__init__(self, root, engine)
        self.replvar = StringVar(root)

    def open(self, text):
        """Show the dialog for *text*, seeding the hit region from the selection.

        Falls back to the insert cursor when there is no selection.
        """
        SearchDialogBase.open(self, text)
        try:
            first = text.index("sel.first")
        except TclError:
            first = None
        try:
            last = text.index("sel.last")
        except TclError:
            last = None
        first = first or text.index("insert")
        last = last or first
        self.show_hit(first, last)
        self.ok = 1

    def create_entries(self):
        """Add the "Replace with:" entry below the base class's search entry."""
        SearchDialogBase.create_entries(self)
        self.replent = self.make_entry("Replace with:", self.replvar)

    def create_command_buttons(self):
        """Add the four replace-specific command buttons."""
        SearchDialogBase.create_command_buttons(self)
        self.make_button("Find", self.find_it)
        self.make_button("Replace", self.replace_it)
        self.make_button("Replace+Find", self.default_command, 1)
        self.make_button("Replace All", self.replace_all)

    def find_it(self, event=None):
        """Handler for "Find": locate the next match without replacing."""
        self.do_find(0)

    def replace_it(self, event=None):
        """Handler for "Replace": replace the current match, if any."""
        if self.do_find(self.ok):
            self.do_replace()

    def default_command(self, event=None):
        """Handler for "Replace+Find": replace the current match, then find the next."""
        if self.do_find(self.ok):
            if self.do_replace():   # Only find next match if replace succeeded.
                # A bad re can cause a it to fail.
                self.do_find(0)

    def _replace_expand(self, m, repl):
        """ Helper function for expanding a regular expression
        in the replace field, if needed.

        Returns the expanded replacement string, or None (after reporting the
        error) when *repl* is an invalid expansion template for a regex search.
        """
        if self.engine.isre():
            try:
                new = m.expand(repl)
            except re.error:
                self.engine.report_error(repl, 'Invalid Replace Expression')
                new = None
        else:
            new = repl
        return new

    def replace_all(self, event=None):
        """Replace every match from the starting position onward, as one undo block."""
        prog = self.engine.getprog()
        if not prog:
            return
        repl = self.replvar.get()
        text = self.text
        res = self.engine.search_text(text, prog)
        if not res:
            text.bell()
            return
        text.tag_remove("sel", "1.0", "end")
        text.tag_remove("hit", "1.0", "end")
        line = res[0]
        col = res[1].start()
        if self.engine.iswrap():
            line = 1
            col = 0
        ok = 1
        first = last = None
        # XXX ought to replace circular instead of top-to-bottom when wrapping
        text.undo_block_start()
        while 1:
            res = self.engine.search_forward(text, prog, line, col, 0, ok)
            if not res:
                break
            line, m = res
            chars = text.get("%d.0" % line, "%d.0" % (line+1))
            orig = m.group()
            new = self._replace_expand(m, repl)
            if new is None:
                break
            i, j = m.span()
            # Tk text indices are "line.column" strings.
            first = "%d.%d" % (line, i)
            last = "%d.%d" % (line, j)
            if new == orig:
                # Replacement is identical to the match: just move past it.
                text.mark_set("insert", last)
            else:
                text.mark_set("insert", first)
                if first != last:
                    text.delete(first, last)
                if new:
                    text.insert(first, new)
            # Resume searching right after the inserted text; ok=0 forbids a
            # zero-length re-match at the same spot.
            col = i + len(new)
            ok = 0
        text.undo_block_stop()
        if first and last:
            self.show_hit(first, last)
        self.close()

    def do_find(self, ok=0):
        """Find and highlight the next match; return True on success.

        *ok* allows a match at the current position (used right after open()).
        """
        if not self.engine.getprog():
            return False
        text = self.text
        res = self.engine.search_text(text, None, ok)
        if not res:
            text.bell()
            return False
        line, m = res
        i, j = m.span()
        first = "%d.%d" % (line, i)
        last = "%d.%d" % (line, j)
        self.show_hit(first, last)
        self.ok = 1
        return True

    def do_replace(self):
        """Replace the current selection/match; return True if a replace happened."""
        prog = self.engine.getprog()
        if not prog:
            return False
        text = self.text
        try:
            first = pos = text.index("sel.first")
            last = text.index("sel.last")
        except TclError:
            pos = None
        if not pos:
            first = last = pos = text.index("insert")
        line, col = SearchEngine.get_line_col(pos)
        chars = text.get("%d.0" % line, "%d.0" % (line+1))
        m = prog.match(chars, col)
        if not prog:
            return False
        new = self._replace_expand(m, self.replvar.get())
        if new is None:
            return False
        text.mark_set("insert", first)
        text.undo_block_start()
        if m.group():
            text.delete(first, last)
        if new:
            text.insert(first, new)
        text.undo_block_stop()
        self.show_hit(first, text.index("insert"))
        self.ok = 0
        return True

    def show_hit(self, first, last):
        """Highlight the span first..last with the "hit" tag and select it."""
        text = self.text
        text.mark_set("insert", first)
        text.tag_remove("sel", "1.0", "end")
        text.tag_add("sel", first, last)
        text.tag_remove("hit", "1.0", "end")
        if first == last:
            # Zero-width match: tag the single position.
            text.tag_add("hit", first)
        else:
            text.tag_add("hit", first, last)
        text.see("insert")
        text.update_idletasks()

    def close(self, event=None):
        """Hide the dialog and clear any leftover "hit" highlighting."""
        SearchDialogBase.close(self, event)
        self.text.tag_remove("hit", "1.0", "end")
| [
"tungnt.rec@gmail.com"
] | tungnt.rec@gmail.com |
26f368da117167af0a9e48e402d0d679c4e07cad | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_lamas.py | f27558af565dc9b3033bcd91b54bd03bbf64294f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
# class header
class _LAMAS():
def __init__(self,):
self.name = "LAMAS"
self.definitions = lama
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['lama']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
84465fe9bd15ea6d49c60d6d6db45dfb641ce5e0 | 04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29 | /653/Solution.py | ad29035086ba2a7376d88da8ca49f33fac1dcbfe | [] | no_license | zhangruochi/leetcode | 6f739fde222c298bae1c68236d980bd29c33b1c6 | cefa2f08667de4d2973274de3ff29a31a7d25eda | refs/heads/master | 2022-07-16T23:40:20.458105 | 2022-06-02T18:25:35 | 2022-06-02T18:25:35 | 78,989,941 | 14 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | """
Given a Binary Search Tree and a target number, return true if there exist two elements in the BST such that their sum is equal to the given target.
Example 1:
Input:
5
/ \
3 6
/ \ \
2 4 7
Target = 9
Output: True
Example 2:
Input:
5
/ \
3 6
/ \ \
2 4 7
Target = 28
Output: False
"""
class Solution:
    def inorder(self, root, nums):
        """Append the BST's values to *nums* in ascending (in-order) order."""
        # Iterative in-order traversal with an explicit stack.
        stack, node = [], root
        while stack or node:
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            nums.append(node.val)
            node = node.right

    def findTarget(self, root, k):
        """
        :type root: TreeNode
        :type k: int
        :rtype: bool
        """
        ordered = []
        self.inorder(root, ordered)
        # Two-pointer scan over the sorted value list.
        lo, hi = 0, len(ordered) - 1
        while lo < hi:
            pair_sum = ordered[lo] + ordered[hi]
            if pair_sum == k:
                return True
            if pair_sum < k:
                lo += 1
            else:
                hi -= 1
        return False
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def findTarget(self, root: "TreeNode", k: int) -> bool:
        """Return True iff two distinct BST nodes sum to *k*.

        Single in-order walk keeping a set of previously seen values.
        Annotations are quoted because TreeNode is only declared in the
        comment above; the bare name raised NameError at definition time.
        """
        hash_set = set()
        # Bug fix: res started as None, so the "no pair found" case returned
        # None instead of the annotated bool False.
        res = False

        def in_order(node: "TreeNode") -> None:
            nonlocal res
            if node:
                in_order(node.left)
                if (k - node.val) in hash_set:
                    res = True
                hash_set.add(node.val)
                in_order(node.right)

        in_order(root)
        return res
| [
"zrc720@gmail.com"
] | zrc720@gmail.com |
3cab9af1c0e2c5f36c1fea5f7f757921493c2980 | 959a7b17884aa9af1d38d9c6b0afe2045a9be5d2 | /Ransom Note.py | d4edb311d8c0e26b54aae2eba9195da4712b596c | [] | no_license | dumavit/leetcode | a1c998f4c56411b061995d939093b03f7aae366b | 866689f564125ca4152dc1b6b3d268991d7ec89a | refs/heads/master | 2022-02-07T20:44:17.388121 | 2022-01-31T21:02:28 | 2022-01-31T21:02:28 | 252,981,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | class Solution:
def canConstruct(self, ransomNote: str, magazine: str) -> bool:
store = {}
for char in magazine:
store[char] = store.get(char, 0) + 1
for char in ransomNote:
if store.get(char, 0) > 0:
store[char] -= 1
else:
return False
return True
| [
"vitalii.duma@corva.ai"
] | vitalii.duma@corva.ai |
08702a21dc5a84441037de8ff3cab67f6650f2e0 | c4c81058dd9fa111f706a5db7ee80064873271ba | /HLTrigger/special/hltPixelActivityHFSumEnergyFilter_cfi.py | f69d1b345cc86cb56cdc7ff23cc07a1165849e0f | [] | no_license | fwyzard/cmssw-cfipython | e142c3a3e707c599dae491333ec48522de3f2f34 | cae55b22a46433b55ea6ff5b36aecc043792d16c | refs/heads/master | 2021-07-25T21:04:42.950199 | 2017-10-24T06:29:00 | 2017-10-24T06:29:00 | 109,701,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | import FWCore.ParameterSet.Config as cms
# Configuration fragment instantiating the 'HLTPixelActivityHFSumEnergyFilter'
# EDFilter plugin for the CMS High-Level Trigger.
# NOTE(review): the parameter semantics below are inferred from names only —
# confirm against the C++ plugin implementation.
hltPixelActivityHFSumEnergyFilter = cms.EDFilter('HLTPixelActivityHFSumEnergyFilter',
    inputTag = cms.InputTag('hltSiPixelClusters'),  # pixel cluster collection read by the filter
    HFHitCollection = cms.InputTag('hltHfreco'),  # reconstructed HF hit collection
    eCut_HF = cms.double(0),  # presumably a per-hit HF energy cut — verify
    eMin_HF = cms.double(10000),  # presumably a minimum summed HF energy — verify
    offset = cms.double(-1000),  # offset term (meaning not visible here)
    slope = cms.double(0.5)  # slope term (meaning not visible here)
)
| [
"cmsbuild@cern.ch"
] | cmsbuild@cern.ch |
8864d2e4c0c87bcd58c979ef808fef7caf77bb93 | a8c76e503df527445b14163f74faa8aca7e90ecd | /tests/integration/test_payout.py | ae0fccbf167afe5babb69638fb0682e7c75496f5 | [
"MIT"
] | permissive | baseup/xendit-python | c2a4f0766886f6124a86810e0831653e1ca1e1f4 | 8b677fbbad5fe3bbcd0a2b93e30e8040543b8f61 | refs/heads/master | 2023-03-09T02:56:53.513101 | 2021-03-04T08:06:44 | 2021-03-04T08:06:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | import pytest
import time
from .base_integration_test import BaseIntegrationTest
from tests.sampleresponse.payout import payout_response
from tests.sampleresponse.payout import void_payout_response
class TestPayout(BaseIntegrationTest):
    """Integration tests for the Payout API: create, get, and void."""

    @pytest.fixture
    def Payout(self, xendit_instance):
        """Expose the Payout API object from the shared client."""
        return xendit_instance.Payout

    @pytest.fixture
    def payout_data(self, Payout):
        """Create a fresh payout for each test that needs one."""
        return Payout.create(
            external_id=f"payout-{int(time.time())}",
            amount=50000,
            email="test@email.co",
        )

    def test_create_payout_return_correct_keys(self, payout_data):
        self.assert_returned_object_has_same_key_as_sample_response(
            payout_data, payout_response()
        )

    def test_get_payout_return_correct_keys(self, Payout, payout_data):
        fetched = Payout.get(id=payout_data.id)
        self.assert_returned_object_has_same_key_as_sample_response(
            fetched, payout_response()
        )

    def test_void_payout_return_correct_keys(self, Payout, payout_data):
        voided = Payout.void(id=payout_data.id)
        self.assert_returned_object_has_same_key_as_sample_response(
            voided, void_payout_response()
        )
| [
"adyaksa.wisanggeni@gmail.com"
] | adyaksa.wisanggeni@gmail.com |
6cbf4bccbc63df688a85042dc3db41248b453259 | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/storage/cases/test_KT1KuHk7XKYJG2sVJGyKjPHK31rMS5ZBQJxX.py | da5742485aadf4888556c9366f0094e88875748c | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 1,130 | py | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1KuHk7XKYJG2sVJGyKjPHK31rMS5ZBQJxX(TestCase):
    """Round-trip and formatting checks for one contract's storage fixture."""

    @classmethod
    def setUpClass(cls):
        cls.maxDiff = None
        cls.contract = get_data('storage/zeronet/KT1KuHk7XKYJG2sVJGyKjPHK31rMS5ZBQJxX.json')

    def test_storage_encoding_KT1KuHk7XKYJG2sVJGyKjPHK31rMS5ZBQJxX(self):
        """Decoding then re-encoding the storage must reproduce it exactly."""
        script = self.contract['script']
        type_expr = script['code'][1]
        val_expr = script['storage']
        schema = build_schema(type_expr)
        decoded = decode_micheline(val_expr, type_expr, schema)
        self.assertEqual(val_expr, encode_micheline(decoded, schema))

    def test_storage_schema_KT1KuHk7XKYJG2sVJGyKjPHK31rMS5ZBQJxX(self):
        """The parameter section must build into a schema without raising."""
        _ = build_schema(self.contract['script']['code'][0])

    def test_storage_format_KT1KuHk7XKYJG2sVJGyKjPHK31rMS5ZBQJxX(self):
        """Code and storage must pretty-print to Michelson without raising."""
        _ = micheline_to_michelson(self.contract['script']['code'])
        _ = micheline_to_michelson(self.contract['script']['storage'])
| [
"mz@baking-bad.org"
] | mz@baking-bad.org |
bca8bffd28c10e681ff68b383a3ec34b1b0b62ab | 8f0b0ec0a0a2db00e2134b62a1515f0777d69060 | /scripts/study_case/ID_5/matchzoo/metrics/normalized_discounted_cumulative_gain.py | 717656f01ad1b197a4867fb92421ea4a164b590e | [
"Apache-2.0"
] | permissive | Liang813/GRIST | 2add5b4620c3d4207e7661eba20a79cfcb0022b5 | 544e843c5430abdd58138cdf1c79dcf240168a5f | refs/heads/main | 2023-06-09T19:07:03.995094 | 2021-06-30T05:12:19 | 2021-06-30T05:12:19 | 429,016,034 | 0 | 0 | Apache-2.0 | 2021-11-17T11:19:48 | 2021-11-17T11:19:47 | null | UTF-8 | Python | false | false | 2,037 | py | """Normalized discounted cumulative gain metric for ranking."""
import numpy as np
from scripts.study_case.ID_5.matchzoo.engine.base_metric import (
BaseMetric, sort_and_couple, RankingMetric
)
from .discounted_cumulative_gain import DiscountedCumulativeGain
class NormalizedDiscountedCumulativeGain(RankingMetric):
    """Normalized discounted cumulative gain metric."""

    ALIAS = ['normalized_discounted_cumulative_gain', 'ndcg']

    def __init__(self, k: int = 1, threshold: float = 0.):
        """
        :class:`NormalizedDiscountedCumulativeGain` constructor.

        :param k: Number of results to consider
        :param threshold: the label threshold of relevance degree.
        """
        self._k = k
        self._threshold = threshold

    def __repr__(self) -> str:
        """:return: Formated string representation of the metric."""
        return f"{self.ALIAS[0]}@{self._k}({self._threshold})"

    def __call__(self, y_true: np.array, y_pred: np.array) -> float:
        """
        Calculate normalized discounted cumulative gain (ndcg).

        Relevance is positive real values or binary values.

        Example:
            >>> y_true = [0, 1, 2, 0]
            >>> y_pred = [0.4, 0.2, 0.5, 0.7]
            >>> ndcg = NormalizedDiscountedCumulativeGain
            >>> ndcg(k=1)(y_true, y_pred)
            0.0
            >>> round(ndcg(k=2)(y_true, y_pred), 2)
            0.52
            >>> round(ndcg(k=3)(y_true, y_pred), 2)
            0.52
            >>> type(ndcg()(y_true, y_pred))
            <class 'float'>

        :param y_true: The ground true label of each document.
        :param y_pred: The predicted scores of each document.
        :return: Normalized discounted cumulative gain.
        """
        dcg_metric = DiscountedCumulativeGain(k=self._k,
                                              threshold=self._threshold)
        # Ideal DCG: the score of a perfect ranking, used as normalizer.
        idcg_val = dcg_metric(y_true, y_true)
        if idcg_val == 0:
            # Bug fix: previously returned the int 0 here despite the
            # annotated -> float return type; keep the value, fix the type.
            return 0.0
        dcg_val = dcg_metric(y_true, y_pred)
        return dcg_val / idcg_val
| [
"793679547@qq.com"
] | 793679547@qq.com |
81f5445a17b6faf6a97f2acbc8f3b4b938fd885f | f836c4b262c533a56d90439f80a12a586367ddf4 | /kuaishou_v2/models/BasicModule.py | ca98633b7836c2f2162af6526f06ab7387e23c65 | [] | no_license | yuanyuanzijin/learn-pytorch | eba6a3d9f5347c80d9b6e934a272f4ea1537cd87 | 0038a420fa83c5a0f1e3145ce49ee98c70f71725 | refs/heads/master | 2020-03-14T17:12:55.233173 | 2018-06-28T11:01:16 | 2018-06-28T11:01:16 | 131,714,344 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | #coding:utf8
import torch as t
import time
class BasicModule(t.nn.Module):
    """Thin wrapper around nn.Module providing save() and load() helpers."""

    def __init__(self):
        super(BasicModule, self).__init__()
        self.model_name = "LSTM"

    def load(self, path):
        """Load model parameters from the checkpoint file at *path*."""
        self.load_state_dict(t.load(path))

    def save(self, name=None):
        """Save model parameters; by default the file name is
        "checkpoints/<model_name>_<timestamp>.pth". Returns the file name used.
        """
        if name is None:
            pattern = 'checkpoints/' + self.model_name + '_' + '%m%d_%H:%M:%S.pth'
            name = time.strftime(pattern)
        t.save(self.state_dict(), name)
        return name
class Flat(t.nn.Module):
    """Reshape the input to (batch_size, -1), flattening all non-batch dims."""

    def __init__(self):
        super(Flat, self).__init__()

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
| [
"jinluyuan@vip.qq.com"
] | jinluyuan@vip.qq.com |
faa1fba06fd5432415ed81c7aa2d39851ef6a7f5 | 84d0a95c577a978e916fdcf35448d585c679cf33 | /colordetection.py | a70488485c63d70076622d4f4f647e58d68013ef | [] | no_license | wonderfulsuccess/fcCubeSolver | 436a91bcb7a12894a10ff60cf50ddf8ef71175a2 | 0969e1aa89f3dc2b7689e6e70f6b8c297bda8def | refs/heads/master | 2021-05-08T10:53:10.024365 | 2018-02-01T17:32:40 | 2018-02-01T17:32:40 | 119,869,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,002 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename : colordetection.py
# Author : Kim K
# Created : Tue, 26 Jan 2016
# Last Modified : Sun, 31 Jan 2016
# from recognize import Cubereg
from sys import exit as Die
try:
import sys
import numpy as np
import pickle
except ImportError as err:
Die(err)
# cubereg = Cubereg()
# cubereg.test_score()
class ColorDetection:
    """Classify cube-sticker colors with a pickled, pre-trained classifier."""

    def __init__(self):
        # Load the classifier once; predictions reuse it via self.clf.
        with open('color_detect.pickle', 'rb') as handle:
            self.clf = pickle.load(handle)

    def get_color_name(self, rgb):
        """ Get the name of the color based on the hue.

        :returns: string
        """
        sample = list(rgb)
        prediction = self.clf.predict(np.array([sample]))
        return str(prediction[0])

    def name_to_rgb(self, name):
        """
        Get the main RGB color for a name.

        :param name: the color name that is requested
        :returns: tuple
        """
        palette = {
            'R': (0, 0, 255),
            'O': (0, 165, 255),
            'B': (255, 0, 0),
            'G': (0, 255, 0),
            'W': (255, 255, 255),
            'Y': (0, 255, 255),
        }
        return palette[name]

    def average_rgb(self, roi):
        """ Average the HSV colors in a region of interest.

        :param roi: the image array
        :returns: tuple
        """
        totals = [0, 0, 0]
        samples = 0
        # Sample every 10th row and every 10th pixel within those rows —
        # the same positions the original modulo-based loops visited.
        for row_index in range(0, len(roi), 10):
            row = roi[row_index]
            for col_index in range(0, len(row), 10):
                pixel = row[col_index]
                samples += 1
                totals[0] += pixel[0]
                totals[1] += pixel[1]
                totals[2] += pixel[2]
        return tuple(int(total / samples) for total in totals)
ColorDetector = ColorDetection()
| [
"wonderfulsuccess@163.com"
] | wonderfulsuccess@163.com |
1189b36781d1447cec78f4b7ded925743adecca3 | 7c98dd4ef5e16abe1f9c27732f6c08c68545c203 | /Arquivos Exercicios/Exercicios/Ex036.py | 6eb0983ea1298aaf38716e2c7f2ae4ed24702a10 | [] | no_license | ronaldoedicassio/Pyhthon | ba79164e0c663fc89b384c95da7b29793adeb28a | 63fd77d968ff44394637c513b9e3188319ccd8e6 | refs/heads/master | 2023-03-21T08:15:43.123215 | 2021-03-01T16:46:12 | 2021-03-01T16:46:12 | 330,780,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | """
Escreve um programa para aprovar o empréstimo bancário para a compara de
uma casa. O programa vai perguntar:
- valor da casa
- Salario
- Quantos anos ele vai pagar
Calcule o valor da prestação mensal, sabendo que nao pode ultrapassar
30% do salario
desconsidere os juros de financiamento, somente considere o valor da casa para exercicio
"""
from datetime import datetime

now = datetime.now()
# Greeting by time of day. Bug fix: the old test `hour < 18 and hour > 12`
# excluded hour == 12, so exactly at noon the script fell through to the
# else branch and greeted with "boa noite" (good night).
if now.hour < 12:
    print("Bom dia")
elif now.hour < 18:
    print("Boa tarde")
else:
    print("boa noite")

# Read the loan inputs (house price, net salary, years to pay).
valor = float(input("Entre com Valor da casa: "))
salario = float(input("Digite o valor do salario liquido: "))
tempo = float(input("Quantos anos pretende pagar o imovel? "))
# Monthly installment, ignoring interest as the exercise specifies.
prestação = valor / (tempo * 12)
# The installment may not exceed 30% of the salary.
if prestação > salario * 0.3:
    print("Financiamento não aprovado, pois a prestão de R${:.2f} e maior que 30% do salario {:.2f}".format(prestação, salario))
else:
    print("Financiamento aprovado!!! \nPrestações de R$ {:.2f} serão cobradas no proxímo mês".format(prestação))
"75760249+ronaldoedicassio@users.noreply.github.com"
] | 75760249+ronaldoedicassio@users.noreply.github.com |
237264a96257e7ff1531821900dd2bcb382d806b | 4f1fae975df7c8f116bd78ccca80aaad2375180b | /test/test_stats_single_season_stat.py | 5fee9bcf643dd2dc2873b05a64a75f88315cfc4f | [] | no_license | phwissmann/nhl_openapi_py | 5604d8dc05600fa5b1df8a2dd89a285104fe44c9 | 9924820ffb8b3a3fa5bfbee9780685a124257113 | refs/heads/master | 2021-06-03T06:33:56.097813 | 2020-12-30T13:47:34 | 2020-12-30T13:47:34 | 149,875,820 | 0 | 0 | null | 2021-06-01T22:48:26 | 2018-09-22T12:43:32 | Python | UTF-8 | Python | false | false | 923 | py | # coding: utf-8
"""
NHL API
Documenting the publicly accessible portions of the NHL API. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.stats_single_season_stat import StatsSingleSeasonStat # noqa: E501
from openapi_client.rest import ApiException
class TestStatsSingleSeasonStat(unittest.TestCase):
    """StatsSingleSeasonStat unit test stubs"""

    def setUp(self):
        """No per-test setup is needed yet."""
        pass

    def tearDown(self):
        """No per-test cleanup is needed yet."""
        pass

    def testStatsSingleSeasonStat(self):
        """Test StatsSingleSeasonStat"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.stats_single_season_stat.StatsSingleSeasonStat()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"phwissmann@posteo.ch"
] | phwissmann@posteo.ch |
c4328ca74b588af87ae19e83a8235af735671787 | caa16eb083841e91a70603653928564f9fd3fed2 | /Python/read-n-characters-given-read4.py | 912eb1c250339f639e57a4573dbbbbfe9941989a | [] | no_license | guker/letcode | ea5198a3678c5f6ab2aebd61dc882153a4af1199 | a8c93ead3db7fe86c66b4aee9a8c67cc71cf95ec | refs/heads/master | 2021-01-24T04:08:33.839243 | 2015-08-11T11:19:24 | 2015-08-11T11:19:24 | 40,593,439 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | # Time: O(n)
# Space: O(1)
#
# The API: int read4(char *buf) reads 4 characters at a time from a file.
#
# The return value is the actual number of characters read. For example, it returns 3 if there is only 3 characters left in the file.
#
# By using the read4 API, implement the function int read(char *buf, int n) that reads n characters from the file.
#
# Note:
# The read function will only be called once for each test case.
#
# The read4 API is already defined for you.
# @param buf, a list of characters
# @return an integer
def read4(buf):
    """Simulated read4 API for local testing.

    Copies up to 4 characters of the module-level ``file_content`` into
    ``buf`` and consumes them from the simulated file.  Returns the
    number of characters actually copied (0-4); fewer than 4 means EOF.
    """
    global file_content
    i = 0
    while i < len(file_content) and i < 4:
        buf[i] = file_content[i]
        i += 1

    # Drop the consumed prefix so the next call resumes where we stopped.
    if len(file_content) > 4:
        file_content = file_content[4:]
    else:
        file_content = ""
    return i
class Solution:
    # @param buf, Destination buffer (a list of characters)
    # @param n, Maximum number of characters to read (an integer)
    # @return The number of characters read (an integer)
    def read(self, buf, n):
        # Reads up to n characters into buf using only the read4 API:
        # repeatedly read4 into a 4-char scratch buffer, copy at most the
        # remaining quota into buf, and stop early when read4 returns a
        # short count (EOF).  Runs in O(n) time, O(1) extra space.
        read_bytes = 0
        eof = False
        buffer = ['' for _ in xrange(4)]  # scratch buffer for each read4 call
        while not eof and read_bytes < n:
            size = read4(buffer)
            if size < 4:
                eof = True  # a short read means end of file
            bytes = min(n - read_bytes, size)  # never exceed the caller's quota
            for i in xrange(bytes):
                buf[read_bytes + i] = buffer[i]
            read_bytes += bytes
        return read_bytes
return read_bytes
if __name__ == "__main__":
    # Ad-hoc demo (Python 2): exercise read() against two simulated files,
    # one shorter and one longer than the 9-character request.
    global file_content
    buf = ['' for _ in xrange(100)]
    file_content = "a"
    print buf[:Solution().read(buf, 9)]
    file_content = "abcdefghijklmnop"
    print buf[:Solution().read(buf, 9)]
"kamyu104@gmail.com"
] | kamyu104@gmail.com |
32d3258b396f01d9edf0b1cebbf45e45bcf3c375 | ed5a082d977aefcecc8c40c76046d26334615a8e | /contest/abc/abc060/ABC060B.py | 20e20152e98eabc99fcd3a2785868d34887330ee | [] | no_license | arakoma/competitive_programming | 0ff9b9a97d2f37a3a1dac96c157f3235dde96b85 | ebbc5621860aca320a6949433f1707f1cbfcf911 | refs/heads/master | 2021-08-07T10:50:08.890353 | 2021-07-10T14:10:15 | 2021-07-10T14:10:15 | 223,712,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | A, B, C = map(int, input().split())
sum_A = A
for i in range(B):
sum_A += A
if sum_A % B == C:
print("YES")
break
else:
print("NO") | [
"arakoma555@gmail.com"
] | arakoma555@gmail.com |
36d568e8f41524bd7e38a34f4c350bf4959fddf5 | 43d138375c9068b3ea9dc0f75bab9d3f3bc0b8f1 | /polling_stations/apps/data_collection/management/commands/import_midlothian.py | ca62caf2dc9e941745002f483770a8386eb0c6af | [] | permissive | dantagg/UK-Polling-Stations | 69da767c934dd38d74d7ff632fefacdae82053cb | 2271b3fbfe5242de659892d24fad4d8851c804ba | refs/heads/master | 2023-01-03T20:19:58.315246 | 2020-09-28T11:09:40 | 2020-09-28T11:10:29 | 300,301,660 | 0 | 0 | BSD-3-Clause | 2020-10-01T14:07:00 | 2020-10-01T14:06:59 | null | UTF-8 | Python | false | false | 225 | py | from data_collection.management.commands import BaseScotlandSpatialHubImporter
class Command(BaseScotlandSpatialHubImporter):
    """Import polling-district/station data for Midlothian council.

    All fetch/parse behaviour is inherited from
    BaseScotlandSpatialHubImporter; this subclass only pins the council
    identifiers and the election the data applies to.
    """
    council_id = "S12000019"  # GSS code for Midlothian
    council_name = "Midlothian"
    elections = ["parl.2019-12-12"]  # 2019 UK general election
| [
"chris.shaw480@gmail.com"
] | chris.shaw480@gmail.com |
101bcc4ec5a1e173bae4c70f4048463537e40d00 | d4a569dcf616b7f05e53a44803e38196b436b8b9 | /Thesis@3.9.1/Lib/site-packages/isort/utils.py | 83840872513ed9b6e83a88e0ed2f6aeeeb1facdc | [
"MIT"
] | permissive | nverbois/TFE21-232 | ac3178d24939c872c02a671c0f1d8cc471af516b | 7113837b5263b5c508bfc6903cb6982b48aa7ee4 | refs/heads/main | 2023-06-05T18:50:59.207392 | 2021-06-25T19:54:40 | 2021-06-25T19:54:40 | 337,691,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | import os
import sys
def exists_case_sensitive(path: str) -> bool:
    """Return True if *path* exists with exactly the given filename case.

    On Windows and macOS, ``os.path.exists`` matches case-insensitively,
    yet Python can only import a module using the on-disk spelling, so on
    those platforms the parent directory listing is consulted to confirm
    the exact case.
    """
    if not os.path.exists(path):
        return False
    on_case_insensitive_fs = (
        sys.platform.startswith("win") or sys.platform == "darwin"
    )
    if not on_case_insensitive_fs:
        return True
    parent, basename = os.path.split(path)  # pragma: no cover
    return basename in os.listdir(parent)
| [
"38432529+nverbois@users.noreply.github.com"
] | 38432529+nverbois@users.noreply.github.com |
81510181cb9cd72445000d65553f3fb0960ff2ec | cedfdd1398b947b15eccf4473e9bbaddccb878d7 | /SDK/openstack/tests/unit/cloud/test_domain_params.py | cc0ce12cafa64fd04fe16c75cbbff91c2f2bef47 | [] | no_license | Doctor-DC/CMP-Recycle | 36fb1fdcf7c3a396bfef89d03948bd0ce626b053 | e3e6421f0b5dc28a075bc5bf91be9a45bcbe97c6 | refs/heads/dev | 2022-12-15T06:28:12.695868 | 2019-02-26T06:22:21 | 2019-02-26T06:22:21 | 142,127,512 | 0 | 0 | null | 2022-12-08T02:29:44 | 2018-07-24T08:18:46 | Python | UTF-8 | Python | false | false | 2,911 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from SDK.openstack.cloud import exc
from SDK.openstack.tests.unit import base
class TestDomainParams(base.TestCase):
    """Tests for the cloud's private ``_get_identity_params`` helper.

    Verifies that a project name plus (optional) domain id is translated
    into the correct authentication parameters for both Keystone v3
    (``default_project_id`` + ``domain_id``) and Keystone v2
    (``tenant_id`` only, domain ignored).
    """

    def test_identity_params_v3(self):
        # v3: project name resolves to default_project_id and the supplied
        # domain id is passed straight through.
        project_data = self._get_project_data(v3=True)
        self.register_uris([
            dict(method='GET',
                 uri='https://identity.example.com/v3/projects',
                 json=dict(projects=[project_data.json_response['project']]))
        ])
        ret = self.cloud._get_identity_params(
            domain_id='5678', project=project_data.project_name)
        self.assertIn('default_project_id', ret)
        self.assertEqual(ret['default_project_id'], project_data.project_id)
        self.assertIn('domain_id', ret)
        self.assertEqual(ret['domain_id'], '5678')
        self.assert_calls()

    def test_identity_params_v3_no_domain(self):
        # v3 requires a domain id; omitting it must raise rather than guess.
        project_data = self._get_project_data(v3=True)
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud._get_identity_params,
            domain_id=None, project=project_data.project_name)
        self.assert_calls()

    def test_identity_params_v2(self):
        # v2: only tenant_id is produced; the domain argument is ignored.
        self.use_keystone_v2()
        project_data = self._get_project_data(v3=False)
        self.register_uris([
            dict(method='GET',
                 uri='https://identity.example.com/v2.0/tenants',
                 json=dict(tenants=[project_data.json_response['tenant']]))
        ])
        ret = self.cloud._get_identity_params(
            domain_id='foo', project=project_data.project_name)
        self.assertIn('tenant_id', ret)
        self.assertEqual(ret['tenant_id'], project_data.project_id)
        self.assertNotIn('domain', ret)
        self.assert_calls()

    def test_identity_params_v2_no_domain(self):
        # v2 has no concept of domains, so a missing domain id is fine.
        self.use_keystone_v2()
        project_data = self._get_project_data(v3=False)
        self.register_uris([
            dict(method='GET',
                 uri='https://identity.example.com/v2.0/tenants',
                 json=dict(tenants=[project_data.json_response['tenant']]))
        ])
        ret = self.cloud._get_identity_params(
            domain_id=None, project=project_data.project_name)
        self.assertIn('tenant_id', ret)
        self.assertEqual(ret['tenant_id'], project_data.project_id)
        self.assertNotIn('domain', ret)
        self.assert_calls()
| [
"8920622362@qq.com"
] | 8920622362@qq.com |
4cf8c925f2e9225d1ac343a71090b100d96d56c9 | 358519772669c73092f625f630722c38e1d33783 | /examples/moltools/setup-complex-leap/computeangles.py | 08d92650857de44fcaab5bc2d8a7c8ef7f6e7918 | [] | no_license | minghao2016/mmtools | e7e61aca084498408ceae965dd6c9450ad89eafa | 3ade988afb51cd54ee5a4067d8deaad88afbb0fe | refs/heads/master | 2021-09-21T01:02:22.522187 | 2014-09-19T03:40:03 | 2014-09-19T03:40:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,573 | py | #!/usr/bin/python
"""Script to compute angles/distances describing the ligand's six degrees of freedom relative to the protein. Prompts for reference atoms."""
#python computeangles.py -f system.g96 -s minimization-constrained.tpr
import os
import re
import random
import sys
from optparse import OptionParser
parser=OptionParser()
def find_atom_nums(atom_locator_strings, g96file):
    """Map atom-locator patterns to GROMACS atom numbers.

    Each entry of *atom_locator_strings* is treated as a regular
    expression; whenever a line of *g96file* matches, the fourth
    whitespace-separated field of that line (the atom number in .g96
    coordinate files) is recorded for that pattern, in the same order as
    the input list.  Patterns that never match stay ``None``; if a
    pattern matches several lines the last match wins.
    ONLY WORKS FOR G96 FILES RIGHT NOW.
    """
    patterns = [re.compile(locator) for locator in atom_locator_strings]
    numbers = [None] * len(patterns)
    for line in g96file:
        for idx, pattern in enumerate(patterns):
            if pattern.search(line):
                numbers[idx] = line.split()[3]
    return numbers
def three_ligand_atoms(g96file):
    """Pick three ligand (residue name MOL) atoms to use as restraint atoms.

    Scans *g96file* for lines whose residue name is MOL and returns the
    atom numbers (fourth whitespace-separated field) of three of them,
    chosen uniformly at random.
    TODO: replace the random choice with something smarter.
    """
    mol_line = re.compile("^ *[0-9]* MOL")
    ligand_atom_numbers = [
        line.split()[3] for line in g96file if mol_line.search(line)
    ]
    return random.sample(ligand_atom_numbers, 3)
#######
#Read input (Python 2 script: note the print statements below)
#######
#Input trajectory
parser.add_option("-f","--file", dest="infile", help="FILE= Input trajectory file including extension. Can be xtc or trr. tpr file assumed to have same prefix unless supplied by -s option.", metavar="FILE")
#Input tpr file
parser.add_option("-s", "--tprfile", dest="tprfile", help="Name of input tpr file with extension. Default: Same as name of input trr/xtc file.", metavar="FILE")
#Use old anglegps.ndx?
parser.add_option("-o", "--old", action="store_true", dest="useoldangle", default=False, help="Use old anglegps.ndx file? (Useful if don't want to input atom numbers again if we're just re-analyzing or something). Default: False. No argument required.", metavar="NONE")
# random seed
parser.add_option("-d", "--seed", dest="seed", default=None, help="Random seed to initialize RNG. Defaults to system time", metavar="NONE")
#Name of output
parser.add_option("-n", "--name", dest="outname", help="Name for output files. Default: Same as input name.", metavar="NAME")
(options, args)=parser.parse_args()
if not options.infile:
    parser.error("Please supply input filename using -f.")
# NOTE(review): FileError is a plain string; `raise FileError` below relies on
# long-removed Python string exceptions and is a TypeError on any modern
# interpreter -- consider a real Exception subclass.
FileError="ERROR: Cannot find input file." #For use below.
random.seed(options.seed)
infile=options.infile
#xtc or trr
intype=infile[:-4]
#Error check for existence
if not (os.path.isfile(infile)):
    # NOTE(review): "%(intype)s" is never interpolated (missing `% vars()`),
    # so the literal placeholder is printed.
    print "ERROR: Cannot find file %(intype)s."
    raise FileError
#Output name?
outname=''
if not options.outname:
    #Same as input name less extension
    outname=infile[0:-4]
else:
    outname=options.outname
#Tpr name?
tprname=''
if not options.tprfile:
    tprname=infile[0:-4]+'.tpr'
else:
    tprname=options.tprfile
#Error check
if not (os.path.isfile(tprname)):
    # NOTE(review): same uninterpolated placeholder as above.
    print "ERROR: Cannot find file %(tprname)s."
    raise FileError
#Now, unless using old angle file, create an anglegps.ndx index file with user input
# Use particular atoms for SAMPL calculations
#TODO : generalize this method for arbitrary input residue/atoms
protein_atoms=["155 ASN CA","106 VAL CA","109 LEU CA"]
g96file=open(infile,"rt")
atoms=find_atom_nums(protein_atoms,g96file)
g96file.seek(0)
# lowercase=protein, uppercase=ligand
(atoma,atomb,atomc)=atoms
ligatoms=three_ligand_atoms(g96file)
(atomA,atomB,atomC)=ligatoms
#Write anglegps.ndx: index groups for the ligand's six relative degrees of
#freedom (one distance, two angles, three dihedrals) in GROMACS ndx format.
file=open('anglegps.ndx','w')
text="""[ atom_a ]
%(atoma)s
[ atom_A ]
%(atomA)s
[ theta_A ]
%(atomb)s %(atoma)s %(atomA)s
[ theta_B ]
%(atoma)s %(atomA)s %(atomB)s
[ phi_A ]
%(atomc)s %(atomb)s %(atoma)s %(atomA)s
[ phi_B ]
%(atomb)s %(atoma)s %(atomA)s %(atomB)s
[ phi_C ]
%(atoma)s %(atomA)s %(atomB)s %(atomC)s
""" % vars()
file.write(text)
file.close()
#Done with creation of angle groups file.
#Now process angles with g_angle, one index group at a time.
anglenames=['theta_A', 'theta_B', 'phi_A', 'phi_B', 'phi_C']
num=1
for anglename in anglenames:
    num+=1
    #Need to decide if angle or dihedral
    #If it has a theta, type is angle
    if anglename.find('theta')>-1:
        type='angle'
    else:
        type='dihedral'
    #Now analyze (group number is piped to g_angle's interactive prompt)
    os.system("echo \"%(num)s\" | g_angle -f %(infile)s -s %(tprname)s -n anglegps.ndx -ov %(outname)s_avg_%(anglename)s.xvg -type %(type)s" % vars())
#Done with angles. Do distance, also. Distance atom indices are 0 and 1.
os.system('echo \"0 \n 1\" | g_dist -f %(infile)s -s %(tprname)s -n anglegps.ndx -o %(outname)s_r_aA.xvg' % vars())
| [
"choderaj@mskcc.org"
] | choderaj@mskcc.org |
ba695391a4e019b68076275b6a4992840df0599e | 93e55f080779f16f47a7382a3fb0b29a4189e074 | /convertor/huawei/impl/asin.py | 7806afea7dd526d423fe8359766439f01b3f30ed | [] | no_license | jizhuoran/caffe-huawei-atlas-convertor | b00cfdec3888da3bb18794f52a41deea316ada67 | 148511a31bfd195df889291946c43bb585acb546 | refs/heads/master | 2022-11-25T13:59:45.181910 | 2020-07-31T07:37:02 | 2020-07-31T07:37:02 | 283,966,371 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,265 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use
this file except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
asin
Op_description :
Computes acos of x element-wise
# asin(
# x,
# y,
# kernel_name="cce_asin")
Supportive_dtype_format :
['float16', 'float32']
['ALL']
Constraint :
[1] All : shape size limit is 2147483648.
"""
from impl.util import util_compute
from te import tvm
import te.lang.cce
from te.platform.cce_conf import api_check_support
from te.platform.fusion_manager import fusion_manager
from te.utils.op_utils import check_dtype
from te.utils.op_utils import check_shape
from te.utils.op_utils import refine_shape_axes
import topi
from topi.cce import util
NUM_ONE = 1.0
NEG_NUM_ONE = -1.0
HALF_PI = 1.5707963267948966192313216916398
BOUNDARY_1 = 0.70710678118654752440084436210485
# Taylor coefficient
COEF = (1.0,
0.16666666666666666666666666666667,
0.075,
0.04464285714285714285714285714286,
0.03038194444444444444444444444444,
0.02237215909090909090909090909091,
0.01735276442307692307692307692308,
0.01396484375)
# TAYLOR COUNT
TAYLOR_COUNT = 7
def _taylor_compute(data_x, x_square=None):
    """
    do arcsinx compute use the 15th order taylor expansion when 0 <= x <= BOUNDARY_1
    asin(x) = x + 1/6*x^3 + 3/40*x^5 + 5/112*x^7 + ... + 13!!/(14!!*15)*x^15

    Parameters:
    ----------
    data_x : the placeholder of data input
    x_square : the placeholder of the square of data_x (computed here if None)

    Returns : A Tensor. Has the same type as data.
    -------
    """
    if x_square is None:
        x_square = te.lang.cce.vmul(data_x, data_x)
    # Horner-style evaluation of the series in x^2: start from the highest
    # coefficient, then repeatedly add the next coefficient and multiply by
    # x^2; the final iteration (temp == 0) multiplies by x once to restore
    # the odd powers of the arcsin expansion.
    res = te.lang.cce.vmuls(x_square, tvm.const(COEF[TAYLOR_COUNT],
                                                x_square.dtype))
    for temp in reversed(range(TAYLOR_COUNT)):
        res = te.lang.cce.vadds(res, tvm.const(COEF[temp], x_square.dtype))
        if temp == 0:
            res = te.lang.cce.vmul(res, data_x)
        else:
            res = te.lang.cce.vmul(x_square, res)
    return res
# pylint: disable=locally-disabled,too-many-arguments,unused-argument,invalid-name
@fusion_manager.register("asin")
def asin_compute(x, y, kernel_name="asin"):
"""
do element-wise asin compute
asin(x) = | arcsin(sqrt(1-x^2)) - HALF_PI, x belongs to (-1, -2^(-0.5))
| the 15th order taylor expansion, x belongs to (-2^(-0.5), 2^(-0.5))
| HALF_PI - arcsin(sqrt(1-x^2)), x belongs to (2^(-0.5), 1)
Parameters:
----------
x: the placeholder of data input
y : the dict of output
kernel_name : cce kernel name, default value is "cce_asin"
Returns : A Tensor. Has the same type as data_input.
-------
"""
shape = x.shape
dtype = x.dtype
# Change dtype to float32
if dtype == "float16" and \
api_check_support("te.lang.cce.vadd", "float32"):
x = te.lang.cce.cast_to(x, "float32")
boundary_mask1 = te.lang.cce.broadcast(tvm.const(BOUNDARY_1, \
"float32"), shape)
else:
boundary_mask1 = te.lang.cce.broadcast(tvm.const(BOUNDARY_1, \
"float16"), shape)
# Sign mask
sign = util_compute.sign(x)
# All positive
x = te.lang.cce.vmul(x, sign)
# x belongs to (0, 2^(-0.5))
choice_1 = te.lang.cce.vmin(x, boundary_mask1)
choice_1 = te.lang.cce.vsub(choice_1, boundary_mask1)
choice_1 = te.lang.cce.vmuls(te.lang.cce.floor(choice_1), NEG_NUM_ONE)
res_1 = _taylor_compute(x)
res_1 = te.lang.cce.vmul(res_1, choice_1)
# x belongs to (2^(-0.5), 1)
choice_2 = te.lang.cce.vmuls(choice_1, tvm.const(NEG_NUM_ONE, x.dtype))
choice_2 = te.lang.cce.vadds(choice_2, tvm.const(NUM_ONE, x.dtype))
res_2 = te.lang.cce.vmul(x, x)
res_2 = te.lang.cce.vmuls(res_2, tvm.const(NEG_NUM_ONE, x.dtype))
res_2 = te.lang.cce.vadds(res_2, tvm.const(NUM_ONE, x.dtype))
res_2_sqrt = te.lang.cce.vsqrt(res_2)
res_2 = _taylor_compute(res_2_sqrt, res_2)
res_2 = te.lang.cce.vmuls(res_2, tvm.const(NEG_NUM_ONE, x.dtype))
res_2 = te.lang.cce.vadds(res_2, tvm.const(HALF_PI, x.dtype))
res_2 = te.lang.cce.vmul(res_2, choice_2)
# Restore sign
res_1 = te.lang.cce.vadd(res_1, res_2)
res_1 = te.lang.cce.vmul(res_1, sign)
# Restore dtype
if dtype == "float16":
res_1 = te.lang.cce.cast_to(res_1, "float16")
return res_1
@util.check_input_type(dict, dict, str)
def asin(x, y, kernel_name="asin"):
    """
    Operator entry point: validate inputs, build the asin compute graph,
    auto-schedule it and emit the CCE kernel.

    asin(x) = | arcsin(sqrt(1-x^2)) - HALF_PI, x belongs to (-1, 2^(-0.5))
              | the 15th order taylor expansion, x belongs to (-2^(-0.5), 2^(-0.5))
              | HALF_PI - arcsin(sqrt(1-x^2)), x belongs to (2^(-0.5), 1)

    Parameters:
    ----------
    x : the dict describing the input tensor (needs "shape" and "dtype")
    y : the dict of output
    kernel_name : cce kernel name, default value is "asin"

    Returns : None
    -------
    """
    shape_input = x.get("shape")
    dtype_input = x.get("dtype")

    # Input validation: kernel name, shape limits, supported dtypes.
    util.check_kernel_name(kernel_name)
    check_shape(shape_input)
    shape_input, _ = refine_shape_axes(shape_input, [])

    check_list = ("float16", "float32")
    check_dtype(dtype_input, check_list)

    inp_dtype = dtype_input.lower()
    data_input = tvm.placeholder(shape_input, dtype=inp_dtype,
                                 name="data_input")

    res = asin_compute(data_input, y, kernel_name)

    # Auto-schedule and build the kernel binary.
    with tvm.target.cce():
        auto_sch = topi.generic.auto_schedule(res)

    config = {"name": kernel_name,
              "print_ir": False,
              "tensor_list": [data_input, res],
              "bool_storage_as_1bit": False}
    te.lang.cce.cce_build_code(auto_sch, config)
| [
"jizr@connect.hku.hk"
] | jizr@connect.hku.hk |
d054346c87be5b288789161df7aab86f6529a381 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02618/s735365678.py | 8420da1520836f214d193d40cdff980b0cf6ba93 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | day = int(input())
costlist = input().split() #int型
costs = []
for cost in costlist:
costs.append(int(cost))
satisfactions = []
for i in range(day):
satisfactions.append(input().split())
for i in range(day):
for j in range(26):
satisfactions[i][j] = int(satisfactions[i][j])
type_output = []
last_date = [0 for x in range(26)]
for d in range(day):
outtype = 1
best_score = -99999999
for i in range(26):
tmp_last_date = last_date.copy()
tmp_last_date[i] = d+1
plus = satisfactions[d][i]
minus = 0
for j in range(26):
minus += costs[j] * (d+1 - tmp_last_date[j])
current_score = plus-minus
if current_score >= best_score:
best_score = current_score
outtype = i+1
last_date[outtype-1] = d+1
type_output.append(outtype)
#print(type_output)
for i in range(day):
print(type_output[i])
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ed7c107962c8afadcbe032e3adf1ec694165e874 | e9757274ddb8484e27590ff0cc3f24550776c6cc | /Solved/0332/0332.py | b8e1940ab67ca07b91ba21270c3e0854482ab6e7 | [] | no_license | Jinmin-Goh/LeetCode | 948a9b3e77eb03507aad6f3c78640aa7f00e6ad5 | d6e80b968032b08506c5b185f66d35c6ff1f8bb9 | refs/heads/master | 2020-09-22T10:22:18.443352 | 2020-09-06T06:34:12 | 2020-09-06T06:34:12 | 225,153,497 | 1 | 1 | null | 2020-01-29T15:16:53 | 2019-12-01T11:55:25 | Python | UTF-8 | Python | false | false | 1,590 | py | # Problem No.: 332
# Solver: Jinmin Goh
# Date: 20200628
# URL: https://leetcode.com/problems/reconstruct-itinerary/
import sys
class Solution:
    def findItinerary(self, tickets: List[List[str]]) -> List[str]:
        """Rebuild the lexicographically smallest itinerary starting at JFK
        that uses every ticket exactly once (LeetCode 332).

        Strategy: sort each airport's outgoing flights, determine the
        forced final airport from a degree-parity argument, then run a
        backtracking DFS that tries destinations in lexical order.
        """
        if len(tickets) == 1:
            return tickets[0]

        # adjacency: origin -> sorted list of [destination, unused-flag].
        adjacency = {}
        # Parity trick: toggling every ticket endpoint (seeded with "JFK")
        # leaves exactly the airport where the Euler path must end.
        odd_airports = set(["JFK"])
        for src, dst in tickets:
            adjacency.setdefault(src, []).append([dst, True])
            for airport in (src, dst):
                if airport in odd_airports:
                    odd_airports.remove(airport)
                else:
                    odd_airports.add(airport)
        for flights in adjacency.values():
            flights.sort()

        final_stop = list(odd_airports)[0]
        target_len = len(tickets) + 1
        route = ["JFK"]

        def dfs(path):
            # Accept only a route that consumes every ticket and lands at
            # the parity-determined final airport.
            if len(path) == target_len and path[-1] == final_stop:
                return path
            if path[-1] not in adjacency:
                return None
            for flight in adjacency[path[-1]]:
                if not flight[1]:
                    continue
                flight[1] = False
                path.append(flight[0])
                found = dfs(path)
                if found:
                    return found
                path.pop()
                flight[1] = True
            return None

        return dfs(route)
| [
"eric970901@gmail.com"
] | eric970901@gmail.com |
ee6749ee770a3fd4f2931fe24b75704624443202 | 478e5ee3c6ae59718e6c8dfe4ed577b61e7807a8 | /webooks/scripts/zhangbook.py | d8b861b46cdb14d664b8d262b5aa9bed018ce074 | [] | no_license | sunhuang163/webooks | 538ff34f5b94a22f04734b0eddddbc23f82fcd73 | 2d1592cea8b58f2b367efcad2d69001d7b4fb735 | refs/heads/master | 2021-01-11T07:57:49.922957 | 2013-09-28T03:59:39 | 2013-09-28T03:59:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # -*- coding: utf-8 -*-
# __author__ = chenchiyuan
from __future__ import division, unicode_literals, print_function
import requests
from xml.etree import cElementTree as ET
from webooks.utils.spiders import SpiderHelper
helper = SpiderHelper()
# Genre label (Chinese) -> numeric category id on the target site.
# Note: id 6 is absent from the mapping.
categories = {
    u"玄幻·奇幻": 1,
    u"武侠·仙侠": 2,
    u"竞技·游戏": 3,
    u"科幻·军事": 4,
    u"灵异·恐怖": 5,
    u"言情·感情": 7,
    u"青春·都市": 8,
    u"耽美·唯美": 9,
    u"动漫·同人": 10,
    u"侦探·推理": 11,
    u"笑话·幽默": 12,
}
def parse_book_list():
    """Placeholder for the book-list crawler.

    Currently only fixes the category being worked on; no scraping is
    implemented yet, so the function always returns None.
    """
    category = u"玄幻·奇幻"
    category_id = 1
    return None
"chenchiyuan03@gmail.com"
] | chenchiyuan03@gmail.com |
c35366e7f4f451ac4c2be9245d09199e758419e6 | bb32566f0c4688292b8f37d29630e0b7a18be24b | /checkio/test.py | bd98cfbffc9068ea7359e5f02a91ad8efd3daedf | [] | no_license | Chencheng78/python-learning | 4c7dd3a5ad39ac2e96b9ff0a1b9aabb56e863f7f | 63eb7ee9e547f899eafa698556f6adeb518795bb | refs/heads/master | 2022-07-27T02:40:03.067820 | 2022-07-12T09:05:47 | 2022-07-12T09:05:47 | 55,224,905 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | def div2(num,size=2):
l = []
for i in range(1,num //2 +1):
l.append([i,num-i])
if size != 2:
size -=1
for i in l:
i[-1] = div2(i[-1],size)
return l
#
# def flat(l):
# for i in l:
# if len(i)==1:
# l2.append(l[i][0])
# if type(l[i][1]) == list:
# flat(l[i][1])
print(div2(20))
print(div2(10,3))
print(div2(10,4))
| [
"geniuscc7@163.com"
] | geniuscc7@163.com |
e669bdac41a9f08c4cacd7a2039a8804bd8b0c14 | d4c720f93631097ee048940d669e0859e85eabcf | /tools/rust/build_crubit.py | a4470c672d00d8733fc1fa655dadba6fee96544b | [
"BSD-3-Clause"
] | permissive | otcshare/chromium-src | 26a7372773b53b236784c51677c566dc0ad839e4 | 3b920d87437d9293f654de1f22d3ea341e7a8b55 | refs/heads/webnn | 2023-03-21T03:20:15.377034 | 2023-01-25T21:19:44 | 2023-01-25T21:19:44 | 209,262,645 | 18 | 21 | BSD-3-Clause | 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null | UTF-8 | Python | false | false | 7,807 | py | #!/usr/bin/env python3
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Builds the Crubit tool.
!!! DO NOT USE IN PRODUCTION
Builds the Crubit tool (an experiment for Rust/C++ FFI bindings generation).
This script clones the Crubit repository, checks it out to a defined revision,
and then uses Bazel to build Crubit.
'''
import argparse
import collections
import hashlib
import os
import pipes
import shutil
import stat
import string
import subprocess
import sys
from pathlib import Path
# Get variables and helpers from Clang update script
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'clang',
'scripts'))
from update import (CLANG_REVISION, CLANG_SUB_REVISION, LLVM_BUILD_DIR)
from build import (LLVM_BOOTSTRAP_INSTALL_DIR, MaybeDownloadHostGcc)
from update_rust import (CHROMIUM_DIR, CRUBIT_REVISION, THIRD_PARTY_DIR,
BUILD_MAC_ARM)
BAZEL_DIR = os.path.join(CHROMIUM_DIR, 'tools', 'bazel')
if sys.platform == 'darwin':
if BUILD_MAC_ARM or platform.machine() == 'arm64':
BAZEL_EXE = os.path.join(BAZEL_DIR, 'mac-arm64', 'bazel')
else:
BAZEL_EXE = os.path.join(BAZEL_DIR, 'mac-amd64', 'bazel')
elif sys.platform == 'win32':
BAZEL_EXE = os.path.join(BAZEL_DIR, 'windows-amd64', 'bazel.exe')
else:
BAZEL_EXE = os.path.join(BAZEL_DIR, 'linux-amd64', 'bazel')
CRUBIT_SRC_DIR = os.path.join(THIRD_PARTY_DIR, 'crubit', 'src')
def RunCommand(command, env=None, cwd=None, fail_hard=True):
    """Run *command*, echoing it first.

    Returns True on a zero exit code.  On failure, prints a notice and
    either raises RuntimeError (fail_hard=True, the default) or returns
    False.  On Windows the command is run through the shell.
    """
    print('Running', command)
    use_shell = sys.platform == 'win32'
    exit_code = subprocess.run(command, env=env, cwd=cwd,
                               shell=use_shell).returncode
    if exit_code == 0:
        return True
    print('Failed.')
    if fail_hard:
        raise RuntimeError(f"Failed to run {command}")
    return False
def CheckoutCrubit(commit, dir):
    """Checkout the Crubit repo at a certain git commit in dir. Any local
    modifications in dir will be lost.

    Tries to fast-path by reusing an existing clean clone; otherwise the
    directory is removed and re-cloned.  Exits the process on failure.
    """
    print('Checking out crubit repo %s into %s' % (commit, dir))
    # Try updating the current repo if it exists and has no local diff.
    if os.path.isdir(dir):
        os.chdir(dir)
        # git diff-index --quiet returns success when there is no diff.
        # Also check that the first commit is reachable.
        if (RunCommand(['git', 'diff-index', '--quiet', 'HEAD'],
                       fail_hard=False)
                and RunCommand(['git', 'fetch'], fail_hard=False)
                and RunCommand(['git', 'checkout', commit], fail_hard=False)):
            return
        # If we can't use the current repo, delete it.
        os.chdir(CHROMIUM_DIR)  # Can't remove dir if we're in it.
        print('Removing %s.' % dir)
        # NOTE(review): RmTree is not imported in this file -- presumably it
        # comes from the clang update scripts; verify, else this path raises
        # NameError.
        RmTree(dir)
    clone_cmd = ['git', 'clone', 'https://github.com/google/crubit.git', dir]
    if RunCommand(clone_cmd, fail_hard=False):
        os.chdir(dir)
        if RunCommand(['git', 'checkout', commit], fail_hard=False):
            return
    print('CheckoutCrubit failed.')
    sys.exit(1)
def BuildCrubit(gcc_toolchain_path):
    """Build Crubit's rs_bindings_from_cc_impl with Bazel.

    Configures the Bazel environment so Crubit links against the
    Chromium-built LLVM/Clang and, on Linux, the downloaded GCC
    toolchain's static libstdc++.
    """
    # TODO(https://crbug.com/1337346): Use locally built Rust instead of having
    # Bazel always download the whole Rust toolchain from the internet.
    # TODO(https://crbug.com/1337348): Use crates from chromium/src/third_party/rust.
    # This environment variable is consumed by crubit/bazel/llvm.bzl and will
    # configure Crubit's build to include and link against LLVM+Clang headers
    # and libraries built when building Chromium toolchain. (Instead of
    # downloading LLVM+Clang and building it during Crubit build.)
    env = {"LLVM_INSTALL_PATH": LLVM_BOOTSTRAP_INSTALL_DIR}
    # Use the compiler and linker from `LLVM_BUILD_DIR`.
    #
    # Note that we use `bin/clang` from `LLVM_BUILD_DIR`, but depend on headers
    # and libraries from `LLVM_BOOTSTRAP_INSTALL_DIR`. The former helps ensure
    # that we use the same compiler as the final one used elsewhere in Chromium.
    # The latter is needed, because the headers+libraries are not available
    # anywhere else.
    clang_path = os.path.join(LLVM_BUILD_DIR, "bin", "clang")
    env["CXX"] = f"{clang_path}++"
    env["LD"] = f"{clang_path}++"
    # CC is set via `--repo_env` rather than via `env` to ensure that we
    # override the defaults from `crubit/.bazelrc`.
    extra_args = [
        "--repo_env=CC=", # Unset/ignore the value set via crubit/.bazelrc
        f"--repo_env=CC={clang_path}",
    ]
    # Include and link against the C++ stdlib from the GCC toolchain.
    gcc_toolchain_flag = (f'--gcc-toolchain={gcc_toolchain_path}'
                          if gcc_toolchain_path else '')
    env["BAZEL_CXXOPTS"] = gcc_toolchain_flag
    env["BAZEL_LINKOPTS"] = f"{gcc_toolchain_flag}:-static-libstdc++"
    env["BAZEL_LINKLIBS"] = f"{gcc_toolchain_path}/lib64/libstdc++.a:-lm"
    # Run bazel build ...
    args = [BAZEL_EXE, "build", "rs_bindings_from_cc:rs_bindings_from_cc_impl"]
    RunCommand(args + extra_args, env=env, cwd=CRUBIT_SRC_DIR)
def InstallCrubit(install_dir):
    """Copy the built rs_bindings_from_cc binary into *install_dir*."""
    assert os.path.isdir(install_dir)
    print('Installing crubit binaries to %s' % install_dir)
    BAZEL_BIN_DIR = os.path.join(CRUBIT_SRC_DIR, "bazel-bin")
    SOURCE_PATH = os.path.join(BAZEL_BIN_DIR, "rs_bindings_from_cc",
                               "rs_bindings_from_cc_impl")
    TARGET_PATH = os.path.join(install_dir, "rs_bindings_from_cc")
    shutil.copyfile(SOURCE_PATH, TARGET_PATH)
    # Change from r-xr-xr-x to rwxrwxr-x, so that future copies will work fine.
    os.chmod(TARGET_PATH,
             stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH)
def CleanBazel():
    """Run `bazel clean --expunge` in the Crubit source tree."""
    RunCommand([BAZEL_EXE, "clean", "--expunge"], cwd=CRUBIT_SRC_DIR)
def ShutdownBazel():
    """Stop the background Bazel server so it doesn't outlive the script."""
    RunCommand([BAZEL_EXE, "shutdown"], cwd=CRUBIT_SRC_DIR)
def WritableDir(d):
    """`argparse` type-checker: resolve *d* and require a writable directory.

    Returns the realpath-resolved absolute path on success; raises
    argparse.ArgumentTypeError otherwise so argparse reports a clean
    usage error instead of a traceback.
    """
    try:
        real_d = os.path.realpath(d)
    except Exception as e:
        # Bug fix: ArgumentTypeError was previously referenced unqualified,
        # which is a NameError at runtime -- it lives in the argparse module.
        raise argparse.ArgumentTypeError(f"realpath failed: {e}")
    if not os.path.isdir(real_d):
        raise argparse.ArgumentTypeError(f"Not a directory: {d}")
    if not os.access(real_d, os.W_OK):
        raise argparse.ArgumentTypeError(f"Cannot write to: {d}")
    return real_d
def main():
    """Parse arguments, then checkout, build and optionally install Crubit.

    Returns 0 on success; helpers exit the process on hard failures.
    """
    parser = argparse.ArgumentParser(
        description='Build and package Crubit tools')
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        help='run subcommands with verbosity')
    # NOTE(review): the help text below looks copy-pasted from
    # --skip-checkout; --install-to actually names the destination directory.
    parser.add_argument(
        '--install-to',
        type=WritableDir,
        help='skip Crubit git checkout. Useful for trying local changes')
    parser.add_argument(
        '--skip-clean',
        action='store_true',
        help='skip cleanup. Useful for retrying/rebuilding local changes')
    parser.add_argument(
        '--skip-checkout',
        action='store_true',
        help='skip Crubit git checkout. Useful for trying local changes')
    args, rest = parser.parse_known_args()
    args.gcc_toolchain = None
    if sys.platform.startswith('linux'):
        # Fetch GCC package to build against same libstdc++ as Clang. This
        # function will only download it if necessary, and it will set the
        # `args.gcc_toolchain` if so.
        MaybeDownloadHostGcc(args)
    if not args.skip_checkout:
        CheckoutCrubit(CRUBIT_REVISION, CRUBIT_SRC_DIR)
    try:
        if not args.skip_clean:
            CleanBazel()
        BuildCrubit(args.gcc_toolchain)
        if args.install_to:
            InstallCrubit(args.install_to)
    finally:
        # Always stop the Bazel server, even if the build failed.
        ShutdownBazel()
    return 0
if __name__ == '__main__':
sys.exit(main())
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com |
d15029009e83d30e08cbf7f44e678d402f135d54 | 68bad4b3d92872bb5b77b4ee503e588d20511a27 | /python/scripts_inhibition/old_script/simulate_slow_wave_ZZZ33_slow_sw.py | 6c004604f872b3248e1b1c9640823ae64211d643 | [] | no_license | mickelindahl/bgmodel | 647be626a7311a8f08f3dfc897c6dd4466fc0a92 | 78e6f2b73bbcbecd0dba25caf99f835313c914ee | refs/heads/master | 2023-08-29T13:57:04.122115 | 2022-02-11T14:28:23 | 2022-02-11T14:28:23 | 17,148,386 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,035 | py | '''
Created on Aug 12, 2013
@author: lindahlm
'''
from core import monkey_patch as mp
mp.patch_for_milner()
from scripts_inhibition.base_simulate import (get_path_rate_runs,
get_path_logs, get_args_list_oscillation,
get_kwargs_list_indv_nets,
par_process_and_thread,
pert_set_data_path_to_milner_on_supermicro,
pert_add_oscillations)
from core.network import default_params
from core.network.manager import Builder_slow_wave2 as Builder
from core.parallel_excecution import loop
import numpy
import scripts_inhibition.base_oscillation_sw as module
import sys
import oscillation_perturbations33_slow_sw as op
import pprint
pp=pprint.pprint
path_rate_runs=get_path_rate_runs('simulate_inhibition_ZZZ33_slow_sw/')
FILE_NAME=__file__.split('/')[-1][0:-3]
FROM_DISK_0=int(sys.argv[1]) if len(sys.argv)>1 else 2
LOAD_MILNER_ON_SUPERMICRO=False
NUM_NETS=2
amp_base=numpy.arange(.95, 1.1, 0.05)
freqs=numpy.arange(0.1, 1.0, 0.3)
# amp_base=[1.0] #numpy.arange(0.7, 1.1, 0.05)
# freqs=numpy.arange(0.1, 1.0, 0.1)
n=len(amp_base)
m=len(freqs)
amp_base=list(numpy.array([m*[v] for v in amp_base]).ravel())
freqs=list(freqs)*n
num_runs=len(freqs)*len(op.get())
num_sims=NUM_NETS*num_runs
kwargs={
'amp_base':amp_base,
'Builder':Builder,
'cores_milner':40*1,
'cores_superm':40,
'debug':False,
# 'database_save':True,
'do_runs':range(num_runs), #A run for each perturbation
'do_obj':False,
'external_input_mod':['EA', 'EI'],
'file_name':FILE_NAME,
'freqs':freqs,
'freq_oscillation':1.,
'from_disk_0':FROM_DISK_0,
'i0':FROM_DISK_0,
'job_name':'sw_ZZZ33_slow_sw',
'l_hours': ['00','01','00'],
'l_minutes':['45','00','05'],
'l_seconds':['00','00','00'],
'local_threads_milner':10,
'local_threads_superm':4,
'module':module,
'nets':['Net_'+str(i) for i in range(2)], #The nets for each run
'path_code':default_params.HOME_CODE,
'path_rate_runs':path_rate_runs,
'path_results':get_path_logs(LOAD_MILNER_ON_SUPERMICRO,
FILE_NAME),
'perturbation_list':op.get(),
'sim_time':40000.0,
'size':20000.0,
}
d_process_and_thread=par_process_and_thread(**kwargs)
pp(d_process_and_thread)
kwargs.update(d_process_and_thread)
pp(kwargs)
p_list = pert_add_oscillations(**kwargs)
p_list = pert_set_data_path_to_milner_on_supermicro(p_list,
LOAD_MILNER_ON_SUPERMICRO)
for i, p in enumerate(p_list): print i, p
a_list=get_args_list_oscillation(p_list, **kwargs)
k_list=get_kwargs_list_indv_nets(len(p_list), kwargs)
print 'from disk', FROM_DISK_0
loop(20,[num_sims,num_sims,num_sims/2], a_list, k_list )
| [
"mickelindahl@gmail.com"
] | mickelindahl@gmail.com |
fd3b6bcd15d022a5ab129d6512ca3262d9b18dfc | 81313cbd75bbd29cd48632d3cfc6b84884884650 | /LimitHelpers/ModelIndependent/ModelIndependentExcl_sr_ht_1100_mbl_400_all_unblind.py | 4e42c3752592292f08417118c98a9eac1c4eac94 | [] | no_license | UPenn-SUSY/PennSUSYFrame | ee93fd299e4e36ebc74e0065db0740451309682a | 41303b163dbc05451b22c19b00b436cc25440cf6 | refs/heads/master | 2019-01-19T10:28:47.417027 | 2015-05-08T15:07:24 | 2015-05-08T15:07:24 | 13,934,522 | 2 | 0 | null | 2015-05-08T15:07:24 | 2013-10-28T18:23:35 | C++ | UTF-8 | Python | false | false | 165 | py | test_sr = 'sr_ht_1100_mbl_400'
flavor_channel = 'all'
is_blind = False
execfile('/'.join([os.environ['BASE_WORK_DIR'], 'LimitHelpers', 'ModelIndependentExcl.py']))
| [
"bjack3@gmail.com"
] | bjack3@gmail.com |
1a42507992db088bfcb8a7e98e127f1a23b03cad | 2d6d256c88cfc984de80431263eb3e872d00a96c | /glue/viewers/image/tests/test_composite_array.py | a9e45d3450daf12b8d135d7e346191bc33833bfa | [
"BSD-3-Clause"
] | permissive | shreyasbapat/glue | c37b06c9d52cc7fd9b82b0bb0502ca52f63e957a | 06ad71cd80b2ed1ee85c02819461a727032f0f72 | refs/heads/master | 2020-04-11T04:35:37.095942 | 2018-11-28T07:55:15 | 2018-11-28T07:55:15 | 161,517,892 | 1 | 0 | null | 2018-12-12T16:49:24 | 2018-12-12T16:49:24 | null | UTF-8 | Python | false | false | 5,053 | py | from __future__ import absolute_import, division, print_function
from mock import MagicMock
import numpy as np
from numpy.testing import assert_allclose
from matplotlib.pyplot import cm
from ..composite_array import CompositeArray
class TestCompositeArray(object):
def setup_method(self, method):
self.array1 = np.array([[2.0, 1.0], [0.0, 0.0]])
self.array2 = np.array([[np.nan, 1.0], [0.0, 0.0]])
self.array3 = np.array([[0.0, 0.0], [1.0, 0.0]])
self.array4 = np.array([[0.0, 0.0], [0.0, 1.0]])
self.composite = CompositeArray()
def test_shape_size_ndim_dtype(self):
assert self.composite.shape is None
assert self.composite.size is None
assert self.composite.ndim == 2 # for now, this is hard-coded
assert self.composite.dtype is np.float # for now, this is hard-coded
self.composite.allocate('a')
self.composite.set('a', array=self.array1)
assert self.composite.shape == (2, 2)
assert self.composite.size == 4
assert self.composite.ndim == 2
assert self.composite.dtype is np.float
def test_shape_function(self):
array_func = MagicMock()
array_func.return_value = None
assert self.composite.shape is None
self.composite.allocate('a')
self.composite.set('a', array=array_func)
assert self.composite.shape is None
array_func.return_value = self.array1
assert self.composite.shape == (2, 2)
def test_cmap_blending(self):
self.composite.allocate('a')
self.composite.allocate('b')
self.composite.set('a', zorder=0, visible=True, array=self.array1,
color=cm.Blues, clim=(0, 2))
self.composite.set('b', zorder=1, visible=True, array=self.array2,
color=cm.Reds, clim=(0, 1))
# Determine expected result for each layer individually in the absence
# of transparency
expected_a = np.array([[cm.Blues(1.), cm.Blues(0.5)],
[cm.Blues(0.), cm.Blues(0.)]])
expected_b = np.array([[cm.Reds(0.), cm.Reds(1.)],
[cm.Reds(0.), cm.Reds(0.)]])
# If both layers have alpha=1, the top layer should be the only one visible
assert_allclose(self.composite[...], expected_b)
# If the top layer has alpha=0, the bottom layer should be the only one visible
self.composite.set('b', alpha=0.)
assert_allclose(self.composite[...], expected_a)
# If the top layer has alpha=0.5, the result should be an equal blend of each
self.composite.set('b', alpha=0.5)
assert_allclose(self.composite[...], 0.5 * (expected_b + expected_a))
def test_color_blending(self):
self.composite.allocate('a')
self.composite.allocate('b')
self.composite.set('a', zorder=0, visible=True, array=self.array1,
color=(0, 0, 1, 1), clim=(0, 2))
self.composite.set('b', zorder=1, visible=True, array=self.array2,
color=(1, 0, 0, 1), clim=(0, 1))
# Determine expected result for each layer individually in the absence
# of transparency
expected_a = np.array([[(0, 0, 1, 1), (0, 0, 0.5, 1)],
[(0, 0, 0, 1), (0, 0, 0, 1)]])
expected_b = np.array([[(0, 0, 0, 1), (1, 0, 0, 1)],
[(0, 0, 0, 1), (0, 0, 0, 1)]])
# In this mode, the zorder shouldn't matter, and if both layers have
# alpha=1, we should see a normal blend of the colors
assert_allclose(self.composite[...], np.maximum(expected_a, expected_b))
# If the top layer has alpha=0, the bottom layer should be the only one visible
self.composite.set('b', alpha=0.)
assert_allclose(self.composite[...], expected_a)
# If the top layer has alpha=0.5, the result should have
self.composite.set('b', alpha=0.5)
assert_allclose(self.composite[...], np.maximum(expected_a, expected_b * 0.5))
def test_deallocate(self):
self.composite.allocate('a')
self.composite.set('a', array=self.array1, color='1.0', clim=(0, 2))
assert self.composite.shape == (2, 2)
expected = np.ones(4) * self.array1[:, :, np.newaxis] / 2.
expected[:, :, 3] = 1
assert_allclose(self.composite[...], expected)
self.composite.deallocate('a')
assert self.composite.shape is None
assert self.composite[...] is None
def test_getitem_noactive(self):
# Regression test for a bug that caused __getitem__ to return an array
# with the wrong size if a view was used and no layers were active.
array = np.random.random((100, 100))
self.composite.allocate('a')
self.composite.set('a', array=array, visible=False)
assert self.composite[:].shape == (100, 100, 4)
assert self.composite[10:90, ::10].shape == (80, 10, 4)
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
9540e4321dc2f728b30a87c2029f118b4d5f9391 | a56a74b362b9263289aad96098bd0f7d798570a2 | /venv/lib/python3.8/site-packages/prompt_toolkit/output/conemu.py | c74e2a037823a2484edbd16e8977ffa785009087 | [
"MIT"
] | permissive | yoonkt200/ml-theory-python | 5812d06841d30e1068f6592b5730a40e87801313 | 7643136230fd4f291b6e3dbf9fa562c3737901a2 | refs/heads/master | 2022-12-21T14:53:21.624453 | 2021-02-02T09:33:07 | 2021-02-02T09:33:07 | 132,319,537 | 13 | 14 | MIT | 2022-12-19T17:23:57 | 2018-05-06T08:17:45 | Python | UTF-8 | Python | false | false | 1,493 | py | from typing import Any, TextIO
from prompt_toolkit.data_structures import Size
from prompt_toolkit.renderer import Output
from .vt100 import Vt100_Output
from .win32 import Win32Output
__all__ = [
"ConEmuOutput",
]
class ConEmuOutput:
"""
ConEmu (Windows) output abstraction.
ConEmu is a Windows console application, but it also supports ANSI escape
sequences. This output class is actually a proxy to both `Win32Output` and
`Vt100_Output`. It uses `Win32Output` for console sizing and scrolling, but
all cursor movements and scrolling happens through the `Vt100_Output`.
This way, we can have 256 colors in ConEmu and Cmder. Rendering will be
even a little faster as well.
http://conemu.github.io/
http://gooseberrycreative.com/cmder/
"""
def __init__(self, stdout: TextIO) -> None:
self.win32_output = Win32Output(stdout)
self.vt100_output = Vt100_Output(stdout, lambda: Size(0, 0))
def __getattr__(self, name: str) -> Any:
if name in (
"get_size",
"get_rows_below_cursor_position",
"enable_mouse_support",
"disable_mouse_support",
"scroll_buffer_to_prompt",
"get_win32_screen_buffer_info",
"enable_bracketed_paste",
"disable_bracketed_paste",
):
return getattr(self.win32_output, name)
else:
return getattr(self.vt100_output, name)
Output.register(ConEmuOutput)
| [
"kitae.yoon@deliveryhero.co.kr"
] | kitae.yoon@deliveryhero.co.kr |
be8f0664dab21383d31ea2513b147caf6229a26f | 1d1c1de4cbaecb647a723221344f47e7704c9b6f | /upload/file_handler.py | 658018c697ac93567641a4a7a1769597e8ecb86b | [] | no_license | vprusso/waveproject | 2a1228b9f11d8b656c07519a2e874a1601601369 | b72d8915553929fe0e3eecd517a7f65d0d622746 | refs/heads/master | 2021-06-15T11:06:20.157860 | 2016-12-05T22:42:01 | 2016-12-05T22:42:01 | 73,842,382 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,353 | py | import csv
import collections
import os
from .models import Document, DocumentEntry, MonthlyExpenditure
from .utils import SanitizeInput, CurrencyHelper
def write_csv_file(path, csv_file):
fout = open(path, 'wb')
for chunk in csv_file.chunks():
fout.write(chunk)
fout.close()
def save_file_content_to_database(csv_file):
path = os.getcwd() + r'/media/' + csv_file.name
write_csv_file(path, csv_file)
newdoc = Document(docfile=path, name=csv_file.name)
newdoc.save()
sanitize = SanitizeInput()
reader = csv.DictReader(open(path))
for row in reader:
date = sanitize.sanitize_date_format(row['date'])
category = row['category']
employee_name = row['employee name']
employee_address = row['employee address']
expense_description = row['expense description']
pre_tax_amount = sanitize.sanitize_float_format(row['pre-tax amount'])
tax_name = row['tax name']
tax_amount = sanitize.sanitize_float_format(row['tax amount'])
entry = DocumentEntry(
document=newdoc, date=date,
category=category, employee_name=employee_name,
employee_address=employee_address,
expense_description=expense_description,
pre_tax_amount=pre_tax_amount, tax_name=tax_name,
tax_amount=tax_amount
)
entry.save()
monthly_expenses = calculate_total_expenses_per_month(newdoc.id)
save_total_monthly_expenses_to_database(monthly_expenses, newdoc)
def calculate_total_expenses_per_month(document_id):
"""
For a given document, we process each row and sum the
tax amount and pre-tax amount to obtain a total amount per row.
The object returned is a dict where the first entry is the year
and month, and the second is the relative total amount.
"""
currency_helper = CurrencyHelper()
year_month_dict = {}
for instance in DocumentEntry.objects.all().filter(document=document_id):
year_month = str(instance.date.year) +\
"-" + str('%02d' % instance.date.month)
if year_month not in year_month_dict:
year_month_dict[year_month] = \
instance.tax_amount + instance.pre_tax_amount
else:
year_month_dict[year_month] += \
instance.tax_amount + instance.pre_tax_amount
# The costs are stored in terms of cents instead of
# dollars to eliminate floating point accuracy issues.
# We then need to convert back to dollars.
year_month_dict = {
k: currency_helper.cents_to_dollars(v)
for k, v in year_month_dict.items()
}
# Order in reverse chronological order
year_month_dict = collections.OrderedDict(sorted(year_month_dict.items()))
return year_month_dict
def save_total_monthly_expenses_to_database(monthly_expenses, document_object):
"""
Given a specific uploaded document, calculates and stores the expenses
for each month.
"""
for key, value in monthly_expenses.items():
year_month_split = key.split("-")
year = year_month_split[0]
month = year_month_split[1]
expense = MonthlyExpenditure(
document=document_object,
month=month, year=year,
monthly_expenditure=value
)
expense.save()
| [
"vincentrusso1@gmail.com"
] | vincentrusso1@gmail.com |
71bc10c3ee710d1f537805297fb35fc28ac61000 | 3c6c0c35b5d1b26bbb8f07e90848a30a412e203f | /test173_3.py | 61400530cd973ee5790ffd60a43dae2e4e02ad0c | [] | no_license | MikhailRyskin/test1 | 3a4788ab229801c3210908dedf589eb3c9393788 | 78ef44d50249fe0db63bd9f6c8ddb5ee0a309214 | refs/heads/master | 2023-06-13T07:40:37.556943 | 2021-07-10T14:15:47 | 2021-07-10T14:15:47 | 346,758,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | import random
team_1 = [random.randint(50, 80) for _ in range(10)]
team_2 = [random.randint(30, 60) for _ in range(10)]
our_team = ['Погиб' if team_1[i] + team_2[i] > 100 else 'Выжил' for i in range(10)]
print('Урон первого отряда: ', team_1)
print('Урон второго отряда: ', team_2)
print('Состояние третьего отряда:', our_team)
| [
"rymv@yandex.ru"
] | rymv@yandex.ru |
24a66fa605d58fe106e9eaef551d4f1ac1d2cd10 | 4626631c5e68a13ed4dde041212da39d344d74d9 | /examples/scripts/clear-alerts.py | c040de1a1849f852cb538495a4eb0ba4b8079608 | [
"MIT"
] | permissive | xod442/python-hpOneView | a1482677e3252dabf1e14f9349c119428331089f | b78fb81cba34992bb84ed3814aae04ce05ef913f | refs/heads/master | 2021-01-18T05:53:42.466348 | 2015-08-11T15:59:16 | 2015-08-11T15:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | #!/usr/bin/env python3
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import sys
import re
if sys.version_info < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with givin credentials
try:
con.login(credential)
except:
print('Login failed')
def clear_all_alerts(act):
alerts = act.get_alerts('Active')
for alert in alerts:
print('Clearing alert: ', alert['description'])
alertMap = hpov.common.make_alertMap_dict('', alert['eTag'],
state='Cleared')
act.update_alert(alert, alertMap)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Clear all Active alerts
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format''')
args = parser.parse_args()
credential = {'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
act= hpov.activity(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
clear_all_alerts(act)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| [
"troy@debdev.org"
] | troy@debdev.org |
0e44e38cdf5170ac441004d596be1f6bb458b75f | 2091dc754d0346a345d84dce32177a4d6aa2097b | /Payload_Type/Apollo/mythic/agent_functions/assembly_inject.py | 8428a3175a08723a697dcfd7d9eae405c9706178 | [
"BSD-3-Clause",
"MIT"
] | permissive | dycsy/Apollo | 132d5d5f98ae2951e6c58df796be1dfbc495c03f | 6ec815cbb87379b48c12d2108e6dd669ce5ce37e | refs/heads/master | 2023-04-21T07:30:38.551661 | 2021-04-22T19:53:13 | 2021-04-22T19:53:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,360 | py | from CommandBase import *
import json
from uuid import uuid4
from os import path
from sRDI import ShellcodeRDI
from MythicFileRPC import *
class AssemblyInjectArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {
"pid": CommandParameter(name="PID", type=ParameterType.Number, description="Process ID to inject into."),
"arch": CommandParameter(name="Process Architecture", type=ParameterType.String, choices=["x86", "x64"], description="Architecture of the remote process."),
"assembly_name": CommandParameter(name="Assembly Name", type=ParameterType.String, description="Name of the assembly to execute."),
"assembly_arguments": CommandParameter(name="Assembly Arguments", type=ParameterType.String, description="Arguments to pass to the assembly."),
}
async def parse_arguments(self):
if self.command_line == 0:
raise self.invalidNumberArgs
parts = self.command_line.split(" ", maxsplit=3)
if len(parts) < 3:
raise Exception("Invalid number of arguments.\n\tUsage: {}".format(AssemblyInjectCommand.help_cmd))
pid = parts[0]
arch = parts[1]
assembly_name = parts[2]
assembly_args = ""
valid_arch = ["x86", "x64"]
if len(parts) == 4:
assembly_args = parts[3]
if arch not in valid_arch:
arches = ", ".join(valid_arch)
raise Exception(f"Invalid arch of \"{arch}\" specified. Must be one of {arches}")
self.args["pid"].value = pid
self.args["arch"].value = arch
self.args["assembly_name"].value = assembly_name
self.args["assembly_arguments"].value = assembly_args
pass
class AssemblyInjectCommand(CommandBase):
cmd = "assembly_inject"
needs_admin = False
help_cmd = "assembly_inject [pid] [x64|x86] [assembly] [args]"
description = "Inject the unmanaged assembly loader into a remote process. The loader will then execute the .NET binary in the context of the injected process."
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_upload_file = False
is_remove_file = False
author = "@djhohnstein"
argument_class = AssemblyInjectArguments
browser_script = BrowserScript(script_name="unmanaged_injection", author="@djhohnstein")
attackmapping = []
async def create_tasking(self, task: MythicTask) -> MythicTask:
arch = task.args.get_arg("arch")
pipe_name = str(uuid4())
dllFile = path.join(self.agent_code_path, f"AssemblyLoader_{arch}.dll")
dllBytes = open(dllFile, 'rb').read()
converted_dll = ShellcodeRDI.ConvertToShellcode(dllBytes, ShellcodeRDI.HashFunctionName("InitializeNamedPipeServer"), pipe_name.encode(), 0)
task.args.add_arg("pipe_name", pipe_name)
resp = await MythicFileRPC(task).register_file(converted_dll)
if resp.status == MythicStatus.Success:
task.args.add_arg("loader_stub_id", resp.agent_file_id)
else:
raise Exception(f"Failed to host sRDI loader stub: {resp.error_message}")
task.args.remove_arg("arch")
return task
async def process_response(self, response: AgentResponse):
pass | [
"djhohnstein@gmail.com"
] | djhohnstein@gmail.com |
d22335bad1dfe797d48ffc83e724c870a8b20d86 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/Rhino/Geometry/__init___parts/ExtrudeCornerType.py | 3aef1744b150326694e54571f7c4f9a737d59b82 | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | class ExtrudeCornerType(Enum,IComparable,IFormattable,IConvertible):
"""
Corner types used for creating a tapered extrusion
enum ExtrudeCornerType,values: Chamfer (4),None (0),Round (2),Sharp (1),Smooth (3)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Chamfer=None
None=None
Round=None
Sharp=None
Smooth=None
value__=None
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
478d5ffdfd3fc7379408c66075233d070c3d51e0 | 1f41b828fb652795482cdeaac1a877e2f19c252a | /maya_tools_backup/3dGroupTools/python/modeling/subTagging/view.py | 734ea99a9c5495e60cd1f7dc1c0d6773d847bf85 | [] | no_license | jonntd/mayadev-1 | e315efe582ea433dcf18d7f1e900920f5590b293 | f76aeecb592df766d05a4e10fa2c2496f0310ca4 | refs/heads/master | 2021-05-02T07:16:17.941007 | 2018-02-05T03:55:12 | 2018-02-05T03:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,081 | py | #encoding=utf-8
import maya.cmds as cmds
class SubdivisionTagging():
def __init__( self ):
self.windowName = 'SubdivisionTaggingWindow'
self.tagAttrName = 'smoothOn_mark'
def getSelectedMesh( self ):
meshs = []
selected = cmds.ls( sl = True )
for select in selected:
mesh = cmds.listRelatives( select, s = True )[0]
meshs.append( mesh )
return meshs
def tagOn( self, *args ):
meshs = self.getSelectedMesh()
for mesh in meshs:
attrExist = cmds.attributeQuery( self.tagAttrName, n = mesh, ex = True )
if not attrExist:
cmds.addAttr( mesh, ln = self.tagAttrName, at = 'byte', k = True )
cmds.setAttr( (mesh + '.'+self.tagAttrName), 0 )
def tagOff( self, *args ):
meshs = self.getSelectedMesh()
for mesh in meshs:
attrExist = cmds.attributeQuery( self.tagAttrName, n = mesh, ex = True )
if attrExist:
cmds.deleteAttr( mesh, at = self.tagAttrName )
# ????????attr
attrExist = cmds.attributeQuery( 'objectName', n = mesh, ex = True )
if attrExist:
cmds.deleteAttr( mesh, at = 'objectName' )
def tagOnSelect( self, *args ):
select = []
meshs = cmds.ls( typ = 'mesh' )
for mesh in meshs:
attrExist = cmds.attributeQuery( self.tagAttrName, n = mesh, ex = True )
if attrExist:
if cmds.getAttr( mesh + '.'+self.tagAttrName ) == 0:
select.append( mesh )
if select:
cmds.select( select, r = True )
def tagClear( self, *args ):
meshs = cmds.ls( typ = 'mesh' )
for mesh in meshs:
attrExist = cmds.attributeQuery( self.tagAttrName, n = mesh, ex = True )
if attrExist:
cmds.deleteAttr( mesh, at = self.tagAttrName )
# ????????attr
attrExist = cmds.attributeQuery( 'objectName', n = mesh, ex = True )
if attrExist:
cmds.deleteAttr( mesh, at = 'objectName' )
def gui( self ):
if cmds.window( self.windowName, q = True, ex = True ):
cmds.deleteUI( self.windowName )
cmds.window( self.windowName, t = 'Smooth Tagging' )
cmds.columnLayout()
cmds.separator( st = 'none', h = 2 )
cmds.rowLayout( nc = 2, cw2 = (170, 150), cl2 = ('center', 'center') )
cmds.button( l = 'Tag On Selected', c = self.tagOn , w = 150, h = 30 )
cmds.button( l = 'Tag Off Selected', c = self.tagOff, w = 150, h = 30 )
cmds.setParent( '..' )
cmds.separator( w = 330, h = 10 )
cmds.button( l = 'Select Tag On', c = self.tagOnSelect, w = 320, h = 30 )
cmds.separator( w = 330, h = 10 )
cmds.button( l = 'Clear Tag All', w = 320, h = 30, c = self.tagClear )
cmds.showWindow()
#cmds.window( self.windowName, e = True, wh = (330, 100) )
def showUI():
tag = SubdivisionTagging()
tag.gui()
| [
"kimsung9k@naver.com"
] | kimsung9k@naver.com |
9041bdac32ce8409d2303facd7d0f9ea74667b15 | 3d8e9d2ba283ebb6ed9442439539e2cd8112ceb2 | /snippets/urls.py | cc3c173d1ee7e331f6fb97e127ca5981708eaa68 | [] | no_license | kamal0072/Django-based-Api2 | 15a78faedd754a04f07ffc5dc94dff9942180784 | ce52bee27795e1d6df6bb456df4479b8994f8faf | refs/heads/master | 2023-06-02T01:59:11.918978 | 2021-06-14T18:09:20 | 2021-06-14T18:09:20 | 376,915,073 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | from django.urls import path
from . import views
from rest_framework.urlpatterns import format_suffix_patterns
from snippets.views import SnippetViewSet, UserViewSet, api_root
from rest_framework import renderers
snippet_list = SnippetViewSet.as_view({
'get': 'list',
'post': 'create'
})
snippet_detail = SnippetViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
})
snippet_highlight = SnippetViewSet.as_view({
'get': 'highlight'
}, renderer_classes=[renderers.StaticHTMLRenderer])
user_list = UserViewSet.as_view({
'get': 'list'
})
user_detail = UserViewSet.as_view({
'get': 'retrieve'
})
urlpatterns = format_suffix_patterns([
path('', api_root),
path('snippets/', snippet_list, name='snippet-list'),
path('snippets/<int:pk>/', snippet_detail, name='snippet-detail'),
path('snippets/<int:pk>/highlight/', snippet_highlight, name='snippet-highlight'),
path('users/', user_list, name='user-list'),
path('users/<int:pk>/', user_detail, name='user-detail')
]) | [
"hasan.kamaal0072@gmail.com"
] | hasan.kamaal0072@gmail.com |
82c4f1ac4db277c46ae34d0b2290b084d066788a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03634/s202269731.py | 9d6796240df4ea7fdde9a0928d354736efbfd285 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | import heapq
n = int(input())
g = [[] for _ in range(n)]
for i in range(n-1):
a, b, c = map(int, input().split())
g[a-1].append((b-1, c))
g[b-1].append((a-1, c))
q, k = map(int, input().split())
dis = [float("inf")] * n
dis[k-1] = 0
pq = [(0, k-1)]
while pq:
d, node = heapq.heappop(pq)
if d > dis[node]: continue
for nxt, cost in g[node]:
if d + cost < dis[nxt]:
dis[nxt] = d + cost
heapq.heappush(pq, (dis[nxt], nxt))
ans = []
for i in range(q):
x, y = map(int, input().split())
ans.append(dis[x-1]+dis[y-1])
print("\n".join(map(str, ans)))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c7722e7398c1e9e47e50cdab5aa3bb78a7f93fe9 | 13377cb2618d2f5c64bc224ccaf4cbf898d5a136 | /core/digitDetect.py | 4bcc43c0ad045c03d5a7181aaadb2c802d3fbba7 | [] | no_license | GachaSalt/Arknights-Drop-Recognition | 1af8607392e34d188e40efd2d9201f6389dc108b | 44440d9e44f3beb07993e7d73cef685ddf3e3229 | refs/heads/master | 2022-07-09T11:19:56.739770 | 2019-12-28T05:25:47 | 2019-12-28T05:25:47 | 205,408,184 | 19 | 2 | null | 2022-06-21T22:37:40 | 2019-08-30T15:24:01 | Jupyter Notebook | UTF-8 | Python | false | false | 2,568 | py | import cv2
import numpy as np
from .DATA import expDigitSample, lvDigitSample, stageDigitSample, itemDigitSample
from .DATA import sample_size
# from .fileHandle import *
from .ssim import Thumbnail
# expDigitSample = load_data(r'.\\DATA\\expDigitSample.dat')
# lvDigitSample = load_data(r'.\\DATA\\lvDigitSample.dat')
# stageDigitSample = load_data(r'.\\DATA\\stageDigitSample.dat')
# itemDigitSample = load_data(r'.\\DATA\\itemDigitSample.dat')
# def __reset_sample(sample, path):
# for key in sample:
# sample[key].reset()
# save_data(sample, path)
#
# __reset_sample(expDigitSample, r'.\\DATA\\expDigitSample.dat')
# __reset_sample(lvDigitSample, r'.\\DATA\\lvDigitSample.dat')
# __reset_sample(stageDigitSample, r'.\\DATA\\stageDigitSample.dat')
# __reset_sample(itemDigitSample, r'.\\DATA\\itemDigitSample.dat')
def __initial_sample(sample):
for key in sample:
sample[key].initial()
__initial_sample(expDigitSample)
__initial_sample(lvDigitSample)
__initial_sample(stageDigitSample)
__initial_sample(itemDigitSample)
# print('Digits sample initialed.')
digit_list = [str(i) for i in range(10)]
def __which_digit(img, sample):
temp = [img - sample[i] for i in digit_list]
result = np.argmax(temp)
confidence = temp[result]
return digit_list[result], confidence
def which_digit(raw, fast=False, t=0.8):
if type(raw) is not Thumbnail:
if raw.shape[0] != sample_size or raw.shape[1] != sample_size:
img = Thumbnail(cv2.resize(raw, (sample_size, sample_size)))
else:
img = Thumbnail(raw)
else:
img = raw
a1, s1 = __which_digit(img, expDigitSample)
if fast and s1 > t:
return a1, s1
a2, s2 = __which_digit(img, lvDigitSample)
if fast and s2 > t:
return a2, s2
a3, s3 = __which_digit(img, stageDigitSample)
if fast and s3 > t:
return digit_list[a3], s3
a4, s4 = __which_digit(img, itemDigitSample)
a = [a1, a2, a3, a4]
s = [s1, s2, s3, s4]
result = np.argmax(s)
return a[result], s[result]
stage_digit_list = list(stageDigitSample.keys())
def which_digit_stage(raw):
if type(raw) is not Thumbnail:
if raw.shape[0] != sample_size or raw.shape[1] != sample_size:
img = Thumbnail(cv2.resize(raw, (sample_size, sample_size)))
else:
img = Thumbnail(raw)
else:
img = raw
temp1 = [img - stageDigitSample[i] for i in stage_digit_list]
a1 = np.argmax(temp1)
s1 = temp1[a1]
return stage_digit_list[a1], s1
| [
"email@example.com"
] | email@example.com |
faeb371583a241c0902fb9f98104d0e4da9e36e0 | 519aa4942b6eb6663811dd2a050f498c8d3e0f95 | /Python 2.X/ZERO/CGI/Programs/Calendar Maker/htbin/z_html.py | cd8acd88569020bdf489f1cea20d481a11f1f856 | [] | no_license | jacobbridges/my-chaos | 2b5aab5dcac703b268f03efb07fc54e9d4984f29 | 45837fc39f99b5f7f69919ed2f6732e6b7bec936 | refs/heads/master | 2020-05-20T03:21:32.747460 | 2016-08-13T02:12:25 | 2016-08-13T02:12:25 | 29,456,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,205 | py | '''Support module for CGI applications.
This modules provides access to the HTML_Table and HTML_Month
classes which provide abstractions to the HTML code involved.'''
__version__ = 1.1
################################################################################
import calendar
import z_matrix
class HTML_Table:
'HTML_Table(rows, columns, indent_level, indent_style) -> new HTML_Table'
def __init__(self, rows, columns, indent_level, indent_style):
'x.__init__(...) initializes x'
self.__matrix = z_matrix.Matrix(rows, columns, '')
self.__special = z_matrix.Matrix(rows, columns, False)
self.__indent_level = indent_level
self.__indent_style = indent_style
self.__table_attributes = ''
self.__row_attributes = ''
self.__cell_attributes = ''
def mutate(self, row, column, text):
'Mutates a cell in the HTML table.'
assert type(text) is str
self.__matrix[row][column] = text
return self
def access(self, row, column):
'Accesses a cell in the HTML table.'
return self.__matrix[row][column]
def special(self, row, column, special):
self.__special[row][column] = special
def set_table(self, **attributes):
'Sets the attributes for the table.'
self.__table_attributes = self.__parse(attributes)
return self
def set_row(self, **attributes):
'Sets the attributes for each row.'
self.__row_attributes = self.__parse(attributes)
return self
def set_cell(self, **attributes):
'Sets the attributes for each cell.'
self.__cell_attributes = self.__parse(attributes)
return self
def __parse(self, attributes):
'Parses the attributes into a string.'
return ''.join([' %s="%s"' % (key, attributes[key]) for key in sorted(attributes)])
def html(self):
'Returns the HTML code for the current table.'
html = self.__indent_style * self.__indent_level + '<table' + self.__table_attributes + '>\n'
for row, s_row in zip(self.__matrix, self.__special):
html += self.__indent_style * (self.__indent_level + 1) + '<tr' + self.__row_attributes + '>\n'
for cell, special in zip(row, s_row):
html += self.__indent_style * (self.__indent_level + 2) + '<td' + self.__cell_attributes + '>\n'
if special:
html += cell + '\n'
else:
html += ''.join([self.__indent_style * (self.__indent_level + 3) + line + '\n' for line in cell.splitlines()])
html += self.__indent_style * (self.__indent_level + 2) + '</td>\n'
html += self.__indent_style * (self.__indent_level + 1) + '</tr>\n'
return html + self.__indent_style * self.__indent_level + '</table>'
class HTML_Month:
'HTML_Month(month, year, indent_level, indent_style) -> new HTML_Month'
def __init__(self, month, year, indent_level, indent_style):
'x.__init__(...) initializes x'
calendar.setfirstweekday(calendar.SUNDAY)
matrix = calendar.monthcalendar(year, month)
self.__table = HTML_Table(len(matrix) + 1, 7, indent_level, indent_style)
for column, text in enumerate(calendar.day_name[-1:] + calendar.day_name[:-1]):
self.__table.mutate(0, column, '<b>%s</b>' % text)
for row, week in enumerate(matrix):
for column, day in enumerate(week):
if day:
self.__table.mutate(row + 1, column, '<b>%02d</b>\n<hr>\n' % day)
self.__weekday, self.__alldays = calendar.monthrange(year, month)
self.__weekday = ((self.__weekday + 1) % 7) + 6
def mutate(self, day, text):
'Mutates a day in the HTML month.'
row, column = self.__row_column(day)
self.__table.mutate(row, column, '<b>%02d</b>\n<hr>\n%s' % (day, text))
return self
def access(self, day):
'Accesses a day in the HTML month.'
row, column = self.__row_column(day)
return self.__table.access(row, column)[15:]
def special(self, day, special):
row, column = self.__row_column(day)
self.__table.special(row, column, special)
def __row_column(self, day):
'Calculates the row and column of day.'
assert 1 <= day <= self.__alldays
index = day + self.__weekday
return index / 7, index % 7
def set_month(self, **attributes):
'Set the attributes for the month.'
self.__table.set_table(**attributes)
return self
def set_week(self, **attributes):
'Set the attributes for each week.'
self.__table.set_row(**attributes)
return self
def set_day(self, **attributes):
'Set the attributes for each day.'
self.__table.set_cell(**attributes)
return self
def html(self):
'Returns the HTML code for the current month.'
return self.__table.html()
################################################################################
if __name__ == '__main__':
import sys
print 'Content-Type: text/plain'
print
print file(sys.argv[0]).read()
| [
"him@jacobandkate143.com"
] | him@jacobandkate143.com |
a834a8d9c23a2ae067fe536f2ad84db042456538 | 8737c8e25279bd1df95e939244a6ee1bc6be33f7 | /webapp/python/isucon.py | a5a6c32fde47939cc981801ef646f79b7dcacce2 | [] | no_license | methane/isucon | e4ef846346a8ba718c676340936461da41616d87 | b9fa8b74805a479754e7ade65123a7a6c72a7903 | refs/heads/master | 2021-01-17T23:59:24.902081 | 2012-10-12T23:14:04 | 2012-10-12T23:14:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,410 | py | import os
import json
import pymysql
from pymysql.cursors import DictCursor
from threading import local
config = json.load(open('../config/hosts.json'))
ctx = local()
class ConnectionPool(local):
@property
def con(self):
if not hasattr(self, '_con'):
self._con = self._get_con()
return self._con
def _get_con(self):
#host = str(config['servers']['database'][0])
host = 'localhost'
return pymysql.connect(
host=host,
user='isuconapp',
passwd='isunageruna',
db='isucon',
charset='utf8',
)
db = ConnectionPool()
print config
import flask
app = flask.Flask(__name__)
import re
import jinja2
from jinja2 import evalcontextfilter, Markup, escape, Environment
jinja_env = Environment(loader=jinja2.FileSystemLoader('views'))
def render(template, **params):
return jinja_env.get_template(template).render(**params)
def fetch_recent_commented_articles():
cur = db.con.cursor(DictCursor)
cur.execute(
'SELECT a.id, a.title FROM comment c INNER JOIN article a ON c.article = a.id '
'GROUP BY a.id ORDER BY MAX(c.created_at) DESC LIMIT 10')
return cur.fetchall()
def fetch_articles():
cur = db.con.cursor(DictCursor)
cur.execute('SELECT id,title,body,created_at FROM article ORDER BY id DESC LIMIT 10')
return cur.fetchall()
def fetch_article(id):
cur = db.con.cursor(DictCursor)
cur.execute('SELECT id,title,body,created_at FROM article WHERE id=%s', (id,))
return cur.fetchone()
@app.route('/')
def index():
return render("index.jinja",
articles=fetch_articles(),
recent_commented_articles=fetch_recent_commented_articles(),
)
@app.route('/article/<int:articleid>')
def article(articleid):
return render('article.jinja',
article=fetch_article(articleid),
recent_commented_articles=fetch_recent_commented_articles(),
)
@app.route('/post', methods=('GET', 'POST'))
def post():
if flask.request.method == 'GET':
return render("post.jinja", recent_commented_articles=fetch_recent_commented_articles())
cur = db.con.cursor()
cur.execute("INSERT INTO article SET title=%s, body=%s", (flask.request.form['title'], flask.request.form['body']))
db.con.commit()
return flask.redirect('/')
@app.route('/comment/<int:articleid>', methods=['POST'])
def comment(articleid):
cur = db.con.cursor()
form = flask.request.form
cur.execute("INSERT INTO comment SET article=%s, name=%s, body=%s",
(articleid, form['name'], form['body'])
)
db.con.commit()
return flask.redirect('/')
@app.route('/<path:path>')
def css(path):
if '..' in path or path.startswith('/'):
flask.abort(404)
with open('../staticfiles/' + path) as f:
d = f.read()
if path.endswith('.css'):
content_type = 'text/css'
elif path.endswith('.js'):
content_type = 'application/javascript'
elif path.endswith(('.jpg', '.jpeg')):
content_type = 'image/jpeg'
else:
content_type = 'text/plain'
def res(e, s):
s("200 OK", [('Content-Length', str(len(d))), ('Content-Type', content_type)])
return [d]
return res
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| [
"inada-n@klab.com"
] | inada-n@klab.com |
19384f25bee5b7f981caf21727bfaba836cc9b6a | 57e35fea8d2059934ea927386da0a85306262344 | /@Groome_Danila_bot/app/handlers/errors.py | 952cfd142a89f899a183239030bc2fbfea30efdb | [] | no_license | Cicadadenis/ZZZ | b46de063566d2ffefc5d4aa270b0108eff40af2f | b972dbcdff78fd23b605816b10c13b9c5cfa2714 | refs/heads/main | 2023-07-29T14:35:11.881094 | 2021-09-13T16:40:10 | 2021-09-13T16:40:10 | 387,025,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | import logging
from aiogram import Dispatcher
from app.bot import config
async def errors_handler(update, exception):
"""
Exceptions handler. Catches all
exceptions within task factory tasks.
"""
logging.exception(exception)
logging.debug(update)
return True
def setup(dp: Dispatcher):
dp.register_errors_handler(errors_handler)
| [
"noreply@github.com"
] | Cicadadenis.noreply@github.com |
981e0d20bb6af93143a8b3eab6c7c73a56793a71 | 6fb81184a885e0fc75df8b73f378d8f5d8738a2e | /tests/test_script.py | 109c62ee78d11977b81914176b2e39369782c375 | [
"MIT"
] | permissive | jkehler/Flask-RQ2 | dd41e7b5b38a205a9aef57eed610b92508a16abe | abd5981c763c08f11be8b940b539f4bdf36571c8 | refs/heads/master | 2021-07-10T22:21:07.507708 | 2017-10-11T04:02:18 | 2017-10-11T04:02:18 | 106,503,322 | 0 | 0 | null | 2017-10-11T03:55:06 | 2017-10-11T03:55:05 | null | UTF-8 | Python | false | false | 3,493 | py | import logging
import sys
from flask import current_app
import pytest
from flask_rq2 import app as flask_rq2_app
from flask_rq2 import script as flask_rq2_script
from flask_rq2 import RQ
from flask_rq2.script import RQCommand, RQManager, _commands
def test_with_rq_commands(rq):
manager = RQManager(rq)
assert set(_commands.keys()) == set(manager._commands.keys())
for command in manager._commands.values():
assert command.rq == rq
def test_extra_command(rq, monkeypatch):
monkeypatch.setattr(flask_rq2_script, '_commands', {})
@RQCommand.register()
class TestRQCommand(RQCommand):
name = 'testrq'
def run(self):
assert self.app
assert 'testrq' in flask_rq2_script._commands
def test_decorator_condition(monkeypatch):
monkeypatch.setattr(flask_rq2_script, '_commands', {})
@RQCommand.register(False)
class FalseCommand(RQCommand):
def run(self):
assert self.app
assert 'false' not in flask_rq2_script._commands.keys()
def test_app_context(app):
rq = RQ(app)
class ContextCommand(RQCommand):
def run(self):
assert current_app == app
return current_app.name
command = ContextCommand(rq)
assert command.rq == rq
result = command(app)
assert result == app.name
@pytest.mark.parametrize('command,output,uses_logging', [
('empty', '0 jobs removed from', False),
('requeue', 'Nothing to do', False),
('info', '1 queues, 0 jobs total', False),
('worker --burst', 'Listening on', True),
('suspend', 'Suspending workers', False),
('resume', 'Resuming workers', False),
])
def test_commands(command, output, uses_logging, app, caplog, capsys,
monkeypatch, request):
rq = RQ(app)
manager = RQManager(app=app, rq=rq)
monkeypatch.setattr(sys, 'argv', ['manage.py'] + command.split())
try:
manager.run()
except SystemExit as e:
exit_code = e.code
else:
exit_code = None
assert exit_code == 0
if uses_logging:
caplog.setLevel(logging.INFO, logger='rq.worker')
out = caplog.text()
else:
out, err = capsys.readouterr()
assert output in out
def flush():
rq.connection.flushdb()
request.addfinalizer(flush)
def test_scheduler_command_pid(config, app, monkeypatch, tmpdir):
monkeypatch.setattr(flask_rq2_app.Scheduler, 'run',
lambda *args, **kwargs: None)
rq = RQ(app)
manager = RQManager(app=app, rq=rq)
pid = tmpdir.join('rq2_scheduler.pid')
assert not pid.exists()
monkeypatch.setattr(sys, 'argv', ['rq', 'scheduler', '--pid', pid.strpath])
try:
manager.run()
except SystemExit as e:
exit_code = e.code
else:
exit_code = None
assert exit_code == 0
assert pid.read() != ''
def test_scheduler_command_verbose(config, app, monkeypatch):
monkeypatch.setattr(flask_rq2_app.Scheduler, 'run',
lambda *args, **kwargs: None)
rq = RQ(app)
manager = RQManager(app=app, rq=rq)
def setup_loghandlers(level):
assert level == 'DEBUG'
monkeypatch.setattr(flask_rq2_script, 'setup_loghandlers',
setup_loghandlers)
monkeypatch.setattr(sys, 'argv', ['rq', 'scheduler', '--verbose'])
try:
manager.run()
except SystemExit as e:
exit_code = e.code
else:
exit_code = None
assert exit_code == 0
| [
"jannis@leidel.info"
] | jannis@leidel.info |
3a72418624bb2562579564803c390abf99b1303b | 4cc285b0c585241ff4404087e6fbb901195639be | /NeuralNetworkNumbers/venv/Lib/site-packages/sklearn/tests/test_min_dependencies_readme.py | 2f005cb5b06048b84244adf40820491edaeae740 | [] | no_license | strazhg/NeuralNetworksPython | 815542f4ddbb86e918e657f783158f8c078de514 | 15038e44a5a6c342336c119cdd2abdeffd84b5b1 | refs/heads/main | 2023-04-16T18:51:29.602644 | 2021-04-27T14:46:55 | 2021-04-27T14:46:55 | 361,944,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:28f33f79edf396dab0b7995ee7d7b479b4297dbd933cc11b96bf2fab27b7e544
size 1477
| [
"golubstrazh@gmail.com"
] | golubstrazh@gmail.com |
26682b4c75d58703977d68452dd3d74556eef28a | 91ca710a3087207116f9c7e7b73802f73c1e7a97 | /Python_Challenge_2.py | 678daf6266070df73bcf585c63cb9ad36d7e472c | [] | no_license | LiquidHologram/Problem_2 | 577b34eb5513e3d96fa3f60d77195602f3f6f2fb | 1ecee0fed83e9ac0dbb7e42f42c0341099665c98 | refs/heads/master | 2021-01-01T06:38:43.584986 | 2013-03-11T03:38:54 | 2013-03-11T03:38:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from string import maketrans
intab = "abcdefghijklmnopqrstuvwxyz"
outtab = "cdefghijklmnopqrstuvwxyzab"
transtab = maketrans(intab, outtab)
str = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.";
print str.translate(transtab); | [
"none"
] | none |
b00a450b410934e57d431a2cbb42481388a6a3eb | 0be81f5104d9b06348e2fb481b60af57dcb67cef | /Source/KqlPython/setup.py | 8f6b1df4ba1ac72c49a526a546f29e78d5a4da1e | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | microsoft/KqlTools | 46373181e033ac7ee31846333c0f6dd2d6657140 | 890ab51c8c4032a94762a842c6f6e3214e4a5912 | refs/heads/master | 2023-06-30T03:42:44.939969 | 2023-03-11T01:28:08 | 2023-03-11T01:28:08 | 274,771,839 | 67 | 19 | NOASSERTION | 2022-10-21T15:29:30 | 2020-06-24T21:20:19 | C# | UTF-8 | Python | false | false | 1,440 | py | import os
import glob
from setuptools import setup, find_packages
import kqlpythondir
with open(os.path.join(kqlpythondir.directory, 'VERSION.txt'), encoding='utf-8') as f:
version = f.read()
with open(os.path.join(kqlpythondir.directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'realtimekql',
version=version,
author='CDOC Engineering Open Source',
author_email='CDOCEngOpenSourceAdm@microsoft.com',
url='https://github.com/microsoft/kqltools',
description='A module for exploring real-time streams of events',
long_description=long_description,
long_description_content_type='text/markdown',
install_requires=['pythonnet', 'pandas', 'azure.kusto.data', 'azure.kusto.ingest'],
packages=['.'],
data_files=[('lib', glob.glob(os.path.join('lib', '*')))],
include_package_data=True,
classifiers=[
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Programming Language :: C#',
'Programming Language :: Python :: 3',
'Topic :: Security',
'Topic :: System :: Logging',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Operating System',
'Topic :: System :: Operating System Kernels',
],
)
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
a1f55a92288f94eb9bfbc27cfeba3f0411236c7d | 6f15f94ea6865d228856dadd57e4fafabac89db1 | /setup.py | 71d41bdba51b768a15e5278b5cbc3da063754079 | [] | no_license | DanielLSM/canton | 00afaefd45ab2681d1d65b0c0df701a07d2951c0 | a2734c0c2f24e2393513ea9b78d25ccb066b836e | refs/heads/master | 2021-01-19T12:51:40.344413 | 2017-05-13T09:15:15 | 2017-05-13T09:15:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,890 | py | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='canton',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.19',
description='TensorFlow Wrapper',
long_description=long_description,
# The project's main homepage.
url='https://github.com/ctmakro/canton',
# Author details
author='Qin Yongliang',
author_email='ctmakro@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# 'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
# 'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='deep learning framework',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['tensorflow','numpy'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
)
| [
"ctmakro@gmail.com"
] | ctmakro@gmail.com |
e69afa84efd82f83eafbca1c0a19fdd81e046e33 | f1717664d7a54e3210b1b5b424bb5ab2576e8428 | /newspaper/users/views.py | bb3aebf9fcef810b436e62bf13e1ce8d8c2f553c | [] | no_license | mehedi432/newspaper | 0f906f53e9d9f0178add4e4b0a72a5f79523cda6 | d43efa2179f3ab88810947aee2b92fc769e59707 | refs/heads/main | 2023-08-27T20:01:34.226169 | 2021-11-01T12:04:29 | 2021-11-01T12:04:29 | 423,448,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from django.urls import reverse_lazy
from .forms import CustomUserCreationForm
from django.views.generic import CreateView
class SignupView(CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
| [
"aamehedi93@gmail.com"
] | aamehedi93@gmail.com |
90fc58ea4ab4fa74a0e67517ed7de611dbee07e7 | c89f5856fe74cff49a9d96dde9ed0117109e3845 | /Web-scraping/Web.py | f48ac0612b551e8b99e87098b09aa0193bf438f3 | [] | no_license | sinoclover/python | b2b3f435d15840ec16a34c62d50308bdfb9d6c3e | 02f5347bc8219f1df52486077adf0017fe6d5211 | refs/heads/master | 2020-08-27T22:14:20.672846 | 2019-12-01T13:27:42 | 2019-12-01T13:27:42 | 158,791,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,017 | py | # # 采集整个网站
# # 为了避免一个页面被采集多次,需要对链接进行去重
# from urllib.request import urlopen
# from urllib.error import HTTPError, URLError
# from bs4 import BeautifulSoup
# import re
#
# pages = set()
#
# def getLinks(pageUrl):
# global pages
# try:
# html = urlopen('http://baike.baidu.com' + pageUrl)
# except (HTTPError, URLError) as e:
# return None
# try:
# bsObj = BeautifulSoup(html, 'html.parser')
# except AttributeError as e:
# return None
#
# for link in bsObj.findAll('a', target='_blank', href=re.compile('^(/item/)')):
# if 'href' in link.attrs:
# if link.attrs['href'] not in pages:
# newPage = link.attrs['href']
# print(newPage)
# pages.add(newPage)
# getLinks(newPage)
#
# getLinks('') # 即从主页进行爬取链接
#
# # 默认的递归限制是1000次
# 完善收集整个百度百科网站数据
from urllib.request import urlopen
from urllib.error import HTTPError,URLError
from bs4 import BeautifulSoup
import re
pages = set()
def getLinks(pageUrl):
global pages
try:
html = urlopen('http://baike.baidu.com' + pageUrl)
except (HTTPError, URLError) as e:
return None
try:
bsObj = BeautifulSoup(html, 'html.parser')
except AttributeError as e:
return None
try:
print(bsObj.h1.get_text())
print(bsObj.find('div', {'class': 'lemma-summary'}).find('div', {'class': 'para'}).get_text())
except AttributeError as e:
print('页面缺失一些属性!不过不用担心!')
for link in bsObj.findAll('a', target='_blank', href=re.compile('^(/item/)((?!=).)*$')):
if 'href' in link.attrs:
if link.attrs['href'] not in pages:
newPage = link.attrs['href']
print('----------------\n' + newPage)
pages.add(newPage)
getLinks(newPage)
getLinks('') | [
"zzxy123007@163.com"
] | zzxy123007@163.com |
36f0adaaa9888342af92a0b5b402094061685185 | fef98678398c84d76e4e3abed161af6f1b66097f | /partnersbankonline/spiders/partnersbankonline.py | 83da342ca796fb3cdb0dec17227b84f72b3b6c7b | [] | no_license | daniel-kanchev/partnersbankonline | 4ea52618e0d8eabdb54accd60b32a530a9f44464 | 21694df9013b9a3a84abdc2d3ea425f5f981253b | refs/heads/main | 2023-04-01T10:11:58.303585 | 2021-04-08T10:34:47 | 2021-04-08T10:34:47 | 355,863,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from partnersbankonline.items import Article
class partnersbankonlineSpider(scrapy.Spider):
name = 'partnersbankonline'
start_urls = ['https://www.partnersbankonline.com/blog/']
def parse(self, response):
links = response.xpath('//a[@class="post__title"]/@href').getall()
yield from response.follow_all(links, self.parse_article)
next_page = response.xpath('//a[@class="next page-numbers"]/@href').get()
if next_page:
yield response.follow(next_page, self.parse)
def parse_article(self, response):
if 'pdf' in response.url:
return
item = ItemLoader(Article())
item.default_output_processor = TakeFirst()
title = response.xpath('//h1/text()').get()
if title:
title = title.strip()
date = response.xpath('//h3[@class="interior-subtitle"]/text()').get()
if date:
date = " ".join(date.split()[2:5])
content = response.xpath('//div[@class="site-content"]//text()').getall()
content = [text for text in content if text.strip() and '{' not in text]
content = "\n".join(content[2:]).strip()
item.add_value('title', title)
item.add_value('date', date)
item.add_value('link', response.url)
item.add_value('content', content)
return item.load_item()
| [
"daniel.kanchev@adata.pro"
] | daniel.kanchev@adata.pro |
5931c0fe0c75584045f243763f8151c5095b1e6c | 61a5e9adfcd292dcf06fceda993c0fbeb0b469cc | /alg/mianshipython/1_1_3.py | f143171d89600d6592f720217e8d275793b70ed6 | [] | no_license | godspysonyou/everything | f76e0ade065601990ff5b449f8c5955ba3c07374 | 03c7ec9d9e5a802ffbc854a9f929ca770ae7add8 | refs/heads/master | 2020-03-20T23:34:50.812645 | 2019-04-21T13:16:58 | 2019-04-21T13:16:58 | 137,853,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | # 如何实现链表的逆序
# 方法三:插入法
class LNode:
def __init__(self, x):
self.data = x
self.next = None
def Reverse(head):
# 判断链表是否为空
if not head or not head.next:
return
cur = head.next.next
head.next.next = None
while cur:
next = cur.next
cur.next = head.next
head.next = cur
cur = next
if __name__ == "__main__":
i = 1
head = LNode(None)
head.next = None
tmp = None
cur = head
# 构造单链表
while i < 8:
tmp = LNode(i)
#tmp.data = i
tmp.next = None
cur.next = tmp
cur = tmp
i += 1
print("逆序前:")
cur = head.next
while cur:
print(cur.data)
cur = cur.next
print("\n逆序后:")
Reverse(head)
cur = head.next
while cur:
print(cur.data)
cur = cur.next
'''
引申:
1)不带头结点,用方法2中的RecursiveReverse,或者用插入法
2)从尾到头输出链表
1.就地逆序+顺序输出 这样改变了原链表的结构
2.栈
3.递归输出
def ReversePrint(firstNode):
if not firstNode:
return
ReversePrint(firstNode.next)
print(firstNode.data)
''' | [
"1344094556@qq.com"
] | 1344094556@qq.com |
f05e72895295159de85fe856a785e91d44bac7fb | 5a017fc861db92e3a2919f260d54f1301afbb3e5 | /MIDI Remote Scripts/MPK261/__init__.py | 19c1958dda064fcbf5db356f0144b2211d4c4a68 | [] | no_license | kera67/livepy_diff_ten | 8d8d0f3b76048f1fe5d4c0fbc02549dc922c7d5b | 12a0af9e9c57d0721af5036ce23af549df2c95f0 | refs/heads/master | 2023-07-14T18:26:33.591915 | 2020-11-19T07:50:28 | 2020-11-19T07:50:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | from __future__ import absolute_import, print_function, unicode_literals
from .MPK261 import MPK261
from _Framework.Capabilities import controller_id, inport, outport, CONTROLLER_ID_KEY, PORTS_KEY, NOTES_CC, SCRIPT, REMOTE
def get_capabilities():
return {CONTROLLER_ID_KEY: controller_id(vendor_id=2536, product_ids=[37], model_name=u'MPK261'),
PORTS_KEY: [inport(props=[NOTES_CC, SCRIPT, REMOTE]), outport(props=[SCRIPT, REMOTE])]}
def create_instance(c_instance):
return MPK261(c_instance)
| [
"aumhaa@gmail.com"
] | aumhaa@gmail.com |
dd52c71db7f7b4f795663e29ff119f8cea011454 | 538cee184d5ba586b2dfddd58e243febfbde81e6 | /examples/Example - SRM0 Neurons.py | 3d557938e7f4dd19b73dd93ffcc7c5a9ad969c3e | [
"MIT"
] | permissive | bblais/Plasticnet | 05ea7763cc7a188dfb88611cb12dfc4edd015955 | 5fc8a4fc715d4877bfa255feacd37b4c23201777 | refs/heads/master | 2023-07-10T06:15:06.286575 | 2023-06-21T10:38:11 | 2023-06-21T10:38:11 | 34,905,565 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,244 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from splikes import *
# In[13]:
from pylab import figure,legend,plot,linspace
# <img src="./images/epsp1.png">
# Input Rate: $\rho$
#
# Equation for Unsmoothed:
#
# \begin{eqnarray}
# u&\rightarrow& u+a\cdot w_i \mbox{ (input spike on input $i$)}\\
# \frac{du}{dt}&=& -u/\tau
# \end{eqnarray}
#
# Average value:
# \begin{eqnarray}
# <u> = a\cdot w\cdot \tau\cdot \rho
# \end{eqnarray}
#
# Equation for Smoothed:
#
# \begin{eqnarray}
# v&\rightarrow& v+a\cdot w_i \mbox{ (input spike on input $i$)}\\
# \frac{dv}{dt}&=& -v/\tau\\
# \frac{du}{dt}&=& (v-u)/\tau
# \end{eqnarray}
#
# Average value:
# \begin{eqnarray}
# <u> = a\cdot w\cdot \tau\cdot \rho
# \end{eqnarray}
#
# In[4]:
pre=neurons.poisson_pattern([10])
post=neurons.srm0(1)
post.smoothed=True
post.tau=0.01
post2=neurons.srm0(1)
post2.smoothed=False
post2.tau=0.01
post2.name='unsmoothed'
c=connection(pre,post,[1,1])
c2=connection(pre,post2,[1,1])
sim=simulation(.3,dt=0.0001)
sim.monitor(post,['u',],0.001)
sim.monitor(post2,['u',],0.001)
run_sim(sim,[pre,post,post2],[c,c2])
figure(figsize=(10,3))
m=sim.monitors['u']
m.plot()
m=sim.monitors['u [unsmoothed]']
m.plot()
legend(['Smoothed','Unsmoothed'])
# In[6]:
pre=neurons.poisson_pattern([20])
pre.save_spikes_begin=0.0
pre.save_spikes_end=10
post=neurons.srm0(1)
post.smoothed=True
post.tau=0.01
c=connection(pre,post,[1,1])
sim=simulation(2,dt=0.0001)
sim.monitor(post,['u',],0.001)
run_sim(sim,[pre,post],[c])
figure(figsize=(15,5))
m=sim.monitors['u']
m.plot()
for t,n in pre.saved_spikes:
plot([t,t],[0,0.1],'g',linewidth=3)
# In[7]:
pre.saved_spikes
# In[8]:
pre=neurons.poisson_pattern([10])
post=neurons.srm0(1)
post.smoothed=True
post.tau=0.1
post.a=10
post2=neurons.srm0(1)
post2.smoothed=False
post2.tau=0.1
post2.a=10
post2.name='unsmoothed'
c=connection(pre,post,[1,1])
c2=connection(pre,post2,[1,1])
sim=simulation(10,dt=0.0001)
sim.monitor(post,['u',],0.001)
sim.monitor(post2,['u',],0.001)
run_sim(sim,[pre,post,post2],[c,c2])
figure(figsize=(10,5))
m=sim.monitors['u']
m.plot()
m=sim.monitors['u [unsmoothed]']
m.plot()
legend(['Smoothed','Unsmoothed'])
plot([0,11],[10,10],'r--',linewidth=3)
paramtext(0.15,0.7,
r'%d Hz' % (10),
r'$a=%.f$' % (post2.a),
r'$\tau=%.1f$' % (post2.tau),
)
# ### try with isi invgauss input
# In[9]:
ISI=neurons.isi_distributions.invgauss(0,1.0)
pre=neurons.isi_pattern([10],ISI)
pre.time_between_patterns=1*second
pre.save_spikes_begin=0
pre.save_spikes_end=10
post=neurons.srm0(1)
post.smoothed=True
post.tau=0.1
post.a=10
post.save_spikes_begin=0
post.save_spikes_end=10
post2=neurons.srm0(1)
post2.smoothed=False
post2.tau=0.1
post2.a=10
post2.name='unsmoothed'
post2.save_spikes_begin=0
post2.save_spikes_end=10
c=connection(pre,post,[1,1])
c2=connection(pre,post2,[1,1])
sim=simulation(10,dt=0.0001)
sim.monitor(post,['u',],0.001)
sim.monitor(post2,['u',],0.001)
run_sim(sim,[pre,post,post2],[c,c2])
figure(figsize=(10,5))
m=sim.monitors['u']
m.plot()
m=sim.monitors['u [unsmoothed]']
m.plot()
legend(['Smoothed','Unsmoothed'])
plot([0,11],[10,10],'r--',linewidth=3)
paramtext(0.15,0.7,
r'%d Hz' % (10),
r'$a=%.f$' % (post2.a),
r'$\tau=%.1f$' % (post2.tau),
)
figure()
pre.plot_spikes()
figure()
post.plot_spikes()
post2.plot_spikes(1)
# In[10]:
ISI=neurons.isi_distributions.invgauss(0,1.0)
pre=neurons.isi_pattern([10],ISI)
pre.time_between_patterns=1*second
pre.save_spikes_begin=0
pre.save_spikes_end=10
ISI2a=neurons.isi_distributions.invgauss(0,1.0)
ISI2b=neurons.isi_distributions.invgauss(0,1.0)
post=neurons.srm0_isi(1,ISI2a)
post.smoothed=True
post.tau=0.1
post.a=10
post.save_spikes_begin=0
post.save_spikes_end=10
post2=neurons.srm0_isi(1,ISI2b)
post2.smoothed=False
post2.tau=0.1
post2.a=10
post2.name='unsmoothed'
post2.save_spikes_begin=0
post2.save_spikes_end=10
c=connection(pre,post,[1,1])
c2=connection(pre,post2,[1,1])
sim=simulation(10,dt=0.0001)
sim.monitor(post,['u',],0.001)
sim.monitor(post2,['u',],0.001)
run_sim(sim,[pre,post,post2],[c,c2])
figure(figsize=(10,5))
m=sim.monitors['u']
m.plot()
m=sim.monitors['u [unsmoothed]']
m.plot()
legend(['Smoothed','Unsmoothed'])
plot([0,11],[10,10],'r--',linewidth=3)
paramtext(0.15,0.7,
r'%d Hz' % (10),
r'$a=%.f$' % (post2.a),
r'$\tau=%.1f$' % (post2.tau),
)
figure()
pre.plot_spikes()
figure()
post.plot_spikes()
post2.plot_spikes(1)
# In[11]:
c.weights
# <img src="images/input_rate1.png">
# In[15]:
from pylab import mean
# In[16]:
rate_arr=linspace(1,50,100)
#print rate_arr
mean_arr=[]
for rate in rate_arr:
pre=neurons.poisson_pattern([rate])
post=neurons.srm0(1)
post.tau=0.1
post.a=10.0
c=connection(pre,post,[1,1])
sim=simulation(10,dt=0.0001)
sim.monitor(post,['u',],0.001)
run_sim(sim,[pre,post],[c],print_time=False)
u=sim.monitors['u'].array()
mean_arr.append(mean(u))
plot(rate_arr,mean_arr,'o')
xlabel(r'Input Rate ($\rho$)')
ylabel('Mean $u$')
plot(rate_arr,rate_arr*post.a*post.tau,'r--')
paramtext(.2,.7,
r'$a=%s$' % post.a,
r'$\tau=%s$' % post.tau,
r'$w=%s$' % float(c.weights),
)
paramtext(.5,.9,
r'$\langle u \rangle = w\cdot \rho \cdot a \cdot \tau$')
# <img src="images/weight_dependence1.png">
# In[17]:
w_arr=linspace(0.01,2,100)
#print w_arr
mean_arr=[]
rate=10
for w in w_arr:
pre=neurons.poisson_pattern([rate])
post=neurons.srm0(1)
post.tau=0.1
post.a=10.0
c=connection(pre,post,[w,w])
sim=simulation(10,dt=0.0001)
sim.monitor(post,['u',],0.001)
run_sim(sim,[pre,post],[c],print_time=False)
u=sim.monitors['u'].array()
mean_arr.append(mean(u))
plot(w_arr,mean_arr,'o')
xlabel('Connection Strength')
ylabel('Mean $u$')
plot(w_arr,w_arr*rate*post.a*post.tau,'r--')
paramtext(.2,.7,
r'$a=%s$' % post.a,
r'$\tau=%s$' % post.tau,
r'$\rho=%s$' % rate,
)
paramtext(.5,.9,
r'$\langle u \rangle = w\cdot \rho \cdot a \cdot \tau$')
# In[ ]:
| [
"bblais@gmail.com"
] | bblais@gmail.com |
28db36636e2b7c78c9df1ecc53aeaf467dd2e4be | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/union/anyURI-float/Schema+Instance/NISTXML-SV-IV-union-anyURI-float-enumeration-5-2.py | 009d6fd0c6e33d18c17fc88242dbc2ae4a3d9407 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 577 | py | from output.models.nist_data.union.any_uri_float.schema_instance.nistschema_sv_iv_union_any_uri_float_enumeration_5_xsd.nistschema_sv_iv_union_any_uri_float_enumeration_5 import NistschemaSvIvUnionAnyUriFloatEnumeration5
from output.models.nist_data.union.any_uri_float.schema_instance.nistschema_sv_iv_union_any_uri_float_enumeration_5_xsd.nistschema_sv_iv_union_any_uri_float_enumeration_5 import NistschemaSvIvUnionAnyUriFloatEnumeration5Type
obj = NistschemaSvIvUnionAnyUriFloatEnumeration5(
value=NistschemaSvIvUnionAnyUriFloatEnumeration5Type.VALUE_1_7427166_E27
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
d134d5655639e1fb2db6bbe1fb0c5191d61ecc3a | f1364645e43b60c05b746083981e5c7f169702ba | /strategy/phase_out/ml03.py | 094517388e99e28588f6cee44aa862c7867bcf29 | [] | no_license | geniustom/AI_Trading | bf41b946abe1f431a98078878d4b5f28a9a0d5c3 | 8c21d966053d64a224dc6b277ac9a33c576134ea | refs/heads/master | 2020-05-29T18:35:43.400029 | 2020-03-28T21:11:09 | 2020-03-28T21:11:09 | 189,305,491 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # coding=UTF-8
import lib.indicator as ind; reload(ind);
###############################################################################
def s1(self,PRICE,i,I):
if i==15:ind.GetIndicatorByType(I,"大台贏家")
if I.get("大台純贏家作為")[i-1]<-2 : self.EnterShort(PRICE)
if I.get("大台純贏家作為")[i-1]>2 : self.EnterLong(PRICE)
self.CheckDailyExitAll(I.get("TIME")[i],PRICE)
###############################################################################
import os
STittle=u"[ml03]大台純贏家作為策略"
FName=os.path.split(__file__)[1].split('.')[0]
if __name__ == '__main__':
exec(open(os.path.split(os.path.realpath(__file__))[0]+'\\init.py').read()) | [
"Geniustom@gmail.com"
] | Geniustom@gmail.com |
27722507ab885b075a068f4b3b62c6e5d6e5004f | 73f1075c99338984795f4bd7bd7b9563ecc36d87 | /Queue_Stack/752.Open_the_Lock.py | 6206f9f6c151bfaca1c1aecc94a9b8ec596c8896 | [] | no_license | grg909/LCtrip | 314bd173f87ec98ff13234bdd148c76482db2df7 | 96836da905526b47f0cdee8c0bb4790c4cdd6c79 | refs/heads/master | 2020-09-12T13:38:52.486189 | 2020-08-28T14:22:50 | 2020-08-28T14:22:50 | 222,442,472 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | # -*- coding: UTF-8 -*-
# @Date : 2019/12/1
# @Author : WANG JINGE
# @Email : wang.j.au@m.titech.ac.jp
# @Language: python 3.7
"""
"""
from collections import deque
class Solution:
def openLock(self, deadends: List[str], target: str) -> int:
deadends = set(deadends) # in 操作在set中时间复杂度为O(1)
begin = set()
end = set()
if '0000' in deadends:
return -1
if target == '0000':
return 0
# -------------------------------BFS 开始--------------------------------
q = deque([])
q.append(('0000', 0))
while q:
# 取出一个节点
node, step = q.popleft()
step += 1
# 放入周围节点
for i in range(4):
for add in (1, -1):
cur = node[:i] + str(
(int(node[i]) + add) % 10) + node[i + 1:]
if cur == target:
return step
if cur not in deadends:
q.append((cur, step))
deadends.add(cur) # 避免重复搜索
# -------------------------------------------------------------------------
return -1
| [
"grg909@foxmail.com"
] | grg909@foxmail.com |
fac3011e283433b33679740677265c838001650f | 775822912186176ced195cc224659f2b4f5994d3 | /whoville/cloudbreak/models/structured_notification_event.py | 455702372e7915ee73aad70a11df31a854d3b62c | [
"Apache-2.0"
] | permissive | Chaffelson/whoville | 7885d7d0994659dd1d1c02b4bcd87d48b161c5d0 | f71fda629c9fd50d0a482120165ea5abcc754522 | refs/heads/master | 2021-06-04T06:17:29.603774 | 2021-02-11T13:35:28 | 2021-02-11T13:35:28 | 93,987,343 | 31 | 32 | Apache-2.0 | 2019-10-01T04:59:13 | 2017-06-11T06:57:39 | Python | UTF-8 | Python | false | false | 6,958 | py | # coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StructuredNotificationEvent(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'operation': 'OperationDetails',
'notification_details': 'NotificationDetails',
'duration': 'int',
'status': 'str'
}
attribute_map = {
'type': 'type',
'operation': 'operation',
'notification_details': 'notificationDetails',
'duration': 'duration',
'status': 'status'
}
def __init__(self, type=None, operation=None, notification_details=None, duration=None, status=None):
"""
StructuredNotificationEvent - a model defined in Swagger
"""
self._type = None
self._operation = None
self._notification_details = None
self._duration = None
self._status = None
if type is not None:
self.type = type
if operation is not None:
self.operation = operation
if notification_details is not None:
self.notification_details = notification_details
if duration is not None:
self.duration = duration
if status is not None:
self.status = status
@property
def type(self):
"""
Gets the type of this StructuredNotificationEvent.
:return: The type of this StructuredNotificationEvent.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this StructuredNotificationEvent.
:param type: The type of this StructuredNotificationEvent.
:type: str
"""
self._type = type
@property
def operation(self):
"""
Gets the operation of this StructuredNotificationEvent.
:return: The operation of this StructuredNotificationEvent.
:rtype: OperationDetails
"""
return self._operation
@operation.setter
def operation(self, operation):
"""
Sets the operation of this StructuredNotificationEvent.
:param operation: The operation of this StructuredNotificationEvent.
:type: OperationDetails
"""
self._operation = operation
@property
def notification_details(self):
"""
Gets the notification_details of this StructuredNotificationEvent.
:return: The notification_details of this StructuredNotificationEvent.
:rtype: NotificationDetails
"""
return self._notification_details
@notification_details.setter
def notification_details(self, notification_details):
"""
Sets the notification_details of this StructuredNotificationEvent.
:param notification_details: The notification_details of this StructuredNotificationEvent.
:type: NotificationDetails
"""
self._notification_details = notification_details
@property
def duration(self):
"""
Gets the duration of this StructuredNotificationEvent.
:return: The duration of this StructuredNotificationEvent.
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""
Sets the duration of this StructuredNotificationEvent.
:param duration: The duration of this StructuredNotificationEvent.
:type: int
"""
self._duration = duration
@property
def status(self):
"""
Gets the status of this StructuredNotificationEvent.
:return: The status of this StructuredNotificationEvent.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this StructuredNotificationEvent.
:param status: The status of this StructuredNotificationEvent.
:type: str
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StructuredNotificationEvent):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"chaffelson@gmail.com"
] | chaffelson@gmail.com |
8670f20cd30b86b5b933a71379055cb7e6566132 | cd6d1e5f91bef10701ecb2ac4161e2982bd87e8d | /pwndbg/commands/telescope.py | 4801641857dede03f36232a4df95eddcbe461416 | [
"MIT"
] | permissive | 0xDEC0DE8/pwndbg | 3b9edea1ce435067a0d90a35b25b5ebe04f77e47 | e2cb1ab5da9dff2779ef285b0b06cc48655a1a2e | refs/heads/master | 2021-01-24T16:03:32.693237 | 2016-04-18T02:12:05 | 2016-04-18T02:12:05 | 56,667,804 | 1 | 0 | null | 2016-04-20T08:02:20 | 2016-04-20T08:02:20 | null | UTF-8 | Python | false | false | 2,479 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Prints out pointer chains starting at some address in memory.
Generally used to print out the stack or register values.
"""
import collections
import pwndbg.arch
import pwndbg.chain
import pwndbg.commands
import pwndbg.memory
import pwndbg.regs
import pwndbg.typeinfo
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def telescope(address=None, count=8, to_string=False):
"""
Recursively dereferences pointers starting at the specified address
($sp by default)
"""
address = int(address if address else pwndbg.regs.sp) & pwndbg.arch.ptrmask
count = int(count) & pwndbg.arch.ptrmask
# Allow invocation of "hexdump 20" to dump 20 bytes at the stack pointer
if address < pwndbg.memory.MMAP_MIN_ADDR and not pwndbg.memory.peek(address):
count = address
address = pwndbg.regs.sp
# Allow invocation of "hexdump a b" to dump all bytes from A to B
if int(address) < int(count):
count -= address
reg_values = collections.defaultdict(lambda: [])
for reg in pwndbg.regs.common:
reg_values[pwndbg.regs[reg]].append(reg)
# address = pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address)
ptrsize = pwndbg.typeinfo.ptrsize
start = address
stop = address + (count*ptrsize)
step = ptrsize
# Find all registers which show up in the trace
regs = {}
for i in range(start, stop, step):
values = list(reg_values[i])
for width in range(1, pwndbg.arch.ptrsize):
values.extend('%s-%i' % (r,width) for r in reg_values[i+width])
regs[i] = ' '.join(values)
# Find the longest set of register information
if regs:
longest_regs = max(map(len, regs.values())) + 1
else:
longest_regs = 0
# Print everything out
result = []
for i,addr in enumerate(range(start, stop, step)):
if not pwndbg.memory.peek(addr):
result.append("<Could not read memory at %#x>" % addr)
break
line = ' '.join(("%02x:%04x|" % (i, addr-start),
regs[addr].ljust(longest_regs),
pwndbg.chain.format(addr)))
result.append(line)
if not to_string:
print('\n'.join(result))
return result
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def stack(*a):
"""
Recursively dereferences pointers on the stack
"""
telescope(*a)
| [
"riggle@google.com"
] | riggle@google.com |
c803bd1c8eef597005d492bccadc025979abe5d8 | aa1972e6978d5f983c48578bdf3b51e311cb4396 | /nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/network/interfacepair.py | 39fc75681392eb748d6d36f185842b26c53eb5fa | [
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | MayankTahil/nitro-ide | 3d7ddfd13ff6510d6709bdeaef37c187b9f22f38 | 50054929214a35a7bb19ed10c4905fffa37c3451 | refs/heads/master | 2020-12-03T02:27:03.672953 | 2017-07-05T18:09:09 | 2017-07-05T18:09:09 | 95,933,896 | 2 | 5 | null | 2017-07-05T16:51:29 | 2017-07-01T01:03:20 | HTML | UTF-8 | Python | false | false | 7,063 | py | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class interfacepair(base_resource) :
""" Configuration for "Interface Pair" resource. """
def __init__(self) :
self._id = None
self._ifnum = []
self._ifaces = None
self.___count = 0
@property
def id(self) :
r"""The Interface pair id.<br/>Minimum length = 1<br/>Maximum length = 255.
"""
try :
return self._id
except Exception as e:
raise e
@id.setter
def id(self, id) :
r"""The Interface pair id.<br/>Minimum length = 1<br/>Maximum length = 255
"""
try :
self._id = id
except Exception as e:
raise e
@property
def ifnum(self) :
r"""The constituent interfaces in the interface pair.<br/>Minimum length = 1.
"""
try :
return self._ifnum
except Exception as e:
raise e
@ifnum.setter
def ifnum(self, ifnum) :
r"""The constituent interfaces in the interface pair.<br/>Minimum length = 1
"""
try :
self._ifnum = ifnum
except Exception as e:
raise e
@property
def ifaces(self) :
r"""Names of all member interfaces of this Interface Pair.
"""
try :
return self._ifaces
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(interfacepair_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.interfacepair
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.id is not None :
return str(self.id)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
r""" Use this API to add interfacepair.
"""
try :
if type(resource) is not list :
addresource = interfacepair()
addresource.id = resource.id
addresource.ifnum = resource.ifnum
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ interfacepair() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].id = resource[i].id
addresources[i].ifnum = resource[i].ifnum
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete interfacepair.
"""
try :
if type(resource) is not list :
deleteresource = interfacepair()
if type(resource) != type(deleteresource):
deleteresource.id = resource
else :
deleteresource.id = resource.id
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ interfacepair() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].id = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ interfacepair() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].id = resource[i].id
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the interfacepair resources that are configured on netscaler.
"""
try :
if not name :
obj = interfacepair()
response = obj.get_resources(client, option_)
else :
if type(name) is not list :
if type(name) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name)))
obj = interfacepair()
obj.id = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
if type(name[0]) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
response = [interfacepair() for _ in range(len(name))]
obj = [interfacepair() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = interfacepair()
obj[i].id = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of interfacepair resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = interfacepair()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the interfacepair resources configured on NetScaler.
"""
try :
obj = interfacepair()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of interfacepair resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = interfacepair()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class interfacepair_response(base_response) :
def __init__(self, length=1) :
self.interfacepair = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.interfacepair = [interfacepair() for _ in range(length)]
| [
"Mayank@Mandelbrot.local"
] | Mayank@Mandelbrot.local |
a9263e67addfe5f9590da5d7d50a327ab031fa55 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/3610a2457f89362cc61255fbfa93df0d60b33677-<disconnect_all_containers>-fix.py | 85a2023c6feaa3eb9cd281e1cd09b7a6fb48cf89 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | def disconnect_all_containers(self):
containers = self.client.inspect_network(self.parameters.network_name)['Containers']
if (not containers):
return
for cont in containers.values():
self.disconnect_container(cont['Name']) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
352366fc0f661f720e8ecaa26e3e78d8c2fc7aaf | 020fbf1db497520abcb30cd3889cfe61c601723f | /practice/readmodclass2.py | c5296cc7b83d8fed132cf438e7d688040c05200a | [] | no_license | bluecrt/first-project | bc53a49ae23c3cc6beb0ede4d00d94e4ad568771 | 7f5f376adcaa0d48caf4540db6613f843d40e173 | refs/heads/master | 2023-06-21T19:16:46.644489 | 2021-07-22T13:13:15 | 2021-07-22T13:13:15 | 386,676,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from classcars import Cars
car = Cars('goden', 'big')
print('your car\'s color is {}, it\'s shape is {}, now you can drive'.format(car.color, car.shape)) | [
"email"
] | email |
eda5f6ff03acd99d8f0adfd9303c8116a7387bb6 | 91b638e9e684441fd9503717cf1d94065f65d484 | /helmet_assignment/modules/train/train_factory.py | 4565a2cd31fe1b84815b3af009dbd5869828f8b7 | [] | no_license | Vadbeg/helmet_assignment | 5604090b96aa8cfbc4566cfb1104ad1a1e863392 | becb8f46067fd91225c4e9c3b1d8f4f2127e1c6a | refs/heads/master | 2023-08-21T21:59:41.324270 | 2021-10-16T08:35:50 | 2021-10-16T08:35:50 | 410,277,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from __future__ import absolute_import, division, print_function
from .mot import MotTrainer
train_factory = {
'mot': MotTrainer,
}
| [
"vadbeg@tut.by"
] | vadbeg@tut.by |
7aa3e1094b2eb5692034210ca85d643c8c3e407a | a5d5a1824db7a09b58b81fda0cbba61764e6ac0c | /wsdBabelfyNet.py | de40bc580c8fa5389850cb6f954b8ad84bedb9ab | [] | no_license | HencyChine/babelNetAPI | 25260ffa69d68332c2ae31184cecd783720655b1 | 180f9ef0910886b30605391aaee1b8ea4f967a15 | refs/heads/master | 2021-01-24T20:13:31.742512 | 2016-01-16T09:38:30 | 2016-01-16T09:38:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,462 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 16:46:57 2015
@author: spacegoing
"""
from DocUtils.concaTextData import readALTA2015Data
from queryBabelfy import getDocBnSets, getFilteredDocBnSets
from queryBabelNet import getUniqueSynsetIDInfo,\
uniqueSynIDDict2docIndexLangSimpleLemma
def queryBabelfySynsetID(docIndexString, docStringIndices,\
key = '29738119-195a-42c8-ace1-8a78de74b891'):
"""
Return
----
docIndexBabelSynsetID: dict{docid: np.ndarray([index, synSetID]...)}
index is the term's index in source data file
Example
----
>>> srcFolder = "/Users/spacegoing/AllSymlinks/Document Analysis/sharedTask/Data/"
>>> fileName = "Train.txt"
>>> docIndexString, docStringIndices = readALTA2015Data(srcFolder + fileName)
>>> docIndexBabelSynsetID, docFilteredIndexString = queryBabelfySynsetID(docIndexString, docStringIndices)
"""
rawDocBnSets = getDocBnSets(docStringIndices)
docIndexBabelSynsetID, docFilteredIndexString = \
getFilteredDocBnSets(docStringIndices, rawDocBnSets)
return docIndexBabelSynsetID, docFilteredIndexString
def queryBabelNetSimpleLemma(docIndexBabelSynsetID, keyset =
['0b8bb0c1-7e51-41f6-8d32-31d15a9ca7ad',\
'29738119-195a-42c8-ace1-8a78de74b891'], limit = 1000):
"""
This will assign each uniqueSynIDBabelNetDict item to its belonging doc's
index's synsetID in docIndexBabelSynsetID. It will return a new dict contains
docIndexLangTrans which only contains each translation's simple lemma.
Parameters
----
docIndexBabelSynsetID : dict(docid:[[index, synsetID]...])
wsdBabelfyNe.getDocIndexBabelSynsetID's return
Return
----
docIndexLangTrans : {docid: np.array([index, {"EN": [simpleLemma...], "FR": [simpleLemma...]}]...)}
Dict contains docid, each docs' all indexes and their simple lemmas in EN and FR
Example
----
>>> docIndexBabelSynsetID = queryBabelfySynsetID(docIndexString, docStringIndices)
>>> docIndexLangTrans = queryBabelNetSimpleLemma(docIndexBabelSynsetID)
"""
uniqueSynIDBabelNetDict = getUniqueSynsetIDInfo(docIndexBabelSynsetID,\
keyset, limit)
return uniqueSynIDDict2docIndexLangSimpleLemma\
(uniqueSynIDBabelNetDict, docIndexBabelSynsetID)
##
if __name__ == "__main__":
from configFile import returnParams
websiteParams, filePath = returnParams()
docIndexString, docStringIndices = readALTA2015Data(filePath)
docIndexBabelSynsetID, docFilteredIndexString = queryBabelfySynsetID(docIndexString, \
docStringIndices, websiteParams['keyset'][0])
keyset = websiteParams['keyset']
limit = websiteParams['limit']
docIndexLangTrans = queryBabelNetSimpleLemma(docIndexBabelSynsetID, **websiteParams)
# # Pickle the synsetID
# import pickle
# outputSysetIDSetPath = "/Users/spacegoing/AllSymlinks/Document Analysis/sharedTask/Data/testSynsetID.pkl"
# outputSysetID = open(outputSysetIDSetPath,"wb")
# pickle.dump(docIndexBabelSynsetID, outputSysetID, -1)
# outputSysetID.close()
# outputdocFilteredIndexStringPath = \
# "/Users/spacegoing/AllSymlinks/Document Analysis/sharedTask/Data/testdocFilteredIndexString.pkl"
# outputdocFilteredIndexString= open(outputdocFilteredIndexStringPath,"wb")
# pickle.dump(docFilteredIndexString, outputdocFilteredIndexString, -1)
# outputdocFilteredIndexString.close() | [
"spacegoing@gmail.com"
] | spacegoing@gmail.com |
fbf182a5aebac14053d20f67c9dec2ea8d97eee7 | 59bd639757fd8afcfdba73298a69482fd1f88069 | /cifar10/2_Network_in_Network/2process.py | e2bbd8d506716b3ccf0e3ecf678cb9cd53a9e971 | [] | no_license | DeepReduce/DeepReduce | f1e14b985affba2796c80d9e795b36cfd4ed9a55 | 707c2b411d65ed77967a3d1ea1506a91cc9d4bfd | refs/heads/master | 2020-08-01T01:47:23.293453 | 2020-06-14T18:09:01 | 2020-06-14T18:09:01 | 210,813,368 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import os
import sys
from tqdm import tqdm
def readFile(filepath):
f = open(filepath)
content = f.read()
f.close()
return content.splitlines()
if __name__ == '__main__':
# threshold = str(sys.argv[1])
filename = str(sys.argv[1])
# path = os.getcwd() + '/Cov/activeneuron/' + threshold + 'ase/'
path = os.getcwd() + '/Cov/'
# cov = readFile(path + 'neuron_cov')
cov = readFile(path + filename)
cnum = len(cov[0])
nnum = len(cov)
# f = open(path + 'test_cov','w')
f = open(path + 'test_cov_' + filename, 'w')
for i in tqdm(range(cnum)):
tstr = ''
for j in range(nnum):
if cov[j][i] == '1':
tstr += '1'
else:
tstr += '0'
f.write(tstr + '\n')
f.close()
| [
"DLR_ICLR20@163.com"
] | DLR_ICLR20@163.com |
4b1aa02eb3759b71cc4a3d06d5eefa76b29d2c84 | 22cbb7cffc3e5cf53fe87d2db216fdb88c8b7a8c | /stems/gis/utils.py | 6c1c1a57cfa434370e182fd86623d2e63432e8c0 | [
"BSD-3-Clause"
] | permissive | ceholden/stems | 838eb496978f7b68ae72988e0469c60e8730cb9c | 2e219eb76a44d6897881642635103b3353fc5539 | refs/heads/master | 2022-02-12T21:56:41.939073 | 2019-08-19T23:09:49 | 2019-08-19T23:09:49 | 164,480,487 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | """ Assorted GIS utilities
"""
import logging
from osgeo import osr
from rasterio.crs import CRS
osr.UseExceptions()
def crs2osr(crs):
""" Return `osgeo.osr.SpatialReference` of a `rasterio.crs.CRS`
Parameters
----------
crs : rasterio.crs.CRS
Rasterio coordinate reference system
Returns
-------
osr.SpatialReference
CRS as OSR object
"""
crs_osr = osr.SpatialReference()
crs_osr.ImportFromWkt(crs.wkt)
crs_osr.Fixup()
return crs_osr
def same_crs(*crs):
""" Check if multiple CRS are the same
Parameters
----------
crs : rasterio.crs.CRS
Multiple CRS to compare
Returns
-------
bool
True if all of the CRS inputs are the same (according to rasterio)
See Also
--------
osr_same_crs
"""
assert len(crs) >= 1
base = crs[0]
for _crs in crs[1:]:
if base != _crs:
return False
return True
def osr_same_crs(*crs):
""" Use OSR to compare for CRS equality
Converts Rasterio ``CRS`` to ``OSRSpatialReference``
and compares using OSR because comparing CRS from
WKT with CRS from EPSG/Proj4 can be buggy.
Parameters
----------
*crs : rasterio.crs.CRS
Two or more CRS
Returns
-------
bool
True if all CRS are equivalent
"""
assert len(crs) >= 1
sr_crs = [crs2osr(crs_) for crs_ in crs]
base = sr_crs[0]
for other in sr_crs[1:]:
if not bool(base.IsSame(other)):
return False
return True
| [
"ceholden@gmail.com"
] | ceholden@gmail.com |
195e3bade6b6220496dfe8170e85b2cd8ce68897 | 18ad97292b34a679b8dea8a85090541c5bbf6174 | /alphabetanimals.py | 1a309b830abfa2c306308068fc05ba06e3c505c6 | [] | no_license | Jyotirm0y/kattis | b941044e39dc36d169450480fc33fd33bd2e0f8e | 2b9c1819ba29419bbea3db2e8ad7851155abbb3a | refs/heads/master | 2023-05-31T21:11:38.350044 | 2021-06-12T08:21:47 | 2021-06-12T08:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | from collections import defaultdict
s = input()
n = int(input())
l = []
starts = defaultdict(set)
ends = defaultdict(set)
for i in range(n):
a = input()
l.append(a)
start, end = a[0], a[-1]
starts[start].add(i)
ends[end].add(i)
end = s[-1]
if end not in starts:
print('?')
else:
done = False
valid = '?'
for i in starts[end]:
e = l[i][-1]
if len(starts[e]) == 0 or (len(starts[e]) == 1 and i in starts[e]):
print(l[i]+'!')
done = True
break
elif valid == '?':
valid = l[i]
if not done:
print(valid)
| [
"ainunnajib@gmail.com"
] | ainunnajib@gmail.com |
65e74997650a39fc859bc82f81453ed9a460af5a | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/audio/Tacotron2_for_PyTorch/main.py | 4ae9c129e3491812b68182363a46b47cf098f3f3 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,290 | py | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from train import main as main_train
from inference_perf import main as main_infer
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--bench-class', type=str, choices=['train', 'perf-infer', 'perf-train'], required=True, help='Choose test class')
return parser
def main():
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Testing')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
if "train" in args.bench_class:
main_train()
else:
main_infer()
if __name__ == '__main__':
main()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
3f40f48b5251fff1748fef88ceb1aeecf048e98e | 6af9e2257647e1d06e6f004dd8394a9f3d49b1aa | /Python/my_environments/djangoPy3Env/bin/python-config | 7834ca873cf629389a61caa92c560328d323bf88 | [] | no_license | ronaldaguerrero/coding_dojo | 43b05507fb616e7499d2c46b1049530b30d4b781 | 06ac3daadb9510873836b3fffe4bc880b5bee457 | refs/heads/master | 2020-05-01T05:58:40.564306 | 2019-03-23T04:53:13 | 2019-03-23T04:53:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | #!/Users/xh0tsaucex/Desktop/coding_dojo/Python/my_environments/djangoPy3Env/bin/python
import sys
import getopt
import sysconfig
# 'python-config' helper script: prints the compiler/linker settings needed
# to embed or extend this Python installation.  The exact stdout format is
# consumed by build systems, so treat every printed line as a contract.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
# Some query options only exist on newer interpreters.
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    # Print a usage line listing every supported --option, then exit.
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
# At least one option is required.
if not opts:
    exit_with_usage()
# Shorthand accessors into the interpreter's build-time configuration.
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Emit one line of output per requested flag, in the order given.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        # Header search paths; --cflags additionally appends CFLAGS.
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        # C-extension filename suffix; 'SO' is the legacy pre-3.3 key.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"ronald.a.guerrero@gmail.com"
] | ronald.a.guerrero@gmail.com | |
6b5a9e9289a441b65137b41916a12629715cd91d | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-network-security/samples/generated_samples/networksecurity_v1beta1_generated_network_security_list_authorization_policies_sync.py | 4c781f4207561a12c7f3c8cff7ff0104887d323a | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 2,027 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListAuthorizationPolicies
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-network-security
# [START networksecurity_v1beta1_generated_NetworkSecurity_ListAuthorizationPolicies_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import network_security_v1beta1
def sample_list_authorization_policies():
    """List AuthorizationPolicies under a parent resource and print each.

    Generated sample (see the header notes): "parent_value" is a
    placeholder that must be replaced with a real parent resource name
    before this will run against the API.
    """
    # Create a client
    client = network_security_v1beta1.NetworkSecurityClient()
    # Initialize request argument(s)
    request = network_security_v1beta1.ListAuthorizationPoliciesRequest(
        parent="parent_value",
    )
    # Make the request
    page_result = client.list_authorization_policies(request=request)
    # Handle the response
    for response in page_result:
        print(response)
# [END networksecurity_v1beta1_generated_NetworkSecurity_ListAuthorizationPolicies_sync]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
fcb3f59e78d48b84256be1823eae354735602da0 | 3ab7e700203054e104e6c60295c0a8455bc388b1 | /i_entity_extractor/etl/wenshu/clean_wenshu.py | 740794b660e7e97bdf2b4364a2a0d12375ba731a | [] | no_license | youfeng243/crawler | e8114ab5ef68bb9fd7e4296452d63b53d3d4080a | 59eaabef94de67444f09cfe5b25d481034d10f29 | refs/heads/master | 2021-07-11T10:12:27.946819 | 2017-10-12T11:35:27 | 2017-10-12T11:35:27 | 106,583,181 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | # coding=utf8
import time
import traceback
import json
from i_entity_extractor.etl.base_etl import BaseEtl
from i_entity_extractor.extractors.judge_wenshu.judge_wenshu_extractor import JudgeWenshuExtractor
from i_entity_extractor.entity_extractor_route import EntityExtractorRoute
class Wenshu(BaseEtl):
    """ETL job that re-cleans the judgement-document ("wenshu") topic by
    re-running JudgeWenshuExtractor over a backed-up source table and
    writing the cleaned rows back.  Python 2 code (uses dict.has_key)."""
    def __init__(self, conf):
        BaseEtl.__init__(self,conf)
        route = EntityExtractorRoute(conf)
        # 32 is the fixed topic id for judgement documents.
        self.topic_id = 32
        topic_info = route.all_topics.get(self.topic_id, None)
        self.wenshu_obj = JudgeWenshuExtractor(topic_info, conf.get('log'))
    def process_data(self, item):
        # Re-extract and post-process a single raw record.
        etl_data = self.wenshu_obj.format_extract_data(item, self.topic_id)
        etl_data = self.wenshu_obj.after_process(etl_data)
        # etl_data.pop("_id")
        # print json.dumps(etl_data,ensure_ascii=False,encoding='utf8')
        return etl_data
    def etl(self):
        '''Main entry point for the cleaning run.  Returns True on
        success, False when the table backup or the row loop fails.'''
        begin_time = time.time()
        # Step 1: fetch source data -- back up src_table and read from the
        # backup so the live table can be re-populated.
        # NOTE(review): self.conf, self.db_obj, self.log and
        # self.max_insert_data presumably come from BaseEtl -- confirm.
        src_table = self.conf.get('topic2table').get(str(self.topic_id), '')
        last_table = src_table + "_last"
        ret = self.db_obj.cur_table_bak(src_table,last_table)
        if not ret:
            return False
        bak_table = self.db_obj.get_last_table(last_table)
        cursor = self.db_obj.db.select(bak_table, {})
        # Step 2: clean the rows, buffering inserts in batches.
        insert_data_list = []
        num = 0
        try:
            for item in cursor:
                num += 1
                # Only rows with raw document content need re-extraction.
                if item.has_key("doc_content"):
                    etl_data = self.process_data(item)
                else:
                    etl_data = item
                insert_data_list.append(etl_data)
                if len(insert_data_list) >= self.max_insert_data:
                    self.db_obj.insert_info_batch(src_table, insert_data_list)
                    del insert_data_list[:]
                # Periodic progress logging.
                if num % 500 == 0:
                    self.log.info("process_num:%s\ttime_cost:%s" % (num, time.time() - begin_time))
        except Exception as e:
            self.log.error(traceback.format_exc())
            return False
        # Flush the final partial batch.
        self.db_obj.insert_info_batch(src_table, insert_data_list)
        del insert_data_list[:]
        self.log.info("finish_wenshu_etl,process_num:%s\ttime_cost:%s" % (num, time.time() - begin_time))
        return True
| [
"you@163.com"
] | you@163.com |
6969fa9ad834ea991f7d2c94a5d60cba026c28de | 3eecaca9c842bffe8757917bcdb5a6911a7ed157 | /python_examples/datatype_ex1.py | 8a96d531cab000f7630d8613b520655eecc5ceb3 | [] | no_license | matamkiran/python2020 | 2c79cb16e526ea7242e976c79c5db61d97326ec7 | c86b96ed3a266245f14b26fc7e8c0c35e76d4946 | refs/heads/master | 2023-02-10T06:21:50.609567 | 2023-02-01T18:43:28 | 2023-02-01T18:43:28 | 244,181,848 | 5 | 1 | null | 2021-04-28T17:34:01 | 2020-03-01T16:18:05 | Python | UTF-8 | Python | false | false | 131 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 1 11:31:44 2020
@author: 666292
"""
a="hello"
print(type(a))
print(a+10) | [
"noreply@github.com"
] | matamkiran.noreply@github.com |
ce3c070c24eb5d404aa75b57729a0bb2c12d2af1 | 52b53a03b38107f2c62bf4097b9603c428faeb17 | /mingtak/portlet/scriptbox/tests/test_portlet.py | eae602f04af73804c105036a5e2f2a3a2becd00f | [] | no_license | mingtak/mingtak.portlet.scriptbox | ebf2680f17383bedac47d9fb5f5284e00c330157 | aa105c39303f519ef549a2510519c0b9c3db653d | refs/heads/master | 2021-01-01T19:28:27.114993 | 2015-03-27T13:03:09 | 2015-03-27T13:03:09 | 32,986,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,158 | py | from zope.component import getUtility, getMultiAdapter
from plone.portlets.interfaces import IPortletType
from plone.portlets.interfaces import IPortletManager
from plone.portlets.interfaces import IPortletAssignment
from plone.portlets.interfaces import IPortletDataProvider
from plone.portlets.interfaces import IPortletRenderer
from plone.app.portlets.storage import PortletAssignmentMapping
from mingtak.portlet.scriptbox import scriptbox
from mingtak.portlet.scriptbox.tests.base import TestCase
class TestPortlet(TestCase):
    """Integration tests for the ScriptBox portlet: registration,
    add/edit views and renderer lookup."""

    def afterSetUp(self):
        self.setRoles(('Manager', ))

    def test_portlet_type_registered(self):
        """The portlet type utility is registered with the right addview."""
        portlet_type = getUtility(
            IPortletType,
            name='mingtak.portlet.scriptbox.ScriptBox')
        self.assertEquals(
            portlet_type.addview, 'mingtak.portlet.scriptbox.ScriptBox')

    def test_interfaces(self):
        """Assignments provide the portlet assignment/data interfaces."""
        # TODO: Pass any keyword arguments to the Assignment constructor
        assignment = scriptbox.Assignment()
        self.failUnless(IPortletAssignment.providedBy(assignment))
        self.failUnless(IPortletDataProvider.providedBy(assignment.data))

    def test_invoke_add_view(self):
        """Invoking the add view stores exactly one Assignment."""
        portlet_type = getUtility(
            IPortletType,
            name='mingtak.portlet.scriptbox.ScriptBox')
        mapping = self.portal.restrictedTraverse(
            '++contextportlets++plone.leftcolumn')
        # Start from an empty portlet column.
        for key in mapping.keys():
            del mapping[key]
        addview = mapping.restrictedTraverse('+/' + portlet_type.addview)
        # TODO: Pass a dictionary containing dummy form inputs from the add
        # form.
        # Note: if the portlet has a NullAddForm, simply call
        # addview() instead of the next line.
        addview.createAndAdd(data={})
        self.assertEquals(len(mapping), 1)
        self.failUnless(
            isinstance(mapping.values()[0], scriptbox.Assignment))

    def test_invoke_edit_view(self):
        """The edit-view adapter resolves to the portlet's EditForm."""
        # NOTE: This test can be removed if the portlet has no edit form
        mapping = PortletAssignmentMapping()
        request = self.folder.REQUEST
        mapping['foo'] = scriptbox.Assignment()
        editview = getMultiAdapter((mapping['foo'], request), name='edit')
        self.failUnless(isinstance(editview, scriptbox.EditForm))

    def test_obtain_renderer(self):
        """A renderer multi-adapter can be looked up for the assignment."""
        context = self.folder
        request = self.folder.REQUEST
        view = self.folder.restrictedTraverse('@@plone')
        manager = getUtility(
            IPortletManager, name='plone.rightcolumn', context=self.portal)
        # TODO: Pass any keyword arguments to the Assignment constructor
        assignment = scriptbox.Assignment()
        renderer = getMultiAdapter(
            (context, request, view, manager, assignment), IPortletRenderer)
        self.failUnless(isinstance(renderer, scriptbox.Renderer))
class TestRenderer(TestCase):
    """Tests rendering of the ScriptBox portlet."""

    def afterSetUp(self):
        self.setRoles(('Manager', ))

    def renderer(self, context=None, request=None, view=None, manager=None,
                 assignment=None):
        """Build a portlet renderer, defaulting every collaborator to a
        sensible test fixture when not supplied."""
        ctx = context or self.folder
        req = request or self.folder.REQUEST
        vw = view or self.folder.restrictedTraverse('@@plone')
        mgr = manager or getUtility(
            IPortletManager, name='plone.rightcolumn', context=self.portal)
        # TODO: Pass any default keyword arguments to the Assignment
        # constructor.
        assign = assignment or scriptbox.Assignment()
        return getMultiAdapter(
            (ctx, req, vw, mgr, assign), IPortletRenderer)

    def test_render(self):
        """update() runs without error on a freshly built renderer."""
        # TODO: Pass any keyword arguments to the Assignment constructor.
        rend = self.renderer(
            context=self.portal, assignment=scriptbox.Assignment())
        rend = rend.__of__(self.folder)
        rend.update()
        #output = rend.render()
        # TODO: Test output
def test_suite():
    """Assemble the suite from both test case classes."""
    from unittest import TestSuite, makeSuite
    suite = TestSuite()
    for case_class in (TestPortlet, TestRenderer):
        suite.addTest(makeSuite(case_class))
    return suite
| [
"andy@mingtak.com.tw"
] | andy@mingtak.com.tw |
8a327a97a4b87884798af8ffd29a87e5035858bb | e3fd35a8443aaf2f293ae03a5f6c819046a4dd21 | /leetcode-python/medium/_1641_count_sorted_vowel/test_solution.py | b52e872dbdeea6eaf006b9f0636a6319ac82773a | [] | no_license | hieutran106/leetcode-ht | 2223ea6bcd459c2cdbc33344c0ff69df7f8a3c7f | 8332eb20e613f82cda2e326218154c7803a32403 | refs/heads/main | 2023-08-09T02:52:41.360360 | 2023-07-27T10:12:28 | 2023-07-27T10:12:28 | 234,890,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | import unittest
from .solution import Solution
class MyTestCase(unittest.TestCase):
    """Checks Solution.countVowelStrings against known answers
    (LeetCode 1641)."""
    def test_case_1(self):
        self.assertEqual(Solution().countVowelStrings(1), 5)
    def test_case_2(self):
        self.assertEqual(Solution().countVowelStrings(2), 15)
    def test_case_3(self):
        self.assertEqual(Solution().countVowelStrings(33), 66045)
if __name__ == '__main__':
    # Run this module's tests directly with the unittest CLI runner.
    unittest.main()
| [
"hieutran106@gmail.com"
] | hieutran106@gmail.com |
b72e7e5a131348264207ff7de8be597ae68d8009 | e6d4a87dcf98e93bab92faa03f1b16253b728ac9 | /algorithms/python/reachaNumber/reachaNumber.py | 9075d05dd1c0c4837b48b158680c41697784530c | [] | no_license | MichelleZ/leetcode | b5a58e1822e3f6ef8021b29d9bc9aca3fd3d416f | a390adeeb71e997b3c1a56c479825d4adda07ef9 | refs/heads/main | 2023-03-06T08:16:54.891699 | 2023-02-26T07:17:47 | 2023-02-26T07:17:47 | 326,904,500 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Source: https://leetcode.com/problems/reach-a-number/
# Author: Miao Zhang
# Date: 2021-03-07
class Solution:
    def reachNumber(self, target: int) -> int:
        """Minimum number of moves to land exactly on *target* from 0,
        where move i covers i steps in either direction.

        Keep stepping until the cumulative distance is at/past |target|
        AND the overshoot is even: an even overshoot d can be cancelled
        by flipping the sign of the single earlier move of size d/2.
        """
        goal = abs(target)  # the line is symmetric, so work with |target|
        moves = 0
        total = 0
        while total < goal or (total - goal) % 2 == 1:
            moves += 1
            total += moves
        return moves
| [
"zhangdaxiaomiao@163.com"
] | zhangdaxiaomiao@163.com |
48a0e9927bd0933b73741dc42fec8437ed601ce8 | d700b9ad1e0b7225871b65ce0dafb27fb408c4bc | /students/k3343/practical_works/Nazarenko_Uliana/Pr_1/project_first_app/views.py | cea5bffc122bd6851cc0d5a90e2d289581c93437 | [
"MIT"
] | permissive | TonikX/ITMO_ICT_WebProgramming_2020 | a8c573ed467fdf99327777fb3f3bfeee5714667b | ba566c1b3ab04585665c69860b713741906935a0 | refs/heads/master | 2023-01-11T22:10:17.003838 | 2020-10-22T11:22:03 | 2020-10-22T11:22:03 | 248,549,610 | 10 | 71 | MIT | 2023-01-28T14:04:21 | 2020-03-19T16:18:55 | Python | UTF-8 | Python | false | false | 338 | py | from django.http import Http404
from django.shortcuts import render
from .models import Car_owner
def detail(request, Car_owner_id):
    """Render the 'owner.html' detail page for a single Car_owner.

    Raises Http404 when no Car_owner with the given primary key exists.
    """
    try:
        p = Car_owner.objects.get(pk=Car_owner_id)
    except Car_owner.DoesNotExist:
        raise Http404("Car owner does not exist")
    return render(request, 'owner.html', {'Car_owner': p})
| [
"noreply@github.com"
] | TonikX.noreply@github.com |
71ae6c4ee53126de3dbbf698b8cc90d5edb3a6c5 | 91a2ecfaf5dc6c917ec2fda31f56291103f68ceb | /tests/data/test_preprocess.py | bfc9de96416567f7d9974cb413f0aa58a742d4f7 | [
"BSD-3-Clause"
] | permissive | MyrtleSoftware/myrtlespeech | 635d1d16d1bd60fb07a4d30edbf9acb61786c13f | 8522048fd37744ffa06827a0cbd202b839a15453 | refs/heads/master | 2021-07-16T14:55:00.479967 | 2020-03-20T14:33:15 | 2020-03-20T14:33:15 | 192,501,300 | 12 | 1 | NOASSERTION | 2020-03-20T14:33:17 | 2019-06-18T08:44:33 | Python | UTF-8 | Python | false | false | 3,114 | py | from typing import Dict
import hypothesis.strategies as st
import pytest
import torch
from hypothesis import given
from myrtlespeech.data.preprocess import AddSequenceLength
from myrtlespeech.data.preprocess import SpecAugment
from tests.utils.utils import tensors
# Fixtures and Strategies -----------------------------------------------------
@st.composite
def spec_augments(draw) -> st.SearchStrategy[SpecAugment]:
    """Returns a SearchStrategy for SpecAugment."""
    # Draw each constructor argument from a small integer range; zero is
    # included so "no masking" configurations are exercised too.
    kwargs: Dict = {}
    kwargs["feature_mask"] = draw(st.integers(0, 30))
    kwargs["time_mask"] = draw(st.integers(0, 30))
    kwargs["n_feature_masks"] = draw(st.integers(0, 3))
    kwargs["n_time_masks"] = draw(st.integers(0, 3))
    spec_augment = SpecAugment(**kwargs)
    return spec_augment
# Tests -----------------------------------------------------------------------
@given(data=st.data(), tensor=tensors(min_n_dims=1))
def test_add_sequence_length_returns_correct_seq_len(
    data, tensor: torch.Tensor
) -> None:
    """Ensures AddSequenceLength returns correct sequence length."""
    # Pick a valid dimension of the drawn tensor to treat as "length".
    length_dim = data.draw(
        st.integers(min_value=0, max_value=len(tensor.size()) - 1)
    )
    add_seq_len = AddSequenceLength(length_dim=length_dim)
    out, seq_len = add_seq_len(tensor)
    # The transform must pass the tensor through unchanged...
    assert torch.all(out == tensor)
    # ...and report the size of the chosen dimension as the length.
    assert seq_len == torch.tensor([tensor.size(length_dim)])
# SpecAugment ---------------------------
@given(
    spec_augment=spec_augments(),
    tensor=tensors(
        min_n_dims=3, max_n_dims=3, min_dim_size=1, max_dim_size=100
    ),
)
def test_spec_augment_returns_tensor_same_shape(
    spec_augment: SpecAugment, tensor: torch.Tensor
) -> None:
    """Ensures SpecAugment returns a tensor with the same shape."""
    # Masking only zeroes values; it must never resize the input.
    out = spec_augment(tensor)
    assert out.size() == tensor.size()
@given(
    sa=spec_augments(),
    tensor=tensors(
        min_n_dims=3, max_n_dims=3, min_dim_size=1, max_dim_size=100
    ),
)
def test_spec_augment_n_zeros_less_than_max(
    sa: SpecAugment, tensor: torch.Tensor
) -> None:
    """Ensures number of parameters zeroed by SpecAugment is less than the max.
    The maximum number is:
        channels*(
            n_feature_masks*feature_mask*time_steps +
            n_time_masks*time_mask*features
        )
    """
    tensor.fill_(1)  # ensure no zeros before SpecAugment applied
    # Layout used by this test: (channels, features, time_steps).
    channels, features, time_steps = tensor.size()
    out = sa(tensor)
    # Upper bound on zeroed entries (note: 'max' shadows the builtin here).
    max = sa.n_feature_masks * sa.feature_mask * time_steps
    max += sa.n_time_masks * sa.time_mask * features
    max *= channels
    assert (out == 0).sum() <= max
def test_spec_augment_raises_value_error_invalid_params() -> None:
    """Ensures ValueError raised when parameters less than zero."""
    invalid_param_sets = [
        dict(feature_mask=-1, time_mask=1),
        dict(feature_mask=1, time_mask=-1),
        dict(feature_mask=1, time_mask=1, n_feature_masks=-1),
        dict(feature_mask=1, time_mask=1, n_time_masks=-1),
    ]
    for params in invalid_param_sets:
        with pytest.raises(ValueError):
            SpecAugment(**params)
| [
"sam@samgd.com"
] | sam@samgd.com |
7a53a09278a9790ba83079630db2571f94f8a9fa | b252671086f81fdc4b0abb7ddc502c60b8585810 | /pdbremix/util.py | 8db5d05ac8a088873f50ce89a06fc7dbc51521c9 | [
"MIT"
] | permissive | tcardlab/pdbremix | 96936915123a551307223d5150d749808bc3294e | bc7afca41da08f6166e518976b1c6dc40e35d78e | refs/heads/master | 2020-05-18T14:28:14.607579 | 2019-05-01T21:26:22 | 2019-05-01T21:26:22 | 184,396,705 | 0 | 0 | MIT | 2019-05-01T09:46:55 | 2019-05-01T09:46:55 | null | UTF-8 | Python | false | false | 5,763 | py | # encoding: utf-8
__doc__ = """
Utility functions.
- directory handling
- pathname mangling
- running shell commands
- timer
- file checking
"""
import os
import stat
import tempfile
import copy
import re
import glob
import json
import subprocess
import time
import math
import pprint
import sys
def re_glob(dir_tag, reg_exp=""):
  """Glob with *dir_tag*, then keep only names matching *reg_exp*."""
  return [name for name in glob.glob(dir_tag) if re.search(reg_exp, name)]
def check_dir(dirname):
  """Create *dirname* (and parents) unless it exists or is blank."""
  if not dirname.strip():
    return
  if not os.path.isdir(dirname):
    os.makedirs(dirname)
def goto_dir(new_dir):
  """chdir into *new_dir*, creating it first if needed; blank is a no-op."""
  if not new_dir.strip():
    return
  if not os.path.isdir(new_dir):
    os.makedirs(new_dir)
  os.chdir(new_dir)
def relpath(path):
  """Return *path* expressed relative to the current working directory.

  An empty string maps to an empty string.  Assumes *path* has a
  directory component (os.path.relpath('') would raise otherwise).
  """
  if path == '':
    return ''
  head, tail = os.path.split(path)
  head = os.path.relpath(head, os.getcwd())
  if head in ('', '.'):
    return tail
  return os.path.join(head, tail)
def temp_fname(suffix=''):
  """Reserve a unique 'tmp-*' file name in the cwd and return the bare
  name; the placeholder file itself is removed before returning."""
  handle, path = tempfile.mkstemp(suffix, 'tmp-', '.')
  os.close(handle)
  os.unlink(path)
  return os.path.basename(path)
def fname_variant(fname):
  """Return the first 'root-N.ext' variant of *fname* (N = 1, 2, ...)
  that does not already exist on disk."""
  root, ext = os.path.splitext(fname)
  counter = 0
  while True:
    counter += 1
    candidate = "%s-%d%s" % (root, counter, ext)
    if not os.path.isfile(candidate):
      return candidate
def clean_fname(*fnames):
  """Best-effort delete of files and directory trees.

  Directories are removed recursively; missing names and failed file
  removals are silently ignored (the original contract).  Only OSError
  is swallowed now -- the old bare ``except`` also ate KeyboardInterrupt
  and SystemExit.
  """
  for fname in fnames:
    if os.path.isdir(fname):
      # Depth-first walk so files are removed before their parents.
      for root, dirs, files in os.walk(fname, topdown=False):
        for name in files:
          os.remove(os.path.join(root, name))
        for name in dirs:
          os.rmdir(os.path.join(root, name))
      os.rmdir(fname)
    elif os.path.isfile(fname):
      try:
        os.remove(fname)
      except OSError:
        pass
def get_floats_from_string(s):
  """Extract every (optionally signed) decimal number in *s* as floats."""
  matches = re.findall(r'[-+]?(?:[0-9]*\.[0-9]+|[0-9]+)', s)
  return [float(m) for m in matches]
def write_dict(fname, d, indent=2):
  """Pretty-print dict *d* to *fname* so read_dict can eval it back.

  Fixes two defects: the ``indent`` parameter was accepted but ignored
  (indent=2 was hard-coded), and the output file handle was never
  closed.
  """
  with open(fname, "w") as stream:
    pprint.pprint(d, indent=indent, stream=stream)
def read_dict(fname):
  """Read a Python-literal dictionary from *fname*.

  Raises Exception("Couldn't parse ...") when the file cannot be read or
  evaluated, and Exception("Not a dictionary ...") when it evaluates to
  a non-dict.  The original swallowed its own 'Not a dictionary' error
  inside the broad except and mislabelled it as a parse failure; it also
  never closed the file.

  NOTE: uses eval(), so only feed it trusted files.
  """
  try:
    with open(fname) as f:
      txt = f.read()
    d = eval(txt)
  except Exception:
    raise Exception('Couldn\'t parse dictionary in ' + fname)
  if not isinstance(d, dict):
    raise Exception('Not a dictionary in ' + fname)
  return d
def is_same_dict_in_file(d, fname):
  """True when *fname* parses (via read_dict) to a dict equal to *d*.

  Any failure -- missing file, parse error -- counts as "not the same".
  """
  try:
    return read_dict(fname) == d
  except:
    return False
def words_in_file(fname):
  """Return every whitespace-separated token in *fname*, in file order.

  Uses a with-statement; the original leaked the file handle from
  ``open(fname).readlines()``.
  """
  with open(fname) as f:
    return [word for line in f for word in line.split()]
def elapsed_time_str(time):
  """Format a duration in seconds as '<raw> [H:MM:|M:]SS.sss'.

  The leading raw value is kept for backward compatibility with the
  original output format.

  BUG FIX: in the hour branch the minutes field was computed as
  math.fmod(time, 60.0) -- the *seconds* remainder -- so 3700 s rendered
  as "1:40:40.00s" instead of "1:01:40.00s".  It is now the minute
  remainder, math.fmod(minute, 60.0).
  """
  s = str(time) + ' '
  minute = math.floor(time / 60.0)
  if minute > 60:
    hour = math.floor(minute / 60.0)
    s += "%.f:%02.f:" % (hour, math.fmod(minute, 60.0))
  elif minute >= 1:
    s += "%.f:" % minute
  sec = math.fmod(time, 60.0)
  if sec < 0.01:
    # Sub-centisecond values need more decimal places to be visible.
    s += "%07.4fs" % sec
  else:
    s += "%05.2fs" % sec
  return s
class Timer:
  """Simple wall-clock stopwatch.

  elapsed() reports the live time since start() until stop() freezes it.
  """
  def __init__(self):
    self._elapsed = 0
    self._start = time.time()
  def start(self):
    """Restart the stopwatch and clear any frozen value."""
    self._elapsed = 0
    self._start = time.time()
  def stop(self):
    """Freeze the elapsed time at the current instant."""
    self._elapsed = time.time() - self._start
  def elapsed(self):
    """Seconds since start(), or the frozen value after stop()."""
    if self._elapsed:
      return self._elapsed
    return time.time() - self._start
  def str(self):
    """Human-readable elapsed time (delegates to elapsed_time_str)."""
    return elapsed_time_str(self.elapsed())
  def __str__(self):
    return self.str()
def val_range(start, end, step):
  """Inclusive arithmetic progression from *start* to *end* by *step*.

  Built by repeated addition (matching the original), so float steps
  accumulate rounding exactly as before.
  """
  vals = []
  current = start
  while current <= end:
    vals.append(current)
    current += step
  return vals
class FileException(Exception):
  """Raised when a required file, output file or executable is missing."""
  pass
def check_files(*fnames):
  """
  Checks for existence of fnames. Raises error if not found.
  """
  missing = [f for f in fnames if not os.path.isfile(f)]
  if missing:
    # Report the first missing name, matching the original message.
    raise FileException("Can't find {}".format(missing[0]))
def which(program, other_binary_dirs=[]):
  """
  Reproduces Unix 'which' and looks in other_binary_dirs
  """
  def is_binary(fpath):
    return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

  dirname, _ = os.path.split(program)
  if dirname:
    # An explicit path was given: accept it only if it is executable.
    return program if is_binary(program) else None
  search_dirs = os.environ["PATH"].split(os.pathsep) + list(other_binary_dirs)
  for directory in search_dirs:
    candidate = os.path.join(directory, program)
    if is_binary(candidate):
      return candidate
  return None
def check_program(program):
  """Raise FileException unless *program* resolves via which()."""
  if not which(program):
    raise FileException("Can't find executable: " + program)
def check_output(fname, bad_words=[]):
  """Sanity-check a program's output file.

  Raises FileException when *fname* is missing, and Exception when it is
  empty or any line contains one of *bad_words* (checked word-by-word,
  reporting the 1-based line number).  The file is now closed via a
  with-statement; the original leaked the handle.
  """
  if not os.path.isfile(fname):
    raise FileException("Can't find output file: " + fname)
  with open(fname) as f:
    txt = f.read()
  if txt.strip() == '':
    raise Exception("Empty file: " + fname)
  lines = txt.splitlines()  # hoisted out of the bad_words loop
  for bad_word in bad_words:
    for i_line, line in enumerate(lines):
      if bad_word in line:
        raise Exception(
            "Output indicates %s error in line %d: %s" % (bad_word, i_line+1, fname))
def run_with_output(cmd):
  """Run *cmd* through the shell and return its captured stdout (bytes).

  Uses communicate() so the child process is reaped and stderr is
  drained; the original only read stdout, which leaves a zombie process
  and can deadlock when the child fills the stderr pipe.
  """
  p = subprocess.Popen(
      cmd,
      shell=True,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE)
  stdout, _ = p.communicate()
  return stdout
def run_with_output_file(cmd, out_fname=None, in_fname=None):
  """Run *cmd* through the shell, optionally wiring stdin from
  *in_fname* and capturing stdout+stderr into ``out_fname + '.log'``.

  When *out_fname* is given, also writes an executable
  ``out_fname + '.sh'`` script that reproduces the run, and an
  ``out_fname + '.time'`` file with the elapsed wall-clock time.
  """
  in_f = None
  out_f = None
  if in_fname and os.path.isfile(in_fname):
    in_f = open(in_fname)
  if out_fname:
    log_file = out_fname + '.log'
    out_f = open(log_file, 'w')
    # Record an equivalent one-liner shell script for reproducing the run.
    sh_file = out_fname + '.sh'
    sh_cmd = cmd
    if in_f:
      sh_cmd += ' < ' + in_fname
    if log_file:
      sh_cmd += ' &> ' + log_file
    open(sh_file, 'w').write(sh_cmd)
    # Make the .sh script runnable by its owner.
    os.chmod(sh_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
  stopwatch = Timer()
  subprocess.call(
      cmd,
      shell=True,
      stdin=in_f,
      stdout=out_f,
      stderr=out_f)
  if out_fname:
    stopwatch.stop()
    open(out_fname + '.time', 'w').write(stopwatch.str())
    out_f.close()
  # NOTE(review): in_f is never closed explicitly -- relies on GC.
  # Confirm whether an explicit close is wanted here.
| [
"apposite@gmail.com"
] | apposite@gmail.com |
e1bb31c692a6d9a3d3350b50e04ef4bbc35e3b46 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03069/s161415384.py | a0e703281bf318e53f83aa4bd48bd498a925271d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | n = int(input())
s = input()
# wn / bn: total counts of white ('.') and black ('#') stones.
wn = s.count('.')
bn = n - wn
# Baseline: repaint every stone one single colour.
ans = min(wn,bn)
# lw / lb: whites and blacks seen so far in the prefix s[:i+1].
lw = 0
lb = 0
for i in range(n):
    if s[i]=='.':
        lw += 1
    else:
        lb += 1
    # Cost of turning the string into '.'*prefix + '#'*suffix with the
    # split after position i: repaint the lb blacks in the prefix plus
    # the (wn - lw) whites remaining in the suffix.
    ans = min(ans, lb + (wn - lw))
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
362745ea8dbfbca945350ed9d1c42392b3ed5825 | dfd942e65c5d574cde590eee1b309d06269050c8 | /jsoncompare | 4b6e607e7c32e5c99c73a12db50d2bdf9dde3020 | [] | no_license | pfuntner/fun | 4ab552569b72806dad4b6b61f02270b63869c931 | f27fde6a55b88ad07d27c32f4465ff416bde57d1 | refs/heads/master | 2023-07-08T09:44:50.575213 | 2023-07-04T11:03:20 | 2023-07-04T11:03:20 | 168,697,624 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | #! /usr/bin/env python2
import copy
def test(expression):
  # Evaluate *expression* in this module's globals and print it with its
  # repr.  Python 2 print statement; eval is safe here because the only
  # inputs are the hard-coded expressions below.
  print "{expression}: {value}".format(value=repr(eval(expression)), **locals())

# x and y hold equal data; their inner dicts merely list keys in a
# different order, which is irrelevant to dict equality.
x = [
  1,
  {
    2.1: "foo",
    2.2: None,
  },
  3.0,
]

y = [
  1,
  {
    2.2: None,
    2.1: "foo",
  },
  3.0,
]

# Deep copy: z is an independent but equal structure.
z = copy.deepcopy(x)

# NOTE(review): sorted(z) orders an int, a dict and a float -- legal in
# Python 2 (arbitrary cross-type ordering), but a TypeError in Python 3.
test("x")
test("y")
test("z")
test("sorted(z)")
test("x == y")
test("x == z")
test("x == y == z")
test("x == sorted(z)")
| [
"jpfuntne@cisco.com"
] | jpfuntne@cisco.com | |
145a416a170d64b9f49c25984aee455f123ac6fa | c01e3615698ee16575febf73d925445d52972939 | /src/JINTFP/logical/primitive/__init__.py | 211697e7b640a5ad7a66ec240c2c996ee03c8455 | [] | no_license | grasshopperTrainer/CADengine | 929fe81134219713a9881b10803c0e4598f19b8b | 1dc3987915f16fcbf8a34b1fef51953af56303c5 | refs/heads/master | 2023-04-21T15:49:25.345950 | 2021-05-12T13:03:32 | 2021-05-12T13:03:32 | 245,972,066 | 1 | 1 | null | 2021-05-12T12:52:22 | 2020-03-09T07:40:46 | Python | UTF-8 | Python | false | false | 183 | py | from .._logical_nodes import *
class Equal(LogicalNode):
    """Logical node that tests its two inputs for equality."""
    # Declarative node ports.  Input/Output are descriptors from the
    # JINTFP node framework (.._logical_nodes) -- their exact wiring
    # semantics are defined there, not here.
    in0_a = Input()
    in1_b = Input()
    out0_result = Output()
    def calculate(self, a, b):
        # One value per declared output port, hence the 1-tuple.
        return (a == b,)
| [
"grasshoppertrainer@gmail.com"
] | grasshoppertrainer@gmail.com |
2a2177ba2b68b5c3a2b30b1831af2e11a42dc3ba | f5fbdb4080a38e20e7a165e07b8328cbb37270cb | /swimercamp0/comments/api/views.py | 0216d157def2e70dabe7dfeae3ad52c2b5034c49 | [] | no_license | ayoub-smayen/socialswimerv2 | 390d3742d0282c3ff7eee405f4fffd6adf8e69dc | 536b6c1b7f51e2f6ac449657a38a89cf49be7094 | refs/heads/master | 2023-06-29T16:42:07.153060 | 2021-08-01T20:05:48 | 2021-08-01T20:05:48 | 391,725,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from rest_framework.generics import DestroyAPIView, ListCreateAPIView
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from comments.models import Comment
from .permissions import IsCommenterOrReadOnly
from .serializers import CommentSerializer
class CommentListCreateAPIView(ListCreateAPIView):
    """
    View that returns comments list of a single subject & handles the creation
    of comments & returns data back.
    """
    serializer_class = CommentSerializer
    permission_classes = [IsAuthenticatedOrReadOnly]
    def get_queryset(self, *args, **kwargs):
        # Filter by the ?subject_slug=... query parameter; falls back to
        # '' when absent (Comment.get_comments defines what that yields).
        subject_slug = self.request.GET.get('subject_slug', '')
        queryset_list = Comment.get_comments(subject_slug)
        return queryset_list
    def perform_create(self, serializer):
        # Stamp the authenticated requester as the comment's author.
        serializer.save(commenter=self.request.user)
class CommentDestroyAPIView(DestroyAPIView):
    """
    View that deletes a comment, but only when the requesting user is its
    commenter (enforced by IsCommenterOrReadOnly).
    """
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer
    permission_classes = [IsAuthenticatedOrReadOnly, IsCommenterOrReadOnly]
    # Look the comment up by the 'id' URL kwarg.
    lookup_field = 'id'
    lookup_url_kwarg = 'id'
| [
"ayoubjobs.2019@gmail.com"
] | ayoubjobs.2019@gmail.com |
def insertSort(a):
    """In-place insertion sort that prints the list after every outer
    pass -- the step-by-step trace format required by AIZU ALDS1_1_A."""
    for pos in range(len(a)):
        key = a[pos]
        cursor = pos - 1
        # Shift larger elements one slot right, then drop the key in.
        while cursor >= 0 and a[cursor] > key:
            a[cursor + 1] = a[cursor]
            cursor -= 1
        a[cursor + 1] = key
        print(" ".join(str(value) for value in a))
if __name__ == '__main__':
    # AIZU input format: line 1 is N, line 2 holds the N integers.
    n = int(input())
    a = [int(x) for x in input().split()]
    insertSort(a)
| [
"god4bid@hear.to"
] | god4bid@hear.to |
3b6d79b9ee68bad6addcb58240ff42d44e26a17d | 95267d92a6438665cf7848de229bea691d537f81 | /test-ferma.py | e3ddda4936707d0ac1667ad55c095f48566657bb | [
"MIT"
] | permissive | webkadiz/olympiad-problems | 2874eb1846c59778e70bcdc9550b3484bc3aa9cc | b3a8a3e83d0930947a89ec42e86e3058f464ea40 | refs/heads/master | 2022-11-15T23:48:39.854546 | 2022-11-04T12:55:48 | 2022-11-04T12:55:48 | 228,297,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | import random
import math
# The integer to primality-test, read from stdin.
n = int(input())
def test_ferma(n):
if n <= 1:
return False
r = 10 ** 6
i = 0
while i < 100:
rand = random.randint(1, r)
if math.gcd(n, rand) != 1:
continue
if rand ** (n - 1) % n != 1:
return False
i += 1
return True
print(test_ferma(n)) | [
"webkadiz@gmail.com"
] | webkadiz@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.