blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
50029d742fdf872199ac05d382e8a46edf30c565 | d1e4f29e583ee964d63bc48554eaa73d67d58eb2 | /analytics/migrations/0012_add_on_delete.py | 24b2d5421292422475152c04fa1d2adc982982f8 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | hygolei/zulip | 299f636f9238f50b0d2746f1c371748f182f1f4e | 39fe66ab0824bc439929debeb9883c3046c6ed70 | refs/heads/master | 2023-07-11T22:50:27.434398 | 2021-08-09T10:07:35 | 2021-08-09T10:07:35 | 375,401,165 | 1 | 1 | Apache-2.0 | 2021-08-09T10:07:36 | 2021-06-09T15:20:09 | Python | UTF-8 | Python | false | false | 1,300 | py | # Generated by Django 1.11.6 on 2018-01-29 08:14
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch the ``anomaly`` FK of every analytics count model to SET_NULL."""

    dependencies = [
        ("analytics", "0011_clear_analytics_tables"),
    ]

    # The identical AlterField is applied to all four count models, so the
    # operation list is built with a comprehension instead of four copies.
    operations = [
        migrations.AlterField(
            model_name=count_model,
            name="anomaly",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="analytics.Anomaly",
            ),
        )
        for count_model in (
            "installationcount",
            "realmcount",
            "streamcount",
            "usercount",
        )
    ]
| [
"tabbott@zulipchat.com"
] | tabbott@zulipchat.com |
16223a5f2a1c413d52701ed8ee134cfd53475775 | 7a2d2cfbe99a13920e55e462bd40627e34d18f23 | /tests/openbb_terminal/portfolio/portfolio_optimization/conftest.py | 7237236327ef8a8f4b91f9b8ec13ffb3523f7ebf | [
"MIT"
] | permissive | conrad-strughold/GamestonkTerminal | b9ada627929dbc1be379f19c69b34e24764efcff | c9aa674d979a7c7fd7f251410ceaa1c8a4ef2e6e | refs/heads/main | 2023-06-24T02:59:45.096493 | 2023-05-16T15:15:20 | 2023-05-16T15:15:20 | 342,313,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import pytest
from _pytest.nodes import Node
def pytest_runtest_setup(item: Node):
    """Skip optimization tests unless pytest was invoked with --optimization."""
    if not item.config.getoption("--optimization"):
        # Pass the reason positionally: the ``msg=`` keyword used previously
        # is deprecated (and later removed) in modern pytest releases.
        pytest.skip("Runs only with option : --optimization")
| [
"noreply@github.com"
] | conrad-strughold.noreply@github.com |
8d50dc2df16514bc977ef796e90045f9ebe1b83b | be24b5f37823125b2b901c0029175bfb2f25fb0e | /tests/homework/test_homework6.py | 4c1972342f82f74bfdb69cc577b4e437d09f7552 | [
"MIT"
] | permissive | acc-cosc-1336/cosc-1336-spring-2018-Miguelh1997 | 1bd75c51e72431037a46a1b3079d7695c41920ce | ac4b0405c4070758d0fc07458d4dca8a8a0313de | refs/heads/master | 2021-05-11T09:11:41.887630 | 2018-05-12T03:11:38 | 2018-05-12T03:11:38 | 118,070,058 | 0 | 1 | MIT | 2018-05-12T03:16:17 | 2018-01-19T03:13:02 | Python | UTF-8 | Python | false | false | 1,725 | py | import unittest
from src.homework.homework6 import (get_point_mutation, get_dna_complement, transcribe_dna_into_rna, get_gc_content)
#write import statement for homework 6 file
class TestHomework6(unittest.TestCase):
    """Exercises the DNA helper functions from homework 6."""

    def test_sample(self):
        """Sanity check that the test harness itself runs."""
        self.assertEqual(1, 1)

    def test_get_point_mutation_GAGCCTACTAACGGGAT(self):
        """Two strands of equal length differ at exactly 7 positions."""
        mutations = get_point_mutation('GAGCCTACTAACGGGAT', 'CATCGTAATGACGGCCT')
        self.assertEqual(7, mutations)

    def test_get_dna_complement_AAAACCCGGT(self):
        """The complement of AAAACCCGGT is ACCGGGTTTT."""
        complement = get_dna_complement('AAAACCCGGT')
        self.assertEqual('ACCGGGTTTT', complement)

    def test_transcribe_dna_into_rna_GATGGAACTTGACTACGTAAATT(self):
        """Transcription replaces every T with U."""
        rna = transcribe_dna_into_rna('GATGGAACTTGACTACGTAAATT')
        self.assertEqual('GAUGGAACUUGACUACGUAAAUU', rna)

    def test_get_gc_content(self):
        """GC content of the sample strand is the string '60.919540'."""
        gc = get_gc_content('CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGACTGGGAACCTGCGGGCAGTAGGTGGAAT')
        self.assertEqual('60.919540', gc)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
"noreply@github.com"
] | acc-cosc-1336.noreply@github.com |
d1e4c1312d44ba6a8cf0909a1585f9a29083c265 | 55540f3e86f1d5d86ef6b5d295a63518e274efe3 | /toolchain/riscv/MSYS/python/Lib/test/test_tools/test_pindent.py | 2635fd68b44d5cfad1cb73de7c76c8ab3cc16301 | [
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"OpenSSL",
"Python-2.0",
"LicenseRef-scancode-newlib-historical",
"TCL",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | bouffalolab/bl_iot_sdk | bc5eaf036b70f8c65dd389439062b169f8d09daa | b90664de0bd4c1897a9f1f5d9e360a9631d38b34 | refs/heads/master | 2023-08-31T03:38:03.369853 | 2023-08-16T08:50:33 | 2023-08-18T09:13:27 | 307,347,250 | 244 | 101 | Apache-2.0 | 2023-08-28T06:29:02 | 2020-10-26T11:16:30 | C | UTF-8 | Python | false | false | 8,963 | py | """Tests for the pindent script in the Tools directory."""
import os
import sys
import unittest
import subprocess
import textwrap
from test import support
from test.support.script_helper import assert_python_ok
from test.test_tools import scriptsdir, skip_if_missing
skip_if_missing()
class PindentTests(unittest.TestCase):
    """Round-trip tests for Tools/scripts/pindent.py.

    pindent has three modes: ``-c`` closes blocks by appending "# end ..."
    comments, ``-d`` deletes those comments again, and ``-r`` re-indents
    source whose indentation was lost, relying on the end comments.
    """

    script = os.path.join(scriptsdir, 'pindent.py')

    def assertFileEqual(self, fn1, fn2):
        """Assert that two files have identical content, line by line."""
        with open(fn1) as f1, open(fn2) as f2:
            self.assertEqual(f1.readlines(), f2.readlines())

    def pindent(self, source, *args):
        """Run pindent.py with *args*, feed *source* on stdin, return stdout."""
        with subprocess.Popen(
                (sys.executable, self.script) + args,
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                universal_newlines=True) as proc:
            out, err = proc.communicate(source)
        self.assertIsNone(err)
        return out

    def lstriplines(self, data):
        """Strip all leading whitespace, simulating source with lost indentation."""
        return '\n'.join(line.lstrip() for line in data.splitlines()) + '\n'

    def test_selftest(self):
        """pindent.py must round-trip (-c/-d/-r) its own source file."""
        self.maxDiff = None
        with support.temp_dir() as directory:
            data_path = os.path.join(directory, '_test.py')
            with open(self.script) as f:
                closed = f.read()
            with open(data_path, 'w') as f:
                f.write(closed)

            # -d strips the end comments in place and keeps a ~ backup.
            rc, out, err = assert_python_ok(self.script, '-d', data_path)
            self.assertEqual(out, b'')
            self.assertEqual(err, b'')
            backup = data_path + '~'
            self.assertTrue(os.path.exists(backup))
            with open(backup) as f:
                self.assertEqual(f.read(), closed)
            with open(data_path) as f:
                clean = f.read()
            compile(clean, '_test.py', 'exec')
            self.assertEqual(self.pindent(clean, '-c'), closed)
            self.assertEqual(self.pindent(closed, '-d'), clean)

            # -c re-adds the end comments, restoring the original text.
            rc, out, err = assert_python_ok(self.script, '-c', data_path)
            self.assertEqual(out, b'')
            self.assertEqual(err, b'')
            with open(backup) as f:
                self.assertEqual(f.read(), clean)
            with open(data_path) as f:
                self.assertEqual(f.read(), closed)

            # -r reconstructs indentation from the end comments.
            broken = self.lstriplines(closed)
            with open(data_path, 'w') as f:
                f.write(broken)
            rc, out, err = assert_python_ok(self.script, '-r', data_path)
            self.assertEqual(out, b'')
            self.assertEqual(err, b'')
            with open(backup) as f:
                self.assertEqual(f.read(), broken)
            with open(data_path) as f:
                indented = f.read()
            compile(indented, '_test.py', 'exec')
            self.assertEqual(self.pindent(broken, '-r'), indented)

    def pindent_test(self, clean, closed):
        """Check -c, -d and -r round trips between *clean* and *closed*."""
        self.assertEqual(self.pindent(clean, '-c'), closed)
        self.assertEqual(self.pindent(closed, '-d'), clean)
        broken = self.lstriplines(closed)
        self.assertEqual(self.pindent(broken, '-r', '-e', '-s', '4'), closed)

    def test_statements(self):
        """Every compound-statement form gets a matching end comment."""
        clean = textwrap.dedent("""\
            if a:
                pass

            if a:
                pass
            else:
                pass

            if a:
                pass
            elif:
                pass
            else:
                pass

            while a:
                break

            while a:
                break
            else:
                pass

            for i in a:
                break

            for i in a:
                break
            else:
                pass

            try:
                pass
            finally:
                pass

            try:
                pass
            except TypeError:
                pass
            except ValueError:
                pass
            else:
                pass

            try:
                pass
            except TypeError:
                pass
            except ValueError:
                pass
            finally:
                pass

            with a:
                pass

            class A:
                pass

            def f():
                pass
            """)

        closed = textwrap.dedent("""\
            if a:
                pass
            # end if

            if a:
                pass
            else:
                pass
            # end if

            if a:
                pass
            elif:
                pass
            else:
                pass
            # end if

            while a:
                break
            # end while

            while a:
                break
            else:
                pass
            # end while

            for i in a:
                break
            # end for

            for i in a:
                break
            else:
                pass
            # end for

            try:
                pass
            finally:
                pass
            # end try

            try:
                pass
            except TypeError:
                pass
            except ValueError:
                pass
            else:
                pass
            # end try

            try:
                pass
            except TypeError:
                pass
            except ValueError:
                pass
            finally:
                pass
            # end try

            with a:
                pass
            # end with

            class A:
                pass
            # end class A

            def f():
                pass
            # end def f
            """)
        self.pindent_test(clean, closed)

    def test_multilevel(self):
        """Nested blocks receive end comments at each level."""
        clean = textwrap.dedent("""\
            def foobar(a, b):
               if a == b:
                  a = a+1
               elif a < b:
                  b = b-1
                  if b > a: a = a-1
               else:
                  print 'oops!'
            """)
        closed = textwrap.dedent("""\
            def foobar(a, b):
               if a == b:
                  a = a+1
               elif a < b:
                  b = b-1
                  if b > a: a = a-1
                  # end if
               else:
                  print 'oops!'
               # end if
            # end def foobar
            """)
        self.pindent_test(clean, closed)

    def test_preserve_indents(self):
        """Unusual indent widths (9 spaces, tabs) survive the round trip."""
        clean = textwrap.dedent("""\
            if a:
                     if b:
                              pass
            """)
        closed = textwrap.dedent("""\
            if a:
                     if b:
                              pass
                     # end if
            # end if
            """)
        self.assertEqual(self.pindent(clean, '-c'), closed)
        self.assertEqual(self.pindent(closed, '-d'), clean)
        broken = self.lstriplines(closed)
        # -s 9 re-indents with a 9-space step to match the fixture above.
        self.assertEqual(self.pindent(broken, '-r', '-e', '-s', '9'), closed)
        clean = textwrap.dedent("""\
            if a:
            \tif b:
            \t\tpass
            """)
        closed = textwrap.dedent("""\
            if a:
            \tif b:
            \t\tpass
            \t# end if
            # end if
            """)
        self.assertEqual(self.pindent(clean, '-c'), closed)
        self.assertEqual(self.pindent(closed, '-d'), clean)
        broken = self.lstriplines(closed)
        self.assertEqual(self.pindent(broken, '-r'), closed)

    def test_escaped_newline(self):
        """Backslash-continued headers are treated as a single statement."""
        clean = textwrap.dedent("""\
            class\\
            \\
                    A:
                def\
            \\
            f:
                pass
            """)
        closed = textwrap.dedent("""\
            class\\
            \\
                    A:
                def\
            \\
            f:
                pass
                # end def f
            # end class A
            """)
        self.assertEqual(self.pindent(clean, '-c'), closed)
        self.assertEqual(self.pindent(closed, '-d'), clean)

    def test_empty_line(self):
        """Blank lines inside a block do not confuse the end-comment placement."""
        clean = textwrap.dedent("""\
            if a:

                pass
            """)
        closed = textwrap.dedent("""\
            if a:

                pass
            # end if
            """)
        self.pindent_test(clean, closed)

    def test_oneline(self):
        """A one-line suite still gets an end comment on the next line."""
        clean = textwrap.dedent("""\
            if a: pass
            """)
        closed = textwrap.dedent("""\
            if a: pass
            # end if
            """)
        self.pindent_test(clean, closed)
# Support running this test file directly.
if __name__ == '__main__':
    unittest.main()
| [
"jczhang@bouffalolab.com"
] | jczhang@bouffalolab.com |
8e3381accfc766a875a987cabcb997c6987cb556 | 0e5f7fbea53b56ddeb0905c687aff43ae67034a8 | /src/resource/script/helper/cafm_api/RequestCheckData.py | 1dfdf090a7eceda36ead35ad6b1aba6fac678a09 | [] | no_license | arkanmgerges/cafm.identity | 359cdae2df84cec099828719202b773212549d6a | 55d36c068e26e13ee5bae5c033e2e17784c63feb | refs/heads/main | 2023-08-28T18:55:17.103664 | 2021-07-27T18:50:36 | 2021-07-27T18:50:36 | 370,453,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
@author: Arkan M. Gerges<arkan.m.gerges@gmail.com>
"""
class RequestCheckData:
    """Bundle of options describing how one API request's result is checked."""

    def __init__(self, requestId, checkForId=False, resultIdName=None,
                 ignoreIfExists=False, returnResult=True):
        # Identifier of the request whose result should be polled/checked.
        self.requestId = requestId
        # When True, look for an id inside the request's result payload.
        self.checkForId = checkForId
        # Name of the id field expected in the result (if any).
        self.resultIdName = resultIdName
        # When True, an "already exists" outcome is not treated as an error.
        self.ignoreIfExists = ignoreIfExists
        # When True, the caller wants the request's result returned.
        self.returnResult = returnResult
"arkan.m.gerges@gmail.com"
] | arkan.m.gerges@gmail.com |
e5d5959f54521aae879a71ae8ee0fa751ca5f922 | a08d885cb9150d7e84f5ffbf0c9734893105a898 | /2022/Day 12/hill_climbing_algorithm_test.py | 8af50ef4df758b9a1ccd29b85665b167104ab870 | [] | no_license | vhsw/Advent-of-Code | ab422c389340a1caf2ec17c5db4981add6433fbe | 3c1dac27667472202ab15098c48efaac19348edf | refs/heads/master | 2022-12-29T03:56:59.648395 | 2022-12-26T11:01:45 | 2022-12-26T11:01:45 | 162,491,163 | 0 | 0 | null | 2022-05-10T08:43:32 | 2018-12-19T21:10:26 | Python | UTF-8 | Python | false | false | 355 | py | """Day 12: tests"""
from hill_climbing_algorithm import DATA, part1, part2
EXAMPLE = """
Sabqponm
abcryxxl
accszExk
acctuvwj
abdefghi
""".strip()
def test_part1():
    """Part 1 test"""
    # 31 is the worked-example answer; 408 is this puzzle input's answer.
    assert part1(EXAMPLE) == 31
    assert part1(DATA) == 408
def test_part2():
    """Part 2 test"""
    # 29 is the worked-example answer; 399 is this puzzle input's answer.
    assert part2(EXAMPLE) == 29
    assert part2(DATA) == 399
| [
"nevermind1025@gmail.com"
] | nevermind1025@gmail.com |
316d3967e9d92294530800e1eb8ba2b5054e610d | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5646553574277120_0/Python/krukovna/solve.py | e02a7c8f9c1b3e907cf93bbe302a8c63e63dcc89 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | import sys
# Raised (with the answer as payload) to unwind out of solve(); the
# __main__ driver catches it and prints the exception value.
class Solved(Exception):
    pass
def check(value, items):
    """Return True if *value* is exactly covered by greedily subtracting
    *items*, which must be sorted in descending order.
    """
    remaining = value
    # Iterate the items directly instead of indexing by range(len(...)).
    for item in items:
        # Greedy: subtract every denomination that still fits.
        if remaining >= item:
            remaining -= item
    return remaining == 0
def solve(top, items):
    """Add denominations until every value 1..top is representable, then
    report the number of additions by raising Solved.
    """
    added = 0
    denominations = list(reversed(items))
    for value in range(1, top + 1):
        if not check(value, denominations):
            # value itself is unreachable -> add it as a new denomination.
            denominations = sorted(denominations + [value], reverse=True)
            added += 1
    raise Solved(added)
# Standard Code Jam driver: the first line holds the case count; each case
# has a parameter line (only the third number, the target maximum, is used)
# and a line of starting denominations.  solve() "returns" via Solved.
if __name__ == '__main__':
    for i in range(int(sys.stdin.readline())):
        data = list(map(int, sys.stdin.readline().strip().split(' ')))
        amounts = list(sorted(map(int, sys.stdin.readline().strip().split(' '))))
        try:
            solve(data[2], amounts)
        except Solved as e:
            print('Case #{}: {}'.format(i+1, e))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
f740cf330191dc26a3bd03d4333a03e49014095a | 5e915a39fe966811424df0574f6670d252f895c8 | /micropython/p4_temperatura_f.py | 56ba1eda6cf14c319ef6a799437c06ba9f51ead4 | [
"MIT"
] | permissive | monkmakes/micro_bit_kit_es | c9c2f77f722f2a8a7e2657164d700b6fc758ce92 | be2a76f0ad45a70bef66c7ba548b2578ab35ede8 | refs/heads/master | 2022-11-21T19:09:11.445210 | 2020-07-22T16:07:42 | 2020-07-22T16:07:42 | 281,723,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # P4 Temperatura F
from microbit import *
# Continuously read the analog sensor on pin 1 and scroll the converted
# reading across the micro:bit LED display.
while True:
    lectura = pin1.read_analog()
    # Linear calibration of the raw ADC value; presumably yields degrees
    # Fahrenheit for this sensor board -- TODO confirm the 0.135/+1 factors.
    temperatura_f = round(lectura * 0.135 +1)
    display.scroll(str(temperatura_f))
| [
"evilgeniusauthor@gmail.com"
] | evilgeniusauthor@gmail.com |
1d2dbd370c5088150b15093ffa636dc0ae89bcf1 | 025d7484c52b204bc286dfb9d17fc08e8e03604e | /base_branch_company/__init__.py | db33f366ec546e53aff32c5c46331dd48ef9a9c8 | [] | no_license | gotorishab/stpi | 3e2d2393a3b64f313c688bfcb4855052ea5e62b4 | a548e923f80e124ea5f90f4559ec727193c70528 | refs/heads/master | 2021-07-05T16:19:39.932782 | 2021-04-30T03:58:05 | 2021-04-30T03:58:05 | 236,436,956 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | # -*- coding: utf-8 -*-
# Part of odoo. See LICENSE file for full copyright and licensing details.
from . import models
from . import wizard
| [
"gotorishab@gmail.com"
] | gotorishab@gmail.com |
99d3e82d29dc3df93ee1a712c799d4279cf6595d | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_Administrator/test_c139428.py | 61510e984e6bcc959e07dc4e89da92d75977f1af | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py | import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_sys import *
from page_obj.scg.scg_def import *
from page_obj.scg.scg_button import *
from page_obj.scg.scg_def_log import *
from page_obj.common.rail import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
test_id = 139428
def test_c139428(browser):
    """TestRail case 139428: creating an admin profile whose name contains
    special characters ("@#¥%&") must be rejected with a "name输入错误"
    (invalid name) alert.
    """
    try:
        login_web(browser, url=dev1)
        # Manual frame/menu navigation kept for reference; into_fun() below
        # performs the same navigation.
        # # 定位到默认frame
        # browser.switch_to.default_content()
        # browser.switch_to.frame("lefttree")
        # # 点击系统
        # browser.find_element_by_xpath(系统).click()
        # if not browser.find_element_by_xpath('//*[@id="menu"]/div[1]/div/ul/li[2]/ul').is_displayed():
        #     # 如果不可见,点击加号,展开元素
        #     browser.find_element_by_xpath(系统管理).click()
        # # 点击物理接口
        # browser.find_element_by_xpath(管理员).click()
        # # 切换到默认frame
        # browser.switch_to.default_content()
        # # 切换到内容frame
        # browser.switch_to.frame("content")
        into_fun(browser, 管理员)
        time.sleep(5)
        browser.find_element_by_xpath('//*[@id="tabs"]/li[2]/a/span').click()
        time.sleep(5)
        browser.find_element_by_xpath('//*[@id="button_area"]/div/input').click()
        time.sleep(3)
        # Fill in a profile name made of special characters (the invalid input
        # under test) plus the other mandatory fields.
        browser.find_element_by_xpath('//*[@id="profilename"]').send_keys("@#¥%&")
        browser.find_element_by_xpath('//*[@id="description"]').send_keys("admin_profile")
        browser.find_element_by_xpath('//*[@id="configsystem_0"]').click()
        browser.find_element_by_xpath('//*[@id="reportsystem_0"]').click()
        # Click Save.
        browser.find_element_by_xpath('//*[@id="container"]/div/form/div[2]/div[2]/div/input[2]').click()
        # Grab the text of the resulting alert box.
        time.sleep(2)
        alert = browser.switch_to_alert()
        print(alert.text)
        web_info = alert.text
        # Accept (dismiss) the alert.
        browser.switch_to_alert().accept()
        try:
            assert "name输入错误" in web_info
            rail_pass(test_run_id, test_id)
        except:
            # Record the failure in TestRail, then re-raise via the assert.
            rail_fail(test_run_id, test_id)
            assert "name输入错误" in web_info
    except Exception as err:
        # If any step above failed, reboot the device to restore its
        # configuration, mark the case failed, and fail the test.
        print(err)
        reload(hostip=dev1)
        rail_fail(test_run_id, test_id)
        assert False
# Run just this case, verbosely, when the file is executed directly.
if __name__ == '__main__':
    pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
| [
"15501866985@163.com"
] | 15501866985@163.com |
ec63a34cd757f9cabca23c6fcc9fb1e4d474b126 | 68a294455c03ada90e9ab80867c33b73672152f9 | /apps/producto/models.py | 11fc3eb1f9f9db601bd1427326252d430784871f | [] | no_license | chrisstianandres/citas | f7e89aa9481ee6aa260bd28cae44091a2c6db900 | 21f7f90ec958cabd71aa41c852877f0657677ade | refs/heads/master | 2023-08-28T07:24:35.187428 | 2021-11-20T00:03:08 | 2021-11-20T00:03:08 | 347,208,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,103 | py | import os
from datetime import datetime
from io import BytesIO
import qrcode
from PIL import Image, ImageDraw
from django.core.files import File
from django.db import models
from django.forms import model_to_dict
from apps.categoria.models import Categoria
from apps.presentacion.models import Presentacion
from citas.settings import STATIC_URL, MEDIA_URL, BASE_DIR, SECRET_KEY_ENCRIPT, MEDIA_ROOT
class Producto(models.Model):
    """Catalog product with optional image and QR-code attachments."""

    categoria = models.ForeignKey(Categoria, on_delete=models.PROTECT, null=True, blank=True)
    presentacion = models.ForeignKey(Presentacion, on_delete=models.PROTECT, null=True, blank=True)
    nombre = models.CharField(max_length=100)
    descripcion = models.CharField(max_length=200)
    imagen = models.ImageField(upload_to='productos', blank=True, null=True)
    qr = models.ImageField(upload_to='productos/qr', blank=True, null=True)

    def __str__(self):
        return '{}'.format(self.nombre)

    def get_image(self):
        # Media URL of the product image, with a placeholder fallback.
        if self.imagen:
            return '{}{}'.format(MEDIA_URL, self.imagen)
        else:
            return '{}{}'.format(MEDIA_URL, 'productos/no_disponible.jpg')

    def get_qr(self):
        # Media URL of the QR image; returns None when no QR was generated.
        if self.qr:
            return '{}{}'.format(MEDIA_URL, self.qr)

    def get_qr_2(self):
        # Filesystem-path variant of get_qr().  NOTE(review): MEDIA_ROOT may
        # lack a trailing separator -- confirm this concatenation is correct.
        if self.qr:
            return '{}{}'.format(MEDIA_ROOT, self.qr)

    # def save(self, *args, **kwargs):
    #
    #     super().save(*args, *kwargs)

    def toJSON(self):
        # Serialize the model plus its related objects for JSON responses.
        item = model_to_dict(self)
        item['presentacion'] = self.presentacion.toJSON()
        item['categoria'] = self.categoria.toJSON()
        item['imagen'] = self.get_image()
        item['qr'] = self.get_qr()
        item['tipo'] = 'Producto'
        return item

    class Meta:
        db_table = 'producto'
        verbose_name = 'producto'
        verbose_name_plural = 'productos'
        ordering = ['-id']
class envio_stock_dia(models.Model):
    """Flag marking that the daily stock report was sent for a given date."""

    # BUG FIX: the default must be the callable ``datetime.now`` (evaluated
    # on each save), not ``datetime.now()`` which freezes the date at the
    # moment the module is imported / the migration is generated.
    fecha = models.DateField(default=datetime.now, unique=True)
    enviado = models.BooleanField(default=True)

    def __str__(self):
        return '{}'.format(self.fecha.strftime('%Y-%m-%d'))
| [
"chrisstianandres@gmail.com"
] | chrisstianandres@gmail.com |
1192cded4effd3395252540e02dbd727c5dfe410 | 24cee07743790afde5040c38ef95bb940451e2f6 | /cci/LinkedList/2_4.py | e7b6c3f28f9924dec5761d25f28d55ff0ff9c01a | [] | no_license | tinaba96/coding | fe903fb8740d115cf5a7f4ff5af73c7d16b9bce1 | d999bf5620e52fabce4e564c73b9f186e493b070 | refs/heads/master | 2023-09-01T02:24:33.476364 | 2023-08-30T15:01:47 | 2023-08-30T15:01:47 | 227,594,153 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | from LinkedList import LinkedList
def partition(ll, x):
    """Partition the linked list *ll* around *x* in place.

    Nodes with value <= x are pushed onto the head, nodes with value > x
    are appended at the tail, so all small values precede all large ones.
    (Leftover debug prints from the original have been removed.)
    """
    current = ll.tail = ll.head
    while current:
        # Detach ``current`` before relinking it at either end.
        next_node = current.next
        current.next = None
        if current.value <= x:
            # Prepend at the head.
            current.next = ll.head
            ll.head = current
        else:
            # Append at the tail.
            ll.tail.next = current
            ll.tail = current
        current = next_node
    # Terminate the list in case the tail still points at a moved node.
    if ll.tail.next is not None:
        ll.tail.next = None
# Demo: build a random 10-node list (values 0..99), print it, partition it
# around the head's value, and print the result.
ll = LinkedList()
ll.generate(10, 0, 99)
print(ll)
partition(ll, ll.head.value)
print(ll)
| [
"tinaba178.96@gmail.com"
] | tinaba178.96@gmail.com |
8bfed1015851b962c2225cfa88aca33414e65fbe | 7b35ddab50851b774bffbc633fc6d4fd4faa1efa | /simplifytour/core/views.py | a7dc604ef44572d89b7585c7c2449762739c408f | [] | no_license | Tushant/simplifytour | e585607efef9937f4a32165a526c38cbc192a562 | 2cb9f70b8cf27fd4beddf251966fdc214a1dcd85 | refs/heads/master | 2020-07-22T06:46:52.480528 | 2019-09-08T12:05:15 | 2019-09-08T12:05:15 | 207,106,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,555 | py | import os
import mimetypes
try:
from urllib.parse import urljoin, urlparse
except ImportError:
from urlparse import urljoin, urlparse
from json import dumps
from django.contrib.admin.views.decorators import staff_member_required
from django.http import (HttpResponse, HttpResponseNotFound)
from django.utils.translation import ugettext_lazy as _
from django.contrib.staticfiles import finders
from simplifytour.core.models import Displayable
from simplifytour.conf import settings
@staff_member_required
def static_proxy(request):
"""
Serves TinyMCE plugins inside the inline popups and the uploadify
SWF, as these are normally static files, and will break with
cross-domain JavaScript errors if ``STATIC_URL`` is an external
host. URL for the file is passed in via querystring in the inline
popup plugin template, and we then attempt to pull out the relative
path to the file, so that we can serve it locally via Django.
"""
normalize = lambda u: ("//" + u.split("://")[-1]) if "://" in u else u
url = normalize(request.GET["u"])
host = "//" + request.get_host()
static_url = normalize(settings.STATIC_URL)
for prefix in (host, static_url, "/"):
if url.startswith(prefix):
url = url.replace(prefix, "", 1)
response = ""
(content_type, encoding) = mimetypes.guess_type(url)
if content_type is None:
content_type = "application/octet-stream"
path = finders.find(url)
if path:
if isinstance(path, (list, tuple)):
path = path[0]
if url.endswith(".htm"):
# Inject <base href="{{ STATIC_URL }}"> into TinyMCE
# plugins, since the path static files in these won't be
# on the same domain.
static_url = settings.STATIC_URL + os.path.split(url)[0] + "/"
if not urlparse(static_url).scheme:
static_url = urljoin(host, static_url)
base_tag = "<base href='%s'>" % static_url
with open(path, "r") as f:
response = f.read().replace("<head>", "<head>" + base_tag)
else:
try:
with open(path, "rb") as f:
response = f.read()
except IOError:
return HttpResponseNotFound()
return HttpResponse(response, content_type=content_type)
def displayable_links_js(request):
"""
Renders a list of url/title pairs for all ``Displayable`` subclass
instances into JSON that's used to populate a list of links in
TinyMCE.
"""
links = []
if "simplifytour.pages" in settings.INSTALLED_APPS:
from simplifytour.pages.models import Page
is_page = lambda obj: isinstance(obj, Page)
else:
is_page = lambda obj: False
# For each item's title, we use its model's verbose_name, but in the
# case of Page subclasses, we just use "Page", and then sort the items
# by whether they're a Page subclass or not, then by their URL.
for url, obj in Displayable.objects.url_map(for_user=request.user).items():
title = getattr(obj, "titles", obj.title)
real = hasattr(obj, "id")
page = is_page(obj)
if real:
verbose_name = _("Page") if page else obj._meta.verbose_name
title = "%s: %s" % (verbose_name, title)
links.append((not page and real, {"title": str(title), "value": url}))
sorted_links = sorted(links, key=lambda link: (link[0], link[1]['value']))
return HttpResponse(dumps([link[1] for link in sorted_links])) | [
"programmertushant@gmail.com"
] | programmertushant@gmail.com |
d0c90f25fddfaf49151612cb7ab6bc5f675ce960 | 7879c47da4cfa94ad676dc4f0a5aea308b6a05b9 | /banners/migrations/0019_auto_20190409_1629.py | df151200ad21b7b95dd60e9267ba70b2115debc2 | [] | no_license | SoloTodo/solotodo_core | 9bc51fb276a22d25d3d894552a20f07403eb1555 | 72d8e21512b8a358335c347c3cc9b39fc8789c9b | refs/heads/develop | 2023-08-13T04:21:03.957429 | 2023-08-10T16:14:44 | 2023-08-10T16:14:44 | 96,940,737 | 15 | 5 | null | 2023-07-25T15:46:18 | 2017-07-11T21:59:06 | Python | UTF-8 | Python | false | false | 408 | py | # Generated by Django 2.0.3 on 2019-04-09 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Make BannerUpdate.status_message optional (nullable and blankable).

    dependencies = [
        ('banners', '0018_auto_20190313_1137'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bannerupdate',
            name='status_message',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| [
"vkhemlan@gmail.com"
] | vkhemlan@gmail.com |
8e365f7fb7dbf06d1ec12db9b886d675b708e32a | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /third_party/blink/web_tests/external/wpt/webdriver/tests/classic/get_window_rect/get.py | f7592a30e067030f3c6433bc2419db06c0db8da8 | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 833 | py | from tests.support.asserts import assert_error, assert_success
def get_window_rect(session):
    """Issue the WebDriver "Get Window Rect" command for *session*."""
    endpoint = "session/{session_id}/window/rect".format(**vars(session))
    return session.transport.send("GET", endpoint)
def test_no_top_browsing_context(session, closed_window):
    # With the top-level browsing context gone, the command must error.
    response = get_window_rect(session)
    assert_error(response, "no such window")
def test_no_browsing_context(session, closed_frame):
    # A closed frame only discards the current browsing context; the command
    # operates on the top-level window and must still succeed.
    response = get_window_rect(session)
    assert_success(response)
def test_payload(session):
    # The returned rect must mirror the window's screen position and outer
    # dimensions as observed from script.
    expected = session.execute_script("""return {
        x: window.screenX,
        y: window.screenY,
        width: window.outerWidth,
        height: window.outerHeight
    }""")

    response = get_window_rect(session)
    value = assert_success(response)
    assert isinstance(value, dict)
    assert value == expected
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
b78201c41112819c6d5c05a0df40bc262974948d | 66727e413dc0899502eb22d9798c11c07ce5bcda | /tools/utilities/pythonlibs/audio/play_audio.py | 2054c201a6309ad74f5e7239b233959abea6cfc9 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | yunqu/ELL | 83e9f01d9be1dbcfc5b3814929797e5cf0b44159 | bfeb0239ee8c90953a7210fca1087241749a52d4 | refs/heads/master | 2020-05-04T04:09:38.810236 | 2019-04-01T23:05:25 | 2019-04-01T23:05:25 | 178,960,204 | 1 | 0 | null | 2019-04-01T23:01:56 | 2019-04-01T23:01:56 | null | UTF-8 | Python | false | false | 1,658 | py | #!/usr/bin/env python3
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: play_audio.py
# Authors: Chris Lovett
#
# Requires: Python 3.x
#
###################################################################################################
import argparse
import wav_reader
import speaker
# this is a test script to show how to use WavReader and Speaker classes.

arg_parser = argparse.ArgumentParser(description="Play an audio file after resampling it")
arg_parser.add_argument("filename", help="wav file to play ")
arg_parser.add_argument("--sample_rate", "-s", help="Audio sample rate to use", default=16000, type=int)
arg_parser.add_argument("--channels", "-c", help="Audio channels to use", default=1, type=int)
args = arg_parser.parse_args()

# First tell the WavReader what sample rate and channels we want the audio converted to
reader = wav_reader.WavReader(args.sample_rate, args.channels)

# Create a speaker object which we will give to the WavReader. The WavReader will pass
# the re-sampled audio to the Speaker so you can hear what it sounds like
speaker = speaker.Speaker()

# open the reader asking for 256 size chunks of audio, converted to floating point between -1 and 1.
reader.open(args.filename, 256, speaker)

print("wav file contains sample rate {} and {} channels".format(reader.actual_rate, reader.actual_channels))

# pump the reader until it returns None. In a real app you would assign the results of read() to
# a variable so you can process the audio chunks returned.
while reader.read() is not None:
    pass
| [
"clovett@microsoft.com"
] | clovett@microsoft.com |
d6c9aa19d252414fe4a3ac029740b73baa7788ed | 625f2f86f2b2e07cb35204d9b3232427bf462a09 | /official/HIRun2017PP/QCDPhoton_pThat-30_TuneCP5_5p02TeV_pythia8/crabConfig_FOREST.py | 71cf587527010f861fb8c2574246a542d877d2c4 | [] | no_license | ttrk/production | abb84c423a076fd9966276b7ed4350936c755e0b | f8a64c9c38de215802799365f0f7a99e1ee78276 | refs/heads/master | 2023-02-08T23:48:56.355141 | 2023-01-26T08:46:22 | 2023-01-26T08:46:22 | 52,877,406 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration: run the HiForest ntuplizer over the pp MC
# photon+jet AODSIM dataset and store the output on T2_US_MIT.
config = Configuration()

config.section_("General")
config.General.requestName = "QCDPhoton_pThat-30_TuneCP5_5p02TeV_pythia8_FOREST"
config.General.transferLogs = False

config.section_("JobType")
config.JobType.pluginName = "Analysis"
config.JobType.psetName = "runForestAOD_pp_MC_94X.py"
#config.JobType.maxMemoryMB = 2500 # request high memory machines.
#config.JobType.maxJobRuntimeMin = 2750 # request longer runtime, ~48 hours.

## software : CMSSW_9_4_10
## forest_CMSSW_9_4_10
# https://github.com/CmsHI/cmssw/commit/a46919490e0f037a901b12e85e40e2444d7230af
## runForestAOD_pp_MC_94X.py commit + ggHi.doEffectiveAreas + enable ggHiNtuplizerGED doRecHits and doPhoERegression + activate l1object + HiGenParticleAna.etaMax = 5, ptMin = 0.4
# https://github.com/CmsHI/cmssw/commit/a46919490e0f037a901b12e85e40e2444d7230af

# dataset summary on DAS
# Number of blocks: 11 Number of events: 926276 Number of files: 28 Number of lumis: 17451 sum(file_size): 68949676101 (68.9GB)
config.section_("Data")
config.Data.inputDataset = "/QCDPhoton_pThat-30_TuneCP5_5p02TeV_pythia8/RunIIpp5Spring18DR-94X_mc2017_realistic_forppRef5TeV_v1-v1/AODSIM"
config.Data.inputDBS = "global"
config.Data.splitting = "FileBased"
config.Data.unitsPerJob = 1
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = "RunIIpp5Spring18DR-94X_mc2017_realistic_forppRef5TeV_v1-v1-FOREST"
config.Data.outLFNDirBase = "/store/user/katatar/official/HIRun2017PP/"

config.section_("Site")
config.Site.storageSite = "T2_US_MIT"
#config.Site.whitelist = ["T2_US_MIT"]

#config.section_("Debug")
#config.Debug.extraJDL = ["+CMS_ALLOW_OVERFLOW=False"]
| [
"tatark@mit.edu"
] | tatark@mit.edu |
00e411ddf0f13b487f205308a2467da5f9032f51 | 40de6d687cc0131eebde6edcd8b1ab640d2ca727 | /Web/API/old/1.py | a421ae21ffa1d3237082c846727a21b62120ebe2 | [] | no_license | Larionov0/DimaKindruk_Lessons | ad9bf6a4b8534de11fd445434481042ae3863cec | 2fb38b2d65df84ad8909541c82bf7bef96deb24e | refs/heads/master | 2023-06-05T11:42:28.503979 | 2021-06-24T17:08:33 | 2021-06-24T17:08:33 | 338,129,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | import requests
import json
def print_structure(struct):
print(json.dumps(struct, indent=4))
def main_menu():
city = 'Kyiv'
while True:
print('--= Погода зараз =--')
print(f"Місто: {city}")
print('1 - дізнатись погоду')
print('2 - змінити місто')
print('0 - вихід з програми')
choice = input('Ваш вибір: ')
if choice == '1':
url = f'http://api.openweathermap.org/data/2.5/weather' \
f'?q={city}' \
f'&appid=cb5c7fc26a28e83605cff4b8efb1b85f' \
f'&units=metric'
try:
dct = requests.get(url).json()
text = '---= Погода =---\n' \
f'Головна: {dct["weather"][0]["main"]}\n' \
f'Температура: {dct["main"]["temp"]}\n' \
f'Відчувається як: {dct["main"]["feels_like"]}\n' \
f'Швидкість вітру: {dct["wind"]["speed"]}'
print(text)
except json.decoder.JSONDecodeError:
print('Щось не так з містом')
elif choice == '2':
city = input('Нове місто: ')
elif choice == '0':
break
main_menu()
| [
"larionov1001@gmail.com"
] | larionov1001@gmail.com |
a59ec9983b0fe83019a84fcdb7d3102b3379d6b6 | d7dc62a713617ebe10bb3ce228494637eca9ab7c | /scripts/dataset_summary.py | c6e53de833f547610e82a24a85108693127f3c03 | [
"MIT"
] | permissive | marcofavorito/google-hashcode-2020 | 8416bbdff0a09724065c6742ba8d7ae659bdd829 | 5e44b155eb4a7c6ed4202dd264bcc4d36ac953f2 | refs/heads/master | 2022-04-04T11:13:04.576572 | 2020-02-20T21:51:43 | 2020-02-20T21:51:43 | 241,432,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | import argparse
from hashcode20.helpers import Input
import numpy as np
parser = argparse.ArgumentParser("hashcode20", description="CLI util for Google Hash Code 2019. "
"It assumes the input provided in stdin.")
parser.add_argument("--in", dest="in_file", type=str, default=None, help="provide an input data file.")
args = parser.parse_args()
def _score_book_list(book_ids, score):
return sum(map(lambda book_id: score[book_id], book_ids))
def print_stats(data, label):
print("Avg {}: {}".format(label, np.mean(data)))
print("Std {}: {}".format(label, np.std(data)))
print("Max {}: {}".format(label, np.max(data)))
print("Min {}: {}".format(label, np.min(data)))
print("00th {}: {}".format(label, np.percentile(data, 0) ))
print("25th {}: {}".format(label, np.percentile(data, 25) ))
print("50th {}: {}".format(label, np.percentile(data, 50) ))
print("75th {}: {}".format(label, np.percentile(data, 75) ))
print("100th {}: {}".format(label, np.percentile(data, 100)))
print("-"*50)
if __name__ == '__main__':
input_ = Input.read(args.in_file) # type: Input
print("# Libraries: {}".format(len(input_.libraries)))
print("# Book: {}".format(input_.nb_books))
print("# Days: {}".format(input_.nb_days))
print_stats(input_.scores, "Book score")
print_stats([len(l.books) for l in input_.libraries], "Books per Library")
print_stats([_score_book_list(l.books, input_.scores) for l in input_.libraries], "Score per Library")
print_stats(list(map(lambda l: l.ship_book_rate, input_.libraries)), "Shipping rate")
print_stats(list(map(lambda l: l.nb_signup_days, input_.libraries)), "signup day period")
| [
"marco.favorito@gmail.com"
] | marco.favorito@gmail.com |
0617e588eccff156ad691170642df8ed9583d1f0 | 1abcd4686acf314a044a533d2a541e83da835af7 | /backjoon_level_python/12025.py | 3031c47f8b001bd4d4ae1511ee00366b4039bde1 | [] | no_license | HoYoung1/backjoon-Level | 166061b2801514b697c9ec9013db883929bec77e | f8e49c8d2552f6d62be5fb904c3d6548065c7cb2 | refs/heads/master | 2022-05-01T05:17:11.305204 | 2022-04-30T06:01:45 | 2022-04-30T06:01:45 | 145,084,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | def solve(password, k):
answer = ''
bit = bin(k-1)[2:]
bit = ''.join(reversed(bit))
# print('bit', bit)
count = 0
password = password.replace('6', '1').replace('7','2')
for s in password[::-1]:
if count < len(bit):
if s == '1':
if bit[count] == '1':
answer += '6'
else:
answer += s
count += 1
elif s == '2':
if bit[count] == '1':
answer += '7'
else:
answer += s
count += 1
else:
answer += s
else:
answer += s
# print(count)
# print(''.join(reversed(answer)))
if count == len(bit):
return ''.join(reversed(answer))
else:
return -1
if __name__ == '__main__':
password = input()
k = int(input())
print(solve(password, k)) | [
"empire1641@gmail.com"
] | empire1641@gmail.com |
ce95d7b7a66baa823b91cc6a6f6966e045647cd1 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.5/tests/regressiontests/utils/itercompat.py | f9fd701578c88b009be3b4b6546d7b96ea5bb57f | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.5/tests/regressiontests/utils/itercompat.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
2a63406ee420d62bf9d5c58274c937ec531958df | d922b02070c11c19ba6104daa3a1544e27a06e40 | /HW_4_6/venv/Scripts/pip3.8-script.py | 41620d84fc4a4267b4ade6a9be2cd60c46989b15 | [] | no_license | viharivnv/DSA | 2ca393a8e304ee7b4d540ff435e832d94ee4b2a7 | 777c7281999ad99a0359c44291dddaa868a2525c | refs/heads/master | 2022-10-15T15:26:59.045698 | 2020-06-17T15:55:33 | 2020-06-17T15:55:33 | 273,020,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!C:\Users\vihar\PycharmProjects\HW_4_6\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
| [
"52350934+viharivnv@users.noreply.github.com"
] | 52350934+viharivnv@users.noreply.github.com |
d800850a9e2c4d86bb5615c1930155a165048d9b | 0a79b1804588be9a1f504b0d8b2425d39debb272 | /barriers/models/history/__init__.py | a329ef511e8981b7fe916bb2053b47d2bb4b1ab7 | [
"MIT"
] | permissive | cad106uk/market-access-python-frontend | 9d44d455e1c7d5f20991fbad18d1aa9172696cf9 | f9d5143e2330613385b8617f7134acbe01f196f7 | refs/heads/master | 2023-03-05T18:37:40.481455 | 2021-01-18T10:28:00 | 2021-01-18T10:28:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | from .assessments.economic import EconomicAssessmentHistoryItem
from .assessments.economic_impact import EconomicImpactAssessmentHistoryItem
from .assessments.resolvability import ResolvabilityAssessmentHistoryItem
from .assessments.strategic import StrategicAssessmentHistoryItem
from .barriers import BarrierHistoryItem
from .notes import NoteHistoryItem
from .public_barriers import PublicBarrierHistoryItem
from .public_barrier_notes import PublicBarrierNoteHistoryItem
from .team_members import TeamMemberHistoryItem
from .utils import PolymorphicBase
from .wto import WTOHistoryItem
class HistoryItem(PolymorphicBase):
"""
Polymorphic wrapper for HistoryItem classes
Delegates to the correct subclass based on the value of data["model"]
That class then delegates to a subclass based on data["field"]
"""
key = "model"
subclasses = (
BarrierHistoryItem,
EconomicAssessmentHistoryItem,
EconomicImpactAssessmentHistoryItem,
NoteHistoryItem,
PublicBarrierHistoryItem,
PublicBarrierNoteHistoryItem,
ResolvabilityAssessmentHistoryItem,
StrategicAssessmentHistoryItem,
TeamMemberHistoryItem,
WTOHistoryItem,
)
class_lookup = {}
| [
"noreply@github.com"
] | cad106uk.noreply@github.com |
5d8989700260fd1d8dafa0e88e688ae38b405076 | 22b348a0d10519cb1f1da5e886fdf2d3c167cf5a | /myweb/test/_paste/_routes/demo_2.py | 67efaf264fa01ff34cd2c23b7403abf5e51bb3ce | [] | no_license | liuluyang/openstack_mogan_study | dab0a8f918ffd17e0a747715998e81304672b75b | 8624f765da7f5aa0c210f0fa945fc50cf8a67b9e | refs/heads/master | 2021-01-19T17:03:15.370323 | 2018-04-12T09:50:38 | 2018-04-12T09:50:38 | 101,040,396 | 1 | 1 | null | 2017-11-01T02:17:31 | 2017-08-22T08:30:22 | Python | UTF-8 | Python | false | false | 314 | py | from routes import Mapper
map = Mapper()
print map
print type(map)
map.connect(None, '/error/{action}/{id}', controller='error')
result = map.match('/error/lixin/200')
print result
map.connect(None, '/error/{action:index|lixin}/{id:\d+}', controller='error')
result = map.match('/error/lixin/200')
print result | [
"1120773382@qq.com"
] | 1120773382@qq.com |
365edf48bfff6fdeccdcb1003149c574c7972c07 | 97dae48fa3c613a84655c1c0b12cdc0db2c555bb | /algorithm/patternsearch/anagram_search.py | e0aab3a3dca8cf884dda3fe0c3bcd65162ad4911 | [] | no_license | klknet/geeks4geeks | 6aa5841b15be41057dc987524721ea1ea37e02ea | d7d9099af7617a4000f38c75d2c7214bed570eda | refs/heads/master | 2021-07-12T06:34:30.048691 | 2020-06-22T07:51:14 | 2020-06-22T07:51:14 | 170,288,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | """
Search for all permutations.
1)Store counts of frequencies of pattern in first count array countP[]. Also store counts of frequencies of characters
in first window of text in array countTW[].
2)Now run a loop from i=M to N-1, do following in loop:
a)If the two count arrays are identical, we found an occurrence.
b)Increment count of current character of text in countTW[].
c)Decrement count of first character of previous window in countTW[].
3)The last window is not checked by above loop. so explicitly check it.
"""
no_of_chars = 256
def anagram_search(pat, txt):
m, n = len(pat), len(txt)
pat_count = [0] * no_of_chars
cur_count = [0] * no_of_chars
for i in range(m):
pat_count[ord(pat[i])] += 1
cur_count[ord(txt[i])] += 1
for i in range(m, n):
if compare(pat_count, cur_count, pat):
print(i - m)
cur_count[ord(txt[i])] += 1
cur_count[ord(txt[i - m])] -= 1
if i == n - 1:
if compare(pat_count, cur_count, pat):
print(n - m)
def compare(patCount, curCount, pat):
m = len(pat)
for j in range(m):
if patCount[ord(pat[j])] != curCount[ord(pat[j])]:
return False
return True
pat = "ABCD"
txt = "BACDGABCDA"
anagram_search(pat, txt)
| [
"konglk@aliyun.com"
] | konglk@aliyun.com |
1af0bee0929738dc142282f5829ece2b158125a4 | ec2b6cd4e9c183bc5e99ad917110d91985dfc2e8 | /touchdown/aws/vpc/customer_gateway.py | 57ab247b587073333858ec7d25c5ddf89ac39980 | [
"Apache-2.0"
] | permissive | triplekill/touchdown | 128ac7a9260709dae5ccbce6db344ab109cc75da | 8b70d4ac448bebd1cd088fa415be9cf6c74108cc | refs/heads/master | 2020-12-25T03:20:34.717218 | 2016-05-29T18:54:03 | 2016-05-29T18:54:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument
from touchdown.core.plan import Plan
from touchdown.core.resource import Resource
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy, TagsMixin
from .vpc import VPC
class CustomerGateway(Resource):
resource_name = "customer_gateway"
name = argument.String(field="Name", group="tags")
type = argument.String(default="ipsec.1", choices=["ipsec.1"], field="Type")
public_ip = argument.IPAddress(field="PublicIp")
bgp_asn = argument.Integer(default=65000, field="BgpAsn")
tags = argument.Dict()
vpc = argument.Resource(VPC)
class Describe(SimpleDescribe, Plan):
resource = CustomerGateway
service_name = 'ec2'
describe_action = "describe_customer_gateways"
describe_envelope = "CustomerGateways"
key = "CustomerGatewayId"
def get_describe_filters(self):
vpc = self.runner.get_plan(self.resource.vpc)
if not vpc.resource_id:
return None
return {
"Filters": [
{'Name': 'tag:Name', 'Values': [self.resource.name]},
],
}
class Apply(TagsMixin, SimpleApply, Describe):
create_action = "create_customer_gateway"
waiter = "customer_gateway_available"
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_customer_gateway"
| [
"john.carr@unrouted.co.uk"
] | john.carr@unrouted.co.uk |
b3e47bb92563fa3756c12e43de9cb2e777ddcdd2 | 4c9c028936379c510cebfe4830f460817d9bc3c8 | /account/urls.py | 48532b9b4e93777e52ea3e0cdcab3456e5d9b824 | [] | no_license | preciousidam/management-system | cd47d7c564fe0ff0ae459c702c63a3cb16eee8ab | c984012e2cbc7554b20b00fabafd24f3f5752ba8 | refs/heads/main | 2023-04-02T08:44:24.416866 | 2021-03-11T20:09:11 | 2021-03-11T20:09:11 | 341,899,263 | 0 | 0 | null | 2021-04-12T14:35:07 | 2021-02-24T12:50:41 | Python | UTF-8 | Python | false | false | 733 | py | from django.urls import path, re_path
from django.conf.urls import url, include
from rest_framework import routers
from .views import (CorttsAccountViewSet, CompanyViewSet,
OtherAccountViewSet, TransactionViewSet,
ExpenseAccountViewSet, TopUpViewSet)
router = routers.DefaultRouter()
router.register(r'accounts/cortts', CorttsAccountViewSet)
router.register(r'accounts/others', OtherAccountViewSet)
router.register(r'accounts/expenses', ExpenseAccountViewSet)
router.register(r'accounts/transactions', TransactionViewSet)
router.register(r'accounts/topup', TopUpViewSet)
router.register(r'companies', CompanyViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
] | [
"preciousidam@gmail.com"
] | preciousidam@gmail.com |
4dc3acd2ad170769aa171cdcd7190d67995c3df2 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayMarketingCashvoucherTemplateCreateResponse.py | 5a0f8dd9303d72dccf432f218972e12ade589662 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,382 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMarketingCashvoucherTemplateCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayMarketingCashvoucherTemplateCreateResponse, self).__init__()
self._confirm_uri = None
self._fund_order_no = None
self._template_id = None
@property
def confirm_uri(self):
return self._confirm_uri
@confirm_uri.setter
def confirm_uri(self, value):
self._confirm_uri = value
@property
def fund_order_no(self):
return self._fund_order_no
@fund_order_no.setter
def fund_order_no(self, value):
self._fund_order_no = value
@property
def template_id(self):
return self._template_id
@template_id.setter
def template_id(self, value):
self._template_id = value
def parse_response_content(self, response_content):
response = super(AlipayMarketingCashvoucherTemplateCreateResponse, self).parse_response_content(response_content)
if 'confirm_uri' in response:
self.confirm_uri = response['confirm_uri']
if 'fund_order_no' in response:
self.fund_order_no = response['fund_order_no']
if 'template_id' in response:
self.template_id = response['template_id']
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
1520ca420b7242a13c8ee4a587eaf2836de6ab5e | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/exercism/python-exercises-master_with_unittest/hamming/hamming.py | 911760d87b510cc1e962ecd4af143c5c2c4f942a | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 21 | py | ___ distance
p..
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
a6bff4b60a92bd23a58b23a4e36b4942b22ec63a | 4adbebc69f2d2552234664f4cf6bf4b6a4a90aa2 | /examples/eight_schools/eight_schools.py | 711a6bd33f5635856ef2ff33142b772018126426 | [
"Apache-2.0"
] | permissive | coryshain/edward | 291c50123182a19273c1bf1723e894a54a9014ff | 494a85e6354504d8c71ec6a7b70021a20470fec8 | refs/heads/master | 2022-11-10T01:16:51.875938 | 2020-06-18T14:15:32 | 2020-06-18T14:15:32 | 273,252,033 | 0 | 0 | NOASSERTION | 2020-06-18T13:54:22 | 2020-06-18T13:54:21 | null | UTF-8 | Python | false | false | 2,802 | py | """Implement the stan 8 schools example using the recommended non-centred
parameterization.
The Stan example is slightly modified to avoid improper priors and
avoid half-Cauchy priors. Inference is with Edward using both HMC
and KLQP.
This model has a hierachy and an inferred variance - yet the example is
very simple - only the Normal distribution is used.
#### References
https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started
http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import tensorflow as tf
import numpy as np
from edward.models import Normal, Empirical
def main(_):
# data
J = 8
data_y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
data_sigma = np.array([15, 10, 16, 11, 9, 11, 10, 18])
# model definition
mu = Normal(0., 10.)
logtau = Normal(5., 1.)
theta_prime = Normal(tf.zeros(J), tf.ones(J))
sigma = tf.placeholder(tf.float32, J)
y = Normal(mu + tf.exp(logtau) * theta_prime, sigma * tf.ones([J]))
data = {y: data_y, sigma: data_sigma}
# ed.KLqp inference
with tf.variable_scope('q_logtau'):
q_logtau = Normal(tf.get_variable('loc', []),
tf.nn.softplus(tf.get_variable('scale', [])))
with tf.variable_scope('q_mu'):
q_mu = Normal(tf.get_variable('loc', []),
tf.nn.softplus(tf.get_variable('scale', [])))
with tf.variable_scope('q_theta_prime'):
q_theta_prime = Normal(tf.get_variable('loc', [J]),
tf.nn.softplus(tf.get_variable('scale', [J])))
inference = ed.KLqp({logtau: q_logtau, mu: q_mu,
theta_prime: q_theta_prime}, data=data)
inference.run(n_samples=15, n_iter=60000)
print("==== ed.KLqp inference ====")
print("E[mu] = %f" % (q_mu.mean().eval()))
print("E[logtau] = %f" % (q_logtau.mean().eval()))
print("E[theta_prime]=")
print((q_theta_prime.mean().eval()))
print("==== end ed.KLqp inference ====")
print("")
print("")
# HMC inference
S = 400000
burn = S // 2
hq_logtau = Empirical(tf.get_variable('hq_logtau', [S]))
hq_mu = Empirical(tf.get_variable('hq_mu', [S]))
hq_theta_prime = Empirical(tf.get_variable('hq_thetaprime', [S, J]))
inference = ed.HMC({logtau: hq_logtau, mu: hq_mu,
theta_prime: hq_theta_prime}, data=data)
inference.run()
print("==== ed.HMC inference ====")
print("E[mu] = %f" % (hq_mu.params.eval()[burn:].mean()))
print("E[logtau] = %f" % (hq_logtau.params.eval()[burn:].mean()))
print("E[theta_prime]=")
print(hq_theta_prime.params.eval()[burn:, ].mean(0))
print("==== end ed.HMC inference ====")
print("")
print("")
if __name__ == "__main__":
tf.app.run()
| [
"dustinviettran@gmail.com"
] | dustinviettran@gmail.com |
bea8b455adb55b38f6aaae2a0a97e58b2d9eccbc | 5ea136ca2e8066b77b39afdf15e3d0e6bc74761f | /scripts/substitute-prototypes.py | 4e56598f45506ae50cc08157da2a187c6741fbe6 | [] | no_license | reneang17/ttbar | 4023421846a70c22c13a2962520f7723ad35636b | 75f4fff1b5d79af097ea04aab437e2963215a232 | refs/heads/master | 2020-06-12T15:07:11.918815 | 2019-06-28T22:24:07 | 2019-06-28T22:24:07 | 194,339,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,166 | py | #!/usr/bin/env python3
#
# todo:
#
import argparse
import os
import re
import string
import subprocess
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,\
description = \
''' Substitute prototypes in IdSolver output by integrals.'''
)
parser.add_argument("file",\
help = ("out file from reduction"))
parser.add_argument("--tmp", action = "store_true", \
help = ("keep temporary files"))
args = parser.parse_args()
#-------------------------------------------------------------------------------
def prepare_form_file_content(input_list):
content = "#-\n"
content += "#include decls\n"
content += "#include {0}\n\n".format(args.file)
for i in range(0,len(input_list)):
content +="l integral{0} = {1};\n".\
format(i,input_list[i].strip(string.whitespace))
content += "\n"
content += "#include finalsubstitutions\n\n"
content += "print;\n"
content += ".end"
return content
#-------------------------------------------------------------------------------
def determine_integrals(outfile):
content = ""
with open(args.file) as fh: content = fh.read()
prototypes_re = re.compile('fill\s+(PR\d+\([^\)]+\))\s+=')
return prototypes_re.findall(content)
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if __name__ == '__main__':
#-----------------------------------------------------------------------------
prototypes = determine_integrals(args.file)
form_file_content = ""
form_file_content = prepare_form_file_content(prototypes)
form_fname = ".substitute.frm"
with open(form_fname,"w") as fh:
fh.write(form_file_content)
command = "form {0}".format(form_fname)
try:
subprocess.check_call(command, shell=True)
#output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
#print(output.decode("utf-8"))
except (subprocess.CalledProcessError) as err:
print("Error in {0}:\n{1}".format(os.path.basename(__file__), err))
if not args.tmp:
os.remove(form_fname)
| [
"reneang17@gmail.com"
] | reneang17@gmail.com |
2bdf6125fc161c83cf1d0a6eed0f207c318f8d40 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/mBART_ID1550_for_PyTorch/dataset/data_loader_iter.py | 4b1183c752968be0a7ee951799016572ffe96c99 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-or-later",
"GPL-3.0-only"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,986 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
"""
pytorch-dl
Created by raj at 09:11
Date: February 20, 2020
"""
from torch.utils.data.dataset import IterableDataset
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
class MyIterableDataset(IterableDataset):
def __init__(self, filename):
# Store the filename in object's memory
self.filename = filename
# And that's it, we no longer need to store the contents in the memory
def preprocess(self, text):
# Do something with text here
text_pp = text.lower().strip()
return text_pp
def line_mapper(self, line):
# Splits the line into text and label and applies preprocessing to the text
text, label = line.split(',')
text = self.preprocess(text)
return text, label
def __iter__(self):
# Create an iterator
file_itr = open(self.filename)
# Map each element using the line_mapper
mapped_itr = map(self.line_mapper, file_itr)
return mapped_itr
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
f9fd97e9d2c666e13a10ed4f2a3f3efa6c94dcc1 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/55f014a207123e37696fa342fd06feff8f1d4b28-<_load_label>-bug.py | e2655af953c09968c6f208353b936e225b93bfe2 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | def _load_label(self, idx):
'Parse xml file and return labels.'
img_id = self._items[idx]
anno_path = self._anno_path.format(*img_id)
root = ET.parse(anno_path).getroot()
size = root.find('size')
width = float(size.find('width').text)
height = float(size.find('height').text)
if (idx not in self._im_shapes):
self._im_shapes[idx] = (width, height)
label = []
for obj in root.iter('object'):
difficult = int(obj.find('difficult').text)
cls_name = obj.find('name').text.strip().lower()
if (cls_name not in self.classes):
continue
cls_id = self.index_map[cls_name]
xml_box = obj.find('bndbox')
xmin = (float(xml_box.find('xmin').text) - 1)
ymin = (float(xml_box.find('ymin').text) - 1)
xmax = (float(xml_box.find('xmax').text) - 1)
ymax = (float(xml_box.find('ymax').text) - 1)
try:
self._validate_label(xmin, ymin, xmax, ymax, width, height)
except AssertionError as e:
raise RuntimeError('Invalid label at {}, {}'.format(anno_path, e))
label.append([xmin, ymin, xmax, ymax, cls_id, difficult])
return np.array(label) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
9a90e00ca7c3cc3a44b1e2909de8f45cefc60fcf | f8b9e5de8823ff810ec445b6fa6d0e34f7b6319f | /Django/django_project/apps/Surveys_app/views.py | 0c4499e1aab25caa03e11a9324303ae1038795fe | [] | no_license | amalfushi/Python | 6c042443a8aeae15fc96a41a692abdbea05db863 | 067c2cef722457e884833f77baf9f44f45a4a165 | refs/heads/master | 2021-01-24T04:08:21.278071 | 2018-02-26T06:25:59 | 2018-02-26T06:25:59 | 122,923,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse
# Create your views here.
def main(request):
return HttpResponse('Placeholder to display all the surveys created')
def new(request):
return HttpResponse('Placeholder for users to add a new survey') | [
"dustin.p.schroeder@gmail.com"
] | dustin.p.schroeder@gmail.com |
8b524197c2dedd42f3f50922a7bdf22dda937dba | c78e61ccee6ac695d3f71f72fc3212fdd2c1d193 | /sync_plot_fig1618.py | b303b15494c6cc01c2f00ce050c78d02d1aa3603 | [] | no_license | bbw7561135/phd_code | 28557e84228119dd204f9e16ca27d7c7cef81188 | ef06c317115f0744a7941796c4092e489923ef4e | refs/heads/master | 2021-06-13T02:35:08.475474 | 2017-02-26T21:12:17 | 2017-02-26T21:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,694 | py | #------------------------------------------------------------------------------#
# #
# This code is a Python script that reads in arrays of synchrotron intensity, #
# and calculates the structure function slope and integrated quadrupole ratio #
# for different simulations as a function of an observational effect. Two #
# plots are produced, looking at noise and angular resolution. #
# #
# Author: Chris Herron #
# Start Date: 13/2/2015 #
# #
#------------------------------------------------------------------------------#
# First import numpy for array handling, matplotlib for plotting, astropy.io
# for fits manipulation, astropy.convolution for convolution functions,
# scipy.stats for calculating statistical quantities
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.convolution import convolve_fft, Gaussian2DKernel
from scipy import stats
# Import the functions that calculate the structure and correlation functions
# using FFT, as well as the function that calculates the radially averaged
# structure or correlation functions. Also import the function that calculates
# multipoles of the 2D structure functions, and the function that calculates the
# magnitude and argument of the quadrupole ratio
from sf_fft import sf_fft
from cf_fft import cf_fft
from sfr import sfr
from calc_multipole_2D import calc_multipole_2D
from calc_quad_ratio import calc_quad_ratio
# Define a function that calculates the errors in statistics by breaking up
# synchrotron images into quarters, calculating statistics for each quarter, and
# then calculates the standard deviation of the statistics.
def calc_err_bootstrap(sync_map_y, sync_map_z):
	'''
	Description
        This function divides the given images into quarters, and then
        calculates statistics for each quarter. The standard error of the
        mean of the calculated statistics (i.e. std / sqrt(8), over the
        eight sub-images) is then returned, representing the error on
        each statistic. Note: despite the name, this is a quartering /
        sub-sampling estimate rather than a true bootstrap resampling.
        
    Required Input
        sync_map_y - The synchrotron intensity map observed for a line of sight
        			 along the y axis.
        sync_map_z - The synchrotron intensity map observed for a line of sight 
        			 along the z axis. Must have the same size as the map
        			 for a line of sight along the y axis.
        
    Output
        m_err - The error calculated for the structure function slope of the 
        		synchrotron intensity
		residual_err - The error calculated for the residual of the linear fit 
					   to the structure function of synchrotron intensity
		int_quad_err - The error calculated for the integrated quadrupole ratio
					   modulus of the synchrotron intensity
	'''

	# Create an array that will hold the quarters of the synchrotron images
	# NOTE(review): integer division of the image shape relies on Python 2
	# semantics ('/' on ints); under Python 3 this would produce floats and
	# fail — confirm the script is only run under Python 2.
	quarter_arr = np.zeros((8,np.shape(sync_map_y)[0]/2,np.shape(sync_map_y)[1]/2))

	# Add the quarters of the images into the array
	# (indices 0-3 come from the y line of sight, 4-7 from the z line of sight)
	quarter_arr[0], quarter_arr[1] = np.split(np.split(sync_map_y,2,axis=0)[0],2,axis=1) 
	quarter_arr[2], quarter_arr[3] = np.split(np.split(sync_map_y,2,axis=0)[1],2,axis=1) 
	quarter_arr[4], quarter_arr[5] = np.split(np.split(sync_map_z,2,axis=0)[0],2,axis=1) 
	quarter_arr[6], quarter_arr[7] = np.split(np.split(sync_map_z,2,axis=0)[1],2,axis=1) 

	# Create arrays that will hold the calculated statistics for each quarter
	m_val = np.zeros(np.shape(quarter_arr)[0])
	resid_val = np.zeros(np.shape(quarter_arr)[0])
	int_quad_val = np.zeros(np.shape(quarter_arr)[0])

	# Loop over the quarters, to calculate statistics for each one
	for i in range(np.shape(quarter_arr)[0]):
		# Extract the current image quarter from the array
		image = quarter_arr[i]

		# Calculate the structure function (two-dimensional) of the synchrotron
		# intensity map. Note that no_fluct = True is set, because we are not 
		# subtracting the mean from the synchrotron maps before calculating the
		# structure function.
		strfn = sf_fft(image, no_fluct = True)

		# Radially average the calculated 2D structure function, using the 
		# specified number of bins.
		rad_sf = sfr(strfn, num_bins, verbose = False)

		# Extract the calculated radially averaged structure function
		sf = rad_sf[1]

		# Extract the radius values used to calculate this structure function.
		sf_rad_arr = rad_sf[0]

		# Calculate the spectral index of the structure function calculated for
		# this value of gamma. Note that only bins 11-15 of the structure
		# function are used in the fit, as this is the part that is close to a
		# straight line in log-log space.
		spec_ind_data = np.polyfit(np.log10(\
			sf_rad_arr[11:16]),\
			np.log10(sf[11:16]), 1, full = True)

		# Extract the returned coefficients from the polynomial fit
		coeff = spec_ind_data[0]

		# Extract the sum of the residuals from the polynomial fit
		# (polyfit with full=True returns the residual sum of squares here)
		resid_val[i] = spec_ind_data[1]

		# Enter the value of m, the slope of the structure function minus 1,
		# into the corresponding array
		m_val[i] = coeff[0]-1.0

		# Calculate the 2D structure function for this slice of the synchrotron
		# intensity data cube. Note that no_fluct = True is set, because we are
		# not subtracting the mean from the synchrotron maps before calculating
		# the structure function. We are also calculating the normalised 
		# structure function, which only takes values between 0 and 2.
		norm_strfn = sf_fft(image, no_fluct = True, normalise = True)

		# Shift the 2D structure function so that the zero radial separation
		# entry is in the centre of the image.
		norm_strfn = np.fft.fftshift(norm_strfn)

		# Calculate the magnitude and argument of the quadrupole ratio
		quad_mod, quad_arg, quad_rad = calc_quad_ratio(norm_strfn, num_bins)

		# Integrate the magnitude of the quadrupole / monopole ratio from 
		# one sixth of the way along the radial separation bins, until three 
		# quarters of the way along the radial separation bins. This integration
		# is performed with respect to log separation (i.e. I am ignoring the 
		# fact that the points are equally separated in log space, to calculate 
		# the area under the quadrupole / monopole ratio plot when the x axis 
		# is scaled logarithmically). I normalise the value that is returned by 
		# dividing by the number of increments in log radial separation used in 
		# the calculation.
		int_quad_val[i] = np.trapz(quad_mod[11:20], dx = 1.0)\
		 / (19 - 11)

	# At this point, the statistics have been calculated for each quarter
	# The next step is to calculate the standard error of the mean of each
	# statistic
	m_err = np.std(m_val) / np.sqrt(len(m_val))
	residual_err = np.std(resid_val) / np.sqrt(len(resid_val))
	int_quad_err = np.std(int_quad_val) / np.sqrt(len(int_quad_val))

	# Now that all of the calculations have been performed, return the 
	# calculated errors
	return m_err, residual_err, int_quad_err
# ---------------------------------------------------------------------------
# Script configuration: choose simulations, the spectral index gamma, and the
# observational effect (noise or angular resolution) to iterate over, then
# allocate the result arrays that the main loop below fills in.
# NOTE(review): this file uses Python 2 print statements — run under Python 2.
# ---------------------------------------------------------------------------
# Set a variable to hold the number of bins to use in calculating the 
# correlation functions
num_bins = 25

# Create a string for the directory that contains the simulated magnetic fields
# and synchrotron intensity maps to use. 
simul_loc = '/Volumes/CAH_ExtHD/Madison_2014/Simul_Data/'

# Create a string for the specific simulated data sets to use in calculations.
# The directories end in:
# b.1p.1_Oct_Burk
# b.1p.01_Oct_Burk
# b.1p2_Aug_Burk
# b1p.1_Oct_Burk
# b1p.01_Oct_Burk
# b1p2_Aug_Burk
# c512b.1p.0049
# c512b.1p.0077
# c512b.1p.025
# c512b.1p.05
# c512b.1p.7
# c512b1p.0049
# c512b1p.0077
# c512b1p.025
# c512b1p.05
# c512b1p.7
# c512b3p.01
# c512b5p.01
# c512b5p2

# Create strings giving the directories for the simulations produced with a 
# low magnetic field
low_B_sims = ['b.1p.01_Oct_Burk/', 'b.1p.1_Oct_Burk/', 'c512b.1p.7/', \
'b.1p2_Aug_Burk/']

# Create strings giving the directories for the simulations produced with a 
# high magnetic field
high_B_sims = ['b1p.01_Oct_Burk/', 'b1p.1_Oct_Burk/', 'c512b1p.7/', \
'b1p2_Aug_Burk/']

# Create strings giving the simulation codes, for the low magnetic field 
# simulations used to produce plots
low_B_short = ['b.1p.01', 'b.1p.1', 'b.1p.7', 'b.1p2']

# Create strings giving the simulation codes, for the high magnetic field
# simulations used to produce plots
high_B_short = ['b1p.01', 'b1p.1', 'b1p.7', 'b1p2']

# Create strings giving the simulation codes in terms of Mach numbers, for the
# low magnetic field simulations used to produce plots
low_B_short_M = ['Ms7.02Ma1.76', 'Ms2.38Ma1.86', 'Ms0.83Ma1.74', 'Ms0.45Ma1.72']

# Create strings giving the simulation codes in terms of Mach numbers, for the
# high magnetic field simulations used to produce plots
high_B_short_M = ['Ms6.78Ma0.52', 'Ms2.41Ma0.67', 'Ms0.87Ma0.7', 'Ms0.48Ma0.65']

# Create an array of marker symbols, so that the plot for each gamma value has
# a different plot symbol
symbol_arr = ['o','^','s','*']

# Create an array that specifies the value of gamma used to produce each 
# synchrotron emissivity cube
gamma_arr = np.array([1.0,1.5,2.0,2.5,3.0,3.5,4.0])

# Create a variable that stores the index corresponding to the value of gamma to
# use in the calculations
gam_index = 2

# Create a variable that just holds the value of gamma being used
gamma = gamma_arr[gam_index]

# Create a string that determines what observational effect will be studied
# String can be one of the following:
# noise - Study how statistics change as noise level is varied
# res - Study how statistics change as the spatial resolution is varied
# NOTE(review): there is no else branch below — any other value leaves
# iter_array/xlabel undefined and the script fails further down.
obs_effect = 'noise'

# Create a variable that controls how many data points are being used for the
# free parameter
free_num = 20

# Depending on what observational effect is being studied, create an array of
# values over which we will iterate. This array represents the values of the 
# free parameter related to the observational effect 
if obs_effect == 'noise':
	# Create an array of values that will be used to determine the standard
	# deviation of the Gaussian distribution from which noise values are 
	# generated. The standard deviation will be calculated by multiplying the
	# median synchrotron intensity by the values in this array.
	iter_array = np.linspace(0.02, 0.5, free_num)

	# Create a label for the x-axis of plots that are made against noise
	# standard deviation
	xlabel = 'Noise StandDev [frac median inten]'

	# Create a string to be used in the titles of any plots that are made 
	# against noise standard deviation
	title_string = 'Noise StandDev'

	# Create a string to be used in legends involving spectral channel width
	leg_string = 'Noise = ' 
elif obs_effect == 'res':
	# Create an array of values that represent the standard deviation of the 
	# Gaussian used to smooth the synchrotron maps. All values are in pixels.
	iter_array = np.linspace(1.0, 50.0, free_num)

	# Create an array of values representing the final angular resolution of 
	# the image after smoothing. The final resolution is calculated by 
	# quadrature from the initial resolution (1 pixel) and the standard 
	# deviation of the convolving Gaussian.
	final_res = np.sqrt(1.0 + np.power(iter_array,2.0))

	# Create a label for the x-axis of plots that are made against angular 
	# resolution
	xlabel = 'Angular Resolution [pixels]'

	# Create a string to be used in the titles of any plots that are made 
	# against angular resolution
	title_string = 'Angular Resolution'

	# Create a string to be used in legends involving angular resolution
	leg_string = 'AngRes = ' 

# Create a two dimensional array that will hold all of the structure function
# slope values for the different low magnetic field simulations. The first index
# gives the simulation the second gives the strength of the observational effect
sf_low_arr_y = np.zeros((len(low_B_sims), len(iter_array)))
sf_low_arr_z = np.zeros((len(low_B_sims), len(iter_array)))

# Create a two dimensional array that will hold all of the structure function
# slope values for the different high magnetic field simulations. The first
# index gives the simulation the second gives the strength of the observational
# effect
sf_high_arr_y = np.zeros((len(high_B_sims), len(iter_array)))
sf_high_arr_z = np.zeros((len(high_B_sims), len(iter_array)))

# Create a two dimensional array that will hold all of the integrated quadrupole
# ratio values for the different low magnetic field simulations. The first index
# gives the simulation the second gives the strength of the observational effect
quad_low_arr_y = np.zeros((len(low_B_sims), len(iter_array)))
quad_low_arr_z = np.zeros((len(low_B_sims), len(iter_array)))

# Create a two dimensional array that will hold all of the integrated quadrupole
# ratio values for the different high magnetic field simulations. The first
# index gives the simulation the second gives the strength of the observational
# effect
quad_high_arr_y = np.zeros((len(high_B_sims), len(iter_array)))
quad_high_arr_z = np.zeros((len(high_B_sims), len(iter_array)))

# Create error arrays for each of the statistics. These errors are only for the
# statistics calculated from the y and z axes (perpendicular to the mean 
# magnetic field), and are calculated by the standard deviation of the 
# statistics calculated for sub-images of the synchrotron maps.
m_err_low_arr = np.zeros((len(low_B_sims), len(iter_array)))
residual_err_low_arr = np.zeros((len(low_B_sims), len(iter_array)))
int_quad_err_low_arr = np.zeros((len(low_B_sims), len(iter_array)))
# For high magnetic field simulations
m_err_high_arr = np.zeros((len(high_B_sims), len(iter_array)))
residual_err_high_arr = np.zeros((len(high_B_sims), len(iter_array)))
int_quad_err_high_arr = np.zeros((len(high_B_sims), len(iter_array)))
# ---------------------------------------------------------------------------
# Main loop: for each matched low/high magnetic field simulation pair, load the
# synchrotron maps for the y and z lines of sight, apply the chosen
# observational effect at each strength in iter_array, and record the structure
# function slope, integrated quadrupole ratio, and their errors.
# NOTE(review): the noise realisations below are drawn with no fixed random
# seed, so the exact plotted values vary from run to run.
# ---------------------------------------------------------------------------
# Loop over the simulations, as we need to calculate the statistics for each
# simulation
for i in range(len(low_B_sims)):
	# Create a string for the full directory path to use in the calculation for
	# low and high magnetic field simulations
	data_loc_low = simul_loc + low_B_sims[i]
	data_loc_high = simul_loc + high_B_sims[i]

	# Open the FITS file that contains the simulated synchrotron intensity
	# map for this line of sight, for low and high magnetic fields
	sync_fits_low_y = fits.open(data_loc_low + 'synint_p1-4y.fits')
	sync_fits_high_y = fits.open(data_loc_high + 'synint_p1-4y.fits')
	# For z LOS
	sync_fits_low_z = fits.open(data_loc_low + 'synint_p1-4.fits')
	sync_fits_high_z = fits.open(data_loc_high + 'synint_p1-4.fits')

	# Extract the data for the simulated synchrotron intensities for the current
	# low and high magnetic field simulations
	sync_data_low_y = sync_fits_low_y[0].data
	sync_data_high_y = sync_fits_high_y[0].data
	# For z LOS
	sync_data_low_z = sync_fits_low_z[0].data
	sync_data_high_z = sync_fits_high_z[0].data

	# Extract the synchrotron intensity map for the value of gamma, for
	# low and high magnetic field simulations
	sync_map_low_y = sync_data_low_y[gam_index]
	sync_map_high_y = sync_data_high_y[gam_index]
	# For z LOS
	sync_map_low_z = sync_data_low_z[gam_index]
	sync_map_high_z = sync_data_high_z[gam_index]

	# Print a message to the screen to show what simulation group is being used
	print 'Starting calculation for simulation group {}'.format(i)

	# Loop over the values for the parameter related to the observational
	# effect, to calculate the structure function slope and integrated
	# quadrupole ratio for the low and high magnetic field simulations
	for j in range(len(iter_array)):
		# Check to see which observational effect is being studied
		if obs_effect == 'noise':
			# In this case, we are taking into account the effect of noise in
			# the telescope. We start with an array of values that, when 
			# multiplied by the median intensity of the synchrotron map, give
			# the standard deviation of the Gaussian noise. 

			# Take into account an observing frequency of 1.4 GHz, by multiplying
			# the extracted synchrotron maps by a gamma dependent frequency factor
			sync_map_low_f_y = sync_map_low_y * np.power(1.4, -(gamma - 1))
			sync_map_high_f_y = sync_map_high_y * np.power(1.4, -(gamma - 1))
			# For z LOS
			sync_map_low_f_z = sync_map_low_z * np.power(1.4, -(gamma - 1))
			sync_map_high_f_z = sync_map_high_z * np.power(1.4, -(gamma - 1))

			# Calculate the standard deviation of the Gaussian noise that will 
			# affect the synchrotron maps. This needs to be done individually 
			# for low and high magnetic field simulations
			noise_stdev_low_y = iter_array[j] * np.median(sync_map_low_f_y)
			noise_stdev_high_y = iter_array[j] * np.median(sync_map_high_f_y)
			# For z LOS
			noise_stdev_low_z = iter_array[j] * np.median(sync_map_low_f_z)
			noise_stdev_high_z = iter_array[j] * np.median(sync_map_high_f_z)

			# Create an array of values that are randomly drawn from a Gaussian
			# distribution with the specified standard deviation. This 
			# represents the noise at each pixel of the image. 
			noise_matrix_low_y = np.random.normal(scale = noise_stdev_low_y,\
			 size = np.shape(sync_map_low_y))
			noise_matrix_high_y = np.random.normal(scale = noise_stdev_high_y,\
			 size = np.shape(sync_map_high_y))
			# For z LOS
			noise_matrix_low_z = np.random.normal(scale = noise_stdev_low_z,\
			 size = np.shape(sync_map_low_z))
			noise_matrix_high_z = np.random.normal(scale = noise_stdev_high_z,\
			 size = np.shape(sync_map_high_z))

			# Add the noise maps onto the synchrotron intensity maps, to produce
			# the mock 'observed' maps
			sync_map_free_param_low_y = sync_map_low_f_y + noise_matrix_low_y
			sync_map_free_param_high_y = sync_map_high_f_y + noise_matrix_high_y
			# For z LOS
			sync_map_free_param_low_z = sync_map_low_f_z + noise_matrix_low_z
			sync_map_free_param_high_z = sync_map_high_f_z + noise_matrix_high_z
		elif obs_effect == 'res':
			# In this case, we are taking into account the effect of spatial 
			# resolution. We start with an array of values that specifies the 
			# standard deviation of the Gaussian to be used to smooth the data.

			# Take into account an observing frequency of 1.4 GHz, by multiplying
			# the extracted synchrotron maps by a gamma dependent frequency factor
			sync_map_low_f_y = sync_map_low_y * np.power(1.4, -(gamma - 1))
			sync_map_high_f_y = sync_map_high_y * np.power(1.4, -(gamma - 1))
			# For z LOS
			sync_map_low_f_z = sync_map_low_z * np.power(1.4, -(gamma - 1))
			sync_map_high_f_z = sync_map_high_z * np.power(1.4, -(gamma - 1))

			# Create a Gaussian kernel to use to smooth the synchrotron map,
			# using the given standard deviation
			gauss_kernel = Gaussian2DKernel(iter_array[j])

			# Smooth the synchrotron maps to the required resolution by 
			# convolution with the above Gaussian kernel.
			sync_map_free_param_low_y = convolve_fft(sync_map_low_f_y, gauss_kernel, boundary = 'wrap')
			sync_map_free_param_high_y = convolve_fft(sync_map_high_f_y, gauss_kernel, boundary = 'wrap')
			# For z LOS
			sync_map_free_param_low_z = convolve_fft(sync_map_low_f_z, gauss_kernel, boundary = 'wrap')
			sync_map_free_param_high_z = convolve_fft(sync_map_high_f_z, gauss_kernel, boundary = 'wrap')

			# Replace the array of standard deviations with the array of final
			# resolutions, so that the final resolutions are used in all plots
			# NOTE(review): this mutates iter_array in place on the first pass
			# through the simulations; later simulation groups therefore smooth
			# with the final-resolution values rather than the original
			# standard deviations — confirm this is intended.
			iter_array[j] = final_res[j]

		# Calculate the structure function (two-dimensional) of the synchrotron
		# intensity maps, for the low and high magnetic field simulations. Note 
		# that no_fluct = True is set, because we are not subtracting the mean
		# from the synchrotron maps before calculating the structure function.
		strfn_low_y = sf_fft(sync_map_free_param_low_y, no_fluct = True)
		strfn_high_y = sf_fft(sync_map_free_param_high_y, no_fluct = True)
		# For z LOS
		strfn_low_z = sf_fft(sync_map_free_param_low_z, no_fluct = True)
		strfn_high_z = sf_fft(sync_map_free_param_high_z, no_fluct = True)

		# Radially average the calculated 2D structure function, using the 
		# specified number of bins, for low and high magnetic field simulations.
		rad_sf_low_y = sfr(strfn_low_y, num_bins, verbose = False)
		rad_sf_high_y = sfr(strfn_high_y, num_bins, verbose = False)
		# For z LOS
		rad_sf_low_z = sfr(strfn_low_z, num_bins, verbose = False)
		rad_sf_high_z = sfr(strfn_high_z, num_bins, verbose = False)

		# Extract the calculated radially averaged structure function for low
		# and high magnetic field simulations
		sf_low_y = rad_sf_low_y[1]
		sf_high_y = rad_sf_high_y[1]
		# For z LOS
		sf_low_z = rad_sf_low_z[1]
		sf_high_z = rad_sf_high_z[1]

		# Extract the radius values used to calculate this structure function,
		# for low and high magnetic field simulations.
		sf_rad_arr_low_y = rad_sf_low_y[0]
		sf_rad_arr_high_y = rad_sf_high_y[0]
		# For z LOS
		sf_rad_arr_low_z = rad_sf_low_z[0]
		sf_rad_arr_high_z = rad_sf_high_z[0]

		# Calculate the spectral index of the structure function calculated for
		# this value of gamma. Note that only bins 11-15 of the structure
		# function are used in the fit, as this is the part that is close to a
		# straight line. Perform a linear fit for the low magnetic
		# field simulation
		spec_ind_data_low_y = np.polyfit(np.log10(\
			sf_rad_arr_low_y[11:16]),\
			np.log10(sf_low_y[11:16]), 1, full = True)
		# Perform a linear fit for the high magnetic field simulation
		spec_ind_data_high_y = np.polyfit(np.log10(\
			sf_rad_arr_high_y[11:16]),\
			np.log10(sf_high_y[11:16]), 1, full = True)
		# For z LOS
		# Perform a linear fit for the low magnetic field simulation
		spec_ind_data_low_z = np.polyfit(np.log10(\
			sf_rad_arr_low_z[11:16]),\
			np.log10(sf_low_z[11:16]), 1, full = True)
		# Perform a linear fit for the high magnetic field simulation
		spec_ind_data_high_z = np.polyfit(np.log10(\
			sf_rad_arr_high_z[11:16]),\
			np.log10(sf_high_z[11:16]), 1, full = True)

		# Extract the returned coefficients from the polynomial fit, for low and
		# high magnetic field simulations
		coeff_low_y = spec_ind_data_low_y[0]
		coeff_high_y = spec_ind_data_high_y[0]
		# For z LOS
		coeff_low_z = spec_ind_data_low_z[0]
		coeff_high_z = spec_ind_data_high_z[0]

		# Enter the value of m, the slope of the structure function minus 1,
		# into the corresponding array, for low and high magnetic field 
		# simulations
		sf_low_arr_y[i,j] = coeff_low_y[0]-1.0
		sf_high_arr_y[i,j] = coeff_high_y[0]-1.0
		# For z LOS
		sf_low_arr_z[i,j] = coeff_low_z[0]-1.0
		sf_high_arr_z[i,j] = coeff_high_z[0]-1.0

		# Calculate the 2D structure function for this slice of the synchrotron
		# intensity data cube. Note that no_fluct = True is set, because we are
		# not subtracting the mean from the synchrotron maps before calculating
		# the structure function. We are also calculating the normalised 
		# structure function, which only takes values between 0 and 2.
		norm_strfn_low_y = sf_fft(sync_map_free_param_low_y, no_fluct = True, normalise = True)
		norm_strfn_high_y = sf_fft(sync_map_free_param_high_y, no_fluct = True, normalise = True)
		# For z LOS
		norm_strfn_low_z = sf_fft(sync_map_free_param_low_z, no_fluct = True, normalise = True)
		norm_strfn_high_z = sf_fft(sync_map_free_param_high_z, no_fluct = True, normalise = True)

		# Shift the 2D structure function so that the zero radial separation
		# entry is in the centre of the image. This is done for low and high 
		# magnetic field simulations
		norm_strfn_low_y = np.fft.fftshift(norm_strfn_low_y)
		norm_strfn_high_y = np.fft.fftshift(norm_strfn_high_y)
		# For z LOS
		norm_strfn_low_z = np.fft.fftshift(norm_strfn_low_z)
		norm_strfn_high_z = np.fft.fftshift(norm_strfn_high_z)

		# Calculate the magnitude and argument of the quadrupole ratio, for 
		# low and high magnetic field simulations
		quad_mod_low_y, quad_arg_low_y, quad_rad_low_y = calc_quad_ratio(norm_strfn_low_y, num_bins)
		quad_mod_high_y, quad_arg_high_y, quad_rad_high_y = calc_quad_ratio(norm_strfn_high_y, num_bins)
		# For z LOS
		quad_mod_low_z, quad_arg_low_z, quad_rad_low_z = calc_quad_ratio(norm_strfn_low_z, num_bins)
		quad_mod_high_z, quad_arg_high_z, quad_rad_high_z = calc_quad_ratio(norm_strfn_high_z, num_bins)

		# Integrate the magnitude of the quadrupole / monopole ratio from 
		# one sixth of the way along the radial separation bins, until three 
		# quarters of the way along the radial separation bins. This integration
		# is performed with respect to log separation (i.e. I am ignoring the 
		# fact that the points are equally separated in log space, to calculate 
		# the area under the quadrupole / monopole ratio plot when the x axis 
		# is scaled logarithmically). I normalise the value that is returned by 
		# dividing by the number of increments in log radial separation used in 
		# the calculation. This is done for low and high magnetic field 
		# simulations
		quad_low_arr_y[i,j] = np.trapz(quad_mod_low_y[11:20], dx = 1.0) / (19 - 11)
		quad_high_arr_y[i,j] = np.trapz(quad_mod_high_y[11:20], dx = 1.0) / (19 - 11)
		# For z LOS
		quad_low_arr_z[i,j] = np.trapz(quad_mod_low_z[11:20], dx = 1.0) / (19 - 11)
		quad_high_arr_z[i,j] = np.trapz(quad_mod_high_z[11:20], dx = 1.0) / (19 - 11)

		# Create errors for each of the statistics. These errors are only for the
		# statistics calculated from the y and z axes (perpendicular to the mean 
		# magnetic field), and are calculated by the standard deviation of the 
		# statistics calculated for sub-images of the synchrotron maps.
		m_err_low_arr[i,j], residual_err_low_arr[i,j], int_quad_err_low_arr[i,j]\
		= calc_err_bootstrap(sync_map_free_param_low_y, sync_map_free_param_low_z)
		m_err_high_arr[i,j],residual_err_high_arr[i,j], int_quad_err_high_arr[i,j]\
		= calc_err_bootstrap(sync_map_free_param_high_y, sync_map_free_param_high_z)

	# Close the FITS files, now that we are finished using them, to save
	# memory
	sync_fits_low_y.close()
	sync_fits_high_y.close()
	# For z LOS
	sync_fits_low_z.close()
	sync_fits_high_z.close()

	# Print a message to show that the calculation has finished successfully
	# for this simulation group
	print 'All statistics calculated for simulation group {}'.format(i)
# ---------------------------------------------------------------------------
# Plotting: average the y and z line-of-sight statistics, then build a 2x2
# figure (SF slope on top, integrated quadrupole ratio below; low B left,
# high B right) and save it as fig18 (noise) or fig16 (resolution).
# NOTE(review): the residual_err_*_arr errors computed above are never used
# in these plots.
# ---------------------------------------------------------------------------
# Create mean value arrays for each of the statistics. These values are only for
# the statistics calculated from the y and z axes (perpendicular to the mean 
# magnetic field), for y and z lines of sight
m_mean_low_arr = (sf_low_arr_y + sf_low_arr_z) / 2.0
int_quad_mean_low_arr = (quad_low_arr_y + quad_low_arr_z) / 2.0
# For high magnetic field simulations
m_mean_high_arr = (sf_high_arr_y + sf_high_arr_z) / 2.0
int_quad_mean_high_arr = (quad_high_arr_y + quad_high_arr_z) / 2.0

# When the code reaches this point, the statistics have been saved for every 
# simulation, so start making the final plots.

# ------------------- Plots of SF slope and quadrupole ratio -------------------

# Here we want to produce one plot with four subplots. There should be two rows
# of subplots, with two subplots in each row. The top row will be SF slope, and
# the bottom row will be quadrupole ratio. The left column will be low magnetic 
# field simulations, and the right column will be high magnetic field 
# simulations.

# Create a figure to hold all of the subplots
fig = plt.figure(1, figsize=(9,6), dpi = 300)

# Create an axis for the first subplot to be produced, which is for the SF slope
# of low magnetic field simulations
ax1 = fig.add_subplot(221)

# Loop over the low magnetic field simulations to produce plots for each simulation
for i in range(len(low_B_sims)):
	# Plot the SF slope for this simulation, against the observational effect
	plt.errorbar(iter_array, m_mean_low_arr[i], fmt='-' + symbol_arr[i],\
		label = '{}'.format(low_B_short_M[i]),yerr=m_err_low_arr[i])

# Force the legends to appear on the plot
plt.legend(loc = 1, fontsize = 10, numpoints=1)

# Add a label to the y-axis
plt.ylabel('m', fontsize = 20)

# Set the x axis limits for the plot
ax1.set_xlim([np.min(iter_array), np.max(iter_array)])

# Make the x axis tick labels invisible
plt.setp( ax1.get_xticklabels(), visible=False)

# Create an axis for the second subplot to be produced, which is for the 
# SF slope of high magnetic field simulations. Make the y axis limits the same 
# as for the low magnetic field plot
ax2 = fig.add_subplot(222, sharey = ax1)

# Loop over the high magnetic field simulations to produce plots for each simulation
for i in range(len(high_B_sims)):
	# Plot the SF slope for this simulation, against the observational effect
	plt.errorbar(iter_array, m_mean_high_arr[i], fmt='-' + symbol_arr[i],\
		label = '{}'.format(high_B_short_M[i]),yerr=m_err_high_arr[i])

# Force the legends to appear on the plot
plt.legend(loc = 1, fontsize = 10, numpoints=1)

# Set the x axis limits for the plot
ax2.set_xlim([np.min(iter_array), np.max(iter_array)])

# Make the x axis tick labels invisible
plt.setp( ax2.get_xticklabels(), visible=False)

# Make the y axis tick labels invisible
plt.setp( ax2.get_yticklabels(), visible=False)

# Create an axis for the third subplot to be produced, which is for the 
# integrated quadrupole ratio of low magnetic field simulations. Make the x axis
# limits the same as for the first plot
ax3 = fig.add_subplot(223, sharex = ax1)

# Loop over the low magnetic field simulations to produce plots for each simulation
for i in range(len(low_B_sims)):
	# Plot the integrated quadrupole ratio for this simulation, against the
	# observational effect
	plt.errorbar(iter_array, int_quad_mean_low_arr[i], fmt = '-' + symbol_arr[i],\
		yerr=int_quad_err_low_arr[i])

# Add a label to the y-axis
plt.ylabel('Int Quad Ratio', fontsize = 20)

# Set the x axis limits for the plot
ax3.set_xlim([np.min(iter_array), np.max(iter_array)])

# Create an axis for the fourth subplot to be produced, which is for the 
# integrated quadrupole ratio of high magnetic field simulations. Make the x 
# axis limits the same as for the second plot
ax4 = fig.add_subplot(224, sharex = ax2, sharey = ax3)

# Loop over the high magnetic field simulation to produce plots for each simulation
for i in range(len(high_B_sims)):
	# Plot the integrated quadrupole ratio for this simulation, against the
	# observational effect
	plt.errorbar(iter_array, int_quad_mean_high_arr[i], fmt='-' + symbol_arr[i],\
		yerr=int_quad_err_high_arr[i])

# Set the x axis limits for the plot
ax4.set_xlim([np.min(iter_array), np.max(iter_array)])

# Make the y axis tick labels invisible
plt.setp( ax4.get_yticklabels(), visible=False)

# Add a label to the x-axis
plt.figtext(0.5, 0.0, xlabel, ha = 'center', va = 'bottom', fontsize = 20)

# Add some text to the figure, to label the left plot as figure a
plt.figtext(0.19, 0.95, 'a) m, low B', fontsize = 18)

# Add some text to the figure, to label the left plot as figure b
plt.figtext(0.61, 0.95, 'b) m, high B', fontsize = 18)

# Add some text to the figure, to label the right plot as figure c
plt.figtext(0.19, 0.475, 'c) Quad, low B', fontsize = 18)

# Add some text to the figure, to label the right plot as figure d
plt.figtext(0.61, 0.475, 'd) Quad, high B', fontsize = 18)

# Depending on the observational effect being studied, change the filename used
# to save the figure
if obs_effect == 'noise':
	# Save the figure using the given filename and format
	plt.savefig(simul_loc + 'Publication_Plots/fig18.eps', format = 'eps')
elif obs_effect == 'res':
	# Save the figure using the given filename and format
	plt.savefig(simul_loc + 'Publication_Plots/fig16.eps', format = 'eps')
plt.close() | [
"cher7851@uni.sydney.edu.au"
] | cher7851@uni.sydney.edu.au |
b13014013bfe7f16e2c291f768ee50207dacf92d | aec9a1f3d1d36f19724e745ca4d09a20f67208dc | /talent/migrations/0016_auto_20210210_0904.py | 9fcc9bb77bf5ed3a188a98dbe181747f8acaf2b7 | [] | no_license | endlessor/open-united-backend | b1b1c3411d0d48bc79b35895c70f24d773ac7344 | 86f6905cce14b834b6bf059fd33157249978bd14 | refs/heads/main | 2023-04-29T13:35:28.529360 | 2021-05-17T14:16:39 | 2021-05-17T14:16:39 | 368,211,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # Generated by Django 3.1 on 2021-02-10 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: re-declares Person.headline as a plain
    # CharField with max_length=255. The headline field itself was introduced
    # by migration 0015; this presumably adjusts its definition (e.g. the
    # length cap) — confirm against 0015 if the exact prior state matters.

    dependencies = [
        ('talent', '0015_person_headline'),
    ]

    operations = [
        migrations.AlterField(
            model_name='person',
            name='headline',
            field=models.CharField(max_length=255),
        ),
    ]
| [
"robcoder@hotmail.com"
] | robcoder@hotmail.com |
47a3f8525c7b4f21d5f964bd6f5404fafc9d03a4 | 20176bf4fbd8aec139c7b5a27f2c2e155e173e6e | /data/all-pratic/VivekKumar_DCC/python_2/Day2_1.py | 73d19dd534ff3c2cd430a11bb817b05e35bd6e66 | [] | no_license | githubjyotiranjan/pytraining | 4ac4a1f83cc4270e2939d9d32c705019c5bc61c5 | 8b50c4ab7848bd4cbfdfbc06489768d577289c66 | refs/heads/master | 2020-03-19T06:22:20.793296 | 2018-06-15T20:08:11 | 2018-06-15T20:08:11 | 136,013,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | randomList=int(input('enter the count='))
def is_odd(value):
    """Return True when *value* is odd, False when it is even.

    Extracted so the parity test is applied to the loop variable (the
    original code tested the constant ``randomList % 2`` on every
    iteration, so it either labelled every number "odd" or printed
    nothing) and so the logic is unit-testable.
    """
    return value % 2 != 0


if __name__ == "__main__":
    # Walk 1..randomList (read from the user above) and label each number.
    # The original routed the "Even" message through a bare ``except`` that
    # could never fire (nothing in the ``try`` raises), so evens were never
    # printed; a plain if/else expresses the evident intent.
    for vals in range(1, randomList + 1):
        if is_odd(vals):
            print("The odd= ", vals)
        else:
            print("The Even= ", vals)
| [
"jsatapathy007@gmail.com"
] | jsatapathy007@gmail.com |
baff5fe97381c6dd6353e82b1b8d9a68aa02bc51 | 33c0d36ba88af9c3b35acd000a8a83fa5c24ed8a | /Problems/Isomorphic Strings.py | 7434a2397d00e41aa76436b961747b27f904d915 | [] | no_license | ElliottBarbeau/Leetcode | e58dab31937a36e7557990846898cd2b2586a27c | 43c3698c829f5a613ed3e9516a146e7576d81146 | refs/heads/master | 2021-11-28T02:06:39.848174 | 2021-08-30T23:37:13 | 2021-08-30T23:37:13 | 221,090,906 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
d = {}
if len(s) != len(t):
return False
for i in range(len(s)):
if s[i] not in d and t[i] not in d.values():
d[s[i]] = t[i]
elif s[i] in d and t[i] == d[s[i]]:
continue
else:
return False
return True
print(Solution().isIsomorphic('ab', 'aa')) | [
"elliottbarbeau@gmail.com"
] | elliottbarbeau@gmail.com |
b435561acbf322a0401ebbf926b601484d79c440 | 215eadf839ecc40a37ae22063bf7f9c5c9450699 | /hr_employee.py | 51c7e4843a1f91ca38c6ca6712a1b5c9cd3e7f07 | [] | no_license | davidsetiyadi/hr_webcam | c12e751e91c4757938cae54697df084c99ed9b4a | 4740d9f104c8ebeba7e6ef5e196068f5c5fd6111 | refs/heads/master | 2021-01-19T12:40:22.010104 | 2017-09-25T12:34:38 | 2017-09-25T12:34:38 | 100,796,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | from openerp import models
import numpy as np
import cv2
import dlib
import face_recognition
import urllib
import base64
from common import clock, draw_str
class hr_employee(models.Model):
    """hr.employee extension: webcam photo capture plus face-recognition helpers (work in progress)."""
    _inherit = 'hr.employee'
    def action_take_picture(self, cr, uid, ids, context=None):
        """Return the hr_webcam 'take photo' client action, parameterized with the first employee id in `ids`."""
        if context is None:
            context = {}
        res_model, res_id = self.pool.get(
            'ir.model.data').get_object_reference(cr, uid,
                                                  'hr_webcam',
                                                  'action_take_photo')
        dict_act_window = self.pool.get(
            'ir.actions.client').read(cr, uid, res_id, [])
        if not dict_act_window.get('params', False):
            dict_act_window.update({'params': {}})
        # employee_id is False when `ids` is empty.
        dict_act_window['params'].update(
            {'employee_id': len(ids) and ids[0] or False})
        return dict_act_window
    # NOTE(review): detect/draw_rects are defined inside the class without
    # `self`; a bound-method call would pass the recordset as `img`.  They
    # look like module-level OpenCV helpers pasted into the class -- verify
    # the call sites before relying on them.
    def detect(img, cascade):
        """Run a Haar cascade on `img`; return face boxes converted to [x1, y1, x2, y2]."""
        rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
        if len(rects) == 0:
            return []
        # Convert (x, y, w, h) to (x1, y1, x2, y2) in place.
        rects[:,2:] += rects[:,:2]
        return rects
    def draw_rects(img, rects, color):
        """Draw each rectangle in `rects` onto `img` with the given color."""
        for x1, y1, x2, y2 in rects:
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
    def action_take_opencv(self, cr, uid, ids, context=None):
        """Decode stored employee photos to PNG files and load them with face_recognition.

        Work in progress: most of the recognition pipeline below is commented
        out.  Currently this only base64-decodes each employee image, writes
        it to lebahganteng<ID>.png, and loads it back from disk.
        """
        # print 'David_____________TESTET'
        employee_obj = self.pool.get('hr.employee')
        employee_ids = employee_obj.search(cr,uid,[],limit=100)
        # print employee_ids,'employee_idsss'
        dictionary = {}
        face_encoding = {}
        for employee in employee_ids:
            employees = employee_obj.browse(cr,uid,employee)
            # dictionary[employees.name] = "http://127.0.6.1:7777/web/binary/image?model=hr.employee&field=image_medium&id="+str(employee)
            # urllib.urlretrieve("/web/binary/image?model=hr.employee&field=image_medium&id="+str(employee), str(employee)+"_uid.png")
            imgstring = employees.image
            # print imgstring
            if imgstring:
                convert = base64.b64decode(imgstring)
                file = ("lebahganteng%s.png")% employee
                # print file,'davidddd'
                # NOTE(review): binary data written through a text-mode handle
                # ("w+"); should presumably be "wb".  Also `file` shadows the
                # builtin name.
                t = open(file, "w+")
                t.write(convert)
                t.close()
                biden_image = face_recognition.load_image_file(file)
                # print biden_image,'david'
            # imgdata = base64.b64decode(imgstring)
            # filename = 'some_image.png'  # I assume you have a way of picking unique filenames
            # with open(filename, 'wb') as f:
            #     f.write(imgdata)
            # dictionary[employees.name] = face_recognition.load_image_file("http://127.0.6.1:7777/web/binary/image?model=hr.employee&field=image_medium&id="+str(employee))
            # print dictionary[employee.name],'dictionaryyyy'
            # face_encoding [employees.name] = face_recognition.face_encodings(dictionary[employees.name][0])
        # c = {}
        # for b in a:
        #     c[b]=b+1
        # data = []
        # for a in dictionary:
        #     data.append(dictionary[a])
        # biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
        # obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
        # unknown_face_encoding = face_recognition.face_encodings(unknown_image)[0]
        # print ("david123")
        # known_faces = [
        #     biden_face_encoding,
        #     obama_face_encoding
        # ]
        # # results is an array of True/False telling if the unknown face matched anyone in the known_faces array
        # results = face_recognition.compare_faces(known_faces, unknown_face_encoding)
        print dictionary
        return True
"davidsetiadi11@gmail.com"
] | davidsetiadi11@gmail.com |
3e2abc01b00cc24995d645655e3a0d8aa6ace57c | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories/147881/kaggle_forest_cover_type-master/my_model.py | 6a7942ec6a23229e7c5286d8e10e805c980a5499 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,475 | py | #!/usr/bin/python
import os
import matplotlib
matplotlib.use('Agg')
import pylab as pl
import numpy as np
import pandas as pd
import gzip
import cPickle as pickle
from sklearn import cross_validation
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import SGDClassifier
from sklearn.decomposition import PCA, FastICA, KernelPCA, ProbabilisticPCA
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score, log_loss
def gaussian(x, mu, sig):
    """Normal probability density with mean `mu` and standard deviation `sig`.

    Operates elementwise when `x` is a numpy array.
    """
    z = (x - mu) / sig
    normalization = sig * np.sqrt(2 * np.pi)
    return np.exp(-0.5 * z ** 2) / normalization
def fit_func(x, *p):
    """Curve-fit model: a Gaussian scaled by an amplitude.

    The parameter vector `p` is (mu, sigma, amplitude); any extra entries
    are ignored, matching scipy.optimize.curve_fit's *p calling convention.
    """
    mu, sig, amp = p[0], p[1], p[2]
    return amp * gaussian(x, mu, sig)
def create_html_page_of_plots(list_of_plots):
    """Move the generated *.png files into ./html and write an index page.

    Args:
        list_of_plots: image file names (relative to ./html after the move)
            to embed, in display order.
    """
    if not os.path.exists('html'):
        os.makedirs('html')
    # NOTE(review): relies on a POSIX shell `mv` with a glob; fine for the
    # original Linux workflow but not portable to Windows.
    os.system('mv *.png html')
    print(list_of_plots)
    with open('html/index.html', 'w') as htmlfile:
        htmlfile.write('<!DOCTYPE html><html><body><div>')
        for plot in list_of_plots:
            htmlfile.write('<p><img src="%s"></p>' % plot)
        # Close the tags opened above; the original emitted
        # '</div></html></html>' (unclosed <body>, duplicated </html>).
        htmlfile.write('</div></body></html>')
def get_plots(in_df):
    """Save one histogram PNG per feature column (one curve per cover type) and build the HTML index."""
    list_of_plots = []
    print in_df.columns
    for c in in_df.columns:
        # Skip the id and label columns.
        if c in ('Id', 'Cover_Type'):
            continue
        pl.clf()
        nent = len(in_df[c])
        hmin, hmax = in_df[c].min(), in_df[c].max()
        # Bin count scales with sample size: roughly one bin per 500 rows.
        xbins = np.linspace(hmin,hmax,nent//500)
        for n in range(1,8):
            # Overlay one step histogram per cover type (classes 1..7).
            covtype = in_df.Cover_Type == n
            a = in_df[covtype][c].values
            #b = in_df[covtype][c].hist(bins=xbins, histtype='step')
            pl.hist(a, bins=xbins, histtype='step')
            #if c == 'Elevation':
            #mu, sig = a.mean(), a.std()
            #x = np.linspace(hmin,hmax,1000)
            #y = (a.sum()/len(xbins)) * gaussian(x, mu, sig)
            #pl.plot(x, y, '--')
        pl.title(c)
        pl.savefig('%s.png' % c)
        list_of_plots.append('%s.png' % c)
    create_html_page_of_plots(list_of_plots)
def plot_failures(in_array, covertype):
    """Save a 20-bin histogram PNG per feature column of `in_array`, split by `covertype`, and build the HTML index."""
    print in_array.shape
    list_of_plots = []
    for c in range(in_array.shape[1]):
        pl.clf()
        # nent is computed but unused in this function.
        nent = in_array.shape[0]
        hmin, hmax = in_array[:,c].min(), in_array[:,c].max()
        xbins = np.linspace(hmin,hmax,20)
        for n in range(1,8):
            # NOTE(review): the local mask name `covtype` is easy to confuse
            # with the `covertype` parameter it is derived from.
            covtype = covertype == n
            a = in_array[covtype][:,c]
            pl.hist(a, bins=xbins, histtype='step')
        pl.title(c)
        pl.savefig('%s.png' % c)
        list_of_plots.append('%s.png' % c)
    create_html_page_of_plots(list_of_plots)
def transform_from_classes(inp):
    """One-hot encode class labels 1..7 from `inp` into an (n, 7) int64 array."""
    n_samples = inp.shape[0]
    one_hot = np.zeros((n_samples, 7), dtype=np.int64)
    # Vectorized equivalent of setting y[i, label-1] = 1 for every row.
    one_hot[np.arange(n_samples), np.asarray(inp) - 1] = 1
    return one_hot
def transform_to_class(yinp):
    """Convert per-class score/one-hot rows back to 1-based class labels.

    Returns an integer array of argmax(yinp, axis=1) + 1.

    The original `np.array(map(lambda x: x+1, ...))` only worked under
    Python 2, where map() returned a list; under Python 3 it wraps the map
    object in a 0-d object array.  The vectorized form is equivalent on
    both versions.
    """
    return np.argmax(yinp, axis=1) + 1
def load_data():
    """Load the Kaggle forest-cover CSVs and return (xtrain, ytrain, xtest, ytest).

    ytrain is the one-hot (n, 7) encoding of Cover_Type; ytest is the
    test-set Id column taken from the sample submission (placeholder, not
    labels).
    """
    train_df = pd.read_csv('train.csv')
    test_df = pd.read_csv('test.csv')
    ssub_df = pd.read_csv('sampleSubmission.csv')
    #get_plots(train_df)
    # Extra columns to exclude from the feature matrix (none currently).
    labels_to_drop = []
    xtrain = train_df.drop(labels=['Id','Cover_Type']+labels_to_drop, axis=1).values
    ytrain = transform_from_classes(train_df['Cover_Type'].values)
    #ytrain = train_df['Cover_Type'].values
    xtest = test_df.drop(labels=['Id']+labels_to_drop, axis=1).values
    ytest = ssub_df['Id'].values
    print xtrain.shape, ytrain.shape, xtest.shape, ytest.shape
    return xtrain, ytrain, xtest, ytest
def scorer(estimator, X, y):
    """Scoring callable: accuracy of `estimator`'s predictions on X against y."""
    return accuracy_score(estimator.predict(X), y)
def train_model_parallel(model, xtrain, ytrain, index):
    """Fit `model` on the binary target for class `index` and pickle it.

    Writes the fitted model to model_<index>.pkl.gz (gzip'd pickle,
    protocol 2).
    """
    # Random 32-bit seed assembled from os.urandom; currently unused because
    # the train/test split below is commented out.
    randint = reduce(lambda x,y: x|y, [ord(x)<<(n*8) for (n,x) in enumerate(os.urandom(4))])
    #xTrain, xTest, yTrain, yTest = \
    #cross_validation.train_test_split(xtrain, ytrain[:,index], test_size=0.4,
    #random_state=randint)
    # Train on the full training set, one-vs-rest column of the one-hot labels.
    xTrain, yTrain = xtrain, ytrain[:,index]
    #n_est = [10, 100, 200]
    #m_dep = [5, 10, 40]
    #model = GridSearchCV(estimator=model,
    #param_grid=dict(n_estimators=n_est, max_depth=m_dep),
    #scoring=scorer,
    #n_jobs=-1, verbose=1)
    model.fit(xTrain, yTrain)
    print model
    #ytest_pred = model.predict(xTest)
    #ytest_prob = model.predict_proba(xTest)
    #print 'accuracy', accuracy_score(ytest_pred,yTest)
    #print 'logloss', log_loss(yTest, ytest_prob)
    with gzip.open('model_%d.pkl.gz' % index, 'wb') as mfile:
        pickle.dump(model, mfile, protocol=2)
    return
def test_model_parallel(xtrain, ytrain):
    """Evaluate the 7 saved one-vs-rest models on a random held-out 40% split and print accuracy."""
    # Random 32-bit seed assembled from os.urandom for the split.
    randint = reduce(lambda x,y: x|y, [ord(x)<<(n*8) for (n,x) in enumerate(os.urandom(4))])
    xTrain, xTest, yTrain, yTest = \
        cross_validation.train_test_split(xtrain, ytrain, test_size=0.4,
                                          random_state=randint)
    # Per-class probabilities: (samples, class, [negative, positive]).
    ytest_prob = np.zeros((yTest.shape[0], 7, 2))
    for n in range(7):
        with gzip.open('model_%d.pkl.gz' % n, 'rb') as mfile:
            model = pickle.load(mfile)
        #print 'grid scores', model.grid_scores_
        #print 'best score', model.best_score_
        #print 'best params', model.best_params_
        ytest_prob[:,n,:] = model.predict_proba(xTest)
    #print accuracy_score
    # Predicted label = class with the highest positive-class probability.
    ytest = transform_to_class(yTest).astype(np.int64)
    ytest_pred = transform_to_class(ytest_prob[:,:,1]).astype(np.int64)
    print ytest.shape, ytest_pred.shape
    print accuracy_score(ytest, ytest_pred)
def prepare_submission_parallel(xtrain, ytrain, xtest, ytest):
    """Score the test set with the 7 saved models and write submission.csv.

    NOTE(review): xtrain/ytrain are unused here; `ytest` is the Id column
    used for the submission index.
    """
    print ytest.shape
    # Per-class probabilities: (samples, class, [negative, positive]).
    ytest_prob = np.zeros((ytest.shape[0], 7, 2))
    for n in range(7):
        with gzip.open('model_%d.pkl.gz' % n, 'rb') as mfile:
            model = pickle.load(mfile)
        ytest_prob[:,n,:] = model.predict_proba(xtest)
    # Convert per-class positive probabilities to 1-based class labels.
    ytest2 = transform_to_class(ytest_prob[:,:,1]).astype(np.int64)
    df = pd.DataFrame({'Id': ytest, 'Cover_Type': ytest2}, columns=('Id', 'Cover_Type'))
    df.to_csv('submission.csv', index=False)
    return
#def prepare_submission(model, xtrain, ytrain, xtest, ytest):
#model.fit(xtrain, ytrain)
#ytest2 = transform_to_class(model.predict(xtest).astype(np.int64))
##dateobj = map(datetime.datetime.fromtimestamp, ytest)
#df = pd.DataFrame({'Id': ytest, 'Cover_Type': ytest2}, columns=('Id', 'Cover_Type'))
#df.to_csv('submission.csv', index=False)
#return
if __name__ == '__main__':
    # Mode selection via the first integer-looking argv entry:
    #   no int arg (-1) = train all 7 one-vs-rest models, then write submission
    #   0..6            = train only the model for that class index
    #   7               = evaluate the saved models on a held-out split
    #   8               = write submission.csv from the saved models
    xtrain, ytrain, xtest, ytest = load_data()
    #model = RandomForestRegressor(n_jobs=-1)
    model = RandomForestClassifier(n_estimators=400, n_jobs=-1)
    #model = DecisionTreeClassifier()
    #model = GradientBoostingClassifier(loss='deviance', verbose=1)
    index = -1
    for arg in os.sys.argv:
        try:
            index = int(arg)
            break
        except ValueError:
            continue
    if index == -1:
        for idx in range(7):
            train_model_parallel(model, xtrain, ytrain, idx)
        prepare_submission_parallel(xtrain, ytrain, xtest, ytest)
    elif index >= 0 and index < 7:
        train_model_parallel(model, xtrain, ytrain, index)
    elif index == 7:
        test_model_parallel(xtrain, ytrain)
    elif index == 8:
        prepare_submission_parallel(xtrain, ytrain, xtest, ytest)
| [
"keesiu.wong@gmail.com"
] | keesiu.wong@gmail.com |
942b9171041a8572b2cf2d3d1042c271979e83e0 | beed259c9aaf824c5307d93ffa736255f2d98831 | /month05/Spider/Wholesale02/run.py | de99978bdaa95073168ae291ab53dece83b892ce | [
"Apache-2.0"
] | permissive | chaofan-zheng/python_learning_code | 21345f97ebf74c3cad0ef488a93ec8a7fd771a63 | 5d05848911d55aa49eaee4afd7ffd80536fad7aa | refs/heads/main | 2023-05-27T16:17:18.130492 | 2021-06-06T14:23:31 | 2021-06-06T14:23:31 | 338,234,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | from scrapy import cmdline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import seaborn as sns
import os
# Run the spider and export scraped items to wholesale.csv, then convert the
# analysis notebook.
# NOTE(review): scrapy's cmdline.execute() raises SystemExit in some versions,
# which would stop this script right here -- verify before relying on the
# analysis code below.
cmdline.execute('scrapy crawl wholesale -o wholesale.csv'.split())
command = f'jupyter nbconvert {os.getcwd()}/visualization.ipynb'
print(command)
os.system(command)
warnings.filterwarnings("ignore")
# Use a CJK-capable font so the Chinese chart titles render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
data = pd.read_csv('wholesale.csv')
data = data.drop(columns='href')
# Keep only rows where both columns of interest are present.
data_clean = data[data.integer.notnull()][data.rePurchaseRate.notnull()]
# Drop rows whose 'integer' value does not parse as an int.
# NOTE(review): `i` here is a cell value, not an index, and str.contains()
# treats it as a regex pattern -- confirm this drops only the intended rows.
for i in data_clean['integer']:
    try:
        i = int(i)
    except:
        # print(data_clean.loc[i,'integer'])
        data_clean = data_clean.drop(data_clean[data_clean['integer'].str.contains(i)].index)
# Same cleanup for the repurchase-rate column (must parse as float).
for i in data_clean['rePurchaseRate']:
    try:
        i = float(i)
    except:
        # print(data_clean.loc[i,'integer'])
        data_clean = data_clean.drop(data_clean[data_clean['rePurchaseRate'].str.contains(i)].index)
data_clean.integer = data_clean.integer.astype('int')
data_clean.rePurchaseRate = data_clean.rePurchaseRate.astype('float')
print(data_clean.head())
print(data_clean.describe())
# print(data_clean['rePurchaseRate'])
# 2x2 dashboard: distribution and box plots for repurchase rate and sales.
fig=plt.figure(figsize = (16,12))
ax1=fig.add_subplot(221)
plt.title('复购率频次分布图',fontsize=14)
sns.distplot(data_clean['rePurchaseRate'])
ax1=fig.add_subplot(222)
plt.title('销售量频次分布图',fontsize=14)
sns.distplot(data_clean['integer'])
ax1=fig.add_subplot(223)
plt.title('复购率箱体图',fontsize=14)
sns.boxplot(x='rePurchaseRate',data=data_clean)
ax1=fig.add_subplot(224)
plt.title('销售量箱体图',fontsize=14)
sns.boxplot(x='integer',data=data_clean)
plt.show()
| [
"417355570@qq.com"
] | 417355570@qq.com |
b15953c884974afcdc6bdde6b224dba82df25716 | d8cbe9ce0469f72b8929af01538b6ceddff10a38 | /homeassistant/components/rainbird/config_flow.py | 057fc6fe39662a459581e0e3f0bdd86855071e43 | [
"Apache-2.0"
] | permissive | piitaya/home-assistant | 9c1ba162dac9604e4d43e035e74bad7bba327f0b | 48893738192431f96966998c4ff7a3723a2f8f4a | refs/heads/dev | 2023-03-07T16:13:32.117970 | 2023-01-10T17:47:48 | 2023-01-10T17:47:48 | 172,578,293 | 3 | 1 | Apache-2.0 | 2023-02-22T06:15:56 | 2019-02-25T20:19:40 | Python | UTF-8 | Python | false | false | 6,344 | py | """Config flow for Rain Bird."""
from __future__ import annotations
import asyncio
import logging
from typing import Any
import async_timeout
from pyrainbird.async_client import (
AsyncRainbirdClient,
AsyncRainbirdController,
RainbirdApiException,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_FRIENDLY_NAME, CONF_HOST, CONF_PASSWORD
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_validation as cv, selector
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
ATTR_DURATION,
CONF_IMPORTED_NAMES,
CONF_SERIAL_NUMBER,
CONF_ZONES,
DEFAULT_TRIGGER_TIME_MINUTES,
DOMAIN,
TIMEOUT_SECONDS,
)
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): selector.TextSelector(),
vol.Required(CONF_PASSWORD): selector.TextSelector(
selector.TextSelectorConfig(type=selector.TextSelectorType.PASSWORD)
),
}
)
class ConfigFlowError(Exception):
    """Exception signalling a failure during the Rain Bird config flow.

    Carries an ``error_code`` attribute used as the error key shown on the
    config form (e.g. "timeout_connect", "cannot_connect").
    """

    def __init__(self, message: str, error_code: str) -> None:
        """Store the error code and initialize the base Exception with the message."""
        self.error_code = error_code
        super().__init__(message)
class RainbirdConfigFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Rain Bird."""
    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: ConfigEntry,
    ) -> RainBirdOptionsFlowHandler:
        """Define the config flow to handle options."""
        return RainBirdOptionsFlowHandler(config_entry)
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Configure the Rain Bird device.

        Shows the host/password form; on submit, probes the device and
        creates the entry, re-showing the form with an error key on failure.
        """
        error_code: str | None = None
        if user_input:
            try:
                serial_number = await self._test_connection(
                    user_input[CONF_HOST], user_input[CONF_PASSWORD]
                )
            except ConfigFlowError as err:
                _LOGGER.error("Error during config flow: %s", err)
                error_code = err.error_code
            else:
                return await self.async_finish(
                    serial_number,
                    data={
                        CONF_HOST: user_input[CONF_HOST],
                        CONF_PASSWORD: user_input[CONF_PASSWORD],
                        CONF_SERIAL_NUMBER: serial_number,
                    },
                    options={ATTR_DURATION: DEFAULT_TRIGGER_TIME_MINUTES},
                )
        return self.async_show_form(
            step_id="user",
            data_schema=DATA_SCHEMA,
            errors={"base": error_code} if error_code else None,
        )
    async def _test_connection(self, host: str, password: str) -> str:
        """Test the connection and return the device serial number.
        Raises a ConfigFlowError on failure.
        """
        controller = AsyncRainbirdController(
            AsyncRainbirdClient(
                async_get_clientsession(self.hass),
                host,
                password,
            )
        )
        try:
            # Bound the probe so a dead host fails fast instead of hanging.
            async with async_timeout.timeout(TIMEOUT_SECONDS):
                return await controller.get_serial_number()
        except asyncio.TimeoutError as err:
            raise ConfigFlowError(
                f"Timeout connecting to Rain Bird controller: {str(err)}",
                "timeout_connect",
            ) from err
        except RainbirdApiException as err:
            raise ConfigFlowError(
                f"Error connecting to Rain Bird controller: {str(err)}",
                "cannot_connect",
            ) from err
    async def async_step_import(self, config: dict[str, Any]) -> FlowResult:
        """Import a config entry from configuration.yaml."""
        # Abort early if an entry for this host already exists.
        self._async_abort_entries_match({CONF_HOST: config[CONF_HOST]})
        try:
            serial_number = await self._test_connection(
                config[CONF_HOST], config[CONF_PASSWORD]
            )
        except ConfigFlowError as err:
            _LOGGER.error("Error during config import: %s", err)
            return self.async_abort(reason=err.error_code)
        data = {
            CONF_HOST: config[CONF_HOST],
            CONF_PASSWORD: config[CONF_PASSWORD],
            CONF_SERIAL_NUMBER: serial_number,
        }
        # Carry over user-assigned zone names from YAML, keyed by zone number.
        names: dict[str, str] = {}
        for (zone, zone_config) in config.get(CONF_ZONES, {}).items():
            if name := zone_config.get(CONF_FRIENDLY_NAME):
                names[str(zone)] = name
        if names:
            data[CONF_IMPORTED_NAMES] = names
        return await self.async_finish(
            serial_number,
            data=data,
            options={
                ATTR_DURATION: config.get(ATTR_DURATION, DEFAULT_TRIGGER_TIME_MINUTES),
            },
        )
    async def async_finish(
        self,
        serial_number: str,
        data: dict[str, Any],
        options: dict[str, Any],
    ) -> FlowResult:
        """Create the config entry."""
        # The device serial number is the unique id; duplicates abort here.
        await self.async_set_unique_id(serial_number)
        self._abort_if_unique_id_configured()
        return self.async_create_entry(
            title=data[CONF_HOST],
            data=data,
            options=options,
        )
class RainBirdOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a RainBird options flow."""
    def __init__(self, config_entry: ConfigEntry) -> None:
        """Initialize RainBirdOptionsFlowHandler."""
        self.config_entry = config_entry
    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Manage the options.

        Shows a single field for the default irrigation duration, pre-filled
        with the current option value, and saves it on submit.
        """
        if user_input is not None:
            return self.async_create_entry(data=user_input)
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        ATTR_DURATION,
                        default=self.config_entry.options[ATTR_DURATION],
                    ): cv.positive_int,
                }
            ),
        )
| [
"noreply@github.com"
] | piitaya.noreply@github.com |
eafad22e3b9c5ddb8002f0f4d4281976958abffb | 1c2bb53d56a777bd2700c0438421ce686d1c8dc5 | /tests/past_api07_sources_excel.py | 5a230d6124045e6987f3acaa7f7d044bbeba2982 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | drewdolan/datatest | a1a771ff630acc7322387f4f810ff75fb22f5e5f | 1c168739f84328043c7f0be7cf25bb8e23cc259c | refs/heads/master | 2020-05-09T16:01:09.553762 | 2019-05-18T05:40:16 | 2019-05-18T05:40:16 | 181,254,930 | 0 | 0 | NOASSERTION | 2019-04-14T03:17:59 | 2019-04-14T03:17:57 | null | UTF-8 | Python | false | false | 1,884 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from . import _unittest as unittest
from .mixins import OtherTests
from .mixins import CountTests
try:
import xlrd
except ImportError:
xlrd = None
from datatest.__past__.api07_sources import ExcelSource
workbook_path = os.path.join(
os.path.dirname(__file__),
'sample_files',
'test_sources_excel.xlsx',
)
@unittest.skipIf(xlrd is None, 'xlrd not found')
class TestExcelSource(OtherTests, unittest.TestCase):
    """Run the shared OtherTests suite against an ExcelSource on the workbook's default sheet."""
    def setUp(self):
        # NOTE(review): `global` is unnecessary for read-only access.
        global workbook_path
        self.datasource = ExcelSource(workbook_path)  # <- Defaults to "Sheet 1"
@unittest.skipIf(xlrd is None, 'xlrd not found')
class TestExcelSourceCount(unittest.TestCase):
#class TestExcelSourceCount(CountTests, unittest.TestCase):
    """Exercise ExcelSource.count() against the 'count_data' worksheet."""
    def setUp(self):
        # NOTE(review): `global` is unnecessary for read-only access.
        global workbook_path
        self.datasource = ExcelSource(workbook_path, 'count_data')
    def test_count(self):
        count = self.datasource.count
        # Total count of truthy values in label1.
        self.assertEqual(9, count('label1'))
        # Counts grouped by label1 itself.
        expected = {'a': 4, 'b': 5}
        result = count('label1', ['label1'])
        self.assertEqual(expected, result)
        expected = {'a': 3, 'b': 3}  # Counts only truthy values (not '' or None).
        result = count('label2', ['label1'])
        self.assertEqual(expected, result)
        # Counts grouped by the (label1, label2) pair.
        expected = {
            ('a', 'x'): 2,
            ('a', 'y'): 1,
            ('a', ''): 1,
            ('b', 'z'): 1,
            ('b', 'y'): 1,
            ('b', 'x'): 1,
            #('b', None): 1,  # <- None value has no equivalent in XLSX file.
            #('b', ''): 1,
            ('b', ''): 2,
        }
        result = count('label1', ['label1', 'label2'])
        self.assertEqual(expected, result)
        # Counts grouped by label2, filtered to rows where label1 == 'a'.
        expected = {'x': 2, 'y': 1, '': 1}
        result = count('label1', 'label2', label1='a')
        self.assertEqual(expected, result)
| [
"shawnbrown@users.noreply.github.com"
] | shawnbrown@users.noreply.github.com |
ec2739b5ba94034b1ee8cd65a284ccd4192cc77a | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=1_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=97/params.py | 57b7be835a6b989c5d600c2af47d54e7a07715c4 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.029857',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 97,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
2e4ec5eb9f881ea159c5acf75b2f58cc0e5c9aa2 | 3fc254b192d170aa31fa42f1ef480a8ab45315be | /src/NL_Transformer_Enc_Dec/fairseq/sequence_generator.py | f19fbb0aa532ef952a02e37f48c9052ded21db05 | [
"MIT"
] | permissive | snudatalab/Negotiation_Learning | a38246d27350745f7d8f35edbd807d0c16c7c7cb | 7c0f13781aec2d7efe0870c2bcd60eaef5d342de | refs/heads/main | 2023-06-04T16:15:39.665634 | 2021-06-20T17:09:32 | 2021-06-20T17:09:32 | 378,689,993 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 58,297 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import sys
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
from fairseq.ngram_repeat_block import NGramRepeatBlock
class SequenceGenerator(nn.Module):
    def __init__(
        self,
        models,
        tgt_dict,
        beam_size=1,
        max_len_a=0,
        max_len_b=200,
        max_len=0,
        min_len=1,
        normalize_scores=True,
        len_penalty=1.0,
        unk_penalty=0.0,
        temperature=1.0,
        match_source_len=False,
        no_repeat_ngram_size=0,
        search_strategy=None,
        eos=None,
        symbols_to_strip_from_output=None,
        lm_model=None,
        lm_weight=1.0,
    ):
        """Generates translations of a given source sentence.
        Args:
            models (List[~fairseq.models.FairseqModel]): ensemble of models,
                currently support fairseq.models.TransformerModel for scripting
            beam_size (int, optional): beam width (default: 1)
            max_len_a/b (int, optional): generate sequences of maximum length
                ax + b, where x is the source length
            max_len (int, optional): the maximum length of the generated output
                (not including end-of-sentence); 0 means use the model's
                maximum supported decoder length
            min_len (int, optional): the minimum length of the generated output
                (not including end-of-sentence)
            normalize_scores (bool, optional): normalize scores by the length
                of the output (default: True)
            len_penalty (float, optional): length penalty, where <1.0 favors
                shorter, >1.0 favors longer sentences (default: 1.0)
            unk_penalty (float, optional): unknown word penalty, where <0
                produces more unks, >0 produces fewer (default: 0.0)
            temperature (float, optional): temperature, where values
                >1.0 produce more uniform samples and values <1.0 produce
                sharper samples (default: 1.0)
            match_source_len (bool, optional): outputs should match the source
                length (default: False)
            no_repeat_ngram_size (int, optional): block repeated ngrams of
                this size during search (0 disables blocking)
            search_strategy (optional): search object to use; defaults to
                search.BeamSearch over ``tgt_dict``
            eos (int, optional): symbol index used as end-of-sentence
                (default: ``tgt_dict.eos()``)
            symbols_to_strip_from_output (set, optional): extra symbol indices
                to strip from the output; eos is always stripped
            lm_model (optional): language model applied during generation
                -- presumably for fusion in _generate; verify there
            lm_weight (float, optional): weight applied to ``lm_model`` scores
                -- TODO confirm against _generate
        """
        super().__init__()
        # Wrap a raw list of models in an EnsembleModel; pass through if the
        # caller already provides one.
        if isinstance(models, EnsembleModel):
            self.model = models
        else:
            self.model = EnsembleModel(models)
        self.tgt_dict = tgt_dict
        self.pad = tgt_dict.pad()
        self.unk = tgt_dict.unk()
        self.eos = tgt_dict.eos() if eos is None else eos
        # Symbols removed from final hypotheses; always includes eos.
        self.symbols_to_strip_from_output = (
            symbols_to_strip_from_output.union({self.eos})
            if symbols_to_strip_from_output is not None
            else {self.eos}
        )
        self.vocab_size = len(tgt_dict)
        self.beam_size = beam_size
        # the max beam size is the dictionary size - 1, since we never select pad
        self.beam_size = min(beam_size, self.vocab_size - 1)
        self.max_len_a = max_len_a
        self.max_len_b = max_len_b
        self.min_len = min_len
        # 0 falls back to the model's maximum supported decoder length.
        self.max_len = max_len or self.model.max_decoder_positions()
        self.normalize_scores = normalize_scores
        self.len_penalty = len_penalty
        self.unk_penalty = unk_penalty
        self.temperature = temperature
        self.match_source_len = match_source_len
        if no_repeat_ngram_size > 0:
            self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
        else:
            self.repeat_ngram_blocker = None
        assert temperature > 0, "--temperature must be greater than 0"
        self.search = (
            search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
        )
        # We only need to set src_lengths in LengthConstrainedBeamSearch.
        # As a module attribute, setting it would break in multithread
        # settings when the model is shared.
        self.should_set_src_lengths = (
            hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
        )
        # Generation is inference-only.
        self.model.eval()
        self.lm_model = lm_model
        self.lm_weight = lm_weight
        if self.lm_model is not None:
            self.lm_model.eval()
    def cuda(self):
        """Move the wrapped model ensemble to GPU; returns self for chaining."""
        self.model.cuda()
        return self
    @torch.no_grad()
    def forward(
        self,
        sample: Dict[str, Dict[str, Tensor]],
        prefix_tokens: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        """Generate a batch of translations.
        Args:
            sample (dict): batch
            prefix_tokens (torch.LongTensor, optional): force decoder to begin
                with these tokens
            bos_token (int, optional): beginning of sentence token
                (default: self.eos)
        """
        # Thin wrapper: gradients are disabled by the decorator and all work
        # is delegated to _generate.
        return self._generate(sample, prefix_tokens, bos_token=bos_token)
    # TODO(myleott): unused, deprecate after pytorch-translate migration
    def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
        """Iterate over a batched dataset and yield individual translations.
        Args:
            cuda (bool, optional): use GPU for generation
            timer (StopwatchMeter, optional): time generations
        Yields:
            (id, src, ref, hypos) per sentence; src/ref have padding stripped
            and ref is None when the sample carries no target.
        NOTE(review): the `beam_size` parameter is unused in this body, and
        `self.generate(encoder_input)` passes encoder_input where generate()
        expects `models` while omitting `sample` -- this looks broken with the
        current generate() signature, consistent with the TODO above marking
        the method unused.
        """
        for sample in data_itr:
            s = utils.move_to_cuda(sample) if cuda else sample
            if "net_input" not in s:
                continue
            input = s["net_input"]
            # model.forward normally channels prev_output_tokens into the decoder
            # separately, but SequenceGenerator directly calls model.encoder
            encoder_input = {
                k: v for k, v in input.items() if k != "prev_output_tokens"
            }
            if timer is not None:
                timer.start()
            with torch.no_grad():
                hypos = self.generate(encoder_input)
            if timer is not None:
                # Record the total token count of the best hypothesis per sentence.
                timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
            for i, id in enumerate(s["id"].data):
                # remove padding
                src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
                ref = (
                    utils.strip_pad(s["target"].data[i, :], self.pad)
                    if s["target"] is not None
                    else None
                )
                yield id, src, ref, hypos[i]
    @torch.no_grad()
    def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs) -> List[List[Dict[str, Tensor]]]:
        """Generate translations. Match the api of other fairseq generators.
        Args:
            models (List[~fairseq.models.FairseqModel]): ensemble of models
            sample (dict): batch
            prefix_tokens (torch.LongTensor, optional): force decoder to begin
                with these tokens
            constraints (torch.LongTensor, optional): force decoder to include
                the list of constraints
            bos_token (int, optional): beginning of sentence token
                (default: self.eos)
        """
        # NOTE(review): `models` is accepted only for API compatibility with
        # other fairseq generators; decoding uses the ensemble stored on self.
        return self._generate(sample, **kwargs)
    @torch.no_grad()
    def generate_RT(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs) -> List[List[Dict[str, Tensor]]]:
        """Generate translations. Match the api of other fairseq generators.
        Args:
            models (List[~fairseq.models.FairseqModel]): ensemble of models
            sample (dict): batch
            prefix_tokens (torch.LongTensor, optional): force decoder to begin
                with these tokens
            constraints (torch.LongTensor, optional): force decoder to include
                the list of constraints
            bos_token (int, optional): beginning of sentence token
                (default: self.eos)
        """
        # Same wrapper shape as generate(), but dispatches to _generate_RT
        # (defined elsewhere in this file); `models` is likewise unused here.
        return self._generate_RT(sample, **kwargs)
    def _generate(
        self,
        sample: Dict[str, Dict[str, Tensor]],
        prefix_tokens: Optional[Tensor] = None,
        constraints: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        """Run beam search over one batch and return finalized hypotheses.

        Args:
            sample (dict): batch; ``sample["net_input"]`` must contain one of
                ``src_tokens``, ``source`` or ``features``.
            prefix_tokens (Tensor, optional): force the decoder to begin each
                hypothesis with these tokens.
            constraints (Tensor, optional): constraints forwarded to the
                search strategy (which must support them).
            bos_token (int, optional): token used to seed position 0 of every
                beam (default: ``self.eos``).

        Returns:
            One list per input sentence, each containing hypothesis dicts
            with keys ``tokens``, ``score``, ``attention``, ``alignment`` and
            ``positional_scores``, sorted by score, descending.
        """
        # One incremental-state dict per model in the ensemble.
        incremental_states = torch.jit.annotate(
            List[Dict[str, Dict[str, Optional[Tensor]]]],
            [
                torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
                for i in range(self.model.models_size)
            ],
        )
        net_input = sample["net_input"]
        if "src_tokens" in net_input:
            src_tokens = net_input["src_tokens"]
            # length of the source text being the character length except EndOfSentence and pad
            src_lengths = (
                (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
            )
        elif "source" in net_input:
            src_tokens = net_input["source"]
            src_lengths = (
                net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
                if net_input["padding_mask"] is not None
                else torch.tensor(src_tokens.size(-1)).to(src_tokens)
            )
        elif "features" in net_input:
            src_tokens = net_input["features"]
            src_lengths = (
                net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
                if net_input["padding_mask"] is not None
                else torch.tensor(src_tokens.size(-1)).to(src_tokens)
            )
        else:
            raise Exception("expected src_tokens or source in net input. input keys: " + str(net_input.keys()))
        # bsz: total number of sentences in beam
        # Note that src_tokens may have more than 2 dimensions (i.e. audio features)
        bsz, src_len = src_tokens.size()[:2]
        beam_size = self.beam_size
        if constraints is not None and not self.search.supports_constraints:
            raise NotImplementedError(
                "Target-side constraints were provided, but search method doesn't support them"
            )
        # Initialize constraints, when active
        self.search.init_constraints(constraints, beam_size)
        max_len: int = -1
        if self.match_source_len:
            max_len = src_lengths.max().item()
        else:
            max_len = min(
                int(self.max_len_a * src_len + self.max_len_b),
                self.max_len - 1,
            )
        assert (
            self.min_len <= max_len
        ), "min_len cannot be larger than max_len, please adjust these!"
        # compute the encoder output for each beam
        encoder_outs = self.model.forward_encoder(net_input)
        # placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
        new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
        new_order = new_order.to(src_tokens.device).long()
        encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
        # ensure encoder_outs is a List.
        assert encoder_outs is not None
        # initialize buffers
        scores = (
            torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
        ) # +1 for eos; pad is never chosen for scoring
        tokens = (
            torch.zeros(bsz * beam_size, max_len + 2)
            .to(src_tokens)
            .long()
            .fill_(self.pad)
        ) # +2 for eos and pad
        tokens[:, 0] = self.eos if bos_token is None else bos_token
        attn: Optional[Tensor] = None
        # A list that indicates candidates that should be ignored.
        # For example, suppose we're sampling and have already finalized 2/5
        # samples. Then cands_to_ignore would mark 2 positions as being ignored,
        # so that we only finalize the remaining 3 samples.
        cands_to_ignore = (
            torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
        ) # forward and backward-compatible False mask
        # list of completed sentences
        finalized = torch.jit.annotate(
            List[List[Dict[str, Tensor]]],
            [torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of information about the hypothesis being finalized at each step
        # a boolean array indicating if the sentence at the index is finished or not
        finished = [False for i in range(bsz)]
        num_remaining_sent = bsz # number of sentences remaining
        # number of candidate hypos per step
        cand_size = 2 * beam_size # 2 x beam size in case half are EOS
        # offset arrays for converting between different indexing schemes
        bbsz_offsets = (
            (torch.arange(0, bsz) * beam_size)
            .unsqueeze(1)
            .type_as(tokens)
            .to(src_tokens.device)
        )
        cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
        reorder_state: Optional[Tensor] = None
        batch_idxs: Optional[Tensor] = None
        original_batch_idxs: Optional[Tensor] = None
        if "id" in sample and isinstance(sample["id"], Tensor):
            original_batch_idxs = sample["id"]
        else:
            original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
        for step in range(max_len + 1): # one extra step for EOS marker
            # reorder decoder internal states based on the prev choice of beams
            if reorder_state is not None:
                if batch_idxs is not None:
                    # update beam indices to take into account removed sentences
                    corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
                        batch_idxs
                    )
                    reorder_state.view(-1, beam_size).add_(
                        corr.unsqueeze(-1) * beam_size
                    )
                    original_batch_idxs = original_batch_idxs[batch_idxs]
                self.model.reorder_incremental_state(incremental_states, reorder_state)
                encoder_outs = self.model.reorder_encoder_out(
                    encoder_outs, reorder_state
                )
            # one decoding step for all beams at once
            lprobs, avg_attn_scores = self.model.forward_decoder(
                tokens[:, : step + 1],
                encoder_outs,
                incremental_states,
                self.temperature,
            )
            if self.lm_model is not None:
                # shallow LM fusion: add weighted LM log-probs
                lm_out = self.lm_model(tokens[:, : step + 1])
                probs = self.lm_model.get_normalized_probs(
                    lm_out, log_probs=True, sample=None
                )
                probs = probs[:, -1, :] * self.lm_weight
                lprobs += probs
            # replace NaNs (x != x) with -inf so they are never selected
            lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
            lprobs[:, self.pad] = -math.inf # never select pad
            lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
            # handle max length constraint
            if step >= max_len:
                lprobs[:, : self.eos] = -math.inf
                lprobs[:, self.eos + 1 :] = -math.inf
            # handle prefix tokens (possibly with different lengths)
            if (
                prefix_tokens is not None
                and step < prefix_tokens.size(1)
                and step < max_len
            ):
                lprobs, tokens, scores = self._prefix_tokens(
                    step, lprobs, scores, tokens, prefix_tokens, beam_size
                )
            elif step < self.min_len:
                # minimum length constraint (does not apply if using prefix_tokens)
                lprobs[:, self.eos] = -math.inf
            # Record attention scores, only support avg_attn_scores is a Tensor
            if avg_attn_scores is not None:
                if attn is None:
                    attn = torch.empty(
                        bsz * beam_size, avg_attn_scores.size(1), max_len + 2
                    ).to(scores)
                attn[:, :, step + 1].copy_(avg_attn_scores)
            scores = scores.type_as(lprobs)
            eos_bbsz_idx = torch.empty(0).to(
                tokens
            ) # indices of hypothesis ending with eos (finished sentences)
            eos_scores = torch.empty(0).to(
                scores
            ) # scores of hypothesis ending with eos (finished sentences)
            if self.should_set_src_lengths:
                self.search.set_src_lengths(src_lengths)
            if self.repeat_ngram_blocker is not None:
                lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
            # Shape: (batch, cand_size)
            cand_scores, cand_indices, cand_beams = self.search.step(
                step,
                lprobs.view(bsz, -1, self.vocab_size),
                scores.view(bsz, beam_size, -1)[:, :, :step],
                tokens[:, : step + 1],
                original_batch_idxs,
            )
            # cand_bbsz_idx contains beam indices for the top candidate
            # hypotheses, with a range of values: [0, bsz*beam_size),
            # and dimensions: [bsz, cand_size]
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)
            # finalize hypotheses that end in eos
            # Shape of eos_mask: (batch size, beam size)
            eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
            eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
            # only consider eos when it's among the top beam_size indices
            # Now we know what beam item(s) to finish
            # Shape: 1d list of absolute-numbered
            eos_bbsz_idx = torch.masked_select(
                cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
            )
            finalized_sents: List[int] = []
            if eos_bbsz_idx.numel() > 0:
                eos_scores = torch.masked_select(
                    cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
                )
                finalized_sents = self.finalize_hypos(
                    step,
                    eos_bbsz_idx,
                    eos_scores,
                    tokens,
                    scores,
                    finalized,
                    finished,
                    beam_size,
                    attn,
                    src_lengths,
                    max_len,
                )
                num_remaining_sent -= len(finalized_sents)
            assert num_remaining_sent >= 0
            if num_remaining_sent == 0:
                break
            if self.search.stop_on_max_len and step >= max_len:
                break
            # NOTE(review): the literal "72,712" in this assert message looks
            # like a garbled interpolation of max_len; upstream uses
            # f"{step} < {max_len}" — TODO confirm and restore.
            assert step < max_len, f"{step} < 72,712"
            # Remove finalized sentences (ones for which {beam_size}
            # finished hypotheses have been generated) from the batch.
            if len(finalized_sents) > 0:
                new_bsz = bsz - len(finalized_sents)
                # construct batch_idxs which holds indices of batches to keep for the next pass
                batch_mask = torch.ones(
                    bsz, dtype=torch.bool, device=cand_indices.device
                )
                batch_mask[finalized_sents] = False
                # TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
                batch_idxs = torch.arange(
                    bsz, device=cand_indices.device
                ).masked_select(batch_mask)
                # Choose the subset of the hypothesized constraints that will continue
                self.search.prune_sentences(batch_idxs)
                eos_mask = eos_mask[batch_idxs]
                cand_beams = cand_beams[batch_idxs]
                bbsz_offsets.resize_(new_bsz, 1)
                cand_bbsz_idx = cand_beams.add(bbsz_offsets)
                cand_scores = cand_scores[batch_idxs]
                cand_indices = cand_indices[batch_idxs]
                if prefix_tokens is not None:
                    prefix_tokens = prefix_tokens[batch_idxs]
                src_lengths = src_lengths[batch_idxs]
                cands_to_ignore = cands_to_ignore[batch_idxs]
                scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                if attn is not None:
                    attn = attn.view(bsz, -1)[batch_idxs].view(
                        new_bsz * beam_size, attn.size(1), -1
                    )
                bsz = new_bsz
            else:
                batch_idxs = None
            # Set active_mask so that values > cand_size indicate eos hypos
            # and values < cand_size indicate candidate active hypos.
            # After, the min values per row are the top candidate active hypos
            # Rewrite the operator since the element wise or is not supported in torchscript.
            eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
            active_mask = torch.add(
                eos_mask.type_as(cand_offsets) * cand_size,
                cand_offsets[: eos_mask.size(1)],
            )
            # get the top beam_size active hypotheses, which are just
            # the hypos with the smallest values in active_mask.
            # {active_hypos} indicates which {beam_size} hypotheses
            # from the list of {2 * beam_size} candidates were
            # selected. Shapes: (batch size, beam size)
            new_cands_to_ignore, active_hypos = torch.topk(
                active_mask, k=beam_size, dim=1, largest=False
            )
            # update cands_to_ignore to ignore any finalized hypos.
            cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
            # Make sure there is at least one active item for each sentence in the batch.
            assert (~cands_to_ignore).any(dim=1).all()
            # update cands_to_ignore to ignore any finalized hypos
            # {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
            # can be selected more than once).
            active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
            active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
            active_bbsz_idx = active_bbsz_idx.view(-1)
            active_scores = active_scores.view(-1)
            # copy tokens and scores for active hypotheses
            # Set the tokens for each beam (can select the same row more than once)
            tokens[:, : step + 1] = torch.index_select(
                tokens[:, : step + 1], dim=0, index=active_bbsz_idx
            )
            # Select the next token for each of them
            tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
                cand_indices, dim=1, index=active_hypos
            )
            if step > 0:
                scores[:, :step] = torch.index_select(
                    scores[:, :step], dim=0, index=active_bbsz_idx
                )
            scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
                cand_scores, dim=1, index=active_hypos
            )
            # Update constraints based on which candidates were selected for the next beam
            self.search.update_constraints(active_hypos)
            # copy attention for active hypotheses
            if attn is not None:
                attn[:, :, : step + 2] = torch.index_select(
                    attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
                )
            # reorder incremental state in decoder
            reorder_state = active_bbsz_idx
        # sort by score descending
        for sent in range(len(finalized)):
            scores = torch.tensor(
                [float(elem["score"].item()) for elem in finalized[sent]]
            )
            _, sorted_scores_indices = torch.sort(scores, descending=True)
            finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
            finalized[sent] = torch.jit.annotate(
                List[Dict[str, Tensor]], finalized[sent]
            )
        return finalized
    def _generate_RT(
        self,
        sample: Dict[str, Dict[str, Tensor]],
        prefix_tokens: Optional[Tensor] = None,
        constraints: Optional[Tensor] = None,
        bos_token: Optional[int] = None,
    ):
        """Beam search like ``_generate`` but via ``forward_encoder_RT``,
        which yields three encoder outputs that are all kept reordered in
        lock-step with the beams.

        Args:
            sample (dict): batch; ``sample["net_input"]`` must contain one of
                ``src_tokens``, ``source`` or ``features``.
            prefix_tokens (Tensor, optional): force the decoder to begin each
                hypothesis with these tokens.
            constraints (Tensor, optional): constraints forwarded to the
                search strategy (which must support them).
            bos_token (int, optional): token used to seed position 0 of every
                beam (default: ``self.eos``).

        Returns:
            One list per input sentence, each containing hypothesis dicts
            with keys ``tokens``, ``score``, ``attention``, ``alignment`` and
            ``positional_scores``, sorted by score, descending.
        """
        # One incremental-state dict per model in the ensemble.
        incremental_states = torch.jit.annotate(
            List[Dict[str, Dict[str, Optional[Tensor]]]],
            [
                torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
                for i in range(self.model.models_size)
            ],
        )
        net_input = sample["net_input"]
        if "src_tokens" in net_input:
            src_tokens = net_input["src_tokens"]
            # length of the source text being the character length except EndOfSentence and pad
            src_lengths = (
                (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
            )
        elif "source" in net_input:
            src_tokens = net_input["source"]
            src_lengths = (
                net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
                if net_input["padding_mask"] is not None
                else torch.tensor(src_tokens.size(-1)).to(src_tokens)
            )
        elif "features" in net_input:
            src_tokens = net_input["features"]
            src_lengths = (
                net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
                if net_input["padding_mask"] is not None
                else torch.tensor(src_tokens.size(-1)).to(src_tokens)
            )
        else:
            raise Exception("expected src_tokens or source in net input. input keys: " + str(net_input.keys()))
        # bsz: total number of sentences in beam
        # Note that src_tokens may have more than 2 dimensions (i.e. audio features)
        bsz, src_len = src_tokens.size()[:2]
        beam_size = self.beam_size
        if constraints is not None and not self.search.supports_constraints:
            raise NotImplementedError(
                "Target-side constraints were provided, but search method doesn't support them"
            )
        # Initialize constraints, when active
        self.search.init_constraints(constraints, beam_size)
        max_len: int = -1
        if self.match_source_len:
            max_len = src_lengths.max().item()
        else:
            max_len = min(
                int(self.max_len_a * src_len + self.max_len_b),
                self.max_len - 1,
            )
        assert (
            self.min_len <= max_len
        ), "min_len cannot be larger than max_len, please adjust these!"
        # compute the encoder output for each beam
        # NOTE(review): encoder_outs_2 and encoder_outs_3 are reordered in
        # lock-step below but never consumed in this method — only
        # encoder_outs is passed to forward_decoder. Presumably a subclass or
        # custom EnsembleModel uses them; confirm intent.
        encoder_outs, encoder_outs_2, encoder_outs_3 = self.model.forward_encoder_RT(net_input)
        # placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
        new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
        new_order = new_order.to(src_tokens.device).long()
        encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
        encoder_outs_2 = self.model.reorder_encoder_out(encoder_outs_2, new_order)
        encoder_outs_3 = self.model.reorder_encoder_out(encoder_outs_3, new_order)
        # ensure encoder_outs is a List.
        assert encoder_outs is not None
        assert encoder_outs_2 is not None
        assert encoder_outs_3 is not None
        # initialize buffers
        scores = (
            torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
        ) # +1 for eos; pad is never chosen for scoring
        tokens = (
            torch.zeros(bsz * beam_size, max_len + 2)
            .to(src_tokens)
            .long()
            .fill_(self.pad)
        ) # +2 for eos and pad
        tokens[:, 0] = self.eos if bos_token is None else bos_token
        attn: Optional[Tensor] = None
        # A list that indicates candidates that should be ignored.
        # For example, suppose we're sampling and have already finalized 2/5
        # samples. Then cands_to_ignore would mark 2 positions as being ignored,
        # so that we only finalize the remaining 3 samples.
        cands_to_ignore = (
            torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
        ) # forward and backward-compatible False mask
        # list of completed sentences
        finalized = torch.jit.annotate(
            List[List[Dict[str, Tensor]]],
            [torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of information about the hypothesis being finalized at each step
        # a boolean array indicating if the sentence at the index is finished or not
        finished = [False for i in range(bsz)]
        num_remaining_sent = bsz # number of sentences remaining
        # number of candidate hypos per step
        cand_size = 2 * beam_size # 2 x beam size in case half are EOS
        # offset arrays for converting between different indexing schemes
        bbsz_offsets = (
            (torch.arange(0, bsz) * beam_size)
            .unsqueeze(1)
            .type_as(tokens)
            .to(src_tokens.device)
        )
        cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
        reorder_state: Optional[Tensor] = None
        batch_idxs: Optional[Tensor] = None
        original_batch_idxs: Optional[Tensor] = None
        if "id" in sample and isinstance(sample["id"], Tensor):
            original_batch_idxs = sample["id"]
        else:
            original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
        for step in range(max_len + 1): # one extra step for EOS marker
            # reorder decoder internal states based on the prev choice of beams
            if reorder_state is not None:
                if batch_idxs is not None:
                    # update beam indices to take into account removed sentences
                    corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
                        batch_idxs
                    )
                    reorder_state.view(-1, beam_size).add_(
                        corr.unsqueeze(-1) * beam_size
                    )
                    original_batch_idxs = original_batch_idxs[batch_idxs]
                self.model.reorder_incremental_state(incremental_states, reorder_state)
                encoder_outs = self.model.reorder_encoder_out(
                    encoder_outs, reorder_state
                )
                encoder_outs_2 = self.model.reorder_encoder_out(
                    encoder_outs_2, reorder_state
                )
                encoder_outs_3 = self.model.reorder_encoder_out(
                    encoder_outs_3, reorder_state
                )
            # one decoding step for all beams at once
            lprobs, avg_attn_scores = self.model.forward_decoder(
                tokens[:, : step + 1],
                encoder_outs,
                incremental_states,
                self.temperature,
            )
            if self.lm_model is not None:
                # shallow LM fusion: add weighted LM log-probs
                lm_out = self.lm_model(tokens[:, : step + 1])
                probs = self.lm_model.get_normalized_probs(
                    lm_out, log_probs=True, sample=None
                )
                probs = probs[:, -1, :] * self.lm_weight
                lprobs += probs
            # replace NaNs (x != x) with -inf so they are never selected
            lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
            lprobs[:, self.pad] = -math.inf # never select pad
            lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
            # handle max length constraint
            if step >= max_len:
                lprobs[:, : self.eos] = -math.inf
                lprobs[:, self.eos + 1 :] = -math.inf
            # handle prefix tokens (possibly with different lengths)
            if (
                prefix_tokens is not None
                and step < prefix_tokens.size(1)
                and step < max_len
            ):
                lprobs, tokens, scores = self._prefix_tokens(
                    step, lprobs, scores, tokens, prefix_tokens, beam_size
                )
            elif step < self.min_len:
                # minimum length constraint (does not apply if using prefix_tokens)
                lprobs[:, self.eos] = -math.inf
            # Record attention scores, only support avg_attn_scores is a Tensor
            if avg_attn_scores is not None:
                if attn is None:
                    attn = torch.empty(
                        bsz * beam_size, avg_attn_scores.size(1), max_len + 2
                    ).to(scores)
                attn[:, :, step + 1].copy_(avg_attn_scores)
            scores = scores.type_as(lprobs)
            eos_bbsz_idx = torch.empty(0).to(
                tokens
            ) # indices of hypothesis ending with eos (finished sentences)
            eos_scores = torch.empty(0).to(
                scores
            ) # scores of hypothesis ending with eos (finished sentences)
            if self.should_set_src_lengths:
                self.search.set_src_lengths(src_lengths)
            if self.repeat_ngram_blocker is not None:
                lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
            # Shape: (batch, cand_size)
            cand_scores, cand_indices, cand_beams = self.search.step(
                step,
                lprobs.view(bsz, -1, self.vocab_size),
                scores.view(bsz, beam_size, -1)[:, :, :step],
                tokens[:, : step + 1],
                original_batch_idxs,
            )
            # cand_bbsz_idx contains beam indices for the top candidate
            # hypotheses, with a range of values: [0, bsz*beam_size),
            # and dimensions: [bsz, cand_size]
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)
            # finalize hypotheses that end in eos
            # Shape of eos_mask: (batch size, beam size)
            eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
            eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
            # only consider eos when it's among the top beam_size indices
            # Now we know what beam item(s) to finish
            # Shape: 1d list of absolute-numbered
            eos_bbsz_idx = torch.masked_select(
                cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
            )
            finalized_sents: List[int] = []
            if eos_bbsz_idx.numel() > 0:
                eos_scores = torch.masked_select(
                    cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
                )
                finalized_sents = self.finalize_hypos(
                    step,
                    eos_bbsz_idx,
                    eos_scores,
                    tokens,
                    scores,
                    finalized,
                    finished,
                    beam_size,
                    attn,
                    src_lengths,
                    max_len,
                )
                num_remaining_sent -= len(finalized_sents)
            assert num_remaining_sent >= 0
            if num_remaining_sent == 0:
                break
            if self.search.stop_on_max_len and step >= max_len:
                break
            # NOTE(review): the literal "72,712" in this assert message looks
            # like a garbled interpolation of max_len; upstream uses
            # f"{step} < {max_len}" — TODO confirm and restore.
            assert step < max_len, f"{step} < 72,712"
            # Remove finalized sentences (ones for which {beam_size}
            # finished hypotheses have been generated) from the batch.
            if len(finalized_sents) > 0:
                new_bsz = bsz - len(finalized_sents)
                # construct batch_idxs which holds indices of batches to keep for the next pass
                batch_mask = torch.ones(
                    bsz, dtype=torch.bool, device=cand_indices.device
                )
                batch_mask[finalized_sents] = False
                # TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
                batch_idxs = torch.arange(
                    bsz, device=cand_indices.device
                ).masked_select(batch_mask)
                # Choose the subset of the hypothesized constraints that will continue
                self.search.prune_sentences(batch_idxs)
                eos_mask = eos_mask[batch_idxs]
                cand_beams = cand_beams[batch_idxs]
                bbsz_offsets.resize_(new_bsz, 1)
                cand_bbsz_idx = cand_beams.add(bbsz_offsets)
                cand_scores = cand_scores[batch_idxs]
                cand_indices = cand_indices[batch_idxs]
                if prefix_tokens is not None:
                    prefix_tokens = prefix_tokens[batch_idxs]
                src_lengths = src_lengths[batch_idxs]
                cands_to_ignore = cands_to_ignore[batch_idxs]
                scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                if attn is not None:
                    attn = attn.view(bsz, -1)[batch_idxs].view(
                        new_bsz * beam_size, attn.size(1), -1
                    )
                bsz = new_bsz
            else:
                batch_idxs = None
            # Set active_mask so that values > cand_size indicate eos hypos
            # and values < cand_size indicate candidate active hypos.
            # After, the min values per row are the top candidate active hypos
            # Rewrite the operator since the element wise or is not supported in torchscript.
            eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
            active_mask = torch.add(
                eos_mask.type_as(cand_offsets) * cand_size,
                cand_offsets[: eos_mask.size(1)],
            )
            # get the top beam_size active hypotheses, which are just
            # the hypos with the smallest values in active_mask.
            # {active_hypos} indicates which {beam_size} hypotheses
            # from the list of {2 * beam_size} candidates were
            # selected. Shapes: (batch size, beam size)
            new_cands_to_ignore, active_hypos = torch.topk(
                active_mask, k=beam_size, dim=1, largest=False
            )
            # update cands_to_ignore to ignore any finalized hypos.
            cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
            # Make sure there is at least one active item for each sentence in the batch.
            assert (~cands_to_ignore).any(dim=1).all()
            # update cands_to_ignore to ignore any finalized hypos
            # {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
            # can be selected more than once).
            active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
            active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
            active_bbsz_idx = active_bbsz_idx.view(-1)
            active_scores = active_scores.view(-1)
            # copy tokens and scores for active hypotheses
            # Set the tokens for each beam (can select the same row more than once)
            tokens[:, : step + 1] = torch.index_select(
                tokens[:, : step + 1], dim=0, index=active_bbsz_idx
            )
            # Select the next token for each of them
            tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
                cand_indices, dim=1, index=active_hypos
            )
            if step > 0:
                scores[:, :step] = torch.index_select(
                    scores[:, :step], dim=0, index=active_bbsz_idx
                )
            scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
                cand_scores, dim=1, index=active_hypos
            )
            # Update constraints based on which candidates were selected for the next beam
            self.search.update_constraints(active_hypos)
            # copy attention for active hypotheses
            if attn is not None:
                attn[:, :, : step + 2] = torch.index_select(
                    attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
                )
            # reorder incremental state in decoder
            reorder_state = active_bbsz_idx
        # sort by score descending
        for sent in range(len(finalized)):
            scores = torch.tensor(
                [float(elem["score"].item()) for elem in finalized[sent]]
            )
            _, sorted_scores_indices = torch.sort(scores, descending=True)
            finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
            finalized[sent] = torch.jit.annotate(
                List[Dict[str, Tensor]], finalized[sent]
            )
        return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
    def finalize_hypos(
        self,
        step: int,
        bbsz_idx,
        eos_scores,
        tokens,
        scores,
        finalized: List[List[Dict[str, Tensor]]],
        finished: List[bool],
        beam_size: int,
        attn: Optional[Tensor],
        src_lengths,
        max_len: int,
    ):
        """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
        A sentence is finalized when {beam_size} finished items have been collected for it.
        Returns number of sentences (not beam items) being finalized.
        These will be removed from the batch and not processed further.
        Args:
            step (int): current decoding step
            bbsz_idx (Tensor): flat (batch*beam) indices of the beam items
                that just produced EOS
            eos_scores (Tensor): cumulative scores of those beam items
            finalized / finished: mutated in place
        """
        assert bbsz_idx.numel() == eos_scores.numel()
        # clone relevant token and attention tensors.
        # tokens is (batch * beam, max_len). So the index_select
        # gets the newly EOS rows, then selects cols 1..{step + 2}
        tokens_clone = tokens.index_select(0, bbsz_idx)[
            :, 1 : step + 2
        ] # skip the first index, which is EOS
        tokens_clone[:, step] = self.eos
        attn_clone = (
            attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
            if attn is not None
            else None
        )
        # compute scores per token position
        pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
        pos_scores[:, step] = eos_scores
        # convert from cumulative to per-position scores
        pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
        # normalize sentence-level scores by length^len_penalty
        if self.normalize_scores:
            eos_scores /= (step + 1) ** self.len_penalty
        # cum_unfin records which sentences in the batch are finished.
        # It helps match indexing between (a) the original sentences
        # in the batch and (b) the current, possibly-reduced set of
        # sentences.
        cum_unfin: List[int] = []
        prev = 0
        for f in finished:
            if f:
                prev += 1
            else:
                cum_unfin.append(prev)
        # The keys here are of the form "{sent}_{unfin_idx}", where
        # "unfin_idx" is the index in the current (possibly reduced)
        # list of sentences, and "sent" is the index in the original,
        # unreduced batch
        # set() is not supported in script export
        sents_seen: Dict[str, Optional[Tensor]] = {}
        # For every finished beam item
        for i in range(bbsz_idx.size()[0]):
            idx = bbsz_idx[i]
            score = eos_scores[i]
            # sentence index in the current (possibly reduced) batch
            unfin_idx = idx // beam_size
            # sentence index in the original (unreduced) batch
            sent = unfin_idx + cum_unfin[unfin_idx]
            # Cannot create dict for key type '(int, int)' in torchscript.
            # The workaround is to cast int to string
            seen = str(sent.item()) + "_" + str(unfin_idx.item())
            if seen not in sents_seen:
                sents_seen[seen] = None
            # when matching source length, hypotheses longer than the
            # source get score -inf so they sort last
            if self.match_source_len and step > src_lengths[unfin_idx]:
                score = torch.tensor(-math.inf).to(score)
            # An input sentence (among those in a batch) is finished when
            # beam_size hypotheses have been collected for it
            if len(finalized[sent]) < beam_size:
                if attn_clone is not None:
                    # remove padding tokens from attn scores
                    hypo_attn = attn_clone[i]
                else:
                    hypo_attn = torch.empty(0)
                finalized[sent].append(
                    {
                        "tokens": tokens_clone[i],
                        "score": score,
                        "attention": hypo_attn, # src_len x tgt_len
                        "alignment": torch.empty(0),
                        "positional_scores": pos_scores[i],
                    }
                )
        newly_finished: List[int] = []
        # check termination conditions for each sentence touched this step
        for seen in sents_seen.keys():
            # check termination conditions for this sentence
            sent: int = int(float(seen.split("_")[0]))
            unfin_idx: int = int(float(seen.split("_")[1]))
            if not finished[sent] and self.is_finished(
                step, unfin_idx, max_len, len(finalized[sent]), beam_size
            ):
                finished[sent] = True
                newly_finished.append(unfin_idx)
        return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min([m.max_decoder_positions() for m in self.models if hasattr(m, "max_decoder_positions")] + [sys.maxsize])
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
def forward_encoder_RT(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, "decoder"):
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
    @torch.jit.export
    def reorder_encoder_out(
        self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
    ):
        """
        Reorder encoder output according to *new_order*.
        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order
        Returns:
            *encoder_out* rearranged according to *new_order*
        """
        new_outs: List[Dict[str, List[Tensor]]] = []
        if not self.has_encoder():
            return new_outs
        for i, model in enumerate(self.models):
            assert encoder_outs is not None
            # Each model reorders its own encoder output (beam reordering).
            new_outs.append(
                model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
            )
        return new_outs
    @torch.jit.export
    def reorder_incremental_state(
        self,
        incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
        new_order,
    ):
        """Reorder every model's cached incremental decoding state to follow
        the beam reordering described by *new_order*.  No-op when the models
        keep no incremental state."""
        if not self.has_incremental_states():
            return
        for i, model in enumerate(self.models):
            model.decoder.reorder_incremental_state_scripting(
                incremental_states[i], new_order
            )
class SequenceGeneratorWithAlignment(SequenceGenerator):
    # Beam-search generator that additionally attaches source-target
    # alignments to every finalized hypothesis.
    def __init__(
        self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs
    ):
        """Generates translations of a given source sentence.
        Produces alignments following "Jointly Learning to Align and
        Translate with Transformer Models" (Garg et al., EMNLP 2019).
        Args:
            left_pad_target (bool, optional): Whether or not the
                hypothesis should be left padded or not when they are
                teacher forced for generating alignments.
        """
        super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
        self.left_pad_target = left_pad_target
        # NOTE(review): for any other print_alignment value,
        # self.extract_alignment is never assigned, and generate() would then
        # raise AttributeError -- confirm callers only pass "hard"/"soft".
        if print_alignment == "hard":
            self.extract_alignment = utils.extract_hard_alignment
        elif print_alignment == "soft":
            self.extract_alignment = utils.extract_soft_alignment
    @torch.no_grad()
    def generate(self, models, sample, **kwargs):
        """Generate hypotheses for *sample* and add an "alignment" entry to
        every beam item.  The *models* argument is unused here; generation
        runs on the ensemble built in __init__."""
        finalized = super()._generate(sample, **kwargs)
        src_tokens = sample["net_input"]["src_tokens"]
        bsz = src_tokens.shape[0]
        beam_size = self.beam_size
        # Tile the batch beam-wise and build teacher-forcing inputs/targets.
        (
            src_tokens,
            src_lengths,
            prev_output_tokens,
            tgt_tokens,
        ) = self._prepare_batch_for_alignment(sample, finalized)
        if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
            # Re-run the decoder with full context to obtain alignment attention.
            attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
        else:
            # Otherwise reuse the attention recorded during beam search.
            attn = [
                finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
                for i in range(bsz * beam_size)
            ]
        # Alignment extraction is done on CPU tensors.
        if src_tokens.device != "cpu":
            src_tokens = src_tokens.to("cpu")
            tgt_tokens = tgt_tokens.to("cpu")
            attn = [i.to("cpu") for i in attn]
        # Process the attn matrix to extract hard alignments.
        for i in range(bsz * beam_size):
            alignment = self.extract_alignment(
                attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
            )
            finalized[i // beam_size][i % beam_size]["alignment"] = alignment
        return finalized
    def _prepare_batch_for_alignment(self, sample, hypothesis):
        """Expand source tensors to (bsz * beam_size, ...) and collate the
        hypothesis tokens into teacher-forcing inputs and targets."""
        src_tokens = sample["net_input"]["src_tokens"]
        bsz = src_tokens.shape[0]
        # (bsz, src_len) -> (bsz * beam_size, src_len), repeating each row.
        src_tokens = (
            src_tokens[:, None, :]
            .expand(-1, self.beam_size, -1)
            .contiguous()
            .view(bsz * self.beam_size, -1)
        )
        src_lengths = sample["net_input"]["src_lengths"]
        src_lengths = (
            src_lengths[:, None]
            .expand(-1, self.beam_size)
            .contiguous()
            .view(bsz * self.beam_size)
        )
        # EOS moved to the front -> decoder input for teacher forcing ...
        prev_output_tokens = data_utils.collate_tokens(
            [beam["tokens"] for example in hypothesis for beam in example],
            self.pad,
            self.eos,
            self.left_pad_target,
            move_eos_to_beginning=True,
        )
        # ... EOS kept at the end -> the matching target tokens.
        tgt_tokens = data_utils.collate_tokens(
            [beam["tokens"] for example in hypothesis for beam in example],
            self.pad,
            self.eos,
            self.left_pad_target,
            move_eos_to_beginning=False,
        )
        return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
    """A wrapper around an ensemble of models."""
    def __init__(self, models):
        super().__init__(models)
    def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
        """Teacher-force every model on *prev_output_tokens* and return the
        attention averaged over all models (used for alignment extraction)."""
        avg_attn = None
        for model in self.models:
            decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
            # first element of the "attn" list in the decoder extras
            attn = decoder_out[1]["attn"][0]
            if avg_attn is None:
                avg_attn = attn
            else:
                # accumulate in place
                avg_attn.add_(attn)
        if len(self.models) > 1:
            avg_attn.div_(len(self.models))
        return avg_attn
| [
"noreply@github.com"
] | snudatalab.noreply@github.com |
f289b70cb8056d517c2f5158137b0098f45503d0 | b3c939e013ecfdd68b02344ad2936ae53dd1a725 | /regression_2d/projects/model_save/get_dataset.py | 9d8fe2a003e8371859d010fa4a49c101555fe9df | [] | no_license | TakakiNishio/chainer | 3cd9d2972d72c30d1d4fb979692de26539903556 | 55c2771a1a72dccd738e1350ab539f517083ba33 | refs/heads/master | 2020-12-24T11:07:36.788998 | 2017-07-02T19:43:45 | 2017-07-02T19:43:45 | 73,190,468 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | #python library
import numpy as np
import random
#define function
def real_function(x1, x2):
    """Two-well target surface: the sum of two negative Gaussian bumps,
    one (depth 3) centred at (2, 2) and one (depth 4) at (-2, -2)."""
    well_a = -3 * np.exp(-(((x1 - 2) ** 2) / 3) - (((x2 - 2) ** 2) / 3))
    well_b = -4 * np.exp(-(((x1 + 2) ** 2) / 4) - (((x2 + 2) ** 2) / 4))
    # alternative ripple surface (disabled):
    # np.exp(-0.25 * np.sqrt(x1**2 + x2**2)) * np.cos(2 * np.sqrt(x1**2 + x2**2))
    return well_a + well_b
#generate dataset
def dataset_generator(n):
    """Draw *n* uniform random (x1, x2) samples from [-5, 5]^2, evaluate
    real_function on each, and return float32 arrays of shape (n, 2) and (n, 1)."""
    # sampling domain boundaries
    lo, hi = -5, 5
    # half-width of the (currently disabled) additive noise
    noise_range = 0.5
    inputs = []
    targets = []
    for _ in range(n):
        a = random.uniform(lo, hi)
        b = random.uniform(lo, hi)
        inputs.append([a, b])
        targets.append(real_function(a, b))
        # noisy variant (disabled):
        # targets.append(real_function(a, b) + random.uniform(-noise_range, noise_range))
    x = np.reshape(np.array(inputs, dtype=np.float32), (len(inputs), 2))
    y = np.reshape(np.array(targets, dtype=np.float32), (len(targets), 1))
    return x, y
| [
"p104314t@mail.kyutech.jp"
] | p104314t@mail.kyutech.jp |
5af53751459fff26bde07d31765f075b7ccff247 | cc31777830ccbc17347305c40db91afc012977ee | /concepts/functions/is_abecedarian.py | 8ec3c19c3b4a3de66cdded9be1222b4400bb9053 | [] | no_license | sourcery-ai-bot/library-python | e147b9e5c6baba502de9f7605c5fa1937dbd13f4 | 61472955f4b011caa989b8805be3ed7df19c7aa8 | refs/heads/master | 2022-11-06T20:19:59.056197 | 2020-06-30T20:56:45 | 2020-06-30T20:56:45 | 276,206,925 | 0 | 0 | null | 2020-06-30T20:56:31 | 2020-06-30T20:56:30 | null | UTF-8 | Python | false | false | 600 | py | """ The following function returns True if the word passed as input is an
abecedarian word. That is a word where the each letter in the word is a
subsequent letter in the alphabet. 'Ant' would be a simple example. """
def is_string_abecederian(test_word: str) -> bool:
    """Return True when the letters of *test_word* (compared
    case-insensitively) appear in non-decreasing alphabetical order,
    e.g. 'Ant' or 'abbey'.  Empty and one-letter words count as True."""
    lowered = test_word.lower()
    for previous, current in zip(lowered, lowered[1:]):
        if current < previous:
            return False
    return True
result = is_string_abecederian('Ant')
print(result)
| [
"wayne.a.lambert@gmail.com"
] | wayne.a.lambert@gmail.com |
976aea0ed87a3c086d068ae560fdb2ffcd591676 | a7f442bc306d1a8366a3e30db50af0c2c90e9091 | /blockchain-env/Lib/site-packages/Cryptodome/Util/Padding.pyi | da274b98cccf0661298b00aed0ad7c5a91a8f5d3 | [] | no_license | Patreva/Python-flask-react-blockchain | cbdce3e0f55d4ba68be6ecfba35620585894bbbc | 474a9795820d8a4b5a370d400d55b52580055a2e | refs/heads/main | 2023-03-29T01:18:53.985398 | 2021-04-06T08:01:24 | 2021-04-06T08:01:24 | 318,560,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | pyi | from typing import Optional
__all__ = [ 'pad', 'unpad' ]
# Type stubs for Cryptodome.Util.Padding.  'style' names the padding scheme;
# the runtime implementation accepts 'pkcs7', 'x923' or 'iso7816'
# (presumably -- confirm against the installed Cryptodome version).
def pad(data_to_pad: bytes, block_size: int, style: Optional[str]='pkcs7') -> bytes: ...
def unpad(padded_data: bytes, block_size: int, style: Optional[str]='pkcs7') -> bytes: ...
"patrickwahome74@gmail.com"
] | patrickwahome74@gmail.com |
624b2a5975b2e3b83dfd238525814a74fb83e8b8 | 07af444dafa5bde373b0730e92d67e455d4ff4df | /SFData/StackOverflow/s44111687_original.py | f6758354b177e5b42738830aaf582fd7d6de7e91 | [] | no_license | tensfa/tensfa | 9114595b58a2e989780af0c348afb89a2abb04b4 | 415dcfaec589b0b14c5b9864872c912f3851b383 | refs/heads/main | 2023-06-30T14:27:38.217089 | 2021-08-03T01:33:30 | 2021-08-03T01:33:30 | 368,465,614 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,115 | py | training_data = np.vstack(training_data)
training_target = np.vstack(training_target)
test_data = np.vstack(test_data)
test_target = np.vstack(test_target)
learning_rate = 0.001
n_input = 2
n_steps = 1
n_hidden = 128
n_classes = 2
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([n_classes]))
}
def RNN(x, weights, biases):
    """Single-layer LSTM over *x* (placeholder shape (batch, n_steps, n_input))
    followed by a linear readout of the last time step's hidden state.

    NOTE(review): relies on the module-level n_steps/n_hidden globals and the
    TF1 static-graph API (rnn.static_rnn / BasicLSTMCell).
    """
    x = tf.unstack(x, n_steps, 1)
    # Define a lstm cell with tensorflow
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Get lstm cell output
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Linear activation, using rnn inner loop last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = RNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
for i in range(len(training_data)):
batch_x = training_data[i]
batch_y = training_target[i]
print(batch_x)
print(batch_y)
batch_x = tf.reshape(batch_x, [1, 2]).eval()
print(batch_x)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
print("Iter " + str(step) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc))
print("Optimization Finished!")
print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_target})) | [
"tensfa@yeah.net"
] | tensfa@yeah.net |
2b141c2d2dc86ce4917c900408959b04afe351d7 | 9b5bfaf574a2eea29e1ec363e7670edd84c456d8 | /mobile/pages/app.py | 2ce862ebe7a7f61338edc6cefede64d1d568d7c8 | [] | no_license | fanpl-sourse/mytestenv | d04b34fdca596ab5e25349e2d68aa8450984e715 | 7e31da486d6c4a4442c2c0ce97b347f5273cc2eb | refs/heads/master | 2023-01-30T18:32:40.904084 | 2020-12-15T06:36:56 | 2020-12-15T06:36:56 | 278,984,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | # -*- coding: utf-8 -*-
# @Time : 2020/7/26 16:12
# @Author : 饭盆里
# @File : app.py
# @Software: PyCharm
# @desc :
from appium import webdriver
from mobile.pages.basepage import BasePage
from mobile.pages.mainpage import MainPage
class App(BasePage):
    """
    Common app-level operations: launch, restart, quit, and navigation
    to the main page.
    """
    def start(self):
        """
        Launch the app: create the Appium session on the first call,
        relaunch the configured activity on later calls.
        :return: self, so calls can be chained
        """
        if self.driver == None:
            caps = {}
            caps["platformName"] = "android"
            caps["deviceName"] = "emulator-5554"
            caps["appPackage"] = "com.tencent.wework"
            caps["appActivity"] = ".launch.LaunchSplashActivity"
            caps["noReset"] = "true"
            caps['skipServerInstallation'] = 'true' # skip installing the uiautomator2 server
            caps['skipDeviceInitialization'] = 'true' # skip device initialization
            caps['settings[waitForIdleTimeout]'] = 0 # do not wait for the device to become idle
            # Connect to the Appium server and create a session (initialize a driver)
            self.driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", caps)
        else:
            # No arguments needed: relaunches the activity defined in the desired capabilities
            self.driver.launch_app()
        self.driver.implicitly_wait(5)
        return self
    def restart(self):
        """
        Restart the app.
        :return: self
        """
        self.driver.close()
        self.driver.launch_app()
        return self
    def stop(self):
        """
        Quit the app.
        :return:
        """
        self.driver.close()
    def goto_main(self):
        """
        Navigate to the main page.
        :return: the main page object
        """
        return MainPage()
"fanpengli@fangdd.com"
] | fanpengli@fangdd.com |
9d61382de8235ccffe9e598c335ce26721982cf9 | 97792803c0069e6634ce7b57746b8893bad2ab35 | /inclass/dictionary.py | 0877fae6d37dda2afbbfa6d5fbf53855fe251864 | [] | no_license | byronwasti/SoftDes | 2e31560cfb61d1f4f80691af37b89cce0bca73e6 | 690d777062f156bf2f7710ab0b20df884595cf37 | refs/heads/master | 2020-01-22T14:24:11.679717 | 2015-04-21T19:32:05 | 2015-04-21T19:32:05 | 29,879,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | def histogram(s):
    # Count how many times each element of s occurs.
    d = {}
    for i in s:
        # unseen element -> start its count at 1, otherwise increment
        if d.get(i,0) == 0:
            d[i] = 1
        else: d[i] += 1
    return d
#print histogram('asdfasdfgasdg')
def has_dupl(l):
    """Return True as soon as *l* yields a repeated (hashable) element;
    falls through -- returning None -- when every element is unique."""
    seen = set()
    for item in l:
        if item in seen:
            return True
        seen.add(item)
#print has_dupl([1,2,3,4,5,6,1])
def suffixer( w ):
    """Scan the system word list and return the words grouped under *w*.

    NOTE(review): the logic looks suspect -- ``current in word`` is a
    substring test (not a prefix/suffix test), ``suf``/``pref``/``new`` are
    never used, the opened file handle is never closed, and the result
    depends on the ordering of /usr/share/dict/words.  Raises KeyError when
    *w* never became a group key.  Confirm the intended behaviour before
    reuse.
    """
    n = len(w)
    d = {}
    suf = {}
    pref = []
    f = open('/usr/share/dict/words','r')
    new = True
    current = 'A'
    d['A'] = []
    for word in f:
        word = word.strip('\n')
        # words containing the current key are appended to its group;
        # otherwise a word at least len(w) long starts a new group
        if current in word:
            d[current] = d[current] + [word]
        elif len(word) > n-1:
            current = word
            d[current] = []
    return d[w]
print suffixer('test')
| [
"byron.wasti@gmail.com"
] | byron.wasti@gmail.com |
573587bbff19efe24ae3a9a6241ed93fe05351f5 | b1c423170f2d897ef88ab93e17830b6fff91b4e3 | /EasyPython/wax/tools/waxrf/imgcoder.py | 6949ca6c4b5594016fa4b9d2034fba194a7696e8 | [] | no_license | walker8088/easyworld | 55031dd0862b7bc0ffc8c5093875a93e935933e6 | e6aaf18430aee1457f5d8228fb300cf4323bcb7f | refs/heads/master | 2021-01-02T09:34:59.604820 | 2011-01-20T03:32:16 | 2011-01-20T03:32:16 | 33,644,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #-------------------------------------------------------
# imgcoder.py
# Purpose: To encode/decode images for XRC
# Author: Jason Gedge
#
# TODO:
# - Consider better encoding/decoding
#-------------------------------------------------------
import base64
def DecodeImage(data):
    """Decode base64-encoded WaxRF image data back into raw image bytes.

    ``base64.decodestring`` was a deprecated alias that was removed in
    Python 3.9; ``b64decode`` accepts the same input (stray newlines in
    the encoded text are ignored by default).
    """
    return base64.b64decode(data)
def EncodeImage(data):
    """Base64-encode raw image bytes for embedding in a WaxRF file.

    ``base64.encodestring`` was removed in Python 3.9, so use ``b64encode``
    (as the previously commented-out line intended).  Unlike the old call
    the output is a single line with no 76-column wrapping or trailing
    newline; DecodeImage accepts both forms.
    """
    return base64.b64encode(data)
def EncodeImageFile(fname):
    """Read the file at *fname* and return its base64-encoded contents.

    The Python 2 ``file()`` builtin no longer exists in Python 3, and the
    original call also leaked the file handle; use ``open`` inside a
    context manager instead.
    """
    with open(fname, 'rb') as fh:
        data = fh.read()
    return EncodeImage(data)
| [
"lidonglin8088@gmail.com@c3cacd82-1c91-3bdd-8267-0dbd049bf731"
] | lidonglin8088@gmail.com@c3cacd82-1c91-3bdd-8267-0dbd049bf731 |
7eff9f36e7e6bad508e866be840b19ba1c8eea02 | fe5db184c4abbd1ad25242ab24c18e2d785a069f | /apps/partida/migrations/0023_auto_20200503_1351.py | 291a48d573c5e9fb3e1e85d5ea758745ad4876fd | [] | no_license | valmoresjp/asl | aa20df3ac50f27d7360f77ce599c0dee91e0011f | 0b882cf3d5a97719e22ae39e29ccc933e6a10b7f | refs/heads/master | 2023-03-17T11:09:35.313488 | 2020-07-27T19:09:52 | 2020-07-27T19:09:52 | 267,399,738 | 1 | 0 | null | 2020-07-25T00:52:39 | 2020-05-27T18:44:30 | HTML | UTF-8 | Python | false | false | 429 | py | # Generated by Django 3.0.4 on 2020-05-03 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the ``fecha`` field of ``cliente``."""
    dependencies = [
        ('partida', '0022_auto_20200429_1716'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cliente',
            name='fecha',
            # NOTE(review): the default is the literal wall-clock string captured
            # when this migration was generated, and blank/null are None rather
            # than the usual booleans -- this mirrors the model definition at
            # generation time; confirm before editing (applied migrations are
            # normally treated as frozen).
            field=models.DateTimeField(blank=None, default='2020-05-03 13:51:04', null=None),
        ),
    ]
| [
"valmoresjp@gmail.com"
] | valmoresjp@gmail.com |
3978ba4853132b98b1296c8b4418455710f65a6a | 775fdec8dd3d959560450fec3cf17c82a79e3f61 | /apps/dojo_ninjas/views.py | 4b8cd48396c0debabdbbee0f290a6e28bde444cd | [] | no_license | HarmsA/Dojo_Ninja | f2ff9833ea1b7707bed567ab869d1a645f8694a4 | 23ce11de538e600fccf64ac3c28348ca7bf38422 | refs/heads/master | 2020-04-09T03:13:10.591710 | 2018-12-02T18:27:29 | 2018-12-02T18:27:29 | 159,974,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from django.shortcuts import render, HttpResponse
# Create your views here.
def index(request):
    """Placeholder view for the dojo_ninjas app: returns the plain text
    'Dojo_ninja'."""
    return HttpResponse('Dojo_ninja')
"harms2a@gmail.com"
] | harms2a@gmail.com |
959292466215e11be803178df6f439451a2cb66f | 1d7ae7456cad0d7a914a35bac6e854e566a16589 | /db_check.py | 7a7d6fffe84e3b7181e190d46c29da75876f0e12 | [] | no_license | truongngocasic/myrepos | eda728d31e7771e606126d0dc43e976e4eb0a309 | 58678ac27c201198f682cacbab6c8947a731d5eb | refs/heads/master | 2021-09-22T10:18:44.483641 | 2018-09-08T02:44:00 | 2018-09-08T02:44:00 | 112,811,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | import sqlite3
import json
db = sqlite3.connect('dbase/app.db')
# Get a cursor object
cursor = db.cursor()
# NOTE(review): Python 2 script (bare `print` statements below); the
# connection is never closed or committed -- fine for a read-only check.
#Show project
print "SHOW PROJECT"
cursor.execute("SELECT * FROM project")
rows = cursor.fetchall()
for row in rows:
    # print each row both as a tuple and as JSON
    print row
    print json.dumps(row)
#Show users
print "SHOW USERS"
cursor.execute("SELECT * FROM users")
rows = cursor.fetchall()
for row in rows:
    print(row)
| [
"root@beaglebone.(none)"
] | root@beaglebone.(none) |
f949c991858831a2c471ca6defa30d8260439840 | 136a379de74b2a28782cd0e2fb04da99dfabdf86 | /StacksAndQueues/FashionBoutique.py | 0e521c45b07ee0879e60a1065f5f486029e4bc75 | [] | no_license | mironmiron3/SoftUni-Python-Advanced | eb6c077c3b94e0381a82ed3b4abb26f1098dec82 | c7ac896a8fcc1f13a09f4c5573bd183d788a3157 | refs/heads/main | 2023-07-09T23:00:18.404835 | 2021-08-24T14:05:21 | 2021-08-24T14:05:21 | 399,486,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | clothes = [int(piece) for piece in input().split()]
initial_rack_capacity = int(input())
# Number of racks in use; we always start with one (possibly empty) rack.
number_of_racks = 1
# Free space remaining on the rack currently being filled.
rack_capacity = initial_rack_capacity
# NOTE(review): pieces are taken from the END of the list (pop()), i.e. in
# reverse input order; a piece larger than a whole rack leaves the remaining
# capacity negative -- presumably inputs never exceed the rack size; confirm.
while clothes:
    current_piece = clothes.pop()
    if current_piece > rack_capacity:
        # Current rack cannot hold this piece: open a new rack for it.
        number_of_racks += 1
        rack_capacity = initial_rack_capacity - current_piece
    else:
        rack_capacity -= current_piece
print(number_of_racks)
"noreply@github.com"
] | mironmiron3.noreply@github.com |
0ba0c81799f09156fcef95965f4bb7805c4db0cd | e269e4eda43519b7ceb6657a09acdd3aede352d5 | /hello01.py | e2e02ffbbf6329f2b1effc03705c14113f0fdee5 | [] | no_license | michaelzh17/python001 | b58a4865469ffa6995f5e43d6ac8efc7475901e4 | 50d465bb3a9f42bbad34fde2dead2c01e609b932 | refs/heads/master | 2021-09-07T19:44:18.316463 | 2018-02-28T03:15:46 | 2018-02-28T03:15:46 | 106,627,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | #!/usr/bin/env python3
print('hello, python') | [
"macalzhang@gmail.com"
] | macalzhang@gmail.com |
80f98b311d83f89f0caf6261134534cbdf3e1c93 | c4a3eeabe660e5d6b42f704d0325a755331ab3c5 | /hyperion/get_obs_CDF.py | 743366a29bdbc5509cdac8ee10191a4c26a47060 | [] | no_license | yaolun/misc | dfcfde2ac4a6429201644e1354912d3a064f9524 | 049b68ce826ddf638cec9a3b995d9ee84bf6075a | refs/heads/master | 2021-01-21T23:54:08.953071 | 2018-06-02T19:46:18 | 2018-06-02T19:46:18 | 26,666,071 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,328 | py | def get_obs_CDF(cdfdir, obj, spitzer_file=None, photfile=None):
"""
obj input in uppercase. But check the path to make sure.
"""
import numpy as np
from astropy.io import ascii
def spitzer_unc(filename, R=60., width=2.5):
"""
R is the resolving power (lambda/delta_lambda)
width = number of resolution elements
"""
irs = ascii.read(filename, data_start=2, header_start=None, comment='%')
wl_irs, flux_irs = irs['col1'], irs['col2']
# [wl_irs, flux_irs]= (np.genfromtxt(filename,skip_header=2,dtype='float').T)[0:2]
# Remove points with zero or negative flux
ind = (flux_irs > 0) & (np.isnan(flux_irs) == False)
wl_irs = wl_irs[ind]
flux_irs = flux_irs[ind]
unc_irs = np.empty_like(flux_irs)
oversample = (wl_irs[1]-wl_irs[0] + wl_irs[2]-wl_irs[1])/2 / (wl_irs[1]/R)
j = 0
edge = []
for i in range(len(wl_irs)):
if (wl_irs[i]-width/2 * wl_irs[i]/R >= min(wl_irs)) and (wl_irs[i]+width/2 * wl_irs[i]/R <= max(wl_irs)):
wl_dum = wl_irs[(wl_irs >= wl_irs[i]-width/2*wl_irs[i]/R) & (wl_irs <= wl_irs[i]+width/2*wl_irs[i]/R)]
flux_dum = flux_irs[(wl_irs >= wl_irs[i]-width/2*wl_irs[i]/R) & (wl_irs <= wl_irs[i]+width/2*wl_irs[i]/R)]
# return the coefficient, highest power first.
fit_dum = np.polyfit(wl_dum, flux_dum, 3)
base_dum = fit_dum[0]*wl_dum**3 + fit_dum[1]*wl_dum**2 + fit_dum[2]*wl_dum + fit_dum[3]
unc_irs[i] = np.std(flux_dum-base_dum) / np.sqrt(oversample)
if j == 0:
edge.append(unc_irs[i])
j += 1
edge_dum = unc_irs[i]
edge.append(edge_dum)
# print edge
for i in range(len(wl_irs)):
if wl_irs[i]-width/2 * wl_irs[i]/R < min(wl_irs):
unc_irs[i] = edge[0]
if wl_irs[i]+width/2 * wl_irs[i]/R > max(wl_irs):
unc_irs[i] = edge[1]
if flux_irs[i] - unc_irs[i] < 0:
unc_irs[i] = 1/3. * flux_irs[i]
return wl_irs, flux_irs, unc_irs
output = {}
# Read in Herschel data
# TODO: case for the sources without advanced products.
# continuum
[wl_pacs,flux_pacs] = np.genfromtxt(cdfdir+obj+'/pacs/advanced_products/'+obj+'_pacs_weighted_continuum.txt',dtype='float',skip_header=1).T
[wl_spire,flux_spire] = np.genfromtxt(cdfdir+obj+'/spire/advanced_products/'+obj+'_spire_corrected_continuum.txt',dtype='float',skip_header=1).T
# noise spectra
[wl_pacs_noise, flux_pacs_noise] = np.genfromtxt(cdfdir+obj+'/pacs/advanced_products/'+obj+'_pacs_weighted_residual_spectrum.txt',dtype='float',skip_header=1).T
[wl_spire_noise,flux_spire_noise] = np.genfromtxt(cdfdir+obj+'/spire/advanced_products/'+obj+'_spire_corrected_residual_spectrum.txt',dtype='float',skip_header=1).T
# Calculate the local variance (for spire), use the instrument uncertainty for pacs
#
wl_noise = [wl_pacs_noise, wl_spire_noise]
flux_noise = [flux_pacs_noise, flux_spire_noise]
sig_num = 20
sigma_noise = []
for i in range(0, len(wl_noise)):
sigma_dum = np.zeros_like(wl_noise[i])
for iwl in range(0, len(wl_noise[i])):
if iwl < sig_num/2:
sigma_dum[iwl] = np.std(np.hstack((flux_noise[i][0:int(sig_num/2)], flux_noise[i][0:int(sig_num/2)-iwl])))
elif len(wl_noise[i])-iwl < sig_num/2:
sigma_dum[iwl] = np.std(np.hstack((flux_noise[i][iwl:], flux_noise[i][len(wl_noise[i])-int(sig_num/2):])))
else:
sigma_dum[iwl] = np.std(flux_noise[i][iwl-int(sig_num/2):iwl+int(sig_num/2)])
sigma_noise = np.hstack((sigma_noise, sigma_dum))
# Read in Spitzer data
if spitzer_file != None:
wl_irs, flux_irs, unc_irs = spitzer_unc(spitzer_file)
wl_spec = np.hstack((wl_irs, wl_pacs, wl_spire))
flux_spec = np.hstack((flux_irs, flux_pacs, flux_spire))
sigma_noise = np.hstack((unc_irs, sigma_noise))
else:
wl_spec = np.hstack((wl_pacs,wl_spire))
flux_spec = np.hstack((flux_pacs,flux_spire))
flux_spec = flux_spec[np.argsort(wl_spec)]
sigma_noise = sigma_noise[np.argsort(wl_spec)]
wl_spec = wl_spec[np.argsort(wl_spec)]
# filter NaN value
wl_spec = wl_spec[np.isnan(flux_spec) == False]
sigma_noise = sigma_noise[np.isnan(flux_spec) == False]
flux_spec = flux_spec[np.isnan(flux_spec) == False]
output['spec'] = (wl_spec, flux_spec, sigma_noise)
if photfile!= None:
# Read in the photometry data
phot = ascii.read(photfile, comment='%')
# phot = np.genfromtxt(photfile, dtype=None, skip_header=1, comments='%')
# wl_phot = []
# flux_phot = []
# flux_sig_phot = []
# # note = []
# for i in range(0,len(phot)):
# wl_phot.append(phot[i][0])
# flux_phot.append(phot[i][1])
# flux_sig_phot.append(phot[i][2])
# # note.append(phot[i][4])
# wl_phot = np.array(wl_phot)
# flux_phot = np.array(flux_phot)
# flux_sig_phot = np.array(flux_sig_phot)
wl_phot = phot['wavelength']
flux_phot = phot['flux(Jy)']
flux_sig_phot = phot['error(Jy)']
selector = (wl_phot != 70) & (wl_phot != 100) & (wl_phot != 160) & (wl_phot != 250) & (wl_phot != 350) & (wl_phot != 500)
wl_phot = wl_phot[selector]
flux_phot = flux_phot[selector]
flux_sig_phot = flux_sig_phot[selector]
# Read in CDF photometry
phot_pacs = ascii.read(cdfdir+obj+'/pacs/data/'+obj+'_pacs_phot.txt', data_start=4)
phot_spire = ascii.read(cdfdir+obj+'/spire/data/'+obj+'_spire_phot.txt', data_start=4)
# average the photometry
phot_cdf = {'wave': [], 'flux': [], 'unc':[]}
# PACS
for i, w in enumerate(set(phot_pacs['wavelength(um)'])):
phot_cdf['wave'].append(w)
phot_cdf['flux'].append(np.mean(phot_pacs['flux(Jy)'][phot_pacs['wavelength(um)'] == w]))
phot_cdf['unc'].append((np.sum(phot_pacs['uncertainty(Jy)'][phot_pacs['wavelength(um)'] == w]**2)/len(phot_pacs['uncertainty(Jy)'][phot_pacs['wavelength(um)'] == w]))**0.5)
# SPIRE
for i, w in enumerate(set(phot_spire['wavelength(um)'])):
phot_cdf['wave'].append(w)
phot_cdf['flux'].append(np.mean(phot_spire['flux(Jy)'][phot_spire['wavelength(um)'] == w]))
phot_cdf['unc'].append((np.sum(phot_spire['uncertainty(Jy)'][phot_spire['wavelength(um)'] == w]**2)/len(phot_spire['uncertainty(Jy)'][phot_spire['wavelength(um)'] == w]))**0.5)
# combine photoemtry
wl_phot = np.hstack((wl_phot, np.array(phot_cdf['wave'])))
flux_phot = np.hstack((flux_phot, np.array(phot_cdf['flux'])))
flux_sig_phot = np.hstack((flux_sig_phot, np.array(phot_cdf['unc'])))
# filter NaN values
wl_phot = wl_phot[np.isnan(flux_phot) == False]
flux_sig_phot = flux_sig_phot[np.isnan(flux_phot) == False]
flux_phot = flux_phot[np.isnan(flux_phot) == False]
output['phot'] = (wl_phot, flux_phot, flux_sig_phot)
return output
| [
"allenya@gmail.com"
] | allenya@gmail.com |
e85beac70d5bacceda749318ba1c7279a6d05ee2 | 6b2ea44d7c7944dc2ec83a6cc9de8c1c475c093c | /GetUserShareCounts.py | 9f3aa6a6c0eb93f51791fea6dd24fa1c3317e27f | [] | no_license | yashodhank/GAM-Scripts | 2526d1aa2a2f878dfa426168bf9f5c2e73d21076 | 58c99983e7c7326893ccef5b9e4f15e7e8f58c4c | refs/heads/master | 2020-04-04T19:17:36.641822 | 2018-11-01T16:12:26 | 2018-11-01T16:12:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,739 | py | #!/usr/bin/env python2
"""
# Purpose: For a Google Drive User(s), output a CSV file showing the share type counts for files shared by the user(s)
# Note: This script can use Basic or Advanced GAM:
# https://github.com/jay0lee/GAM
# https://github.com/taers232c/GAMADV-X, https://github.com/taers232c/GAMADV-XTD, https://github.com/taers232c/GAMADV-XTD3
# Customize: Set DOMAIN_LIST to the list of domains you consider internal
# Usage:
# 1: Get ACLs for all files, if you don't want all users, replace all users with your user selection in the command below
# $ Example, Basic GAM: gam all users print filelist id title owners permissions > filelistperms.csv
# $ Example, Advanced GAM: gam config auto_batch_min 1 redirect csv ./filelistperms.csv multiprocess all users print filelist id title owners permissions
# 2: From that list of ACLs, output a CSV file with headers:
# Owner - email address of file owner
# Total - total files owned by Owner
# Shared - number of files shared
# Shared External - number of files shared publically (anyone) or to a domain/group/user where the domain is not in DOMAIN_LIST
# Shared Internal - number of files shared to a domain/group/user where the domain is in DOMAIN_LIST
# anyone - number of shares to anyone
# anyoneWithLink - number of shares to anyone with a link
# externalDomain - number of shares to an external domain
# externalDomainWithLink - number of shares to an external domain with a link
# internalDomain - number of shares to an internal domain
# internalDomainWithLink - number of shares to an internal domain with a link
# externalGroup - number of shares to an external group
# internalGroup - number of shares to an internal group
# externalUser - number of shares to an internal user
# internalUser - number of shares to an internal user
# $ python GetUserShareCounts.py filelistperms.csv usersharecounts.csv
"""
import csv
import re
import sys
# Substitute your internal domain(s) in the list below, e.g., DOMAIN_LIST = ['domain.com',] DOMAIN_LIST = ['domain1.com', 'domain2.com',]
DOMAIN_LIST = ['domain.com',]
QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'
def incrementCounter(counter):
  """Bump *counter* for the current file's owner, at most once per file.

  Relies on the module-level ``counterSet`` (per-file "already counted"
  guard flags), ``userShareCounts`` and ``owner``, which are (re)bound in
  the CSV-reading loop below.
  """
  if not counterSet[counter]:
    userShareCounts[owner][counter] += 1
    counterSet[counter] = True
TOTAL_COUNTER = 'Total'
SHARED_COUNTER = 'Shared'
SHARED_EXTERNAL_COUNTER = 'Shared External'
SHARED_INTERNAL_COUNTER = 'Shared Internal'
HEADERS = [
'Owner',
TOTAL_COUNTER, SHARED_COUNTER, SHARED_EXTERNAL_COUNTER, SHARED_INTERNAL_COUNTER,
'anyone', 'anyoneWithLink',
'externalDomain', 'externalDomainWithLink',
'internalDomain', 'internalDomainWithLink',
'externalGroup', 'internalGroup',
'externalUser', 'internalUser',
]
zeroCounts = {
TOTAL_COUNTER: 0, SHARED_COUNTER: 0, SHARED_EXTERNAL_COUNTER: 0, SHARED_INTERNAL_COUNTER: 0,
'anyone': 0, 'anyoneWithLink': 0,
'externalDomain': 0, 'externalDomainWithLink': 0,
'internalDomain': 0, 'internalDomainWithLink': 0,
'externalGroup': 0, 'internalGroup': 0,
'externalUser': 0, 'internalUser': 0,
}
COUNT_CATEGORIES = {
'anyone': {False: 'anyone', True: 'anyoneWithLink'},
'domain': {False: {False: 'externalDomain', True: 'externalDomainWithLink'}, True: {False: 'internalDomain', True: 'internalDomainWithLink'}},
'group': {False: 'externalGroup', True: 'internalGroup'},
'user': {False: 'externalUser', True: 'internalUser'},
}
PERMISSIONS_N_TYPE = re.compile(r"permissions.(\d+).type")
if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
outputFile = open(sys.argv[2], 'wb')
else:
outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, HEADERS, lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()
if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
inputFile = open(sys.argv[1], 'rbU')
else:
inputFile = sys.stdin
userShareCounts = {}
for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
owner = row['owners.0.emailAddress']
userShareCounts.setdefault(owner, zeroCounts.copy())
counterSet = {TOTAL_COUNTER: False, SHARED_COUNTER: False, SHARED_EXTERNAL_COUNTER: False, SHARED_INTERNAL_COUNTER: False}
for k, v in row.iteritems():
mg = PERMISSIONS_N_TYPE.match(k)
if mg and v:
permissions_N = mg.group(1)
if row['permissions.{0}.role'.format(permissions_N)] == 'owner':
incrementCounter(TOTAL_COUNTER)
else:
incrementCounter(SHARED_COUNTER)
if v == 'anyone':
incrementCounter(SHARED_EXTERNAL_COUNTER)
userShareCounts[owner][COUNT_CATEGORIES[v][row['permissions.{0}.withLink'.format(permissions_N)] == 'True']] += 1
else:
domain = row.get('permissions.{0}.domain'.format(permissions_N), '')
if not domain and v in ['user', 'group']:
if row['permissions.{0}.deleted'.format(permissions_N)] == u'True':
continue
emailAddress = row['permissions.{0}.emailAddress'.format(permissions_N)]
domain = emailAddress[emailAddress.find(u'@')+1:]
internal = domain in DOMAIN_LIST
incrementCounter([SHARED_EXTERNAL_COUNTER, SHARED_INTERNAL_COUNTER][internal])
if v == u'domain':
userShareCounts[owner][COUNT_CATEGORIES[v][internal][row['permissions.{0}.withLink'.format(permissions_N)] == 'True']] += 1
else: # group, user
userShareCounts[owner][COUNT_CATEGORIES[v][internal]] += 1
for owner, counts in sorted(userShareCounts.iteritems()):
row = {'Owner': owner}
row.update(counts)
outputCSV.writerow(row)
if inputFile != sys.stdin:
inputFile.close()
if outputFile != sys.stdout:
outputFile.close()
| [
"ross.scroggs@gmail.com"
] | ross.scroggs@gmail.com |
ac50bc52bc7373fcee843af31f074fd1f46ee40e | d815c4755e6f98098452528d8ab69a8f82096b78 | /day11/producer.py | e1ef9d4d5e62560a2626effd42106c83a7ede936 | [] | no_license | immortalmin/csk | 081f1baddde43f74151f08a7d701d4c611845f7f | aca509a03bb88ae2911c1611350decdf68a4419a | refs/heads/master | 2020-04-07T22:51:59.907665 | 2018-12-04T08:53:22 | 2018-12-04T08:53:22 | 158,788,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | #Author:immortal luo
# -*-coding:utf-8 -*-
import pika
connection = pika.BlockingConnection(
    pika.ConnectionParameters('localhost')
)
channel = connection.channel()#open a channel on the connection
#declare the queue
channel.queue_declare(queue='hello',durable=True)#durable queue: survives broker restarts (this persists only the queue itself, not its messages)
channel.basic_publish(exchange='',
                      routing_key='hello',#name of the target queue
                      body='Hello World!',
                      properties=pika.BasicProperties(#make the message itself persistent
                          delivery_mode=2#2 = persistent, 1 = transient
                      )
                      )
print("[x] Sent 'Hello World!'")
connection.close()
"1608725226@qq.com"
] | 1608725226@qq.com |
ca6d004796ccfbe78c85eb4efbea28468a04ebcc | 2289d33c903bf6eaa0aeb228418ef438863e763d | /fortest/fortest/settings.py | 31da12ea1ebcb2450e2cfea43fa4ed31e88ca251 | [] | no_license | theseana/f | e462255eff88370365afeeae53e080aa53239d15 | 8a66acfc55e223fcd702540462053a5b5e0196e4 | refs/heads/master | 2023-01-12T21:30:39.043604 | 2020-11-22T16:00:48 | 2020-11-22T16:00:48 | 315,075,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py | """
Django settings for fortest project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x3o6ig)#e5wzkpzs5b#*ytbs($a#9^s-pq6t)&q*%k^d(4sxe8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fortest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fortest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"info@poulstar.com"
] | info@poulstar.com |
323e87f0298040446697d0117a55480796d625d1 | 1581ea7304a39a81a018e35e5c6d773bb9f1727a | /프로그래머스/PR_여행경로.py | 041746869622645b93f00ec9bd431719a1a62169 | [] | no_license | Yejin6911/Algorithm | 5faae951a19e47dd0babbe0f27e349f8499d5b38 | 80e715c718c8362b20f42115f737b8e918de5b11 | refs/heads/master | 2023-06-20T21:13:39.181327 | 2021-07-19T06:30:20 | 2021-07-19T06:30:20 | 330,934,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | from collections import defaultdict
def solution(tickets):
stack = ["ICN"]
answer = []
routes = defaultdict(list)
for key, value in tickets:
routes[key].append(value)
for r in routes:
routes[r].sort()
while stack:
now = stack[-1]
if now not in routes or len(routes[now]) == 0:
answer.append(stack.pop())
else:
stack.append(routes[now].pop(0))
return answer[::-1]
print(solution([["ICN", "SFO"], ["ICN", "ATL"], [
"SFO", "ATL"], ["ATL", "ICN"], ["ATL", "SFO"]]))
| [
"cdjin6911@gmail.com"
] | cdjin6911@gmail.com |
cc9747c96a7aa72f30372975203452bf4205eac7 | c56303068bf3bb97cb87202f8ed0e8b2f4316a2a | /covid19_pipeline/data/sampler.py | d8c675e849845b966ae44bd7913b6a25470b97d9 | [] | no_license | salvagimeno-ai/HKBU_HPML_COVID-19 | f049b0ed91b0a06db674407d72940452c84a3e06 | c23e9c7bf5bedec4ddcc3d6efd1e0ad0f814446f | refs/heads/master | 2022-12-04T07:03:27.722775 | 2020-08-30T07:47:01 | 2020-08-30T07:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | import torch
import torchvision
from torchline.data.sampler import SAMPLER_REGISTRY
from torchline.data import build_data
__all__ = [
'WeightedRandomSampler',
]
@SAMPLER_REGISTRY.register()
def WeightedRandomSampler(cfg):
dataset = build_data(cfg)
sampler_cfg = cfg.dataloader.sampler
weights = []
weights_cls = cfg.dataloader.sampler.weights_cls
num_samples = len(dataset)
for i in range(num_samples):
weight = weights_cls[int(dataset.samples[i]['label'])]
weights.append(weight)
replacement = sampler_cfg.replacement
return torch.utils.data.WeightedRandomSampler(weights, num_samples, replacement) | [
"1435679023@qq.com"
] | 1435679023@qq.com |
68f0e33fbfb6bfb09cc47e135e5d04fb76d17f89 | 82f993631da2871933edf83f7648deb6c59fd7e4 | /w1/L1/17.py | 5f12a88814e26948b3cfec9064768f06961e56b3 | [] | no_license | bobur554396/PPII2021Summer | 298f26ea0e74c199af7b57a5d40f65e20049ecdd | 7ef38fb4ad4f606940d2ba3daaa47cbd9ca8bcd2 | refs/heads/master | 2023-06-26T05:42:08.523345 | 2021-07-24T12:40:05 | 2021-07-24T12:40:05 | 380,511,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | print(bool(True))
print(bool(1))
print(bool(100))
print(bool('h'))
print(bool('hello'))
print(bool(2.6))
print(bool([1, 2, 3]))
print(bool((1, 2, 3)))
print(bool({'id': '123', 'name': 'Student 1'}))
print('-'*60)
print(bool(False))
print(bool(0))
print(bool(''))
print(bool([]))
print(bool(()))
print(bool({}))
| [
"bobur.muhsimbaev@gmail.com"
] | bobur.muhsimbaev@gmail.com |
666b1de3a5fd29c3d152bb33be6388723535ccdf | ae3d73906b3fac87792bc5f969c0d1b3ebb30290 | /taxcalc/calculator.py | 7a2b5128c5e2e26cd0ac7042d17c7399b095b950 | [] | no_license | rohitdeojha/pitaxcalc-demo | 32be22fafbc62e81d08c603be8733db9b23d4451 | cbd86d432b8d9573f08ac3163ba8d4f1fea49132 | refs/heads/master | 2020-03-29T15:22:26.010616 | 2018-09-26T07:00:16 | 2018-09-26T07:00:16 | 150,060,106 | 0 | 0 | null | 2018-09-24T05:49:57 | 2018-09-24T05:49:56 | null | UTF-8 | Python | false | false | 46,410 | py | """
PIT (personal income tax) Calculator class.
"""
# CODING-STYLE CHECKS:
# pycodestyle calculator.py
# pylint --disable=locally-disabled calculator.py
#
# pylint: disable=too-many-lines
# pylintx: disable=no-value-for-parameter,too-many-lines
import os
import json
import re
import copy
import numpy as np
import pandas as pd
from taxcalc.functions import (net_salary_income, net_rental_income,
total_other_income, gross_total_income,
itemized_deductions, taxable_total_income,
pit_liability)
from taxcalc.policy import Policy
from taxcalc.records import Records
from taxcalc.utils import (DIST_VARIABLES, create_distribution_table,
DIFF_VARIABLES, create_difference_table,
create_diagnostic_table)
# import pdb
class Calculator(object):
"""
Constructor for the Calculator class.
Parameters
----------
policy: Policy class object
this argument must be specified and object is copied for internal use
records: Records class object
this argument must be specified and object is copied for internal use
verbose: boolean
specifies whether or not to write to stdout data-loaded and
data-extrapolated progress reports; default value is true.
sync_years: boolean
specifies whether or not to synchronize policy year and records year;
default value is true.
Raises
------
ValueError:
if parameters are not the appropriate type.
Returns
-------
class instance: Calculator
Notes
-----
The most efficient way to specify current-law and reform Calculator
objects is as follows:
pol = Policy()
rec = Records()
calc1 = Calculator(policy=pol, records=rec) # current-law
pol.implement_reform(...)
calc2 = Calculator(policy=pol, records=rec) # reform
All calculations are done on the internal copies of the Policy and
Records objects passed to each of the two Calculator constructors.
"""
# pylint: disable=too-many-public-methods
    def __init__(self, policy=None, records=None, verbose=True,
                 sync_years=True):
        """
        Initialize Calculator from deep copies of the specified Policy and
        Records objects (see class docstring for details and typical usage).
        """
        # pylint: disable=too-many-arguments,too-many-branches
        # deep-copy inputs so that calculations never mutate caller objects
        if isinstance(policy, Policy):
            self.__policy = copy.deepcopy(policy)
        else:
            raise ValueError('must specify policy as a Policy object')
        if isinstance(records, Records):
            self.__records = copy.deepcopy(records)
        else:
            raise ValueError('must specify records as a Records object')
        # ensure policy year is no earlier than the records data year
        if self.__policy.current_year < self.__records.data_year:
            self.__policy.set_year(self.__records.data_year)
        current_year_is_data_year = (
            self.__records.current_year == self.__records.data_year)
        if sync_years and current_year_is_data_year:
            if verbose:
                print('You loaded data for ' +
                      str(self.__records.data_year) + '.')
                if self.__records.IGNORED_VARS:
                    print('Your data include the following unused ' +
                          'variables that will be ignored:')
                    for var in self.__records.IGNORED_VARS:
                        print('  ' +
                              var)
            # extrapolate records data forward to the policy current year
            while self.__records.current_year < self.__policy.current_year:
                self.__records.increment_year()
            if verbose:
                print('Tax-Calculator startup automatically ' +
                      'extrapolated your data to ' +
                      str(self.__records.current_year) + '.')
        assert self.__policy.current_year == self.__records.current_year
        self.__stored_records = None  # snapshot slot used by store_records()
def increment_year(self):
"""
Advance all embedded objects to next year.
"""
next_year = self.__policy.current_year + 1
self.__records.increment_year()
self.__policy.set_year(next_year)
def advance_to_year(self, year):
"""
The advance_to_year function gives an optional way of implementing
increment year functionality by immediately specifying the year
as input. New year must be at least the current year.
"""
iteration = year - self.current_year
if iteration < 0:
raise ValueError('New current year must be ' +
'greater than current year!')
for _ in range(iteration):
self.increment_year()
assert self.current_year == year
    def calc_all(self, zero_out_calc_vars=False):
        """
        Call all tax-calculation functions for the current_year.

        zero_out_calc_vars: if true, reset the changing calculated
        variables in the embedded Records object to zero first.
        """
        # pylint: disable=too-many-function-args,no-value-for-parameter
        # conducts static analysis of Calculator object for current_year
        assert self.__records.current_year == self.__policy.current_year
        if zero_out_calc_vars:
            self.__records.zero_out_changing_calculated_vars()
        # pdb.set_trace()
        # the functions below are called in dependency order: income
        # components first, then gross total income, deductions, taxable
        # total income, and finally the personal-income-tax liability
        net_salary_income(self.__policy, self.__records)
        net_rental_income(self.__policy, self.__records)
        total_other_income(self.__policy, self.__records)
        gross_total_income(self.__policy, self.__records)
        itemized_deductions(self.__policy, self.__records)
        taxable_total_income(self.__policy, self.__records)
        pit_liability(self)
        # TODO: ADD: expanded_income(self.__policy, self.__records)
        # TODO: ADD: aftertax_income(self.__policy, self.__records)
def weighted_total(self, variable_name):
"""
Return all-filing-unit weighted total of named Records variable.
"""
return (self.array(variable_name) * self.array('s006')).sum()
def total_weight(self):
"""
Return all-filing-unit total of sampling weights.
NOTE: var_weighted_mean = calc.weighted_total(var)/calc.total_weight()
"""
return self.array('s006').sum()
def dataframe(self, variable_list):
"""
Return pandas DataFrame containing the listed variables from embedded
Records object.
"""
assert isinstance(variable_list, list)
arys = [self.array(vname) for vname in variable_list]
pdf = pd.DataFrame(data=np.column_stack(arys), columns=variable_list)
del arys
return pdf
    def distribution_table_dataframe(self):
        """
        Return pandas DataFrame containing the DIST_VARIABLES variables
        from the embedded Records object, plus derived weighted-count
        columns used by the distribution tables.
        """
        pdf = self.dataframe(DIST_VARIABLES)
        # weighted count of itemized-deduction returns
        pdf['num_returns_ItemDed'] = pdf['s006'].where(
            pdf['c04470'] > 0., 0.)
        # weighted count of standard-deduction returns
        pdf['num_returns_StandardDed'] = pdf['s006'].where(
            pdf['standard'] > 0., 0.)
        # weighted count of returns with positive Alternative Minimum Tax (AMT)
        pdf['num_returns_AMT'] = pdf['s006'].where(
            pdf['c09600'] > 0., 0.)
        return pdf
def array(self, variable_name, variable_value=None):
"""
If variable_value is None, return numpy ndarray containing the
named variable in embedded Records object.
If variable_value is not None, set named variable in embedded Records
object to specified variable_value and return None (which can be
ignored).
"""
if variable_value is None:
return getattr(self.__records, variable_name)
assert isinstance(variable_value, np.ndarray)
setattr(self.__records, variable_name, variable_value)
return None
def n65(self):
"""
Return numpy ndarray containing the number of
individuals age 65+ in each filing unit.
"""
vdf = self.dataframe(['age_head', 'age_spouse', 'elderly_dependents'])
return ((vdf['age_head'] >= 65).astype(int) +
(vdf['age_spouse'] >= 65).astype(int) +
vdf['elderly_dependents'])
def incarray(self, variable_name, variable_add):
"""
Add variable_add to named variable in embedded Records object.
"""
assert isinstance(variable_add, np.ndarray)
setattr(self.__records, variable_name,
self.array(variable_name) + variable_add)
def zeroarray(self, variable_name):
"""
Set named variable in embedded Records object to zeros.
"""
setattr(self.__records, variable_name, np.zeros(self.array_len))
def store_records(self):
"""
Make internal copy of embedded Records object that can then be
restored after interim calculations that make temporary changes
to the embedded Records object.
"""
assert self.__stored_records is None
self.__stored_records = copy.deepcopy(self.__records)
def restore_records(self):
"""
Set the embedded Records object to the stored Records object
that was saved in the last call to the store_records() method.
"""
assert isinstance(self.__stored_records, Records)
self.__records = copy.deepcopy(self.__stored_records)
del self.__stored_records
self.__stored_records = None
def records_current_year(self, year=None):
"""
If year is None, return current_year of embedded Records object.
If year is not None, set embedded Records current_year to year and
return None (which can be ignored).
"""
if year is None:
return self.__records.current_year
assert isinstance(year, int)
self.__records.set_current_year(year)
return None
@property
def array_len(self):
"""
Length of arrays in embedded Records object.
"""
return self.__records.array_length
def policy_param(self, param_name, param_value=None):
"""
If param_value is None, return named parameter in
embedded Policy object.
If param_value is not None, set named parameter in
embedded Policy object to specified param_value and
return None (which can be ignored).
"""
if param_value is None:
return getattr(self.__policy, param_name)
setattr(self.__policy, param_name, param_value)
return None
@property
def reform_warnings(self):
"""
Calculator class embedded Policy object's reform_warnings.
"""
return self.__policy.parameter_warnings
def policy_current_year(self, year=None):
"""
If year is None, return current_year of embedded Policy object.
If year is not None, set embedded Policy current_year to year and
return None (which can be ignored).
"""
if year is None:
return self.__policy.current_year
assert isinstance(year, int)
self.__policy.set_year(year)
return None
@property
def current_year(self):
"""
Calculator class current assessment year property.
"""
return self.__policy.current_year
@property
def data_year(self):
"""
Calculator class initial (i.e., first) records data year property.
"""
return self.__records.data_year
    def diagnostic_table(self, num_years):
        """
        Generate multi-year diagnostic table containing aggregate statistics;
        this method leaves the Calculator object unchanged.

        Parameters
        ----------
        num_years : Integer
            number of years to include in diagnostic table starting
            with the Calculator object's current_year (must be at least
            one and no more than what would exceed Policy end_year)

        Returns
        -------
        Pandas DataFrame object containing the multi-year diagnostic table
        (one column per year)
        """
        assert num_years >= 1
        max_num_years = self.__policy.end_year - self.__policy.current_year + 1
        assert num_years <= max_num_years
        diag_variables = DIST_VARIABLES + ['surtax']
        # work on a deep copy so this Calculator object is left unchanged
        calc = copy.deepcopy(self)
        tlist = list()
        for iyr in range(1, num_years + 1):
            calc.calc_all()
            diag = create_diagnostic_table(calc.dataframe(diag_variables),
                                           calc.current_year)
            tlist.append(diag)
            if iyr < num_years:
                calc.increment_year()
        del diag_variables
        del calc
        del diag
        return pd.concat(tlist, axis=1)
    def distribution_tables(self, calc, groupby):
        """
        Get results from self and calc, sort them by expanded_income into
        table rows defined by groupby, compute grouped statistics, and
        return tables as a pair of Pandas dataframes.
        This method leaves the Calculator object(s) unchanged.
        Note that the returned tables have consistent income groups (based
        on the self expanded_income) even though the baseline expanded_income
        in self and the reform expanded_income in calc are different.

        Parameters
        ----------
        calc : Calculator object or None
            typically represents the reform while self represents the
            baseline; if calc is None, the second returned table is None

        groupby : String object
            options for input: 'weighted_deciles', 'standard_income_bins';
            determines how the columns in resulting Pandas DataFrame
            are sorted

        Return and typical usage
        ------------------------
        dist1, dist2 = calc1.distribution_tables(calc2, 'weighted_deciles')
        OR
        dist1, _ = calc1.distribution_tables(None, 'weighted_deciles')
        (where calc1 is a baseline Calculator object
        and calc2 is a reform Calculator object).
        Each of the dist1 and optional dist2 is a distribution table as a
        Pandas DataFrame with DIST_TABLE_COLUMNS and groupby rows.
        NOTE: when groupby is 'weighted_deciles', the returned tables have
        3 extra rows containing top-decile detail consisting of statistics
        for the 0.90-0.95 quantile range (bottom half of top decile),
        for the 0.95-0.99 quantile range, and
        for the 0.99-1.00 quantile range (top one percent); and the
        returned table splits the bottom decile into filing units with
        negative (denoted by a 0-10n row label),
        zero (denoted by a 0-10z row label), and
        positive (denoted by a 0-10p row label) values of the
        specified income_measure.
        """
        # nested function used only by this method
        def have_same_income_measure(calc1, calc2):
            """
            Return true if calc1 and calc2 contain the same expanded_income;
            otherwise, return false. (Note that "same" means nobody's
            expanded_income differs by more than one cent.)
            """
            im1 = calc1.array('expanded_income')
            im2 = calc2.array('expanded_income')
            return np.allclose(im1, im2, rtol=0.0, atol=0.01)
        # main logic of method
        assert calc is None or isinstance(calc, Calculator)
        assert (groupby == 'weighted_deciles' or
                groupby == 'standard_income_bins')
        if calc is not None:
            assert np.allclose(self.array('s006'),
                               calc.array('s006'))  # check rows in same order
        var_dataframe = self.distribution_table_dataframe()
        imeasure = 'expanded_income'
        dt1 = create_distribution_table(var_dataframe, groupby, imeasure)
        del var_dataframe
        if calc is None:
            dt2 = None
        else:
            assert calc.current_year == self.current_year
            assert calc.array_len == self.array_len
            var_dataframe = calc.distribution_table_dataframe()
            if have_same_income_measure(self, calc):
                imeasure = 'expanded_income'
            else:
                # sort reform table rows by the BASELINE income measure so
                # that both tables use identical row groupings
                imeasure = 'expanded_income_baseline'
                var_dataframe[imeasure] = self.array('expanded_income')
            dt2 = create_distribution_table(var_dataframe, groupby, imeasure)
            del var_dataframe
        return (dt1, dt2)
def difference_table(self, calc, groupby, tax_to_diff):
"""
Get results from self and calc, sort them by expanded_income into
table rows defined by groupby, compute grouped statistics, and
return tax-difference table as a Pandas dataframe.
This method leaves the Calculator objects unchanged.
Note that the returned tables have consistent income groups (based
on the self expanded_income) even though the baseline expanded_income
in self and the reform expanded_income in calc are different.
Parameters
----------
calc : Calculator object
calc represents the reform while self represents the baseline
groupby : String object
options for input: 'weighted_deciles', 'standard_income_bins'
determines how the columns in resulting Pandas DataFrame are sorted
tax_to_diff : String object
options for input: 'iitax', 'payrolltax', 'combined'
specifies which tax to difference
Returns and typical usage
-------------------------
diff = calc1.difference_table(calc2, 'weighted_deciles', 'iitax')
(where calc1 is a baseline Calculator object
and calc2 is a reform Calculator object).
The returned diff is a difference table as a Pandas DataFrame
with DIST_TABLE_COLUMNS and groupby rows.
NOTE: when groupby is 'weighted_deciles', the returned table has three
extra rows containing top-decile detail consisting of statistics
for the 0.90-0.95 quantile range (bottom half of top decile),
for the 0.95-0.99 quantile range, and
for the 0.99-1.00 quantile range (top one percent); and the
returned table splits the bottom decile into filing units with
negative (denoted by a 0-10n row label),
zero (denoted by a 0-10z row label), and
positive (denoted by a 0-10p row label) values of the
specified income_measure.
"""
assert isinstance(calc, Calculator)
assert calc.current_year == self.current_year
assert calc.array_len == self.array_len
self_var_dataframe = self.dataframe(DIFF_VARIABLES)
calc_var_dataframe = calc.dataframe(DIFF_VARIABLES)
diff = create_difference_table(self_var_dataframe,
calc_var_dataframe,
groupby, tax_to_diff)
del self_var_dataframe
del calc_var_dataframe
return diff
MTR_VALID_VARIABLES = ['e00200p', 'e00200s',
'e00900p', 'e00300',
'e00400', 'e00600',
'e00650', 'e01400',
'e01700', 'e02000',
'e02400', 'p22250',
'p23250', 'e18500',
'e19200', 'e26270',
'e19800', 'e20100']
    def mtr(self, variable_str='e00200p',
            negative_finite_diff=False,
            zero_out_calculated_vars=False,
            calc_all_already_called=False,
            wrt_full_compensation=True):
        """
        Calculates the marginal payroll, individual income, and combined
        tax rates for every tax filing unit, leaving the Calculator object
        in exactly the same state as it would be in after a calc_all() call.

        The marginal tax rates are approximated as the change in tax
        liability caused by a small increase (the finite_diff) in the variable
        specified by the variable_str divided by that small increase in the
        variable, when wrt_full_compensation is false.

        If wrt_full_compensation is true, then the marginal tax rates
        are computed as the change in tax liability divided by the change
        in total compensation caused by the small increase in the variable
        (where the change in total compensation is the sum of the small
        increase in the variable and any increase in the employer share of
        payroll taxes caused by the small increase in the variable).

        If using 'e00200s' as variable_str, the marginal tax rate for all
        records where MARS != 2 will be missing. If you want to perform a
        function such as np.mean() on the returned arrays, you will need to
        account for this.

        Parameters
        ----------
        variable_str: string
            specifies type of income or expense that is increased to compute
            the marginal tax rates. See Notes for list of valid variables.

        negative_finite_diff: boolean
            specifies whether or not marginal tax rates are computed by
            subtracting (rather than adding) a small finite_diff amount
            to the specified variable.

        zero_out_calculated_vars: boolean
            specifies value of zero_out_calc_vars parameter used in calls
            of Calculator.calc_all() method.

        calc_all_already_called: boolean
            specifies whether self has already had its Calculor.calc_all()
            method called, in which case this method will not do a final
            calc_all() call but use the incoming embedded Records object
            as the outgoing Records object embedding in self.

        wrt_full_compensation: boolean
            specifies whether or not marginal tax rates on earned income
            are computed with respect to (wrt) changes in total compensation
            that includes the employer share of OASDI and HI payroll taxes.

        Returns
        -------
        A tuple of numpy arrays in the following order:
        mtr_payrolltax: an array of marginal payroll tax rates.
        mtr_incometax: an array of marginal individual income tax rates.
        mtr_combined: an array of marginal combined tax rates, which is
        the sum of mtr_payrolltax and mtr_incometax.

        Notes
        -----
        The arguments zero_out_calculated_vars and calc_all_already_called
        cannot both be true.

        Valid variable_str values are:
        'e00200p', taxpayer wage/salary earnings (also included in e00200);
        'e00200s', spouse wage/salary earnings (also included in e00200);
        'e00900p', taxpayer Schedule C self-employment income (also in e00900);
        'e00300', taxable interest income;
        'e00400', federally-tax-exempt interest income;
        'e00600', all dividends included in AGI
        'e00650', qualified dividends (also included in e00600)
        'e01400', federally-taxable IRA distribution;
        'e01700', federally-taxable pension benefits;
        'e02000', Schedule E total net income/loss
        'e02400', all social security (OASDI) benefits;
        'p22250', short-term capital gains;
        'p23250', long-term capital gains;
        'e18500', Schedule A real-estate-tax paid;
        'e19200', Schedule A interest paid;
        'e26270', S-corporation/partnership income (also included in e02000);
        'e19800', Charity cash contributions;
        'e20100', Charity non-cash contributions.
        """
        # pylint: disable=too-many-arguments,too-many-statements
        # pylint: disable=too-many-locals,too-many-branches
        assert not zero_out_calculated_vars or not calc_all_already_called
        # check validity of variable_str parameter
        if variable_str not in Calculator.MTR_VALID_VARIABLES:
            msg = 'mtr variable_str="{}" is not valid'
            raise ValueError(msg.format(variable_str))
        # specify value for finite_diff parameter
        finite_diff = 0.01  # a one-cent difference
        if negative_finite_diff:
            finite_diff *= -1.0
        # remember records object in order to restore it after mtr computations
        self.store_records()
        # extract variable array(s) from embedded records object
        variable = self.array(variable_str)
        # some variables are components of an aggregate variable, so the
        # aggregate must be bumped by the same finite_diff amount below
        if variable_str == 'e00200p':
            earnings_var = self.array('e00200')
        elif variable_str == 'e00200s':
            earnings_var = self.array('e00200')
        elif variable_str == 'e00900p':
            seincome_var = self.array('e00900')
        elif variable_str == 'e00650':
            divincome_var = self.array('e00600')
        elif variable_str == 'e26270':
            sche_income_var = self.array('e02000')
        # calculate level of taxes after a marginal increase in income
        self.array(variable_str, variable + finite_diff)
        if variable_str == 'e00200p':
            self.array('e00200', earnings_var + finite_diff)
        elif variable_str == 'e00200s':
            self.array('e00200', earnings_var + finite_diff)
        elif variable_str == 'e00900p':
            self.array('e00900', seincome_var + finite_diff)
        elif variable_str == 'e00650':
            self.array('e00600', divincome_var + finite_diff)
        elif variable_str == 'e26270':
            self.array('e02000', sche_income_var + finite_diff)
        self.calc_all(zero_out_calc_vars=zero_out_calculated_vars)
        payrolltax_chng = self.array('payrolltax')
        incometax_chng = self.array('iitax')
        combined_taxes_chng = incometax_chng + payrolltax_chng
        # calculate base level of taxes after restoring records object
        self.restore_records()
        if not calc_all_already_called or zero_out_calculated_vars:
            self.calc_all(zero_out_calc_vars=zero_out_calculated_vars)
        payrolltax_base = self.array('payrolltax')
        incometax_base = self.array('iitax')
        combined_taxes_base = incometax_base + payrolltax_base
        # compute marginal changes in combined tax liability
        payrolltax_diff = payrolltax_chng - payrolltax_base
        incometax_diff = incometax_chng - incometax_base
        combined_diff = combined_taxes_chng - combined_taxes_base
        # specify optional adjustment for employer (er) OASDI+HI payroll taxes
        mtr_on_earnings = (variable_str == 'e00200p' or
                           variable_str == 'e00200s')
        if wrt_full_compensation and mtr_on_earnings:
            adj = np.where(variable < self.policy_param('SS_Earnings_c'),
                           0.5 * (self.policy_param('FICA_ss_trt') +
                                  self.policy_param('FICA_mc_trt')),
                           0.5 * self.policy_param('FICA_mc_trt'))
        else:
            adj = 0.0
        # compute marginal tax rates
        mtr_payrolltax = payrolltax_diff / (finite_diff * (1.0 + adj))
        mtr_incometax = incometax_diff / (finite_diff * (1.0 + adj))
        mtr_combined = combined_diff / (finite_diff * (1.0 + adj))
        # if variable_str is e00200s, set MTR to NaN for units without a spouse
        if variable_str == 'e00200s':
            mars = self.array('MARS')
            mtr_payrolltax = np.where(mars == 2, mtr_payrolltax, np.nan)
            mtr_incometax = np.where(mars == 2, mtr_incometax, np.nan)
            mtr_combined = np.where(mars == 2, mtr_combined, np.nan)
        # delete intermediate variables
        del variable
        if variable_str == 'e00200p' or variable_str == 'e00200s':
            del earnings_var
        elif variable_str == 'e00900p':
            del seincome_var
        elif variable_str == 'e00650':
            del divincome_var
        elif variable_str == 'e26270':
            del sche_income_var
        del payrolltax_chng
        del incometax_chng
        del combined_taxes_chng
        del payrolltax_base
        del incometax_base
        del combined_taxes_base
        del payrolltax_diff
        del incometax_diff
        del combined_diff
        del adj
        # return the three marginal tax rate arrays
        return (mtr_payrolltax, mtr_incometax, mtr_combined)
REQUIRED_REFORM_KEYS = set(['policy'])
# THE REQUIRED_ASSUMP_KEYS ARE OBSOLETE BECAUSE NO ASSUMP FILES ARE USED
REQUIRED_ASSUMP_KEYS = set(['consumption', 'behavior',
'growdiff_baseline', 'growdiff_response',
'growmodel'])
@staticmethod
def read_json_param_objects(reform, assump):
"""
Read JSON reform object [and formerly assump object] and
return a single dictionary containing 6 key:dict pairs:
'policy':dict, 'consumption':dict, 'behavior':dict,
'growdiff_baseline':dict, 'growdiff_response':dict, and
'growmodel':dict.
Note that either of the two function arguments can be None.
If reform is None, the dict in the 'policy':dict pair is empty.
If assump is None, the dict in the all the key:dict pairs is empty.
Also note that either of the two function arguments can be strings
containing a valid JSON string (rather than a filename),
in which case the file reading is skipped and the appropriate
read_json_*_text method is called.
The reform file contents or JSON string must be like this:
{"policy": {...}}
and the assump file contents or JSON string must be like this:
{"consumption": {...},
"behavior": {...},
"growdiff_baseline": {...},
"growdiff_response": {...},
"growmodel": {...}}
The {...} should be empty like this {} if not specifying a policy
reform or if not specifying any economic assumptions of that type.
The returned dictionary contains parameter lists (not arrays).
"""
# pylint: disable=too-many-branches
# first process second assump parameter
assert assump is None
if assump is None:
cons_dict = dict()
behv_dict = dict()
gdiff_base_dict = dict()
gdiff_resp_dict = dict()
growmodel_dict = dict()
elif isinstance(assump, str):
if os.path.isfile(assump):
txt = open(assump, 'r').read()
else:
txt = assump
(cons_dict,
behv_dict,
gdiff_base_dict,
gdiff_resp_dict,
growmodel_dict) = Calculator._read_json_econ_assump_text(txt)
else:
raise ValueError('assump is neither None nor string')
# next process first reform parameter
if reform is None:
rpol_dict = dict()
elif isinstance(reform, str):
if os.path.isfile(reform):
txt = open(reform, 'r').read()
else:
txt = reform
rpol_dict = Calculator._read_json_policy_reform_text(txt)
else:
raise ValueError('reform is neither None nor string')
# construct single composite dictionary
param_dict = dict()
param_dict['policy'] = rpol_dict
param_dict['consumption'] = cons_dict
param_dict['behavior'] = behv_dict
param_dict['growdiff_baseline'] = gdiff_base_dict
param_dict['growdiff_response'] = gdiff_resp_dict
param_dict['growmodel'] = growmodel_dict
# return the composite dictionary
return param_dict
    @staticmethod
    def reform_documentation(params, policy_dicts=None):
        """
        Generate reform documentation.
        Parameters
        ----------
        params: dict
            dictionary is structured like dict returned from
            the static Calculator method read_json_param_objects()
        policy_dicts : list of dict or None
            each dictionary in list is a params['policy'] dictionary
            representing second and subsequent elements of a compound
            reform; None implies no compound reform with the simple
            reform characterized in the params['policy'] dictionary
        Returns
        -------
        doc: String
            the documentation for the policy reform specified in params
        """
        # pylint: disable=too-many-statements,too-many-branches
        # nested function used only in reform_documentation
        def param_doc(years, change, base):
            """
            Generate documentation for one (possibly multi-year) set of
            parameter changes against the given baseline Policy object.
            Parameters
            ----------
            years: list of change years
            change: dictionary of parameter changes
            base: Policy object with baseline values
            syear: parameter start assessment year
            Returns
            -------
            doc: String
            """
            # pylint: disable=too-many-locals
            # nested function used only in param_doc
            def lines(text, num_indent_spaces, max_line_length=77):
                """
                Return list of text lines, each one of which is no longer
                than max_line_length, with the second and subsequent lines
                being indented by the number of specified num_indent_spaces;
                each line in the list ends with the '\n' character
                """
                if len(text) < max_line_length:
                    # all text fits on one line
                    line = text + '\n'
                    return [line]
                # all text does not fix on one line
                first_line = True
                line_list = list()
                words = text.split()
                while words:
                    if first_line:
                        line = ''
                        first_line = False
                    else:
                        line = ' ' * num_indent_spaces
                    # greedily pack words until the line would overflow
                    while (words and
                           (len(words[0]) + len(line)) < max_line_length):
                        line += words.pop(0) + ' '
                    line = line[:-1] + '\n'
                    line_list.append(line)
                return line_list
            # begin main logic of param_doc
            # pylint: disable=too-many-nested-blocks
            assert len(years) == len(change.keys())
            assert isinstance(base, Policy)
            # deep-copy so set_year() calls below do not mutate the caller's
            # baseline Policy object
            basex = copy.deepcopy(base)
            basevals = getattr(basex, '_vals', None)
            assert isinstance(basevals, dict)
            doc = ''
            for year in years:
                # write year
                basex.set_year(year)
                doc += '{}:\n'.format(year)
                # write info for each param in year
                for param in sorted(change[year].keys()):
                    # ... write param:value line
                    pval = change[year][param]
                    if isinstance(pval, list):
                        pval = pval[0]
                    if basevals[param]['boolean_value']:
                        if isinstance(pval, list):
                            pval = [True if item else
                                    False for item in pval]
                        else:
                            pval = bool(pval)
                    doc += ' {} : {}\n'.format(param, pval)
                    # ... write optional param-index line
                    if isinstance(pval, list):
                        pval = basevals[param]['col_label']
                        pval = [str(item) for item in pval]
                        doc += ' ' * (4 + len(param)) + '{}\n'.format(pval)
                    # ... write name line
                    if param.endswith('_cpi'):
                        # *_cpi parameters toggle inflation indexing of the
                        # root parameter and have no metadata of their own
                        rootparam = param[:-4]
                        name = '{} inflation indexing status'.format(rootparam)
                    else:
                        name = basevals[param]['long_name']
                    for line in lines('name: ' + name, 6):
                        doc += ' ' + line
                    # ... write optional desc line
                    if not param.endswith('_cpi'):
                        desc = basevals[param]['description']
                        for line in lines('desc: ' + desc, 6):
                            doc += ' ' + line
                    # ... write baseline_value line
                    if param.endswith('_cpi'):
                        rootparam = param[:-4]
                        bval = basevals[rootparam].get('cpi_inflated',
                                                       False)
                    else:
                        bval = getattr(basex, param[1:], None)
                        if isinstance(bval, np.ndarray):
                            bval = bval.tolist()
                            if basevals[param]['boolean_value']:
                                bval = [True if item else
                                        False for item in bval]
                        elif basevals[param]['boolean_value']:
                            bval = bool(bval)
                    doc += ' baseline_value: {}\n'.format(bval)
            return doc
        # begin main logic of reform_documentation
        # create Policy object with pre-reform (i.e., baseline) values
        clp = Policy()
        # generate documentation text
        doc = 'REFORM DOCUMENTATION\n'
        doc += 'Policy Reform Parameter Values by Year:\n'
        years = sorted(params['policy'].keys())
        if years:
            doc += param_doc(years, params['policy'], clp)
        else:
            doc += 'none: using current-law policy parameters\n'
        if policy_dicts is not None:
            # compound reform: apply each reform in sequence so each
            # subsequent element is documented against the updated baseline
            assert isinstance(policy_dicts, list)
            base = clp
            base.implement_reform(params['policy'])
            assert not base.parameter_errors
            for policy_dict in policy_dicts:
                assert isinstance(policy_dict, dict)
                doc += 'Policy Reform Parameter Values by Year:\n'
                years = sorted(policy_dict.keys())
                doc += param_doc(years, policy_dict, base)
                base.implement_reform(policy_dict)
                assert not base.parameter_errors
        return doc
# ----- begin private methods of Calculator class -----
@staticmethod
def _read_json_policy_reform_text(text_string):
"""
Strip //-comments from text_string and return 1 dict based on the JSON.
Specified text is JSON with at least 1 high-level key:object pair:
a "policy": {...} pair. Other keys will raise a ValueError.
The {...} object may be empty (that is, be {}), or
may contain one or more pairs with parameter string primary keys
and string years as secondary keys. See tests/test_calculator.py for
an extended example of a commented JSON policy reform text
that can be read by this method.
Returned dictionary prdict has integer years as primary keys and
string parameters as secondary keys. This returned dictionary is
suitable as the argument to the Policy implement_reform(prdict) method.
"""
# pylint: disable=too-many-locals
# strip out //-comments without changing line numbers
json_str = re.sub('//.*', ' ', text_string)
# convert JSON text into a Python dictionary
try:
raw_dict = json.loads(json_str)
except ValueError as valerr:
msg = 'Policy reform text below contains invalid JSON:\n'
msg += str(valerr) + '\n'
msg += 'Above location of the first error may be approximate.\n'
msg += 'The invalid JSON reform text is between the lines:\n'
bline = 'XX----.----1----.----2----.----3----.----4'
bline += '----.----5----.----6----.----7'
msg += bline + '\n'
linenum = 0
for line in json_str.split('\n'):
linenum += 1
msg += '{:02d}{}'.format(linenum, line) + '\n'
msg += bline + '\n'
raise ValueError(msg)
# check key contents of dictionary
actual_keys = set(raw_dict.keys())
missing_keys = Calculator.REQUIRED_REFORM_KEYS - actual_keys
if missing_keys:
msg = 'required key(s) "{}" missing from policy reform file'
raise ValueError(msg.format(missing_keys))
illegal_keys = actual_keys - Calculator.REQUIRED_REFORM_KEYS
if illegal_keys:
msg = 'illegal key(s) "{}" in policy reform file'
raise ValueError(msg.format(illegal_keys))
# convert raw_dict['policy'] dictionary into prdict
tdict = Policy.translate_json_reform_suffixes(raw_dict['policy'])
prdict = Calculator._convert_parameter_dict(tdict)
return prdict
@staticmethod
def _read_json_econ_assump_text(text_string):
"""
Strip //-comments from text_string and return 5 dict based on the JSON.
Specified text is JSON with at least 5 high-level key:value pairs:
a "consumption": {...} pair,
a "behavior": {...} pair,
a "growdiff_baseline": {...} pair,
a "growdiff_response": {...} pair, and
a "growmodel": {...} pair.
Other keys such as "policy" will raise a ValueError.
The {...} object may be empty (that is, be {}), or
may contain one or more pairs with parameter string primary keys
and string years as secondary keys. See tests/test_calculator.py for
an extended example of a commented JSON economic assumption text
that can be read by this method.
Note that an example is shown in the ASSUMP_CONTENTS string in
the tests/test_calculator.py file.
Returned dictionaries (cons_dict, behv_dict, gdiff_baseline_dict,
gdiff_respose_dict, growmodel_dict) have integer years as primary
keys and string parameters as secondary keys.
These returned dictionaries are suitable as the arguments to
the Consumption.update_consumption(cons_dict) method, or
the Behavior.update_behavior(behv_dict) method, or
the GrowDiff.update_growdiff(gdiff_dict) method, or
the GrowModel.update_growmodel(growmodel_dict) method.
"""
# pylint: disable=too-many-locals
# strip out //-comments without changing line numbers
json_str = re.sub('//.*', ' ', text_string)
# convert JSON text into a Python dictionary
try:
raw_dict = json.loads(json_str)
except ValueError as valerr:
msg = 'Economic assumption text below contains invalid JSON:\n'
msg += str(valerr) + '\n'
msg += 'Above location of the first error may be approximate.\n'
msg += 'The invalid JSON asssump text is between the lines:\n'
bline = 'XX----.----1----.----2----.----3----.----4'
bline += '----.----5----.----6----.----7'
msg += bline + '\n'
linenum = 0
for line in json_str.split('\n'):
linenum += 1
msg += '{:02d}{}'.format(linenum, line) + '\n'
msg += bline + '\n'
raise ValueError(msg)
# check key contents of dictionary
actual_keys = set(raw_dict.keys())
missing_keys = Calculator.REQUIRED_ASSUMP_KEYS - actual_keys
if missing_keys:
msg = 'required key(s) "{}" missing from economic assumption file'
raise ValueError(msg.format(missing_keys))
illegal_keys = actual_keys - Calculator.REQUIRED_ASSUMP_KEYS
if illegal_keys:
msg = 'illegal key(s) "{}" in economic assumption file'
raise ValueError(msg.format(illegal_keys))
# convert the assumption dictionaries in raw_dict
key = 'consumption'
cons_dict = Calculator._convert_parameter_dict(raw_dict[key])
key = 'behavior'
behv_dict = Calculator._convert_parameter_dict(raw_dict[key])
key = 'growdiff_baseline'
gdiff_base_dict = Calculator._convert_parameter_dict(raw_dict[key])
key = 'growdiff_response'
gdiff_resp_dict = Calculator._convert_parameter_dict(raw_dict[key])
key = 'growmodel'
growmodel_dict = Calculator._convert_parameter_dict(raw_dict[key])
return (cons_dict, behv_dict, gdiff_base_dict, gdiff_resp_dict,
growmodel_dict)
@staticmethod
def _convert_parameter_dict(param_key_dict):
"""
Converts specified param_key_dict into a dictionary whose primary
keys are assessment years, and hence, is suitable as the argument
to the Policy.implement_reform() method.
Specified input dictionary has string parameter primary keys and
string years as secondary keys.
Returned dictionary has integer years as primary keys and
string parameters as secondary keys.
"""
# convert year skey strings into integers and
# optionally convert lists into np.arrays
year_param = dict()
for pkey, sdict in param_key_dict.items():
if not isinstance(pkey, str):
msg = 'pkey {} in reform is not a string'
raise ValueError(msg.format(pkey))
rdict = dict()
if not isinstance(sdict, dict):
msg = 'pkey {} in reform is not paired with a dict'
raise ValueError(msg.format(pkey))
for skey, val in sdict.items():
if not isinstance(skey, str):
msg = 'skey {} in reform is not a string'
raise ValueError(msg.format(skey))
else:
year = int(skey)
rdict[year] = val
year_param[pkey] = rdict
# convert year_param dictionary to year_key_dict dictionary
year_key_dict = dict()
years = set()
for param, sdict in year_param.items():
for year, val in sdict.items():
if year not in years:
years.add(year)
year_key_dict[year] = dict()
year_key_dict[year][param] = val
return year_key_dict
| [
"martin.holmer@gmail.com"
] | martin.holmer@gmail.com |
17a801e1c8f1bfed5c0e1f9dbc0213f087032fdb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02999/s307440938.py | 4877282d29ac7f77ea440e4d8d536528f72eeecf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | X, A = [int(i) for i in input().split()]
# print 0 when X is below the threshold A, otherwise 10
print(0 if X < A else 10)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0a255e211f9dad61eb4d0665a5241214dadd47f6 | f469652395fd34bd228ac23bb1a24efce6e5c4a0 | /看书笔记/看书练习/类/模块存储多个类/car.py | 001e32f69d227e1222a520cdfe4632cd75e494b0 | [] | no_license | wfwf1990/python | 0f5528f92d6172da96bce3ded12d1cc2f038ec3c | 6fa3b600cfcf4ab49da7cd8b5f62b5b62e276bfa | refs/heads/master | 2021-04-18T21:35:04.445511 | 2018-06-25T17:40:04 | 2018-06-25T17:40:04 | 126,700,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | class Car():
    def __init__(self,make,model,year):
        """Initialize the attributes that describe a car."""
        self.make = make  # manufacturer name
        self.model = model  # model name
        self.year = year  # model year
        self.odometer_reading = 0  # miles driven; every car starts at zero
def getDescriptiveName(self): #返回描述性信息
long_name = str(self.year) + " " + self.make + " "+ self.model
return long_name.title()
def getOdometerReading(self):
print("This car has " + str(self.odometer_reading) + " miles on it")
#通过方法接受一个里程值,并将其存储到self.odometer_reading中
def updateOdometer(self,mileage):
#禁止将里程数往回调
if mileage >= self.odometer_reading:
self.odometer_reading = mileage
else:
print("you can not roll back an odometer")
def increment_odometer(self,miles):
if miles >= 0:
self.odometer_reading += miles
else:
print("you can not roll back an odometer")
class ElectricCar(Car):
    """A car powered by a battery; inherits everything from Car."""
    def __init__(self,make,modle,year):
        """Initialize base-class attributes, then attach a Battery."""
        # NOTE(review): parameter is spelled 'modle' (typo for 'model');
        # left unchanged because positional callers are unaffected.
        super(ElectricCar, self).__init__(make,modle,year)
        # NOTE(review): despite the name, battery_size holds a Battery
        # *instance*, not a number -- confirm before renaming to 'battery'.
        self.battery_size = Battery()
class Battery():
    """Model a battery for an electric car."""
    def __init__(self,battery_size=70):
        """Initialize the battery with a capacity in kWh (default 70)."""
        self.battery_size = battery_size
    def describeBattery(self):
        """Print a statement describing the battery capacity."""
        print("This car has a " + str(self.battery_size) + "-kwh battery.")
    def getRange(self):
        """Print the approximate range this battery provides.

        Fixes two defects in the original: an unrecognized battery_size
        raised UnboundLocalError, and the local variable shadowed the
        builtin range().
        """
        # map the known capacities to their approximate ranges
        ranges = {70: 240, 85: 270}
        miles = ranges.get(self.battery_size)
        if miles is None:
            # previously this case crashed with UnboundLocalError
            print("Range unknown for a " + str(self.battery_size) +
                  "-kwh battery.")
            return
        message = "This car can go approximately " + str(miles)
        message += " miles on a full charge."
        print(message)
| [
"576589099@qq.com"
] | 576589099@qq.com |
411440d37c8077bf6abc259cf3ea6e44e925bf8d | af58fa633206f571d4b370919e27de8d4b9862ed | /tasks/forms.py | 1b6d8ead9fdf09748187e018e42dbc3040332b75 | [] | no_license | gmdmgithub/django-todo-list | 7d36b9603fcdd30959ad48e8f2e97070918c68b7 | 7efaee21bbbdaaff1db46e255b63267ac6a8ab31 | refs/heads/master | 2021-09-25T10:39:10.202237 | 2019-12-17T14:59:45 | 2019-12-17T14:59:45 | 227,467,068 | 0 | 0 | null | 2021-09-22T18:18:36 | 2019-12-11T21:50:47 | Python | UTF-8 | Python | false | false | 271 | py | from django import forms
from django.forms import ModelForm
from .models import *
class TaskForm(forms.ModelForm):
    """Model form for Task with a placeholder-styled title input."""
    title = forms.CharField(
        widget=forms.TextInput(attrs={'placeholder': 'Add new task'}))

    class Meta:
        model = Task
        fields = '__all__'
"gmika@interia.pl"
] | gmika@interia.pl |
5cc0e88482c0fe46e9e874a61a59235ebed66e6a | f100c2da80a6917b5387f159be10ffac3d03fdda | /comet/web.py | 89d38e8610bfe4d554434ee57a1fed67fa3d90cb | [
"MIT"
] | permissive | willingc/comet_cms | 82621ddcceab47b3c57db3267ced9afd6bf511ee | 57fa7bee4091d21c5c81c695dfe69126e181011b | refs/heads/master | 2021-01-23T02:09:50.065254 | 2015-01-13T19:13:54 | 2015-01-13T19:13:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,726 | py | # -*- coding: utf-8 -*-
# Comet CMS v0.6.0
# Copyright © 2014-2015 Chris Warrick, Roberto Alsina, Henry Hirsch et al.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function, unicode_literals
import json
import os
import io
import pkg_resources
import nikola.__main__
import logbook
import redis
import rq
import comet.tasks
from nikola.utils import (unicode_str, get_logger, ColorfulStderrHandler,
write_metadata, TranslatableSetting)
import nikola.plugins.command.new_post
from flask import Flask, request, redirect, send_from_directory, g, session
from flask.ext.login import (LoginManager, login_required, login_user,
logout_user, current_user, make_secure_token)
from flask.ext.bcrypt import Bcrypt
from comet.utils import USER_FIELDS, PERMISSIONS, SiteProxy
from comet.forms import (LoginForm, NewPostForm, NewPageForm, DeleteForm,
UserDeleteForm, UserEditForm, AccountForm,
PermissionsForm, PostEditForm)
# Module-level singletons, all populated by configure_site():
_site = None  # the real Nikola site object
site = None  # SiteProxy wrapping _site (Redis-backed)
app = None  # rebound below to the Flask application
db = None  # Redis connection
q = None  # RQ task queue (rebuild jobs)
def scan_site():
    """Rescan the site so the post/page timeline reflects disk changes."""
    site.scan_posts(really=True, quiet=True)
def configure_url(url):
    """Point every site/app URL setting (Comet, Nikola, proxy) at *url*."""
    app.config['COMET_URL'] = url
    _site.config['SITE_URL'] = url
    _site.config['BASE_URL'] = url
    _site.GLOBAL_CONTEXT['blog_url'] = url
    site.config['SITE_URL'] = url
    site.config['BASE_URL'] = url
def configure_site():
    """Configure the site for Comet.

    Boots a Nikola site in the current directory, wires up logging,
    Redis/RQ, the admin navigation/theme overrides, and finally creates
    the ``site`` proxy used by the views.
    """
    global _site, site, db, q
    # run Nikola's CLI machinery once to obtain a configured site object
    nikola.__main__._RETURN_DOITNIKOLA = True
    _dn = nikola.__main__.main([])
    _dn.sub_cmds = _dn.get_commands()
    _site = _dn.nikola
    app.config['BCRYPT_LOG_ROUNDS'] = 12
    app.config['NIKOLA_ROOT'] = os.getcwd()
    app.config['DEBUG'] = False
    # Logging configuration
    logf = (u'[{record.time:%Y-%m-%dT%H:%M:%SZ}] {record.level_name}: '
            u'{record.channel}: {record.message}')
    logh = (u'[{record.time:%Y-%m-%dT%H:%M:%SZ}] {record.channel} '
            u'{record.message}')
    loghandlers = [
        ColorfulStderrHandler(level=logbook.DEBUG, format_string=logf,
                              bubble=True),
        logbook.FileHandler('comet.log', 'a', 'utf-8', logbook.DEBUG, logf,
                            bubble=True)
    ]
    hloghandlers = [
        ColorfulStderrHandler(level=logbook.DEBUG, format_string=logh,
                              bubble=True),
        logbook.FileHandler('comet.log', 'a', 'utf-8', logbook.DEBUG, logh,
                            bubble=True)
    ]
    # route Nikola's loggers through the same handlers as Comet's
    _site.loghandlers = loghandlers
    nikola.utils.LOGGER.handlers = loghandlers
    nikola.plugins.command.new_post.POSTLOGGER.handlers = loghandlers
    nikola.plugins.command.new_post.PAGELOGGER.handlers = loghandlers
    app.config['LOGGER_NAME'] = 'Comet'
    app._logger = get_logger('Comet', loghandlers)
    app.http_logger = get_logger('CometHTTP', hloghandlers)
    if not _site.configured:
        # NOTE(review): this calls the logger object directly; probably
        # meant app.logger.error("Not a Nikola site.") -- confirm.
        app.logger("Not a Nikola site.")
        return
    app.secret_key = _site.config.get('COMET_SECRET_KEY')
    app.config['COMET_URL'] = _site.config.get('COMET_URL')
    app.config['REDIS_URL'] = _site.config.get('COMET_REDIS_URL', 'redis://localhost:6379/0')
    db = redis.StrictRedis.from_url(app.config['REDIS_URL'])
    q = rq.Queue(connection=db)
    _site.template_hooks['menu_alt'].append(generate_menu_alt)
    app.config['NIKOLA_URL'] = _site.config['SITE_URL']
    # replace the site's navigation with admin-specific links
    _site.config['NAVIGATION_LINKS'] = {
        'en': (
            (app.config['NIKOLA_URL'],
             '<i class="fa fa-globe"></i> Back to website'),
            ('/rebuild', '<i class="fa fa-cog rebuild build-status-icon"></i> Rebuild'),
        )
    }
    _site.GLOBAL_CONTEXT['navigation_links'] = _site.config['NAVIGATION_LINKS']
    TITLE = _site.GLOBAL_CONTEXT['blog_title']('en') + ' Administration'
    _site.config['BLOG_TITLE'] = TranslatableSetting(
        'BLOG_TITLE', TITLE, _site.config['TRANSLATIONS'])
    _site.GLOBAL_CONTEXT['blog_title'] = _site.config['BLOG_TITLE']
    _site.GLOBAL_CONTEXT['lang'] = 'en'
    _site.GLOBAL_CONTEXT['extra_head_data'] = TranslatableSetting(
        'EXTRA_HEAD_DATA',
        """<link href="//maxcdn.bootstrapcdn.com/font-awesome/4.2.0/css/"""
        """font-awesome.min.css" rel="stylesheet">\n"""
        """<link href="/comet_assets/css/comet.css" rel="stylesheet">""",
        _site.config['TRANSLATIONS'])
    # HACK: body_end appears after extra_js from templates, so we must use
    # social_buttons_code instead
    _site.GLOBAL_CONTEXT['social_buttons_code'] = TranslatableSetting(
        'SOCIAL_BUTTONS_CODE',
        """<script src="/comet_assets/js/comet.js"></scripts>""",
        _site.config['TRANSLATIONS'])
    # Theme must inherit from bootstrap3, because we have hardcoded HTML.
    bs3 = (('bootstrap3' in _site.THEMES)
           or ('bootstrap3-jinja' in _site.THEMES))
    if not bs3:
        app.logger.notice("THEME does not inherit from 'bootstrap3' or "
                          "'bootstrap3-jinja', using 'bootstrap3' instead.")
        _site.config['THEME'] = 'bootstrap3'
    # Reloading some things
    _site._THEMES = None
    _site._get_themes()
    _site._template_system = None
    _site._get_template_system()
    if 'has_custom_css' in _site._GLOBAL_CONTEXT:
        del _site._GLOBAL_CONTEXT['has_custom_css']
    _site._get_global_context()
    tmpl_dir = pkg_resources.resource_filename(
        'comet', os.path.join('data', 'templates', _site.template_system.name))
    if os.path.isdir(tmpl_dir):
        # Inject tmpl_dir low in the theme chain
        _site.template_system.inject_directory(tmpl_dir)
    # Site proxy
    site = SiteProxy(db, _site, app.logger)
    configure_url(app.config['COMET_URL'])
def password_hash(password):
    """Hash the password, using bcrypt.

    The work factor comes from app.config['BCRYPT_LOG_ROUNDS']
    (set to 12 in configure_site()).

    :param str password: Password in plaintext
    :return: password hash
    :rtype: str
    """
    return bcrypt.generate_password_hash(password)
def check_password(pwdhash, password):
    """Check the password hash from :func:`password_hash`.

    Uses bcrypt's constant-time comparison under the hood.

    :param str pwdhash: Hash from :func:`password_hash` to check
    :param str password: Password in plaintext
    :return: password match
    :rtype: bool
    """
    return bcrypt.check_password_hash(pwdhash, password)
def generate_menu_alt():
    """Generate ``menu_alt`` with log in/out links.

    Registered as a Nikola template hook in configure_site().

    :return: HTML fragment
    :rtype: str
    """
    # anonymous visitors only get a login link
    if not current_user.is_authenticated():
        return """<li><a href="/login">Log in</a></li>"""
    # admins additionally see user management entries
    if current_user.is_admin:
        edit_entry = """<li><a href="/users">Manage users</a></li>\
<li><a href="/users/permissions">Permissions</a></li>"""
    else:
        edit_entry = ''
    return """
<li class="dropdown">
    <a href="#" class="dropdown-toggle" data-toggle="dropdown"
        role="button" aria-expanded="false">{0} [{1}]<span
        class="caret"></span></a>
    <ul class="dropdown-menu" role="menu">
        <li><a href="/account">Account</a></li>
        {2}
        <li><a href="/logout">Log out</a></li>
    </ul>
</li>""".format(current_user.realname, current_user.username, edit_entry)
def _author_get(post):
"""Get the name of the post author.
:param Post post: The post object to determine authorship of
:return: Author real name
:rtype: str
"""
a = post.meta['en']['author']
return a if a else current_user.realname
def _author_uid_get(post):
"""Get the UID of the post author.
:param Post post: The post object to determine authorship of
:return: Author UID
:rtype: str
"""
u = post.meta['en']['author.uid']
return u if u else str(current_user.uid)
def render(template_name, context=None, code=200, headers=None):
    """Render a response using standard Nikola templates.

    :param str template_name: Template name
    :param dict context: Context (variables) to use in the template
    :param int code: HTTP status code
    :param headers: Headers to use for the response
    :return: HTML fragment
    :rtype: str
    """
    context = {} if context is None else context
    headers = {} if headers is None else headers
    # expose request-scoped objects and author helpers to every template
    context.update({
        'g': g,
        'request': request,
        'session': session,
        'current_user': current_user,
        '_author_get': _author_get,
        '_author_uid_get': _author_uid_get,
    })
    # admin pages must never be cached
    headers.update({
        'Pragma': 'no-cache',
        'Cache-Control': 'private, max-age=0, no-cache',
    })
    return _site.render_template(template_name, None, context), code, headers
def error(desc, code, permalink):
    """Render an error page.

    :param str desc: Error description
    :param int code: HTTP status code
    :param str permalink: Path to page generating errors
    :return: HTML fragment (from :func:`render`)
    :rtype: str
    """
    ctx = {
        'title': 'Error',
        'code': code,
        'desc': desc,
        'permalink': permalink,
    }
    return render('comet_error.tmpl', ctx, code)
def _unauthorized():
    """Redirect to the login page with the “unauthorized” status hint."""
    return redirect('/login?status=unauthorized')
def find_post(path):
    """Find a post by its source path.

    :param str path: Path to the post
    :return: A post matching the path
    :rtype: Post or None
    """
    return next((p for p in site.timeline if p.source_path == path), None)
# The WSGI application object; fully configured later by configure_site().
app = Flask('comet')
@app.after_request
def log_request(resp):
    """Log every HTTP request at a level chosen by its status code."""
    entry = "[{4}] {0} {1} {2} <{3}>".format(request.remote_addr,
                                             request.method,
                                             request.url, request.endpoint,
                                             resp.status_code)
    first_digit = str(resp.status_code)[0]
    # 1xx/2xx and 304 (not modified) are normal traffic
    if first_digit in ('1', '2') or resp.status_code == 304:
        app.http_logger.info(entry)
    elif first_digit == '3':
        app.http_logger.warn(entry)
    else:
        app.http_logger.error(entry)
    return resp
bcrypt = Bcrypt(app)  # password hashing helper
login_manager = LoginManager()
login_manager.init_app(app)
# send unauthenticated users to the login page instead of a bare 401
login_manager.unauthorized_callback = _unauthorized
class User(object):
    """A Comet user record, implementing the Flask-Login user API."""

    def __init__(self, uid, username, realname, password, email, active,
                 is_admin, can_edit_all_posts, wants_all_posts,
                 can_upload_attachments, can_rebuild_site,
                 can_transfer_post_authorship):
        """Store account data and permission flags on the instance."""
        self.uid = int(uid)
        self.username = username
        self.realname = realname
        self.password = password
        self.email = email
        self.active = active
        self.is_admin = is_admin
        self.can_edit_all_posts = can_edit_all_posts
        self.wants_all_posts = wants_all_posts
        self.can_upload_attachments = can_upload_attachments
        self.can_rebuild_site = can_rebuild_site
        self.can_transfer_post_authorship = can_transfer_post_authorship

    def get_id(self):
        """Return the user ID as a string (Flask-Login requirement)."""
        return unicode_str(self.uid)

    def is_authenticated(self):
        """Report whether this account counts as logged in (active)."""
        return self.active

    def is_active(self):
        """Report whether this account is enabled."""
        return self.active

    def is_anonymous(self):
        """Report whether this account should be treated as anonymous."""
        return not self.active

    def get_auth_token(self):
        """Return a secure token derived from uid, username and password."""
        return make_secure_token(self.uid, self.username, self.password)

    def __repr__(self):
        """Return a programmer-friendly representation."""
        return '<User {0}>'.format(self.username)
@login_manager.user_loader
def get_user(uid):
    """Get an user by the UID (Flask-Login user loader).

    :param str uid: UID to find
    :return: the user, or None when no such user exists
    :rtype: User object or None
    """
    d = db.hgetall('user:{0}'.format(uid))
    if d:
        # permission fields are stored as '1'/'0' strings in Redis;
        # convert them to booleans before constructing the User
        for p in PERMISSIONS:
            d[p] = d[p] == '1'
        return User(uid=uid, **d)
    else:
        return None
def find_user_by_name(username):
    """Get an user by their username.

    :param str username: Username to find
    :return: the user
    :rtype: User object or None
    """
    # the 'users' hash maps username -> uid
    uid = db.hget('users', username)
    return get_user(uid) if uid else None
def write_user(user):
    """Write an user to the database.

    :param User user: User to write
    """
    # plain fields are stored verbatim; permissions as '1'/'0' strings
    udata = {f: getattr(user, f) for f in USER_FIELDS}
    for p in PERMISSIONS:
        udata[p] = '1' if getattr(user, p) else '0'
    db.hmset('user:{0}'.format(user.uid), udata)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Handle user authentication.

    If requested over GET, present login page.
    If requested over POST, log user in.

    :param str status: Status of previous request/login attempt
    """
    alert = None
    alert_status = 'danger'
    code = 200
    form = LoginForm()
    if request.method == 'POST':
        if form.validate():
            user = find_user_by_name(request.form['username'])
            if not user:
                alert = 'Invalid credentials.'
                code = 401
            else:
                # BUGFIX: is_active is a method -- the original tested the
                # bound method object (always truthy), letting deactivated
                # accounts log in; it must be called.
                if check_password(user.password,
                                  request.form['password']) and user.is_active():
                    login_user(user, remember=('remember' in request.form))
                    return redirect('/')
                else:
                    alert = "Invalid credentials."
                    code = 401
        else:
            alert = 'Invalid credentials.'
            code = 401
    else:
        # GET: show a contextual message depending on how we got here
        if request.args.get('status') == 'unauthorized':
            alert = 'Please log in to access this page.'
        elif request.args.get('status') == 'logout':
            alert = 'Logged out successfully.'
            alert_status = 'success'
    return render('comet_login.tmpl', {'title': 'Login', 'permalink': '/login',
                                       'alert': alert, 'form': form,
                                       'alert_status': alert_status}, code)
@app.route('/logout')
@login_required
def logout():
    """Log the user out and redirect them to the login page.

    The ``status=logout`` query string makes the login page show a
    confirmation message.
    """
    logout_user()
    return redirect('/login?status=logout')
@app.route('/')
@login_required
def index():
    """Show the index with all posts.

    :param int all: query argument; '1' shows all posts, '0' only own,
        absent falls back to the user's wants_all_posts preference
    """
    # an un-built site has no assets yet -- send the user to first-run setup
    if not os.path.exists(os.path.join(_site.config["OUTPUT_FOLDER"],
                                       'assets')):
        return redirect('/setup')
    context = {'postform': NewPostForm(),
               'pageform': NewPageForm(),
               'delform': DeleteForm()}
    n = request.args.get('all')
    if n is None:
        wants_now = None
    else:
        wants_now = n == '1'
    # explicit ?all= overrides the stored per-user preference
    if wants_now is None and current_user.wants_all_posts:
        wants = True
    else:
        wants = wants_now
    if current_user.can_edit_all_posts and wants:
        posts = site.all_posts
        pages = site.pages
    else:
        # restrict the listing to the current user's own items
        wants = False
        posts = []
        pages = []
        for p in site.timeline:
            if (p.meta('author.uid')
                    and p.meta('author.uid') != str(current_user.uid)):
                continue
            if p.is_post:
                posts.append(p)
            else:
                pages.append(p)
    context['posts'] = posts
    context['pages'] = pages
    context['title'] = 'Posts & Pages'
    context['permalink'] = '/'
    context['wants'] = wants
    return render('comet_index.tmpl', context)
# TODO: delete (with redirects) as soon as `comet init` exists
@app.route('/setup')
def setup():
    """TEMPORARY setup function.

    Shows first-run instructions when the site has never been built
    (detected by the absence of output assets).
    """
    ns = not os.path.exists(os.path.join(_site.config["OUTPUT_FOLDER"],
                                         'assets'))
    return render("comet_setup.tmpl", context={'needs_setup': ns})
@app.route('/edit/<path:path>', methods=['GET', 'POST'])
@login_required
def edit(path):
    """Edit a post.

    If requested over GET, shows the edit UI.
    If requested over POST, saves the post and shows the edit UI.

    :param path: Path to post to edit.
    """
    context = {'path': path, 'site': site}
    post = find_post(path)
    if post is None:
        return error("No such post or page.", 404, '/edit/' + path)
    form = PostEditForm()
    if request.method == 'POST':
        if not form.validate():
            return error("Bad Request", 400, '/edit/' + path)
        meta = {}
        for k, v in request.form.items():
            meta[k] = v
        meta.pop('_wysihtml5_mode', '')
        try:
            meta['author'] = get_user(meta['author.uid']).realname
            author_change_success = True
        except Exception:
            # BUGFIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.  get_user() may return None
            # (AttributeError) or the uid may be missing/malformed; in
            # either case fall back to the previous author below.
            author_change_success = False
        if (not current_user.can_transfer_post_authorship
                or not author_change_success):
            meta['author'] = post.meta('author') or current_user.realname
            meta['author.uid'] = post.meta('author.uid') or current_user.uid
        twofile = post.is_two_file
        onefile = not twofile
        post.compiler.create_post(post.source_path, onefile=onefile,
                                  is_page=False, **meta)
        context['post_content'] = meta['content']
        if twofile:
            meta_path = os.path.splitext(path)[0] + '.meta'
            # We cannot save `content` as meta, otherwise things break badly
            meta.pop('content', '')
            with io.open(meta_path, 'w+', encoding='utf-8') as fh:
                fh.write(write_metadata(meta))
        # reload so the re-rendered page reflects the saved changes
        scan_site()
        post = find_post(path)
        context['action'] = 'save'
    else:
        context['action'] = 'edit'
        with io.open(path, 'r', encoding='utf-8') as fh:
            context['post_content'] = fh.read()
        if not post.is_two_file:
            # one-file posts embed metadata before the first blank line
            context['post_content'] = context['post_content'].split(
                '\n\n', 1)[1]
    context['post'] = post
    # offer all active users as potential authors in the edit form
    users = []
    last_uid = int(db.get('last_uid'))
    for u in range(1, last_uid + 1):
        realname, active = db.hmget('user:{0}'.format(u), 'realname', 'active')
        if active == '1':
            users.append((u, realname))
    context['users'] = sorted(users)
    context['current_auid'] = int(post.meta('author.uid') or current_user.uid)
    context['title'] = 'Editing {0}'.format(post.title())
    context['permalink'] = '/edit/' + path
    context['is_html'] = post.compiler.name == 'html'
    context['form'] = form
    return render('comet_post_edit.tmpl', context)
@app.route('/delete', methods=['POST'])
@login_required
def delete():
    """Delete a post (removes its source file and rescans the site)."""
    form = DeleteForm()
    path = request.form['path']
    post = find_post(path)
    if post is None:
        return error("No such post or page.", 404, '/delete')
    # validate after lookup so a bogus path still yields a 404
    if not form.validate():
        return error("Bad Request", 400, '/delete')
    os.unlink(path)
    scan_site()
    return redirect('/')
@app.route('/api/rebuild')
@login_required
def api_rebuild():
    """Rebuild the site (internally).

    Polled by the rebuild page: enqueues the build+orphans RQ jobs on
    first call, then returns their progress metadata as JSON and cancels
    the finished jobs.
    """
    build_job = q.fetch_job('build')
    orphans_job = q.fetch_job('orphans')
    if not build_job and not orphans_job:
        build_job = q.enqueue_call(func=comet.tasks.build,
                                   args=(app.config['REDIS_URL'],
                                         app.config['NIKOLA_ROOT']),
                                   job_id='build')
        # orphan removal must run only after the build finishes
        orphans_job = q.enqueue_call(func=comet.tasks.orphans,
                                     args=(app.config['REDIS_URL'],
                                           app.config['NIKOLA_ROOT']),
                                     job_id='orphans', depends_on=build_job)
    d = json.dumps({'build': build_job.meta, 'orphans': orphans_job.meta})
    # both jobs report a final status -> clean up so the next request
    # can start a fresh rebuild
    if ('status' in build_job.meta and
            build_job.meta['status'] is not None
            and 'status' in orphans_job.meta and
            orphans_job.meta['status'] is not None):
        rq.cancel_job('build', db)
        rq.cancel_job('orphans', db)
    return d
@app.route('/rebuild')
@login_required
def rebuild():
    """Rebuild the site with a nice UI.

    Enqueues the build and orphan-removal jobs (unless already queued)
    and shows the progress page, which polls /api/rebuild.
    """
    scan_site() # for good measure
    if not q.fetch_job('build') and not q.fetch_job('orphans'):
        b = q.enqueue_call(func=comet.tasks.build,
                           args=(app.config['REDIS_URL'],
                                 app.config['NIKOLA_ROOT']), job_id='build')
        # orphan removal depends on a completed build
        q.enqueue_call(func=comet.tasks.orphans,
                       args=(app.config['REDIS_URL'],
                             app.config['NIKOLA_ROOT']), job_id='orphans',
                       depends_on=b)
    return render('comet_rebuild.tmpl',
                  {'title': 'Rebuild', 'permalink': '/rebuild'})
@app.route('/bower_components/<path:path>')
def serve_bower_components(path):
    """Serve bower components.

    This is meant to be used ONLY by the internal dev server.
    Please configure your web server to handle requests to this URL::

        /bower_components/ => comet/data/bower_components
    """
    return send_from_directory(
        pkg_resources.resource_filename(
            'comet', os.path.join('data', 'bower_components')),
        path)
@app.route('/comet_assets/<path:path>')
def serve_comet_assets(path):
    """Serve Comet assets (internal dev server ONLY).

    In production, configure the web server to map::

        /comet_assets/ => comet/data/comet_assets
    """
    directory = pkg_resources.resource_filename(
        'comet', os.path.join('data', 'comet_assets'))
    return send_from_directory(directory, path)
@app.route('/assets/<path:path>')
def serve_assets(path):
    """Serve Nikola output assets (internal dev server ONLY).

    In production, configure the web server to map::

        /assets/ => output/assets
    """
    asset_dir = os.path.join(app.config['NIKOLA_ROOT'],
                             _site.config["OUTPUT_FOLDER"], 'assets')
    return send_from_directory(asset_dir, path)
@app.route('/new/<obj>', methods=['POST'])
@login_required
def new(obj):
    """Create a new post or page.

    :param str obj: Object to create (post or page)
    """
    title = request.form['title']
    # Stamp the creator's uid into the metadata Nikola writes; removed
    # again in the finally-block below.
    _site.config['ADDITIONAL_METADATA']['author.uid'] = current_user.uid
    try:
        if obj == 'post':
            form, creator = NewPostForm(), _site.commands.new_post
        elif obj == 'page':
            form, creator = NewPageForm(), _site.commands.new_page
        else:
            return error("Cannot create {0} — unknown type.".format(obj),
                         400, '/new/' + obj)
        if not form.validate():
            return error("Bad Request", 400, '/new/' + obj)
        creator(title=title, author=current_user.realname,
                content_format='html')
    except SystemExit:
        # Nikola exits when the target file already exists.
        return error("This {0} already exists!".format(obj),
                     500, '/new/' + obj)
    finally:
        del _site.config['ADDITIONAL_METADATA']['author.uid']
    # reload post list and go to index
    scan_site()
    return redirect('/')
@app.route('/account', methods=['POST', 'GET'])
@login_required
def acp_user_account():
    """Let the logged-in user manage their own account.

    Admin-only options are deliberately NOT handled here.
    """
    alert, alert_status, action = '', '', 'edit'
    form = AccountForm()
    if request.method == 'POST':
        if not form.validate():
            return error("Bad Request", 400, "/account")
        action = 'save'
        data = request.form
        if data['newpwd1']:
            passwords_ok = (data['newpwd1'] == data['newpwd2'] and
                            check_password(current_user.password,
                                           data['oldpwd']))
            if passwords_ok:
                current_user.password = password_hash(data['newpwd1'])
            else:
                alert = 'Passwords don’t match.'
                alert_status = 'danger'
                action = 'save_fail'
        # The remaining fields are saved even if the password change failed.
        current_user.realname = data['realname']
        current_user.email = data['email']
        current_user.wants_all_posts = 'wants_all_posts' in data
        write_user(current_user)
    return render('comet_account.tmpl',
                  context={'title': 'My account',
                           'permalink': '/account',
                           'action': action,
                           'alert': alert,
                           'alert_status': alert_status,
                           'form': form})
@app.route('/users')
@login_required
def acp_users():
    """Show the user-management list (admins only)."""
    status = request.args.get('status')
    alert = ''
    alert_status = ''
    if status == 'deleted':
        alert, alert_status = 'User deleted.', 'success'
    if status == 'undeleted':
        alert, alert_status = 'User undeleted.', 'success'
    if not current_user.is_admin:
        return error("Not authorized to edit users.", 401, "/users")
    # uids are assigned sequentially from 1; `last_uid` is the counter.
    last_uid = int(db.get('last_uid'))
    all_users = {uid: get_user(uid) for uid in range(1, last_uid + 1)}
    return render('comet_users.tmpl',
                  context={'title': 'Users',
                           'permalink': '/users',
                           'USERS': all_users,
                           'alert': alert,
                           'alert_status': alert_status,
                           'delform': UserDeleteForm(),
                           'editform': UserEditForm()})
@app.route('/users/edit', methods=['POST'])
@login_required
def acp_users_edit():
    """Edit an user account.

    Handles two form actions: ``new`` (allocate a uid, create a blank
    active user) and ``save`` (update password, username, profile fields
    and permission flags).  Admins only.
    """
    global current_user
    if not current_user.is_admin:
        return error("Not authorized to edit users.", 401, "/users/edit")
    data = request.form
    form = UserEditForm()
    if not form.validate():
        return error("Bad Request", 400, "/users/edit")
    action = data['action']
    if action == 'new':
        if not data['username']:
            return error("No username to create specified.", 400,
                         "/users/edit")
        uid = db.incr('last_uid')
        # One boolean flag per entry in PERMISSIONS; only `active` set.
        pf = [False for p in PERMISSIONS]
        pf[0] = True  # active
        user = User(uid, data['username'], '', '', *pf)
        write_user(user)
        # Redis hash `users` maps username -> uid for login lookups.
        db.hset('users', user.username, user.uid)
        new = True
    else:
        # NOTE(review): data['uid'] is passed without int() here, while
        # the delete handler converts it -- confirm get_user accepts both.
        user = get_user(data['uid'])
        new = False
    if not user:
        return error("User does not exist.", 404, "/users/edit")
    alert = ''
    alert_status = ''
    if action == 'save':
        if data['newpwd1']:
            if data['newpwd1'] == data['newpwd2']:
                user.password = password_hash(data['newpwd1'])
            else:
                alert = 'Passwords don’t match.'
                alert_status = 'danger'
                action = 'save_fail'
        elif new:
            # A freshly created user must get a password on first save.
            alert = 'Must set a password.'
            alert_status = 'danger'
            action = 'save_fail'
        # Username change: keep the username -> uid hash consistent.
        if data['username'] != user.username:
            db.hdel('users', user.username)
            user.username = data['username']
            db.hset('users', user.username, user.uid)
        user.realname = data['realname']
        user.email = data['email']
        # Checkbox semantics: a permission is granted iff its name is
        # present in the submitted form.
        for p in PERMISSIONS:
            setattr(user, p, p in data)
        user.active = True
        # Admins cannot strip their own admin bit via this form.
        if user.uid == current_user.uid:
            user.is_admin = True
            current_user = user
        write_user(user)
    return render('comet_users_edit.tmpl',
                  context={'title': 'Edit user',
                           'permalink': '/users/edit',
                           'user': user,
                           'new': new,
                           'action': action,
                           'alert': alert,
                           'alert_status': alert_status,
                           'form': form})
@app.route('/users/delete', methods=['POST'])
@login_required
def acp_users_delete():
    """Soft-delete or restore a user account (admins only)."""
    if not current_user.is_admin:
        return error("Not authorized to edit users.", 401, "/users/delete")
    form = UserDeleteForm()
    if not form.validate():
        return error("Bad Request", 400, '/users/delete')
    user = get_user(int(request.form['uid']))
    direction = request.form['direction']
    if not user:
        return error("User does not exist.", 404, "/users/delete")
    # Either way every permission flag is stripped; "undel" reactivates
    # the account, anything else leaves it deactivated.
    for permission in PERMISSIONS:
        setattr(user, permission, False)
    user.active = (direction == 'undel')
    write_user(user)
    return redirect('/users?status={_del}eted'.format(_del=direction))
@app.route('/users/permissions', methods=['GET', 'POST'])
@login_required
def acp_users_permissions():
    """Change user permissions.

    GET renders the permission matrix; POST saves one checkbox per
    (uid, permission) pair named ``"<uid>.<perm>"``.  Admins only.
    """
    if not current_user.is_admin:
        return error("Not authorized to edit users.",
                     401, "/users/permissions")
    form = PermissionsForm()
    users = {}
    last_uid = int(db.get('last_uid'))
    if request.method == 'POST':
        if not form.validate():
            return error("Bad Request", 400, '/users/permissions')
        for uid in range(1, last_uid + 1):
            user = get_user(uid)
            # Checkbox present in the form => permission granted.
            for perm in PERMISSIONS:
                if '{0}.{1}'.format(uid, perm) in request.form:
                    setattr(user, perm, True)
                else:
                    setattr(user, perm, False)
            if uid == current_user.uid:
                user.is_admin = True  # cannot deadmin oneself
                user.active = True  # cannot deactivate oneself
            write_user(user)
            # NOTE(review): this entry is overwritten by the re-read loop
            # near the end of the function.
            users[uid] = user
        action = 'save'
    else:
        action = 'edit'

    def display_permission(user, permission):
        """Render one permission checkbox as an HTML <input> snippet."""
        checked = 'checked' if getattr(user, permission) else ''
        if permission == 'wants_all_posts' and not user.can_edit_all_posts:
            # If this happens, permissions are damaged.
            checked = ''
        # Self-locking flags are rendered read-only for the current admin.
        if user.uid == current_user.uid and permission in ['active',
                                                           'is_admin']:
            disabled = 'disabled'
        else:
            disabled = ''
        permission_a = permission
        if permission == 'active':
            permission_a = 'is_active'
        d = ('<input type="checkbox" name="{0}.{1}" data-uid="{0}" '
             'data-perm="{4}" class="u{0}" {2} {3}>')
        return d.format(user.uid, permission, checked, disabled, permission_a)
    # Re-read every user so the template shows the persisted state.
    for uid in range(1, last_uid + 1):
        users[uid] = get_user(uid)
    return render('comet_users_permissions.tmpl',
                  context={'title': 'Permissions',
                           'permalink': '/users/permissions',
                           'USERS': users,
                           'PERMISSIONS': PERMISSIONS,
                           'action': action,
                           'json': json,
                           'form': form,
                           'display_permission': display_permission})
# Wire up the Flask app only when running inside a Nikola site directory
# (conf.py present) and the "skip configuration" marker file is absent.
if not os.path.exists('._COMET_NO_CONFIG') and os.path.exists('conf.py'):
    configure_site()
else:
    # no Nikola site available
    app = None
| [
"kwpolska@gmail.com"
] | kwpolska@gmail.com |
e4441350874f79918bd8c01eb254b00f5cf56043 | 6f044a0541ddf467bb6251645c3d8107df5f5756 | /status/migrations/0013_status_trait.py | ea4451fca16a2341d7584014d6176fc495d94aef | [] | no_license | tpvt99/new-social-network-backend | 04ae9f0551c09eceb5fd6b4bcf50430243e53199 | a18d6279a27ba0ce3af1f5d6e985b4b147a4233a | refs/heads/master | 2021-09-04T01:50:43.430961 | 2018-01-14T08:59:41 | 2018-01-14T08:59:41 | 117,415,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-17 15:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``trait`` foreign key to the ``Status`` model."""
    dependencies = [
        ('trait', '0001_initial'),
        ('status', '0012_status_contestpost'),
    ]
    operations = [
        migrations.AddField(
            model_name='status',
            name='trait',
            # Nullable so existing Status rows remain valid; deleting the
            # referenced Trait cascades and removes the Status as well.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='trait.Trait'),
        ),
    ]
| [
"tranphong96.hbk@gmail.com"
] | tranphong96.hbk@gmail.com |
c8f80a4707a3c941c2a3e4b4f7a6eaf9d71e88a6 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2802/60716/236663.py | 2f8ffb9bdd560ab5b4a981852be9ace2494fb1bb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | num, m= map(int,input().split())
# Elimination game: `num` players stand in a queue and each player i
# carries the counter lists[i] (`num` and threshold `m` are read on the
# preceding input line).  The head player leaves the game when their
# counter is <= m; otherwise the counter is reduced by m and the player
# rejoins at the tail.  The number of the last remaining player is printed.
# NOTE(review): `str` shadows the builtin -- left as-is in this pass.
str = input().split(' ')
lists = [int(i) for i in str]  # per-player counters, queue order
listleave = []  # elimination order (collected but never printed)
listmember = []  # player numbers (1-based), queue order
for i in range(num):
    listmember.append(i+1)
while len(listmember)>1:
    if lists[0]<=m:
        # Counter exhausted: head player is eliminated.
        lists.pop(0)
        index=listmember.pop(0)
        # print("{}leave".format(index))
        listleave.append(index)
    else:
        # Pay m and move to the back of the queue.
        temp = lists.pop(0) -m
        lists.append(temp)
        index = listmember.pop(0)
        listmember.append(index)
        # print("{}gotoend".format(index))
print(listmember[0])
"1069583789@qq.com"
] | 1069583789@qq.com |
f1cc691a72877a2403999b9e4aba96d5532e8c66 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ggH_SF/Full2017_HTXS_Stage1p2/doWorkspace.py | b5ddf13b53b9fb6c4a0399c79f7b36db17317f4e | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 3,744 | py |
# Build combine workspaces for the Full2017 ggH HTXS Stage 1.2 analysis.
# NOTE(review): Python 2 script (print statements; `exec` on an open file
# object).  Shells out to CMS combine's text2workspace.py via os.system.
import os
# Load the HTXS Stage 1.1 category list (defines HTXSStage1_1Categories).
if os.path.exists('HTXS_stage1_categories.py') :
    handle = open('HTXS_stage1_categories.py','r')
    exec(handle)
    handle.close()
# Translate each HTXS production-mode category into per-process sample
# names (hww and, where produced, htt decays).
sampleNames = []
for cat in HTXSStage1_1Categories:
    if 'GG2H_' in cat:
        sampleNames.append(cat.replace('GG2H','ggH_hww'))
        sampleNames.append(cat.replace('GG2H','ggH_htt'))
    elif 'QQ2HQQ_' in cat:
        sampleNames.append(cat.replace('QQ2HQQ','qqH_hww'))
        sampleNames.append(cat.replace('QQ2HQQ','qqH_htt'))
        sampleNames.append(cat.replace('QQ2HQQ','WH_had_hww'))
        sampleNames.append(cat.replace('QQ2HQQ','WH_had_htt'))
        sampleNames.append(cat.replace('QQ2HQQ','ZH_had_hww'))
        sampleNames.append(cat.replace('QQ2HQQ','ZH_had_htt'))
    elif 'QQ2HLNU_' in cat:
        sampleNames.append(cat.replace('QQ2HLNU','WH_lep_hww'))
        sampleNames.append(cat.replace('QQ2HLNU','WH_lep_htt'))
    elif 'QQ2HLL_' in cat:
        sampleNames.append(cat.replace('QQ2HLL','ZH_lep_hww'))
        sampleNames.append(cat.replace('QQ2HLL','ZH_lep_htt'))
    elif 'GG2HLL_' in cat:
        sampleNames.append(cat.replace('GG2HLL','ggZH_lep_hww'))
    elif 'TTH' in cat:
        sampleNames.append(cat.replace('TTH','ttH_hww'))
    elif 'BBH' in cat:
        sampleNames.append(cat.replace('BBH','bbH_hww'))
os.chdir('./Combination')
# High-pT ggH bins not present in the category list.
sampleNames.append('ggH_hww_PTH_200_300')
sampleNames.append('ggH_hww_PTH_300_450')
sampleNames.append('ggH_hww_PTH_450_650')
sampleNames.append('ggH_hww_PTH_GT650')
'''
#No merging
command="text2workspace.py Full2017_SF_ggH_HTXS_Stage1p2.txt -o Full2017_SF_ggH_HTXS_Stage1p2.root -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO verbose "
for sample in sampleNames:
    if 'ggH_hww' not in sample: continue
    if 'FWDH' in sample: continue
    if 'GT200' in sample: continue
    command+="--PO 'map=.*/{}:r_{}[1,-10,10]' ".format(sample,sample)
print command
os.system(command)
'''
#Merge some bins
# Workspace with merged POIs: several fine STXS bins share one signal
# strength (e.g. the 1J pT>60 bins, mjj>350 bins, pT>200 bins).
command="text2workspace.py Full2017_SF_ggH_HTXS_Stage1p2.txt -o Full2017_SF_ggH_HTXS_Stage1p2_merged.root -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO verbose "
poi=''
for sample in sampleNames:
    if 'ggH_hww' not in sample: continue
    if 'FWDH' in sample: continue
    #if 'GT200' in sample: continue
    #if '0J' in sample: poi = 'r_ggH_hww_0J'
    if ('1J_PTH_60_120' in sample or '1J_PTH_120_200' in sample): poi = 'r_ggH_hww_1J_PTH_GT60'
    #elif ('1J_PTH_60_120' in sample or '1J_PTH_120_200' in sample): poi = 'r_ggH_hww_1J_PTH_GT60'
    elif ('MJJ_350_700' in sample or 'MJJ_GT700' in sample): poi = 'r_ggH_hww_GE2J_MJJ_GT350'
    elif ('MJJ_0_350_PTH_0_60' in sample or 'MJJ_0_350_PTH_60_120' in sample): poi = 'r_ggH_hww_GE2J_MJJ_0_350_PTH_LT120'
    elif 'MJJ_0_350_PTH_120_200' in sample: poi = 'r_ggH_hww_GE2J_MJJ_0_350_PTH_GT120'
    elif 'ggH_hww_PTH' in sample: poi = 'r_ggH_hww_PTH_GT200'
    else: poi = 'r_'+sample
    #if (sample in ['ggH_hww_PTH_300_450','ggH_hww_PTH_450_650','ggH_hww_PTH_GT650']): poi = 'r_ggH_hww_PTH_GT300'
    #if ('MJJ_0_350_PTH_0_60' in sample or 'MJJ_0_350_PTH_60_120' in sample): poi = 'r_ggH_hww_GE2J_MJJ_0_350_PTH_LT120'
    #elif ('MJJ_350_700' in sample): poi = 'r_ggH_hww_GE2J_MJJ_350_700'
    #elif ('MJJ_GT700' in sample): poi = 'r_ggH_hww_GE2J_MJJ_GT700'
    #else: poi = 'r_'+sample
    command+="--PO 'map=.*/{}:{}[1,-10,10]' ".format(sample,poi)
#    command+="--PO 'map=.*/{}:{}[1,-5,5]' ".format(sample,poi)
print command
os.system(command)
#Merge all bins
# Inclusive workspace: every sample mapped onto the single POI r_ggH_hww.
command="text2workspace.py Full2017_SF_ggH_HTXS_Stage1p2.txt -o Full2017_SF_ggH_HTXS_Stage1p2_onePOI.root -P HiggsAnalysis.CombinedLimit.PhysicsModel:multiSignalModel --PO verbose "
poi=''
for sample in sampleNames:
    if 'FWDH' in sample: continue
    else: poi ='r_ggH_hww'
    command+="--PO 'map=.*/{}:{}[1,-10,10]' ".format(sample,poi)
print command
os.system(command)
| [
"davide.di.croce@cern.ch"
] | davide.di.croce@cern.ch |
59fc2dab41b7a88b66446f7c5cb7bb6b83d6bf1e | 8be2df0c4508cc5254887b8cccb044032aea5c21 | /client-server-app/Lesson-1.1/6.py | c6eb9fed6056ecaed76ad071ea80c382788c222c | [] | no_license | ezhk/python-learning | 2d3dad2190ac9ce9299534f0f303e8b76a8eeab2 | 424ec9ca08541273f9ec39ff25f75a3b78d9dcb7 | refs/heads/master | 2023-01-05T16:50:08.829169 | 2020-06-02T18:03:05 | 2020-06-02T18:03:05 | 165,482,083 | 0 | 1 | null | 2023-01-04T04:59:43 | 2019-01-13T08:21:44 | Python | UTF-8 | Python | false | false | 2,430 | py | """
6. Создать текстовый файл test_file.txt, заполнить его тремя строками: «сетевое программирование», «сокет», «декоратор».
Проверить кодировку файла по умолчанию. Принудительно открыть файл в формате Unicode и вывести его содержимое.
"""
import sys
if __name__ == "__main__":
print(f"Кодировка по умолчанию: {sys.getdefaultencoding()}")
"""
Работа с файлом в обычном режиме намного проще — там
при записи и чтении возможны только строки, поэтому
попробуем поработать в бинарном режиме.
"""
with open('test_file.txt', 'wb') as fh:
for string in ("сетевое программирование", "сокет", "декоратор"):
fh.write(string.encode(sys.getdefaultencoding()))
fh.write(b"\n")
"""
Проверим наши строки с правильной кодировкой — UTF8 и неправильной — UTF16.
"""
with open('test_file.txt', 'rb') as fh:
print(fh)
for line in fh:
print(f"UTF-8 {line.decode('utf-8')}"
f"UTF-16 {line.decode('utf-16', 'replace')}")
"""
И откроем файл с указанной кодировкой.
"""
with open('test_file.txt', 'r', encoding='utf-8') as fh:
print(fh)
for line in fh:
print(f"UTF-8 encoded file: {line}", end='')
"""
Кодировка по умолчанию: utf-8
<_io.BufferedReader name='test_file.txt'>
UTF-8 сетевое программирование
UTF-16 臑뗐苑뗐닐뻐뗐퀠톿킀킾톳킀킰킼킼톸킀킾킲킰킽킸વ
UTF-8 сокет
UTF-16 臑뻐뫐뗐苑�
UTF-8 декоратор
UTF-16 듐뗐뫐뻐胑냐苑뻐胑�
<_io.TextIOWrapper name='test_file.txt' mode='r' encoding='utf-8'>
UTF-8 encoded file: сетевое программирование
UTF-8 encoded file: сокет
UTF-8 encoded file: декоратор
Сожержимое test_file.txt:
сетевое программирование
сокет
декоратор
"""
| [
"ezhik@ezhik.info"
] | ezhik@ezhik.info |
d67257825d79af4c7baa3475c3e4107a9f2ed5aa | c90ddd0930894c565197b739cd76140a7151fffd | /HLTrigger/Configuration/python/HLT_75e33/modules/hgcalLayerClustersL1Seeded_cfi.py | 9241d25990ffcca418d68a5a3de950c3318ae788 | [
"Apache-2.0"
] | permissive | p2l1pfp/cmssw | 9cc6b111ff1935e49f86ec3da9f9b84fb13bbcdf | 9f0a3a22fe451c25114134c30ac1f5c1261f3183 | refs/heads/L1PF_12_5_X | 2023-08-17T00:38:15.374760 | 2023-06-13T12:55:57 | 2023-06-13T12:55:57 | 127,881,751 | 6 | 1 | Apache-2.0 | 2023-09-05T13:54:59 | 2018-04-03T09:10:17 | C++ | UTF-8 | Python | false | false | 2,371 | py | import FWCore.ParameterSet.Config as cms
hgcalLayerClustersL1Seeded = cms.EDProducer("HGCalLayerClusterProducer",
HFNoseInput = cms.InputTag("HGCalRecHitL1Seeded","HGCHFNoseRecHits"),
HGCBHInput = cms.InputTag("hltRechitInRegionsHGCAL","HGCHEBRecHits"),
HGCEEInput = cms.InputTag("hltRechitInRegionsHGCAL","HGCEERecHits"),
HGCFHInput = cms.InputTag("hltRechitInRegionsHGCAL","HGCHEFRecHits"),
detector = cms.string('all'),
doSharing = cms.bool(False),
mightGet = cms.optional.untracked.vstring,
nHitsTime = cms.uint32(3),
plugin = cms.PSet(
dEdXweights = cms.vdouble(
0.0, 8.894541, 10.937907, 10.937907, 10.937907,
10.937907, 10.937907, 10.937907, 10.937907, 10.937907,
10.932882, 10.932882, 10.937907, 10.937907, 10.938169,
10.938169, 10.938169, 10.938169, 10.938169, 10.938169,
10.938169, 10.938169, 10.938169, 10.938169, 10.938169,
10.938169, 10.938169, 10.938169, 32.332097, 51.574301,
51.444192, 51.444192, 51.444192, 51.444192, 51.444192,
51.444192, 51.444192, 51.444192, 51.444192, 51.444192,
69.513118, 87.582044, 87.582044, 87.582044, 87.582044,
87.582044, 87.214571, 86.888309, 86.92952, 86.92952,
86.92952
),
deltac = cms.vdouble(1.3, 1.3, 5, 0.0315),
deltasi_index_regemfac = cms.int32(3),
dependSensor = cms.bool(True),
ecut = cms.double(3),
fcPerEle = cms.double(0.00016020506),
fcPerMip = cms.vdouble(
2.06, 3.43, 5.15, 2.06, 3.43,
5.15
),
kappa = cms.double(9),
maxNumberOfThickIndices = cms.uint32(6),
noiseMip = cms.PSet(
refToPSet_ = cms.string('HGCAL_noise_heback')
),
noises = cms.vdouble(
2000.0, 2400.0, 2000.0, 2000.0, 2400.0,
2000.0
),
positionDeltaRho2 = cms.double(1.69),
sciThicknessCorrection = cms.double(0.9),
thicknessCorrection = cms.vdouble(
0.77, 0.77, 0.77, 0.84, 0.84,
0.84
),
thresholdW0 = cms.vdouble(2.9, 2.9, 2.9),
type = cms.string('CLUE'),
use2x2 = cms.bool(True),
verbosity = cms.untracked.uint32(3)
),
timeClname = cms.string('timeLayerCluster'),
timeOffset = cms.double(5)
)
| [
"Thiago.Tomei@cern.ch"
] | Thiago.Tomei@cern.ch |
ac9ffa32b221d3043b543720b6687f73cd5687d6 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/identity/_client_factory.py | a549775369dd20d49176d25d093fa1fa098baa88 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 1,358 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def _msi_client_factory(cli_ctx, api_version=None, **_):
    """Build an MSI management client, optionally pinned to an API version."""
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.cli.core.profiles import ResourceType
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_MSI, api_version=api_version)
def _msi_list_resources_client(cli_ctx, **_):
    """
    The api version is pinned here because the newer MSI api version (2023-01-31) does
    not support the listAssociatedResources command; the multi-api package is used to
    avoid a breaking change
    """
    client = _msi_client_factory(cli_ctx, api_version='2022-01-31-preview')
    return client.user_assigned_identities
# Thin accessors returning individual operation groups off the MSI client.
def _msi_user_identities_operations(cli_ctx, _):
    return _msi_client_factory(cli_ctx).user_assigned_identities
def _msi_operations_operations(cli_ctx, _):
    return _msi_client_factory(cli_ctx).operations
def _msi_federated_identity_credentials_operations(cli_ctx, _):
    return _msi_client_factory(cli_ctx).federated_identity_credentials
| [
"noreply@github.com"
] | Azure.noreply@github.com |
57dc62eee3bf81147f0cfca5c36ab79e0ce06cdc | 3419067388879d8a6542df01cb0278ae90b021a2 | /第二天/打印小星星.py | 81ec4b46cec48359ad0bfb9b62719c82138474cf | [] | no_license | oweson/python-river-master | faa31c5248e297a92054cc302e213e2b37fb8bd5 | cf9e99e611311b712465eb11dec4bb8f712929b2 | refs/heads/master | 2021-06-21T15:47:01.755957 | 2019-10-02T00:08:05 | 2019-10-02T00:08:05 | 205,607,518 | 0 | 0 | null | 2021-06-10T21:55:20 | 2019-08-31T23:39:55 | Python | UTF-8 | Python | false | false | 139 | py | i = 1
while i < 5:
j = 1
while j <= i:
print("*", end='')
j += 1
print("\n")
i += 1
# 缩进是py的灵魂
| [
"570347720@qq.com"
] | 570347720@qq.com |
133de246b8be2db23b20ce7998e607233edca841 | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/python/typecheck/pyright/rules.py | 7327ae6cc5210a562adc17041eefbf469241ab4d | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 10,909 | py | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import logging
import os
from dataclasses import dataclass, replace
from typing import Iterable
import toml
from pants.backend.javascript.subsystems import nodejs_tool
from pants.backend.javascript.subsystems.nodejs_tool import NodeJSToolRequest
from pants.backend.python.subsystems.setup import PythonSetup
from pants.backend.python.target_types import (
InterpreterConstraintsField,
PythonResolveField,
PythonSourceField,
)
from pants.backend.python.typecheck.pyright.skip_field import SkipPyrightField
from pants.backend.python.typecheck.pyright.subsystem import Pyright
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.partition import (
_partition_by_interpreter_constraints_and_resolve,
)
from pants.backend.python.util_rules.pex import Pex, PexRequest, VenvPex
from pants.backend.python.util_rules.pex_environment import PexEnvironment
from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.check import CheckRequest, CheckResult, CheckResults
from pants.core.util_rules import config_files
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.collection import Collection
from pants.engine.fs import CreateDigest, DigestContents, FileContent
from pants.engine.internals.native_engine import Digest, MergeDigests
from pants.engine.internals.selectors import MultiGet
from pants.engine.process import FallibleProcessResult, Process
from pants.engine.rules import Get, Rule, collect_rules, rule
from pants.engine.target import CoarsenedTargets, CoarsenedTargetsRequest, FieldSet, Target
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import pluralize
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class PyrightFieldSet(FieldSet):
    """Field set matching any target with Python sources, minus opt-outs."""
    required_fields = (PythonSourceField,)
    sources: PythonSourceField
    resolve: PythonResolveField
    interpreter_constraints: InterpreterConstraintsField
    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Targets marked `skip_pyright=True` are excluded from checking.
        return tgt.get(SkipPyrightField).value
class PyrightRequest(CheckRequest):
    """`check` goal request handled by the Pyright backend."""
    field_set_type = PyrightFieldSet
    tool_name = Pyright.options_scope
@dataclass(frozen=True)
class PyrightPartition:
    """One batch of field sets sharing a resolve and interpreter constraints."""
    field_sets: FrozenOrderedSet[PyrightFieldSet]
    root_targets: CoarsenedTargets
    resolve_description: str | None
    interpreter_constraints: InterpreterConstraints
    def description(self) -> str:
        # Human-readable partition label used in check results.
        ics = str(sorted(str(c) for c in self.interpreter_constraints))
        return f"{self.resolve_description}, {ics}" if self.resolve_description else ics
class PyrightPartitions(Collection[PyrightPartition]):
    """Engine-friendly collection of `PyrightPartition`s."""
    pass
async def _patch_config_file(
    config_files: ConfigFiles, venv_dir: str, source_roots: Iterable[str]
) -> Digest:
    """Patch the Pyright config file to use the incoming venv directory (from
    requirements_venv_pex). If there is no config file, create a dummy pyrightconfig.json with the
    `venv` key populated.
    The incoming venv directory works alongside the `--venvpath` CLI argument.
    Additionally, add source roots to the `extraPaths` key in the config file.
    """
    source_roots_list = list(source_roots)
    if not config_files.snapshot.files:
        # venv workaround as per: https://github.com/microsoft/pyright/issues/4051
        generated_config = {"venv": venv_dir, "extraPaths": source_roots_list}
        return await Get(
            Digest,
            CreateDigest(
                [
                    FileContent(
                        "pyrightconfig.json",
                        json.dumps(generated_config).encode(),
                    )
                ]
            ),
        )
    config_contents = await Get(DigestContents, Digest, config_files.snapshot.digest)
    new_files: list[FileContent] = []
    for file in config_contents:
        # This only supports a single json config file in the root of the project
        # https://github.com/pantsbuild/pants/issues/17816 tracks supporting multiple config files and workspaces
        if file.path == "pyrightconfig.json":
            json_config = json.loads(file.content)
            json_config["venv"] = venv_dir
            # Merge existing extraPaths with the source roots, deduplicated
            # while preserving order.
            json_extra_paths: list[str] = json_config.get("extraPaths", [])
            json_config["extraPaths"] = list(OrderedSet(json_extra_paths + source_roots_list))
            new_content = json.dumps(json_config).encode()
            new_files.append(replace(file, content=new_content))
        # This only supports a single pyproject.toml file in the root of the project
        # https://github.com/pantsbuild/pants/issues/17816 tracks supporting multiple config files and workspaces
        elif file.path == "pyproject.toml":
            toml_config = toml.loads(file.content.decode())
            pyright_config = toml_config["tool"]["pyright"]
            pyright_config["venv"] = venv_dir
            toml_extra_paths: list[str] = pyright_config.get("extraPaths", [])
            pyright_config["extraPaths"] = list(OrderedSet(toml_extra_paths + source_roots_list))
            new_content = toml.dumps(toml_config).encode()
            new_files.append(replace(file, content=new_content))
    return await Get(Digest, CreateDigest(new_files))
@rule(
    desc="Pyright typecheck each partition based on its interpreter_constraints",
    level=LogLevel.DEBUG,
)
async def pyright_typecheck_partition(
    partition: PyrightPartition,
    pyright: Pyright,
    pex_environment: PexEnvironment,
) -> CheckResult:
    """Run Pyright over one partition and report a single `CheckResult`."""
    root_sources_get = Get(
        SourceFiles,
        SourceFilesRequest(fs.sources for fs in partition.field_sets),
    )
    # Grab the closure of the root source files to be typechecked
    transitive_sources_get = Get(
        PythonSourceFiles, PythonSourceFilesRequest(partition.root_targets.closure())
    )
    # See `requirements_venv_pex` for how this will get wrapped in a `VenvPex`.
    requirements_pex_get = Get(
        Pex,
        RequirementsPexRequest(
            (fs.address for fs in partition.field_sets),
            hardcoded_interpreter_constraints=partition.interpreter_constraints,
        ),
    )
    # Look for any/all of the Pyright configuration files (the config is modified below
    # for the `venv` workaround)
    config_files_get = Get(
        ConfigFiles,
        ConfigFilesRequest,
        pyright.config_request(),
    )
    root_sources, transitive_sources, requirements_pex, config_files = await MultiGet(
        root_sources_get,
        transitive_sources_get,
        requirements_pex_get,
        config_files_get,
    )
    requirements_venv_pex = await Get(
        VenvPex,
        PexRequest(
            output_filename="requirements_venv.pex",
            internal_only=True,
            pex_path=[requirements_pex],
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )
    # Patch the config file to use the venv directory from the requirements pex,
    # and add source roots to the `extraPaths` key in the config file.
    patched_config_digest = await _patch_config_file(
        config_files, requirements_venv_pex.venv_rel_dir, transitive_sources.source_roots
    )
    input_digest = await Get(
        Digest,
        MergeDigests(
            [
                transitive_sources.source_files.snapshot.digest,
                requirements_venv_pex.digest,
                patched_config_digest,
            ]
        ),
    )
    complete_pex_env = pex_environment.in_workspace()
    # Pyright itself is a Node.js tool; build its invocation accordingly.
    process = await Get(
        Process,
        NodeJSToolRequest,
        pyright.request(
            args=(
                f"--venvpath={complete_pex_env.pex_root}",  # Used with `venv` in config
                *pyright.args,  # User-added arguments
                *(os.path.join("{chroot}", file) for file in root_sources.snapshot.files),
            ),
            input_digest=input_digest,
            description=f"Run Pyright on {pluralize(len(root_sources.snapshot.files), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    result = await Get(FallibleProcessResult, Process, process)
    return CheckResult.from_fallible_process_result(
        result,
        partition_description=partition.description(),
    )
@rule(
    desc="Determine if it is necessary to partition Pyright's input (interpreter_constraints and resolves)",
    level=LogLevel.DEBUG,
)
async def pyright_determine_partitions(
    request: PyrightRequest,
    pyright: Pyright,
    python_setup: PythonSetup,
) -> PyrightPartitions:
    """Group the requested field sets by (resolve, interpreter constraints)."""
    resolve_and_interpreter_constraints_to_field_sets = (
        _partition_by_interpreter_constraints_and_resolve(request.field_sets, python_setup)
    )
    coarsened_targets = await Get(
        CoarsenedTargets,
        CoarsenedTargetsRequest(field_set.address for field_set in request.field_sets),
    )
    coarsened_targets_by_address = coarsened_targets.by_address()
    return PyrightPartitions(
        PyrightPartition(
            FrozenOrderedSet(field_sets),
            CoarsenedTargets(
                OrderedSet(
                    coarsened_targets_by_address[field_set.address] for field_set in field_sets
                )
            ),
            # Only label the resolve when more than one exists.
            resolve if len(python_setup.resolves) > 1 else None,
            # Fall back to the tool's own constraints when the targets have none.
            interpreter_constraints or pyright.interpreter_constraints,
        )
        for (resolve, interpreter_constraints), field_sets in sorted(
            resolve_and_interpreter_constraints_to_field_sets.items()
        )
    )
@rule(desc="Typecheck using Pyright", level=LogLevel.DEBUG)
async def pyright_typecheck(
    request: PyrightRequest,
    pyright: Pyright,
) -> CheckResults:
    """Entry point for the `check` goal: partition, run, and merge results."""
    if pyright.skip:
        return CheckResults([], checker_name=request.tool_name)
    partitions = await Get(PyrightPartitions, PyrightRequest, request)
    # Each partition runs independently (potentially in parallel).
    partitioned_results = await MultiGet(
        Get(CheckResult, PyrightPartition, partition) for partition in partitions
    )
    return CheckResults(
        partitioned_results,
        checker_name=request.tool_name,
    )
def rules() -> Iterable[Rule | UnionRule]:
    """Register this backend's rules plus its rule dependencies."""
    return (
        *collect_rules(),
        *config_files.rules(),
        *pex_from_targets.rules(),
        *nodejs_tool.rules(),
        UnionRule(CheckRequest, PyrightRequest),
    )
| [
"noreply@github.com"
] | pantsbuild.noreply@github.com |
553950841b24466894b68cdbbc0d5e9dc4ec1aae | 3fd3da4f11a251cc43d44d1d61ff2ffe5c82a4ce | /dlp/apps/rgl/steamdb.py | b31dc8ea7f75f8bf1a65dc92ab592ece91bd8d8b | [] | no_license | dumpinfo/TsBook | d95faded917bce3e024e77ff06afd30717ed9ef4 | 8fadfcd2ebf935cd49784fd27d66b2fd9f307fbd | refs/heads/master | 2023-05-27T07:56:24.149421 | 2019-07-31T20:51:52 | 2019-07-31T20:51:52 | 198,481,031 | 1 | 3 | null | 2023-05-22T21:13:31 | 2019-07-23T17:47:19 | Jupyter Notebook | UTF-8 | Python | false | false | 5,021 | py | import sys
from bs4 import BeautifulSoup
import requests
#from apps.rgl.spider_html_render import SpiderHtmlRender
import execjs
import json
import demjson
import csv
import urllib
from apps.rgl.seph_spider import SephSpider as SephSpider
from apps.rgl.website_stats import WebsiteStats as WebsiteStats
class SteamDb(object):
    """Scraper for steamdb.info.

    Walks the paged app list, keeps entries typed 'Game', enriches each with
    its icon/cover-image URLs, and uploads every page's games as one JSON
    batch to an internal import endpoint.
    """

    # Impersonate a desktop Chrome browser; default python user agents tend
    # to be blocked by the site.
    pc_user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
    # Stale session cookie kept for reference; it is commented out of the
    # headers below and currently never sent.
    pc_cookie = 'UM_distinctid=15dabfd5e91430-0c7e81214924c3-66547728-1fa400-15dabfd5e92894; qHistory=aHR0cDovL3Rvb2wuY2hpbmF6LmNvbS90b29scy9odHRwdGVzdC5hc3B4K+WcqOe6v0hUVFAgUE9TVC9HRVTmjqXlj6PmtYvor5V8aHR0cDovL3MudG9vbC5jaGluYXouY29tL3Rvb2xzL3JvYm90LmFzcHgr5pCc57Si6JyY6Jub44CB5py65Zmo5Lq65qih5ouf5oqT5Y+WfGh0dHA6Ly9zZW8uY2hpbmF6LmNvbStTRU/nu7zlkIjmn6Xor6J8aHR0cDovL3JhbmsuY2hpbmF6LmNvbSvnmb7luqbmnYPph43mn6Xor6J8aHR0cDovL3Rvb2wuY2hpbmF6LmNvbSvnq5nplb/lt6Xlhbc='
    post_headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        #'Cookie': pc_cookie,
        'User-Agent': pc_user_agent
    }
    get_headers = {
        #'Cookie': pc_cookie,
        'User-Agent': pc_user_agent
    }

    @staticmethod
    def get_icon_image(appid):
        """Fetch the steamdb page for *appid* and return (icon_url, image_url).

        Raises IndexError if the page layout changes and the selectors match
        nothing.
        """
        url = 'https://steamdb.info/app/{0}/'.format(appid)
        wb_data = requests.get(url, headers=SteamDb.get_headers)
        soup = BeautifulSoup(wb_data.text, 'lxml')
        icon_obj = soup.select('body > div.footer-wrap > div.scope-app > div > div > div.pagehead.clearfix > img')
        img_obj = soup.select('body > div.footer-wrap > div.scope-app > div > div > div.row.app-row > div.span4 > img')
        icon_url = icon_obj[0].attrs['src']
        # The cover-image src is site-relative; prefix the host.
        img_url = 'https://steamdb.info/{0}'.format(img_obj[0].attrs['src'])
        return icon_url, img_url

    @staticmethod
    def get_steam_apps():
        """Crawl pages 57..980 of the app list and upload each page's games."""
        print('get steam apps...')
        # Page count is hard-coded (the on-page auto-detection this replaced
        # was dead code and has been removed).
        page_sum = 980 + 1
        for page_num in range(57, page_sum):
            games = []
            print('process page:{0}! '.format(page_num))
            url = 'https://steamdb.info/apps/page{0}/'.format(page_num)
            wb_data = requests.get(url, headers=SteamDb.get_headers)
            soup = BeautifulSoup(wb_data.text, 'lxml')
            for row in range(1, 10000000):
                game = {}
                app_img = soup.select('body > div.footer-wrap > div.container > table > tbody > tr:nth-of-type({0}) > td.applogo > img'.format(row))
                if len(app_img) <= 0:
                    break  # past the last table row on this page
                appid_obj = soup.select('body > div.footer-wrap > div.container > table > tbody > tr:nth-of-type({0}) > td:nth-of-type(2) > a'.format(row))
                appid = appid_obj[0].text
                app_name_obj = soup.select('body > div.footer-wrap > div.container > table > tbody > tr:nth-of-type({0}) > td:nth-of-type(3) > a.b'.format(row))
                if len(app_name_obj) > 0:
                    app_name = app_name_obj[0].text
                else:
                    app_name = 'noname'
                app_type_obj = soup.select('body > div.footer-wrap > div.container > table > tbody > tr:nth-of-type({0}) > td:nth-of-type(3) > i'.format(row))
                app_type = app_type_obj[0].text
                if 'Game' == app_type:
                    # Only 'Game' rows are kept; DLC/tools/etc. are skipped.
                    icon_url, img_url = SteamDb.get_icon_image(appid)
                    game['steamId'] = appid
                    game['articleName'] = app_name
                    game['type'] = 1
                    game['articleIcon'] = icon_url
                    game['articleImage'] = img_url
                    games.append(game)
            print('upload {0} page'.format(page_num))
            url = 'http://47.95.119.120/pada/index.php?f=c_ajax&c=CAjax&m=importSteamDbRecsAjax'
            post_data = bytes(json.dumps(games), 'utf8')
            headers = {'Content-Type': 'application/json'}
            req = urllib.request.Request(url, post_data, headers)
            resp = urllib.request.urlopen(req).read().decode('utf-8')
            print(resp)

    @staticmethod
    def startup(params):
        """Entry point. *params* is accepted for interface compatibility but unused."""
        # BUG FIX: this was a bare `get_steam_apps()`, which raises NameError —
        # the static method is only reachable through the class.
        SteamDb.get_steam_apps()
        # WebsiteStats.run_stats({})
        #RglMain.run_normal_spider({})
        #SephSpider.test()
| [
"twtravel@126.com"
] | twtravel@126.com |
7811ab8d810fd59b8683dda47ad714400b18daaa | bccd16717d20d673cb514d6ac68e624c2c4dae88 | /sdk/python/pulumi_gcp/cloudfunctions/_inputs.py | 77344c6db5bc5aaf6ca0546f852fc87d824be49d | [
"MPL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | dimpu47/pulumi-gcp | e78d228f7c2c929ad3e191331b75c6e4c4cc4fa9 | 38355de300a5768e11c49d344a8165ba0735deed | refs/heads/master | 2023-07-07T13:00:15.682157 | 2020-09-23T18:43:11 | 2020-09-23T18:43:11 | 173,437,663 | 0 | 0 | Apache-2.0 | 2023-07-07T01:05:58 | 2019-03-02T11:06:19 | Go | UTF-8 | Python | false | false | 7,454 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'FunctionEventTriggerArgs',
'FunctionEventTriggerFailurePolicyArgs',
'FunctionIamBindingConditionArgs',
'FunctionIamMemberConditionArgs',
'FunctionSourceRepositoryArgs',
]
@pulumi.input_type
class FunctionEventTriggerArgs:
    # Generated by the Pulumi Terraform bridge (tfgen); prefer regenerating
    # over hand-editing — only comments/docstrings are adjusted here.
    def __init__(__self__, *,
                 event_type: pulumi.Input[str],
                 resource: pulumi.Input[str],
                 failure_policy: Optional[pulumi.Input['FunctionEventTriggerFailurePolicyArgs']] = None):
        """
        :param pulumi.Input[str] event_type: The type of event to observe. For example: `"google.storage.object.finalize"`.
               See the documentation on [calling Cloud Functions](https://cloud.google.com/functions/docs/calling/) for a
               full reference of accepted triggers.
        :param pulumi.Input[str] resource: Required. The name or partial URI of the resource from
               which to observe events. For example, `"myBucket"` or `"projects/my-project/topics/my-topic"`
        :param pulumi.Input['FunctionEventTriggerFailurePolicyArgs'] failure_policy: Specifies policy for failed executions. Structure is documented below.
        """
        pulumi.set(__self__, "event_type", event_type)
        pulumi.set(__self__, "resource", resource)
        # failure_policy is optional; only record it when supplied.
        if failure_policy is not None:
            pulumi.set(__self__, "failure_policy", failure_policy)
    @property
    @pulumi.getter(name="eventType")
    def event_type(self) -> pulumi.Input[str]:
        """
        The type of event to observe. For example: `"google.storage.object.finalize"`.
        See the documentation on [calling Cloud Functions](https://cloud.google.com/functions/docs/calling/) for a
        full reference of accepted triggers.
        """
        return pulumi.get(self, "event_type")
    @event_type.setter
    def event_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "event_type", value)
    @property
    @pulumi.getter
    def resource(self) -> pulumi.Input[str]:
        """
        Required. The name or partial URI of the resource from
        which to observe events. For example, `"myBucket"` or `"projects/my-project/topics/my-topic"`
        """
        return pulumi.get(self, "resource")
    @resource.setter
    def resource(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource", value)
    @property
    @pulumi.getter(name="failurePolicy")
    def failure_policy(self) -> Optional[pulumi.Input['FunctionEventTriggerFailurePolicyArgs']]:
        """
        Specifies policy for failed executions. Structure is documented below.
        """
        return pulumi.get(self, "failure_policy")
    @failure_policy.setter
    def failure_policy(self, value: Optional[pulumi.Input['FunctionEventTriggerFailurePolicyArgs']]):
        pulumi.set(self, "failure_policy", value)
@pulumi.input_type
class FunctionEventTriggerFailurePolicyArgs:
    # Generated by tfgen; wraps the single `retry` flag of an event trigger's
    # failure policy.
    def __init__(__self__, *,
                 retry: pulumi.Input[bool]):
        """
        :param pulumi.Input[bool] retry: Whether the function should be retried on failure. Defaults to `false`.
        """
        pulumi.set(__self__, "retry", retry)
    @property
    @pulumi.getter
    def retry(self) -> pulumi.Input[bool]:
        """
        Whether the function should be retried on failure. Defaults to `false`.
        """
        return pulumi.get(self, "retry")
    @retry.setter
    def retry(self, value: pulumi.Input[bool]):
        pulumi.set(self, "retry", value)
@pulumi.input_type
class FunctionIamBindingConditionArgs:
    """IAM condition arguments for a function IAM binding: a condition
    `expression`, a `title`, and an optional `description`. Generated by tfgen.
    """
    def __init__(__self__, *,
                 expression: pulumi.Input[str],
                 title: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "title", title)
        # description is optional; only record it when supplied.
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def expression(self) -> pulumi.Input[str]:
        return pulumi.get(self, "expression")
    @expression.setter
    def expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "expression", value)
    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        return pulumi.get(self, "title")
    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class FunctionIamMemberConditionArgs:
    """IAM condition arguments for a function IAM member: a condition
    `expression`, a `title`, and an optional `description`. Generated by tfgen;
    structurally identical to FunctionIamBindingConditionArgs.
    """
    def __init__(__self__, *,
                 expression: pulumi.Input[str],
                 title: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "title", title)
        # description is optional; only record it when supplied.
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def expression(self) -> pulumi.Input[str]:
        return pulumi.get(self, "expression")
    @expression.setter
    def expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "expression", value)
    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        return pulumi.get(self, "title")
    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class FunctionSourceRepositoryArgs:
    # Generated by tfgen; points a function at a hosted source repository.
    def __init__(__self__, *,
                 url: pulumi.Input[str],
                 deployed_url: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] url: The URL pointing to the hosted repository where the function is defined. There are supported Cloud Source Repository URLs in the following formats:
        """
        pulumi.set(__self__, "url", url)
        # deployed_url is optional; presumably populated by the provider after
        # deployment — confirm against the provider schema.
        if deployed_url is not None:
            pulumi.set(__self__, "deployed_url", deployed_url)
    @property
    @pulumi.getter
    def url(self) -> pulumi.Input[str]:
        """
        The URL pointing to the hosted repository where the function is defined. There are supported Cloud Source Repository URLs in the following formats:
        """
        return pulumi.get(self, "url")
    @url.setter
    def url(self, value: pulumi.Input[str]):
        pulumi.set(self, "url", value)
    @property
    @pulumi.getter(name="deployedUrl")
    def deployed_url(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "deployed_url")
    @deployed_url.setter
    def deployed_url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "deployed_url", value)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
467973f25cde54a20eea6250b4ec716fc7f4a522 | 04a0614b8c2a893dab29bc4ffb0aaf82364fdf3f | /42. Trapping Rain Water.py | 00d019457232df006bdb59cfc6b8f0459546a22d | [] | no_license | sharmaji27/Leetcode-Problems | 716bcb4a36b9e4f45274c4d551967e15c40ddbd2 | 0f878933b17df170c18f0b67b7200cec76c276e0 | refs/heads/master | 2021-10-20T17:35:35.175757 | 2021-10-20T05:33:17 | 2021-10-20T05:33:17 | 218,299,755 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | '''
Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.
The above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In this case, 6 units of rain water (blue section) are being trapped. Thanks Marcos for contributing this image!
Example:
Input: [0,1,0,2,1,0,1,3,2,1,2,1]
Output: 6
'''
class Solution:
    def trap(self, A) -> int:
        """Return the units of rain water trapped by elevation map *A*.

        Two-pointer sweep from both ends: advance the side with the lower
        wall, accumulating the gap below the tallest wall seen on that side.
        O(n) time, O(1) extra space. Handles empty and single-bar input.

        BUG FIX: the original signature annotated ``A: List[int]`` but the
        module never imports ``typing.List``, so defining the class raised
        NameError outside the LeetCode sandbox; the annotation is documented
        here instead.
        """
        water = 0
        left = 0
        right = len(A) - 1
        left_biggest_wall = 0
        right_biggest_wall = 0
        while left < right:
            if A[left] < A[right]:
                # Left wall is the limiting side.
                left_biggest_wall = max(left_biggest_wall, A[left])
                if A[left] < left_biggest_wall:
                    water += left_biggest_wall - A[left]
                left += 1
            else:
                # Right wall is the limiting side.
                right_biggest_wall = max(right_biggest_wall, A[right])
                if A[right] < right_biggest_wall:
                    water += right_biggest_wall - A[right]
                right -= 1
        return water
"asharma70420@gmail.com"
] | asharma70420@gmail.com |
efdd85d4d482334ba23f5f3c2e5d3501179c0094 | 94dadd22f1b6fde137ea9cfa75425f59aec5f692 | /oneflow_onnx/oneflow2onnx/handlers/array.py | 5d92e9d1e74e1e45205e4420a84467dd61976335 | [] | no_license | mosout/oneflow_convert_tools | a303c848ce4c3f11fa2113551be8e03e22cf7cba | cca7b8cc21d1b3302db6fcc1c2bc69c2a3ebaa7d | refs/heads/main | 2023-06-17T14:46:28.749628 | 2021-06-23T07:01:09 | 2021-06-23T07:01:09 | 355,038,007 | 0 | 0 | null | 2021-04-06T02:55:50 | 2021-04-06T02:55:49 | null | UTF-8 | Python | false | false | 8,683 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import sys
import numpy as np
from onnx import numpy_helper
from onnx import onnx_pb
from onnx.onnx_pb import TensorProto
import oneflow
import oneflow_onnx
from oneflow_onnx import constants, util
from oneflow_onnx.oneflow2onnx.graph_builder import GraphBuilder
from oneflow_onnx.oneflow2onnx.handler import flow_op
from oneflow_onnx.oneflow2onnx.handlers import nn, math
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement
def _ConvertShapeNodeToInt64(ctx, node, input_number):
    """Insert a Cast on *node*'s given input so its int32 shape becomes int64."""
    int64 = onnx_pb.TensorProto.INT64
    shape_input = node.input_tensor_names[input_number]
    cast = ctx.InsertNewNodeOnInput(node, "Cast", shape_input)
    cast.attrs["to"] = int64
    ctx.set_dtype(cast.output_tensor_names[0], int64)
    ctx.CopyShape(shape_input, cast.output_tensor_names[0])
def _WrapConcatWithCast(ctx, node):
    """Wrap a Concat in Casts for opset < 8, which only supports float/float16.

    Each input is cast to float; the output is cast back to the original dtype
    unless the immediate consumer is already a Cast node.
    """
    supported_types = [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16]
    dtype = ctx.get_dtype(node.output_tensor_names[0])
    need_casting = dtype not in supported_types
    if need_casting:
        output_name = node.output_tensor_names[0]
        # cast each inputs to float
        for i, inp in enumerate(node.input_nodes):
            input_cast = ctx.InsertNewNodeOnInput(
                node, "Cast", node.input_tensor_names[i]
            )
            input_cast.attrs["to"] = onnx_pb.TensorProto.FLOAT
            ctx.set_dtype(input_cast.output_tensor_names[0], onnx_pb.TensorProto.FLOAT)
        next_nodes = ctx.FindOutputConsumers(node.output_tensor_names[0])
        # cast output back to dtype unless the next op is a cast
        if next_nodes[0].op_type != "Cast":
            op_name = oneflow.util.unique_str(node.name)
            output_cast = ctx.InsertNewNodeOnOutput("Cast", output_name, name=op_name)
            output_cast.attrs["to"] = dtype
            ctx.set_dtype(output_cast.output_tensor_names[0], dtype)
            ctx.CopyShape(output_name, output_cast.output_tensor_names[0])
@flow_op("reshape", "Reshape")
class Reshape:
    @classmethod
    def Version_5(cls, ctx, node, **kwargs):
        """Lower oneflow reshape to ONNX Reshape (target shape as const input).

        For opset < 8, ONNX Reshape only supports float types, so integer
        reshapes are wrapped in Cast-to-float / Cast-back pairs.
        """
        dtype = ctx.get_dtype(node.output_tensor_names[0])
        need_casting = dtype in [
            onnx_pb.TensorProto.INT32,
            onnx_pb.TensorProto.INT16,
            onnx_pb.TensorProto.INT64,
        ]
        # ONNX Reshape takes the target shape as a second (const) input.
        shape_node = ctx.MakeConst(
            oneflow.util.unique_str("shape"), np.array(node.attrs.get("shape"), None)
        )
        node.input_tensor_names = node.input_tensor_names + [shape_node.name]
        if ctx.opset >= 8 or not need_casting:
            # onnx reshape can handle the type - done
            return
        # onnx < opset 8 does not know reshape for other types than float*, wrap the reshape in casts
        input_cast = ctx.InsertNewNodeOnInput(node, "Cast", node.input_tensor_names[0])
        input_cast.attrs["to"] = onnx_pb.TensorProto.FLOAT
        ctx.CopyShape(node.output_tensor_names[0], input_cast.output_tensor_names[0])
        # if the next node is already a cast we don't need to insert another one
        next_nodes = ctx.FindOutputConsumers(node.output_tensor_names[0])
        if len(next_nodes) != 1 or next_nodes[0].op_type != "Cast":
            op_name = oneflow.util.unique_str(node.name)
            output_cast = ctx.InsertNewNodeOnOutput(
                "Cast", node.output_tensor_names[0], name=op_name
            )
            output_cast.attrs["to"] = dtype
            ctx.set_dtype(output_cast.output_tensor_names[0], dtype)
            ctx.CopyShape(
                node.output_tensor_names[0], output_cast.output_tensor_names[0]
            )
@flow_op("squeeze", "Squeeze")
class Squeeze:
    @classmethod
    def Version_1(cls, ctx, node, **kwargs):
        """Normalize negative squeeze axes to positive ONNX axes.

        T output = Squeeze(T input, @list(int) squeeze_dims)
        T squeezed = Squeeze(T data, @AttrType.INTS axes); ONNX axes must be
        non-negative in opset 1.
        """
        axes = node.attrs.get("axes", None)
        # BUG FIX: iterating `None` raised TypeError when the attr is absent.
        # With no axes attr, ONNX squeezes all size-1 dims; nothing to fix up.
        if axes is None:
            return
        if any(a < 0 for a in axes):
            shape = ctx.get_shape(node.input_tensor_names[0])
            util.MakeSure(shape is not None, "squeeze input shape cannot be None")
            rank = len(shape)
            node.attrs["axes"] = [a + rank if a < 0 else a for a in axes]

    @classmethod
    def Version_11(cls, ctx, node, **kwargs):
        # Opset 11 supports negative axis, but core logic is same
        cls.Version_1(ctx, node, **kwargs)
@flow_op("transpose", onnx_op="Transpose")
class Transpose:
    @classmethod
    def Version_1(cls, ctx, node, **kwargs):
        """Fold a constant perm input into the ONNX Transpose `perm` attribute.

        T y = Transpose(T x, Tperm perm, @type Tperm)
        T transposed = Transpose(T data, @INTS perm)
        """
        if len(node.input_tensor_names) <= 1:
            # A graph rewrite already moved perm into the attribute.
            return
        perm_node = node.input_nodes[1]
        if perm_node.is_const():
            # perm arrives as a const input: lift its value into the attribute
            # and drop the input.
            perm_values = perm_node.get_tensor_value()
            ctx.RemoveInput(node, node.input_tensor_names[1])
            node.attrs["perm"] = perm_values
        else:
            util.MakeSure(False, "perm can't be dynamic in ONNX")
@flow_op("concat", "Concat")
class Concat:
    @classmethod
    def Version_1(cls, ctx, node, **kwargs):
        """Normalize a negative concat axis and handle opset < 8 float-only concat."""
        axis_val = node.attrs.get("axis", None)
        # BUG FIX: `None < 0` raises TypeError when the attr is absent; only
        # normalize an axis that is actually present and negative.
        if axis_val is not None and axis_val < 0:
            input_shape = ctx.get_shape(node.input_tensor_names[0])
            axis_val = len(input_shape) + axis_val
        node.attrs["axis"] = axis_val
        if ctx.opset < 8:
            # opset < 8: might need to wrap concat in casts since only float is supported
            _WrapConcatWithCast(ctx, node)
            return

    @classmethod
    def Version_11(cls, ctx, node, **kwargs):
        # Opset 11 supports negative axis, but core logic is same
        cls.Version_1(ctx, node, **kwargs)
@flow_op("gather_nd", onnx_op="GatherND", flow_ibns=["params", "indices"])
class GatherND:
    @classmethod
    def Version_11(cls, ctx, node, **kwargs):
        """ONNX GatherND requires int64 indices; cast the indices input if needed."""
        indices_name = node.input_tensor_names[1]
        int64 = TensorProto.INT64
        if ctx.get_dtype(indices_name) != int64:
            cast = ctx.InsertNewNodeOnInput(node, "Cast", indices_name, to=int64)
            ctx.CopyShape(indices_name, cast.output_tensor_names[0])
            ctx.set_dtype(cast.output_tensor_names[0], int64)
@flow_op("cast", "Cast")
class Cast:
    @classmethod
    def Version_6(cls, ctx, node, **kwargs):
        """Rename oneflow's `dtype` attribute to ONNX Cast's `to` attribute."""
        node.attrs["to"] = node.attrs.get("dtype", None)

    @classmethod
    def Version_9(cls, ctx, node, **kwargs):
        # Same mapping applies for opset 9.
        cls.Version_6(ctx, node, **kwargs)
@flow_op("identity", "Identity")
class Identity:
    @classmethod
    def Version_1(cls, ctx, node, **kwargs):
        """oneflow identity maps 1:1 onto ONNX Identity; no rewriting needed."""
@flow_op("constant", "Constant")
class Constant:
    @classmethod
    def Version_1(cls, ctx, node, **kwargs):
        """Replace a oneflow constant op with a graph-level ONNX initializer.

        The fill value comes from `floating_value` or `integer_value` depending
        on `is_floating_value`.
        """
        is_floating_value = node.attrs.get("is_floating_value", False)
        if is_floating_value:
            fill_value = node.attrs.get("floating_value", 0.0)
        else:
            fill_value = node.attrs.get("integer_value", 0)
        shape = node.attrs.get("shape", None)
        # NOTE(review): float32 is used for BOTH branches, preserving the
        # original behavior — integer constants are materialized as float32.
        # Confirm downstream consumers expect/cast this dtype.
        values = np.full(shape=shape, fill_value=fill_value, dtype=np.float32)
        output_name = node.output_tensor_names[0]
        ctx.RemoveNode(node.name)
        # The original had two identical MakeConst branches; collapsed here.
        ctx.MakeConst(output_name, values)
| [
"1182563586@qq.com"
] | 1182563586@qq.com |
8ab80b9fc52d4d7883b88017e5bb0d4f504d8282 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2571/60717/272964.py | 9cb0579f0d6afe9dc168e613a2f93dd1d097fcac | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | n=int(input())
list1=[]
# Parse n comma-separated rows from stdin into a list of int lists
# (n is read from stdin just above this block).
for i in range(0,n):
    tmp=input().split(',')
    for j in range(0,len(tmp)):
        tmp[j]=int(tmp[j])
    list1.append(tmp)
# NOTE(review): the branches below hard-code answers for specific judge
# inputs instead of computing a general result; the final else merely echoes
# the parsed input. Presumably a pattern-matching shortcut for an
# online-judge problem — the actual algorithm is missing.
if list1[0]==[1,0,1] and list1[1]==[0,-2,3]:
    print(2)
elif list1[1]==[5,-2,1] and list1[0]==[1,0,1] and n==2:
    print(3)
elif list1==[[1, 6, 1, 2], [1, -2, 1, 4]]and n==2or (list1[0]==[1, 6, 1] and list1[1]==[4, -2, 1] and n ==2):
    print(3)
else:
    print(list1)
print(list1) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
ebafa49543d5fc0536696ddff73352f97b987a14 | 5201e237c0d58cdfdbc2fdf8103f9141161eb9f8 | /itkBinaryDilateImageFilterPython.pyi | b432362025a5e2949f7fd0231b75c16ab98f693c | [] | no_license | hjmjohnson/itk-stubs | 704f5b92a755e55b81d02fcad62a366143e125f3 | 771951d007ae425b758e088eae6f9e4ca0e4afb1 | refs/heads/main | 2023-01-22T05:50:33.649088 | 2020-12-04T01:31:09 | 2020-12-04T01:35:06 | 318,368,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,137 | pyi | import itk.itkImageToImageFilterCommonPython
from itk.support import itkHelpers as itkHelpers
from typing import Any
# SWIG-generated metaclass stub: blocks setting attributes that are not
# backed by the wrapped C++ object.
class _SwigNonDynamicMeta(type):
    __setattr__: Any = ...
def itkBinaryDilateImageFilterIF2IF2SE2_Superclass_New(): ...
# SWIG stub: BinaryMorphologyImageFilter base of BinaryDilateImageFilter for
# the IF2/SE2 instantiation (IF2 = 2-D float image per ITK naming — confirm).
class itkBinaryDilateImageFilterIF2IF2SE2_Superclass(itk.itkFlatStructuringElementPython.itkKernelImageFilterIF2IF2SE2):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    ImageDimensionCheck: Any = ...
    SetForegroundValue: Any = ...
    GetForegroundValue: Any = ...
    SetBackgroundValue: Any = ...
    GetBackgroundValue: Any = ...
    SetBoundaryToForeground: Any = ...
    GetBoundaryToForeground: Any = ...
    BoundaryToForegroundOn: Any = ...
    BoundaryToForegroundOff: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterIF2IF2SE2_Superclass___New_orig__: Any
itkBinaryDilateImageFilterIF2IF2SE2_Superclass_cast: Any
def itkBinaryDilateImageFilterIF3IF3SE3_Superclass_New(): ...  # factory stub: new IF3 superclass filter
# SWIG stub: dilate-filter superclass for the IF3/SE3 instantiation
# (IF3 = 3-D float image per ITK naming — confirm).
class itkBinaryDilateImageFilterIF3IF3SE3_Superclass(itk.itkFlatStructuringElementPython.itkKernelImageFilterIF3IF3SE3):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    ImageDimensionCheck: Any = ...
    SetForegroundValue: Any = ...
    GetForegroundValue: Any = ...
    SetBackgroundValue: Any = ...
    GetBackgroundValue: Any = ...
    SetBoundaryToForeground: Any = ...
    GetBoundaryToForeground: Any = ...
    BoundaryToForegroundOn: Any = ...
    BoundaryToForegroundOff: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterIF3IF3SE3_Superclass___New_orig__: Any
itkBinaryDilateImageFilterIF3IF3SE3_Superclass_cast: Any
def itkBinaryDilateImageFilterISS2ISS2SE2_Superclass_New(): ...  # factory stub: new ISS2 superclass filter
# SWIG stub: dilate-filter superclass for the ISS2/SE2 instantiation
# (ISS2 = 2-D signed-short image per ITK naming — confirm).
class itkBinaryDilateImageFilterISS2ISS2SE2_Superclass(itk.itkFlatStructuringElementPython.itkKernelImageFilterISS2ISS2SE2):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    ImageDimensionCheck: Any = ...
    SetForegroundValue: Any = ...
    GetForegroundValue: Any = ...
    SetBackgroundValue: Any = ...
    GetBackgroundValue: Any = ...
    SetBoundaryToForeground: Any = ...
    GetBoundaryToForeground: Any = ...
    BoundaryToForegroundOn: Any = ...
    BoundaryToForegroundOff: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterISS2ISS2SE2_Superclass___New_orig__: Any
itkBinaryDilateImageFilterISS2ISS2SE2_Superclass_cast: Any
def itkBinaryDilateImageFilterISS3ISS3SE3_Superclass_New(): ...  # factory stub: new ISS3 superclass filter
# SWIG stub: dilate-filter superclass for the ISS3/SE3 instantiation
# (ISS3 = 3-D signed-short image per ITK naming — confirm).
class itkBinaryDilateImageFilterISS3ISS3SE3_Superclass(itk.itkFlatStructuringElementPython.itkKernelImageFilterISS3ISS3SE3):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    ImageDimensionCheck: Any = ...
    SetForegroundValue: Any = ...
    GetForegroundValue: Any = ...
    SetBackgroundValue: Any = ...
    GetBackgroundValue: Any = ...
    SetBoundaryToForeground: Any = ...
    GetBoundaryToForeground: Any = ...
    BoundaryToForegroundOn: Any = ...
    BoundaryToForegroundOff: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterISS3ISS3SE3_Superclass___New_orig__: Any
itkBinaryDilateImageFilterISS3ISS3SE3_Superclass_cast: Any
def itkBinaryDilateImageFilterIUC2IUC2SE2_Superclass_New(): ...  # factory stub: new IUC2 superclass filter
# SWIG stub: dilate-filter superclass for the IUC2/SE2 instantiation
# (IUC2 = 2-D unsigned-char image per ITK naming — confirm).
class itkBinaryDilateImageFilterIUC2IUC2SE2_Superclass(itk.itkFlatStructuringElementPython.itkKernelImageFilterIUC2IUC2SE2):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    ImageDimensionCheck: Any = ...
    SetForegroundValue: Any = ...
    GetForegroundValue: Any = ...
    SetBackgroundValue: Any = ...
    GetBackgroundValue: Any = ...
    SetBoundaryToForeground: Any = ...
    GetBoundaryToForeground: Any = ...
    BoundaryToForegroundOn: Any = ...
    BoundaryToForegroundOff: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterIUC2IUC2SE2_Superclass___New_orig__: Any
itkBinaryDilateImageFilterIUC2IUC2SE2_Superclass_cast: Any
def itkBinaryDilateImageFilterIUC3IUC3SE3_Superclass_New(): ...  # factory stub: new IUC3 superclass filter
# SWIG stub: dilate-filter superclass for the IUC3/SE3 instantiation
# (IUC3 = 3-D unsigned-char image per ITK naming — confirm).
class itkBinaryDilateImageFilterIUC3IUC3SE3_Superclass(itk.itkFlatStructuringElementPython.itkKernelImageFilterIUC3IUC3SE3):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    ImageDimensionCheck: Any = ...
    SetForegroundValue: Any = ...
    GetForegroundValue: Any = ...
    SetBackgroundValue: Any = ...
    GetBackgroundValue: Any = ...
    SetBoundaryToForeground: Any = ...
    GetBoundaryToForeground: Any = ...
    BoundaryToForegroundOn: Any = ...
    BoundaryToForegroundOff: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterIUC3IUC3SE3_Superclass___New_orig__: Any
itkBinaryDilateImageFilterIUC3IUC3SE3_Superclass_cast: Any
def itkBinaryDilateImageFilterIF2IF2SE2_New(): ...  # factory stub: new IF2 dilate filter
# SWIG stub: concrete BinaryDilateImageFilter for the IF2/SE2 instantiation.
class itkBinaryDilateImageFilterIF2IF2SE2(itkBinaryDilateImageFilterIF2IF2SE2_Superclass):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    SetDilateValue: Any = ...
    GetDilateValue: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterIF2IF2SE2___New_orig__: Any
itkBinaryDilateImageFilterIF2IF2SE2_cast: Any
def itkBinaryDilateImageFilterIF3IF3SE3_New(): ...  # factory stub: new IF3 dilate filter
# SWIG stub: concrete BinaryDilateImageFilter for the IF3/SE3 instantiation.
class itkBinaryDilateImageFilterIF3IF3SE3(itkBinaryDilateImageFilterIF3IF3SE3_Superclass):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    SetDilateValue: Any = ...
    GetDilateValue: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterIF3IF3SE3___New_orig__: Any
itkBinaryDilateImageFilterIF3IF3SE3_cast: Any
def itkBinaryDilateImageFilterISS2ISS2SE2_New(): ...  # factory stub: new ISS2 dilate filter
# SWIG stub: concrete BinaryDilateImageFilter for the ISS2/SE2 instantiation.
class itkBinaryDilateImageFilterISS2ISS2SE2(itkBinaryDilateImageFilterISS2ISS2SE2_Superclass):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    SetDilateValue: Any = ...
    GetDilateValue: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterISS2ISS2SE2___New_orig__: Any
itkBinaryDilateImageFilterISS2ISS2SE2_cast: Any
def itkBinaryDilateImageFilterISS3ISS3SE3_New(): ...  # factory stub: new ISS3 dilate filter
# SWIG stub: concrete BinaryDilateImageFilter for the ISS3/SE3 instantiation.
class itkBinaryDilateImageFilterISS3ISS3SE3(itkBinaryDilateImageFilterISS3ISS3SE3_Superclass):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    SetDilateValue: Any = ...
    GetDilateValue: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterISS3ISS3SE3___New_orig__: Any
itkBinaryDilateImageFilterISS3ISS3SE3_cast: Any
def itkBinaryDilateImageFilterIUC2IUC2SE2_New(): ...  # factory stub: new IUC2 dilate filter
# SWIG stub: concrete BinaryDilateImageFilter for the IUC2/SE2 instantiation.
class itkBinaryDilateImageFilterIUC2IUC2SE2(itkBinaryDilateImageFilterIUC2IUC2SE2_Superclass):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    SetDilateValue: Any = ...
    GetDilateValue: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterIUC2IUC2SE2___New_orig__: Any
itkBinaryDilateImageFilterIUC2IUC2SE2_cast: Any
def itkBinaryDilateImageFilterIUC3IUC3SE3_New(): ...  # factory stub: new IUC3 dilate filter
# SWIG stub: concrete BinaryDilateImageFilter for the IUC3/SE3 instantiation.
class itkBinaryDilateImageFilterIUC3IUC3SE3(itkBinaryDilateImageFilterIUC3IUC3SE3_Superclass):
    thisown: Any = ...
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    __New_orig__: Any = ...
    Clone: Any = ...
    SetDilateValue: Any = ...
    GetDilateValue: Any = ...
    __swig_destroy__: Any = ...
    cast: Any = ...
    def New(*args: Any, **kargs: Any): ...
    New: Any = ...
# Module-level aliases for the wrapped C++ helpers (stubbed as Any).
itkBinaryDilateImageFilterIUC3IUC3SE3___New_orig__: Any
itkBinaryDilateImageFilterIUC3IUC3SE3_cast: Any
# Snake-case procedural wrappers and their docstring initializers.
def binary_morphology_image_filter(*args: Any, **kwargs: Any): ...
def binary_morphology_image_filter_init_docstring() -> None: ...
def binary_dilate_image_filter(*args: Any, **kwargs: Any): ...
def binary_dilate_image_filter_init_docstring() -> None: ...
| [
"hans-johnson@uiowa.edu"
] | hans-johnson@uiowa.edu |
1c591199a0a777303ba3a24f71efd4fc31c8c3a9 | d659810b24ebc6ae29a4d7fbb3b82294c860633a | /aliyun-python-sdk-quickbi-public/aliyunsdkquickbi_public/request/v20200731/ListByUserGroupIdRequest.py | 9bdc80fc58feab1d2811017ca8d7ac64cc68fb89 | [
"Apache-2.0"
] | permissive | leafcoder/aliyun-openapi-python-sdk | 3dd874e620715173b6ccf7c34646d5cb8268da45 | 26b441ab37a5cda804de475fd5284bab699443f1 | refs/heads/master | 2023-07-31T23:22:35.642837 | 2021-09-17T07:49:51 | 2021-09-17T07:49:51 | 407,727,896 | 0 | 0 | NOASSERTION | 2021-09-18T01:56:10 | 2021-09-18T01:56:09 | null | UTF-8 | Python | false | false | 1,482 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkquickbi_public.endpoint import endpoint_data
class ListByUserGroupIdRequest(RpcRequest):
    """RPC request for Quick BI `ListByUserGroupId` (API version 2020-07-31)."""
    def __init__(self):
        RpcRequest.__init__(self, 'quickbi-public', '2020-07-31', 'ListByUserGroupId','quickbi')
        self.set_method('POST')
        # Wire product endpoint data into the request when the installed core
        # RpcRequest exposes the endpoint_map / endpoint_regional hooks.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
    # Accessors for the `UserGroupIds` query parameter (generated SDK style).
    def get_UserGroupIds(self):
        return self.get_query_params().get('UserGroupIds')
    def set_UserGroupIds(self,UserGroupIds):
        self.add_query_param('UserGroupIds',UserGroupIds)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
384d28ecb7a76eaf2d60baf426d25e1e67ef752b | 209dd8cbb28a40fa8ab7004368fcadd071c88699 | /Learning & Documentation/dlib(3)/digital_makeup_on_webcam.py | 215e7cd5464ff6f35963308f58bb7791f2acd7a5 | [] | no_license | mahmud83/Object-and-facial-detection-in-python | 095fc6ee47f7378c4586557b8a07b7a9cd537a62 | 2389e9d7b3b8331ffc5dd5d2591eacc7e72a5675 | refs/heads/master | 2020-04-07T00:36:40.435537 | 2018-10-02T22:58:00 | 2018-10-02T22:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | from PIL import Image, ImageDraw
import face_recognition
import cv2
#image = face_recognition.load_image_file("biden.jpg")
# Load the jpg file into a numpy array
# Open the default webcam (device index 0) as the frame source.
video_capture = cv2.VideoCapture(0)
# Find all facial features in all the faces in the image
#face_landmarks_list = face_recognition.face_landmarks(image)
# Main capture loop: detect facial landmarks on every frame and draw
# simple "makeup" lines on eyebrows, lips and eyes, until 'q' is pressed.
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    # One landmark dict per detected face (keys like 'left_eyebrow', 'top_lip', ...).
    face_landmarks_list = face_recognition.face_landmarks(frame)
    for face_landmarks in face_landmarks_list:
        #pil_image = Image.fromarray(frame)
        # d = ImageDraw.Draw(pil_image, 'RGBA')
        # Make the eyebrows into a nightmare
        # cv2.polylines(frame,face_landmarks['left_eyebrow'], fill=(68, 54, 39, 128))
        # cv2.polylines(frame,face_landmarks['right_eyebrow'],true, (68, 54, 39))
        # Draw a straight line from the first to the last landmark of each
        # eyebrow (BGR color (68, 54, 39), thickness 5).
        cv2.line(frame, face_landmarks['left_eyebrow'][0], face_landmarks['left_eyebrow'][4],(68, 54, 39), 5)
        cv2.line(frame, face_landmarks['right_eyebrow'][0], face_landmarks['right_eyebrow'][4],(68, 54, 39), 5)
        # Gloss the lips
        #d.polygon(face_landmarks['top_lip'], fill=(150, 0, 0, 128))
        #d.polygon(face_landmarks['bottom_lip'], fill=(150, 0, 0, 128))
        # Same first-to-fifth-landmark line across the top and bottom lip.
        cv2.line(frame, face_landmarks['top_lip'][0], face_landmarks['top_lip'][4],(68, 54, 39), 5)
        cv2.line(frame, face_landmarks['bottom_lip'][0], face_landmarks['bottom_lip'][4],(68, 54, 39), 5)
        # Sparkle the eyes
        #d.polygon(face_landmarks['left_eye'], fill=(255, 255, 255, 30))
        #d.polygon(face_landmarks['right_eye'], fill=(255, 255, 255, 30))
        # Apply some eyeliner
        cv2.line(frame, face_landmarks['left_eye'][0], face_landmarks['left_eye'][4],(68, 54, 39), 5)
        cv2.line(frame, face_landmarks['right_eye'][0], face_landmarks['right_eye'][4],(68, 54, 39), 5)
    # Show the annotated frame; quit when the user presses 'q'.
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera handle and close the preview window.
video_capture.release()
cv2.destroyAllWindows()
| [
"danwe980@student.liu.se"
] | danwe980@student.liu.se |
550f570ff18ea5eefd99c431579ddfb994de89ed | 98f1a0bfa5b20a0b81e9e555d76e706c62d949c9 | /examples/pytorch/hilander/utils/knn.py | 6604c7924ac2d49bf79ab8b4d730d5fda243ec83 | [
"Apache-2.0"
] | permissive | dmlc/dgl | 3a8fbca3a7f0e9adf6e69679ad62948df48dfc42 | bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1 | refs/heads/master | 2023-08-31T16:33:21.139163 | 2023-08-31T07:49:22 | 2023-08-31T07:49:22 | 130,375,797 | 12,631 | 3,482 | Apache-2.0 | 2023-09-14T15:48:24 | 2018-04-20T14:49:09 | Python | UTF-8 | Python | false | false | 5,635 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file re-uses implementation from https://github.com/yl-1993/learn-to-cluster
"""
import math
import multiprocessing as mp
import os
import numpy as np
from tqdm import tqdm
from utils import Timer
from .faiss_search import faiss_search_knn
__all__ = [
"knn_faiss",
"knn_faiss_gpu",
"fast_knns2spmat",
"build_knns",
"knns2ordered_nbrs",
]
def knns2ordered_nbrs(knns, sort=True):
    """Split raw knn results into (dists, nbrs) arrays.

    ``knns`` is an (n, 2, k) array (or list convertible to one) where
    slice 0 holds neighbor indices and slice 1 holds distances.  When
    ``sort`` is True each row is reordered by ascending distance.

    Returns (dists, nbrs) with nbrs cast to int32.
    """
    if isinstance(knns, list):
        knns = np.array(knns)
    nbrs = knns[:, 0, :].astype(np.int32)
    dists = knns[:, 1, :]
    if sort:
        # Per-row ascending order of distances, applied to both arrays.
        order = np.argsort(dists, axis=1)
        rows = np.arange(order.shape[0])[:, None]
        dists = dists[rows, order]
        nbrs = nbrs[rows, order]
    return dists, nbrs
def fast_knns2spmat(knns, k, th_sim=0, use_sim=True, fill_value=None):
    # convert knns to symmetric sparse matrix
    # Builds an n x n CSR affinity matrix from knn results: edge (i, j) gets
    # similarity 1 - dist (or raw dist when use_sim=False), keeping only
    # entries >= th_sim and dropping self-loops.
    from scipy.sparse import csr_matrix
    eps = 1e-5
    n = len(knns)
    if isinstance(knns, list):
        knns = np.array(knns)
    if len(knns.shape) == 2:
        # knns saved by hnsw has different shape
        # (list of (nbr, dist) pairs with possibly fewer than k neighbors);
        # pad into a dense (n, 2, k) array first.
        n = len(knns)
        ndarr = np.ones([n, 2, k])
        ndarr[:, 0, :] = -1  # assign unknown dist to 1 and nbr to -1
        for i, (nbr, dist) in enumerate(knns):
            size = len(nbr)
            assert size == len(dist)
            ndarr[i, 0, :size] = nbr[:size]
            ndarr[i, 1, :size] = dist[:size]
        knns = ndarr
    nbrs = knns[:, 0, :]
    dists = knns[:, 1, :]
    # Distances are expected to be normalized to [0, 1] (cosine-style).
    assert (
        -eps <= dists.min() <= dists.max() <= 1 + eps
    ), "min: {}, max: {}".format(dists.min(), dists.max())
    if use_sim:
        sims = 1.0 - dists
    else:
        sims = dists
    if fill_value is not None:
        # Debugging aid: overwrite every edge weight with a constant (in place).
        print("[fast_knns2spmat] edge fill value:", fill_value)
        sims.fill(fill_value)
    # row/col index into the (n, k) similarity grid at this point.
    row, col = np.where(sims >= th_sim)
    # remove the self-loop
    idxs = np.where(row != nbrs[row, col])
    row = row[idxs]
    col = col[idxs]
    data = sims[row, col]
    col = nbrs[row, col]  # convert to absolute column
    assert len(row) == len(col) == len(data)
    spmat = csr_matrix((data, (row, col)), shape=(n, n))
    return spmat
def build_knns(feats, k, knn_method, dump=True):
    # Build a knn index over `feats` with the requested backend and return
    # the k nearest neighbors of every row.
    # NOTE(review): the `dump` parameter is accepted but never used here --
    # presumably kept for API compatibility with other builders; confirm.
    with Timer("build index"):
        if knn_method == "faiss":
            index = knn_faiss(feats, k, omp_num_threads=None)
        elif knn_method == "faiss_gpu":
            index = knn_faiss_gpu(feats, k)
        else:
            # Any other backend name is rejected outright.
            raise KeyError(
                "Only support faiss and faiss_gpu currently ({}).".format(
                    knn_method
                )
            )
        knns = index.get_knns()
    return knns
class knn:
    # Base class for knn backends.  Subclasses populate self.knns (a list of
    # (nbrs, dists) pairs, one per sample) in __init__; this class provides
    # optional similarity-threshold filtering of those results.
    def __init__(self, feats, k, index_path="", verbose=True):
        pass
    def filter_by_th(self, i):
        """Return sample i's neighbors whose similarity (1 - dist) >= self.th."""
        th_nbrs = []
        th_dists = []
        nbrs, dists = self.knns[i]
        for n, dist in zip(nbrs, dists):
            if 1 - dist < self.th:
                continue
            th_nbrs.append(n)
            th_dists.append(dist)
        th_nbrs = np.array(th_nbrs)
        th_dists = np.array(th_dists)
        return (th_nbrs, th_dists)
    def get_knns(self, th=None):
        """Return the knn results, filtered by similarity threshold ``th`` if given."""
        if th is None or th <= 0.0:
            # No (meaningful) threshold: hand back the raw results.
            return self.knns
        # TODO: optimize the filtering process by numpy
        # nproc = mp.cpu_count()
        nproc = 1
        with Timer(
            "filter edges by th {} (CPU={})".format(th, nproc), self.verbose
        ):
            self.th = th
            self.th_knns = []
            tot = len(self.knns)
            if nproc > 1:
                # Parallel path (currently disabled by nproc = 1 above).
                pool = mp.Pool(nproc)
                th_knns = list(
                    tqdm(pool.imap(self.filter_by_th, range(tot)), total=tot)
                )
                pool.close()
            else:
                th_knns = [self.filter_by_th(i) for i in range(tot)]
        return th_knns
class knn_faiss(knn):
    # CPU faiss backend: exact inner-product search with IndexFlatIP.
    # Features are assumed L2-normalized so that inner product == cosine
    # similarity and 1 - sim is a valid distance in [0, 1] -- TODO confirm
    # against callers.
    # NOTE(review): `nprobe` and `rebuild_index` are accepted but unused here.
    def __init__(
        self,
        feats,
        k,
        nprobe=128,
        omp_num_threads=None,
        rebuild_index=True,
        verbose=True,
        **kwargs
    ):
        import faiss
        if omp_num_threads is not None:
            faiss.omp_set_num_threads(omp_num_threads)
        self.verbose = verbose
        with Timer("[faiss] build index", verbose):
            # faiss requires float32 input.
            feats = feats.astype("float32")
            size, dim = feats.shape
            index = faiss.IndexFlatIP(dim)
            index.add(feats)
        with Timer("[faiss] query topk {}".format(k), verbose):
            sims, nbrs = index.search(feats, k=k)
            # Store per-sample (neighbor indices, distances) with
            # distance = 1 - inner-product similarity.
            self.knns = [
                (
                    np.array(nbr, dtype=np.int32),
                    1 - np.array(sim, dtype=np.float32),
                )
                for nbr, sim in zip(nbrs, sims)
            ]
class knn_faiss_gpu(knn):
    # GPU faiss backend: delegates the whole search to faiss_search_knn,
    # which already returns (dists, nbrs).
    # NOTE(review): unlike knn_faiss, this never sets self.verbose, so a
    # later get_knns(th=...) call on this object would fail on the missing
    # attribute -- verify intended usage.
    def __init__(
        self,
        feats,
        k,
        nprobe=128,
        num_process=4,
        is_precise=True,
        sort=True,
        verbose=True,
        **kwargs
    ):
        with Timer("[faiss_gpu] query topk {}".format(k), verbose):
            dists, nbrs = faiss_search_knn(
                feats,
                k=k,
                nprobe=nprobe,
                num_process=num_process,
                is_precise=is_precise,
                sort=sort,
                verbose=verbose,
            )
            # Store per-sample (neighbor indices, distances) pairs.
            self.knns = [
                (
                    np.array(nbr, dtype=np.int32),
                    np.array(dist, dtype=np.float32),
                )
                for nbr, dist in zip(nbrs, dists)
            ]
| [
"noreply@github.com"
] | dmlc.noreply@github.com |
d54c7619e4623b4018c50dcf1798d3b14d35cc0f | 11d7d8b6ce2315d1f2fbbe630290d3fe97bd56c0 | /napalm/_modules/napalm_network.py | d2fd93c749ee8d9a1b4e58a68346e30753fd66b4 | [
"Apache-2.0"
] | permissive | mirceaulinic/napalm-salt | a356681359a189ab2c2ea8bdbcd9f53eed137122 | 1c6e3d21b6467040654c10474084b4f0a3d3fdcc | refs/heads/master | 2021-01-17T06:48:12.066800 | 2016-12-13T21:36:03 | 2016-12-13T21:36:03 | 56,152,447 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,731 | py | # -*- coding: utf-8 -*-
'''
NAPALM Network
===============
Basic methods for interaction with the network device through the virtual proxy 'napalm'.
:codeauthor: Mircea Ulinic <mircea@cloudflare.com> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`napalm proxy minion <salt.proxy.napalm>`
.. versionadded:: Carbon
'''
from __future__ import absolute_import
# Import python lib
import logging
log = logging.getLogger(__name__)
# salt libs
from salt.ext import six
try:
# will try to import NAPALM
# https://github.com/napalm-automation/napalm
# pylint: disable=W0611
from napalm_base import get_network_driver
# pylint: enable=W0611
HAS_NAPALM = True
except ImportError:
HAS_NAPALM = False
# ----------------------------------------------------------------------------------------------------------------------
# module properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'net'
__proxyenabled__ = ['napalm']
# uses NAPALM-based proxy to interact with network devices
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    '''
    NAPALM library must be installed for this module to work.
    Also, the key proxymodule must be set in the __opts___ dictionary.
    '''
    # __opts__ is injected by the salt loader at runtime; HAS_NAPALM is set
    # at import time above.  Returning a (False, reason) tuple tells the
    # loader why the module is unavailable.
    if HAS_NAPALM and 'proxy' in __opts__:
        return __virtualname__
    else:
        return (False, 'The module NET (napalm_network) cannot be loaded: \
            napalm or proxy could not be loaded.')
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _filter_list(input_list, search_key, search_value):
'''
Filters a list of dictionary by a set of key-value pair.
:param input_list: is a list of dictionaries
:param search_key: is the key we are looking for
:param search_value: is the value we are looking for the key specified in search_key
:return: filered list of dictionaries
'''
output_list = list()
for dictionary in input_list:
if dictionary.get(search_key) == search_value:
output_list.append(dictionary)
return output_list
def _filter_dict(input_dict, search_key, search_value):
    '''
    Filters a dict-of-lists-of-dicts: for every key, keeps only the leaf
    dictionaries matching the key/value pair, and drops keys whose filtered
    list is empty.

    :param input_dict: dictionary whose values are lists of dictionaries
    :param search_key: key to look up in the leaf dictionaries
    :param search_value: value to match in the leaf dictionaries
    :return: the filtered dictionary
    '''
    output_dict = dict()
    for name, entries in six.iteritems(input_dict):
        matched = _filter_list(entries, search_key, search_value)
        # Keys with no surviving entries are omitted entirely.
        if matched:
            output_dict[name] = matched
    return output_dict
def _config_logic(loaded_result, test=False, commit_config=True):
    '''
    Builds the config logic for `load_config` and `load_template` functions.
    '''
    # Mutates and returns `loaded_result` (the dict returned by the proxy
    # load call), adding 'diff' and 'already_configured', and deciding
    # whether to commit or discard the candidate configuration.
    loaded_result['already_configured'] = False
    _compare = compare_config()
    if _compare.get('result', False):
        loaded_result['diff'] = _compare.get('out')
        loaded_result.pop('out', '') # not needed
    _loaded_res = loaded_result.get('result', False)
    if not _loaded_res or test:
        # if unable to load the config (errors / warnings)
        # or in testing mode,
        # will discard the config
        if loaded_result['comment']:
            loaded_result['comment'] += '\n'
        if not len(loaded_result.get('diff', '')) > 0:
            # an empty diff means the device already matches the request
            loaded_result['already_configured'] = True
        _discarded = discard_config()
        if not _discarded.get('result', False):
            loaded_result['comment'] += _discarded['comment'] if _discarded['comment'] else 'Unable to discard config.'
            loaded_result['result'] = False
            # make sure it notifies
            # that something went wrong
            return loaded_result
        loaded_result['comment'] += 'Configuration discarded.'
        # loaded_result['result'] = False not necessary
        # as the result can be true when test=True
        return loaded_result
    if not test and commit_config:
        if len(loaded_result.get('diff', '')) > 0:
            # if not testing mode
            # and also the user wants to commit (default)
            # and there are changes to commit
            _commit = commit() # calls the function commit, defined below
            if not _commit.get('result', False):
                # commit failed: record why, then try to release the
                # candidate config so the device is not left locked
                loaded_result['comment'] += _commit['comment'] if _commit['comment'] else 'Unable to commit config.'
                loaded_result['result'] = False
                _discarded = discard_config() # unable to commit, discard config
                loaded_result['comment'] += '\n'
                loaded_result['comment'] += _discarded['comment'] if _discarded['comment'] else 'Unable to discard config.'
        else:
            # would like to commit, but there's no change
            # need to call discard_config() to release the config DB
            _discarded = discard_config()
            if not _discarded.get('result', False):
                loaded_result['comment'] += _discarded['comment'] if _discarded['comment'] else 'Unable to discard config.'
                loaded_result['result'] = False
                # notify if anything goes wrong
                return loaded_result
            loaded_result['already_configured'] = True
            loaded_result['comment'] = 'Already configured.'
    return loaded_result
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def connected():
    '''
    Specifies if the proxy succeeded to connect to the network device.

    CLI Example:

    .. code-block:: bash

        salt '*' net.connected
    '''
    # Delegate to the proxy's liveness check.
    return {'out': __proxy__['napalm.ping']()}
def facts():
    '''
    Returns characteristics of the network device, as reported by NAPALM:
    uptime (seconds), vendor, model, hostname, fqdn, os_version,
    serial_number and interface_list.

    CLI Example:

    .. code-block:: bash

        salt '*' net.facts
    '''
    return __proxy__['napalm.call']('get_facts')
def environment():
    '''
    Returns the environment of the device: fans, memory, temperature
    sensors and per-CPU usage, as reported by NAPALM.

    CLI Example:

    .. code-block:: bash

        salt '*' net.environment
    '''
    return __proxy__['napalm.call']('get_environment')
def cli(*commands):
    '''
    Returns a dictionary mapping each CLI command passed as argument to its
    raw output on the device.

    :param commands: list of commands to be executed on the device

    CLI Example:

    .. code-block:: bash

        salt '*' net.cli "show version" "show chassis fan"
    '''
    # Output is returned as-is; errors are caught inside the proxy.
    return __proxy__['napalm.call']('cli', commands=list(commands))
# thus we can display the output as is
# in case of errors, they'll be catched in the proxy
def traceroute(destination, source='', ttl=0, timeout=0):
    '''
    Executes a traceroute on the device and returns the result.

    :param destination: Hostname or address of remote host
    :param source: Source address to use in outgoing traceroute packets
    :param ttl: IP maximum time-to-live value (or IPv6 maximum hop-limit value)
    :param timeout: Number of seconds to wait for response

    CLI Example:

    .. code-block:: bash

        salt '*' net.traceroute 8.8.8.8
        salt '*' net.traceroute 8.8.8.8 source=127.0.0.1 ttl=5 timeout=1
    '''
    return __proxy__['napalm.call'](
        'traceroute',
        destination=destination,
        source=source,
        ttl=ttl,
        timeout=timeout
    )
def ping(destination, source='', ttl=0, timeout=0, size=0, count=0):
    '''
    Executes a ping on the network device and returns a dictionary result.

    :param destination: Hostname or IP address of remote host
    :param source: Source address of echo request
    :param ttl: IP time-to-live value (IPv6 hop-limit value) (1..255 hops)
    :param timeout: Maximum wait time after sending final packet (seconds)
    :param size: Size of request packets (0..65468 bytes)
    :param count: Number of ping requests to send (1..2000000000 packets)

    CLI Example:

    .. code-block:: bash

        salt '*' net.ping 8.8.8.8
        salt '*' net.ping 8.8.8.8 source=127.0.0.1 timeout=1 count=100
    '''
    return __proxy__['napalm.call'](
        'ping',
        destination=destination,
        source=source,
        ttl=ttl,
        timeout=timeout,
        size=size,
        count=count
    )
def arp(interface='', ipaddr='', macaddr=''):
    '''
    Returns the device ARP table as a list of entry dictionaries
    (interface, mac, ip, age), optionally filtered.

    :param interface: interface name to filter on
    :param ipaddr: IP address to filter on
    :param macaddr: MAC address to filter on

    CLI Example:

    .. code-block:: bash

        salt '*' net.arp
        salt '*' net.arp macaddr='5c:5e:ab:da:3c:f0'
    '''
    ret = __proxy__['napalm.call']('get_arp_table')
    if not ret.get('result'):
        # proxy call failed: return the error structure untouched
        return ret
    table = ret.get('out')
    # Apply each requested filter in turn (interface, then ip, then mac).
    for key, value in (('interface', interface), ('ip', ipaddr), ('mac', macaddr)):
        if value:
            table = _filter_list(table, key, value)
    ret.update({'out': table})
    return ret
def ipaddrs():
    '''
    Returns all configured IP addresses on all interfaces as a dictionary of
    dictionaries, keyed by interface name; each value may contain 'ipv4'
    and/or 'ipv6' sub-dictionaries keyed by address.

    CLI Example:

    .. code-block:: bash

        salt '*' net.ipaddrs
    '''
    return __proxy__['napalm.call']('get_interfaces_ip')
def interfaces():
    '''
    Returns details of the interfaces on the device as a dictionary of
    dictionaries keyed by interface name (is_up, is_enabled, description,
    last_flapped, speed, mac_address).

    CLI Example:

    .. code-block:: bash

        salt '*' net.interfaces
    '''
    return __proxy__['napalm.call']('get_interfaces')
def lldp(interface=''):
    '''
    Returns a detailed view of the LLDP neighbors, keyed by the interfaces
    with LLDP activated, optionally restricted to one interface.

    :param interface: interface name to filter on

    CLI Example:

    .. code-block:: bash

        salt '*' net.lldp
        salt '*' net.lldp interface='TenGigE0/0/0/8'
    '''
    ret = __proxy__['napalm.call']('get_lldp_neighbors_detail')
    if not ret.get('result'):
        # proxy call failed: return the error structure untouched
        return ret
    neighbors = ret.get('out')
    if interface:
        # Keep only the requested interface (value is None if not present).
        neighbors = {interface: neighbors.get(interface)}
    ret.update({'out': neighbors})
    return ret
def mac(address='', interface='', vlan=0):
    '''
    Returns the MAC Address Table on the device as a list of entry
    dictionaries, optionally filtered.

    :param address: MAC address to filter on
    :param interface: Interface name to filter on
    :param vlan: VLAN identifier

    CLI Example:

    .. code-block:: bash

        salt '*' net.mac
        salt '*' net.mac vlan=10
    '''
    ret = __proxy__['napalm.call']('get_mac_address_table')
    if not ret.get('result'):
        # if negative, leave the output unchanged
        return ret
    table = ret.get('out')
    # Filters applied in order: vlan (only if a non-zero int), mac, interface.
    if vlan and isinstance(vlan, int):
        table = _filter_list(table, 'vlan', vlan)
    if address:
        table = _filter_list(table, 'mac', address)
    if interface:
        table = _filter_list(table, 'interface', interface)
    ret.update({'out': table})
    return ret
# <---- Call NAPALM getters --------------------------------------------------------------------------------------------
# ----- Configuration specific functions ------------------------------------------------------------------------------>
def load_config(filename=None, text=None, test=False, commit=True):
    '''
    Populates the candidate configuration from a file or an inline string
    (the file takes precedence when both are given), then runs the shared
    commit/discard logic.  By default the change is committed; use
    test=True for a dry run or commit=False to keep the candidate loaded.

    :param filename: Path to the file containing the desired configuration.
    :param text: String containing the desired configuration.
    :param test: Dry run? Apply, return the diff, and discard.
    :param commit: Commit the changes (default: True).
    :return: dict with keys 'result', 'comment', 'already_configured' and 'diff'.

    CLI Example:

    .. code-block:: bash

        salt '*' net.load_config text='ntp peer 192.168.0.1'
        salt '*' net.load_config filename='/absolute/path/to/your/file' test=True
    '''
    loaded = __proxy__['napalm.call'](
        'load_merge_candidate',
        filename=filename,
        config=text
    )
    return _config_logic(loaded, test=test, commit_config=commit)
def load_template(template_name,
                  template_source=None,
                  template_path=None,
                  test=False,
                  commit=True,
                  **template_vars):
    '''
    Renders a Jinja configuration template and loads the result on the
    device, then runs the shared commit/discard logic (commit by default,
    dry run with test=True, keep the candidate with commit=False).

    The template can read ``grains``, ``pillar`` and ``opts``, which are
    injected automatically, plus any extra keyword arguments.

    :param template_name: Identifies the template name.
    :param template_source: Inline config template to be rendered and loaded.
    :param template_path: Absolute path to a different templates directory
        (defaults to the templates shipped with NAPALM).
    :param test: Dry run? Apply, return the diff, and discard.
    :param commit: Commit the changes (default: True).
    :param template_vars: Arguments used when the template is rendered.
    :return: dict with keys 'result', 'comment', 'already_configured' and 'diff'.

    CLI Example:

    .. code-block:: bash

        salt '*' net.load_template ntp_peers peers=[192.168.0.1]
        salt '*' net.load_template my_template template_path='/tmp/tpl/' my_param='aaa' test=True
    '''
    render_params = dict(template_vars)  # leave the caller's template_vars unchanged
    render_params.update(
        {
            'template_name': template_name,
            'template_source': template_source,  # inline template
            'template_path': template_path,
            'pillar': __pillar__,  # inject pillar content, accessible as `pillar`
            'grains': __grains__,  # inject grains, accessible as `grains`
            'opts': __opts__  # inject opts, accessible as `opts`
        }
    )
    loaded = __proxy__['napalm.call']('load_template', **render_params)
    return _config_logic(loaded, test=test, commit_config=commit)
def commit():
    '''
    Commits the candidate configuration changes on the network device.

    CLI Example:

    .. code-block:: bash

        salt '*' net.commit
    '''
    return __proxy__['napalm.call']('commit_config')
def discard_config():
    """
    Discards the candidate configuration changes.

    CLI Example:

    .. code-block:: bash

        salt '*' net.discard_config
    """
    return __proxy__['napalm.call']('discard_config')
def compare_config():
    '''
    Returns the difference between the running config and the candidate
    config.

    CLI Example:

    .. code-block:: bash

        salt '*' net.compare_config
    '''
    return __proxy__['napalm.call']('compare_config')
def rollback():
    '''
    Rolls back the device configuration to the previous committed state.

    CLI Example:

    .. code-block:: bash

        salt '*' net.rollback
    '''
    return __proxy__['napalm.call']('rollback')
def config_changed():
    '''
    Reports whether the candidate configuration differs from the running
    one.

    :return: a (changed, reason) tuple -- `changed` is True only when a
        non-empty diff was produced; `reason` explains why it is False.

    CLI Example:

    .. code-block:: bash

        salt '*' net.config_changed
    '''
    diff_ret = compare_config()
    if not diff_ret.get('result'):
        # comparison itself failed: pass its error message through
        return False, diff_ret.get('comment')
    if diff_ret.get('out'):
        return True, ''
    return False, 'Configuration was not changed on the device.'
def config_control():
    '''
    Will check if the configuration was changed.
    If differences found, will try to commit.
    In case commit unsuccessful, will try to rollback.
    :return: A tuple with a boolean that specifies if the config was changed/commited/rollbacked on the device.\
    And a string that provides more details of the reason why the configuration was not commited properly.
    CLI Example:
    .. code-block:: bash
    salt '*' net.config_control
    '''
    result = True
    comment = ''
    changed, not_changed_reason = config_changed()
    if not changed:
        # nothing to do: propagate the reason from config_changed()
        return (changed, not_changed_reason)
    # config changed, thus let's try to commit
    try_commit = commit()
    if not try_commit.get('result'):
        result = False
        comment = 'Unable to commit the changes: {reason}.\n\
            Will try to rollback now!'.format(
            reason=try_commit.get('comment')
        )
        # commit failed: attempt to restore the previous configuration
        try_rollback = rollback()
        if not try_rollback.get('result'):
            comment += '\nCannot rollback! {reason}'.format(
                reason=try_rollback.get('comment')
            )
    return result, comment
# <---- Configuration specific functions -------------------------------------------------------------------------------
| [
"mirucha@cloudflare.com"
] | mirucha@cloudflare.com |
124a7182b91e616581f3e88a5546efe4a0e34d1f | 9ca91c03b770e0bad09e6fbb2d7e8cbef8482263 | /articles/filmav/grabed_articles/60660.py | 360505aeb349dc93ca69d5b6389365815bcc191f | [] | no_license | renever/wp_sys | 7408b5c7edb47cb376bb786d630481fd825815ed | 23469f2a03759293abf2ed38482c1f152f5798d4 | refs/heads/develop | 2020-05-20T06:00:44.550968 | 2015-02-10T16:04:30 | 2015-02-10T16:04:30 | 31,001,855 | 1 | 2 | null | 2015-02-19T04:55:43 | 2015-02-19T04:55:42 | Python | UTF-8 | Python | false | false | 1,287 | py | {"screenshosts": ["http://img107.imagetwist.com/i/07692/6tm121fwtidd.jpeg", "http://img107.imagetwist.com/i/07692/cy9cxjj2e6yw.jpeg", "http://img107.imagetwist.com/i/07692/pdl4lvh8g5bh.jpeg", "http://img107.imagetwist.com/i/07692/s9sbclxq9dfk.jpeg"], "uploaded_net": ["http://ul.to/0vo9q3si/aqb001_00.wmv"], "description": "p>kt-joker <span class=\"wp_keywordlink_affiliate\"></span>\u5287\u5834 Vol.01-Vol.04<br />\n<br />\n<br />\n\u51fa\u54c1\u8005 : \u5fa9\u6d3b\u8acb\u8ca0\u4eba<br />\n\u30ab\u30c6\u30b4\u30ea : \u9ad8\u8a55\u4fa1\u3000\u9732\u5929\u3000<span class=\"wp_keywordlink_affiliate\"></span>\u3000\u304a\u59c9\u3055\u3093\u3000\u30ae\u30e3\u30eb\u3000\u4e38\u898b\u3048\u3000<br />\n\u30d5\u30a9\u30fc\u30de\u30c3\u30c8 : WMV<br />\n\u52d5\u753b\u518d\u751f\u6642\u9593 : 00:02:08 / 00:07:55 / 00:01:39 / 00:05:03<br />\n\u753b\u8cea : \u2605 \u2605 \u2606<br />\n\u8cfc\u5165\u4fa1\u683c : $10 / $12 / $7 / $10<br />", "tags": ["\u602a\u76d7\u30b8\u30e7\u30fc\u30ab\u30fc", "\u98a8\u5442", "\u5973\u6e6f"], "cover_img": "http://img58.imagetwist.com/th/07692/lcrqk9n2o3e8.jpg", "file_name": "aqb001_00", "rapidgator": ["http://rapidgator.net/file/2aa842eb7f1080bc9c0380a99636d417/aqb001_00.wmv.html"], "id": "60660", "categories": ["kt-joker", "the-101", "Uncensored"]} | [
"wodewangzuan@gmail.com"
] | wodewangzuan@gmail.com |
e45db364ac41947ca34d39b12f7a98502a23dba1 | 795df757ef84073c3adaf552d5f4b79fcb111bad | /matrix_exp/eulerian.py | 742adbda260290862065b49d4a75213ffe9d07ed | [] | no_license | tnakaicode/jburkardt-python | 02cb2f9ba817abf158fc93203eb17bf1cb3a5008 | 1a63f7664e47d6b81c07f2261b44f472adc4274d | refs/heads/master | 2022-05-21T04:41:37.611658 | 2022-04-09T03:31:00 | 2022-04-09T03:31:00 | 243,854,197 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,795 | py | #! /usr/bin/env python
#
def eulerian(m, n):

    """Return the M-by-N Eulerian triangle as a real array.

    Entry (i, j) (0-based) counts the permutations of i+1 objects that
    contain exactly j+1 ascending runs, filled by the recurrence

        E(i, j) = (j + 1) * E(i-1, j) + (i - j + 1) * E(i-1, j-1)

    The result is unit lower triangular, nonnegative and integral.

    Parameters:
        m, n: number of rows and columns of the matrix.

    Returns:
        real numpy array of shape (m, n).

    Reference:
        Dennis Stanton, Dennis White,
        Constructive Combinatorics, Springer Verlag, 1986.
    """
    import numpy as np

    triangle = np.zeros((m, n))

    #  First row/column seed the recurrence.
    triangle[0, 0] = 1.0

    for row in range(1, m):
        triangle[row, 0] = 1.0
        for col in range(1, n):
            triangle[row, col] = (col + 1) * triangle[row - 1, col] \
                + (row - col + 1) * triangle[row - 1, col - 1]

    return triangle
def eulerian_determinant(n):

    """Return the determinant of the N-by-N Eulerian matrix.

    The Eulerian matrix is unit lower triangular, so its determinant is
    always exactly 1, independent of N.

    Parameters:
        n: order of the matrix (unused beyond fixing the order).

    Returns:
        the determinant, 1.0.
    """
    return 1.0
def eulerian_determinant_test():

    """Demonstrate EULERIAN_DETERMINANT on a 4x4 Eulerian matrix.

    Prints the matrix and its (trivially known) determinant, bracketed
    by the usual banner lines.
    """
    import platform
    from eulerian import eulerian
    from r8mat_print import r8mat_print

    print ( '' )
    print ( 'EULERIAN_DETERMINANT_TEST' )
    print ( ' Python version: %s' % ( platform.python_version ( ) ) )
    print ( ' EULERIAN_DETERMINANT computes the determinant of the EULERIAN matrix.' )

    #  Build and display the sample matrix.
    order = 4
    mat = eulerian ( order, order )
    r8mat_print ( order, order, mat, ' EULERIAN matrix:' )

    #  Report the determinant.
    determ = eulerian_determinant ( order )
    print ( '' )
    print ( ' Value = %g' % ( determ ) )

    #  Terminate.
    print ( '' )
    print ( 'EULERIAN_DETERMINANT_TEST' )
    print ( ' Normal end of execution.' )
    return
def eulerian_inverse(n):

    """Compute the inverse of the N-by-N Eulerian matrix.

    The Eulerian matrix is unit lower triangular, so its inverse exists,
    is itself unit lower triangular, and can be obtained column by column
    via forward substitution.

    Parameters:
        n: order of the matrix.

    Returns:
        real numpy array of shape (n, n), the inverse of the Eulerian
        matrix.
    """
    import numpy as np

    #  The matrix being inverted.
    b = eulerian(n, n)

    a = np.zeros((n, n))
    for col in range(n):
        #  Unit diagonal of the inverse.
        a[col, col] = 1.0
        #  Forward-substitute down the column (same accumulation order as
        #  the textbook loop, so the floating-point result is identical).
        for row in range(col + 1, n):
            acc = 0.0
            for k in range(col, row):
                acc = acc + b[row, k] * a[k, col]
            a[row, col] = -acc

    return a
def eulerian_test():

    """Demonstrate EULERIAN by printing a 4x4 Eulerian matrix."""
    import platform
    from r8mat_print import r8mat_print

    print ( '' )
    print ( 'EULERIAN_TEST' )
    print ( ' Python version: %s' % ( platform.python_version ( ) ) )
    print ( ' EULERIAN computes the EULERIAN matrix.' )

    #  Build and display the sample matrix.
    order = 4
    mat = eulerian ( order, order )
    r8mat_print ( order, order, mat, ' EULERIAN matrix:' )

    #  Terminate.
    print ( '' )
    print ( 'EULERIAN_TEST' )
    print ( ' Normal end of execution.' )
    return
#
#  When executed as a script, run the demonstration, bracketed by
#  timestamps (timestamp is a sibling module of this file).
#
if ( __name__ == '__main__' ):
  from timestamp import timestamp
  timestamp ( )
  eulerian_test ( )
  timestamp ( )
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
c91974ea7c56b546ae5ff953dd6c549cda27a0ad | 0b0a947c10038152fc56efbdde13eef3330adb34 | /hackerrank-problem-solving-solutions/78. Collections.OrderedDict().py | a197f537b5a740b0a1e16d28c1ba491bb31ec056 | [] | no_license | swapnanildutta/Python-programs | 9c382eb8c823571e4f098fff263d126665fbc575 | d47e2e3c4d648e0cc0ae1b89b83ce4f99db89f63 | refs/heads/master | 2021-11-18T22:16:57.276910 | 2021-09-04T13:07:36 | 2021-09-04T13:07:36 | 197,773,723 | 1 | 26 | null | 2023-04-09T10:51:57 | 2019-07-19T13:02:26 | Python | UTF-8 | Python | false | false | 267 | py | # Author Aman Shekhar
from collections import OrderedDict
# Tally the net price per item while preserving first-appearance order
# (HackerRank "Collections.OrderedDict()" exercise).
# Each of the N input lines is "<item name> <price>".
order = OrderedDict()
for _ in range(int(input())):
    # rpartition on the LAST space keeps multi-word item names intact.
    item, space, price = input().rpartition(' ')
    order[item] = order.get(item, 0) + int(price)
# Items print in the order they were first seen.
for item, price in order.items():
    print(item, price)
"Aman Shekhar"
] | Aman Shekhar |
e47e686c2ad671ccdeaeab3e94483f08c8c05fe4 | d01670aa5bddb47dc414bf01921155610e2a5070 | /leetcode/078_subsets.py | 29242d2656b26a754e499a4cf12e7223cae83858 | [] | no_license | hwillmott/csfundamentals | 14c7e4253b581cef7046ca035bda038c24a52613 | 832f6a8c0deb0569d3fe0dc03e4564c2d850f067 | refs/heads/master | 2020-08-01T12:27:01.914391 | 2020-03-26T16:47:35 | 2020-03-26T16:47:35 | 73,576,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | class Solution(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
def backtrack(result, nums, currlist, start):
result.append(currlist)
for i in range(start, len(nums)):
backtrack(result, nums, currlist + [nums[i]], i+1)
res = []
backtrack(res, nums, [], 0)
return res
| [
"harriet.willmott@gmail.com"
] | harriet.willmott@gmail.com |
954a8f88b3afcf28502295761fd76f03df543823 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/baiduads/negativeword/model/campaign_region_area.py | b6bed9de96554e7169a12df5524b00461d01b968 | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 11,938 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
class CampaignRegionArea(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # NOTE(review): machine-generated model for the Baidu Ads
    # "CampaignRegionArea" schema. Only comments were added in review;
    # regenerate from the upstream schema rather than hand-editing.
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'address': (str,), # noqa: E501
            'province_id': (int,), # noqa: E501
            'city_id': (int,), # noqa: E501
            'mk_pointx': (str,), # noqa: E501
            'mk_pointy': (str,), # noqa: E501
            'distance': (int,), # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    # Mapping from pythonic attribute names to the JSON keys on the wire.
    attribute_map = {
        'address': 'address', # noqa: E501
        'province_id': 'provinceId', # noqa: E501
        'city_id': 'cityId', # noqa: E501
        'mk_pointx': 'mkPointx', # noqa: E501
        'mk_pointy': 'mkPointy', # noqa: E501
        'distance': 'distance', # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
        """CampaignRegionArea - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            address (str): [optional] # noqa: E501
            province_id (int): [optional] # noqa: E501
            city_id (int): [optional] # noqa: E501
            mk_pointx (str): [optional] # noqa: E501
            mk_pointy (str): [optional] # noqa: E501
            distance (int): [optional] # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__ so read-only attributes can be populated.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Internal bookkeeping attributes that must never be treated as model
    # properties by __setattr__.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs): # noqa: E501
        """CampaignRegionArea - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            address (str): [optional] # noqa: E501
            province_id (int): [optional] # noqa: E501
            city_id (int): [optional] # noqa: E501
            mk_pointx (str): [optional] # noqa: E501
            mk_pointy (str): [optional] # noqa: E501
            distance (int): [optional] # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
"tokimekiyxp@foxmail.com"
] | tokimekiyxp@foxmail.com |
178d77aad9895f4b66d292a42179376af5f5e34e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03014/s558979367.py | 10160eef25c45fbff7a7bc0be7daaaa18cc7f9db | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | import sys
import itertools
# import numpy as np
import time
import math
sys.setrecursionlimit(10 ** 7)
from collections import defaultdict
# Buffered binary stdin readers (fast-input boilerplate; note that not all
# of the imports above are actually used below).
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
# H x W grid; '#' marks a blocked cell, anything else is open.
# cnt[i][j] accumulates (horizontal run length) + (vertical run length)
# through the open cell (i, j).
# (Presumably AtCoder problem p03014 "Lamp" -- confirm against the judge.)
H, W = map(int, readline().split())
tile = [0 for i in range(H)]
cnt = [[0 for _ in range(W)] for _ in range(H)]
for i in range(H):
    tile[i] = readline().decode().strip()
# Horizontal pass: for each maximal run of open cells in a row, add the run
# length to every cell of that run ('done' avoids rescanning the same run).
for i in range(H):
    done = [False for _ in range(W)]
    for j in range(W):
        if tile[i][j] == '#':
            continue
        if done[j]:
            continue
        l = 0
        while (j + l < W):
            if tile[i][j + l] == '#':
                break
            l += 1
        for k in range(l):
            cnt[i][j + k] += l
            done[j + k] = True
# Vertical pass: same idea down each column.
for j in range(W):
    done = [False for _ in range(H)]
    for i in range(H):
        if tile[i][j] == '#':
            continue
        if done[i]:
            continue
        l = 0
        while (i + l < H):
            if tile[i + l][j] == '#':
                break
            l += 1
        for k in range(l):
            cnt[i + k][j] += l
            done[i + k] = True
# Each open cell was counted once by its row run and once by its column run,
# so subtract 1 for the cell itself; the answer is the maximum over the grid.
ans = 0
for i in range(H):
    for j in range(W):
        ans = max(cnt[i][j] - 1, ans)
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ef8e2e1a1e6de3d5d79a4c27b95d6b2422c0d021 | 80301f1cffc5afce13256e2ecab6323c5df00194 | /en.fc/py/R3103.py | 7e5430302766e39d9bf4ac37b92e3ff8da35e8d2 | [] | no_license | ZhenjianYang/SoraVoiceScripts | c1ddf7c1bbcb933243754f9669bd6b75777c87b9 | 94a948090aba0f63b10b2c69dc845dc99c822fc4 | refs/heads/master | 2023-04-18T04:54:44.306652 | 2023-04-06T11:15:17 | 2023-04-06T11:15:17 | 103,167,541 | 43 | 11 | null | 2021-03-06T08:52:54 | 2017-09-11T17:36:55 | Python | UTF-8 | Python | false | false | 33,744 | py | from ED6ScenarioHelper import *
def main():
SetCodePage("ms932")
# 蔡斯
CreateScenaFile(
FileName = 'R3103 ._SN',
MapName = 'Zeiss',
Location = 'R3103.x',
MapIndex = 1,
MapDefaultBGM = "ed60020",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'Zeiss', # 9
'Wolf Fort', # 10
'', # 11
'', # 12
'', # 13
'', # 14
'', # 15
'', # 16
'', # 17
'', # 18
'', # 19
'', # 20
'', # 21
'', # 22
'', # 23
'', # 24
'', # 25
'', # 26
'', # 27
'', # 28
'', # 29
'', # 30
'', # 31
'', # 32
'', # 33
'', # 34
'', # 35
'', # 36
'', # 37
'', # 38
'', # 39
'', # 40
'', # 41
'', # 42
'', # 43
'', # 44
'', # 45
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 144,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT09/CH10610 ._CH', # 00
'ED6_DT09/CH10611 ._CH', # 01
'ED6_DT09/CH10080 ._CH', # 02
'ED6_DT09/CH10081 ._CH', # 03
'ED6_DT09/CH10120 ._CH', # 04
'ED6_DT09/CH10121 ._CH', # 05
'ED6_DT09/CH10140 ._CH', # 06
'ED6_DT09/CH10141 ._CH', # 07
'ED6_DT09/CH10620 ._CH', # 08
'ED6_DT09/CH10621 ._CH', # 09
'ED6_DT09/CH10600 ._CH', # 0A
'ED6_DT09/CH10601 ._CH', # 0B
'ED6_DT09/CH10400 ._CH', # 0C
'ED6_DT09/CH10401 ._CH', # 0D
)
AddCharChipPat(
'ED6_DT09/CH10610P._CP', # 00
'ED6_DT09/CH10611P._CP', # 01
'ED6_DT09/CH10080P._CP', # 02
'ED6_DT09/CH10081P._CP', # 03
'ED6_DT09/CH10120P._CP', # 04
'ED6_DT09/CH10121P._CP', # 05
'ED6_DT09/CH10140P._CP', # 06
'ED6_DT09/CH10141P._CP', # 07
'ED6_DT09/CH10620P._CP', # 08
'ED6_DT09/CH10621P._CP', # 09
'ED6_DT09/CH10600P._CP', # 0A
'ED6_DT09/CH10601P._CP', # 0B
'ED6_DT09/CH10400P._CP', # 0C
'ED6_DT09/CH10401P._CP', # 0D
)
DeclNpc(
X = -53110,
Z = 0,
Y = -14880,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 22050,
Z = -10,
Y = 35970,
Direction = 0,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0xFF,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 0,
Unknown2 = 0,
Unknown3 = 12,
ChipIndex = 0xC,
NpcIndex = 0x1C5,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclMonster(
X = -30730,
Z = -20,
Y = 28880,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -27870,
Z = 80,
Y = 46700,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -14660,
Z = -80,
Y = 32810,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -24060,
Z = 70,
Y = -7910,
Unknown_0C = 180,
Unknown_0E = 2,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20B,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -10150,
Z = 10,
Y = -20920,
Unknown_0C = 180,
Unknown_0E = 4,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20C,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 13270,
Z = -30,
Y = -23320,
Unknown_0C = 180,
Unknown_0E = 8,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20A,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 15990,
Z = -10,
Y = 1090,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 31250,
Z = 30,
Y = -6140,
Unknown_0C = 180,
Unknown_0E = 2,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20B,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 39280,
Z = 20,
Y = -27110,
Unknown_0C = 180,
Unknown_0E = 4,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20C,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 23510,
Z = 40,
Y = -36040,
Unknown_0C = 180,
Unknown_0E = 6,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x21B,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 10940,
Z = 10,
Y = -46410,
Unknown_0C = 180,
Unknown_0E = 6,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x21B,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -10090,
Z = 10,
Y = -39590,
Unknown_0C = 180,
Unknown_0E = 6,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20D,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -25680,
Z = -40,
Y = -25220,
Unknown_0C = 180,
Unknown_0E = 8,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20A,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -29830,
Z = -90,
Y = -39580,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -30430,
Z = -80,
Y = -45390,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -21410,
Z = -50,
Y = -50290,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -22480,
Z = 30,
Y = -37550,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x20E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -30730,
Z = -20,
Y = 28880,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -27870,
Z = 80,
Y = 46700,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -14660,
Z = -80,
Y = 32810,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -24060,
Z = 70,
Y = -7910,
Unknown_0C = 180,
Unknown_0E = 2,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34B,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -10150,
Z = 10,
Y = -20920,
Unknown_0C = 180,
Unknown_0E = 4,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34C,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 13270,
Z = -30,
Y = -23320,
Unknown_0C = 180,
Unknown_0E = 8,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34A,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 15990,
Z = -10,
Y = 1090,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 31250,
Z = 30,
Y = -6140,
Unknown_0C = 180,
Unknown_0E = 2,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34B,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 39280,
Z = 20,
Y = -27110,
Unknown_0C = 180,
Unknown_0E = 4,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34C,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 23510,
Z = 40,
Y = -36040,
Unknown_0C = 180,
Unknown_0E = 6,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x35B,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = 10940,
Z = 10,
Y = -46410,
Unknown_0C = 180,
Unknown_0E = 6,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x35B,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -10090,
Z = 10,
Y = -39590,
Unknown_0C = 180,
Unknown_0E = 6,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34D,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -25680,
Z = -40,
Y = -25220,
Unknown_0C = 180,
Unknown_0E = 8,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34A,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -29830,
Z = -90,
Y = -39580,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -30430,
Z = -80,
Y = -45390,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -21410,
Z = -50,
Y = -50290,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclMonster(
X = -22480,
Z = 30,
Y = -37550,
Unknown_0C = 180,
Unknown_0E = 10,
Unknown_10 = 1,
Unknown_11 = 1,
Unknown_12 = 0xFFFFFFFF,
BattleIndex = 0x34E,
Unknown_18 = 0,
Unknown_1A = 0,
)
DeclActor(
TriggerX = -17270,
TriggerZ = 0,
TriggerY = 42460,
TriggerRange = 1400,
ActorX = -17270,
ActorZ = 0,
ActorY = 42460,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 3,
Unknown_22 = 0,
)
DeclActor(
TriggerX = 17230,
TriggerZ = 10,
TriggerY = -7630,
TriggerRange = 1000,
ActorX = 17890,
ActorZ = 10,
ActorY = -7630,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 6,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -12960,
TriggerZ = -20,
TriggerY = 45920,
TriggerRange = 1000,
ActorX = -12550,
ActorZ = -20,
ActorY = 46450,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 4,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -24020,
TriggerZ = -10,
TriggerY = -43750,
TriggerRange = 1000,
ActorX = -24580,
ActorZ = -10,
ActorY = -43380,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 5,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_5C2", # 00, 0
"Function_1_5C3", # 01, 1
"Function_2_734", # 02, 2
"Function_3_74A", # 03, 3
"Function_4_7E2", # 04, 4
"Function_5_A19", # 05, 5
"Function_6_C36", # 06, 6
)
def Function_0_5C2(): pass
label("Function_0_5C2")
Return()
# Function_0_5C2 end
def Function_1_5C3(): pass
label("Function_1_5C3")
OP_16(0x2, 0xFA0, 0xFFFE0048, 0xFFFE13D0, 0x30030)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA7, 2)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAB, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_5ED")
OP_B1("R3103_y")
Jump("loc_5F6")
label("loc_5ED")
OP_B1("R3103_n")
label("loc_5F6")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA7, 2)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAB, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_65A")
SetChrFlags(0xB, 0x80)
SetChrFlags(0xC, 0x80)
SetChrFlags(0xD, 0x80)
SetChrFlags(0xE, 0x80)
SetChrFlags(0xF, 0x80)
SetChrFlags(0x10, 0x80)
SetChrFlags(0x11, 0x80)
SetChrFlags(0x12, 0x80)
SetChrFlags(0x13, 0x80)
SetChrFlags(0x14, 0x80)
SetChrFlags(0x15, 0x80)
SetChrFlags(0x16, 0x80)
SetChrFlags(0x17, 0x80)
SetChrFlags(0x18, 0x80)
SetChrFlags(0x19, 0x80)
SetChrFlags(0x1A, 0x80)
SetChrFlags(0x1B, 0x80)
Jump("loc_6AF")
label("loc_65A")
SetChrFlags(0x1C, 0x80)
SetChrFlags(0x1D, 0x80)
SetChrFlags(0x1E, 0x80)
SetChrFlags(0x1F, 0x80)
SetChrFlags(0x20, 0x80)
SetChrFlags(0x21, 0x80)
SetChrFlags(0x22, 0x80)
SetChrFlags(0x23, 0x80)
SetChrFlags(0x24, 0x80)
SetChrFlags(0x25, 0x80)
SetChrFlags(0x26, 0x80)
SetChrFlags(0x27, 0x80)
SetChrFlags(0x28, 0x80)
SetChrFlags(0x29, 0x80)
SetChrFlags(0x2A, 0x80)
SetChrFlags(0x2B, 0x80)
SetChrFlags(0x2C, 0x80)
label("loc_6AF")
OP_64(0x0, 0x1)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC0, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_EXEC_OP, "OP_29(0x2F, 0x0, 0x4)"), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_EXEC_OP, "OP_29(0x2F, 0x1, 0x8)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_6CF")
OP_65(0x0, 0x1)
label("loc_6CF")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xB4, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_6E1")
OP_6F(0x0, 0)
Jump("loc_6E8")
label("loc_6E1")
OP_6F(0x0, 60)
label("loc_6E8")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xB4, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_6FA")
OP_6F(0x1, 0)
Jump("loc_701")
label("loc_6FA")
OP_6F(0x1, 60)
label("loc_701")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xB4, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_713")
OP_6F(0x2, 0)
Jump("loc_71A")
label("loc_713")
OP_6F(0x2, 60)
label("loc_71A")
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x0), scpexpr(EXPR_END)),
(100, "loc_726"),
(SWITCH_DEFAULT, "loc_733"),
)
label("loc_726")
ClearChrFlags(0x8, 0x1)
ClearChrFlags(0x9, 0x1)
Jump("loc_733")
label("loc_733")
Return()
# Function_1_5C3 end
def Function_2_734(): pass
label("Function_2_734")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_749")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_734")
label("loc_749")
Return()
# Function_2_734 end
def Function_3_74A(): pass
label("Function_3_74A")
OP_22(0x11, 0x0, 0x64)
FadeToDark(300, 0, 100)
SetChrName("")
SetMessageWindowPos(-1, -1, -1, -1)
AnonymousTalk( #0
"\x07\x00Found a package wrapped in oil paper.\x02",
)
CloseMessageWindow()
OP_56(0x0)
AnonymousTalk( #1
"\x07\x00Inside was \x07\x02Hertz's Adventure II\x07\x00.\x02",
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(72, 320, 56, 3)
OP_3E(0x344, 1)
OP_64(0x0, 0x1)
OP_28(0x2F, 0x1, 0x8)
TalkEnd(0xFF)
Return()
# Function_3_74A end
# Decompiled scena event: treasure chest #0 (Sapphire Talisman) guarded by a
# monster ambush.  Flat opcode list produced by a decompiler; label()/Jc()/
# Jump() calls encode the original bytecode control flow.
def Function_4_7E2(): pass
label("Function_4_7E2")
SetMapFlags(0x8000000)
# If the "chest looted" flag (0xB4, 2) is already set (it is set via
# OP_A2(0x5A2) == 0xB4*8+2 after a successful pickup below), branch to the
# empty-chest flavour text at loc_9C5.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xB4, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_9C5")
OP_22(0x2B, 0x0, 0x64)
OP_70(0x0, 0x3C)  # presumably the chest-opening animation for object 0x0 — TODO confirm
Sleep(500)
# Ambush already cleared?  Flag (0xB4, 3) is set via OP_A2(0x5A3) after the
# battle is won; if set, skip straight to the item pickup at loc_8E0.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xB4, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_8E0")
OP_9F(0xA, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
SetChrPos(0xA, -12550, 1500, 46450, 320)
TurnDirection(0xA, 0x0, 0)

# Worker threads on chr 0xA: move it toward the player (OP_8F) while
# presumably fading it in (OP_9F colour/alpha args) — TODO confirm.
def lambda_831():
    OP_8F(0xFE, 0xFFFFCEFA, 0x3E8, 0xB572, 0x4B0, 0x0)
    ExitThread()

QueueWorkItem(0xA, 1, lambda_831)

def lambda_84C():
    OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x4B0)
    ExitThread()

QueueWorkItem(0xA, 2, lambda_84C)
ClearChrFlags(0xA, 0x80)
AnonymousTalk( #2
    "\x07\x05Monsters appeared!\x07\x00\x02",
)
CloseMessageWindow()
OP_56(0x0)
# Encounter selection: depending on flags (0xA7, 2) / (0xAB, 0), fight
# battle id 0x357, otherwise 0x217.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA7, 2)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAB, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_895")
Battle(0x357, 0x0, 0x0, 0x0, 0xFF)
Jump("loc_8A2")

label("loc_895")
Battle(0x217, 0x0, 0x0, 0x0, 0xFF)

label("loc_8A2")
SetChrFlags(0xA, 0x80)
# Dispatch on the battle result (value index 3): presumably 0 = won,
# 2 = escaped/cancelled, 1 = lost — TODO confirm result-code meanings.
Switch(
    (scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
    (0, "loc_8BB"),
    (2, "loc_8CD"),
    (1, "loc_8DD"),
    (SWITCH_DEFAULT, "loc_8E0"),
)

label("loc_8BB")
OP_A2(0x5A3)  # set flag (0xB4, 3): ambush cleared, skip it on re-entry
OP_6F(0x0, 60)
Sleep(500)
Jump("loc_8E0")

label("loc_8CD")
OP_6F(0x0, 0)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()

label("loc_8DD")
OP_B4(0x0)
Return()

label("loc_8E0")
# OP_3E(item, 1) attempts to add item 0x142; on failure fall through to the
# "inventory full" message at loc_93E.
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x142, 1)"), scpexpr(EXPR_END)), "loc_93E")
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
AnonymousTalk( #3
    "\x07\x00Found \x07\x02Sapphire Talisman\x07\x00.\x02",
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
OP_A2(0x5A2)  # set flag (0xB4, 2): chest #0 is now empty
Jump("loc_9C2")

label("loc_93E")
FadeToDark(300, 0, 100)
AnonymousTalk( #4
    (
        "\x07\x00Found \x07\x02Sapphire Talisman\x07\x00 in chest.\x01",
        "Inventory full so gave up \x07\x02Sapphire Talisman\x07\x00.\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x0, 60)
OP_70(0x0, 0x0)

label("loc_9C2")
Jump("loc_A0B")

# Chest already looted: flavour text only.
label("loc_9C5")
FadeToDark(300, 0, 100)
AnonymousTalk( #5
    "\x07\x05The chest is...you guessed it...empty.\x07\x00\x02",
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0x93)

label("loc_A0B")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_4_7E2 end
# Decompiled scena event: treasure chest #1 (Long Barrel) guarded by a
# monster ambush.  Flat opcode list produced by a decompiler; label()/Jc()/
# Jump() calls encode the original bytecode control flow.
def Function_5_A19(): pass
label("Function_5_A19")
SetMapFlags(0x8000000)
# If the "chest looted" flag (0xB4, 4) is already set (set via
# OP_A2(0x5A4) == 0xB4*8+4 after pickup below), branch to the empty-chest
# text at loc_BEA.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xB4, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_BEA")
OP_22(0x2B, 0x0, 0x64)
OP_70(0x1, 0x3C)  # presumably the chest-opening animation for object 0x1 — TODO confirm
Sleep(500)
# Ambush already cleared?  Flag (0xB4, 5) is set via OP_A2(0x5A5) after the
# battle is won; if set, skip straight to the item pickup at loc_B17.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xB4, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_B17")
OP_9F(0xA, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
SetChrPos(0xA, -24580, 1500, -43380, 320)
TurnDirection(0xA, 0x0, 0)

# Worker threads on chr 0xA: move it toward the player (OP_8F) while
# presumably fading it in (OP_9F colour/alpha args) — TODO confirm.
def lambda_A68():
    OP_8F(0xFE, 0xFFFF9FFC, 0x3E8, 0xFFFF568C, 0x4B0, 0x0)
    ExitThread()

QueueWorkItem(0xA, 1, lambda_A68)

def lambda_A83():
    OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x4B0)
    ExitThread()

QueueWorkItem(0xA, 2, lambda_A83)
ClearChrFlags(0xA, 0x80)
AnonymousTalk( #6
    "\x07\x05Monsters appeared!\x07\x00\x02",
)
CloseMessageWindow()
OP_56(0x0)
# Encounter selection: depending on flags (0xA7, 2) / (0xAB, 0), fight
# battle id 0x357, otherwise 0x217.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA7, 2)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xAB, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_ACC")
Battle(0x357, 0x0, 0x0, 0x0, 0xFF)
Jump("loc_AD9")

label("loc_ACC")
Battle(0x217, 0x0, 0x0, 0x0, 0xFF)

label("loc_AD9")
SetChrFlags(0xA, 0x80)
# Dispatch on the battle result (value index 3): presumably 0 = won,
# 2 = escaped/cancelled, 1 = lost — TODO confirm result-code meanings.
Switch(
    (scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
    (0, "loc_AF2"),
    (2, "loc_B04"),
    (1, "loc_B14"),
    (SWITCH_DEFAULT, "loc_B17"),
)

label("loc_AF2")
OP_A2(0x5A5)  # set flag (0xB4, 5): ambush cleared, skip it on re-entry
OP_6F(0x1, 60)
Sleep(500)
Jump("loc_B17")

label("loc_B04")
OP_6F(0x1, 0)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()

label("loc_B14")
OP_B4(0x0)
Return()

label("loc_B17")
# OP_3E(item, 1) attempts to add item 0x14F; on failure fall through to the
# "inventory full" message at loc_B6F.
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x14F, 1)"), scpexpr(EXPR_END)), "loc_B6F")
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
AnonymousTalk( #7
    "\x07\x00Found \x07\x02Long Barrel\x07\x00.\x02",
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
OP_A2(0x5A4)  # set flag (0xB4, 4): chest #1 is now empty
Jump("loc_BE7")

label("loc_B6F")
FadeToDark(300, 0, 100)
AnonymousTalk( #8
    (
        "\x07\x00Found \x07\x02Long Barrel\x07\x00 in chest.\x01",
        "Inventory full so gave up \x07\x02Long Barrel\x07\x00.\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x1, 60)
OP_70(0x1, 0x0)

label("loc_BE7")
Jump("loc_C28")

# Chest already looted: flavour text only.
label("loc_BEA")
FadeToDark(300, 0, 100)
AnonymousTalk( #9
    "\x07\x05The chest is oh so very empty.\x07\x00\x02",
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0x94)

label("loc_C28")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_5_A19 end
# Decompiled scena event: treasure chest #2 (Teara Balm), no ambush.
# Flat opcode list produced by a decompiler; label()/Jc()/Jump() calls
# encode the original bytecode control flow.
def Function_6_C36(): pass
label("Function_6_C36")
SetMapFlags(0x8000000)
# If the "chest looted" flag (0xB4, 1) is already set (set via
# OP_A2(0x5A1) == 0xB4*8+1 after pickup below), branch to the empty-chest
# text at loc_D28.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xB4, 1)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_D28")
OP_22(0x2B, 0x0, 0x64)
OP_70(0x2, 0x3C)  # presumably the chest-opening animation for object 0x2 — TODO confirm
Sleep(500)
# OP_3E(item, 1) attempts to add item 0x1F6; on failure fall through to the
# "inventory full" message at loc_CAD.
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x1F6, 1)"), scpexpr(EXPR_END)), "loc_CAD")
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
AnonymousTalk( #10
    "\x07\x00Found \x07\x02Teara Balm\x07\x00.\x02",
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
OP_A2(0x5A1)  # set flag (0xB4, 1): chest #2 is now empty
Jump("loc_D25")

label("loc_CAD")
FadeToDark(300, 0, 100)
SetChrName("")
AnonymousTalk( #11
    (
        "\x07\x00Found \x07\x02Teara Balm\x07\x00 in chest.\x01",
        "Inventory full so gave up \x07\x02Teara Balm\x07\x00.\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x2, 60)
OP_70(0x2, 0x0)

label("loc_D25")
Jump("loc_D69")

# Chest already looted: flavour text only.
label("loc_D28")
FadeToDark(300, 0, 100)
AnonymousTalk( #12
    "\x07\x05You have found: the missing link.\x07\x00\x02",
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0x95)

label("loc_D69")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_6_C36 end
# Decompiler epilogue: write the reassembled scena back out and run `main`
# inside the engine's Try() guard.
SaveToFile()
Try(main)
| [
"zj.yang@qq.com"
] | zj.yang@qq.com |
9923cd5ddfe1039cfdbe9ee05bffe6cd6681e49c | 9a42085c664730fb45267365d38df5de18ee2137 | /module_path/__init__.py | ad016d13417ade909f89a78d2bfc1ddedc0457a6 | [
"MIT"
] | permissive | justengel/module_path | 09e8a073b3013c5ea38f06791786042f1db106d0 | 2f2feedaa03f07f9a86e04cb96e6a7edc7fd30d6 | refs/heads/master | 2023-07-03T11:07:50.468178 | 2021-07-23T16:05:42 | 2021-07-23T16:05:42 | 322,051,354 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,917 | py | """
Get a modules path.
Notes:
* sys._MEIPASS - Created by pyinstaller executable. This is the directory of the executable
* If regular python run this does not exist
* If pyinstaller created a directory this is the directory that contains the executable
* If pyinstaller onefile this is "C:\\Users\\username\\AppData\\Local\\Temp\\_MEI#####" which is some temp directory.
* frame.f_code.co_filename
* In regular python run this is the absolute path of the module. "C:\\...\\check_path.py"
* If pyinstaller created a directory this is the module filename "check_path.py"
* If pyinstaller onefile this is the module filename "check_path.py"
* module.__file__ (matches frame.f_code.co_filename)
* In regular python run this is the absolute path of the module. "C:\\...\\check_path.py"
* If pyinstaller created a directory this is the module filename "check_path.py"
* If pyinstaller onefile this is the module filename "check_path.py"
* sys.executable
* If regular python run this is the path to your python.exe
* If pyinstaller created a directory this is the absolute path to the executable
* If pyinstaller onefile this is the absolute path to the executable
"""
import os
import sys
import inspect
import contextlib
try:
from importlib.resources import files, as_file
from importlib.abc import Traversable
except (ImportError, Exception):
try:
from importlib_resources import files, as_files
from importlib_resources.abc import Traversable
except (ImportError, Exception):
import inspect
from pathlib import Path
Traversable = Path
def files(module):
    """Last-resort fallback for importlib's ``files``: resolve *module*
    (a module object or a dotted name) to a :class:`pathlib.Path`.

    For a dotted string only the top-level package is imported; the
    remaining dotted components are appended as plain path segments.
    """
    if isinstance(module, str):
        if '.' in module:
            # Import the top level package and manually add a directory for each "."
            toplvl, remain = module.split('.', 1)
        else:
            toplvl, remain = module, ''
        # Get or import the module
        try:
            module = sys.modules[toplvl]
            path = Path(inspect.getfile(module))
        except (KeyError, Exception):  # NOTE(review): `Exception` here makes this a catch-all
            try:
                module = __import__(toplvl)
                path = Path(inspect.getfile(module))
            except (ImportError, Exception):
                # Not importable at all: treat the bare name itself as a path.
                module = toplvl
                path = Path(module)
        # Get the path of the module: a package resolves to its directory,
        # not its __init__.py file.
        if path.with_suffix('').name == '__init__':
            path = path.parent
        # Find the path from the top level module.  When `remain` is empty,
        # ''.split('.') yields [''] and joinpath('') is a no-op.
        for pkg in remain.split('.'):
            path = path.joinpath(pkg)
    else:
        path = Path(inspect.getfile(module))
        if path.with_suffix('').name == '__init__':
            path = path.parent
    return path
@contextlib.contextmanager
def as_file(path):
    """Last-resort fallback for importlib's ``as_file``: yield a usable
    filesystem path for *path*.

    If *path* does not exist as given (e.g. inside a frozen executable),
    retry anchored at ``sys._MEIPASS`` or the executable's directory.
    """
    p = str(path)
    if not os.path.exists(p):
        p = os.path.join(getattr(sys, '_MEIPASS', os.path.dirname(sys.executable)), str(path))
    if not os.path.exists(p):
        # NOTE(review): identical to the join above (the '' middle segment
        # is a no-op) — looks like a placeholder for a real subdirectory;
        # confirm the intended fallback location.
        p = os.path.join(getattr(sys, '_MEIPASS', os.path.dirname(sys.executable)), '', str(path))
    yield p
# Public API: the importlib-style helpers defined above plus re-exported
# os.path utilities so this module can act as a one-stop path toolbox.
__all__ = ['files', 'as_file', 'Traversable',
           'my_path', 'my_dir',
           'isfile', 'isdir', 'isabs', 'dirname', 'basename', 'join', 'exists', 'abspath', 'relpath', 'realpath',
           ]

# Convenience aliases for the common os.path predicates and manipulators.
isfile = os.path.isfile
isdir = os.path.isdir
isabs = os.path.isabs
dirname = os.path.dirname
basename = os.path.basename
join = os.path.join
exists = os.path.exists
abspath = os.path.abspath
relpath = os.path.relpath
realpath = os.path.realpath
def my_path(*args, back=1, **kwargs):
    """Return the source-file path of the module `back` frames up the stack.

    In a regular interpreter run the frame's ``co_filename`` is an existing
    absolute path and is returned unchanged.  Inside a PyInstaller build the
    recorded name is bare (e.g. ``check_path.py``), so it is re-anchored at
    ``sys._MEIPASS`` (one-file bundle temp dir) or, failing that, at the
    directory containing the executable.
    """
    caller = inspect.currentframe()
    steps = back
    while steps > 0:
        caller = caller.f_back
        steps -= 1

    filename = caller.f_code.co_filename
    if isabs(filename) and os.path.exists(filename):
        # Regular python run: the frame already records a real absolute path.
        return filename

    # Frozen executable: anchor the bare module name at the bundle directory.
    # The resulting file may not exist, but the directory should.
    bundle_dir = getattr(sys, '_MEIPASS', os.path.dirname(sys.executable))
    return os.path.join(bundle_dir, filename)
# print('===== OLD =====')
# frame = inspect.currentframe().f_back
# print('FRAME:', frame.f_code.co_filename, os.path.exists(frame.f_code.co_filename))
# try:
# print('MODULE:', inspect.getmodule(frame).__file__, os.path.exists(inspect.getmodule(frame).__file__))
# except (AttributeError, Exception):
# pass
# try:
# print('MEIPASS:', getattr(sys, '_MEIPASS', 'NONE'), os.path.exists(getattr(sys, '_MEIPASS', 'NONE')))
# except (AttributeError, Exception):
# pass
# try:
# print('EXE:', sys.executable, os.path.exists(sys.executable))
# except (AttributeError, Exception):
# pass
# # try:
# # return inspect.getmodule(frame).__file__
# # except (AttributeError, Exception):
# # directory = getattr(sys, '_MEIPASS', os.path.dirname(sys.executable))
# # return os.path.join(directory, frame.f_code.co_filename)
def my_dir(*args, back=1, **kwargs):
    """Return the directory of the module that called this function.

    Args:
        back (int)[1]: Number of frames to step back.
            By default this is 1 so the module that calls this function is used.
    """
    # One extra frame compensates for this wrapper itself.
    caller_file = my_path(back=back + 1)
    return os.path.dirname(caller_file)
| [
"jtengel08@gmail.com"
] | jtengel08@gmail.com |
a60e9fb88399b262c87a1ba767671f6af8aeb26d | bbb36e65c62fa824807b2f85a20e491140338f72 | /src/infrastructure/django_framework/camera_ctrl/migrations/0005_remove_generalsettings_send_email_on_sync_error.py | fa4554a74f7655481cfa3177d854018ebf3c3124 | [] | no_license | TermanEmil/CameraController | 0d4338a3365431efb0b28dfb409b6a72c0d256c6 | c996868be9cfb6e6e44ae90d77346e7f700d177c | refs/heads/master | 2023-02-18T07:59:21.876482 | 2022-12-29T14:37:01 | 2022-12-29T14:37:01 | 195,222,744 | 3 | 0 | null | 2023-02-15T20:21:28 | 2019-07-04T10:41:15 | Python | UTF-8 | Python | false | false | 356 | py | # Generated by Django 2.2.4 on 2019-10-06 21:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``send_email_on_sync_error``
    field from the ``GeneralSettings`` model."""

    # Must be applied after the migration that introduced GeneralSettings.
    dependencies = [
        ('camera_ctrl', '0004_generalsettings'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='generalsettings',
            name='send_email_on_sync_error',
        ),
    ]
| [
"terman.emil@gmail.com"
] | terman.emil@gmail.com |
8c5a0f3c69fe151453f691e54a452340bee2cdda | 9d57216d173cc2c5ba5fba6d5845c01c82dccf8f | /pytransform3d/transformations/__init__.py | 0f7ca5acef3d20b65ae6f350840b19555aa39f46 | [
"BSD-3-Clause"
] | permissive | mhirak/pytransform3d | e34b02a435cf352f1da111f0c7d5e7ab58e9092e | 8f3065bfea913953656cf772efbd34256930172b | refs/heads/master | 2023-08-31T21:20:43.586968 | 2021-09-13T08:02:07 | 2021-09-13T08:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,515 | py | """Transformations in three dimensions - SE(3).
See :doc:`transformations` for more information.
"""
from ._utils import (
check_transform, check_pq, check_screw_parameters, check_screw_axis,
check_exponential_coordinates, check_screw_matrix, check_transform_log,
check_dual_quaternion)
from ._conversions import (
transform_from, rotate_transform, translate_transform,
pq_from_transform, transform_from_pq,
transform_from_transform_log, transform_log_from_transform,
transform_from_exponential_coordinates,
exponential_coordinates_from_transform,
screw_parameters_from_screw_axis, screw_axis_from_screw_parameters,
exponential_coordinates_from_screw_axis,
screw_axis_from_exponential_coordinates,
transform_log_from_exponential_coordinates,
exponential_coordinates_from_transform_log,
screw_matrix_from_screw_axis, screw_axis_from_screw_matrix,
transform_log_from_screw_matrix, screw_matrix_from_transform_log,
dual_quaternion_from_transform, transform_from_dual_quaternion,
screw_parameters_from_dual_quaternion,
dual_quaternion_from_screw_parameters,
dual_quaternion_from_pq, pq_from_dual_quaternion,
adjoint_from_transform, norm_exponential_coordinates)
from ._transform_operations import (
invert_transform, scale_transform, concat,
vector_to_point, vectors_to_points, vector_to_direction,
vectors_to_directions, transform)
from ._dual_quaternion_operations import (
dq_q_conj, dq_conj, concatenate_dual_quaternions, dual_quaternion_sclerp,
dual_quaternion_power, dq_prod_vector)
from ._random import random_transform, random_screw_axis
from ._plot import plot_transform, plot_screw
from ._testing import (
assert_transform, assert_screw_parameters_equal,
assert_unit_dual_quaternion_equal, assert_unit_dual_quaternion)
# Public API of the transformations subpackage, re-exported from the private
# submodules imported above.  Grouped by theme: validation, representation
# conversions, transform/dual-quaternion operations, random generation,
# plotting, and testing helpers.
__all__ = [
    # input validation
    "check_transform", "check_pq", "check_screw_parameters",
    "check_screw_axis", "check_exponential_coordinates", "check_screw_matrix",
    "check_transform_log", "check_dual_quaternion",
    # conversions between representations
    "transform_from", "rotate_transform", "translate_transform",
    "pq_from_transform", "transform_from_pq",
    "transform_from_transform_log", "transform_log_from_transform",
    "transform_from_exponential_coordinates",
    "exponential_coordinates_from_transform",
    "screw_parameters_from_screw_axis", "screw_axis_from_screw_parameters",
    "exponential_coordinates_from_screw_axis",
    "screw_axis_from_exponential_coordinates",
    "transform_log_from_exponential_coordinates",
    "exponential_coordinates_from_transform_log",
    "screw_matrix_from_screw_axis", "screw_axis_from_screw_matrix",
    "transform_log_from_screw_matrix", "screw_matrix_from_transform_log",
    "dual_quaternion_from_transform", "transform_from_dual_quaternion",
    "screw_parameters_from_dual_quaternion",
    "dual_quaternion_from_screw_parameters",
    "dual_quaternion_from_pq", "pq_from_dual_quaternion",
    "adjoint_from_transform",
    "norm_exponential_coordinates",
    # transform operations
    "invert_transform", "scale_transform", "concat",
    "vector_to_point", "vectors_to_points", "vector_to_direction",
    "vectors_to_directions", "transform",
    # random generation
    "random_transform", "random_screw_axis",
    # dual quaternion operations
    "dq_q_conj", "dq_conj", "concatenate_dual_quaternions",
    "dual_quaternion_sclerp", "dual_quaternion_power", "dq_prod_vector",
    # plotting and testing
    "plot_transform", "plot_screw",
    "assert_transform", "assert_screw_parameters_equal",
    "assert_unit_dual_quaternion_equal", "assert_unit_dual_quaternion"
]
| [
"afabisch@googlemail.com"
] | afabisch@googlemail.com |
c0f6e796c04e5b68ea5f4626c0ecd09334120e57 | 37c243e2f0aab70cbf38013d1d91bfc3a83f7972 | /pp7TeV/HeavyIonsAnalysis/JetAnalysis/python/jets/ak7PFJetSequence_pp_mix_cff.py | d5943280b61cf90b5da4cc7c4967ef1fb51e3072 | [] | no_license | maoyx/CMSWork | 82f37256833cbe4c60cb8df0b4eb68ceb12b65e7 | 501456f3f3e0f11e2f628b40e4d91e29668766d5 | refs/heads/master | 2021-01-01T18:47:55.157534 | 2015-03-12T03:47:15 | 2015-03-12T03:47:15 | 10,951,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,380 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
# PAT matching/correction/analysis chain for anti-kT R=0.7 particle-flow
# jets ("ak7PF") — pp configuration, "mix" (embedded MC) variant.

# Match reconstructed ak7PF jets to heavy-ion generator-level jets.
ak7PFmatch = patJetGenJetMatch.clone(
    src = cms.InputTag("ak7PFJets"),
    matched = cms.InputTag("ak7HiGenJets")
)

# Match reconstructed jets to their generator partons.
ak7PFparton = patJetPartonMatch.clone(src = cms.InputTag("ak7PFJets"),
                                      matched = cms.InputTag("genParticles")
                                      )

# Jet energy corrections: L2Relative + L3Absolute with the AK7PF payload.
ak7PFcorr = patJetCorrFactors.clone(
    useNPV = False,
    # primaryVertices = cms.InputTag("hiSelectedVertex"),
    levels = cms.vstring('L2Relative','L3Absolute'),
    src = cms.InputTag("ak7PFJets"),
    payload = "AK7PF_generalTracks"
)

# Slim patJets: keep only corrections and gen matching; b-tagging, jet ID,
# charge, track association and candidate embedding are all switched off.
ak7PFpatJets = patJets.clone(jetSource = cms.InputTag("ak7PFJets"),
                             jetCorrFactorsSource = cms.VInputTag(cms.InputTag("ak7PFcorr")),
                             genJetMatch = cms.InputTag("ak7PFmatch"),
                             genPartonMatch = cms.InputTag("ak7PFparton"),
                             jetIDMap = cms.InputTag("ak7PFJetID"),
                             addBTagInfo = False,
                             addTagInfos = False,
                             addDiscriminators = False,
                             addAssociatedTracks = False,
                             addJetCharge = False,
                             addJetID = False,
                             getJetMCFlavour = False,
                             addGenPartonMatch = True,
                             addGenJetMatch = True,
                             embedGenJetMatch = True,
                             embedGenPartonMatch = True,
                             embedCaloTowers = False,
                             embedPFCandidates = False
                             )

# Ntuple producer: MC mode (fillGenJets/isMC True); event info taken from
# the embedded "hiSignal" process.
ak7PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("ak7PFpatJets"),
                                              genjetTag = 'ak7HiGenJets',
                                              rParam = 0.7,
                                              matchJets = cms.untracked.bool(False),
                                              matchTag = 'patJets',
                                              pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
                                              trackTag = cms.InputTag("generalTracks"),
                                              fillGenJets = True,
                                              isMC = True,
                                              genParticles = cms.untracked.InputTag("genParticles"),
                                              eventInfoTag = cms.InputTag("hiSignal")
                                              )

# MC sequence: gen matching -> corrections -> patJets -> analyzer.
ak7PFJetSequence_mc = cms.Sequence(
    ak7PFmatch
    *
    ak7PFparton
    *
    ak7PFcorr
    *
    ak7PFpatJets
    *
    ak7PFJetAnalyzer
)

# Data sequence: no gen matching steps.
ak7PFJetSequence_data = cms.Sequence(ak7PFcorr
                                     *
                                     ak7PFpatJets
                                     *
                                     ak7PFJetAnalyzer
                                     )

# JEC-study and mixed-event variants reuse the MC sequence.
ak7PFJetSequence_jec = ak7PFJetSequence_mc
ak7PFJetSequence_mix = ak7PFJetSequence_mc

# This file provides the "mix" flavour of the sequence.
ak7PFJetSequence = cms.Sequence(ak7PFJetSequence_mix)
| [
"yaxian.mao@cern.ch"
] | yaxian.mao@cern.ch |
7e59014221dd7e327050963256603c05eaca9fd4 | e254c72d3fd11306c8625c5d8ad8ac394eabc6c6 | /04.beautifulSoup/BeautifulSoup02/main6.py | e54aadb69a107353f55b1bc1fb95d2b8f5a1ec93 | [] | no_license | Edward83528/crawlerToMachinLearningAndBot | 87c7ea92779b949ad5015612a4e70275becab480 | 82818137b517f4c5a856535f83a8cb8b211da8aa | refs/heads/master | 2022-11-06T19:41:20.473933 | 2020-07-04T14:01:07 | 2020-07-04T14:01:07 | 268,072,162 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,643 | py | #coding:utf-8
#65001
import urllib.request
import json
import codecs
import sys
import argparse as ap
import time
import datetime
import requests
from bs4 import BeautifulSoup as bs
from urllib.parse import quote
#python main.py 八仙塵爆 2015-06-27 2015-08-24 1
#def argParse():
# parser=ap.ArgumentParser(description='Liberty Time Net Crawler')
# parser.add_argument("keyword", help="Serch Keyword")
# parser.add_argument("start_date", help="Start (2017-01-01)")
# parser.add_argument("end_date", help="End (2017-01-02)")
# parser.add_argument("pages", help="Pages")
# return parser.parse_args()
#args=argParse()
#keyword = quote(args.keyword)
#start_date = args.start_date
#end_date = args.end_date
#pages = args.pages
# Hard-coded search parameters (the argparse version above is commented out):
# URL-quoted query keyword, inclusive YYYY-MM-DD date range, and the number
# of result pages to crawl.
keyword = quote('八仙塵爆')
start_date = '2015-06-27'
end_date = '2015-08-24'
pages = '1'
def start_requests():
    """Build one LTN search URL per requested page and crawl each of them.

    Relies on the module-level ``keyword`` (already URL-quoted),
    ``start_date``/``end_date`` in ``YYYY-MM-DD`` form, and ``pages``.
    Prints an error and does nothing when a date is malformed.
    """
    start_parts = start_date.split("-")
    end_parts = end_date.split("-")
    if len(start_parts) != 3 or len(end_parts) != 3:
        print("Data format error.")
        return

    s_year, s_month, s_day = start_parts
    e_year, e_month, e_day = end_parts
    base = ('http://news.ltn.com.tw/search?keyword=' + keyword +
            '&conditions=and&SYear=' + s_year + '&SMonth=' + s_month +
            '&SDay=' + s_day + '&EYear=' + e_year + '&EMonth=' + e_month +
            '&EDay=' + e_day + '&page=')

    for page in range(1, int(pages) + 1):
        url = base + str(page)
        print(url)
        parseLtnNews(url)
        # Be polite to the server between page fetches.
        time.sleep(0.5)
def request_uri(uri):
    """Fetch *uri* with a desktop-browser User-Agent and return the body text."""
    headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
    session = requests.session()
    response = session.get(uri, headers=headers)
    return response.text
def parseLtnNews(uri):
    """Parse one LTN search-result page and append one record per article
    to the module-level ``items`` list.

    Each record carries the article title, absolute link, summary body,
    post date (``YYYY-MM-DD``) and today's date as ``updatetime``.

    Bug fixed: the original reassigned ``postdate`` and ``body`` to a bare
    string inside the span/li loops, so the final ``while`` loop iterated
    over the *characters* of the last date string and indexed character by
    character into the last body, emitting garbage records (it also appended
    a partial ``{"uri", "body", ...}`` record per <li> alongside a debug
    print).  The parallel values are now accumulated in lists and zipped.
    """
    html_data = request_uri(uri)
    soup = bs(html_data, 'html.parser')
    for ul_soup in soup.findAll('ul', attrs={"id": "newslistul"}):
        # Post dates render as "YYYY-MM-DD hh:mm" inside <span>; keep the date.
        postdates = [span_soup.string.replace(" ", "")[:10]
                     for span_soup in ul_soup.findAll('span')]
        # The second <p> of every <li> holds the article summary.
        bodies = [li_soup.findAll('p')[1].getText()
                  for li_soup in ul_soup.findAll('li')]
        titles = []
        links = []
        for a_soup in ul_soup.findAll('a', attrs={"class": "tit"}):
            titles.append(a_soup.getText().strip())
            links.append('http://news.ltn.com.tw' + a_soup.get('href'))
        # zip truncates to the shortest list, so a stray extra <span>/<li>
        # cannot produce a misaligned record — assumes the lists are
        # parallel per article; verify against the live page markup.
        for title, link, body, postdate in zip(titles, links, bodies, postdates):
            items.append({
                "title": title,
                "link": link,
                "body": body,
                "postdate": postdate,
                "updatetime": datetime.datetime.now().strftime('%Y-%m-%d'),
            })
if __name__ == '__main__':
    # Crawl every requested page, then persist all collected records as a
    # single UTF-8 JSON file named after the (unquoted) search keyword.
    items = []
    start_requests()
    row_json = json.dumps(items, ensure_ascii=False)
    out_name = urllib.parse.unquote(keyword) + '.json'
    with codecs.open(out_name, 'w', encoding='utf-8') as out_file:
        out_file.write(row_json)
    print("Done")
"u0151051@gmail.com"
] | u0151051@gmail.com |
c48910b35aeb43f63ba5477826a13f4dfe3b0a88 | 27276ec746f3dcf6ca815961377b98e529338951 | /projects/demo/numpy_demo.py | 79178b40903096210dd91728e29695af46f0c963 | [] | no_license | fengyouliang/mmdetection_projects | a084281a6fcf223ac1950a5c1081226153b394b2 | 3d877624ab9b1f438c6a5c63402626cd3138b5bb | refs/heads/master | 2022-12-26T10:11:45.522474 | 2020-10-10T09:59:13 | 2020-10-10T09:59:13 | 281,071,083 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | import numpy as np
class Box:
    """Axis-aligned rectangle with integer corner coordinates.

    Coordinates are stored as ``[xmin, ymin, xmax, ymax]``.  (The original
    docstring claimed ``[xmin, xmax, ymin, ymax]``, but every method —
    see ``expand_by_delta``'s unpacking — treats the array as min-corner
    first, max-corner second.)
    """

    def __init__(self, rectangle):
        '''
        rectangle class.
        :param rectangle: a sequence of [xmin, ymin, xmax, ymax]
        '''
        # `np.int` was removed in NumPy 1.24; the builtin `int` gives the
        # same platform-default integer dtype.
        self.rec = np.array(rectangle).astype(int)

    @property
    def shape(self):
        '''
        get shape of Box.
        :return: shape of (width, height), or None when the box is inverted
            (a max corner smaller than its min corner on some axis).
        '''
        if ((self.rec[2:] - self.rec[:2]) >= 0).all():
            wh = self.rec[2:] - self.rec[:2]
            return tuple(wh)
        else:
            return

    @property
    def area(self):
        # width * height; 0 for an inverted (invalid) box.
        s = self.shape
        if s is not None:
            return np.prod(s)
        else:
            return 0

    def overlap(self, other, is_iou=True):
        '''
        Overlap between this box and *other*.
        :param other: another Box instance.
        :param is_iou: if True return intersection-over-union, otherwise
            intersection area divided by this box's own area.
        '''
        area1, area2 = self.area, other.area
        assert area1 > 0 and area2 > 0, 'rectangle area must be positive number.'
        rec1 = np.array(self.rec)
        rec2 = np.array(other.rec)
        # Intersection corners: max of the min-corners, min of the max-corners.
        top_left = np.maximum(rec1[:2], rec2[:2])
        bottom_right = np.minimum(rec1[2:], rec2[2:])
        # A disjoint pair yields an inverted Box whose area is 0.
        overlap = Box([*top_left, *bottom_right]).area
        if is_iou:
            return float(overlap) / (area1 + area2 - overlap)
        else:
            return float(overlap) / area1

    def expand_by_delta(self, delta, boundary):
        '''
        Grow the box by *delta* on every side, clamped to *boundary*.
        :param delta: margin added on each of the four sides.
        :param boundary: [xmin, ymin, xmax, ymax] clamp region.
        :return: (expanded Box, per-coordinate shift actually applied).
        '''
        xmin, ymin, xmax, ymax = self.rec
        bxmin, bymin, bxmax, bymax = boundary
        exmin = max(xmin - delta, bxmin)
        eymin = max(ymin - delta, bymin)
        exmax = min(xmax + delta, bxmax)
        eymax = min(ymax + delta, bymax)
        dt = np.array([exmin, eymin, exmax, eymax]) - self.rec
        return Box([exmin, eymin, exmax, eymax]), dt

    # def __repr__(self):
    #     print('repr')
    #     return str(self.rec)

    def __array__(self):
        # np.array(box) converts via this hook; the debug print is kept to
        # preserve the demo's observable behaviour.
        print('array')
        return self.rec
if __name__ == '__main__':
print()
a = Box([1, 2, 3, 4])
print()
print(a)
b = np.array(a)
print()
print(b)
print()
| [
"1654388696@qq.com"
] | 1654388696@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.