blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
de8dee0cbf146e884769e2cd4d828061a06f1d64
|
8b68e037fd73507132fa8a4afed14934bfd1a388
|
/18352/특정 거리의 도시 찾기.py
|
eb8b0ee194249d697607654cc9ff6e095429c9ed
|
[] |
no_license
|
Seunghan-Jung/BOJ
|
f76772d8c00dbef528b37ba7f63abd9de306bb50
|
6a93a3e75fc6500437087910d43b69ce311713f4
|
refs/heads/master
| 2023-01-08T13:08:11.405968
| 2020-11-08T11:35:31
| 2020-11-08T11:35:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
import sys
sys.stdin = open('input.txt')  # local testing: feed stdin from a file (remove for judge submission)
from collections import deque
input = sys.stdin.readline


def solution():
    """BOJ 18352: print every city whose shortest distance from X is exactly K.

    Reads N (cities), M (directed roads), K (target distance), X (start,
    1-based), then M edges, and prints the matching city numbers in
    increasing order — or -1 if none exist.
    """
    N, M, K, X = map(int, input().split())
    X -= 1  # convert start city to 0-based
    adj = [[] for _ in range(N)]
    for _ in range(M):
        u, v = map(lambda x: int(x) - 1, input().split())
        adj[u].append(v)  # directed edge u -> v
    INF = float('inf')
    dist = [INF] * N
    dist[X] = 0
    q = deque([X])
    d = 0
    # Level-order BFS: every node first reached during round d is at distance d.
    while q:
        d += 1
        for _ in range(len(q)):
            cur = q.popleft()
            for nxt in adj[cur]:  # renamed from `next`, which shadowed the builtin
                if dist[nxt] == INF:
                    q.append(nxt)
                    dist[nxt] = d
    answer = [u + 1 for u in range(N) if dist[u] == K]
    print('\n'.join(map(str, answer)) if answer else -1)


if __name__ == '__main__':
    solution()
|
[
"ajtwlstmdgks@gmail.com"
] |
ajtwlstmdgks@gmail.com
|
470170d76cb952b93cbc7050321da021ccefcb4a
|
15de0278797e0a72f096f53830bd98c52b310b4a
|
/Logic_Alarm_FileExtration.py
|
03117336b455c0829be9aff938eb7c7247bfd083
|
[] |
no_license
|
berlyne-liu/Carpals_exe
|
6e9654534159023d382b6e69695cd014579598ee
|
382932747a79ef29ce9763d2b08064c9051455dc
|
refs/heads/master
| 2020-09-09T16:36:14.254731
| 2019-11-28T18:20:10
| 2019-11-28T18:20:10
| 221,498,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,098
|
py
|
# -*- coding: utf-8 -*-
import csv
import xlrd
class Alarm_Extraction:
    """Extract tabular alarm data from TSV text, CSV and Excel files.

    Every extractor returns a (header, rows, error_text) tuple; on failure
    the header is None and the exception text is appended to error_text.
    User-facing error strings are intentionally kept in Chinese.
    """
    def textExtraction(self, filepath):
        """Parse a tab-separated text file into (header, rows, errors).

        The header is read from the file's 4th line; rows whose field count
        does not match the header are reported in the error text instead.
        """
        container = []
        error = "存在以下异常:\n"  # "The following anomalies exist:"
        try:
            with open(filepath, 'r') as file:
                file_list = file.readlines()
                # Header lives on line 4 of the file (index 3).
                t_header = file_list[3].strip('\n').split('\t')
                column_h = len(t_header)
                for row in range(len(file_list)):
                    package = file_list[row].strip('\n').split('\t')
                    # NOTE(review): rows 1-3 (above the header) are also kept
                    # whenever their field count happens to match — confirm
                    # this is intended.
                    if row > 0 and len(package) == column_h:
                        container.append(package)
                    else:
                        # "Line %s has %s fields, violating the rule!"
                        error = error + ("第%s行的字段数为%s,不符合规则!\n" % (row, len(package)))
        except Exception as e:
            error = error + str(e)
            t_header = None
        return t_header, container, error
    def csvExtraction(self, csvpath):
        """Parse a pipe-delimited CSV file into (header, rows, errors)."""
        container = []
        error = "存在以下异常:\n"
        try:
            with open(csvpath, 'r') as file:
                cr = csv.reader(file, delimiter="|", quotechar='"')
                for i, rows in enumerate(cr):
                    if i > 0:
                        container.append(rows)
                # NOTE(review): row 0 is skipped above, so container[0] is the
                # FIRST DATA ROW, not the file's header line — verify intent.
                c_header = container[0]
        except Exception as e:
            error = error + str(e)
            c_header = None
        return c_header, container, error
    def excelExtraction(self, excelpath, mode="data", _sheetName=None):
        """Read an Excel workbook via xlrd.

        mode="data"   -> (header, rows, errors) of sheet _sheetName
        mode="sheet"  -> list of sheet names
        mode="header" -> header row of sheet _sheetName
        """
        container = []
        error = "存在以下异常:\n"
        excelFile = xlrd.open_workbook(excelpath)
        if mode == "data":
            try:
                table = excelFile.sheet_by_name(_sheetName)
                r = table.nrows
                e_header = table.row_values(rowx=0, start_colx=0, end_colx=None)
                for i in range(r):
                    if i > 0:  # skip the header row
                        container.append(table.row_values(rowx=i, start_colx=0, end_colx=None))
            except Exception as e:
                error = error + str(e)
                e_header = None
            return e_header, container, error
        elif mode == "sheet":
            sheet_name = excelFile.sheet_names()
            return sheet_name
        elif mode == "header":
            table = excelFile.sheet_by_name(_sheetName)
            e_header = table.row_values(rowx=0, start_colx=0, end_colx=None)
            return e_header
    def PersonalizedFileImport(self, Filepath, ColList, _sheetName=None):
        """Return only the columns named in ColList from an Excel sheet.

        Each requested name is located in the sheet's header row, the full
        columns are pulled, then transposed into row-major lists (header
        row included). Raises ValueError if a name is missing.
        """
        _header = []
        _Coly = []
        ColIndex = []
        excelFile = xlrd.open_workbook(Filepath)
        table = excelFile.sheet_by_name(_sheetName)
        _header = table.row_values(rowx=0, start_colx=0, end_colx=None)
        # print(ColList)
        for k, v in enumerate(ColList):
            ColIndex.append(_header.index(v))
        # print(ColIndex)
        for n, v in enumerate(tuple(ColIndex)):
            _Coly.append(table.col_values(colx=v, start_rowx=0, end_rowx=None))
        # Transpose the column vectors into rows.
        _data = list(map(list, (zip(*_Coly))))
        return _data  # list type
|
[
"13750104197@139.com"
] |
13750104197@139.com
|
875a1ae3b757b65f2519b9f2a38de074fe7c90bf
|
ba3203c1b0d8ff12631c93e84027d3f3575a7fda
|
/cargar/migrations/0005_comprobante_conteo_id.py
|
3db2e6ebca73c9b8f7b1389534eb2742b391a2d9
|
[] |
no_license
|
renearteaga1/comprobantes
|
e4c27fec1a3b47301eb35b652476f668653df94c
|
1543f7cd3ddef667be2cfeacc0cbdb21135ad3c2
|
refs/heads/master
| 2022-12-13T18:24:22.429250
| 2019-06-14T17:50:39
| 2019-06-14T17:50:39
| 189,061,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
# Generated by Django 2.1.5 on 2019-06-07 13:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the required `conteo_id` FK from Comprobante to ConteoComprobante.

    `default=1` exists only so existing rows can be back-filled;
    `preserve_default=False` removes the default from the model afterwards.
    """
    dependencies = [
        ('cargar', '0004_auto_20190606_1922'),
    ]
    operations = [
        migrations.AddField(
            model_name='comprobante',
            name='conteo_id',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='cargar.ConteoComprobante'),
            preserve_default=False,
        ),
    ]
|
[
"nick@nick.com"
] |
nick@nick.com
|
78d0a66a32db94c13a12774eea9acf4c09747ef1
|
7cd594d9e34e11e2a2d12ef8f7576c7a3be13909
|
/budgetbutler/views.py
|
909679a54f50f6359f2b1be89ca1b36e9c7c6791
|
[] |
no_license
|
o-zander/budget-butler
|
e2494582cf7a41ec53ef0a3c213afa189829a588
|
74ad034e3f1db63a5b4a203d8c7e60559c9dc001
|
refs/heads/master
| 2020-04-04T16:18:28.895538
| 2014-11-13T21:57:23
| 2014-11-13T21:57:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
import datetime
from django.http import HttpResponse, JsonResponse
from django.views.generic import View, DayArchiveView, CreateView, TemplateView
from .forms import ExpenseModelForm
from .models import Expense
from .utils import get_months, get_days
class JsonView(View):
    """View whose handlers return plain data; the result is rendered as JSON.

    A handler may also return a ready HttpResponse, which is passed through
    untouched.
    """
    safe = True  # forwarded to JsonResponse; True restricts the top level to dict

    def dispatch(self, request, *args, **kwargs):
        result = super(JsonView, self).dispatch(request, *args, **kwargs)
        if not isinstance(result, HttpResponse):
            result = JsonResponse(result, safe=self.safe)
        return result
class MonthListView(TemplateView):
    """Overview page listing months grouped by year."""
    template_name = 'views/month-list-view.html'
    def get_context_data(self, **kwargs):
        # NOTE(review): the years are hard-coded; consider deriving the
        # range from the stored Expense data instead.
        years_with_months = [
            (year, get_months(year))
            for year in (2014, 2015)
        ]
        return super(MonthListView, self).get_context_data(years_with_months=years_with_months, **kwargs)
class MonthDetailView(TemplateView):
    """Calendar page for a single month with a budget figure per day."""
    template_name = 'views/month-detail-view.html'

    def get_context_data(self, **kwargs):
        """Add the month's date and (week, [(day, budget), ...]) pairs to the context."""
        date = datetime.date(int(self.kwargs.get('year')), int(self.kwargs.get('month')), 1)
        weeks_with_days = [
            (week, [(day, Expense.objects.get_budget(day)) for day in days])
            for week, days in get_days(date.year, date.month)
        ]
        # Forward **kwargs so extra context supplied by callers is preserved
        # (it was silently dropped before, unlike in MonthListView).
        return super(MonthDetailView, self).get_context_data(
            date=date, weeks_with_days=weeks_with_days, **kwargs)
class IndexView(MonthListView):
    # Landing page: same context as the month list, different template.
    template_name = 'index.html'
class ExpenseMixIn(object):
    # Shared model declaration for the expense class-based views below.
    model = Expense
class ExpenseListView(ExpenseMixIn, DayArchiveView):
    """Day archive of expenses, annotated with the day's total and budget."""
    template_name = 'views/day-detail-view.html'
    allow_empty = True
    allow_future = True
    date_field = 'date'
    month_format = '%m'

    def render_to_response(self, context, **response_kwargs):
        # Plain dict lookups replace the original lambda-unpacking trick;
        # DayArchiveView supplies both 'object_list' and 'day' in context.
        objects = context['object_list']
        date = context['day']
        context.update(total=objects.get_sum_amount(), budget=Expense.objects.get_budget(date))
        return super(ExpenseListView, self).render_to_response(context, **response_kwargs)
class ExpenseAddView(ExpenseMixIn, CreateView):
    form_class = ExpenseModelForm
    template_name = 'forms/add-expense-form.html'
    def form_valid(self, form):
        """Save the expense and answer with JSON (201) instead of a redirect."""
        self.object = form.save()
        url = self.get_success_url()
        return JsonResponse({'pk': self.object.pk, 'url': url}, status=201)
|
[
"oliver.zander@gmail.com"
] |
oliver.zander@gmail.com
|
115788d109597217ac9850d87e15f97498d0487a
|
950fb2eaca699f460bf4592ef3e35081136cb57e
|
/node_modules/node-sass/build/config.gypi
|
61a0b625d114f9a9b70764cdb95d8b0243a691dc
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
isabella232/ionic-shopping-cart
|
f60a7db8b0c35a1573e62c39150cbe2611c75bad
|
1f75937e5577325aa5b0048d4416601bdd1a8a5b
|
refs/heads/master
| 2021-12-23T06:02:01.632472
| 2017-10-31T13:26:46
| 2017-10-31T13:26:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,000
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"force_dynamic_crt": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt59l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt59l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "59",
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.57",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"want_separate_host_toolset_mkpeephole": 0,
"nodedir": "/home/ideais/.node-gyp/8.1.2",
"standalone_static_library": 1,
"libsass_ext": "",
"libsass_cflags": "",
"libsass_ldflags": "",
"libsass_library": "",
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/5.4.2 node/v8.1.2 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "1000",
"prefer_online": "",
"force": "",
"only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/ideais/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"progress": "true",
"package_lock": "true",
"https_proxy": "",
"save_prod": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/home/ideais/.nvm/versions/node/v8.1.2",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/ideais/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"prefer_offline": "",
"color": "true",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0002",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"auth_type": "legacy",
"node_version": "8.1.2",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/home/ideais/.nvm/versions/node/v8.1.2/etc/npmrc",
"init_module": "/home/ideais/.npm-init.js",
"parseable": "",
"globalignorefile": "/home/ideais/.nvm/versions/node/v8.1.2/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
|
[
"raphaelmiranda@gmail.com"
] |
raphaelmiranda@gmail.com
|
6715826fb8795709149f209138bc4ce0faf2bfee
|
d9cc44ee61ebf36151cd2ffd4f81fc29c9e8dd5c
|
/Socket_Satyam/udpsend.py
|
a57271afa3c1c8911c402736793cf055bbbd8233
|
[] |
no_license
|
Naman311/ReconSubsea
|
b51412e835624cd5784af36d5269a31f8fc3da0f
|
4535ed521b8b283f0a060926c9cccd6c77e87f98
|
refs/heads/master
| 2021-03-30T17:28:20.815482
| 2019-03-13T14:19:26
| 2019-03-13T14:19:26
| 123,790,039
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
import random
import socket
import multiprocessing

# Send UDP datagrams to localhost:8888 in an endless loop.
# NOTE(review): 'random' and 'multiprocessing' are imported but unused here.
UDP_IP="127.0.0.1"
sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# Bind the source port too, so the sender transmits from port 8888.
sock.bind(('',8888))
while True:
    data=b'sdfsds'  # fixed dummy payload
    sock.sendto(data,(UDP_IP,8888))
|
[
"36452790+satyamambast@users.noreply.github.com"
] |
36452790+satyamambast@users.noreply.github.com
|
fd8270a17dc066561a5e7c462ed7e663f0a3beb0
|
b1d52780cbc395e693c7481171c3d42ed0a11041
|
/run-backup-ptbr.py
|
c61649f03dd824a4cd67c2632064f831e8709308
|
[
"MIT"
] |
permissive
|
kaueda/MySqlBashBackup
|
be2ca058dd9448af0388acaebc568547e9ebde02
|
dfee1119dfd404142c12704e70a3720981b8a42a
|
refs/heads/master
| 2021-01-10T08:48:09.147350
| 2016-04-01T20:59:02
| 2016-04-01T20:59:02
| 55,234,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
#!/usr/bin/env python
# Back up every MySQL schema listed in the `schemas-info` table by invoking
# an external backup shell script once per row.
import sys
import os
import MySQLdb

# Exactly four arguments are required: host, user, password, database.
if (len(sys.argv) != 5):
    sys.exit("Quantidade de argumentos eh invalida.\n Usage: python run-backup.py <host> <user> <password> <database>")

db = MySQLdb.connect(host=sys.argv[1], # database server address, e.g. "localhost"
                     user=sys.argv[2], # user name, e.g. "root"
                     passwd=sys.argv[3], # database password, e.g. "root123456"
                     db=sys.argv[4]) # database name, e.g. "dbbackup-info"

# Cursor for database queries
cur = db.cursor()
cur.execute("select * from `schemas-info`")

# Create the backup for each registered database
for row in cur.fetchall():
    print("./script-backup.sh --database %s --user %s --password %s" % (row[1], row[2], row[3]))
    # NOTE(review): values read from the DB are interpolated into a shell
    # command string — command-injection risk; prefer
    # subprocess.run([...], shell=False) with an argument list.
    os.system("./script-backup.sh --database %s --user %s --password %s" % (row[1], row[2], row[3]))
db.close()
|
[
"kauesilveira1609@gmail.com"
] |
kauesilveira1609@gmail.com
|
263752926dfb7b13b863bb723d4a56ed89b685cf
|
ccdaefbc30ba1d063f40d094583f8233cdad5f25
|
/cmdbapi/serializers/HostSerializer.py
|
781d1364c32f006e776267ff10b71bb03962d61a
|
[] |
no_license
|
sxf123/sxf-devops
|
dcde26b237082fb385881daee6fa42ac78a63527
|
38d426e5e609efdfcbdd7781056186e532943101
|
refs/heads/master
| 2020-03-26T15:40:53.468317
| 2018-10-29T02:30:21
| 2018-10-29T02:30:21
| 145,057,985
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
from cmdb.models.Host import Host
from rest_framework import serializers
from cmdb.models.Cluster import Cluster
from cmdbapi.serializers.ManyRelationSerializer import ManyRelationSerializer
class HostGetSerializer(serializers.ModelSerializer):
    """DRF serializer for Host, including related IP pool strings and clusters."""
    ippool_set = serializers.StringRelatedField(many=True,required=False)
    cluster = ManyRelationSerializer(many=True)
    class Meta:
        model = Host
        fields = ("id","host_name","kernel","os","osrelease","environment","ippool_set","cluster")
    def create(self,validated_data):
        """Create a Host, then attach every cluster referenced by name."""
        host = Host.objects.create(
            host_name = validated_data.get("host_name"),
            kernel = validated_data.get("kernel"),
            os = validated_data.get("os"),
            osrelease = validated_data.get("osrelease"),
            environment = validated_data.get("environment"),
        )
        # NOTE(review): assumes "cluster" is always present and every name
        # exists — Cluster.objects.get raises DoesNotExist otherwise. Verify
        # against the serializer's validation rules.
        for cluster_name in validated_data.get("cluster"):
            host.cluster.add(Cluster.objects.get(cluster_name=cluster_name))
        return host
    def update(self,instance,validated_data):
        """Update mutable Host fields and replace the cluster relations."""
        instance.host_name = validated_data.get("host_name",instance.host_name)
        instance.kernel = validated_data.get("kernel",instance.kernel)
        instance.os = validated_data.get("os",instance.os)
        instance.osrelease = validated_data.get("osrelease",instance.osrelease)
        instance.environment = validated_data.get("environment",instance.environment)
        # Replace (not merge) the cluster set with the submitted names.
        instance.cluster.clear()
        for cluster_name in validated_data.get("cluster"):
            instance.cluster.add(Cluster.objects.get(cluster_name=cluster_name))
        instance.save()
        return instance
|
[
"songxiaofeng@zhexinit.com"
] |
songxiaofeng@zhexinit.com
|
29b539c908a4a9a6f4e32ebfd64257809c24590f
|
18d6f64e55893c5057a83552e666ecacdd60c1a4
|
/day21/solve.py
|
a20789ca457bf6b488f964eb6ce408bad5f1ae2e
|
[] |
no_license
|
turtlegraphics/advent-of-code-2020
|
bcf0c81c3f19da193483bd85436f6fecf9587b98
|
ea75f5d57f130992369d64f173af15c4310582bc
|
refs/heads/main
| 2023-02-07T09:44:08.265054
| 2020-12-25T06:14:07
| 2020-12-25T06:14:07
| 323,831,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
#
# Advent of Code 2020
# Bryan Clair
#
# Day 21
#
# NOTE(review): Python 2 source (print statements).
import sys
import re
sys.path.append("..")
import aocutils

args = aocutils.parse_args()
inputlines = [x.strip() for x in open(args.file).readlines()]

# Each input line: "<foods...> (contains <allergens...>)"
parser = re.compile(r"([ \w]+) \(contains ([ ,\w]+)\)") # or whatever

# badfood[a] = candidate foods for allergen a: the intersection of the food
# sets of every line that lists allergen a.
badfood = {}
allfood = set()
for line in inputlines:
    food_str,allergy_str = parser.match(line).groups()
    foods = set(food_str.split())
    allfood = allfood.union(foods)
    allergies = [x.strip() for x in allergy_str.split(',')]
    for a in allergies:
        if a not in badfood:
            badfood[a] = foods
        else:
            badfood[a] = badfood[a].intersection(foods)

# A food is safe if it appears in no allergen's candidate set.
safefood = set()
for f in allfood:
    good = True
    for a in badfood:
        if f in badfood[a]:
            good = False
            break
    if good:
        safefood.add(f)

# Part 1: count occurrences of safe foods across all input lines.
count = 0
for line in inputlines:
    food_str,allergy_str = parser.match(line).groups()
    foods = set(food_str.split())
    for f in foods:
        if f in safefood:
            count += 1
print 'part 1:',count

# Remove the safe foods from every allergen's candidate set.
for a in badfood:
    badfood[a] = badfood[a].difference(safefood)
    # print then solve this crap by hand. Done!
    if args.verbose > 2:
        print a,badfood[a]

# ok ok here's code to do it, written post-competition:
# Repeatedly lock in any allergen whose candidate set shrank to one food.
found = {}
while len(found) < len(badfood):
    for a in badfood:
        badfood[a] = badfood[a].difference(found.values())
        if len(badfood[a]) == 1:
            found[a] = list(badfood[a])[0]
if args.verbose > 1:
    for a in found:
        print found[a],'contains',a
# Part 2: foods joined by comma, ordered by their allergen's name.
print 'part 2:',
print ','.join([found[a] for a in sorted(found.keys())])
|
[
"bryan@slu.edu"
] |
bryan@slu.edu
|
c17a2f123731f0fb5ad75eb48b9fdb6af937bf25
|
205ed0031c1762999fde737d6b2b868c7c8444ee
|
/day4.py
|
5637d05f6895eec910a7adc0ea1eecced4f8f819
|
[] |
no_license
|
Premsaivudya/Python-Bootcamp
|
6068537cc362fc0392e616c68d3eb4283c87646c
|
74dcc5fafb22097f1f8bdfb9b359e988e3d98158
|
refs/heads/main
| 2023-06-21T22:15:08.352506
| 2021-07-20T03:50:41
| 2021-07-20T03:50:41
| 376,601,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
#Day-4 Task
# Exercise 1: chained assignment and arithmetic re-assignment.
a = b = c = 10  # the original int(10) cast around a literal int was redundant
print("Values of a,b,c are ",a,",",b,",",c)
a = int(a / 10)  # true division gives 1.0; cast back to int
b = b * 50       # int * int is already an int; no cast needed
c = c + 60
print("After Dividing, Multiplying, and Adding")
print("The values of a,b,c are",a,",",b,",",c)
# Exercise 2: strings are immutable, so replace() returns a new string.
name = "abcdef"
print("The name is ", name)
x = name.replace("c", "G")
print("After replacing the 3rd charactern The name is:",x)
# Exercise 3: int <-> float conversions.
a = 15          # int literal needs no int() cast
b = float(20)
print("The initial values of a,b are",a,"int type",b,"float type")
a = float(a)
b = int(b)
print("After converting")
print(a,"in float type")
print(b,"in int type")
|
[
"noreply@github.com"
] |
Premsaivudya.noreply@github.com
|
441b63535d816a7a5f8bc9956ed3f2120beed5dc
|
4f0ceccea62d3c909af88a7e3f5e1d063ed94b2e
|
/1605_find_valid_matrix_for_given_sums.py
|
da8faa849a2539c3be8089cfdaf7c3db2170e2a0
|
[] |
no_license
|
0as1s/leetcode
|
d19c2ec4f96666a03227871b9b21b26adcd6b3b4
|
c82d375f8d9d4feeaba243eb5c990c1ba3ec73d2
|
refs/heads/master
| 2021-05-09T21:30:35.378394
| 2021-02-01T15:37:37
| 2021-02-01T15:37:37
| 118,729,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
from heapq import *
class Solution(object):
    def restoreMatrix(self, rowSum, colSum):
        """
        Greedily rebuild a non-negative matrix with the given row/column sums.

        Repeatedly pair the smallest remaining row sum with the largest
        remaining column sum and place min(row, col) in that cell.

        :type rowSum: List[int]
        :type colSum: List[int]
        :rtype: List[List[int]]
        """
        grid = [[0] * len(colSum) for _ in range(len(rowSum))]
        # BUG FIX: the heaps were built with range(len(rowheap)) /
        # range(len(colheap)), referencing the list being defined, which
        # raised NameError at runtime. Use the input lengths instead.
        rowheap = [[rowSum[i], i] for i in range(len(rowSum))]   # min-heap of row sums
        heapify(rowheap)
        colheap = [[-colSum[i], i] for i in range(len(colSum))]  # max-heap via negation
        heapify(colheap)
        while rowheap:
            r, i = heappop(rowheap)
            c, j = heappop(colheap)
            c = -c
            if c == r:
                grid[i][j] = c
            elif c > r:
                # Column still needs (c - r); push back the negated remainder.
                grid[i][j] = r
                heappush(colheap, [r - c, j])
            else:
                # Row still needs (r - c); push back the positive remainder.
                grid[i][j] = c
                heappush(rowheap, [r - c, i])
        return grid
|
[
"oasis@penguin"
] |
oasis@penguin
|
38ff331d3294e7a396694d15618413f89a63abe3
|
6810a482759afd585db7bb0b85fd0416f0450e6d
|
/2019 Practice 2/eenymeeny.py
|
ff65262b7300e55226671925bb3c76154cf9f4a1
|
[] |
no_license
|
BenRStutzman/kattis
|
01b000ac2353c8b8000c6bddec3698f66b0198ef
|
005720f853e7f531a264227d0d9aaa19d4d7cf1b
|
refs/heads/master
| 2020-07-15T23:52:45.785021
| 2019-11-09T03:28:06
| 2019-11-09T03:28:06
| 205,675,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
import sys

def main():
    """Kattis 'eenymeeny': deal people into two teams with a counting rhyme."""
    inp = sys.stdin.read().splitlines()
    n_words = len(inp[0].split())  # length of the counting rhyme
    n_people = int(inp[1])
    people = inp[2:]
    teams = [[], []]  # the two captains pick alternately
    for i in range(n_people):
        # 0-based position in the remaining circle where the rhyme ends.
        index = (n_words - 1) % len(people)
        #print(index)
        teams[i % 2].append(people[index])
        # Rotate so counting resumes with the person after the one picked.
        people = people[index + 1:] + people[:index]
        #print(people)
    for team in teams:
        print(len(team))
        for person in team:
            print(person)

main()
|
[
"ben.stutzman@emu.edu"
] |
ben.stutzman@emu.edu
|
0b5bbaf76fa84e9989c75b6195c24cfab5ad6c53
|
6e6757a1fc119b2a3932aa498a2b50d35f7cd4cd
|
/unittest_kj/com/log_in.py
|
e50965047ed975aa2623cbad6c6d7510c80f5015
|
[] |
no_license
|
wang1041208706/wl_product
|
24a98ba069eb9013cdfabd012438f544347c1a52
|
79cc8fc9d7be073673aa782cf6554c6674160d7d
|
refs/heads/master
| 2023-06-04T21:25:53.866978
| 2021-06-22T07:40:45
| 2021-06-22T07:40:45
| 379,172,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
from selenium import webdriver
import time
from unittest_kj.env.env_factory import *
from ddt import ddt,data
from unittest_kj.test_data.read_data import *
class Login():
    """Selenium page helpers: log in and drive elements by XPath.

    NOTE(review): login() must be called first — it creates self.web,
    which every other method depends on.
    """
    def login(self):
        """Open Firefox, load the configured URL and submit the login form."""
        self.web = webdriver.Firefox()
        self.web.implicitly_wait(20)
        self.web.get(ReadConfig().url)
        self.web.find_element_by_xpath('/html/body/div/section/form/div/input[1]').send_keys(ReadConfig().username)
        self.web.find_element_by_xpath('/html/body/div/section/form/div/input[2]').send_keys(ReadConfig().password)
        self.web.find_element_by_xpath('/html/body/div/section/form/div/input[3]').click()
    def click(self, list_argument):
        """Click each element addressed by the given XPath list, in order."""
        for item in list_argument:
            self.web.find_element_by_xpath(item).click()
    def send_keys(self, list_address, list_argument):
        """Clear and type into elements: list_address[i] gets list_argument[i]."""
        for item in range(len(list_address)):
            self.web.find_element_by_xpath(list_address[item]).clear()
            self.web.find_element_by_xpath(list_address[item]).send_keys(list_argument[item])
    def switch_to(self, argument):
        """Switch driver focus into the first frame matching the tag name."""
        self.web.iframe = self.web.find_elements_by_tag_name(argument)[0]
        self.web.switch_to.frame(self.web.iframe)
|
[
"1041208706@qq.com"
] |
1041208706@qq.com
|
488b4c572ee2b48feda208a11995a813054d2313
|
e9596473100470e8ab732e562c5188b9675cf09e
|
/2017/SECCON2017/Man-in-the-middle_on_SECP384R1/exploit.1.py
|
0c9f9a37d20997c40d144edc9861fd05b9b3f16a
|
[] |
no_license
|
trmr/ctf
|
548c2674e34d11bc902f75d2ab4aae5836c0f8bb
|
1482cf2bd4eff3576cd455617ec60c72713a54f2
|
refs/heads/master
| 2022-04-15T09:16:53.037471
| 2020-04-14T14:40:49
| 2020-04-14T14:40:49
| 245,831,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,898
|
py
|
#!/usr/bin/python3
# SECCON 2017 "Man-in-the-middle on SECP384R1" exploit:
# intercept both ECDH public-key exchanges, substitute our own keys, derive
# both AES session keys, then decrypt/re-encrypt the relayed nonce.
import socket
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import *
import hashlib
from Crypto.Cipher import AES

# One key pair per victim device, so we can impersonate each side.
private_key1 = ec.generate_private_key(ec.SECP384R1, default_backend())
private_key2 = ec.generate_private_key(ec.SECP384R1, default_backend())

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "mitm.pwn.seccon.jp"
s.connect((host, 8000))

# dev0 -> dev1: capture dev0's DER public key, reply with ours.
s.recv(len("[dev0 to dev1]:"))
data = s.recv(120)
## todo
k1 = load_der_public_key(data, backend=default_backend())
payload = private_key1.public_key().public_bytes(encoding=Encoding.DER, format=PublicFormat.SubjectPublicKeyInfo)
s.send(payload)
data = s.recv(len("\n[dev1 to dev0]: OK\n"))

# dev1 -> dev0: same substitution in the other direction.
data = s.recv(len("[dev1 to dev0]:"))
data = s.recv(120)
## todo
k2 = load_der_public_key(data, backend=default_backend())
payload = private_key2.public_key().public_bytes(encoding=Encoding.DER, format=PublicFormat.SubjectPublicKeyInfo)
s.send(payload)
s.recv(len("\n[dev0 to dev1]: OK\n"))
data = s.recv(len("[KBKDF: SHA256, Encryption: AES]\n"))

## derive keys
# Shared secret with each device, hashed with SHA-256 into an AES key
# (matching the service's advertised KBKDF).
sk1 = private_key2.exchange(ec.ECDH(),k1)
sk2 = private_key1.exchange(ec.ECDH(),k2)
aesk1 = hashlib.sha256(sk1)
aesk2 = hashlib.sha256(sk2)
print ("key1: %s" % aesk1.hexdigest() )
print ("key2: %s" % aesk2.hexdigest() )
# CBC with a fixed all-"0" IV (the ASCII string, as the service uses).
a1 = AES.new(aesk1.digest(),AES.MODE_CBC,"0000000000000000")
a2 = AES.new(aesk2.digest(),AES.MODE_CBC,"0000000000000000")

# Decrypt dev0's message with key1, re-encrypt with key2 and forward it.
data = s.recv(len("[dev0 to dev1]:"))
data = s.recv(256)
nonce = a1.decrypt(data)
print("nonce: %s" % nonce.decode())
ct = a2.encrypt(nonce)
## todo
# mitm
s.send(ct)
data = s.recv(len("\n[dev1 to dev0]: OK\n"))

# Read dev1's answer and decrypt it with a fresh key2 cipher (fresh IV state).
data = s.recv(len("[dev1 to dev0]:"))
data = s.recv(256)
## todo
# decrypt
a2 = AES.new(aesk2.digest(),AES.MODE_CBC,"0000000000000000")
print(a2.decrypt(data).decode())
|
[
"ryo1.teramura@gmail.com"
] |
ryo1.teramura@gmail.com
|
9e12fe15be6b38490c7289617d6c874b51f6e450
|
afc00adc224855094e18cd25c919f0a193299c7d
|
/class1/rank_by_num.py
|
0a63619d6586029cbbb078999ccb558ad89851d6
|
[] |
no_license
|
rebeccasmile1/algorithm
|
e865e4916ee04989cce44de22eb63211de93938b
|
a9a60a59f6bb55cdba682721b6f9f9ac7a616d7a
|
refs/heads/master
| 2020-04-11T18:44:11.225477
| 2019-04-15T15:17:49
| 2019-04-15T15:17:49
| 162,009,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
'''
描述
给定一个整数数组,根据元素的频率对数组进行排序。例如,如果输入数组是{2,3,2,4,5,12,2,3,3,3,12},则将数组修改为{3,3,3,3,2,2, 2,12,12,4,5}。如果两个元素的频率相同,则按递增顺序打印它们。
输入
第一行输入包含一个整数T,表示测试用例的数量。T测试案例的描述如下。每个测试用例的第一行包含一个表示数组大小的整数N. 第二行包含N个空格分隔的整数A1,A2,...,AN表示数组的元素。(1≤T≤70;30≤N≤130;1≤A[i]≤60)
产量
在单独的行中打印每个已排序的数组。
1
5
5 5 4 6 4
'''
def mycount():
    """Frequency sort: for each of T test cases, read an array and print its
    elements ordered by decreasing frequency, ties broken by increasing value.

    Output format matches the original: every number is followed by a single
    space (print end=' '), with no newline between test cases.
    """
    # Local import keeps the module's top-level dependencies unchanged.
    from collections import Counter
    case_num = int(input())
    for _ in range(case_num):
        input()  # array size N is given but not needed
        # The original shadowed the builtins `list`/`dict`; avoid that here.
        values = [int(token) for token in input().split()]
        counts = Counter(values)  # replaces the hand-rolled O(n^2) tally
        # Frequency descending, then value ascending — same order the original
        # achieved with a stable value-sort followed by a count-sort.
        for value, freq in sorted(counts.items(), key=lambda kv: (-kv[1], kv[0])):
            for _ in range(freq):
                print(value, end=' ')


if __name__ == '__main__':
    mycount()
# case_num = int(input())
#
# for i in range(0, case_num):
# temp=[]
# result = []
# dict={}
# data_num = int(input())
# list = input().split()
# data_list = []
# for j in list:
# data_list.append(int(j))
# if j not in dict.keys():
# dict[j]=0
# data_list.sort()
# print(data_list)
# for key in data_list:
# dict[str(key)]+=1
# sorted(dict.items(),key=lambda x:x[1])#默认是降序
# print(dict)
|
[
"15850680911@163.com"
] |
15850680911@163.com
|
59435a7f2df6ad87a54f3cb9be8871b61d922a66
|
71864f24ea02e031a39572373e16bdfbc61df15e
|
/Rare7_DP/wons/1005_ACMCraft.py
|
8ce324117f0d936dc0cdb580a9866fc62afe4381
|
[] |
no_license
|
kanghuiseon/algorithmStudy
|
38c6401e10830e1fcdba46e8fb0662110af73547
|
dd038391f0562368cfaeda0cfb7c4cd328d884a6
|
refs/heads/master
| 2023-08-12T10:44:11.743292
| 2021-09-14T07:36:27
| 2021-09-14T07:36:27
| 380,399,403
| 0
| 4
| null | 2021-09-14T07:29:14
| 2021-06-26T02:48:49
|
C++
|
UTF-8
|
Python
| false
| false
| 821
|
py
|
import sys
from collections import deque
read = sys.stdin.readline
def bfs(ed) :
    """Topological BFS (Kahn's algorithm) over the build-dependency DAG.

    dp[v] = earliest completion time of building v: its own cost plus the
    maximum dp over all of its prerequisites. Returns dp[ed].
    Relies on module-level N, before, cost and adjs set up by the driver
    below; mutates `before` (in-degree counts) as it runs.
    """
    q = deque()
    dp = [0 for _ in range(N+1)]
    # Seed the queue with buildings that have no prerequisites.
    for i in range(1,N+1) :
        if before[i] == 0 :
            q.append(i)
            dp[i] = cost[i-1]
    while q :
        cur = q.popleft()
        for nx in adjs[cur] :
            # nx cannot finish before its slowest prerequisite.
            dp[nx] = max(dp[nx],dp[cur]+cost[nx-1])
            before[nx] -= 1
            if before[nx] == 0 :
                q.append(nx)
    return dp[ed]

# Driver: T test cases; each gives N buildings, K dependency edges,
# per-building costs, and the target building W.
T = int(read())
for _ in range(T) :
    N,K = map(int,read().split())
    cost = list(map(int,read().split()))
    adjs = [[] for _ in range(N+1)]
    before = [0 for _ in range(N+1)]  # in-degree (prerequisite count) per building
    for _ in range(K) :
        x,y = map(int,read().split())
        adjs[x].append(y)  # building x must be finished before y
        before[y] += 1
    W = int(read())
    print(bfs(W))
|
[
"noreply@github.com"
] |
kanghuiseon.noreply@github.com
|
7553076b98cac49868ef486a3f58bd686e5fce79
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/beginner/beginner-bite-257-extract-users-dict-from-multiline-string.py
|
b76dcd0f676c24ed898b3a7e8d0130ba21c80bfe
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
"""
A quick Bite to practice some string parsing extracting a users dict from a password file.
Complete get_users is how it works:
{'postfix': 'unknown', 'ssh-rsa': 'unknown', 'artagnon': 'Ramkumar R Git GSOC'}
So keys are usernames, values are names. Note that commas inside the name
get replace by a single space. Trailing commas (not in this example) get stripped off.
Have fun and keep calm and code in Python!
"""
____ t___ _______ D..
pw "\nmysql:x:106:107:MySQL Server,,,:/var/lib/mysql:/bin/false\navar:x:1000:1000::/home/avar:/bin/bash\nchad:x:1001:1001::/home/chad:/bin/bash\ngit-svn-mirror:x:1002:1002:Git mirror,,,:/home/git-svn-mirror:/bin/bash\ngerrit2:x:1003:1003:Gerrit User,,,:/home/gerrit2:/bin/bash\navahi:x:107:108:Avahi mDNS daemon,,,:/var/run/avahi-daemon:/bin/false\npostfix:x:108:112::/var/spool/postfix:/bin/false\nssh-rsa:x:1004:1004::/home/ssh-rsa:/bin/bash\nartagnon:x:1005:1005:Ramkumar R,,,,Git GSOC:/home/artagnon:/bin/bash\n"
_______ __
___ get_users_1st_solution passwd: s.. __ d..
result # dict
passwd ?.s...('\n')
lines passwd.s..('\n')
___ line __ lines:
fields line.s..(':')
k fields[0]
v fields[4]
__ v __ "":
v "unknown"
____
v __.sub(',+', ' ', v)
v v.s..
result[k] v
r..(result)
___ get_users_2nd_solution passwd: s.. __ d..
output # dict
___ row __ passwd.s...s..
fields row.s..(':')
username fields[0]
name __.s.. _ ,+', r' ', fields[4].s..(',' o. 'unknown'
output[username] name
r.. ?
print(get_users(pw
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
8e87682b4371d274864b66e790e7d04c57bf3e3d
|
319d64b33c52d59ecc2ccdbccf4255ec952e9d73
|
/config_old.py
|
b3b5966d366b2524534feb16be544ac3f7f048b4
|
[] |
no_license
|
CeramiqueHeart/keyhac
|
9a0139557c4e19e5b881cb8d01ae52777931bc51
|
8796800af0426a0ba906d040ab04f3d3b47c85a7
|
refs/heads/master
| 2020-12-22T13:41:58.219140
| 2016-07-07T09:46:42
| 2016-07-07T09:46:42
| 62,709,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,696
|
py
|
# -*- mode: python; coding: utf-8-dos -*-
##
## Windows の操作を emacs のキーバインドで行うための設定(keyhac版)
##
# このスクリプトは、keyhac で動作します。
# https://sites.google.com/site/craftware/keyhac
# スクリプトですので、使いやすいようにカスタマイズしてご利用ください。
#
# この内容は、utf-8-dos の coding-system で config.py の名前でセーブして
# 利用してください。また、このスクリプトの最後の方にキーボードマクロの
# キーバインドの設定があります。英語キーボードと日本語キーボードで設定の
# 内容を変える必要があるので、利用しているキーボードに応じて if文 の設定を
# 変更してください。(現在の設定は、日本語キーボードとなっています。)
#
# emacs の挙動と明らかに違う動きの部分は以下のとおりです。
# ・左の Ctrlキー と Altキー のみが、emacs用のキーとして認識される。
# ・ESC の二回押下で、ESC を入力できる。
# ・C-o と C-\ で IME の切り替えが行われる。
# ・C-c、C-z は、Windows の「コピー」、「取り消し」が機能するようにしている。
# ・C-x C-y で、クリップボード履歴を表示する。(C-n で選択を移動し、Enter で確定する)
# ・C-x o は、一つ前にフォーカスがあったウインドウに移動する。
# NTEmacs から Windowsアプリケーションソフトを起動した際に戻るのに便利。
# ・C-k を連続して実行しても、クリップボードへの削除文字列の蓄積は行われない。
# C-u による行数指定をすると、削除行を一括してクリップボードに入れることができる。
# ・C-l は、アプリケーションソフト個別対応とする。recenter 関数で個別に指定すること。
# この設定では、Sakura Editor のみ対応している。
# ・キーボードマクロは emacs の挙動と異なり、IME の変換キーも含めた入力したキー
# そのものを記録する。このため、キーボードマクロ記録時や再生時、IMEの状態に留意した
# 利用が必要。
# ・Excel の場合、^Enter に F2(セル編集モード移行)を割り当てている。(オプション)
# ・Emacs の場合、IME 切り替え用のキーを C-\ に置き換える方法を提供している。(オプション)
from time import sleep
from keyhac import *
def configure(keymap):
# List the applications that should NOT get the emacs keybindings (returns False for them)
# Turn ON "internal log" in the keyhac menu to look up processname / classname values
def is_emacs_target(window):
    """Return True when *window* should receive the emacs keybindings.

    Terminals, Emacs itself, X servers, VNC/RDP viewers etc. are excluded
    so their own key handling is left untouched.
    """
    non_emacs_processes = (
        "cmd.exe",         # cmd
        "mintty.exe",      # mintty
        "emacs.exe",       # Emacs
        "runemacs.exe",    # Emacs
        "gvim.exe",        # GVim
        # "eclipse.exe",   # Eclipse
        "xyzzy.exe",       # xyzzy
        "VirtualBox.exe",  # VirtualBox
        "XWin.exe",        # Cygwin/X
        "Xming.exe",       # Xming
        "putty.exe",       # PuTTY
        "ttermpro.exe",    # TeraTerm
        "MobaXterm.exe",   # MobaXterm
        "TurboVNC.exe",    # TurboVNC
        "vncviewer.exe",   # UltraVNC
        "mstsc.exe",       # Remote Desktop
    )
    return window.getProcessName() not in non_emacs_processes
# Applications that should get ONLY input-method switching (returns True).
# Only applications already excluded by is_emacs_target may be listed here.
def is_im_target(window):
    """Return True when *window* gets only the IME-toggle bindings."""
    im_only_processes = {
        "cmd.exe",        # cmd
        "mintty.exe",     # mintty
        "gvim.exe",       # GVim
        # "eclipse.exe",  # Eclipse
        "xyzzy.exe",      # xyzzy
        "putty.exe",      # PuTTY
        "ttermpro.exe",   # TeraTerm
        "MobaXterm.exe",  # MobaXterm
    }
    return window.getProcessName() in im_only_processes
keymap_emacs = keymap.defineWindowKeymap(check_func=is_emacs_target)
keymap_im = keymap.defineWindowKeymap(check_func=is_im_target)

# True while the mark is set
keymap_emacs.is_marked = False

# True while an incremental search is in progress
keymap_emacs.is_searching = False

# True while a keyboard macro is being played back
keymap_emacs.is_playing_kmacro = False

# True once the universal-argument command has been invoked
keymap_emacs.is_universal_argument = False

# True once the digit-argument command has been invoked
keymap_emacs.is_digit_argument = False

# Repeat count applied to the next command
keymap_emacs.repeat_counter = 1
########################################################################
## IME switching
########################################################################

def toggle_input_method():
    """Toggle the IME on/off and show the new state in a balloon.

    The original body duplicated display_input_method_status() verbatim
    inside a dead `if 1:` guard; delegate to that helper instead so the
    balloon logic lives in one place.  Behavior is unchanged.
    """
    # A-(25): the key sequence that toggles the IME
    keymap.InputKeyCommand("A-(25)")()
    display_input_method_status()
def enable_input_method():
    # Force the IME on, then report the new state.
    keymap.wnd.setImeStatus(1)
    display_input_method_status()

def disable_input_method():
    # Force the IME off, then report the new state.
    keymap.wnd.setImeStatus(0)
    display_input_method_status()

def display_input_method_status():
    # Show the current IME state in a balloon ("あ" = Japanese input,
    # "A" = direct input).  Suppressed while a keyboard macro is playing.
    if not keymap_emacs.is_playing_kmacro:
        sleep(0.05)  # delay
        message = "あ" if keymap.wnd.getImeStatus() else "A"
        keymap.popBalloon("ime_status", message, 200)
########################################################################
## File operations
########################################################################

def find_file():
    # C-o: the common Windows "open file" shortcut
    keymap.InputKeyCommand("C-o")()

def save_buffer():
    # C-s: "save"
    keymap.InputKeyCommand("C-s")()

def write_file():
    # Alt-f, Alt-a: "File > Save As" menu sequence
    keymap.InputKeyCommand("A-f", "A-a")()

########################################################################
## Cursor movement
########################################################################

def backward_char():
    keymap.InputKeyCommand("Left")()

def forward_char():
    keymap.InputKeyCommand("Right")()

def backward_word():
    keymap.InputKeyCommand("C-Left")()

def forward_word():
    keymap.InputKeyCommand("C-Right")()

def previous_line():
    keymap.InputKeyCommand("Up")()

def next_line():
    keymap.InputKeyCommand("Down")()

def move_beginning_of_line():
    keymap.InputKeyCommand("Home")()

def move_end_of_line():
    keymap.InputKeyCommand("End")()
    if keymap.getWindow().getClassName() == "_WwG":  # Microsoft Word
        if keymap_emacs.is_marked:
            # NOTE(review): while marking, Word appears to extend the
            # selection one character too far, hence the extra Left —
            # confirm against Word's selection behavior
            keymap.InputKeyCommand("Left")()

def beginning_of_buffer():
    keymap.InputKeyCommand("C-Home")()

def end_of_buffer():
    keymap.InputKeyCommand("C-End")()

def scroll_up():
    # emacs scroll-up = page UP in Windows terms
    keymap.InputKeyCommand("PageUp")()

def scroll_down():
    keymap.InputKeyCommand("PageDown")()

def recenter():
    # Only implemented per-application; currently Sakura Editor only
    if keymap.getWindow().getClassName() == "EditorClient":  # Sakura Editor
        keymap.InputKeyCommand("C-h")()
########################################################################
## Cut / copy / delete / undo
########################################################################

def delete_backward_char():
    keymap.InputKeyCommand("Back")()

def delete_char():
    keymap.InputKeyCommand("Delete")()

def backward_kill_word():
    # Select the previous word, then cut it
    keymap.InputKeyCommand("C-S-Left", "C-x")()

def kill_word():
    # Select the next word, then cut it
    keymap.InputKeyCommand("C-S-Right", "C-x")()

def kill_line():
    # Select to end of line, copy, then delete
    keymap_emacs.is_marked = True
    mark(move_end_of_line)()
    keymap.InputKeyCommand("C-c", "Delete")()

def kill_line2():
    # kill-line honoring a C-u repeat count: whole lines are selected and
    # cut in one operation so they land on the clipboard together
    if keymap_emacs.repeat_counter == 1:
        kill_line()
    else:
        keymap_emacs.is_marked = True
        if keymap.getWindow().getClassName() == "_WwG":  # Microsoft Word
            for i in range(keymap_emacs.repeat_counter):
                mark(next_line)()
            mark(move_beginning_of_line)()
        else:
            for i in range(keymap_emacs.repeat_counter - 1):
                mark(next_line)()
            mark(move_end_of_line)()
            # Include the trailing newline of the last line
            mark(forward_char)()
        kill_region()

def kill_region():
    keymap.InputKeyCommand("C-x")()

def kill_ring_save():
    keymap.InputKeyCommand("C-c")()
    if keymap.getWindow().getClassName() == "EditorClient":  # Sakura Editor
        # Send Esc to clear the highlight of the selected region
        keymap.InputKeyCommand("Esc")()

def windows_copy():
    keymap.InputKeyCommand("C-c")()

def yank():
    keymap.InputKeyCommand("C-v")()

def undo():
    keymap.InputKeyCommand("C-z")()

def set_mark_command():
    # Toggle the mark state
    if keymap_emacs.is_marked:
        keymap_emacs.is_marked = False
    else:
        keymap_emacs.is_marked = True

def mark_whole_buffer():
    if keymap.getWindow().getClassName().startswith("EXCEL"):  # Microsoft Excel
        # Workaround so select-all also works inside an Excel cell
        keymap.InputKeyCommand("C-End", "C-S-Home")()
    else:
        keymap.InputKeyCommand("C-Home", "C-a")()

def mark_page():
    mark_whole_buffer()

def open_line():
    keymap.InputKeyCommand("Enter", "Up", "End")()

########################################################################
## Buffer / window operations
########################################################################

def kill_buffer():
    keymap.InputKeyCommand("C-F4")()

def other_window():
    # Alt-Tab to the previously focused window
    keymap.InputKeyCommand("D-Alt")()
    keymap.InputKeyCommand("Tab")()
    sleep(0.01)  # delay
    keymap.InputKeyCommand("U-Alt")()
########################################################################
## String search / replace
########################################################################

def isearch_backward():
    # First press opens the search dialog; subsequent presses step to the
    # previous match ("find previous")
    if keymap_emacs.is_searching:
        if keymap.getWindow().getProcessName() == "EXCEL.EXE":  # Microsoft Excel
            if keymap.getWindow().getClassName() == "EDTBX":  # search window
                keymap.InputKeyCommand("A-S-f")()
            else:
                keymap.InputKeyCommand("C-f")()
        else:
            keymap.InputKeyCommand("S-F3")()
    else:
        keymap.InputKeyCommand("C-f")()
        keymap_emacs.is_searching = True

def isearch_forward():
    # First press opens the search dialog; subsequent presses step to the
    # next match ("find next")
    if keymap_emacs.is_searching:
        if keymap.getWindow().getProcessName() == "EXCEL.EXE":  # Microsoft Excel
            if keymap.getWindow().getClassName() == "EDTBX":  # search window
                keymap.InputKeyCommand("A-f")()
            else:
                keymap.InputKeyCommand("C-f")()
        else:
            keymap.InputKeyCommand("F3")()
    else:
        keymap.InputKeyCommand("C-f")()
        keymap_emacs.is_searching = True

########################################################################
## Keyboard macros
########################################################################

def kmacro_start_macro():
    keymap.command_RecordStart()

def kmacro_end_macro():
    keymap.command_RecordStop()
    # Remove the C-x part of the macro-terminating key C-x ) that gets
    # recorded into the macro itself.
    # (This assumes the macro is always terminated with C-x ).)
    if len(keymap.record_seq) >= 4:
        if (((keymap.record_seq[len(keymap.record_seq) - 1] == (162, True) and   # U-LCtrl
              keymap.record_seq[len(keymap.record_seq) - 2] == ( 88, True)) or   # U-X
             (keymap.record_seq[len(keymap.record_seq) - 1] == ( 88, True) and   # U-X
              keymap.record_seq[len(keymap.record_seq) - 2] == (162, True))) and # U-LCtrl
            keymap.record_seq[len(keymap.record_seq) - 3] == (88, False)):       # D-X
            keymap.record_seq.pop()
            keymap.record_seq.pop()
            keymap.record_seq.pop()
            if keymap.record_seq[len(keymap.record_seq) - 1] == (162, False):  # D-LCtrl
                # Also drop the trailing run of D-LCtrl events
                for i in range(len(keymap.record_seq) - 1, -1, -1):
                    if keymap.record_seq[i] == (162, False):  # D-LCtrl
                        keymap.record_seq.pop()
                    else:
                        break
            else:
                # Handle control-type input arriving back to back
                keymap.record_seq.append((162, True))  # U-LCtrl

def kmacro_end_and_call_macro():
    # Flag playback so IME status balloons are suppressed during replay
    keymap_emacs.is_playing_kmacro = True
    keymap.command_RecordPlay()
    keymap_emacs.is_playing_kmacro = False
########################################################################
## Miscellaneous
########################################################################

def newline():
    keymap.InputKeyCommand("Enter")()

def newline_and_indent():
    keymap.InputKeyCommand("Enter", "Tab")()

def indent_for_tab_command():
    keymap.InputKeyCommand("Tab")()

def keybord_quit():
    if not keymap.getWindow().getClassName().startswith("EXCEL"):  # everything except Microsoft Excel
        # Esc would clear the highlight of the selected region, but has
        # no effect in some applications, so nudge the cursor instead
        # keymap.InputKeyCommand("Esc")()
        keymap.InputKeyCommand("Left")()
        keymap.InputKeyCommand("Right")()
    keymap.command_RecordStop()

def kill_emacs():
    keymap.InputKeyCommand("A-F4")()

def universal_argument():
    # C-u: each press multiplies the repeat count by 4, unless a digit
    # argument is already being entered
    if keymap_emacs.is_universal_argument == True:
        if keymap_emacs.is_digit_argument == True:
            keymap_emacs.is_universal_argument = False
        else:
            keymap_emacs.repeat_counter = keymap_emacs.repeat_counter * 4
    else:
        keymap_emacs.is_universal_argument = True
        keymap_emacs.repeat_counter = keymap_emacs.repeat_counter * 4

def digit_argument(number):
    # Accumulate decimal digits into the repeat count
    if keymap_emacs.is_digit_argument == True:
        keymap_emacs.repeat_counter = keymap_emacs.repeat_counter * 10 + number
    else:
        keymap_emacs.repeat_counter = number
    keymap_emacs.is_digit_argument = True

def clipboard_list():
    # Show the clipboard history popup (C-n to move, Enter to confirm)
    keymap_emacs.is_marked = False
    keymap.command_ClipboardList()

def goto_line():
    keymap.InputKeyCommand("C-g")()
########################################################################
## Common helpers
########################################################################

def self_insert_command(key):
    # Return a callable that sends *key* through unchanged
    return keymap.InputKeyCommand(key)

def digit(number):
    # Number keys: act as digit-argument while C-u is active,
    # otherwise insert the digit (honoring repeat/mark state)
    def _digit():
        if keymap_emacs.is_universal_argument == True:
            digit_argument(number)
        else:
            reset_counter(reset_mark(repeat(keymap.InputKeyCommand(str(number)))))()
    return _digit

def digit2(number):
    # Number keys typed with a modifier: always treated as digit-argument
    def _digit2():
        keymap_emacs.is_universal_argument = True
        digit_argument(number)
    return _digit2

def mark(func):
    # Wrap *func* so it extends the selection while the mark is set
    def _mark():
        if keymap_emacs.is_marked:
            # With D-Shift alone, the shift gets released when M-< or M->
            # is pressed; holding both shift keys works around that
            keymap.InputKeyCommand("D-LShift")()
            keymap.InputKeyCommand("D-RShift")()
        func()
        if keymap_emacs.is_marked:
            keymap.InputKeyCommand("U-LShift")()
            keymap.InputKeyCommand("U-RShift")()
    return _mark

def reset_mark(func):
    # Wrap *func* so the mark is cleared after it runs
    def _reset_mark():
        func()
        keymap_emacs.is_marked = False
    return _reset_mark

def reset_counter(func):
    # Wrap *func* so the repeat-count state is reset after it runs
    def _reset_counter():
        func()
        keymap_emacs.is_universal_argument = False
        keymap_emacs.is_digit_argument = False
        keymap_emacs.repeat_counter = 1
    return _reset_counter

def reset_search(func):
    # Wrap *func* so the incremental-search state is cleared after it runs
    def _reset_search():
        func()
        keymap_emacs.is_searching = False
    return _reset_search

def repeat(func):
    # Wrap *func* so it runs repeat_counter times; the counter is
    # consumed (reset to 1) before the loop starts
    def _repeat():
        repeat_counter = keymap_emacs.repeat_counter
        keymap_emacs.repeat_counter = 1
        for i in range(repeat_counter):
            func()
    return _repeat

def repeat2(func):
    # Like repeat(), but never repeats while the mark is set
    def _repeat2():
        if keymap_emacs.is_marked == True:
            keymap_emacs.repeat_counter = 1
        repeat(func)()
    return _repeat2
########################################################################
## Key bindings
########################################################################

# Virtual-key code references:
# http://homepage3.nifty.com/ic/help/rmfunc/vkey.htm
# http://www.azaelia.net/factory/vk.html

## Multi-stroke key definitions
# keymap_emacs["Esc"] = keymap.defineMultiStrokeKeymap("Esc")
keymap_emacs["LC-OpenBracket"] = keymap.defineMultiStrokeKeymap("C-OpenBracket")
keymap_emacs["LC-x"] = keymap.defineMultiStrokeKeymap("C-x")
keymap_emacs["LC-q"] = keymap.defineMultiStrokeKeymap("C-q")

# Additional multi-stroke key for IM control
keymap_emacs["LC-o"] = keymap.defineMultiStrokeKeymap("C-o")

## 0-9 keys
for key in range(10):
    keymap_emacs[ str(key)] = digit(key)
    # keymap_emacs["LC-" + str(key)] = digit2(key)
    # keymap_emacs["LA-" + str(key)] = digit2(key)
    # keymap_emacs["Esc"][ str(key)] = digit2(key)
    keymap_emacs["LC-OpenBracket"][str(key)] = digit2(key)
    keymap_emacs["S-" + str(key)] = reset_counter(reset_mark(repeat(self_insert_command("S-" + str(key)))))

## SPACE and A-Z keys (plain self-insert, honoring repeat/mark state)
for vkey in [32] + list(range(65, 90 + 1)):
    keymap_emacs[ "(" + str(vkey) + ")"] = reset_counter(reset_mark(repeat(self_insert_command( "(" + str(vkey) + ")"))))
    keymap_emacs["S-(" + str(vkey) + ")"] = reset_counter(reset_mark(repeat(self_insert_command("S-(" + str(vkey) + ")"))))

## Numeric-keypad special character keys
for vkey in [106, 107, 109, 110, 111]:
    keymap_emacs[ "(" + str(vkey) + ")"] = reset_counter(reset_mark(repeat(self_insert_command( "(" + str(vkey) + ")"))))

## Special character keys
for vkey in list(range(186, 192 + 1)) + list(range(219, 222 + 1)) + [226]:
    keymap_emacs[ "(" + str(vkey) + ")"] = reset_counter(reset_mark(repeat(self_insert_command( "(" + str(vkey) + ")"))))
    keymap_emacs["S-(" + str(vkey) + ")"] = reset_counter(reset_mark(repeat(self_insert_command("S-(" + str(vkey) + ")"))))

## quoted-insert keys (C-q <anything> passes the key through literally)
for vkey in range(1, 255):
    keymap_emacs["LC-q"][    "(" + str(vkey) + ")"] = reset_search(reset_counter(reset_mark(repeat(self_insert_command(    "(" + str(vkey) + ")")))))
    keymap_emacs["LC-q"][  "S-(" + str(vkey) + ")"] = reset_search(reset_counter(reset_mark(repeat(self_insert_command(  "S-(" + str(vkey) + ")")))))
    keymap_emacs["LC-q"][  "C-(" + str(vkey) + ")"] = reset_search(reset_counter(reset_mark(repeat(self_insert_command(  "C-(" + str(vkey) + ")")))))
    keymap_emacs["LC-q"]["C-S-(" + str(vkey) + ")"] = reset_search(reset_counter(reset_mark(repeat(self_insert_command("C-S-(" + str(vkey) + ")")))))
    keymap_emacs["LC-q"][  "A-(" + str(vkey) + ")"] = reset_search(reset_counter(reset_mark(repeat(self_insert_command(  "A-(" + str(vkey) + ")")))))
    keymap_emacs["LC-q"]["A-S-(" + str(vkey) + ")"] = reset_search(reset_counter(reset_mark(repeat(self_insert_command("A-S-(" + str(vkey) + ")")))))

## Pressing Esc twice produces Esc
# keymap_emacs["Esc"]["Esc"] = reset_counter(self_insert_command("Esc"))
# keymap_emacs["LC-OpenBracket"]["C-OpenBracket"] = reset_counter(self_insert_command("Esc"))

## universal-argument key
keymap_emacs["LC-u"] = universal_argument

## "IME switching" keys
keymap_emacs["(243)"] = toggle_input_method
keymap_emacs["(244)"] = toggle_input_method
keymap_emacs["LA-(25)"] = toggle_input_method
keymap_emacs["LC-Yen"] = toggle_input_method
# keymap_emacs["LC-o"] = toggle_input_method  # or open_line
keymap_emacs["LC-o"]["LC-o"] = enable_input_method
keymap_emacs["LC-o"]["LC-i"] = disable_input_method

keymap_im["(243)"] = toggle_input_method
keymap_im["(244)"] = toggle_input_method
keymap_im["LA-(25)"] = toggle_input_method
keymap_im["LC-Yen"] = toggle_input_method
keymap_im["LC-o"] = toggle_input_method

## "File operation" keys
keymap_emacs["LC-x"]["C-f"] = reset_search(reset_counter(reset_mark(find_file)))
keymap_emacs["LC-x"]["C-s"] = reset_search(reset_counter(reset_mark(save_buffer)))
keymap_emacs["LC-x"]["C-w"] = reset_search(reset_counter(reset_mark(write_file)))

## "Cursor movement" keys
keymap_emacs["LC-b"] = reset_search(reset_counter(repeat(mark(backward_char))))
keymap_emacs["LC-f"] = reset_search(reset_counter(repeat(mark(forward_char))))
keymap_emacs["LA-b"] = reset_search(reset_counter(repeat(mark(backward_word))))
# keymap_emacs["Esc"]["b"] = reset_search(reset_counter(repeat(mark(backward_word))))
keymap_emacs["LC-OpenBracket"]["b"] = reset_search(reset_counter(repeat(mark(backward_word))))
keymap_emacs["LA-f"] = reset_search(reset_counter(repeat(mark(forward_word))))
# keymap_emacs["Esc"]["f"] = reset_search(reset_counter(repeat(mark(forward_word))))
keymap_emacs["LC-OpenBracket"]["f"] = reset_search(reset_counter(repeat(mark(forward_word))))
keymap_emacs["LC-p"] = reset_search(reset_counter(repeat(mark(previous_line))))
keymap_emacs["LC-n"] = reset_search(reset_counter(repeat(mark(next_line))))
keymap_emacs["LC-a"] = reset_search(reset_counter(mark(move_beginning_of_line)))
keymap_emacs["LC-e"] = reset_search(reset_counter(mark(move_end_of_line)))
keymap_emacs["LA-S-Comma"] = reset_search(reset_counter(mark(beginning_of_buffer)))
# keymap_emacs["Esc"]["S-Comma"] = reset_search(reset_counter(mark(beginning_of_buffer)))
keymap_emacs["LC-OpenBracket"]["S-Comma"] = reset_search(reset_counter(mark(beginning_of_buffer)))
keymap_emacs["LA-S-Period"] = reset_search(reset_counter(mark(end_of_buffer)))
# keymap_emacs["Esc"]["S-Period"] = reset_search(reset_counter(mark(end_of_buffer)))
keymap_emacs["LC-OpenBracket"]["S-Period"] = reset_search(reset_counter(mark(end_of_buffer)))
keymap_emacs["LA-v"] = reset_search(reset_counter(mark(scroll_up)))
# keymap_emacs["Esc"]["v"] = reset_search(reset_counter(mark(scroll_up)))
keymap_emacs["LC-OpenBracket"]["v"] = reset_search(reset_counter(mark(scroll_up)))
keymap_emacs["LC-v"] = reset_search(reset_counter(mark(scroll_down)))
keymap_emacs["LC-l"] = reset_search(reset_counter(recenter))
keymap_emacs["LC-x"]["C-g"] = reset_search(reset_counter(reset_mark(goto_line)))

## "Cut / copy / delete / undo" keys
keymap_emacs["LC-h"] = reset_search(reset_counter(reset_mark(repeat2(delete_backward_char))))
keymap_emacs["LC-d"] = reset_search(reset_counter(reset_mark(repeat2(delete_char))))
# keymap_emacs["LC-Back"] = reset_search(reset_counter(reset_mark(repeat(backward_kill_word))))
keymap_emacs["LA-Delete"] = reset_search(reset_counter(reset_mark(repeat(backward_kill_word))))
# keymap_emacs["Esc"]["Delete"] = reset_search(reset_counter(reset_mark(repeat(backward_kill_word))))
keymap_emacs["LC-OpenBracket"]["Delete"] = reset_search(reset_counter(reset_mark(repeat(backward_kill_word))))
keymap_emacs["LC-Delete"] = reset_search(reset_counter(reset_mark(repeat(kill_word))))
keymap_emacs["LA-d"] = reset_search(reset_counter(reset_mark(repeat(kill_word))))
# keymap_emacs["Esc"]["d"] = reset_search(reset_counter(reset_mark(repeat(kill_word))))
keymap_emacs["LC-OpenBracket"]["d"] = reset_search(reset_counter(reset_mark(repeat(kill_word))))
keymap_emacs["LC-k"] = reset_search(reset_counter(reset_mark(kill_line2)))
keymap_emacs["LC-w"] = reset_search(reset_counter(reset_mark(kill_region)))
keymap_emacs["LA-w"] = reset_search(reset_counter(reset_mark(kill_ring_save)))
# keymap_emacs["Esc"]["w"] = reset_search(reset_counter(reset_mark(kill_ring_save)))
keymap_emacs["LC-OpenBracket"]["w"] = reset_search(reset_counter(reset_mark(kill_ring_save)))
keymap_emacs["LC-c"] = reset_search(reset_counter(reset_mark(windows_copy)))
keymap_emacs["LC-y"] = reset_search(reset_counter(reset_mark(yank)))
keymap_emacs["LC-z"] = reset_search(reset_counter(reset_mark(undo)))
keymap_emacs["LC-Slash"] = reset_search(reset_counter(reset_mark(undo)))
keymap_emacs["LC-Underscore"] = reset_search(reset_counter(reset_mark(undo)))
keymap_emacs["LC-x"]["u"] = reset_search(reset_counter(reset_mark(undo)))
# LC-Atmark would steal LC-2 on English keyboards, hence LC-(192)
keymap_emacs["LC-(192)"] = reset_search(reset_counter(set_mark_command))
keymap_emacs["LC-Space"] = reset_search(reset_counter(set_mark_command))
keymap_emacs["LC-x"]["h"] = reset_search(reset_counter(reset_mark(mark_whole_buffer)))
keymap_emacs["LC-x"]["C-p"] = reset_search(reset_counter(reset_mark(mark_page)))

## "Buffer / window operation" keys
keymap_emacs["LC-x"]["k"] = reset_search(reset_counter(reset_mark(kill_buffer)))
keymap_emacs["LC-x"]["o"] = reset_search(reset_counter(reset_mark(other_window)))

## "String search / replace" keys
keymap_emacs["LC-r"] = reset_counter(reset_mark(isearch_backward))
keymap_emacs["LC-s"] = reset_counter(reset_mark(isearch_forward))

## "Keyboard macro" keys
if 1:
    # Japanese keyboard layout
    keymap_emacs["LC-x"]["S-8"] = kmacro_start_macro
    keymap_emacs["LC-x"]["S-9"] = kmacro_end_macro
else:
    # English keyboard layout
    keymap_emacs["LC-x"]["S-9"] = kmacro_start_macro
    keymap_emacs["LC-x"]["S-0"] = kmacro_end_macro

keymap_emacs["LC-x"]["e"] = reset_search(reset_counter(repeat(kmacro_end_and_call_macro)))

## "Miscellaneous" keys
keymap_emacs["LC-m"] = reset_counter(reset_mark(repeat(newline)))
keymap_emacs["Enter"] = reset_counter(reset_mark(repeat(newline)))
keymap_emacs["LC-j"] = reset_counter(reset_mark(newline_and_indent))
keymap_emacs["LC-i"] = reset_counter(reset_mark(repeat(indent_for_tab_command)))
keymap_emacs["Tab"] = reset_counter(reset_mark(repeat(indent_for_tab_command)))
keymap_emacs["LC-g"] = reset_search(reset_counter(reset_mark(keybord_quit)))
keymap_emacs["LC-x"]["C-c"] = reset_search(reset_counter(reset_mark(kill_emacs)))
keymap_emacs["LC-x"]["C-y"] = reset_search(reset_counter(reset_mark(clipboard_list)))

## Excel key bindings (optional)
if 1:
    keymap_excel = keymap.defineWindowKeymap(class_name='EXCEL*')
    # C-Enter switches to cell-edit mode
    keymap_excel["LC-Enter"] = reset_search(reset_counter(reset_mark(self_insert_command("F2"))))

## Emacs key bindings (optional)
if 0:
    keymap_real_emacs = keymap.defineWindowKeymap(class_name='Emacs')
    # Replace the IME-switching keys with C-\
    keymap_real_emacs["(28)"] = self_insert_command("C-Yen")     # "Henkan" key
    keymap_real_emacs["(29)"] = self_insert_command("C-Yen")     # "Muhenkan" key
    keymap_real_emacs["(242)"] = self_insert_command("C-Yen")    # "Katakana/Hiragana" key
    keymap_real_emacs["(243)"] = self_insert_command("C-Yen")    # "Hankaku/Zenkaku" key
    keymap_real_emacs["(244)"] = self_insert_command("C-Yen")    # "Hankaku/Zenkaku" key
    keymap_real_emacs["A-(25)"] = self_insert_command("C-Yen")   # "Alt-`" key
|
[
"CeramiqueHeart@users.noreply.github.com"
] |
CeramiqueHeart@users.noreply.github.com
|
ada303c258bd31f38b9cf482d11d69d20461a863
|
f245a602aad5515489b2bba8bf91c56bb5e299ec
|
/__init__.py
|
decc003fdc99c4ca155ee01e91e9e513434ded6a
|
[
"Apache-2.0"
] |
permissive
|
imgcook/pipcook-plugin-pytorch-yolov5-evaluator
|
18f0c5643ed70b9da9ce244fd42b86686a875ce9
|
eafac6d84d2e8b910a7600cc41c9045ca870acf9
|
refs/heads/main
| 2023-01-20T18:57:06.003917
| 2020-12-01T12:45:13
| 2020-12-01T12:45:13
| 309,251,906
| 2
| 0
|
Apache-2.0
| 2020-12-01T12:45:15
| 2020-11-02T03:53:30
| null |
UTF-8
|
Python
| false
| false
| 6,748
|
py
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__)))
import argparse
import glob
import json
import shutil
from pathlib import Path
import numpy as np
import torch
import yaml
from tqdm import tqdm
from helper.datasets import create_dataloader
from helper.general import (
compute_loss, non_max_suppression,
clip_coords, xywh2xyxy, box_iou, ap_per_class)
from helper.torch_utils import select_device, time_synchronized
class obj(object):
    """Recursively convert a dict into an object with attribute access."""

    def __init__(self, d):
        for key, value in d.items():
            setattr(self, key, self._convert(value))

    @staticmethod
    def _convert(value):
        # Dicts become nested obj instances; lists/tuples are rebuilt as
        # lists with dict elements converted.  Containers nested inside
        # list elements are left as-is, matching the original behavior.
        if isinstance(value, (list, tuple)):
            return [obj(item) if isinstance(item, dict) else item for item in value]
        if isinstance(value, dict):
            return obj(value)
        return value
def test(data,
         opt,
         imgsz=640,
         model=None,
         save_dir=Path(''),  # for saving images
         ):
    """Run COCO-style evaluation of a YOLOv5 *model* on the 'test' split.

    Args:
        data: path to a dataset yaml providing 'nc' (class count) and
            'test' (image list/dir) entries.
        opt: options namespace forwarded to create_dataloader
            (NOTE(review): assumed to carry the attrs that helper reads,
            e.g. single_cls — confirm against helper.datasets).
        imgsz: square inference image size.
        model: a loaded model already on its target device.
        save_dir: NOTE(review): this parameter is immediately shadowed by
            the local `save_dir=Path('')` below, so the argument is
            effectively ignored — confirm whether that is intentional.

    Returns:
        dict with mean precision 'mp', mean recall 'mr' and 'map50',
        or {} when the yaml has no 'test' split.
    """
    batch_size = 16
    conf_thres = 0.001  # confidence threshold for NMS candidates
    iou_thres = 0.6     # IoU threshold for NMS
    augment = False
    verbose = False
    save_dir = Path('')  # shadows the parameter of the same name (see docstring)

    device = next(model.parameters()).device  # get model device

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    nc = int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    path = data['test']
    if not path:
        # No test split configured: nothing to evaluate
        return {}
    dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt,
                                   hyp=None, augment=False, cache=False, pad=0.5, rect=True)[0]

    seen = 0
    names = model.names if hasattr(model, 'names') else model.module.names
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (img, targets, paths, shapes) in enumerate(dataloader):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)

        # Disable gradients
        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3]  # box, obj, cls

            # Run NMS
            t = time_synchronized()
            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    # Image has ground truth but no detections: count as misses
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5]) * whwh

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # prediction indices
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # target indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1)  # best ious, indices

                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]  # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(detected) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats, plot=False, fname=save_dir / 'precision-recall_curve.png')
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1)  # [P, R, AP@0.5, AP@0.5:0.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple

    # Return results
    model.float()  # for training
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return {
        'mp': mp,
        'mr': mr,
        'map50': map50,
    }
def main(data, model, args):
    """Plugin entry point: evaluate *model* and return its metric dict.

    NOTE(review): the `data` and `args` parameters are accepted but unused —
    the dataset yaml and image size are taken from `model.cfg` instead;
    confirm this is intentional before relying on the `data` argument.
    """
    opt = obj({})
    opt.single_cls = False  # presumably read by create_dataloader via opt — TODO confirm
    return test(
        model.cfg.data,
        opt,
        model.cfg.imgsz,
        model=model.model
    )

# Undo the sys.path.append performed at import time (top of file).
sys.path.pop()
|
[
"queyue.crk@alibaba-inc.com"
] |
queyue.crk@alibaba-inc.com
|
6c775a6551caf0cb5e21d29bfe59dd7ff3b6347f
|
cf522474a1d1c2fff31b5cd5d8dd7c36e6d14d5d
|
/assets/cobo.py
|
757bd71ce864388d39f362757d3855672a87db8c
|
[] |
no_license
|
tastycontrols/cobo-tools
|
61cb1bb01b4b8168d180202208cf4040953e3b6d
|
c6dc2a6338707a1f331be23c9aa1d1fa6557baa1
|
refs/heads/main
| 2023-07-15T17:34:13.638008
| 2021-08-25T04:41:50
| 2021-08-25T04:41:50
| 399,669,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,358
|
py
|
#!/usr/bin/env python3
# cobo.py
# SLD 2021
#
############
# INITIALIZATION
############
# Imports
import os, sys, json
from datetime import datetime
# Greet the user
print("COBO, a cookbook (and cookbook build system) \nSLD 2021 \n")
# Valid operation modes go here
valid_modes = ["help", "build", "get-tags", "subset", "compile-md", "add-source"]
# Set constants
MD_LINEBREAK = "\n\n"
# Initialize variables
source_dir = None
dest_dir = None
input_filename = None
output_filename = None
tag = None
mode = None
# Begin parsing command line arguments
# First, collect operation mode
try: mode = sys.argv[1].lower()
except: pass
# Parse mode-dependent variables
for raw_arg in sys.argv[2:]:
this_arg = raw_arg.split("=")
# Variables should occupy one argument, form of `ARG=VALUE`
if len(this_arg) == 2:
this_var = this_arg[0].lower()
this_val = this_arg[1]
# Assign variable values
if this_var == "sources": source_dir = this_val
if this_var == "input": input_filename = this_val
if this_var == "output": output_filename = this_val
if this_var == "tag": tag = this_val
if this_var == "dest": dest_dir = this_val
# Ensure mode has been specified or quit
if(mode) and mode in valid_modes: pass
else: quit("Valid operation mode required but not defined. Valid modes:\n\n"+
"\n".join("`"+md+"`" for md in valid_modes)+
"\n")
# Test that all required arguments have been passed before continuing
# `update` requires `sources=<<<SOURCE_DIR>>>` and `output=<<<OUTPUT_FILE>>>`
print("Operation mode: `"+mode+"`")
if mode == "build":
if(source_dir): pass
else: quit("Source directory required but not defined: try adding `sources=SOURCE_DIR`\n")
if(output_filename): pass
else: quit("Output filename required but not defined: try adding `output=OUTPUT_FILE`\n")
if mode == "get-tags":
if(input_filename): pass
else: quit("Input filename required but not defined: try adding `input=INPUT_FILE`\n")
if(output_filename): pass
else: quit("Output filename required but not defined: try adding `output=OUTPUT_FILE`\n")
if mode == "subset":
if(tag): pass
else: quit("Tag required but not defined: try adding `tag=TAG`\n")
if(input_filename): pass
else: quit("Input filename required but not defined: try adding `input=INPUT_FILE`\n")
if(output_filename): pass
else: quit("Output filename required but not defined: try adding `output=OUTPUT_FILE`\n")
if mode == "compile-md":
if(input_filename): pass
else: quit("Input filename required but not defined: try adding `input=INPUT_FILE`\n")
if(output_filename): pass
else: quit("Output filename required but not defined: try adding `output=OUTPUT_FILE`\n")
if mode == "add-source":
if(dest_dir): pass
else: quit("Destination directory required but not defined: try adding `dest=DEST_DIR`\n")
############
# OPERATION MODE `help`
############
if mode == "help":
print("\nSyntax: python3 cobo.py OP_MODE ARG1 ARG2 ... ARGn")
print()
print("OP_MODE `help`: displays this help screen")
print()
print("OP_MODE `build`: build output JSON file from contents of a source directory")
print("\tRequired argument `sources`: specifies directory containing source MD files")
print("\tRequired argument `output`: specifies JSON output file name")
print()
print("OP_MODE `get-tags`: extract tags from input JSON file and output as plaintext file")
print("\tRequired argument `input`: specifies JSON input file name")
print("\tRequired argument `output`: specifies plaintext output file name")
print()
print("OP_MODE `subset`: extract subset of input JSON file by tag, and output as JSON file")
print("\tRequired argument `tag`: specifies tag to extract recipes by")
print("\tRequired argument `input`: specifies JSON input file name")
print("\tRequired argument `output`: specifies JSON output file name")
print()
print("OP_MODE `compile-md`: compile Markdown cookbook from an input JSON file")
print("\tRequired argument `input`: specifies JSON input file name")
print("\tRequired argument `output`: specifies Markdown output file name")
print()
print("OP_MODE `add-source`: add new blank Markdown recipe to the specified source directory")
print("\tRequired argument `dest`: specifies destination directory for new MD file")
print()
quit()
############
# OPERATION MODE `build`
############
if mode == "build":
    # Begin by polling the source directory for MD files
    files = os.listdir(source_dir+"/")
    recipes_to_parse=[]
    for indiv_file in files:
        # NOTE(review): this matches any name whose last two characters are
        # "md" (e.g. "foomd"), not only a ".md" extension -- confirm.
        if indiv_file[-2:].lower()=="md":
            recipes_to_parse.append(source_dir+"/"+indiv_file)
    # Begin collecting data
    data = {}
    for this_recipe in recipes_to_parse:
        # For this recipe, get the entire recipe body
        input_file = open(this_recipe, "r")
        full_text = [ln.strip("\n") for ln in input_file.readlines()]
        input_file.close()
        # Extract title, tags, and method.  The slicing assumes a fixed
        # source layout: line 0 holds "# Title", line 2 holds a bracketed,
        # quoted tag list, and the method starts at line 4.
        title = full_text[0][2:]
        tags = [tg[1:-1] for tg in full_text[2][1:-1].split(",")]
        method = full_text[4:]
        # Define index (hash) and handle (derived from title)
        # Need these for downstream data manipulation and file creation
        index = this_recipe.split("/")[-1:][0][:-3]
        handle = "-".join(title.lower().split())
        data[index] = {
            "handle":handle,
            "title":title,
            "tags":tags,
            "method":method
        }
    # Create JSON dump and write out
    with open(output_filename, "w") as output_file:
        output_file.write(json.dumps(data, indent=4))
    # Clean up and finish
    # NOTE(review): the `with` block above already closed the file; this
    # explicit close is a harmless no-op.
    output_file.close()
    quit("Successfully built `"+output_filename+"` from contents of `"+source_dir+"` \n")
############
# OPERATION MODE: `get-tags`
############
if mode == "get-tags":
# Begin by loading the JSON input file
input_file = open(input_filename, "r")
data = json.load(input_file)
input_file.close()
# Pass through each recipe
all_tags = []
for index in data:
# Get the tags from this recipe
this_recipe_tags = data[index]["tags"]
# Add tags from this recipe to the global list if not already present
for this_tag in this_recipe_tags:
if this_tag not in all_tags: all_tags.append(this_tag)
# Export tag list
output_file = open(output_filename, "w")
for this_tag in all_tags: output_file.write(this_tag+"\n")
# Clean up and finish
output_file.close()
quit("Successfully extracted tags from `"+input_filename+"` to `"+output_filename+"` \n")
#########
# OPERATION MODE: `subset`
############
if mode == "subset":
# Begin by loading the JSON input file
input_file = open(input_filename, "r")
data = json.load(input_file)
input_file.close()
# Collect recipe indices citing the specified tag
tagged_indices = []
for index in data:
if tag in data[index]["tags"]: tagged_indices.append(index)
# Clone tagged recipes into a new dictionary
subset_data = {}
for this_index in tagged_indices:
# Get the data for this recipe
this_handle = data[this_index]["handle"]
this_title = data[this_index]["title"]
this_tags = data[this_index]["tags"]
this_method = data[this_index]["method"]
# Add to the subset dictionary
subset_data[this_index] = {
"handle":this_handle,
"title":this_title,
"tags":this_tags,
"method":this_method
}
# Create JSON dump and write out
with open(output_filename, "w") as output_file:
output_file.write(json.dumps(subset_data, indent=4))
# Clean up and finish
output_file.close()
quit("Successfully built `"+output_filename+"` from `"+input_filename+"` using tag `"+tag+"` \n")
#########
# OPERATION MODE: `compile-md`
############
if mode == "compile-md":
# Begin by loading the JSON input file
input_file = open(input_filename, "r")
data = json.load(input_file)
input_file.close()
# Pass through recipe indices
toc = ""
contents = ""
for index in data:
# Create title text and ToC link
title_text = "## " + data[index]["title"]
toc += "["+data[index]["title"]+"](#"
toc += "-".join(data[index]["title"].lower().split())
toc += ")\\\n"
# Create metadata text
metadata_text = "#### COBO Metadata"
metadata_text += "\n```\n"
metadata_text += "index:" + index + "\n"
metadata_text += "handle:" + data[index]["handle"] + "\n"
metadata_text += "tags:" + str(data[index]["tags"]) + "\n"
metadata_text += "```"
# Create method text
method_text = ""
for line in data[index]["method"]: method_text += line + "\n"
# Add recipe to contents text
contents += " ___ \n"
contents += title_text
contents += MD_LINEBREAK
contents += metadata_text
contents += MD_LINEBREAK
contents += method_text
contents += MD_LINEBREAK + MD_LINEBREAK
# Create Markdown file and write out
output_file = open(output_filename, "w")
output_file.write(
"## Table Of Contents: ```" + input_filename+"```"
+MD_LINEBREAK
+toc
+MD_LINEBREAK
+contents
)
# Clean up and finish
output_file.close()
quit("Successfully compiled `"+input_filename+"` to `"+output_filename+"` \n")
#########
# OPERATION MODE: `add-source`
############
if mode == "add-source":
    # Generate a new source filename from the current timestamp.
    # Layout kept from the original: DDMMYYYYSSMMHH + ".md".
    # NOTE(review): seconds come before minutes before hours -- confirm
    # this ordering is intentional.
    dt_obj = datetime.now()
    new_source = str(dt_obj.day).zfill(2)
    new_source += str(dt_obj.month).zfill(2)
    new_source += str(dt_obj.year).zfill(4)
    new_source += str(dt_obj.second).zfill(2)
    new_source += str(dt_obj.minute).zfill(2)
    new_source += str(dt_obj.hour).zfill(2) + ".md"
    # Clone recipe template into source directory, named as new source.
    # Fix: use shutil.copy instead of os.system("cp ...") -- portable
    # (works on Windows) and immune to shell interpretation of dest_dir.
    import shutil
    shutil.copy("./source.md.template", dest_dir + "/" + new_source)
    # Clean up and finish
    quit("Successfully created new source `"+dest_dir+"/"+new_source+"` \n")
|
[
"noreply@github.com"
] |
tastycontrols.noreply@github.com
|
0a7414dac806081fed5c5ae76e995858fc7b09ff
|
fcd999e8cd43188050796c7dfa63aff2b1fc5ebf
|
/agendaContacto/asgi.py
|
8786fa9f9dbcfd0734b4ef8b5d5f6c2bafcdd0cd
|
[] |
no_license
|
cristopherhurtado/agendaContacto
|
2e9a9e614fbe058ad9aa54164ffc9e27aa71cd5f
|
b79f255613db5a543243c106da3bad0d9555365f
|
refs/heads/main
| 2023-02-05T04:18:26.794096
| 2020-12-14T03:53:28
| 2020-12-14T03:53:28
| 321,226,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
ASGI config for agendaContacto project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application
# (setdefault keeps any value already provided by the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'agendaContacto.settings')
application = get_asgi_application()
|
[
"cristopherhurtado@gmail.com"
] |
cristopherhurtado@gmail.com
|
69132fa12aba4f775807f76c7c78ad574e333ec5
|
94cd4506f23f3e0469a0f3fbcffa0d06b4eb7e0f
|
/pytorch-chatbot-master/chat.py
|
23b6f8ccf1f87e9022c4cbd5ffcf7b8423f084a6
|
[
"MIT"
] |
permissive
|
FenilNavdiwala/NLPchatbot
|
bba0f5dd441f62f358f03fb62fa4059ee22b1825
|
551261cb6e2a12a471e6d37935e4e6e6f877c7a9
|
refs/heads/main
| 2023-04-22T00:15:14.334293
| 2021-05-06T10:19:57
| 2021-05-06T10:19:57
| 364,976,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
import random
import json
import argparse
import torch
import sys
from model import NeuralNet
from nltk_utils import bag_of_words, tokenize, syn
from pymongo import MongoClient
from bson import ObjectId
# from train import ChatDataset
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Database URL and password.
# SECURITY NOTE(review): admin credentials are hard-coded in source and
# committed to version control -- rotate them and load the connection
# string from an environment variable or secrets store instead.
client = MongoClient(
    "mongodb+srv://admin:admin123@cluster1.ycoy8.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
db = client["myFirstDatabase"]
# `chatbots` is the collection name
collection = db["chatbots"]
y = collection.find({"_id": ObjectId("6051b174e1801a35d1336993")})  # cursor over one bot document
a = y.next()  # NOTE(review): Python 3 idiom is next(y); .next() works on pymongo cursors
K = a['intents']  # intent records, each with intentName / trainingPhrase / response
A = []  # Tag (intent names)
TP = []  # trainingphrase
R = []  # response
for i in range(len(K)):
    A.append(K[i]['intentName'])
    TP.append(K[i]['trainingPhrase'])
    R. append(K[i]['response'])
FILE = "/home/fenil/nlp/pytorch-chatbot-master/data.pth"
data = torch.load(FILE)
input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
all_words = data['all_words']
A = data['tags']
model_state = data["model_state"]
model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()
bot_name = "Fenil"
sentence = "what is AI?"
# sentence = sys.argv[1]
# print(sentence)
_sentence = tokenize(sentence)
X = bag_of_words(_sentence, all_words)
X = X.reshape(1, X.shape[0])
X = torch.from_numpy(X).to(device)
output = model(X)
_, predicted = torch.max(output, dim=1)
tag = A[predicted.item()]
probs = torch.softmax(output, dim=1)
prob = probs[0][predicted.item()]
res = None
# print(prob.item())
if prob.item() > 0.5:
for data in K:
if data['intentName'] == sentence or sentence in data["trainingPhrase"]:
res = data['response']
print(res)
break
if not res:
print(f"{bot_name}: I do not understand...")
|
[
"fenilnavdiwala088@gmail.com"
] |
fenilnavdiwala088@gmail.com
|
73ebc481d008e79fbdd92ae312b284c8a5a42c45
|
d6e670718cb40912c83b89e4918c1d59c2ccd7da
|
/Assignment6/cas2.py
|
c380f7f95e09c38ed6338f566f239ba08563857d
|
[] |
no_license
|
realTrueProger/PythonCourseSolovevGimaev
|
dc973a8112bf8ae843d7411d56b7bc35fd8d4cb2
|
255b7bc1986ee712e5169a2f5e4c8e6e06d8424b
|
refs/heads/master
| 2022-11-05T02:44:28.844425
| 2020-06-26T16:14:49
| 2020-06-26T16:14:49
| 266,848,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,633
|
py
|
# check if chars are digit
def is_digit(token):
    """Return True when *token* parses as an integer via int()."""
    try:
        int(token)
    except ValueError:
        return False
    return True
# check if the char is operation
def is_operation(c):
    """Return True when *c* is a supported binary operator ('+' or '-').

    Idiom fix: a direct membership test replaces the list + if/else chain.
    """
    return c in ('+', '-')
def simplify(x):
    """Evaluate a whitespace-separated '+'/'-' expression.

    Returns the result as a string, or the string 'error' (after printing
    a diagnostic) when the expression is malformed.
    """
    operations = []
    operands = []
    # Tokenize: every token must be an integer or a known operator.
    for token in x.split():
        if is_digit(token):
            operands.append(int(token))
        elif is_operation(token):
            if not operands:
                print('err: invalid expression')
                return('error')
            operations.append(token)
        else:
            print('err: invalid expression')
            return('error')
    # A valid expression has exactly one more operand than operators.
    if len(operands) != len(operations) + 1:
        print('err: invalid expression')
        return('error')
    # Fold left-to-right starting from the first operand.
    result = operands[0]
    for op, operand in zip(operations, operands[1:]):
        result = result + operand if op == '+' else result - operand
    return str(result)
# split string by space
def split_string(x):
    """Split *x* on single spaces.

    Behaves exactly like ``x.split(' ')`` for non-empty input (adjacent
    separators yield empty tokens, and a trailing space yields a trailing
    empty token); an empty string yields ``[]``, matching the original
    hand-rolled character loop.
    """
    if not x:
        return []
    return x.split(' ')
# Interactive calculator loop: reads expressions from stdin until 'exit'.
# Supports one level of (...) grouping and [...] references to earlier
# results by index.
count = 0      # number of successfully evaluated expressions
results = []   # history of results, addressable via [n] tokens
while True:
    keyboard_input = input()
    if keyboard_input == 'exit': # condition on exit
        break
    else:
        # handle brackets
        open_brackets = False
        # first we do every operation in brackets
        # then expand them and compute everything else
        brackets_place = [] # list of list [start, end, result]
        for n, c in enumerate(split_string(keyboard_input)):
            if c.startswith("(") and not open_brackets:
                open_brackets = True
                brackets_place.append([n,0,0]) # bracket starts
                inside_bracket = c[1:]
            elif open_brackets:
                if c.endswith(")"):
                    open_brackets = False
                    inside_bracket += ' ' + c[:-1]
                    brackets_place[-1][1] = n # bracket ends
                    brackets_place[-1][2] = simplify(inside_bracket) # bracket result
                else:
                    inside_bracket += ' ' + c
            elif c.startswith("[") and not open_brackets:
                brackets_place.append([n,0,0]) # bracket starts
                if c.endswith("]") and len(results) != 0:
                    brackets_place[-1][1] = n # bracket ends
                    brackets_place[-1][2] = results[int(c[1:-1])] # bracket result
                else:
                    print('err: invalid expression')
                    # NOTE(review): this break only exits the token scan;
                    # execution still falls through to the substitution and
                    # evaluation phase below with a partially-built
                    # brackets_place -- likely a bug, confirm intended flow.
                    break
            elif not open_brackets and c.endswith(")"):
                print('err: invalid expression')
                break
        # replace everything inside the brackets
        # (reverse so earlier indices stay valid while splicing)
        brackets_place.reverse()
        new_keyboard_input = split_string(keyboard_input)
        for bracket in brackets_place:
            del new_keyboard_input[bracket[0]:bracket[1]+1]
            new_keyboard_input.insert(bracket[0], bracket[2])
        # compute final result
        result = simplify(' '.join(new_keyboard_input))
        if result != 'error':
            print(f'{count}: {result}')
            results.append(result)
            count += 1
|
[
"vovasollo@yandex.ru"
] |
vovasollo@yandex.ru
|
ab15a0efaac07c70295a3d3f72269277df81f4d8
|
73ac971c75d395ce03c812cf60744a4d8ccb15ce
|
/accounts/models.py
|
1954119a5af0157048d1f25984629c17fd24a5cb
|
[] |
no_license
|
SanketKesharwani/GreatKart
|
d92eed75a030059d86ea1fa6b44566f437165078
|
94f0f4a98e221c818594d960d4f691a55bf6dd71
|
refs/heads/main
| 2023-09-01T11:16:51.009342
| 2021-10-30T18:41:46
| 2021-10-30T18:41:46
| 403,580,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,766
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser,BaseUserManager
# Create your models here.
class MyAccountManager(BaseUserManager):
    """Custom user manager for Account: users are keyed by e-mail."""
    def create_user(self,first_name,last_name,username,email,password):
        """Create and save a regular user.

        Raises ValueError when email or username is missing.  The new
        user is saved with the default flags (inactive, non-staff).
        """
        if not email:
            raise ValueError('NOT PROVIDED OR SEEMS WRONG')
        if not username:
            raise ValueError('USERNAME IS MANDATORY')
        user = self.model(first_name = first_name,
                          last_name = last_name,
                          email = self.normalize_email(email),
                          username = username)
        user.set_password(password)  # stores the hashed password, never the raw one
        user.save(using=self._db)
        return user
    def create_superuser(self,first_name,last_name,email,username,password):
        """Create a superuser: a regular user with all admin flags enabled."""
        user = self.create_user(first_name = first_name,
                                last_name = last_name,
                                email = self.normalize_email(email),
                                username = username,
                                password = password,)
        user.is_active = True
        user.is_admin = True
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)  # second save applies the flag changes
        return user
class Account(AbstractUser):
    """Custom user model authenticated by e-mail instead of username."""
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    username = models.CharField(max_length=100)
    email = models.EmailField(max_length=100,unique=True)
    phone = models.CharField(max_length=20)
    #mandatory required fields
    date_created = models.DateTimeField(auto_now_add=True)
    last_login = models.DateTimeField(auto_now_add=True)
    is_admin = models.BooleanField(default=False)
    is_active = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    # Authenticate with e-mail; username becomes an ordinary required field.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
    objects = MyAccountManager()
    def __str__(self):
        return self.email
    def has_perm(self, perm,obj=None):
        # Admins implicitly hold every permission.
        return self.is_admin
    def has_module_perms(self, app_label):
        # Fix: Django's permission hook is spelled has_module_perms; the
        # original misspelled it (has_module_prems), so this override was
        # never invoked and the inherited PermissionsMixin logic ran instead.
        return True
    def has_module_prems(self,add_lable):
        # Kept for backward compatibility with any direct callers of the
        # misspelled name.
        return True
class UserProfile(models.Model):
    """Address and picture details attached to an Account."""
    user = models.ForeignKey(Account, on_delete=models.CASCADE)
    address_line_1 = models.CharField(max_length=200,blank=True)
    address_line_2 = models.CharField(max_length=200,blank=True)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=50)
    country = models.CharField(max_length=50)
    profile_picture = models.ImageField(blank=True,upload_to='userprofile')
    def __str__(self):
        return self.user.first_name
    def full_address(self):
        """Return the address lines joined by a space.

        Fix: skip blank lines so a missing address_line_2 no longer
        produces a trailing space.
        """
        return ' '.join(part for part in (self.address_line_1, self.address_line_2) if part)
|
[
"98sanketkesharwani@gmail.com"
] |
98sanketkesharwani@gmail.com
|
af316aa3e34b277b1454686a6fb25362fb4ce391
|
588fe4ea44b24fab42aef793facf0a85e6709497
|
/applesnake.py
|
80391c961070ef61165946107211d1423424f4b6
|
[] |
no_license
|
KristenChing/Final_Project2
|
9ce169119151a54a03ce1b4efd303652f35b3b07
|
616ef6bf5506ed556ffd99b550ff2bda85f1bec6
|
refs/heads/master
| 2021-01-21T11:00:20.108438
| 2017-06-09T17:25:59
| 2017-06-09T17:25:59
| 91,717,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,092
|
py
|
import turtle
import time
import random
wn = turtle.Screen()
wn.bgcolor("white")
apeat= 0
apple = turtle.Turtle()
apple.penup()
apple.shape("turtle")
apple.fillcolor("red")
snake = turtle.Turtle()
snake.color("darkgreen")
snake.pensize(5)
snake.speed(0)
snake.hideturtle()
snaketail = turtle.Turtle()
snaketail.color("white")
snaketail.pensize(5)
snaketail.speed(0)
snaketail.hideturtle()
w = 0
z = 0
coords = []
turns = 0
speed = 2
tailspeed = 1
ateApple = False
turnedLeft = False
turnedRight = False
timer = 5
def addspeed():
    # Reset the tail speed to the base value; the main loop immediately
    # overwrites tailspeed with the head speed after calling this, so the
    # effect is mainly to un-freeze the tail.
    global tailspeed
    tailspeed = 2
def resetcoords():
    # Record the snake head's current position as the latest turn point
    # (appended at index turns-1; `turns` was incremented by the caller).
    global x
    x = snake.xcor()
    global y
    y = snake.ycor()
    coords.append([])
    coords[turns - 1].append(x)
    coords[turns - 1].append(y)
def resetcOrds():
    # Remember the apple's old position and move it to a random new spot.
    #apple.hideturtle()
    global w
    w = apple.xcor()
    global z
    z = apple.ycor()
    apple.goto(random.randint(-100, 100), random.randint(-100, 100))
    #apple.showturtle()
resetcOrds()
def left():
    # Arrow-key handler: rotate the head 90 degrees left and log the turn.
    global turnedLeft
    global turns
    turns += 1
    snake.left(90)
    resetcoords()
    turnedLeft = True
def right():
    # Arrow-key handler: rotate the head 90 degrees right and log the turn.
    global turnedRight
    global turns
    turns += 1
    snake.right(90)
    resetcoords()
    turnedRight = True
turtle.listen()
turtle.onkey(left, "Left")
turtle.onkey(right, "Right")
while True:
snake.forward(speed)
snaketail.forward(tailspeed)
## print(speed)
## print("tail", tailspeed)
## print("turns", turns)
## print("coords list", coords)
print("snake coords", snake.xcor(), snake.ycor())
## print("snaketail coords", snaketail.xcor(), snaketail.ycor())
print("apple coords", apple.xcor(), apple.ycor())
print("\n")
x = snake.xcor()
y = snake.ycor()
w = apple.xcor()
z = apple.ycor()
for x in range(turns):
if (snaketail.xcor() == coords[x][0]) and (snaketail.ycor() == coords[x][1]):
if turnedLeft == True:
snaketail.left(90)
addspeed()
tailspeed = speed
turnedLeft = False
if turnedRight == True:
snaketail.right(90)
addspeed()
tailspeed = speed
turnedRight = False
for i in range (int(w) - 10, int(w) + 10):
print("w", w)
print("i", i)
if i == snake.xcor():
for j in range (int(z) - 10, int(z) + 10):
print("j", j)
if j == snake.ycor():
apple.hideturtle()
apeat+=1
ateApple = True
resetcOrds()
apple.showturtle()
if ateApple == True:
oldspeed = speed
speed = oldspeed + 0.5
ateApple = False
apple.listen()
#-------------------------------------debug code-------------------------------
## print("turns", turns)
## print("coords list", coords)
## print("snake coords", snake.xcor(), snake.ycor())
## print("snaketail coords", snaketail.xcor(), snaketail.ycor())
## print("\n")
|
[
"kristenching1@gmail.com"
] |
kristenching1@gmail.com
|
faec97843cec5bed045234f3d1b9b4b5f7f7da54
|
93aed45601b0c6ec23a988a24c6b80d4d5e93dc3
|
/shapy/framework/executor.py
|
44903b89fe9bdaf4a3743db720cbef27bd6d1808
|
[
"MIT"
] |
permissive
|
liangxiaoping/shapy
|
dbd19b8affc79aabc32ccba560419ee532715b59
|
7fa5512d9015b4921870f212495280fbb0675164
|
refs/heads/master
| 2020-12-28T06:58:23.222187
| 2013-03-18T01:43:15
| 2013-03-18T01:43:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
import os, sys
import re
import subprocess
import shlex
import logging
import logging.handlers
#from string import Template
logger = logging.getLogger('shapy.framework.executor')
from shapy import settings
from shapy.framework.exceptions import ImproperlyConfigured
def run(command, **kwargs):
    """Run *command* (a shell-style string), by default under sudo.

    The string is tokenized with shlex (no shell involved).  When sudo is
    used and settings.SUDO_PASSWORD is set, ``-S`` makes sudo read the
    password from stdin.  Returns the command's stdout; a non-zero exit
    status is only logged, never raised.
    """
    command = shlex.split(command)
    if kwargs.pop('sudo', True):  # pass sudo=False to run unprivileged
        if settings.SUDO_PASSWORD:
            command.insert(0, '-S')  # sudo: read password from stdin
        command.insert(0, 'sudo')
    p = subprocess.Popen(command, bufsize=-1, stdout=subprocess.PIPE,
                                  stdin=subprocess.PIPE, stderr=subprocess.PIPE,
                                  env=settings.ENV)
    # The password is written even when sudo isn't used; the extra
    # newline on stdin is harmless in that case.
    stdout, stderr = p.communicate('%s\n' % settings.SUDO_PASSWORD)
    if p.returncode == 0:
        logger.info('[{1}] {0}'.format(' '.join(command), p.returncode))
    else:
        fmt = """[{1}] {0} [{2}]"""
        logger.error(fmt.format(' '.join(command), p.returncode, stderr.strip()))
    return stdout
def get_command(name, **kwargs):
    """Look up command template *name* in the settings.COMMANDS module.

    When *kwargs* are given, the template is formatted with them.
    Raises ImproperlyConfigured when the command, or the commands module
    itself, is missing.
    """
    try:
        __import__(settings.COMMANDS)
        cmd = getattr(sys.modules[settings.COMMANDS], name)
        if kwargs:
            cmd = cmd.format(**kwargs)
        return cmd
    except AttributeError:
        msg = "Command '%s' undefined!" % name
        logger.critical(msg)
        raise ImproperlyConfigured(msg)
    except (KeyError, ImportError):
        # Fix: the original Py2 form `except KeyError, ImportError:` bound
        # the caught KeyError to the name ImportError instead of catching
        # ImportError at all.  The tuple form works on both Py2 and Py3.
        msg = "Missing commands module (%s)!" % settings.COMMANDS
        logger.critical(msg)
        raise ImproperlyConfigured(msg)
class Executable(object):
    """Base class for elements that render and run a shell command.

    The command template is looked up by the concrete subclass's name in
    the settings.COMMANDS module; ``opts`` holds the template's format
    arguments.
    """
    def __init__(self, **kwargs):
        self.opts = kwargs        # template substitution values
        self.executed = False     # guards against running the command twice
    def __setitem__(self, key, item):
        self.opts.update({key: item})
    def __getitem__(self, key):
        return self.opts[key]
    @property
    def cmd(self):
        # Template named after the concrete subclass.
        return get_command(self.__class__.__name__)
    def get(self):
        """Return the fully formatted command string."""
        self.opts.update(self.get_context())
        return self.cmd.format(**self.opts)
    def get_context(self):
        # Parent handle (empty when this element is a root) plus the
        # interface resolved from the root of the parent chain.
        has_p = getattr(self, 'parent', None)
        return {'parent': self.parent['handle'] if has_p else '',
                'interface': self.get_interface()}
    def get_interface(self):
        """Walk to the root of the parent chain and return its interface.

        Raises ImproperlyConfigured when no ancestor defines one.
        """
        p = getattr(self, 'parent', self)
        while hasattr(p, 'parent'):
            p = getattr(p, 'parent')
        try:
            return getattr(p, 'interface')
        except AttributeError:
            msg = "Element {0!r} has no interface".format(self)
            logger.critical(msg)
            raise ImproperlyConfigured(msg)
    def execute(self):
        """Run the command, at most once per instance."""
        if not self.executed:
            run(self.get())
            # Fix: record the run.  Previously `executed` was never set to
            # True, so the run-once guard below could never trigger.
            self.executed = True
        else:
            logger.debug("Command %s was already executed."% self.get())
|
[
"petr@praus.net"
] |
petr@praus.net
|
0bb40e15cc5d1261943589d966e5df9dcb4a91cc
|
425e80db15d13a6e3c76df622a56c348031b7f39
|
/Compare_SGL/test_cv_real.py
|
34505cff35c3703b1615f281a25a1c6616f472a1
|
[] |
no_license
|
rubygyj/SGL_lmm_code
|
529855452bf9c102c67f965dedbf6f8354831439
|
c07208e120d30de4abcf1715188ebb86ac93a464
|
refs/heads/master
| 2021-01-01T05:55:35.740027
| 2017-07-18T02:52:10
| 2017-07-18T02:52:10
| 97,308,868
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,186
|
py
|
"""
"""
import csv
import scipy as SP
import scipy.linalg as LA
import pdb
import lmm_lasso_pg as lmm_lasso
import os
import sys
from pandas_plink import read_plink
if __name__ == '__main__':
    # NOTE(review): this is a Python 2 script (print statements later in
    # the file).  The usage message names three options but the check
    # requires len(sys.argv) >= 5, i.e. four arguments -- confirm the
    # intended argument count.
    if len(sys.argv) < 5:
        sys.stderr.write('\tUsage: python test_cv_real.py [gene_file] [pca_x_file] [use_pca]\n');
        sys.exit(1);
    # Third argument is a case-insensitive boolean: include PCA covariates?
    if sys.argv[3].lower() == 'true':
        has_pca = True
    elif sys.argv[3].lower() == 'false':
        has_pca = False
    else:
        sys.stderr.write('invalid has_pca value: true or false')
        sys.exit(1)
    # All input files are resolved relative to ./data
    root = 'data'
    gene_file = os.path.join(root, sys.argv[1])
    pca_x_file = os.path.join(root, sys.argv[2])
# load genotypes
[bim, fam, G] = read_plink(gene_file)
X = SP.array(G.compute()).astype(float)
[n_f, n_s] = X.shape
for i in range(X.shape[0]):
m = X[i].mean()
std = X[i].std()
X[i] = (X[i] - m) / std
X = X.T
if has_pca:
pca_x = SP.array(list(csv.reader(open(pca_x_file, 'rb'),
delimiter=','))).astype(float)
X = SP.column_stack((X, pca_x))
# simulate phenotype
y = SP.array(list(fam['i'])).astype(float)
[n_s, n_f] = X.shape
# init
debug = False
n_train = int(n_s * 0.7)
n_test = n_s - n_train
n_reps = 10
f_subset = 0.7
muinit = 0.1
mu2init = 0.1
ps_step = 3
# split into training and testing
train_idx = SP.random.permutation(SP.arange(n_s))
test_idx = train_idx[n_train:]
train_idx = train_idx[:n_train]
# calculate kernel
# the first 2622 SNP are in the first chromosome which we are testing
group = SP.array(list(csv.reader(open(os.path.join(root, sys.argv[1] + '_group.info'))))).astype(int)
if has_pca: group = group + [[n_f - 10, n_f]]
# Glasso Parameter selection by 5 fold cv
optmu = muinit
optmu2 = mu2init
optcor = 0
for j1 in range(7):
for j2 in range(7):
mu = muinit * (ps_step ** j1)
mu2 = mu2init * (ps_step ** j2)
cor = 0
for k in range(5): # 5 for full 5 fold CV
train1_idx = SP.concatenate(
(train_idx[:int(n_train * k * 0.2)], train_idx[int(n_train * (k + 1) * 0.2):n_train]))
valid_idx = train_idx[int(n_train * k * 0.2):int(n_train * (k + 1) * 0.2)]
w1 = lmm_lasso.train_lasso(X[train1_idx], y[train1_idx], mu, mu2, group)
# predict
idx = w1.nonzero()[0]
Xvalid = X[valid_idx, :]
yhat = SP.dot(Xvalid[:, idx], w1[idx])
cor += SP.dot(yhat.T - yhat.mean(), y[valid_idx] - y[valid_idx].mean()) / (
yhat.std() * y[valid_idx].std())
print mu, mu2, cor[0, 0]
if cor > optcor:
optcor = cor
optmu = mu
optmu2 = mu2
print optmu, optmu2, optcor[0, 0]
# train
w = lmm_lasso.train_lasso(X[train_idx], y[train_idx], optmu, optmu2, group)
# predict
idx = w.nonzero()[0]
Xtest = X[test_idx, :]
yhat = SP.dot(Xtest[:, idx], w[idx])
corr = 1. / n_test * SP.dot(yhat.T - yhat.mean(), y[test_idx] - y[test_idx].mean()) / (
yhat.std() * y[test_idx].std())
print corr[0, 0]
# stability selection
# group info included
ss = lmm_lasso.stability_selection(X, K, y, optmu, optmu2, group, n_reps, f_subset)
sserr1 = 0
sserr2 = 0
for i in range(n_f):
if i in idx:
if ss[i] < n_reps * 0.8:
sserr1 += 1
else:
if ss[i] >= n_reps * 0.8:
sserr2 += 1
# Output
result_ss = [(ss[idx], idx) for idx in len(ss)]
result_ss.sort(key = lambda item : (-item[0], item[1]))
with open(sys.argv[1] + '{0}_result.csv'.format('_SGL_10pc' if has_pca else '_SGL'), 'w') as result_file:
result_writer = csv.writer(result_file)
for item in result_ss:
result_writer.writerow((idx, ss[idx]))
for i in range(n_f):
print i, (i in idx), ss[i], ss2[i]
print optmu, optmu2, optmu0
print sserr1, sserr2, ss2err1, ss2err2
|
[
"lkgv@foxmail.com"
] |
lkgv@foxmail.com
|
43e5d2934e1bf847673aa01b30a09c96d320f9d6
|
441e4506d741db1afbeaa0af8822b130e26e5d5d
|
/unit7/mission7.3.2.py
|
7f1d2a17c934a7f313b13783c5e7f4ce0f1ee300
|
[] |
no_license
|
ronhrl/Self.py
|
253bca9f23042833573ef93864a6988052f31b31
|
ba257e5300f548b3fe504dbb027fa054e80de365
|
refs/heads/master
| 2023-07-28T08:03:44.372673
| 2021-09-13T09:05:47
| 2021-09-13T09:05:47
| 403,127,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
def check_win(secret_word, old_letters_guessed):
    """Return True when every letter of *secret_word* has been guessed.

    Equivalent to the original nested while-loop counter: each letter of
    the secret word must appear somewhere in old_letters_guessed (repeated
    letters only need one matching guess, as before).

    :param secret_word: the word being guessed
    :param old_letters_guessed: letters guessed so far
    :return: True if the word is fully revealed, False otherwise
    """
    return all(letter in old_letters_guessed for letter in secret_word)
|
[
"rontsohrl@gmail.com"
] |
rontsohrl@gmail.com
|
4c2caed53401e57b7f09d65b6b9d5806aec30d9e
|
797557fcd8e666e329c135b328974c868d0b09a8
|
/face/norm_files/norm2.py
|
d3bebe24f27ffde2514b2fcf7d218a9b7bf640ff
|
[] |
no_license
|
north-fern/ME-134
|
d9a80efce520d5af89ac440be2a3cd262a06de34
|
14b2eaa13b3d7a3381f93e984dd9039f429547b4
|
refs/heads/master
| 2023-01-22T19:02:09.004734
| 2020-11-23T03:23:10
| 2020-11-23T03:23:10
| 298,402,783
| 1
| 1
| null | 2020-11-23T02:08:21
| 2020-09-24T21:50:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,591
|
py
|
# Fix: the Pillow distribution is imported as `PIL`, not `Pillow` -- the
# original `from Pillow import ...` raised ModuleNotFoundError on every run.
# Fix: the code below references `tf.contrib.lite`, but no name `tf` was
# ever bound; import tensorflow under that alias.
# NOTE(review): tf.contrib was removed in TensorFlow 2.x, so this script
# appears to target TF 1.x -- confirm the pinned version.
import tensorflow as tf
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
import time
import cv2
def select_image(index):
    """Load 'image<index>.jpg', preprocess it, and classify it with the model.

    NOTE(review): `tf` is never imported in this file, so this call raises
    NameError as written.  Also, TFLite Interpreter objects expose
    allocate_tensors/set_tensor/invoke rather than a Keras-style
    `.predict()` -- confirm the intended API.  The model is re-loaded on
    every call, which is slow inside the capture loop.
    """
    # Disable scientific notation for clarity
    np.set_printoptions(suppress=True)
    # Load the model
    model = tf.contrib.lite.Interpreter(model_path="model_unquant.tflite")
    # Create the array of the right shape to feed into the keras model
    # The 'length' or number of images you can put into the array is
    # determined by the first position in the shape tuple, in this case 1.
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    # Replace this with the path to your image
    image = Image.open('image' + str(index) + '.jpg')
    #resize the image to a 224x224 with the same strategy as in TM2:
    #resizing the image to be at least 224x224 and then cropping from the center
    size = (224, 224)
    image = ImageOps.fit(image, size, Image.ANTIALIAS)
    #turn the image into a numpy array
    image_array = np.asarray(image)
    # display the resized image
    image.show()
    # Normalize the image to the [-1, 1] range expected by the model
    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
    # Load the image into the array
    data[0] = normalized_image_array
    # run the inference
    prediction = model.predict(data)
    print(prediction)
def take_image(index):
    """Capture one frame from the default camera and save it as 'image<index>.jpg'."""
    cap = cv2.VideoCapture(0)
    try:
        ret, frame = cap.read()
        cv2.imwrite('image' + str(index) + '.jpg', frame)
    finally:
        # Fix: release the camera handle.  The original leaked a new
        # VideoCapture on every call, which exhausts the device quickly
        # inside the endless capture loop below.
        cap.release()
# Main loop: snap a frame roughly every second, classify it, repeat forever.
index = 0
while True:
    take_image(index)
    time.sleep(.5)
    select_image(index)
    time.sleep(.5)
    index = index + 1  # each iteration writes a new image<index>.jpg
    print(index)
|
[
"60278154+north-fern@users.noreply.github.com"
] |
60278154+north-fern@users.noreply.github.com
|
4b3694a7a68013ffe4d6627a48024d90045f59cd
|
d8e38b9c6f7d8cf09e9a700ee784fb3e6e58b89f
|
/SVD/svd.py
|
6066e48560cae98ed331346494d6b7314e59c721
|
[] |
no_license
|
vishalbelsare/Lasso-ElasticNet-toolkit
|
0ba5e152c062b633347353867a9fed2380752394
|
e474824555a1680cd35814635ba6e0713ebef018
|
refs/heads/master
| 2021-12-12T18:33:12.922797
| 2017-02-14T13:40:27
| 2017-02-14T13:40:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,700
|
py
|
import numpy as np
import operator
from os import listdir
import matplotlib
import matplotlib.pyplot as plt
from numpy.linalg import *
from scipy.stats.stats import pearsonr
from numpy import linalg as la
import xlrd
def svd(data, S):
    """Compute the SVD of *data* and keep the leading S components.

    NOTE(review): Python 2 code (print statements).  The printed edge
    estimate uses module-level globals n and m set in __main__, and
    Sig[9,9] assumes S >= 10 -- confirm both before reuse.  The plotting
    code is intentionally commented out.
    """
    #calculate SVD
    U, s, Vt = linalg.svd( data )
    V = Vt.T
    #take out columns we don't need
    Sig = np.eye(S)*s[:S]   # S x S diagonal matrix of the top singular values
    newU = U[:,:S]
    newV = V[:,:S]
    #print np.sqrt(Sig[0])
    #print newV
#    np.savetxt('dat.txt', np.sqrt(Sig[0,0])*newU[:,0])
# Retrieve dataset
    print Sig[9,9]
#    print newU
    print (np.sqrt(n)-np.sqrt(m))*(1./np.sqrt(m))
    #fig = plt.figure()
    #ax = fig.add_subplot(1,1,1)
    colors = ['blue','red']
#    for i in xrange(num_Sel):
#        ax.scatter(np.sqrt(Sig[0,0])*newU[i,0],np.sqrt(Sig[1,1])*newU[i,1], color= 'blue')
#    for i in xrange(num_UnS):
#        ax.scatter(np.sqrt(Sig[0,0])*newU[i+num_Sel,0],np.sqrt(Sig[1,1])*newU[i+num_Sel,1], color= 'red')
#    for i in xrange(n):
#        ax.scatter(Sig[0,0]*newV[i,0],Sig[1,1]*newV[i,1], color= 'gray')
#    plt.xlabel('SVD1')
#    plt.ylabel('SVD2')
#    plt.show()
#Main
if __name__=='__main__':
    # Demo driver: build a random m x n Gaussian matrix whose entries
    # are scaled by 1/sqrt(m), then run the truncated-SVD diagnostic.
    # NOTE(review): svd() also reads `n` and `m` as globals, so these
    # assignments must stay in sync with H's shape.
    # load data points
    n=1000
    m=10
    H=(1./np.sqrt(m))*np.random.randn(m,n)
    # Dead pre-processing experiments (integer cast, column/row mean
    # removal) kept commented out for reference:
    # H = np.array(H,dtype=int)
    # mean = H.mean(axis=0)
    # mean.shape = (1,mean.shape[0])
    # H = H-mean #takeout column mean
    # mean = H.mean(axis=1)
    # mean.shape = (mean.shape[0], 1)
    # H = H-mean #takeout raw mean
    #fly,snp = np.shape(H)
    svd(H,m)
|
[
"mohammad@Mohammads-MacBook-Pro.local"
] |
mohammad@Mohammads-MacBook-Pro.local
|
a12d2275939d8e34ebee7703446413cb633bd5a5
|
65e4df21987fd61b313dcf4c1b5139c8f16b453b
|
/painter.py
|
35f2c4a2af773859ac4139a8d38d475c13541aae
|
[] |
no_license
|
JustinOliver/ATBS
|
ec4ccc625cef009eeac023ba23c812a243f8b491
|
172bd5788988e15b4894622495ac6962c9a28582
|
refs/heads/master
| 2021-09-07T08:14:33.957443
| 2018-02-20T02:03:05
| 2018-02-20T02:03:05
| 112,120,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,185
|
py
|
#!c:\users\michael scott\desktop\python\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# painter widget
class PaintCanvas(tkinter.Canvas):
    """Canvas that shows an image as a grid of 32x32 tiles and lets the
    user "paint" (desaturate) regions by dragging with the left mouse
    button.  Tiling means only the tiles under the brush are re-pasted,
    instead of updating the whole photo image on every stroke."""

    def __init__(self, master, image):
        width, height = image.size
        tkinter.Canvas.__init__(self, master, width=width, height=height)
        # Cut the picture into tiles; remember each tile's crop box and
        # its PhotoImage so repair() can refresh it later.
        self.tile = {}
        self.tilesize = 32
        for left in range(0, width, self.tilesize):
            for top in range(0, height, self.tilesize):
                box = (left, top,
                       min(width, left + self.tilesize),
                       min(height, top + self.tilesize))
                photo = ImageTk.PhotoImage(image.crop(box))
                self.create_image(left, top, image=photo, anchor=tkinter.NW)
                self.tile[(left, top)] = box, photo
        self.image = image
        self.bind("<B1-Motion>", self.paint)

    def paint(self, event):
        # 20x20 brush centred on the pointer.
        box = (event.x - 10, event.y - 10, event.x + 10, event.y + 10)
        patch = self.image.crop(box)
        # process the image in some fashion: convert the patch to
        # grayscale and paste it back into the master image.
        patch = patch.convert("L")
        self.image.paste(patch, box)
        self.repair(box)

    def repair(self, box):
        # Snap the damaged box to the tile grid, then re-paste every tile
        # it overlaps from the (already modified) master image.
        step = self.tilesize
        start_x = box[0] - box[0] % step
        start_y = box[1] - box[1] % step
        for x in range(start_x, box[2] + 1, step):
            for y in range(start_y, box[3] + 1, step):
                entry = self.tile.get((x, y))
                if entry is None:
                    # Brush ran off the edge of the image — nothing to do.
                    continue
                xy, photo = entry
                photo.paste(self.image.crop(xy))
        self.update_idletasks()
#
# main
# Entry point: painter <imagefile>.  The image is forced to RGB so the
# grayscale "paint" strokes are visible against the colour original.
if len(sys.argv) != 2:
    print("Usage: painter file")
    sys.exit(1)

root = tkinter.Tk()
source = Image.open(sys.argv[1])
if source.mode != "RGB":
    source = source.convert("RGB")
canvas = PaintCanvas(root, source)
canvas.pack()
root.mainloop()
|
[
"justinoliver77@aol.com"
] |
justinoliver77@aol.com
|
bf7e1ff062001f78b26cb924fe7dce1e81c97584
|
30f8afce1ba484183d8e1e14aae76cabb2d92354
|
/pbase/day25/code.py
|
4241fecd4e12aae4b1efcb70b62bba4ff4ecc133
|
[] |
no_license
|
brooot/Python_Base_Codes
|
d83e8c3b8a37b86672412c812fdb0d47deb67836
|
a864685e160b5df4162a6f9fb910627eda702aaf
|
refs/heads/master
| 2023-04-10T20:08:39.161289
| 2021-03-25T12:59:23
| 2021-03-25T12:59:23
| 200,570,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# x=1
# sum=1
# def cal(n):
# while x<=n:
# sum += 1/(2**n)
# return sum
# print(cal(n))
# def cal(n):
# result=0
# for i in range(n+1):
# result += 1/(2**i)
# return result
# # n=int(input("请输入n:"))
# for i in range(10,100):
# print(cal(i))
def cal1(n):
    """Return the n-th partial sum of the harmonic series:
    1 + 1/2 + ... + 1/n (0 for n < 1)."""
    # Same left-to-right accumulation as the original loop, so the
    # floating-point result is bit-identical.
    return sum(1 / i for i in range(1, n + 1))
# n=int(input("请输入n:"))
# Print the harmonic partial sums H_1 .. H_19, one per line.
for n in range(1,20):
    print(cal1(n))
|
[
"1442704671@qq.com"
] |
1442704671@qq.com
|
33e6f44cdd478c00475a2ec163ef13c53db3775f
|
0c3fa4fef366529eae709245cac6982f93c83bb1
|
/navigus_app/api_views/serializers.py
|
021d92fe8e583a479610841db2bbb32e1c3a0080
|
[] |
no_license
|
Mprogrammer2020/navigus
|
bedad5b92d1902b34b1384fece96e8116ba61772
|
b2e8ac3381ddc0ddf629f48c054e83a4c6a9ca9e
|
refs/heads/master
| 2022-07-29T11:45:11.373707
| 2020-05-24T13:58:50
| 2020-05-24T13:58:50
| 266,552,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
from django.contrib.auth.models import User
from rest_framework import serializers
from api_views.models import TimeSlots, Bookings
class UserSerializer(serializers.ModelSerializer):
    """REST serializer for Django's built-in auth User model.

    NOTE(review): 'password' is listed in `fields` with no write_only
    override, so the (hashed) password is also exposed on read —
    confirm it should be marked write-only via extra_kwargs.
    """
    class Meta:
        model = User
        fields = ('id', 'username', 'first_name', 'last_name', 'email', 'password')
class TimeSlotsSerializer(serializers.ModelSerializer):
    """Serializes a teacher's bookable time slot (day, start/end time,
    and availability status)."""
    class Meta:
        model = TimeSlots
        fields = ('id', 'teacher', 'dayDate', 'startTime', 'endTime', 'status')
class BookingsSerializer(serializers.ModelSerializer):
    """Serializes a student's booking of a time slot."""
    class Meta:
        model = Bookings
        fields = ('id', 'student', 'timeSlot')
|
[
"mprogrammer2020@gmail.com"
] |
mprogrammer2020@gmail.com
|
42bb7f8081777e72d2ae388547512f336b7ee65b
|
c73b6722d362192af7030d0df83c18f41b02e3f6
|
/tictactoe.py
|
beeba861c7398fb96bc992704004870bc9e483ca
|
[] |
no_license
|
jyojun/pygame
|
c7c8a96bcaaee4df8e49f357cc0e4b384b9ce7fe
|
7e9101f855ef569916c82dab09bdb58f2b18f821
|
refs/heads/master
| 2022-06-04T17:09:06.586078
| 2020-05-05T02:37:42
| 2020-05-05T02:37:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,779
|
py
|
import pygame
import sys
# RGB colour constants (only `black` is actually used below; the rest
# appear to be palette leftovers).
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
darkBlue = (0,0,128)
white = (255,255,255)
black = (0,0,0)
pink = (255,200,200)
# Icon selection is hard-coded to "X"; the interactive prompt is
# disabled below.
#iconChoice = input("Would you like to be X's or O's?(X/O)?:")
iconChoice = "X"
# initialize game engine
pygame.init()
pygame.font.init()
# NOTE(review): `font` is never used afterwards; the draw code below
# creates its own 60pt SysFont on every frame.
font = pygame.font.SysFont("Century Schoolbook",12)
# set screen width/height and caption
size = [500,500]
screen = pygame.display.set_mode(size)
pygame.display.set_caption('My Game')
# initialize clock. used later in the loop.
clock = pygame.time.Clock()
# Loop until the user clicks close button
done = False
while done == False:
    # write event handlers here
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    # write game logic here
    # Fresh font + rendered "X"/"O" glyph each frame.
    sys_font = pygame.font.SysFont("None",60)
    rendered = sys_font.render(iconChoice, 0, black)
    mousexpos, mouseypos = pygame.mouse.get_pos()
    pygame.event.get()
    # Hit-test the 3x3 board: cells are delimited at x/y ~ 156..166 and
    # 322..332 (the black divider bars drawn below).  Each branch prints
    # its cell number; most also quit by setting `done`.
    # NOTE(review): cell 2 does NOT set done=True, unlike cells 3-9 —
    # confirm whether that asymmetry is intentional.
    if pygame.mouse.get_pressed()[0] == True and mousexpos > 166 and mousexpos < 322 and mouseypos < 156:
        print("2")
    elif pygame.mouse.get_pressed()[0] == True and mousexpos > 332 and mouseypos < 156:
        print("3")
        done = True
    elif pygame.mouse.get_pressed()[0] == True and mousexpos < 156 and mouseypos > 166 and mouseypos < 322:
        print("4")
        done= True
    elif pygame.mouse.get_pressed()[0] == True and mousexpos > 166 and mousexpos < 322 and mouseypos > 166 and mouseypos < 322:
        print("5")
        done= True
    elif pygame.mouse.get_pressed()[0] == True and mousexpos > 332 and mouseypos > 166 and mouseypos < 322:
        print("6")
        done = True
    elif pygame.mouse.get_pressed()[0] == True and mousexpos < 156 and mouseypos > 332:
        print("7")
        done= True
    elif pygame.mouse.get_pressed()[0] == True and mousexpos > 166 and mousexpos < 322 and mouseypos > 332:
        print("8")
        done= True
    elif pygame.mouse.get_pressed()[0] == True and mousexpos > 332 and mouseypos > 332:
        print("9")
        done = True
    # clear the screen before drawing
    screen.fill((255, 255, 255))
    # draw
    # Two horizontal and two vertical 15px-thick bars form the grid.
    pygame.draw.rect(screen, black, (10,156,480,15), 0)
    pygame.draw.rect(screen, black, (10,322,480,15), 0)
    pygame.draw.rect(screen, black, (156,10,15,480), 0)
    pygame.draw.rect(screen, black, (322,10,15,480), 0)
    pygame.display.flip()
    # Cell 1 (top-left) is handled after the flip: the icon is blitted
    # and only its 166x166 region is updated.
    if pygame.mouse.get_pressed()[0] == True and mousexpos < 156 and mouseypos < 156:
        print("1")
        screen.blit(rendered, (20,15))
        pygame.display.update(10,10,166,166)
    # display what’s drawn. this might change.
    pygame.display.update()
    # run at 20 fps
    clock.tick(20)
# close the window and quit
pygame.quit()
|
[
"parkcode98@gmail.com"
] |
parkcode98@gmail.com
|
34f227442508aa9b92b0762b4d1ead4ff2c31df9
|
2be9cb839f95c52c506a140f599f431f19ba31c4
|
/modules/mtg.py
|
ea1234b7e9488171e3855ff28d010be0c53df21a
|
[] |
no_license
|
dasu/phenny
|
70d2289c5ea5ff6304b65a348103f0f5e1b0514b
|
d615728dbba57f465fcd590a3a392d47e5a76957
|
refs/heads/master
| 2021-01-12T19:43:00.900865
| 2015-03-04T17:48:38
| 2015-03-04T17:48:38
| 15,314,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
import json
from urllib.parse import quote
from urllib.request import urlopen
def mtg(phenny, input):
    """IRC command handler: look up a Magic: The Gathering card on
    mtgdb.info and announce its stats in-channel.

    `input` is the phenny match object (the name shadows the builtin,
    but it is the framework's callback convention); input.group(2) is
    the card name the user typed after the command.
    """
    if not input.group(2):
        return phenny.say("Please enter a card name.")
    i = input.group(2)
    # Percent-encode the card name: names routinely contain spaces and
    # punctuation, which are not legal in a raw URL path and made the
    # original request fail.
    url = 'http://api.mtgdb.info/search/%s' % quote(i)
    # Use the response as a context manager so the socket is closed
    # instead of leaked.
    with urlopen(url) as x:
        c = x.read()
    # Take the first (best) match from the search results.
    js = json.loads(c.decode('utf-8'))[0]
    if js['type'] == 'Creature':
        phenny.say('Name: {0}, Type: {1}, Cost: {2}, Effect: {3}, Power: {4}, Toughness: {5}'.format(js['name'], js['type'], js['manaCost'], js['description'], js['power'], js['toughness']))
    else:
        phenny.say('Name: {0}, Type: {1}, Cost: {2}, Effect: "{3}"'.format(js['name'], js['type'], js['manaCost'], js['description']))
# phenny plugin registration: command aliases and scheduling priority.
mtg.commands = ['mtg','magic']
mtg.priority = 'medium'
|
[
"desusp@gmail.com"
] |
desusp@gmail.com
|
090e82b46e7545d66e3d998765a81304294b9244
|
91aa198b8178d1bf2b710a1ac315f0455e631916
|
/myproject/boards/migrations/0002_topic_views.py
|
47a39922ba2832aa1ecee0160507bca7037fe35e
|
[] |
no_license
|
Mmarin95/django-app
|
e56c16892f8454552fb7a0ba386dace211063a88
|
23059fecd137461958f2a202250037940e99686b
|
refs/heads/main
| 2023-01-18T16:12:55.275588
| 2020-11-22T10:50:46
| 2020-11-22T10:50:46
| 314,205,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# Generated by Django 3.1.3 on 2020-11-21 17:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a `views` counter to boards.Topic."""

    dependencies = [
        ('boards', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='topic',
            name='views',
            # Non-negative counter; existing and new rows start at 0.
            field=models.PositiveIntegerField(default=0),
        ),
    ]
|
[
"mmarin_79@hotmail.com"
] |
mmarin_79@hotmail.com
|
e9056686b755ec62d1124eb7098211b4b3e8aee9
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/TauES_test/up/emb/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374851248/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_113/run_cfg.py
|
41ce89199c250114da8542c4194b345c595cb1af
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604
| 2014-02-20T16:35:34
| 2014-02-20T16:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69,050
|
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/TauES_test/up/emb/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374851248/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
lumisToProcess = cms.untracked.VLuminosityBlockRange( ("190645:10-190645:110", "190646:1-190646:111", "190659:33-190659:167", "190679:1-190679:55", "190688:69-190688:249",
"190702:51-190702:53", "190702:55-190702:122", "190702:124-190702:169", "190703:1-190703:252", "190704:1-190704:3",
"190705:1-190705:5", "190705:7-190705:65", "190705:81-190705:336", "190705:338-190705:350", "190705:353-190705:383",
"190706:1-190706:126", "190707:1-190707:237", "190707:239-190707:257", "190708:1-190708:189", "190733:71-190733:96",
"190733:99-190733:389", "190733:392-190733:460", "190736:1-190736:80", "190736:83-190736:185", "190738:1-190738:130",
"190738:133-190738:226", "190738:229-190738:349", "190782:55-190782:181", "190782:184-190782:233", "190782:236-190782:399",
"190782:401-190782:409", "190895:64-190895:202", "190895:210-190895:302", "190895:305-190895:584", "190895:587-190895:948",
"190906:73-190906:256", "190906:259-190906:354", "190906:356-190906:496", "190945:124-190945:207", "190949:1-190949:81",
"191043:45-191043:46", "191046:1-191046:21", "191046:24-191046:82", "191046:84-191046:88", "191046:92-191046:116",
"191046:119-191046:180", "191046:183", "191046:185-191046:239", "191056:1", "191056:4-191056:9",
"191056:16-191056:17", "191056:19", "191057:1", "191057:4-191057:40", "191062:1",
"191062:3", "191062:5-191062:214", "191062:216-191062:541", "191090:1-191090:55", "191201:38-191201:49",
"191201:52-191201:79", "191202:1-191202:64", "191202:66-191202:68", "191202:87-191202:105", "191202:108-191202:118",
"191226:77-191226:78", "191226:81-191226:831", "191226:833-191226:1454", "191226:1456-191226:1466", "191226:1469-191226:1507",
"191226:1510-191226:1686", "191247:1-191247:153", "191247:156-191247:280", "191247:283-191247:606", "191247:608-191247:620",
"191247:622-191247:818", "191247:821-191247:834", "191247:837-191247:1031", "191247:1034-191247:1046", "191247:1049-191247:1140",
"191247:1143-191247:1187", "191247:1190-191247:1214", "191247:1217-191247:1224", "191248:1-191248:103", "191264:59-191264:79",
"191264:82-191264:152", "191264:155-191264:189", "191271:56-191271:223", "191271:225-191271:363", "191276:1-191276:16",
"191277:1-191277:28", "191277:30-191277:164", "191277:167-191277:253", "191277:255-191277:457", "191277:460-191277:535",
"191277:537-191277:576", "191277:579-191277:775", "191277:778-191277:811", "191277:813-191277:849", "191367:1-191367:2",
"191411:1-191411:23", "191695:1", "191718:43-191718:95", "191718:98-191718:207", "191720:1",
"191720:3-191720:15", "191720:17-191720:181", "191721:1", "191721:3-191721:34", "191721:36-191721:183",
"191721:186-191721:189", "191726:1-191726:13", "191810:15", "191810:22-191810:49", "191810:52-191810:92",
"191830:54-191830:242", "191830:245-191830:301", "191830:304-191830:393", "191833:1", "191833:3-191833:103",
"191834:1-191834:30", "191834:33-191834:74", "191834:77-191834:299", "191834:302-191834:352", "191837:1-191837:44",
"191837:47-191837:53", "191837:56-191837:65", "191856:1-191856:133", "191859:1-191859:28", "191859:31-191859:126",
"193093:1-193093:33", "193123:1-193123:27", "193124:1-193124:52", "193192:58-193192:86", "193193:1-193193:6",
"193193:8", "193193:11-193193:83", "193193:86-193193:120", "193193:122-193193:160", "193193:162-193193:274",
"193193:276-193193:495", "193193:497-193193:506", "193207:54-193207:182", "193334:29-193334:172", "193336:1-193336:264",
"193336:267-193336:492", "193336:495-193336:684", "193336:687-193336:729", "193336:732-193336:951", "193541:77-193541:101",
"193541:103-193541:413", "193541:416-193541:575", "193541:578-193541:619", "193556:41-193556:83", "193557:1-193557:84",
"193575:48-193575:173", "193575:176-193575:349", "193575:351-193575:394", "193575:397-193575:415", "193575:417-193575:658",
"193575:660-193575:752", "193621:60-193621:570", "193621:573-193621:769", "193621:772-193621:976", "193621:979-193621:1053",
"193621:1056-193621:1137", "193621:1139-193621:1193", "193621:1195-193621:1371", "193621:1373-193621:1654", "193834:1-193834:35",
"193835:1-193835:20", "193835:22-193835:26", "193836:1-193836:2", "193998:66-193998:113", "193998:115-193998:278",
"193999:1-193999:45", "194027:57-194027:113", "194050:53-194050:113", "194050:116-194050:273", "194050:275-194050:355",
"194050:357-194050:369", "194050:372-194050:391", "194050:394-194050:490", "194050:492-194050:814", "194050:816-194050:1435",
"194050:1437-194050:1735", "194050:1760-194050:1888", "194051:1-194051:12", "194052:1-194052:99", "194052:102-194052:166",
"194075:48-194075:101", "194075:103", "194075:105-194075:107", "194075:109", "194075:111",
"194076:1-194076:9", "194076:11-194076:55", "194076:58-194076:163", "194076:165-194076:228", "194076:230-194076:264",
"194076:267-194076:507", "194076:509-194076:527", "194076:530-194076:538", "194076:541-194076:562", "194076:565-194076:748",
"194108:81-194108:161", "194108:164-194108:264", "194108:266-194108:373", "194108:376-194108:396", "194108:398-194108:433",
"194108:436-194108:452", "194108:454-194108:577", "194108:579-194108:590", "194108:593-194108:668", "194108:671-194108:872",
"194115:66-194115:184", "194115:186-194115:338", "194115:340-194115:346", "194115:348-194115:493", "194115:496-194115:731",
"194115:819-194115:857", "194117:1-194117:38", "194119:1-194119:229", "194119:232-194119:261", "194120:1-194120:162",
"194120:165-194120:406", "194150:42-194150:127", "194150:129-194150:261", "194150:264-194150:311", "194151:47-194151:72",
"194151:75-194151:191", "194151:193-194151:238", "194151:240-194151:617", "194151:619", "194151:621",
"194151:623", "194153:1-194153:115", "194199:96-194199:227", "194199:229-194199:336", "194199:339-194199:402",
"194210:3-194210:195", "194210:198-194210:217", "194210:220-194210:359", "194210:361-194210:555", "194223:61-194223:112",
"194224:1-194224:126", "194224:129-194224:206", "194224:208-194224:250", "194224:253-194224:309", "194224:312-194224:386",
"194224:389-194224:412", "194225:1-194225:23", "194225:26-194225:47", "194225:49-194225:85", "194225:88-194225:149",
"194270:56-194270:68", "194303:56-194303:66", "194303:69-194303:102", "194304:1-194304:43", "194304:46",
"194305:1-194305:84", "194314:52-194314:130", "194314:133-194314:300", "194315:1-194315:10", "194315:13-194315:314",
"194315:317-194315:428", "194315:431-194315:452", "194315:455-194315:467", "194317:1-194317:20", "194424:63-194424:141",
"194424:144-194424:195", "194424:198-194424:266", "194424:268-194424:421", "194424:424-194424:478", "194424:481-194424:531",
"194424:534-194424:553", "194424:556-194424:706", "194424:708", "194428:1-194428:85", "194428:87-194428:122",
"194428:125-194428:294", "194428:296-194428:465", "194429:1-194429:4", "194429:7-194429:54", "194429:57-194429:147",
"194429:150-194429:411", "194429:413-194429:742", "194429:745-194429:986", "194429:988-194429:1019", "194439:46-194439:77",
"194439:79-194439:106", "194455:45-194455:64", "194455:67-194455:140", "194455:142-194455:255", "194455:293-194455:303",
"194464:1-194464:127", "194464:130-194464:142", "194464:145-194464:210", "194479:1-194479:44", "194479:165-194479:232",
"194479:235-194479:262", "194479:265-194479:374", "194479:377-194479:431", "194479:434-194479:489", "194479:492-194479:529",
"194479:531-194479:566", "194480:1-194480:32", "194480:34-194480:205", "194480:207-194480:375", "194480:377-194480:387",
"194480:389-194480:759", "194480:762-194480:956", "194480:959-194480:1402", "194533:46-194533:379", "194533:382-194533:415",
"194533:417-194533:618", "194533:620-194533:872", "194619:31-194619:110", "194631:1-194631:42", "194631:44-194631:100",
"194631:102-194631:169", "194631:171-194631:222", "194643:1-194643:287", "194644:1-194644:168", "194644:171-194644:181",
"194644:184-194644:185", "194644:187-194644:319", "194644:321-194644:421", "194691:61-194691:104", "194691:107-194691:155",
"194691:158-194691:251", "194691:254-194691:268", "194691:271-194691:272", "194691:275-194691:289", "194691:292-194691:313",
"194699:1-194699:30", "194699:32-194699:52", "194699:55-194699:64", "194699:67-194699:71", "194699:73-194699:154",
"194699:157-194699:215", "194699:218-194699:238", "194699:241-194699:259", "194702:1-194702:138", "194702:141-194702:191",
"194704:1-194704:41", "194704:44-194704:545", "194704:548-194704:592", "194711:1-194711:7", "194711:9-194711:619",
"194712:1-194712:56", "194712:61-194712:418", "194712:420-194712:625", "194712:627-194712:759", "194735:44-194735:71",
"194735:74-194735:101", "194735:104-194735:130", "194778:60-194778:118", "194778:120-194778:219", "194789:1-194789:18",
"194789:21-194789:32", "194789:34-194789:80", "194789:82-194789:166", "194789:168-194789:269", "194789:272-194789:405",
"194789:409-194789:414", "194789:417-194789:427", "194789:430-194789:566", "194790:1-194790:45", "194825:72-194825:117",
"194825:120-194825:221", "194896:34-194896:55", "194896:58-194896:79", "194896:82-194896:103", "194897:1-194897:6",
"194897:8-194897:78", "194897:80-194897:96", "194897:98-194897:102", "194912:53-194912:70", "194912:72-194912:96",
"194912:98-194912:444", "194912:446-194912:450", "194912:453-194912:467", "194912:470-194912:561", "194912:564-194912:660",
"194912:663-194912:813", "194912:815-194912:840", "194912:843-194912:864", "194912:866-194912:1004", "194912:1007-194912:1025",
"194912:1027-194912:1067", "194912:1069-194912:1137", "194912:1140-194912:1166", "194912:1168-194912:1249", "194912:1251-194912:1304",
"194912:1307-194912:1444", "194912:1447-194912:1487", "194912:1489-194912:1503", "194912:1506-194912:1662", "194914:1-194914:38",
"194915:1-194915:74", "195013:94-195013:144", "195013:146-195013:185", "195013:187-195013:206", "195013:208-195013:299",
"195013:302-195013:324", "195013:326-195013:366", "195013:369-195013:447", "195013:450-195013:526", "195013:528-195013:541",
"195014:1-195014:6", "195014:9-195014:119", "195014:121-195014:148", "195015:1-195015:13", "195016:1-195016:21",
"195016:23-195016:55", "195016:58-195016:63", "195016:65-195016:174", "195016:177-195016:184", "195016:186-195016:241",
"195016:243-195016:246", "195016:248-195016:251", "195016:254-195016:367", "195016:370-195016:422", "195016:425-195016:560",
"195016:563-195016:569", "195099:70-195099:144", "195099:147-195099:186", "195099:189-195099:208", "195099:211-195099:224",
"195099:227-195099:248", "195109:98-195109:241", "195112:1-195112:12", "195112:15-195112:26", "195113:1-195113:209",
"195113:212-195113:388", "195113:391-195113:403", "195113:406-195113:419", "195113:422-195113:492", "195113:495-195113:579",
"195114:1-195114:69", "195114:72-195114:103", "195115:1-195115:7", "195115:10-195115:22", "195147:132-195147:282",
"195147:285-195147:294", "195147:297-195147:331", "195147:334-195147:363", "195147:366-195147:442", "195147:445-195147:536",
"195147:539-195147:559", "195163:72-195163:138", "195163:140-195163:224", "195163:227-195163:240", "195163:243",
"195163:246-195163:347", "195164:1-195164:64", "195165:1-195165:4", "195165:7-195165:41", "195165:44-195165:54",
"195165:56-195165:153", "195165:156-195165:260", "195165:263-195165:266", "195251:1-195251:131", "195251:134-195251:137",
"195251:140-195251:152", "195251:154-195251:165", "195251:167-195251:242", "195303:109-195303:191", "195303:194-195303:277",
"195303:280-195303:310", "195303:312-195303:316", "195303:318-195303:409", "195304:1-195304:3", "195304:6-195304:22",
"195304:27-195304:80", "195304:83-195304:100", "195304:103-195304:154", "195304:157-195304:341", "195304:344-195304:588",
"195304:590-195304:727", "195304:729-195304:1003", "195304:1006-195304:1079", "195304:1083-195304:1140", "195304:1143-195304:1229",
"195378:90-195378:117", "195378:120-195378:127", "195378:130-195378:185", "195378:187-195378:204", "195378:206-195378:302",
"195378:305-195378:542", "195378:544-195378:565", "195378:567-195378:645", "195378:647-195378:701", "195378:703-195378:734",
"195378:737-195378:1120", "195378:1122-195378:1133", "195390:1", "195390:4-195390:27", "195390:30-195390:145",
"195390:147-195390:183", "195390:186-195390:187", "195390:190-195390:208", "195390:210-195390:213", "195390:215-195390:400",
"195396:49-195396:55", "195396:58-195396:63", "195396:66-195396:131", "195397:1-195397:10", "195397:12-195397:89",
"195397:92-195397:120", "195397:123-195397:141", "195397:143-195397:251", "195397:253", "195397:256-195397:475",
"195397:478-195397:525", "195397:527-195397:608", "195397:611-195397:776", "195397:779-195397:970", "195397:972-195397:1121",
"195397:1123-195397:1181", "195397:1184-195397:1198", "195397:1200-195397:1209", "195398:3-195398:137", "195398:139-195398:494",
"195398:497-195398:585", "195398:587-195398:817", "195398:820-195398:824", "195398:827-195398:1225", "195398:1228-195398:1307",
"195398:1309-195398:1712", "195398:1721-195398:1736", "195398:1741-195398:1752", "195398:1767-195398:1795", "195399:1-195399:192",
"195399:194-195399:382", "195530:1-195530:80", "195530:82-195530:104", "195530:107-195530:156", "195530:159-195530:300",
"195530:302-195530:405", "195540:68-195540:123", "195540:126-195540:137", "195540:140-195540:283", "195540:286-195540:319",
"195551:91-195551:106", "195552:1-195552:21", "195552:23-195552:27", "195552:30-195552:147", "195552:149-195552:155",
"195552:158-195552:182", "195552:185-195552:287", "195552:290-195552:349", "195552:352-195552:469", "195552:472-195552:815",
"195552:818-195552:823", "195552:825-195552:883", "195552:885-195552:1152", "195552:1154-195552:1300", "195552:1303-195552:1789",
"195633:40-195633:42", "195647:1-195647:41", "195649:1-195649:69", "195649:72-195649:151", "195649:154-195649:181",
"195649:183-195649:247", "195655:1-195655:129", "195655:131-195655:184", "195655:186-195655:260", "195655:263-195655:350",
"195655:353-195655:446", "195655:448-195655:483", "195655:485-195655:498", "195656:1-195656:362", "195658:1-195658:37",
"195658:40-195658:362", "195658:364-195658:382", "195658:384-195658:386", "195749:1-195749:8", "195749:10-195749:33",
"195749:36-195749:131", "195757:1-195757:82", "195757:85-195757:115", "195757:118-195757:161", "195757:163-195757:206",
"195758:1-195758:18", "195774:1-195774:13", "195774:16-195774:137", "195774:139-195774:151", "195774:154-195774:162",
"195774:164-195774:256", "195774:258-195774:276", "195774:279-195774:362", "195774:365-195774:466", "195774:469-195774:618",
"195774:620-195774:649", "195774:651-195774:830", "195775:1-195775:57", "195775:60-195775:100", "195775:103-195775:170",
"195776:1-195776:63", "195776:66-195776:283", "195776:286-195776:337", "195776:340-195776:399", "195776:401-195776:409",
"195776:411-195776:477", "195841:74-195841:85", "195868:1-195868:88", "195868:90-195868:107", "195868:110-195868:205",
"195915:1-195915:109", "195915:111-195915:275", "195915:278-195915:390", "195915:393-195915:417", "195915:419-195915:429",
"195915:432-195915:505", "195915:507-195915:747", "195915:749-195915:785", "195915:787-195915:828", "195915:830-195915:850",
"195916:1-195916:16", "195916:19-195916:68", "195916:71-195916:212", "195917:1-195917:4", "195918:1-195918:44",
"195918:46", "195918:49-195918:64", "195919:1-195919:15", "195923:1-195923:14", "195925:1-195925:12",
"195926:1", "195926:3-195926:19", "195926:21-195926:34", "195929:1-195929:29", "195930:1-195930:77",
"195930:80-195930:176", "195930:179-195930:526", "195930:529-195930:596", "195937:1-195937:28", "195937:31-195937:186",
"195937:188-195937:396", "195947:23-195947:62", "195947:64-195947:88", "195948:51-195948:116", "195948:119-195948:144",
"195948:147", "195948:150-195948:352", "195948:355-195948:369", "195948:372-195948:402", "195948:404-195948:500",
"195948:503-195948:540", "195948:543-195948:565", "195948:567-195948:602", "195948:605-195948:615", "195950:1-195950:71",
"195950:73-195950:138", "195950:141-195950:169", "195950:172-195950:332", "195950:335-195950:350", "195950:353-195950:382",
"195950:385-195950:421", "195950:424-195950:450", "195950:453-195950:483", "195950:485-195950:616", "195950:619-195950:715",
"195950:718-195950:787", "195950:789-195950:800", "195950:803-195950:829", "195950:831", "195950:833-195950:1587",
"195963:54-195963:58", "195970:44-195970:49", "195970:51-195970:85", "196019:54-196019:68", "196027:1-196027:55",
"196027:58-196027:119", "196027:121-196027:155", "196027:158-196027:186", "196046:12-196046:40", "196047:1-196047:64",
"196047:70-196047:75", "196048:1-196048:44", "196048:46-196048:48", "196197:58-196197:122", "196197:125-196197:179",
"196197:181-196197:311", "196197:313-196197:516", "196197:519-196197:562", "196199:1-196199:33", "196199:36-196199:83",
"196199:86-196199:118", "196199:121-196199:147", "196199:150-196199:237", "196199:239-196199:285", "196199:287-196199:534",
"196200:1-196200:68", "196202:3-196202:61", "196202:64-196202:108", "196203:1-196203:102", "196203:107-196203:117",
"196218:55-196218:199", "196218:201-196218:224", "196218:226-196218:393", "196218:396-196218:494", "196218:496-196218:741",
"196218:744-196218:752", "196218:754-196218:757", "196218:759-196218:820", "196239:1-196239:59", "196239:62-196239:154",
"196239:157-196239:272", "196239:274-196239:373", "196239:375-196239:432", "196239:435-196239:465", "196239:468-196239:647",
"196239:650-196239:706", "196239:709-196239:1025", "196249:63-196249:77", "196249:80-196249:99", "196250:1-196250:2",
"196250:5-196250:265", "196250:267-196250:426", "196252:1-196252:35", "196334:59-196334:111", "196334:113-196334:123",
"196334:126-196334:132", "196334:135-196334:167", "196334:170-196334:193", "196334:196-196334:257", "196334:259-196334:267",
"196334:270-196334:289", "196334:292-196334:342", "196349:65-196349:84", "196349:86-196349:154", "196349:157-196349:244",
"196349:246-196349:258", "196357:1-196357:4", "196359:1-196359:2", "196362:1-196362:88", "196363:1-196363:8",
"196363:11-196363:34", "196364:1-196364:93", "196364:96-196364:136", "196364:139-196364:365", "196364:368-196364:380",
"196364:382-196364:601", "196364:603-196364:795", "196364:798-196364:884", "196364:887-196364:1196", "196364:1199-196364:1200",
"196364:1203-196364:1299", "196437:1", "196437:3-196437:74", "196437:77-196437:169", "196438:1-196438:181",
"196438:184-196438:699", "196438:701-196438:1269", "196452:82-196452:112", "196452:114-196452:490", "196452:493-196452:586",
"196452:589-196452:618", "196452:622-196452:668", "196452:671-196452:716", "196452:718-196452:726", "196452:728-196452:956",
"196452:958-196452:1004", "196452:1007-196452:1091", "196453:1-196453:74", "196453:77-196453:145", "196453:147-196453:669",
"196453:673-196453:714", "196453:717-196453:799", "196453:802-196453:988", "196453:991-196453:1178", "196453:1180",
"196453:1182-196453:1248", "196453:1250-196453:1528", "196453:1531-196453:1647", "196495:114-196495:180", "196495:182-196495:272",
"196509:1-196509:68", "196531:62-196531:150", "196531:152-196531:253", "196531:256-196531:285", "196531:288-196531:302",
"196531:305-196531:422", "196531:425-196531:440", "198049:1-198049:11", "198049:14-198049:57", "198050:2-198050:155",
"198063:1-198063:37", "198063:40-198063:72", "198063:74-198063:124", "198063:127-198063:294", "198116:36-198116:52",
"198116:54-198116:55", "198116:58-198116:96", "198116:98-198116:112", "198207:1-198207:97", "198208:1-198208:92",
"198208:94-198208:134", "198208:137-198208:147", "198208:150-198208:209", "198210:1-198210:221", "198212:1-198212:574",
"198213:1-198213:107", "198215:1-198215:12", "198230:1-198230:33", "198230:36-198230:57", "198230:60-198230:235",
"198230:237-198230:324", "198230:326-198230:388", "198230:390-198230:459", "198230:462-198230:625", "198230:627-198230:651",
"198230:653-198230:805", "198230:808-198230:811", "198230:814-198230:948", "198230:950-198230:1090", "198230:1093-198230:1103",
"198230:1106-198230:1332", "198230:1335-198230:1380", "198249:1-198249:7", "198269:3-198269:198", "198271:1-198271:91",
"198271:93-198271:170", "198271:173-198271:299", "198271:301-198271:450", "198271:453-198271:513", "198271:516-198271:616",
"198271:619-198271:628", "198271:631-198271:791", "198271:793-198271:797", "198272:1-198272:185", "198272:188-198272:245",
"198272:248-198272:314", "198272:317-198272:433", "198272:436-198272:444", "198272:454-198272:620", "198346:44-198346:47",
"198372:57-198372:110", "198485:68-198485:109", "198485:112-198485:134", "198485:136-198485:181", "198485:184-198485:239",
"198487:1-198487:145", "198487:147-198487:514", "198487:517-198487:668", "198487:671-198487:733", "198487:736-198487:757",
"198487:760-198487:852", "198487:854-198487:994", "198487:997-198487:1434", "198487:1437-198487:1610", "198522:65-198522:144",
"198522:147-198522:208", "198941:102-198941:189", "198941:191-198941:220", "198941:222-198941:241", "198941:243-198941:249",
"198941:252-198941:284", "198954:108-198954:156", "198954:159-198954:277", "198955:1-198955:45", "198955:47-198955:50",
"198955:53-198955:220", "198955:223-198955:269", "198955:271-198955:284", "198955:286-198955:338", "198955:340-198955:580",
"198955:583-198955:742", "198955:744-198955:910", "198955:913-198955:946", "198955:949-198955:1162", "198955:1165-198955:1169",
"198955:1172-198955:1182", "198955:1185-198955:1188", "198955:1190-198955:1246", "198955:1249-198955:1304", "198955:1306-198955:1467",
"198955:1470-198955:1485", "198955:1487-198955:1552", "198969:58-198969:81", "198969:84-198969:247", "198969:249-198969:323",
"198969:325-198969:365", "198969:367-198969:413", "198969:416-198969:466", "198969:468-198969:643", "198969:646-198969:918",
"198969:920-198969:1011", "198969:1013-198969:1175", "198969:1178-198969:1236", "198969:1239-198969:1253", "199008:75-199008:93",
"199008:95-199008:121", "199008:124-199008:208", "199008:211-199008:331", "199008:333-199008:373", "199008:376-199008:482",
"199008:485-199008:605", "199008:608-199008:644", "199011:1-199011:11", "199011:13-199011:24", "199021:59-199021:88",
"199021:91-199021:128", "199021:130-199021:133", "199021:136-199021:309", "199021:311-199021:333", "199021:335-199021:410",
"199021:414-199021:469", "199021:471-199021:533", "199021:535-199021:563", "199021:565-199021:1223", "199021:1226-199021:1479",
"199021:1481-199021:1494", "199318:65-199318:138", "199319:1-199319:7", "199319:9-199319:223", "199319:226-199319:277",
"199319:280-199319:348", "199319:351-199319:358", "199319:360-199319:422", "199319:424-199319:490", "199319:492-199319:493",
"199319:496-199319:612", "199319:615-199319:642", "199319:645-199319:720", "199319:723-199319:728", "199319:730-199319:731",
"199319:734-199319:741", "199319:744-199319:752", "199319:754-199319:943", "199319:945-199319:997", "199336:1-199336:33",
"199336:36-199336:122", "199336:125-199336:231", "199336:234-199336:614", "199336:617-199336:789", "199336:791-199336:977",
"199356:95-199356:121", "199356:123-199356:168", "199356:171-199356:205", "199356:208-199356:231", "199409:25-199409:54",
"199409:56-199409:89", "199409:91-199409:204", "199409:206-199409:290", "199409:293-199409:583", "199409:586-199409:602",
"199409:604-199409:1014", "199409:1016-199409:1300", "199428:61-199428:197", "199428:200-199428:210", "199428:212-199428:382",
"199428:387-199428:414", "199428:417-199428:436", "199428:439-199428:530", "199428:533-199428:648", "199429:1-199429:28",
"199429:30-199429:36", "199429:39-199429:55", "199429:58-199429:101", "199429:103-199429:148", "199429:151-199429:154",
"199435:63-199435:106", "199435:109-199435:261", "199435:263-199435:579", "199435:582-199435:654", "199435:656-199435:696",
"199435:699-199435:1034", "199435:1037-199435:1144", "199435:1147-199435:1327", "199435:1330-199435:1411", "199435:1414-199435:1431",
"199435:1434-199435:1441", "199435:1444-199435:1487", "199435:1489-199435:1610", "199436:1-199436:113", "199436:116-199436:254",
"199436:257-199436:675", "199436:678-199436:748", "199564:1-199564:3", "199569:1-199569:2", "199569:5-199569:136",
"199569:139-199569:367", "199570:1-199570:17", "199571:1-199571:184", "199571:186-199571:360", "199571:363-199571:561",
"199572:1-199572:317", "199573:1-199573:22", "199574:1-199574:53", "199574:56-199574:153", "199574:156-199574:246",
"199608:60-199608:157", "199608:159-199608:209", "199608:211-199608:341", "199608:344-199608:390", "199608:392-199608:461",
"199608:464-199608:800", "199608:802-199608:1064", "199608:1067-199608:1392", "199608:1395-199608:1630", "199608:1633-199608:1904",
"199608:1907-199608:1962", "199608:1965-199608:2252", "199608:2255-199608:2422", "199698:72-199698:94", "199698:96-199698:127",
"199699:1-199699:154", "199699:157-199699:169", "199699:172-199699:410", "199699:412-199699:756", "199703:1-199703:94",
"199703:97-199703:482", "199703:485-199703:529", "199739:66-199739:133", "199751:103-199751:119", "199751:121-199751:127",
"199752:1-199752:141", "199752:144-199752:180", "199752:182-199752:186", "199752:188-199752:211", "199752:214-199752:322",
"199753:1-199753:59", "199754:1-199754:203", "199754:205-199754:325", "199754:328-199754:457", "199754:459-199754:607",
"199754:610-199754:613", "199754:615-199754:806", "199754:808-199754:998", "199804:78-199804:88", "199804:90-199804:181",
"199804:183-199804:235", "199804:238-199804:278", "199804:281-199804:290", "199804:292-199804:519", "199804:522-199804:575",
"199804:577-199804:628", "199804:631-199804:632", "199812:70-199812:141", "199812:144-199812:163", "199812:182-199812:211",
"199812:214-199812:471", "199812:474-199812:505", "199812:508-199812:557", "199812:560-199812:571", "199812:574-199812:623",
"199812:626-199812:751", "199812:754-199812:796", "199832:58-199832:62", "199832:65-199832:118", "199832:121-199832:139",
"199832:142-199832:286", "199833:1-199833:13", "199833:16-199833:103", "199833:105-199833:250", "199833:253-199833:493",
"199833:496-199833:794", "199833:797-199833:1032", "199833:1034-199833:1185", "199833:1188-199833:1239", "199834:1-199834:9",
"199834:11", "199834:14-199834:18", "199834:21-199834:54", "199834:56-199834:57", "199834:62-199834:65",
"199834:69-199834:284", "199834:286-199834:503", "199834:505-199834:942", "199862:59-199862:141", "199864:1-199864:87",
"199864:89", "199864:92-199864:103", "199864:106-199864:372", "199864:374-199864:385", "199864:388-199864:486",
"199867:1-199867:134", "199867:136-199867:172", "199867:174-199867:218", "199867:221-199867:320", "199868:1-199868:21",
"199875:70-199875:150", "199875:152-199875:334", "199876:1-199876:19", "199876:22-199876:95", "199876:97-199876:249",
"199876:252-199876:272", "199876:274-199876:340", "199876:343-199876:362", "199876:365-199876:376", "199877:1-199877:173",
"199877:175-199877:605", "199877:607-199877:701", "199877:703-199877:871", "199960:72-199960:139", "199960:141-199960:197",
"199960:204-199960:232", "199960:235-199960:363", "199960:365-199960:367", "199960:370-199960:380", "199960:383-199960:459",
"199960:461-199960:466", "199960:469-199960:485", "199961:1-199961:211", "199961:213-199961:287", "199967:60-199967:120",
"199967:122-199967:170", "199967:172-199967:198", "199973:73-199973:89", "200041:62-200041:83", "200041:85-200041:157",
"200041:162-200041:274", "200041:277-200041:318", "200041:321-200041:335", "200041:337-200041:386", "200041:388-200041:389",
"200041:392-200041:400", "200041:402-200041:568", "200041:571-200041:593", "200041:595-200041:646", "200041:649-200041:728",
"200041:731-200041:860", "200041:862-200041:930", "200041:932-200041:1096", "200042:1-200042:110", "200042:112-200042:536",
"200049:1-200049:177", "200075:76-200075:139", "200075:142-200075:232", "200075:256-200075:326", "200075:329-200075:422",
"200075:425-200075:431", "200075:434-200075:500", "200075:502-200075:605", "200091:67", "200091:70-200091:151",
"200091:154-200091:172", "200091:174-200091:187", "200091:190-200091:196", "200091:199-200091:201", "200091:204-200091:425",
"200091:428-200091:535", "200091:537-200091:607", "200091:610-200091:879", "200091:881-200091:943", "200091:946-200091:999",
"200091:1001-200091:1025", "200091:1027-200091:1132", "200091:1135-200091:1339", "200091:1341-200091:1433", "200091:1435-200091:1450",
"200091:1453-200091:1523", "200091:1526-200091:1664", "200091:1667-200091:1680", "200091:1683-200091:1710", "200152:74-200152:116",
"200160:52-200160:68", "200161:1-200161:97", "200161:100-200161:112", "200174:81-200174:84", "200177:1-200177:56",
"200178:1-200178:38", "200180:1-200180:18", "200186:1-200186:3", "200186:6-200186:24", "200188:1-200188:24",
"200188:27-200188:28", "200188:31-200188:76", "200188:79-200188:271", "200188:274-200188:352", "200190:1-200190:4",
"200190:6-200190:76", "200190:79-200190:143", "200190:146-200190:159", "200190:162-200190:256", "200190:258-200190:321",
"200190:324-200190:401", "200190:403-200190:453", "200190:456-200190:457", "200190:460-200190:565", "200190:567-200190:588",
"200190:591", "200190:593-200190:595", "200190:597-200190:646", "200190:649-200190:878", "200229:1-200229:33",
"200229:41-200229:219", "200229:222-200229:244", "200229:247-200229:290", "200229:293-200229:624", "200229:627-200229:629",
"200243:69-200243:103", "200243:106-200243:139", "200244:3-200244:304", "200244:307-200244:442", "200244:445-200244:507",
"200244:510-200244:619", "200245:1-200245:103", "200245:105-200245:128", "200245:131-200245:248", "200245:251-200245:357",
"200368:72-200368:180", "200369:1-200369:5", "200369:8-200369:61", "200369:64-200369:360", "200369:363-200369:439",
"200369:441-200369:578", "200369:580-200369:603", "200369:606-200369:684", "200369:686", "200381:8-200381:15",
"200381:18-200381:36", "200381:38-200381:89", "200381:91-200381:195", "200466:134-200466:274", "200473:96-200473:157",
"200473:159-200473:224", "200473:226-200473:304", "200473:306-200473:469", "200473:472-200473:524", "200473:527-200473:542",
"200473:545-200473:619", "200473:622-200473:688", "200473:691-200473:730", "200473:733-200473:738", "200473:740-200473:1324",
"200491:87-200491:107", "200491:110-200491:149", "200491:152-200491:157", "200491:160-200491:197", "200491:199-200491:237",
"200491:240-200491:270", "200491:273", "200491:276-200491:334", "200491:336-200491:360", "200491:363-200491:419",
"200515:97-200515:183", "200519:1-200519:111", "200519:114-200519:126", "200519:129-200519:136", "200519:138-200519:224",
"200519:227-200519:258", "200519:261-200519:350", "200519:353-200519:611", "200519:613-200519:747", "200525:77-200525:149",
"200525:151-200525:164", "200525:166-200525:190", "200525:193-200525:276", "200525:278-200525:311", "200525:314-200525:464",
"200525:467-200525:488", "200525:491-200525:674", "200525:676-200525:704", "200525:707-200525:755", "200525:757-200525:895",
"200525:898-200525:937", "200525:939-200525:990", "200532:1-200532:37", "200599:75-200599:129", "200599:132-200599:137",
"200600:1-200600:183", "200600:186-200600:299", "200600:302-200600:313", "200600:316-200600:324", "200600:327-200600:334",
"200600:336-200600:397", "200600:399-200600:417", "200600:420-200600:526", "200600:529-200600:591", "200600:594-200600:596",
"200600:598-200600:609", "200600:611-200600:660", "200600:663-200600:823", "200600:826-200600:900", "200600:902-200600:943",
"200600:945-200600:1139", "200961:1-200961:115", "200976:94-200976:164", "200990:75-200990:143", "200991:1-200991:42",
"200991:44", "200991:47-200991:80", "200991:83-200991:175", "200991:178-200991:181", "200991:184-200991:252",
"200991:255-200991:632", "200991:635-200991:916", "200991:918-200991:1017", "200991:1019-200991:1048", "200992:1-200992:405",
"200992:408-200992:434", "200992:436-200992:581", "201062:78-201062:268", "201097:83-201097:136", "201097:138-201097:245",
"201097:248-201097:300", "201097:303-201097:370", "201097:372-201097:429", "201097:432-201097:497", "201114:1-201114:14",
"201115:1-201115:73", "201159:70-201159:211", "201164:1-201164:8", "201164:10-201164:94", "201164:96-201164:125",
"201164:128-201164:178", "201164:180-201164:198", "201164:200-201164:271", "201164:274-201164:416", "201164:418",
"201168:1-201168:37", "201168:39-201168:275", "201168:278-201168:481", "201168:483-201168:558", "201168:560-201168:730",
"201173:1-201173:194", "201173:197-201173:586", "201174:1-201174:214", "201174:216-201174:263", "201174:265-201174:339",
"201174:342-201174:451", "201191:75-201191:98", "201191:100-201191:216", "201191:218-201191:389", "201191:392-201191:492",
"201191:494-201191:506", "201191:509-201191:585", "201191:587-201191:594", "201191:597-201191:607", "201191:609-201191:794",
"201191:796-201191:838", "201191:841-201191:974", "201191:977-201191:1105", "201191:1108-201191:1117", "201191:1120-201191:1382",
"201191:1385-201191:1386", "201193:1-201193:19", "201196:1-201196:238", "201196:241-201196:278", "201196:286-201196:299",
"201196:302-201196:338", "201196:341-201196:515", "201196:518-201196:720", "201196:723-201196:789", "201196:803-201196:841",
"201197:1-201197:23", "201202:1-201202:437", "201229:1-201229:5", "201229:8-201229:26", "201229:29-201229:73",
"201278:62-201278:163", "201278:166-201278:229", "201278:232-201278:256", "201278:259-201278:316", "201278:318-201278:595",
"201278:598-201278:938", "201278:942-201278:974", "201278:976-201278:1160", "201278:1163-201278:1304", "201278:1306-201278:1793",
"201278:1796-201278:1802", "201278:1805-201278:1906", "201278:1909-201278:1929", "201278:1932-201278:2174", "201554:70-201554:86",
"201554:88-201554:114", "201554:116-201554:126", "201602:76-201602:81", "201602:83-201602:194", "201602:196-201602:494",
"201602:496-201602:614", "201602:617-201602:635", "201611:87-201611:145", "201611:149-201611:182", "201611:184-201611:186",
"201613:1-201613:42", "201613:44-201613:49", "201613:53-201613:210", "201613:213-201613:215", "201613:218-201613:225",
"201613:228-201613:646", "201624:83-201624:92", "201624:95-201624:240", "201624:270", "201625:211-201625:312",
"201625:315-201625:348", "201625:351-201625:416", "201625:418-201625:588", "201625:591-201625:671", "201625:673-201625:758",
"201625:760-201625:791", "201625:793-201625:944", "201657:77-201657:93", "201657:95-201657:108", "201657:110-201657:118",
"201658:1-201658:19", "201658:21-201658:118", "201658:121-201658:136", "201658:139-201658:288", "201668:78-201668:157",
"201669:1-201669:9", "201669:12-201669:136", "201669:139-201669:141", "201669:143-201669:165", "201671:1-201671:120",
"201671:122-201671:174", "201671:177-201671:462", "201671:464-201671:482", "201671:485-201671:499", "201671:501-201671:545",
"201671:547-201671:571", "201671:574-201671:614", "201671:617-201671:766", "201671:768-201671:896", "201671:899-201671:911",
"201671:914-201671:1007", "201678:1-201678:120", "201679:1-201679:110", "201679:112-201679:241", "201679:244-201679:298",
"201679:302-201679:321", "201679:324-201679:461", "201679:463-201679:483", "201692:78-201692:81", "201692:83-201692:179",
"201705:65-201705:73", "201705:75-201705:109", "201705:111-201705:187", "201706:1-201706:62", "201707:1-201707:23",
"201707:26-201707:42", "201707:45-201707:115", "201707:118-201707:130", "201707:133-201707:160", "201707:163-201707:276",
"201707:279-201707:471", "201707:473-201707:511", "201707:514-201707:545", "201707:547-201707:570", "201707:572-201707:622",
"201707:625-201707:735", "201707:738-201707:806", "201707:809-201707:876", "201707:879-201707:964", "201708:1-201708:79",
"201718:58-201718:108", "201727:67-201727:185", "201729:6-201729:20", "201729:22-201729:75", "201729:77-201729:126",
"201729:129-201729:154", "201729:156-201729:216", "201729:219-201729:244", "201794:58-201794:94", "201802:68-201802:209",
"201802:211-201802:214", "201802:216-201802:220", "201802:223-201802:288", "201802:290-201802:296", "201816:1-201816:72",
"201816:74-201816:105", "201816:107-201816:157", "201817:1-201817:274", "201818:1", "201819:1-201819:94",
"201819:96-201819:241", "201824:1-201824:139", "201824:141-201824:176", "201824:179-201824:286", "201824:289-201824:492",
"202012:98-202012:121", "202012:126-202012:131", "202013:1-202013:2", "202013:5-202013:35", "202013:38-202013:57",
"202014:1-202014:5", "202014:8-202014:14", "202014:16-202014:18", "202014:20-202014:77", "202014:79-202014:102",
"202014:104-202014:174", "202014:177-202014:190", "202014:192-202014:196", "202016:1-202016:48", "202016:51-202016:134",
"202016:137-202016:177", "202016:179-202016:743", "202016:745-202016:831", "202016:834-202016:890", "202016:893-202016:896",
"202016:898-202016:932", "202016:934-202016:1010", "202044:84-202044:101", "202044:104-202044:266", "202044:268-202044:461",
"202044:463-202044:466", "202045:1-202045:30", "202045:33-202045:72", "202045:75-202045:528", "202045:531-202045:601",
"202045:603-202045:785", "202045:788-202045:809", "202045:822-202045:823", "202054:6-202054:266", "202054:268-202054:489",
"202054:492-202054:605", "202054:608-202054:631", "202060:76-202060:142", "202060:144-202060:154", "202060:156-202060:244",
"202060:246-202060:497", "202060:499-202060:642", "202060:644-202060:682", "202060:684-202060:743", "202060:746-202060:936",
"202074:66-202074:174", "202075:1-202075:18", "202075:21-202075:187", "202075:189-202075:214", "202075:217-202075:247",
"202075:250-202075:342", "202075:345-202075:406", "202075:409-202075:497", "202075:500-202075:537", "202075:539",
"202075:542-202075:560", "202075:562-202075:615", "202075:618-202075:628", "202084:83-202084:156", "202084:159-202084:177",
"202084:179-202084:180", "202084:182-202084:239", "202087:1-202087:25", "202087:28-202087:208", "202087:210-202087:357",
"202087:359-202087:652", "202087:655-202087:853", "202087:856-202087:1093", "202088:1-202088:286", "202093:1-202093:104",
"202093:107-202093:320", "202093:322-202093:360", "202116:59-202116:60", "202178:67-202178:78", "202178:80-202178:88",
"202178:91-202178:177", "202178:180-202178:186", "202178:188-202178:337", "202178:340-202178:377", "202178:379-202178:425",
"202178:428-202178:475", "202178:478-202178:548", "202178:551-202178:717", "202178:720-202178:965", "202178:967-202178:1444",
"202178:1447-202178:1505", "202178:1508-202178:1519", "202178:1522-202178:1555", "202205:94-202205:114", "202209:1-202209:48",
"202209:51-202209:142", "202237:39-202237:128", "202237:131", "202237:134-202237:219", "202237:222-202237:235",
"202237:238-202237:275", "202237:277-202237:289", "202237:291-202237:316", "202237:319-202237:419", "202237:422-202237:538",
"202237:540-202237:936", "202237:939-202237:950", "202237:952-202237:976", "202237:979-202237:1079", "202272:76-202272:112",
"202272:115-202272:141", "202272:144-202272:185", "202272:188-202272:205", "202272:208-202272:305", "202272:307-202272:313",
"202272:315-202272:371", "202272:436-202272:480", "202272:483-202272:555", "202272:558-202272:577", "202272:579-202272:683",
"202272:686-202272:705", "202272:707-202272:740", "202272:742-202272:890", "202272:937-202272:1295", "202272:1299-202272:1481",
"202299:68-202299:84", "202299:87-202299:141", "202299:143-202299:193", "202299:196-202299:358", "202299:361-202299:379",
"202299:382-202299:414", "202299:416-202299:452", "202299:455-202299:555", "202305:1-202305:89", "202305:92-202305:130",
"202305:133-202305:323", "202314:67-202314:104", "202314:107-202314:265", "202314:268-202314:278", "202328:46-202328:89",
"202328:92-202328:156", "202328:158-202328:276", "202328:278-202328:291", "202328:294-202328:434", "202328:437-202328:460",
"202328:463-202328:586", "202328:588-202328:610", "202328:612-202328:614", "202333:1-202333:235", "202389:81-202389:182",
"202389:185-202389:190", "202389:192-202389:199", "202469:87-202469:158", "202469:160-202469:174", "202469:177-202469:352",
"202472:1-202472:96", "202472:99-202472:112", "202477:1-202477:129", "202477:131-202477:150", "202478:1-202478:177",
"202478:180-202478:183", "202478:186-202478:219", "202478:222-202478:360", "202478:362-202478:506", "202478:509-202478:531",
"202478:534-202478:718", "202478:720-202478:927", "202478:929-202478:973", "202478:975-202478:1029", "202478:1031-202478:1186",
"202478:1189-202478:1212", "202478:1215-202478:1248", "202504:77-202504:96", "202504:99-202504:133", "202504:135-202504:182",
"202504:184-202504:211", "202504:213-202504:241", "202504:243-202504:392", "202504:395-202504:527", "202504:529-202504:617",
"202504:620-202504:715", "202504:718-202504:763", "202504:766-202504:1172", "202504:1174-202504:1247", "202504:1250-202504:1471",
"202504:1474-202504:1679", "202504:1682-202504:1704", "202972:1-202972:30", "202972:33-202972:184", "202972:186-202972:290",
"202972:292-202972:295", "202972:298-202972:371", "202972:374-202972:429", "202972:431-202972:544", "202973:1-202973:234",
"202973:237-202973:305", "202973:308-202973:437", "202973:439-202973:530", "202973:532-202973:541", "202973:544-202973:552",
"202973:555-202973:851", "202973:853-202973:1408", "203002:77-203002:128", "203002:130-203002:141", "203002:144-203002:207",
"203002:209-203002:267", "203002:270-203002:360", "203002:362-203002:501", "203002:504-203002:641", "203002:643-203002:669",
"203002:671", "203002:674-203002:717", "203002:720-203002:1034", "203002:1037-203002:1070", "203002:1073-203002:1370",
"203002:1372-203002:1392", "203002:1395-203002:1410", "203002:1413-203002:1596", "203709:1-203709:121", "203742:1-203742:29",
"203777:103-203777:113", "203830:82-203830:182", "203832:1-203832:11", "203833:1-203833:70", "203833:73-203833:128",
"203834:1-203834:40", "203835:1-203835:70", "203835:73-203835:358", "203853:122-203853:222", "203894:82-203894:272",
"203894:275-203894:477", "203894:480-203894:902", "203894:905-203894:1319", "203909:79-203909:113", "203909:116-203909:117",
"203909:120-203909:140", "203909:143-203909:382", "203912:1-203912:306", "203912:308-203912:566", "203912:569-203912:609",
"203912:611-203912:698", "203912:701-203912:820", "203912:823-203912:865", "203912:867-203912:1033", "203912:1035-203912:1321",
"203987:1-203987:9", "203987:12-203987:241", "203987:243-203987:339", "203987:342-203987:781", "203987:784-203987:1014",
"203992:1-203992:15", "203994:1-203994:56", "203994:59-203994:136", "203994:139-203994:304", "203994:306-203994:342",
"203994:344-203994:425", "204100:117-204100:139", "204101:1-204101:74", "204113:82-204113:96", "204113:98-204113:102",
"204113:105-204113:127", "204113:129-204113:191", "204113:194-204113:258", "204113:261-204113:327", "204113:329-204113:388",
"204113:390-204113:400", "204113:402-204113:583", "204113:585-204113:690", "204114:1-204114:358", "204238:23-204238:52",
"204238:55", "204250:92-204250:118", "204250:121-204250:177", "204250:179-204250:285", "204250:287-204250:336",
"204250:339-204250:400", "204250:403-204250:521", "204250:524-204250:543", "204250:546-204250:682", "204250:684-204250:801",
"204511:1-204511:56", "204541:5-204541:39", "204541:42", "204541:44-204541:139", "204541:142-204541:149",
"204541:151-204541:204", "204544:1-204544:11", "204544:13-204544:93", "204544:96-204544:195", "204544:197-204544:224",
"204544:226-204544:334", "204544:337-204544:426", "204552:1-204552:9", "204553:1-204553:51", "204553:53-204553:60",
"204553:63-204553:101", "204554:1-204554:5", "204554:7-204554:221", "204554:224-204554:455", "204554:458-204554:470",
"204554:472-204554:481", "204554:483-204554:514", "204555:1-204555:329", "204555:331-204555:334", "204563:91-204563:99",
"204563:102-204563:178", "204563:180-204563:219", "204563:222-204563:229", "204563:231-204563:364", "204563:366",
"204563:369-204563:470", "204563:473-204563:524", "204563:527-204563:571", "204564:1-204564:84", "204564:87-204564:89",
"204564:92-204564:159", "204564:161-204564:187", "204564:190-204564:191", "204564:193-204564:293", "204564:296-204564:315",
"204564:317-204564:340", "204564:343-204564:427", "204564:429-204564:434", "204564:437-204564:735", "204564:737-204564:855",
"204564:858-204564:1206", "204564:1209-204564:1248", "204564:1251-204564:1284", "204565:1-204565:48", "204566:1-204566:12",
"204567:1-204567:38", "204576:49-204576:192", "204576:195-204576:301", "204577:1-204577:46", "204577:49-204577:64",
"204577:67-204577:105", "204577:107-204577:170", "204577:173-204577:181", "204577:183-204577:193", "204577:196-204577:653",
"204577:656-204577:669", "204577:671-204577:740", "204577:742-204577:913", "204577:915-204577:1057", "204577:1059-204577:1115",
"204577:1117-204577:1282", "204599:73-204599:83", "204599:85-204599:94", "204599:97-204599:121", "204599:124-204599:125",
"204599:128-204599:173", "204599:175-204599:240", "204599:243-204599:245", "204599:248-204599:264", "204599:266-204599:292",
"204599:294-204599:334", "204601:1-204601:25", "204601:28-204601:62", "204601:65-204601:80", "204601:83-204601:89",
"204601:92-204601:290", "204601:292-204601:563", "204601:565-204601:591", "204601:593-204601:652", "204601:655-204601:780",
"204601:783-204601:812", "204601:814-204601:892", "204601:894-204601:984", "204601:986-204601:1003", "204601:1006-204601:1038",
"204601:1040-204601:1088", "204601:1091-204601:1102", "204601:1105-204601:1161", "204601:1164-204601:1250", "205086:95-205086:149",
"205111:88-205111:390", "205111:392-205111:441", "205111:444-205111:446", "205158:81-205158:289", "205158:292-205158:313",
"205158:315-205158:473", "205158:476-205158:591", "205158:594-205158:595", "205158:597-205158:612", "205158:615-205158:663",
"205158:665-205158:667", "205158:672-205158:685", "205158:687-205158:733", "205193:80-205193:109", "205193:111-205193:349",
"205193:352-205193:486", "205193:488-205193:650", "205193:652-205193:712", "205193:714-205193:902", "205217:1-205217:12",
"205217:16-205217:111", "205217:113-205217:171", "205217:174-205217:250", "205217:253-205217:318", "205233:94-205233:153",
"205236:1-205236:190", "205236:193-205236:207", "205236:209-205236:260", "205236:263-205236:331", "205236:334-205236:352",
"205238:1-205238:6", "205238:9-205238:199", "205238:202-205238:254", "205238:256-205238:304", "205238:306-205238:355",
"205238:358-205238:381", "205238:384-205238:596", "205238:598-205238:617", "205303:35-205303:54", "205303:90-205303:132",
"205303:135-205303:144", "205310:76-205310:306", "205310:309-205310:313", "205310:316", "205310:319-205310:321",
"205310:324-205310:457", "205310:460-205310:559", "205311:1-205311:85", "205311:88-205311:92", "205311:95-205311:183",
"205311:186-205311:395", "205311:397-205311:592", "205311:595-205311:910", "205311:913-205311:1260", "205339:71-205339:175",
"205339:178-205339:213", "205339:216-205339:230", "205339:233-205339:262", "205339:265-205339:404", "205344:1-205344:83",
"205344:86-205344:104", "205344:106-205344:359", "205344:362-205344:431", "205344:433-205344:949", "205344:951-205344:967",
"205344:969-205344:1127", "205344:1129-205344:1346", "205344:1348-205344:1586", "205515:82-205515:201", "205515:203-205515:216",
"205519:1-205519:47", "205519:50-205519:172", "205519:175-205519:367", "205519:370-205519:386", "205519:389-205519:472",
"205526:1-205526:269", "205526:272-205526:277", "205526:280-205526:332", "205614:1-205614:4", "205614:7-205614:40",
"205617:1-205617:29", "205617:32-205617:102", "205617:105-205617:123", "205617:125-205617:140", "205617:143-205617:264",
"205617:266-205617:448", "205617:451-205617:532", "205617:534-205617:547", "205618:1-205618:12", "205620:1-205620:175",
"205666:60-205666:119", "205666:122-205666:165", "205666:168-205666:259", "205666:261-205666:322", "205666:325-205666:578",
"205666:580-205666:594", "205666:597-205666:721", "205666:724-205666:739", "205667:1-205667:165", "205667:168-205667:282",
"205667:285-205667:318", "205667:321-205667:412", "205667:415-205667:689", "205667:692-205667:751", "205667:754-205667:774",
"205667:777-205667:1109", "205683:76-205683:82", "205683:85-205683:178", "205683:181-205683:198", "205683:201-205683:305",
"205690:1-205690:40", "205694:1-205694:205", "205694:208-205694:230", "205694:233-205694:347", "205694:350-205694:452",
"205694:455-205694:593", "205694:595-205694:890", "205718:49-205718:75", "205718:78-205718:97", "205718:100-205718:103",
"205718:105-205718:176", "205718:178-205718:338", "205718:341-205718:361", "205718:363-205718:524", "205718:527-205718:531",
"205718:534-205718:589", "205718:591-205718:694", "205774:1-205774:80", "205777:1-205777:8", "205781:1-205781:89",
"205781:91-205781:197", "205781:200-205781:502", "205826:80-205826:232", "205826:235-205826:303", "205826:306-205826:468",
"205833:84-205833:86", "205833:89-205833:121", "205833:123-205833:155", "205833:157-205833:165", "205833:167-205833:173",
"205833:176-205833:219", "205833:221-205833:267", "205833:270-205833:312", "205833:315-205833:346", "205833:350-205833:355",
"205833:360-205833:366", "205834:1-205834:12", "205834:14-205834:195", "205908:68-205908:200", "205908:202-205908:209",
"205921:22-205921:73", "205921:76-205921:268", "205921:271-205921:394", "205921:397-205921:401", "205921:410-205921:428",
"205921:431-205921:498", "205921:500-205921:571", "205921:574-205921:779", "205921:782-205921:853", "206066:89-206066:146",
"206088:86-206088:159", "206088:161-206088:178", "206088:181-206088:199", "206088:202-206088:286", "206102:83-206102:116",
"206102:120-206102:130", "206102:133-206102:208", "206102:211-206102:235", "206102:238-206102:246", "206102:249-206102:278",
"206102:281-206102:349", "206187:107-206187:169", "206187:172-206187:242", "206187:245-206187:288", "206187:290-206187:340",
"206187:343-206187:427", "206187:429-206187:435", "206187:437-206187:486", "206187:489-206187:569", "206187:571-206187:647",
"206187:649-206187:662", "206187:664-206187:708", "206188:1-206188:40", "206188:42-206188:55", "206199:1-206199:75",
"206199:77-206199:82", "206199:85-206199:114", "206207:82-206207:130", "206207:132-206207:176", "206207:179-206207:194",
"206207:196-206207:388", "206207:390-206207:419", "206207:422-206207:447", "206207:450-206207:569", "206207:572-206207:690",
"206208:1-206208:470", "206208:472-206208:518", "206210:11-206210:25", "206210:28-206210:275", "206210:277-206210:298",
"206210:300-206210:383", "206210:386-206210:466", "206243:62-206243:169", "206243:172-206243:196", "206243:199-206243:354",
"206243:357-206243:433", "206243:435-206243:448", "206243:451-206243:533", "206243:536-206243:554", "206243:557-206243:723",
"206243:726-206243:905", "206245:1-206245:62", "206246:1-206246:14", "206246:16-206246:237", "206246:240-206246:285",
"206246:288-206246:407", "206246:412-206246:676", "206246:678-206246:704", "206246:706-206246:785", "206246:787-206246:962",
"206246:965-206246:997", "206246:1000-206246:1198", "206246:1201-206246:1290", "206257:1-206257:29", "206258:1-206258:36",
"206258:39-206258:223", "206258:226-206258:249", "206302:1-206302:8", "206302:11-206302:33", "206302:36-206302:44",
"206302:47-206302:82", "206302:84-206302:108", "206302:110-206302:149", "206302:151-206302:186", "206302:189-206302:229",
"206302:231-206302:232", "206302:234-206302:241", "206302:243-206302:276", "206303:1-206303:19", "206303:23-206303:286",
"206304:1-206304:4", "206304:6-206304:62", "206331:91-206331:222", "206331:225-206331:312", "206389:88-206389:185",
"206389:187-206389:249", "206389:252-206389:272", "206389:275-206389:392", "206391:1-206391:55", "206391:57-206391:91",
"206401:69-206401:90", "206401:92-206401:194", "206401:197-206401:210", "206401:212-206401:249", "206401:251-206401:265",
"206401:267-206401:409", "206446:92-206446:141", "206446:143-206446:159", "206446:162-206446:205", "206446:208-206446:301",
"206446:304-206446:442", "206446:445", "206446:448-206446:474", "206446:476-206446:616", "206446:619-206446:872",
"206446:874-206446:910", "206446:912-206446:948", "206446:950-206446:989", "206446:992-206446:1030", "206446:1033-206446:1075",
"206446:1109-206446:1149", "206448:1-206448:143", "206448:145-206448:559", "206448:561-206448:1170", "206448:1173-206448:1231",
"206448:1235-206448:1237", "206466:24-206466:137", "206466:140-206466:277", "206466:280-206466:296", "206466:299-206466:303",
"206466:306-206466:405", "206466:407-206466:419", "206466:422-206466:477", "206466:480-206466:511", "206466:514-206466:676",
"206476:73-206476:129", "206476:133-206476:137", "206476:140-206476:141", "206476:143-206476:219", "206477:1-206477:14",
"206477:16-206477:31", "206477:33-206477:41", "206477:44-206477:51", "206477:53-206477:70", "206477:73-206477:75",
"206477:77-206477:89", "206477:91-206477:94", "206477:97-206477:115", "206477:118-206477:184", "206478:1-206478:27",
"206478:29-206478:136", "206478:139-206478:144", "206484:73-206484:95", "206484:98-206484:133", "206484:136-206484:163",
"206484:166-206484:186", "206484:189-206484:384", "206484:387-206484:463", "206484:465-206484:551", "206484:554",
"206484:556-206484:669", "206512:91-206512:123", "206512:125-206512:133", "206512:136-206512:161", "206512:163-206512:190",
"206512:193-206512:201", "206512:203-206512:212", "206512:214-206512:332", "206512:334-206512:584", "206512:587-206512:604",
"206512:607-206512:1005", "206512:1008-206512:1123", "206512:1126-206512:1163", "206512:1165-206512:1211", "206513:3-206513:39",
"206513:42-206513:188", "206513:191-206513:234", "206513:237-206513:238", "206513:241-206513:323", "206542:1-206542:115",
"206542:117-206542:165", "206542:168-206542:511", "206542:514-206542:547", "206542:550-206542:603", "206542:606-206542:668",
"206542:671-206542:727", "206542:730-206542:739", "206542:741-206542:833", "206550:77-206550:132", "206550:135-206550:144",
"206572:37-206572:47", "206573:2-206573:14", "206574:1-206574:87", "206575:1-206575:7", "206575:10",
"206575:12-206575:69", "206594:72-206594:107", "206594:110-206594:246", "206594:249-206594:281", "206595:1-206595:34",
"206595:37-206595:42", "206595:45-206595:193", "206596:1-206596:13", "206596:15-206596:220", "206596:222-206596:228",
"206596:231-206596:236", "206596:239-206596:292", "206596:295-206596:695", "206596:697-206596:728", "206596:730-206596:810",
"206598:1-206598:81", "206598:83-206598:103", "206598:105-206598:588", "206598:591-206598:657", "206598:659-206598:719",
"206605:1-206605:36", "206605:39-206605:78", "206744:49-206744:157", "206744:160-206744:192", "206744:195-206744:395",
"206744:398-206744:452", "206745:1-206745:81", "206745:84-206745:199", "206745:202-206745:224", "206745:227-206745:237",
"206745:240-206745:304", "206745:306-206745:318", "206745:321-206745:720", "206745:723-206745:796", "206745:799-206745:894",
"206745:897-206745:944", "206745:946-206745:1106", "206745:1108-206745:1524", "206745:1527-206745:1862", "206745:1988-206745:1996",
"206859:79-206859:210", "206859:212-206859:258", "206859:260-206859:323", "206859:325-206859:356", "206859:359-206859:609",
"206859:612-206859:681", "206859:684-206859:732", "206859:734-206859:768", "206859:771-206859:808", "206859:811-206859:827",
"206859:830-206859:848", "206866:1-206866:30", "206866:33-206866:113", "206866:115-206866:274", "206868:1-206868:3",
"206868:10-206868:16", "206869:1-206869:251", "206869:253-206869:271", "206869:274-206869:502", "206869:507-206869:520",
"206869:522-206869:566", "206869:568-206869:752", "206897:1-206897:34", "206897:38-206897:61", "206897:63-206897:102",
"206897:109", "206897:111-206897:112", "206897:114-206897:131", "206897:133-206897:137", "206901:1-206901:98",
"206906:1-206906:31", "206906:38-206906:94", "206906:96-206906:136", "206906:138-206906:139", "206906:142-206906:149",
"206906:151-206906:175", "206906:177-206906:206", "206940:1-206940:151", "206940:153", "206940:155-206940:298",
"206940:301-206940:382", "206940:384-206940:712", "206940:715-206940:803", "206940:805-206940:960", "206940:963-206940:1027",
"207099:83-207099:134", "207099:137-207099:172", "207099:175-207099:213", "207099:216-207099:314", "207099:316-207099:320",
"207099:323-207099:330", "207099:333-207099:367", "207099:370-207099:481", "207099:484-207099:602", "207099:605-207099:755",
"207099:757-207099:1046", "207099:1048-207099:1171", "207100:1-207100:91", "207100:94", "207214:57-207214:112",
"207214:114-207214:177", "207214:179-207214:181", "207214:184-207214:196", "207214:199-207214:220", "207214:223-207214:262",
"207214:265-207214:405", "207214:408-207214:482", "207214:485-207214:640", "207214:643-207214:708", "207214:718-207214:757",
"207214:759-207214:808", "207214:811-207214:829", "207217:1-207217:32", "207219:1-207219:112", "207220:1-207220:160",
"207221:1-207221:102", "207222:1-207222:17", "207222:20-207222:289", "207231:70-207231:84", "207231:86-207231:121",
"207231:123-207231:184", "207231:187-207231:189", "207231:192-207231:303", "207231:306-207231:354", "207231:357-207231:481",
"207231:484-207231:504", "207231:508-207231:549", "207231:552-207231:626", "207231:628-207231:690", "207231:693-207231:875",
"207231:878-207231:1000", "207231:1003-207231:1170", "207231:1173-207231:1187", "207231:1189-207231:1227", "207231:1229-207231:1415",
"207231:1418-207231:1445", "207231:1447-207231:1505", "207233:1-207233:119", "207233:121-207233:148", "207269:80-207269:394",
"207269:397-207269:436", "207269:439-207269:463", "207269:466-207269:551", "207269:568-207269:577", "207273:3-207273:877",
"207279:68-207279:138", "207279:141-207279:149", "207279:151-207279:237", "207279:240-207279:266", "207279:269-207279:307",
"207279:309-207279:416", "207279:498-207279:551", "207279:554-207279:640", "207279:643-207279:961", "207279:963-207279:1095",
"207279:1098-207279:1160", "207320:1-207320:110", "207320:112-207320:350", "207371:72-207371:117", "207371:120-207371:124",
"207372:1-207372:27", "207372:30-207372:113", "207372:116-207372:154", "207372:156-207372:174", "207372:176-207372:478",
"207372:480-207372:496", "207397:32-207397:77", "207397:80-207397:140", "207397:143-207397:179", "207398:1-207398:14",
"207398:16-207398:33", "207454:79-207454:95", "207454:98-207454:123", "207454:126-207454:259", "207454:261-207454:363",
"207454:365-207454:458", "207454:461-207454:498", "207454:501-207454:609", "207454:612-207454:632", "207454:635-207454:781",
"207454:784-207454:866", "207454:869-207454:974", "207454:977-207454:1064", "207454:1067-207454:1079", "207454:1081-207454:1321",
"207454:1323-207454:1464", "207454:1467-207454:1569", "207454:1571-207454:1604", "207454:1607-207454:1712", "207454:1714-207454:1988",
"207469:1-207469:31", "207469:34-207469:45", "207477:76-207477:104", "207477:107-207477:111", "207477:114-207477:147",
"207477:150-207477:295", "207477:298-207477:483", "207477:486-207477:494", "207477:497-207477:527", "207477:530-207477:563",
"207477:565-207477:570", "207487:50-207487:98", "207487:101-207487:311", "207487:313-207487:359", "207487:363-207487:468",
"207487:471-207487:472", "207488:1-207488:63", "207488:66-207488:92", "207488:95-207488:113", "207488:116-207488:198",
"207488:200-207488:250", "207488:252-207488:288", "207488:291-207488:365", "207488:368-207488:377", "207488:379-207488:440",
"207490:1-207490:48", "207490:51-207490:111", "207491:1-207491:176", "207491:179-207491:458", "207492:1-207492:20",
"207492:23-207492:298", "207515:79-207515:109", "207515:112-207515:132", "207515:134-207515:208", "207515:211-207515:225",
"207515:228-207515:320", "207515:322-207515:381", "207515:383-207515:498", "207515:500-207515:730", "207515:733-207515:849",
"207515:851-207515:954", "207515:957-207515:994", "207515:997-207515:1052", "207515:1055-207515:1143", "207515:1145-207515:1211",
"207517:1-207517:12", "207517:15-207517:57", "207518:1-207518:59", "207518:61-207518:83", "207882:22-207882:45",
"207883:1", "207883:3-207883:4", "207883:7-207883:75", "207884:1-207884:106", "207884:108-207884:183",
"207885:1-207885:90", "207886:1-207886:30", "207886:32-207886:90", "207886:92-207886:156", "207886:158-207886:166",
"207886:168-207886:171", "207889:1-207889:43", "207889:47-207889:57", "207889:60-207889:303", "207889:306-207889:442",
"207889:445", "207889:447-207889:551", "207889:553-207889:731", "207889:733-207889:907", "207889:910-207889:945",
"207898:1-207898:33", "207898:36-207898:57", "207898:60-207898:235", "207898:239-207898:257", "207898:260-207898:277",
"207905:75-207905:196", "207905:198-207905:281", "207905:284-207905:329", "207905:331-207905:402", "207905:404-207905:565",
"207905:568-207905:672", "207905:675-207905:805", "207905:807-207905:850", "207905:852-207905:861", "207905:864-207905:884",
"207905:886-207905:1180", "207905:1183-207905:1283", "207905:1285-207905:1331", "207905:1333-207905:1515", "207905:1518-207905:1734",
"207905:1737-207905:1796", "207920:84-207920:146", "207920:149-207920:241", "207920:243-207920:261", "207920:264-207920:291",
"207920:294-207920:486", "207920:489-207920:518", "207920:520-207920:598", "207920:600-207920:708", "207920:710-207920:826",
"207921:1-207921:37", "207921:40-207921:58", "207922:1-207922:69", "207922:71-207922:100", "207922:103-207922:126",
"207922:129-207922:242", "207922:274-207922:291", "207924:1-207924:52", "207924:54-207924:171", "207924:173-207924:178",
"207924:181-207924:339", "208307:2-208307:42", "208307:45", "208307:47-208307:70", "208307:72-208307:147",
"208307:150-208307:252", "208307:256-208307:259", "208307:262-208307:275", "208307:278-208307:342", "208307:345-208307:450",
"208307:453-208307:527", "208307:530-208307:583", "208307:586-208307:605", "208307:608-208307:616", "208307:618-208307:667",
"208307:670-208307:761", "208307:763-208307:798", "208307:800-208307:889", "208307:891-208307:893", "208307:896-208307:1055",
"208307:1057-208307:1205", "208307:1208-208307:1294", "208307:1297-208307:1328", "208339:77-208339:89", "208339:91-208339:122",
"208339:125-208339:208", "208339:211-208339:346", "208339:349-208339:363", "208341:1-208341:84", "208341:87-208341:117",
"208341:120-208341:513", "208341:515-208341:685", "208341:688-208341:693", "208341:695-208341:775", "208341:777-208341:824",
"208351:83-208351:97", "208351:100-208351:356", "208351:359-208351:367", "208351:369", "208352:1-208352:15",
"208352:17", "208352:19", "208353:1-208353:76", "208353:78-208353:269", "208353:271-208353:348",
"208357:1-208357:70", "208357:73-208357:507", "208390:72-208390:128", "208390:130-208390:169", "208391:52-208391:82",
"208391:84-208391:162", "208391:164-208391:216", "208391:219-208391:493", "208391:495-208391:498", "208391:500-208391:523",
"208391:526-208391:533", "208391:535-208391:588", "208391:591-208391:660", "208391:663-208391:869", "208427:49-208427:89",
"208427:92-208427:161", "208427:164", "208427:166-208427:173", "208427:175-208427:268", "208427:271-208427:312",
"208427:315", "208427:317-208427:335", "208427:337-208427:361", "208427:364-208427:402", "208427:404-208427:422",
"208427:425-208427:577", "208427:580-208427:647", "208428:1-208428:58", "208428:61-208428:68", "208428:70-208428:156",
"208428:159-208428:227", "208429:1-208429:56", "208429:59-208429:139", "208429:141-208429:159", "208429:162-208429:237",
"208429:240-208429:440", "208429:442-208429:452", "208429:455-208429:589", "208429:592-208429:712", "208429:715-208429:922",
"208487:2-208487:26", "208487:29-208487:159", "208487:161-208487:307", "208487:309-208487:459", "208487:462-208487:476",
"208487:479-208487:621", "208509:71-208509:232", "208538:2-208538:43", "208540:1-208540:26", "208540:29-208540:98",
"208541:1-208541:57", "208541:59-208541:173", "208541:175-208541:376", "208541:378-208541:413", "208551:119-208551:193",
"208551:195-208551:212", "208551:215-208551:300", "208551:303-208551:354", "208551:356-208551:554", "208551:557-208551:580",
"208686:73-208686:79", "208686:82-208686:181", "208686:183-208686:224", "208686:227-208686:243", "208686:246-208686:311",
"208686:313-208686:459" ) ),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_401.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_402.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_403.root')
)
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
876edad12dae623475e52f382350d060ce20d0c8
|
01aeb49dc7d6bd565055a559017861097d4054ea
|
/myenv/bin/python-config
|
7e374a2e4a3430bf6582b3bfb0b7d81899ecf134
|
[] |
no_license
|
lutchin/django
|
acc1e0572ea2ef4f0c55584c4e25d3730e31a78a
|
44f3a107de9cb019aeb0b05abf430acc1281ff01
|
refs/heads/master
| 2020-12-02T07:41:59.841608
| 2017-07-09T22:25:40
| 2017-07-09T22:25:40
| 96,715,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
#!/home/pavel/PycharmProjects/input/myenv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"lutchin@gmail.com"
] |
lutchin@gmail.com
|
|
0db7c6e3c890058c81dae222415ebf0ed77c0316
|
9f7d4d76c7e66aa424a5f8723575dc489f1fd2ab
|
/2015/19/input.py
|
9ab34c71179e16ff6e97feae921de5f5e01b13a7
|
[
"MIT"
] |
permissive
|
kristianwiklund/AOC
|
df5a873287304816f25d91259c6e6c99c7a5f4bf
|
d9a668c406d2fd1b805d9b6a34cffa237a33c119
|
refs/heads/master
| 2023-01-12T09:01:11.012081
| 2023-01-02T19:12:29
| 2023-01-02T19:12:29
| 227,458,380
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
def tokens():
return {"Al":["ThF","ThRnFAr"],
"B":["BCa","TiB","TiRnFAr"],
"Ca":["CaCa","PB","PRnFAr","SiRnFYFAr","SiRnMgAr","SiTh"],
"e":["HF","NAl","OMg"],
"F":["CaF","PMg","SiAl"],
"H":["CRnAlAr","CRnFYFYFAr","CRnFYMgAr","CRnMgYFAr","HCa","NRnFYFAr","NRnMgAr","NTh","OB","ORnFAr"],
"Mg":["BF","TiMg"],
"N":["CRnFAr","HSi"],
"O":["CRnFYFAr","CRnMgAr","HP","NRnFAr","OTi"],
"P":["CaP","PTi","SiRnFAr"],
"Si":["CaSi"],
"Th":["ThCa"],
"Ti":["BP","TiTi"]}
def input():
return "ORnPBPMgArCaCaCaSiThCaCaSiThCaCaPBSiRnFArRnFArCaCaSiThCaCaSiThCaCaCaCaCaCaSiRnFYFArSiRnMgArCaSiRnPTiTiBFYPBFArSiRnCaSiRnTiRnFArSiAlArPTiBPTiRnCaSiAlArCaPTiTiBPMgYFArPTiRnFArSiRnCaCaFArRnCaFArCaSiRnSiRnMgArFYCaSiRnMgArCaCaSiThPRnFArPBCaSiRnMgArCaCaSiThCaSiRnTiMgArFArSiThSiThCaCaSiRnMgArCaCaSiRnFArTiBPTiRnCaSiAlArCaPTiRnFArPBPBCaCaSiThCaPBSiThPRnFArSiThCaSiThCaSiThCaPTiBSiRnFYFArCaCaPRnFArPBCaCaPBSiRnTiRnFArCaPRnFArSiRnCaCaCaSiThCaRnCaFArYCaSiRnFArBCaCaCaSiThFArPBFArCaSiRnFArRnCaCaCaFArSiRnFArTiRnPMgArF"
|
[
"githubkristian@snabela.nl"
] |
githubkristian@snabela.nl
|
dcf36cbc60fc14c4439c8aacebc223dd42a3772e
|
67bdebd561b19af9bf759b6ed5de8556b93ea91f
|
/sweep_avm_hps.py
|
69c3462e3a1b63ee50ef08b1c83461cc8250860a
|
[] |
no_license
|
rlowrance/re-avm
|
91371ec79f6b6f48e17643da4dfb7a4894d0a0ca
|
d4cfa62e9f65d325e8ac98caa61d3fb666b8a6a2
|
refs/heads/master
| 2021-01-17T07:34:16.876133
| 2017-02-06T21:04:59
| 2017-02-06T21:04:59
| 42,865,972
| 31
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,027
|
py
|
'sweep hyparamaters over grid'
from __future__ import division
import pdb
import AVM
from columns_contain import columns_contain
import layout_transactions
import sweep_types
cc = columns_contain
def sweep_avm_hps(model_testperiod_grid, samples, random_state, just_test=False, verbose=True):
'Return dictionary of test results for grid HPs on samples[test_period]'
result = {}
def vprint(s):
if verbose:
print s
def max_features_s(max_features):
'convert to 4-character string (for printing)'
return max_features[:4] if isinstance(max_features, str) else ('%4.1f' % max_features)
def fit_and_run(avm):
'return a ResultValue'
avm.fit(samples)
mask = samples[layout_transactions.yyyymm] == test_period
samples_yyyymm = samples[mask]
predictions = avm.predict(samples_yyyymm)
if predictions is None:
print 'no predictions!'
pdb.set_trace()
actuals = samples_yyyymm[layout_transactions.price]
return sweep_types.ResultValue(actuals, predictions)
def search_en(n_months_back, test_period, grid):
'search over ElasticNet HPs, appending to result'
result = {}
for units_X in grid.units_X_seq:
for units_y in grid.units_y_seq:
for alpha in grid.alpha_seq:
for l1_ratio in grid.l1_ratio_seq:
vprint(
'%6d %3s %1d %3s %3s %4.2f %4.2f' %
(test_period, 'en', n_months_back, units_X[:3], units_y[:3],
alpha, l1_ratio)
)
avm = AVM.AVM(
model_name='ElasticNet',
forecast_time_period=test_period,
random_state=random_state,
n_months_back=n_months_back,
units_X=units_X,
units_y=units_y,
alpha=alpha,
l1_ratio=l1_ratio,
)
result_key = sweep_types.ResultKeyEn(
n_months_back,
units_X,
units_y,
alpha,
l1_ratio,
)
pdb.set_trace()
result[result_key] = fit_and_run(avm)
if just_test:
return result
return result
def search_gbr(n_months_back, test_period, grid):
'search over GradientBoostingRegressor HPs, appending to result'
result = {}
for n_estimators in grid.n_estimators_seq:
for max_features in grid.max_features_seq:
for max_depth in grid.max_depth_seq:
for loss in grid.loss_seq:
for learning_rate in grid.learning_rate_seq:
vprint(
'%6d %3s %1d %4d %4s %3d %8s %4.2f' %
(test_period, 'gbr', n_months_back,
n_estimators, max_features_s(max_features), max_depth, loss, learning_rate)
)
avm = AVM.AVM(
model_name='GradientBoostingRegressor',
forecast_time_period=test_period,
random_state=random_state,
n_months_back=n_months_back,
learning_rate=learning_rate,
loss=loss,
alpha=.5 if loss == 'quantile' else None,
n_estimators=n_estimators, # number of boosting stages
max_depth=max_depth, # max depth of any tree
max_features=max_features, # how many features to test when splitting
)
result_key = sweep_types.ResultKeyGbr(
n_months_back,
n_estimators,
max_features,
max_depth,
loss,
learning_rate,
)
pdb.set_trace()
result[result_key] = fit_and_run(avm)
if just_test:
return result
pdb.set_trace()
return result
def search_rf(n_months_back, test_period, grid):
'search over RandomForestRegressor HPs, appending to result'
result = {}
for n_estimators in grid.n_estimators_seq:
for max_features in grid.max_features_seq:
for max_depth in grid.max_depth_seq:
vprint(
'%6d %3s %1d %4d %4s %3d' %
(test_period, 'rfr', n_months_back,
n_estimators, max_features_s(max_features), max_depth)
)
avm = AVM.AVM(
model_name='RandomForestRegressor',
forecast_time_period=test_period,
random_state=random_state,
n_months_back=n_months_back,
n_estimators=n_estimators, # number of boosting stages
max_depth=max_depth, # max depth of any tree
max_features=max_features, # how many features to test when splitting
)
result_key = sweep_types.ResultKeyRfr(
n_months_back,
n_estimators,
max_features,
max_depth,
)
pdb.set_trace()
result[result_key] = fit_and_run(avm)
if just_test:
return result
return result
# grid search for all model types
pdb.set_trace()
result = {}
for model, testperiod_grid in model_testperiod_grid.iteritems():
test_period, grid = testperiod_grid
for n_months_back in grid.n_months_back_seq:
if model == 'en':
result = dict(result, **search_en(n_months_back, test_period, grid))
if model == 'gb':
more = search_gbr(n_months_back, test_period, grid)
pdb.set_trace()
result = dict(result, **more)
result = dict(result, **search_gbr(n_months_back,test_period, grid))
if model == 'rf':
result = dict(result, search_rf(n_months_back, test_period, grid))
if just_test:
break
return result
|
[
"roy.lowrance@gmail.com"
] |
roy.lowrance@gmail.com
|
f6501bb433d19b4e8dc86423d939fae6e6b7ed6f
|
59629d5f64a48a6bfccf5795047a0a4e9fe1c587
|
/footballtournamentleague/asgi.py
|
bfc54df69679d7bbd47a873a9c8e7ac2063a0ab3
|
[] |
no_license
|
vaeryn-uk/football-tournament-league
|
713df4740f683b6fd695bcc165e365e644d33e5d
|
e929171d2b9e95e4071d332d820f08d8ed763656
|
refs/heads/main
| 2023-06-17T13:38:23.128843
| 2021-07-11T17:01:47
| 2021-07-11T17:01:47
| 382,633,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
"""
ASGI config for footballtournamentleague project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'footballtournamentleague.settings')
application = get_asgi_application()
|
[
"andrewcook1990@gmail.com"
] |
andrewcook1990@gmail.com
|
b95687a3080ae3065b7a6dc4cd9c614d5fa8a37e
|
94f9b83245f6be17648e5f3d0c11509f60d3974b
|
/choose.py
|
323cc4d98d56b0e2c210508bcf40a03ffe0d896f
|
[] |
no_license
|
Cotyaoo/ChooseNum
|
f0dee6a65c62da77ca3a9118fc1867137bd56675
|
f007c105f6a3161f47eaf5142db86c291338b9a7
|
refs/heads/master
| 2020-12-27T20:03:03.046601
| 2020-02-03T18:39:34
| 2020-02-03T18:39:34
| 238,034,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
import random
def choose_number():
a = random.randint(1, 100)
b = int(input("Введите число от 1 до 100: "))
p = 0
while p < 15:
if a != b:
print("Не угадали! Компьютер загадал число: ", a)
return choose_number()
p += 1
elif a == b:
print("Вы угадали")
break
choose_number()
|
[
"noreply@github.com"
] |
Cotyaoo.noreply@github.com
|
3b1dd594d751b4cc19eedb56c0807d574d123925
|
5f148e5b75f4afc6edd1432ffdc425932d0fbe51
|
/physionet-django/project/migrations/0045_auto_20200805_1001.py
|
ba412e27baaf2a52d794801cfdf1f1c652887c12
|
[
"BSD-3-Clause"
] |
permissive
|
MIT-LCP/physionet-build
|
82c4092dabe1ef2436e4a55d73bdadd5b4b1c624
|
304e093dc550da8636552dc601d6545c07ffc771
|
refs/heads/dev
| 2023-08-16T10:33:44.203786
| 2023-08-10T17:32:27
| 2023-08-10T17:32:27
| 82,719,282
| 50
| 17
|
BSD-3-Clause
| 2023-09-14T20:52:38
| 2017-02-21T19:37:04
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 548
|
py
|
# Generated by Django 2.2.10 on 2020-08-05 14:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('project', '0044_auto_20200512_1029'),
]
operations = [
migrations.AlterField(
model_name='duasignature',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dua_signatures', to=settings.AUTH_USER_MODEL),
),
]
|
[
"felipe.torres.cs@gmail.com"
] |
felipe.torres.cs@gmail.com
|
8f5ba8d9c2ed80b13bc8f6549a0f7f87e04804b6
|
1ad24aa3fa880e97ddbbf5eb2ae6fc491207351d
|
/learnPython/sys.py
|
25307c0633dc57253b001a5c9cfced1203121603
|
[] |
no_license
|
qdxt/python
|
9258378f9a20c68e49b078901e3f1d1a6ab23b3c
|
052745d86dbcde6a9b14d8682817eafbcb97dda5
|
refs/heads/master
| 2021-01-11T08:01:29.601575
| 2018-05-16T06:50:06
| 2018-05-16T06:50:06
| 113,291,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from sys import argv,path #导入特定的成员
print('================Python import mode==========================')
print('path:',path)
counter = 100
print('================Python import mode==========================')
#允许同时为多个变量赋值
# a = b = c = 1
# print('a:',a,'b:',b,'c:',c)
#可以为多个对象指定多个变量
a,b,c = 1,2,'runoob'
print('a:',a,'b:',b,'c:',c)
#int float bool complex(复数)
print(type(a),type(c))
|
[
"980165003@qq.com"
] |
980165003@qq.com
|
952c934a258215a3e3f4cc666120893ffff24cdf
|
aa3354835e09a7d41466310355eabc2178ec2604
|
/code/events.py
|
13f4dcdd900032aed4359fe20124cf28fd54576b
|
[] |
no_license
|
bug320/2048Game
|
7b555c86531725f8b332573061c5e00c6f2898ea
|
1521aac9ad949a28548f4ec70423931d2cb8bb0f
|
refs/heads/master
| 2021-09-06T04:27:58.353842
| 2018-02-02T10:11:22
| 2018-02-02T10:11:22
| 105,138,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
# -*- coding:utf-8 -*-
class UserInputError(Exception):
pass
class TryApp(object):
def __init__(self, name = None):
self.name = name if name else "bug320"
pass
def pname(self):
print self.name
pass
pass
class UserInut(object):
def __init__(self,functable):
self.functable = functable
pass
def doFunc(self,index):
func,argv = self.functable[index]
func(argv)
pass
def changeArgs(self, index, args):
if not index:
raise
pass
func_,args_ = self.functable[index]
args_ = args
self.functable(func_,args_)
pass
pass
if __name__ == '__main__':
app = TryApp()
func = {'p':(app.pname,None)}
while True:
a = raw_input("Input:")
if a == 'q':
raise UserInputError("InputError")
break
pass
|
[
"1046218884@qq.com"
] |
1046218884@qq.com
|
1cd6adfb765fafd2922978f2212706cbf1a4d359
|
f116ebf876b630b00ae7f0a3310c4a01b22d665c
|
/repo_files/zips/plugin.video.saturnhosting/resources/modules/tools.py
|
e07ab54773e9d4df1455db1be1374d310d083e06
|
[] |
no_license
|
sClarkeIsBack/SaturnHosting
|
673dd4c94da3976461dc05dd6449057fbfc1155a
|
26051fb1ec7d1833f82985308fef38acdd882270
|
refs/heads/master
| 2020-06-24T15:18:08.968909
| 2017-10-01T20:17:14
| 2017-10-01T20:17:14
| 96,941,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,091
|
py
|
import os,re,sys,xbmc,json,base64,client,control,string,urllib,urlparse,requests,shutil,xbmcplugin,xbmcgui,socket
from resources.modules import user
def regex_from_to(text, from_string, to_string, excluding=True):
if excluding:
try: r = re.search("(?i)" + from_string + "([\S\s]+?)" + to_string, text).group(1)
except: r = ''
else:
try: r = re.search("(?i)(" + from_string + "[\S\s]+?" + to_string + ")", text).group(1)
except: r = ''
return r
def regex_get_all(text, start_with, end_with):
r = re.findall("(?i)(" + start_with + "[\S\s]+?" + end_with + ")", text)
return r
def addDir(name,url,mode,iconimage,fanart,description):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&description="+urllib.quote_plus(description)
ok=True
liz=xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={"Title": name,"Plot":description,})
liz.setProperty('fanart_image', fanart)
if mode==4:
liz.setProperty("IsPlayable","true")
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
elif mode==7 or mode==10 or mode==21:
liz.setInfo( type="Video", infoLabels={"Title": name,"Plot":description})
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
else:
liz.setInfo( type="Video", infoLabels={"Title": name,"Plot":description})
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
xbmcplugin.endOfDirectory
def addDirMeta(name,url,mode,iconimage,fanart,description,year,cast,rating,runtime,genre):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&description="+urllib.quote_plus(description)
ok=True
liz=xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={"Title": name,"Plot":description,"Rating":rating,"Year":year,"Duration":runtime,"Cast":cast,"Genre":genre})
liz.setProperty('fanart_image', fanart)
liz.setProperty("IsPlayable","true")
cm = []
cm.append(('Play Trailer','XBMC.RunPlugin(plugin://'+user.id+'/?mode=9&url='+str(name)+')'))
cm.append(('Movie Information', 'XBMC.Action(Info)'))
liz.addContextMenuItems(cm,replaceItems=True)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
return ok
def OPEN_URL(url):
headers = {}
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'
link = requests.session().get(url, headers=headers, verify=False).text
link = link.encode('ascii', 'ignore')
return link
def clear_cache():
xbmc.log('CLEAR CACHE ACTIVATED')
xbmc_cache_path = os.path.join(xbmc.translatePath('special://home'), 'cache')
confirm=xbmcgui.Dialog().yesno("Please Confirm","Please Confirm You Wish To Delete Your Kodi Application Data","","","Cancel","Clear")
if confirm:
if os.path.exists(xbmc_cache_path)==True:
for root, dirs, files in os.walk(xbmc_cache_path):
file_count = 0
file_count += len(files)
if file_count > 0:
for f in files:
try:
os.unlink(os.path.join(root, f))
except:
pass
for d in dirs:
try:
shutil.rmtree(os.path.join(root, d))
except:
pass
dialog = xbmcgui.Dialog()
dialog.ok(user.name, "Cache Cleared Successfully!")
xbmc.executebuiltin("Container.Refresh()")
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
class Trailer:
def __init__(self):
self.base_link = 'http://www.youtube.com'
self.key_link = 'QUl6YVN5QnZES3JnSU1NVmRPajZSb1pnUWhaSzRHM3MybDZXeVhn'
self.key_link = '&key=%s' % base64.urlsafe_b64decode(self.key_link)
self.search_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&maxResults=5&q=%s'
self.youtube_search = 'https://www.googleapis.com/youtube/v3/search?q='
self.youtube_watch = 'http://www.youtube.com/watch?v=%s'
def play(self, name, url=None):
try:
url = self.worker(name, url)
if url == None: return
title = control.infoLabel('listitem.title')
if title == '': title = control.infoLabel('listitem.label')
icon = control.infoLabel('listitem.icon')
item = control.item(path=url, iconImage=icon, thumbnailImage=icon)
try: item.setArt({'icon': icon})
except: pass
item.setInfo(type='Video', infoLabels = {'title': title})
control.player.play(url, item)
except:
pass
def worker(self, name, url):
try:
if url.startswith(self.base_link):
url = self.resolve(url)
if url == None: raise Exception()
return url
elif not url.startswith('http://'):
url = self.youtube_watch % url
url = self.resolve(url)
if url == None: raise Exception()
return url
else:
raise Exception()
except:
query = name + ' trailer'
query = self.youtube_search + query
url = self.search(query)
if url == None: return
return url
def search(self, url):
try:
query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
url = self.search_link % urllib.quote_plus(query) + self.key_link
result = client.request(url)
items = json.loads(result)['items']
items = [(i['id']['videoId']) for i in items]
for url in items:
url = self.resolve(url)
if not url is None: return url
except:
return
def resolve(self, url):
try:
id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
result = client.request('http://www.youtube.com/watch?v=%s' % id)
message = client.parseDOM(result, 'div', attrs = {'id': 'unavailable-submessage'})
message = ''.join(message)
alert = client.parseDOM(result, 'div', attrs = {'id': 'watch7-notification-area'})
if len(alert) > 0: raise Exception()
if re.search('[a-zA-Z]', message): raise Exception()
url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
return url
except:
return
def getlocalip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 0))
s = s.getsockname()[0]
return s
def getexternalip():
open = OPEN_URL('http://canyouseeme.org/')
ip = re.search('(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)',open)
return str(ip.group())
def MonthNumToName(num):
if '01' in num:
month = 'January'
elif '02' in num:
month = 'Febuary'
elif '03' in num:
month = 'March'
elif '04' in num:
month = 'April'
elif '05' in num:
month = 'May'
elif '06' in num:
month = 'June'
elif '07' in num:
month = 'July'
elif '08' in num:
month = 'Augast'
elif '09' in num:
month = 'September'
elif '10' in num:
month = 'October'
elif '11' in num:
month = 'November'
elif '12' in num:
month = 'December'
return month
|
[
"mediahubiptv@gmail.com"
] |
mediahubiptv@gmail.com
|
a33c8b2e9486fbe2b0792af28dd2a1d7f4c36758
|
083ece7a46de36c44424124259033b1ce0863f26
|
/novelist_ni_narou/dataset/arasuji.py
|
636f336e6900fddd6acf599e1b2d445dc2d1d166
|
[] |
no_license
|
knok/chainer-SeqGAN
|
bb4715c6cc8886dca18f759590cbbb3b04ca5937
|
c33970190f4e0e8918ec89e7416b19584857131f
|
refs/heads/master
| 2021-01-11T09:24:13.211038
| 2016-12-23T03:42:03
| 2016-12-23T03:42:03
| 77,117,256
| 3
| 4
| null | 2016-12-22T06:09:01
| 2016-12-22T06:09:00
| null |
UTF-8
|
Python
| false
| false
| 4,071
|
py
|
import re
import unicodedata
import numpy as np
from collections import Counter
import os
import json
import pickle
class Arasuji(object):
def __init__(self, raw_data, vocab_size, seq_length):
self.vocab_size = vocab_size
self.seq_length = seq_length
self.raw_text=[]
for i, j in enumerate(raw_data):
x = self.clean(j['summary'])
if x and len(x) < seq_length:
self.raw_text.append(x)
self.data_num = len(self.raw_text)
print('data num',self.data_num)
words = []
for line in self.raw_text:
words.extend(line)
words.append(' ')
counter = Counter(words)
self.word_freq = {word: cnt for word, cnt in counter.most_common(vocab_size-3)}
self.vocab = ['_START'] + ['<EOS>'] + sorted(list(self.word_freq)) + [' ']
self.word2idx = {word:i for i, word in enumerate(self.vocab)}
print('word num',len(self.vocab))
self.data = np.ones((self.data_num, self.seq_length), np.int32) * (vocab_size-1)
for i in range(self.data_num):
for j in range(len(self.raw_text[i])):
w = self.raw_text[i][j]
if w in self.vocab:
self.data[i][j] = self.word2idx[w]
else:
self.data[i][len(self.raw_text[i])] = 1
perm = np.random.permutation(self.data_num)
self.test_idx = perm[:11700]
self.train_idx = perm[11700:]
def clean(self, string):
SpecialLetters = r"""*|¥|¥|#|#|?|×|+|†|:|;|~|¨|\xad|°|´|'̈|゙ ゚
|×|ĵ|α|β|π|σ|φ|ω|м|о|х|٩|۶|ก|ค|ง|จ|ณ|ท|\||
|น|ฟ|ม|ย|ร|ส|ห|ั|า|ิ|ี|ุ|เ|แ|ไ|่|‐|–|─|—|•|‥|′| '́|̈'
|…|※|‼|⁇|⁈|⁉|⁺|℃|ⅰ|ⅱ|ⅲ|←|↑|→|↓|⇒|⇔|−|〜 |〝|\〟|〜|〟
|∞|≒|≧|≪|≫|①|②|③|④|⑤|⑥|⑦|⑧|⑨|⑩|⑪|⑫|\^
|━|│|┌|┐|■|□|△|▼|▽|◆|◇|○|◎|●|◒|◯|〇|◼|〓|★|☆|♀|♂|♥|♡|♦|♪|♬|♯|⚪|⚫|✕|✖|✳|〃
|\x81|\x8d|«|·|»|â|ä|è|é|ö|ø|ə|ɪ|ɲ|ʕ|̀|́|̄|̈|ά|γ|δ|ς|υ|д|з|щ|я|ᅠ|\u200f|―|‹|›|∀|√|∠|∮|∵|∽
|≋|≓|≔|≕|≖|≠|≡|≣|⊂|⊰|⊱|⊴|⋆|⋛|⋯|⌒|┏|┓|├|┤|╋|═|▄|◥|◻|◽|☓|☝|☪|☺|
♉|♠|♢|♤|♧|♭|⚠|✤|✩|✴|✽|❁|❕|❗|❪|❫|❮|❯|➖|➡|⬆|⭐|؈|'ฺ|∽|♉|,"""
string = string.split('。')[0] + '。'
string = ' '.join(string.split())
string = re.sub(r'{|\{|\[|『|【|《|〈|〔|〖','「',string)
string = re.sub(r'{|\}|\]|』|】|》|〉|〕|〗','」', string)
string = re.sub(r'[‘’“”″`\']', '"', string)
string = re.sub(r"(.+?)", '', string)
string = re.sub(r"\(.+?\)", '', string)
string = re.sub(r"<.+?>", '', string)
string = unicodedata.normalize('NFKC', string).lower()
string = re.sub('https?://(\S)+', '', string)
string = re.sub(SpecialLetters, '', string)
string = ' '.join(string.split())
if len(set(string)) < 5:
return ''
return string
def get_train_data(self,batch_size):
idx = np.random.choice(self.train_idx, batch_size, replace=False)
return self.data[idx]
def get_test_data(self, batch_size):
idx = np.random.choice(self.test_idx, batch_size, replace=False)
return self.data[idx]
if __name__ == '__main__':
json_data = []
for i, f_name in enumerate(os.listdir('dataset')):
try:
with open('dataset/' + f_name) as f:
json_data.append(json.load(f))
except:
pass
Loader = Arasuji(json_data, vocab_size=3000, seq_length=40)
with open('arasuji.dat', 'wb') as f:
pickle.dump(Loader, f)
|
[
"fukuta6140@gmail.com"
] |
fukuta6140@gmail.com
|
5192f3acfcc9c4a8e579a483b0fcfee562d4d63a
|
1edfdc6194342d102e6347216ce0661e25171912
|
/dl_yt_vids.py
|
15a5b77fdc49f62af787bf6866b6beb9d967692b
|
[] |
no_license
|
jivakalan/action_spotting_ufc
|
2e4e08496d9256c2144d1072f7e2a702d351507e
|
098573b01bf04b9573d2c7a29071a2ab7ea20c5a
|
refs/heads/master
| 2023-04-17T23:05:39.636163
| 2021-05-06T00:10:27
| 2021-05-06T00:10:27
| 305,566,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 19 22:04:44 2020
@author: jkalan
"""
from pytube import YouTube
from datetime import datetime
import os,time
def downloading_video(link):
now = datetime.now() # current date and time
timeNow = now.strftime("%m-%d-%Y at %H_%M_%S")
yt = YouTube(link)
time.sleep(5)
video = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
time.sleep(2)
folderPath = os.path.join(os.getcwd(),timeNow)
os.mkdir(folderPath)
videoPath = os.path.join(os.getcwd(),timeNow,video.title)
video.download(output_path = folderPath)
return videoPath
for url in video_url:
yt= YouTube(url).streams.first().download()
time.sleep(5)
video = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
time.sleep(2)
video_url =['https://www.youtube.com/watch?v=kB4jgaYYBlI','https://www.youtube.com/watch?v=Ro_zceOYrb0']
'https://www.youtube.com/watch?v=U-fhb1RnRzM']
video_url =['https://www.youtube.com/watch?v=knRMfWDjKZE','https://www.youtube.com/watch?v=RK9Wry-DHew','https://www.youtube.com/watch?v=L3sR0-BJQPk&t=1s','https://www.youtube.com/watch?v=y5tmDY9-BAU']
|
[
"kalan.jiva@gmail.com"
] |
kalan.jiva@gmail.com
|
2cb1da17f4c7a1704330b470888dc115a7e8bbed
|
c548d5d6609eb4bff3511cd822fc6ec2580c4935
|
/promises/models.py
|
7d5fcec6d725294d9d68c2ecec348a9a50f6072a
|
[] |
no_license
|
bmy4415/django_restframework_homework
|
5abc934b28ffa8eb4428d19e05771bc229034876
|
3ffecc1540afef8f49a7ee63c93bbf90708b5067
|
refs/heads/master
| 2020-03-09T02:40:04.721914
| 2018-04-16T13:25:37
| 2018-04-16T13:25:37
| 128,545,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
from django.db import models
class Promise(models.Model):
created = models.DateTimeField(auto_now_add=True)
sinceWhen = models.DateTimeField()
tilWhen = models.DateTimeField()
user1 = models.ForeignKey("auth.User", related_name="promises_as_inviter", on_delete=models.CASCADE)
user2 = models.ForeignKey("auth.User", related_name="promises_as_invitee", on_delete=models.CASCADE)
|
[
"bmy4415@naver.com"
] |
bmy4415@naver.com
|
8793d4ce5b93e0abb4d7dd2d64c6957bdf1997b2
|
847ecf9ded56b8680bde70ff4544bd36ccf26eb3
|
/user_listsp.py
|
b08eab53c5a1a907fed769c3bf913e4ca12c0be3
|
[] |
no_license
|
camly93/uph_heroku
|
4366db42b3ac8d26b5bb8628afd5ae977304bd38
|
ce1d76137b8631d455b5719d02a3071b4fc22e0f
|
refs/heads/master
| 2021-01-18T09:31:40.963817
| 2016-04-16T09:42:13
| 2016-04-16T09:42:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,139
|
py
|
from mongoengine import *
import codecraper
connect('amazon_rank',host='mongodb://amazon:mlab1234@ds015740.mlab.com:15740/amazon_rank')
class Listsp(EmbeddedDocument):
id_sanpham = StringField()
class User_sanpham(Document):
username = StringField()
sanpham = ListField(EmbeddedDocumentField(Listsp))
class User_nhap_sp:
def __init__(self, name,ten_sp):
self.name = name
self.sp_nhap= ten_sp
def list_sanpham(nhap_user):
if len(User_sanpham.objects())==0:
return "khong"
else:
for user_data in User_sanpham.objects():
print(user_data.username)
if user_data.username==nhap_user:
return user_data.sanpham
return "khong"
def xoa_sanpham(nhap_user,xoa_sp):
for user_data in User_sanpham.objects():
if user_data.username == nhap_user:
listsp = Listsp(id_sanpham=xoa_sp)
user_data.sanpham.remove(listsp)
user_data.save()
break
def nhap_sanpham(nhap_user,nhap_sp):
user_nhap=User_nhap_sp(nhap_user,nhap_sp)
print(user_nhap.name)
#check rank sanpham luon khi nhap
codecraper.check_rank(nhap_sp)
listsp=Listsp(id_sanpham=user_nhap.sp_nhap)
if len(User_sanpham.objects())==0:
user_sp=User_sanpham(username=user_nhap.name,sanpham=[listsp])
user_sp.save()
else:
w=0
## chi cho phep nhap toi da 20 san pham
if len(User_sanpham.objects())<=20:
for user_data in User_sanpham.objects():
if user_data.username==user_nhap.name:
w=1
# print(sp.id_sanpham)
k=0
for sp in user_data.sanpham:
if user_nhap.sp_nhap== sp.id_sanpham:
k=1
break
if k==0:
user_data.sanpham.append(listsp)
user_data.save()
break
if w==0:
user_sp=User_sanpham(username=user_nhap.name,sanpham=[listsp])
user_sp.save()
|
[
"phuc1009@gmail.com"
] |
phuc1009@gmail.com
|
f96fa7588da7efa6d77b1d26da414ebe94dffcd3
|
c17b2c46032dec8bfadcdb6b2fc99d9f220f4b39
|
/venv/bin/easy_install
|
2fab74bca2f3f62282c116c087103c76b26b7c2a
|
[] |
no_license
|
soumyaevan/PythonSelenium
|
70a0e371e997d7c9ed74288b27a349658b5e4622
|
846e6411c1454893b51e3882e51958c7c06c7246
|
refs/heads/master
| 2022-04-22T03:45:44.736114
| 2020-04-10T17:32:39
| 2020-04-10T17:32:39
| 254,648,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
#!/Users/soumyasen/PycharmProjects/SeleniumTest/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"sensoumya94@gmail.com"
] |
sensoumya94@gmail.com
|
|
5563a838ed4f87a906a02b3048563e2919d0fdd7
|
25340be13d4b7fa3e93fd848ccc11763aa753814
|
/data_processing/format_lyft.py
|
dcdb4b2df4d6a9a137c3bb589f90d598c40cc301
|
[] |
no_license
|
CVPR2020-9832/Spectral-Clustering-Graph-LSTM
|
93e60e26eb745562bb912d980f5b2976423600d8
|
9cc48a5bde502e1fc6cc03ef041b2b4d28276e84
|
refs/heads/master
| 2020-09-15T19:20:57.854651
| 2019-11-23T05:51:22
| 2019-11-23T05:51:22
| 223,537,702
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,968
|
py
|
import json
import numpy as np
def get_sample(filepath):
'''
to load the sample.json file
:param filepath: filepath of the sample.json
:return: sample
'''
with open(filepath) as json_file:
sample = json.load(json_file)
return sample
def get_sample_data(filepath):
'''
to load the sample_data.json file
:param filepath: filepath of the sample_data.json
:return: sample_data
'''
with open(filepath) as json_file:
sample_data = json.load(json_file)
return sample_data
def get_sample_annotation(filepath):
'''
to load the sample_annotation.json file
:param filepath: filepath of the sample_annotation.json
:return: sample_annotation file
'''
with open(filepath) as json_file:
sample_annotation = json.load(json_file)
return sample_annotation
def get_timestamp_list(sample, scene_token):
'''
to get the list with sample tokens corresponding to timestamp
:param sample: sample from sample.json
:param scene_token: scene_token from scene.json
:return: sorted list of timestamp, tokens
'''
time_stamp_list = []
visited_set = set()
count = 0
for items_sample in sample:
if items_sample['scene_token'] == scene_token:
time_stamp_list.append([items_sample['timestamp'],items_sample['token']])
visited_set.add(items_sample['token'])
count +=1
sorted_timestamp_list = sorted(time_stamp_list)
return sorted_timestamp_list
def zero_row_process(zero_row):
'''
to format the timestamp row
:param zero_row: row to be formatter, timestamp_row
:return: formatted row as an array
'''
first_ele = zero_row[0]
frame_ID_list = []
j = 1
for i in zero_row:
if i == first_ele:
frame_ID_list.append(j)
else:
first_ele = i
j+=1
frame_ID_list.append(j)
frame_ID_array = np.asarray(frame_ID_list, dtype= int).T
return frame_ID_array
def create_ultimate_list(sorted_timestamp_list, sample_data, sample_annotation):
    """Assemble per-timestamp records joining sample data and annotations.

    The original re-scanned the full ``sample_data`` and ``sample_annotation``
    lists once per timestamp (O(T*N)). This version groups both lists by
    'sample_token' in a single pass (O(T+N)); per-token insertion order is the
    scan order, so the output is identical.

    :param sorted_timestamp_list: [timestamp, token] pairs sorted by timestamp
    :param sample_data: sample_data records (dicts with 'sample_token')
    :param sample_annotation: sample_annotation records (dicts with 'sample_token')
    :return: list of dicts with keys 'timestamp', 'sample_token',
        'sample_data' and 'annotation_data'
    """
    data_by_token = {}
    for record in sample_data:
        data_by_token.setdefault(record['sample_token'], []).append(record)
    ann_by_token = {}
    for record in sample_annotation:
        ann_by_token.setdefault(record['sample_token'], []).append(record)
    ultimate_list = []
    for timestamp, token in sorted_timestamp_list:
        ultimate_list.append({
            'timestamp': timestamp,
            'sample_token': token,
            # Tokens with no matching records get an empty list, as before.
            'sample_data': data_by_token.get(token, []),
            'annotation_data': ann_by_token.get(token, []),
        })
    return ultimate_list
def timestamp_annotations(ultimate_list):
    """Flatten annotations into [timestamp, instance_token, translation] rows.

    :param ultimate_list: output of create_ultimate_list
    :return: tuple (rows, tokens) where tokens lists the instance_token of
        every row in the same order (duplicates preserved)
    """
    annotations = []
    tokens = []
    for entry in ultimate_list:
        timestamp = entry['timestamp']
        for ann in entry['annotation_data']:
            token = ann['instance_token']
            annotations.append([timestamp, token, ann['translation']])
            tokens.append(token)
    return annotations, tokens
def instance_matching(instance_tokens_list):
    """Assign each distinct instance token a numeric ID (1-based, first-seen order).

    :param instance_tokens_list: tokens, possibly with duplicates
    :return: dict mapping token -> integer ID starting at 1
    """
    mapping = {}
    for token in instance_tokens_list:
        # First occurrence wins; later duplicates keep the original ID.
        if token not in mapping:
            mapping[token] = len(mapping) + 1
    return mapping
def timestamp_objectID_XYZ(ultimate_list, instance_matching_dict):
    """Build [timestamp, object_ID, X, Y, Z] rows from the annotation data.

    :param ultimate_list: output of create_ultimate_list
    :param instance_matching_dict: token -> numeric object ID mapping
    :return: list of [timestamp, object_ID, X, Y, Z] rows
    """
    rows = []
    for entry in ultimate_list:
        timestamp = entry['timestamp']
        for ann in entry['annotation_data']:
            translation = ann['translation']
            rows.append([
                timestamp,
                instance_matching_dict[ann['instance_token']],
                translation[0],
                translation[1],
                translation[2],
            ])
    return rows
def format_frame_ID(ts_obj_ID):
    """Extract the timestamp column from [timestamp, object_ID, X, Y, Z] rows.

    :param ts_obj_ID: list of rows whose first element is the timestamp
    :return: 1-D ndarray of the timestamps, in row order
    """
    timestamps = [row[0] for row in ts_obj_ID]
    # .T is a no-op on a 1-D array but mirrors the original's return shape.
    return np.asarray(timestamps).T
def save_to_text(final, to_save_txt, index):
    """Write trajectory rows as "index,object_id,frame_id,x,y" CSV lines.

    The frame-ID and object-ID columns are truncated to int; the trailing
    z column of each row is intentionally dropped, matching the original.

    :param final: 2-D ndarray of [frame_id, object_id, x, y, z] rows
    :param to_save_txt: output file path
    :param index: scene index used as the dataset ID in every line
    :return: None
    """
    print(to_save_txt)
    rows = np.ndarray.tolist(final)
    lines = []
    for row in rows:
        frame_id = int(row[0])
        obj_id = int(row[1])
        lines.append("{},{},{},{},{}\n".format(index, obj_id, frame_id, row[2], row[3]))
    with open(to_save_txt, 'w') as handle:
        handle.writelines(lines)
def save_to_file(data, files_path_to_sv):
    """Persist an array as a NumPy .npy file, echoing the target path.

    :param data: array (or array-like) to save
    :param files_path_to_sv: destination file path
    :return: None
    """
    print(files_path_to_sv)
    np.save(files_path_to_sv, data)
def lyft_to_formatted(dir):
    '''
    Convert the raw Lyft dataset JSON files under *dir* into per-scene
    trajectory .txt files in train/val/test_obs splits.

    :param dir: dataset directory (must end with a path separator, since
        file names are built by string concatenation)
    :return: None; writes files under dir + 'LYFT/...'

    NOTE(review): the parameter name shadows the builtin ``dir``.
    '''
    ## Load all four JSON tables up front.
    sample_filepath = dir + 'sample.json'
    sample = get_sample(sample_filepath)
    sample_data_filepath = dir + 'sample_data.json'
    sample_data = get_sample_data(sample_data_filepath)
    sample_annotation_filepath = dir + 'sample_annotation.json'
    sample_annotation = get_sample_annotation(sample_annotation_filepath)
    scene_file_path = dir + 'scene.json'
    with open(scene_file_path) as json_file:
        scene = json.load(json_file)
    print('loaded all files')
    # Iterate through the scene records; index doubles as the scene number
    # used for the train/val/test split and the output file name.
    index = 1
    for items2 in scene:
        # Use the scene token to gather this scene's sample tokens.
        scene_token = items2['token']
        sorted_timestamp_list = get_timestamp_list(sample, scene_token)
        print('got sorted_timestamp_list for scene ', index)
        ultimate_list = create_ultimate_list(sorted_timestamp_list, sample_data, sample_annotation)
        print('got ultimate_list for scene ', index)
        ts_ann_list, instance_tokens_list = timestamp_annotations(ultimate_list)
        instance_matching_dict = instance_matching(instance_tokens_list)
        print('got instance_matching_dict for scene ', index)
        ts_obj_ID = timestamp_objectID_XYZ(ultimate_list, instance_matching_dict)
        frame_ID_arr = format_frame_ID(ts_obj_ID)
        formatted_zero_row = zero_row_process(frame_ID_arr)
        # Replace raw timestamps with compact 1..K frame IDs in column 0.
        ts_obj_ID_arr = np.asarray(ts_obj_ID)
        ts_obj_ID_arr[:,0] = formatted_zero_row
        print('got ts_obj_ID_arr for scene ', index)
        # Split by scene number: 1-126 train, 127-144 val, rest test_obs.
        if index <=126:
            to_save_txt = dir + 'LYFT/train/traj{:>04}.txt'.format(index)
        elif index <= 144:
            to_save_txt = dir + 'LYFT/val/traj{:>04}.txt'.format(index)
        else:
            to_save_txt = dir + 'LYFT/test_obs/traj{:>04}.txt'.format(index)
        # save_to_file(ts_obj_ID_arr, files_path_to_sv)
        save_to_text(ts_obj_ID_arr,to_save_txt, index)
        print('saved txt and npy for scene', index)
        index += 1
        # NOTE(review): this break stops after the FIRST scene only —
        # looks like a debugging leftover; remove it to process all scenes.
        break
# Script entry point: point DATA_DIR at the downloaded Lyft dataset.
# It must end with '/' because paths are built by string concatenation.
DATA_DIR = 'directory/' ## provide the directory where the downloaded data is present
lyft_to_formatted(DATA_DIR)
|
[
"9832.cvpr20@gmail.com"
] |
9832.cvpr20@gmail.com
|
5576bd123572dd7c83c5171be4ce5c13ed91cd7f
|
8ea2e7fe85a10bf214c83f250f86a8db7a408134
|
/excursion.py
|
0e54a9374205aa015aaea54b4045792da859bc31
|
[] |
no_license
|
Abdullah-Al-Zishan/deadline-aware-fair-scheduling
|
5d3ace53a473d8e69e52db4b1957de099c8df89f
|
02c351b4c16930f18a63113984e2fc045f1eecd2
|
refs/heads/master
| 2022-02-19T23:04:17.707096
| 2019-10-22T14:53:14
| 2019-10-22T14:53:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
import numpy as np
import utility as util
# Load the saved simulation results and the environment description that
# both share the same file name under result/ and env/.
file_name = 'static.txt'
result_path = 'result/'+file_name
env_path = 'env/'+file_name
result = util.load_dict(result_path)
env = util.load_dict(env_path)
def excursion_for_single_time_slot(trans_load, trans=(1,), time=(1/6.0)):
    """Accumulated transformer overload ("excursion") for one time slot.

    For each transformer index in *trans*, compares the combined load of its
    three phases against 3x the rating from module-level ``env['transRating']``
    and sums any overload, scaled by the slot duration *time*.

    The default for *trans* was the mutable list ``[1]``; it is now the
    equivalent immutable tuple ``(1,)`` (same iteration behavior, no shared
    mutable default).

    :param trans_load: per-phase load values, indexed as load[3*i + phase]
    :param trans: transformer indices to check (default: transformer 1)
    :param time: slot length in hours (default: 10 minutes)
    :return: total overload for this slot, 0 when no transformer is overloaded
    """
    rating = np.array(env['transRating'])
    load = np.array(trans_load)
    total = 0
    for i in trans:
        # diff <= 0 means the three phases together meet or exceed 3x rating.
        diff = 3*rating[3*i] - load[3*i] - load[3*i+1] - load[3*i+2]
        if diff <= 0.0:
            total -= diff
    return total*time
# Total the excursion over all 144 time slots (one day at 10-minute slots)
# for each scheduling algorithm, then print the per-algorithm totals.
# NOTE(review): iterating a set makes the processing order (but not the
# final dict contents) nondeterministic.
algos = {'Central', 'GPA', 'SGPA', 'LLF', 'EDF'}
output = {}
for algo in algos:
    total = 0.0
    for i in range(0,144):
        total += excursion_for_single_time_slot(result[algo][i]['trans_load'])
    output[algo] = total
print(output)
|
[
"azishan@ualberta.ca"
] |
azishan@ualberta.ca
|
1b13d84b5e842b96a4f4095e3c1be2d5f5a0b20c
|
75e3e2779d90f4b3273dbeedbbff8481f83ee68f
|
/ps4.py
|
3ff2689fc36b00526faea82ab77acb28d021e4a7
|
[] |
no_license
|
DragosTeodoru/SD
|
9960671d9ed5a65c23820316f150fcfba2c6e22a
|
1af39acff60646eb7c25f12517b6f9592831cff9
|
refs/heads/master
| 2020-04-02T19:09:00.588488
| 2019-01-22T09:59:22
| 2019-01-22T09:59:22
| 154,724,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
# Collection of small beginner exercises; each task is independent and the
# script simply runs them in sequence, prompting on stdin where needed.
#Task 1: area of a circle from a user-supplied integer radius.
radius=int(input("Radius:"))
x=3.14
pi=x
area=pi*radius**2
print (area)
#Task 2: simple arithmetic with fixed values.
x=4
y=5
a=3*(x+y)
print (a)
#Task 3: read a float radius (value is not used afterwards).
radius=float(input("Enter the radius:"))
#Task 4: average of three literals.
print((3+4+5)/3)
#Task 5: difference printed with two decimal places.
x=19.93
y=20.00
z=y-x
print("%.2f" %z)
#Task 6: cube of x.
x=2
# NOTE(review): the next line prints the literal text "x, squared is, x*x"
# rather than the computed square — presumably unintended; verify against
# the exercise statement.
print("x, squared is, x*x")
xcubed=x**3
print (xcubed)
#Task 7: product and square root of the absolute difference.
from math import sqrt
x=2
y=4
print("The product of ", x, "and", y, " is ", x * y)
print("The root of their difference is ", sqrt(abs(x - y)))
#Task 8: greet the user with next year's age.
name=input("Enter your name: ")
age=int(input("Enter your age: "))
age=age+1
print ("Hello " + name +", next year you will be " + str(age) +" years old!")
#Task 9: width/precision formatting examples.
radius= ("Radius is: %6.d" %2)
area=("Area is: %8.2f" %12.5678)
print (radius)
print (area)
#Task 10: integer vs. float division and remainders.
p=17
q=18
print(p//10+p%10)
print(p%2+q% 2)
print((p+q)//2)
print((p+q)/2.0)
|
[
"noreply@github.com"
] |
DragosTeodoru.noreply@github.com
|
ab6b93883fbd3ab158e745475577ce299961a020
|
a018a4969678b309b9778d74cffea2fac8731e62
|
/mynavi_sample.py
|
1371b140f3ec0170ec6b85f6e5a4dc434ff3e379
|
[] |
no_license
|
aoi-eita/task2
|
4692d8cda5e32636e1cced6382d4484a5e02d55d
|
e5233abfda47dcd6667b02622a2013452ee25b35
|
refs/heads/main
| 2023-06-06T19:18:31.430530
| 2021-07-19T06:19:28
| 2021-07-19T06:19:28
| 387,359,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,285
|
py
|
import os
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
import time
import pandas as pd
import datetime
from bs4 import BeautifulSoup
# Templates for the log file and the exported CSV; {datetime} and
# {search_keyword} are filled in with str.format at write time.
LOG_FILE_PATH = "./log/log_{datetime}.log"
EXP_CSV_PATH="./exp_list_{search_keyword}_{datetime}.csv"
# One log file per run, named by the process start time.
log_file_path=LOG_FILE_PATH.format(datetime=datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
# Chromeを起動する関数
def set_driver(driver_path, headless_flg):
if "chrome" in driver_path:
options = ChromeOptions()
else:
options = Options()
# ヘッドレスモード(画面非表示モード)をの設定
if headless_flg == True:
options.add_argument('--headless')
# 起動オプションの設定
options.add_argument(
'--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36')
# options.add_argument('log-level=3')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
options.add_argument('--incognito') # シークレットモードの設定を付与
# ChromeのWebDriverオブジェクトを作成する。
if "chrome" in driver_path:
return Chrome(ChromeDriverManager().install(), options=options)
else:
return Firefox(executable_path=os.getcwd() + "/" + driver_path,options=options)
# ログの作成
def log(txt):
now=datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
logStr = '[%s:%s] %s' % ('log',now,txt)
# ログの出力
with open(log_file_path , 'a' ,encoding='utf-8_sig') as f:
f.write(logStr + '\n')
print(logStr)
# main処理
def main():
log("処理開始")
search_keyword =input("検索したいワードを入力して下さい >>> ")
log(f"検索キーワード:{search_keyword}")
# driverを起動
if os.name == 'nt': #Windows
driver = set_driver("chromedriver.exe", False)
elif os.name == 'posix': #Mac
driver = set_driver("chromedriver", False)
# Webサイトを開く
driver.get("https://tenshoku.mynavi.jp/")
time.sleep(5)
# ポップアップを閉じる
driver.execute_script('document.querySelector(".karte-close").click()')
time.sleep(5)
# ポップアップを閉じる
driver.execute_script('document.querySelector(".karte-close").click()')
# 検索窓に入力
driver.find_element_by_class_name(
"topSearch__text").send_keys(search_keyword)
# 検索ボタンクリック
driver.find_element_by_class_name("topSearch__button").click()
month_salary_list = []
year_salary_list = []
name_list = []
count = 1
success = 0
fail = 0
while True:
# ソースコードを取得
time.sleep(10)
# HTMLをパースする
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
# 会社ブロック単位でitemsに格納
items = soup.findAll(class_="cassetteRecruit__content")
#try文でエラーを回避
# forとtryを併用する時はtryを中に入れて使用 そうしないと止まってしまう
for i in range(len(items)):
try:
name_list.append(items[i].findAll(class_="cassetteRecruit__name")[0].text.split("|")[0])
month_salary_list.append(items[i].findAll(class_="tableCondition__body")[3].text)
year_salary_list.append(items[i].findAll(class_="tableCondition__body")[4].text)
log(f"{count}件目成功:{name_list[i]}")
success+=1
except Exception as e:
year_salary_list.append("データなし")
log(f"{count}件目失敗:{name_list[i]}")
log(e)
fail+=1
finally:
count+=1
if len(driver.find_elements_by_class_name("iconFont--arrowLeft")) > 0:
next_url = driver.find_elements_by_class_name("iconFont--arrowLeft")[0].get_attribute("href")
driver.get(next_url)
else:
log("終了しました")
break
# 空のDataFrame作成
df = pd.DataFrame()
# 1ページ分繰り返し
now = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
for (name,month_salary,year_salary) in zip(name_list,month_salary_list,year_salary_list):
df = df.append(
{"会社名": name,
"月給": month_salary,
"年収": year_salary},
ignore_index=True)
now = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
df.to_csv(EXP_CSV_PATH.format(search_keyword=search_keyword,datetime=
now),encoding="utf-8-sig")
log(f"処理完了 成功件数: {success} 件 / 失敗件数: {fail} 件")
# Run main() only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()
|
[
"aoi.eita66@gmail.com"
] |
aoi.eita66@gmail.com
|
be5addc90cf01e19f23dc3dbfa0e78b3efc89c5d
|
b1aba90c839deec0641b260db2afc1d579fa0525
|
/Ejercicios_Programacion_Funcional_Python/Ejercicios propuestos iniciación a la programación/Ejercicio12.py
|
49d943ff4db64fed6710f9e991fc3fcc1d3e45ad
|
[] |
no_license
|
Globix/Ejercicios_Programacion_FP_Dual
|
86102cff6bc12cfee21631bf39fc803c6c3ac7c6
|
4c89ab02e73e045228d67f8523ee89aa59031fbe
|
refs/heads/master
| 2021-01-18T22:36:51.138016
| 2016-06-05T18:35:46
| 2016-06-05T18:35:46
| 49,504,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,873
|
py
|
# Ejercicio que pide dos números y comprueba unas condiciones prestablecidas.
# Si dichos números las pasan se realiza la suma de ambos y se muestra por pantalla.
#------Subrutinas a usar---------
# - Subrutina: sumaCondicinal
# - Descripción: Suma 2 numeros solo si se cumplen las siguientes 3 condiciones:
# a) Los dos son pares
# b) El primero es menor que cincuenta
# c) El segundo está dentro del intervalo cerrado 100-500.
# - Entrada: 2 numeros
# - Salidas:
# -> -1: Si no se cumple la primera condición
# -> -2: Si no se cumple la segunda condición
# -> -3: Si no se cumple la tercera condición
# -> Default: Resultado con la suma de los 2 numeros
def sumaPositivos(primerNumero, segundoNumero):
    """Conditionally add two numbers.

    Per the stated spec, the sum is returned only when all three hold:
      a) both numbers are even,
      b) the first number is strictly less than fifty,
      c) the second number lies in the closed interval [100, 500].

    Returns:
        -1 if either number is odd,
        -2 if the first number is not less than 50,
        -3 if the second number is outside [100, 500],
        otherwise primerNumero + segundoNumero.
    """
    if primerNumero % 2 == 1 or segundoNumero % 2 == 1:
        return -1
    # The spec requires the first number to be strictly less than fifty;
    # the original used `> 50`, which wrongly accepted exactly 50.
    elif primerNumero >= 50:
        return -2
    elif segundoNumero < 100 or segundoNumero > 500:
        return -3
    else:
        return primerNumero + segundoNumero
#------Fin de las subrutinas------
#-------Programa principal--------
# Read two integers, run the conditional sum and report the outcome.
print ("Vamos a realizar la suma de 2 numeros positivos solo si se cumple que: \n - Los 2 son pares. \n - El primero es menor que cincuenta. \n - El segundo está dentro del intervalo cerrado 100-500.\n")
primerNumero = int(input("Introduzca un número: "))
segundoNumero = int(input("Introduzca un segundo número: "))
print("\n")
valor = sumaPositivos(primerNumero, segundoNumero)
if valor == -1:
    print ("No se calcula la suma porque un número és impar o ambos són impares.")
elif valor == -2:
    # Fixed message: -2 means the first number is NOT less than 50; the
    # original text wrongly said "es menor que 50".
    print ("No se calcula la suma porque el primer número no es menor que 50.")
elif valor == -3:
    print ("No se calcula la suma porque el segundo número no esta entre el intervalo 100-500.")
else:
    print ("La suma de los dos números es: " + str(valor))
#---------Fin del programa--------
|
[
"globix28@gmail.com"
] |
globix28@gmail.com
|
64505008f1caf4928dc0111e3fe70d2b73314ebb
|
1c0224741806a1cff56e17c61a76caf565cb47ea
|
/testPyGraph.py
|
155305a2d2d7f6d9dfca3916dd1e62aad4ba7734
|
[] |
no_license
|
minu7/drone-simulator
|
500fdd1fd13c1f75d1af45da2a415961fb01d5af
|
cad69f97f71f3055732863b6b637f846cd62bf5a
|
refs/heads/master
| 2020-06-09T04:15:03.870311
| 2019-07-09T10:55:06
| 2019-07-09T10:55:06
| 193,368,397
| 27
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg
import threading
import time
# Minimal pyqtgraph demo: show a gray image and blacken a region from a
# background thread after two seconds, to verify live image updates.
app = QtGui.QApplication([])
## Create window with GraphicsView widget
w = pg.GraphicsView()
w.show()
w.resize(800,800)
w.setWindowTitle('test')
view = pg.ViewBox()
w.setCentralItem(view)
## lock the aspect ratio
view.setAspectLocked(True)
## Create image item: a 200x200 RGB image filled with gray (200,200,200).
data = np.full((200,200, 3), 200)
img = pg.ImageItem(data)
view.addItem(img)
def thread_function():
    # After a 2 s delay, zero a rectangular slice of the shared buffer and
    # ask the ImageItem to redraw it.
    # NOTE(review): this mutates GUI state from a non-GUI thread — works in
    # this demo but is not thread-safe in general Qt usage; confirm.
    time.sleep(2)
    print("ciao")
    data[0:40, 40:200, 0:3] = 0
    img.updateImage()
x = threading.Thread(target=thread_function)
x.start()
## Start Qt event loop unless running in interactive mode or using pyside.
QtGui.QApplication.instance().exec_()
|
[
"filippominutella@gmail.com"
] |
filippominutella@gmail.com
|
bfa44cfca13ef1bfbe5e6a567fc8c7957ea9affc
|
4516fffb6f87b3b0ea7e3bee27b214094c6cb6f9
|
/implement_queue_using_stacks.py
|
69c46153bc80b68cd6330857e2710948dc6758cf
|
[] |
no_license
|
jianminchen/leetcode-8
|
b59f488f24cf53cdb0e04a214e49c9c6f72cacf5
|
1459baacd8c7264c2b62a2c6b5ce0001dae0aba7
|
refs/heads/master
| 2021-01-15T08:47:36.084748
| 2015-10-18T01:10:43
| 2015-10-18T01:10:43
| 49,403,008
| 1
| 1
| null | 2016-01-11T04:56:45
| 2016-01-11T04:56:45
| null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
class Queue:
    """FIFO queue implemented on top of a single list used as a stack.

    Elements are stored newest-first, so the front of the queue always sits
    at the END of the list: peek/pop are O(1) and push is O(n).
    """

    def __init__(self):
        # Front of the queue lives at the end of this list.
        self.stack = []

    # @param x, an integer
    # @return nothing
    def push(self, x):
        """Enqueue x (O(n): every existing element shifts by one)."""
        self.stack.insert(0, x)

    # @return nothing
    def pop(self):
        """Remove the front element; returns nothing (per the LeetCode API)."""
        self.stack.pop()

    # @return an integer
    def peek(self):
        """Return the front element without removing it."""
        return self.stack[-1]

    # @return a boolean
    def empty(self):
        """Return True when the queue contains no elements."""
        return self.stack == []
|
[
"qirongismonica@gmail.com"
] |
qirongismonica@gmail.com
|
7ba19beea4a7e0a6ed0063dd1242df88be8e9272
|
b56109a8801d0c1ec15272fa9c397fbc707d3cac
|
/jenkins/files/server.py
|
907b5086dc60f817f65833800c73d30c149edb1b
|
[] |
no_license
|
sgtux/devsecops-lab
|
dd7d1eecb511e175e2d4509f395e0740d87389b2
|
3367f7f689c3ea692c0681fe275183209fa5bc2e
|
refs/heads/main
| 2023-03-20T08:47:58.344199
| 2021-03-15T13:42:35
| 2021-03-15T13:42:35
| 312,301,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
#!/usr/bin/python3
# Minimal Flask app that serves a static landing page for the Jenkins host.
from flask import Flask, render_template_string
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the static landing page at the site root."""
    return render_template_string("<html><body><h1>Jenkins Server!</h1></body></html>")
if __name__ == "__main__":
    # Bind to all interfaces on port 80; debug mode kept off.
    app.run(debug=False, host="0.0.0.0", port=80)
|
[
"sgtux@protonmail.com"
] |
sgtux@protonmail.com
|
c348471e918a6fb35fc29ad895d84d497f6413fc
|
927efa87cbcd55ad9f22871665dd4710f38f1b18
|
/expect_log_parsing/mycfg2html.py
|
9e9a9e8f7bd77d1a858cff5e8eb06bea6a182309
|
[] |
no_license
|
robertlupinek/python-examples
|
1e163fa9a796e8110fe4cc8ab7e4f87e59d3168d
|
90e0ea68f2a9cf9512023a23faeb210e2d017479
|
refs/heads/master
| 2022-02-10T00:31:46.683443
| 2019-09-12T20:32:52
| 2019-09-12T20:32:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,308
|
py
|
#!/usr/bin/python
import MySQLdb as mdb
import os, sys, csv
class MySqlCon:
    """Thin Python 2 wrapper around a MySQLdb connection and cursor.

    Opens the connection in the constructor; exits the process with status 1
    if the connection fails.
    """
    def __init__(self ,dbhost, dbuser, dbpass, dbdatabase ):
        try:
            self.con = mdb.connect(dbhost, dbuser, dbpass, dbdatabase );
            self.cur = self.con.cursor()
        except mdb.Error, e:
            print "Error %d: %s" % (e.args[0],e.args[1])
            sys.exit(1)
    def execsql(self,sql):
        """Execute a raw SQL string on the shared cursor."""
        self.cur.execute(sql)
    def returnone(self):
        """Fetch the next row of the last executed query (or None)."""
        return self.cur.fetchone()
def main ():
mydb = MySqlCon('localhost','admin','admin','network_mess')
#Recurse one level in the directory specified get the file names and extention...
path="/var/www/html/cfg2html/"
dir_list=os.listdir(path)
old_line = ''
start_route = 0;
truncate = "truncate table host_interfaces;"
try:
mydb.execsql( truncate )
next_id = mydb.returnone()
except Exception, e:
print str(e)
sys.exit
truncate = "truncate table host_routes;"
try:
mydb.execsql( truncate )
next_id = mydb.returnone()
except Exception, e:
print str(e)
sys.exit
for fname in dir_list:
dir_list2=os.listdir(path+fname)
for sub_fname in dir_list2:
sub_f, sub_ext = os.path.splitext(sub_fname)
#If the extention indicate html we know we have the right file to start parseing!
#Open that sucker up and get to work parsing interface and route info!
if sub_ext == '.html':
print sub_f + sub_ext
open_file = open(path+fname+'/'+sub_fname, 'r')
#Set server specific information here.
print sub_f + "'s Network Config"
get_next_sql = "select IFNULL(max(id),-1) + 1 from host_interfaces;"
try:
mydb.execsql( get_next_sql )
next_id = mydb.returnone()
except Exception, e:
print str(e)
sys.exit
print 'Host id = ' + str(next_id[0]);
#All individual interface and route information goes here
for line in open_file:
#Get the interface info
if line.strip().startswith('inet addr:'):
#Create get rid of pesky : and turn it into white spaces so we can do consistent splits
int_config = line.replace(':',' ').split()
int_name = old_line.split()
try:
print int_name[0].strip('<PRE>') + ' ' + int_config[2] + ' ' + int_config[6]
values = "'" + str(next_id[0]) + "','" + sub_f + "','" + int_name[0].strip('<PRE>') + "','" + int_config[2] + "','" + int_config[6] + "'"
insert_sql = "insert into host_interfaces ( id,hostname, interface, ip_address, netmask ) values ( " + values + ");"
print insert_sql
mydb.execsql( insert_sql )
except Exception, e:
print str(e)
#Get route info
if line.startswith('<A'):
start_route = 0
if start_route == 1:
try:
route = line.split()
values = "'" + str(next_id[0]) +"','" + sub_f + "','" + route[0] + "','" + route[1] + "','" + route[2] + "','" + route[7].strip('</PRE>') + "'"
insert_sql = "insert into host_routes ( id,hostname, dest_ip,gate_ip,mask_ip,interface ) values ( " + values + ");"
print insert_sql
mydb.execsql(insert_sql)
except Exception, e:
print str(e)
if line.startswith('Destination Gateway'):
start_route = 1
old_line = line
open_file.close
# Entry point: run main() and hard-exit with status 1 on any failure.
if __name__ == '__main__':
    try:
        main()
    except Exception, e:
        print str(e)
        #traceback.print_exc()
        os._exit(1)
|
[
"robert.lupinek@macys.com"
] |
robert.lupinek@macys.com
|
3fb4779bf3b2aebd8605856f9a34e4d73c6915b9
|
4e51e654a42f56712c9087ae056b6741542a5713
|
/programs/timer_on.py
|
01a822ba732780cb4fe284dd06fe072b51b2c5a1
|
[
"MIT"
] |
permissive
|
shadhali/tuya-air-purifier
|
85a3b4237af5bcc532b309969bdab9f351b11ecf
|
8a03b8bcd3514666c1eb685b243e9d1ced40b3f3
|
refs/heads/main
| 2023-06-18T05:06:59.416211
| 2021-07-22T14:22:34
| 2021-07-22T14:22:34
| 388,489,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
import logging
import time
from env import ENDPOINT, ACCESS_ID, ACCESS_KEY, USERNAME, PASSWORD
from tuya_iot import (
TuyaOpenAPI,
ProjectType,
TuyaOpenMQ,
TuyaDeviceManager,
TuyaHomeManager,
TuyaDeviceListener,
TuyaDevice,
TuyaTokenInfo,
tuya_logger
)
tuya_logger.setLevel(logging.DEBUG)
# Init: authenticate against the Tuya cloud API and open the MQ channel.
openapi = TuyaOpenAPI(ENDPOINT, ACCESS_ID, ACCESS_KEY, ProjectType.INDUSTY_SOLUTIONS)
openapi.login(USERNAME, PASSWORD)
openmq = TuyaOpenMQ(openapi)
openmq.start()
deviceManager = TuyaDeviceManager(openapi, openmq)
# NOTE(review): DEVICE_ID is never defined or imported in this file, so
# this line raises NameError at runtime — it should come from env.py or a
# constant; TODO confirm and define it.
device = deviceManager.deviceMap.get(DEVICE_ID)
# The "timer": block for the delay, then switch the purifier on.
time.sleep(1000) #Time in seconds
deviceManager.sendCommands(device.id, [{'code': 'switch', 'value': True}])
print('status: ', device.status)
|
[
"noreply@github.com"
] |
shadhali.noreply@github.com
|
0572c4956642dddc052fb946e166664438126575
|
ff39af62524743f7f2358f7a96496a5be9ff65b9
|
/uniproxy/serve.py
|
39260e8f6ef86a7f1a2cc53c9029b7c771cb3356
|
[] |
no_license
|
wlb/antigfw
|
9c23945fa686ab8d44afaa5ac1031467e38522a0
|
64182271af3ee05d47335122deca4ebf7d5bf176
|
refs/heads/master
| 2021-01-15T18:45:02.099820
| 2012-05-25T16:42:23
| 2012-05-25T16:42:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,309
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@date: 2012-04-26
@author: shell.xu
'''
import logging
import socks, proxy, dofilter
from http import *
from os import path
from urlparse import urlparse
from contextlib import contextmanager
from gevent import socket
__all__ = ['ProxyServer',]
def import_config(*cfgs):
    """Execute each config file as Python and merge the resulting globals.

    Later files in *cfgs* take precedence (they are applied last via
    reversed iteration). Missing/unreadable files are logged and skipped.
    Returns only names not starting with '_'.

    NOTE(review): config files are exec'd with full Python privileges —
    only load trusted files.
    """
    d = {}
    for cfg in reversed(cfgs):
        try:
            with open(path.expanduser(cfg)) as fi:
                eval(compile(fi.read(), cfg, 'exec'), d)
        except (OSError, IOError): logger.error('import config')
    return dict([(k, v) for k, v in d.iteritems() if not k.startswith('_')])
def initlog(lv, logfile=None):
    """Configure the root logger at level *lv*.

    Logs to *logfile* when given, otherwise to stderr, using a compact
    time/name/level format.
    """
    rootlog = logging.getLogger()
    if logfile: handler = logging.FileHandler(logfile)
    else: handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter(
            '%(asctime)s,%(msecs)03d %(name)s[%(levelname)s]: %(message)s',
            '%H:%M:%S'))
    rootlog.addHandler(handler)
    rootlog.setLevel(lv)
# Module-level logger used throughout the proxy server.
logger = logging.getLogger('server')
@contextmanager
def with_sock(addr, port):
    """Context manager yielding a TCP socket connected to (addr, port).

    The socket is always closed on exit, even if the body raises.
    """
    sock = socket.socket()
    sock.connect((addr, port))
    try: yield sock
    finally: sock.close()
def mgr_default(self, req, stream):
    """Fallback management handler: drain the request body and reply 404."""
    req.recv_body(stream)
    response_http(stream, 404, body='Page not found')
class ProxyServer(object):
    """HTTP proxy that routes filtered domains through upstream SOCKS proxies.

    Requests whose hostname matches the domain filter go through the least
    loaded upstream proxy; everything else connects directly. URLs with no
    netloc are dispatched to registered management pages (srv_urls).
    """
    # Maps a config 'proxytype' string to its connection-manager class.
    proxytypemap = {'socks5': socks.SocksManager}
    # Management URL path -> handler, filled by @ProxyServer.register.
    srv_urls = {}
    def __init__(self, *cfgs):
        # cfgs: config file paths, re-read on every init().
        self.cfgs = cfgs
        self.sockcfg = []
        self.config = {}
        self.filter = dofilter.DomainFilter()
        # Human-readable descriptions of in-flight requests.
        self.worklist = []
    @classmethod
    def register(cls, url):
        """Class decorator factory registering a management-page handler."""
        def inner(func):
            cls.srv_urls[url] = func
            return func
        return inner
    @contextmanager
    def with_worklist(self, desc):
        """Track *desc* in the worklist for the duration of the request."""
        self.worklist.append(desc)
        try: yield
        finally: self.worklist.remove(desc)
    def load_socks(self):
        """(Re)build the upstream proxy managers from the current config.

        When no explicit 'socks' list is configured but 'max_conn' is set,
        upstream entries are derived from the 'servers' list instead.
        """
        socks_srv = self.config.get('socks', None)
        max_conn = self.config.get('max_conn', None)
        if not socks_srv and max_conn:
            def ssh_info(srv):
                # NOTE(review): a server with neither 'sockport' nor
                # 'listenport' makes this return None and the unpacking
                # below fail — presumably configs always have one; confirm.
                if 'sockport' in srv:
                    return 'socks5', '127.0.0.1', srv['sockport'], max_conn
                elif 'listenport' in srv:
                    return 'http', '127.0.0.1', srv['listenport'][0], max_conn
            socks_srv = [ssh_info(srv) for srv in self.config['servers']]
        del self.sockcfg[:]
        for proxytype, host, port, max_conn in socks_srv:
            self.sockcfg.append(self.proxytypemap[proxytype](
                    host, port, max_conn=max_conn))
    def load_filters(self):
        """Reload the domain filter from all configured filter files."""
        self.filter.empty()
        for filepath in self.config['filter']: self.filter.loadfile(filepath)
    def init(self):
        """Load config, set up logging/proxies/filters; return bind address."""
        self.config.update(import_config(*self.cfgs))
        initlog(getattr(logging, self.config.get('loglevel', 'WARNING')),
                self.config.get('logfile', None))
        logger.info('init ProxyServer')
        self.load_socks()
        self.load_filters()
        return self.config.get('localip', ''), self.config.get('localport', 8118)
    def get_socks_factory(self):
        """Pick the upstream manager with the fewest active connections."""
        return min(self.sockcfg, key=lambda x: x.size()).with_socks
    def do_req(self, req, stream):
        """Dispatch one HTTP request: CONNECT tunnel, plain proxy, or manager."""
        u = urlparse(req.uri)
        if req.method.upper() == 'CONNECT':
            hostname, func = u.path, proxy.connect
        else:
            # No netloc means a local management URL, not a proxied request.
            if not u.netloc:
                logger.info('manager %s' % (u.path,))
                return self.srv_urls.get(u.path, mgr_default)(self, req, stream)
            hostname, func = u.netloc, proxy.http
        # Filtered hostnames (port stripped) go through SOCKS.
        usesocks = hostname.split(':', 1)[0] in self.filter
        reqid = '%s %s %s' % (req.method, req.uri.split('?', 1)[0],
                              'socks' if usesocks else 'direct')
        with self.with_worklist(reqid):
            logger.info(reqid)
            return func(req, stream,
                        self.get_socks_factory() if usesocks else with_sock)
    def handler(self, sock, addr):
        """Per-connection loop: serve keep-alive requests until EOF/error."""
        stream = sock.makefile()
        try:
            while self.do_req(recv_msg(stream, HttpRequest), stream): pass
        except (EOFError, socket.error): pass
        except Exception, err: logger.exception('unknown')
        sock.close()
    def final(self): logger.info('system exit')
|
[
"shell909090@gmail.com"
] |
shell909090@gmail.com
|
91a646c9d70867ddf41db0dd14ae731a51c83899
|
463a78480656e0ebd6c99630ec1b6c5e00c65554
|
/ml/hw-01/main.py
|
f09fb2ee4dd4aae79c6f6273dab8413353611289
|
[] |
no_license
|
anarsiel/University
|
689fd588a6ed53e8ceec565c60d4fab15f6d5cfe
|
3011e1f83c943c1436aadaa6507388f3937cc6aa
|
refs/heads/master
| 2023-03-09T17:05:44.349307
| 2022-07-05T14:09:50
| 2022-07-05T14:09:50
| 187,517,045
| 12
| 1
| null | 2023-03-03T01:30:49
| 2019-05-19T19:05:36
|
Java
|
UTF-8
|
Python
| false
| false
| 6,342
|
py
|
import collections
import pandas as pd
import numpy as np
import matplotlib.pyplot as ppl
def minmax(dataset):
    """Per-feature [min, max] pairs, excluding the final (label) column.

    :param dataset: 2-D array whose last column holds class labels
    :return: list of [min, max] pairs, one per feature column
    """
    bounds = []
    n_cols = len(dataset[0])
    for col in range(n_cols - 1):  # last column is the label — skipped
        column = dataset[:, col]
        bounds.append([column.min(), column.max()])
    return bounds
def normalize(dataset, minmax):
    """Min-max scale every feature column to [0, 1] in place.

    The last column (class label) is left untouched. Mutates and also
    returns *dataset*.

    :param dataset: 2-D array of rows ending with a label column
    :param minmax: per-feature [min, max] pairs, as produced by minmax()
    :return: the same dataset object, scaled
    """
    for row in dataset:
        for col in range(len(row) - 1):  # skip trailing label
            lo, hi = minmax[col]
            row[col] = (row[col] - lo) / (hi - lo)
    return dataset
def minkovskiy_distance(a, b, p):
    """Minkowski distance of order *p* between feature vectors.

    The final component of each vector is the class label and is excluded
    from the distance.
    """
    distance = 0
    for i in range(len(a) - 1):
        distance += abs(a[i] - b[i]) ** p
    return distance ** (1 / p)


def euclidean_distance(a, b):
    """Euclidean (L2) distance — Minkowski with p=2.

    Fix: the original called minkovskiy_distance with p=1, which is the
    Manhattan metric, not Euclidean.
    """
    return minkovskiy_distance(a, b, 2)


def manhattan_distance(a, b):
    """Manhattan (L1) distance — Minkowski with p=1.

    Fix: the original called minkovskiy_distance with p=2 (Euclidean).
    """
    return minkovskiy_distance(a, b, 1)
def default_kernel(x, k, p1, p2, do_abs=False):
    """Generic polynomial kernel k * (1 - x**p1)**p2 with support [-1, 1].

    Returns 0 outside the support. With do_abs=True, |x| is used so the
    kernel is symmetric even for odd exponents.
    """
    if abs(x) > 1:
        return 0
    value = abs(x) if do_abs else x
    return k * (1 - value ** p1) ** p2


def uniform_kernel(a):
    """Uniform (rectangular) kernel: 1/2 on [-1, 1]."""
    return default_kernel(a, 1 / 2, 0, 0)


def triangular_kernel(a):
    """Triangular kernel: 1 - |a| on [-1, 1]."""
    return default_kernel(a, 1, 1, 1, True)


def epanechnikov_kernel(a):
    """Epanechnikov kernel: 3/4 * (1 - a^2) on [-1, 1]."""
    return default_kernel(a, 3 / 4, 2, 1)


def biweight_kernel(a):
    """Biweight (quartic) kernel: 15/16 * (1 - a^2)^2 on [-1, 1]."""
    return default_kernel(a, 15 / 16, 2, 2)


def tricube_kernel(a):
    """Tricube kernel: 70/81 * (1 - |a|^3)^3 on [-1, 1]."""
    return default_kernel(a, 70 / 81, 3, 3, True)
def launch(window_width, distance, kernel, data_X, data_Y_naive,
           data_Y_one_hot):
    """Leave-one-out kernel-weighted kNN evaluation.

    For every sample, predicts its label from all other samples using a
    Parzen-window regression with the given distance and kernel, and
    accumulates confusion counts for two label encodings (naive numeric
    and one-hot). Returns the pair (f1_naive, f1_one_hot).
    """
    def launch_naive(weights, data_Y):
        # Weighted average of the numeric labels (Nadaraya-Watson style);
        # 0 when every weight is zero (no neighbor inside the window).
        result = 0
        for idx in range(weights.shape[0]):
            result += data_Y[idx] * weights[idx]
        if (np.sum(weights) == 0):
            return 0
        return result / np.sum(weights)
    def launch_one_hot(weights, data_Y):
        # NOTE(review): `result` is a Python list, so `result += ndarray`
        # EXTENDS the list with the array's elements instead of adding
        # element-wise — the list grows every inner iteration and only the
        # first data_Y.shape[1] slots are later normalized/argmax'd.
        # Looks like the intent was
        # `result[one_hot_idx] += data_Y[idx][one_hot_idx] * weights[idx]`;
        # confirm before changing, as all reported one-hot F1 scores
        # depend on the current behavior.
        result = [0] * data_Y.shape[1]
        for one_hot_idx in range(data_Y.shape[1]):
            for idx in range(data_Y.shape[0]):
                result += data_Y[idx] * weights[idx]
            if (np.sum(weights) == 0):
                result[one_hot_idx] = 0
            else:
                result[one_hot_idx] /= np.sum(weights)
        return result
    def get_distance(test_X, data_X):
        # Distance from the held-out point to every training point.
        dist_array = np.empty(data_X_learn.shape[0])
        for idx in range(data_X.shape[0]):
            dist_array[idx] = distance(test_X, data_X[idx])
        return dist_array
    def get_weights(distance):
        # Kernel weight per training point, distances scaled by the window.
        weights = np.empty(distance.shape[0])
        for idx in range(distance.shape[0]):
            weights[idx] = kernel(distance[idx] / window_width)
        return weights
    # Confusion counts: unsuffixed = naive encoding, '2' = one-hot.
    tps, tps2 = 0, 0
    fps, fps2 = 0, 0
    tns, tns2 = 0, 0
    fns, fns2 = 0, 0
    for test_element_index in range(data_X.shape[0]):
        test_element_naive = (data_X[test_element_index],
                              data_Y_naive[test_element_index])
        test_element_one_hot = (data_X[test_element_index],
                                data_Y_one_hot[test_element_index, :])
        # Leave-one-out split: everything except the current index.
        data_X_learn = np.concatenate(
            (data_X[:test_element_index],
             data_X[test_element_index + 1:]),
            axis=0
        )
        data_Y_learn_naive = np.concatenate(
            (data_Y_naive[:test_element_index],
             data_Y_naive[test_element_index + 1:]),
            axis=0
        )
        data_Y_learn_one_hot = np.concatenate(
            (data_Y_one_hot[:test_element_index],
             data_Y_one_hot[test_element_index + 1:]),
            axis=0
        )
        distances = get_distance(test_element_naive[0], data_X_learn)
        weights = get_weights(distances)
        naive_result = launch_naive(weights, data_Y_learn_naive)
        one_hot_result = launch_one_hot(weights, data_Y_learn_one_hot)
        # NOTE(review): the fixed +1/+2 increments below treat every match
        # as 1 TP + 2 TN (3-class assumption baked in); verify this is the
        # intended multi-class accounting.
        if naive_result == test_element_naive[1]:
            tps += 1
            fps += 0
            tns += 2
            fns += 0
        else:
            tps += 0
            fps += 1
            tns += 1
            fns += 1
        if np.argmax(one_hot_result) == np.argmax(test_element_one_hot[1]):
            tps2 += 1
            fps2 += 0
            tns2 += 2
            fns2 += 0
        else:
            tps2 += 0
            fps2 += 1
            tns2 += 1
            fns2 += 1
    presicion = tps / (tps + fps)
    recall = tps / (tps + fns)
    presicion2 = tps2 / (tps2 + fps2)
    recall2 = tps2 / (tps2 + fns2)
    f1_naive, f1_one_hot = 0, 0
    if presicion + recall != 0:
        f1_naive = 2 * (presicion * recall) / (presicion + recall)
    if presicion2 + recall2 != 0:
        f1_one_hot = 2 * (presicion2 * recall2) / (presicion2 + recall2)
    return f1_naive, f1_one_hot
# ---- Script driver: load data, balance classes, normalize, and plot ----
# leave-one-out F1 against window width for each kernel.
filename = 'red-wine-quality.csv'
dataset = pd.read_csv(filename).to_numpy()
# Keep at most 100 samples of each of the three quality classes 5, 6, 7.
class5_indexes = [i for i, x in enumerate(dataset) if x[-1] == 5.][:100]
class6_indexes = [i for i, x in enumerate(dataset) if x[-1] == 6.][:100]
class7_indexes = [i for i, x in enumerate(dataset) if x[-1] == 7.][:100]
dataset5 = dataset[class5_indexes]
dataset6 = dataset[class6_indexes]
dataset7 = dataset[class7_indexes]
dataset = np.concatenate((dataset5, dataset6, dataset7), axis=0)
min_max = minmax(dataset)
dataset = normalize(dataset, min_max)
data_X, data_Y = [], []
for dataline in dataset:
    # NOTE(review): x drops the last TWO columns while minmax/normalize
    # treat only the final column as the label — confirm [:-2] is intended.
    x, y = dataline[:-2], dataline[-1]
    data_X.append(x), data_Y.append(y)
# Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float is the documented drop-in replacement (the alias always
# meant Python float).
data_X = np.array(data_X, float)
data_Y_naive = np.array(data_Y, float)
data_Y_one_hot = pd.get_dummies(np.array(data_Y, float)).to_numpy()
widths = np.arange(0.1, 1, 0.1)
distance_funcs = [euclidean_distance, manhattan_distance]
kernel_funcs = [uniform_kernel, triangular_kernel, epanechnikov_kernel,
                biweight_kernel, tricube_kernel]
for kernel in kernel_funcs:
    print(kernel)
    f1s_naive, f1s_one_hot = [], []
    for width in widths:
        print(width)
        f1_naive, f1_one_hot = launch(width, manhattan_distance, kernel, data_X,
                                      data_Y_naive, data_Y_one_hot)
        f1s_naive.append(f1_naive)
        f1s_one_hot.append(f1_one_hot)
    # One figure per kernel: F1 vs window width for both label encodings.
    ppl.xlabel('window width')
    ppl.ylabel('f1')
    ppl.title(kernel.__name__)
    ppl.plot(widths, f1s_naive, label='Naive')
    ppl.plot(widths, f1s_one_hot, label='One Hot')
    ppl.show()
|
[
"dimitrovblagoi@gmail.com"
] |
dimitrovblagoi@gmail.com
|
b56fe4b9e938161ed9a2691f1220a78815187a06
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_121/ch52_2020_10_06_21_28_51_510149.py
|
070609e4ce46802cdb35f1ba28594210c774a35e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
def calcula_total_da_nota(preco, quantidade):
resultado=0
i=0
while i<len(preco) or i<len(quantidade):
resultado+=preco[i]*quantidade[i]
i+=1
return resultado
|
[
"you@example.com"
] |
you@example.com
|
16988f6ae95a20214ed8766601da8e877c8c96ae
|
ee7382a5ced14bed741d3263431b30f6d8b803b0
|
/9_string.py
|
c9ffce905e488a6340c01d97012a5d1a62b144ef
|
[] |
no_license
|
hsalam/python_learning_1
|
42601f1759b12d75d8c1fca267ab43a50af5ddf4
|
d005e1b42965b3856a7f93fb6530cf7a260c07b4
|
refs/heads/master
| 2021-08-24T01:07:47.390945
| 2017-11-16T06:10:16
| 2017-11-16T06:10:16
| 109,577,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
py
|
#!/usr/bin/python
#pgm to enter n number of strings and display them with no duplicates. Also to display the duplicate strings
list_names = []
duplicates = []
no_duplicate_list = []
input_limit = raw_input("Enter the limit : ")
for i in range(0,int(input_limit)):
list_names.insert(i,raw_input("Enter name "+str(i+1)+" : "))
for i in xrange(int(input_limit)):
count = 0
if list_names[i] not in duplicates:
for j in xrange(int(input_limit)):
if list_names[i] == list_names[j]:
count += 1
duplicates.insert(i,list_names[i])
if count>1:
print "%s occured %d" %(list_names[i], count)
no_duplicate_list.insert(i,list_names[i])
print "List of strings with no duplicates :\n %s" %no_duplicate_list
|
[
"hasnasalam11@gmail.com"
] |
hasnasalam11@gmail.com
|
1f25c522e3304dee61cfd2ed17b83686d7305a28
|
439852a1cbd8ad06936857d2ed83f5816c486ec1
|
/django/pactf_web/ctflex_helpers.py
|
06e9ddbd5d41c9f96f73c520c1781fa771359353
|
[
"MIT"
] |
permissive
|
PACTF/pactf
|
82ea465488cc79ec8568b0aa8f19f9b89b7d0bbd
|
1a0d9acd451c43b4c36b24d92c8afd07c3d706a7
|
refs/heads/master
| 2023-01-11T13:58:03.060226
| 2022-01-22T06:02:43
| 2022-01-22T06:02:43
| 46,456,319
| 10
| 9
|
NOASSERTION
| 2022-12-26T19:51:47
| 2015-11-19T00:22:23
|
Python
|
UTF-8
|
Python
| false
| false
| 307
|
py
|
"""Define objects for CTFlex"""
# def eligible(team):
# """Determine eligibility of team
#
# This function is used by CTFlex.
# """
# return (team.standing == team.GOOD_STANDING
# and team.country == team.US_COUNTRY
# and team.background == team.SCHOOL_BACKGROUND)
|
[
"yatharth999@gmail.com"
] |
yatharth999@gmail.com
|
8a5136d1250ba4cd928c65f96fd34cd13c10dd5b
|
934ebf73b75417b4db013fad55b58e0846500cc8
|
/bentoml/version.py
|
5a9390efd5db0fa750b9040009678ec490f88760
|
[
"Apache-2.0"
] |
permissive
|
BigRLab/BentoML
|
8a2b92f52fb3e6f0ea58c61e806ce4a19490cebb
|
a14adc816b0374b63fe1b800e5dfe4201d0cb801
|
refs/heads/master
| 2020-06-08T06:31:12.502409
| 2019-06-21T23:05:22
| 2019-06-21T23:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.2.0"
|
[
"noreply@github.com"
] |
BigRLab.noreply@github.com
|
8eed96af352ee8e686c9336ff893f50493418394
|
2b092b1b8ca75c239377738d2302ac3fd1876d82
|
/lesson3/task1.py
|
89fb70c3bab83d7e85dc06273e8a1ff4b2a57c17
|
[] |
no_license
|
aliakseiChynayeu/geekbrains-python
|
5e912860f8b39a2a535a665d2f579c03b89bfff1
|
5619a0c5e879e01d190f31c68c836aee12b04c77
|
refs/heads/master
| 2022-12-27T02:45:23.127694
| 2020-09-26T10:02:54
| 2020-09-26T10:02:54
| 281,753,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
def divide(a, b):
"""
Функция деления 2 чисел
:param a: Делимое
:param b: Делитель
:return: Результат деления a/b
Вывод на экран ошибки при делении на 0
"""
try:
return a / b
except ZeroDivisionError:
print("Деление на 0!!!")
a = int(input("введите делимое >>> "))
b = int(input("введите делитель >>> "))
result = divide(a, b)
if result is not None:
print(f" {a} / {b} = {result:.3f}")
|
[
"aliaksei.chynayeu@gmail.com"
] |
aliaksei.chynayeu@gmail.com
|
2d0bd029a13d182328130194503783919f0b007f
|
fe58e129abaf3c0a12436c71d6ea93f755728f46
|
/03_last_element/last_element.py
|
66c3ac26c7ce1dffe76e69b262e36dc4ef280abb
|
[] |
no_license
|
wcjennyng/Python-Ds-Practice
|
90a5fb0f2754007f2906315beec2c1503c9ea9c0
|
bc885984c211c1fb7099d8c20fc0c670874faa54
|
refs/heads/master
| 2023-04-18T19:14:06.802805
| 2021-05-09T17:27:18
| 2021-05-09T17:27:18
| 364,309,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
def last_element(lst):
"""Return last item in list (None if list is empty.
>>> last_element([1, 2, 3])
3
>>> last_element([]) is None
True
"""
if lst == []:
return None
else:
return lst[-1]
|
[
"wcjennyng@gmail.com"
] |
wcjennyng@gmail.com
|
fcc9d408343810e8bd47c7d90e7972f93ccf785a
|
af61044c866eb85ca2c622e082090f7657431206
|
/webcli/viewset/.ipynb_checkpoints/view_abtest-checkpoint.py
|
335bc8aa7d4e62a24e746dae3b852ccf07ff14c8
|
[] |
no_license
|
leepand/gridpoc
|
f7959ef099d8a5513c59dfeb682761771ffe7594
|
4c476cd0241a95a4a7d2abf53a519d3749ecfb94
|
refs/heads/master
| 2020-04-28T02:38:49.631595
| 2019-03-11T02:01:50
| 2019-03-11T02:01:50
| 174,906,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,647
|
py
|
# -*- coding: UTF-8 -*-
# author: leepand
# time: 2018-12-01/update 2019-01-16
# desc: abtesting view and api
from view_base import ViewBase,render,render_without_base
from Arthur.core.entities.dao.hjs_user_dao import HjsUserDao
from Arthur.core.entities.dao.Arthur_Abtesting_dao import ArthurAbTestingDao
from Arthur.core.entities.base.bs_log import Log
from Arthur.core.entities.bean.hjs_user import HjsUser
import os,sys
from Arthur.core.abtesting.utils.models import Experiment
import Arthur.core.abtesting.utils.db as db
from markdown import markdown
from Arthur.core.entities.bean.Arthur_algolist import ArthurAlgoList
import web
import datetime
def determine_period():
per={'period':'day'}
period = per.get('period', 'day')
if period not in ['day', 'week', 'month', 'year']:
err = {'error': 'invalid argument: {0}'.format(period), 'status': 400}
#abort(400, jsonify(err))
return period
def simple_markdown(experiment):
description = experiment['description']
if description and description != '':
experiment['pretty_description'] = markdown(description)
return experiment
def experiment_list():
experiments = Experiment.all(redis=db.REDIS)
period = determine_period()
experiments = [simple_markdown(exp.objectify_by_period(period)) for exp in experiments]
return experiments
def archived():
experiments = Experiment.archived(redis=db.REDIS)
period = determine_period()
experiments = [simple_markdown(exp.objectify_by_period(period)) for exp in experiments]
return experiments
def paused():
experiments = Experiment.paused(redis=db.REDIS)
period = determine_period()
experiments = [simple_markdown(exp.objectify_by_period(period)) for exp in experiments]#[exp.name for exp in experiments]
return experiments
class ViewMyAbExp(ViewBase):
def GET(self):
bRet, sRet = self.check_login()
if not bRet:
Log.err("user not login!")
return web.seeother("/login")
return render.my_abexp()
def POST(self):
return self.GET()
class viewMyAbexpAdd(ViewBase):
def GET(self):
if not self.check_login():
Log.err("user not login");
return web.seeother("/login")
#此处用layer自己的模版框架
return render_without_base.my_abexpAdd()
from operator import itemgetter
class ViewAbtest(ViewBase):
def GET(self):
bRet, sRet = self.check_login()
if not bRet:
Log.err("user not login!")
return web.seeother("/login")
exp_list=experiment_list()+archived()+paused()
if len(exp_list)>0:
for exp_num in range(len(exp_list)):
exp_list[exp_num]['id']=exp_num
if exp_list[exp_num]['is_paused']:#如果实验结束,但未决胜负,则状态黑色2,否则绿色1
if exp_list[exp_num]['has_winner']:
exp_list[exp_num]['is_paused']={'result':u'已结束','status':'label-default'}
#exp_list[exp_num]['has_winner']={'result':u'已决','status':1}
else:
exp_list[exp_num]['is_paused']={'result':u'已结束','status':'label-default'}
#exp_list[exp_num]['has_winner']={'result':u'决战中','status':0}
else:
exp_list[exp_num]['is_paused']={'result':u'进行中','status':'label-primary'}
if exp_list[exp_num]['has_winner']:
exp_list[exp_num]['has_winner']={'result':u'已决','status':'label-default'}
else:
exp_list[exp_num]['has_winner']={'result':u'决战中','status':'label-primary'}
if len(exp_list[exp_num]['winner'])<1:
exp_list[exp_num]['winner']=u'暂无'
serviceList_bytime = sorted(exp_list,key = itemgetter('created_at'),reverse = True)
exp_list = serviceList_bytime
return render.abtest(exp_list)
def POST(self):
return self.GET()
class ViewExpdetails(ViewBase):
def GET(self,name):
bRet, sRet = self.check_login()
if not bRet:
Log.err("user not login!")
return web.seeother("/login")
return render.expdetails(name)
def POST(self):
return self.GET(name)
class ViewAbtestCreate(ViewBase):
def GET(self):
bRet, sRet = self.check_login()
if not bRet:
Log.err("user not login!")
return web.seeother("/login")
exp_list=experiment_list()+archived()+paused()
if len(exp_list)>0:
for exp_num in range(len(exp_list)):
exp_list[exp_num]['id']=exp_num
if exp_list[exp_num]['is_paused']:#如果实验结束,但未决胜负,则状态黑色2,否则绿色1
if exp_list[exp_num]['has_winner']:
exp_list[exp_num]['is_paused']={'result':u'已结束','status':'label-default'}
#exp_list[exp_num]['has_winner']={'result':u'已决','status':1}
else:
exp_list[exp_num]['is_paused']={'result':u'已结束','status':'label-default'}
#exp_list[exp_num]['has_winner']={'result':u'决战中','status':0}
else:
exp_list[exp_num]['is_paused']={'result':u'进行中','status':'label-primary'}
if exp_list[exp_num]['has_winner']:
exp_list[exp_num]['has_winner']={'result':u'已决','status':'label-default'}
else:
exp_list[exp_num]['has_winner']={'result':u'决战中','status':'label-primary'}
if len(exp_list[exp_num]['winner'])<1:
exp_list[exp_num]['winner']=u'暂无'
return render.abtest_create(exp_list)
class ViewApiAbtestAdd(ViewBase):
def __init__(self):
self._rDict = {
"expid": {'n': 'expId', 't': str, 'v': None},
"explist": {'n': 'expList', 't': str, 'v': None},
"clientid": {'n': 'clientId', 't': str, 'v': None},
"expdesc": {'n': 'expDesc', 't': str, 'v': None},
}
def _check_param(self):
bRet, sRet = super(ViewApiAbtestAdd, self)._check_param()
if not bRet:
return bRet, sRet
return True, None
def _deal_abtest_add(self):
bRet, is_admin = HjsUser.is_admin(self.get_user_name())
if not bRet:
return False, sRet
#if not is_admin:
# return False, 'No permission do abtest experiment add'
bRet, user_id = HjsUser.get_user_uid(self.get_user_name())
if not bRet:
return False, user_id
bRet,is_ab_exsit = ArthurAbTestingDao.query_node_by_abname(self.expId)
if len(is_ab_exsit)>1:
return False, 'Experiment Name you create is exist!'
abname=self.expId
bRet,insert_ab = ArthurAbTestingDao.insert_node(user_id,abname)
self.expList=[str(i) for i in self.expList.split(',')]
#status=abtest_add(self.expId, self.expList, self.clientId)
exp=Experiment.find_or_create(self.expId,self.expList,redis=db.REDIS)
exp.update_description(self.expDesc)
return True,exp
def POST(self):
if not self.check_login():
return self.make_error("user not login")
bRet, sRet = self.process(self._deal_abtest_add)
if not bRet:
Log.err("add abtset error: %s" % (str(sRet)))
return self.make_error(sRet)
return self.make_response(ViewBase.RetMsg.MSG_SUCCESS)
def abtest_add(exp_id,alt,client_id):
try:
with thrifty.Client(Abtesting_thrift) as c:
return c.diversion(exp_id,alt,client_id)
except:
print 'wrong exp create'
class ViewApiAbServList(ViewBase):
#def _check_param(self):
# bRet, sRet = super(ViewApiServMsgUser, self)._check_param()
# if not bRet:
# return False, sRet
#
# return True, None
def _deal_ab_serv_list(self):
#msg_info = get_req_all_param()
#userName=msg_info.get('userName','')
#projectName = msg_info.get('projectName','')
#bRet, is_admin = HjsUser.is_admin(self.get_user_name())
#if not bRet:
# return False, sRet
#if not is_admin:
# return False, 'No permission do user info'
#bRet, user_id = HjsUser.get_user_uid(self.get_user_name())
#if not bRet:
# return False, user_id
#_bRet, msg_user_list=ArthurMsg.query_node_msg_list_by_usrproj(userName,projectName)
#_bRet, msg_user_list =HjsUserDao.query_node_msg_user_list(user_id,"../static/images/user-icon.png")
#if not _bRet:
# return False, msg_user_list
#print 'msg_info = get_req_all_param()',msg_info
#print 'comments;;;;;;;',msg_user_list
exp_list=experiment_list()+archived()+paused()
is_win_list=[]
is_pause_list=[]
is_end_list = []
ab_info_dict={}
if len(exp_list)>0:
for exp_num in range(len(exp_list)):
exp_list[exp_num]['id']=exp_num
if exp_list[exp_num]['is_paused']:#如果实验结束,但未决胜负,则状态黑色2,否则绿色1
is_pause_list.append(exp_list[exp_num])
if exp_list[exp_num]['is_paused'] or exp_list[exp_num]['has_winner']:
is_end_list.append(exp_list[exp_num])
if exp_list[exp_num]['has_winner']:
is_win_list.append(exp_list[exp_num])
bRet, sRet =ArthurAlgoList.algo_list_forautocomplete()
abservlist= sRet
ab_info_dict['abservlist']=abservlist
ab_info_dict['is_pause_list']=is_pause_list
ab_info_dict['is_win_list']=is_win_list
ab_info_dict['is_end_list']=is_end_list
ab_info_dict['all_exp_cnt']=len(exp_list)
ab_info_dict['is_win_ratio']="width: {data}%;".format(data=int(round((len(is_win_list)/(len(exp_list)+0.0000001)),2)*100))
ab_info_dict['is_pause_ratio']="width: {data}%;".format(data=int(round((len(is_pause_list)/(len(exp_list)+0.0000001)),2)*100))
ab_info_dict['is_end_ratio']="width: {data}%;".format(data=int(round((len(is_end_list)/(len(exp_list)+0.0000001)),2)*100))
return True,ab_info_dict
def GET(self):
if not self.check_login():
return self.make_error("user not login")
bRet, sRet = self.process(self._deal_ab_serv_list)
if not bRet:
Log.err("deal_ab_serv_list: %s" % (str(sRet)))
return self.make_error(sRet)
return self.make_response(sRet)
#我的实验页面
class ViewApiMyAbList(ViewBase):
def _deal_my_ab_list(self):
bRet, user_id = HjsUser.get_user_uid(self.get_user_name())
if not bRet:
return False, user_id
bRet, my_ab_list = ArthurAbTestingDao.query_node_ab_list_by_uid(user_id)
my_ab_list_all=[]
my_ab_list_win=[]
my_ab_list_paused=[]
my_ab_list_archived=[]
my_ab_list_running = []
if len(my_ab_list)>0:
for ab_dict in my_ab_list:
ab_info={}
abName = ab_dict['abname']
bRet,experiment= find_or_404(abName)
if experiment=="None":
continue
#print bRet,experiment
if bRet:
period = determine_period()
obj = simple_markdown(experiment.objectify_by_period(period))
ab_info['created_at']=experiment.created_at
description = obj['description']
if description and description != '':
ab_info['description']=description
else:
ab_info['description']='目前还没有实验描述,赶紧在详情页添加吧'
ab_info['abname'] = abName
if obj['kpis']:
ab_info['kpis'] = obj['kpis']
else:
ab_info['kpis'] = []
ab_info['has_winner']=obj['has_winner']
ab_info['is_archived']=obj['is_archived']
ab_info['is_paused']=obj['is_paused']
if obj['has_winner']:#剔除已暂停和已结束的实验,置为推送
if not obj['is_archived'] and not obj['is_paused']:
my_ab_list_win.append(ab_info)
if obj['is_archived']:
my_ab_list_archived.append(ab_info)
if obj['is_paused']:
my_ab_list_paused.append(ab_info)
if not obj['is_paused'] and not obj['is_archived']:
if not obj['has_winner']:
my_ab_list_running.append(ab_info)
my_ab_list_all.append(ab_info)
else:
return False,'%s is not found!'%abName
return True,{'my_ab_list_all':my_ab_list_all,'my_ab_list_win':my_ab_list_win,'my_ab_list_archived':my_ab_list_archived,
'my_ab_list_paused':my_ab_list_paused,'my_ab_list_running':my_ab_list_running}
def GET(self):
if not self.check_login():
return self.make_error("user not login")
bRet, sRet = self.process(self._deal_my_ab_list)
if not bRet:
Log.err("deal_my_ab_list: %s" % (str(sRet)))
return self.make_error(sRet)
return self.make_response(sRet)
class ViewApiMyAbDetails(ViewBase):
def __init__(self):
self._rDict = {
"abname": {'n': "abName", 't': str, 'v': None}
}
def _check_param(self):
bRet, sRet = super(ViewApiMyAbDetails, self)._check_param()
if not bRet:
return False, sRet
return True, None
def _deal_Myab_details(self):
#bRet, is_admin = HjsUser.is_admin(self.get_user_name())
#if not bRet:
# return False, sRet
#if not is_admin:
# return False, 'No permission to do this'
bRet,experiment= find_or_404(self.abName)
exp_compute_info={}
if experiment:
period = determine_period()
obj = simple_markdown(experiment.objectify_by_period(period))
exp_out=obj['alternatives']#实验各组详情数据
if not obj['description']:
description=u'目前还没有实验描述,赶紧添加吧'
else:
description=obj['description']
if len(obj['kpis'])>0:
kpis=obj['kpis']
else:
kpis='defalut'
exp_compute_info={'description':description,'kpis':kpis,\
'total_conversions':obj['total_conversions'],\
'total_participants':obj['total_participants'],\
'total_coversion_rate':100*round(obj['total_conversions']/(obj['total_participants']+0.000001),2),\
'created_at':obj['created_at']
}
else:
return False,'No experiment found!'
#判断实验是否为control
for _i in range(len(exp_out)):
exp_out[_i]['confidence_interval']=round(exp_out[_i]['confidence_interval'],2)
if exp_out[_i]['is_winner']:
exp_out[_i]['is_winner_check']='fa fa-check'
exp_out[_i]['is_winner_class']='label label-primary'
exp_out[_i]['is_winner_result']=u'胜出'
else:
exp_out[_i]['is_winner_check']=''
exp_out[_i]['is_winner_class']='label label-default'
exp_out[_i]['is_winner_result']=u'暂未胜出'
if exp_out[_i]['is_control']:
exp_out[_i]['is_control_label']='label label-warning'
exp_out[_i]['is_control_is']=u'控制组'
else:
exp_out[_i]['is_control_label']=''
exp_out[_i]['is_control_is']=u''
#abtest详情曲线图
date_all=[]#每个实验的所有实验日期列表
for j in exp_out:
for k in j['data']:
date_all.append(k['date'])
if len(date_all)<1:
date_all=[obj['created_at'][0:10]]
_m=max(date_all)
new_list=[]
str2date=datetime.datetime.strptime(_m,"%Y-%m-%d")
for _i in range(5):
d_j=str2date+datetime.timedelta(days=-_i)
new_list.append(d_j.strftime("%Y-%m-%d"))
data_date=sorted(new_list)
#组合E chart所需要的信息
exp_name_list=[]
exp_info_list=[]
exp_series=[]
for _j_ in exp_out:
exp_info={}
exp_name_list.append(_j_['name'])
exp_data=[0.0,0.0,0.0,0.0,0.0]
for _iter in range(len(data_date)):
for kk in _j_['data']:
if kk['date'] == data_date[_iter]:
exp_data[_iter]=kk['conversions']/(kk['participants']+0.001)
#exp_data.append(kk['conversions']/(kk['participants']+0.00001))
#else:
# exp_data.append(0.00)
exp_info['name']=_j_['name']
exp_info['type']='line'
exp_info['data']=exp_data
exp_info_list.append(exp_info)
return True,{"exp_info_list":exp_info_list,"exp_daily_details":exp_out,\
"exp_name_list4Echart":exp_name_list,"exp_info_list4Echart":exp_info_list,\
"data_date4Echart":data_date,'obj':obj,'exp_compute_info':exp_compute_info}
def GET(self):
bRet, sRet = self.check_login()
if not bRet:
return web.seeother("/login")
bRet, sRet = self.process(self._deal_Myab_details)
if not bRet:
Log.err("deal_Myab_details: %s" % (str(sRet)))
return self.make_error(sRet)
return self.make_response(sRet)
class ViewApiAbActions(ViewBase):
def __init__(self):
self._rDict = {
"actiontype": {'n': 'actionType', 't': str, 'v': None},
"abname": {'n': 'abName', 't': str, 'v': None},
"altname": {'n': 'altName', 't': str, 'v': None}
}
def _check_param(self):
bRet, sRet = super(ViewApiAbActions, self)._check_param()
if not bRet:
return bRet, sRet
return True, None
def _deal_ab_actions(self):
"""bRet, is_admin = HjsUser.is_admin(self.get_user_name())
if not bRet:
return False, sRet
if not is_admin:
return False, 'No permission do user del'
bRet, user_id = HjsUser.get_user_uid(self.get_user_name())
if not bRet:
return False, user_id
if user_id == self.uId:
return False, 'do not allow delete yourself'"""
if self.actionType=="set_winner":
return set_winner(self.abName,self.altName)
if self.actionType=="reset_exp":
return reset_experiment(self.abName)
if self.actionType=="reset_winner":
return reset_winner(self.abName)
if self.actionType=="pause_exp":
return toggle_experiment_pause(self.abName)
if self.actionType=="update_desc":
#self.altName代替更新描述
return update_experiment_description(self.abName,self.altName)
if self.actionType=="end_exp":
return toggle_experiment_archive(self.abName)
if self.actionType=="del_exp":
return delete_experiment(self.abName)
def POST(self):
if not self.check_login():
return self.make_error("user not login")
bRet, sRet = self.process(self._deal_ab_actions)
if not bRet:
Log.err("deal_ab_actions: %s" % (str(sRet)))
return self.make_error(sRet)
return self.make_response(ViewBase.RetMsg.MSG_SUCCESS)
def find_or_404(experiment_name,kpi=None):
try:
experiment_name = experiment_name
exp = Experiment.find(experiment_name, db.REDIS)
if kpi:#设置kpi,用于页面查询,需要kpis列表非空否则出错,server端调用时也需要有kpi的参数输入
exp.set_kpi(kpi)
#if request.args.get('kpi'):
# exp.set_kpi(request.args.get('kpi'))
return True,exp
except ValueError:
return False,'None'
# Set winner for an experiment
def set_winner(experiment_name,alternative_name):
bRet,experiment = find_or_404(experiment_name)
if bRet:
experiment.set_winner(alternative_name)
return True,"Sucess"
else:
return False,"None experiment is found"
# Reset experiment
def reset_experiment(experiment_name):
bRet,experiment = find_or_404(experiment_name)
if bRet:
experiment.reset()
return True,"Sucess"
else:
return False,"None experiment is found"
# Pause experiment
def toggle_experiment_pause(experiment_name):
bRet,experiment = find_or_404(experiment_name)
if experiment.is_paused():
experiment.resume()
else:
experiment.pause()
return True,"Sucess"
# Pause experiment
def update_experiment_description(experiment_name,description):
bRet,experiment = find_or_404(experiment_name)
experiment.update_description(description)
return True,"Sucess"
# Archive experiment
def toggle_experiment_archive(experiment_name):
bRet,experiment = find_or_404(experiment_name)
if experiment.is_archived():
return False,"Exp has already archived"
else:
experiment.archive()
return True,"Sucess"
# Delete experiment
def delete_experiment(experiment_name):
bRet,experiment = find_or_404(experiment_name)
experiment.delete()
return True,"Sucess"
def reset_winner(experiment_name):
bRet,experiment = find_or_404(experiment_name)
experiment.reset_winner()
return True,"Sucess"
#我的实验页面
class ViewApiMyAbList(ViewBase):
def _deal_my_ab_list(self):
bRet, user_id = HjsUser.get_user_uid(self.get_user_name())
if not bRet:
return False, user_id
bRet, my_ab_list = ArthurAbTestingDao.query_node_ab_list_by_uid(user_id)
my_ab_list_all=[]
my_ab_list_win=[]
my_ab_list_paused=[]
my_ab_list_archived=[]
my_ab_list_running = []
if len(my_ab_list)>0:
for ab_dict in my_ab_list:
ab_info={}
abName = ab_dict['abname']
bRet,experiment= find_or_404(abName)
if experiment=="None":
continue
#print bRet,experiment
if bRet:
period = determine_period()
obj = simple_markdown(experiment.objectify_by_period(period))
ab_info['created_at']=experiment.created_at
description = obj['description']
if description and description != '':
ab_info['description']=description
else:
ab_info['description']='目前还没有实验描述,赶紧在详情页添加吧'
ab_info['abname'] = abName
if obj['kpis']:
ab_info['kpis'] = obj['kpis']
else:
ab_info['kpis'] = []
ab_info['has_winner']=obj['has_winner']
ab_info['is_archived']=obj['is_archived']
ab_info['is_paused']=obj['is_paused']
if obj['has_winner']:#剔除已暂停和已结束的实验,置为推送
if not obj['is_archived'] and not obj['is_paused']:
my_ab_list_win.append(ab_info)
if obj['is_archived']:
my_ab_list_archived.append(ab_info)
if obj['is_paused']:
my_ab_list_paused.append(ab_info)
if not obj['is_paused'] and not obj['is_archived']:
if not obj['has_winner']:
my_ab_list_running.append(ab_info)
my_ab_list_all.append(ab_info)
else:
return False,'%s is not found!'%abName
return True,{'my_ab_list_all':my_ab_list_all,'my_ab_list_win':my_ab_list_win,'my_ab_list_archived':my_ab_list_archived,
'my_ab_list_paused':my_ab_list_paused,'my_ab_list_running':my_ab_list_running}
def GET(self):
if not self.check_login():
return self.make_error("user not login")
bRet, sRet = self.process(self._deal_my_ab_list)
if not bRet:
Log.err("deal_my_ab_list: %s" % (str(sRet)))
return self.make_error(sRet)
return self.make_response(sRet)
#实验list Zoo
class ViewApiabZooList(ViewBase):
def __init__(self):
self._rDict = {
"comming_soon": {'n': "comming_soon", 't': str, 'v': 'None'}
}
def _check_param(self):
bRet, sRet = super(ViewApiabZooList, self)._check_param()
if not bRet:
return False, sRet
return True, None
def _deal_zooab_details(self):
#bRet, is_admin = HjsUser.is_admin(self.get_user_name())
#if not bRet:
# return False, sRet
#if not is_admin:
# return False, 'No permission to do this'
#experiments = Experiment.all(redis=db.REDIS)
#experiments = [exp.name for exp in experiments]
#period = determine_period()
#experiments = [simple_markdown(exp.objectify_by_period(period)) for exp in experiments]
experiments=experiment_list()+archived()+paused()
experiments_bytime = sorted(experiments,key = itemgetter('created_at'),reverse = True)
return True,experiments_bytime
def GET(self):
bRet, sRet = self.check_login()
if not bRet:
return web.seeother("/login")
bRet, sRet = self.process(self._deal_zooab_details)
if not bRet:
Log.err("deal_zooab_details: %s" % (str(sRet)))
return self.make_error(sRet)
return self.make_response(sRet)
|
[
"85721094@qq.com"
] |
85721094@qq.com
|
2d3f6c3e7ce8a553a4f77d8e97cc905613e289df
|
dd95edb16036c7de0970b52590fd8ae7b979e1ac
|
/src/data_loader.py
|
b998cbab540087e92c9724035ee2d49134d8d1fa
|
[] |
no_license
|
YuzhongHuang/Data-Science-Final
|
e1978cbfb1ae04d682eaa37aaff3840cfd19ebaf
|
87a2f3691547fd7ec3bc2e90fc2439d95a3243e2
|
refs/heads/master
| 2021-01-10T14:42:58.192759
| 2016-05-04T23:28:09
| 2016-05-04T23:28:09
| 54,586,654
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,468
|
py
|
"""data_loader.py
~~~~~~~~~~~~~~
A local data loader program that grab medical entries data from
given csv file and randomly split data into a tuple of (train, validata, test)
Inside each group, data is represented in a tuple of (input, traget)
To make the loader more efficient, the program allows user to save
and load the loader object through the use of pickle.
"""
import numpy
import pandas
import pickle
from theano import *
import theano.tensor as T
def read_csv(dataset, target_name):
""" Reads an csv file and converts to (train, val, test)
with each of the form (inputs, targets)
:type dataset: string
:param dataset: the path to the dataset
"""
# default values
train_percent = 0.5 # default trainset percentage
val_percent = 0.2 # default valset percentage
test_percent = 0.3 # default testset percentage
# process dataset to form (inputs, targets)
data = pandas.read_csv(dataset) # import from dataset
targets = data[target_name].apply(round).apply(int).as_matrix() # get the target column as numpy array
print targets
data = data.drop(target_name, 1) # get a dataframe with the remaining features
inputs = data.as_matrix() # get the feature matrix as numpy matrix
inputs = inputs[:, 1:] # drop the index column
entries = targets.shape[0] # total entries of dataset
# use permutation to generate random indices
indices = numpy.random.permutation(entries)
# get randomized indices for each datasets
train_idx = indices[:int(train_percent*entries)]
val_idx = indices[int(train_percent*entries):int(test_percent*entries)+int(train_percent*entries)]
test_idx = indices[int(test_percent*entries)+int(train_percent*entries):]
# get datasets
train = (inputs[train_idx,:], targets[train_idx])
val = (inputs[val_idx,:], targets[val_idx])
test = (inputs[test_idx,:], targets[test_idx])
return train, val, test
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
return shared_x, T.cast(shared_y, 'int32')
def load_data(dataset, target_name):
""" Loads the dataset
:type dataset: string
:param dataset: the path to the dataset
"""
train_set, valid_set, test_set = read_csv(dataset, target_name)
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
def save_data(dataset, filename):
data = load_data(dataset)
with open(filename+'.pickle', 'wb') as handle:
pickle.dump(data, handle)
|
[
"yuzhong.huang@students.olin.edu"
] |
yuzhong.huang@students.olin.edu
|
222e039a8f1b16ddc964c3f5b6aad8e5a655613f
|
f60cbe4de824e2f9b8fe0aa291e1c26c3cac96ea
|
/test.py
|
728beada28f13143ccb6088a3b95cf5856bc4eeb
|
[] |
no_license
|
mangesh222/python
|
3a48935bce956b2595b7eb8d22d439c3c7465bb3
|
ecea235e7db42acf3f2193882d6e31dcc70e0301
|
refs/heads/master
| 2023-05-29T01:33:56.418128
| 2021-06-10T03:04:00
| 2021-06-10T03:04:00
| 374,527,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
print('hello)
print('vishal'
|
[
"mangesh.mangesh22@gmail.com"
] |
mangesh.mangesh22@gmail.com
|
263366fcc564685a4541c1d461871e0dd45977f6
|
836645c94a5a286c282b986814e25c94603bdbb0
|
/venv/bin/chardetect
|
ddea9e3829290f630fd84fd014373b4efab27c62
|
[] |
no_license
|
masande-99/FINAL-PROJECT-back-end-
|
56fc58b901e310a2cd9dc25095d9fe2d80a77e16
|
e4f3c4e8d0f237a6c232484ddd401c0430152e2c
|
refs/heads/main
| 2023-04-03T01:12:05.345338
| 2021-03-15T09:43:47
| 2021-03-15T09:43:47
| 341,456,343
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
#!/home/lifechoices/Documents/END-OF-COURSE-PROJECT(back-end)/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"gontyelenimasande@gmail.com"
] |
gontyelenimasande@gmail.com
|
|
a84311b4e1840eba2b2772bbabae3cd68e3804b0
|
a0985fe6dfb733c3c172c5feac468e7b2ddc210a
|
/3CNFSat/solvers.py
|
3552e19c7df161ae2c5e0c0c52b4687de798e151
|
[
"Apache-2.0"
] |
permissive
|
gustavodiel/IAR
|
b2955036a748b20ea54e62f510e60d67475293c3
|
9a45a4eba0cf8019cc00e42ffd29db69204c9dee
|
refs/heads/master
| 2020-04-23T17:51:52.019484
| 2020-01-29T15:35:20
| 2020-01-29T15:35:20
| 171,347,064
| 0
| 0
|
Apache-2.0
| 2019-04-03T12:40:31
| 2019-02-18T19:54:53
|
C++
|
UTF-8
|
Python
| false
| false
| 2,613
|
py
|
import numpy as np
from helpers import *
def update_temperature(iteration, max_iterations, initial_temperature, final_temperature):
return (initial_temperature - final_temperature) / (np.cosh(5 * iteration / max_iterations)) + final_temperature
def random_search(name, expected_right, clauses, num_vars, max_iterations, repetitions, cenario):
total_scores = []
for r in range(repetitions):
local_scores = []
solution_FO = evaluate_all(clauses, generate_solution(num_vars))
for i in range(max_iterations):
new_solution = generate_solution(num_vars)
new_score = evaluate_all(clauses, new_solution)
local_scores.append(expected_right - new_score)
if new_score > solution_FO:
solution_FO = new_score
total_scores.append(local_scores)
np_scores = np.array(total_scores)
best_scores = np_scores.min(axis=1)
convergence = pd.DataFrame(
np_scores.mean(axis=0), np.arange(max_iterations), columns=["clausulas"]
)
plot_convergence(convergence, "random_search", name, cenario)
return best_scores.mean(), best_scores.std()
def simmulated_annealing(
name,
expected_right,
clauses,
num_vars,
initial_temperature,
final_temperature,
SAMax,
max_iterations,
repetitions,
cenario
):
total_scores = []
for r in range(repetitions):
temperature = initial_temperature
solution = generate_solution(num_vars)
solution_FO = evaluate_all(clauses, solution)
iteration = 0
local_scores = []
while iteration < max_iterations:
for i in range(SAMax):
new_solution = disturb(solution)
new_score = evaluate_all(clauses, new_solution)
delta = solution_FO - new_score
if delta < 0 or random() < np.exp(-delta / temperature):
solution = new_solution
solution_FO = new_score
temperature = update_temperature(
iteration, max_iterations, initial_temperature, final_temperature
)
local_scores.append(expected_right - solution_FO)
iteration += 1
total_scores.append(local_scores)
np_scores = np.array(total_scores)
best_scores = np_scores.min(axis=1)
convergence = pd.DataFrame(
np_scores.mean(axis=0), np.arange(max_iterations), columns=["clausulas"]
)
plot_convergence(convergence, "simulated_anealing", name, cenario)
return best_scores.mean(), best_scores.std()
|
[
"gustavodiel@hotmail.com"
] |
gustavodiel@hotmail.com
|
23a741447677df22688dcf7e5fbe171db1bd2fcb
|
6967c0bca6063d93829aa579f515d9bd6f71e0dc
|
/reprozip/reprozip/pack.py
|
08ba5ad7a180d4b3cf68a03919285218c13f9b80
|
[
"BSD-3-Clause"
] |
permissive
|
aashish24/reprozip
|
2632ecfde7544de7f272f8ce56f8509a1b8093d7
|
2207987da6ae6fb7e256fc4a197366a881680e5c
|
refs/heads/master
| 2020-04-30T00:08:34.055174
| 2015-05-11T06:14:42
| 2015-05-11T06:14:42
| 35,426,059
| 1
| 0
| null | 2015-05-11T13:36:04
| 2015-05-11T13:36:04
| null |
UTF-8
|
Python
| false
| false
| 6,619
|
py
|
# Copyright (C) 2014-2015 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
"""Packing logic for reprozip.
This module contains the :func:`~reprozip.pack.pack` function and associated
utilities that are used to build the .rpz pack file from the trace SQLite file
and config YAML.
"""
from __future__ import unicode_literals
import itertools
import logging
import os
from rpaths import Path
import sys
import tarfile
import uuid
from reprozip import __version__ as reprozip_version
from reprozip.common import File, load_config, save_config, \
record_usage_package
from reprozip.tracer.linux_pkgs import identify_packages
from reprozip.tracer.trace import merge_files
def expand_patterns(patterns):
files = set()
dirs = set()
# Finds all matching paths
for pattern in patterns:
if logging.root.isEnabledFor(logging.DEBUG):
logging.debug("Expanding pattern %r into %d paths",
pattern,
len(list(Path('/').recursedir(pattern))))
for path in Path('/').recursedir(pattern):
if path.is_dir():
dirs.add(path)
else:
files.add(path)
# Don't include directories whose files are included
non_empty_dirs = set([Path('/')])
for p in files | dirs:
path = Path('/')
for c in p.components[1:]:
path = path / c
non_empty_dirs.add(path)
# Builds the final list
return [File(p) for p in itertools.chain(dirs - non_empty_dirs, files)]
def canonicalize_config(runs, packages, other_files, additional_patterns,
sort_packages):
"""Expands ``additional_patterns`` from the configuration file.
"""
add_files = expand_patterns(additional_patterns)
if sort_packages:
add_files, add_packages = identify_packages(add_files)
else:
add_packages = []
other_files, packages = merge_files(add_files, add_packages,
other_files, packages)
return runs, packages, other_files
def data_path(filename, prefix=Path('DATA')):
"""Computes the filename to store in the archive.
Turns an absolute path containing '..' into a filename without '..', and
prefixes with DATA/.
Example:
>>> data_path(PosixPath('/var/lib/../../../../tmp/test'))
PosixPath(b'DATA/tmp/test')
>>> data_path(PosixPath('/var/lib/../www/index.html'))
PosixPath(b'DATA/var/www/index.html')
"""
return prefix / filename.split_root()[1]
class PackBuilder(object):
"""Higher layer on tarfile that adds intermediate directories.
"""
def __init__(self, filename):
self.tar = tarfile.open(str(filename), 'w:gz')
self.seen = set()
def add(self, name, arcname, *args, **kwargs):
from rpaths import PosixPath
assert isinstance(name, PosixPath)
assert isinstance(arcname, PosixPath)
self.tar.add(str(name), str(arcname), *args, **kwargs)
def add_data(self, filename):
if filename in self.seen:
return
path = Path('/')
for c in filename.components[1:]:
path = path / c
if path in self.seen:
continue
logging.debug("%s -> %s", path, data_path(path))
self.tar.add(str(path), str(data_path(path)), recursive=False)
self.seen.add(path)
def close(self):
self.tar.close()
self.seen = None
def pack(target, directory, sort_packages):
"""Main function for the pack subcommand.
"""
if target.exists():
# Don't overwrite packs...
logging.critical("Target file exists!")
sys.exit(1)
# Reads configuration
configfile = directory / 'config.yml'
if not configfile.is_file():
logging.critical("Configuration file does not exist!\n"
"Did you forget to run 'reprozip trace'?\n"
"If not, you might want to use --dir to specify an "
"alternate location.")
sys.exit(1)
runs, packages, other_files, additional_patterns = load_config(
configfile,
canonical=False)
# Canonicalize config (re-sort, expand 'additional_files' patterns)
runs, packages, other_files = canonicalize_config(
runs, packages, other_files, additional_patterns, sort_packages)
logging.info("Creating pack %s...", target)
tar = PackBuilder(target)
# Stores the original trace
trace = directory / 'trace.sqlite3'
if trace.is_file():
tar.add(trace, Path('METADATA/trace.sqlite3'))
# Add the files from the packages
for pkg in packages:
if pkg.packfiles:
logging.info("Adding files from package %s...", pkg.name)
files = []
for f in pkg.files:
if not Path(f.path).exists():
logging.warning("Missing file %s from package %s",
f.path, pkg.name)
else:
tar.add_data(f.path)
files.append(f)
pkg.files = files
else:
logging.info("NOT adding files from package %s", pkg.name)
# Add the rest of the files
logging.info("Adding other files...")
files = set()
for f in other_files:
if not Path(f.path).exists():
logging.warning("Missing file %s", f.path)
else:
tar.add_data(f.path)
files.add(f)
other_files = files
logging.info("Adding metadata...")
# Stores pack version
fd, manifest = Path.tempfile(prefix='reprozip_', suffix='.txt')
os.close(fd)
try:
with manifest.open('wb') as fp:
fp.write(b'REPROZIP VERSION 1\n')
tar.add(manifest, Path('METADATA/version'))
finally:
manifest.remove()
# Generates a unique identifier for the pack (for usage reports purposes)
pack_id = str(uuid.uuid4())
# Stores canonical config
fd, can_configfile = Path.tempfile(suffix='.yml', prefix='rpz_config_')
os.close(fd)
try:
save_config(can_configfile, runs, packages, other_files,
reprozip_version, canonical=True,
pack_id=pack_id)
tar.add(can_configfile, Path('METADATA/config.yml'))
finally:
can_configfile.remove()
tar.close()
# Record some info to the usage report
record_usage_package(runs, packages, other_files, pack_id)
|
[
"remirampin@gmail.com"
] |
remirampin@gmail.com
|
efbb025919e9eac773e2b39a84bbc652dc3b43be
|
f8494866e8a6d8ca2437d01b3c32c159a7948bcc
|
/rangetree_reference.py
|
bcfa70c0d9ba8a5eb0301c4233584364d0b5f63d
|
[] |
no_license
|
ideasman42/rangetree-py
|
fbfeb222fb8980f228c0d704879d0f2ac09e09d8
|
8c8594b7547d5f70365e94ed0acef5ccb791c178
|
refs/heads/master
| 2021-01-11T06:52:55.794222
| 2016-10-26T12:54:26
| 2016-10-26T12:54:26
| 71,978,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,366
|
py
|
class RangeTree:
"""
Slow range tree version, use for testing.
"""
__slots__ = (
"_data",
"_min",
"_max",
"_hard_min",
"_hard_max",
)
@classmethod
def FromRanges(cls, range_iter):
range_ls = list(range_iter)
range_ls_unpack = [value for pair in range_ls for value in pair]
if not range_ls_unpack:
# empty case
range_ls_unpack = [0]
r = RangeTree(min=min(range_ls_unpack), max=max(range_ls_unpack))
for pair in range_ls:
for value in range(pair[0], pair[1] + 1):
r.take(value)
return r
def __init__(self, *, min, max):
self._data = set()
self._hard_min = min
self._hard_max = max
self._min = 0
# not inclusive
self._max = 0
def copy(self):
tree_dst = RangeTree(min=self._hard_min, max=self._hard_max)
tree_dst._data = self._data.copy()
tree_dst._min = self._min
tree_dst._max = self._max
return tree_dst
def clear(self):
self._data.clear()
self._min = 0
self._max = 0
def take(self, value):
if value < self._hard_min or value > self._hard_max:
raise Exception("Value out of range")
# not essential but OK
if not self._data and self._min == self._max:
# Newly created
self._data.add(value)
self._min = value
self._max = value + 1
else:
# Existing data
if value >= self._max:
self._data.update(set(range(self._max, value + 1)))
self._max = value + 1
if value < self._min:
self._data.update(set(range(value, self._min)))
self._min = value
self._data.remove(value)
def retake(self, value):
if self.has(value):
return False
else:
self.take(value)
return True
def take_any(self):
if not self._data:
if self._max == self._hard_max:
raise IndexError("All values taken!")
self._data.add(self._max)
self._max += 1
value = min(self._data)
self.take(value)
return value
def release(self, value):
assert(value not in self._data)
assert(value >= self._min)
assert(value <= self._max)
self._data.add(value)
# not essential, we could just not do this
if self.is_empty():
self._min = 0
self._max = 0
self._data = set()
def is_empty(self):
return len(self._data) == (self._max - self._min)
def has(self, value):
if value < self._min:
return False
if value >= self._max:
return False
return value not in self._data
def range_iter(self):
# Ranges are inclusive
'''
Stupid slow code:
'''
d = self._data
i = self._min
while i < self._max:
if i not in d:
i_end = i
j = i + 1
while j not in d and j < self._max:
i_end = j
j += 1
yield (i, i_end)
i = j
else:
i += 1
|
[
"ideasman42@gmail.com"
] |
ideasman42@gmail.com
|
293aed28c0d7663002ce1813c8f563ba65498434
|
16f1ccd091ed6a2d713fdeef938737c5b8bdec53
|
/coleyoungmark/settings.py
|
ede0c6e2e7a6c104d5d4185807c21e7e1abb1351
|
[] |
no_license
|
cyoungmark/coleyoungmark
|
2f2837669bc66efcf1a7bc4aeb39a3884cd91611
|
6efca7d3580f0b240c2d26b248ae64869234e6d5
|
refs/heads/master
| 2021-03-12T20:01:45.094400
| 2015-08-29T03:57:27
| 2015-08-29T03:57:27
| 36,996,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,248
|
py
|
"""
Django settings for mywebsite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+j)^&8!4c@o496^xe-*s0uw-cmwfowc7^88w(-)y$v33nb3()v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#ALLOWED_HOSTS = ['backoffice.software']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'coleyoungmark',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'coleyoungmark.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['/Users/coleyoungmark/Documents/coleyoungmark/templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'coleyoungmark.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#MEDIA_ROOT = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#STATIC_ROOT = '/home/cyoungmark/webapps/backoffice_software_static/'
STATIC_URL = '/static/'
##
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'templates/landingpage'),
os.path.join(BASE_DIR, 'static'),
)
##STATICFILES_FINDERS = (
## 'django.contrib.staticfiles.finders.FileSystemFinder',
## 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
### 'django.contrib.staticfiles.finders.DefaultStorageFinder',
##)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.static',
)
|
[
"coleyoungmark@Coles-MacBook-Air.local"
] |
coleyoungmark@Coles-MacBook-Air.local
|
0f252f30b43655380250062d6602a40100b40390
|
ade1d4623a29433308e8ed959c5be1e33c07fb4c
|
/hxl/converters.py
|
1e6766aef761152e0468bed447d9b48c4e302a88
|
[
"Unlicense"
] |
permissive
|
raymondnijssen/libhxl-python
|
a6f08f95f1d2e10af08062bf6143af9051930638
|
40e34af5975001292299dab53139bd0cbdff74e0
|
refs/heads/master
| 2021-01-25T05:56:54.825257
| 2017-01-26T21:41:19
| 2017-01-26T21:41:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,756
|
py
|
"""Data-conversion classes
This module holds classes for converting to HXL from other formats, or
from HXL to other formats. Current, the only class is L{Tagger} (for
adding tags to non-HXL tabular data on the fly), but we will add more
converters soon, especially for formats like GeoJSON.
@author: David Megginson
@organization: UNOCHA
@license: Public Domain
@date: Started April 2015
@see: U{hxlstandard.org}
"""
import hxl, re
class Tagger(hxl.io.AbstractInput):
"""Add HXL hashtags to a CSV-like input stream.
Usage::
input = open('data.csv', 'r')
specs = [('Cluster', '#sector'), ('Organi', '#org'), ('province', '#adm1+es')]
tagger = Tagger(input, specs)
The tagger object acts as a L{hxl.io.AbstractInput} source, which you can
use with the L{hxl.data} function like this::
source = hxl.data(Tagger(input, specs)).with_rows('org=unicef').sort()
"""
def __init__(self, input, specs=[], default_tag=None, match_all=False):
"""Construct a new Tagger object.
The input spec is a list of tuples, where the first item is a
substring to match (case-/space-/punctuation-insensitive), and
the second item is the HXL tag spec to use. Example::
[('Cluster', '#sector'), ('Organi', '#org'), ('province', '#adm1+es')]
@param input: an input stream of some kind.
@param specs: the input specs, as described above (default: [])
@param match_all: if True, require that the full header string match; otherwise, match substrings (default: False).
@param default_tag: default tagspec to use for any column without a match.
"""
if isinstance(specs, dict):
# convert to list of tuples if needed
specs = [(key, specs[key]) for key in specs]
self.specs = [(hxl.common.normalise_string(spec[0]), spec[1]) for spec in specs]
self.default_tag = default_tag
self.match_all = match_all
self.input = iter(input)
self._cache = []
self._found_tags = False
def __next__(self):
"""Return the next line of input (including the new tags)."""
if not self._found_tags:
# Search the first 25 rows for a match.
if self._add_tags():
self._found_tags = True
else:
# if no match, through an exception
raise hxl.common.HXLException("Tagging failed")
if len(self._cache) > 0:
# read from the cache, first
return self._cache.pop(0)
else:
return next(self.input)
next = __next__
def _add_tags(self):
"""Look for headers in the first 25 rows."""
for n in range(0, 25):
raw_row = next(self.input)
if not raw_row:
break
self._cache.append(raw_row)
tag_row = self._try_tag_row(raw_row)
if tag_row:
self._cache.append(tag_row)
return True
return False
def _try_tag_row(self, raw_row):
"""See if we can match a header row."""
tags = []
tag_count = 0
for index, value in enumerate(raw_row):
value = hxl.common.normalise_string(value)
for spec in self.specs:
if self._check_header(spec[0], value):
tags.append(spec[1])
tag_count += 1
break
else:
# run only if nothing found
tags.append('')
if tag_count > 0 and tag_count/float(len(self.specs)) >= 0.5:
if self.default_tag:
tags = [tag or self.default_tag for tag in tags]
return tags
else:
return None
def _check_header(self, spec, header):
if self.match_all:
return (spec == header)
else:
return (spec in header)
def __iter__(self):
return self
_SPEC_PATTERN = r'^(.+)(#{token}([+]{token})*)$'.format(token=hxl.common.TOKEN_PATTERN)
@staticmethod
def parse_spec(s):
"""Try parsing a tagger spec (used only by the command-line tools)"""
result = re.match(Tagger._SPEC_PATTERN, s)
if result:
return (result.group(1), hxl.model.Column.parse(result.group(2), use_exception=True).display_tag)
else:
raise HXLFilterException("Bad tagging spec: " + s)
@staticmethod
def _load(input, spec):
"""Create a tagger from a dict spec."""
return Tagger(
input=input,
specs=spec.get('specs', []),
default_tag=spec.get('default_tag', None),
match_all=spec.get('match_all', False)
)
|
[
"david.megginson@megginson.com"
] |
david.megginson@megginson.com
|
2994e4887672e2f5f3863effad6a98df827036db
|
1a2ca64839723ede3134a0781128b0dc0b5f6ab8
|
/ExtractFeatures/Data/kracekumar/sprites_tut_showmovingboxes.py
|
9dd45e3e2d91ca5f484f4b8638a2f0c45deff20d
|
[] |
no_license
|
vivekaxl/LexisNexis
|
bc8ee0b92ae95a200c41bd077082212243ee248c
|
5fa3a818c3d41bd9c3eb25122e1d376c8910269c
|
refs/heads/master
| 2021-01-13T01:44:41.814348
| 2015-07-08T15:42:35
| 2015-07-08T15:42:35
| 29,705,371
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
import pygame
from pygame.locals import *
from sprites_tut_boxes2 import UpDownBox
pygame.init()
boxes = []
for color, location in [ ( [255,0,0], [0,0] ), ( [0,255,0], [60,60] ), (
[0,0,255], [120, 120]) ]:
boxes.append(UpDownBox(color, location))
screen = pygame.display.set_mode([150, 150])
while pygame.event.poll().type != KEYDOWN:
screen.fill([0,0,0]) # Blank the screen
# Save time by only calling this once
time = pygame.time.get_ticks()
for b in boxes:
b.update(time, 150)
screen.blit(b.image, b.rect)
pygame.display.update()
|
[
"vivekaxl@gmail.com"
] |
vivekaxl@gmail.com
|
3c209ab182a6171f405a344ebd4393600a80fc5a
|
6fd2c754e5503b7fbada5c19fa449cf1a899ef7e
|
/overlap_with_toxins/multiclass_classification/functions/featurizing_functions.py
|
a0b1082474d59893d68e560d02c33ed058b03337
|
[] |
no_license
|
atan14/ML_agrochemical
|
d7d7dd70282b725b105918db08add89df4ecb94d
|
75e620727eb387c1948226dc5035e5d35eb0bd34
|
refs/heads/master
| 2020-03-21T23:40:16.977964
| 2019-05-30T20:05:19
| 2019-05-30T20:05:19
| 139,200,569
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
import numpy as np
def daylight_fingerprint(mol):
from rdkit.Chem.Fingerprints import FingerprintMols
return np.ndarray.flatten(np.array(FingerprintMols.FingerprintMol(mol)))
def daylight_fingerprint_padding(x):
result = np.zeros((2048,))
result[:x.shape[0]] = x
return result
def get_ecfp(mol):
from rdkit.Chem.rdMolDescriptors import GetMorganFingerprintAsBitVect
bitstring = GetMorganFingerprintAsBitVect(mol, 2, nBits=2048).ToBitString()
return np.array(list(bitstring))
|
[
"tanaikrui@hotmail.com"
] |
tanaikrui@hotmail.com
|
818d433d1aee6a9d7e64cc77f52918ca81a967cc
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/GIT-USERS/ashishdotme/til.ashish.me/update_readme.py
|
aaddea4afad1353c82f665de43750860440207c0
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
"Run this after build_database.py - it needs til.db"
import pathlib
import sqlite_utils
import sys
import re
root = pathlib.Path(__file__).parent.resolve()
index_re = re.compile(r"<!\-\- index starts \-\->.*<!\-\- index ends \-\->", re.DOTALL)
if __name__ == "__main__":
db = sqlite_utils.Database(root / "til.db")
by_topic = {}
for row in db["til"].rows_where(order_by="created_utc"):
by_topic.setdefault(row["topic"], []).append(row)
index = ["<!-- index starts -->"]
for topic, rows in by_topic.items():
index.append("## {}\n".format(topic))
for row in rows:
index.append(
"* [{title}]({url}) - {date}".format(
date=row["created"].split("T")[0], **row
)
)
index.append("")
if index[-1] == "":
index.pop()
index.append("<!-- index ends -->")
if "--rewrite" in sys.argv:
readme = root / "README.md"
index_txt = "\n".join(index).strip()
readme_contents = readme.open().read()
readme.open("w").write(index_re.sub(index_txt, readme_contents))
else:
print("\n".join(index))
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
51c5e4e1a1e7906e7623520ad5ad5c8651ef2141
|
4bd4bacecee33cada173e427b5ecb1d758bafaad
|
/src/scalarizr/api/binding/jsonrpc_http.py
|
3d2e776f7689332ee98d02399e6aaae3de72a773
|
[] |
no_license
|
kenorb-contrib/scalarizr
|
3f2492b20910c42f6ab38749545fdbb79969473f
|
3cc8b64d5a1b39c4cf36f5057f1a6a84a9a74c83
|
refs/heads/master
| 2022-11-26T10:00:58.706301
| 2017-11-02T16:41:34
| 2017-11-02T16:41:34
| 108,550,233
| 0
| 2
| null | 2020-07-24T11:05:36
| 2017-10-27T13:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,235
|
py
|
from __future__ import with_statement
'''
JSON-RPC over HTTP.
Public Scalarizr API
- Simple to Learn
- Simple to Use
'''
from __future__ import with_statement
import os
import posixpath
import binascii
import logging
import sys
import time
import urllib2
import hashlib
import hmac
try:
import json
except ImportError:
import simplejson as json
from scalarizr import rpc
from scalarizr.util import cryptotool
from scalarizr.bus import bus
LOG_CATEGORY = 'scalarizr.api'
LOG = logging.getLogger(LOG_CATEGORY)
class Security(object):
DATE_FORMAT = "%a %d %b %Y %H:%M:%S UTC"
def __init__(self, crypto_key_path):
self.crypto_key_path = crypto_key_path
def _read_crypto_key(self):
return binascii.a2b_base64(open(self.crypto_key_path).read().strip())
def sign(self, data, key, timestamp=None):
date = time.strftime(self.DATE_FORMAT, timestamp or time.gmtime())
canonical_string = data + date
digest = hmac.new(key, canonical_string, hashlib.sha1).digest()
sign = binascii.b2a_base64(digest)
if sign.endswith('\n'):
sign = sign[:-1]
return sign, date
def check_signature(self, signature, data, timestamp):
calc_signature = self.sign(data, self._read_crypto_key(),
time.strptime(timestamp, self.DATE_FORMAT))[0]
assert signature == calc_signature, "Signature doesn't match"
def decrypt_data(self, data):
try:
return cryptotool.decrypt(data, self._read_crypto_key())
except:
raise rpc.InvalidRequestError('Failed to decrypt data')
def encrypt_data(self, data):
try:
return cryptotool.encrypt(data, self._read_crypto_key())
except:
raise rpc.InvalidRequestError('Failed to encrypt data. Error: %s' % (sys.exc_info()[1], ))
class WsgiApplication(Security):
def __init__(self, req_handler, crypto_key_path):
Security.__init__(self, crypto_key_path)
self.req_handler = req_handler
def __call__(self, environ, start_response):
try:
length = int(environ['CONTENT_LENGTH'])
data = environ['wsgi.input'].read(length)
except:
data = ''
try:
try:
self.check_signature(environ['HTTP_X_SIGNATURE'], data, environ['HTTP_DATE'])
data = self.decrypt_data(data)
except:
start_response('400 Bad request', [], sys.exc_info())
return str(sys.exc_info()[1])
req = json.loads(data)
with self.handle_meta_params(req):
result = self.req_handler.handle_request(req, namespace=environ['PATH_INFO'][1:] or None)
result = self.encrypt_data(result)
sig, date = self.sign(result, self._read_crypto_key())
headers = [('Content-type', 'application/json'),
('Content-length', str(len(result))),
('X-Signature', sig),
('Date', date)]
start_response('200 OK', headers)
return [result, ]
except:
if sys.exc_info()[0] in (SystemExit, KeyboardInterrupt):
raise
start_response('500 Internal Server Error', [], sys.exc_info())
LOG.exception('Unhandled exception')
return ''
def handle_meta_params(self, req):
if 'params' in req and '_platform_access_data' in req['params']:
pl = bus.platform
pl.set_access_data(req['params']['_platform_access_data'])
del req['params']['_platform_access_data']
return self
def __enter__(self):
return self
def __exit__(self, *args):
pl = bus.platform
#pl.clear_access_data()
# Commented to allow async=True processing
class HttpServiceProxy(rpc.ServiceProxy, Security):
def __init__(self, endpoint, crypto_key_path, server_id=None, sign_only=False):
Security.__init__(self, crypto_key_path)
rpc.ServiceProxy.__init__(self)
self.endpoint = endpoint
self.server_id = server_id
self.sign_only = sign_only
def exchange(self, jsonrpc_req):
if self.crypto_key_path:
if not self.sign_only:
jsonrpc_req = self.encrypt_data(jsonrpc_req)
sig, date = self.sign(jsonrpc_req, self._read_crypto_key())
headers = {
'Date': date,
'X-Signature': sig
}
else:
headers = {}
if self.server_id:
headers['X-Server-Id'] = self.server_id
namespace = self.local.method[0] if len(self.local.method) > 1 else ''
http_req = urllib2.Request(posixpath.join(self.endpoint, namespace), jsonrpc_req, headers)
try:
jsonrpc_resp = urllib2.urlopen(http_req).read()
if self.crypto_key_path and not self.sign_only:
return self.decrypt_data(jsonrpc_resp)
else:
return jsonrpc_resp
except urllib2.HTTPError, e:
raise Exception('%s: %s' % (e.code, e.read()))
|
[
"kenorb@users.noreply.github.com"
] |
kenorb@users.noreply.github.com
|
6553ce680192ec4b4d05d75b017b92aba2a60cfc
|
6bd4390463b42828dd3b8226f6b168a4c976abc1
|
/Problem Solving/DONGHAK/[boj]/[boj]15829.py
|
651311fc8bdab4f4f41d520ca07f9dbecd62143d
|
[] |
no_license
|
SSD-2021/backend-study
|
e123cb76ed75190542350f8ea62e09f3f68c04ea
|
7a3138d44daea67beebfab0c74c8bfb2b610050f
|
refs/heads/master
| 2023-02-18T13:21:06.860116
| 2021-01-23T06:52:18
| 2021-01-23T06:52:18
| 323,662,633
| 0
| 0
| null | 2021-01-23T06:52:19
| 2020-12-22T15:24:49
|
Python
|
UTF-8
|
Python
| false
| false
| 181
|
py
|
L = int(input())
S = input()
sum = 0
M = 1234567891
r = 31
i = 0
for element in S:
temp = ord(element) - 96
sum += temp * (r ** i)
i += 1
answer = sum % M
print(answer)
|
[
"donghark03@naver.com"
] |
donghark03@naver.com
|
543c70702d7fc5ab4eff9fbab2702cb7e6d4b331
|
60b44961da72f61f19b4d999c2b2417b3e75c114
|
/crud_python_sqlite.py
|
34e5b02df6cd55d7eca3e44d2fecf7886733e9c7
|
[] |
no_license
|
bia-rodrig/CRUD-python-sqlite3
|
28183437bcc4e028f2d6083d24ee9f7095bb4885
|
9ef4fd0fc9887412818d8b946c19340cebe83aca
|
refs/heads/master
| 2023-04-12T23:03:39.907505
| 2023-04-10T00:39:14
| 2023-04-10T00:39:14
| 199,316,563
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,365
|
py
|
import os, sqlite3
#Bianca Rodrigues
#GitHub: https://github.com/bia-rodrig/
#Check if DB exists and create if doesn't
'''
check = os.path.exists('db_test.db')
if (not check):
file = open('db_test.db', 'w')
file.close()
print('created')
# connection to DB
connection = sqlite3.connect('db_test.db')
# SQLite commands cursor
cursor = connection.cursor()
#Create query to table
sql = 'CREATE TABLE IF NOT EXISTS Contacts (ID INTEGER PRIMARY KEY AUTOINCREMENT, NAME VARCHAR(100), AGE INT, PHONE VARCHAR(20))'
# Table Name: Contacts
# Colunms:
# ID: Integer
# Name: varchar(100)
# PHONE: VARCHAR(20)
'''
def create_db(db_file):
file = open(db_file, 'w')
file.close()
connection = sqlite3.connect(db_file)
cursor = connection.cursor()
sql = 'CREATE TABLE IF NOT EXISTS Contacts (ID INTEGER PRIMARY KEY AUTOINCREMENT, NAME VARCHAR(100), PHONE VARCHAR(20))'
cursor.execute(sql)
connection.close()
def check_if_exists(db_file, name):
connection = sqlite3.connect(db_file)
cursor = connection.cursor()
cursor.execute('SELECT * FROM Contacts WHERE NAME=?', (name,))
result = cursor.fetchall()
if (len(result) > 0):
return True
else: return False
def insert_contact(db_file, name, phone):
connection = sqlite3.connect(db_file)
cursor = connection.cursor()
cursor.execute('INSERT INTO Contacts (NAME, PHONE) VALUES (?, ?)', (name, phone))
connection.commit()
connection.close()
def update_contact(db_file, id_var, name, phone):
connection = sqlite3.connect(db_file)
cursor = connection.cursor()
cursor.execute('UPDATE Contacts set NAME = ?, PHONE = ? where ID=?', (name, phone, id_var))
connection.commit()
connection.close()
print('Contact updated\n')
def search_contact(db_file, name):
connection = sqlite3.connect(db_file)
cursor = connection.cursor()
sql = 'SELECT * FROM Contacts Where NAME LIKE \'%' + name +'%\''
cursor.execute(sql)
result = cursor.fetchall()
connection.close()
return result
def delete(db_file, contact_id):
connection = sqlite3.connect(db_file)
cursor = connection.cursor()
cursor.execute('DELETE FROM Contacts where ID=?', (contact_id,))
connection.commit()
connection.close()
def list_contacts(db_name):
connection = sqlite3.connect(db_name)
cursor = connection.cursor()
cursor.execute('SELECT * from Contacts')
contacts = cursor.fetchall()
connection.close()
return contacts
|
[
"biancar1987@gmail.com"
] |
biancar1987@gmail.com
|
3512d17e90d7345d2358dba6a9babeb8df90f39f
|
a448e7f20efdf74dc095ba4ac3635e676555f433
|
/homework03/life-gui.py
|
b08e5744508e84e518569ece233f4ee08e64f3ea
|
[] |
no_license
|
vidyakov/cs102
|
b4aeed18c1ff1cf98a000b2947610267ecc1d9e0
|
46416d648f8b4cf36142d375743822a181e02d0c
|
refs/heads/master
| 2020-08-06T06:16:57.234147
| 2019-11-16T16:46:36
| 2019-11-16T16:46:36
| 212,868,061
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
import pygame
from pygame.locals import *
from life import GameOfLife
from ui import UI
class GUI(UI):
def __init__(self, life: GameOfLife, cell_size: int=10, speed: int=10) -> None:
super().__init__(life)
def draw_lines(self) -> None:
# Copy from previous assignment
pass
def draw_grid(self) -> None:
# Copy from previous assignment
pass
def run(self) -> None:
# Copy from previous assignment
pass
|
[
"noreply@github.com"
] |
vidyakov.noreply@github.com
|
00f77be2e48fc00606f3dbbbd401874c87aa9f4f
|
23481678e7ba1f3df89c9d4d9c5651123c60e119
|
/BackUpFiles.py
|
0097845fe52517faffe18ed40c2bc59954db8043
|
[] |
no_license
|
yusuf17-10/C99
|
a7307bd4390949def3d0377dbc9bb7d775c0dd47
|
449ff1231a338d2d9b9844f381db72eb95f24287
|
refs/heads/master
| 2023-03-11T22:08:11.067870
| 2021-02-25T15:46:18
| 2021-02-25T15:46:18
| 342,294,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
import shutil
import os
source=input("Enter The name Of source Directory : ")
destination=input("Enter The name Of Destination Directory : ")
source = source+"/"
destination=destination+"/"
listOfFiles=os.listdir(source)
for file in listOfFiles :
shutil.copy(source+file,destination)
|
[
"vadapallimumtaj123@gmail.com"
] |
vadapallimumtaj123@gmail.com
|
bb0e2091fccf56d46ed7d6833cf0c7ab7feb4ea2
|
9fb8f9c2ffc0e417b1ec3536e6f8b1336c909f23
|
/panel/superadmin/migrations/0024_remove_uniuni_username.py
|
829773434231ac0eaac04eee48d1e00194c7dc89
|
[] |
no_license
|
Hiba-Mahboob/fyp
|
55d09cc07dac7c6a6a9de7547438dee5d71ead34
|
d78d92799927306dc2802206c55d08b69438daa1
|
refs/heads/master
| 2023-08-01T19:51:53.133411
| 2021-09-20T09:19:50
| 2021-09-20T09:19:50
| 406,693,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
# Generated by Django 3.2.7 on 2021-09-20 02:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('superadmin', '0023_uniuni'),
]
operations = [
migrations.RemoveField(
model_name='uniuni',
name='username',
),
]
|
[
"17b-106-se@students.uit.edu"
] |
17b-106-se@students.uit.edu
|
6d62f3e0f25fe5acad5b05c23847351e0515d4ca
|
e46f47b4838199e1be222c544be1a4a097b87b26
|
/src/models/vrinv/train.py
|
0766af54dd6f9bc249bf7fae1b2fc3eb16457716
|
[
"MIT"
] |
permissive
|
alexmlamb/SPUDT
|
18291abae1b581f6aa049af40885053617db7732
|
5d4ff32c9e37a485c176d3e68c58723e544972e5
|
refs/heads/master
| 2021-05-25T19:32:16.889764
| 2020-04-08T22:51:15
| 2020-04-08T22:51:15
| 253,891,846
| 0
| 0
|
MIT
| 2020-04-07T19:26:11
| 2020-04-07T19:26:10
| null |
UTF-8
|
Python
| false
| false
| 5,631
|
py
|
import time
import torch
import torch.nn.functional as F
from torch import optim
from sklearn.decomposition import PCA
import matplotlib.pylab as plt
from common.util import sample, save_models, one_hot_embedding
from common.initialize import initialize, infer_iteration
from . import model
def gp_loss(x, y, d, device):
    """One-sided gradient penalty on random interpolations between x and y.

    Penalizes (||grad d(z)|| - 1)^2 only where the norm exceeds 1, with
    z a per-sample random mix of the (detached) inputs.
    """
    n = x.size(0)
    alpha = torch.rand(n, 1, device=device)
    # .data detaches the endpoints, matching WGAN-GP practice.
    mixed = alpha * x.data + (1 - alpha) * y.data
    mixed.requires_grad = True
    scores = d(mixed)
    ones = torch.ones(scores.size(), device=device)
    grads = torch.autograd.grad(outputs=scores, inputs=(mixed,),
                                grad_outputs=ones,
                                create_graph=True, retain_graph=True,
                                only_inputs=True)[0]
    # One-sided: norms below 1 incur no penalty.
    excess = torch.clamp(grads.norm(2, dim=1) - 1, 0)
    return (excess ** 2).mean()
def compute_loss(x, xp, encoder, contrastive, device):
z = encoder(x)
zp = encoder(xp)
ztrue = torch.randint(z.shape[1], size=(z.shape[0],))
ztrue = one_hot_embedding(ztrue, z.shape[1]).to(device)
p = contrastive(z)
closs = p.mean()
dloss = F.mse_loss(zp, z).mean()
return dloss, closs
def contrastive_loss(x, n_classes, encoder, contrastive, device):
enc = encoder(x)
z = torch.randint(n_classes, size=(enc.shape[0],))
z = one_hot_embedding(z, n_classes).to(device)
cz = contrastive(z).mean()
cenc = contrastive(enc).mean()
gp = gp_loss(enc, z, contrastive, device)
return cz, cenc, gp
def define_models(shape1, **parameters):
encoder = model.Encoder(shape1[0], **parameters)
contrastive = model.Contrastive(**parameters)
return {
'encoder': encoder,
'contrastive': contrastive,
}
@torch.no_grad()
def evaluate_clusters(visualiser, encoder, target, label, id):
enc = encoder(target)
pca = PCA(2)
emb = pca.fit_transform(enc.reshape(enc.shape[0], -1).cpu().squeeze().numpy())
fig = plt.figure()
colors = [f'C{c}' for c in label.cpu().numpy()]
plt.scatter(*emb.transpose(), c=colors)
visualiser.matplotlib(fig, f'Embeddings {id}', None)
plt.clf()
plt.close(fig)
@torch.no_grad()
def evaluate_accuracy(visualiser, i, loader, classifier, nlabels, id, device):
    """Cluster-style accuracy of `classifier` over `loader`.

    Predictions are grouped by argmax class; each predicted group is
    credited with its majority true label. Returns the accuracy as a
    numpy scalar and plots it on `visualiser`.
    """
    labels = []
    preds = []
    for data, label in loader:
        data, label = data.to(device), label.to(device)
        # Single forward pass. The original also computed
        # F.softmax(classifier(data), 1) and immediately discarded it;
        # argmax is invariant under softmax, so one raw pass suffices.
        pred = classifier(data)
        labels += [label]
        preds += [pred]
    labels = torch.cat(labels)
    preds = torch.cat(preds).argmax(1)
    correct = 0
    total = 0
    for j in range(nlabels):
        label = labels[preds == j]
        if len(label):
            # Majority true-label count inside predicted cluster j.
            correct += one_hot_embedding(label, nlabels).sum(0).max()
            total += len(label)
    accuracy = correct / total
    accuracy = accuracy.cpu().numpy()
    visualiser.plot(accuracy, title=f'Classifier accuracy {id}', step=i)
    return accuracy
def evaluate(visualiser, data, datap, id):
visualiser.image(data.cpu().detach().numpy(), f'target{id}', 0)
visualiser.image(datap.cpu().detach().numpy(), f'target p {id}', 0)
def train(args):
parameters = vars(args)
train_loader1, test_loader1 = args.loaders1
models = define_models(**parameters)
initialize(models, args.reload, args.save_path, args.model_path)
encoder = models['encoder'].to(args.device)
contrastive = models['contrastive'].to(args.device)
print(encoder)
print(contrastive)
optim_encoder = optim.Adam(encoder.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optim_contrastive = optim.Adam(contrastive.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
iter1 = iter(train_loader1)
iteration = infer_iteration(list(models.keys())[0], args.reload, args.model_path, args.save_path)
mone = torch.FloatTensor([-1]).to(args.device)
t0 = time.time()
for i in range(iteration, args.iterations):
encoder.train()
contrastive.train()
for _ in range(args.d_updates):
batchx, iter1 = sample(iter1, train_loader1)
datax = batchx[0].float().to(args.device)
optim_contrastive.zero_grad()
ploss, nloss, gp = contrastive_loss(datax, args.n_classes, encoder, contrastive, args.device)
ploss.backward()
nloss.backward(mone)
(1 * gp).backward()
optim_contrastive.step()
optim_encoder.zero_grad()
batchx, iter1 = sample(iter1, train_loader1)
datax = batchx[0].float().to(args.device)
dataxp = batchx[1].float().to(args.device)
dloss, closs = compute_loss(datax, dataxp, encoder, contrastive, args.device)
(args.ld * dloss + closs).backward()
optim_encoder.step()
if i % args.evaluate == 0:
encoder.eval()
contrastive.eval()
print('Iter: {}'.format(i), end=': ')
evaluate(args.visualiser, datax, dataxp, 'x')
_acc = evaluate_accuracy(args.visualiser, i, test_loader1, encoder, args.n_classes, 'x', args.device)
print('disc loss: {}'.format((ploss - nloss).detach().cpu().numpy()), end='\t')
print('gp: {}'.format(gp.detach().cpu().numpy()), end='\t')
print('positive dist loss: {}'.format(dloss.detach().cpu().numpy()), end='\t')
print('contrast. loss: {}'.format(closs.detach().cpu().numpy()), end='\t')
print('Accuracy: {}'.format(_acc))
t0 = time.time()
save_models(models, i, args.model_path, args.checkpoint)
|
[
"samuel.lavoie.m@gmail.com"
] |
samuel.lavoie.m@gmail.com
|
b3ff49b3d506d8d2a4eba691ca27a8218b4b262b
|
99d8059d9ad1292c9d7a2f47dff64ee1f951aabc
|
/test.py
|
780ee90a0df6fff0dbb96ea45cb20637e84a93f2
|
[
"CC-BY-4.0"
] |
permissive
|
ibLeDy/the-super-tiny-compiler
|
94f38bc00ca394a79dcf5a3049056d62e316ff01
|
c4804537def384e4bff896bfd2473efe283f87ec
|
refs/heads/master
| 2022-04-25T19:56:04.537359
| 2020-04-27T16:01:47
| 2020-04-27T16:05:27
| 259,379,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,111
|
py
|
import the_super_tiny_compiler
input_string = "(add 2 (subtract 4 2))"
output_string = "add(2, subtract(4, 2));"
expected_tokens = [
{
"type": "paren",
"value": "("
},
{
"type": "name",
"value": "add"
},
{
"type": "number",
"value": "2"
},
{
"type": "paren",
"value": "("
},
{
"type": "name",
"value": "subtract"
},
{
"type": "number",
"value": "4"
},
{
"type": "number",
"value": "2"
},
{
"type": "paren",
"value": ")"
},
{
"type": "paren",
"value": ")"
}
]
expected_ast = {
"type": "Program",
"body": [
{
"type": "CallExpression",
"name": "add",
"params": [
{
"type": "NumberLiteral",
"value": "2"
},
{
"type": "CallExpression",
"name": "subtract",
"params": [
{
"type": "NumberLiteral",
"value": "4"
},
{
"type": "NumberLiteral",
"value": "2"
}
]
}
]
}
]
}
expected_new_ast = {
"type": "Program",
"body": [
{
"type": "ExpressionStatement",
"expression": {
"type": "CallExpression",
"callee": {
"type": "Identifier",
"name": "add"
},
"arguments": [
{
"type": "NumberLiteral",
"value": "2"
},
{
"type": "CallExpression",
"callee": {
"type": "Identifier",
"name": "subtract"
},
"arguments": [
{
"type": "NumberLiteral",
"value": "4"
},
{
"type": "NumberLiteral",
"value": "2"
}
]
}
]
}
}
]
}
def test_compiler():
    """End-to-end check of every compiler stage against the fixtures above."""
    tokens = the_super_tiny_compiler.tokenizer(input_string)
    ast = the_super_tiny_compiler.parser(tokens)
    new_ast = the_super_tiny_compiler.transformer(ast)
    generated_code = the_super_tiny_compiler.code_generator(new_ast)
    output = the_super_tiny_compiler.compiler(input_string)
    # Each intermediate representation must match its fixture, and the
    # one-shot compiler() must agree with the staged pipeline.
    assert tokens == expected_tokens
    assert ast == expected_ast
    assert new_ast == expected_new_ast
    assert generated_code == output_string
    assert output == output_string
|
[
"deejaynof@gmail.com"
] |
deejaynof@gmail.com
|
4f6394640fbf025002650fd3f5fe2e99c40efe6d
|
de56f06af1314640e2c5d2d24c02cd02281e3624
|
/mini_pipes.py
|
ac5247a4763648a7c9118ea36f5267e527f44396
|
[] |
no_license
|
pts/staticpython
|
7d21423567581f9f8eda4d60120232e992cc21b1
|
1bc021851823cbbc38c0dbd0790a252b760e9beb
|
refs/heads/master
| 2023-03-06T19:12:01.918433
| 2023-02-21T21:48:08
| 2023-02-21T21:49:06
| 45,545,293
| 45
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
"""pipes module for StaticPython."""
# Safe unquoted
_safechars = frozenset('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_@%-+=:,./')
def quote(file):
"""Return a shell-escaped version of the file string."""
for c in file:
if c not in _safechars:
break
else:
if not file:
return "''"
return file
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + file.replace("'", "'\"'\"'") + "'"
|
[
"pts@fazekas.hu"
] |
pts@fazekas.hu
|
b1c4ef2d344d091e872bed1911b0122968ab3435
|
b15bc93d979cb830a9f1fdd1da4dbf95758be080
|
/galaxy-dist/eggs/bx_python-0.7.2-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/bed_coverage.py
|
c4bd4b0311d9b2e13928abc21e9da2635cb6634b
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
najoshi/ucd-biocore-galaxy-command-line
|
2faa7fe8b5881e6bc89b9f7d44ea4d7e54cd6d0e
|
ddc9c6113148b9c9f06ea6d552ac348852a37d25
|
refs/heads/master
| 2020-07-09T05:01:01.202511
| 2016-09-07T22:44:36
| 2016-09-07T22:44:36
| 67,649,050
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
#!/afs/bx.psu.edu/project/pythons/py2.7-linux-x86_64-ucs4/bin/python2.7
"""
Print number of bases covered by all intervals in a bed file (bases covered by
more than one interval are counted only once). Multiple bed files can be
provided on the command line or to stdin.
usage: %prog bed files ...
"""
import psyco_full
import sys
from bx.bitset import BinnedBitSet
from bx.bitset_builders import *
from itertools import *
# Python 2 script: `imap` and the bare `print` statement below are py2-only.
bed_filenames = sys.argv[1:]
if bed_filenames:
    # Lazily concatenate all named bed files into one input stream.
    # NOTE: `input` shadows the builtin of the same name.
    input = chain( * imap( open, bed_filenames ) )
else:
    input = sys.stdin
# One bitset per chromosome; a set bit marks a covered base.
bitsets = binned_bitsets_from_file( input )
total = 0
for chrom in bitsets:
    # count_range counts set bits, so overlapping intervals are counted once.
    total += bitsets[chrom].count_range( 0, bitsets[chrom].size )
print total
|
[
"najoshi@ucdavis.edu"
] |
najoshi@ucdavis.edu
|
e233dae0ba7bdbac9a4a8bddf19892c13945c25a
|
0322a2ca41e9d1d1cad1f74f96dd6e31650f51c1
|
/main.py
|
7a036d02c58719daaeb64939e9d009727b746818
|
[] |
no_license
|
standardgalactic/Gibbs-sampling
|
b107e632f14f951a82e53252bc431b06fda83b5f
|
23ec0c6475003eb019dd13598bbb130d60e29ee1
|
refs/heads/master
| 2022-02-15T09:20:51.347003
| 2015-12-01T23:01:23
| 2015-12-01T23:01:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,602
|
py
|
__author__ = 'chinna'
import theano
from theano import tensor as T
import numpy as np
import matplotlib.pyplot as plt
from theano import shared
from theano import function
import scipy as sp
from scipy import signal
from PIL import Image
class gibbs_sampler_class:
def __init__(self,config):
print "Creating Gibbs sampler..."
self.config = config
np.random.seed(self.config['seed'])
self.d = config['grid_size']
self.X = np.zeros((self.d+2,self.d+2))
p = config['p']
d = self.d
self.X[1:d+1,1:d+1] = np.random.choice(np.array([1,-1]),size=(d,d),p=[p,1-p])#2*np.random.binomial(1,config['p'],(self.d,self.d)) - 1
self.row = 1
self.col = 0
print "Grid size :",self.d,'x',self.d
print "initilization done..."
# iterator ( row wise )
def get_next(self):
self.col += 1
# end of matrix, start over
if self.row == self.d and self.col > self.d:
self.row = self.col = 1
# end of row. go to next row
if self.col > self.d:
self.col = 1
self.row += 1
return self.row, self.col
def nbr_sum(self,X,i,j):
return X[i,j+1] + X[i,j-1] + X[i+1,j] + X[i-1,j]
def sample(self,i,j):
X,t = self. X,self.config['theta']
s = self.nbr_sum(X,i,j)
p = np.exp(t*s)/(np.exp(t*s) + np.exp(-t*s))
return np.random.choice(np.array([1,-1]),size=(1,),p=[p,1-p])#2*np.random.binomial(1,p) - 1
def plot(self,file,X):
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111)
ax.set_title('colorMap')
plt.pcolor(X,cmap='Greys')#'Reds')
plt.savefig(file)
print "plot saved as :",file
def run(self):
self.plot(self.config['name'] + "_before.jpeg",self.X[1:self.d+1,1:self.d+1])
L_heatmap = []
for k in range(self.config['iterations']+1):
for m_t in range((self.d**2)):
i,j = self.get_next()
self.X[i,j] = self.sample(i,j)
L_heatmap.append(self.X[1:self.d+1,1:self.d+1].flatten())
if k % self.config['visualize_checkpt'] == 0:
print "#Iterations completed:",k
self.plot(self.config['name'] + "_itr_"+str(k)+".jpeg",self.X[1:self.d+1,1:self.d+1])
self.plot("vanilla_gibbs_heat_map.jpeg",np.array(L_heatmap))
""" ================================================= """
""" ================================================= """
""" =========== Block-Gibbs sampling =============== """
""" ================================================= """
""" ================================================= """
class node:
num_nodes = 0
def __init__(self,theta,d=1):
self.__class__.num_nodes += 1
#print "creating node...",self.__class__.num_nodes
self.node_id = self.__class__.num_nodes
self.t = theta
p = 0.5
self.x = np.random.choice(np.array([1,-1]),size=(2,),p=[p,1-p])
self.l = None
self.r = None
x = np.exp(self.t)
self.bf = np.array([[x,1/x],[1/x,x]])
def update_bf(self,a,b):
t = self.t
for i in range(2):
for j in range(2):
x1 = 2*i - 1
x2 = 2*j - 1
self.bf[i][j] *= np.exp(t*x1*a + t*x2*b)
def normalize_bf(self):
self.bf = self.bf/(self.bf.sum())
class block_gibbs_sampler_class:
def __init__(self,config):
self.config = config
print "Initializing for belief prop..."
np.random.seed(config['seed'])
self.d = config['num_nodes']
self.n_m = config['num_messages']
self.k = config['variable_cardinality']
self.t = config['theta']
x = np.exp(self.t)
self.phi = np.array([[x,1/x],[1/x,x]])
self.nodes = []#np.array([node(t) for i in range(d)])
self.root = [None]*2
#self.msgs = np.array([belief(k) for i in range(n_m)])
def plot(self,file,X):
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111)
ax.set_title('colorMap')
plt.pcolor(X,cmap='Greys')#'Reds')
plt.savefig(file)
print "plot saved as :",file
def add_nodes_to_left(self,root,d):
itr = root
for i in range(d):
itr.l = node(self.t)
itr = itr.l
return itr
def add_nodes_to_right(self,root,d):
itr = root
for i in range(d):
itr.r = node(self.t)
itr = itr.r
return itr
#comb structure
def create_clique_tree(self,d):
root = node(self.t)
self.add_nodes_to_left(root, d-2)
itr = root
for i in range(d):
itr.r = node(self.t)
itr = itr.r
if i%2 != 0:
self.add_nodes_to_left(itr,d-1)
return root
#update the phi / beliefs in the function
#from the nbr samples
def update_clique_tree(self,root,samples):
assert len(samples) == self.d / 2
# treat the first branch alone differently
# due to the assymetry in the clique tree
tmp_itr = root.l
for j in range(self.d - 3):
tmp_itr.update_bf(samples[0][j],0)
if j == self.d - 4:
tmp_itr.update_bf(0,samples[0][j+1])
tmp_itr.update_bf(0,samples[0][j+2])
tmp_itr = tmp_itr.l
sample_comb = 0
itr = root.r
for i in range(self.d-1):
if i%2 == 0:
itr.update_bf(0,samples[sample_comb][0])
else:
tmp_itr = itr.l
for j in range(self.d - 2):
tmp_itr.update_bf(samples[sample_comb][j],0)
tmp_itr.update_bf(0,samples[sample_comb+1][j])
if j == self.d - 3:
tmp_itr.update_bf(0,samples[sample_comb+1][j+1])
tmp_itr = tmp_itr.l
sample_comb += 1
itr = itr.r
def gen_message_down_bp(self,root,m):
# include the incoming the message over the left item
for i in range(2):
for j in range(2):
root.bf[i][j] *= m[i]
# gen message by marglinizing the left item
return root.bf.sum(axis=0)
#preorder traversal
def downward_msg_pass_util(self,root,m,p_type):
if root == None:
return None
#print "visiting node id:",root.node_id
#get message
new_m = self.gen_message_down_bp(root,m)
#pass the message to the children
self.downward_msg_pass_util(root.l, new_m,0)
self.downward_msg_pass_util(root.r, new_m,1)
def downward_msg_pass(self,root):
print "Triggering downward pass..."
#Gen explicit msgs for the root due to the assymtry
rmsg = root.bf.sum(axis=0)
lmsg = root.bf.sum(axis=1)
self.downward_msg_pass_util(root.r,rmsg,0)
self.downward_msg_pass_util(root.l,lmsg,0)
print "downward pass done..."
def gen_message_up_bp(self,root,m):
# include the incoming the message over the right item
for i in range(2):
for j in range(2):
root.bf[i][j] *= m[j]
# gen message by marglinizing the right item
return root.bf.sum(axis=1)
# post order traversal
def upward_msg_pass_util(self,root):
if root == None:
return np.ones(2)
lmsg = self.upward_msg_pass_util(root.l)
rmsg = self.upward_msg_pass_util(root.r)
#print "visiting node id :",root.node_id, lmsg
new_m = self.gen_message_up_bp(root, lmsg * rmsg)
return new_m
def upward_msg_pass(self,root):
#print "Triggering upward pass..."
lmsg = self.upward_msg_pass_util(root.r)
rmsg = self.upward_msg_pass_util(root.l)
# dealing with root here due to the assymtry
for i in range(2):
for j in range(2):
root.bf[i][j] *= lmsg[j]*rmsg[i]
#print "upward pass done..."
def get_sample_with(self,p):
return np.random.choice(np.array([1,-1]),size=(1,),p=[p,1-p])
def get_samples_util(self,root,p_x,s_x,L_samples):
if root == None:
return
p_child_parent = root.bf[(s_x+1)/2,:] # choose the row based on s_x
p_child_given_parent = p_child_parent / p_x[(s_x+1)/2]
p_child_given_parent = p_child_given_parent/p_child_given_parent.sum()
new_sample = self.get_sample_with(p_child_given_parent[0,1])
L_samples.append(new_sample)
#print "appending sample",new_sample, len(L_samples), root.node_id
p_child = (root.bf.sum(axis=0)) #marginalize left item
p_child = p_child/p_child.sum()
self.get_samples_util(root.l, p_child, new_sample, L_samples)
self.get_samples_util(root.r, p_child, new_sample, L_samples)
def get_samples(self,root):
#print "getting samples...."
# get two samples from the root node and give one to each child
L_samples = []
p_x11 = root.bf.sum(axis=1)
p_x11 = p_x11/p_x11.sum()
s_x11 = self.get_sample_with(p_x11[1])
p_x21 = root.bf.sum(axis=0)
p_x21 = p_x21/p_x21.sum()
s_x21 = self.get_sample_with(p_x21[1])
L_samples.append(s_x11)
L_samples.append(s_x21)
self.get_samples_util(root.l,p_x21,s_x21,L_samples)
self.get_samples_util(root.r,p_x11,s_x11,L_samples)
assert len(L_samples) == (self.d**2)/2
formatted_samples = np.ones((self.d/2, self.d-1))
d = self.d
for i in range(self.d/2):
for j in range(d-1):
formatted_samples[-i-1,j] = L_samples[i*d+j]
if j == d-2:
formatted_samples[-i-1,j] = L_samples[i*d+d-1]
#print formatted_samples[-i-1]
#print "sampling done...",len(L_samples)
return formatted_samples, L_samples
def normlize_util(self,root):
if root == None:
return
root.normalize_bf()
self.normlize_util(root.l)
self.normlize_util(root.r)
def normalize(self,root):
print "Normalizing beliefs..."
self.normlize_util(root)
def visualize(self,b,itr_num):
d = self.d
visual = np.zeros((d,d))
for j in range(d):
if j % 2 ==0:
for i in range(d-1):
visual[i][j] = b[0][(d*j)/2 + i]
visual[d-1][j] = b[1][-1 - (d*j)/2]
else:
visual[0][j] = b[0][d-1+d*(j/2)]
for i in range(1,d):
visual[i][j] = b[1][-1-i-d*(j/2)]
file = "block_gibbs_iteration_" + str(itr_num) + ".jpeg"
self.plot(file,visual)
def gen_heatmap(self,b,L_heatmap):
d = self.d
visual = np.zeros((d,d))
for j in range(d):
if j % 2 ==0:
for i in range(d-1):
visual[i][j] = b[0][(d*j)/2 + i]
visual[d-1][j] = b[1][-1 - (d*j)/2]
else:
visual[0][j] = b[0][d-1+d*(j/2)]
for i in range(1,d):
visual[i][j] = b[1][-1-i-d*(j/2)]
L_heatmap.append(visual.reshape((d*d,)))
def run(self):
print "running belief prop..."
form_samples_shape = (self.d/2, self.d-1)
p = 0.5
formatted_samples = np.random.choice(np.array([1,-1]),size=(form_samples_shape),p=[p,1-p])
L_heatmap = []
block = 0
for i in range(self.config['iterations']+1):
#print "iteration:",i
#block0
for j in range(2):
self.root[j] = self.create_clique_tree(self.d-1)
self.update_clique_tree(self.root[0],formatted_samples)
self.upward_msg_pass(self.root[0])
#self.downward_msg_pass(self.root1)
formatted_samples,b1_samples = self.get_samples(self.root[0])
#block1
self.update_clique_tree(self.root[1],formatted_samples)
self.upward_msg_pass(self.root[1])
formatted_samples,b2_samples = self.get_samples(self.root[1])
self.gen_heatmap([b1_samples,b2_samples],L_heatmap)
if i%self.config['visualize_checkpt'] == 0:
print "Running iteration :",i
self.visualize([b1_samples,b2_samples],i)
self.plot("block_gibbs_heat_map.jpeg",np.array(L_heatmap))
def run_sub_problem2():
config = {}
config['p'] = 0.5
config['iterations'] = 1000
config['visualize_checkpt'] = 100
config['grid_size'] = 30
config['theta'] = 0.5
config['seed'] = 42
config['name'] = 'vanilla_gibbs'
gibbs_sampler = gibbs_sampler_class(config)
gibbs_sampler.run()
def run_sub_problem3():
# problem config
config = {}
config['p'] = 0.5
config['iterations'] = 1000
config['visualize_checkpt'] = 100
config['grid_size'] = 30
config['theta'] = 0.5
config['seed'] = 42
config['num_nodes'] = config['grid_size']
config['num_messages'] = 4
config['variable_cardinality'] = 2
block_gibbs_sampler= block_gibbs_sampler_class(config)
block_gibbs_sampler.run()
if __name__ == "__main__":
run_sub_problem2()
run_sub_problem3()
|
[
"chinnadhurai@gmail.com"
] |
chinnadhurai@gmail.com
|
d35082dc40ef0bf6e3823cc7384a225c4bb6135b
|
79e19819aec49b500825f82a7de149eb6a0ba81d
|
/leetcode/24.py
|
ba48ddbc51a3aa7d331ae719f845d9b53bc1dee8
|
[] |
no_license
|
seoyeonhwng/algorithm
|
635e5dc4a2e9e1c50dc0c75d9a2a334110bb8e26
|
90406ee75de69996e666ea505ff5d9045c2ad941
|
refs/heads/master
| 2023-05-03T16:51:48.454619
| 2021-05-26T00:54:40
| 2021-05-26T00:54:40
| 297,548,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def swapPairs(self, head: ListNode) -> ListNode:
        """Swap every two adjacent nodes of a singly linked list in place."""
        # Dummy node removes the special case of swapping at the head.
        dummy = tail = ListNode(None)
        tail.next = head
        while head and head.next:
            # Detach the second node and splice it in front of the first.
            second = head.next
            head.next = second.next
            second.next = head
            tail.next = second
            # `head` is now the latter node of the swapped pair; advance.
            head = head.next
            tail = tail.next.next
        return dummy.next
|
[
"seoyeon@nowbusking.com"
] |
seoyeon@nowbusking.com
|
9efec025d99a7053e6a026371f77723b727c2fa6
|
1a5a9bfa6ee62c328fc6ab828ad743c555b0f23a
|
/catagory/JianzhiOffer/stage-04/0376-binary-tree-path-sum.py
|
09f548c21cfe7f1906cf346e4628da16006ad407
|
[] |
no_license
|
zzy1120716/my-nine-chapter
|
04b3e4d43a0d8086e5c958b81a3dc4356622d65f
|
c7bf3eed366b91d6bdebb79d0f11680cf7c18344
|
refs/heads/master
| 2020-03-30T03:07:14.748145
| 2019-05-15T13:07:44
| 2019-05-15T13:07:44
| 150,670,072
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
"""
376. 二叉树的路径和
中文English
给定一个二叉树,找出所有路径中各节点相加总和等于给定 目标值 的路径。
一个有效的路径,指的是从根节点到叶节点的路径。
样例
给定一个二叉树,和 目标值 = 5:
1
/ \
2 4
/ \
2 3
返回:
[
[1, 2, 2],
[1, 4]
]
"""
# Definition of TreeNode:
class TreeNode:
    """Binary-tree node holding a value and two child links."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
class Solution:
    """
    @param: root: the root of binary tree
    @param: target: An integer
    @return: all valid paths
    """
    def binaryTreePathSum(self, root, target):
        # Gather every root-to-leaf path whose values sum to `target`.
        ans = []
        self.helper(root, [], ans, target)
        return ans
    def helper(self, root, path, ans, target):
        # Depth-first walk; `path` holds the values on the current branch.
        if root is None:
            return
        path.append(root.val)
        is_leaf = root.left is None and root.right is None
        if is_leaf and root.val == target:
            ans.append(list(path))
        remaining = target - root.val
        self.helper(root.left, path, ans, remaining)
        self.helper(root.right, path, ans, remaining)
        # Undo this node before returning to the parent.
        path.pop()
|
[
"zzy1120716@126.com"
] |
zzy1120716@126.com
|
6b6760d54ab8ed4ae6efdc3eec0985701a2fb123
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/datashare/v20181101preview/get_data_set_mapping.py
|
55fc5d19fd9e4b4798021b855d7b238d175d2bb4
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 3,255
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDataSetMappingResult',
'AwaitableGetDataSetMappingResult',
'get_data_set_mapping',
]
@pulumi.output_type
class GetDataSetMappingResult:
"""
A data set mapping data transfer object.
"""
def __init__(__self__, kind=None, name=None, type=None):
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def kind(self) -> str:
"""
Kind of data set mapping.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the azure resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the azure resource
"""
return pulumi.get(self, "type")
class AwaitableGetDataSetMappingResult(GetDataSetMappingResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDataSetMappingResult(
kind=self.kind,
name=self.name,
type=self.type)
def get_data_set_mapping(account_name: Optional[str] = None,
data_set_mapping_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_subscription_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDataSetMappingResult:
"""
Use this data source to access information about an existing resource.
:param str account_name: The name of the share account.
:param str data_set_mapping_name: The name of the dataSetMapping.
:param str resource_group_name: The resource group name.
:param str share_subscription_name: The name of the shareSubscription.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['dataSetMappingName'] = data_set_mapping_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareSubscriptionName'] = share_subscription_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:datashare/v20181101preview:getDataSetMapping', __args__, opts=opts, typ=GetDataSetMappingResult).value
return AwaitableGetDataSetMappingResult(
kind=__ret__.kind,
name=__ret__.name,
type=__ret__.type)
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
73d70662fc18497745cf16fbd499aecf914b036e
|
9d68e4c8e210b4a25483887a5d10850bdbe0b712
|
/234.py
|
a75de8a7e1be722cd15b406f231464b698469669
|
[] |
no_license
|
kliner/leetcode
|
588f26e8d9a977ef8c581ba89165c9a1360187ac
|
4145d415dabb2e5e8195817b517e5a28e2bf216f
|
refs/heads/master
| 2020-12-24T16:15:31.060680
| 2016-02-25T15:38:29
| 2016-02-25T15:38:29
| 33,992,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
# Definition for singly-linked list.
class ListNode(object):
    """Singly linked list node with a value and a next pointer."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution(object):
    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool

        Collect the node values into a list and compare it with its
        reverse. The original allocated a full reversed copy of the list
        node-by-node (O(n) ListNode objects); this keeps O(n) time with a
        single value list and no dependency on the ListNode class.
        An empty list (head is None) is a palindrome, as before.
        """
        vals = []
        node = head
        while node is not None:
            vals.append(node.val)
            node = node.next
        return vals == vals[::-1]
if __name__ == '__main__':
    # Manual smoke test (Python 2: bare `print` statement syntax).
    test = Solution()
    n = ListNode(1)
    print test.isPalindrome(n)  # [1] -> True
    n.next = ListNode(6)
    print test.isPalindrome(n)  # [1, 6] -> False
    n.next.next = ListNode(1)
    print test.isPalindrome(n)  # [1, 6, 1] -> True
|
[
"kliner@live.cn"
] |
kliner@live.cn
|
23c24ca31a1dd9dc5822143c5c8eced6558c253d
|
8dc3e9887d3ab15215ba9406d4ee06fdb4c1ef46
|
/components/lib/filter_mongo.py
|
7d4dc3319f0fc6826bdc77ea942293ad90c15671
|
[] |
no_license
|
zmilan/plutonium
|
71568594f3048cc2d325f50be1cb09b7966c5c89
|
1df584ec481f7f9e51c54b2dfaa327cdcc7c23bc
|
refs/heads/master
| 2021-01-01T19:33:01.089534
| 2015-10-30T20:27:38
| 2015-10-30T20:27:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
# Module-level registry mapping filter names to their wrapped functions.
filters = {}


def filter(collection):
    """Decorator factory registering `func` in the `filters` registry.

    NOTE: intentionally shadows the builtin `filter` (module API name).
    `collection` is currently unused by the wrapper — the '__collection__'
    tagging it was meant for is disabled.
    """
    def register(func):
        def wrapped(**kw):
            return func(**kw)
        filters[func.__name__] = wrapped
        return wrapped
    return register
class Filter(object):
    """Wrapper around a registered filter: parses query metadata and can
    test whether a model document matches.

    Two construction spellings are accepted: a single `item` dict using
    dunder keys ('__filter__', '__key__', ...) or plain keyword arguments
    ('filter', 'key', ...). The pop order below is load-bearing: whatever
    remains in `item` after the pops becomes the arguments of the
    registered filter factory.
    """
    def __init__(self, item=None, **kw):
        if item:
            # '__stop__' is popped before the copy, so raw_filter keeps the
            # other dunder keys; full_name therefore embeds them too.
            self.stop = item.pop('__stop__', None)
            self.raw_filter = item.copy()
            self.name = item.pop('__filter__')
            self.full_name = str([self.name] + sorted(self.raw_filter.items()))
            self.key = item.pop('__key__', None)
            self.limit = item.pop('__limit__', None)
            self.collection = item.pop('__collection__')  # required: KeyError if absent
            self.skip = item.pop('__skip__', 0)
            # Leftover entries parameterize the registered filter.
            self.filter = filters[self.name](**item)
        else:
            # Keyword-argument spelling: same fields without the dunders.
            item = kw
            self.stop = item.pop('stop', None)
            self.raw_filter = item.copy()
            self.name = item.pop('filter')
            self.full_name = str([self.name] + sorted(self.raw_filter.items()))
            self.key = item.pop('key', None)
            self.limit = item.pop('limit', None)
            self.collection = item.pop('collection')
            self.skip = item.pop('skip', 0)
            self.filter = filters[self.name](**item)
    def pass_filter(self, model):
        """Return True when `model` matches every condition of this filter.

        Scalars compare by equality; dict values support the Mongo-style
        operators $gt / $lt / $gte / $lte (unknown operators are ignored).
        Soft-deleted models never match.
        """
        print('model en pass_filter', model)
        if '__deleted__' in model.keys():
            return False
        for key, value in self.filter.items():
            if key == '__collection__':
                continue
            v = model.get(key)
            if v is None:
                # Missing field: the condition cannot hold.
                return False
            if type(value) == int or type(value) == str:
                if v != value:
                    return False
            else:
                # Operator dict, e.g. {'$gt': 3}.
                for op, val in value.items():
                    if op == '$gt':
                        if v <= val:
                            return False
                    elif op == '$lt':
                        if v >= val:
                            return False
                    elif op == '$gte':
                        if v < val:
                            return False
                    elif op == '$lte':
                        if v > val:
                            return False
        return True
def _pass_filter(filter, model):
print('model en pass_filter', model)
if '__deleted__' in model.keys():
return False
for key, value in filter.items():
if key == '__collection__':
continue
v = model.get(key)
if v is None:
return False
if type(value) == int or type(value) == str:
if v != value:
return False
else:
for op, val in value.items():
if op == '$gt':
if v <= val:
return False
elif op == '$lt':
if v >= val:
return False
elif op == '$gte':
if v < val:
return False
elif op == '$lte':
if v > val:
return False
return True
|
[
"miguel.alarcos@gmail.com"
] |
miguel.alarcos@gmail.com
|
b1494fd9fd38995cfefa98dab60e75f584231fd0
|
ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86
|
/pychron/labbook/tasks/actions.py
|
4c142ac4a827f87beb652a7d47c0fe7d622d8d58
|
[
"Apache-2.0"
] |
permissive
|
UManPychron/pychron
|
2fb7e479a9f492423c0f458c70102c499e1062c4
|
b84c9fd70072f9cbda30abe2c471e64fe3dd75d8
|
refs/heads/develop
| 2022-12-03T23:32:45.579326
| 2020-01-29T19:02:20
| 2020-01-29T19:02:20
| 36,100,637
| 0
| 0
| null | 2015-05-23T00:10:06
| 2015-05-23T00:10:05
| null |
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pyface.tasks.action.task_action import TaskAction
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.envisage.resources import icon
class AddNoteAction(TaskAction):
    """pyface TaskAction that invokes the active task's ``add_note`` method."""
    name = 'Add Note'           # label shown in the menu/toolbar
    method = 'add_note'         # task method called when triggered
    image = icon('note-add')    # toolbar icon resource
class SaveNoteAction(TaskAction):
    """pyface TaskAction that invokes the active task's ``save_note`` method."""
    name = 'Save Note'
    method = 'save_note'
    image = icon('document-save')
class AddFolderAction(TaskAction):
    """pyface TaskAction that invokes the active task's ``add_folder`` method."""
    name = 'Add Folder'
    method = 'add_folder'
    image = icon('folder-new')
class PushAction(TaskAction):
    """pyface TaskAction that invokes the active task's ``push`` method."""
    name = 'Push'
    method = 'push'
    image = icon('arrow_up')
class PullAction(TaskAction):
    """pyface TaskAction that invokes the active task's ``pull`` method."""
    name = 'Pull'
    method = 'pull'
    image = icon('arrow_down')
class NewLabelAction(TaskAction):
    """pyface TaskAction that invokes the active task's ``new_label`` method."""
    name = 'New Label'
    method = 'new_label'
    image = icon('add')
# ============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
4e097a35fdfa9814cae2d318dd92445c9608917c
|
e4719910564b923cae492cf5f1ccd8590a6cda05
|
/app/preprocessings/livedoor.py
|
5c1ad0e4a7b1050a9188bf821ec836b056c22e4b
|
[] |
no_license
|
ninnin-engineer/natural-language-classifier
|
4f4b8865fa8a0cb453ae96ba7db76a0565795979
|
2ca0d8799b7a0ec60e86c83017eb696cf5874625
|
refs/heads/master
| 2021-08-16T17:33:24.208039
| 2017-11-20T05:59:15
| 2017-11-20T05:59:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
import glob
import os
import numpy as np
import pandas as pd
# Repository root (three levels above this file) and the livedoor corpus dir.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
LIVEDOOR_DIR = os.path.join(BASE_DIR, 'data', 'livedoor')


def load_df():
    """Load the livedoor news corpus into a shuffled DataFrame.

    Each article file has the layout: url, datetime, subject, body...
    The returned DataFrame has two columns:
      docs   -- article text (subject + "\\n" + body)
      labels -- integer category id (1-9)
    The rows are shuffled deterministically (seed 0).
    """
    category = {
        'dokujo-tsushin': 1,
        'it-life-hack': 2,
        'kaden-channel': 3,
        'livedoor-homme': 4,
        'movie-enter': 5,
        'peachy': 6,
        'smax': 7,
        'sports-watch': 8,
        'topic-news': 9,
    }
    docs = []
    labels = []
    for c_name, c_id in category.items():
        files = glob.glob(LIVEDOOR_DIR + "/text/{c_name}/{c_name}*.txt".format(c_name=c_name))
        for file_path in files:
            # Explicit encoding: the livedoor corpus is distributed as UTF-8.
            with open(file_path, 'r', encoding='utf-8') as f:
                lines = f.read().splitlines()
            # lines[0] = url, lines[1] = datetime, lines[2] = subject
            subject = lines[2]
            body = "\n".join(lines[3:])
            # Fix: append every article.  Previously the appends sat outside
            # the per-file loop, so only the last file of each category
            # survived into the DataFrame.
            docs.append(subject + "\n" + body)
            labels.append(c_id)
    df = pd.DataFrame(data={'docs': docs, 'labels': labels})
    np.random.seed(0)  # deterministic shuffle for reproducible splits
    return df.reindex(np.random.permutation(df.index))
|
[
"afujiwara2012@gmail.com"
] |
afujiwara2012@gmail.com
|
75dc191ffa958cad0e57397c2cd8e0d23936a5ed
|
7c1e597df32844e1c2a5b1b83456813b2f5d5abb
|
/does_it_live_v0.10.py
|
30b233eee70804315894039da9225cbf8c734ae0
|
[] |
no_license
|
alexisdacquay/does_it_live
|
6c835ceeab787962cd3048f821dee139c96ce1c6
|
241b00c9bcfd399b1cb8e11ee6e64588048101b9
|
refs/heads/master
| 2022-07-08T00:52:54.043402
| 2022-06-15T20:08:44
| 2022-06-15T20:08:44
| 144,036,277
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,494
|
py
|
#!/usr/bin/env python
#
# Version 1.0 2018-08-08
# Written by:
# Alexis Dacquay
import argparse
import time
import os
import signal
import subprocess
import socket
import sys
import signal
import re
import platform
#import syslog
#import sys
#import datetime
#from ctypes import cdll, byref, create_string_buffer
def trace(*msg):
    """Emit debug output when --debug is set (reads the global ``args``).

    One positional argument prints as-is; two print as a padded
    label/value pair.
    """
    if not args.debug:
        return
    if len(msg) > 1:
        # Label padded to 20 columns, then the value.
        print('{:20} {}'.format(msg[0], msg[1]))
    else:
        print(msg[0])
def parseArgs():
    """Build the argument parser and parse sys.argv.

    Returns the argparse Namespace: ``host`` is a list of one or more
    destinations; ``interval``/``timeout`` are in seconds; ``mode``
    selects the probe type; ``source``/``dns`` are optional addresses.
    """
    parser = argparse.ArgumentParser( description='Checks whether a destination \
                                    is alive' )
    parser.add_argument( '-x', '--debug', action='store_true',
                        help='activates debug output' )
    parser.add_argument( '-i', '--interval', type=int, default=5,
                        help='Interval of polls. Default is 5' )
    parser.add_argument( '-t', '--timeout', type=int, default=5,
                        help='Amount of seconds to wait for a response' )
    parser.add_argument( '-m', '--mode', default='icmp',
                        help='detection mode: ICMP, DNS or SSH. \
                        Default is ICMP' )
    parser.add_argument( '-s', '--source',
                        help='source IP address to reach' )
    parser.add_argument( '-d', '--dns',
                        help='IP address of the DNS name-server, to be used in\
                        conjunction with the DNS mode and a FQDN' )
    # At least one destination is required (nargs='+').
    parser.add_argument( 'host', nargs='+',
                        help='FQDN or IP address of the destination(s) to \
                        check' )
    args = parser.parse_args()
    return args
def checkOS():
    """Set the platform-specific ping settings used elsewhere in the script.

    Assigns the module globals ``timeUnit`` and ``sourceSetting``
    (``main()`` declares them global and relies on this function to
    populate them).

    Fixes vs. original: added the missing ``:`` after ``def`` (the file
    could not even be parsed), declared the globals (the original
    assignments were function-local and silently discarded), and renamed
    the local so it no longer shadows the imported ``os`` module.
    """
    global timeUnit, sourceSetting
    system = platform.system()
    if system == 'Linux':
        # On EOS Linux kernel timeout is in second and IP source as "Interface"
        timeUnit = 1000
        sourceSetting = '-I'
    if system == 'Darwin':
        # On MACOS timeout is in msec and IP source as "Source"
        timeUnit = 1000
        sourceSetting = '-S'
    # NOTE(review): other platforms leave both globals unset, and the
    # comments disagree with timeUnit being 1000 in both branches — confirm
    # the intended units with the ping man pages.
def checkSocket( ip, port, timeout ):
    """Return True if a TCP connection to (ip, port) succeeds within *timeout*.

    Used for the SSH/port-based liveness check.  Logs the outcome via
    ``trace`` in either case.

    Fix: the socket is now closed in a ``finally`` block, so it no longer
    leaks if ``connect`` raises something other than ``socket.error``.
    """
    s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
    s.settimeout( timeout )
    try:
        s.connect( ( ip, port ) )
        trace( "{} - Port {} is reachable".format( ip, port ) )
        test_success = True
    except socket.error as e:
        trace( "Error on connect: {}".format( e ))
        test_success = False
    finally:
        s.close()
    return( test_success )
class checkICMP:
    """ICMP liveness probe for a single host, shelling out to ``ping``.

    Reads the module-global ``args`` for timeout/source options.
    """

    def __init__( self, host ):
        # host: FQDN or IP address string passed straight to ping.
        self.host = host

    def getLatency( self, output ):
        """Parse ping's summary line and return the average RTT as a float.

        *output* is the decoded stdout of a successful ping run.
        """
        # Must get an output first, check with isAlive()
        outputLines = output.split('\n')
        # Last non-empty line is the "min/avg/max" summary.
        lastNonEmpty = [ i for i in outputLines if i ][ -1 ]
        trace( 'Ping result:', lastNonEmpty)
        timingData = lastNonEmpty.split( '=' )[1]
        timingStats = timingData.split( '/' )
        #pingMin = float( timingStats[ 0 ] )
        pingAvg = float( timingStats[ 1 ] )
        #pingMax = float( timingStats[ 2 ] )
        return pingAvg

    def isAlive( self ):
        """Run one ping and return True on success, False otherwise.

        Traces the measured latency when the ping succeeds.
        """
        result = ''
        pythonVersion = sys.version_info[ 0 ]
        trace( 'Python version:', pythonVersion )
        # Only include the source-address flag when --source was given.
        src_exists = True if args.source else False
        # NOTE(review): '-t' means timeout on macOS ping but TTL on Linux
        # ping, and '-S' is macOS-only — the sourceSetting computed by
        # checkOS() is never used here.  Confirm against the man pages.
        command = [ 'ping' ] + \
                  [ '-c 1' ] + \
                  [ '-t ' + str( args.timeout ) ] + \
                  [ '-S ' + str( args.source ) ] * src_exists + \
                  [ self.host ]
        trace( 'The command is:', str( command ) )
        # Python 2 compatibility
        if sys.version_info[ 0 ] < 3:
            # !!!!!!!!!! To Do: harmonise ping command across v2 and v3 versions
            proc = subprocess.Popen( [ 'ping', '-n', '-c 1', '-W 1', self.host ], \
                    stdout = subprocess.PIPE, stderr = subprocess.PIPE )
            returncode = proc.wait()
            if returncode == 0:
                rawOutput = proc.communicate()
                output = rawOutput[0].decode( 'ascii' )
                trace( 'The latency is:', self.getLatency( output ) )
                result = True
            else:
                error = 'The ICMP check did not succeed'
                trace( 'Error:', error )
                result = False
        # Python 3
        if sys.version_info[ 0 ] >= 3:
            # capture_output requires Python 3.7+.
            proc = subprocess.run( command, capture_output = True )
            if proc.returncode == 0:
                output = proc.stdout.decode( 'ascii' )
                trace( 'The latency is:', self.getLatency( output ) )
                result = True
            else:
                # if proc.returncode != 0 it means an error occured
                error = proc.stderr.decode( 'ascii' )
                trace( 'Error:', error )
                result = False
        return result
def main():
    """Entry point: parse arguments, configure platform settings, poll forever.

    Polls only the first host given; prints a line each time the target
    responds.  Runs until interrupted.
    """
    global args
    # Signal handling used to quit by Ctrl+C without traceback.
    signal.signal( signal.SIGINT, lambda sig_number, current_stack_frame: sys.exit( 0 ) )
    args = parseArgs()
    trace( 'Args are:', args )
    global timeUnit, sourceSetting
    checkOS()
    # NOTE(review): only args.host[0] is checked; additional hosts are ignored.
    check = checkICMP( args.host[ 0 ] )
    while True:
        if check.isAlive():
            print( "Target is alive" )
        # Fix: honor the user-configurable poll interval (was hard-coded to 5,
        # silently ignoring the -i/--interval option).
        time.sleep( args.interval )


if __name__ == '__main__':
    main()
|
[
"adacquay@gmail.com"
] |
adacquay@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.