blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
522c84f8db34b50d58ffb6b486b23961e9cc2994 | 5556424ae28e1965ccf0bb60b8203d76e812c3e3 | /app/settings_prod.py | 3db6997577ab1a3ec887588bf20072da76a50cb7 | [] | no_license | astromitts/borkle | e0c36ebb0c6575d123680342efcf20383d592102 | ff5f67a1fb81bc0dbf4afb5e8fda3cbe86ec9b63 | refs/heads/main | 2023-01-01T02:48:54.321538 | 2020-10-25T13:43:45 | 2020-10-25T13:43:45 | 307,006,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import os
from app.settings import * # noqa
# Production overrides for the base configuration pulled in by
# `from app.settings import *` above.
# DEBUG must never be enabled in production: Django's debug pages leak
# stack traces, settings and environment details to end users.
DEBUG = False

# Serve compressed, manifest-versioned static files via WhiteNoise.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| [
"bomorin-id@MacBook-Pro.local"
] | bomorin-id@MacBook-Pro.local |
ec45f92fa7feccad2e8ebffe1dfe13c66eefb4c8 | b4f80293a9230925429bccf9ca80df404f830466 | /char-rnns/microgradchar.py | 90cbf45553a52b3692d0fbe815caa415f50e10ab | [] | no_license | jcanode/nlp_fun | cac737c43559aaa7bca55984b849adfbea4e1314 | f81a75fe6f325eeaf531d3c6cfa64ef12ada4fc2 | refs/heads/master | 2023-01-11T15:34:04.888871 | 2020-11-11T04:56:52 | 2020-11-11T04:56:52 | 303,938,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | from micrograd.engine import Value
# Smoke test of micrograd's scalar autograd engine: build a small
# expression graph out of Value nodes, run the forward pass, then
# backpropagate gradients with g.backward().
a = Value(-4.0)
b = Value(2.0)
c = a + b
d = a * b + b**3
# Augmented assignments create new graph nodes; the old c/d remain
# reachable as inputs of the new expressions.
c += c + 1
c += 1 + c + (-a)
d += d * 2 + (b + a).relu()
d += 3 * d + (b - a).relu()
e = c - d
f = e**2
g = f / 2.0
g += 10.0 / f
print(f'{g.data:.4f}') # prints 24.7041, the outcome of this forward pass
g.backward()
print(f'{a.grad:.4f}') # prints 138.8338, i.e. the numerical value of dg/da
print(f'{b.grad:.4f}') # prints 645.5773, i.e. the numerical value of dg/db
"45806280+jcanode@users.noreply.github.com"
] | 45806280+jcanode@users.noreply.github.com |
183aba71c348509e64b9916b1b2e0d84103f3be3 | 322726c2e15389d0381692676156a5e87c8ad0f9 | /site/bin/painter.py | 342e538912085fb724da01907eca35899f87f5b4 | [] | no_license | Trostnick/PSDB | e2cddd3f5c4518098e38935d441e0112b178aa14 | fd4c4a209d60c1c029916b0328ac9d7372daed83 | refs/heads/master | 2020-05-27T13:01:55.726212 | 2018-03-20T14:32:12 | 2018-03-20T14:32:12 | 124,147,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | #!/home/meeg/site/bin/python2
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
try:
from tkinter import Tk, Canvas, NW
except ImportError:
from Tkinter import Tk, Canvas, NW
from PIL import Image, ImageTk
import sys
#
# painter widget
class PaintCanvas(Canvas):
    """Tk canvas that displays *image* and grays it out under the mouse.

    The picture is shown as a grid of 32x32 PhotoImage tiles so that a
    paint stroke only forces Tk to redraw the touched tiles instead of
    re-uploading the whole image on every event.
    """
    def __init__(self, master, image):
        Canvas.__init__(self, master, width=image.size[0], height=image.size[1])
        # fill the canvas
        self.tile = {}
        self.tilesize = tilesize = 32
        xsize, ysize = image.size
        for x in range(0, xsize, tilesize):
            for y in range(0, ysize, tilesize):
                box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
                tile = ImageTk.PhotoImage(image.crop(box))
                self.create_image(x, y, image=tile, anchor=NW)
                # Keep a reference to every PhotoImage: Tk does not hold
                # one, and a garbage-collected tile would go blank.
                self.tile[(x, y)] = box, tile
        self.image = image
        self.bind("<B1-Motion>", self.paint)
    def paint(self, event):
        # 20x20 square "brush" centred on the pointer position.
        xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
        im = self.image.crop(xy)
        # process the image in some fashion (here: convert to grayscale)
        im = im.convert("L")
        self.image.paste(im, xy)
        self.repair(xy)
    def repair(self, box):
        # update canvas: re-paste every tile overlapping the dirty box
        dx = box[0] % self.tilesize
        dy = box[1] % self.tilesize
        for x in range(box[0]-dx, box[2]+1, self.tilesize):
            for y in range(box[1]-dy, box[3]+1, self.tilesize):
                try:
                    xy, tile = self.tile[(x, y)]
                    tile.paste(self.image.crop(xy))
                except KeyError:
                    pass # outside the image
        self.update_idletasks()
#
# main: expects exactly one argument, the path of the image to open.
if len(sys.argv) != 2:
    print("Usage: painter file")
    sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
# The tile/paint logic works on RGB data, so normalise other modes first.
if im.mode != "RGB":
    im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| [
"trostnick97@mail.ru"
] | trostnick97@mail.ru |
c36e62063a94a409390144111aa8b1febb637d79 | 1c594498900dd6f25e0a598b4c89b3e33cec5840 | /iqps/search/views.py | c6c5dfb564a3088854e3a4badd988789e7fb6d3b | [
"MIT"
] | permissive | thealphadollar/iqps | cef42ed8c86e4134e724a5f4967e96a83d672fcd | 187f6b134d82e2dce951b356cb0c7151994ca3ab | refs/heads/master | 2023-07-14T04:41:13.190595 | 2020-06-25T14:51:17 | 2020-06-25T14:51:17 | 277,360,692 | 0 | 0 | MIT | 2020-07-05T18:29:17 | 2020-07-05T18:29:16 | null | UTF-8 | Python | false | false | 3,320 | py | from django.shortcuts import render
from django.db import connection
from django.http import JsonResponse
from iqps.settings import DATABASES
#from .processors import SearchCursor
#Use this with sqlite
#db_name = DATABASES['default']['NAME']
def sqlite_search(subject, year=0, department="", paper_type=""):
    """Fuzzy-search papers by subject similarity on the sqlite backend.

    Args:
        subject: subject string to match; an empty string returns [].
        year: optional exact-year filter, ignored when <= 0.
        department: optional department-code filter, ignored when empty.
        paper_type: optional paper-type filter, ignored when empty.

    Returns:
        List of matching row tuples ordered by descending similarity score.
    """
    if subject == "":
        return []
    # Build the optional filters with bound parameters instead of string
    # interpolation: the previous version formatted request-supplied values
    # straight into the SQL text, which allowed SQL injection.
    filters = []
    params = [subject]
    if year > 0:
        filters.append("AND p.year = ?")
        params.append(year)
    if department != "":
        filters.append("AND d.code = ?")
        params.append(department)
    if paper_type != "":
        filters.append("AND p.paper_type = ?")
        params.append(paper_type)
    query = (
        "SELECT p.subject, p.year, p.department_id, d.id, d.code, "
        "p.paper_type, p.link, SIMILARITYSCORE(p.subject, ?) AS s "
        "FROM papers p JOIN departments d ON p.department_id = d.id "
        "WHERE s > 70 {} ORDER BY s DESC;".format(" ".join(filters))
    )
    results = []
    # SearchCursor/db_name belong to the (currently commented-out) sqlite
    # setup at the top of this module; this helper is only usable when
    # that backend is enabled.
    with SearchCursor(db_name) as c:
        c.execute(query, params)
        for row in c.fetchall():
            results.append(row)
    return results
def _search(subject, year=0, department="", paper_type="", keywords=""):
    """Search papers by SOUNDEX prefix match on subject, with optional filters.

    Args:
        subject: subject string; an empty string returns [].
        year: optional exact-year filter, ignored when <= 0.
        department: optional department-code filter, ignored when empty.
        paper_type: optional paper-type filter, ignored when empty.
        keywords: optional keyword filter; accepts a comma-separated list
            (also tolerates the legacy "('a','b')" tuple-literal format
            that used to be spliced verbatim into the SQL).

    Returns:
        List of row tuples, newest papers first, capped at 30 results.

    All values are passed as bound query parameters: the previous version
    interpolated request data into the SQL string (SQL injection).
    """
    if subject == "":
        return []
    filters = []
    # The subject appears twice in the WHERE clause (LENGTH and SOUNDEX).
    params = [subject, subject]
    if year > 0:
        filters.append("AND p.year = %s")
        params.append(year)
    if department != "":
        filters.append("AND d.code = %s")
        params.append(department)
    if paper_type != "":
        filters.append("AND p.paper_type = %s")
        params.append(paper_type)
    keyword_values = []
    if keywords != "":
        keyword_values = [k.strip(" '\"") for k in keywords.strip("() ").split(",")
                          if k.strip(" '\"") != ""]
    if not keyword_values:
        query = (
            "SELECT p.subject, p.year, d.code, p.paper_type, p.link, p.id "
            "FROM papers p JOIN departments d ON p.department_id = d.id "
            "WHERE SOUNDEX(SUBSTRING(p.subject, 1, LENGTH(%s))) = SOUNDEX(%s) "
            "{} ORDER BY year DESC LIMIT 30;".format(" ".join(filters))
        )
    else:
        placeholders = ", ".join(["%s"] * len(keyword_values))
        params.extend(keyword_values)
        query = (
            "SELECT p.subject, p.year, d.code, p.paper_type, p.link, p.id, "
            "GROUP_CONCAT(kt.text) AS keywords "
            "FROM papers AS p JOIN departments AS d ON p.department_id = d.id "
            "LEFT OUTER JOIN ("
            " SELECT pk.paper_id, k.text FROM papers_keywords AS pk"
            " JOIN keywords AS k ON pk.keyword_id = k.id"
            ") AS kt ON p.id = kt.paper_id "
            "WHERE SOUNDEX(SUBSTRING(p.subject, 1, LENGTH(%s))) = SOUNDEX(%s) "
            "{} AND kt.text IN ({}) "
            "ORDER BY p.year DESC LIMIT 30;".format(" ".join(filters), placeholders)
        )
    results = []
    with connection.cursor() as c:
        c.execute(query, params)
        for row in c.fetchall():
            results.append(row)
    return results
def hitSearch(request):
    """
    Meant to be an independent API.
    Request args:
        q -> subject name
        year -> year filter
        dep -> department filter
        typ -> paper_type filter
        keys -> comma-separated keyword filter
    Returns a JsonResponse {"papers": [...]} with a permissive CORS header.
    """
    q = request.GET.get('q', "")
    year = request.GET.get('year', 0)
    dep = request.GET.get('dep', "")
    typ = request.GET.get('typ', "")
    keywords = request.GET.get('keys', "")
    try:
        year = int(year)
    except (TypeError, ValueError):
        # Non-numeric year parameter: fall back to "no year filter".
        # (Previously a bare `except:` which also swallowed unrelated errors.)
        year = 0
    results = _search(q, year=year, department=dep, paper_type=typ, keywords=keywords)
    response = JsonResponse({"papers": results})
    response["Access-Control-Allow-Origin"] = "*"  # allow cross-origin API consumers (CORS)
    return response
| [
"smishra99.iitkgp@gmail.com"
] | smishra99.iitkgp@gmail.com |
e077f429daff201e907044fe1dafc3a66af86952 | 26fc334777ce27d241c67d97adc1761e9d23bdba | /tests/django_tests/tests/middleware_exceptions/tests.py | 0c39f09f9156cf2b9787fa67ac627a5c7dd4a653 | [
"BSD-3-Clause"
] | permissive | alihoseiny/djongo | 1434c9e78c77025d7e0b3330c3a40e9ea0029877 | e2edf099e398573faa90e5b28a32c3d7f1c5f1e9 | refs/heads/master | 2020-03-27T23:27:02.530397 | 2018-08-30T14:44:37 | 2018-08-30T14:44:37 | 147,317,771 | 2 | 1 | BSD-3-Clause | 2018-09-04T09:00:53 | 2018-09-04T09:00:53 | null | UTF-8 | Python | false | false | 6,887 | py | from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import patch_logger
from . import middleware as mw
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class MiddlewareTests(SimpleTestCase):
    """Exercise the middleware hooks (process_view / process_exception /
    process_template_response) against the views in middleware_exceptions.urls.
    Each test overrides MIDDLEWARE with the specific test middleware it needs;
    mw.log records what the middleware observed."""
    def tearDown(self):
        # Reset the module-level log shared by the test middleware.
        mw.log = []
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewNoneMiddleware'])
    def test_process_view_return_none(self):
        # process_view() returning None lets the view run normally.
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(mw.log, ['processed view normal_view'])
        self.assertEqual(response.content, b'OK')
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewMiddleware'])
    def test_process_view_return_response(self):
        # process_view() returning a response short-circuits the view.
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(response.content, b'Processed view normal_view')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
        'middleware_exceptions.middleware.LogMiddleware',
    ])
    def test_templateresponse_from_process_view_rendered(self):
        """
        TemplateResponses returned from process_view() must be rendered before
        being passed to any middleware that tries to access response.content,
        such as middleware_exceptions.middleware.LogMiddleware.
        """
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(response.content, b'Processed view normal_view\nProcessViewTemplateResponseMiddleware')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
        'middleware_exceptions.middleware.TemplateResponseMiddleware',
    ])
    def test_templateresponse_from_process_view_passed_to_process_template_response(self):
        """
        TemplateResponses returned from process_view() should be passed to any
        template response middleware.
        """
        response = self.client.get('/middleware_exceptions/view/')
        expected_lines = [
            b'Processed view normal_view',
            b'ProcessViewTemplateResponseMiddleware',
            b'TemplateResponseMiddleware',
        ]
        self.assertEqual(response.content, b'\n'.join(expected_lines))
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.TemplateResponseMiddleware'])
    def test_process_template_response(self):
        response = self.client.get('/middleware_exceptions/template_response/')
        self.assertEqual(response.content, b'template_response OK\nTemplateResponseMiddleware')
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.LogMiddleware'])
    def test_view_exception_converted_before_middleware(self):
        # PermissionDenied raised in the view becomes a 403 response before
        # reaching the middleware's response phase.
        response = self.client.get('/middleware_exceptions/permission_denied/')
        self.assertEqual(mw.log, [(response.status_code, response.content)])
        self.assertEqual(response.status_code, 403)
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
    def test_view_exception_handled_by_process_exception(self):
        response = self.client.get('/middleware_exceptions/error/')
        self.assertEqual(response.content, b'Exception caught')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessExceptionLogMiddleware',
        'middleware_exceptions.middleware.ProcessExceptionMiddleware',
    ])
    def test_response_from_process_exception_short_circuits_remainder(self):
        # The innermost middleware handles the exception, so the outer
        # logging middleware's process_exception never runs.
        response = self.client.get('/middleware_exceptions/error/')
        self.assertEqual(mw.log, [])
        self.assertEqual(response.content, b'Exception caught')
    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.LogMiddleware',
        'middleware_exceptions.middleware.NotFoundMiddleware',
    ])
    def test_exception_in_middleware_converted_before_prior_middleware(self):
        # Http404 raised by inner middleware is converted to a 404 response
        # before the outer middleware sees it.
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(mw.log, [(404, response.content)])
        self.assertEqual(response.status_code, 404)
    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
    def test_exception_in_render_passed_to_process_exception(self):
        # Exceptions raised during TemplateResponse rendering also reach
        # process_exception().
        response = self.client.get('/middleware_exceptions/exception_in_render/')
        self.assertEqual(response.content, b'Exception caught')
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class RootUrlconfTests(SimpleTestCase):
    """Behavior of request handling when ROOT_URLCONF is misconfigured."""
    @override_settings(ROOT_URLCONF=None)
    def test_missing_root_urlconf(self):
        # Removing ROOT_URLCONF is safe, as override_settings will restore
        # the previously defined settings.
        del settings.ROOT_URLCONF
        with self.assertRaises(AttributeError):
            self.client.get("/middleware_exceptions/view/")
class MyMiddleware:
    """Test middleware that always opts out by raising MiddlewareNotUsed
    (with no message) from its constructor."""
    def __init__(self, get_response=None):
        raise MiddlewareNotUsed
    def process_request(self, request):
        # Never reached: __init__ always raises.
        pass
class MyMiddlewareWithExceptionMessage:
    """Like MyMiddleware, but raises MiddlewareNotUsed with a custom message
    so tests can assert the message appears in the debug log."""
    def __init__(self, get_response=None):
        raise MiddlewareNotUsed('spam eggs')
    def process_request(self, request):
        # Never reached: __init__ always raises.
        pass
@override_settings(
    DEBUG=True,
    ROOT_URLCONF='middleware_exceptions.urls',
    MIDDLEWARE=['django.middleware.common.CommonMiddleware'],
)
class MiddlewareNotUsedTests(SimpleTestCase):
    """MiddlewareNotUsed raised in a middleware constructor should skip the
    middleware and be logged at DEBUG level only when settings.DEBUG is on."""
    rf = RequestFactory()
    def test_raise_exception(self):
        request = self.rf.get('middleware_exceptions/view/')
        with self.assertRaises(MiddlewareNotUsed):
            MyMiddleware().process_request(request)
    @override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddleware'])
    def test_log(self):
        # Message-less MiddlewareNotUsed logs just the middleware path.
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 1)
        self.assertEqual(
            calls[0],
            "MiddlewareNotUsed: 'middleware_exceptions.tests.MyMiddleware'"
        )
    @override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'])
    def test_log_custom_message(self):
        # A MiddlewareNotUsed message is appended after the middleware path.
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 1)
        self.assertEqual(
            calls[0],
            "MiddlewareNotUsed('middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'): spam eggs"
        )
    @override_settings(DEBUG=False)
    def test_do_not_log_when_debug_is_false(self):
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 0)
| [
"nesdis@gmail.com"
] | nesdis@gmail.com |
1265e8a3612b796c07f5ab24327fecb0bbb2d1b8 | b9a607c121c8e36c3c1dec47003ec2fbfc660ff1 | /sendmail/views.py | 6a5b9c7c278a35a31939020d3becfa191cb0c296 | [] | no_license | AustralianSynchrotron/send-mail-server | fe8bc8a6e5c076d9980b8b6df25f50a10f875927 | ea3c3eff1e229bfeb35a008b6a4c116a86113af0 | refs/heads/master | 2021-01-19T11:45:06.921363 | 2017-07-15T14:00:24 | 2017-07-15T14:00:24 | 82,261,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,969 | py | from sendmail import app,mail
from flask_mail import Message
from flask import request
import logging
# Module-level logger that echoes DEBUG-and-above records to the console.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
st = logging.StreamHandler()
st.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s [%(name)s] %(levelname)s :: %(message)s")
st.setFormatter(formatter)
# NOTE(review): the handler is attached unconditionally at import time;
# if this module were imported under two names, log lines would duplicate.
logger.addHandler(st)
from jinja2 import Template
@app.route('/')
def index():
    """Root page: show a curl usage hint for the /sendmail/ endpoint."""
    usage_html = (
        "<link rel='shortcut icon' href='/static/img/favicon.png' >"
        "<pre>curl --data 'subject=<subject>&body=<body>"
        "&recipients=<recipient[@synchrotron.org.au]>[,one][,two][,etc...]' "
        "hostAddress:Port/sendmail/</pre>"
    )
    # The template has no placeholders, so rendering returns it verbatim.
    return Template(usage_html).render()
@app.route('/sendmail/', methods=['POST'])
def post_the_mail():
    """Send an HTML email to each posted recipient, one message per user.

    Form fields:
        subject: message subject.
        body: HTML message body.
        recipients: comma-separated user names or addresses; entries
            without an '@' get '@synchrotron.org.au' appended.
        from: optional sender local part; defaults to 'email_robot'.
    """
    subject_string = str(request.form.get('subject', ''))
    body_string = str(request.form.get('body', ''))
    tmp_list = str(request.form.get('recipients', '')).split(',')
    # Fall back to the robot sender when 'from' is absent or empty.
    # (The old code did str(request.form.get('from')) which turns a missing
    # field into the string 'None', so the fallback never fired and mail
    # went out as 'None@synchrotron.org.au'.)
    from_string = request.form.get('from') or 'email_robot'
    logger.info(from_string)
    recipient_list = []
    for recipient in tmp_list:
        r = recipient
        if '@' not in recipient:
            r = r + '@synchrotron.org.au'
        recipient_list.append(r)
    logger.info("%s, %s, %s, %s" % (from_string, subject_string, body_string, tmp_list))
    with mail.connect() as con:
        logger.info('Sending Email Message to the following users: ')
        for user in recipient_list:
            logger.info(user)
            msg = Message(subject=subject_string,
                          sender=from_string + '@synchrotron.org.au',
                          html=body_string,
                          recipients=[user])
            con.send(msg)
    logger.info("Email Sent, you don't have anymore messages!")
    return "Done! "
| [
"cameron.rodda@synchrotron.org.au"
] | cameron.rodda@synchrotron.org.au |
fbe152c2c7005c6aebce424912f910e0be53e37a | 65ce70d806e379f75683244722b2975e6f511bbb | /another program/Map() in Python.py | 32bbde9e8dc5fdc8bc20989ea35087a257880b03 | [] | no_license | Mahedi2150/python | dfc2ad6c01d16cf5a6da85864d5fb9c478e645ef | 37fbd2b1fe0b606cc91a5bc8a47a0fc9f2ab0eec | refs/heads/master | 2022-12-30T17:20:23.705238 | 2020-10-20T20:04:55 | 2020-10-20T20:04:55 | 293,569,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | """def mulFiveTimes(number):
return number*5
result = []
num = [3,5,7,9,1,5]
for i in num:
result.append(mulFiveTimes(i))
print(result)"""
def mulFiveTimes(number):
return number*5
num = [3,5,7,9,1,5]
print(list(map(mulFiveTimes,num))) | [
"noreply@github.com"
] | Mahedi2150.noreply@github.com |
5d74c49cfb918d4a55a35493f41a60b12a5f1a0f | 667c2d8d8a37a3a7719d5aa44586f59f8ce8fd51 | /bakery/__init__.py | 952a95fe79f173ed2e4c6d969e5f5477dc00636a | [
"MIT"
] | permissive | iredelmeier/doughknots | fe2880dff5b32a81e572b088cbe97b1501b2f38a | 487431b189eed8e33d369403100ff3b68d7a4151 | refs/heads/master | 2022-12-23T22:00:33.953861 | 2019-07-17T22:48:59 | 2019-07-17T22:48:59 | 197,473,677 | 3 | 0 | MIT | 2022-12-08T05:54:28 | 2019-07-17T22:49:51 | Python | UTF-8 | Python | false | false | 195 | py | from .bakery import Bakery, NoopBakery
from .httpclient import HttpClient
from .kind import Kind
from .service import Service
__all__ = ["Bakery", "HttpClient", "Kind", "NoopBakery", "Service"]
| [
"iredelmeier@gmail.com"
] | iredelmeier@gmail.com |
9d72434ff4c42cd9934c292efbbb2cdcf75e5a58 | f719ec76a8417fc05a2d46ada2501052e2bf9469 | /exp_runners/traffic/cent_traffic_runner.py | 2179e0136d393da470ab919a3989f6ab9e970282 | [] | no_license | yang-xy20/DICG | cc31064a3e4a3dd01414161e42b228c2c09bfea7 | c64ba9dbbe0f2b745cd04ce516aa1fed4c2cffc7 | refs/heads/master | 2023-07-04T18:25:18.461196 | 2021-08-19T21:34:06 | 2021-08-19T21:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,351 | py | import sys
import os
current_file_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_file_path + '/../../')
import socket
import collections
import numpy as np
import argparse
import joblib
import time
import matplotlib.pyplot as plt
from types import SimpleNamespace
import torch
from torch.nn import functional as F
import akro
import garage
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment.deterministic import set_seed
from envs import TrafficJunctionWrapper
from dicg.torch.baselines import GaussianMLPBaseline
from dicg.torch.algos import CentralizedMAPPO
from dicg.torch.policies import CentralizedCategoricalMLPPolicy
from dicg.experiment.local_runner_wrapper import LocalRunnerWrapper
from dicg.sampler import CentralizedMAOnPolicyVectorizedSampler
def run(args):
    """Train, restore, or evaluate centralized MA-PPO on Traffic Junction.

    args.mode selects the branch:
      - 'train': run a fresh experiment under ./data/<loc>/<exp_name>.
      - 'restore': resume training from the saved snapshot in exp_dir.
      - 'eval': only evaluate the saved policy.
    """
    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # garage.torch.utils.set_gpu_mode(mode=torch.cuda.is_available())
    # print(garage.torch.utils.global_device())
    if args.exp_name is None:
        # Derive a descriptive experiment name from the hyperparameters.
        exp_layout = collections.OrderedDict([
            ('cent{}_ppo', ''),
            ('entcoeff={}', args.ent),
            ('dim={}', args.dim),
            ('nagents={}', args.n_agents),
            ('difficulty={}', args.difficulty),
            ('curr={}', bool(args.curriculum)),
            ('steps={}', args.max_env_steps),
            ('nenvs={}', args.n_envs),
            ('bs={:0.0e}', args.bs),
            ('splits={}', args.opt_n_minibatches),
            ('miniepoch={}', args.opt_mini_epochs),
            ('seed={}', args.seed)
        ])
        exp_name = '_'.join(
            [key.format(val) for key, val in exp_layout.items()]
        )
    else:
        exp_name = args.exp_name
    prefix = 'traffic'
    # Non-zero run ids get a numeric suffix for sequential naming.
    id_suffix = ('_' + str(args.run_id)) if args.run_id != 0 else ''
    # NOTE(review): unseeded_exp_dir is never used below — confirm before removing.
    unseeded_exp_dir = './data/' + args.loc +'/' + exp_name[:-7]
    exp_dir = './data/' + args.loc +'/' + exp_name + id_suffix
    # Enforce: max-entropy PPO handles its own baseline, so disable advantage centering.
    args.center_adv = False if args.entropy_method == 'max' else args.center_adv
    if args.mode == 'train':
        # making sequential log dir if name already exists
        @wrap_experiment(name=exp_name,
                         prefix=prefix,
                         log_dir=exp_dir,
                         snapshot_mode='last',
                         snapshot_gap=1)
        def train_traffic(ctxt=None, args_dict=vars(args)):
            args = SimpleNamespace(**args_dict)
            set_seed(args.seed)
            if args.curriculum:
                # Car add-rate ramps from add_rate_min to add_rate_max
                # between 12.5% and 62.5% of training.
                curr_start = int(0.125 * args.n_epochs)
                curr_end = int(0.625 * args.n_epochs)
            else:
                curr_start = 0
                curr_end = 0
                # No curriculum: train at the fixed maximum add rate.
                args.add_rate_min = args.add_rate_max
            env = TrafficJunctionWrapper(
                centralized=True, # centralized training and critic
                dim=args.dim,
                vision=1,
                add_rate_min=args.add_rate_min,
                add_rate_max=args.add_rate_max,
                curr_start=curr_start,
                curr_end=curr_end,
                difficulty=args.difficulty,
                n_agents=args.n_agents,
                max_steps=args.max_env_steps
            )
            env = GarageEnv(env)
            runner = LocalRunnerWrapper(
                ctxt,
                eval=args.eval_during_training,
                n_eval_episodes=args.n_eval_episodes,
                eval_greedy=args.eval_greedy,
                eval_epoch_freq=args.eval_epoch_freq,
                save_env=env.pickleable
            )
            hidden_nonlinearity = F.relu if args.hidden_nonlinearity == 'relu' \
                else torch.tanh
            policy = CentralizedCategoricalMLPPolicy(
                env.spec,
                env.n_agents,
                hidden_nonlinearity=hidden_nonlinearity,
                hidden_sizes=args.hidden_sizes,
                name='dec_categorical_mlp_policy'
            )
            baseline = GaussianMLPBaseline(env_spec=env.spec,
                                           hidden_sizes=(64, 64, 64))
            # Set max_path_length <= max_steps
            # If max_path_length > max_steps, algo will pad obs
            # obs.shape = torch.Size([n_paths, algo.max_path_length, feat_dim])
            algo = CentralizedMAPPO(
                env_spec=env.spec,
                policy=policy,
                baseline=baseline,
                max_path_length=args.max_env_steps, # Notice
                discount=args.discount,
                center_adv=bool(args.center_adv),
                positive_adv=bool(args.positive_adv),
                gae_lambda=args.gae_lambda,
                policy_ent_coeff=args.ent,
                entropy_method=args.entropy_method,
                stop_entropy_gradient=True \
                    if args.entropy_method == 'max' else False,
                clip_grad_norm=args.clip_grad_norm,
                optimization_n_minibatches=args.opt_n_minibatches,
                optimization_mini_epochs=args.opt_mini_epochs,
            )
            runner.setup(algo, env,
                         sampler_cls=CentralizedMAOnPolicyVectorizedSampler,
                         sampler_args={'n_envs': args.n_envs})
            runner.train(n_epochs=args.n_epochs,
                         batch_size=args.bs)
        train_traffic(args_dict=vars(args))
    elif args.mode in ['restore', 'eval']:
        # Both branches load the pickled snapshot from a previous run.
        data = joblib.load(exp_dir + '/params.pkl')
        env = data['env']
        algo = data['algo']
        if args.mode == 'restore':
            from dicg.experiment.runner_utils import restore_training
            restore_training(exp_dir, exp_name, args,
                             env_saved=env.pickleable, env=env)
        elif args.mode == 'eval':
            env.eval(algo.policy, n_episodes=args.n_eval_episodes, greedy=args.eval_greedy,
                     load_from_file=True, max_steps=args.max_env_steps, render=args.render)
if __name__ == '__main__':
    # Command-line interface; defaults correspond to the 'easy' setting.
    parser = argparse.ArgumentParser()
    # Meta
    parser.add_argument('--mode', '-m', type=str, default='train')
    parser.add_argument('--loc', type=str, default='local')
    parser.add_argument('--exp_name', type=str, default=None)
    # Train
    parser.add_argument('--seed', '-s', type=int, default=1)
    parser.add_argument('--n_epochs', type=int, default=1000)
    parser.add_argument('--bs', type=int, default=60000)
    parser.add_argument('--n_envs', type=int, default=1)
    # Eval
    parser.add_argument('--run_id', type=int, default=0) # sequential naming
    parser.add_argument('--n_eval_episodes', type=int, default=100)
    parser.add_argument('--render', type=int, default=0)
    parser.add_argument('--inspect_steps', type=int, default=0)
    parser.add_argument('--eval_during_training', type=int, default=1)
    parser.add_argument('--eval_greedy', type=int, default=1)
    parser.add_argument('--eval_epoch_freq', type=int, default=5)
    # Env
    parser.add_argument('--max_env_steps', type=int, default=20)
    parser.add_argument('--dim', type=int, default=8)
    parser.add_argument('--n_agents', '-n', type=int, default=5)
    parser.add_argument('--difficulty', type=str, default='easy')
    parser.add_argument('--add_rate_max', type=float, default=0.3)
    parser.add_argument('--add_rate_min', type=float, default=0.1)
    parser.add_argument('--curriculum', type=int, default=0)
    # Algo
    # parser.add_argument('--max_algo_path_length', type=int, default=n_steps)
    parser.add_argument('--hidden_nonlinearity', type=str, default='tanh')
    parser.add_argument('--discount', type=float, default=0.99)
    parser.add_argument('--center_adv', type=int, default=1)
    parser.add_argument('--positive_adv', type=int, default=0)
    parser.add_argument('--gae_lambda', type=float, default=0.97)
    parser.add_argument('--ent', type=float, default=0.02) # 0.01 is too small
    parser.add_argument('--entropy_method', type=str, default='regularized')
    parser.add_argument('--clip_grad_norm', type=float, default=7)
    parser.add_argument('--opt_n_minibatches', type=int, default=4,
        help='The number of splits of a batch of trajectories for optimization.')
    parser.add_argument('--opt_mini_epochs', type=int, default=10,
        help='The number of epochs the optimizer runs for each batch of trajectories.')
    # Policy
    # Example: --encoder_hidden_sizes 12 123 1234
    parser.add_argument('--hidden_sizes', nargs='+', type=int)
    args = parser.parse_args()
    # Enforce values: each named difficulty pins the env geometry and add
    # rates, overriding whatever was passed on the command line.
    if args.difficulty == 'hard':
        args.max_env_steps = 60
        args.dim = 18
        args.n_agents = 20
        args.add_rate_min = 0.02
        args.add_rate_max = 0.05
    elif args.difficulty == 'medium':
        args.max_env_steps = 40
        args.dim = 14
        args.n_agents = 10
        args.add_rate_min = 0.05
        args.add_rate_max = 0.2
    elif args.difficulty == 'easy':
        args.max_env_steps = 20
        args.dim = 8
        args.n_agents = 5
        args.add_rate_min = 0.1
        args.add_rate_max = 0.3
    if args.hidden_sizes is None:
        args.hidden_sizes = [265, 128, 64]
    run(args)
| [
"lisheng@stanford.edu"
] | lisheng@stanford.edu |
0093acd5c0ab3527f6e25307f4a2f09b05a3eb73 | 27f1be7865eb58d17e5478299b5685fc625a055c | /src/dataset/dataset_mnistm.py | 99eaa5390d2b6840b00a2fc0b30135514eaefe79 | [
"MIT"
] | permissive | mkirchmeyer/adaptation-imputation | e53099f654bf75526e11ed93e9b78e4a26ed0bef | 7ef683f2da08699b3f877467fdb0e00d3b02bccc | refs/heads/main | 2023-08-28T05:31:30.924573 | 2021-10-13T15:03:59 | 2021-10-13T15:09:42 | 351,026,014 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,718 | py | """
Dataset setting and data loader for MNIST-M.
Modified from
https://github.com/pytorch/vision/blob/master/torchvision/datasets/mnist.py
CREDIT: https://github.com/corenel
"""
from __future__ import print_function
import errno
import os
import torch
import torch.utils.data as data
from PIL import Image
from src.dataset.sampler import BalancedBatchSampler
class MNISTM(data.Dataset):
    """MNIST-M dataset, downloaded as a gzipped pickle from the
    keras_mnistm release archive. The pickle ships images only, so
    labels are taken from torchvision's plain MNIST (same ordering)."""
    url = "https://github.com/VanushVaswani/keras_mnistm/releases/download/1.0/keras_mnistm.pkl.gz"
    # On-disk layout under `root`:
    raw_folder = 'raw'
    processed_folder = 'processed'
    training_file = 'mnist_m_train.pt'
    test_file = 'mnist_m_test.pt'
    def __init__(self,
                 root, mnist_root="data",
                 train=True,
                 transform=None, target_transform=None,
                 download=False):
        """Init MNIST-M dataset.

        root: where raw/processed MNIST-M files live.
        mnist_root: where torchvision downloads plain MNIST for labels.
        """
        super(MNISTM, self).__init__()
        self.root = os.path.expanduser(root)
        self.mnist_root = os.path.expanduser(mnist_root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        if self.train:
            self.train_data, self.train_labels = \
                torch.load(os.path.join(self.root,
                                        self.processed_folder,
                                        self.training_file))
        else:
            self.test_data, self.test_labels = \
                torch.load(os.path.join(self.root,
                                        self.processed_folder,
                                        self.test_file))
    def __getitem__(self, index):
        """Get images and target for data loader.
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        if self.train:
            img, target = self.train_data[index], self.train_labels[index]
        else:
            img, target = self.test_data[index], self.test_labels[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img.squeeze().numpy(), mode='RGB')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def __len__(self):
        """Return size of dataset."""
        if self.train:
            return len(self.train_data)
        else:
            return len(self.test_data)
    def _check_exists(self):
        # True when both processed tensor files are already on disk.
        return os.path.exists(os.path.join(self.root,
                                           self.processed_folder,
                                           self.training_file)) and \
            os.path.exists(os.path.join(self.root,
                                        self.processed_folder,
                                        self.test_file))
    def download(self):
        """Download, unpack and convert the MNIST-M data to .pt files.
        No-op when the processed files already exist."""
        # import essential packages
        from six.moves import urllib
        import gzip
        import pickle
        from torchvision import datasets
        # check if dataset already exists
        if self._check_exists():
            return
        # make data dirs
        try:
            os.makedirs(os.path.join(self.root, self.raw_folder))
            os.makedirs(os.path.join(self.root, self.processed_folder))
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                raise
        # download pkl files
        print('Downloading ' + self.url)
        filename = self.url.rpartition('/')[2]
        file_path = os.path.join(self.root, self.raw_folder, filename)
        if not os.path.exists(file_path.replace('.gz', '')):
            data = urllib.request.urlopen(self.url)
            with open(file_path, 'wb') as f:
                f.write(data.read())
            # Decompress next to the download, then drop the .gz archive.
            with open(file_path.replace('.gz', ''), 'wb') as out_f, \
                    gzip.GzipFile(file_path) as zip_f:
                out_f.write(zip_f.read())
            os.unlink(file_path)
        # process and save as torch files
        print('Processing...')
        # load MNIST-M images from pkl file (keys are bytes: b'train'/b'test')
        with open(file_path.replace('.gz', ''), "rb") as f:
            mnist_m_data = pickle.load(f, encoding='bytes')
        mnist_m_train_data = torch.ByteTensor(mnist_m_data[b'train'])
        mnist_m_test_data = torch.ByteTensor(mnist_m_data[b'test'])
        # get MNIST labels (the MNIST-M pickle contains no labels)
        mnist_train_labels = datasets.MNIST(root=self.mnist_root,
                                            train=True,
                                            download=True).train_labels
        mnist_test_labels = datasets.MNIST(root=self.mnist_root,
                                           train=False,
                                           download=True).test_labels
        # save MNIST-M dataset
        training_set = (mnist_m_train_data, mnist_train_labels)
        test_set = (mnist_m_test_data, mnist_test_labels)
        with open(os.path.join(self.root,
                               self.processed_folder,
                               self.training_file), 'wb') as f:
            torch.save(training_set, f)
        with open(os.path.join(self.root,
                               self.processed_folder,
                               self.test_file), 'wb') as f:
            torch.save(test_set, f)
        print('Done!')
def get_mnistm(train, transform, path, image_size=28, batch_size=32, in_memory=True, num_channel=1, is_balanced=False,
               drop_last=True, download=True):
    """Build a DataLoader over the MNIST-M dataset.

    When *in_memory* is set, every sample is first materialized into one
    (images, labels) tensor pair; otherwise batches are read lazily from the
    dataset.  *is_balanced* replaces shuffling with a BalancedBatchSampler.
    """
    source = MNISTM(root=f"{path}/data/", train=train, transform=transform, download=download)
    if in_memory:
        # Materialize the whole dataset via a batch-size-1 pass.
        preload = torch.utils.data.DataLoader(
            dataset=source,
            batch_size=1,
            shuffle=True,
            drop_last=False)
        images = torch.zeros((len(preload), num_channel, image_size, image_size))
        targets = torch.zeros(len(preload))
        for idx, (img, tgt) in enumerate(preload):
            images[idx] = img
            targets[idx] = tgt
        dataset = torch.utils.data.TensorDataset(images, targets.long())
    else:
        dataset = source
    if is_balanced:
        # The in-memory sampler variant is told so explicitly.
        if in_memory:
            sampler = BalancedBatchSampler(dataset, in_memory=True)
        else:
            sampler = BalancedBatchSampler(dataset)
        return torch.utils.data.DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            sampler=sampler,
            drop_last=drop_last)
    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True,
        drop_last=drop_last)
| [
"m.kirchmeyer@criteo.com"
] | m.kirchmeyer@criteo.com |
e7277b90bf91d0a0aa2e1eb71e0f8754381554a3 | 12a8b0779182959fe2e48b3df12b093c970da4d8 | /listings/models.py | eacf6c83d0b2667e7b237b3573abd1e5ffa3de36 | [] | no_license | kdogan11/rf_project | 8169e8dd74eeabc1e3d1934239ed35df58e1542b | ec245e19b80ced07a1fbe8468479d18e46d34ac0 | refs/heads/master | 2020-04-10T14:24:47.281703 | 2018-12-09T21:28:10 | 2018-12-09T21:28:10 | 161,076,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | from django.db import models
from datetime import datetime
from realtors.models import Realtor
class Listing(models.Model):
realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING)
title = models.CharField(max_length = 200)
address = models.CharField(max_length = 200)
city = models.CharField(max_length = 100)
state = models.CharField(max_length = 100)
zipcode = models.CharField(max_length = 20)
description = models.TextField(blank = True)
price = models.IntegerField()
bedrooms = models.IntegerField()
bathrooms = models.DecimalField(max_digits = 2, decimal_places = 1)
garage = models.IntegerField(default=0)
sqft = models.IntegerField()
lot_size = models.DecimalField(max_digits=5, decimal_places=1)
photo_main = models.ImageField(upload_to = 'photos/%Y%m/%d/')
photo_1 = models.ImageField(upload_to = 'photos/%Y%m/%d/', blank = True)
photo_2 = models.ImageField(upload_to = 'photos/%Y%m/%d/', blank = True)
photo_3 = models.ImageField(upload_to = 'photos/%Y%m/%d/', blank = True)
photo_4 = models.ImageField(upload_to = 'photos/%Y%m/%d/', blank = True)
photo_5 = models.ImageField(upload_to = 'photos/%Y%m/%d/', blank = True)
photo_6 = models.ImageField(upload_to = 'photos/%Y%m/%d/', blank = True)
is_published = models.BooleanField(default=True)
list_date = models.DateTimeField(default = datetime.now, blank = True)
def __str__(self):
return self.title
| [
"kdogan11@gmail.com"
] | kdogan11@gmail.com |
cdd2eecd01704a67982d8dda03dcfaa9b67cffff | 3f9c9a80240b5d059dd48d30859c498c417b3db5 | /visit_column.py | 2f6b2dc3c0dfcd234f404aa02ca94803df8a2f28 | [] | no_license | haiqiang2017/csdn_pageviewers- | 45f858815e712b4240826eb59b6f9d892d520031 | fd5c32a051a49a811b89323fe83f36b628686a33 | refs/heads/master | 2020-05-15T21:50:24.153753 | 2019-05-02T15:40:43 | 2019-05-02T15:40:43 | 182,510,406 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,235 | py | #coding:utf-8
import random
import urllib3
import time
from cookie_pool import get_cookie
from UserAgent_pool import get_UserAgent
import requests
"""提取数据访问链接"""
# 禁用urllib3的警告
urllib3.disable_warnings()
class visitSpider(object):
def __init__(self):
self.ua = get_UserAgent().get_random_useragent()
self.cookie = get_cookie().get_random_cookie()
# print self.ua,self.cookie
# 处理 headers
self.headers = {"User-Agent": self.ua, "cookie": self.cookie}
self.urls = []
self.nums = 0
self.currentHour = time.localtime().tm_hour
def readFile(self):
""""""
self.nums += 0
with open("download/column.txt", "r") as f:
for i in f.readlines():
# url = i[:-2]
url = i
if self.checkClone(url,self.urls):
# print("\r读取url:",i)
self.urls.append(i[:-1])
self.nums += 1
def checkClone(self,urls,list):
"""判重"""
flag = 0
if len(list) > 0:
for i in list:
if list == urls:
flag = 1
if flag == 1:
return 0
return 1
def visit(self):
request = urllib3.PoolManager()
# 计算时间
timeNum = 0
# 计算次数
listNum = 0
while True:
# 访问时间 8 12 18 20
#if self.currentHour == 0 or self.currentHour == 1 or self.currentHour == 20 or self.currentHour == 1:
try:
if self.currentHour:
# 得到url值
url = self.urls[random.randint(0,self.nums - 1)]
# 使用urllib3发送请求
# response = request.request('GET', url, headers=self.headers)
response = requests.get(url,headers= self.headers)
# print response.content
# break
# 打印返回信息
print(url,response.status_code,listNum,str(time.localtime().tm_hour) + ":" + str(time.localtime().tm_min))
# 访问一次睡一秒
time.sleep(random.choice(range(8,12)))
timeNum += 1
listNum += 1
# 每访问50次睡30秒
if listNum % 50 == 0:
for i in range(random.choice(range(25,30))):
print("\r剩余休息时间:%d秒"%(30-i))
time.sleep(1)
# 当到达一定时间(1个小时)之后重新读取文档
if timeNum%3600==0:
self.readFile()
else:
print("休息中,当前时间:" + str(time.localtime().tm_hour) + ":" + str(time.localtime().tm_min)
+ ":" + str(time.localtime().tm_sec) + " ...")
time.sleep(1)
except Exception as e:
print str(e)
time.sleep(3600)
def Main():
    """Create the spider, load its URL list, then start the visiting loop."""
    spider = visitSpider()
    spider.readFile()
    spider.visit()


if __name__ == "__main__":
    Main()
| [
"Venus_haiqiang@163.com"
] | Venus_haiqiang@163.com |
383e4c356d475a877c9169bab7cfba4b57eb90d8 | b9e0e10e9014f80ede6ea7c29174697367cf9acb | /src/evaluation_utils.py | 6ad5766e619bc2ac050261f9ead7b8438681df4f | [
"MIT"
] | permissive | talshapira/SASA | 2e9b7353147b5cba8e48b65ebb12b9bd13346a53 | 70db6ba36d7602e46fbb95b6a3cac822c8af2ab9 | refs/heads/main | 2023-08-17T08:52:33.398005 | 2021-10-04T15:36:44 | 2021-10-04T15:36:44 | 407,496,107 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,195 | py | from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score, recall_score, roc_auc_score, roc_curve, auc, precision_score
def get_tpr_specific_fpr(fpr, tpr, s_fpr=0.01):
    """Return the ROC operating point (fpr, tpr) just below the FPR budget.

    Scans the (assumed ascending) *fpr* values and returns the last point
    whose FPR does not exceed *s_fpr*.

    Fixes over the original:
    - when no FPR exceeds *s_fpr* the last point is returned instead of
      falling off the end and yielding ``None``;
    - when the very first FPR already exceeds *s_fpr*, index ``i - 1`` no
      longer wraps around to the末 element (clamped to 0).
    Returns ``None`` only for empty input.
    """
    for i, fp in enumerate(fpr):
        if fp > s_fpr:
            j = max(i - 1, 0)
            return fpr[j], tpr[j]
    if len(fpr) > 0:
        # Every point satisfies the budget: the last one is the best TPR.
        return fpr[-1], tpr[-1]
    return None
def print_evaluation_metrics(y_test, y_test_prediction, y_test_prob, model_name):
    """Print accuracy, false-alarm rate, recall, AUC and TPR@fixed-FPR for one model."""
    fpr, tpr, thresholds = roc_curve(y_test, y_test_prob)
    rows = [
        ("accuracy_score", accuracy_score(y_test, y_test_prediction)),
        ("FA", 1 - recall_score(y_test, y_test_prediction, pos_label=0)),
        ("Detection rate i.e. recall_score", recall_score(y_test, y_test_prediction)),
        ("AUC", roc_auc_score(y_test, y_test_prob)),
        ("TPR@FPR=0.001", get_tpr_specific_fpr(fpr, tpr, s_fpr=0.001)),
        ("TPR@FPR=0.01", get_tpr_specific_fpr(fpr, tpr, s_fpr=0.01)),
        ("TPR@FPR=0.1", get_tpr_specific_fpr(fpr, tpr, s_fpr=0.1)),
    ]
    for label, value in rows:
        print(label, "for", model_name, value)
def plot_roc_curve(y_test, y_test_prob, path_prefix, model_name='', max_fp=0.1):
    """Plot one ROC curve (FPR axis capped at *max_fp*) and save it to disk."""
    fpr, tpr, thresholds = roc_curve(y_test, y_test_prob)
    curve_label = model_name + ' (AUC = %0.3f)' % auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, lw=2, label=curve_label)
    plt.xlim([0.0, max_fp])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc="lower right")
    plt.savefig(path_prefix + "_ROC Curve", bbox_inches='tight')
    plt.show()
def plot_roc_curve_multiple(y_test, y_test_prob_list, path_prefix, model_names, max_fp=0.1):
    """Overlay ROC curves for several models on one figure and save it."""
    plt.figure()
    for idx, probs in enumerate(y_test_prob_list):
        fpr, tpr, thresholds = roc_curve(y_test, probs)
        curve_label = model_names[idx] + ' (AUC = %0.3f)' % auc(fpr, tpr)
        plt.plot(fpr, tpr, lw=2, label=curve_label)
    plt.xlim([0.0, max_fp])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc="lower right")
    plt.savefig(path_prefix + "_ROC Curve", bbox_inches='tight')
    plt.show()
def smooth(y, box_pts):
    """Smooth *y* with a centered moving-average (boxcar) filter of width *box_pts*.

    The output has the same length as the input (``mode='same'``); edge
    values are averaged against implicit zero padding.
    """
    kernel = np.full(box_pts, 1.0 / box_pts)
    return np.convolve(y, kernel, mode='same')
def plot_history_accuracy(history, epochs, path_prefix, sm=False, metrics=('acc', 'val_acc')):
    """Plot the training/test accuracy curves from a Keras-style history dict.

    Fix: the original used a mutable list as the *metrics* default argument;
    a tuple avoids the shared-mutable-default pitfall (only indexed here).

    :param history: mapping of metric name -> per-epoch values (plotted x100
        as percentages)
    :param epochs: number of epochs; x axis is 1..epochs
    :param path_prefix: prefix of the saved figure filename
    :param sm: when True, apply a width-2 moving average before plotting
    :param metrics: (train_key, test_key) looked up in *history*
    """
    x = np.asarray(range(1, epochs + 1))
    # Stored values are fractions; scale to percent for the y axis label.
    train = [y * 100 for y in history[metrics[0]]]
    test = [y * 100 for y in history[metrics[1]]]
    if sm:
        train = smooth(train, 2)
        test = smooth(test, 2)
    plt.figure()
    plt.plot(x, train)
    plt.plot(x, test)
    plt.ylabel('Accuracy (%)')
    plt.xlabel('Epochs')
    plt.legend(['Training', 'Test'], loc='lower right')
    plt.grid()
    plt.savefig(path_prefix + "_accuracy_history", bbox_inches='tight')
    plt.show()
def plot_history_loss(history, epochs, path_prefix, sm=False, metrics=('loss', 'val_loss')):
    """Plot the training/test loss curves from a Keras-style history dict.

    Fix: the original used a mutable list as the *metrics* default argument;
    a tuple avoids the shared-mutable-default pitfall (only indexed here).
    """
    x = np.asarray(range(1, epochs + 1))
    # NOTE(review): values are multiplied by 100 here, mirroring the accuracy
    # plot — confirm this scaling is actually intended for a loss.
    train = [y * 100 for y in history[metrics[0]]]
    test = [y * 100 for y in history[metrics[1]]]
    if sm:
        train = smooth(train, 2)
        test = smooth(test, 2)
    plt.figure()
    plt.plot(x, train)
    plt.plot(x, test)
    plt.ylabel('Loss')
    plt.xlabel('Epochs')
    plt.legend(['Training', 'Test'], loc='upper right')
    plt.grid()
    plt.savefig(path_prefix + "_loss_history", bbox_inches='tight')
    plt.show()
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          fname='Confusion matrix', title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    :param cm: square confusion-matrix array (rows = true, cols = predicted)
    :param classes: class labels used for both axis ticks
    :param normalize: when True, rows are scaled to sum to 1 and cells are
        rendered as percentages
    :param fname: output filename (".png" is appended)
    :param title: optional figure title
    :param cmap: matplotlib colormap for the image
    """
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    if title is not None:
        plt.title(title)
    cbar = plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    # Hard-coded y-limits — tuned for a 2x2 matrix.
    plt.ylim(-0.5, 1.5)
    plt.yticks(tick_marks, classes)
    # Percent with one decimal for normalized values, integer counts otherwise.
    fmt = '.1f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate each cell; text color flips for dark background cells.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            plt.text(j, i, format(cm[i, j] * 100, fmt) + '%',
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
            # NOTE(review): these colorbar calls are loop-invariant but are
            # re-executed for every cell in the original — kept as-is.
            cbar.set_ticks([0, .2, .4, 0.6, 0.8, 1])
            cbar.set_ticklabels(['0%', '20%', '40%', '60%', '80%', '100%'])
        else:
            plt.text(j, i, format(cm[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig(fname + ".png", bbox_inches='tight')
def compute_confusion_matrix(y_test, y_test_prediction, class_names, path_prefix):
    """Compute the confusion matrix and save both raw-count and normalized plots."""
    cnf_matrix = confusion_matrix(y_test, y_test_prediction)
    np.set_printoptions(precision=2)
    variants = [
        (False, path_prefix + "_" + 'Confusion_matrix_without_normalization'),
        (True, path_prefix + "_" + 'Normalized_confusion_matrix'),
    ]
    for normalize, fname in variants:
        plt.figure()
        plot_confusion_matrix(cnf_matrix, classes=class_names,
                              normalize=normalize, fname=fname)
    plt.show()
"noreply@github.com"
] | talshapira.noreply@github.com |
9e99850e135e9250610331794779c4d4a0c8881e | 449c29b00f44e441f285638eea46fe957a042424 | /MainApp/mixins.py | d01891068c8582665e8aca033474b0576eb0d1dc | [] | no_license | InnocenceNerevarine/Diploma | d4260cf55041583a6e199f75884bd46e3e5ffd3c | 7a2ea9976ec7e0730c7731a4a0613cd7c58013af | refs/heads/master | 2023-04-26T13:00:53.634118 | 2021-05-20T06:52:07 | 2021-05-20T06:52:07 | 368,245,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | from django.views.generic import View
from .models import Cart, Customer
class CartMixin(View):
    """View mixin that attaches the active Cart (creating it when needed).

    Authenticated users get a customer-owned cart that is not yet part of an
    order; anonymous visitors get a session-keyed cart.
    """

    def dispatch(self, request, *args, **kwargs):
        # Guarantee a session key exists before it is used as a cart key.
        if not request.session.session_key:
            request.session.save()
        self.session = request.session
        if request.user.is_authenticated:
            # ``first() or create()``: reuse an existing row, else make one.
            customer = (Customer.objects.filter(user=request.user).first()
                        or Customer.objects.create(user=request.user))
            cart = (Cart.objects.filter(owner=customer, in_order=False).first()
                    or Cart.objects.create(owner=customer))
        else:
            cart = (Cart.objects.filter(session_key=self.session.session_key,
                                        for_anonymous_user=True).first()
                    or Cart.objects.create(session_key=self.session.session_key,
                                           for_anonymous_user=True))
        self.cart = cart
        return super().dispatch(request, *args, **kwargs)
| [
"ilya.sidorov.2014@gmail.com"
] | ilya.sidorov.2014@gmail.com |
fc1a6a670a2055dd23d840641834d8d1a011428d | 18462298cd5636399735339266ece565e7fbd494 | /daily_weather/setup.py | e684da34c60bb9bee45f2fddfa44d55802629abc | [] | no_license | AnkitP7/flask-demo | 4f8897d7563392a398dc10207af900d4afe45115 | 1ce10542669a044cece68c5cf4fb1caaa99003aa | refs/heads/master | 2020-03-29T12:19:44.807918 | 2018-09-22T16:28:29 | 2018-09-22T16:28:29 | 149,893,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from setuptools import setup
# Minimal packaging metadata for the daily_weather Flask app.
setup(
    name='daily_weather',
    packages=['daily_weather'],
    # Ship non-Python files declared in MANIFEST.in / package data.
    include_package_data=True,
    install_requires=[
        'flask',
        'pylint'
    ],
)
| [
"ankit.patel39@gmail.com"
] | ankit.patel39@gmail.com |
5b2787c83a0a8eb0caae96635e595e2bc7f9dbed | bc441bb06b8948288f110af63feda4e798f30225 | /database_delivery_sdk/api/sqlpkgs/update_pb2.py | 0f09d354715753abc6b91286cff95f9f6a2d58bf | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 14,764 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from database_delivery_sdk.model.database_delivery import sql_package_version_pb2 as database__delivery__sdk_dot_model_dot_database__delivery_dot_sql__package__version__pb2
from database_delivery_sdk.model.database_delivery import app_pb2 as database__delivery__sdk_dot_model_dot_database__delivery_dot_app__pb2
from database_delivery_sdk.model.database_delivery import dbservice_pb2 as database__delivery__sdk_dot_model_dot_database__delivery_dot_dbservice__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='update.proto',
package='sqlpkgs',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0cupdate.proto\x12\x07sqlpkgs\x1aGdatabase_delivery_sdk/model/database_delivery/sql_package_version.proto\x1a\x37\x64\x61tabase_delivery_sdk/model/database_delivery/app.proto\x1a=database_delivery_sdk/model/database_delivery/dbservice.proto\"\xbd\x01\n\x17UpdateSQLPackageRequest\x12\r\n\x05pkgId\x18\x01 \x01(\t\x12\x43\n\x0cupdateSqlpkg\x18\x02 \x01(\x0b\x32-.sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg\x1aN\n\x0cUpdateSqlpkg\x12\r\n\x05\x61ppId\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x62ServiceId\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x0c\n\x04memo\x18\x04 \x01(\t\"\xa1\x02\n\x18UpdateSQLPackageResponse\x12\x39\n\x0bversionList\x18\x01 \x03(\x0b\x32$.database_delivery.SQLPackageVersion\x12+\n\x03\x41PP\x18\x02 \x03(\x0b\x32\x1e.database_delivery.Application\x12/\n\tDBSERVICE\x18\x03 \x03(\x0b\x32\x1c.database_delivery.DBService\x12\n\n\x02id\x18\x04 \x01(\t\x12\x0c\n\x04name\x18\x05 \x01(\t\x12\x0c\n\x04memo\x18\x06 \x01(\t\x12\x0f\n\x07\x63reator\x18\x07 \x01(\t\x12\r\n\x05\x63time\x18\x08 \x01(\x03\x12\r\n\x05mtime\x18\t \x01(\x03\x12\x15\n\rrepoPackageId\x18\n \x01(\t\"\x84\x01\n\x1fUpdateSQLPackageResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12/\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32!.sqlpkgs.UpdateSQLPackageResponseb\x06proto3')
,
dependencies=[database__delivery__sdk_dot_model_dot_database__delivery_dot_sql__package__version__pb2.DESCRIPTOR,database__delivery__sdk_dot_model_dot_database__delivery_dot_app__pb2.DESCRIPTOR,database__delivery__sdk_dot_model_dot_database__delivery_dot_dbservice__pb2.DESCRIPTOR,])
_UPDATESQLPACKAGEREQUEST_UPDATESQLPKG = _descriptor.Descriptor(
name='UpdateSqlpkg',
full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='appId', full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg.appId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dbServiceId', full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg.dbServiceId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg.memo', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=330,
serialized_end=408,
)
_UPDATESQLPACKAGEREQUEST = _descriptor.Descriptor(
name='UpdateSQLPackageRequest',
full_name='sqlpkgs.UpdateSQLPackageRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pkgId', full_name='sqlpkgs.UpdateSQLPackageRequest.pkgId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateSqlpkg', full_name='sqlpkgs.UpdateSQLPackageRequest.updateSqlpkg', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_UPDATESQLPACKAGEREQUEST_UPDATESQLPKG, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=408,
)
_UPDATESQLPACKAGERESPONSE = _descriptor.Descriptor(
name='UpdateSQLPackageResponse',
full_name='sqlpkgs.UpdateSQLPackageResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='versionList', full_name='sqlpkgs.UpdateSQLPackageResponse.versionList', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='APP', full_name='sqlpkgs.UpdateSQLPackageResponse.APP', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='DBSERVICE', full_name='sqlpkgs.UpdateSQLPackageResponse.DBSERVICE', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='sqlpkgs.UpdateSQLPackageResponse.id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='sqlpkgs.UpdateSQLPackageResponse.name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='sqlpkgs.UpdateSQLPackageResponse.memo', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='sqlpkgs.UpdateSQLPackageResponse.creator', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='sqlpkgs.UpdateSQLPackageResponse.ctime', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mtime', full_name='sqlpkgs.UpdateSQLPackageResponse.mtime', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='repoPackageId', full_name='sqlpkgs.UpdateSQLPackageResponse.repoPackageId', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=411,
serialized_end=700,
)
_UPDATESQLPACKAGERESPONSEWRAPPER = _descriptor.Descriptor(
name='UpdateSQLPackageResponseWrapper',
full_name='sqlpkgs.UpdateSQLPackageResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='sqlpkgs.UpdateSQLPackageResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='sqlpkgs.UpdateSQLPackageResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='sqlpkgs.UpdateSQLPackageResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='sqlpkgs.UpdateSQLPackageResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=703,
serialized_end=835,
)
_UPDATESQLPACKAGEREQUEST_UPDATESQLPKG.containing_type = _UPDATESQLPACKAGEREQUEST
_UPDATESQLPACKAGEREQUEST.fields_by_name['updateSqlpkg'].message_type = _UPDATESQLPACKAGEREQUEST_UPDATESQLPKG
_UPDATESQLPACKAGERESPONSE.fields_by_name['versionList'].message_type = database__delivery__sdk_dot_model_dot_database__delivery_dot_sql__package__version__pb2._SQLPACKAGEVERSION
_UPDATESQLPACKAGERESPONSE.fields_by_name['APP'].message_type = database__delivery__sdk_dot_model_dot_database__delivery_dot_app__pb2._APPLICATION
_UPDATESQLPACKAGERESPONSE.fields_by_name['DBSERVICE'].message_type = database__delivery__sdk_dot_model_dot_database__delivery_dot_dbservice__pb2._DBSERVICE
_UPDATESQLPACKAGERESPONSEWRAPPER.fields_by_name['data'].message_type = _UPDATESQLPACKAGERESPONSE
DESCRIPTOR.message_types_by_name['UpdateSQLPackageRequest'] = _UPDATESQLPACKAGEREQUEST
DESCRIPTOR.message_types_by_name['UpdateSQLPackageResponse'] = _UPDATESQLPACKAGERESPONSE
DESCRIPTOR.message_types_by_name['UpdateSQLPackageResponseWrapper'] = _UPDATESQLPACKAGERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UpdateSQLPackageRequest = _reflection.GeneratedProtocolMessageType('UpdateSQLPackageRequest', (_message.Message,), {
'UpdateSqlpkg' : _reflection.GeneratedProtocolMessageType('UpdateSqlpkg', (_message.Message,), {
'DESCRIPTOR' : _UPDATESQLPACKAGEREQUEST_UPDATESQLPKG,
'__module__' : 'update_pb2'
# @@protoc_insertion_point(class_scope:sqlpkgs.UpdateSQLPackageRequest.UpdateSqlpkg)
})
,
'DESCRIPTOR' : _UPDATESQLPACKAGEREQUEST,
'__module__' : 'update_pb2'
# @@protoc_insertion_point(class_scope:sqlpkgs.UpdateSQLPackageRequest)
})
_sym_db.RegisterMessage(UpdateSQLPackageRequest)
_sym_db.RegisterMessage(UpdateSQLPackageRequest.UpdateSqlpkg)
UpdateSQLPackageResponse = _reflection.GeneratedProtocolMessageType('UpdateSQLPackageResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATESQLPACKAGERESPONSE,
'__module__' : 'update_pb2'
# @@protoc_insertion_point(class_scope:sqlpkgs.UpdateSQLPackageResponse)
})
_sym_db.RegisterMessage(UpdateSQLPackageResponse)
UpdateSQLPackageResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateSQLPackageResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _UPDATESQLPACKAGERESPONSEWRAPPER,
'__module__' : 'update_pb2'
# @@protoc_insertion_point(class_scope:sqlpkgs.UpdateSQLPackageResponseWrapper)
})
_sym_db.RegisterMessage(UpdateSQLPackageResponseWrapper)
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
b1487c88e0ba1b6b72123718eba565f36d6903b3 | 8b0dcbc828284e273e1f2065b8d4870521681455 | /app.py | 0a9d7432681831d781202487642fbe84c4b276b5 | [] | no_license | BCStudentSoftwareDevTeam/Scrolling-Font-Changer | d46a06b9b750e2bee364f5d9c14fd36553980f4c | b9717eac8e207b6c2d6e3b6eb07a94bd7a0d5f96 | refs/heads/master | 2020-08-01T23:22:59.518056 | 2019-09-30T18:37:53 | 2019-09-30T18:37:53 | 211,156,250 | 0 | 0 | null | 2019-09-26T18:29:04 | 2019-09-26T18:29:03 | null | UTF-8 | Python | false | false | 2,889 | py | from flask import Flask, render_template, request, redirect, url_for
import threading, time
app = Flask(__name__)
# Strip the whitespace that Jinja block tags would otherwise leave in output.
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
@app.route('/young')
def young():
    """Render the main page with the "young" age variant."""
    return render_template("main.html", age = "young")
@app.route('/old')
def old():
    """Render the main page with the "old" age variant."""
    return render_template("main.html", age = "old")
@app.route('/editDisplay')
def editDisplay():
    """Render the display editor pre-populated with the current font.

    Fix: the file handle is now managed by a ``with`` block, so it is closed
    even when reading raises.
    """
    with open('currentFont.txt', 'r') as f:
        font = f.readline()
    return render_template("editDisplay.html", font=font)
@app.route('/getWords')
def getWords():
    """Return mute flag, current font, and word list as one '||'-joined string.

    Fix: all three file handles are now managed by ``with`` blocks instead of
    manual open/close pairs that leaked on exceptions.
    """
    with open('currentFont.txt', 'r') as f:
        font = f.readline()
    with open('words.txt', 'r') as f:
        words = f.read()
    with open('muteDisplay.txt', 'r') as f:
        muter = f.read()
    return muter + "||" + font + "||" + words
@app.route('/vetWords')
def vetWords():
    """Render the moderation page listing pending and already-approved words.

    Fixes: the second file handle was never closed (now both use ``with``);
    a stray debug ``print`` was removed.
    """
    with open('pendingWords.txt', 'r') as f:
        words = f.readlines()
    with open('words.txt', 'r') as f:
        vettedWords = f.readlines()
    return render_template("vetWords.html", words=words, vettedWords=vettedWords)
@app.route('/approve/<word>')
def approve(word):
    """Approve *word*: append it to words.txt and drop it from pendingWords.txt.

    Returns the remaining pending-words text ("" when none are left).
    Fixes: every handle is now managed by ``with``; four debug prints were
    removed; the trailing if/else collapsed to a single return (reading a
    file already yields "" when it is empty).
    """
    target = word.strip()
    with open('words.txt', 'a') as f:
        f.write(word + "\n")
    with open('pendingWords.txt', 'r') as f:
        lines = f.readlines()
    # Rewrite the pending file without the approved entry.
    with open('pendingWords.txt', 'w') as f:
        for line in lines:
            if line.strip() != target:
                f.write(line)
    with open('pendingWords.txt', 'r') as f:
        remaining = f.read()
    return remaining
@app.route('/removeWord', methods=["POST"])
def removeWord():
    """Blank out the submitted word in pendingWords.txt, then return to vetting.

    Fix: the read handle was rebound without being closed; both handles now
    use ``with``.
    """
    word = request.form.get("word")
    with open('pendingWords.txt', 'r') as f:
        contents = f.read()
    # NOTE(review): the word is replaced by a space rather than its line being
    # removed — preserved from the original; confirm this is intended.
    contents = contents.replace(word, " ")
    with open('pendingWords.txt', 'w') as f:
        f.write(contents)
    return redirect(url_for('vetWords'))
@app.route('/sendWord/<age>/<word>')
def sendWord(age, word):
    """Queue *word* (tagged with *age*) for moderation and echo it back.

    Fix: the append handle is now managed by ``with``.
    """
    with open('pendingWords.txt', 'a') as f:
        f.write(age + ": " + word.strip() + ":|: \n")
    return word
@app.route('/sendFont/<font>')
def sendFont(font):
    """Persist *font* as the current display font and echo it back.

    Fix: the write handle is now managed by ``with``.
    """
    with open('currentFont.txt', 'w') as f:
        f.write(font)
    return font
@app.route('/getFont')
def getFont():
    """Return the currently persisted display font.

    Fixes: the handle is now managed by ``with``; a debug print was removed.
    """
    with open('currentFont.txt', 'r') as f:
        font = f.read()
    return font
def updateMuteState():
    """Toggle the persisted mute flag in muteDisplay.txt.

    An empty file is treated as unmuted, so the first toggle yields "true".
    Fix: the read handle was rebound without being closed; both handles now
    use ``with``.
    """
    states = {"true": "false",
              "false": "true",
              "": "true"}
    with open('muteDisplay.txt', 'r') as f:
        currentState = f.read()
    with open('muteDisplay.txt', 'w') as f:
        f.write(states[currentState])
@app.route('/muteDisplay')
def muteDisplay():
    """Toggle the mute flag now, schedule a toggle-back in 20 s, return the new state."""
    updateMuteState()
    # Revert the flag automatically after 20 seconds.
    threading.Timer(20.0, updateMuteState).start()
    with open('muteDisplay.txt', 'r') as handle:
        state = handle.read()
    return state
| [
"heggens@berea.edu"
] | heggens@berea.edu |
fef8db32f61c0a08006394e2202189ef6d30a1d7 | bba12c5af82ea9d1f0321231bd4d33e835212128 | /redisPubsub.py | 565f0398530d4426cbc67a325ae307265ba30a0b | [] | no_license | stock-ed/material-study | c151598b2b22f34ee8fb906ac86689b264094769 | 04b5607881b44faa9d4568b412ed5249a6f170b1 | refs/heads/main | 2023-08-23T22:24:16.810727 | 2021-10-18T07:22:08 | 2021-10-18T07:22:08 | 404,207,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | import threading
import redis
import json
from redisTSBars import RealTimeBars
from redisUtil import KeyName, RedisAccess
class RedisSubscriber(threading.Thread):
    """Background thread that listens on Redis pub/sub channels.

    Each received message is JSON-decoded and handed to ``callback``; when no
    callback was supplied the raw channel and payload are printed instead.
    Publishing the literal string "KILL" on a subscribed channel unsubscribes
    and terminates the thread.
    """

    def __init__(self, channels, r=None, callback=None):
        threading.Thread.__init__(self)
        self.redis = RedisAccess.connection(r)
        self.pubsub = self.redis.pubsub()
        self.pubsub.subscribe(channels)
        self.callback = callback

    def get_redis(self):
        """Return the underlying Redis connection object."""
        return self.redis

    def work(self, package):
        """Dispatch a single pub/sub package to the callback (or print it)."""
        if self.callback is None:
            print(package['channel'], ":", package['data'])
        else:
            self.callback(json.loads(package['data']))

    def run(self):
        """Consume messages until a "KILL" payload arrives."""
        for package in self.pubsub.listen():
            if package['data'] == "KILL":
                self.pubsub.unsubscribe()
                print("unsubscribed and finished")
                break
            if package['type'] == 'message':
                self.work(package)
class RedisPublisher:
    """Thin wrapper that JSON-encodes payloads and publishes them on the
    first configured channel."""

    def __init__(self, channels, r=None):
        self.redis = RedisAccess.connection(r)
        self.channels = channels

    def publish(self, data):
        """Serialize *data* as JSON and publish it."""
        self.redis.publish(self.channels[0], json.dumps(data))

    def killme(self):
        """Ask any subscriber on the channel to shut down."""
        self.redis.publish(self.channels[0], 'KILL')
class StreamBarsSubscriber(RedisSubscriber):
    """Subscriber bound to the bar-to-DB event channel; every incoming bar is
    stored through RealTimeBars.redisAdd1Min."""

    def __init__(self):
        self.rtb = RealTimeBars()
        super().__init__(KeyName.EVENT_BAR2DB, callback=self.rtb.redisAdd1Min)
class StreamBarsPublisher(RedisPublisher):
    """Publisher bound to the bar-to-DB event channel."""

    def __init__(self):
        super().__init__(KeyName.EVENT_BAR2DB)
if __name__ == "__main__":
pass
| [
"kyoungd@hotmail.com"
] | kyoungd@hotmail.com |
f73ad632a0b644f358bb65369b1122bfe0e5dda5 | 3143d971afa307c824c76cb3b6fba27b47b53ff8 | /showTest/grade_crawle/src/example.py | 869587302567e21d03fd1ee54a9abc628ccaef67 | [] | no_license | InnerAc/GradeQuery | 62838fef31af17e012198bfdc754c83abaca5869 | 53769a50e27b9e4aed146c30a8d30e05466c6c04 | refs/heads/master | 2020-05-29T17:55:52.086130 | 2016-02-27T03:06:42 | 2016-02-27T03:06:42 | 42,023,260 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | from segmentation import NormalSegmenter
from feature_extraction import SimpleFeatureExtractor
from analyzer import KNNAnalyzer
import random
import urllib
def getImage(url, file_path):
u = urllib.urlopen(url)
data = u.read()
f = open(file_path, 'wb')
f.write(data)
f.close()
segmenter = NormalSegmenter()
extractor = SimpleFeatureExtractor( feature_size=20, stretch=False )
analyzer = KNNAnalyzer( segmenter, extractor)
analyzer.train('../data/features.jpg')
for i in range(1):
rand = random.random()
url = "http://202.119.113.135/validateCodeAction.do?random=" + str(rand);
#print url
file_path = "../train/crawler.jpg"
getImage(url,file_path)
result = analyzer.analyze('../train/crawler.jpg')
print result
#analyzer.display()
#analyzer.display_binary()
| [
"anjicun@live.com"
] | anjicun@live.com |
3edc6166c5ab9e995f874c861d02d67b0d48ae21 | 3d671fcdd27ae90698c29d0e066c662dcd4e5ee9 | /myproject/myroot/mysite/settings.py | e0fa391dbcf7d7712f450517d82f4800b9950be3 | [] | no_license | GeethuEipe/Django | 63212ac6e4bcaefa8cc5c05ea8641e72ddd04518 | 799c1157b0d7fdab07b02ca491a722ce60219ac0 | refs/heads/main | 2023-03-26T18:32:24.119589 | 2021-03-27T12:29:41 | 2021-03-27T12:29:41 | 352,067,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,157 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ljwyn^1i_($byl&$m#xn!34+#)6i^%l*uurs8)6dukbq!-*n%q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'event'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'mysite/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [BASE_DIR / 'mysite/static']
| [
"geethueipe97@gmail.com"
] | geethueipe97@gmail.com |
788b1114cf8da3899edd4800a1fbc676bf8142ee | 1577e1cf4e89584a125cffb855ca50a9654c6d55 | /pyobjc/pyobjc/pyobjc-framework-Quartz-2.5.1/Examples/Programming with Quartz/BasicDrawing/MyAppController.py | 7108ddb749d657bf205c4db6e76aba0164427919 | [
"MIT"
] | permissive | apple-open-source/macos | a4188b5c2ef113d90281d03cd1b14e5ee52ebffb | 2d2b15f13487673de33297e49f00ef94af743a9a | refs/heads/master | 2023-08-01T11:03:26.870408 | 2023-03-27T00:00:00 | 2023-03-27T00:00:00 | 180,595,052 | 124 | 24 | null | 2022-12-27T14:54:09 | 2019-04-10T14:06:23 | null | UTF-8 | Python | false | false | 4,062 | py | from Cocoa import *
import objc
import PDFHandling
import BitmapContext
import Utilities
# Initial defaults
_dpi = 144
_useQT = False
def getURLToExport(suffix):
    """Run a save panel pre-filled with "BasicDrawing.<suffix>".

    Returns the chosen NSURL, or None when the user cancels.
    """
    panel = NSSavePanel.savePanel()
    suggestedName = "BasicDrawing.%s" % (suffix,)
    if panel.runModalForDirectory_file_(None, suggestedName) == NSFileHandlingPanelOKButton:
        return panel.URL()
    return None
class MyAppController (NSObject):
    """Application controller for the BasicDrawing example.

    Drives printing and export of ``theView`` and keeps the DPI and
    export-style menus' check marks in sync with the module-level ``_dpi``
    and ``_useQT`` settings.
    """

    theView = objc.IBOutlet()
    currentDPIMenuItem = objc.IBOutlet()
    currentExportStyleMenuItem = objc.IBOutlet()

    @objc.IBAction
    def print_(self, sender):
        """Forward the Print menu action to the drawing view."""
        self.theView.print_(sender)

    def updateDPIMenu_(self, sender):
        """Move the DPI menu's check mark to *sender*."""
        if self.currentDPIMenuItem is not sender:
            # Uncheck the previous item.
            if self.currentDPIMenuItem is not None:
                self.currentDPIMenuItem.setState_(NSOffState)
            # Update to the current item.
            self.currentDPIMenuItem = sender
            # Check new menu item.
            self.currentDPIMenuItem.setState_(NSOnState)

    def updateExportStyleMenu_(self, sender):
        """Move the export-style menu's check mark to *sender*."""
        if self.currentExportStyleMenuItem is not sender:
            # Uncheck the previous item.
            if self.currentExportStyleMenuItem is not None:
                self.currentExportStyleMenuItem.setState_(NSOffState)
            # Update to the current item.
            self.currentExportStyleMenuItem = sender
            # Check new menu item.
            self.currentExportStyleMenuItem.setState_(NSOnState)

    @objc.IBAction
    def setExportResolution_(self, sender):
        """Set the export DPI from the chosen menu item's tag."""
        global _dpi
        _dpi = sender.tag()
        self.updateDPIMenu_(sender)

    @objc.IBAction
    def setUseQT_(self, sender):
        """Select QuickTime-based export."""
        global _useQT
        _useQT = True
        self.updateExportStyleMenu_(sender)

    @objc.IBAction
    def setUseCGImageSource_(self, sender):
        """Select CGImageSource-based export."""
        global _useQT
        _useQT = False
        self.updateExportStyleMenu_(sender)

    def setupExportInfo_(self, exportInfoP):
        """Fill *exportInfoP* with the current export settings."""
        # Use the printable version of the current command. This produces
        # the best results for exporting.
        exportInfoP.command = self.theView.currentPrintableCommand()
        exportInfoP.fileType = ' '  # unused
        exportInfoP.useQTForExport = _useQT
        exportInfoP.dpi = _dpi

    @objc.IBAction
    def exportAsPDF_(self, sender):
        """Export the drawing as a PDF document."""
        url = getURLToExport("pdf")
        if url is not None:
            exportInfo = Utilities.ExportInfo()
            self.setupExportInfo_(exportInfo)
            PDFHandling.MakePDFDocument(url, exportInfo)

    @objc.IBAction
    def exportAsPNG_(self, sender):
        """Export the drawing as a PNG image."""
        url = getURLToExport("png")
        if url is not None:
            exportInfo = Utilities.ExportInfo()
            self.setupExportInfo_(exportInfo)
            BitmapContext.MakePNGDocument(url, exportInfo)

    @objc.IBAction
    def exportAsTIFF_(self, sender):
        """Export the drawing as a TIFF image."""
        url = getURLToExport("tif")
        if url is not None:
            exportInfo = Utilities.ExportInfo()
            self.setupExportInfo_(exportInfo)
            BitmapContext.MakeTIFFDocument(url, exportInfo)

    @objc.IBAction
    def exportAsJPEG_(self, sender):
        """Export the drawing as a JPEG image."""
        url = getURLToExport("jpg")
        if url is not None:
            exportInfo = Utilities.ExportInfo()
            self.setupExportInfo_(exportInfo)
            BitmapContext.MakeJPEGDocument(url, exportInfo)

    def validateMenuItem_(self, menuItem):
        """Menu validation: keep the check marks of the DPI and export-style
        menus consistent with the current _dpi/_useQT settings.

        Fixes over the original:
        * compared the unbound ``menuItem.tag`` method (not ``tag()``) to
          ``_dpi``, which was always False;
        * assigned to a *local* ``currentDPIMenuItem`` instead of the ivar;
        * tracked export-style items in the DPI ivar;
        * the ``setUseCGImageSource:`` branch had inverted polarity (it should
          be checked when QuickTime export is OFF).
        """
        if menuItem.tag() == _dpi:
            self.currentDPIMenuItem = menuItem
            menuItem.setState_(True)
        elif menuItem.action() == 'setUseQT:':
            if _useQT:
                self.currentExportStyleMenuItem = menuItem
                menuItem.setState_(True)
            else:
                menuItem.setState_(False)
        elif menuItem.action() == 'setUseCGImageSource:':
            if not _useQT:
                self.currentExportStyleMenuItem = menuItem
                menuItem.setState_(True)
            else:
                menuItem.setState_(False)
        return True
| [
"opensource@apple.com"
] | opensource@apple.com |
75a01a39bc004c6914c4510e6c3287cc71942b9a | 478fe983582eee010b9de9a446383c02e2c3b449 | /utils/merge_fastq.py | 3908ca836dcf3e7df05759cfcc039c4fe0fb7116 | [] | no_license | jrw24/SRI37240 | 79e9d4f1090e3b19fa493cc28e559dfedc917041 | ddb86a12f60abaf593df627b6ed6512097ceb33a | refs/heads/master | 2021-07-23T14:13:02.466810 | 2020-02-13T19:37:13 | 2020-02-13T19:37:13 | 240,345,550 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | ### Script for merging fastq files from seperate experiments
import sys
import os
import subprocess
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--inputDir', help= 'directory with fastq files')
parser.add_argument('--outputDir', help = 'directory to send output')
args = parser.parse_args()
inpath = args.inputDir
outpath = args.outputDir
fq1 = [
"1_dmso_A",
"4_g418_A",
"7_sri37240_A"
]
fq2 = [
"2_dmso_B",
"5_g418_B",
"8_sri37240_B"
]
fq3 = [
"3_dmso_C",
"6_g418_C",
"9_sri37240_C"
]
fq_merged = [
"1_dmso",
"2_g418",
"3_sri372340"
]
# if not os.path.exists(FASTQpath): os.makedirs(FASTQpath)
def mergeFastQ(fq1Input, fq2Input, fq3Input, fqOutput):
    """Concatenate the gzipped FASTQ files of three replicates into one file.

    Builds glob patterns under the module-level ``inpath`` and shells out to
    ``cat`` so the gzip members are simply appended (valid for .gz streams).
    """
    fq1 = '%s/%s*.fastq.gz' % (inpath, fq1Input)
    fq2 = '%s/%s*.fastq.gz' % (inpath, fq2Input)
    # BUG FIX: this was assigned to "fg3" (typo), so the shell command below
    # picked up the module-level *list* named fq3 and interpolated its repr.
    fq3 = '%s/%s*.fastq.gz' % (inpath, fq3Input)
    fqOut = '%s/%s.fastq.gz' % (outpath, fqOutput)
    merge_command = 'cat %s %s %s > %s' % (fq1, fq2, fq3, fqOut)
    print(merge_command)
    os.system(merge_command)
for sample in range(len(fq_merged)):
mergeFastQ(fq1[sample], fq2[sample], fq3[sample], fq_merged[sample])
| [
"greenlab@greenlabs-pro.win.ad.jhu.edu"
] | greenlab@greenlabs-pro.win.ad.jhu.edu |
01685b4a849a3156658fa0dbdaad10650ff9d148 | b14802e3892a661fa62d9d0772f72becc0abd612 | /evaluation/get_top_socored.py | 0bd0d8919ad1d0eed44022b6a57cbb69617117bb | [] | no_license | gombru/HateSpeech | e4c4b7993354ce2cb49334b814f929364fdcb446 | 7891c7e2835f17ed2a9985abd285e19788685c66 | refs/heads/master | 2022-02-23T08:57:34.909778 | 2022-02-10T12:54:41 | 2022-02-10T12:54:41 | 138,057,409 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | import numpy as np
import operator
import shutil
import os
model_name = 'MMHS_classification_CNNinit_SCM_ALL_epoch_10_ValAcc_62'
out_folder_name = 'top_MMHS_classification_CNNinit_SCM_ALL_epoch_10_ValAcc_62'
out_file = open('../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name + '.txt','w')
if not os.path.exists('../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name):
os.makedirs('../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name)
results = {}
with open('../../../datasets/HateSPic/MMHS/results/' + model_name + '/test.txt') as f:
for line in f:
data = line.split(',')
id = int(data[0])
label = int(data[1])
hate_score = float(data[3])
notHate_score = float(data[2])
softmax_hate_score = np.exp(hate_score) / (np.exp(hate_score) + np.exp(notHate_score))
results[id] = softmax_hate_score
results = sorted(results.items(), key=operator.itemgetter(1))
results = list(reversed(results))
for i,r in enumerate(results):
if i == 50: break
print r[1]
shutil.copyfile('../../../datasets/HateSPic/MMHS/img_resized/' + str(str(r[0])) + '.jpg', '../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name + '/' + str(i) + '-' + str(r[0]) + '.jpg')
out_file.write(str(r[0]) + '\n')
out_file.close()
print("Done") | [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
c02da091fdeacb53d6ce13fd9ec1162d84589d2e | 058a0b8ca26624c74edf260e19ead70548f66e25 | /UserInterface/Admin_Mode_1.py | 39e252dc85979c0e3bc38a874e3642c1b9389258 | [] | no_license | chenhuik029/TimeLog_py | 35a9eb291e81c5b4f28de84f0ae2f0a677ebddd8 | a47d174dd189cb8221b7bd8545b4e91a0d0ba4ab | refs/heads/master | 2023-06-08T02:40:20.856109 | 2021-07-04T13:54:55 | 2021-07-04T13:54:55 | 330,116,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,101 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '04_Admin_Mode_01.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Admin_Mode_1(object):
def setupUi(self, Admin_Mode_1):
Admin_Mode_1.setObjectName("Admin_Mode_1")
Admin_Mode_1.resize(1022, 835)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 233, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 233, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 233, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 233, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
Admin_Mode_1.setPalette(palette)
self.centralwidget = QtWidgets.QWidget(Admin_Mode_1)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.frame_title = QtWidgets.QFrame(self.centralwidget)
self.frame_title.setMaximumSize(QtCore.QSize(16777215, 50))
self.frame_title.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_title.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_title.setObjectName("frame_title")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame_title)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_title = QtWidgets.QLabel(self.frame_title)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.label_title.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(18)
font.setBold(True)
font.setWeight(75)
self.label_title.setFont(font)
self.label_title.setObjectName("label_title")
self.verticalLayout_2.addWidget(self.label_title)
self.verticalLayout.addWidget(self.frame_title)
self.frame_instruction = QtWidgets.QFrame(self.centralwidget)
self.frame_instruction.setMaximumSize(QtCore.QSize(16777215, 50))
self.frame_instruction.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_instruction.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_instruction.setObjectName("frame_instruction")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_instruction)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_instruction = QtWidgets.QLabel(self.frame_instruction)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(14)
font.setBold(True)
font.setUnderline(True)
font.setWeight(75)
self.label_instruction.setFont(font)
self.label_instruction.setObjectName("label_instruction")
self.verticalLayout_3.addWidget(self.label_instruction)
self.verticalLayout.addWidget(self.frame_instruction)
self.frame_input = QtWidgets.QFrame(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 233, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 233, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 233, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 233, 190))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
self.frame_input.setPalette(palette)
self.frame_input.setStyleSheet("b")
self.frame_input.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_input.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_input.setObjectName("frame_input")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame_input)
self.horizontalLayout.setContentsMargins(20, -1, 20, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
self.frame = QtWidgets.QFrame(self.frame_input)
self.frame.setMinimumSize(QtCore.QSize(350, 0))
self.frame.setMaximumSize(QtCore.QSize(350, 16777215))
self.frame.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_4.setSpacing(30)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_emp_name = QtWidgets.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(11)
self.label_emp_name.setFont(font)
self.label_emp_name.setObjectName("label_emp_name")
self.verticalLayout_4.addWidget(self.label_emp_name)
self.label_emp_id = QtWidgets.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(11)
self.label_emp_id.setFont(font)
self.label_emp_id.setObjectName("label_emp_id")
self.verticalLayout_4.addWidget(self.label_emp_id)
self.label_card_id = QtWidgets.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(11)
self.label_card_id.setFont(font)
self.label_card_id.setObjectName("label_card_id")
self.verticalLayout_4.addWidget(self.label_card_id)
self.label_emp_sal = QtWidgets.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(11)
self.label_emp_sal.setFont(font)
self.label_emp_sal.setObjectName("label_emp_sal")
self.verticalLayout_4.addWidget(self.label_emp_sal)
self.label_2 = QtWidgets.QLabel(self.frame)
font = QtGui.QFont()
font.setPointSize(11)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.verticalLayout_4.addWidget(self.label_2)
self.horizontalLayout.addWidget(self.frame)
self.frame_2 = QtWidgets.QFrame(self.frame_input)
self.frame_2.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.frame_2)
self.verticalLayout_5.setSpacing(30)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.lineEdit_emp_name = QtWidgets.QLineEdit(self.frame_2)
self.lineEdit_emp_name.setMinimumSize(QtCore.QSize(200, 40))
self.lineEdit_emp_name.setMaximumSize(QtCore.QSize(16777215, 40))
font = QtGui.QFont()
font.setPointSize(10)
self.lineEdit_emp_name.setFont(font)
self.lineEdit_emp_name.setAutoFillBackground(False)
self.lineEdit_emp_name.setObjectName("lineEdit_emp_name")
self.verticalLayout_5.addWidget(self.lineEdit_emp_name)
self.lineEdit_emp_id = QtWidgets.QLineEdit(self.frame_2)
self.lineEdit_emp_id.setMinimumSize(QtCore.QSize(200, 40))
self.lineEdit_emp_id.setMaximumSize(QtCore.QSize(16777215, 40))
font = QtGui.QFont()
font.setPointSize(10)
self.lineEdit_emp_id.setFont(font)
self.lineEdit_emp_id.setObjectName("lineEdit_emp_id")
self.verticalLayout_5.addWidget(self.lineEdit_emp_id)
self.lineEdit_card_id = QtWidgets.QLineEdit(self.frame_2)
self.lineEdit_card_id.setMinimumSize(QtCore.QSize(200, 40))
self.lineEdit_card_id.setMaximumSize(QtCore.QSize(16777215, 40))
font = QtGui.QFont()
font.setPointSize(10)
self.lineEdit_card_id.setFont(font)
self.lineEdit_card_id.setObjectName("lineEdit_card_id")
self.verticalLayout_5.addWidget(self.lineEdit_card_id)
self.lineEdit_emp_sal = QtWidgets.QLineEdit(self.frame_2)
self.lineEdit_emp_sal.setMinimumSize(QtCore.QSize(200, 40))
self.lineEdit_emp_sal.setMaximumSize(QtCore.QSize(16777215, 40))
font = QtGui.QFont()
font.setPointSize(10)
self.lineEdit_emp_sal.setFont(font)
self.lineEdit_emp_sal.setObjectName("lineEdit_emp_sal")
self.verticalLayout_5.addWidget(self.lineEdit_emp_sal)
self.comboBox_emp_stat = QtWidgets.QComboBox(self.frame_2)
self.comboBox_emp_stat.setMinimumSize(QtCore.QSize(200, 30))
self.comboBox_emp_stat.setObjectName("comboBox_emp_stat")
self.comboBox_emp_stat.addItem("")
self.comboBox_emp_stat.addItem("")
self.verticalLayout_5.addWidget(self.comboBox_emp_stat)
self.horizontalLayout.addWidget(self.frame_2)
self.verticalLayout.addWidget(self.frame_input)
self.frame_button = QtWidgets.QFrame(self.centralwidget)
self.frame_button.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_button.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_button.setObjectName("frame_button")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame_button)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.pushButton_cancel = QtWidgets.QPushButton(self.frame_button)
self.pushButton_cancel.setMinimumSize(QtCore.QSize(100, 40))
self.pushButton_cancel.setMaximumSize(QtCore.QSize(100, 40))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.pushButton_cancel.setFont(font)
self.pushButton_cancel.setAutoFillBackground(False)
self.pushButton_cancel.setStyleSheet("background: #f0f0f0")
self.pushButton_cancel.setObjectName("pushButton_cancel")
self.horizontalLayout_2.addWidget(self.pushButton_cancel)
self.pushButton_2 = QtWidgets.QPushButton(self.frame_button)
self.pushButton_2.setMinimumSize(QtCore.QSize(100, 40))
self.pushButton_2.setMaximumSize(QtCore.QSize(100, 40))
font = QtGui.QFont()
font.setPointSize(10)
self.pushButton_2.setFont(font)
self.pushButton_2.setAutoFillBackground(False)
self.pushButton_2.setStyleSheet("background: #f0f0f0")
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout_2.addWidget(self.pushButton_2)
self.verticalLayout.addWidget(self.frame_button)
self.label = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.EmployeeDataBase_Table = QtWidgets.QTableWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.EmployeeDataBase_Table.sizePolicy().hasHeightForWidth())
self.EmployeeDataBase_Table.setSizePolicy(sizePolicy)
self.EmployeeDataBase_Table.setLayoutDirection(QtCore.Qt.LeftToRight)
self.EmployeeDataBase_Table.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.EmployeeDataBase_Table.setLineWidth(1)
self.EmployeeDataBase_Table.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored)
self.EmployeeDataBase_Table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.EmployeeDataBase_Table.setObjectName("EmployeeDataBase_Table")
self.EmployeeDataBase_Table.setColumnCount(7)
self.EmployeeDataBase_Table.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.EmployeeDataBase_Table.setHorizontalHeaderItem(0, item)
self.EmployeeDataBase_Table.setColumnWidth(0, 30)
item = QtWidgets.QTableWidgetItem()
self.EmployeeDataBase_Table.setHorizontalHeaderItem(1, item)
self.EmployeeDataBase_Table.setColumnWidth(1, 270)
item = QtWidgets.QTableWidgetItem()
self.EmployeeDataBase_Table.setHorizontalHeaderItem(2, item)
self.EmployeeDataBase_Table.setColumnWidth(2, 200)
item = QtWidgets.QTableWidgetItem()
self.EmployeeDataBase_Table.setHorizontalHeaderItem(3, item)
self.EmployeeDataBase_Table.setColumnWidth(3, 100)
item = QtWidgets.QTableWidgetItem()
self.EmployeeDataBase_Table.setHorizontalHeaderItem(4, item)
self.EmployeeDataBase_Table.setColumnWidth(4, 100)
item = QtWidgets.QTableWidgetItem()
self.EmployeeDataBase_Table.setHorizontalHeaderItem(5, item)
self.EmployeeDataBase_Table.setColumnWidth(5, 150)
item = QtWidgets.QTableWidgetItem()
self.EmployeeDataBase_Table.setHorizontalHeaderItem(6, item)
self.EmployeeDataBase_Table.setColumnWidth(6, 100)
self.EmployeeDataBase_Table.horizontalHeader().setCascadingSectionResizes(False)
self.EmployeeDataBase_Table.horizontalHeader().setMinimumSectionSize(39)
self.EmployeeDataBase_Table.horizontalHeader().setSortIndicatorShown(True)
self.EmployeeDataBase_Table.horizontalHeader().setStretchLastSection(False)
self.EmployeeDataBase_Table.verticalHeader().setVisible(False)
self.EmployeeDataBase_Table.verticalHeader().setStretchLastSection(False)
self.verticalLayout.addWidget(self.EmployeeDataBase_Table)
spacerItem1 = QtWidgets.QSpacerItem(20, 30, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.verticalLayout.addItem(spacerItem1)
Admin_Mode_1.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Admin_Mode_1)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1022, 20))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuNavigate = QtWidgets.QMenu(self.menubar)
self.menuNavigate.setObjectName("menuNavigate")
Admin_Mode_1.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Admin_Mode_1)
self.statusbar.setObjectName("statusbar")
Admin_Mode_1.setStatusBar(self.statusbar)
self.actionExit = QtWidgets.QAction(Admin_Mode_1)
self.actionExit.setObjectName("actionExit")
self.actionBack = QtWidgets.QAction(Admin_Mode_1)
self.actionBack.setObjectName("actionBack")
self.menuFile.addAction(self.actionExit)
self.menuNavigate.addAction(self.actionBack)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuNavigate.menuAction())
self.retranslateUi(Admin_Mode_1)
QtCore.QMetaObject.connectSlotsByName(Admin_Mode_1)
def retranslateUi(self, Admin_Mode_1):
_translate = QtCore.QCoreApplication.translate
Admin_Mode_1.setWindowTitle(_translate("Admin_Mode_1", "Attendance Recorder System - Admin Mode (Add Employee ID)"))
self.label_title.setText(_translate("Admin_Mode_1", "Administrator Mode"))
self.label_instruction.setText(_translate("Admin_Mode_1", "Add Employee ID"))
self.label_emp_name.setText(_translate("Admin_Mode_1", "Employee Name:\n"
"(Last Name, First Name)"))
self.label_emp_id.setText(_translate("Admin_Mode_1", "Employee ID: \n"
"(For Manual Entry)"))
self.label_card_id.setText(_translate("Admin_Mode_1", "Card ID: \n"
"(Please Tap Designated Card at Card Reader\n"
" to retrieve Card ID)"))
self.label_emp_sal.setText(_translate("Admin_Mode_1", "Employee Salary:\n"
" (For Salary Disbursement Usage)"))
self.label_2.setText(_translate("Admin_Mode_1", "Employment Status:"))
self.lineEdit_emp_name.setPlaceholderText(_translate("Admin_Mode_1", "Employee Name..."))
self.lineEdit_emp_id.setPlaceholderText(_translate("Admin_Mode_1", "Employee ID..."))
self.lineEdit_card_id.setPlaceholderText(_translate("Admin_Mode_1", "Please tap RFID Card on Card Reader for Card ID..."))
self.lineEdit_emp_sal.setPlaceholderText(_translate("Admin_Mode_1", "Employee salary..."))
self.comboBox_emp_stat.setItemText(0, _translate("Admin_Mode_1", "Active"))
self.comboBox_emp_stat.setItemText(1, _translate("Admin_Mode_1", "Inactive"))
self.pushButton_cancel.setText(_translate("Admin_Mode_1", "Back"))
self.pushButton_2.setText(_translate("Admin_Mode_1", "Apply"))
self.label.setText(_translate("Admin_Mode_1", "Employee Database"))
self.EmployeeDataBase_Table.setSortingEnabled(True)
item = self.EmployeeDataBase_Table.horizontalHeaderItem(0)
item.setText(_translate("Admin_Mode_1", "ID"))
item = self.EmployeeDataBase_Table.horizontalHeaderItem(1)
item.setText(_translate("Admin_Mode_1", "Employee Name"))
item = self.EmployeeDataBase_Table.horizontalHeaderItem(2)
item.setText(_translate("Admin_Mode_1", "Employee ID"))
item = self.EmployeeDataBase_Table.horizontalHeaderItem(3)
item.setText(_translate("Admin_Mode_1", "Card ID"))
item = self.EmployeeDataBase_Table.horizontalHeaderItem(4)
item.setText(_translate("Admin_Mode_1", "Salary"))
item = self.EmployeeDataBase_Table.horizontalHeaderItem(5)
item.setText(_translate("Admin_Mode_1", "Employee Status"))
item = self.EmployeeDataBase_Table.horizontalHeaderItem(6)
item.setText(_translate("Admin_Mode_1", "Date Joined"))
self.menuFile.setTitle(_translate("Admin_Mode_1", "File"))
self.menuNavigate.setTitle(_translate("Admin_Mode_1", "Navigate"))
self.actionExit.setText(_translate("Admin_Mode_1", "Exit"))
self.actionBack.setText(_translate("Admin_Mode_1", "Back"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Admin_Mode_1 = QtWidgets.QMainWindow()
ui = Ui_Admin_Mode_1()
ui.setupUi(Admin_Mode_1)
Admin_Mode_1.show()
sys.exit(app.exec_())
| [
"chenhui_k029@hotmail.com"
] | chenhui_k029@hotmail.com |
e20a5ee9ecd63a4818ab1e7040a2fe0646911b83 | 6af28264b86db139af2a885a7355be6184e2af7d | /backend/schedule_randomiser.py | de684df001503a678c69bc0c97b7ed87185e89ca | [] | no_license | LieutenantPorky/ember | 8792bc5ce2c48a0c8380b9ccfa08a337ca85308e | 39385d36be49eaad9ffac6c57ff55361d9100e03 | refs/heads/master | 2020-12-14T12:47:21.242585 | 2020-01-19T14:49:52 | 2020-01-19T14:49:52 | 234,749,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | from peewee import *
from playhouse.sqlite_ext import *
import json
import numpy as np
from datetime import date, datetime, time, timedelta
week = [date(day=20 + i,month=1,year=2020) for i in range(0,5)]
#[print(i.isoformat()) for i in week]
randClasses = [
["9:00", "10:00", "Intro to Minecraft"],
["11:00", "13:00", "Applied Numerology"],
["13:00", "14:00", "Physics of Kitties"],
["15:00", "17:00", "Computational Turbodynamics"],
["17:00", "18:00", "Pro Haxxing 101"],
]
def getRand():
randSchedule = {"timetable":{}}
for day in week:
daySchedule = [{"start_time":i[0], "end_time":i[1], "module":{"name":i[2]}} for i in randClasses if np.random.random() > 0.5]
randSchedule["timetable"][day.isoformat()] = daySchedule
return randSchedule
def getZucc():
randSchedule = {"timetable":{}}
for day in week:
daySchedule = []
randSchedule["timetable"][day.isoformat()] = daySchedule
return randSchedule
#print(json.dumps(randSchedule, sort_keys=True, indent=4))
#print(json.dumps(randSchedule, sort_keys=True)
bios = [
"A lonely soul looking for love",
"YeEt",
"Hello world",
"I just want someone to buy me dinner"
]
usersDB = SqliteDatabase("User.db")
class User(Model):
username = CharField(unique=True)
id = AutoField()
schedule = JSONField()
bio = TextField()
class Meta:
database = usersDB
# zucc = User.get(username="Mark the Zucc Zuccson")
# zucc.schedule=json.dumps(getZucc())
# zucc.save()
for i in User.select():
print(i.username, i.bio)
# usersDB.create_tables([User])
# for name in ["Bob", "Bill", "Jeb", "Caroline", "Taylor", "Jim", "Hubert", "Lily", "Timothy", "Jerrington"]:
# newUser = User(username=name,schedule=json.dumps(getRand()), bio = bios[np.random.randint(4)])
# newUser.save()
# zucc = User(username="Mark the Zucc Zuccson", schedule=[], bio = "Single lizard robot looking for cute girl to steal data with")
# zucc.save()
| [
"jacopo@siniscalco.eu"
] | jacopo@siniscalco.eu |
576cdc82df4f4c2c6b43fd86bf0387f7bd3e2b14 | 54ff26f132923cf5cf6c19e56156e8b99b65c6aa | /jobsapp/migrations/0009_auto_20201108_0009.py | 1db9f61d18ca784a57804d5c939fdde783fc738a | [
"MIT"
] | permissive | sokogfb/Job-Portal | e19d2a80b82b24dfc2e305346db9fbddfe06d81d | 15c9065853920ddfe6e43a37062cc7fd32fb7f8e | refs/heads/main | 2023-01-21T06:50:48.925252 | 2020-11-09T19:25:29 | 2020-11-09T19:25:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # Generated by Django 3.0.7 on 2020-11-07 18:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobsapp', '0008_auto_20200810_1925'),
]
operations = [
migrations.AlterField(
model_name='job',
name='category',
field=models.CharField(choices=[('1', 'Full time'), ('2', 'Part time'), ('3', 'Internship')], max_length=100),
),
]
| [
"54090909+rajpateln1995@users.noreply.github.com"
] | 54090909+rajpateln1995@users.noreply.github.com |
27f391a18eeeb3d62288d25e1dbfff1a600a7beb | bcebaeb318059cbf8b7d2bb0e991253440be3518 | /importingEXCEL.py | 2a856236e021f8cc6988304a8248026866c52057 | [] | no_license | JadfJamal98/FEC_final_version | add4dc70658f4f427295ec54bdf021f1c4027fce | 1ac3b9c00038ea21610c372efeedb85a5a2a49b7 | refs/heads/main | 2023-08-05T16:13:45.711104 | 2021-10-11T21:22:28 | 2021-10-11T21:22:28 | 370,786,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,375 | py | from openpyxl import load_workbook
def searchcatsub(sr,D):
""" This function return a list of subcategory's name for a category or the categor's name for the topic
input:
sr: Excel Search Engine for a specific Sheet
D: the Name of the column from which we want to extract the names
Output:
list containing the list of lists containing the names of sub categories
"""
result = [] # initializing
for j in range(1,len(sr[D])+1): # looping through all rows
k = D + str(j) # creating the cell's ID
if sr[k].value is None: # if the va
continue # skips
else: # if not null
result.append(sr[k].value.lower()) # adopting a norm of lower values
return result
def clean(arr):
"""
clean array from empty '' caused by the splitting
"""
cleaned = []
for i in arr:
if len(i) == 0: # such strings have length 0
continue # so we skip them
else:
cleaned.append(i) # we append them otherwise
return cleaned
def searchvalues(sr,p,n):
"""
this function has a job of collecting the words tokenize them and add them to list, which also will be categorised
in a list according the the category/subcategory
Input:
sr: Excel Search engine given a sheet
p: the tag of the Column where there is the sub-category/category
n: the tag of the column where there is the actual words corresponding
Output:
results: list of lists, the lists inside are divided to accomodate for the number of subsequent categories/sub
"""
result , sub = [] , []
for j in range(1,len(sr[p])+1):
prev = p + str(j) # creating the cell's ID of the previous column ie. the sub/categories
act = n + str(j) # creating the cell's ID of the cell containing the values
if sr[act].value is None: # if the cell of words is empty, skip this cell
continue
if sr[prev].value is not None: # if the prev cell is not empty means we have a new sub/cat
result.append(clean(sub)) # append the previously collected , cleaned
sub = [] # reinitiate the same list
if "," in sr[act].value: # if the cell contains many words
# lower case, eliminate spaces and split at ',' and add this list to sub
sub+=sr[act].value.lower().replace(" ","").split(",")
else:
sub.append(sr[act].value.lower()) # otherwise it appends the lower case value
result.append(clean(sub)) # appending the last result as its was not appended
return result[1:] # the first list initialted is added directly so it was taken out
def importing(sr):
"""
this function collects the data collected in the previous functions and returns a dictionary with multiple layers
with keys equal to categories sub categories and if the the category has a some words its added under key : 'self'
Input:
sr: Excel Engine related to a specific excel sheet
Output:
Result: dict, containing all the words under their correct distribution in the excel file
"""
# first getting the Data
Topic_value = searchvalues(sr,'A','B') # the words associated directly with the topic
Categories_name = searchcatsub(sr,'C') # the list of categories under the topic
Categories_values = searchvalues(sr,'C','D') # the words assigned to each of these categories
Subcateg_name = searchcatsub(sr,'E') # the list of sub category under each cateogry
Subcateg_values = searchvalues(sr,'E','F') # list of words for each of these sub categories
inhertence = searchvalues(sr,'C','E') # the list of sub cateogies with respect to each category
# backward induction to build the dictionary
last_layer = {} # initializing the last layer of our dictionary
for i in range(len(Subcateg_name)): # loopoing through each subcategory name
# appending to the last layer keys being the names of subcategory and the
# value being the list of corresponding words
last_layer.update({Subcateg_name[i]:Subcateg_values[i]})
second_layer ={} #initializing the second to last layer
for j in range(len(Categories_name)): # looping through all categories name
second_layerh = {} # initalizing the hidden layer i.e. the dictionary in the dictionary
second_layerh.update({'self' : Categories_values[j]}) # appending first the own words of the category
for k in inhertence[j]: # then looping through all its inheritance ie. the subs corresponding the each category
# for each inheritant sub category, adding the name as key and the value
# as the previous layer with the same key
second_layerh.update({k : last_layer[k]})
second_layer.update({Categories_name[j]:second_layerh}) # then adding all this hidden dictionary in the second layer one
Result = {} # this is the return dictionary containint all the words neatly oraganized by category and subs
Result.update({'self':Topic_value[0]}) # the topic has its words, adding them under the key self
for l in Categories_name: # looping through all categories
# appending the dictionary with key as the cateogry and the value as the dictionary of the category
Result.update({l:second_layer[l]})
return Result
wb = load_workbook(filename="LoughranMcDonald_SentimentWordLists_2018.xlsx") # initialzing for the first Sentiment T
sheetn = wb.sheetnames[1:] # the first sheet is just information we don't need
Sentinents = {} # initiliazing the dictionar
for i in range(len(sheetn)): # looping through all sheets
listword=[] # in each sheet we redifine a new list
sr=wb[sheetn[i]] # we set the engine to work in the specific sheet
for j in range(1,sr.max_row+1): # we loop till the last row
k = 'A' + str(j) # creating the the ID of the cell
listword.append(sr[k].value.lower()) # appending its lower case value
Sentinents.update({sheetn[i]:listword}) # we append to the main dictionary the list collected, with a key equal to the name of the sheet
| [
"71495618+JadfJamal98@users.noreply.github.com"
] | 71495618+JadfJamal98@users.noreply.github.com |
429d42c8fd21f8aeed2ca8697dc6fab586d5a1dd | 1fec393454ffe7f65fce3617c14a2fcedf1da663 | /Searching/Searching I/matrix_median.py | 9cab3f6da7a1a3e9e867bcedf81f9997880f980b | [] | no_license | VarmaSANJAY/InterviewBit-Solution-Python | fbeb1d855a5244a89b40fbd2522640dc596c79b6 | ea26394cc1b9d22a9ab474467621d2b61ef15a31 | refs/heads/master | 2022-11-27T22:46:34.966395 | 2020-08-09T14:10:58 | 2020-08-09T14:10:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | from bisect import *
class Solution:
def binary_search(self,A, min_el, max_el, cnt_before_mid):
s = min_el
e = max_el
while s < e:
mid = (s+e) // 2
count = 0
for row in A:
count += bisect_right(row, mid)
if count > cnt_before_mid:
e = mid
else:
s = mid + 1
return s
def Solve(self,A):
min_el = float('inf')
max_el = float('-inf')
for i in A:
min_el = min(i[0], min_el)
max_el = max(i[-1], max_el)
m=len(A)
n=len(A[0])
cnt_before_mid = (m*n) // 2
return self.binary_search(A, min_el, max_el,cnt_before_mid)
if __name__ == '__main__':
A = [[1, 3, 5],
[2, 6, 9],
[3, 6, 9]]
B = Solution()
print(B.Solve(A))
| [
"srajsonu02@gmail.com"
] | srajsonu02@gmail.com |
4b324a9f9ea99b231e13b55494bd0092b1cf52ec | c3ca0bcea4d1b4013a0891f014928922fc81fe7a | /examples/multi_step_training.py | 605e0ac42e4b43a5d9c9b7ba9d1573554d4f6c74 | [
"MIT"
] | permissive | takuseno/d3rlpy | 47894b17fc21fab570eca39fe8e6925a7b5d7d6f | 4ba297fc6cd62201f7cd4edb7759138182e4ce04 | refs/heads/master | 2023-08-23T12:27:45.305758 | 2023-08-14T12:07:03 | 2023-08-14T12:07:03 | 266,369,147 | 1,048 | 222 | MIT | 2023-09-02T08:12:48 | 2020-05-23T15:51:51 | Python | UTF-8 | Python | false | false | 1,483 | py | import argparse
import gym
import d3rlpy
GAMMA = 0.99
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--env", type=str, default="Pendulum-v1")
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--n-steps", type=int, default=1)
parser.add_argument("--gpu", action="store_true")
args = parser.parse_args()
env = gym.make(args.env)
eval_env = gym.make(args.env)
# fix seed
d3rlpy.seed(args.seed)
d3rlpy.envs.seed_env(env, args.seed)
d3rlpy.envs.seed_env(eval_env, args.seed)
# setup algorithm
sac = d3rlpy.algos.SACConfig(
batch_size=256,
gamma=GAMMA,
actor_learning_rate=3e-4,
critic_learning_rate=3e-4,
temp_learning_rate=3e-4,
action_scaler=d3rlpy.preprocessing.MinMaxActionScaler(),
).create(device=args.gpu)
# multi-step transition sampling
transition_picker = d3rlpy.dataset.MultiStepTransitionPicker(
n_steps=args.n_steps,
gamma=GAMMA,
)
# replay buffer for experience replay
buffer = d3rlpy.dataset.create_fifo_replay_buffer(
limit=100000,
env=env,
transition_picker=transition_picker,
)
# start training
sac.fit_online(
env,
buffer,
eval_env=eval_env,
n_steps=100000,
n_steps_per_epoch=1000,
update_interval=1,
update_start_step=1000,
)
if __name__ == "__main__":
main()
| [
"takuma.seno@gmail.com"
] | takuma.seno@gmail.com |
86673876860a16e73baeba13cf15a5f5f9a6b8f6 | ca9bf5b1d53ff7d755c53e45486238cf4c2fec43 | /src/accounts/forms.py | 3fce5be865495125379ea09990f5b3f0c5dbfb1e | [] | no_license | pagnn/Geolocator | 6aa2e75aab8395f0f22048dd844b7d69b2ac3ee7 | b45d9500667e3552acc8c851e7f8f383d3d86c9f | refs/heads/master | 2021-09-01T09:13:55.726030 | 2017-12-26T06:13:11 | 2017-12-26T06:13:11 | 110,927,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | from django import forms
from django.contrib.auth.forms import AuthenticationForm
class LoginForm(AuthenticationForm):
def confirm_login_allowed(self,user):
if not user.is_active:
raise forms.ValidationError('This account is inactive',code='inactive')
| [
"sylviawei19950920@gmail.com"
] | sylviawei19950920@gmail.com |
7c4cb0d388dfd9e306500f3f0b0cc9ceb415e596 | e15575c838c4f656e751a0d544f6cf1e49580305 | /User_Accounts/migrations/0001_initial.py | 326403f3fb22f7c06dd73251b6fda46741b4ffed | [] | no_license | manojakumarpanda/Blog_post_and-comment | bd764676b9f17ab95eb163bbe17c7dc4f1550a66 | 84540228d33d8e6337a12eb834ffbc2ccbdd5fc2 | refs/heads/master | 2022-07-03T21:33:10.230573 | 2020-05-14T17:39:20 | 2020-05-14T17:39:20 | 263,977,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,041 | py | # Generated by Django 2.1.7 on 2020-05-07 15:27
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Cities',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city_name', models.CharField(blank=True, default='Berhampur', max_length=50, null=True)),
],
options={
'db_table': 'cities',
'ordering': ['city_name'],
},
),
migrations.CreateModel(
name='Countrey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('countrey_name', models.CharField(default='india', max_length=30, verbose_name='countrey')),
],
options={
'db_table': 'countrey',
'ordering': ['countrey_name'],
},
),
migrations.CreateModel(
name='Districts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('district_name', models.CharField(blank=True, default='Ganjam', max_length=30, null=True)),
],
options={
'db_table': 'districts',
'ordering': ['district_name'],
},
),
migrations.CreateModel(
name='States',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state_name', models.CharField(blank=True, default='Odisha', max_length=30, null=True)),
('count', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='Districts', to='User_Accounts.Countrey')),
],
options={
'db_table': 'states',
'ordering': ['state_name'],
},
),
migrations.CreateModel(
name='Users',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('id', models.UUIDField(default=uuid.UUID('77bcd7ff-2296-46b4-a4b1-723b9bee9955'), primary_key=True, serialize=False)),
('username', models.CharField(blank=True, max_length=30, null=True, unique=True, verbose_name='username')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('full_name', models.CharField(blank=True, max_length=60, null=True, verbose_name='fullname')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
('house_num', models.CharField(default='4/1', max_length=7, verbose_name='House Numebr/Flat Number')),
('address', models.CharField(max_length=300, verbose_name='Address')),
('pin_code', models.CharField(default='760008', max_length=6)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('updated_at', models.DateTimeField(auto_now=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'db_table': 'Accounts',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AddField(
model_name='districts',
name='state',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='User_Accounts.States'),
),
migrations.AddField(
model_name='cities',
name='dist',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='User_Accounts.Districts'),
),
]
| [
"kumarpandamanoja@gmail.com"
] | kumarpandamanoja@gmail.com |
d30aefdbac47c69b1651c631e5c0d110bdced301 | abdc30ddc3e2aa874afe85f3b3cf914c55ef98a4 | /RecipeForDisaster/views.py | 681a2096fc19587138817e22f8a920a6b05f2c06 | [] | no_license | dann4520/RecipeSite | 65fd1b8afbb9ef0414dd4431cc918d3eeaf34b94 | 851b49fac08d74aabe5dde88d4c56d375edf705d | refs/heads/master | 2020-04-19T12:37:50.444276 | 2019-01-29T18:22:21 | 2019-01-29T18:22:21 | 168,088,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from django.shortcuts import render
from django.utils import timezone
from .models import Recipe
# Create your views here.
def recipe_list(request):
recipes = Recipe.objects.filter(created_date__lte=timezone.now()).order_by('created_date')
return render(request, 'RecipeForDisaster/recipe_list.html', {'recipes': recipes})
| [
"stabdan@gmail.com"
] | stabdan@gmail.com |
350796e30288007d708560cb8c78151b69807870 | 2b2d000525205763a8379621f2413c1c5dae1aa0 | /resize2.py | 869d8b06bdaa1fd07f0269feb4a328531c0c9670 | [] | no_license | DAYARAM99/opencv- | 0c6defbe034c3a237c4093f362dfe65b884e6e3b | af37fd6823cf956fe9b1d13b6727833888e1a443 | refs/heads/master | 2020-08-11T04:31:12.622793 | 2019-10-11T17:23:54 | 2019-10-11T17:23:54 | 214,491,923 | 0 | 0 | null | 2019-10-11T17:16:59 | 2019-10-11T17:16:59 | null | UTF-8 | Python | false | false | 373 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 05:44:06 2019
@author: Rajat arya
"""
import cv2
img=cv2.imread("index.png")
resize_img = cv2.resize(img, (int(img.shape[1]/2), int(img.shape[0]/2)))
resized_image = cv2.resize(img, (650,500))
resized_image = cv2.resize(img, (650,500))
cv2.imshow("image",resize_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | DAYARAM99.noreply@github.com |
2a407ad94d7f94a71b7a2950a9fb841ae5678614 | ca5b57ee732081cf03de08e8f640ba2197b1a11e | /Binary Search/Tree/Longest Tree Sum Path From Root to Leaf.py | cc5a508e70e1b35eaaeddd53b9909ef5fddcf6df | [] | no_license | Ajinkya-Sonawane/Problem-Solving-in-Python | cf0507fca0968f6eef02492656f16b256cc0d07c | 96529af9343d4a831c0f8f4f92df87a49e155854 | refs/heads/main | 2023-06-06T12:46:57.990001 | 2021-06-12T19:38:02 | 2021-06-12T19:38:02 | 369,957,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | # https://binarysearch.com/problems/Longest-Tree-Sum-Path-From-Root-to-Leaf
# class Tree:
# def __init__(self, val, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def solve(self, root):
return self.traverse(root)[0]
def traverse(self,root):
if not root:
return 0,0
l,heightL = self.traverse(root.left)
r,heightR = self.traverse(root.right)
temp = 0
if heightL == heightR:
temp = max(l,r) + root.val
return temp,heightL+1
if heightL > heightR:
temp = l + root.val
return temp,heightL+1
temp = r + root.val
return temp,heightR+1 | [
"sonawaneajinks@gmail.com"
] | sonawaneajinks@gmail.com |
46b2a84ea85072fd8b8c7365f2bcc70d57327e12 | 8509927b6281647a5400f92a2199cb82860f2997 | /code/grid_search/run_grid_search.py | b60615d1712fba4315ad782c693ded31690fffc0 | [] | no_license | egeodaci/comp551-2020-p2_classification_of_textual_data | 02dbd55dd61a098081aed27202ce5d653ca46dee | 13b6e169b5b965b7185de49294c33c35c7da9b65 | refs/heads/master | 2022-11-21T08:15:45.700213 | 2020-07-09T17:38:22 | 2020-07-09T17:38:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,838 | py | import json
import logging
import os
from time import time
from sklearn import metrics
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_validate
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from datasets.load_dataset import load_twenty_news_groups, load_imdb_reviews
from model_selection.ml_algorithm_pair_list import JSON_FOLDER
from utils.dataset_enum import Dataset
from utils.ml_classifiers_enum import Classifier
def get_classifier_with_best_parameters(classifier_enum, best_parameters):
if classifier_enum == Classifier.ADA_BOOST_CLASSIFIER:
return AdaBoostClassifier(**best_parameters)
elif classifier_enum == Classifier.BERNOULLI_NB:
return BernoulliNB(**best_parameters)
elif classifier_enum == Classifier.COMPLEMENT_NB:
return ComplementNB(**best_parameters)
elif classifier_enum == Classifier.DECISION_TREE_CLASSIFIER:
return DecisionTreeClassifier(**best_parameters)
elif classifier_enum == Classifier.GRADIENT_BOOSTING_CLASSIFIER:
return GradientBoostingClassifier(**best_parameters)
elif classifier_enum == Classifier.K_NEIGHBORS_CLASSIFIER:
return KNeighborsClassifier(**best_parameters)
elif classifier_enum == Classifier.LINEAR_SVC:
return LinearSVC(**best_parameters)
elif classifier_enum == Classifier.LOGISTIC_REGRESSION:
return LogisticRegression(**best_parameters)
elif classifier_enum == Classifier.MULTINOMIAL_NB:
return MultinomialNB(**best_parameters)
elif classifier_enum == Classifier.NEAREST_CENTROID:
return NearestCentroid(**best_parameters)
elif classifier_enum == Classifier.PASSIVE_AGGRESSIVE_CLASSIFIER:
return PassiveAggressiveClassifier(**best_parameters)
elif classifier_enum == Classifier.PERCEPTRON:
return Perceptron(**best_parameters)
elif classifier_enum == Classifier.RANDOM_FOREST_CLASSIFIER:
return RandomForestClassifier(**best_parameters)
elif classifier_enum == Classifier.RIDGE_CLASSIFIER:
return RidgeClassifier(**best_parameters)
def run_classifier_grid_search(classifer, classifier_enum, param_grid, dataset, final_classification_table_default_parameters, final_classification_table_best_parameters, imdb_multi_class, save_json_with_best_parameters):
if param_grid is None:
return
if dataset == Dataset.TWENTY_NEWS_GROUPS:
remove = ('headers', 'footers', 'quotes')
data_train = \
load_twenty_news_groups(subset='train', categories=None, shuffle=True, random_state=0, remove=remove)
data_test = \
load_twenty_news_groups(subset='test', categories=None, shuffle=True, random_state=0, remove=remove)
X_train, y_train = data_train.data, data_train.target
X_test, y_test = data_test.data, data_test.target
target_names = data_train.target_names
elif dataset == Dataset.IMDB_REVIEWS:
db_parent_path = os.getcwd()
db_parent_path = db_parent_path.replace('grid_search', '')
if imdb_multi_class:
X_train, y_train = \
load_imdb_reviews(subset='train', multi_class_labels=True, verbose=False, shuffle=True, random_state=0,
db_parent_path=db_parent_path)
X_test, y_test = \
load_imdb_reviews(subset='test', multi_class_labels=True, verbose=False, shuffle=True, random_state=0,
db_parent_path=db_parent_path)
else:
X_train, y_train = \
load_imdb_reviews(subset='train', multi_class_labels=False, verbose=False, shuffle=True, random_state=0,
db_parent_path=db_parent_path)
X_test, y_test = \
load_imdb_reviews(subset='test', multi_class_labels=False, verbose=False, shuffle=True, random_state=0,
db_parent_path=db_parent_path)
if imdb_multi_class:
# IMDB_REVIEWS dataset
# If binary classification: 0 = neg and 1 = pos.
# If multi-class classification use the review scores: 1, 2, 3, 4, 7, 8, 9, 10
target_names = ['1', '2', '3', '4', '7', '8', '9', '10']
else:
# IMDB_REVIEWS dataset
# If binary classification: 0 = neg and 1 = pos.
# If multi-class classification use the review scores: 1, 2, 3, 4, 7, 8, 9, 10
target_names = ['0', '1']
try:
# Extracting features
vectorizer = TfidfVectorizer(stop_words='english', strip_accents='unicode', analyzer='word', binary=True)
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)
# Create pipeline
pipeline = Pipeline([('classifier', classifer)])
# Create grid search object
grid_search = GridSearchCV(pipeline, param_grid=param_grid, cv=5, verbose=True, n_jobs=-1)
logging.info("\n\nPerforming grid search...\n")
logging.info("Parameters:")
logging.info(param_grid)
t0 = time()
grid_search.fit(X_train, y_train)
logging.info("\tDone in %0.3fs" % (time() - t0))
# Get best parameters
logging.info("\tBest score: %0.3f" % grid_search.best_score_)
logging.info("\tBest parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
new_parameters = {}
for param_name in sorted(param_grid.keys()):
logging.info("\t\t%s: %r" % (param_name, best_parameters[param_name]))
key = param_name.replace('classifier__', '')
value = best_parameters[param_name]
new_parameters[key] = value
if save_json_with_best_parameters:
if dataset == Dataset.TWENTY_NEWS_GROUPS:
json_path = os.path.join(os.getcwd(), JSON_FOLDER, dataset.name, classifier_enum.name + ".json")
with open(json_path, 'w') as outfile:
json.dump(new_parameters, outfile)
else:
if imdb_multi_class:
json_path = os.path.join(os.getcwd(), JSON_FOLDER, dataset.name, 'multi_class_classification', classifier_enum.name + ".json")
with open(json_path, 'w') as outfile:
json.dump(new_parameters, outfile)
else:
json_path = os.path.join(os.getcwd(), JSON_FOLDER, dataset.name, 'binary_classification', classifier_enum.name + ".json")
with open(json_path, 'w') as outfile:
json.dump(new_parameters, outfile)
logging.info('\n\nUSING {} WITH DEFAULT PARAMETERS'.format(classifier_enum.name))
clf = classifer
final_classification_report(clf, X_train, y_train, X_test, y_test, target_names, classifier_enum, final_classification_table_default_parameters)
logging.info('\n\nUSING {} WITH BEST PARAMETERS: {}'.format(classifier_enum.name, new_parameters))
clf = get_classifier_with_best_parameters(classifier_enum, new_parameters)
final_classification_report(clf, X_train, y_train, X_test, y_test, target_names, classifier_enum, final_classification_table_best_parameters)
except MemoryError as error:
# Output expected MemoryErrors.
logging.error(error)
except Exception as exception:
# Output unexpected Exceptions.
logging.error(exception)
def final_classification_report(clf, X_train, y_train, X_test, y_test, target_names, classifier_enum, final_classification_table):
# Fit on data
logging.info('_' * 80)
logging.info("Training: ")
logging.info(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
logging.info("Train time: %0.3fs" % train_time)
# Predict
t0 = time()
y_pred = clf.predict(X_test)
test_time = time() - t0
logging.info("Test time: %0.3fs" % test_time)
accuracy_score = metrics.accuracy_score(y_test, y_pred)
logging.info("Accuracy score: %0.3f" % accuracy_score)
logging.info("\n\n===> Classification Report:\n")
# logging.info(metrics.classification_report(y_test, y_pred, target_names=target_names))
logging.info(metrics.classification_report(y_test, y_pred))
n_splits = 5
logging.info("\n\nCross validation:")
scoring = ['accuracy', 'precision_macro', 'precision_micro', 'precision_weighted', 'recall_macro', 'recall_micro',
'recall_weighted', 'f1_macro', 'f1_micro', 'f1_weighted', 'jaccard_macro']
cross_val_scores = cross_validate(clf, X_train, y_train, scoring=scoring, cv=n_splits,
n_jobs=-1, verbose=True)
cv_test_accuracy = cross_val_scores['test_accuracy']
logging.info("\taccuracy: {}-fold cross validation: {}".format(5, cv_test_accuracy))
cv_accuracy_score_mean_std = "%0.2f (+/- %0.2f)" % (cv_test_accuracy.mean() * 100, cv_test_accuracy.std() * 2 * 100)
logging.info("\ttest accuracy: {}-fold cross validation accuracy: {}".format(n_splits, cv_accuracy_score_mean_std))
final_classification_table[classifier_enum.value] = classifier_enum.name, format(accuracy_score, ".2%"), str(cv_test_accuracy), cv_accuracy_score_mean_std, format(train_time, ".4"), format(test_time, ".4")
def get_classifier_with_default_parameters(classifier_enum):
if classifier_enum == Classifier.ADA_BOOST_CLASSIFIER:
'''
AdaBoostClassifier(algorithm='SAMME.R', base_estimator=None, learning_rate=1.0,
n_estimators=50, random_state=None)
'''
clf = AdaBoostClassifier()
parameters = {
'classifier__learning_rate': [0.1, 1],
'classifier__n_estimators': [200, 500]
}
elif classifier_enum == Classifier.BERNOULLI_NB:
'''
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
'''
clf = BernoulliNB()
parameters = {
'classifier__alpha': [0.0001, 0.001, 0.01, 0.1, 0.5, 1.0, 2.0, 4.0, 6.0, 8.0, 10.0],
'classifier__binarize': [0.0001, 0.001, 0.01, 0.1, 0.5, 1.0, 2.0, 4.0, 6.0, 8.0, 10.0],
'classifier__fit_prior': [False, True]
}
elif classifier_enum == Classifier.COMPLEMENT_NB:
'''
ComplementNB(alpha=1.0, class_prior=None, fit_prior=True, norm=False)
'''
clf = ComplementNB()
parameters = {
'classifier__alpha': [0.0001, 0.001, 0.01, 0.1, 0.5, 1.0, 2.0, 4.0, 6.0, 8.0, 10.0],
'classifier__fit_prior': [False, True],
'classifier__norm': [False, True]
}
elif classifier_enum == Classifier.DECISION_TREE_CLASSIFIER:
'''
DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None, criterion='gini',
max_depth=None, max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, presort='deprecated',
random_state=None, splitter='best')
'''
clf = DecisionTreeClassifier()
parameters = {
'classifier__criterion': ["entropy", "gini"],
'classifier__splitter': ["best", "random"],
'classifier__min_samples_split': [2, 100, 250]
}
elif classifier_enum == Classifier.K_NEIGHBORS_CLASSIFIER:
'''
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=None, n_neighbors=5, p=2,
weights='uniform')
'''
clf = KNeighborsClassifier()
parameters = {
'classifier__leaf_size': [5, 30],
'classifier__metric': ['euclidean', 'minkowski'],
'classifier__n_neighbors': [3, 50],
'classifier__weights': ['uniform', 'distance']
}
elif classifier_enum == Classifier.LINEAR_SVC:
'''
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
'''
clf = LinearSVC()
parameters = {
'classifier__C': [0.01, 1.0],
'classifier__multi_class': ['ovr', 'crammer_singer'],
'classifier__tol': [0.0001, 0.001]
}
elif classifier_enum == Classifier.LOGISTIC_REGRESSION:
'''
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, l1_ratio=None, max_iter=100,
multi_class='auto', n_jobs=None, penalty='l2',
random_state=None, solver='lbfgs', tol=0.0001, verbose=0,
warm_start=False)
'''
clf = LogisticRegression()
parameters = {
'classifier__C': [1, 10],
'classifier__tol': [0.001, 0.01]
}
elif classifier_enum == Classifier.MULTINOMIAL_NB:
'''
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
'''
clf = MultinomialNB()
parameters = {
'classifier__alpha': [0.0001, 0.001, 0.01, 0.1, 0.5, 1.0, 2.0, 4.0, 6.0, 8.0, 10.0],
'classifier__fit_prior': [False, True]
}
elif classifier_enum == Classifier.NEAREST_CENTROID:
'''
NearestCentroid(metric='euclidean', shrink_threshold=None)
'''
clf = NearestCentroid()
parameters = {
'classifier__metric': ['euclidean', 'cosine']
}
elif classifier_enum == Classifier.PASSIVE_AGGRESSIVE_CLASSIFIER:
'''
PassiveAggressiveClassifier(C=1.0, average=False, class_weight=None,
early_stopping=False, fit_intercept=True,
loss='hinge', max_iter=1000, n_iter_no_change=5,
n_jobs=None, random_state=None, shuffle=True,
tol=0.001, validation_fraction=0.1, verbose=0,
warm_start=False)
'''
clf = PassiveAggressiveClassifier()
parameters = {
'classifier__C': [0.01, 1.0],
'classifier__early_stopping': [False, True],
'classifier__tol': [0.0001, 0.001, 0.01],
'classifier__validation_fraction': [0.0001, 0.01]
}
elif classifier_enum == Classifier.PERCEPTRON:
'''
Perceptron(alpha=0.0001, class_weight=None, early_stopping=False, eta0=1.0,
fit_intercept=True, max_iter=1000, n_iter_no_change=5, n_jobs=None,
penalty=None, random_state=0, shuffle=True, tol=0.001,
validation_fraction=0.1, verbose=0, warm_start=False)
'''
clf = Perceptron()
parameters = {
'classifier__early_stopping': [True],
'classifier__max_iter': [100],
'classifier__n_iter_no_change': [3, 15],
'classifier__penalty': ['l2'],
'classifier__tol': [0.0001, 0.1],
'classifier__validation_fraction': [0.0001, 0.01]
}
elif classifier_enum == Classifier.RANDOM_FOREST_CLASSIFIER:
'''
RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,
criterion='gini', max_depth=None, max_features='auto',
max_leaf_nodes=None, max_samples=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=100,
n_jobs=None, oob_score=False, random_state=None,
verbose=0, warm_start=False)
'''
clf = RandomForestClassifier()
parameters = {
'classifier__min_samples_leaf': [1, 2],
'classifier__min_samples_split': [2, 5],
'classifier__n_estimators': [100, 200]
}
elif classifier_enum == Classifier.RIDGE_CLASSIFIER:
'''
RidgeClassifier(alpha=1.0, class_weight=None, copy_X=True, fit_intercept=True,
max_iter=None, normalize=False, random_state=None,
solver='auto', tol=0.001)
'''
clf = RidgeClassifier()
parameters = {
'classifier__alpha': [0.5, 1.0],
'classifier__tol': [0.0001, 0.001]
}
elif classifier_enum == Classifier.GRADIENT_BOOSTING_CLASSIFIER:
'''
GradientBoostingClassifier(ccp_alpha=0.0, criterion='friedman_mse', init=None,
learning_rate=0.1, loss='deviance', max_depth=3,
max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=100,
n_iter_no_change=None, presort='deprecated',
random_state=None, subsample=1.0, tol=0.0001,
validation_fraction=0.1, verbose=0,
warm_start=False)
'''
clf = GradientBoostingClassifier()
parameters = {
'classifier__learning_rate': [0.01, 0.1],
'classifier__n_estimators': [100, 200]
}
return clf, parameters
def run_grid_search(save_logs_in_file, just_imdb_dataset, imdb_multi_class, save_json_with_best_parameters=False):
if imdb_multi_class:
if save_logs_in_file:
if not os.path.exists('grid_search/just_imdb_using_multi_class_classification'):
os.mkdir('grid_search/just_imdb_using_multi_class_classification')
logging.basicConfig(filename='grid_search/just_imdb_using_multi_class_classification/all.log', format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO, datefmt='%m/%d/%Y %I:%M:%S %p')
else:
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO,
datefmt='%m/%d/%Y %I:%M:%S %p')
else:
if save_logs_in_file:
if not os.path.exists('grid_search/20newsgroups_and_imdb_using_binary_classification'):
os.mkdir('grid_search/20newsgroups_and_imdb_using_binary_classification')
logging.basicConfig(filename='grid_search/20newsgroups_and_imdb_using_binary_classification/all.log', format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO, datefmt='%m/%d/%Y %I:%M:%S %p')
else:
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO,
datefmt='%m/%d/%Y %I:%M:%S %p')
classifier_list = [
Classifier.ADA_BOOST_CLASSIFIER,
Classifier.DECISION_TREE_CLASSIFIER,
Classifier.LINEAR_SVC,
Classifier.LOGISTIC_REGRESSION,
Classifier.RANDOM_FOREST_CLASSIFIER,
Classifier.BERNOULLI_NB,
Classifier.COMPLEMENT_NB,
Classifier.MULTINOMIAL_NB,
Classifier.NEAREST_CENTROID,
Classifier.PASSIVE_AGGRESSIVE_CLASSIFIER,
Classifier.K_NEIGHBORS_CLASSIFIER,
Classifier.PERCEPTRON,
Classifier.RIDGE_CLASSIFIER,
Classifier.GRADIENT_BOOSTING_CLASSIFIER
]
if just_imdb_dataset:
dataset_list = [
Dataset.IMDB_REVIEWS
]
else:
dataset_list = [
Dataset.IMDB_REVIEWS,
Dataset.TWENTY_NEWS_GROUPS
]
logging.info("\n>>> GRID SEARCH")
for dataset in dataset_list:
c_count = 1
final_classification_table_default_parameters = {}
final_classification_table_best_parameters = {}
for classifier_enum in classifier_list:
logging.info("\n")
logging.info("#" * 80)
if save_logs_in_file:
print("#" * 80)
logging.info("{})".format(c_count))
clf, parameters = get_classifier_with_default_parameters(classifier_enum)
logging.info("*" * 80)
logging.info("Classifier: {}, Dataset: {}".format(classifier_enum.name, dataset.name))
logging.info("*" * 80)
start = time()
run_classifier_grid_search(clf, classifier_enum, parameters, dataset, final_classification_table_default_parameters, final_classification_table_best_parameters, imdb_multi_class, save_json_with_best_parameters)
end = time() - start
logging.info("It took {} seconds".format(end))
logging.info("*" * 80)
if save_logs_in_file:
print("*" * 80)
print("Classifier: {}, Dataset: {}".format(classifier_enum.name, dataset.name))
print(clf)
print("It took {} seconds".format(end))
print("*" * 80)
logging.info("#" * 80)
if save_logs_in_file:
print("#" * 80)
c_count = c_count + 1
logging.info(
'\n\nCURRENT CLASSIFICATION TABLE: {} DATASET, CLASSIFIER WITH DEFAULT PARAMETERS'.format(dataset.name))
print_final_classification_table(final_classification_table_default_parameters)
logging.info(
'\n\nCURRENT CLASSIFICATION TABLE: {} DATASET, CLASSIFIER WITH BEST PARAMETERS'.format(dataset.name))
print_final_classification_table(final_classification_table_best_parameters)
logging.info('\n\nFINAL CLASSIFICATION TABLE: {} DATASET, CLASSIFIER WITH DEFAULT PARAMETERS'.format(dataset.name))
print_final_classification_table(final_classification_table_default_parameters)
logging.info('\n\nFINAL CLASSIFICATION TABLE: {} DATASET, CLASSIFIER WITH BEST PARAMETERS'.format(dataset.name))
print_final_classification_table(final_classification_table_best_parameters)
def print_final_classification_table(final_classification_table_default_parameters):
logging.info(
'| ID | ML Algorithm | Accuracy Score (%) | K-fold Cross Validation (CV) (k = 5) | CV (Mean +/- Std) | '
'Training time (seconds) | Test time (seconds) |')
logging.info(
'| -- | ------------ | ------------------ | ------------------------------------ | ----------------- | '
' ------------------ | ------------------ |')
for key in sorted(final_classification_table_default_parameters.keys()):
values = final_classification_table_default_parameters[key]
logging.info(
"| {} | {} | {} | {} | {} | {} | {} |".format(key, values[0], values[1], values[2],
values[3], values[4], values[5]))
| [
"ramon.fgrd@gmail.com"
] | ramon.fgrd@gmail.com |
90e17c94f2e06a546e4b52d1a8e0b9da901b50ce | 991666b692f8017c2d753b0267eb9189413f54eb | /bin/Debug/models/sum.py | 32fb9016bc4f31059e4160cc0aae8e9682a9deed | [
"Apache-2.0"
] | permissive | DaniilVdovin/InformaticalPasCompilCore | 8a0b4e532e439b2497ec35028d9bade6c37b1b98 | 6d3f6fd3bef63a355c4fd39aabccf3a768daaa7c | refs/heads/main | 2023-04-20T12:47:09.101144 | 2021-05-02T09:00:35 | 2021-05-02T09:00:35 | 363,605,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | import sys
if(sys.argv[1]=="-r"):
print("r:0:100:2")
else:
print(sum(map(int,sys.argv[1:])))
| [
"stels1040533@gmail.com"
] | stels1040533@gmail.com |
025eb96b12404c357e6dfb6cd2f21162f9492c7b | bf014a59c19683bf6c6e0c2ec5b89a18c4305b3c | /backends/__init__.py | 6ab5708fda5ed59498f8977037d2d3a11eef6adf | [
"MIT"
] | permissive | l3robot/pytorch-a3c | 5974e2c894aff121223fdb80c77eea7bbc36313e | 0bc46ac67346d77c5bd01cff8fd98ea617efc708 | refs/heads/master | 2020-03-18T10:40:55.832062 | 2018-05-23T21:30:06 | 2018-05-23T21:30:06 | 133,974,170 | 0 | 0 | null | 2018-05-18T15:43:46 | 2018-05-18T15:43:45 | null | UTF-8 | Python | false | false | 75 | py | from .atari import create_atari_env
from .unity3d import create_unity3d_env | [
"louis-emile.robitaille@elementai.com"
] | louis-emile.robitaille@elementai.com |
c5e7d15d5d15185d551a3d9dcfd54449f26ac850 | 986a9c6a9463e029e33dfa564aeaabde1dc573c8 | /traffic_sign.py | af907a13df2fc8d470aefae12f80a555275d95a0 | [] | no_license | rushikeshkorde/Traffic-Signs-Classification | 15c1eda0261771cc8d5932305773bf9a1f6f1172 | 226deb010b5973c3b4d82fa09c69510a75542241 | refs/heads/master | 2022-08-16T01:54:48.752862 | 2020-05-31T14:13:22 | 2020-05-31T14:13:22 | 267,787,807 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,218 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from PIL import Image
import os
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
data = []
labels = []
classes = 43
cur_path = os.getcwd()
#Retrieving the images and their labels
for i in range(classes):
path = os.path.join(cur_path,'train',str(i))
images = os.listdir(path)
for a in images:
try:
image = Image.open(path + '\\'+ a)
image = image.resize((30,30))
image = np.array(image)
#sim = Image.fromarray(image)
data.append(image)
labels.append(i)
except:
print("Error loading image")
#Converting lists into numpy arrays
data = np.array(data)
labels = np.array(labels)
print(data.shape, labels.shape)
#Splitting training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
#Converting the labels into one hot encoding
y_train = to_categorical(y_train, 43)
y_test = to_categorical(y_test, 43)
#Building the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=X_train.shape[1:]))
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(43, activation='softmax'))
#Compilation of the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
epochs = 15
history = model.fit(X_train, y_train, batch_size=32, epochs=epochs, validation_data=(X_test, y_test))
model.save("my_model.h5")
#plotting graphs for accuracy
plt.figure(0)
plt.plot(history.history['accuracy'], label='training accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.title('Accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
plt.figure(1)
plt.plot(history.history['loss'], label='training loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.title('Loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()
#testing accuracy on test dataset
from sklearn.metrics import accuracy_score
y_test = pd.read_csv('Test.csv')
labels = y_test["ClassId"].values
imgs = y_test["Path"].values
data=[]
for img in imgs:
image = Image.open(img)
image = image.resize((30,30))
data.append(np.array(image))
X_test=np.array(data)
pred = model.predict_classes(X_test)
#Accuracy with the test data
from sklearn.metrics import accuracy_score
print(accuracy_score(labels, pred))
| [
"noreply@github.com"
] | rushikeshkorde.noreply@github.com |
1fff2c19c3e1dc938c736276b810be8d34a5d060 | 7cfbeaa4fe1cdf8e90e4c42fe575a3b76cbf64ab | /setup.py | 193d68ce2b3f00e87fe2c3aacbab22886d5bf1a4 | [
"MIT"
] | permissive | keans/timecache | dac029b51e63fdbb6afa03ee33b8b08806a34993 | 9355e8b155e8071860942303ad364d719742eb78 | refs/heads/master | 2020-03-27T19:29:37.022521 | 2018-09-02T17:25:25 | 2018-09-02T17:25:25 | 146,992,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | from setuptools import setup, find_packages
import codecs
import os
# get current directory
here = os.path.abspath(os.path.dirname(__file__))
def get_long_description():
"""
get long description from README.rst file
"""
with codecs.open(os.path.join(here, "README.rst"), "r", "utf-8") as f:
return f.read()
setup(
name='timecache',
version='0.0.4',
description='Time Cache',
long_description=get_long_description(),
url='https://github.com',
author='Ansgar Kellner',
author_email='keans@gmx.net',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='python packaging',
packages=find_packages(
exclude=['contrib', 'docs', 'tests']
),
install_requires=[],
)
| [
"keans@gmx.de"
] | keans@gmx.de |
8d9e1c544ea7627b9c50bc0bde5e458f4f1713df | 4293afea6240e2216eb5ed79579de1bc999b25ff | /TextViewer/icon.py | 985e10764f34b730212cdbb9f4360ee18a5b471d | [] | no_license | zoemurmure/TextViewer | 3df1737ed3b836ff89e1d5b018dd722e097ea4fc | f9ee29cfc8d15d440a74d2a6211db37d87738ba7 | refs/heads/master | 2022-11-23T14:11:38.247111 | 2020-07-31T08:01:46 | 2020-07-31T08:01:46 | 272,883,299 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,619 | py | img=b'AAABAAEAQEAAAAEAIAAoQgAAFgAAACgAAABAAAAAgAAAAAEAIAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPb29iL29vaP9vb20/X19fX29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb1//b29f/29vX/9vb29fb29tP29vaP9vb2IgAAAAAAAAAAAAAAAPb29lD29vbz9fb2//Tz8//p5N//6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/6OLe/+ji3v/o4t7/5+Le/+jk3//09PP/9fb2//b19vP29vZQAAAAAPb29iL29vbz9vb2/+/v7P+Vd1v/fFIu/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/31RLf99US3/fVEt/3xRLf98Ui3/l3dc//Du7P/29fb/9vb28/b29iL29vaP9vb2//b29v/LvrH/e1Eu/3tRLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFEs/3xSLf/MwLH/9vb2//b29v/29vaP9vb20/b29v/29vb/29PM/3xTL/97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97U
S3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97Uiz/fFEt/3xSLf98Ui3/fFIt/3tRLP98Ui3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/fFIt/3xRLf99Uy7/3NPL//b29v/29vb/9vb20/b29vX29vb/9vb2//X19f/OwbT/qY54/6eLdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ni3T/pIhu/3xSLf98Ui3/fFIt/3xSLf+kh27/p4t0/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eKdf+ninX/p4p1/6eLdf+ojnj/z8G2//T19f/19fX/9vb2//X19fX29vb/9vb2//b29v/29vb/9vb2//X19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2/8m7rf98Ui3/fFIt/3xSLf98US3/yLut//X29f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/19fX/9fX1//X19f/19fX/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b19f/19fX/9fb2/+jj3f+IY0P/e1Et/3xSLf98Ui3/fFIt/4hjQ//m4t3/9vX1//X19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//X19f/29vX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/8O/t/8q9rv+HY0H/fFIt/3xSLf98Ui3/fFIt/3tRLf97US7/iGJB/8i7rf/x7+7/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/Syr7/lHJU/41lRf+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+MZUX/imRD/39YNf96US3/fFIu/3xSLv98Ui3/fFIt/3xSLf98Ui3/e1Et/3tRLf97US3/f1g2/4tkQ/+NZUX/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/jWVG/41lRv+NZUb/i2VF/5RyVP/Vyr7/h2JA/3tSLP98US7/fFEt/3xRLf98U
S3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xSLv98Ui3/fFIt/3xSLf98US3/e1Es/3xSLf98US3/e1Is/3xSLf98Ui3/fFIt/3xSLv98Ui3/fFEu/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/fFEt/3xRLf98US3/h2JB/3tRLP97USz/fFIt/3tRLf98Ui3/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Ui3/fFEt/3tSLP98Ui3/kGpI/4FZNf98Ui3/fFEt/4BaNf+ObU7/fFIs/3xSLf97US3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf97US3/fFIt/3tRLf98Ui3/fFIt/3xSLf+AVjL/iGI//4dhP/+HYT//h2E//4dhP/+HYT//h2E//4dhP/+HYT//h2E//4dhP/+HYT//h2E//4dhP/+HYT//h2E//4dhP/+HYT//h2E//4dhP/+IYT//iWJA/5NuS/+piGj/0bic//Tkyf+JYj//fFIt/3tSLf+IZEP/8fHw/83Btv+ojXb/kXBR/4hlQ/+IY0H/iGNB/4hjQf+IY0H/iGNB/4hjQf+IY0H/iGNB/4hjQf+IY0H/iGNB/4hjQf+IY0H/iGNB/4hjQf+IY0H/iGNB/4hjQf+IY0H/iGNB/4hjQv9/VTL/e1Et/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRS//jnzv/4587/+OfO//jnzv/4587/+OfO//jnzv/4587/+OfO//jnzv/4587/+OfO//jnzv/4587/+OfO//jnzv/4587/+OfO//jnzv/4587/+efO//nnzv/5587/+ubO//vm0P/55s7/iWJA/3xSLf97Ui3/iGVC//T19P/29vb/9fX1//b29f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fb/mXhc/3tRLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//T19v+Ze
lz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//T19v+Zelz/fFIs/3tRLf98US3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29fb/nYBm/3xSLP96Uiz/glg2/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/29vb/9vb2//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/29vb/9vb2//b29v/29vb/9vb2/9LGvf+BWjb/flMw/7qmlv98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb2//b29v/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19
fX/9fX1//X19f/19fX/9vb2//b29v/29vb/9vb2//b29v/29vb/8O3q/+vo5P/19fX/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b29v/z8vH/sZyI/5JwU/+Rb1H/kW9R/5FvUf+Rb1H/kW9R/5FvUf+Rb1H/kW9R/5FvUf+Rb1H/kW9R/5FvUf+Rb1L/rJJ8//Du7P/19fb/9vb2//b29v/29vb/9vb2//b29f/19vX/9fX1/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/19fb/y7uu/3tSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIt/3tRLf+6qJb/9fX2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9fX2/8O2pf98US7/e1Et/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLf97US7/t6KP//X29f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b19v/x7+3/o4lx/4NdOv+CXDj/gls4/4JbOP+CWzj/gls4/4JbOP+CWzj/gls4/4JbOP+CWzj/gls4/4FbOP+DXDr/nH1i/+vo5f/29vb/9vb2//b29v/29vb/9vb2//X19f/29vb/9vb2/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/+ObQ//fm0P/65tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//rlz//75tD/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/19fX/9vX2//b29v/29fX/9fX0//X19P/19fT/9fX0//X19P/19fT/9fX0//X19P/19fT/9fX0//X19P/19fT/9fX0//b29f/29vX/9vb2//b29v/29vb/9vb2//X19v/r5uP/5N/a//X19v98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//Xly/+8ooL/pYNi/6aCYv+mgmL/poJi/6aCYv+mgmL/poJi/6aCYv+mgmL/poJi/6aCYv+mgmL/poJi/6eEY//Gq43/+ObN//jmz//75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb2//b29v/19fX/9fX1//X19f/19
fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/29vb/9vb2//b29v/29vb/9vb2//X19f/PwrP/f1Y0/3xTLf+0nor/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//jmz//DqYv/fFEt/3tRLP97US3/e1It/3tSLf97Ui3/e1It/3tSLf97Ui3/e1It/3tSLf97Ui3/e1It/3tSLf97USz/fFEt/9G6nv/4587/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/19fb/nX5j/3xSLf98Ui3/glc1/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/55tD/tZZ4/3tSLv98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/e1Es/3xSLP/DqYz/+ObP//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9PX2/5l5XP98Uiz/fFIt/3xRLP98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/+ubQ/+bSuP+MZUP/e1Mt/3tSLf97Ui3/e1It/3tSLf97Ui3/e1It/3tSLf97Ui3/e1It/3tSLf97Ui3/e1It/3xSLv+Sb03/7t3B//jnzv/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//T19v+Zelz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/65s//9ubL/+zbvv/r2r3/69q9/+vavf/r2r3/69q9/+vavf/r2r3/69q9/+vavf/r2r3/69q9/+vavv/t28D/9+fN//rmz//65s//++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//rmz//55s//+ebP//nm0P/55tD/+ebQ//nm0P/55tD/+ebQ//nm0P/55tD/+ebQ//nm0P/65s//+ubP//rmz//75tD/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/29vb/9vb2//b29v/29vb/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9vb2//X19f/29vb/9vb2//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZ
UL/9fX1//b29v/29vb/9vb2//b19f/Pwrb/sJiC/6+Xgf+vl4H/r5eB/6+Xgf+vl4H/r5eB/6+Xgf+vl4H/r5eB/6+Xgf+vl4H/r5eB/6+Xgv/Ht6r/9fX1//b29v/29vb/9vb2//T19v+Zelz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b19f/Syr//fFIv/3xRLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xRLf98Uiz/e1Et/8e4qv/19fb/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/29vX/v66d/3xRLv98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf+xm4b/9fX2//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vX1/+jk4f+KZkj/e1Is/3xSLf97Uiz/e1Is/3tSLP97Uiz/e1Is/3tSLP97Uiz/e1Is/3tSLP97Uiz/fFIt/3xSLf+GXj3/4NjS//X29f/29vb/9vb2//T19v+Zelz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b29v/19fX/8vLw/+Hb1f/f2NL/39jR/9/Y0f/f2NH/39jR/9/Y0f/f2NH/39jR/9/Y0f/f2NH/39jR/9/Y0v/f2tP/8O/s//X19f/19vX/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/+ubO//vmz//6587/+ufO//rnzv/6587/+ufO//rnzv/6587/+ufO//rnzv/6587/+ufO//rnzv/6587/+efN//nnzv/75s7/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/29vb/9vb2//X19f/19vb/9fb2//X29v/19vb/9fb2//X29v/19vb/9fb2//X29v/19vb/9fb2//X29v/19vb/9vb2//X19f/29vb/9vb2//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//nmzf/Tu5//up5+/7mefv+5nX7/uZ1+/7mdfv+5nX7/uZ1+/7mdfv+5nX7/uZ1+/7mdfv+5nX7/uZ19/7qff//ax
ab/+ufO//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//T19v+Zelz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/NtJn/e1It/3tSLf97Ui3/e1It/3tSLf97Ui3/e1It/3tSLf97Ui3/e1It/3tSLf97Ui3/e1It/3tSLf98Ui3/fVMv/9vGqP/55s7/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/65c//spJ0/3tRLf97USz/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3tSLf/Bpoj/+OfO//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bO/93Hqv+CWDP/fFEt/31SLf99Ui3/fVIt/31SLf99Ui3/fVIt/31SLf99Ui3/fVIt/31SLf99Ui3/fVIt/3xSLf+FXjz/59K5//nm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//T19v+Zelz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vnzv/55s7/7Ni//9jApf/YwKT/2L+k/9i/pP/Yv6T/2L+k/9i/pP/Yv6T/2L+k/9i/pP/Yv6T/2L+k/9jApP/ZwaT/8N3C//nnzv/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b29v/19fX/9fX1//b29v/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//X19f/19fX/9fX1//b29v/19fX/9fb1//b29v/29vb/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//jmz//6587/++fO//vnzv/7587/++fO//vnzv/7587/++fO//vnzv/7587/++fO//vnzv/7587/++fO//rnzv/55s//++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/19fb/9PX2/+Tg2v/KvLD/yruu/8q7rv/Ku67/yruu/8q7rv/Ku67/yruu/8q7rv/Ku67/yruu/8q7rv/Lu6//yruv/+Lb1P/29vX/9fX1//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75
tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb1/+DZ0v9/WTf/fFIt/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIs/3xSLP98Uiz/fFIt/3tSLf9/VTH/08rA//b29v/29vb/9vb2//T19v+Zelz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//X29f++rZ3/e1Eu/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFEt/6+YhP/29vb/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/19fb/3tbO/39XNf98US3/fFEs/3xRLP98USz/fFEs/3xRLP98USz/fFEs/3xRLP98USz/fFEs/3xRLP98USz/fFIs/31TLv/Qxbv/9vb2//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb2//X19f/g2tP/xbSk/8SypP/EsqP/xLKj/8Syo//EsqP/xLKj/8Syo//EsqP/xLKj/8Syo//EsqP/xLKj/8SypP/c08v/9vb2//b29v/29vb/9vb2//T19v+Zelz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVC//X19f/29vb/9vb2//b29v/19fX/9fX1//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ebO/4liP/98Ui3/e1It/4hlQv/19fX/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75
tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//nmzv+JYj//fFIt/3tSLf+IZUL/9fX1//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//T19v+Zelz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/55s7/iWI//3xSLf97Ui3/iGVD//X19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ubO/4liP/98Ui3/e1It/4djQ//09fX/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9PX2/5l6XP98Uiz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ubQ//bjy/+DWTb/fFIt/3xSLP+BWjj/8fHw//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//T19v+Zelz/fFIs/3xSLf98Ui3/fFIt/3xSLf98Ui7/mHRT//rmz//75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+uXP//nmz//VwKX/e1Is/3xSLf98Ui3/e1Eu/9TJwP/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/09fb/mXpc/3xSLP98Ui3/fFIt/3xSLf98Ui3/fFIu/5h0U//65s//++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/++bQ//vm0P/75tD/+ubQ//nm0P/y4cj/k3BO/3xSLf98Ui3/fFIt/3xSLP+Tc1X/8e/t//b19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9PX2/5l6XP98U
iz/fFIt/3xSLf98Ui3/fFIt/3xSLv+YdFP/+ubP//rlz//65c//+uXP//rlz//65c//+uXP//rlz//65c//+uXP//rlz//65c//+uXP//rlz//65c//+uXP//rlz//65c//+uXP//rlz//65c//+uXP//nmz//l0rf/mHZV/3tSLf98Ui3/fFIt/3xSLf98Ui3/e1It/5p5Xv/j3tj/9fb1//b19f/19vX/9fb1//X29f/19vX/9fb1//X29f/19vX/9fb1//X29f/19vX/9fb1//X29f/19vX/9fb1//X29f/19vX/9fb1//X29f/19vX/9fb1//X19v+Zelz/fFIt/3xSLf98Ui3/fFIs9XxSLf98Ui3/hl86/6iHZv+oh2b/qIdm/6iHZv+oh2b/qIdm/6iHZv+oh2b/qIdm/6iHZv+oh2b/qIdm/6iHZv+oh2b/qIdm/6iHZv+oh2b/qIdm/6iHZv+oh2b/qIdm/6WFY/+XclD/fVQw/3tRLv98Ui3/fFIt/3tRLf97Ui3/fFIt/3xSLf98USz/fVQw/5V1Wf+minH/qItz/6eLc/+ni3P/p4tz/6eLc/+ni3P/p4tz/6eLc/+ni3P/p4tz/6eLc/+ni3P/p4tz/6eLc/+ni3P/p4tz/6eLc/+ni3P/p4tz/6eLc/+ni3T/hl8//3tRLf98Ui3/fFIt9XxSLNN7US3/fFIt/3xSLf97US7/fFIu/3xSLv98Ui7/fFIu/3xSLv98Ui7/fFIu/3xSLv98Ui7/fFIu/3xSLv98Ui7/fFIu/3xSLv98Ui7/fFIu/3xSLv98Ui7/fFIu/3xSLv97US7/fFIu/3xSLv97Ui3/fFIt/3tSLf+xmYT/sJmF/3tRLP98Ui3/fFIt/3tRLP97USz/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/fFIt/3xSLf98Ui3/e1It/3xSLNN8UiyPfFIt/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3xRLf98USz/e1Iu/39YNf++rJr/9PX0//b19f++rJv/f1g1/3tSLv98USz/e1Es/3xSLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tRLf97US3/e1Et/3tSLf9+UyuPfVAoInxTMPN7US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3xSLv99Uy//jWdI/7Kahv/k39v/9fX1//b29v/19fX/9fX2/+Tg2v+xmYb/jWdI/3xTL/97US//e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3tRLv97US7/e1Eu/3xRLv99Uy7zfVIrIgAAAADc0sxQ5uHd8+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/o497/7+zq//X19f/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//X19f/u6+r/6OPe/+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4
t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4t7/5+Le/+fi3v/n4d3z29TLUAAAAAAAAAAAAAAAAPb29iL29vaP9vb20/b29vX29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb2//b29v/29vb/9vb29fb29tP29vaP9vb2IgAAAAAAAAAA4AAAAAAAAAfAAAAAAAAAA4AAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAcAAAAAAAAAD4AAAAAAAAAc=' | [
"zoemurmure@gmail.com"
] | zoemurmure@gmail.com |
c3bbb5738b81da3295cb82f51894e74b8553f71b | 7765c093fbfaebc3328f8500db2e462977ac42a5 | /sqlite/sample.py | f4dc2f38f85c48f038a9b6f853da204c4bf0df63 | [] | no_license | iamkamleshrangi/datascience | e118e41591850f24438aa344100a07737490fd29 | 7add9501c3ac75323e94df5351e2baf6cadb73ae | refs/heads/master | 2022-02-02T20:19:20.986813 | 2018-07-23T13:26:37 | 2018-07-23T13:26:37 | 128,158,552 | 0 | 0 | null | 2022-01-21T04:26:26 | 2018-04-05T04:22:15 | Python | UTF-8 | Python | false | false | 358 | py | # Create engine: engine
engine = create_engine('sqlite:///Chinook.sqlite')
# Open engine in context manager
with engine.connect() as con:
rs = con.execute('select * from Employee order by BirthDate asc')
df = pd.DataFrame(rs.fetchall())
# Set the DataFrame's column names
df.columns = rs.keys()
# Print head of DataFrame
print(df.head())
| [
"iamkamleshrangi@gmail.com"
] | iamkamleshrangi@gmail.com |
412e272a77c61d7b9f5a0c1f4eeb2a6cdd56efbc | 09f175c759b0c798c1f5605b1720f9571fb5d4aa | /app/main.py | 451951e6016c9e0931d8a05daedf417eb3c471de | [] | no_license | dparker2/internet-trends | 5fe13172fd9bdb5b7d557435e4f699245b47d7df | 1e778c4d52140d1d0f92523ef2608eb623f75bc9 | refs/heads/master | 2022-02-25T00:34:40.988473 | 2019-10-14T06:47:01 | 2019-10-14T06:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | import falcon
from app.resources.html import HTMLResource
print("PRINTED")
app = falcon.API()
HTML_resource = HTMLResource()
app.add_route("/", HTML_resource, suffix="index")
| [
"crazdave@gmail.com"
] | crazdave@gmail.com |
1701c7e4aa7e3cded6d76cdf36ae3df50910147a | 7cbcfc334d0c99b7c2a4740de26bebce42907362 | /1.6. Input print:Hour and minutes.py | a0647d328fb61a693e8d1e043a43ec06b77e6910 | [] | no_license | YukPathy/100-days-of-code | 55603bb80a22fcfdbc1751e1bc7ee5884faa0ddc | 959ed06064c5955fb15d9ceed68c449881c59062 | refs/heads/master | 2020-07-01T14:04:34.627510 | 2019-08-06T15:29:10 | 2019-08-06T15:29:10 | 201,191,395 | 0 | 0 | null | 2019-08-08T06:20:11 | 2019-08-08T06:20:11 | null | UTF-8 | Python | false | false | 107 | py | # Read an integer:
a = int(input())
#Print a value:
h=a//3600;
m = (a//60)
# print(a)
print(str(h),str(m))
| [
"noreply@github.com"
] | YukPathy.noreply@github.com |
0fe975cae36d9d31d2e48d6d1cdbcfdd27ce3810 | 25687385836b292ee8d92f855782f2b98cc6b500 | /operator_app/consumers.py | e9f2f2bff1b32712d7dd117bf59c2cf75f270559 | [] | no_license | Konbini-shubham/Konbini | 3b9e2f11ef198aaad3347265f3c317766d2a51f6 | 85d3c2cc75e3bc6294adf57bd77b2589bb58cf81 | refs/heads/master | 2020-05-30T08:41:07.990166 | 2016-06-01T04:56:40 | 2016-06-01T04:56:40 | 59,843,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | from channels.sessions import channel_session
from channels import Group
from urllib.parse import urlparse, parse_qs
import pprint
pp = pprint.PrettyPrinter(indent=4)
@channel_session
def ws_connect(message):
query_parameters = parse_qs(message.content['query_string'])
machine_id = query_parameters['id'][0]
Group(machine_id).add(message.reply_channel)
message.channel_session['id'] = machine_id
message.reply_channel.send({'text': 'In ws_connect'})
@channel_session
def ws_receive(message):
print("In ws_receive")
group = Group(message.channel_session['id'])
message.reply_channel.send({'text': 'In ws_receive'})
group.send({
"text": message.content['text'],
})
def ws_disconnect(message):
print("In ws_disconnect")
message.reply_channel.send({'text': 'In ws_disconnect'}) | [
"shubhamjigoyal@gmail.com"
] | shubhamjigoyal@gmail.com |
04bb08d4b13fe38a056228962344ffdfb9bf975a | 92e09d003c43662f8452f8445fdc793b60406670 | /Python/python 爬虫/Maoyantop100/spider.py | 634c40a7994be216c33dcb259f6c60942d2ac138 | [] | no_license | WangJian1314/python_spider | 63f9bd98c8f6618aeb3b6f51e6a49563cb3e6066 | 6d0394b560a556df8dca1388dfa6182475ef885b | refs/heads/master | 2020-06-02T22:50:36.577067 | 2019-04-20T09:02:51 | 2019-04-20T09:02:51 | 191,334,030 | 1 | 0 | null | 2019-06-11T09:03:54 | 2019-06-11T09:03:53 | null | UTF-8 | Python | false | false | 1,578 | py | import json
from multiprocessing import Pool
import requests
import re
from requests.exceptions import RequestException
def get_one_page(url):
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36'
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.text
return None
except RequestException:
return None
def parse_one_page(html):
pattern = re.compile('<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
+ '.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
+ '.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
items = re.findall(pattern, html)
for items in items:
yield {
'index': items[0],
'image': items[1],
'title': items[2],
'actor': items[3].strip()[3:],
'time': items[4].strip()[5:],
'score': items[5]+items[6]
}
def write_to_file(content):
with open('result.txt', 'a', encoding='utf-8') as f:
f.write(json.dumps(content, ensure_ascii=False) + '\n')
f.close()
def main(offset):
url = 'http://maoyan.com/board/4?offset=' + str(offset)
html = get_one_page(url)
for item in parse_one_page(html):
print(item)
write_to_file(item)
if __name__ == '__main__':
pool = Pool()
pool.map(main, [i*10 for i in range(10)])
| [
"303061411@qq.com"
] | 303061411@qq.com |
11e111e1dce4624067f7d5607b2f5bc263d234b6 | 152b31f0da5899569c1e30cec9c901ff9ef0a231 | /pythonHelloWord.py | 2233b8c7bdddf33dd1af56c9defa2c7bc478b431 | [] | no_license | lindaTest01/withIgnorefile | acf80a5d91c054847220455d4888012544595980 | cd2c75065ad6cc9417db9be300ec0fd83ee524cf | refs/heads/master | 2022-11-11T07:18:20.756362 | 2020-07-06T08:51:07 | 2020-07-06T08:51:07 | 276,827,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | # -*- coding: UTF-8 -*-
# Filename : helloworld.py
# author by : www.runoob.com
# 该实例输出 Hello World!
print('Hello World!')
| [
"noreply@github.com"
] | lindaTest01.noreply@github.com |
551d0de7166a4b76fbeb42575292be18fcab560f | ba6923aa77c6abeb4428f071528c5a36a7000732 | /ChangeTheWorld/AndAgainAndAgainAndAgainAndAgainAndAgainAndAgain.py | 4b68c7de0db1b0c9188d8f77a3399c7851033194 | [] | no_license | page2me/IT-Xtoberfest2021 | 8522c81a0159dd1de377a8825c0b2f27b84d0be1 | 7b39b819d4815a4840b62a4782b3672472cf9a0a | refs/heads/main | 2023-08-27T13:50:21.420977 | 2021-10-30T18:50:06 | 2021-10-30T18:50:06 | 420,277,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | """Func"""
def func(text):
"""AndAgainAndAgainAndAgainAndAgainAndAgainAndAgain"""
answer = []
for i in text:
counter = 0
counter += i.count("a") + i.count("e") + i.count("i") + i.count("o") + i.count("u")
if counter >= 2:
answer.append(i)
answer.sort()
if len(answer) == 0:
print("Nope")
else:
print(*answer, sep="\n")
func(input().replace(".", "").split())
| [
"earth_killerdark@hotmail.com"
] | earth_killerdark@hotmail.com |
231208107ccc0ef9cd28b169c0c9c14596a48d32 | f93f0936ce11e0e4f7fbda66c6921a3fdc481e10 | /scrapy_aishanghai/aishanghai/aishanghai/middlewares.py | 8ad154919e9f8ccbfd798039d80d90046a13a393 | [] | no_license | leiyanhui/leiyh_projects | 11c827ea4b68040ba73022ade6b47da08bd7161a | 8e33be618f5078d08dec92475bf70ba1fb94ff67 | refs/heads/master | 2020-03-23T22:19:53.547259 | 2018-09-11T12:32:39 | 2018-09-11T12:32:39 | 142,169,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,605 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class AishanghaiSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class AishanghaiDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"18817380161@163.com"
] | 18817380161@163.com |
af38ecfee37543b5b56fdeb4ae4fe169f1676baa | ea4b8ad32345a94ec1c566c30efb4dfc9fd46b8e | /GeoGossip/webapps/geogossip/tests.py | 7b788d76bdb245e32e179aac540d6504c253de3b | [] | no_license | yyi1/GeoGossip | 76e09b585c0a765485547560def4b2f9aa407777 | 99fa1d06c4f26ad1f0ab8b1c007ab2d54d3cc56f | refs/heads/master | 2020-04-06T04:21:46.476790 | 2017-02-25T04:45:26 | 2017-02-25T04:45:26 | 82,977,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,040 | py | from django.test import TestCase
from django.test import Client
from django.contrib.auth import authenticate
from models import User
# Create your tests here.
class EndToEndTest(TestCase):
def setUp(self):
super(EndToEndTest, self).setUp()
self.client = Client()
self.user = User.objects.create_user(username='stonebai', first_name='Shi', last_name='Bai',
email='shib@andrew.cmu.edu', password='123')
new_user = authenticate(username=self.user.username, password='123')
self.assertIsNotNone(new_user)
self.client.login(username=self.user.username, password='123')
pass
#
# def test_home(self):
# response = self.client.get('/')
# self.assertEqual(response.status_code, 200)
# pass
def test_profile(self):
response = self.client.get('/geogossip/profile/' + str(self.user.id))
self.assertEqual(response.status_code, 200)
pass
def test_get_group_with_get_method(self):
response = self.client.get('/geogossip/get-groups')
self.assertEqual(response.status_code, 404)
pass
def test_get_group_success(self):
response = self.client.post('/geogossip/get-groups', data={
'lat': 0.0,
'lon': 0.0
})
self.assertEqual(response.status_code, 200)
pass
def test_get_group_with_invalid_lat(self):
response = self.client.post('/geogossip/get-groups', data={
'lat': 91.0,
'lon': 0.0
})
self.assertEqual(response.status_code, 400)
pass
def test_get_group_with_invalid_lon(self):
response = self.client.post('/geogossip/get-groups', data={
'lat': 0.0,
'lon': 181.0
})
self.assertEqual(response.status_code, 400)
pass
def test_get_business_with_get_method(self):
response = self.client.get('/geogossip/get-businesses')
self.assertEqual(response.status_code, 404)
pass
def test_get_business_success(self):
response = self.client.post('/geogossip/get-businesses', data={
'lat': 0.0,
'lon': 0.0
})
self.assertEqual(response.status_code, 200)
pass
def test_get_business_with_invalid_lat(self):
response = self.client.post('/geogossip/get-businesses', data={
'lat': 91.0,
'lon': 0.0
})
self.assertEqual(response.status_code, 400)
pass
def test_get_business_with_invalid_lon(self):
response = self.client.post('/geogossip/get-businesses', data={
'lat': 0.0,
'lon': 181.0
})
self.assertEqual(response.status_code, 400)
pass
def test_non_exists_group_chat(self):
response = self.client.get('/geogossip/group-chat/1')
self.assertEqual(response.status_code, 404)
pass
def test_non_exists_avatar(self):
response = self.client.get('/geogossip/avatar/1')
self.assertEqual(response.status_code, 404)
pass
# def test_profile(self):
# response = self.client.get('/geogossip/profile/7')
# self.assertEqual(response.status_code, 200)
# pass
# test user_id = 30(invalid uid), redirect to home page
def test_get_profileWithInvalidID_session(self):
response = self.client.get('/geogossip/profile/30')
self.assertEqual(response.status_code, 404)
pass
#############################################################
# Test @login_required #
#############################################################
def test_home_session(self):
client = Client()
response = client.get('/')
self.assertEqual(response.status_code, 302)
pass
def test_logout_session(self):
client = Client()
response = client.get('/geogossip/logout')
self.assertEqual(response.status_code, 302)
pass
def test_get_group_session(self):
client = Client()
response = client.get('/geogossip/get-groups')
self.assertEqual(response.status_code, 302)
pass
def test_get_getBusinesses_session(self):
client = Client()
response = client.get('/geogossip/get-businesses')
self.assertEqual(response.status_code, 302)
pass
def test_get_createGroup_session(self):
client = Client()
response = client.get('/geogossip/create-group')
self.assertEqual(response.status_code, 302)
pass
# test user_id = 7
def test_get_profile_session(self):
client = Client()
response = client.get('/geogossip/profile/7')
self.assertEqual(response.status_code, 302)
pass
def test_get_profileWithoutID_session(self):
client = Client()
response = client.get('/geogossip/profile')
self.assertEqual(response.status_code, 404)
pass
pass
| [
"yyi1@andrew.cmu.edu"
] | yyi1@andrew.cmu.edu |
28afd10dd4bf86cc9fc12239cac8891a7b46c5df | a9243f735f6bb113b18aa939898a97725c358a6d | /0.12/_downloads/plot_time_frequency_mixed_norm_inverse.py | 65ac593e852afd7ae0cd4471a6c573000a16b131 | [] | permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 4,959 | py | """
=============================================
Compute MxNE with time-frequency sparse prior
=============================================
The TF-MxNE solver is a distributed inverse method (like dSPM or sLORETA)
that promotes focal (sparse) sources (such as dipole fitting techniques).
The benefit of this approach is that:
- it is spatio-temporal without assuming stationarity (sources properties
can vary over time)
- activations are localized in space, time and frequency in one step.
- with a built-in filtering process based on a short time Fourier
transform (STFT), data does not need to be low passed (just high pass
to make the signals zero mean).
- the solver solves a convex optimization problem, hence cannot be
trapped in local minima.
References:
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations
Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
DOI: 10.1016/j.neuroimage.2012.12.051.
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries
Proceedings Information Processing in Medical Imaging
Lecture Notes in Computer Science, 2011, Volume 6801/2011,
600-611, DOI: 10.1007/978-3-642-22092-0_49
https://doi.org/10.1007/978-3-642-22092-0_49
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.inverse_sparse import tf_mixed_norm
from mne.viz import plot_sparse_source_estimates
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
# Read noise covariance matrix
cov = mne.read_cov(cov_fname)
# Handling average file
condition = 'Left visual'
evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
evoked = mne.pick_channels_evoked(evoked)
# We make the window slightly larger than what you'll eventually be interested
# in ([-0.05, 0.3]) to avoid edge effects.
evoked.crop(tmin=-0.1, tmax=0.4)
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname, force_fixed=False,
surf_ori=True)
###############################################################################
# Run solver
# alpha_space regularization parameter is between 0 and 100 (100 is high)
alpha_space = 50. # spatial regularization parameter
# alpha_time parameter promotes temporal smoothness
# (0 means no temporal regularization)
alpha_time = 1. # temporal regularization parameter
loose, depth = 0.2, 0.9 # loose orientation & depth weighting
# Compute dSPM solution to be used as weights in MxNE
inverse_operator = make_inverse_operator(evoked.info, forward, cov,
loose=loose, depth=depth)
stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
method='dSPM')
# Compute TF-MxNE inverse solution
stc, residual = tf_mixed_norm(evoked, forward, cov, alpha_space, alpha_time,
loose=loose, depth=depth, maxit=200, tol=1e-4,
weights=stc_dspm, weights_min=8., debias=True,
wsize=16, tstep=4, window=0.05,
return_residual=True)
# Crop to remove edges
stc.crop(tmin=-0.05, tmax=0.3)
evoked.crop(tmin=-0.05, tmax=0.3)
residual.crop(tmin=-0.05, tmax=0.3)
# Show the evoked response and the residual for gradiometers
ylim = dict(grad=[-120, 120])
evoked.pick_types(meg='grad', exclude='bads')
evoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim,
proj=True)
residual.pick_types(meg='grad', exclude='bads')
residual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim,
proj=True)
###############################################################################
# View in 2D and 3D ("glass" brain like 3D plot)
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
opacity=0.1, fig_name="TF-MxNE (cond %s)"
% condition, modes=['sphere'], scale_factors=[1.])
time_label = 'TF-MxNE time=%0.2f ms'
clim = dict(kind='value', lims=[10e-9, 15e-9, 20e-9])
brain = stc.plot('sample', 'inflated', 'rh', clim=clim, time_label=time_label,
smoothing_steps=5, subjects_dir=subjects_dir)
brain.show_view('medial')
brain.set_data_time_index(120)
brain.add_label("V1", color="yellow", scalar_thresh=.5, borders=True)
brain.add_label("V2", color="red", scalar_thresh=.5, borders=True)
| [
"larson.eric.d@gmail.com"
] | larson.eric.d@gmail.com |
9b9bf9b3e9aaeb11ed05526758f3d5fb48fab438 | 09b41a5db055eccee47aba8b305c25515d303793 | /title_test.py | f84ee707d3e011be6b95904389fbf36cf84834ed | [
"Apache-2.0"
] | permissive | ishansd94/travisci-test | 1c83638f8576a931d7776394ee9926b8c7cf07bc | 2edd64e655f8536bddd9655992d1397a10835ed7 | refs/heads/master | 2020-03-30T09:09:52.755411 | 2018-10-01T11:36:52 | 2018-10-01T11:36:52 | 151,063,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | import unittest
from title import convert
class TestConvert(unittest.TestCase):
def test_convert(self):
str = convert("hello world")
self.assertEqual(str, "Hello World")
if __name__ == '__main__':
unittest.main() | [
"ishan.dassanayake@pearson.com"
] | ishan.dassanayake@pearson.com |
2f4b33789431edee4986b09ab5ab538d8efb35ac | 10914bad0901b0e9d0233418f850050b23fe4da2 | /models/pretrained_mobilenet.py | 34aa6ef8eeb074987ccbbc850e52d60e7e50dc2f | [] | no_license | wQuole/image_classifier | 3ced4f48bdef555eaf07858a56c56930ca3f39b3 | 7ebd1794381902ddb316161d4778cb84a3cc0b13 | refs/heads/master | 2022-09-09T08:22:41.818211 | 2020-05-31T17:50:43 | 2020-05-31T17:50:43 | 267,560,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, GlobalAveragePooling2D
class PretrainedMobileNetV2:
def __init__(self, IMAGE_SIZE):
self.image_size = IMAGE_SIZE
self.model = Sequential()
self.fine_tune()
def fine_tune(self):
mobilenetv2_model = MobileNetV2(input_shape=(self.image_size),
include_top=False,
pooling='avg',
weights="imagenet")
for layer in mobilenetv2_model.layers:
# Freeze layers that should not be re-trained
layer.trainable = False
# Add all layers from basemodel, trainable and non-trainable to our model
self.model.add(mobilenetv2_model)
# Add classification block
self.model.add(Dense(2, activation='softmax'))
self.model.compile(optimizer=RMSprop(lr=1e-4),
loss=BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
| [
"wgkvaale@gmail.com"
] | wgkvaale@gmail.com |
b548eedfdd00fe7c08f5ba00618fbe44e0cba7df | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/pipeline/service_group/number_office/time/fact.py | e3cbfccb649de7dbf84162e340a4f0fe1510ddd6 | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,575 | py | package textTranslator;
import java.io.*;
import java.net.*;
import java.util.*;
import com.google.gson.*;
import com.squareup.okhttp.*;
public class Translate {
String subscriptionKey = 'b58103fec253e2c21b0fdc1a24e16352';
String url = "https://api.cognitive.microsofttranslator.com/translate?api-version=3.0&to=";
public Translate(String subscriptionKey) {
this.subscriptionKey = subscriptionKey;
}
// Instantiates the OkHttpClient.
OkHttpClient client = new OkHttpClient();
// This function performs a POST request.
public String Post() throws IOException {
MediaType mediaType = MediaType.parse("application/json");
RequestBody body = RequestBody.create(mediaType,
"[{\n\t\"Text\": \"Welcome to Microsoft Translator. Guess how many languages I speak!\"\n}]");
Request request = new Request.Builder()
.url(url).post(body)
.addHeader("ec0c96a092ea0a3ba1041f4738a0b33a", subscriptionKey)
.addHeader("Content-type", "application/json").build();
Response response = client.newCall(request).execute();
return response.body().string();
}
public String Post(String bodyStr, String translateTo) throws IOException {
MediaType mediaType = MediaType.parse("application/json");
RequestBody body = RequestBody.create(mediaType,
"[{\n\t\"Text\": \"" + bodyStr + "\"\n}]");
Request request = new Request.Builder()
.url(url + translateTo).post(body)
.addHeader("f460aacf46d11f243d71d7221840dbe5", subscriptionKey)
.addHeader("Content-type", "application/json").build();
Response response = client.newCall(request).execute();
return response.body().string();
}
// This function prettifies the json response.
public static String prettify(String json_text) {
JsonParser parser = new JsonParser();
JsonElement json = parser.parse(json_text);
Gson gson = new GsonBuilder().setPrettyPrinting().create();
return gson.toJson(json);
}
public static String getTranslatedText(String jsonText) {
JsonParser parser = new JsonParser();
JsonArray json = parser.parse(jsonText).getAsJsonArray();
String translatedText = null;
for (int i = 0; i < json.size(); i++) {
if (translatedText != null)
break;
JsonObject jsonObj = json.get(i).getAsJsonObject();
JsonArray translations = jsonObj.getAsJsonArray("translations");
if (translations == null) return "";
for (int j = 0; j < translations.size(); j++) {
if (translatedText != null) break;
JsonObject translation = translations.get(j).getAsJsonObject();
JsonElement text = translation.get("text");
if (text == null) return "";
translatedText = text.getAsString();
}
}
return translatedText;
}
// public static void main(String[] args) {
// try {
// Translate translateRequest = new Translate(System.getenv("Translator"));
//// String response = translateRequest.Post();
//// System.out.println(prettify(response));
//
// String response = translateRequest.Post("Hello", "fr");
// System.out.println(Translate.prettify(response));
//
// System.out.println(getTranslatedText(response));
//
//
// } catch (Exception e) {
// System.out.println(e);
// }
// }
}
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
92bce89e8eb6178bc209f07bc8b16f2f963e3867 | 20b47d5a51508cbcb8cd57f950e1d0c6f679b71c | /education.py | 0a28a7aef18e8dec4b013e26cccebcc1862c4daf | [] | no_license | amycbaker/Thinkful_Assignments | 91722e60d08022387009548e0849d69b6fa04327 | 46d14e50c25f01bfe857f4620771253a559f2f6f | refs/heads/master | 2021-01-19T23:42:30.374995 | 2017-06-13T04:05:00 | 2017-06-13T04:05:00 | 89,014,116 | 0 | 0 | null | 2017-05-17T16:30:58 | 2017-04-21T18:48:20 | Python | UTF-8 | Python | false | false | 2,696 | py | from bs4 import BeautifulSoup
import requests
import pandas as pd
import csv
import sqlite3 as lite
import statsmodels.formula.api as smf
import math
# store url for school years
url = "http://web.archive.org/web/20110514112442/http://unstats.un.org/unsd/demographic/products/socind/education.htm"
# get the html
r = requests.get(url)
# parse the html content with bs
soup = BeautifulSoup(r.content)
mylist = soup.findAll('tr', attrs=('class', 'tcont'))
mylist = mylist[:93]
#country_name, year, school years, male , female
countries = []
for item in mylist:
countries.append([item.contents[1].string,
item.contents[3].string,
item.contents[9].string,
item.contents[15].string,
item.contents[21].string])
# convert data to pandas dataframe and define column names
df = pd.DataFrame(countries)
df.columns = ['Country', 'DataYear', 'TotalYears', 'MaleYears', 'FemaleYears']
# convert school years to integers
df['TotalYears'] = df['MaleYears'].map(lambda x: int(x))
df['MaleYears'] = df['MaleYears'].map(lambda x: int(x))
df['FemaleYears'] = df['FemaleYears'].map(lambda x: int(x))
print("The city mean years are:")
mean = df.mean()
print mean
print("The city mean years are:")
median = df.median()
print median
max = df.max()
print("The maximum years is")
print max
min = df.min()
print("The minimum years is")
print min
con = lite.connect('education.db')
with con:
cur = con.cursor()
df.to_sql("education_years", con, if_exists="replace")
cur.execute("DROP TABLE IF EXISTS gdp")
cur.execute('CREATE TABLE gdp (country_name text, _1999 integer, _2000 integer, _2001 integer, _2002 integer, _2003 integer, _2004 integer, _2005 integer, _2006 integer, _2007 integer, _2008 integer, _2009 integer, _2010 integer)')
with open('API_NY.GDP.MKTP.CD_DS2_en_csv_v2.csv','rU') as inputFile:
next(inputFile)
next(inputFile)
next(inputFile)
next(inputFile)
header = next(inputFile)
inputReader = csv.reader(inputFile)
for line in inputReader:
cur.execute('INSERT INTO gdp (country_name, _1999, _2000, _2001, _2002, _2003, _2004, _2005, _2006, _2007, _2008, _2009, _2010) VALUES ("' + line[0] + '","' + '","'.join(line[42:-8]) + '");')
cur.execute("SELECT country_name, TotalYears, _2000, _2005, _2010 FROM education_years INNER JOIN gdp ON Country = country_name")
rows = cur.fetchall()
cols = [desc[0] for desc in cur.description]
gdp_df = pd.DataFrame(rows, columns=cols)
est = smf.ols(formula='TotalYears ~ _2010', data=gdp_df).fit()
print(est.summary())
| [
"amybaker@gmail.com"
] | amybaker@gmail.com |
4e58c678d8ebf19f2a3b0ab83528aa49d5f251d1 | 4d0472bcb230cf060e31e0d5320f3de2f5e9e55e | /train_generator.py | fd3098f5830fbb894befa01558bb81b18581e701 | [] | no_license | sharadmv/CharacterGAN | 7b9d37d8515978655084ea815b714596e3d5adc7 | 4a0253102f23313a5ee72a09f938c64a220fe6b8 | refs/heads/master | 2021-01-15T21:14:26.016372 | 2016-03-01T02:06:08 | 2016-03-01T02:06:08 | 52,777,523 | 0 | 0 | null | 2016-02-29T08:57:04 | 2016-02-29T08:57:03 | null | UTF-8 | Python | false | false | 5,040 | py | import numpy as np
import cPickle as pickle
import theano
import sys
import csv
import logging
import random
from dataset import *
from deepx.nn import *
from deepx.rnn import *
from deepx.loss import *
from deepx.optimize import *
from argparse import ArgumentParser
theano.config.on_unused_input = 'ignore'
logging.basicConfig(level=logging.DEBUG)
def parse_args():
argparser = ArgumentParser()
argparser.add_argument("reviews")
argparser.add_argument("--log", default="loss/generator_loss_current.txt")
return argparser.parse_args()
class WindowedBatcher(object):
def __init__(self, sequences, encodings, batch_size=100, sequence_length=50):
self.sequences = sequences
self.pre_vector_sizes = [c.seq[0].shape[0] for c in self.sequences]
self.pre_vector_size = sum(self.pre_vector_sizes)
self.encodings = encodings
self.vocab_sizes = [c.index for c in self.encodings]
self.vocab_size = sum(self.vocab_sizes)
self.batch_index = 0
self.batches = []
self.batch_size = batch_size
self.sequence_length = sequence_length + 1
self.length = len(self.sequences[0])
self.batch_index = 0
self.X = np.zeros((self.length, self.pre_vector_size))
self.X = np.hstack([c.seq for c in self.sequences])
N, D = self.X.shape
assert N > self.batch_size * self.sequence_length, "File has to be at least %u characters" % (self.batch_size * self.sequence_length)
self.X = self.X[:N - N % (self.batch_size * self.sequence_length)]
self.N, self.D = self.X.shape
self.X = self.X.reshape((self.N / self.sequence_length, self.sequence_length, self.D))
self.N, self.S, self.D = self.X.shape
self.num_sequences = self.N / self.sequence_length
self.num_batches = self.N / self.batch_size
self.batch_cache = {}
def next_batch(self):
idx = (self.batch_index * self.batch_size)
if self.batch_index >= self.num_batches:
self.batch_index = 0
idx = 0
if self.batch_index in self.batch_cache:
batch = self.batch_cache[self.batch_index]
self.batch_index += 1
return batch
X = self.X[idx:idx + self.batch_size]
y = np.zeros((X.shape[0], self.sequence_length, self.vocab_size))
for i in xrange(self.batch_size):
for c in xrange(self.sequence_length):
seq_splits = np.split(X[i, c], np.cumsum(self.pre_vector_sizes))
vec = np.concatenate([e.convert_representation(split) for
e, split in zip(self.encodings, seq_splits)])
y[i, c] = vec
X = y[:, :-1, :]
y = y[:, 1:, :self.vocab_sizes[0]]
X = np.swapaxes(X, 0, 1)
y = np.swapaxes(y, 0, 1)
# self.batch_cache[self.batch_index] = X, y
self.batch_index += 1
return X, y
def generate_sample(length):
'''Generate a sample from the current version of the generator'''
characters = [np.array([0])]
generator2.reset_states()
for i in xrange(length):
output = generator2.predict(np.eye(len(text_encoding))[None, characters[-1]])
sample = np.random.choice(xrange(len(text_encoding)), p=output[0, 0])
characters.append(np.array([sample]))
characters = np.array(characters).ravel()
num_seq = NumberSequence(characters[1:])
return num_seq.decode(text_encoding)
if __name__ == '__main__':
    args = parse_args()
    # ---- Load and clean the review corpus ------------------------------
    logging.debug('Reading file...')
    with open(args.reviews, 'r') as f:
        # Each line starts with a 3-character prefix that is stripped here.
        reviews = [r[3:] for r in f.read().strip().split('\n')]
    # Remove control characters and the start-of-review marker.
    reviews = [r.replace('\x05', '') for r in reviews]
    reviews = [r.replace('<STR>', '') for r in reviews]
    # ---- Character-level encoding --------------------------------------
    logging.debug('Retrieving text encoding...')
    with open('data/charnet-encoding.pkl', 'rb') as fp:
        text_encoding = pickle.load(fp)
    # Create reviews and targets
    logging.debug('Converting to one-hot...')
    review_sequences = [CharacterSequence.from_string(r) for r in reviews]
    num_sequences = [c.encode(text_encoding) for c in review_sequences]
    # Concatenate all reviews into one long integer sequence for windowing.
    final_sequences = NumberSequence(np.concatenate([c.seq.astype(np.int32) for c in num_sequences]))
    # Construct the batcher
    batcher = WindowedBatcher([final_sequences], [text_encoding], sequence_length=200, batch_size=100)
    # Two copies of the same 2-layer stateful LSTM generator: one with
    # batch_size=100 for training, one with batch_size=1 for sampling.
    generator = Sequence(Vector(len(text_encoding), batch_size=100)) >> Repeat(LSTM(1024, stateful=True), 2) >> Softmax(len(text_encoding))
    generator2 = Sequence(Vector(len(text_encoding), batch_size=1)) >> Repeat(LSTM(1024, stateful=True), 2) >> Softmax(len(text_encoding))
    logging.debug('Loading prior model...')
    with open('models/generative/generative-model-0.0.renamed.pkl', 'rb') as fp:
        generator.set_state(pickle.load(fp))
    with open('models/generative/generative-model-0.0.renamed.pkl', 'rb') as fp:
        generator2.set_state(pickle.load(fp))
    # Optimization procedure
    rmsprop = RMSProp(generator, CrossEntropy())
    def train_generator(iterations, step_size):
        # Python 2: "print >> f" writes the loss line to the log file as
        # well as stdout; the model snapshot is overwritten every step.
        with open(args.log, 'w') as f:
            for _ in xrange(iterations):
                X, y = batcher.next_batch()
                loss = rmsprop.train(X, y, step_size)
                print >> f, 'Loss[%u]: %f' % (_, loss)
                print 'Loss[%u]: %f' % (_, loss)
                f.flush()
                with open('models/generative/generative-model-current.pkl', 'wb') as g:
                    pickle.dump(generator.get_state(), g)
        # Copy the trained weights into the batch-size-1 sampling network.
        generator2.set_state(generator.get_state())
| [
"liam.fedus@gmail.com"
] | liam.fedus@gmail.com |
bde1909ef256bb98db99f9655bb6c8af594fcd1c | f3dc7c8ab38e3affaacbc05179aa30d14589adf1 | /SOLA.py | e51c3c4172777cf5b63909b718772f4a7582392f | [] | no_license | jishnub/moccasin | fa2c6ffd7f42f99aacb9f4519a58017f37715fab | 3495cc82c70d8a62f2fda551b2d269b17f4efeef | refs/heads/master | 2020-03-18T07:34:39.901461 | 2019-01-21T11:02:12 | 2019-01-21T11:02:12 | 134,461,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,599 | py | from astropy.io import fits
import numpy as np; import array
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy
import math
from numpy.linalg import inv
import numpy.matlib
def dfac(n):
    """Return the element-wise double factorial of the array *n*.

    For each entry k this computes k * (k-2) * (k-4) * ... down to 2 or 1.
    Entries <= 1 yield 1, because the product over an empty range is 1.
    The result keeps the dtype of *n* (allocated via ``np.zeros_like``).
    """
    result = np.zeros_like(n)
    for position, value in enumerate(n):
        result[position] = np.prod(np.arange(value, 1, -2))
    return result
def fac(n):
    """Return the element-wise factorial of the array *n*.

    Uses exact integer ``math.factorial`` per entry; the result keeps the
    dtype of *n* (allocated via ``np.zeros_like``).
    """
    result = np.zeros_like(n)
    for position, value in enumerate(n):
        result[position] = math.factorial(value)
    return result
# ---- Inversion configuration ------------------------------------------------
elmin = 1
elmax = 10
dellmin = 1
dellmax =1
instrument = "HMI"
track = 430
smin =0
smax = 7
perform_inversion = True
sigma_min = 1 # micro-Hertz
sigma_max = 200 # micro-Hertz
nyears = 6 # each "year" is 360 days
startyear = 1
# SOLA target: centre radius and Gaussian width (fractional radius units).
r0 = 0.15
deltar = 0.05
# Tikhonov regularisation strength for the coefficient solves below.
reg = 1e6
dnu = 1e6/(nyears*360.0*86400.0) # in mu Hz
freqfloor = int(np.floor(sigma_min/dnu) +1)
# Radial grid of the model, squeezed to 1-D.
r=np.squeeze(fits.open("radius.fits")[0].data)
# Zero-padded string tags used to build file names below.
trackch =str("{:03d}".format(track));
lminst =str("{:03d}".format(elmin));
lmaxst =str("{:03d}".format(elmax));
direct0 = '/scratch/shravan/HMI'
direct = direct0 + '/tracking' + trackch
# Number of (s, t) harmonic-degree pairs with s <= smax.
ns = (smax+1)**2
# ---- Load and stack sensitivity kernels over all requested dell -------------
dellst =str("{:01d}".format(dellmin));
kern = np.squeeze(fits.open(instrument+"_kernels_"+lminst +"_to_"+lmaxst+"_dell_"+dellst+".fits")[0].data)
indices = np.loadtxt(instrument+"_indices_"+lminst +"_to_"+lmaxst+"_dell_"+dellst)
for dell in range(dellmin+1, dellmax+1):
    dellst =str("{:01d}".format(dell));
    kerntemp = np.squeeze(fits.open(instrument+"_kernels_"+lminst +"_to_"+lmaxst+"_dell_"+dellst+".fits")[0].data)
    g = np.loadtxt(instrument+"_indices_"+lminst +"_to_"+lmaxst+"_dell_"+dellst)
    indices = np.r_[indices, g]
    kern = np.r_[kern, kerntemp]
h = kern.shape
nkerns = h[0]
# Radial integration weights: forward differences, last point duplicated.
dr = np.zeros_like(r)
nr = r.shape
nr = nr[0]
dr[0:nr-2] = r[1:nr-1] - r[0:nr-2]
dr[nr-1] = dr[nr-2]
# Unit-integral Gaussian target function centred on r0 with width deltar.
target = np.exp(-(r-r0)**2.0/(2.0*deltar**2))
target = target/np.sum(target*dr)
# Gram matrix of the kernels (A, symmetric) and their overlap with the target.
A = np.zeros((nkerns,nkerns))
rhs = np.zeros((nkerns,1))
for j in range(0,nkerns):
    temp = dr*kern[j,:]
    rhs[j] = np.sum(temp*target)
    for i in range(j,nkerns):
        A[i,j] = np.sum(temp*kern[i,:])
        A[j,i] = A[i,j]
parity = np.zeros((nkerns,1))
coeffstor = np.zeros((nkerns,smax+1))
coeffspol = np.zeros((nkerns,smax+1))
elldiff = indices[:,2] - indices[:,0]
# ---- Solve for SOLA coefficients, per harmonic degree s ---------------------
# Kernels split by parity of (ell' - ell + s): odd -> toroidal, even -> poloidal.
for s in range(smin,smax+1):
    parity = np.mod(elldiff + s,2)
    hh = np.where(abs(elldiff) <= s)[0]
    if (hh.size == 0):
        continue
    tind = hh[np.where(parity[hh] == 1)[0]]
    #tind = tind[hh]
    pind = hh[np.where(parity[hh] == 0)[0]]
    #pind = np.where(parity == 0 and abs(elldiff) <= s)
    sumdiff1 = s + elldiff
    sumdiff2 = s - elldiff
    if (tind.size >0):
        # Wigner-style geometric prefactor built from (double) factorials.
        factor = (1-2*np.mod((sumdiff1[tind] - 1)/2,2)) * dfac(sumdiff1[tind]) * dfac(sumdiff2[tind])/(fac(sumdiff1[tind])*fac(sumdiff2[tind]))**0.5
        rhstor = rhs[tind,0] * factor
        Ator = A[np.ix_(tind, tind)]
        # Regularised least-squares solve: (A + reg*I)^-1 rhs.
        coeffstor[tind,s] = np.matmul(inv(Ator + reg * np.eye(tind.size)), rhstor)
        # if (s==2):
        #     func = np.matmul(np.squeeze(coeffstor[tind,s]),kern[tind,:])
        #     plt.plot(r,target/target.max()); plt.plot(r,func/func.max()); plt.show()
        #     stop
    if (pind.size >0):
        facpol = (1-2*np.mod(sumdiff1[pind]/2,2)) * elldiff[pind] * dfac(sumdiff1[pind]-1) * dfac(sumdiff2[pind]-1)/(fac(sumdiff1[pind])*fac(sumdiff2[pind]))**0.5
        rhspol = rhs[pind,0] * facpol
        Apol = A[np.ix_(pind, pind)]
        coeffspol[pind,s] = np.matmul(inv(Apol + reg * np.eye(pind.size)), rhspol)
if (perform_inversion):
    # Frequency grid of the inversion output (in micro-Hertz).
    nfreq = int(np.floor((sigma_max - sigma_min)/dnu)) + 2
    a = np.zeros([nfreq,60,ns],'complex'); powpos = 0; powneg = 0;
    nus = (np.arange(nfreq) + freqfloor)*dnu
    # Accumulators: flow coefficients (complex) and propagated noise, per
    # frequency bin and (s, t) index.
    noitoroidal = np.zeros([nfreq,ns])
    noipoloidal = np.zeros([nfreq,ns])
    toroidal = np.zeros([nfreq,ns], dtype = complex)
    poloidal = np.zeros([nfreq,ns], dtype = complex)
    nors = np.zeros([60]);nord = np.zeros([60]); nordp = np.zeros([60])
    # File-name suffix for the observing-year range (5 tracking files/"year").
    stry1 = str("{:02d}".format(5*(startyear-1)+1));
    stry2 = str("{:02d}".format(5*(startyear+nyears-1)));
    stryear = '_year_'+stry1+'_'+stry2
    for dell in range(dellmin, dellmax+1):
        for ell in range(elmin, elmax+1-dell):
            # Python 2 print statement.
            print "ell:", ell
            ellc =str("{:03d}".format(ell))
            ellp = ell + dell
            ellpc =str("{:03d}".format(ellp))
            # Rows of the kernel index table matching this (ell, ell') pair.
            allind = np.where(indices[:,0] == ell)[0]
            allind = allind[np.where(indices[allind,2] == ellp)]
            # Measured b-coefficients (complex) and their noise estimates.
            te = fits.open(direct+'/bcoef_l_'+ellc +'_lp_'+ellpc+stryear+'.fits')[0].data
            noit = fits.open(direct+'/noise_l_'+ellc +'_lp_'+ellpc+stryear+'.fits')[0].data
            nfrequ = te.shape[1]
            te = te[0,:,:,:]+1j*te[1,:,:,:]
            f = open(direct+'/frequency_metadata_l_'+ellc +'_lp_'+ellpc+stryear, 'r')
            j=-1
            k= -1
            for line in f:
                j = j+1
                if (j==0):
                    # First line carries the frequency resolution dnu.
                    line = line.strip()
                    columns = line.split()
                    dnu = float(columns[0])
                if (j <= 3):
                    # Lines 0-3 are header; data rows start at j == 4.
                    continue
                k = k+1
                line = line.strip()
                columns = line.split()
                # Frequency difference between the coupled (n', l') and (n, l).
                freqdiff = np.float(columns[5]) - np.float(columns[2])
                if (np.absolute(freqdiff) < sigma_min or np.absolute(freqdiff) > sigma_max):
                    continue
                # Destination slice in the output frequency grid.
                fst = int(np.floor(np.absolute(freqdiff)/dnu)) - freqfloor
                fend = np.minimum(fst + nfrequ, nfreq)
                nord = int(columns[1])
                nordp = int(columns[4])
                nind = np.where(indices[allind,1] == nord)[0]
                freql = nfrequ + (fend-fst - nfrequ)
                #print fst,fend,nus[fst],nus[fend-1],fend-fst,nfrequ,freql,nus.shape
                # if (fend > nfreq-1):
                #     print "Frequency range too high, skipping, ell =", ell, "dell =", dell, "n = ", nord, "n' = ", nordp, "freq. difference = ", freqdiff
                #     continue
                # Single kernel row matching (ell, n, ell', n').
                coefind = allind[np.where(indices[allind[nind],3] == nordp)][0]
                # Accumulate the SOLA-weighted measurements into the flow
                # estimates and their noise, for every (s, t).
                for s in range(smin,smax+1):
                    for t in range(-s,s+1):
                        ind = s**2 + s + t
                        poloidal[fst:fend,ind] = poloidal[fst:fend,ind] + te[0:freql,k,ind]*coeffspol[coefind,s]*(1.0 + 0.0j)
                        noipoloidal[fst:fend,ind] = noipoloidal[fst:fend,ind] + noit[0:freql,k,ind]*coeffspol[coefind,s]
                        toroidal[fst:fend,ind] = toroidal[fst:fend,ind] + te[0:freql,k,ind]*coeffstor[coefind,s]*(1.0 + 0.0j)
                        noitoroidal[fst:fend,ind] = noitoroidal[fst:fend,ind] + noit[0:freql,k,ind]*coeffstor[coefind,s]
            f.close()
| [
"hanasoge@gmail.com"
] | hanasoge@gmail.com |
dfee4727ac0ae042e9312e84a9fcd32b98b17fc0 | 81c0dcb009cd30e12e6948b90a0a2ff71fa88d98 | /word/index.py | 9474bdb8bd374a2cdc94a2b0ad6775e1490590af | [] | no_license | function2-llx/IR-system | c4bc3b5a84f693c2c62b03979adea39601cb09c9 | 8b40e8c9637e1d5d3665df48670a9095f4027665 | refs/heads/master | 2023-02-09T03:25:45.723609 | 2021-01-01T16:14:36 | 2021-01-01T16:14:36 | 311,276,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | import json
from tqdm import tqdm
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
client = Elasticsearch()
inner_sep = '\t'
token_sep = ' '
if __name__ == '__main__':
    # Bulk-index the tokenised corpus into the "docs" Elasticsearch index,
    # one chunk of documents per bulk request.
    corpus = json.load(open('sample.json'))
    chunk_size = 1000
    for i in tqdm(range(0, len(corpus), chunk_size)):
        actions = []
        for j, doc in enumerate(corpus[i:i + chunk_size]):
            tokens = doc['tokens']
            # One record per token: "token<TAB>pos<TAB>head-token<TAB>relation",
            # records joined by spaces.  head == 0 marks the dependency root.
            content = token_sep.join([inner_sep.join((token, pos, tokens[head - 1] if head else '', rel)) for token, pos, (_, head, rel) in zip(tokens, doc['pos'], doc['dep'])])
            actions.append({
                # BUG FIX: was `i * chunk_size + j`, but `i` already advances in
                # steps of chunk_size, so ids jumped by chunk_size**2 per chunk.
                # The document's position in the corpus is simply i + j.
                '_id': i + j,
                '_source': {'content': content}
            })
        results = bulk(client, actions, index='docs')
| [
"function2@qq.com"
] | function2@qq.com |
49bbf912ea16d6bf259a39904154cc010346a28a | 687306842e8082ed1c31441bbacf697352fe1d22 | /design.py | b7a6d0ecdb4066f6d35c6bb75f544e667966a54e | [] | no_license | Garyguo2011/Firewall | a77940d6fa0957fb2c2811cfcc5fa3c3b8982209 | 0906e947853c14b0a04fcccfd350202405b1c8f5 | refs/heads/master | 2020-05-21T11:37:02.562484 | 2014-12-03T15:45:44 | 2014-12-03T15:45:44 | 26,337,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | class DNSArchive(Archive):
    # NOTE(review): design sketch only -- the "...." placeholder parameters,
    # the missing method body below, and the bare attribute-name statements do
    # not parse as Python; this file documents intended structure, not code.
    def __init__(self, ....):
        self.app = "dns"
        self.domainName
class TCPArchive (Archive):
    def __init__(self, packet)
class Archive(object):
    # Intended base record for a captured packet and its classification data.
    def __init__(self, ....):
        self.direction
        self.protocol
        self.externalIP
        self.countryCode
        self.packet
        self.matchRules
# Control Plane
| [
"xguo@berkeley.edu"
] | xguo@berkeley.edu |
40ce5ec818cd4be194c39a8e93b7069f16945d43 | 1ec2e018d63d15486110aea7923ffbbf62ecbd5b | /SVM_classification__.py | a6893ea17659bb15ef9207d6898ec74c58171c78 | [] | no_license | DDeman/svm | 88f51cefed530821e5a13df259602bf3c67f0128 | 743c6edcbd12a34ffc45dd43ef97c36792d308ac | refs/heads/master | 2021-05-24T17:04:42.727047 | 2020-04-07T02:39:47 | 2020-04-07T02:39:47 | 253,668,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,489 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = '任晓光'
__mtime__ = '2020/4/3'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer
# np.set_printoptions(suppress=True)
class SVM__smo_simple():
    """Binary SVM trained with a simplified SMO (sequential minimal
    optimization) procedure using a linear kernel.

    Labels are expected to be -1 / +1.  ``fit`` learns the weight vector
    ``self.w`` and bias ``self.b``; ``predict`` returns raw decision values.
    """

    def __init__(self):
        pass

    def select_j(self, i, m):
        """Pick a random second index j in [0, m) different from i."""
        while True:
            j = int(np.random.uniform(0, m))
            if j != i:
                break
        return j

    def calcute_L_H(self, i, j, C, y_array, alpha):
        """Return the clipping bounds (L, H) for alpha[j] given the box
        constraint 0 <= alpha <= C and the equality constraint."""
        if y_array[i] != y_array[j]:
            L = max(0, alpha[j] - alpha[i])
            H = min(C, C + alpha[j] - alpha[i])
        else:
            L = max(0, alpha[i] + alpha[j] - C)
            H = min(C, alpha[i] + alpha[j])
        return L, H

    def cat_alpha(self, L, H, alpha):
        """Clip alpha into the interval [L, H]."""
        if alpha > H:
            return H
        elif alpha < L:
            return L
        else:
            return alpha

    def calcute_Ei(self, alpha, x_array, y_array, b):
        """Fill the error cache self.E: E[i] = g(x_i) - y_i with a linear kernel."""
        for idx in range(len(alpha)):
            gx = sum(np.dot(x_array, x_array[idx]) * y_array * alpha) + b
            self.E[idx] = gx - y_array[idx]

    def fit(self, data, C, toler, max_iters):
        """Train on *data* (features in all but the last column, -1/+1 label in
        the last), with box constraint C, KKT tolerance *toler*, and at most
        *max_iters* outer iterations.  Returns (w, b)."""
        x_pd = data.iloc[:, :-1]
        y_pd = data.iloc[:, -1]
        m, n = x_pd.shape
        alpha = np.zeros((m,))
        b = 0
        x_array = np.array(x_pd)
        y_array = np.array(y_pd)
        iters = 0
        self.E = np.zeros((m, 1))
        self.calcute_Ei(alpha, x_array, y_array, b)
        while iters < max_iters:
            # Pick the first index as a KKT violator (or a random index).
            i = self.select_i(alpha, y_array, x_array, C, b)
            # NOTE(review): self.E is only refreshed once before the loop, so
            # E_i can be stale after alpha/b updates -- left as in the original.
            E_i = self.E[i]
            if ((y_array[i] * E_i < - toler) and (alpha[i] < C)) or ((y_array[i] * E_i > toler) and (alpha[i] > 0)):
                j = self.select_j(i, m)
                E_j = sum(np.dot(x_array, x_array[j]) * y_array * alpha) + b - y_array[j]
                L, H = self.calcute_L_H(i, j, C, y_array, alpha)
                # NOTE(review): these `continue`s skip the iters increment
                # below, so degenerate pairs can stall progress counting.
                if L == H:
                    continue
                # eta = ||x_i - x_j||^2; must be positive for a valid update.
                eta = np.dot(x_array[i], x_array[i]) + np.dot(x_array[j], x_array[j]) - 2 * np.dot(x_array[i], x_array[j])
                if eta <= 0:
                    continue
                # Unclipped Newton step for alpha[j], then clip to [L, H].
                alpha_j_new_unc = alpha[j] + y_array[j] * (E_i - E_j) / eta
                alpha_j_new = self.cat_alpha(L, H, alpha_j_new_unc)
                alpha_i_new = alpha[i] + y_array[i] * y_array[j] * (alpha[j] - alpha_j_new)
                # Bias candidates from either updated multiplier.
                bi_new = b - E_i - y_array[i] * (alpha_i_new - alpha[i]) * np.dot(x_array[i], x_array[i]) - y_array[j] * (alpha_j_new - alpha[j]) * np.dot(x_array[j], x_array[i])
                bj_new = b - E_j - y_array[i] * (alpha_i_new - alpha[i]) * np.dot(x_array[i], x_array[j]) - y_array[j] * (alpha_j_new - alpha[j]) * np.dot(x_array[j], x_array[j])
                if 0 < alpha_i_new < C:
                    b = bi_new
                elif 0 < alpha_j_new < C:
                    b = bj_new
                else:
                    b = (bi_new + bj_new) / 2
                alpha[i], alpha[j] = alpha_i_new, alpha_j_new
            iters += 1
            print('iters is :', iters)
        # Recover the weight vector from the multipliers (linear kernel).
        self.w = np.dot(alpha * y_array, x_array)
        # Use the last support vector found to set the bias.
        j = None
        for i in range(m):
            if alpha[i] > 0:
                j = i
                continue
        # NOTE(review): this produces an array-valued bias (element-wise
        # expression, not a sum) -- behaviour kept as in the original.
        self.b = y_array[j] - alpha * y_array * np.dot(x_array, x_array[j])
        return self.w, self.b

    def predict(self, x_array):
        """Return raw decision values w.x + b for the given feature rows."""
        pred = np.dot(x_array, self.w) + self.b
        return pred

    def select_i(self, alpha, y_array, x_array, C, b):
        """Return the first index violating its KKT condition, or a random
        index when every sample satisfies the conditions."""
        for idx in range(len(alpha)):
            gx = sum(np.dot(x_array, x_array[idx]) * y_array * alpha) + b
            if alpha[idx] == 0:
                if y_array[idx] * gx < 1:
                    return idx
            elif 0 < alpha[idx] < C:
                if y_array[idx] * gx != 1:
                    return idx
            elif alpha[idx] == C:
                if y_array[idx] * gx > 1:
                    return idx
        # BUG FIX: the original did `i = np.random.uniform(0, len(alpha));
        # return alpha[i]`, which (a) indexes with a float -> TypeError, and
        # (b) would return a multiplier value where callers expect an index
        # (fit does self.E[i] and alpha[i]).  Return a random valid index.
        return int(np.random.uniform(0, len(alpha)))
if __name__ == '__main__':
    # Demo: train on the breast-cancer dataset with labels mapped to -1/+1.
    data = load_breast_cancer()
    x, y = data.data, data.target
    x_pd = pd.DataFrame(x, columns=data.feature_names)
    y_pd = pd.DataFrame(y, columns=['result']).replace([0, 1], [-1, 1])
    data_pd = pd.concat([x_pd, y_pd], axis=1)
    svm = SVM__smo_simple()
    w, b = svm.fit(data_pd, 0.6, 0.001, 5000)
    x_array = np.array(x_pd)
    # Raw decision values on the training set.
    y_pred = svm.predict(x_array)
    print(y_pred)
    # Threshold at 0 to get -1/+1 class labels.
    y_p = []
    for i in y_pred:
        if i > 0:
            y_p.append(1)
        else:
            y_p.append(-1)
    from sklearn.metrics import accuracy_score
    print(y_p)
    print(np.array(y_pd).tolist())
    # Training-set accuracy (no held-out split in this demo).
    acc = accuracy_score(y_pd, y_p)
    print(acc)
| [
"rxg15506009565"
] | rxg15506009565 |
df6c20b6c5095c0d72d68b742ed9c6b48614b69e | 73de83162fd26ea60b0d07a3bb0a9ced63499d43 | /scripts/show_result.py | f1fd3005a1e8db8cd77c90185924775cb3cb8c28 | [
"GPL-3.0-or-later",
"MIT"
] | permissive | Geonhee-LEE/PythonLinearNonlinearControl | fa6c3dec2a7234ddb182388ac2e21074162e2155 | 2a2467098108641483778c09ceb7906cb49f6cee | refs/heads/master | 2023-07-10T03:48:45.566076 | 2021-08-21T12:55:30 | 2021-08-21T12:55:30 | 276,524,383 | 0 | 0 | MIT | 2020-07-02T02:00:18 | 2020-07-02T02:00:17 | null | UTF-8 | Python | false | false | 1,636 | py | import os
import argparse
import pickle
import numpy as np
import matplotlib.pyplot as plt
from PythonLinearNonlinearControl.plotters.plot_func import load_plot_data, \
plot_multi_result
def run(args):
    """Load the saved state/input/goal histories of each controller for the
    requested environment and plot them on shared comparison figures."""
    controllers = ["iLQR", "DDP", "CEM", "MPPI"]
    # Collect one (history_x, history_u, history_g) triple per controller.
    state_histories = []
    input_histories = []
    goal_histories = []
    for controller in controllers:
        history_x, history_u, history_g = \
            load_plot_data(args.env, controller,
                           result_dir=args.result_dir)
        state_histories.append(history_x)
        input_histories.append(history_u)
        goal_histories.append(history_g)
    # Stack along a new leading axis, one slice per controller (equivalent to
    # concatenating each history with np.newaxis prepended).
    history_xs = np.stack(state_histories)
    history_us = np.stack(input_histories)
    history_gs = np.stack(goal_histories)
    plot_multi_result(history_xs, histories_g=history_gs, labels=controllers,
                      ylabel="x")
    plot_multi_result(history_us, histories_g=np.zeros_like(history_us),
                      labels=controllers, ylabel="u", name="input_history")
def main():
    """Parse command-line options and render the comparison plots."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--env", type=str, default="FirstOrderLag")
    arg_parser.add_argument("--result_dir", type=str, default="./result")
    run(arg_parser.parse_args())


if __name__ == "__main__":
    main()
| [
"quick1st97@gmail.com"
] | quick1st97@gmail.com |
ea9db589734e38f7ee6202aef62fb859d876b357 | 05f7f004ccd926c1611dc03473e0778d8c332e14 | /lcmf_projects/bond_click.py | c44ffa7db70e91bd17ff2cafe6c8693d6261a9f2 | [] | no_license | tongch8819/lcmf_projects | 06cd875e5b001a871cc11cdc8cf45a32f7faa105 | a7243aee94da9bbf9651e1365351c5b4ef364b80 | refs/heads/master | 2022-04-04T22:15:26.726761 | 2020-02-24T03:51:23 | 2020-02-24T03:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | import numpy as np
import click
from scipy import optimize
@click.command()
@click.option('--coefficients', default=(1,0,2), help='tuple for representation of polynomial')
def ployvalTest(coefficients: tuple =(1,0,2) ) -> str:
    """Probe np.polyval's coefficient ordering by evaluating at x = 1.

    np.polyval treats coefficients highest-degree first, so the default
    (1, 0, 2) gives 1*1**2 + 0*1 + 2 == 3, i.e. the 'descending' branch.
    """
    print('Input: ', coefficients)
    res = np.polyval(coefficients, 1)
    if res == 3:
        return 'descending'
    else:
        return 'ascending'
@click.command()
@click.option('--value', help='value of bond')
@click.option('--price', help='price of bond')
@click.option('--couponRate', help='couponRate of bond')
@click.option('--period', help='period of bond')
# NOTE(review): does not match the latest YTM shown on the Wind terminal !!!!!!!!!!
def normalBondYTM(value: float, price: float, couponRate: float, period: int):
    """Solve for the yield to maturity of a plain coupon bond.

    Builds the pricing polynomial in g = 1 + y, with -price as the leading
    coefficient, the annual coupon for periods 1..period-1, and coupon plus
    face value at maturity, then returns (real root) - 1 for the first root
    with zero imaginary part.
    """
    coupon = value * couponRate
    poly = np.array([-price] + [coupon] * (period-1) + [coupon+value])
    roots = np.roots(poly)
    for root in roots:
        if root.imag == 0.0:
            return root.real - 1
# NOTE(review): duration does not match the Wind terminal values -- verify formula.
def normalBondDuration(value: float, price: float, couponRate: float, period: int):
    """Return the modified duration of a plain coupon bond.

    Uses continuous discounting weights t * exp(-YTM * t) over the cash
    flows, divided by price, then divides by (1 + YTM).  Prints a warning
    and returns None when price is zero.
    """
    ytm = normalBondYTM(value, price, couponRate, period)
    if price == 0:
        print('price is zero')
        return
    coupon = value * couponRate
    times = np.arange(1, period + 1)
    discount_weights = times * np.exp(-ytm * times)
    cashflows = np.array([coupon] * (period - 1) + [coupon + value])
    continuous_duration = discount_weights.dot(cashflows) / price
    return continuous_duration / (1 + ytm)
if __name__ == '__main__':
    # Earlier manual checks, kept for reference:
    # print(ployvalTest())
    # print(normalBondYTM(100, 90.68, 0.0375, 5))
    # print(normalBondDuration(100, 90.68, 0.0375, 5))
    # ployvalTest is a click command, so this call runs it as a CLI entry
    # point (presumably exiting afterwards -- click behaviour, TODO confirm).
    sen = ployvalTest()
    print(sen)
"tong.cheng.8819@outlook.com"
] | tong.cheng.8819@outlook.com |
d17b534e12aa0ac404011b7b2baf87061f771646 | 893e09a68b636a214a75745453fb73ce6c618472 | /lab9_b.py | 40c65fe61ff69a6f3d9df4d95b6f349c17210272 | [] | no_license | pavangabani/DAA_Lab | 241b33d4a13156c2adaa79e82e36140a7f6c3a99 | c3cb265e412ce77106507a8491c24fd38612939f | refs/heads/main | 2023-08-15T01:22:38.135604 | 2021-09-14T16:36:06 | 2021-09-14T16:36:06 | 406,445,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | def lcs(X , Y):
    """Return the length of the longest common subsequence of X and Y.

    Bottom-up dynamic programming over an (m+1) x (n+1) table; O(m*n) time
    and space.
    """
    m = len(X)
    n = len(Y)
    # L[i][j] = LCS length of X[:i] and Y[:j]; row/column 0 form the
    # empty-prefix base case.
    L = [[None]*(n+1) for i in range(m+1)]
    for i in range(m+1):
        for j in range(n+1):
            if i == 0 or j == 0 :
                L[i][j] = 0
            elif X[i-1] == Y[j-1]:
                # Last characters match: extend the diagonal solution by one.
                L[i][j] = L[i-1][j-1]+1
            else:
                # Otherwise take the better of dropping one character.
                L[i][j] = max(L[i-1][j] , L[i][j-1])
    return L[m][n]
# Demo: LCS length of "pavan" and "gabani" (common subsequence "aan" -> 3).
x="pavan"
y="gabani"
a=lcs(x,y)
print ("Length of LCS is ",a)
| [
"pavan.gabani@gmail.com"
] | pavan.gabani@gmail.com |
ae5a6526f090a8363d28b2f1de374b3d972022b0 | 5b256252d35410f7c3239b13d8aed801dc3ad0e8 | /More Exercises/city.py | 90fd5eda5f36e8d55273d3847aae6fc6ab996092 | [] | no_license | Nmazil-Dev/PythonCrashCoursePractice | d52d3f4585fbce18011720cc3cef83f095add526 | 7bf55dc8298e70ee4b5b5345e1171c188ea8bbda | refs/heads/master | 2020-04-27T09:56:22.937795 | 2019-03-06T23:15:05 | 2019-03-06T23:15:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | def city_name(city, country, population=''):
if population == '':
name = city.title() + ', ' + country.title() + population
elif population != '':
name = city.title() + ', ' + country.title() + ' - population ' + str(population)
print (name)
city_name('brookfield', 'usa')  # demo: prints "Brookfield, Usa"
| [
"nmazil68@gmail.com"
] | nmazil68@gmail.com |
ffe965efd83b48d88452e41df5c8274713eac169 | ca565548206583a58fe8d646bfd9a6f1ba51c673 | /problem2.py | fa5313404ef249962fe28fa2f3edd13684ba5711 | [] | no_license | GLAU-TND/python-programming-assignment2-kirtimansinghcs19 | fbd772f38fa3546e579ffc2bdf99cc2b34e9937b | 5dc16c8b24186a2e00c749e14eecaac426f51e90 | refs/heads/master | 2021-01-13T22:51:02.990390 | 2020-02-23T16:32:51 | 2020-02-23T16:32:51 | 242,519,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | from itertools import permutations
def largest(l):
    """Return the largest number (as a string) formed by concatenating all
    items of *l* in some order.

    Replaces the original O(n!) scan over every permutation with an
    O(n log n) sort: string a goes before string b when a+b >= b+a, which
    yields exactly the maximum concatenation.  Output is identical to the
    permutation version, including leading zeros (e.g. [0, 0] -> "00").
    """
    from functools import cmp_to_key

    digits = list(map(str, l))
    # Negative when a should precede b (a+b is the larger concatenation).
    digits.sort(key=cmp_to_key(lambda a, b: (a + b < b + a) - (a + b > b + a)))
    return ''.join(digits)
# Read n integers from stdin and print the largest number their
# concatenation can form.
ls=[]
n=int(input('Enter the no element'))
for i in range(0,n):
    ls.append(int(input()))
print(largest(ls))
| [
"noreply@github.com"
] | GLAU-TND.noreply@github.com |
201f42a6dc8b4593fc50814c1c71e25270c0c730 | 0318d24670acc083b67d27027961ba2e060857b4 | /naiveBayes_logisticRegression/utility.py | 455d08018463e44195b6d09999257d67ea39ffe2 | [] | no_license | HC15/Machine-Learning-Email-Classification | 7d4e4ef9c76d41884c29c72179f6df8f204529a7 | fe4945bc01ac9055aec143478d6bedaa8f71eda6 | refs/heads/master | 2020-03-08T04:00:52.183730 | 2018-05-07T15:30:57 | 2018-05-07T15:30:57 | 127,908,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,819 | py | from re import sub
from os import scandir
from nltk.stem import SnowballStemmer
from Classifier import Classifier
def normalize_text(text):
    """Lowercase *text* and collapse every run of non-letter characters
    (digits, punctuation, whitespace) into a single space."""
    lowered = text.lower()
    return sub("[^a-z]+", ' ', lowered)
def read_data(directory_name):
    """Read a corpus laid out as ``directory_name/<class>/<file>.txt``.

    Returns ``{class_name: [normalized text, ...]}`` where each .txt file is
    read (utf-8, decode errors ignored) and passed through normalize_text.
    Non-directories at the top level and non-.txt entries are skipped.
    """
    data = {}
    with scandir(directory_name) as class_entries:
        for class_entry in class_entries:
            if not class_entry.is_dir():
                continue
            texts = data.setdefault(class_entry.name, [])
            with scandir(class_entry.path) as file_entries:
                for file_entry in file_entries:
                    if file_entry.is_file() and file_entry.name.endswith(".txt"):
                        with open(file_entry.path, 'r', encoding="utf8", errors="ignore") as file:
                            texts.append(normalize_text(file.read()))
    return data
def read_data_python35(directory_name):
    """Python 3.5-compatible variant of read_data (scandir had no context
    manager before 3.6), returning {class_name: [normalized text, ...]}.

    NOTE(review): the scandir iterators are never closed here, which can
    emit ResourceWarning on newer interpreters -- kept for 3.5 compatibility.
    """
    data = {}
    data_directory = scandir(directory_name)
    for class_entry in data_directory:
        if class_entry.is_dir():
            classification = class_entry.name
            if classification not in data:
                data[classification] = []
            class_directory = scandir(class_entry.path)
            for file_entry in class_directory:
                if file_entry.is_file() and file_entry.name.endswith(".txt"):
                    # Read, normalize and close each .txt file explicitly.
                    file = open(file_entry.path, 'r', encoding="utf8", errors="ignore")
                    data[classification].append(normalize_text(file.read()))
                    file.close()
    return data
def get_stop_words(stop_words_on):
    """Return the stop-word list used when tokenising documents.

    When *stop_words_on* is true this is a standard English stop-word list
    (contractions with apostrophes stripped, matching normalize_text output)
    with a single space as the first entry; otherwise only the space, so
    splitting behaviour stays consistent either way.
    """
    if stop_words_on:
        return [" ", "a", "about", "above", "after", "again", "against", "all", "am", "an", "and", "any", "are",
                "arent", "as", "at", "be", "because", "been", "before", "being", "below", "between", "both", "but",
                "by", "cant", "cannot", "could", "couldnt", "did", "didnt", "do", "does", "doesnt", "doing", "dont",
                "down", "during", "each", "few", "for", "from", "further", "had", "hadnt", "has", "hasnt", "have",
                "havent", "having", "he", "hed", "hell", "hes", "her", "here", "heres", "hers", "herself", "him",
                "himself", "his", "how", "hows", "i", "id", "ill", "im", "ive", "if", "in", "into", "is", "isnt",
                "it", "its", "its", "itself", "lets", "me", "more", "most", "mustnt", "my", "myself", "no", "nor",
                "not", "of", "off", "on", "once", "only", "or", "other", "ought", "our", "ours", "ourselves", "out",
                "over", "own", "same", "shant", "she", "shed", "shell", "shes", "should", "shouldnt", "so", "some",
                "such", "than", "that", "thats", "the", "their", "theirs", "them", "themselves", "then", "there",
                "theres", "these", "they", "theyd", "theyll", "theyre", "theyve", "this", "those", "through", "to",
                "too", "under", "until", "up", "very", "was", "wasnt", "we", "wed", "well", "were", "weve", "werent",
                "what", "whats", "when", "whens", "where", "wheres", "which", "while", "who", "whos", "whom", "why",
                "whys", "with", "wont", "would", "wouldnt", "you", "youd", "youll", "youre", "youve", "your", "yours",
                "yourself", "yourselves"]
    else:
        return [" "]
def data_to_classifiers(data, filter_stop_words):
    """Build one Classifier per document from read_data output.

    *data* maps class name -> list of normalized document texts.  Each
    document gets a Classifier tagged with its class, whose word counts are
    filled via count_words using a shared stop-word list and an English
    Snowball stemmer.  Returns the flat list of classifiers.
    """
    classifiers = []
    stop_words = get_stop_words(filter_stop_words)
    stemmer = SnowballStemmer("english")
    for classification, text_files in data.items():
        for text in text_files:
            classifier_new = Classifier(classification)
            classifier_new.count_words(stop_words, stemmer, text)
            classifiers.append(classifier_new)
    return classifiers
| [
"harvc015@gmail.com"
] | harvc015@gmail.com |
48763a0aeba89cb486860a35c4917e5e2660d135 | 7393b48ff1a403d163812ae77321981586707733 | /email_dl_sep.py | b2a8ef3c378ad7ee7dfe35898484f5b8c67a5ccc | [] | no_license | playerdefault/littlethings | 7121754cc5174c407adeb3d49e5e2f5d7796276c | 5b0fb17f51a7e9179f5523492c44a7eed2f0e47e | refs/heads/master | 2021-07-05T08:05:28.183727 | 2020-08-27T06:41:43 | 2020-08-27T06:41:43 | 168,153,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # This program separates a list of semi-colon separated emails from a text file
# and prints out the number of emails
path = input("Enter the relative path of the file with the DL List: ")
# "with" guarantees the file handle is closed (the original leaked it).
with open(path, 'r') as dl_list_file:
    dl_input = dl_list_file.read()
# str.count replaces the original per-character loop; it yields the same
# number.  NOTE(review): this counts ';' separators, so a list without a
# trailing semicolon reports one fewer than the number of addresses --
# behaviour kept identical to the original.
numberOfEmails = dl_input.count(";")
print("The number of emails is: " + str(numberOfEmails))
| [
"swaraj.mohapatra@outlook.in"
] | swaraj.mohapatra@outlook.in |
142f68111255fe08b0cfa29a4378494361ef2c57 | 8ee5dcbdbd407eb5f294d430813b16eca22f571c | /data/HW5/hw5_253.py | 628a39851ed1f06194065eadcb2c20d9da276de9 | [] | no_license | MAPLE-Robot-Subgoaling/IPT | 5e60e2ee4d0a5688bc8711ceed953e76cd2ad5d9 | f512ea3324bfdceff8df63b4c7134b5fcbb0514e | refs/heads/master | 2021-01-11T12:31:00.939051 | 2018-08-13T23:24:19 | 2018-08-13T23:24:19 | 79,373,489 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | def main():
    # Read the box dimensions and the two drawing symbols from the user.
    # NOTE(review): "thebox" typo lives in the prompt string (runtime text),
    # so it is left untouched here.
    width = int(input("please enter the width of the box "))
    height = int(input("please enter the height of thebox "))
    sym = input("please enter a symbol for the outline ")
    fill = input("please enter a fill symbol ")
    for h in range(height):
        for w in range(width):
            # Border cell when on the first/last row or column, else fill;
            # cells are separated by single spaces.
            print(sym if h in(0,height-1) or w in(0,width-1) else fill, end = ' ')
        print()


main()
| [
"mneary1@umbc.edu"
] | mneary1@umbc.edu |
27a16c27e996906294903e01303e65e7a1d5d0ff | b0769f847d8f2c945f2552891ea7b48e2fad3a0f | /tweetTrend/bin/wheel | f7b6559dcc982dc09a8bff038e4c0507de0ea03c | [] | no_license | jiayangli2/twittmap | b808090acd66a6533e7e17a818512d9a01912632 | 772b9a868c0a9a0fe52c87bae77d43bc71fd9003 | refs/heads/master | 2021-03-22T01:27:09.600457 | 2017-04-09T20:38:37 | 2017-04-09T20:38:37 | 84,241,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | #!/home/emmittxu/Desktop/TweetTrend/tweetTrend/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.pyw" or ".exe") from
    # argv[0] so wheel's CLI sees its canonical program name, then exit
    # with whatever status wheel.tool.main() returns.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"gx2127@columbia.edu"
] | gx2127@columbia.edu | |
fd6b8c1a24799e9d64be437cb85b6a6c16c1e23c | 9fe79da67efcd12cae6c61ea360960a87e8fe805 | /web/urls.py | b7afe9636b634ea342a3349487fb98dd5bac0c64 | [] | no_license | ZuiYee/EducationSystem | 00abaa482c393c1b50dac598d4b3ddfdd6268cd7 | 07bf141ea4c213d966105273d3feb9ad997f556f | refs/heads/master | 2020-04-09T23:42:44.673994 | 2019-01-13T06:14:17 | 2019-01-13T06:14:17 | 160,663,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from django.conf.urls import url
from . import views
# URL namespace for reverse() lookups, e.g. reverse('web:studentProfile').
app_name = 'web'
urlpatterns = [
    # Profile pages for the two user roles.
    url(r'^studentProfile/', views.studentProfile, name='studentProfile'),
    url(r'^teacherProfile/', views.teacherProfile, name='teacherProfile'),
    # Result-parsing endpoints.
    url(r'^studentparseresult/', views.studentparseresult, name='studentparseresult'),
    url(r'^teacherparseresult/', views.teacherparseresult, name='teacherparseresult'),
]
"39691460+ZuiYee@users.noreply.github.com"
] | 39691460+ZuiYee@users.noreply.github.com |
0fe08899b3a8f27f944baf7bfb39b3fcdf8ebdff | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/synapse/azure-synapse-accesscontrol/azure/synapse/accesscontrol/aio/__init__.py | 8eafa989fcbc836fcc407acd2ea0859726442db7 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 558 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._access_control_client import AccessControlClient
__all__ = ['AccessControlClient']
| [
"noreply@github.com"
] | Azure.noreply@github.com |
49e35d732d050a8de37689f7459907f5c429e2fa | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/AlipayCloudDevopsDictQueryRequest.py | 980977a75413bdc34d657ea1e8ece1c6b0ddb700 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,955 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCloudDevopsDictQueryModel import AlipayCloudDevopsDictQueryModel
class AlipayCloudDevopsDictQueryRequest(object):
    """Request wrapper for the ``alipay.cloud.devops.dict.query`` OpenAPI call.

    Holds the business payload (``biz_model`` / ``biz_content``), common
    gateway fields (version, terminal info, URLs) and user-defined extra
    parameters, and serialises them via :meth:`get_params`.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model object or a plain dict, which is
        # converted through the generated model's from_alipay_dict.
        if isinstance(value, AlipayCloudDevopsDictQueryModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayCloudDevopsDictQueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignores non-dict assignments (kept as-is: generated code).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        # Lazily create the user-defined parameter dict on first use.
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Serialise the request into the flat parameter dict sent to the
        gateway; biz payloads are JSON-encoded with compact separators."""
        params = dict()
        params[P_METHOD] = 'alipay.cloud.devops.dict.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        # This API has no file-upload parameters; always empty.
        multipart_params = dict()
        return multipart_params
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
457107eadcf898c5b73e8308dd0dda559a8e82c0 | 1eefc1fc19dd4b0ded6eaad75d232450d00e0eba | /bench/pya.py | e72d2be49042f8110990e00bb7955677fd30d4af | [
"MIT"
] | permissive | pskopnik/apq | 2dbfa3e56c6e5c836b9d38a9e4b6bdf2f83eb44a | 827e722ec604d2f7c050f43748136613c3cd3d70 | refs/heads/master | 2021-07-07T08:32:40.204240 | 2020-02-12T13:33:41 | 2020-02-12T13:33:41 | 237,626,912 | 4 | 1 | MIT | 2021-04-20T19:28:42 | 2020-02-01T14:31:05 | Python | UTF-8 | Python | false | false | 2,319 | py | from . import bench, BenchTimer, main_bench_registered
from .utils import StringSource
from .py.keyedpq_a import PyKeyedPQA
from random import random as random_01
@bench()
def bench_add(b: BenchTimer) -> None:
s = StringSource()
s_offset = StringSource()
pq: PyKeyedPQA[str, None] = PyKeyedPQA()
for _ in range(10000):
pq.add(next(s), random_01(), None)
next(s_offset)
with b.time() as t:
for _ in t:
pq.add(next(s), random_01(), None)
with b.offset() as t:
for _ in t:
next(s_offset)
random_01()
@bench()
def bench_pop(b: BenchTimer) -> None:
    # Benchmark pop(): pre-fill with b.n + 10000 entries so every timed
    # iteration has something to pop; the offset loop is empty.
    s = StringSource()
    pq: PyKeyedPQA[str, None] = PyKeyedPQA()
    for _ in range(b.n + 10000):
        pq.add(next(s), random_01(), None)
    with b.time() as t:
        for _ in t:
            pq.pop()
    with b.offset() as t:
        for _ in t:
            pass
@bench()
def bench_pop_add(b: BenchTimer) -> None:
    # Benchmark a pop immediately followed by an add at steady queue size.
    s = StringSource()
    s_offset = StringSource()
    pq: PyKeyedPQA[str, None] = PyKeyedPQA()
    for _ in range(10000):
        pq.add(next(s), random_01(), None)
        next(s_offset)
    with b.time() as t:
        for _ in t:
            pq.pop()
            pq.add(next(s), random_01(), None)
    with b.offset() as t:
        for _ in t:
            next(s_offset)
            random_01()
@bench()
def bench_change_value(b: BenchTimer) -> None:
    # Benchmark change_value() on a random existing key; the offset loop
    # repeats only the key selection and random draw.
    s = StringSource()
    pq: PyKeyedPQA[str, None] = PyKeyedPQA()
    for _ in range(10000):
        pq.add(next(s), random_01(), None)
    with b.time() as t:
        for _ in t:
            key = s.rand_existing()
            pq.change_value(key, random_01())
    with b.offset() as t:
        for _ in t:
            key = s.rand_existing()
            random_01()
@bench()
def bench_remove(b: BenchTimer) -> None:
    # Benchmark key-based removal (del pq[key]).  s_remove replays the
    # same key sequence that filled the queue, so every delete hits.
    s = StringSource()
    s_remove = StringSource()
    s_offset = StringSource()
    pq: PyKeyedPQA[str, None] = PyKeyedPQA()
    for _ in range(b.n + 10000):
        pq.add(next(s), random_01(), None)
        next(s_offset)
    with b.time() as t:
        for _ in t:
            key = next(s_remove)
            del pq[key]
    with b.offset() as t:
        for _ in t:
            key = next(s_offset)


if __name__ == '__main__':
    main_bench_registered()
| [
"paul@skopnik.me"
] | paul@skopnik.me |
87eea3930704d7e2f8216d0c4e219c57beb148a0 | f6973cc740981bf4ce80b5d1dc8e12f90d38ef42 | /XDG_CACHE_HOME/Microsoft/Python Language Server/stubs.v1/sOkxR-HicgX79sgQZAyBR1W09XbOY-yF9DMdXi8nVww=/rwobject.cpython-36m-x86_64-linux-gnu.pyi | fdf731a3699e1443db437aea28576483fb0d807e | [] | no_license | fdr896/Trumper-Jumper | 88783cf97979a0e9f8d2a3f64c606a67d4dd1719 | 77f813b7267451f3156cf6bddad76081c29a25f0 | refs/heads/master | 2020-06-02T08:41:50.978639 | 2019-06-10T05:36:24 | 2019-06-10T05:36:24 | 191,102,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | pyi | pygame 1.9.4
Hello from the pygame community. https://www.pygame.org/contribute.html
import builtins as _mod_builtins
_PYGAME_C_API = _mod_builtins.PyCapsule()
__doc__ = 'SDL_RWops support'
__file__ = '/home/fed/.local/lib/python3.6/site-packages/pygame/rwobject.cpython-36m-x86_64-linux-gnu.so'
__name__ = 'pygame.rwobject'
__package__ = 'pygame'
# Auto-generated signature stub; the real implementation lives in the
# compiled pygame extension module.
def encode_file_path(obj=None, etype=None):
    'encode_file_path([obj [, etype]]) -> bytes or None\nEncode a Unicode or bytes object as a file system path'
    pass
# Auto-generated signature stub; the real implementation lives in the
# compiled pygame extension module.
def encode_string(obj=None, encoding=None, errors=None, etype=None):
    'encode_string([obj [, encoding [, errors [, etype]]]]) -> bytes or None\nEncode a Unicode or bytes object'
    pass
| [
"fiosetrova@gmail.com"
] | fiosetrova@gmail.com |
cf3970f95cd0df134c66e7e2c608fab1e79e582a | 0031bd210e25f9602a8ee3cf581c44e8e8f3a00f | /Junior/COSC0023-Py/Exercise/数据画图.py | a0a7647e524821d9fb8ef041ddf829e2fe99deef | [
"MIT"
] | permissive | TiffanyChou21/University | d991d30cad3b28bb5abc929faa6d530219a1d844 | 9584fa6b052a59ce01a256efc77add5bbec68d98 | refs/heads/master | 2020-09-29T10:54:00.297491 | 2020-08-16T03:47:57 | 2020-08-16T03:47:57 | 227,021,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | py | #!/usr/bin/env python
# coding: utf-8
# In[52]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
# plt.rcParams['font.sans-serif'] = [u'SimHei']  # Chinese glyphs would not render, so this stays disabled
# plt.rcParams['axes.unicode_minus'] = False
warnings.filterwarnings('ignore')
pd.set_option('display.max_rows',None)
# Column names stay in Chinese: they are looked up verbatim below
# (sepal length/width, petal length/width, class).
iris=pd.read_csv('iris.csv',names=['花萼长度','花萼宽度','花瓣长度','花瓣宽度','类别'])
# ## Exercise 1
# Scatter plot of sepal length vs. sepal width.
# In[19]:
plt.xlabel('length')
plt.ylabel('width')
plt.scatter(iris['花萼长度'],iris['花萼宽度'],c='r',marker='.')
# ## Exercise 2
# Line plot of sepal width against sepal length sorted by length,
# including a chart title and axis labels.
# In[20]:
plt.xlabel('length')
plt.ylabel('width')
plt.title("width-length")
plt.plot(iris.sort_index(by='花萼长度')['花萼长度'],iris['花萼宽度'],'r')
# ## Exercise 3
# Scatter plot of petal length vs. petal width, coloured by class.
# In[42]:
plt.xlabel('length')
plt.ylabel('width')
plt.scatter(iris['花瓣长度'], iris['花瓣宽度'], c=iris['类别'])
# ## Exercise 4
# Bar chart of the mean of each feature.
# In[30]:
iris1=iris.drop(['类别'],axis=1)
m=np.array(iris1.mean())
# In[45]:
# labels = ['花萼长度', '花萼宽度', '花瓣长度', '花瓣宽度']  # Chinese labels had display problems; 's' = sepal, 'f' = petal (flower)
labels = ['slength', 'swidth', 'flength', 'fwidth']
plt.bar(np.arange(4)+1,m,color='c',tick_label=labels)
for x, y in zip(np.arange(4)+1, m):
    plt.text(x , y, '%.2f' % y, ha='center', va='bottom')
# ## Exercise 5
# Pie chart of the sample-count percentage of each flower class.
# In[67]:
count_df = iris.groupby('类别').count()
test_df = pd.DataFrame(count_df)
perc=test_df/test_df.sum()
perc=perc.drop(['花萼宽度','花瓣长度','花瓣宽度'],axis=1)
perc=np.array(perc)
# In[70]:
plt.pie(perc,labels=['0','1','2'],autopct='%1.1f')
"TiffanyChou21@163.com"
] | TiffanyChou21@163.com |
379b155116a31b53d67b638b0922f8cf82ef99a4 | 6aef2fdd5b98038fc6ecc7551dd76dccf370c4ae | /without_variance/GPOMDP_SVRG_WV_ada_bv_wbas.py | 25116e70f8b7cc295ad1394c40f7e36f3d85bfe8 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Bobeye/rllab | 29b1cf3f29b748f93af4ac103d1a0eaa40290e7f | 53c0afb73f93c4a78ff21507914d7f7735c21ea9 | refs/heads/master | 2020-05-02T07:18:17.323566 | 2019-03-26T02:34:02 | 2019-03-26T02:34:02 | 177,814,299 | 0 | 0 | NOASSERTION | 2019-03-26T15:14:46 | 2019-03-26T15:14:45 | null | UTF-8 | Python | false | false | 18,229 | py | from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.envs.normalized_env import normalize
import numpy as np
import theano
import theano.tensor as TT
from rllab.sampler import parallel_sampler
from lasagne.updates import sgd
import matplotlib.pyplot as plt
from rllab.envs.gym_env import GymEnv
def unpack(i_g):
    """Flatten a 4-part policy-gradient structure into one 1-D numpy vector.

    Expects, in order: a 2-D weight matrix (flattened row-major), a bias
    vector, a one-row matrix (its single row is used) and a final vector.
    """
    parts = [np.array(p) for p in i_g]
    flat_weights = parts[0].reshape(parts[0].shape[0] * parts[0].shape[1])
    return np.concatenate((flat_weights, parts[1], parts[2][0], parts[3]))
def compute_snap_batch(observations,actions,d_rewards,n_traj,n_part):
    # Split the n_traj snapshot trajectories into n_part mini-batches and
    # return the flattened GPOMDP gradient of each mini-batch.
    # NOTE(review): relies on module globals T and compute_grad_snap;
    # np.int is a deprecated alias removed in NumPy >= 1.24 — this file
    # targets the old rllab/theano stack.
    n=n_traj
    i=0
    svrg_snap=list()
    while(n-np.int(n_traj/n_part)>=0):
        n=n-np.int(n_traj/n_part)
        s_g = compute_grad_snap(observations[i:i+np.int(n_traj/n_part)],
            actions[i:i+np.int(n_traj/n_part)],d_rewards[i:i+np.int(n_traj/n_part)],np.int(n_traj/n_part),T)
        i += np.int(n_traj/n_part)
        svrg_snap.append(unpack(s_g))
    return svrg_snap
def estimate_variance(observations,actions,d_rewards,snap_grads,n_traj,n_traj_s,n_part,M,N):
n=n_traj
i=0
svrg=list()
j=0
while(n-np.int(n_traj/n_part)>=0):
n=n-np.int(n_traj/n_part)
x = unpack(compute_grad_svrg(observations[i:i+np.int(n_traj/n_part)],actions[i:i+np.int(n_traj/n_part)],
d_rewards[i:i+np.int(n_traj/n_part)],np.int(n_traj/n_part),T,False,None))*np.sqrt(np.int(n_traj/n_part)/M)
g = snap_grads[j]*np.sqrt(np.int(n_traj_s/n_part)/N)+x
g=g/n_traj*n_part
i+=np.int(n_traj/n_part)
j+=1
svrg.append(g)
return (np.diag(np.cov(np.matrix(svrg),rowvar=False)).sum())
def compute_grad_snap(observations,actions,d_rewards,N,T):
    # GPOMDP gradient over N trajectories with a per-timestep,
    # per-parameter optimal baseline:
    #   baseline[t] = E[g_t^2 * R_t] / E[g_t^2]
    # where g_t are the per-step score-function gradients from the
    # compiled Theano function f_baseline_g (module global).
    # Trajectories may be shorter than T; minT tracks the shortest.
    minT=T
    cum_num = []
    cum_den = []
    for ob,ac,rw in zip(observations,actions,d_rewards):
        if minT>len(ob):
            minT=len(ob)
        x=f_baseline_g(ob, ac)
        z = [y**2 for y in x]
        index2 = np.arange(len(rw))
        # Per-step squared gradients weighted by discounted reward
        # (numerator) and unweighted (denominator) of the baseline.
        prov_der_num = [y[i]*rw[i] for i in index2 for y in z ]
        prov_der_den = [y[i] for i in index2 for y in z]
        cum_num.append(prov_der_num)
        cum_den.append(prov_der_den)
    mean_num = []
    mean_den = []
    baseline = []
    for i in range(minT):
        mean_num.append(cum_num[0][len(x)*i:len(x)*(i+1)])
        mean_den.append(cum_den[0][len(x)*i:len(x)*(i+1)])
    index = np.arange(len(mean_num[0]))
    for i in range(minT):
        for j in range(1,len(cum_den)):
            mean_num[i] = [mean_num[i][pos] + cum_num[j][len(x)*i:len(x)*(i+1)][pos] for pos in index]
            mean_den[i] = [mean_den[i][pos] + cum_den[j][len(x)*i:len(x)*(i+1)][pos] for pos in index]
    for i in range(minT):
        mean_num[i] = [mean_num[i][pos]/N for pos in index]
        mean_den[i] = [mean_den[i][pos]/N for pos in index]
        baseline.append([mean_num[i][pos]/mean_den[i][pos] for pos in index])
    # Pad the baseline with zeros for timesteps beyond the shortest trajectory.
    zero_grad = [mean_den[0][pos]*0 for pos in index]
    for i in range(minT,T):
        baseline.append(zero_grad)
    # Accumulate the baseline-corrected GPOMDP gradient; f_train is the
    # compiled Theano jacobian of the negated log-likelihood.
    cum = zero_grad
    s_g = f_train(observations[0], actions[0])
    index2 = np.arange(len(d_rewards[0]))
    s_g = [y[i] for i in index2 for y in s_g]
    for i in range(len(observations[0])):
        R = [(d_rewards[0][i]-baseline[i][pos])*s_g[len(zero_grad)*i:len(zero_grad)*(i+1)][pos] for pos in index]
        cum = [R[pos]+cum[pos] for pos in index]
    for ob,ac,rw in zip(observations[1:],actions[1:],d_rewards[1:]):
        s_g = f_train(ob, ac)
        index2 = np.arange(len(rw))
        s_g = [y[i] for i in index2 for y in s_g]
        for i in range(len(ob)):
            R = [(rw[i]-baseline[i][pos])*s_g[len(zero_grad)*i:len(zero_grad)*(i+1)][pos] for pos in index]
            cum = [R[pos]+cum[pos] for pos in index]
    # Average over trajectories.
    cum = [cum[pos]/N for pos in index]
    return cum
def compute_grad_svrg(observations,actions,d_rewards,M,T,add_full,fg):
    # SVRG correction term over M trajectories: the difference between the
    # importance-weighted gradient of the current policy and the snapshot
    # policy's gradient, with its own per-timestep optimal baseline.
    # When add_full is True, the full snapshot gradient fg is added to the
    # correction, yielding the complete SVRG gradient estimate.
    # Relies on module globals f_baseline_g, f_baseline_g_snap,
    # f_importance_weights (compiled Theano functions).
    minT=T
    cum_num = []
    cum_den = []
    for ob,ac,rw in zip(observations,actions,d_rewards):
        if minT>len(ob):
            minT=len(ob)
        x=f_baseline_g(ob, ac)
        index = np.arange(len(x))
        x_snap=f_baseline_g_snap(ob, ac)
        iw = f_importance_weights(ob,ac)
        index2 = np.arange(len(rw))
        x_iw = [y[i]*iw[i] for i in index2 for y in x ]
        x_snap_bv = [y[i] for i in index2 for y in x_snap]
        index3 = np.arange(len(x_snap_bv))
        x_dif = [x_iw[i]-x_snap_bv[i] for i in index3]
        z = [y**2 for y in x_dif]
        prov_der_num = [z[len(x)*i:len(x)*(i+1)][pos]*rw[i] for i in index2 for pos in index ]
        prov_der_den = z
        cum_num.append(prov_der_num)
        cum_den.append(prov_der_den)
    mean_num = []
    mean_den = []
    baseline = []
    for i in range(minT):
        mean_num.append(cum_num[0][len(x)*i:len(x)*(i+1)])
        mean_den.append(cum_den[0][len(x)*i:len(x)*(i+1)])
    for i in range(minT):
        for j in range(1,len(cum_den)):
            mean_num[i] = [mean_num[i][pos] + cum_num[j][len(x)*i:len(x)*(i+1)][pos] for pos in index]
            mean_den[i] = [mean_den[i][pos] + cum_den[j][len(x)*i:len(x)*(i+1)][pos] for pos in index]
    for i in range(minT):
        mean_num[i] = [mean_num[i][pos]/M for pos in index]
        # 1e-16 guards against division by zero in the baseline.
        mean_den[i] = [mean_den[i][pos]/M+1e-16 for pos in index]
        baseline.append([mean_num[i][pos]/mean_den[i][pos] for pos in index])
    zero_grad = [mean_den[0][pos]*0 for pos in index]
    for i in range(minT,T):
        baseline.append(zero_grad)
    cum = zero_grad
    s_g = f_baseline_g(observations[0], actions[0])
    s_g_snap_p=f_baseline_g_snap(observations[0], actions[0])
    iw = f_importance_weights(observations[0], actions[0])
    index2 = np.arange(len(d_rewards[0]))
    s_g_iw = [y[i]*iw[i] for i in index2 for y in s_g ]
    s_g_snap = [y[i] for i in index2 for y in s_g_snap_p]
    for i in range(len(observations[0])):
        R = [(d_rewards[0][i]-baseline[i][pos])*(s_g_iw[len(zero_grad)*i:len(zero_grad)*(i+1)][pos]-s_g_snap[len(zero_grad)*i:len(zero_grad)*(i+1)][pos]) for pos in index]
        cum = [R[pos]+cum[pos] for pos in index]
    for ob,ac,rw in zip(observations[1:],actions[1:],d_rewards[1:]):
        s_g = f_baseline_g(ob, ac)
        s_g_snap=f_baseline_g_snap(ob, ac)
        iw = f_importance_weights(ob, ac)
        index2 = np.arange(len(rw))
        s_g_iw = [y[i]*iw[i] for i in index2 for y in s_g ]
        s_g_snap = [y[i] for i in index2 for y in s_g_snap]
        for i in range(len(ob)):
            # NOTE(review): sign here is (-iw + snap), the opposite of the
            # first-trajectory loop above (iw - snap). Possibly intentional
            # given the negated surrogate elsewhere, but it looks like a
            # bug — confirm against the paper/experiment before relying on it.
            R = [(rw[i]-baseline[i][pos])*(-s_g_iw[len(zero_grad)*i:len(zero_grad)*(i+1)][pos]+s_g_snap[len(zero_grad)*i:len(zero_grad)*(i+1)][pos]) for pos in index]
            cum = [R[pos]+cum[pos] for pos in index]
    cum = [cum[pos]/M for pos in index]
    if (add_full):
        cum = [cum[pos] + fg[pos] for pos in index]
    return cum
# Experiment configuration: environment, policies and hyperparameters.
load_policy=True
# normalize() makes sure that the actions for the environment lies
# within the range [-1, 1] (only works for environments with continuous actions)
env = normalize(CartpoleEnv())
#env = GymEnv("InvertedPendulum-v1")
# Initialize a neural network policy with a single hidden layer of 8 hidden units
policy = GaussianMLPPolicy(env.spec, hidden_sizes=(8,),learn_std=False)
# snap_policy holds the SVRG snapshot; back_up_policy is used to roll back
# when the adaptive variance test rejects a sub-iteration.
snap_policy = GaussianMLPPolicy(env.spec, hidden_sizes=(8,),learn_std=False)
back_up_policy = GaussianMLPPolicy(env.spec, hidden_sizes=(8,),learn_std=False)
parallel_sampler.populate_task(env, snap_policy)
# policy.distribution returns a distribution object under rllab.distributions. It contains many utilities for computing
# distribution-related quantities, given the computed dist_info_vars. Below we use dist.log_likelihood_sym to compute
# the symbolic log-likelihood. For this example, the corresponding distribution is an instance of the class
# rllab.distributions.DiagonalGaussian
dist = policy.distribution
snap_dist = snap_policy.distribution
# We will collect 100 trajectories per iteration
N = 100
# Each trajectory will have at most 100 time steps
T = 100
#We will collect M secondary trajectories
M = 10
#Number of sub-iterations
#m_itr = 100
# Number of iterations
#n_itr = np.int(10000/(m_itr*M+N))
# Set the discount factor for the problem
discount = 0.99
# Learning rate for the gradient update
learning_rate = 0.00005
#perc estimate
perc_est = 0.6
#tot trajectories
s_tot = 10000
partition = 3
# porz: number of snapshot trajectories reserved for variance estimation.
porz = np.int(perc_est*N)
# Symbolic Theano graph: surrogate losses, gradients and compiled functions.
observations_var = env.observation_space.new_tensor_variable(
    'observations',
    # It should have 1 extra dimension since we want to represent a list of observations
    extra_dims=1
)
actions_var = env.action_space.new_tensor_variable(
    'actions',
    extra_dims=1
)
d_rewards_var = TT.vector('d_rewards')
importance_weights_var = TT.vector('importance_weight')
bl = TT.vector()
# policy.dist_info_sym returns a dictionary, whose values are symbolic expressions for quantities related to the
# distribution of the actions. For a Gaussian policy, it contains the mean and (log) standard deviation.
dist_info_vars = policy.dist_info_sym(observations_var)
snap_dist_info_vars = snap_policy.dist_info_sym(observations_var)
# Negated log-likelihood (gradient ascent implemented via sgd on -J).
surr = - dist.log_likelihood_sym_1traj_GPOMDP(actions_var, dist_info_vars)
params = policy.get_params(trainable=True)
snap_params = snap_policy.get_params(trainable=True)
# Likelihood ratio between snapshot and current policy (importance weights).
importance_weights = dist.likelihood_ratio_sym_1traj_GPOMDP(actions_var,snap_dist_info_vars,dist_info_vars)
grad = TT.jacobian(surr, params)
# Placeholders for externally computed gradients fed to the sgd update.
eval_grad1 = TT.matrix('eval_grad0',dtype=grad[0].dtype)
eval_grad2 = TT.vector('eval_grad1',dtype=grad[1].dtype)
eval_grad3 = TT.col('eval_grad3',dtype=grad[2].dtype)
eval_grad4 = TT.vector('eval_grad4',dtype=grad[3].dtype)
surr_on1 = TT.sum(- dist.log_likelihood_sym_1traj_GPOMDP(actions_var,dist_info_vars)*d_rewards_var*importance_weights_var)
surr_on2 = TT.sum(snap_dist.log_likelihood_sym_1traj_GPOMDP(actions_var,snap_dist_info_vars)*d_rewards_var)
grad_SVRG =[sum(x) for x in zip([eval_grad1, eval_grad2, eval_grad3, eval_grad4], theano.grad(surr_on1,params),theano.grad(surr_on2,snap_params))]
grad_SVRG_4v = [sum(x) for x in zip(theano.grad(surr_on1,params),theano.grad(surr_on2,snap_params))]
grad_var = theano.grad(surr_on1,params)
cum_likelihood = dist.log_likelihood_sym_1traj_GPOMDP(actions_var, dist_info_vars)
cum_likelihood_snap = dist.log_likelihood_sym_1traj_GPOMDP(actions_var, snap_dist_info_vars)
# Per-timestep score-function gradients via scan (one jacobian row per step).
all_der, update_scan = theano.scan(lambda i, cum_likelihood: theano.grad(cum_likelihood[i], params),
    sequences=TT.arange(cum_likelihood.shape[0]),
    non_sequences=cum_likelihood)
all_der_snap, update_scan = theano.scan(lambda i, cum_likelihood_snap: theano.grad(cum_likelihood_snap[i], snap_params),
    sequences=TT.arange(cum_likelihood_snap.shape[0]),
    non_sequences=cum_likelihood_snap)
f_train = theano.function(
    inputs = [observations_var, actions_var],
    outputs = grad
)
f_update = theano.function(
    inputs = [eval_grad1, eval_grad2, eval_grad3, eval_grad4],
    outputs = None,
    updates = sgd([eval_grad1, eval_grad2, eval_grad3, eval_grad4], params, learning_rate=learning_rate)
)
f_importance_weights = theano.function(
    inputs = [observations_var, actions_var],
    outputs = importance_weights
)
f_update_SVRG = theano.function(
    inputs = [eval_grad1, eval_grad2, eval_grad3, eval_grad4],
    outputs = None,
    updates = sgd([eval_grad1, eval_grad2, eval_grad3, eval_grad4], params, learning_rate=learning_rate)
)
f_train_SVRG = theano.function(
    inputs=[observations_var, actions_var, d_rewards_var, eval_grad1, eval_grad2, eval_grad3, eval_grad4,importance_weights_var],
    outputs=grad_SVRG,
)
f_train_SVRG_4v = theano.function(
    inputs=[observations_var, actions_var, d_rewards_var,importance_weights_var],
    outputs=grad_SVRG_4v,
)
var_SVRG = theano.function(
    inputs=[observations_var, actions_var, d_rewards_var, importance_weights_var],
    outputs=grad_var,
)
f_baseline_g = theano.function(
    inputs = [observations_var, actions_var],
    outputs = all_der
)
f_baseline_g_snap = theano.function(
    inputs = [observations_var, actions_var],
    outputs = all_der_snap
)
# Main experiment: 10 independent runs of SVRG-GPOMDP with an adaptive
# batch/variance test deciding when to refresh the snapshot.
alla = []
alla2 = []
alla3 = []
for k in range(10):
    alla4=[]
    if (load_policy):
        # Start every run from the same saved parameters for comparability.
        snap_policy.set_param_values(np.loadtxt('policy_novar.txt'), trainable=True)
        policy.set_param_values(np.loadtxt('policy_novar.txt'), trainable=True)
    avg_return = np.zeros(s_tot)
    #np.savetxt("policy_novar.txt",snap_policy.get_param_values(trainable=True))
    j=0
    while j<s_tot-N:
        # Snapshot phase: sample N full trajectories with the snapshot policy.
        paths = parallel_sampler.sample_paths_on_trajectories(snap_policy.get_param_values(),N,T,show_bar=False)
        #baseline.fit(paths)
        j+=N
        observations = [p["observations"] for p in paths]
        actions = [p["actions"] for p in paths]
        d_rewards = [p["rewards"] for p in paths]
        # Discount rewards in place: r_t * gamma^t.
        temp = list()
        for x in d_rewards:
            z=list()
            t=1
            for y in x:
                z.append(y*t)
                t*=discount
            temp.append(np.array(z))
        d_rewards=temp
        s_g = compute_grad_snap(observations,actions,d_rewards,N,T)
        b=compute_snap_batch(observations[0:porz],actions[0:porz],d_rewards[0:porz],porz,partition)
        f_update(s_g[0],s_g[1],s_g[2],s_g[3])
        avg_return[j-N:j] = np.repeat(np.mean([sum(p["rewards"]) for p in paths]),N)
        var_sgd = np.cov(np.matrix(b),rowvar=False)
        var_batch = (var_sgd)*(porz/partition)/M
        print(str(j-1)+' Average Return:', avg_return[j-1])
        back_up_policy.set_param_values(policy.get_param_values(trainable=True), trainable=True)
        n_sub = 0
        while j<s_tot-M:
            # Sub-iteration phase: keep doing cheap M-trajectory SVRG steps
            # until the SVRG variance exceeds the plain-batch variance.
            iw_var = f_importance_weights(observations[0],actions[0])
            var_svrg = (estimate_variance(observations[porz:],actions[porz:],d_rewards[porz:],b,N-porz,porz,partition,M,N))
            var_dif = var_svrg-(np.diag(var_batch).sum())
            alla2.append(var_svrg)
            alla3.append((np.diag(var_batch).sum()))
            alla4.append(np.mean(iw_var))
            if (var_dif>0):
                # Variance test failed: roll back the last step and refresh
                # the snapshot with a new large batch.
                policy.set_param_values(back_up_policy.get_param_values(trainable=True), trainable=True)
                break
            j += M
            n_sub+=1
            sub_paths = parallel_sampler.sample_paths_on_trajectories(snap_policy.get_param_values(),M,T,show_bar=False)
            #baseline.fit(paths)
            sub_observations=[p["observations"] for p in sub_paths]
            sub_actions = [p["actions"] for p in sub_paths]
            sub_d_rewards = [p["rewards"] for p in sub_paths]
            temp = list()
            for x in sub_d_rewards:
                z=list()
                t=1
                for y in x:
                    z.append(y*t)
                    t*=discount
                temp.append(np.array(z))
            sub_d_rewards=temp
            iw = f_importance_weights(sub_observations[0],sub_actions[0])
            back_up_policy.set_param_values(policy.get_param_values(trainable=True), trainable=True)
            # NOTE(review): this feeds the snapshot batch (observations/
            # actions/d_rewards), not the freshly sampled sub_* batch, to
            # compute_grad_svrg — looks suspicious; confirm intent.
            g = compute_grad_svrg(observations,actions,d_rewards,M,T,True,s_g)
            f_update(g[0],g[1],g[2],g[3])
            avg_return[j-M:j] = np.repeat(np.mean([sum(p["rewards"]) for p in sub_paths]),M)
            #print(str(j)+' Average Return:', avg_return[j])
            snap_policy.set_param_values(policy.get_param_values(trainable=True), trainable=True)
    plt.plot(avg_return[::10])
    plt.show()
    plt.plot(alla2)
    plt.plot(alla3)
    plt.show()
    alla.append(avg_return)
# Average learning curve over the 10 runs.
alla_mean = [np.mean(x) for x in zip(*alla)]
plt.plot(alla_mean)
plt.plot()
np.savetxt("GPOMDP_SVRG_wbas",alla_mean)
# Post-hoc comparison plots (notebook scratch code).
# NOTE(review): several names used below (gpomdp_svrg_ada_wb,
# gpomdp_svrg_ada_wb_bv_m7) have their np.loadtxt lines commented out, so
# running this top-to-bottom raises NameError — presumably the variables
# were still alive in the original interactive session.
gpomdp = np.loadtxt("GPOMDP_l5e-05")
gpomdpbas = np.loadtxt("GPOMDP_with_base")
#gpomdp_svrg=np.loadtxt("GPOMDP_SVRG_5e-5")
#gpomdp_svrg_ada_wb = np.loadtxt("GPOMDP_SVRG_5e-5_ada_wb")
plt.plot(gpomdp )
plt.plot(gpomdpbas)
plt.plot(gpomdp_svrg_ada_wb[::10])
plt.plot(alla_mean[::10])
plt.legend(['gpomdp','gpomdp baseline','gpomdp_svrg','gpomdp_svrg baseline'], loc='lower right')
plt.savefig("baseline_verbaseline.jpg", figsize=(32, 24), dpi=160)
#gpomdp_svrg_ada_wb_bv_m7 = np.loadtxt("GPOMDP_SVRG_5e-5_ada_b2")
#gpomdp_svrg_ada_wb_bv_m5 = np.loadtxt("GPOMDP_SVRG_5e-5_ada_b2_m5")
#gpomdp_svrg_ada_wb_bv_m3 = np.loadtxt("GPOMDP_SVRG_5e-5_ada_b2_m3")
#gpomdp_svrg_ada_wb_bv_s15 = np.loadtxt("GPOMDP_SVRG_5e-5_ada_b2_s15")
#
plt.plot(gpomdp)
#plt.plot(gpomdp_svrg)
#plt.plot(gpomdp_svrg_ada_wb[::10])
plt.plot(gpomdp_svrg_ada_wb_bv_m7[::10])
#plt.plot(gpomdp_svrg_ada_wb_bv_m3[::10])
#plt.plot(gpomdp_svrg_ada_wb_bv_m5[::10])
#plt.plot(gpomdp_svrg_ada_wb_bv_s15[::10])
#plt.legend(['gpomdp','gpomdp_svrg','gpomdp_svrg_ada_wb','gpomdp_svrg_m7','gpomdp_svrg_s15'], loc='lower right')
#plt.savefig("adapt_nnv.jpg", figsize=(32, 24), dpi=160)
plt.show()
#uni = np.ones(640,dtype=np.int)
#for i in range(40):
#    uni[i*16]=10
#scal_svrg = np.repeat(gpondp_svrg,uni)
#plt.plot(gpondp)
#plt.plot(scal_svrg )
#plt.legend(['gpondp','gpondp_svrg'], loc='lower right')
#plt.savefig("gpondp_5e-6.jpg", figsize=(32, 24), dpi=160)
f2a89ce80fd4ce27424ff0a7e83f15dcf071cf2b | 4994d9e1e3bbf4fc3d2ce7b5f3a41b3d0ff1e165 | /lib/handle/StaticHandler.py | 053961a57bf511f3f79a3ace3ee78ea3d3c07234 | [
"MIT"
] | permissive | francoricci/sapspid | 228ab362c123cf3d28c9eab215f5daafe4aa293b | db335f2335824ba4f7aa7a01cd15c235bc815a47 | refs/heads/master | 2021-07-13T02:19:12.923503 | 2021-03-03T15:59:43 | 2021-03-03T15:59:43 | 97,124,842 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | import response
class StaticFileHandler(response.StaticFileHandler):
    # Thin wrapper over the framework handler: tags every static-file
    # error response with errorcode '3' before delegating upward.
    def write_error(self, status_code, **kwargs):
        super(StaticFileHandler, self).write_error(status_code, errorcode = '3', **kwargs)
"pippo@pippo.com"
] | pippo@pippo.com |
f8597c8ce3dfbc755d8bf76575047963a0ec8beb | 6c74c8babd2f94cbed185af75940774a2750f3e5 | /src/georinex/base.py | ccfff852795a64c572afd92589a410550c92cf2e | [
"MIT"
] | permissive | geospace-code/georinex | c28c8a17196bb1fa8093c818ce43bcb74ec52171 | c689a5a6bc2ffb68bc055f150f1da1b6bab12812 | refs/heads/main | 2023-04-13T15:01:50.903458 | 2022-12-27T19:25:58 | 2022-12-27T19:26:15 | 34,296,204 | 106 | 40 | MIT | 2023-04-10T02:54:45 | 2015-04-21T01:19:29 | Python | UTF-8 | Python | false | false | 7,148 | py | from __future__ import annotations
import typing as T
from pathlib import Path
import xarray
from datetime import datetime, timedelta
import logging
from .rio import rinexinfo
from .obs2 import rinexobs2
from .obs3 import rinexobs3
from .nav2 import rinexnav2
from .nav3 import rinexnav3
from .sp3 import load_sp3
from .utils import _tlim
# for NetCDF compression. too high slows down with little space savings.
ENC = {"zlib": True, "complevel": 1, "fletcher32": True}
def load(
    rinexfn: T.TextIO | str | Path,
    out: Path = None,
    use: set[str] = None,
    tlim: tuple[datetime, datetime] = None,
    useindicators: bool = False,
    meas: list[str] = None,
    verbose: bool = False,
    *,
    overwrite: bool = False,
    fast: bool = True,
    interval: float | int | timedelta = None,
):
    """
    Reads OBS, NAV in RINEX 2.x and 3.x

    Files / StringIO input may be plain ASCII text or compressed (including Hatanaka)

    Parameters
    ----------
    rinexfn: RINEX file (path, str or text stream)
    out: directory or ``.nc`` filename for optional NetCDF4 output
    use: satellite systems to use (e.g. {"G", "R"})
    tlim: (start, stop) time bounds
    useindicators: also load SSI / LLI indicators (OBS)
    meas: measurement types to load (OBS)
    verbose: enable INFO logging
    overwrite: overwrite an existing NetCDF group
    fast: use the fast OBS parser
    interval: minimum time between retained epochs
    """
    if verbose:
        logging.basicConfig(level=logging.INFO)

    if isinstance(rinexfn, (str, Path)):
        rinexfn = Path(rinexfn).expanduser()
    # %% determine if/where to write NetCDF4/HDF5 output
    outfn = None
    if out:
        out = Path(out).expanduser()
        if out.is_dir():
            outfn = out / (
                rinexfn.name + ".nc"
            )  # not with_suffix to keep unique RINEX 2 filenames
        elif out.suffix == ".nc":
            outfn = out
        else:
            raise ValueError(f"not sure what output is wanted: {out}")
    # %% main program
    if tlim is not None:
        if len(tlim) != 2:
            raise ValueError("time bounds are specified as start stop")
        if tlim[1] < tlim[0]:
            raise ValueError("stop time must be after start time")

    info = rinexinfo(rinexfn)

    if info["rinextype"] == "nav":
        return rinexnav(rinexfn, outfn, use=use, tlim=tlim, overwrite=overwrite)
    elif info["rinextype"] == "obs":
        return rinexobs(
            rinexfn,
            outfn,
            use=use,
            tlim=tlim,
            useindicators=useindicators,
            meas=meas,
            verbose=verbose,
            overwrite=overwrite,
            fast=fast,
            interval=interval,
        )

    # Remaining branches need a real filesystem path (internal invariant).
    assert isinstance(rinexfn, Path)

    if info["rinextype"] == "sp3":
        return load_sp3(rinexfn, outfn)
    elif rinexfn.suffix == ".nc":
        # outfn not used here, because we already have the converted file!
        try:
            nav = rinexnav(rinexfn)
        except LookupError:
            nav = None
        try:
            obs = rinexobs(rinexfn)
        except LookupError:
            obs = None
        if nav is not None and obs is not None:
            # FIX: reuse the already-loaded OBS dataset instead of calling
            # rinexobs(rinexfn) a second time (the original re-parsed the file).
            return {"nav": nav, "obs": obs}
        elif nav is not None:
            return nav
        elif obs is not None:
            return obs
        else:
            raise ValueError(f"No data of known format found in {rinexfn}")
    else:
        raise ValueError(f"What kind of RINEX file is: {rinexfn}")
def batch_convert(
    path: Path,
    glob: str,
    out: Path,
    use: set[str] = None,
    tlim: tuple[datetime, datetime] = None,
    useindicators: bool = False,
    meas: list[str] = None,
    verbose: bool = False,
    *,
    fast: bool = True,
):
    # Convert every file under `path` matching `glob` via load(), writing
    # NetCDF output to `out`.  Files that fail with ValueError are logged
    # and skipped so one bad file does not abort the batch.
    path = Path(path).expanduser()

    flist = (f for f in path.glob(glob) if f.is_file())

    for fn in flist:
        try:
            load(
                fn,
                out,
                use=use,
                tlim=tlim,
                useindicators=useindicators,
                meas=meas,
                verbose=verbose,
                fast=fast,
            )
        except ValueError as e:
            logging.error(f"{fn.name}: {e}")
def rinexnav(
    fn: T.TextIO | str | Path,
    outfn: Path = None,
    use: set[str] = None,
    group: str = "NAV",
    tlim: tuple[datetime, datetime] = None,
    *,
    overwrite: bool = False,
) -> xarray.Dataset:
    """Read RINEX 2 or 3 NAV files"""

    if isinstance(fn, (str, Path)):
        fn = Path(fn).expanduser()
        # A .nc file is an already-converted dataset: read the group directly.
        if fn.suffix == ".nc":
            try:
                return xarray.open_dataset(fn, group=group)
            except OSError as e:
                raise LookupError(f"Group {group} not found in {fn} {e}")

    tlim = _tlim(tlim)

    info = rinexinfo(fn)
    if int(info["version"]) == 2:
        nav = rinexnav2(fn, tlim=tlim)
    elif int(info["version"]) == 3:
        nav = rinexnav3(fn, use=use, tlim=tlim)
    else:
        raise LookupError(f"unknown RINEX {info}  {fn}")

    # %% optional output write
    if outfn:
        outfn = Path(outfn).expanduser()
        wmode = _groupexists(outfn, group, overwrite)

        enc = {k: ENC for k in nav.data_vars}
        nav.to_netcdf(outfn, group=group, mode=wmode, encoding=enc)

    return nav
# %% Observation File
def rinexobs(
    fn: T.TextIO | Path,
    outfn: Path = None,
    use: set[str] = None,
    group: str = "OBS",
    tlim: tuple[datetime, datetime] = None,
    useindicators: bool = False,
    meas: list[str] = None,
    verbose: bool = False,
    *,
    overwrite: bool = False,
    fast: bool = True,
    interval: float | int | timedelta = None,
):
    """
    Read RINEX 2.x and 3.x OBS files in ASCII or GZIP (or Hatanaka)
    """

    if isinstance(fn, (str, Path)):
        fn = Path(fn).expanduser()
        # %% NetCDF4 — already-converted file: read the group directly.
        if fn.suffix == ".nc":
            try:
                return xarray.open_dataset(fn, group=group)
            except OSError as e:
                raise LookupError(f"Group {group} not found in {fn} {e}")

    tlim = _tlim(tlim)

    # %% version selection
    info = rinexinfo(fn)

    if int(info["version"]) in (1, 2):
        obs = rinexobs2(
            fn,
            use,
            tlim=tlim,
            useindicators=useindicators,
            meas=meas,
            verbose=verbose,
            fast=fast,
            interval=interval,
        )
    elif int(info["version"]) == 3:
        obs = rinexobs3(
            fn,
            use,
            tlim=tlim,
            useindicators=useindicators,
            meas=meas,
            verbose=verbose,
            fast=fast,
            interval=interval,
        )
    else:
        raise ValueError(f"unknown RINEX {info}  {fn}")

    # %% optional output write
    if outfn:
        outfn = Path(outfn).expanduser()
        wmode = _groupexists(outfn, group, overwrite)

        enc = {k: ENC for k in obs.data_vars}
        # Pandas >= 0.25.0 requires this, regardless of xarray version
        if obs.time.dtype != "datetime64[ns]":
            obs["time"] = obs.time.astype("datetime64[ns]")
        obs.to_netcdf(outfn, group=group, mode=wmode, encoding=enc)

    return obs
def _groupexists(fn: Path, group: str, overwrite: bool) -> str:
    """Return the NetCDF write mode for `fn`: "w" (create/overwrite) or "a" (append).

    Raises ValueError if `group` is already present in `fn` and overwrite is False.
    """
    print(f"saving {group}:", fn)
    if overwrite or not fn.is_file():
        return "w"

    # be sure there isn't already this group in the file.
    # FIX: close the probe dataset (the original leaked the open handle,
    # which can hold a lock on the file we are about to append to).
    try:
        with xarray.open_dataset(fn, group=group):
            pass
    except OSError:
        # group absent -> safe to append
        return "a"

    raise ValueError(f"{group} already in {fn}")
| [
"scivision@users.noreply.github.com"
] | scivision@users.noreply.github.com |
ee391734bbe1d920f7349971047cc74c0c565f36 | e9ef3cd143478660d098668a10e67544a42b5878 | /Lib/corpuscrawler/crawl_mpx.py | 71bb3a7ee49333cc9c4fc1cee863a89f398c5aa2 | [
"Apache-2.0"
] | permissive | google/corpuscrawler | a5c790c19b26e6397b768ce26cf12bbcb641eb90 | 10adaecf4ed5a7d0557c8e692c186023746eb001 | refs/heads/master | 2023-08-26T04:15:59.036883 | 2022-04-20T08:18:11 | 2022-04-20T08:18:11 | 102,909,145 | 119 | 40 | NOASSERTION | 2022-04-20T08:18:12 | 2017-09-08T22:21:03 | Python | UTF-8 | Python | false | false | 799 | py | # coding: utf-8
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
import re
def crawl(crawler):
    # Register an output corpus for ISO 639-3 code 'mpx' and harvest
    # the matching pngscriptures.org texts into it.
    out = crawler.get_output(language='mpx')
    crawler.crawl_pngscriptures_org(out, language='mpx')
| [
"sascha@brawer.ch"
] | sascha@brawer.ch |
e8d0823a474e0ac46065eca508bc73d8188113d9 | 29c4f16b2bd95203fc58f1b43ada634116aabb8d | /Customer.py | fa14e037618593e8a9bba2823b3fae772e9ee149 | [] | no_license | AlexandreGheraibia/banquePython | 2c715c7013c8cd89f87ce80fcadb63ce47830b76 | df2f1b79f8dd5219f65bd5cf0b9a249a12e11caa | refs/heads/master | 2020-03-22T01:06:23.046419 | 2018-06-30T22:27:29 | 2018-06-30T22:27:29 | 139,283,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | class Customer:
def setId(this,id):
return id
def getId(this):
return this.id
def setName(this,name):
return name
def getName(this):
return this.name
def __init__(this):
return
def __init__(this,id,name):
this.id=id
this.name=name
| [
"gheraibia@hotmail.com"
] | gheraibia@hotmail.com |
3f80f32810fc412f915873a0c635f787b0603cd6 | 7f07515310c075c95033354a91f9f82557b98092 | /heatmap.py | cb8fd55587cb71d1707428b4f0ae92759ce9cb53 | [] | no_license | HegemanLab/VanKrevelen | 805d33e9e0515a1250fbb87f27b7d56af1de759f | ea82f284f3ade1b43adb6bc5041b5bb14c166c2f | refs/heads/master | 2020-04-16T23:39:22.636160 | 2016-08-17T17:54:16 | 2016-08-17T17:54:16 | 47,356,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,028 | py | '''
Thanks to jjguy @ http://jjguy.com/heatmap/
Minimal edits made to the original code, mostly just comments
'''
import ctypes
import os
import platform
import sys
from PIL import Image
import colorschemes
class Heatmap:
    """
    Create heatmaps from a list of 2D coordinates.
    Heatmap requires the Python Imaging Library and Python 2.5+ for ctypes.
    Coordinates autoscale to fit within the image dimensions, so if there are
    anomalies or outliers in your dataset, results won't be what you expect. You
    can override the autoscaling by using the area parameter to specify the data bounds.
    The output is a PNG with transparent background, suitable alone or to overlay another
    image or such. You can also save a KML file to use in Google Maps if x/y coordinates
    are lat/long coordinates. Make your own wardriving maps or visualize the footprint of
    your wireless network.
    Most of the magic starts in heatmap(), see below for description of that function.
    """

    # KML ground-overlay template; %-substituted in saveKML() with the PNG
    # href and the north/south/east/west bounds.
    KML = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Folder>
<GroundOverlay>
<Icon>
<href>%s</href>
</Icon>
<LatLonBox>
<north>%2.16f</north>
<south>%2.16f</south>
<east>%2.16f</east>
<west>%2.16f</west>
<rotation>0</rotation>
</LatLonBox>
</GroundOverlay>
</Folder>
</kml>"""

    def __init__(self, libpath=None):
        # min/max bounds of the last plotted dataset; img holds the last
        # rendered PIL image (None until heatmap() has run).
        self.minXY = ()
        self.maxXY = ()
        self.img = None
        if libpath:
            # Caller supplied an explicit path to the compiled helper library.
            self._heatmap = ctypes.cdll.LoadLibrary(libpath)
        else:
            # establish the right library name, based on platform and arch. Windows
            # are pre-compiled binaries; linux machines are compiled during setup.
            self._heatmap = None
            libname = "cHeatmap.so"
            if "cygwin" in platform.system().lower():
                libname = "cHeatmap.dll"
            if "windows" in platform.system().lower():
                libname = "cHeatmap-x86.dll"
                if "64" in platform.architecture()[0]:
                    libname = "cHeatmap-x64.dll"
            # now rip through everything in sys.path to find them. Should be in site-packages
            # or local dir
            for d in sys.path:
                if os.path.isfile(os.path.join(d, libname)):
                    self._heatmap = ctypes.cdll.LoadLibrary(
                        os.path.join(d, libname))
            if not self._heatmap:
                raise Exception("Heatmap shared library not found in PYTHONPATH.")

    def heatmap(self, points, dotsize=150, opacity=128, size=(1024, 1024), scheme="classic", area=None):
        """
        points -> an iterable list of tuples, where the contents are the
        x,y coordinates to plot. e.g., [(1, 1), (2, 2), (3, 3)]
        dotsize -> the size of a single coordinate in the output image in
        pixels, default is 150px. Tweak this parameter to adjust
        the resulting heatmap.
        opacity -> the strength of a single coordiniate in the output image.
        Tweak this parameter to adjust the resulting heatmap.
        size -> tuple with the width, height in pixels of the output PNG
        scheme -> Name of color scheme to use to color the output image.
        Use schemes() to get list. (images are in source distro)
        area -> Specify bounding coordinates of the output image. Tuple of
        tuples: ((minX, minY), (maxX, maxY)). If None or unspecified,
        these values are calculated based on the input data.
        """
        self.dotsize = dotsize
        self.opacity = opacity
        self.size = size
        self.points = points
        if area is not None:
            # Explicit bounds: tell the C library not to autoscale.
            self.area = area
            self.override = 1
        else:
            self.area = ((0, 0), (0, 0))
            self.override = 0
        if scheme not in self.schemes():
            tmp = "Unknown color scheme: %s. Available schemes: %s" % (
                scheme, self.schemes())
            raise Exception(tmp)
        # Marshal Python data into ctypes arrays for the C renderer.
        arrPoints = self._convertPoints(points)
        arrScheme = self._convertScheme(scheme)
        arrFinalImage = self._allocOutputBuffer()
        # tx() renders the heatmap into arrFinalImage (RGBA bytes) and
        # returns non-zero on success.
        ret = self._heatmap.tx(
            arrPoints, len(points) * 2, size[0], size[1], dotsize,
            arrScheme, arrFinalImage, opacity, self.override,
            ctypes.c_float(self.area[0][0]), ctypes.c_float(
                self.area[0][1]),
            ctypes.c_float(self.area[1][0]), ctypes.c_float(self.area[1][1]))
        if not ret:
            raise Exception("Unexpected error during processing.")
        # Wrap the raw RGBA buffer in a PIL image without copying per-pixel.
        self.img = Image.frombuffer('RGBA', (self.size[0], self.size[1]),
                                    arrFinalImage, 'raw', 'RGBA', 0, 1)
        return self.img

    def _allocOutputBuffer(self):
        # One byte each for R, G, B, A per output pixel.
        return (ctypes.c_ubyte * (self.size[0] * self.size[1] * 4))()

    def _convertPoints(self, pts):
        """ flatten the list of tuples, convert into ctypes array """
        flat = []
        for i, j in pts:
            flat.append(i)
            flat.append(j)
        # Build array of input points
        arr_pts = (ctypes.c_float * (len(pts) * 2))(*flat)
        return arr_pts

    def _convertScheme(self, scheme):
        """ flatten the list of RGB tuples, convert into ctypes array """
        flat = []
        for r, g, b in colorschemes.schemes[scheme]:
            flat.append(r)
            flat.append(g)
            flat.append(b)
        arr_cs = (
            ctypes.c_int * (len(colorschemes.schemes[scheme]) * 3))(*flat)
        return arr_cs

    def _ranges(self, points):
        """ walks the list of points and finds the
        max/min x & y values in the set """
        minX = points[0][0]
        minY = points[0][1]
        maxX = minX
        maxY = minY
        for x, y in points:
            minX = min(x, minX)
            minY = min(y, minY)
            maxX = max(x, maxX)
            maxY = max(y, maxY)
        return ((minX, minY), (maxX, maxY))

    def saveKML(self, kmlFile):
        """
        Saves a KML template to use with google earth. Assumes x/y coordinates
        are lat/long, and creates an overlay to display the heatmap within Google
        Earth.
        kmlFile -> output filename for the KML.
        """
        if self.img is None:
            raise Exception("Must first run heatmap() to generate image file.")
        # The overlay PNG sits next to the KML, same basename.
        tilePath = os.path.splitext(kmlFile)[0] + ".png"
        self.img.save(tilePath)
        if self.override:
            ((east, south), (west, north)) = self.area
        else:
            ((east, south), (west, north)) = self._ranges(self.points)
        # NOTE(review): `bytes` shadows the builtin, and `file(...)` is the
        # Python 2 builtin constructor -- under Python 3 this needs `open`.
        bytes = self.KML % (tilePath, north, south, east, west)
        file(kmlFile, "w").write(bytes)

    def schemes(self):
        """
        Return a list of available color scheme names.
        """
        return colorschemes.valid_schemes()
| [
"roden026@umn.edu"
] | roden026@umn.edu |
b48d0fdf80d00e5128a64d51260df0314579ca35 | 4160b450b052830e17457a0412e29414f67caea5 | /order/migrations/0010_auto_20210822_0755.py | 295b200f20b97da9b7772cdca4c5ea7279ce3aaa | [] | no_license | mnogoruk/fastcustoms | 6ad7b058607ddf4d2b56a09e23e66fcfb43be1a7 | 4c3bf7f9f1d4af2851f957a084b6adc2b7b7f681 | refs/heads/master | 2023-08-23T15:54:08.415613 | 2021-10-31T12:21:29 | 2021-10-31T12:21:29 | 372,066,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # Generated by Django 3.2.3 on 2021-08-22 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema tweak: relaxes OrderAgent contact fields so they
    # may be left blank (and, except for comment, stored as NULL).

    dependencies = [
        ('order', '0009_auto_20210821_2220'),
    ]

    operations = [
        # comment: optional in forms, defaults to the empty string.
        migrations.AlterField(
            model_name='orderagent',
            name='comment',
            field=models.TextField(blank=True, default='', max_length=1000),
        ),
        # Remaining contact fields become both blankable and nullable.
        migrations.AlterField(
            model_name='orderagent',
            name='company_name',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
        migrations.AlterField(
            model_name='orderagent',
            name='email',
            field=models.EmailField(blank=True, max_length=120, null=True),
        ),
        migrations.AlterField(
            model_name='orderagent',
            name='phone',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
| [
"danii.litvinenko@x5.ru"
] | danii.litvinenko@x5.ru |
4a80747e2268ca965179dddbee104338c66849bc | af65714ea99ea2a1edd6b372609f682399a7d64d | /your_app_name/manage.py | 5578a3e697c59017498982b4457f470dcf7b70f3 | [
"MIT"
] | permissive | gibeongideon/django-github-action-runner-CICD | fbfb81b94bbb4ccc93fc90cbc452695a4949a502 | ddf02176dc83e3f7ed4944f8f48207c944e33f18 | refs/heads/master | 2023-06-05T21:39:30.002833 | 2021-06-23T20:21:46 | 2021-06-23T20:21:46 | 379,716,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: select the settings module and hand off to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'your_app_name.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
] | kipngeno.gibeon@gmail.com |
e0c09849f0aec5951bf94adaa9bc3656ac75f05f | abc72a2f2072ab7a5a338e41d81c354324943b09 | /MC 102 (Exemplos de aula)/eliminar_repeticao.py | 55c15d25c81d25f12a60900b67da3c9af6354681 | [] | no_license | gigennari/mc102 | a3d39fd9a942c97ef477a9b59d7955f4269b202a | fce680d5188a8dfb0bc1832d6f430cbcaf68ef55 | refs/heads/master | 2023-04-05T01:40:58.839889 | 2020-07-27T20:33:56 | 2020-07-27T20:33:56 | 354,130,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py |
def eliminar_repeticao(lista1, lista2):
    """Remove duplicates from lista1, keeping the parallel entry of lista2.

    Returns two new lists: the first occurrence of each value in lista1 and,
    aligned with it, the frequency from lista2 at that same position.
    Assumes the values in lista1 are hashable (numbers, strings, tuples).
    """
    lista_sem_rep = []
    freq_sem_rep = []
    # Fix: membership was tested with `in` on the growing output list,
    # making the loop O(n^2); a seen-set makes each check O(1).
    vistos = set()
    for valor, freq in zip(lista1, lista2):
        if valor not in vistos:
            vistos.add(valor)
            lista_sem_rep.append(valor)
            freq_sem_rep.append(freq)
    return lista_sem_rep, freq_sem_rep
def main():
    """Exercise eliminar_repeticao on the sample data from class."""
    valores = [3, 3, 6, 5, 8, 8, 10]
    frequencias = [2, 2, 1, 1, 2, 2, 1]
    sem_repeticao, _frequencias_sem_rep = eliminar_repeticao(valores, frequencias)
    print(sem_repeticao)


main()
"g198010@dac.unicamp.br"
] | g198010@dac.unicamp.br |
f7ee63e6b92678782ec9da34b96b0addaf69997c | b9571590d8cc83a99293d777f57e5ebeea5bcc92 | /spiders/DoctorSpider.py | 1cc8539b8017fa62c7ea2ce5c7a731be27f7fec8 | [] | no_license | LiuQL2/Crawler_xywy_doctor_communication | 585a0a3230f397640e5fc54506cd6585bfd04f57 | 3374f08ea34ae8ea7e96501188a4fec247c72b5d | refs/heads/master | 2020-06-30T13:28:01.048195 | 2017-08-04T07:29:19 | 2017-08-04T07:29:19 | 74,369,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,508 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
用来获取病例和心得帖子内容的类,传入一个帖子的URL,调用不同的方法得到不同的数据。
"""
# Author: Liu Qianlong <LiuQL2@163.com>
# Date: 2016.12.08
import datetime
import json
import sys
import urllib2
from BaseSpider import BaseSpider
reload(sys)
sys.setdefaultencoding('utf-8')
class DoctorSpider(BaseSpider):
    # Spider for a single doctor's profile on club.xywy.com.  Builds the
    # share_operation stats API URL from the profile URL's uid path segment
    # and exposes the follower counters via get_number().

    def __init__(self,url, crawl_number, try_number = 20):
        self.target_url = url
        # NOTE(review): this Request object is built but never stored or
        # sent -- presumably leftover from an earlier fetch path.
        request = urllib2.Request(url=self.target_url, headers=self.get_header())
        self.status = True
        self.try_number = try_number
        self.crawl_number = crawl_number
        self.selector = None
        # uid is the 5th '/'-separated segment of a profile URL such as
        # http://club.xywy.com/doc_card/<uid>/blog
        self.number_url = 'http://club.xywy.com/doctorShare/index.php?type=share_operation&uid=' + self.target_url.split('/')[4] + '&stat=14'

    def get_number(self):
        # Fetch the stats endpoint as raw text (no XPath parsing) via the
        # BaseSpider helper; returns None when the request failed.
        doc = self.process_url_request(self.number_url,xpath_type=False)
        if doc != None:
            doc = json.loads(doc)
            crawl_time = datetime.datetime.now().strftime('%Y-%m-%d')
            # Counters are stringified; crawl metadata is attached for storage.
            return {'attention_number':str(doc['attenNum']), 'fans_number':str(doc['fansNum']),'web_number':str(doc['wbNum']),'doctor_url':self.target_url, 'crawl_time':crawl_time, 'crawl_number':self.crawl_number}
        else:
            return None
if __name__ == '__main__':
    # Ad-hoc smoke test against a live profile (Python 2 print statement).
    # NOTE(review): __init__ requires crawl_number but it is not passed here,
    # so this example call would raise a TypeError as written.
    doctor = DoctorSpider(url='http://club.xywy.com/doc_card/55316663/blog')
    print doctor.get_number()
"LiuQL2@sina.com"
] | LiuQL2@sina.com |
9691a7aad57450dba8acad44cb0105ff32ac9d5c | c171d1b819d599c294afe67d17f09f7ef5e358af | /Blind_Search/8_Puzzle_DFS.py | 088de9e8ec7e148eb1df2ea660d39d7c67ccbbe2 | [] | no_license | bawejagb/Artificial_Intelligence | 0e18e9f9cb0ee2663ae5d3ffd4ad9b2560295947 | 9caafe026e93cd50a0d01d3d83e079ce221c5a9d | refs/heads/main | 2023-04-21T19:20:49.713415 | 2021-05-04T15:54:48 | 2021-05-04T15:54:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | '''
Q1- 8 puzzle problem
Made By: Gaurav Baweja, 102097005, CSE4
'''
import copy as cp
def Show(arr):
    """Print the puzzle grid, one row per line, cells separated by '|'."""
    print("------")
    for row in arr:
        print("".join("{0}|".format(cell) for cell in row))
    print("------")
def Position(val, arr):
    """Return the (row, col) of the first cell equal to val (None if absent)."""
    for r, row in enumerate(arr):
        for c, cell in enumerate(row):
            if cell == val:
                return (r, c)
def Swap(i1, j1, i2, j2, arr):
    """Exchange arr[i1][j1] and arr[i2][j2] in place."""
    arr[i1][j1], arr[i2][j2] = arr[i2][j2], arr[i1][j1]
def MoveUp(i, j, arr):
    """Return a copy of arr with cell (i, j) swapped upward.

    Returns arr itself, unchanged, when (i, j) is already on the top row.
    """
    if i < 1:
        return arr
    moved = cp.deepcopy(arr)
    moved[i][j], moved[i - 1][j] = moved[i - 1][j], moved[i][j]
    return moved
def MoveLeft(i, j, arr):
    """Return a copy of arr with cell (i, j) swapped leftward.

    Returns arr itself, unchanged, when (i, j) is already in column 0.
    """
    if j < 1:
        return arr
    moved = cp.deepcopy(arr)
    moved[i][j], moved[i][j - 1] = moved[i][j - 1], moved[i][j]
    return moved
def MoveRight(i, j, arr):
    """Return a copy of arr with cell (i, j) swapped rightward.

    Returns arr itself, unchanged, when (i, j) is already in the last
    column.  Generalized: the bound is derived from the row length instead
    of the hard-coded 3x3 check (j > 1), so grids of any width work and the
    previous out-of-range swap on non-3x3 grids can no longer occur.
    """
    if j >= len(arr[i]) - 1:
        return arr
    moved = cp.deepcopy(arr)
    moved[i][j], moved[i][j + 1] = moved[i][j + 1], moved[i][j]
    return moved
def MoveDown(i, j, arr):
    """Return a copy of arr with cell (i, j) swapped downward.

    Returns arr itself, unchanged, when (i, j) is already on the bottom
    row.  Generalized: the bound is derived from the grid height instead of
    the hard-coded 3x3 check (i > 1), so grids of any height work and the
    previous out-of-range swap on non-3x3 grids can no longer occur.
    """
    if i >= len(arr) - 1:
        return arr
    moved = cp.deepcopy(arr)
    moved[i][j], moved[i + 1][j] = moved[i + 1][j], moved[i][j]
    return moved
def Compare(arr1, arr2):
    """Return True when the two grids are equal, else False."""
    return arr1 == arr2
def enqueue(que, arr):
    """Push arr onto the end of the list (used as a stack by DFS)."""
    que.append(arr)


def dequeue(que):
    """Remove the last element if the list is non-empty (stack pop)."""
    if que:
        que.pop()


def front(que):
    """Return the first element, or None when the list is empty."""
    if que:
        return que[0]


def end(que):
    """Return the last element, or None when the list is empty."""
    if que:
        return que[-1]
def DFS(start, goal):
    """Depth-first search over 8-puzzle states from start toward goal.

    The list helpers are used as a stack (enqueue/end/dequeue operate on the
    tail).  Prints the goal state and the iteration count when found.
    Returns True on success, False when the reachable space is exhausted.
    """
    itr_count = 0
    queue = []
    visited = []
    # Fix: "not in queue and not in visited" scanned two growing lists per
    # generated state (O(n) each).  Every state in queue-or-visited is
    # exactly a state that was once enqueued, so a single set of hashable
    # keys gives the same decision in O(1).
    seen = set()

    def key(state):
        # Lists of lists are unhashable; use a tuple-of-tuples fingerprint.
        return tuple(map(tuple, state))

    enqueue(queue, start)
    seen.add(key(start))
    while len(queue) != 0:
        itr_count += 1
        temp = end(queue)
        dequeue(queue)
        visited.append(temp)
        row, col = Position(0, temp)  # locate the blank tile
        for state in range(1, 5):
            if state == 1:  # MoveUp
                nextState = MoveUp(row, col, temp)
            if state == 2:  # MoveDown
                nextState = MoveDown(row, col, temp)
            if state == 3:  # MoveLeft
                nextState = MoveLeft(row, col, temp)
            if state == 4:  # MoveRight
                nextState = MoveRight(row, col, temp)
            if nextState == goal:  # Check Goal State
                print("Achieved Goal State:")
                print("Total Iteration: ", itr_count)
                Show(nextState)
                return True
            if key(nextState) not in seen:  # Enqueue unseen states only
                enqueue(queue, nextState)
                seen.add(key(nextState))
    return False
if __name__ == "__main__":
    # Sample instance: slide the blank (0) from the top-middle cell until
    # the standard 8-puzzle goal layout is reached.
    start_state = [[2, 0, 3],
                   [1, 8, 4],
                   [7, 6, 5]]
    goal_state = [[1, 2, 3],
                  [8, 0, 4],
                  [7, 6, 5]]
    solved = DFS(start_state, goal_state)
    print("State Possible: ", end="")
    print("Yes" if solved else "No")
| [
"gaurav.baweja2508@gmail.com"
] | gaurav.baweja2508@gmail.com |
380e413c789d06c1d8c1e6b1304c9a37e8b030e3 | ec852d0c26ca2ebba40054cd2668db0ee990af69 | /2.py | 1a0edb5dff657f4e37b3a0835b97627531f12924 | [] | no_license | sadilet/xgboost-predictor | 61ecdd951d275f3e69a3741f1623c52c11cf4ed2 | e31c4bcdce995a7c1ffa3c4eaf8a2b7fb65b5861 | refs/heads/master | 2023-02-06T13:38:37.149240 | 2020-12-21T00:54:50 | 2020-12-21T00:54:50 | 321,763,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | import datetime
import xgboost
import numpy as np
import xgboost_predictor
if __name__ == "__main__":
data = []
with open("tests/resources/data/agaricus.txt.0.test", "r") as f:
for line in f.readlines():
row = [0] * 126
for i in line.split(" ")[1:]:
f, v = i.split(":")
row[int(f)] = int(v)
data.append(tuple(row))
data = tuple(data)
booster1 = xgboost.Booster({"nthread": 1})
booster1.load_model('tests/resources/model/gblinear/v40/binary-logistic.model')
data1 = xgboost.DMatrix(np.array(data, dtype=np.float32))
start = datetime.datetime.now()
a = booster1.predict(data1)
print(datetime.datetime.now() - start)
booster2 = xgboost_predictor.load_model('tests/resources/model/gblinear/v40/binary-logistic.model')
data2 = np.array(data, dtype=np.float32)
start = datetime.datetime.now()
b = booster2.predict_many(data2)
print((datetime.datetime.now() - start))
print(a)
print(b[:10])
print(len(b))
"""
[1,2,3,4, 1,2,3,4, 1,2,3,4]
(0 * 3) + 0 = 0
(0 * 3) + 1 = 1
(0 * 3) + 2 = 2
(0 * 3) + 3 = 3
(1 * 3) + 0 = 3 +
(1 * 3) + 1 = 4
(1 * 3) + 2 = 5
(1 * 3) + 3 = 6
(2 * 3) + 0 = 6 + 2
(2 * 3) + 1 = 7
(2 * 3) + 2 = 8
(2 * 3) + 3 = 9
""" | [
"wallstbrok@gmail.com"
] | wallstbrok@gmail.com |
e7e95d0bc9841ba1cce84f802176bbe2ef6d5e38 | 6c71226e2080c79a993fb086445c5af52b42bb95 | /randomForestRegression/random_forest_regression.py | 1df75f2bfa11cfc2b80c74e43cd52df35f2a8c6d | [] | no_license | ArakelyanEdgar/MachineLearningAlgorithms | 32f4046931d39cfacc58bd6c753031fd3af3d3aa | 07b64e83dafc9259f9a09aefcce07ca4b261b76f | refs/heads/master | 2020-03-08T13:27:31.750449 | 2018-04-05T04:27:44 | 2018-04-05T04:27:44 | 128,158,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | # Random Forest Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
#random forest regression
regressor = RandomForestRegressor(n_estimators=1000, random_state=0)
regressor.fit(X, y)
#Predicting a new result
y_pred = regressor.predict(6.5)
print(y_pred)
# Visualising the Random Forest Regression results (higher resolution)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | [
"edgararakelyan123@gmail.com"
] | edgararakelyan123@gmail.com |
f9aedbea917b21ed89324565fef95311658ab4cb | a37d9f2ab27952dbc1fd3f394ce6f8f537bbee3a | /bnet/optimizers/__init__.py | 01323a07f36de3f988c1c8a9a500ab761d196794 | [] | no_license | dmbernaal/bnet-resnest-xresnet-mininets | d4b1739270bc0610550d4098f2db2756147f2c7c | b53248bc64e6cc1eca47a7770984066e38070045 | refs/heads/main | 2023-01-23T15:08:26.376580 | 2020-12-08T06:08:08 | 2020-12-08T06:08:08 | 319,539,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from .adamod import AdaMod
from .deepmemory import DeepMemory
from .diffgrad import DiffGrad
from .diffmod import DiffMod
from .lookahead import Lookahead, LookaheadAdam
from .novograd import Novograd
from .ralamb import Ralamb
from .ranger import Ranger
from .rangerlars import RangerLars
from .adahessian import Adahessian, get_params_grad | [
"dmbernaal@gmail.com"
] | dmbernaal@gmail.com |
d71180f0bd321d3d7193738b32581743b75440f3 | 3257372291236aac1737b057c9ac6c61da9ccca0 | /tutorials/W0D5_Statistics/solutions/W0D5_Tutorial2_Solution_281848de.py | 65dc66a635bd53e9c76bb3d72f597aebb3c00512 | [
"CC-BY-4.0",
"BSD-3-Clause",
"MIT"
] | permissive | NeuromatchAcademy/precourse | 230ead0d11ae7b0dba21c8df97695a1796e9797d | b7f2432c6a68a7984ca923ceed8e07d5cfdb77c3 | refs/heads/main | 2023-07-26T11:18:24.493966 | 2023-07-09T14:42:49 | 2023-07-09T14:42:49 | 256,327,558 | 639 | 174 | MIT | 2023-07-09T14:42:50 | 2020-04-16T20:54:03 | Jupyter Notebook | UTF-8 | Python | false | false | 621 | py |
""" You will learn more about "Bayesian brains" and the theory surrounding
these ideas once the course begins. Here is a brief explanation: it may
be ideal for human brains to implement Bayesian inference by integrating "prior"
information the brain has about the world (memories, prior knowledge, etc.) with
new evidence that updates its "beliefs"/prior. This process seems to parallel
the brain's method of learning about its environment, making it a compelling
theory for many neuroscience researchers. One of Bonus exercises below examines a possible
real world model for Bayesian inference: sound localization.
"""; | [
"noreply@github.com"
] | NeuromatchAcademy.noreply@github.com |
6f1626422bd5fe30a5d69e1a6033406a0eb15da8 | 74d962367e299eb2ffb827d78e88aea76c3ebabc | /manage.py | 6c8e8d2a9587f74e95b0d56e701e5076f0a97dec | [] | no_license | PauloViOS/flordodia | 8b40105f78ce5d4ff73b4a5e9fe4ef2402fc6c73 | b10ca10b882aba785a4ffd2e6a51d2cc16f59209 | refs/heads/master | 2023-03-06T16:14:59.815158 | 2021-02-23T13:55:28 | 2021-02-23T13:55:28 | 337,239,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "flordodia.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"paulo.santos@buser.com.br"
] | paulo.santos@buser.com.br |
f19728bf8e40186ce8d1a8b60514928cac527607 | 51a48a4144ef3aabb354e75033293da45a813c8f | /sourceCodes/python/pfile/wrapper.py | b7c9f99060862916eef3125a38ab52e2f6a0a2ff | [] | no_license | melodi-lab/SGM | a53f79c8a0e4cbc7dd64f269002b6a0d49cd5d92 | 36d9b7b6f864ee93220c960065f8c7e216c5009c | refs/heads/master | 2020-04-05T23:11:23.626090 | 2017-06-19T23:30:40 | 2017-06-19T23:30:40 | 64,819,641 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,343 | py | #!/usr/bin/env python
#
# Copyright 2011 <fill in later>
__authors__ = [ 'Ajit Singh <ajit@ee.washington.edu>' ]
import os
import sys
import types
try:
import libpfile as lib
except ImportError:
dirname = os.path.dirname(__file__)
print >> sys.stderr, "Run make in %s to compile libpfile" % dirname
exit(-1)
class PFile(object):
    # Thin Python wrapper over the SWIG-generated libpfile bindings for
    # writing PFiles: per-frame records of `nf` floats followed by `ni`
    # unsigned-int labels.  Python 2 code (uses xrange/types module).

    def __init__(self, nf, ni, f, doswap = None):
        """Constructor
        Creates a PFile with the name specified in fn. Each segment contains
        nf floats, followed by ni integers.
        Arguments:
        nf: Number of floats in a segment.
        ni: Number of integers (labels) in a segment.
        fn: Name of the pfile to create, no extension is forced.
        doswap: If set, it will force a particular byte order in the
        generated PFile. Useful if you're writing a PFile on one
        platform for use on another platform. If None, use whatever
        sys.byteorder returns.
        """
        if not doswap:
            # Infer the byte-swap flag from the host byte order.
            if sys.byteorder == 'little':
                doswap = 1
            elif sys.byteorder == 'big':
                doswap = 0
            else:
                raise Exception("Could not infer byteorder.")
        index = 1
        # Accept either an already-open file object or a path to create.
        if type(f) == types.FileType:
            self.f = f
        elif type(f) == types.StringType:
            self.f = open(f, 'w')
        else:
            raise Exception("Bad filename argument: %s" % str(f))
        self.nf = nf
        self.ni = ni
        self.doswap = doswap
        self.pf = lib.OutFtrLabStream_PFile(0, '', self.f, nf, ni, index,
                                            doswap)
        # Create buffers for translating Python lists of floats or ints to
        # float* and unsigneed int*
        self.buf_floats = lib.new_doubleArray(self.nf)
        self.buf_ints = lib.new_uintArray(self.ni)

    def __del__(self):
        """Destructor.
        TODO(ajit): Calling pfile.fclose of self.pf causes a segmentation fault.
        Determine where the file is really being deleted (it may only be on
        exit, or deletion of the class).
        """
        # Free the native stream and the ctypes staging buffers.
        del self.pf
        lib.delete_doubleArray(self.buf_floats)
        lib.delete_uintArray(self.buf_ints)

    @property
    def name(self):
        # Path of the underlying file object.
        return self.f.name

    def check_frame(self, *args):
        # Validate one frame: nf floats followed by ni ints, by position.
        if len(args) != self.nf + self.ni:
            raise Exception("Wrong length %d vs. %d" % (len(args),
                                                        self.nf + self.ni))
        for i in xrange(0, self.nf, 1):
            if not type(args[i]) == types.FloatType:
                raise Exception("Wrong type arg[%d]: wanted float, got %s" % (
                    i, str(type(args[i]))))
        for i in xrange(self.nf, self.nf+self.ni, 1):
            if not type(args[i]) == types.IntType:
                raise Exception("Wrong type arg[%d]: wanted int, got %s" % (
                    i, str(type(args[i]))))

    def add_frame(self, *args):
        # Stage the floats then the ints into the native buffers, then
        # write a single frame through the SWIG stream.
        for i in xrange(0, self.nf, 1):
            lib.doubleArray_setitem(self.buf_floats, i, args[i])
        for i in xrange(self.nf, self.nf + self.ni, 1):
            lib.uintArray_setitem(self.buf_ints, i-self.nf, args[i])
        self.pf.write_ftrslabs(1, self.buf_floats, self.buf_ints)

    def add_segment(self, nframes, floats, ints):
        """Copy a whole sentence in one shot.
        Can be useful in reducing the Python -> C++ overhead required to
        generate one sentence: e.g., creating one list for all the floats
        in a sentence, instead of one list per-frame. You do not need to
        call end_segment after using this function.
        TODO(ajit): It's not clear whether the segment ID is actually used
        anywhere. The code in pfile.cc:doneseg does not appear to use the
        segment ID, and ignoring it doesn't seem to cause any problems.
        Arguments:
        nframes: Number of frames in the sentence.
        floats: Iterable with all of the floats in the sentence. First,
        all the floats in frame 0, then frame 1, etc.
        ints: Iterable with all the integers in the sentence.
        """
        # NOTE(review): unimplemented stub -- callers currently get a no-op.
        pass

    def end_segment(self, i = None):
        # Close the current segment; default segment id is SEGID_UNKNOWN.
        if not i:
            i = lib.SEGID_UNKNOWN
        self.pf.doneseg(i)
| [
"baiwenruo@gmail.com"
] | baiwenruo@gmail.com |
b132f2782265b56e98b88f6df9aaaa8c85d5bddc | 0db8fad3d630899a1e8389349b047c2cabdb6a27 | /meiduo_mall/meiduo_mall/apps/orders/migrations/0002_auto_20190531_0919.py | 6168072e180c8d36de7f6d08fa9748ee6b9c8253 | [] | no_license | zhujian2019/Django_Frontend | 30123b8e20a9e0fc6b48b49433c22744cd1aa9d1 | ee8fa8385487ee96b309f8230928e5945ddc5c86 | refs/heads/master | 2020-06-04T19:46:17.733452 | 2019-06-16T08:38:14 | 2019-06-16T08:38:14 | 192,167,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-05-31 09:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames OrderGoods.oreder -> order, fixing the field-name typo
    # introduced in the initial migration.

    dependencies = [
        ('orders', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='ordergoods',
            old_name='oreder',
            new_name='order',
        ),
    ]
| [
"zhujian_work@163.com"
] | zhujian_work@163.com |
5bc51ef0a70eeafa727c3a43c4b2b701625f00af | 07f54a15cc10ec715da0864901485d6282f13cae | /Lessons/ex10.py | 397a1a8c699589e535b37706c1aedff31d50643c | [] | no_license | levelupcode/LearnPythonTheHardWay | 4d7604c065f90f7ea63919a8994e04330a4aa309 | 88dcb4ec00a5d59769a1888b9b72c1968de8e943 | refs/heads/master | 2021-01-22T13:13:03.308266 | 2013-01-03T18:18:06 | 2013-01-03T18:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # Part 1
"I am 6'2\" tall." # Escape double-quote inside string
'I am 6\'2" Tall.' # Escape single-quote inside string
# Part 2
tabby_cat = "\tI'm tabben in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a \\ cat."
fat_cat = """
I'l do a list:
\t* Cat Food
\t* Fishies
\t* Catnip\n\t* Grass
"""
print tabby_cat
print persian_cat
print backslash_cat
print fat_cat
# Part 3
while True:
for i in ["/","-","\\","|"]:
print "%s\r" % i * 10, | [
"levelupcode@gmail.com"
] | levelupcode@gmail.com |
50c2dde48b456f93aa0260584ded425981eeb60e | e7e029a04319afce21c43317e2cc8f3dc92091ca | /pex/commands/command.py | 2104e893ca0f0b603d42816941bd46692371ae1a | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | asherf/pex | e451b28a088968736e47285396813f6a2b4c6058 | c15508ad1a2d48bdef9fbac677dbfa32927e6625 | refs/heads/master | 2022-07-22T23:01:48.331316 | 2022-07-05T14:41:35 | 2022-07-05T14:41:35 | 237,337,647 | 0 | 0 | Apache-2.0 | 2020-01-31T01:12:31 | 2020-01-31T01:12:30 | null | UTF-8 | Python | false | false | 13,469 | py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import functools
import json
import logging
import os
import subprocess
import sys
import tempfile
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace, _ActionsContainer
from contextlib import contextmanager
from pex import pex_warnings
from pex.argparse import HandleBoolAction
from pex.common import safe_mkdtemp, safe_open
from pex.result import Error, Ok, Result
from pex.typing import TYPE_CHECKING, Generic, cast
from pex.variables import ENV, Variables
from pex.version import __version__
if TYPE_CHECKING:
from typing import (
IO,
Any,
Dict,
Iterable,
Iterator,
NoReturn,
Optional,
Sequence,
Type,
TypeVar,
)
import attr # vendor:skip
else:
from pex.third_party import attr
if TYPE_CHECKING:
_T = TypeVar("_T")
def try_run_program(
    program,  # type: str
    args,  # type: Iterable[str]
    url=None,  # type: Optional[str]
    error=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Result
    """Run `program` with `args`, mapping the outcome onto a Result.

    Missing executables (OSError) yield an Error explaining how to obtain
    the program; non-zero exits propagate the subprocess exit code.
    """
    command = [program] + list(args)
    try:
        subprocess.check_call(command, **kwargs)
    except OSError as e:
        lines = []
        if error:
            lines.append(error)
        lines.append("Do you have `{}` installed on the $PATH?: {}".format(program, e))
        if url:
            lines.append(
                "Find more information on `{program}` at {url}.".format(program=program, url=url)
            )
        return Error("\n".join(lines))
    except subprocess.CalledProcessError as e:
        return Error(str(e), exit_code=e.returncode)
    return Ok()
def try_open_file(
    path,  # type: str
    error=None,  # type: Optional[str]
):
    # type: (...) -> Result
    """Open `path` with the platform's default opener, returning a Result."""
    if "Linux" == os.uname()[0]:
        opener = "xdg-open"
        url = "https://www.freedesktop.org/wiki/Software/xdg-utils/"
    else:
        # macOS and other unixes ship `open`.
        opener = "open"
        url = None
    with open(os.devnull, "wb") as devnull:
        return try_run_program(opener, [path], url=url, error=error, stdout=devnull)
@attr.s(frozen=True)
class Command(object):
    # Frozen base type for CLI subcommands; subclasses customize name,
    # description and argument registration, and receive parsed args via
    # the `options` attrib.

    @staticmethod
    def show_help(
        parser,  # type: ArgumentParser
        *_args,  # type: Any
        **_kwargs  # type: Any
    ):
        # type: (...) -> NoReturn
        # Installed as a default handler so running the tool with no
        # subcommand prints usage and exits via ArgumentParser.error.
        parser.error("a subcommand is required")

    @staticmethod
    def register_global_arguments(
        parser,  # type: _ActionsContainer
        include_verbosity=True,  # type: bool
    ):
        # type: (...) -> None
        # Thin hook delegating to the module-level registration helper.
        register_global_arguments(parser, include_verbosity=include_verbosity)

    @classmethod
    def name(cls):
        # type: () -> str
        # Subcommand name defaults to the lowercased class name.
        return cls.__name__.lower()

    @classmethod
    def description(cls):
        # type: () -> Optional[str]
        # The class docstring doubles as the subcommand description.
        return cls.__doc__

    @classmethod
    def add_arguments(cls, parser):
        # type: (ArgumentParser) -> None
        # Subclasses override to register their own options; default: none.
        pass

    options = attr.ib()  # type: Namespace
class OutputMixin(object):
    # Mixin for commands that write their result either to STDOUT (the
    # default, or when `-o -` is given) or to a user-specified file path.

    @staticmethod
    def add_output_option(
        parser,  # type: _ActionsContainer
        entity,  # type: str
    ):
        # type: (...) -> None
        parser.add_argument(
            "-o",
            "--output",
            metavar="PATH",
            help=(
                "A file to output the {entity} to; STDOUT by default or when `-` is "
                "specified.".format(entity=entity)
            ),
        )

    @staticmethod
    def is_stdout(options):
        # type: (Namespace) -> bool
        # `-` is the conventional spelling for STDOUT; an unset/empty value
        # also means STDOUT.
        return options.output == "-" or not options.output

    @classmethod
    @contextmanager
    def output(
        cls,
        options,  # type: Namespace
        binary=False,  # type: bool
    ):
        # type: (...) -> Iterator[IO]
        if cls.is_stdout(options):
            # Use sys.stdout.buffer for binary output when available (some
            # embedded/redirected interpreters expose no buffer attribute).
            stdout = getattr(sys.stdout, "buffer", sys.stdout) if binary else sys.stdout
            yield stdout
        else:
            with safe_open(options.output, mode="wb" if binary else "w") as out:
                yield out
class JsonMixin(object):
    """Mixin adding a shared indent option and a JSON dump helper."""

    @staticmethod
    def add_json_options(
        parser,  # type: _ActionsContainer
        entity,  # type: str
        include_switch=True,  # type: bool
    ):
        """Register the option controlling pretty-printing of `entity` json."""
        if include_switch:
            flags = ("-i", "--indent")
        else:
            flags = ("--indent",)
        help_text = "Pretty-print {entity} json with the given indent.".format(entity=entity)
        parser.add_argument(*flags, type=int, default=None, help=help_text)

    @staticmethod
    def dump_json(
        options,  # type: Namespace
        data,  # type: Dict[str, Any]
        out,  # type: IO
        **json_dump_kwargs  # type: Any
    ):
        """Serialize `data` to `out`, honoring the parsed indent option."""
        json.dump(data, out, indent=options.indent, **json_dump_kwargs)
def register_global_arguments(
    parser,  # type: _ActionsContainer
    include_verbosity=True,  # type: bool
):
    # type: (...) -> None
    """Register Pex global environment configuration options with the given parser.
    :param parser: The parser to register global options with.
    :param include_verbosity: Whether to include the verbosity option `-v`.
    """
    group = parser.add_argument_group(title="Global options")
    if include_verbosity:
        group.add_argument(
            "-v",
            dest="verbosity",
            action="count",
            default=0,
            help="Turn on logging verbosity, may be specified multiple times.",
        )
    group.add_argument(
        "--emit-warnings",
        "--no-emit-warnings",
        dest="emit_warnings",
        action=HandleBoolAction,
        default=True,
        help=(
            "Emit runtime UserWarnings on stderr. If false, only emit them when PEX_VERBOSE "
            "is set."
        ),
    )
    group.add_argument(
        "--pex-root",
        dest="pex_root",
        default=None,
        help=(
            "Specify the pex root used in this invocation of pex "
            "(if unspecified, uses {}).".format(ENV.PEX_ROOT)
        ),
    )
    group.add_argument(
        "--disable-cache",
        dest="disable_cache",
        default=False,
        action="store_true",
        help="Disable caching in the pex tool entirely.",
    )
    group.add_argument(
        "--cache-dir",
        dest="cache_dir",
        default=None,
        help=(
            "DEPRECATED: Use --pex-root instead. The local cache directory to use for speeding up "
            "requirement lookups."
        ),
    )
    # Note: the default is computed once, at registration time, from the
    # environment in effect when this function runs.
    group.add_argument(
        "--tmpdir",
        dest="tmpdir",
        default=tempfile.gettempdir(),
        help="Specify the temporary directory Pex and its subprocesses should use.",
    )
    group.add_argument(
        "--rcfile",
        dest="rc_file",
        default=None,
        help=(
            "An additional path to a pexrc file to read during configuration parsing, in addition "
            "to reading `/etc/pexrc` and `~/.pexrc`. If `PEX_IGNORE_RCFILES=true`, then all rc "
            "files will be ignored."
        ),
    )
class GlobalConfigurationError(Exception):
    """Indicates an error processing global options (e.g. an invalid --tmpdir)."""
@contextmanager
def _configured_env(options):
    # type: (Namespace) -> Iterator[None]
    # Overlay pexrc-derived variables onto the environment for the duration
    # of the block, unless rc files are globally ignored via
    # PEX_IGNORE_RCFILES (an explicit --rcfile always wins).
    if options.rc_file or not ENV.PEX_IGNORE_RCFILES:
        with ENV.patch(**Variables(rc=options.rc_file).copy()):
            yield
    else:
        yield
@contextmanager
def global_environment(options):
    # type: (Namespace) -> Iterator[Dict[str, str]]
    """Configures the Pex global environment.

    This includes configuration of basic Pex infrastructure like logging, warnings and the
    `PEX_ROOT` to use.

    :param options: The global options registered by `register_global_arguments`.
    :yields: The configured global environment.
    :raises: :class:`GlobalConfigurationError` if invalid global option values were specified.
    """
    if not hasattr(options, "rc_file"):
        # We don't register the global args on the root command (but do on every subcommand).
        # So if the user runs just `pex` with no subcommand we must not attempt to use those
        # global args, including rc_file, which we check for here as a representative of the
        # global args.
        # Note that we can't use command_type here because the legacy command line parser in
        # pex/bin/pex.py uses this function as well, and it doesn't set command_type.
        with ENV.patch() as env:
            yield env
        # A @contextmanager generator must yield exactly once per use; without this return we
        # would fall through below and yield a second time, raising RuntimeError on exit.
        return

    with _configured_env(options):
        # An explicit PEX_VERBOSE setting wins over any -v flags.
        verbosity = Variables.PEX_VERBOSE.strip_default(ENV)
        if verbosity is None:
            verbosity = getattr(options, "verbosity", 0)

        # --no-emit-warnings wins outright; otherwise PEX_EMIT_WARNINGS (when set) overrides
        # the default of True.
        emit_warnings = True
        if not options.emit_warnings:
            emit_warnings = False
        if emit_warnings and ENV.PEX_EMIT_WARNINGS is not None:
            emit_warnings = ENV.PEX_EMIT_WARNINGS

        with ENV.patch(PEX_VERBOSE=str(verbosity), PEX_EMIT_WARNINGS=str(emit_warnings)):
            pex_warnings.configure_warnings(env=ENV)

            # Ensure the TMPDIR is an absolute path (So subprocesses that change CWD can find it)
            # and that it exists.
            tmpdir = os.path.realpath(options.tmpdir)
            if not os.path.exists(tmpdir):
                raise GlobalConfigurationError(
                    "The specified --tmpdir does not exist: {}".format(tmpdir)
                )
            if not os.path.isdir(tmpdir):
                raise GlobalConfigurationError(
                    "The specified --tmpdir is not a directory: {}".format(tmpdir)
                )
            tempfile.tempdir = os.environ["TMPDIR"] = tmpdir

            if options.cache_dir:
                pex_warnings.warn("The --cache-dir option is deprecated, use --pex-root instead.")
                if options.pex_root and options.cache_dir != options.pex_root:
                    raise GlobalConfigurationError(
                        "Both --cache-dir and --pex-root were passed with conflicting values. "
                        "Just set --pex-root."
                    )

            if options.disable_cache:

                def warn_ignore_pex_root(set_via):
                    pex_warnings.warn(
                        "The pex root has been set via {via} but --disable-cache is also set. "
                        "Ignoring {via} and disabling caches.".format(via=set_via)
                    )

                if options.cache_dir:
                    warn_ignore_pex_root("--cache-dir")
                elif options.pex_root:
                    warn_ignore_pex_root("--pex-root")
                elif os.environ.get("PEX_ROOT"):
                    warn_ignore_pex_root("PEX_ROOT")

                # Caching disabled: point PEX_ROOT at a throwaway temporary directory.
                pex_root = safe_mkdtemp()
            else:
                pex_root = options.cache_dir or options.pex_root or ENV.PEX_ROOT

            with ENV.patch(PEX_ROOT=pex_root, TMPDIR=tmpdir) as env:
                yield env
if TYPE_CHECKING:
    # Concrete Command subclass produced by a Main[_C] instance; only needed
    # for static type checking.
    _C = TypeVar("_C", bound=Command)
# Base class for a Pex CLI entry point. Intentionally no class docstring:
# subclasses supply theirs as the default program description via the
# ``description or self.__doc__`` fallback below.
class Main(Generic["_C"]):
    def __init__(
        self,
        command_types,  # type: Iterable[Type[_C]]
        description=None,  # type: Optional[str]
        subparsers_description=None,  # type: Optional[str]
        prog=None,  # type: Optional[str]
    ):
        # type: (...) -> None
        self._prog = prog
        # Fall back to the (subclass) docstring when no description is given.
        self._description = description or self.__doc__
        self._subparsers_description = subparsers_description
        self._command_types = command_types

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        # Hook for subclasses to register root-level (non-subcommand) arguments.
        pass

    @contextmanager
    def parsed_command(self, args=None):
        # type: (Optional[Sequence[str]]) -> Iterator[_C]
        """Parse the command line and yield the selected, constructed command.

        Builds the root ArgumentParser, registers one subparser per configured
        command type, then yields an instance of the chosen Command inside the
        configured global environment.
        """
        logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)

        # By default, let argparse derive prog from sys.argv[0].
        prog = self._prog
        if os.path.basename(sys.argv[0]) == "__main__.py":
            # Invoked via `python -m pkg`; reconstruct a meaningful prog string.
            prog = "{python} -m {module}".format(
                python=sys.executable, module=".".join(type(self).__module__.split(".")[:-1])
            )

        parser = ArgumentParser(
            prog=prog,
            formatter_class=ArgumentDefaultsHelpFormatter,
            description=self._description,
        )
        parser.add_argument("-V", "--version", action="version", version=__version__)
        # With no subcommand selected, the default action is to print root help.
        parser.set_defaults(command_type=functools.partial(Command.show_help, parser))
        self.add_arguments(parser)
        if self._command_types:
            subparsers = parser.add_subparsers(description=self._subparsers_description)
            for command_type in self._command_types:
                name = command_type.name()
                description = command_type.description()
                # First docstring line doubles as the short help blurb.
                help_text = description.splitlines()[0] if description else None
                command_parser = subparsers.add_parser(
                    name,
                    formatter_class=ArgumentDefaultsHelpFormatter,
                    help=help_text,
                    description=description,
                )
                command_type.add_arguments(command_parser)
                command_parser.set_defaults(command_type=command_type)

        options = parser.parse_args(args=args)
        with global_environment(options):
            command_type = cast("Type[_C]", options.command_type)
            yield command_type(options)
| [
"noreply@github.com"
] | asherf.noreply@github.com |
f369d5667a7f0255f82296fbbee935075af34b7e | 7b5ec17918cb2328d53bf2edd876c153af26b38d | /scripts/ingestors/rwis/process_idot_awos.py | c29e696ecbcafd40fb720a5612021a2b033ca115 | [
"MIT"
] | permissive | Xawwell/iem | 78e62f749661f3ba292327f82acf4ef0f0c8d55b | 88177cc096b9a66d1bd51633fea448585b5e6573 | refs/heads/master | 2020-09-06T09:03:54.174221 | 2019-11-08T03:23:44 | 2019-11-08T03:23:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,063 | py | """Process AWOS METAR file"""
from __future__ import print_function
import re
import sys
import os
import datetime
import ftplib
import subprocess
import tempfile
from io import StringIO
from pyiem import util
INCOMING = "/mesonet/data/incoming"
def fetch_files():
    """Download the IDOT AWOS METAR file over FTP.

    Returns:
        str: the local filename the METAR data was written to.
    """
    props = util.get_properties()
    fn = "%s/iaawos_metar.txt" % (INCOMING,)
    try:
        ftp = ftplib.FTP("165.206.203.34")
    except TimeoutError:
        print("process_idot_awos FTP server timeout error")
        sys.exit()
    try:
        ftp.login("rwis", props["rwis_ftp_password"])
        # Use a context manager so the local file handle is always closed
        # (the previous open(fn, "wb").write form leaked the handle).
        with open(fn, "wb") as fh:
            ftp.retrbinary("RETR METAR.txt", fh.write)
    finally:
        # Close the control connection even if login/transfer fails.
        ftp.close()
    return fn
def main():
    """Go Main.

    Fetch the AWOS METAR file, repackage it as a WMO-style text product
    (SAUS00 KISU) and hand it to LDM via pqinsert.
    """
    fn = fetch_files()
    utc = datetime.datetime.utcnow().strftime("%Y%m%d%H%M")
    data = {}
    # Sometimes, the file gets gobbled it seems
    with open(fn, "rb") as fh:  # context manager: the handle was previously leaked
        for line in fh:
            line = line.decode("utf-8", "ignore")
            match = re.match("METAR K(?P<id>[A-Z1-9]{3})", line)
            if not match:
                continue
            # Keep only the latest METAR per station id.
            data[match.groupdict()["id"]] = line
    sio = StringIO()
    sio.write("\001\r\r\n")  # product start-of-text control char
    sio.write(
        ("SAUS00 KISU %s\r\r\n")
        % (datetime.datetime.utcnow().strftime("%d%H%M"),)
    )
    sio.write("METAR\r\r\n")
    for sid in data:
        sio.write("%s=\r\r\n" % (data[sid].strip().replace("METAR ", ""),))
    sio.write("\003")  # end-of-text control char
    sio.seek(0)
    # pqinsert reads from a real file, so stage the product in a temp file.
    (tmpfd, tmpname) = tempfile.mkstemp()
    os.write(tmpfd, sio.getvalue().encode("utf-8"))
    os.close(tmpfd)
    proc = subprocess.Popen(
        (
            "/home/ldm/bin/pqinsert -i -p 'data c %s "
            "LOCDSMMETAR.dat LOCDSMMETAR.dat txt' %s"
        )
        % (utc, tmpname),
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    (stdout, stderr) = proc.communicate()
    os.remove(tmpname)
    if stdout != b"" or stderr is not None:
        print("process_idot_awos\nstdout: %s\nstderr: %s" % (stdout, stderr))
if __name__ == "__main__":
main()
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
7832dae5c8c8d4af7c3fe58a5174a5722ceeaa14 | 00ca311d3dccb4ffe4d2e1cd88c98cab699d26a0 | /hk.py | e91e776e17f03d58ddd559c02a0a0df1abde908c | [] | no_license | y1212/myslite | e13bfb11a4189a75b1bd5d062b8cb48ef70de1c8 | 74e35bd04f9ac8c0a7fd1a3eb68ee3311f6adff9 | refs/heads/master | 2020-12-13T06:05:52.774554 | 2020-01-16T11:14:24 | 2020-01-16T11:14:24 | 234,330,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,428 | py | #setence="ls , aa, cnm, sb"
# var=input("please input:")
# if var in setence:
#     setence=setence.replace("cnm","*")
#     print(setence)
#
# tuple=(1,2,3)
# list1=list(tuple[1])
# print(list1,type(list1))
# c,s =" ", "this is my house"
# d=s.split()
# d.reverse()
# for e in d:
#     c=c+e+" "
# print(c.lstrip())
# listB, listC = [], ["string", "tuple", "list", (1, 2, 3, 4, 5), [6, 7]]
# for element in listC:
#     if type(element) not in (int, float, str, bool):
#         for e in element:
#             listB.append(e)
#     else:
#         listB.append(element)
# print(listB)
# files= open(file="story/tt.txt",encoding="utf8")
# content=files.read()
# print(content)
# files.close()

# 1. Brute-force search for the smallest multiple of 7 that also satisfies
#    a%2==1, a%3==2, a%4==3, a%5==4 and a%6==5.
a=7
while True:
    if a % 2 ==1 and a % 3 == 2 and a % 4 == 3 and a % 5 == 4 and a % 6 == 5 and a % 7 == 0:
        print(a)
        break
    else:
        a += 7

# 2. Using a while loop, reverse the sentence "hello Tony, this my sister"
#    into "sister my this, Tony hello" (comma-separated parts reversed, and
#    the words within each part reversed too).
s = 'hello Tony,this my sister'
list1 = s.split(',')
list_target = []
i = len(list1) - 1
while i >= 0:
    list0 = list1[i].split(' ')
    a = ' '.join(list0[::-1])
    list_target.append(a)
    i -= 1
str_target = ','.join(list_target)
print(str_target)
# A,B=[],"hello Tony, this my sister"
# list1=B.split(',')
# while len(list1)-1>=0:
#     list0=list1[len(list1)-1].split()
#     list1[]

# 3. Flatten ["string", "tuple", "list", (1, 2, 3, 4, 5), [6, 7]] into
#    ["string", "tuple", "list", 1, 2, 3, 4, 5, 6, 7].
listB, listC = [], ["string", "tuple", "list", (1, 2, 3, 4, 5), [6, 7]]
for element in listC:
    if type(element) not in (int, float, str, bool):
        for e in element:
            listB.append(e)
    else:
        listB.append(element)
print(listB)

4.# Sort [23, 12, 15, 11, 29, 24, 57, 21, 80, 99, 45]; way one: a for loop; way two: the sorted() function.
listA = [23, 12, 15, 11, 29, 24, 57, 21, 80, 99, 45]
s=sorted(listA)
print(s)
# for-loop version (simple exchange sort)
for i in range(len(listA)):
    for j in range(len(listA)):
        if listA[i]<listA[j]:
            listA[i],listA[j]=listA[j],listA[i]
print(listA)

# 5. From the tuple ("string", "world", 1, 2, 3, 4, 6, 9, 10), collect the
#    numeric elements into a list.
listA, tupleA = [], ("string", "world", 1, 2, 3, 4, 6, 9, 10)
for element in tupleA:
    if type(element) in (int, float):
        listA.append(element)
print(listA,type(listA))

# 6. Collect every IP address from access.log into the dict `ips`
#    (IP -> occurrence count), then order entries by that count.
with open("story/access.log","r",encoding="utf8") as log:
    ips = {}
    while True:
        lines = log.readline()
        if not lines:
            break
        if lines.split()[0] in ips.keys():
            ips[lines.split()[0]] += 1
        else:
            ips[lines.split()[0]] = 1
    print(ips)
    ip_sort = []
    for item in ips.items():
        ip_sort.append(item)
    # Bubble-sort the (ip, count) pairs by count, highest first.
    for i in range(len(ip_sort)):
        for j in range(len(ip_sort) - 1):
            if ip_sort[j][1] < ip_sort[j + 1][1]:
                ip_sort[j], ip_sort[j + 1] = ip_sort[j + 1], ip_sort[j]
    print(sorted(ip_sort))  # ascending (tuple order: sorted by IP string first)
    print(ip_sort)  # descending by count

# 7. Extract the digits from "hello7723worl45d78" into a tuple.
str1 = "hello7723worl45d78"
str2 = "0123456789"
c = " "
for i in str1:
    if i in str2:
        c= c + i
print(tuple(c))
| [
"59354787+y1212@users.noreply.github.com"
] | 59354787+y1212@users.noreply.github.com |
1fe976ca0106beea3c9fdf4f4b31055726c1d3f6 | 61cefb212b04fdcd3ab3e36374104f7eb9aeab75 | /gym_rover/state/agent.py | cd7a3be00fb7e82f143279bf6667f2d83e2934bf | [
"MIT"
] | permissive | eklinkhammer/gym-rover | 8df6297218c1a22c42273861e68aeb342d3a63a4 | 38ed96f6f7ea4e5fed4432cb3967d2317caf145e | refs/heads/master | 2021-01-20T02:25:14.158595 | 2018-03-20T01:24:05 | 2018-03-20T01:24:05 | 89,403,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,057 | py | import sys
import random
import numpy as np
class Agent(object):
    """A rover that can be driven either with discrete compass commands or
    with continuous 2D displacement vectors.
    """

    def __init__(self, loc, uuid=None):
        """Create an agent at a 2D position with a unique identifier.

        Args:
            loc: length-2 numpy array (or any indexable of length >= 2)
                giving the starting position.
            uuid (int): unique identifier; when None a random integer is
                drawn (collisions are possible but unlikely).
        """
        if type(loc) is not np.ndarray:
            loc = np.array([loc[0], loc[1]])

        self._loc = loc
        self._uuid = uuid if uuid is not None else random.randint(0, sys.maxsize)

        # Raw (un-normalized) step vector for each discrete command code.
        self.moves = {
            0: np.array([0, 0]),     # stay still
            1: np.array([1, 0]),     # East
            2: np.array([1, 1]),     # North-East
            3: np.array([0, 1]),     # North
            4: np.array([-1, 1]),    # North-West
            5: np.array([-1, 0]),    # West
            6: np.array([-1, -1]),   # South-West
            7: np.array([0, -1]),    # South
            8: np.array([1, -1]),    # South-East
        }

    def discrete_move(self, command):
        """Step one unit (at most) in the commanded compass direction.

        Args:
            command (int): key into ``self.moves`` (0-8).
        """
        step = self._normalize(self.moves[command])
        self._loc = self._loc + step

    def cont_move(self, command):
        """Displace the agent by an arbitrary 2D vector."""
        self._loc = self._loc + command

    def get_uuid(self):
        """Return the agent's unique identifier."""
        return self._uuid

    def get_loc(self):
        """Return the agent's position as a numpy array."""
        return self._loc

    def _normalize(self, v):
        """Return *v* scaled to unit length; zero vectors pass through."""
        length = np.linalg.norm(v)
        return v if length == 0 else v / length
| [
"eklinkhammer@gmail.com"
] | eklinkhammer@gmail.com |
d9d53fa77c4f2dfbb4ae777a99cd4e7040f3318e | 7842b9dfa31c533e753763a63dd6643f66ca7d43 | /courses/models.py | d09519e3df02b7839605a043e067bf5ef53d212c | [] | no_license | YOKxPIE/YOKxPIE.github.io | 83dfc46167a74860824b04e6b8d752b8fd4dd77c | 633af49a4f65a07d574fc7c40a836a15b4a2016d | refs/heads/main | 2023-08-17T00:51:30.654217 | 2021-10-02T10:02:20 | 2021-10-02T10:02:20 | 403,917,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Course(models.Model):
    """A course offering for a given semester and academic year."""

    c_code = models.CharField(max_length=5)  # short course code
    c_name = models.CharField(max_length=150, null=True)  # full course name
    semester = models.IntegerField()
    a_year = models.IntegerField()  # academic year
    count_stu = models.IntegerField(default=0)  # number of students enrolled so far
    max_stu = models.PositiveIntegerField(default=3)  # enrollment cap
    status = models.BooleanField(default=True)  # NOTE(review): presumably "open for enrollment" -- confirm

    def __str__(self):
        return f"{self.c_code} {self.semester}/{self.a_year}"

    def is_seat_available(self):
        """Return True while the enrolled count is below the cap."""
        return self.count_stu < self.max_stu
class Student(models.Model):
    """A student profile linked one-to-one with an auth User."""

    user = models.OneToOneField(User, null=True, on_delete=models.CASCADE)
    # NOTE(review): field names are not snake_case; renaming would require a
    # schema migration, so they are documented here instead.
    First_name = models.CharField(max_length=100, null=True)
    Last_name = models.CharField(max_length=100, null=True)
    email = models.CharField(max_length=200, null=True)
    student_id = models.CharField(max_length=10, null=True)

    def __str__(self):
        return f"{self.student_id}: {self.First_name} {self.Last_name}"
class Enroll(models.Model):
    """Join record linking a Student to a Course, stamped at creation time."""

    student = models.ForeignKey(Student, null=True, on_delete=models.CASCADE)
    course = models.ForeignKey(Course, null=True, on_delete=models.CASCADE)
    date_created = models.DateTimeField(auto_now_add=True, null=True)

    def __str__(self):
        return f"{self.student} enroll {self.course}"
"6210530017@student.tu.ac.th"
] | 6210530017@student.tu.ac.th |
c3f0599a81e8b53fa9132efeb23e2ef17af55d83 | 9b98b2f357a8b3311d51b74089bd7b6b2eb107ab | /Python Codes/teapot_optimized.py | d1ac296ecb4a99e81ac97febb5ecc67c757954ed | [] | no_license | shivam-grover/Eyantra-Thirsty_Crow-Team_ID-972 | 65010b9fca267b903ccc6bb29b3878adea205dfd | a6f0f3ec5a9cb5e3d5f8d694bba53137a23e8b4e | refs/heads/master | 2021-10-27T12:01:41.261582 | 2019-04-17T03:49:21 | 2019-04-17T03:49:21 | 157,661,451 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,792 | py | """
**************************************************************************
* E-Yantra Robotics Competition
* ================================
* This software is intended to check version compatiability of open source software
* Theme: Thirsty Crow
* MODULE: Task1.1
* Filename: detect.py
* Version: 1.0.0
* Date: October 31, 2018
*
* Author: e-Yantra Project, Department of Computer Science
* and Engineering, Indian Institute of Technology Bombay.
*
* Software released under Creative Commons CC BY-NC-SA
*
* For legal information refer to:
* http://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
*
*
* This software is made available on an “AS IS WHERE IS BASIS”.
* Licensee/end user indemnifies and will keep e-Yantra indemnified from
* any and all claim(s) that emanate from the use of the Software or
* breach of the terms of this agreement.
*
* e-Yantra - An MHRD project under National Mission on Education using
* ICT(NMEICT)
*
**************************************************************************
"""
import numpy as np
import cv2
import cv2.aruco as aruco
import math
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from PIL import Image
import pygame
texture_object = None
texture_background = None
camera_matrix = None
dist_coeff = None
cap = cv2.VideoCapture(1)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
INVERSE_MATRIX = np.array([[1.0, 1.0, 1.0, 1.0],
[-1.0, -1.0, -1.0, -1.0],
[-1.0, -1.0, -1.0, -1.0],
[1.0, 1.0, 1.0, 1.0]])
################## Define Utility Functions Here #######################
"""
Function Name : getCameraMatrix()
Input: None
Output: camera_matrix, dist_coeff
Purpose: Loads the camera calibration file provided and returns the camera and
distortion matrix saved in the calibration file.
"""
def getCameraMatrix():
    """Load the camera intrinsics and distortion coefficients.

    Reads 'Camera.npz' (keys: mtx, dist, rvecs, tvecs) and stores the first
    two arrays in the module-level camera_matrix / dist_coeff globals.
    """
    global camera_matrix, dist_coeff
    with np.load('Camera.npz') as X:
        camera_matrix, dist_coeff, _, _ = [X[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]
########################################################################
############# Main Function and Initialisations ########################
"""
Function Name : main()
Input: None
Output: None
Purpose: Initialises OpenGL window and callback functions. Then starts the event
processing loop.
"""
def main():
    """Create the OpenGL/GLUT window and start the event loop.

    drawGLScene is registered as both display and idle callback so the scene
    is continuously redrawn from the webcam feed.
    """
    glutInit()
    getCameraMatrix()
    glutInitWindowSize(640, 480)
    glutInitWindowPosition(625, 100)
    glutInitDisplayMode(GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE)
    window_id = glutCreateWindow("OpenGL")
    init_gl()
    glutDisplayFunc(drawGLScene)
    glutIdleFunc(drawGLScene)
    glutReshapeFunc(resize)
    glutMainLoop()
"""
Function Name : init_gl()
Input: None
Output: None
Purpose: Initialises various parameters related to OpenGL scene.
"""
def init_gl():
    """Configure depth testing, shading and lighting, and allocate the two
    texture ids used for the background frame and the object texture."""
    global texture_object, texture_background
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glClearDepth(1.0)
    glDepthFunc(GL_LESS)
    glEnable(GL_DEPTH_TEST)
    glShadeModel(GL_SMOOTH)
    glMatrixMode(GL_MODELVIEW)
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    texture_background = glGenTextures(1)
    texture_object = glGenTextures(1)
"""
Function Name : resize()
Input: None
Output: None
Purpose: Initialises the projection matrix of OpenGL scene
"""
def resize(w, h):
    """GLUT reshape callback: refresh the viewport and the perspective
    projection for the new window size."""
    aspect = w / (h * 1.0)
    glMatrixMode(GL_PROJECTION)
    glViewport(0, 0, w, h)
    gluPerspective(45, aspect, 0.1, 100.0)
"""
Function Name : drawGLScene()
Input: None
Output: None
Purpose: It is the main callback function which is called again and
again by the event processing loop. In this loop, the webcam frame
is received and set as background for OpenGL scene. ArUco marker is
detected in the webcam frame and 3D model is overlayed on the marker
by calling the overlay() function.
"""
def drawGLScene():
    """Main GLUT display/idle callback.

    Grabs a webcam frame, draws it as the scene background, detects ArUco
    markers in it, and overlays a textured teapot on each known marker id.
    """
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    ar_list = []
    ret, frame = cap.read()
    if ret == True:
        draw_background(frame)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        ar_list = detect_markers(frame)
        if ar_list is not None:
            # Each marker id is bound to its own texture file.
            for i in ar_list:
                if i[0] == 0:
                    overlay(frame, ar_list, i[0], "texture_1.png")
                if i[0] == 2:
                    overlay(frame, ar_list, i[0], "texture_2.png")
                if i[0] == 1:
                    overlay(frame, ar_list, i[0], "texture_3.png")
                if i[0] == 6:
                    overlay(frame, ar_list, i[0], "texture_4.png")
        # NOTE(review): imshow/waitKey assumed to run only on a valid frame;
        # confirm original nesting against the upstream task template.
        cv2.imshow('frame', frame)
        cv2.waitKey(1)
    glutSwapBuffers()
########################################################################
######################## Aruco Detection Function ######################
"""
Function Name : detect_markers()
Input: img (numpy array)
Output: aruco list in the form [(aruco_id_1, centre_1, rvec_1, tvec_1),(aruco_id_2,
centre_2, rvec_2, tvec_2), ()....]
Purpose: This function takes the image in form of a numpy array, camera_matrix and
distortion matrix as input and detects ArUco markers in the image. For each
ArUco marker detected in image, paramters such as ID, centre coord, rvec
and tvec are calculated and stored in a list in a prescribed format. The list
is returned as output for the function
"""
def detect_markers(img):
    """Detect 5x5 ArUco markers in *img* and estimate their pose.

    Returns a list of tuples (marker_id, (cx, cy), rvec, tvec) where (cx, cy)
    is the pixel centre of the marker (mean of its four corners) and
    rvec/tvec come from estimatePoseSingleMarkers.
    """
    aruco_list = []
    markerLength = 0.127  # side length passed to the pose estimator (metres, presumably -- confirm)
    aruco_list = []
    aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
    parameters = aruco.DetectorParameters_create()
    # lists of ids and the corners beloning to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(img, aruco_dict, parameters=parameters)
    # print(corners)
    cx = []
    cy = []
    if ids is not None and corners is not None:
        for x in range(ids.size):
            # Estimate pose of each marker and return the values rvec and tvec
            # --- distinct from the camera calibration coefficients.
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners[x], markerLength, camera_matrix,
                                                            dist_coeff)
            ids = ids.astype('int64')
            aruco.drawDetectedMarkers(img, corners)  # Draw a square around the markers
            # Marker centre = average of the four corner coordinates.
            cx.append(int(
                (corners[x][0][0][0] + corners[x][0][1][0] + corners[x][0][2][0] + corners[x][0][3][0]) / 4))
            cy.append(int(
                (corners[x][0][0][1] + corners[x][0][1][1] + corners[x][0][2][1] + corners[x][0][3][1]) / 4))
            tup = (ids[x, 0], (cx[x], cy[x]), rvec, tvec)
            aruco_list.append(tup)
    #
    return aruco_list
########################################################################
################# This is where the magic happens !! ###################
############### Complete these functions as directed ##################
"""
Function Name : draw_background()
Input: img (numpy array)
Output: None
Purpose: Takes image as input and converts it into an OpenGL texture. That
OpenGL texture is then set as background of the OpenGL scene
"""
def draw_background(img):
    """Upload the webcam frame as an OpenGL texture and draw it on a quad
    placed behind the scene so it acts as the background."""
    glEnable(GL_TEXTURE_2D)
    bg_image = cv2.flip(img, 0)  # OpenGL's texture origin is bottom-left
    bg_image = Image.fromarray(bg_image)
    bg_image = bg_image.tobytes("raw", "RGB", 0, -1)
    # create background texture
    glBindTexture(GL_TEXTURE_2D, texture_background)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, bg_image)
    glPushMatrix()
    i = 30.0  # distance of the background quad from the camera
    # draw background; the quad keeps an 8:6 (4:3) aspect, and the i/14 scale
    # presumably sizes it to fill the 45-degree view at depth i -- confirm.
    glTranslatef(0.0, 0.0, -i)
    glBegin(GL_QUADS)
    glTexCoord2f(0.0, 1.0)
    glVertex3f(-(8*i/14), -(6*i/14), 0.0)
    glTexCoord2f(1.0, 1.0)
    glVertex3f((8*i/14), -(6*i/14), 0.0)
    glTexCoord2f(1.0, 0.0)
    glVertex3f((8*i/14), (6*i/14), 0.0)
    glTexCoord2f(0.0, 0.0)
    glVertex3f(-(8*i/14), (6*i/14), 0.0)
    glEnd()
    glPopMatrix()
    return None
"""
Function Name : init_object_texture()
Input: Image file path
Output: None
Purpose: Takes the filepath of a texture file as input and converts it into OpenGL
texture. The texture is then applied to the next object rendered in the OpenGL
scene.
"""
def init_object_texture(image_filepath):
    """Load *image_filepath* with OpenCV and bind it to texture_object so the
    next object rendered is drawn with this texture.

    NOTE(review): the image is packed with mode "BGRX" but uploaded as
    GL_RGBA; the channel order looks suspect -- confirm against rendered
    output.
    """
    tex = cv2.imread(image_filepath)
    glEnable(GL_TEXTURE_2D)
    bg_image = cv2.flip(tex, 0)  # flip to match OpenGL's bottom-left origin
    bg_image = Image.fromarray(bg_image)
    ix = bg_image.size[0]
    iy = bg_image.size[1]
    bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)
    # create background texture
    glBindTexture(GL_TEXTURE_2D, texture_object)
    # glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glPushMatrix()
    glTranslatef(0.0, 0.0, 10.0)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    # Draw textured Quads
    # glBegin(GL_QUADS)
    # glTexCoord2f(0.0, 0.0)
    # glVertex3f(0.0, 0.0, 0.0)
    # glTexCoord2f(1.0, 0.0)
    # glVertex3f(width, 0.0, 0.0)
    # glTexCoord2f(1.0, 1.0)
    # glVertex3f(width, height, 0.0)
    # glTexCoord2f(0.0, 1.0)
    # glVertex3f(0.0, height, 0.0)
    # glEnd()
    glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)
    glPopMatrix()
    return None
"""
Function Name : overlay()
Input: img (numpy array), aruco_list, aruco_id, texture_file (filepath of texture file)
Output: None
Purpose: Receives the ArUco information as input and overlays the 3D Model of a teapot
on the ArUco marker. That ArUco information is used to
calculate the rotation matrix and subsequently the view matrix. Then that view matrix
is loaded as current matrix and the 3D model is rendered.
Parts of this code are already completed, you just need to fill in the blanks. You may
however add your own code in this function.
"""
def overlay(img, ar_list, ar_id, texture_file):
    """Overlay a textured teapot on the ArUco marker with id *ar_id*.

    Looks up the marker's rvec/tvec in *ar_list*, builds an OpenGL view
    matrix from them, and renders a solid teapot with *texture_file* applied.
    Also labels the marker id on the OpenCV frame.
    """
    for x in ar_list:
        if ar_id == x[0]:
            centre, rvec, tvecs = x[1], x[2], x[3]
            # Rotation vector -> 3x3 rotation matrix.
            rmtx = cv2.Rodrigues(rvec)[0]
            # Translation offset applied before building the view matrix; the
            # -0.127 factors are in marker-side-length units (empirical tuning,
            # presumably -- confirm).
            offset = [[[-0.127*19/8, -0.127*2, 0]]]
            tvecs = tvecs - offset
            font = cv2.FONT_HERSHEY_SIMPLEX  # font for displaying text (below)
            cv2.putText(img, "Id: " + str(ar_id), centre, font, 1, (0,255,0),2,cv2.LINE_AA)
            # Compose the model-view matrix; the per-axis scale factors on the
            # translation column are empirical tuning values.
            view_matrix = np.array([[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0][0][0]*3.5],
                                    [rmtx[1][0], rmtx[1][1], rmtx[1][2], tvecs[0][0][1]*2.5],
                                    [rmtx[2][0], rmtx[2][1], rmtx[2][2], tvecs[0][0][2]*2.3],
                                    [0.0, 0.0, 0.0, 1.0]])
            # view_matrix = np.array([[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0][0][0]*72],
            #                         [rmtx[1][0], rmtx[1][1], rmtx[1][2], tvecs[0][0][1]*72],
            #                         [rmtx[2][0], rmtx[2][1], rmtx[2][2], tvecs[0][0][2]*15],
            #                         [0.0, 0.0, 0.0, 1.0]])
            print(tvecs , texture_file)
            # Element-wise sign flip (OpenCV camera axes -> OpenGL axes), then
            # transpose into the column-major order glLoadMatrixd expects.
            view_matrix = view_matrix * INVERSE_MATRIX
            view_matrix = np.transpose(view_matrix)
            init_object_texture(texture_file)
            glPushMatrix()
            glLoadMatrixd(view_matrix)
            glutSolidTeapot(0.5)
            glPopMatrix()
########################################################################
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | shivam-grover.noreply@github.com |
3d2ea1c68fba28be456bac1b10cce2be67a89c15 | f698ce603b9eab5427c26dfc5501ae4ec8e03214 | /proj/common/serializers.py | 6b4763bb9339570bf0595cd423e0bb96422da214 | [] | no_license | chunkai-meng/Fleet | 847f31c7b8b3321ab097428453d0c8f9e1b21f77 | 9e65fdab6fbae1a691db993d12bbcd2684f5b1e9 | refs/heads/main | 2023-03-05T00:22:51.761197 | 2020-11-24T20:53:55 | 2020-11-24T20:53:55 | 312,441,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | from rest_framework import serializers
class BaseSerializerMixin(serializers.Serializer):
    """Mixin adding read-only display names of the creating/updating users."""

    # Sourced from the related user's `cn_name` attribute; never writable.
    created_name = serializers.CharField(source='created_by.cn_name', read_only=True)
    updated_name = serializers.CharField(source='updated_by.cn_name', read_only=True)
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
    """A ModelSerializer accepting an extra ``fields`` argument that limits
    which of the declared fields are serialized.
    """

    def __init__(self, *args, **kwargs):
        # Remove our extra kwarg before the base class sees it.
        requested = kwargs.pop('fields', None)

        # Instantiate the superclass normally.
        super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)

        if requested is None:
            return
        # Discard every declared field the caller did not ask for.
        for unwanted in set(self.fields) - set(requested):
            self.fields.pop(unwanted)
| [
"willcute@gmail.com"
] | willcute@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.