blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ada5076670ac2df02c7e357dcab945a4c4e346ff
|
5c7e1f689acf9e6602fcd71e9b35611e3f7b4f68
|
/flaskr/blog.py
|
477fcf8c55ed094b7e208573fec94e58ac284834
|
[] |
no_license
|
luisfdresch/flask-tutorial
|
d65da628fa6710c4a5185864dbb7181c9e8cb5c6
|
31ca9bda8d8ac253fa443d726a3c2d1838c3c1b0
|
refs/heads/main
| 2023-01-12T13:10:40.568332
| 2020-11-06T21:06:14
| 2020-11-06T21:06:14
| 310,701,254
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,653
|
py
|
from flask import Blueprint, flash, g, redirect, render_template, request, url_for
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
bp = Blueprint('blog', __name__)
@bp.route('/')
def index():
    """Show all posts, newest first, on the blog index page."""
    db = get_db()
    rows = db.execute(
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' ORDER BY created DESC'
    ).fetchall()
    return render_template('blog/index.html', posts=rows)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
    """Show the creation form; on POST, validate and insert a new post."""
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        # Title is mandatory; the body may be empty.
        error = None if title else 'Title is required'
        if error is not None:
            flash(error)
        else:
            db = get_db()
            db.execute(
                'INSERT INTO post (title, body, author_id)'
                ' VALUES (?, ?, ?)',
                (title, body, g.user['id'])
            )
            db.commit()
            return redirect(url_for('blog.index'))
    return render_template('blog/create.html')
def get_post(id, check_author=True):
    """Fetch a post by id.

    Aborts with 404 when the post does not exist, and with 403 when
    check_author is set and the post is not owned by the current user.
    """
    row = get_db().execute(
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' WHERE p.id = ?',
        (id,)
    ).fetchone()
    if row is None:
        abort(404, "Post {0} doesn't exist.".format(id))
    if check_author and row['author_id'] != g.user['id']:
        abort(403)
    return row
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
    """Edit an existing post owned by the current user."""
    post = get_post(id)
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        # Title is mandatory; the body may be empty.
        error = None if title else 'Title is required'
        if error is not None:
            flash(error)
        else:
            db = get_db()
            db.execute(
                'UPDATE post SET title = ?, body = ?'
                ' WHERE id = ?',
                (title, body, id)
            )
            db.commit()
            return redirect(url_for('blog.index'))
    return render_template('blog/update.html', post=post)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
    """Delete post *id* and return to the index.

    get_post() enforces existence (404) and ownership (403) before the
    row is removed; its return value is deliberately discarded.
    """
    get_post(id)
    db = get_db()
    db.execute('DELETE FROM post WHERE id = ?', (id,))
    db.commit()
    return redirect(url_for('blog.index'))
|
[
"66801762+luisfdresch@users.noreply.github.com"
] |
66801762+luisfdresch@users.noreply.github.com
|
a262ae2e16a1c482655881051aec3552ba5cef76
|
3e8f63c0e45de6df395f41c62889330ad1a2f839
|
/lesson10/basic/warehouse.py
|
551ee21a9ee3bdfb327fde49840ae8bccb54269e
|
[] |
no_license
|
lvshaokang/python
|
336de2ec7d532fc777bf221dece307ee6625562f
|
71807622f19922e1ea718c38c544fc77d666d274
|
refs/heads/master
| 2020-03-28T19:41:44.530856
| 2018-10-01T14:56:45
| 2018-10-01T14:56:45
| 149,000,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
class WarehouseManageSys:
    """Simple in-memory warehouse: per-item stock counts plus item lists
    grouped by a category code."""

    def __init__(self):
        # Item name -> stock count.
        self.item_detail = {"老坛酸菜": 5, "红烧牛肉": 4, "酸辣粉": 6, "拉面": 7, "老干妈": 10, "乌江": 2, "王中王": 2, "蒜肠": 12, "淀粉肠": 8}

    def get_item_list(self, item_type):
        """Return the item names for category *item_type* ("pm", "zc" or
        "xc"); unknown codes yield None."""
        catalog = {
            "pm": ["老坛酸菜", "红烧牛肉", "酸辣粉", "拉面"],
            "zc": ["老干妈", "乌江"],
            "xc": ["王中王", "蒜肠", "淀粉肠"],
        }
        return catalog.get(item_type)
|
[
"lvshaokang@hotmail.com"
] |
lvshaokang@hotmail.com
|
98273d8b2de18dfad4d203ed5449358037428885
|
5bdd9737aef29f0341676b21c2df2d371985628f
|
/object_ref_object.py
|
5325909f9582199489f4ec01585200986c4fa51a
|
[] |
no_license
|
lamontu/starter
|
23eb9ceb8f260a7380d4fb2111158f0dc06dd7cb
|
176539fc03508b78da320737eeae43b4e509dbd6
|
refs/heads/master
| 2022-09-25T22:51:16.456495
| 2022-09-10T15:13:48
| 2022-09-10T15:13:48
| 62,473,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
# -*- coding: utf-8 -*-
class from_obj(object):
    """Wrapper that stores a reference to another object in ``to_obj``.

    Used by the demo below to show that the attribute and the original
    object share the same identity (same id()).
    """
    def __init__(self, to_obj):
        self.to_obj = to_obj
# Demo: assigning an object to an attribute stores a reference, not a
# copy — id(a.to_obj) prints the same address as id(b).
print('b = [1, 2, 3]')
b = [1, 2, 3]
print('a = from_obj(b)')
a = from_obj(b)
print('id(b) >>>>')
print(hex(id(b)))
print()
print('id(a) >>>>')
print(hex(id(a)))
print()
print('id(a.to_obj) >>>>')
print(hex(id(a.to_obj)))
|
[
"lamontyu@163.com"
] |
lamontyu@163.com
|
042afc513c24332f122836a2cec49692b2f77a28
|
7a63ce94e1806a959c9c445c2e0bae95afb760c8
|
/tests/incident/test_resolve.py
|
8ccf653a5dbc4b46fd96837ef309be097512d6e1
|
[
"MIT"
] |
permissive
|
pklauke/pycamunda
|
20b54ceb4a40e836148e84912afd04d78d6ba0ec
|
3faac4037212df139d415ee1a54a6594ae5e9ac5
|
refs/heads/master
| 2023-08-18T10:23:30.503737
| 2022-04-17T18:34:40
| 2022-04-17T18:34:40
| 240,333,835
| 40
| 16
|
MIT
| 2023-09-12T13:29:08
| 2020-02-13T18:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
# -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.incident
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_resolve_params(engine_url):
    """Resolve builds the incident URL and empty query/body parameters."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    assert resolve_incident.url == engine_url + '/incident/anId'
    assert resolve_incident.query_parameters() == {}
    assert resolve_incident.body_parameters() == {}
@unittest.mock.patch('requests.Session.request')
def test_resolve_calls_requests(mock, engine_url):
    """Calling Resolve issues an HTTP request with the DELETE method."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    resolve_incident()
    assert mock.called
    assert mock.call_args[1]['method'].upper() == 'DELETE'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_resolve_raises_pycamunda_exception(engine_url):
    """requests-level failures are wrapped in PyCamundaException."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    with pytest.raises(pycamunda.PyCamundaException):
        resolve_incident()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_resolve_raises_for_status(mock, engine_url):
    """Non-OK responses are routed through _raise_for_status."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    resolve_incident()
    assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
def test_resolve_returns_none(engine_url):
    """A successful Resolve call returns None (no payload)."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    result = resolve_incident()
    assert result is None
|
[
"peter.klauke@tu-dortmund.de"
] |
peter.klauke@tu-dortmund.de
|
113af3e207e4b01797c11ec0d406ac5a136b56c2
|
801418efbd049078c8aad4cd17297f3ece571412
|
/temp/toy/python/238. Product of Array Except Self.py
|
d7da2b067439b8c2b107a462617c0fb4b8eac579
|
[] |
no_license
|
xixihaha1995/CS61B_SP19_SP20
|
2b654f0c864a80a0462fdd4b1561bdc697a8c1e2
|
7d6599596f7f49b38f1c256ece006b94555c1900
|
refs/heads/master
| 2023-01-01T18:41:48.027058
| 2020-10-29T04:50:01
| 2020-10-29T04:50:01
| 240,976,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return a list where entry i is the product of all elements of
        *nums* except nums[i]; O(n) time, no division."""
        # Forward pass: result[i] holds the product of nums[0..i-1].
        result = [1]
        prefix = 1
        for value in nums[:-1]:
            prefix *= value
            result.append(prefix)
        # Backward pass: fold in the product of nums[i+1..] for each slot.
        suffix = 1
        for idx in range(len(nums) - 1, 0, -1):
            suffix *= nums[idx]
            result[idx - 1] *= suffix
        return result
|
[
"wulicheneason@gmail.com"
] |
wulicheneason@gmail.com
|
e9ab3371c89c06a80611e79a4dffd4bb44019dfa
|
3718077f1cbbc458fdb55dd7f904baab4b493bde
|
/main.py
|
5417be7d37b58912767d73d230f893f70ce35013
|
[] |
no_license
|
SupersonicCoder18/THE-MOVIE-API-
|
7b65cf9d8caba1f44826f0b03c188c973296155b
|
3a75eda4b0d1a7caf183d518a2b615ff086efbcd
|
refs/heads/main
| 2023-02-21T21:23:01.509582
| 2021-01-25T12:24:28
| 2021-01-25T12:24:28
| 332,738,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
from flask import Flask, jsonify, request
import csv
from storage import all_movies, liked_movies, not_liked_movies, did_not_watch
from Demographic_Filtering import output
from Content_Filtering import get_recommendations
app = Flask(__name__)
@app.route("/get-movie")
def get_movie():
movie_data = {
"title": all_movies[0][19],
"poster_link": all_movies[0][27],
"release_date": all_movies[0][13] or "N/A",
"duration": all_movies[0][15],
"rating": all_movies[0][20],
"overview": all_movies[0][9],
}
return jsonify({
"data": movie_data,
"status": "Success!"
})
@app.route("/liked-movie", methods = ["POST"])
def liked_movie():
movie = all_movies[0]
all_movies = all_movies[1:]
liked_movie.append(movie)
return jsonify({
"status": "Success"
}), 201
@app.route("/unliked-movie", methods = ["POST"])
def unliked_movie():
movie = all_movies[0]
all_movies = all_movies[1:]
not_liked_movie.append(movie)
return jsonify({
"status": "Success"
}), 201
@app.route("/did-not-watch-movie", methods = ["POST"])
def did_not_watched_movie():
movie = all_movies[0]
all_movies = all_movies[1:]
did_not_watch.append(movie)
return jsonify({
"status": "Success"
}), 201
@app.route("/popular-movies")
def popular_movies():
movie_data = []
for movie in output:
_d = {
"title": movie[0],
"poster_link": movie[1],
"release_date": movie[2] or "N/A",
"duration": movie[3],
"rating": movie[4],
"overview": movie[5]
}
movie_data.append(_d)
return jsonify({
"data": movie_data,
"status": "success"
}), 200
@app.route("/recommended-movies")
def recommended_movies():
all_recommended = []
for liked_movie in liked_movies:
output = get_recommendations(liked_movie[19])
for data in output:
all_recommended.append(data)
import itertools
all_recommended.sort()
all_recommended = list(all_recommended for all_recommended,_ in itertools.groupby(all_recommended))
movie_data = []
for recommended in all_recommended:
_d = {
"title": recommended[0],
"poster_link": recommended[1],
"release_date": recommended[2] or "N/A",
"duration": recommended[3],
"rating": recommended[4],
"overview": recommended[5]
}
movie_data.append(_d)
return jsonify({
"data": movie_data,
"status": "success"
}), 200
if __name__ == "__main__":
app.run()
|
[
"noreply@github.com"
] |
noreply@github.com
|
0758d42be8c51765633d9a7297cba762e8b04a25
|
cb55abd80671e898f08e07710bd87c72ba559477
|
/backend/provider/sendgridservice.py
|
23c8e65589e43d8381fa04ceaaad61cb32086739
|
[
"MIT"
] |
permissive
|
laughinging/yaes
|
ef307a27806ebbd9e6bb0f318825b7bdf4ad25b3
|
0893f7848ee0530fa6c3bd553f89aa430f9b2f02
|
refs/heads/master
| 2020-03-18T05:14:14.684146
| 2018-05-30T12:29:55
| 2018-05-30T12:29:55
| 134,331,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,356
|
py
|
import os
import logging
import sendgrid
from set_up import sg_client
from sendgrid.helpers.mail import *
from backend.provider.provider_exceptions import *
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class SendgridMail(object):
    """Thin wrapper around the shared SendGrid client that sends
    plain-text e-mail and maps SendGrid HTTP status codes onto the
    provider exception hierarchy."""

    # Human-readable descriptions for the SendGrid v3 API status codes
    # referenced in send_mail() below.
    SENDGRID_ERROR = {
        400: "BAD REQUEST",
        401: "UNAUTHORIZED: You do not have authorization to make the request.",
        403: "FORBIDDEN",
        404: "NOT FOUND : The resource you tried to locate could not be found or does not exist.",
        405: "METHOD NOT ALLOWED",
        413: "PAYLOAD TOO LARGE: The JSON payload you have included in your request is too large.",
        415: "UNSUPPORTED MEDIA TYPE",
        429: "TOO MANY REQUESTS: The number of requests you have made exceeds SendGrid’s rate limitations",
        500: "SERVER UNAVAILABLE: An error occurred on a SendGrid server.",
        503: "SERVICE NOT AVAILABLE: The SendGrid v3 Web API is not available."
    }
    def __init__(self):
        # sg_client is the module-level client configured in set_up.
        self.client = sg_client
    def send_mail(self, **kwargs):
        """Send a plain-text e-mail through SendGrid.

        Required keyword arguments: 'sender', 'recipient', 'subject',
        'body' (a missing key raises KeyError).  Raises ClientError for
        400/403/404/405/413/415/429 and ProviderServerError for
        401/500/503.
        """
        from_email = Email(kwargs['sender'])
        to_email = Email(kwargs['recipient'])
        subject = kwargs['subject']
        content = Content("text/plain", kwargs['body'])
        mail = Mail(from_email, subject, to_email, content)
        logger.info('Attempt to send an email with sendgrid')
        try:
            response = self.client.client.mail.send.post(request_body=mail.get())
        except Exception as e:
            # NOTE(review): assumes any exception raised here carries a
            # `status_code` attribute; for other exception types this
            # handler itself raises AttributeError — confirm against the
            # sendgrid client's error types.
            # NOTE(review): status codes outside both tuples are silently
            # swallowed (no else branch) — confirm that is intended.
            if e.status_code in (400, 403, 404, 405, 413, 415, 429):
                message = "SendGrid Client Error {}: {}".format(e.status_code,
                                                                self.SENDGRID_ERROR[e.status_code])
                logger.exception(message)
                raise ClientError(message)
            elif e.status_code in (401, 500, 503):
                message = "SendGrid Server Error {}: {}".format(e.status_code,
                                                                self.SENDGRID_ERROR[e.status_code])
                logger.exception(message)
                raise ProviderServerError(message)
if __name__ == "__main__":
SendgridMail().send_mail(
sender="test@test.com",
recipient="test@test.com",
subject="test",
body="This is a test email."
)
|
[
"qianyunguo@gmail.com"
] |
qianyunguo@gmail.com
|
75d2f93063a4feaf6b869a50b0e5a88d40500e00
|
2bcf18252fa9144ece3e824834ac0e117ad0bdf3
|
/httpy/tags/0.7/tests/TestCaseHttpy.py
|
08a1fc6dd3fb6eb41284fefc3f7dc8c1602cb96c
|
[] |
no_license
|
chadwhitacre/public
|
32f65ba8e35d38c69ed4d0edd333283a239c5e1d
|
0c67fd7ec8bce1d8c56c7ff3506f31a99362b502
|
refs/heads/master
| 2021-05-10T14:32:03.016683
| 2010-05-13T18:24:20
| 2010-05-13T18:24:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,199
|
py
|
"""The idea and code for running a test._server in another thread are from the
standard library's test/test_socke._server.py.
TODO: This is out of date now that we are using asyncore (via httpy._zope._server).
"""
import asyncore
import os
import select
import socket
import threading
import time
import unittest
from httpy._zope.server.taskthreads import ThreadedTaskDispatcher
from httpy._zope.server.tests.asyncerror import AsyncoreErrorHook
from httpy.Config import Config
from httpy.Request import Request, ZopeRequest
from httpy.Server import Server
# Shared task dispatcher (shut down in stopServer) and the default
# command-line options used to configure the test Server.
td = ThreadedTaskDispatcher()
opts = [ '--mode', 'development'
       , '--sockfam', 'AF_INET'
       , '--root', 'root'
       , '--address', ':65370'
       , '--verbosity', '99'
       #, '--apps', '/' discover automatically
       ]
class TestCaseHttpy(unittest.TestCase, AsyncoreErrorHook):
    """Base TestCase for httpy tests (Python 2 era code: has_key,
    basestring, file(), backquote repr).

    Per test it scrubs HTTPY_* environment variables, rebuilds a
    temporary site tree under ./root, and — when `server` is True in a
    subclass — runs a Server instance inside a background asyncore loop.
    """
    # unittest.TestCase hooks
    # =======================
    want_config = False
    def setUp(self):
        """Scrub the env, rebuild the site tree, optionally start a server."""
        self.scrubenv()
        # [re]build a temporary website tree in ./root
        self.removeTestSite()
        self.buildTestSite()
        if self.server:
            self.startServer()
        if self.want_config:
            self.config = Config()
    def tearDown(self):
        """Stop the server (if any), remove the site tree, restore the env."""
        if self.server:
            self.stopServer()
        self.removeTestSite()
        self.restoreenv()
    # server support
    # ==============
    server = False # Override to True if your subclass needs a server
    def startServer(self):
        """Start a Server on port 65370 and pump asyncore in a daemon thread."""
        if len(asyncore.socket_map) != 1:
            # Let sockets die off.
            # TODO tests should be more careful to clear the socket map.
            asyncore.poll(0.1)
        self.orig_map_size = len(asyncore.socket_map)
        #self.hook_asyncore_error()
        config = Config(opts)
        self._server = Server(config, threads=4)
        self._server.accept_connections()
        self.port = self._server.socket.getsockname()[1]
        self.run_loop = 1
        self.counter = 0
        self.thread_started = threading.Event()
        self.thread = threading.Thread(target=self.loop)
        self.thread.setDaemon(True)
        self.thread.start()
        self.thread_started.wait(10.0)
        self.assert_(self.thread_started.isSet())
    def stopServer(self):
        """Stop the polling loop and verify no sockets leaked."""
        self.run_loop = 0
        self.thread.join()
        td.shutdown()
        self._server.close()
        # Make sure all sockets get closed by asyncore normally.
        timeout = time.time() + 5
        while 1:
            if len(asyncore.socket_map) == self.orig_map_size:
                # Clean!
                break
            if time.time() >= timeout:
                self.fail('Leaked a socket: %s' % `asyncore.socket_map`)
            asyncore.poll(0.1)
        #self.unhook_asyncore_error()
    def loop(self):
        """Background thread body: poll asyncore until run_loop is cleared."""
        self.thread_started.set()
        while self.run_loop:
            self.counter = self.counter + 1
            asyncore.poll(0.1)
    # environment
    # ===========
    def scrubenv(self):
        """Remove all HTTPY_* variables from os.environ, saving them."""
        save = {}
        for opt in Config.options:
            envvar = 'HTTPY_%s' % opt.upper()
            if os.environ.has_key(envvar):
                save[envvar] = os.environ[envvar]
                del os.environ[envvar]
        self.env = save
    def restoreenv(self):
        """Put the variables saved by scrubenv back into os.environ."""
        for k, v in self.env.items():
            os.environ[k] = v
        self.env = {}
    # test site
    # =========
    # testsite is a list of strings and tuples. If a string, it is interpreted
    # as a path to a directory that should be created. If a tuple, the first
    # element is a path to a file, the second is the contents of the file.
    # We do it this way to ease cross-platform testing.
    #
    # siteroot is the filesystem path under which to create the test site.
    siteroot = 'root'
    testsite = []
    def buildTestSite(self):
        """Build the site described in self.testsite
        """
        os.mkdir(self.siteroot)
        for item in self.testsite:
            if isinstance(item, basestring):
                path = self.convert_path(item.lstrip('/'))
                path = os.sep.join([self.siteroot, path])
                os.mkdir(path)
            elif isinstance(item, tuple):
                filepath, contents = item
                path = self.convert_path(filepath.lstrip('/'))
                path = os.sep.join([self.siteroot, path])
                file(path, 'w').write(contents)
    def removeTestSite(self):
        """Delete httpy.conf and the whole site tree, bottom-up."""
        if os.path.isfile('httpy.conf'):
            os.remove('httpy.conf')
        if not os.path.isdir(self.siteroot):
            return
        for root, dirs, files in os.walk(self.siteroot, topdown=False):
            for name in dirs:
                os.rmdir(os.path.join(root, name))
            for name in files:
                os.remove(os.path.join(root, name))
        os.rmdir(self.siteroot)
    def convert_path(self, path):
        """Given a Unix path, convert it for the current platform.
        """
        return os.sep.join(path.split('/'))
    def convert_paths(self, paths):
        """Given a tuple of Unix paths, convert them for the current platform.
        """
        return tuple([self.convert_path(p) for p in paths])
    # utils
    # =====
    @staticmethod
    def neuter_traceback(tb):
        """Given a traceback, return just the system-independent lines.
        """
        tb_list = tb.split(os.linesep)
        if not tb_list[-1]:
            tb_list = tb_list[:-1]
        neutered = []
        for i in range(0,len(tb_list),2):
            neutered.append(tb_list[i])
        neutered.append(tb_list[-1])
        return os.linesep.join(neutered)
    @staticmethod
    def dict2tuple(d):
        # Canonical, hashable form of a dict for comparisons (py2 iteritems).
        return tuple(sorted(d.iteritems()))
    @staticmethod
    def make_request(uri, headers=None, Zope=False):
        """Build a GET request for *uri*; return the raw ZopeRequest when
        Zope is True, else wrap it in httpy's Request."""
        if headers is None:
            headers = {}
        request = ZopeRequest()
        request.received("GET %s HTTP/1.1\r\n" % uri)
        for header in headers.items():
            request.received("%s: %s\r\n" % header)
        request.received('\r\n')
        if Zope:
            return request
        else:
            return Request(request)
|
[
"chad@zetaweb.com"
] |
chad@zetaweb.com
|
d51b0b507c16f3480bdd85165672cf7a919454c4
|
9e12f81814b24aa54acccb80fcaf3bf688bd984a
|
/Spiders/youku/parsers/parse_detail_list_page.py
|
0d5dc03cc9424dcec81974f5a428c0d4251a0c13
|
[] |
no_license
|
wangtaihong/mov
|
76793050cdece48894be6433dd407cc918c0bbb3
|
8d394ca9c8c2906a585e58b85f974f68664629d3
|
refs/heads/master
| 2020-03-19T20:02:52.626127
| 2018-07-16T02:40:52
| 2018-07-16T02:40:52
| 136,885,627
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,708
|
py
|
# coding:utf-8
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from lxml import etree
def url_format(url):
    """Normalize a Youku video URL.

    Drops any existing "http:" prefix, re-adds it (turning scheme-less
    "//host/..." links into "http://host/..."), and strips everything
    after the ".html" suffix (query string, fragment, ...).
    """
    bare = re.sub('http:', '', url)
    trimmed = re.sub('(\.html.*)', '.html', bare)
    return "http:" + trimmed
def parse_detail(r, url):
    """Extract movie metadata from a Youku detail page.

    r: raw HTML text of the detail page; url: the page URL (used only in
    the error log message).  Returns False when the HTML cannot be parsed
    or the synopsis block is missing; otherwise returns
    {"data": <field dict>, "stars": <list of star-page URLs to crawl>}.
    Fields are filled via a mix of lxml xpath queries and regexes over
    the newline-stripped source (sss).
    """
    try:
        page = etree.HTML(r)
    except Exception as e:
        return False
    sss = re.sub(u'\\n', '', r)
    data = dict()
    stars = []
    # title_show = re.search(u'class="p-thumb"><a title="([^"]+?)" href="([^"]+?)" target=',sss)
    # v-show: the v-show link may be absent
    # v_show = re.search(u'class="p-thumb"><a title="([^" href]+?)" href="([^" ]+?)" target=',sss)
    v_show = page.xpath(
        u'//div[@class="p-post"]/div[@class="yk-pack p-list"]/div[@class="p-thumb"]/a')
    if len(v_show) > 0:
        data['v_show'] = url_format(v_show[0].get("href"))
    # Poster image:
    # thumb = re.search(u'^(?=.*(http\://\w+\d+\.\w+\.com/(\w*\d*)+)").*$',sss).group(1)
    thumb = page.xpath(
        u'//div[@class="p-post"]/div[@class="yk-pack p-list"]/div[@class="p-thumb"]/img')
    if len(thumb) > 0:
        data['thumb'] = [{"url": url_format(thumb[0].get(
            "src")), "title":thumb[0].get("alt"), "width":200, "height":300}]
        data['title'] = thumb[0].get("alt")
    # category:
    # category = page.xpath('//div[@class="p-base"]/ul/li[@class="p-row p-title"]')[0].find('a')
    category = page.xpath(
        '//div[@class="p-base"]/ul/li[@class="p-row p-title"]/a')
    if len(category) > 0:
        data['category'] = category[0].text
        # category_url = category.get('href')
    # Year: may be absent
    year = page.xpath(
        '//div[@class="p-base"]/ul/li[@class="p-row p-title"]/span[@class="sub-title"]')
    if len(year) > 0:
        data['year'] = year[0].text
    # Alias: may be absent
    alias = page.xpath('//div[@class="p-base"]/ul/li[@class="p-alias"]')
    if len(alias) > 0:
        data['alias'] = alias[0].get("title")
    # Release date: may be absent
    published_at = re.search(u'>上映:</label>(\w+-\d+-\d+)*</span>', sss)
    if published_at != None:
        data['published_at'] = published_at.group(1)
    # Youku release date: may be absent
    yk_published_at = re.search(u'>优酷上映:</label>(\w+-\d+-\d+)*</span>', sss)
    if yk_published_at != None:
        data['yk_published_at'] = yk_published_at.group(1)
    # Youku score: may be absent
    youku_score = page.xpath(
        '//div[@class="p-base"]/ul/li[@class="p-score"]/span[@class="star-num"]')
    if len(youku_score) > 0:
        data['youku_score'] = youku_score[0].text
    # Douban score: may be absent
    douban_score = re.search(u'<span class="db-bignum">(\d+\.\d*)</span>', sss)
    if douban_score != None:
        data['douban_score'] = douban_score.group(1)
    # Douban review count: may be absent
    douban_cm_num = re.search(u'<span class="db-cm-num">(\d*)评价</span>', sss)
    if douban_cm_num != None:
        data['douban_cm_num'] = douban_cm_num.group(1)
    # Leading actors: may be absent
    actors = page.xpath('//div[@class="p-base"]/ul/li[@class="p-performer"]')
    if len(actors) > 0:
        data['actors'] = actors[0].get('title')
        data['actor_list'] = []
        for x in page.xpath('//div[@class="p-base"]/ul/li[@class="p-performer"]/a'):
            print(x)
            data['actor_list'].append({"name":x.text,"youkuid":re.search(u"//list\.youku\.com/star/show/(.*)\.html",etree.tostring(x)).group(1)})
    # Episode count / update status
    renew = page.xpath(
        '//div[@class="p-base"]/ul/li[@class="p-row p-renew"]')
    if len(renew) > 0:
        data['renew'] = renew[0].text
    # Actor links: may be absent
    actors_a = page.xpath(
        '//div[@class="p-base"]/ul/li[@class="p-performer"]/a')
    if len(actors_a) > 1:
        for x in actors_a:
            # actor_url = url_format(x.get('href'))
            actor_name = x.text
            stars.append(url_format(x.get('href')))
            # rd.sadd(config.yk_star_task, url_format(x.get('href'))) # star-crawl queue; redis set de-duplicates
    # //list.youku.com/star/show/uid_UODY0MjQ=.html
    # Directors: iterated below
    # directed = page.xpath('//div[@class="p-base"]/ul/li[@class="p-performer"]')[0].getnext().findall('a')
    directed = page.xpath(
        u'//div[@class="p-base"]/ul/li[contains(text(),"导演:")]/a')
    data['director_list'] = []
    if len(directed) > 0:
        data['directors'] = ''
        for x in directed:
            # star_url = url_format(x.get("href"))
            data['directors'] = data['directors'] + '|' + x.text
            stars.append(url_format(x.get('href')))
            data['director_list'].append({"name":x.text,"youkuid":re.search(u"//list\.youku\.com/star/show/(.*)\.html",etree.tostring(x)).group(1)})
            # rd.sadd(config.yk_star_task, url_format(x.get("href"))) # star-crawl queue; redis set de-duplicates
    # Region: may be absent
    area = re.search(
        u'>地区:<a href="//list\.youku\.com/category/show/([^\.html]+?)\.html" target="_blank">([^</a></li>]+?)</a>', sss)
    if area != None:
        data['area'] = area.group(2)
    # Genres: iterated below
    types = page.xpath(
        u'//div[@class="p-base"]/ul/li[contains(text(),"类型")]/a')
    if len(types) > 0:
        data['types'] = ''
        for x in types:
            data['types'] = data['types'] + ',' + x.text
    # Total play count: may be None
    plays_num = re.search(u'<li>总播放数:([^</li>]+?)</li>', sss)
    if plays_num != None:
        data['plays_num'] = plays_num.group(1)
    # Comment count: may be None
    youku_comments_num = re.search(u'<li>评论:([^</li>]+?)</li>', sss)
    if youku_comments_num:
        data['youku_comments_num'] = youku_comments_num.group(1)
    # Upvote count: may be empty
    ding = re.search(u'<li>顶:([^</li>]+?)</li>', sss)
    if ding:
        data['ding'] = ding.group(1)
    # Synopsis: treated as mandatory — missing block aborts the parse
    try:
        page.xpath(
            u'//div[@class="p-base"]/ul/li[@class="p-row p-intro"]/span[@class="intro-more hide"]')[0]
    except Exception as e:
        print("parse_detail_list_page:", url, str(e), r)
        #update_session(proxy)
        return False
        # sys.exit("die")
    summary = page.xpath(
        u'//div[@class="p-base"]/ul/li[@class="p-row p-intro"]/span[@class="intro-more hide"]')[0]
    if summary != None:
        data['summary'] = summary.text
    # Suitable age: may be empty
    age = re.search(u'>适用年龄:([^</li>]+?)</li>', sss)
    if age:
        data['age'] = age.group(1)
    # Voice actors: may be absent
    peiyin = page.xpath(
        u'//div[@class="p-base"]/ul/li[contains(text(),"声优:")]/a')
    if len(peiyin) > 0:
        data['peiyin'] = ''
        data['peiyin_list'] = []
        for x in peiyin:
            data['peiyin'] = data['peiyin'] + '|' + x.text
            stars.append(url_format(x.get('href')))
            # data['peiyin_list'].append({"name":x.text,"youkuid":re.search(u"show/(.*)\.html",etree.tostring(x)).group(1)})
            data['peiyin_list'].append({"name":x.text,"youkuid":re.search(u"//list\.youku\.com/star/show/(.*)\.html",etree.tostring(x)).group(1)})
    # Hosts (variety shows only)
    presenters = page.xpath(
        u'//div[@class="p-base"]/ul/li[contains(text(),"主持人:")]/a')
    if len(presenters) > 0:
        data['presenters'] = ""
        for x in presenters:
            data['presenters'] = data['presenters'] + '|' + x.text
            stars.append(url_format(x.get('href')))
            # rd.sadd(config.yk_star_task, url_format(x.get("href"))) # star-crawl queue; redis set de-duplicates
    return {"data": data, "stars": stars}
|
[
"wangtaihong8@163.com"
] |
wangtaihong8@163.com
|
b549437484d60e16f3abc854f97caa01baff0c64
|
cfdaf1675a6a6a3c21c163ea48556a82d1f761f7
|
/Actividades/AF04/ventana_principal.py
|
591a0cbb2d104c65690f5adbec995595b09c185e
|
[] |
no_license
|
catalinamusalem/Catalina
|
8114568486f2e6e8b73def164274064de6790bbb
|
e508ccb622e03e543c1a7da6b2c1d4636325b92b
|
refs/heads/master
| 2022-11-28T14:42:55.037042
| 2020-07-05T22:06:01
| 2020-07-05T22:06:01
| 286,140,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,460
|
py
|
import os
import sys
from random import choice
from PyQt5.QtWidgets import QLabel, QWidget, QLineEdit, \
QHBoxLayout, QVBoxLayout, QPushButton
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication
class VentanaPrincipal(QWidget):
    """Main DCCuent window: shows the player's name, win/loss counters
    and the three playable cards (infantry / ranged / artillery)."""
    # Signal used to send the chosen play (a card dict) to the back-end.
    senal_enviar_jugada = pyqtSignal(dict)
    def __init__(self, *args):
        super().__init__(*args)
        self.crear_pantalla()
    def crear_pantalla(self):
        """Build the empty window: labels for the game data, no content yet."""
        self.setWindowTitle("DCCuent")
        nombre = ""
        vict = ""
        derrot = ""
        self.rutai=""
        self.rutar=""
        self.rutaa=""
        self.pixeles_i= ""
        self.pixeles_r= ""
        self.pixeles_a= ""
        self.nombre_usuario= QLabel(nombre,self)
        self.victorias =QLabel(vict,self)
        self.derrotas = QLabel(derrot,self)
        self.infanteria = QLabel("Q", self)
        self.rango=QLabel("W",self)
        self.artilleria=QLabel("E",self)
        self.logo_i = QLabel(self)
        self.logo_r = QLabel(self)
        self.logo_a = QLabel(self)
        # Card images start empty because the ruta* paths are still "".
        pix_i=QPixmap(self.rutai)
        pix_r=QPixmap(self.rutar)
        pix_a=QPixmap(self.rutaa)
        self.logo_i.setPixmap(pix_i)
        self.logo_r.setPixmap(pix_r)
        self.logo_a.setPixmap(pix_a)
        # Three columns: name/infantry, wins/ranged, losses/artillery.
        vlayout1 = QVBoxLayout()
        vlayout1.addWidget(self.nombre_usuario)
        vlayout1.addWidget(self.infanteria)
        vlayout1.addWidget(self.logo_i)
        vlayout2 = QVBoxLayout()
        vlayout2.addWidget(self.victorias)
        vlayout2.addWidget(self.rango)
        vlayout2.addWidget(self.logo_r)
        vlayout3 = QVBoxLayout()
        vlayout3.addWidget(self.derrotas)
        vlayout3.addWidget(self.artilleria)
        vlayout3.addWidget(self.logo_a)
        mainlayout = QHBoxLayout()
        mainlayout.addLayout(vlayout1)
        mainlayout.addLayout(vlayout2)
        mainlayout.addLayout(vlayout3)
        self.setLayout(mainlayout)
    def actualizar(self, datos):
        """Store the new cards and score from *datos* and show the window."""
        nombre = datos["usuario"]
        vict = datos["victorias"]
        derrot = datos["derrotas"]
        self.pixeles_i= datos["infanteria"]
        self.pixeles_r= datos["rango"]
        self.pixeles_a= datos["artilleria"]
        self.rutai=datos["infanteria"]["ruta"]
        self.rutar=datos["rango"]["ruta"]
        self.rutaa=datos["artilleria"]["ruta"]
        # NOTE(review): nombre/vict/derrot and the new image paths are
        # stored, but no setText/setPixmap call refreshes the labels here,
        # so the widgets keep showing their old content — confirm whether
        # the labels should be updated.
        # Finally, show the window.
        self.show()
    def keyPressEvent(self, evento):
        # Capture the pressed key (q/w/e) and emit the matching card.
        if evento.text() == "q":
            data= self.pixeles_i
            self.senal_enviar_jugada.emit(data)
        if evento.text() == "w":
            data= self.pixeles_r
            self.senal_enviar_jugada.emit(data)
        if evento.text() == "e":
            data= self.pixeles_a
            self.senal_enviar_jugada.emit(data)
class VentanaCombate(QWidget):
    """Combat window: shows the player's and the enemy's cards plus the
    round message, with a button to continue."""
    # Signal to return to VentanaPrincipal with the updated data.
    senal_regresar = pyqtSignal(dict)
    # Signal that opens the final window with the game result.
    senal_abrir_ventana_final = pyqtSignal(str)
    def __init__(self, *args):
        super().__init__(*args)
        self.crear_pantalla()
    def crear_pantalla(self):
        """Build the layout: player card | result text | enemy card."""
        self.setWindowTitle("DCCuent")
        self.vbox = QVBoxLayout()
        self.layout_principal = QHBoxLayout()
        self.label_carta_usuario = QLabel()
        self.label_victoria = QLabel()
        self.label_carta_enemiga = QLabel()
        self.boton_regresar = QPushButton("Regresar")
        self.layout_principal.addWidget(self.label_carta_usuario)
        self.layout_principal.addWidget(self.label_victoria)
        self.layout_principal.addWidget(self.label_carta_enemiga)
        self.boton_regresar.clicked.connect(self.regresar)
        self.vbox.addLayout(self.layout_principal)
        self.vbox.addWidget(self.boton_regresar)
        self.setLayout(self.vbox)
    def mostrar_resultado_ronda(self, datos):
        """Display both cards and the round message, then show the window."""
        self.datos = datos
        mensaje = datos["mensaje"]
        carta_enemiga = datos["enemigo"]
        carta_jugador = datos["jugador"]
        self.label_carta_usuario.setPixmap(QPixmap(carta_jugador["ruta"]).scaled(238,452))
        self.label_carta_enemiga.setPixmap(QPixmap(carta_enemiga["ruta"]).scaled(238,452))
        self.label_victoria.setText(mensaje)
        self.show()
    def regresar(self):
        """Route to the final window when the game ended; otherwise back
        to the main window. Hides this window either way."""
        resultado = self.datos["resultado"]
        if resultado == "victoria" or resultado == "derrota":
            self.senal_abrir_ventana_final.emit(resultado)
        else:
            self.senal_regresar.emit(self.datos)
        self.hide()
if __name__ == "__main__":
def hook(type, value, traceback):
print(type)
print(traceback)
sys.__excepthook__ = hook
a = QApplication(sys.argv)
ventana_principal = VentanaPrincipal()
ventana_principal.show()
sys.exit(a.exec())
|
[
"catalina.musalem@uc.cl"
] |
catalina.musalem@uc.cl
|
ae3c07417196b04210dbed26d9b1fba5aac5f9ec
|
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
|
/lib/python3.6/site-packages/numpy/core/tests/test_regression.py
|
39a92211635a6dcc5cd242241cf5f18f0e08b70e
|
[] |
no_license
|
cronos91/ML-exercise
|
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
|
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
|
refs/heads/master
| 2021-05-09T22:02:55.131977
| 2017-12-14T13:50:44
| 2017-12-14T13:50:44
| 118,736,043
| 0
| 0
| null | 2018-01-24T08:30:23
| 2018-01-24T08:30:22
| null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:2d5a65e7c1da1e87651cabd3481c0012ad15f784275aad1259a1312faf19cfc2
size 81211
|
[
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] |
seokinj@jangseog-in-ui-MacBook-Pro.local
|
b1363d2eeea65f67da9c4da23778667e39565849
|
ee4152e9b5eafa7afafe05de04391a9a3606eea3
|
/client/API/AddRecord.py
|
431bc9058aefc1020df12034d650ed008e3998a5
|
[] |
no_license
|
adibl/password_saver
|
3a06c8c04905d82f01fc14b41b646a6578af2b70
|
2ea73781db92ce750f91039251f2c06e929da7bb
|
refs/heads/master
| 2020-04-09T23:51:34.804870
| 2019-06-16T10:13:42
| 2019-06-16T10:13:42
| 160,665,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
"""
name:
date:
description
"""
import base64
import json
import requests
import os
from .connection import Request
class Passwords(object):
    """Client-side helpers for the server's /passwords endpoint.

    Every call authenticates with the JWT cached in FILE_NAME; when no token
    is cached the methods short-circuit with ``{'general': 401}``.
    """

    # Cache file that stores the JWT issued at login.
    FILE_NAME = 'token.txt'

    @classmethod
    def handle(cls, url, username, password):
        """UI entry point: store a new (url, username, password) record."""
        return cls.POST(url, username, password)

    @classmethod
    def GET(cls):
        """Return all stored password records as parsed JSON, or a
        ``{'general': <status>}`` dict on any failure."""
        auto = cls.read_jwt()
        if auto is None:
            return {'general': 401}
        # BUG FIX: dropped the dead `conn =` alias chained on this assignment.
        response = Request().get_conn().get(
            Request.URI + '/passwords',
            headers={'Authorization': 'Bearer {0}'.format(auto)})
        if response.status_code == 200:
            return json.loads(response.text)
        else:
            return {'general': response.status_code}

    @classmethod
    def POST(cls, url, username, password):
        """Create a password record for *url*.

        Returns True on success, the server's parsed validation errors on a
        442 response, or a generic error dict otherwise.
        """
        auto = cls.read_jwt()
        if auto is None:
            return {'general': 401}
        # BUG FIX: removed a leftover Python-2 `print` *statement* here, which
        # is a SyntaxError under Python 3 and leaked the URL to stdout.
        # The URL is base64-encoded so it can serve as a path-safe identifier.
        encode_url = base64.urlsafe_b64encode(url)
        response = Request().get_conn().post(
            Request.URI + '/passwords',
            headers={'Authorization': 'Bearer {0}'.format(auto)},
            json={'username': username, 'password': password,
                  'program_id': encode_url})
        if response.status_code == 200:
            return True
        # NOTE(review): 442 is not a standard HTTP status; the server probably
        # means 422 (Unprocessable Entity) — confirm against the API.
        elif response.status_code == 442:
            return json.loads(response.text)
        else:
            return {'general': 'general error'}

    @classmethod
    def read_jwt(cls):
        """Return the cached JWT bytes, or None when no token file exists."""
        if os.path.isfile(cls.FILE_NAME):
            with open(cls.FILE_NAME, 'rb') as fp:
                return fp.read()
        else:
            return None
|
[
"bleyer23@gmail.com"
] |
bleyer23@gmail.com
|
1b498a36f1e5cddb3c338c90afdb44c34630961f
|
794543da14ede49acde50acfac76681e87f31673
|
/src/training_scripts/sprp_onmt_copy_512/validate.py
|
58e8a364d3f0da808802f433ecd18845a2bb7706
|
[] |
no_license
|
roeeaharoni/sprp-acl2018
|
0f404dd27e5ea09f427df920e3d47b0d45d6c5d7
|
2d215999cd72cc0f59d7a6733e1b1f1d7ea54777
|
refs/heads/master
| 2020-03-07T23:33:59.874847
| 2019-02-15T10:06:01
| 2019-02-15T10:06:01
| 127,784,457
| 16
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
from src import evaluate
import os
def main():
    """Run multi-reference BLEU evaluation of the sprp_onmt_copy_512 model
    on the validation split.

    Python 2 script (note the `print` statements).  All paths are hard-coded
    to one machine; NOTE(review): consider parameterizing base_path.
    """
    model_name = 'sprp_onmt_copy_512'
    base_path = '/home/nlp/aharonr6'
    moses_path = base_path + '/git/mosesdecoder'
    # per-sentence reference directories for the validation set
    test_dirs_path_prefix = base_path + '/git/Split-and-Rephrase/evaluation-directories/validation/'
    # the file containing the ids of the test sentences
    test_sent_ids_path = base_path + '/git/Split-and-Rephrase/benchmark/complex-sents/validation.id'
    # a directory that will hold single sentence files for the hypotheses
    test_hypothesis_sents_dir = base_path + '/git/phrasing/models/{}/validation_complex_output_sents/'.format(model_name)
    if not os.path.exists(test_hypothesis_sents_dir):
        os.mkdir(test_hypothesis_sents_dir)
    # model output: one line of concatenated simple sentences per complex input
    test_target = base_path + '/git/phrasing/models/{}/validation.complex.unique.output'.format(model_name)
    print 'starting multi-ref evaluation...'
    avg_bleu, avg_tokens_per_sent, avg_simple_sents_per_complex = evaluate.evaluate_avg_concat_bleu(moses_path,
                                                                                                    test_sent_ids_path,
                                                                                                    test_hypothesis_sents_dir,
                                                                                                    test_target,
                                                                                                    test_dirs_path_prefix,
                                                                                                    splitter='. ')
    print 'avg BLEU:{} avg tokens/sent: {} avg split: {}'.format(avg_bleu, avg_tokens_per_sent,
                                                                 avg_simple_sents_per_complex)
    return
# Standard script entry point.
if __name__ == '__main__':
    main()
|
[
"roee.aharoni@gmail.com"
] |
roee.aharoni@gmail.com
|
bce22db2adda5234a705ff0d1fb719565b3bddd8
|
9692a20a1e7a224a72785e4495f31421639b9f3b
|
/frex/pipeline_stages/filters/candidate_filterer.py
|
2d79e3b31e1ec3776b5978e1f52488af2826dfdb
|
[] |
no_license
|
solashirai/FREx
|
6b0cb040930761a0e269f4591d7dde36e3f636d1
|
36ad09a0cb0020661ee990c7800bafd110e2ec04
|
refs/heads/master
| 2023-08-14T08:49:49.270281
| 2021-09-29T14:58:23
| 2021-09-29T14:58:23
| 291,760,109
| 0
| 0
| null | 2021-09-24T22:41:19
| 2020-08-31T15:57:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,535
|
py
|
from abc import abstractmethod
from typing import Generator, Optional, Any
from frex.models import Explanation, Candidate
from frex.pipeline_stages import PipelineStage
class CandidateFilterer(PipelineStage):
    """
    A PipelineStage that screens candidates: those caught by filter() are
    dropped, the rest are annotated and forwarded through the FREx Pipeline.

    A concrete CandidateFilterer only needs to subclass this and implement
    the filter() function.
    """

    def __init__(
        self, *, filter_explanation: Explanation, filter_score: float = 0, **kwargs
    ):
        """
        :param filter_explanation: Explanation attached to every candidate that survives the filter.
        :param filter_score: Score attached to every surviving candidate; defaults to 0.
        """
        self.filter_explanation = filter_explanation
        self.filter_score = filter_score

    @abstractmethod
    def filter(self, *, candidate: Candidate) -> bool:
        """
        Decide whether *candidate* is unsuitable and should be dropped.

        :param candidate: The domain-specific candidate under consideration.
        :return: True to remove the candidate, False to keep it flowing
            through the Pipeline.
        """
        pass

    def __call__(
        self, *, candidates: Generator[Candidate, None, None], context: Any
    ) -> Generator[Candidate, None, None]:
        """
        Lazily consume *candidates*, dropping each one that filter() flags and
        yielding the survivors with this stage's explanation and score appended.

        :param candidates: Generator of candidates, typically the preceding
            PipelineStage in a FREx Pipeline.
        :param context: The current context used to execute the Pipeline.
        :return: Generator of candidates that passed this stage's filter.
        """
        for surviving in candidates:
            if self.filter(candidate=surviving):
                continue
            surviving.applied_explanations.append(self.filter_explanation)
            surviving.applied_scores.append(self.filter_score)
            yield surviving
|
[
"solashakashirai@gmail.com"
] |
solashakashirai@gmail.com
|
3c0d2f6529512fe74b919bbd685a3ca9f69943c6
|
00c56919bc5919b2f728b3d631ad4b2d2fdb14fa
|
/missingvalues/Student/missingvalues_student.py
|
29efce54c92d59c33e70bcedcbc0eb810628f351
|
[] |
no_license
|
Tcintra/Missing-Values-and-Feature-Engineering
|
b108299176826a1124af52bd0102edcc11ed362b
|
f0b751253ce921b3e22d9310413a204517dfa212
|
refs/heads/master
| 2020-06-12T15:15:36.480988
| 2019-07-11T23:28:36
| 2019-07-11T23:28:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
"""
Author : Thomas Cintra and Yun Zhang
Class : CS 181R
Date : 2019 June 20
Description : Credit Score analysis
Name :
Homework 3
"""
# seaborn module
import seaborn as sns
# python modules
import os
# numpy module
import numpy as np
# pandas module
import pandas as pd
# matplotlib module
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
# import scikit learn module
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
from sklearn.dummy import DummyClassifier
from sklearn.preprocessing import LabelEncoder
# Location of the course data directory, relative to this script.
path = os.path.join("..", "Data")


def main():
    """Placeholder entry point for the homework; currently prints a blank line."""
    print()
|
[
"noreply@github.com"
] |
noreply@github.com
|
861bd0f912326d66adf883ca271ce7af6319eb44
|
21906e3edd0cebc04a43f198fce4946c4cf3cf4f
|
/main_opencv.py
|
842ea1a4595658042f44649009e93cde77649e9a
|
[] |
no_license
|
chincherpa/slit_scan_image
|
f675a070b56b9f7b5f26d2d6fb53d11e827f721a
|
f5868d43296467f72ea33754a33f21640d75b1bf
|
refs/heads/master
| 2020-06-29T21:57:47.557903
| 2019-08-05T10:56:15
| 2019-08-05T10:56:15
| 200,635,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 900
|
py
|
import os
import sys
import numpy as np
import cv2
this_path = os.path.dirname(os.path.realpath(__file__))
filename = 'big_buck_bunny_720p_5mb.mp4'
path_to_file = os.path.join(this_path, filename)
output_filename = os.path.splitext(os.path.basename(path_to_file))[0] + '.png'
clip = cv2.VideoCapture(path_to_file)
first_frame = clip.read()
height, width, dpth = first_frame[1].shape
slitwidth = 1
slitpoint = width // 2
# np.zeros is how we generate an empty ndarray
img = np.zeros((height, 1, dpth), dtype='uint8')
while True:
frame = clip.read()
if frame[0] is False:
break
frame = np.array(frame[1])
slit = frame[:,slitpoint:slitpoint+slitwidth,:]
img = np.hstack((img, slit))
cv2.imshow("Frames", img)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
output = cv2.imwrite(os.path.join(this_path, output_filename), img)
clip.release()
|
[
"accounts@mail.de"
] |
accounts@mail.de
|
30dfff8bcb7876a52d6a99c2cd8349866f1eb587
|
9da79476a3002a4af98cc76effdabdbec9613adf
|
/Extended_Methods/Heuristic_2/GLOBAL_VAR.py
|
3bb5558e36235af3c35eb3a14bab112ea60dec5a
|
[
"CC-BY-4.0"
] |
permissive
|
heyuan7676/ts_eQTLs
|
1cb0517dbe1faac616fef6e5ebc87ffb6d47899a
|
62b04f5477183f5c0cb60f21264b3147fd8bd82a
|
refs/heads/master
| 2021-07-15T13:23:20.491904
| 2020-10-09T16:03:53
| 2020-10-09T16:03:53
| 211,209,498
| 20
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,354
|
py
|
import os
import sys
import numpy as np
import pandas as pd
import pdb
# LD-pruning r^2 threshold embedded in the dataset filenames below.
r2 = '1'
# FDR cutoff used when naming the significance-corrected loadings file.
FDR = 0.05
# Factor-matrix (coph) and log-likelihood working directories.
fmDir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/FL/coph'
ll_dir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/LL'
# Base name of the filtered cis-eQTL pair dataset for this r^2.
prefix = 'v8_cbset_95_allPairs_filteredGenes.ciseQTL_results.complete_filteredSNPs.LDblocks_%s' % str(r2)
LDprefix = '_LD1'
# Earlier factorization-based naming schemes, kept for reference:
#FMfn = 'SparseMF_coph_%s_topPair_K30_a11_l110' % prefix.replace(r2, '0.2')
#FMfn = 'SparseMF_coph_%s_topPair_K25_a125_l15000' % prefix
#LMfn= '%s%s_Loadings_beta_BH_corrected_alpha%s' % (FMfn, LDprefix, str(FDR))
#LMfn = '%s%s_Loadings_projection' % (FMfn, LDprefix)
# Current method: thresholding heuristic instead of sparse MF.
FMfn = 'Thresholding_Refined'
# Heuristic-2 hyper-parameters (always active; `if 1:` is just a grouping block).
if 1:
    FOLDS = 100
    PROP = 0.5
    PVALUE = 0.001
    N1 = 5
    LMfn = 'ts_closeToTop_FOLDS%d_PROP%s_PVALUE%s_N1%d' % (FOLDS, str(PROP), str(PVALUE), N1)
# Index of the background (shared) cluster.
bg_cluster_id = 0
# Input pair datasets and where the fitted-model inputs live.
inputdatadir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/cbset_datasets/input_pairs'
inputdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/cbset_datasets/input_pairs_fitModel'
inputdatafn = 'v8_cbset_95_allPairs_filteredGenes.ciseQTL_results.complete.tss_distance.txt'
# Downstream pair sets (an older 0907 snapshot is kept commented out).
pairdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/pairSets'
#pairdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/pairSets_0907'
# SNP annotation directories (all pairs vs. the cbset subset).
allSNPfeaturedir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/annotations/allPairs'
SNPfeaturedir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/datasets/annotations/cbset_pairs'
datasetName = 'v8_cbset_95_allPairs_filteredGenes.ciseQTL_results.complete.SNP_loc'
# Active/significant SNP sets and their feature matrices.
activeSNPdir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/SNP/SNPset_active'
sigSNPfeaturedir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/SNP/SNPset_features'
activeSNPfeaturedir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/SNP/SNPset_active_features'
active_proportion = 0.0
# GSEA enrichment-test resources (GO biological process gene sets).
gsea_dir = '/work-zfs/abattle4/heyuan/tissue_spec_eQTL_v8/downstream/enrichmentTest/GSEA/'
gsea_file_used = 'c5.bp.v6.2.symbols.gmt.txt'
def get_tis_groups():
    """Partition the module-level `tissues` array into GTEx tissue groups.

    Multi-tissue groups (e.g. every 'Brain*' tissue) are collected by
    substring match against `tissues`; single tissues are literal one-element
    lists.  The group order is fixed and matters to downstream consumers.
    """
    def _matching(substring):
        # All tissue names containing the given substring, in file order.
        return [t for t in tissues if substring in t]

    # (name, collect-by-substring?) in the required output order.
    spec = [
        ('Adipose', True),
        ('Adrenal_Gland', False),
        ('Artery', True),
        ('Brain', True),
        ('Cells_EBV-transformed_lymphocytes', False),
        ('Cells_Cultured_fibroblasts', False),
        ('Colon', True),
        ('Esophagus', True),
        ('Heart', True),
        ('Kidney_Cortex', False),
        ('Liver', False),
        ('Lung', False),
        ('Minor_Salivary_Gland', False),
        ('Muscle_Skeletal', False),
        ('Nerve_Tibial', False),
        ('Ovary', False),
        ('Pancreas', False),
        ('Pituitary', False),
        ('Prostate', False),
        ('Skin', True),
        ('Small_Intestine_Terminal_Ileum', False),
        ('Spleen', False),
        ('Stomach', False),
        ('Testis', False),
        ('Thyroid', False),
        ('Uterus', False),
        ('Vagina', False),
        ('Whole_Blood', False),
    ]
    return [_matching(name) if by_substring else [name]
            for name, by_substring in spec]
# Load the ordered tissue list (one name per line, no header) that
# get_tis_groups() filters against.  NOTE(review): read at import time from a
# CWD-relative 'tissues.txt' — confirm callers run from this directory.
tissues = pd.read_csv('tissues.txt', sep='\t', header=None)
tissues = np.array(tissues[0])
Comp_tissues = get_tis_groups()
|
[
"yuanhe777tt@hotmail.com"
] |
yuanhe777tt@hotmail.com
|
3931ee5baf8bc1af8cebd1dfcacdfcb03668a43e
|
6cf909646199c6aa4e7a32a2f804c750013b4d2d
|
/property crriter.py
|
7343516c2d36acb6d67f5eb1d26bb246d3fe478a
|
[] |
no_license
|
LJ0401/pizzapy
|
3613dbba44e243fcb52672ee15389d07e4f7a7f5
|
21f76af524a822a841a7ab7ee385263ce4ad49b0
|
refs/heads/master
| 2021-01-22T21:37:22.828565
| 2017-04-13T14:21:28
| 2017-04-13T14:21:28
| 85,444,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
#Property Critter
#Demonstrates Python properties
class Critter(object):
    """A virtual pet whose name is exposed through a property."""

    def __init__(self, name):
        # BUG FIX: was misspelled `_init_`, so Python never called it and
        # `Critter("Poochie")` raised TypeError (object() takes no arguments).
        print("A new critter has been born!")
        self._name = name

    @property
    def name(self):
        """The critter's name (backed by the private _name attribute)."""
        return self._name

    @name.setter
    def name(self, new_name):
        # The demo script below assigns `crit.name`, so a setter is required;
        # without one that assignment raised AttributeError.
        self._name = new_name

    def talk(self):
        print("\nHi, I'm", self.name)
# Main program
crit = Critter("Poochie")
crit.talk()
# Rename the critter
print("\nAttempting to change my critter's name to Randoph...")
crit.name = "Randoph"
print("My critter's name is:", end=" ")
print(crit.name)
# Set the name to the empty string
print("\nAttempting to change my critter's name to the empty string.")
crit.name =""
print("My critter's name is:", end=" ")
print(crit.name)
input("\n\nPress the enter key to exit.")
|
[
"1351507858@qq.com"
] |
1351507858@qq.com
|
3da95531c372cce9a2250fcbe7c834b331cfe810
|
22f4146b560571bfc646b7f0b500a4540f0db936
|
/Exercises/hand.py
|
3849da666c9b6fa084d014601ac091b1b7fdd5e7
|
[] |
no_license
|
xFluke/MITx-6.00.1x
|
86f3801593ce0dadfd468b039731b70c9e23a660
|
a973bddeae9312a936f5989bb124728b044f34a6
|
refs/heads/master
| 2020-03-28T16:36:08.046350
| 2018-09-13T23:56:34
| 2018-09-13T23:56:34
| 148,711,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,136
|
py
|
import random
class Hand(object):
    """A hand of letters for a word game, stored as a letter -> count dict."""

    def __init__(self, n):
        '''
        Initialize a Hand.

        n: integer, the size of the hand.
        '''
        assert type(n) == int
        self.HAND_SIZE = n
        self.VOWELS = 'aeiou'
        self.CONSONANTS = 'bcdfghjklmnpqrstvwxyz'

        # Deal a new hand
        self.dealNewHand()

    def dealNewHand(self):
        '''
        Deals a new hand, and sets the hand attribute to the new hand.
        '''
        self.hand = {}
        # Roughly one third of the hand is vowels; the rest are consonants.
        numVowels = self.HAND_SIZE // 3
        for i in range(numVowels):
            x = self.VOWELS[random.randrange(0, len(self.VOWELS))]
            self.hand[x] = self.hand.get(x, 0) + 1
        for i in range(numVowels, self.HAND_SIZE):
            x = self.CONSONANTS[random.randrange(0, len(self.CONSONANTS))]
            self.hand[x] = self.hand.get(x, 0) + 1

    def setDummyHand(self, handString):
        '''
        Allows you to set a dummy hand. Useful for testing your implementation.

        handString: A string of letters you wish to be in the hand. Length of this
        string must be equal to self.HAND_SIZE.

        This method sets the hand attribute to a dictionary
        containing the letters of handString.
        '''
        assert len(handString) == self.HAND_SIZE, "Length of handString ({0}) must equal length of HAND_SIZE ({1})".format(len(handString), self.HAND_SIZE)
        self.hand = {}
        for char in handString:
            self.hand[char] = self.hand.get(char, 0) + 1

    def calculateLen(self):
        '''
        Calculate the length of the hand (total number of letters).
        '''
        return sum(self.hand.values())

    def __str__(self):
        '''
        Display a string representation of the hand: letters in sorted
        order, each repeated by its remaining count.
        '''
        output = ''
        for letter in sorted(self.hand.keys()):
            output += letter * self.hand[letter]
        return output

    def update(self, word):
        """
        Does not assume that self.hand has all the letters in word.

        Updates the hand: if self.hand does have all the letters to make
        the word, modifies self.hand by using up the letters in the given word.

        Returns True if the word was able to be made with the letters in
        the hand; False otherwise (and the hand is left unchanged).

        BUG FIX: the original decremented first and deleted keys that went
        negative, so a word that used a letter more times than the hand held
        could still return True.  Now each use is validated before the
        decrement, and on failure self.hand is untouched.

        word: string
        returns: Boolean (if the word was or was not made)
        """
        copy_hand = self.hand.copy()
        for letter in word:
            if copy_hand.get(letter, 0) > 0:
                copy_hand[letter] -= 1
            else:
                # Letter missing or already exhausted: reject the word.
                return False
        self.hand = copy_hand
        return True
# Smoke test: deal a random 7-letter hand, then exercise the deterministic
# dummy-hand and update paths.
myHand = Hand(7)
print(myHand)
print(myHand.calculateLen())
myHand.setDummyHand('aazzmsp')
print(myHand)
print(myHand.calculateLen())
myHand.update('za')
print(myHand)
|
[
"noreply@github.com"
] |
noreply@github.com
|
a3048e66ffb33cc2d58ad1fbd2181763d17a7bc4
|
37e95a54c78b2cad61a8977833c3b69a9d757a5c
|
/Excepciones4.py
|
73dba6ebeb5910b26a297d8e9511abc603c9b74b
|
[] |
no_license
|
solrac205/curso_python
|
b8ab869440887523a0be56fc9b5ab4f53921ac98
|
c372dde614918a66e323da0ce16184f7d84c3a1e
|
refs/heads/master
| 2023-02-06T01:43:13.612237
| 2020-12-31T03:25:09
| 2020-12-31T03:25:09
| 325,693,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
# Banner for the exceptions tutorial (lesson 23).
print("******************************************************")
print("Curso No. 23 Excepciones" )
print("******************************************************")
def divide():
    """Prompt for two numbers and print their quotient.

    Non-numeric input and division by zero are reported instead of
    propagating; the closing message always prints because no exception
    escapes the handled cases.
    """
    try:
        op1=(float(input("Ingrese el primer valor a operar: ")))
        op2=(float(input("Ingrese el segundo valor a operar: ")))
        print("La división es: " + str(op1/op2))
    except ValueError:
        print("El valor introducido es erróneo")
    except ZeroDivisionError:
        print("No se puede dividir entre 0!")
    print("Cálculo finalizado...")
def divide2():
    """Like divide(), but the closing message comes from a finally clause."""
    try:
        op1=(float(input("Ingrese el primer valor a operar: ")))
        op2=(float(input("Ingrese el segundo valor a operar: ")))
        print("La división es: " + str(op1/op2))
    except ValueError:
        print("El valor introducido es erróneo")
    except ZeroDivisionError:
        print("No se puede dividir entre 0!")
    # The finally clause always executes, even when an exception was caught.
    finally:
        print("Cálculo finalizado...")
#si un try no tiene la captura de error except y si finally el finally se ejecutara
#pero luego el programa cae pues se exterioriza el error detectado anteriormente.
def divide3():
try:
op1=(float(input("Ingrese el primer valor a operar: ")))
op2=(float(input("Ingrese el segundo valor a operar: ")))
print("La división es: " + str(op1/op2))
except:
print("se registro un error")
print("Cálculo finalizado...")
def EvaluaEdad(edad):
    """Return a Spanish description of the life stage for the given age.

    Ages of 100 or more fall through and return None.

    Raises:
        ValueError: if the age is negative.
    """
    if edad < 0:
        # BUG FIX: the original raised ZeroDivisionError here, which is the
        # wrong exception type for input validation (no division occurs).
        raise ValueError("No se permiten edades negativas...")
    if edad < 20:
        return "Eres muy Joven"
    elif edad < 40:
        return "Eres Joven"
    elif edad < 65:
        return "Eres maduro"
    elif edad < 100:
        return "Cuidate..."
import math
def CalculaRaiz(num1):
    """Return the square root of num1; negative input raises ValueError."""
    if num1 < 0:
        raise ValueError("Error en valor, este no puede ser negativo")
    return math.sqrt(num1)
# Demo: read a number and print its square root, reporting negative input.
op1=(int(input("introduce un número: ")))
try:
    print(CalculaRaiz(op1))
except ValueError as ErrorDeNumeroNegativo:
    print(ErrorDeNumeroNegativo)
print("finalizo programa")
|
[
"carlos_205ram@hotmail.com"
] |
carlos_205ram@hotmail.com
|
e074302c25447ad18fcf0611616ce9b72342db7e
|
276c023c6c051611724eca97595511d422152f4b
|
/tests/test_server.py
|
22863dfb74ece7697161d765f8d914eaec4cdb39
|
[
"MIT"
] |
permissive
|
perrinjerome/cmake-language-server
|
2aa1a03ee2a4b1df8acdee953da1fb7b3270c36d
|
66af586b2aa4da1a21b54e566f81d250acb0a848
|
refs/heads/master
| 2020-12-07T08:15:00.568358
| 2020-01-03T17:17:26
| 2020-01-03T17:17:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,732
|
py
|
from concurrent import futures
from pathlib import Path
from typing import Optional
from pygls.features import (COMPLETION, FORMATTING, HOVER, INITIALIZE,
TEXT_DOCUMENT_DID_OPEN)
from pygls.server import LanguageServer
from pygls.types import (CompletionContext, CompletionParams,
CompletionTriggerKind, DidOpenTextDocumentParams,
DocumentFormattingParams, FormattingOptions,
InitializeParams, Position, TextDocumentIdentifier,
TextDocumentItem, TextDocumentPositionParams)
# Seconds to wait for each LSP request before treating it as timed out.
CALL_TIMEOUT = 2
def _init(client: LanguageServer, root: Path):
    """Send the LSP `initialize` handshake for *root*, retrying a timed-out
    request up to 3 times."""
    retry = 3
    while retry > 0:
        try:
            client.lsp.send_request(
                INITIALIZE,
                InitializeParams(
                    process_id=1234, root_uri=root.as_uri(),
                    capabilities=None)).result(timeout=CALL_TIMEOUT)
        except futures.TimeoutError:
            # NOTE(review): if every attempt times out, the failure is
            # swallowed and the caller proceeds uninitialized — confirm
            # that is intended for these tests.
            retry -= 1
        else:
            break
def _open(client: LanguageServer, path: Path, text: Optional[str] = None):
    """Send a didOpen notification for *path* as a 'cmake' document; reads
    the file when no explicit *text* override is supplied."""
    if text is None:
        with open(path) as fp:
            text = fp.read()
    client.lsp.notify(
        TEXT_DOCUMENT_DID_OPEN,
        DidOpenTextDocumentParams(
            TextDocumentItem(path.as_uri(), 'cmake', 1, text)))
def test_initialize(client_server, datadir):
    """The server builds its CMake API object only after `initialize`."""
    client, server = client_server
    assert server._api is None
    _init(client, datadir)
    assert server._api is not None
def test_completions_invoked(client_server, datadir):
    """Explicitly invoked completion on 'projec' offers `project` with docs."""
    client, server = client_server
    _init(client, datadir)
    path = datadir / 'CMakeLists.txt'
    _open(client, path, 'projec')
    response = client.lsp.send_request(
        COMPLETION,
        CompletionParams(TextDocumentIdentifier(path.as_uri()), Position(
            0, 6), CompletionContext(
            CompletionTriggerKind.Invoked))).result(timeout=CALL_TIMEOUT)
    item = next(filter(lambda x: x.label == 'project', response.items), None)
    assert item is not None
    assert '<PROJECT-NAME>' in item.documentation
def test_completions_triggercharacter_variable(client_server, datadir):
    """Typing '{' (as in '${') completes CMake variables like PROJECT_VERSION."""
    client, server = client_server
    _init(client, datadir)
    path = datadir / 'CMakeLists.txt'
    _open(client, path, '${')
    response = client.lsp.send_request(
        COMPLETION,
        CompletionParams(
            TextDocumentIdentifier(path.as_uri()), Position(0, 2),
            CompletionContext(CompletionTriggerKind.TriggerCharacter,
                              '{'))).result(timeout=CALL_TIMEOUT)
    assert 'PROJECT_VERSION' in [x.label for x in response.items]
def test_completions_triggercharacter_module(client_server, datadir):
    """Typing '(' after `include` completes module names like GoogleTest."""
    client, server = client_server
    _init(client, datadir)
    path = datadir / 'CMakeLists.txt'
    _open(client, path, 'include(')
    response = client.lsp.send_request(
        COMPLETION,
        CompletionParams(
            TextDocumentIdentifier(path.as_uri()), Position(0, 8),
            CompletionContext(CompletionTriggerKind.TriggerCharacter,
                              '('))).result(timeout=CALL_TIMEOUT)
    assert 'GoogleTest' in [x.label for x in response.items]
def test_completions_triggercharacter_package(client_server, datadir):
    """Typing '(' after `find_package` completes package names like Boost."""
    client, server = client_server
    _init(client, datadir)
    path = datadir / 'CMakeLists.txt'
    _open(client, path, 'find_package(')
    response = client.lsp.send_request(
        COMPLETION,
        CompletionParams(
            TextDocumentIdentifier(path.as_uri()), Position(0, 13),
            CompletionContext(CompletionTriggerKind.TriggerCharacter,
                              '('))).result(timeout=CALL_TIMEOUT)
    assert 'Boost' in [x.label for x in response.items]
def test_formatting(client_server, datadir):
    """Document formatting collapses spacing to the canonical form."""
    client, server = client_server
    _init(client, datadir)
    path = datadir / 'CMakeLists.txt'
    _open(client, path, 'a ( b c ) ')
    response = client.lsp.send_request(
        FORMATTING,
        DocumentFormattingParams(TextDocumentIdentifier(path.as_uri()),
                                 FormattingOptions(
                                     2, True))).result(timeout=CALL_TIMEOUT)
    assert response[0].newText == 'a(b c)\n'
def test_hover(client_server, datadir):
    """Hovering over `project()` returns the command's documentation."""
    client, server = client_server
    _init(client, datadir)
    path = datadir / 'CMakeLists.txt'
    _open(client, path, 'project()')
    response = client.lsp.send_request(
        HOVER,
        TextDocumentPositionParams(TextDocumentIdentifier(path.as_uri()),
                                   Position())).result(timeout=CALL_TIMEOUT)
    assert '<PROJECT-NAME>' in response.contents.value
|
[
"regen100@users.noreply.github.com"
] |
regen100@users.noreply.github.com
|
e827ef9de12fa0211e6677aa82084594cd16d444
|
6b76819d395bb76591fc12e9de83161b37d61672
|
/woot/apps/expt/management/commands/step02_zmod.py
|
f30ef4f4d650e4b9e4688253eed2cfb7feb067a9
|
[] |
no_license
|
NicholasPiano/img
|
8426530512ee80a4ed746874c4219b1de56acbfd
|
3a91c65c3c9680ba7ed7c94308a721dd0cff9ad5
|
refs/heads/master
| 2020-05-18T15:48:50.566974
| 2015-07-16T00:01:17
| 2015-07-16T00:01:17
| 38,632,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
# expt.command: step03_zmod
# django
from django.core.management.base import BaseCommand, CommandError
# local
from apps.img.models import Composite
from apps.expt.util import *
# util
from optparse import make_option
# Padding used to blank out the tail of progress lines printed with '\r'.
spacer = ' ' * 20
### Command
class Command(BaseCommand):
    """Management command: attach a 'zmod' modification to the composite of
    one experiment/series and run it (see handle() below)."""
    # NOTE(review): option_list/make_option is the legacy optparse style
    # (removed in Django 1.10); newer Django uses add_arguments(). Kept as-is.
    option_list = BaseCommand.option_list + (
        make_option('--expt', # option that will appear in cmd
            action='store', # store the raw value
            dest='expt', # key under which it appears in `options`
            default='050714-test', # default experiment name
            help='Name of the experiment to import' # --help text
        ),
        make_option('--series', # option that will appear in cmd
            action='store', # store the raw value
            dest='series', # key under which it appears in `options`
            default='13', # default series name
            help='Name of the series' # --help text
        ),
    )
    args = ''
    help = ''
    def handle(self, *args, **options):
        '''
        1. What does this script do?
        > Make images that can be recognized by CellProfiler by multiplying smoothed GFP with the flattened Brightfield
        2. What data structures are input?
        > Channel
        3. What data structures are output?
        > Channel
        4. Is this stage repeated/one-time?
        > One-time
        Steps:
        1. Select composite
        2. Call pmod mod on composite
        3. Run
        '''
        # 1. select composite
        composite = Composite.objects.get(experiment__name=options['expt'], series__name=options['series'])
        # 2. Call pmod mod
        mod = composite.mods.create(id_token=generate_id_token('img', 'Mod'), algorithm='mod_zmod')
        # 3. Run mod
        print('step02 | processing mod_zmod...', end='\r')
        mod.run()
        print('step02 | processing mod_zmod... done.{}'.format(spacer))
|
[
"nicholas.d.piano@gmail.com"
] |
nicholas.d.piano@gmail.com
|
25efd543c95c0f31ed446fd5997a5882b21497e1
|
b992ccd52327348af6d647b078ce43a356be5ff4
|
/ScrapyTest/ScrapyTest/pipelines.py
|
3ea8bba409b53ba8500899bad694824cfe4d854e
|
[] |
no_license
|
moritzwilksch/ScrapyProjects
|
b22ddd259484bf3604bba64eb64df00f2fb2443e
|
a29a07b760defd398a44048cb92e0d4b7f623d9c
|
refs/heads/main
| 2023-01-12T17:01:54.746172
| 2020-11-22T13:55:12
| 2020-11-22T13:55:12
| 315,046,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exporters import JsonItemExporter
import re
class ScrapytestPipeline:
    """Item pipeline that streams every scraped item into exportdata.json."""

    def open_spider(self, spider):
        # Open the output file once per crawl and begin the JSON document.
        self.file = open("exportdata.json", 'w+b')
        self.exporter = JsonItemExporter(self.file)
        self.exporter.start_exporting()

    def close_spider(self, spider):
        # Finish the JSON array before releasing the file handle.
        self.exporter.finish_exporting()
        self.file.close()

    def process_item(self, item, spider):
        # Serialize the item as-is and hand it to the next pipeline stage.
        self.exporter.export_item(item)
        return item
|
[
"moritzwilksch@gmail.com"
] |
moritzwilksch@gmail.com
|
41b8a35c5311c10f292a99c2ef0c63c9c5713fa9
|
0a5aeb6d170e14fc53d07c0523d1b995db1fd341
|
/Lab 7/file sequential.py
|
2f54749f2e24d636a6a30e96c47e57cb0177c025
|
[] |
no_license
|
iweyy/WIA2004-Operating-Systems
|
f664f8b9a32654de7ab1887131410dd69475edca
|
3106393626ee05171637404cee68fc4e8c2acee2
|
refs/heads/main
| 2023-08-08T14:26:11.961866
| 2021-06-01T15:55:43
| 2021-06-01T15:55:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
# Contiguous (sequential) file-allocation simulator over a disk of `maximum`
# blocks; files[i] == 1 marks block i as allocated.
maximum = 50
files = [0]*maximum
# repeat == 1 -> prompt for another file; 0 -> stop; 3 -> re-ask yes/no.
repeat = 1
while repeat == 1:
    start = int(input (f"Enter the starting block of the files (0-{maximum-1}): "))
    # Re-prompt until the starting block lies inside the disk.
    while start<0 or start>=maximum:
        if start>=maximum:
            print ("Exceed maximum number of file")
        if start<0:
            print ("Cannot be a negative number")
        start = int(input ("Enter the starting block of the files: "))
    length = int(input ("Enter the length of the files: "))
    # Re-prompt until the file fits between `start` and the end of the disk.
    # NOTE(review): length 0 is accepted and trivially "allocates" nothing —
    # confirm whether a minimum length of 1 was intended.
    while length<0 or length+start>maximum:
        if length+start>maximum:
            print ("Exceed maximum number of file")
        if length<0:
            print ("Cannot be less of equal; to 0")
        length = int(input ("Enter the length of the files: "))
    # Contiguous allocation succeeds only if every requested block is free.
    count = 0
    for i in range (length):
        if files[start+i] == 0:
            count += 1
    if count == length:
        for i in range (length):
            files[start+i] = 1
            print (f"files[{start+i}] = 1")
        print("The file is allocated to the disk")
    else:
        print("The file is not allocated to the disk")
    repeat = 3
    # Ask whether to continue until a valid Yes/No answer is given.
    while repeat == 3:
        ans = input("Do you want to enter more files? (Yes/No): ")
        if (ans.lower() == "yes"):
            repeat = 1
        elif (ans.lower() == "no"):
            repeat = 0
        else:
            print("Invalid answer.")
            repeat = 3
# Final allocation map.
print("Files Allocated are :")
for i in range (maximum):
    print (f"files[{i}] = {files[i]}")
|
[
"megathilmi49@gmail.com"
] |
megathilmi49@gmail.com
|
d393bbe033abd2b8272cc0170156ef42703866a4
|
12342d6bf6635bf2bfc734f3a8e9bcb40e8782ce
|
/day5_2017.py
|
5b129fa811622a09731c9bd23ae465913e23c384
|
[] |
no_license
|
ngocmtu/adventofcode
|
6d06d008eae4b07b303dcf814a2e2ba42d599909
|
cde11d15284faa0433e3a62fc69fca7c9b93aaba
|
refs/heads/master
| 2020-03-23T08:01:23.707800
| 2018-07-24T18:49:00
| 2018-07-24T18:49:00
| 141,303,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
# s = [int(line) for line in open('input.txt','r').readlines()]
# s= [0,3,0,1,-3]
def solve_part1(s):
    """Advent of Code 2017 day 5, part 1.

    Follow the jump offsets in *s*, incrementing each offset after it is
    taken, and return how many jumps are made before escaping the list.
    Mutates *s* in place, exactly like the original.
    """
    pos = 0
    jumps = 0
    while 0 <= pos < len(s):
        jumps += 1
        step = s[pos]
        s[pos] += 1
        pos += step
    return jumps
def solve_part2(s):
    """Advent of Code 2017 day 5, part 2.

    Same as part 1, except an offset of 3 or more is decremented after the
    jump instead of incremented.  Mutates *s* in place and returns the
    number of jumps taken before escaping the list.
    """
    pos = 0
    jumps = 0
    while 0 <= pos < len(s):
        jumps += 1
        step = s[pos]
        # Offsets < 3 grow by one; larger offsets shrink by one.
        s[pos] += 1 if step < 3 else -1
        pos += step
    return jumps
# BUG FIX: `s` was only defined in the commented-out lines at the top of the
# file, so these calls crashed with NameError; additionally, solve_part1
# mutates the list it is given, which would corrupt solve_part2's input.
# Use fresh copies of the sample puzzle input for each part.
s = [0, 3, 0, 1, -3]
print(str(solve_part1(list(s))))
print(str(solve_part2(list(s))))
|
[
"noreply@github.com"
] |
noreply@github.com
|
16bf0ef9ec53acb6b4376b1146bb236b50565626
|
fddad101c7be2fcbc05131081e708f31948c002f
|
/329. Longest Increasing Path in a Matrix/answer_bfs.py
|
a9141a61f5be8c4c3d3ff273a059e79b03652077
|
[] |
no_license
|
LennyDuan/AlgorithmPython
|
a10c9278c676829ab5a284a618f6352414888061
|
523c11e8a5728168c4978c5a332e7e9bc4533ef7
|
refs/heads/master
| 2021-07-16T12:31:08.284846
| 2021-03-28T20:31:28
| 2021-03-28T20:31:28
| 244,040,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
def longestIncreasingPath(self, matrix) -> int:
    """Return the length of the longest strictly-increasing path in *matrix*,
    moving only up/down/left/right.

    BUG FIX / PERF: the original threaded a `visited` set that was never
    populated (dead code) and re-explored every cell from scratch, giving
    exponential worst-case time.  Memoizing the best path length per cell
    makes this O(rows * cols) with identical results.
    """
    if not matrix or not matrix[0]:
        return 0
    rows, cols = len(matrix), len(matrix[0])
    directions = ((0, 1), (0, -1), (1, 0), (-1, 0))
    # memo[(i, j)] = length of the longest increasing path starting at (i, j)
    memo = {}

    def traverse(i, j):
        if (i, j) in memo:
            return memo[(i, j)]
        best = 1
        for di, dj in directions:
            next_i, next_j = i + di, j + dj
            if (0 <= next_i < rows and 0 <= next_j < cols
                    and matrix[next_i][next_j] > matrix[i][j]):
                best = max(best, 1 + traverse(next_i, next_j))
        memo[(i, j)] = best
        return best

    return max(traverse(i, j) for i in range(rows) for j in range(cols))
# Smoke test: the longest strictly-increasing path here is 3 -> 4 -> 5 -> 6.
nums = [
    [3, 4, 5],
    [3, 2, 6],
    [2, 2, 1]
]
print(longestIncreasingPath(None, nums))
|
[
"hod8@aber.ac.uk"
] |
hod8@aber.ac.uk
|
abe3042622f37a3eb01601fb6fef3fa398f676a7
|
6438528fd3b380b1f5dcb94f4ccb18dc9add06e2
|
/py/size.py
|
fa1f0756b62d1a4ec6dbc5ac640d3b20c49d444f
|
[] |
no_license
|
JanStoeckmann/point_cloud_denoising
|
df4b6ce61095bbcd2a7c48485debef946654dacf
|
2da1a93b30d83541f8388df3cf609809bcb49fb5
|
refs/heads/main
| 2023-02-07T14:22:20.094419
| 2020-12-21T14:38:18
| 2020-12-21T14:38:18
| 322,682,380
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
#! /usr/bin/python
import sys
import numpy as np
import os
def load_pcd(input):
    """Parse an ASCII .pcd file and return its points as [x, y, z] float lists.

    Lines that do not start with a digit or '-' (header lines such as
    VERSION/FIELDS/POINTS) are skipped.
    """
    cloud = []
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original leaked it), and strip only the trailing newline instead
    # of unconditionally dropping the last character, which corrupted the
    # final line of files lacking a trailing newline.
    with open(input, 'r') as file1:
        for line in file1:
            if (line[0].isnumeric()) or (line[0] == "-"):
                point = line.rstrip('\n').split(" ", 2)
                cloud.append([float(point[0]), float(point[1]), float(point[2])])
    return cloud
def cloud_size(input):
    """Return the number of points in the given .pcd file.

    BUG FIX: the original duplicated load_pcd's parsing loop, then threw that
    result away by re-parsing through load_pcd anyway (reading the file
    twice); delegate directly instead.
    """
    return len(load_pcd(input))
|
[
"jansto@web.de"
] |
jansto@web.de
|
0588bf589dc53ee0422f074a5ff5c91ed6377dba
|
d8e0d76faf67f5f466aa72b5515b1e84f30f2750
|
/resources/spotipy/util.py
|
1ca2a057fd7fc8906f4f1923abdf7c1202422a46
|
[] |
no_license
|
NicolasHaeffner/spotimark
|
edf23a1a1ce7256dc8c5e2b6e73e3ad5c4f54441
|
cea4067f4aca0f78e1c996e5a72bbe3ae5eae66d
|
refs/heads/master
| 2021-05-13T16:41:10.508334
| 2018-01-09T10:07:05
| 2018-01-09T10:07:05
| 116,799,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,981
|
py
|
# shows a user's playlists (need to be authenticated via oauth)
from __future__ import print_function
from . import oauth2
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse as urlparse
import threading
from time import sleep
class TokenHandler(BaseHTTPRequestHandler):
global path
path = False
def do_GET(self):
global path
print("Just received a GET request")
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b'You may close the browser now!')
path = self.path
# parsed_path = urlparse(self.path)
# print('=====================================')
# print(parsed_path)
# try:
# params = dict([p.split('=') for p in parsed_path[4].split('&')])
# except:
# params = {}
return
def log_request(self, code=None, size=None):
pass
def log_message(self, format, *args):
print('Message')
def prompt_for_user_token(username, cachepath=None, scope=None, client_id=None,
client_secret=None, ip=None, port=None):
# redirect_uri = 'http://localhost:12345/'
redirect_uri = 'http://' + ip + ':' + port + '/'
# print('The redirect uri is: ' + redirect_uri)
scope = 'user-read-playback-state user-modify-playback-state playlist-read-private playlist-read-collaborative playlist-modify-public playlist-modify-private user-follow-modify user-follow-read user-library-read user-library-modify user-read-private user-read-email user-read-birthdate user-top-read'
if not cachepath:
cachepath = ".cache-" + username
# request the token
sp_oauth = oauth2.SpotifyOAuth(client_id, client_secret, redirect_uri,
scope=scope, cache_path=cachepath + '/' + username + '.cache')
# try to get a valid token for this user, from the cache,
# if not in the cache, the create a new (this will send
# the user to a web page where they can authorize this app)
token_info = sp_oauth.get_cached_token()
if not token_info:
server = HTTPServer((ip, int(port)), TokenHandler)
t = threading.Thread(target=server.handle_request)
t.deamon = True
t.start()
auth_url = sp_oauth.get_authorize_url()
if ip == 'localhost':
webbrowser.open(auth_url)
print("Opened %s in your browser" % auth_url)
else:
print("Please navigate here: %s" % auth_url)
while not path:
print('ConnectControl: wait for token')
sleep(1)
response = 'http://' + ip + ':' + port + path
code = sp_oauth.parse_response_code(response)
token_info = sp_oauth.get_access_token(code)
# Auth'ed API request
if token_info:
return token_info['access_token']
else:
return None
|
[
"nicolas.haeffner@me.com"
] |
nicolas.haeffner@me.com
|
6651590162295a4127549a42719282d946ad2af4
|
a679c7624dd97779858bb7695b9e113bde09b6c6
|
/Python/Medium/firstNonRepeatedChar.py
|
cb6b0102b768d492ec5a2e63051bc8e09b366610
|
[] |
no_license
|
lgminh/Challenges
|
8f2fe24bae97a0343a6ccfa20ffcfa95b92469e7
|
1da5c2b7b49f36f2e8d8786ed54834e9af34086c
|
refs/heads/master
| 2021-10-29T01:34:40.692029
| 2019-04-25T09:10:41
| 2019-04-25T09:10:41
| 75,462,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
from string import ascii_lowercase
def firstNonRepeatedChar(s):
x = {}
for i in ascii_lowercase:
x[i] = []
for idx, c in enumerate(s):
x[c].append(idx)
for k,v in x.items():
if len(v) != 1:
del x[k]
print min(x.values())
for k,v in x.items():
if min(x.values())[0] == v[0]:
return k
if __name__ == '__main__':
print firstNonRepeatedChar('sdadsaasuwqf')
|
[
"minhlg@ahamove.com"
] |
minhlg@ahamove.com
|
6d8269da5291ae9167422229e4273a6016767dd4
|
843bb82a466376ca0a74a6e28bffa8bf43df72b8
|
/covid_tracker/users/migrations/0001_initial.py
|
a6439db3963e263f35577e52584547014c08f095
|
[] |
no_license
|
heyswatisrivastava/covid_tracker
|
a27210db400a83a466d21258fa6e4c062ac46c1f
|
843efed94d1df47d19a00f7d1fb7d3e40086c9a6
|
refs/heads/main
| 2023-05-10T12:04:57.788635
| 2021-06-28T11:11:34
| 2021-06-28T11:11:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
# Generated by Django 3.2.4 on 2021-06-27 06:38
from django.db import migrations, models
import django.db.models.deletion
import django_mysql.models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('mobile_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),
('pincode', models.IntegerField()),
],
options={
'db_table': 'users',
},
),
migrations.CreateModel(
name='UserSymptom',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symptoms', django_mysql.models.ListCharField(models.CharField(max_length=255), max_length=255, size=None)),
('travel_hostory', models.BooleanField(default=False)),
('covid_contact', models.BooleanField(default=False)),
('covid_risk', models.IntegerField(default=5)),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='users.user')),
],
options={
'db_table': 'user_symptoms',
},
),
]
|
[
"swatisrivastava162@gmail.com"
] |
swatisrivastava162@gmail.com
|
238056c98f81b88787366589bde5eb878cd01528
|
a93af8744c2a7c6fd0b8e353c9a6a6af563376e1
|
/venv/exporter.py
|
6ad74bd3e42333b0d0897cbb1b5b72ddd6c93337
|
[] |
no_license
|
ohjiwoo123/Python-JobScrapper
|
9189cba701cac1d7ee52932ed2e2cde2f81f7d63
|
e2591233b0c9c1473ce7682b41b6959b20944965
|
refs/heads/main
| 2023-06-02T22:06:20.385638
| 2021-06-20T13:23:05
| 2021-06-20T13:23:05
| 373,442,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
import csv
def save_to_file(jobs):
file = open("jobs.csv",mode ="w")
writer = csv.writer(file)
writer.writerow(["title","company","location","link"])
for job in jobs:
writer.writerow(list(job.values()))
return
|
[
"ohjiwoo123@naver.com"
] |
ohjiwoo123@naver.com
|
7e4604149921d96cee1e6d69a3b590c27c1da8f1
|
fb5fe577eaf98073ebe458ee51b3ef288e956353
|
/week5/9.py
|
ab76ec63b57426393f2e57bfd99d4a6eb4908de2
|
[] |
no_license
|
askarakshabayev/PP2_2021
|
f41b9703411602877c037f4a2099eb29f262125c
|
366793190ae7e6461e659697bfbefed28ae5e26e
|
refs/heads/master
| 2023-04-09T11:59:32.046566
| 2021-04-17T07:51:40
| 2021-04-17T07:51:40
| 334,356,647
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
import os
import shutil
# dir_path = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test3"
# os.makedirs(dir_path)
# src = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test3/input.txt"
# dst = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test3/input_1.txt"
# os.rename(src, dst)
src = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test_3"
dst = "/Users/askar/Documents/KBTU/PP2/week5/test1/test2/test_33"
# shutil.move(src, dst)
# shutil.copytree(src, dst)
# shutil.rmtree(src)
print(os.listdir(dst))
|
[
"askar.akshabayev@gmail.com"
] |
askar.akshabayev@gmail.com
|
bb085c931ea83f30ef777b2ca2a79f3eddced1d0
|
1953ad2d8cc8a36e29d3d48e5458aeb69bf17bdd
|
/[9372]패션왕 신해빈.py
|
8d1c75cf0e13cc09638262f1edfb0beec15e5d53
|
[] |
no_license
|
wookkl/backjoon-problemsolving
|
8b75ac4575ffdc15615bc5672f1d5358ac3016a4
|
fbe7e051c7513f52b2ac26472dfc34955013549d
|
refs/heads/master
| 2023-04-10T22:33:49.614340
| 2021-04-25T00:50:47
| 2021-04-25T00:50:47
| 219,535,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
for _ in range(int(input())):
items = [input().split() for i in range(int(input()))]
d = {k: 0 for _, k in items}
res = 1
for v, k in items:
d[k] += 1
for v in d.values():
res *= (v + 1)
print(res - 1)
|
[
"wjddnr315@gmail.com"
] |
wjddnr315@gmail.com
|
1515e9a592a06f8c05dc0dec9c9fd9eb17031857
|
15563e9aff20ceeb813bc89f02b7832f5fef8a89
|
/tests/test_filerecorder.py
|
988a9258575b12423828e9d590bc5d36d8a43e80
|
[
"BSD-3-Clause"
] |
permissive
|
openxc/openxc-python
|
5cc9a3b6ddf9ce7ecf6bca3163c306c0d5f4fbd9
|
5341180fea6c364027dedc9bc4c8027b2831325f
|
refs/heads/master
| 2022-05-12T22:26:54.176224
| 2021-08-11T14:35:16
| 2021-08-11T14:35:16
| 6,508,031
| 91
| 33
|
BSD-3-Clause
| 2022-03-24T11:23:19
| 2012-11-02T15:20:10
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
import unittest
from openxc.sinks import FileRecorderSink
class FileRecorderSinkTest(unittest.TestCase):
def test_create(self):
FileRecorderSink()
|
[
"chris.peplin@rhubarbtech.com"
] |
chris.peplin@rhubarbtech.com
|
8268a59d42801e2ee2cd8a1c58941a45940bc16a
|
f6a3de837ac401c464ada6d980b084425ef45791
|
/alexa-iot/device-broker.py
|
df6cfdf87a18510e1d4b32847420767a3692e0d9
|
[] |
no_license
|
johanlu4st/python-alexa-skills
|
4f9a0bf39c0d94cac9ef3318b9e094f2da275560
|
e71f5d4e1f49469dd9321fafbc166d2e65509ef8
|
refs/heads/master
| 2020-09-14T09:54:49.013271
| 2018-05-11T11:22:34
| 2018-05-11T11:22:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
from flask import Flask, render_template
from flask_mqtt import Mqtt
from pymongo import MongoClient
import datetime
app = Flask(__name__)
client = MongoClient('mongodb://pyiot:password123456@ds133166.mlab.com:33166/pyiot-stackup')
db = client['pyiot-stackup']
app = Flask(__name__)
app.config['MQTT_BROKER_URL'] = 'm12.cloudmqtt.com'
app.config['MQTT_BROKER_PORT'] = 13743
app.config['MQTT_USERNAME'] = 'kqcqutsu'
app.config['MQTT_PASSWORD'] = 'MP86zXZ6Zkds'
app.config['MQTT_REFRESH_TIME'] = 1.0 # refresh time in seconds
mqtt = Mqtt(app)
mqtt.subscribe('room/temp')
@mqtt.on_message()
def handle_mqtt_message(client, userdata, message):
data = dict(
topic=message.topic,
payload=message.payload.decode()
)
print(data['topic'])
print(data['payload'])
tempValue = {
"value" : data['payload'],
"modified-date" : datetime.datetime.utcnow()
}
temp = db.temperature
temperValue_id = temp.insert_one(tempValue).inserted_id
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5050, debug=False)
|
[
"bunnyppl@gmail.com"
] |
bunnyppl@gmail.com
|
2dd2ab1673c7b063eaa173cb49cb2e9ec01c412c
|
5aa3e81a9b7b251ee510208aab5f6a000c8d9e67
|
/Selenium/21 Comment(未完成).py
|
4223aeb1e7f17659ff81a1494ec9171ae95d40f6
|
[] |
no_license
|
PhenomK/Project
|
d99aae43d837863e86a8aee56eb92ec08c632f2b
|
7a5a1481ff29023c0a2d5cbcac24d1d7ccf9c684
|
refs/heads/master
| 2021-09-05T01:31:08.874681
| 2018-01-23T12:38:10
| 2018-01-23T12:38:10
| 104,861,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
#登陆
driver = webdriver.Chrome()
driver.get("http://172.16.29.37:8080/user/login") #测试网址
elem1 = driver.find_element_by_id("user_slug")
elem1.send_keys("chrome") #用户名
elem2 = driver.find_element_by_id("pwd")
elem2.send_keys("123456") #密码
button = driver.find_element_by_id("submit_button")
button.click()
time.sleep(2)
#跳转个人文件
elem3 = driver.find_element_by_link_text("个人文件")
elem3.click()
time.sleep(1)
driver.refresh()
#新建评论文档
time.sleep(1)
above = driver.find_element_by_id("addfile") #新建
ActionChains(driver).move_to_element(above).perform()
time.sleep(1)
elem4 = driver.find_element_by_class_name("addword") #新建文档
elem4.click()
time.sleep(1)
ActionChains(driver).move_by_offset(xoffset=400,yoffset=400).perform()
elem5 = driver.find_element_by_class_name("box")
elem5.send_keys("评论测试文档")
elem6 = driver.find_element_by_css_selector(".sure")
elem6.click()
time.sleep(2)
#评论
elem7 = driver.find_element_by_xpath("a//[@class='display-name' and @title='评论测试文档.docx'/../../../..]").find_element_by_class_name("item-checkbox")
elem7.click()
time.sleep(1)
elem8 = driver.find_element_by_id("review_contBox")
elem8.click()
time.sleep(1)
elem9 = driver.find_element_by_class_name("review_text")
elem9.send_keys("评论测试")
elem10 = driver.find_element_by_id("review_message_submit")
elem10.click()
|
[
"Ghope.plus@gmail.com"
] |
Ghope.plus@gmail.com
|
c7fa88552bf8e2d09b066cfbefe9f1deb2738348
|
b4b5b755eb767c8b8224df7d05f94ab49e9eae1d
|
/lib/model/test.py
|
635fdbb590850b0c96dd8f9556abb5e419b7d099
|
[
"MIT"
] |
permissive
|
107618024/Windows-Faster-RCNN-TensorFlow
|
8aa18d96df569251eeebec7c877bc2904e590035
|
95e73edffd0c0a556a2de8b832db53509d3db1f9
|
refs/heads/master
| 2020-06-22T19:00:17.663216
| 2019-07-09T17:06:49
| 2019-07-09T17:06:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,246
|
py
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
from utils.timer import Timer
from utils.blob import im_list_to_blob
from model.config import cfg, get_output_dir
from model.bbox_transform import clip_boxes, bbox_transform_inv
from model.nms_wrapper import nms
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_blobs(im):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale_factors = _get_image_blob(im)
return blobs, im_scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def im_detect(sess, net, im):
blobs, im_scales = _get_blobs(im)
assert len(im_scales) == 1, "Only single-image batch implemented"
im_blob = blobs['data']
blobs['im_info'] = np.array([im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)
_, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])
boxes = rois[:, 1:5] / im_scales[0]
scores = np.reshape(scores, [scores.shape[0], -1])
bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
return scores, pred_boxes
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
for cls_ind in range(num_classes):
for im_ind in range(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
inds = np.where((x2 > x1) & (y2 > y1))[0]
dets = dets[inds,:]
if dets == []:
continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.3):
np.random.seed(cfg.RNG_SEED)
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(imdb.num_classes)]
output_dir = get_output_dir(imdb, weights_filename)
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
for i in range(num_images):
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(sess, net, im)
_t['im_detect'].toc()
_t['misc'].tic()
# skip j = 0, because it's the background class
for j in range(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
all_boxes[j][i] = cls_dets
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time))
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
imdb.evaluate_detections(all_boxes, output_dir)
|
[
"32533059+gwspotex@users.noreply.github.com"
] |
32533059+gwspotex@users.noreply.github.com
|
6c166f5f43ac352167d81f6b03513dddb17c94c5
|
d8f78d99e742806c963981ed227174ce16533b70
|
/ABC051/c.py
|
44e516975f8b8ef61484ae59e992d94dfda17eeb
|
[] |
no_license
|
pekopekojun/atcoder
|
28e3ad4e8a0741e31fbfa4ff6d6a29b964ef67c8
|
56bcad4855c11b12fcc8f11a65c33c501da3dff2
|
refs/heads/master
| 2023-03-04T00:44:15.742378
| 2021-02-12T14:53:06
| 2021-02-12T14:53:06
| 336,311,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
sx, sy, tx, ty = map(int, input().split())
dx = tx-sx
dy = ty-sy
print("U"*dy + "R"*dx, end="")
print("D"*dy + "L"*dx, end="")
print("L" + "U"*(dy+1) + "R"*(dx+1) + "D", end="")
print("R" + "D"*(dy+1) + "L"*(dx+1) + "U", end="")
|
[
"jun805@gmail.com"
] |
jun805@gmail.com
|
67c00ebedf8cd9fafcd55f4e5118aa391ff74616
|
a397c77c92522252a7333aa712949b8001e7f443
|
/freebot/modules/life.py
|
a9be7d70ec8a94577154af42daee6c072e0a2c1b
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
vashadow/Kopachris
|
a7b3fa64cd0b45afc12f389c61770c5c1a13d6da
|
fa791f00df9b5e332b82cd39f9ceb704579218b9
|
refs/heads/master
| 2023-09-04T10:30:10.761319
| 2021-09-26T03:23:31
| 2021-09-26T03:23:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
import re
## Description stored in db.bot_modules
description = "The answer"
## Prefix stored in db.bot_modules
## Each module should have its own prefix for bot_vars entries
prefix = "42_"
## Event type handled by this module
event_type = "PRIVMSG"
## Additional global vars
#H_HTTP = {'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'}
#G_URL = 'http://www.google.com/search?q={}&btnI'
def init(db):
pass
def remove(db):
pass
def run(bot, event, db):
m = event.message.lower()
regex = re.compile('[!"#$%&\'()*+,./:;<=>?@\\^_`{|}~-]')
mr = regex.sub('', m).split()
ms = set(mr)
q = {'what', 'the', 'life', 'universe', 'everything'}
q2 = {'meaning', 'question', 'answer'}
if q < ms and len(ms.intersection(q2)) == 1:
bot.bot_reply(event, "Everyone knows it's 42, duh!")
|
[
"chris@WS01-Chris.kopachris.net"
] |
chris@WS01-Chris.kopachris.net
|
11e357c42da450eb40840e5bb31b4c09f2f26c89
|
edb9121984ef9d6647ea7462c1401bdc6b9451e7
|
/grid_modules/__init__.py
|
1712334588c3eebf6859a51bae6265f6929e4b86
|
[] |
no_license
|
yang0110/controllable_agent
|
eceb6b17da4ea22cf8de753dd5260da37b2a403c
|
befeb7386d22ef8806725740ee08cbf9c87a5a0d
|
refs/heads/main
| 2023-08-25T21:25:49.977459
| 2021-10-14T13:08:53
| 2021-10-14T13:08:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
# from grid_modules.common import MDP
|
[
"ahmed.touati90@gmail.com"
] |
ahmed.touati90@gmail.com
|
8679eb15e7abddc2ffc51114e648c08423ab7ebd
|
2aec9c5e8c72b731d3abf22f2a407fe09c1cde09
|
/QDS_Test/case/dbwytest.py
|
22710e1c97b825043ebe5514995dd8e8038a0300
|
[] |
no_license
|
jiangyg/ZWFproject
|
8b24cc34970ae0a9c2a2b0039dc527c83a5862b5
|
aa35bc59566d92721f23d2dd00b0febd268ac2dd
|
refs/heads/master
| 2020-09-26T17:01:00.229380
| 2019-11-15T13:16:21
| 2019-11-15T13:16:21
| 226,297,631
| 0
| 1
| null | 2019-12-06T09:55:37
| 2019-12-06T09:55:36
| null |
UTF-8
|
Python
| false
| false
| 3,860
|
py
|
# coding=utf-8
import time
import logging
from selenium.webdriver import ActionChains
from utils.mytestcase import MyTestCase
from utils.logincookie import DengLuPage
from utils.random import unicode
from utils.screenshort import get_screenshort
class DbWyTest(MyTestCase):
"""担保无忧测试集"""
def test_dbwy(self):
"""担保无忧测试"""
# logging.basicConfig(filename='../LOG/' + __name__ + '.log',
# format='[%(asctime)s-%(filename)s-%(levelname)s: %(message)s]', level=logging.DEBUG,
# filemode='a', datefmt='%Y-%m-%d%I:%M:%S %p')
dl = DengLuPage(self.driver)
# 官方推荐有find_element(By.*(""))代替find_element_by_*("")
# self.driver.find_element_by_id()
# self.driver.find_element()
dl.login()
time.sleep(2)
ActionChains(self.driver).move_to_element(self.driver.find_element_by_css_selector(
"body > div.section-banner > div.public-navbar > div > div > h3 > span")).perform()
time.sleep(2)
ActionChains(self.driver).move_to_element(self.driver.find_element_by_css_selector(
"body > div.section-banner > div.public-navbar > div > div > div > ul:nth-child(1) > li:nth-child(1) > h3 > a")).perform()
ActionChains(self.driver).release()
self.driver.find_element_by_css_selector(
"body > div.section-banner > div.public-navbar > div > div > div > ul:nth-child(1) > li:nth-child(1) > div > dl:nth-child(3) > dd > a:nth-child(2)").click()
# 获取打开的多个窗口句柄
windows = self.driver.window_handles
# 切换到当前最新打开的窗口
self.driver.switch_to.window(windows[-1])
time.sleep(2)
self.driver.set_window_size(1920, 1080)
time.sleep(3)
self.assertIn("商标担保注册|商标注册费用|商标申请流程-权大师", self.driver.title)
print(self.driver.title)
# abwy注册
self.driver.find_element_by_css_selector(
"body > div.section-product.width1200 > dl > dd > div.cont-serviceItems > table > tbody > tr > td.td-cont > ul > li:nth-child(2)").click()
for a in self.driver.find_elements_by_css_selector("#total-price"):
print("费用总计:"+a.text)
aa=a.text
self.driver.find_element_by_css_selector(
"body > div.section-product.width1200 > dl > dd > div.cont-btnBuy > a.btn.btn-next.buynow").click()
self.driver.find_element_by_name("ownerContactPerson").send_keys("{}".format(unicode()))
self.driver.find_element_by_name("ownerContactPhone").send_keys("15624992498")
self.driver.find_element_by_name("contactMail").send_keys("145647@qq.com")
self.driver.find_element_by_css_selector("#remark").send_keys(time.strftime("%Y-%m-%d_%H-%M-%S") + "测试订单")
get_screenshort(self.driver, "test_dbwy.png")
for i in self.driver.find_elements_by_css_selector("body > div.myOrder-wrap > div.section-myorder.orderinfo-wrap.width1200 > div:nth-child(6) > div.last-pay.personal-last-pay > ul > li.row-sense > em > i"):
print("总价:"+i.text)
ii=i.text
self.assertIn(aa,ii)
print("价格一致")
self.driver.find_element_by_css_selector(
"body > div.myOrder-wrap > div.section-myorder.orderinfo-wrap.width1200 > div:nth-child(6) > div.btns > a.btn-next.submitOrder").click()
for o in self.driver.find_elements_by_class_name("payable"):
print("订单提交成功,应付金额:"+o.text)
oo=o.text
self.assertIn(oo,ii)
print("测试通过")
self.driver.find_element_by_css_selector("#alisubmit").click()
|
[
"34021500@qq.com"
] |
34021500@qq.com
|
ba3c5206735c47b78b510cda18d0093d61793a4f
|
5822544fcead26c64e6d05ba57ba2ab0fb446b39
|
/Program/FunctionList.py
|
a3c63b08a3e4b2af213ea65bd5f0989db3e1c811
|
[] |
no_license
|
R-second/PyAlgorithmProblem
|
7ba72b152b9f1356c448fb292aea5380c86e8b84
|
32ba15b9d9459731ee6dc5553bbaa756114c5323
|
refs/heads/master
| 2020-05-20T00:01:16.972241
| 2019-05-11T11:31:42
| 2019-05-11T11:31:42
| 185,279,094
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
# FunctionList.py
import tkinter
import PrimeFactor
import Fibonacci
import Gcd
import MaxMin
import Sort
import Calendar
class FunctionList:
# functionListにアルゴリズムの名称を設定
functionList = ["素数判定", "フィボナッチ数列の出力", "最大公約数出力", "最大値判定", "ソート", "万年カレンダー"]
@classmethod
def functionMain(cls, num):
# subWindowを作成
application2 = tkinter.Tk()
application2.title("subWindow")
# 引数numに応じてインスタンスの生成を変更
if num == 0:
subWindow = PrimeFactor.PrimeFactor(application2)
elif num == 1:
subWindow = Fibonacci.Fibonacci(application2)
elif num == 2:
subWindow = Gcd.Gcd(application2)
elif num == 3:
subWindow = MaxMin.MaxMin(application2)
elif num == 4:
subWindow = Sort.Sort(application2)
else:
subWindow = Calendar.Calendar(application2)
application2.protocol("WM_DELETE_WINDOW", subWindow.quit)
application2.mainloop()
|
[
"R-second@yamadarigatsu-no-MacBook-Air.local"
] |
R-second@yamadarigatsu-no-MacBook-Air.local
|
807e0194c93653a7d59c4c00259d0f1ece84c914
|
43fd8d4a7395ec3f5ff85d06da045d8646775d51
|
/06 - Extração de Informação/main.py
|
dc5c5590d20b141da3d483c024cb6e566218a3fb
|
[] |
no_license
|
odvieira/db201901
|
0725136b819b0515185bbb90511fb8a2ea07123f
|
c42a7971c5add37265fdb3a1192f3a4d821d9835
|
refs/heads/master
| 2020-05-14T11:17:47.500534
| 2019-05-16T02:14:18
| 2019-05-16T02:14:18
| 181,775,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,857
|
py
|
#!/usr/bin/env python3
from psycopg2 import extras, connect
from os import system
from bs4 import BeautifulSoup
import requests, xml.etree.cElementTree as ET
import urllib.request, json, wikipedia
if __name__ == "__main__":
credentials = "dbname='1901EquipePGDR' user='1901EquipePGDR' host='200.134.10.32' password='793953'"
with connect(credentials) as connection:
with connection.cursor(cursor_factory=extras.DictCursor) as cur:
# HEADER PARA PESQUISA EM INGLES
headers = {"Accept-Language": "en-US, en;q=0.5"}
query_artistas = 'SELECT * FROM ArtistasMusicais'
cur.execute(query_artistas)
# ORGANIZANDO ARTISTAS EM LISTA
lista_artistas = []
for artista_musical in cur.fetchall():
if(artista_musical[0].isdigit()):
continue
lista_artistas.append(artista_musical[0])
counter = 1
total = len(lista_artistas)
# CRIANDO XML E ITERANDO ARTISTAS
xml_art = ET.Element("ArtistasMusicais")
for nome_artista in lista_artistas:
print('REALIZANDO BUSCA '+str(counter)+'/'+str(total)+' (ARTISTA): ' + nome_artista)
url = requests.get("https://en.wikipedia.org/wiki/"+nome_artista,headers=headers)
data = url.text
soup = BeautifulSoup(data,"lxml")
info_box_wiki = soup.findAll("table", {"class": "infobox"})
if len(info_box_wiki) < 1:
continue
# NOME ARTISTA/GRUPO
nome_banda = info_box_wiki[0].find("th").find(text=True)
itunes_link = ''
url_alt = "https://itunes.apple.com/search?term="+nome_banda.replace(' ','+')+"&entity=musicArtist"
try:
with urllib.request.urlopen(url_alt) as asd:
data = json.loads(asd.read().decode())
itunes_link = data['results'][0]['artistLinkUrl']
except:
itunes_link = ''
origem = ''
generos = ''
for text in info_box_wiki[0].findAll("tr"):
if len(text.findAll("td")) < 1:
continue
# CIDADE/PAIS DE ORIGEM
if 'Origin' in str(text):
cell_origin = text.findAll("td")
for cell in cell_origin[0].contents:
try:
origem += cell.find(text=True)
except:
origem += cell
# GENEROS MUSICAIS
if 'Genres' in str(text):
cell_genres = text.findAll("td")
for cell in cell_genres[0].findAll("a"):
if '[' and ']' in str(cell):
continue
try:
generos += cell.find(text=True)+','
except:
generos += cell+','
generos = generos[:-1]
system('clear')
counter+=1
ET.SubElement(xml_art, "Artista", uri=nome_artista,nome=nome_banda,
origem=origem,generos=generos,link_itunes=itunes_link)
tree = ET.ElementTree(xml_art)
tree.write("music.xml")
# FIM MUSICAS
# INICIO FILMES
query_filmes = 'SELECT * FROM Filme'
cur.execute(query_filmes)
# ORGANIZANDO FILMES EM LISTA
lista_filmes = []
for filme in cur.fetchall():
if(filme[0].isdigit()):
continue
lista_filmes.append(filme[0])
# CRIANDO XML E ITERANDO FILMES
xml_mov = ET.Element("Filmes")
counter = 1
total = len(lista_filmes)
for id_filme in lista_filmes:
print('REALIZANDO BUSCA '+str(counter)+'/'+str(total)+' (FILME): ' + id_filme)
# REQUEST IMDB
url = requests.get('https://www.imdb.com/title/'+id_filme,headers=headers)
data = url.text
soup = BeautifulSoup(data,"lxml")
info_imdb = soup.findAll("div", {"class": "title_wrapper"})
# NOME DO FILME
nome = info_imdb[0].findAll("h1")
nome = nome[0].find(text=True)
####################################
try:
wiki_page = wikipedia.search(nome)[0]
resumo_filme = wikipedia.page(title=wiki_page).summary
resumo_filme = resumo_filme.replace('\n',' ')
except:
resumo_filme = ''
####################################
# GENEROS E LANCAMENTO
gen_lan = info_imdb[0].findAll("a")
lista_generos = []
for link in gen_lan:
if "genres" in str(link):
lista_generos.append(link.find(text=True))
elif 'releaseinfo' in str(link):
data_lancamento = link.find(text=True)
# DIRETOR(ES)
info_diretor = soup.findAll("div", {"class": "credit_summary_item"})
link_diretor = info_diretor[0].findAll("a")
lista_diretores = []
for link in link_diretor:
if "name" in str(link):
lista_diretores.append(link.find(text=True))
genero = ''
for x in lista_generos:
genero = x+','
genero = genero[:-1]
diretor = ''
for x in lista_diretores:
diretor = x+','
diretor = diretor[:-1]
data_lancamento = data_lancamento.replace('(Brazil)','')
system('clear')
counter+=1
ET.SubElement(xml_mov, "Filme", uri=id_filme,nome=nome,resumo=resumo_filme,
diretor=diretor,generos=genero,data_lancamento=data_lancamento)
tree = ET.ElementTree(xml_mov)
tree.write("movie.xml")
|
[
"rpasserino75@gmail.com"
] |
rpasserino75@gmail.com
|
72228f507a4ac8d98397a992ca802e652f3d5c8f
|
2207cf4fb992b0cb106e2daf5fc912f23d538d0d
|
/src/catalog/serializers.py
|
1e85a0316ce6f1e7fa4b866254126cb6dd9a095a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
litedesk/litedesk-webserver-provision
|
95bc75f61532c5f1c7cb21fb5372ff288999689e
|
1576b9d3e5e2e64d1136d276767c2710cfb1938f
|
refs/heads/master
| 2021-05-15T01:35:31.984067
| 2020-08-18T10:55:20
| 2020-08-18T10:55:20
| 25,595,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
import models
class OfferSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='offer-detail')
class Meta:
model = models.Offer
fields = ('url', 'name', 'currency', 'price', 'setup_price', 'status')
read_only_fields = ('name', 'asset', 'currency', )
|
[
"raphael@lullis.net"
] |
raphael@lullis.net
|
2ac108f270cf5ffa0bfbca7755b958d446b3a030
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/01_netCDF_extraction/merra902Combine/21-tideGauge.py
|
784ddb0d0f655471f76357e1f1df6c7540900599
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,374
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine(start=21, end=22):
    """Combine slp/wnd_u/wnd_v predictor CSVs for each tide gauge (TG).

    Walks the TG folders in ``dir_in`` whose listing index falls in
    ``range(start, end)``, merges the three predictor files on their
    'date' column, and writes one combined CSV per TG into ``dir_out``.

    Parameters
    ----------
    start, end : int
        Index range (into the OS folder listing) of the TG folders to
        process.  Defaults preserve the original hard-coded single-gauge
        run (TG #21).
    """
    os.chdir(dir_in)
    # folder names, one per tide gauge
    tg_list_name = os.listdir()

    for tg in range(start, end):
        os.chdir(dir_in)
        tg_name = tg_list_name[tg]
        print(tg_name, '\n')

        # looping through each TG folder
        os.chdir(tg_name)

        # skip empty folders
        if len(os.listdir()) == 0:
            continue

        # defining the path for each predictor
        where = os.getcwd()
        csv_path = {'slp': os.path.join(where, 'slp.csv'),
                    'wnd_u': os.path.join(where, 'wnd_u.csv'),
                    'wnd_v': os.path.join(where, 'wnd_v.csv')}
        first = True
        for pr in csv_path.keys():
            print(tg_name, ' ', pr)
            # read predictor
            pred = pd.read_csv(csv_path[pr])
            # remove the unnamed index column written by a previous to_csv
            pred.drop(['Unnamed: 0'], axis=1, inplace=True)
            # sort based on date as merra files are scrambled
            pred.sort_values(by='date', inplace=True)
            # prefix the predictor name onto every non-date column so the
            # merged frame has unique column names
            pred_col = list(pred.columns)
            for pp in range(len(pred_col)):
                if pred_col[pp] == 'date':
                    continue
                pred_col[pp] = pr + str(pred_col[pp])
            pred.columns = pred_col
            # merge all predictors on the shared date column
            if first:
                pred_combined = pred
                first = False
            else:
                pred_combined = pd.merge(pred_combined, pred, on='date')

        # saving pred_combined (file name prefixed with the TG index)
        os.chdir(dir_out)
        tg_name = str(tg) + "_" + tg_name
        pred_combined.to_csv('.'.join([tg_name, 'csv']))
        os.chdir(dir_in)
        print('\n')

#run script
combine()
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
117b4771a177a59bd2de0e47c5d4fb55e40f5dcf
|
39373e3f2a4e1a70cdf6e5cbbe189c2748f2b8b5
|
/Project2A/wrapper.py
|
d8b77ad60b37346858e3d3be2269570fd9284aaa
|
[] |
no_license
|
justkk/Penn-Computer-Vision
|
d00a7c3af2364643ac4a15f222a5f3656fdacf75
|
cb3305ec3a2cbed6f1aeadb534eb7ebea26c5472
|
refs/heads/main
| 2023-02-13T10:55:50.812795
| 2021-01-13T20:45:33
| 2021-01-13T20:45:33
| 314,481,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,021
|
py
|
from morph_tri import *
from click_correspondences import *
import imageio
# Load the two endpoint images of the morph as numpy arrays.
# NOTE(review): `Image` and `np` are assumed to come from the star imports
# above (PIL.Image re-exported by morph_tri / click_correspondences) -- confirm.
sourceImage = np.array(Image.open("cat.jpeg"))
destImage = np.array(Image.open("tiger.png"))
#sourceImage = np.array(Image.open("leo.jpg"))
#destImage = np.array(Image.open("sachin.png"))
# Interactively click matching landmark points on both images; returns two
# arrays of corresponding point coordinates (presumably (N, 2) -- confirm).
sourcePoints, destinationPoints = click_correspondences(sourceImage, destImage)
##
#ex1
'''
sourcePoints = np.array([[ 73.34256952 , 8.7195157 ],
[107.2168735, 27.77381168],
[136.85688947, 16.12951969],
[164.37976145, 35.18381568],
[209.89835742, 21.95166569],
[194.01977743, 70.11669165],
[188.72691743, 103.99099562],
[162.79190345, 133.1017256 ],
[126.27116948, 143.68744559],
[ 91.33829351, 133.1017256 ],
[ 70.69613953, 99.22742163],
[ 68.57899553, 64.29454565],
[ 97.16043951, 80.17312564],
[116.74402149, 83.87812764],
[115.15616349, 100.28599363],
[ 96.63115351, 99.22742163],
[144.26689347, 87.05384364],
[160.67475945, 88.11241563],
[162.26261745, 100.81527962],
[144.26689347, 103.99099562],
[118.33187949, 119.34028961],
[132.62260148, 119.34028961],
[ 0. , 0. ],
[256. , 256. ],
[256. , 0. ],
[ 0. , 256. ]]
)
destinationPoints = np.array([[ 47.7227213, 9.2488017 ],
[ 95.35846126, 36.77167368],
[137.70134123, 35.71310168],
[194.33494318, 48.41596567],
[232.97282115, 27.24452568],
[204.39137717, 80.17312564],
[222.38710116, 119.86957561],
[210.21352317, 175.97389156],
[116.00061524, 228.90249152],
[ 41.3712893 , 191.32318555],
[ 22.84627932, 139.98244359],
[ 40.8420033 , 84.40741364],
[ 80.00916727, 96.58099163],
[103.29775126, 97.11027763],
[ 95.88774726, 121.98671961],
[ 76.83345128, 111.93028562],
[156.75563721, 100.28599363],
[178.98564919, 104.52028162],
[176.8685052 , 118.28171761],
[158.34349521, 121.45743361],
[ 99.59274926, 165.38817157],
[138.23062723, 168.56388757],
[ 0. , 0. ],
[256. , 256. ],
[256. , 0. ],
[ 0. , 256. ]])
'''
'''
sourcePoints = np.array ([[120.22083907, 178.34197527],
[123.39655507, 122.23765931],
[144.56799505, 53.43047937],
[194.32087901, 19.55617539],
[252.54233896, 7.9118834 ],
[308.64665492, 23.79046339],
[348.87239089, 39.66904338],
[392.27384285, 108.47622332],
[393.33241485, 188.92769526],
[140.33370705, 276.78917119],
[161.50514704, 327.60062715],
[195.37945101 ,365.70921912],
[252.54233896, 397.46637909],
[306.52951092, 374.17779511],
[349.93096289, 333.95205914],
[375.33669087, 274.67202719],
[119.16226707, 229.15343123],
[383.80526686, 231.27057523],
[154.09514304, 211.15770724],
[197.49659501, 181.51769127],
[239.83947497, 209.04056324],
[272.65520695, 211.15770724],
[318.17380291, 183.63483526],
[354.16525088, 214.33342324],
[172.09086703, 231.27057523],
[209.140887 , 242.91486722],
[292.76807493 ,236.56343522],
[324.52523491, 240.79772322],
[219.72660699, 265.1448792 ],
[199.61373901, 289.49203518],
[191.14516301 ,319.13205116],
[293.82664693, 267.2620232 ],
[317.11523091, 286.31631918],
[322.40809091, 319.13205116],
[215.49231899 ,315.95633516],
[259.95234296 ,308.54633116],
[295.94379093, 317.01490716],
[255.71805496, 342.42063514],
[ 0. , 0. ],
[512. , 512. ],
[512. , 0. ],
[ 0. , 512. ]])
destinationPoints = np.array([[111.32402259, 227.03628723],
[ 98.6211586, 141.2919553 ],
[121.90974258, 75.66049135],
[171.66262654 , 17.4390314 ],
[249.99695448 , 2.61902341],
[328.33128241 , 17.4390314 ],
[379.14273837 , 60.84048336],
[411.95847035 ,127.53051931],
[407.72418235 ,220.68485523],
[138.84689456, 364.65064712],
[164.25262254, 404.87638309],
[207.65407451, 438.75068706],
[270.10982246, 458.86355504],
[336.79985841, 435.57497106],
[374.90845038, 397.46637909],
[395.02131836, 351.94778313],
[ 97.5625866 , 301.13632717],
[410.89989835, 288.43346318],
[153.66690255, 247.14915521],
[204.47835851, 214.33342324],
[258.46553047, 248.20772721],
[291.28126244, 250.32487121],
[330.44842641, 210.09913524],
[370.67416238, 240.79772322],
[182.24834653, 274.67202719],
[225.64979849, 276.78917119],
[313.51127442, 276.78917119],
[354.79558239, 271.49631119],
[218.2397945 , 328.65919915],
[200.24407052, 361.47493112],
[194.95121052, 390.0563751 ],
[328.33128241, 327.60062715],
[349.5027224 , 342.42063514],
[354.79558239, 362.53350312],
[229.88408649, 376.29493911],
[275.40268246, 367.82636312],
[323.03842242, 371.00207911],
[282.81268645, 398.52495109],
[ 0. , 0. ],
[512. , 512. ],
[512. , 0. ],
[ 0. , 512. ]])
'''
# Echo the clicked correspondences for debugging.
print(sourcePoints)
print(destinationPoints)

# Warp/dissolve fractions 0.0, 0.1, ..., 1.0 (same schedule for both).
fractions = np.arange(0, 1.1, 0.1)
morphed_set = morph_tri(sourceImage, destImage, sourcePoints, destinationPoints,
                        fractions, fractions)

# One GIF frame per morph step.
frames = [morphed_set[idx, :, :, :] for idx in range(morphed_set.shape[0])]
imageio.mimsave('./morph_1.gif', frames)
|
[
"nikhilt@nikhils-MacBook-Pro.local"
] |
nikhilt@nikhils-MacBook-Pro.local
|
79d8384fe316eec7a4be279f68c1b2184e764229
|
757b2b6bfadc89c13aff30575445dc210529a384
|
/src/optimize_nn.py
|
48415572591084666117a31a386dfe9e90c7967a
|
[
"BSD-3-Clause"
] |
permissive
|
pgniewko/Protein-Secondary-Structure-Prediction
|
15d5b204787604f4a5dd0764d606a24649d5c9e2
|
5fdc58d7b9d59e314f873eb7784b16b5539d2df9
|
refs/heads/master
| 2018-09-08T00:28:08.313884
| 2018-06-04T21:30:55
| 2018-06-04T21:30:55
| 109,162,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,595
|
py
|
#! /usr/bin/env python
# BioE 134, Fall 2017
# Author: Pawel Gniewek (pawel.gniewek@berkeley.edu)
# License: BSD
#
# Point to an input file (../data/db/aa_w5_a3.dat), and sec.str. classes file (../data/db/ss_a3.dat)
# Usage: ./optimize_clf.py ../data/db/aa_w5_a3.dat ../data/db/ss_a3.dat
from __future__ import print_function
import sys
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
from utils import svd_pca, cross_decomp
if __name__ == "__main__":
    # Read the data: feature matrix and class labels from the CLI paths.
    X = np.loadtxt(sys.argv[1])
    Y = np.loadtxt(sys.argv[2])

    # X = cross_decomp(X, Y, 12)
    # Reduce dimensionality before fitting (keep the 50 leading components).
    X = svd_pca(X, 50)

    # Baseline: 5-fold CV accuracy of a fixed MLP architecture.
    clf = MLPClassifier(activation='logistic',solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(25,11,3))
    cv_scores = cross_val_score(clf, X, Y, cv=5, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [ %s ]" % (cv_scores.mean(), cv_scores.std(), "MLPClassifier: CV=5"))

    # Set the parameters by cross-validation
    parameters = [ {'activation':['logistic','relu','tanh'],\
                    'solver':['lbfgs','sgd','adam'], \
                    'learning_rate':['constant','adaptive'],\
                    'hidden_layer_sizes':[(100,),(50,50),(50,25,3),(25,11,5)] } ]

    # Split the dataset in two equal parts
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.5, random_state=0)

    # Metrics to tune for.  Renamed from `scores` so the CV accuracy array
    # above is no longer shadowed by an unrelated list of metric names.
    tuning_metrics = ['precision', 'recall']

    for score in tuning_metrics:
        print("# Tuning hyper-parameters for %s" % score)
        print()

        # Exhaustive grid search, scored with the macro-averaged metric.
        clf = GridSearchCV( MLPClassifier() , parameters,\
                            cv=2, scoring='%s_macro' % score)
        clf.fit(X_train, y_train)
        print("Best parameters set found on development set:")
        print()
        print(clf.best_params_)
        print()
        print("Grid scores on development set:")
        print()
        means = clf.cv_results_['mean_test_score']
        stds = clf.cv_results_['std_test_score']
        for mean, std, params in zip(means, stds, clf.cv_results_['params']):
            print("%0.3f (+/-%0.03f) for %r"
                  % (mean, std * 2, params))
        print()
        print("Detailed classification report:")
        print()
        print("The model is trained on the full development set.")
        print("The scores are computed on the full evaluation set.")
        print()
        y_true, y_pred = y_test, clf.predict(X_test)
        print(classification_report(y_true, y_pred))
        print()
|
[
"gniewko.pablo@gmail.com"
] |
gniewko.pablo@gmail.com
|
9d503b337aa4b2aae5d968b8480f250c62f28706
|
4ac006cf216a2aac21dfdbf66db51b195066676f
|
/Proj5/tracking_sp16/bustersAgents.py
|
3482936d7e35ce0949627de786d0accfea16849a
|
[] |
no_license
|
Snedakerwalker1/cs188
|
af98c3549ee0dede3bc546f265f97966c65ac5cc
|
9b7662b03a0be57e1702d454472990ec0b4036fa
|
refs/heads/master
| 2020-06-12T02:44:06.673108
| 2019-07-23T19:21:17
| 2019-07-23T19:21:17
| 194,171,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,441
|
py
|
# bustersAgents.py
# ----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import util
from game import Agent
from game import Directions
from keyboardAgents import KeyboardAgent
import inference
import busters
class NullGraphics:
    "Placeholder for graphics"
    # No-op stand-in used when the game runs without a display; each method
    # mirrors the real graphics object's interface and deliberately does nothing.
    def initialize(self, state, isBlue = False):
        pass
    def update(self, state):
        pass
    def pause(self):
        pass
    def draw(self, state):
        pass
    def updateDistributions(self, dist):
        pass
    def finish(self):
        pass
class KeyboardInference(inference.InferenceModule):
    """
    Basic inference module for use with the keyboard.
    """

    def initializeUniformly(self, gameState):
        "Begin with a uniform distribution over ghost positions."
        uniform = util.Counter()
        for position in self.legalPositions:
            uniform[position] = 1.0
        uniform.normalize()
        self.beliefs = uniform

    def observeUpdate(self, observation, gameState):
        "Keep only positions whose true distance is compatible with the reading."
        pacmanPosition = gameState.getPacmanPosition()
        updated = util.Counter()
        for position in self.legalPositions:
            trueDistance = util.manhattanDistance(position, pacmanPosition)
            compatible = (
                observation is not None
                and busters.getObservationProbability(observation, trueDistance) > 0
            )
            if compatible:
                updated[position] = 1.0
        updated.normalize()
        self.beliefs = updated

    def elapseTime(self, gameState):
        "Keyboard inference ignores ghost motion."
        pass

    def getBeliefDistribution(self):
        return self.beliefs
class BustersAgent:
    "An agent that tracks and displays its beliefs about ghost positions."

    def __init__( self, index = 0, inference = "ExactInference", ghostAgents = None, observeEnable = True, elapseTimeEnable = True):
        # Resolve the inference class by name (e.g. "ExactInference") from this
        # module's globals and build one inference module per ghost agent.
        inferenceType = util.lookup(inference, globals())
        self.inferenceModules = [inferenceType(a) for a in ghostAgents]
        # Flags let callers disable the observe / elapse-time update steps.
        self.observeEnable = observeEnable
        self.elapseTimeEnable = elapseTimeEnable

    def registerInitialState(self, gameState):
        "Initializes beliefs and inference modules"
        import __main__
        self.display = __main__._display
        for inference in self.inferenceModules:
            inference.initialize(gameState)
        # One belief distribution per ghost, kept in step with the modules.
        self.ghostBeliefs = [inf.getBeliefDistribution() for inf in self.inferenceModules]
        self.firstMove = True

    def observationFunction(self, gameState):
        "Removes the ghost states from the gameState"
        # NOTE: mutates gameState in place -- only Pacman's agent state (slot 0)
        # is kept; ghost slots are blanked so the agent cannot read true positions.
        agents = gameState.data.agentStates
        gameState.data.agentStates = [agents[0]] + [None for i in range(1, len(agents))]
        return gameState

    def getAction(self, gameState):
        "Updates beliefs, then chooses an action based on updated beliefs."
        for index, inf in enumerate(self.inferenceModules):
            # Skip the time-elapse update on the very first move: no ghost
            # motion has happened yet.
            if not self.firstMove and self.elapseTimeEnable:
                inf.elapseTime(gameState)
            self.firstMove = False
            if self.observeEnable:
                inf.observe(gameState)
            self.ghostBeliefs[index] = inf.getBeliefDistribution()
        self.display.updateDistributions(self.ghostBeliefs)
        return self.chooseAction(gameState)

    def chooseAction(self, gameState):
        "By default, a BustersAgent just stops. This should be overridden."
        return Directions.STOP
class BustersKeyboardAgent(BustersAgent, KeyboardAgent):
    "An agent controlled by the keyboard that displays beliefs about ghost positions."

    def __init__(self, index = 0, inference = "KeyboardInference", ghostAgents = None):
        KeyboardAgent.__init__(self, index)
        BustersAgent.__init__(self, index, inference, ghostAgents)

    def getAction(self, gameState):
        # Route through BustersAgent so beliefs are updated before moving.
        return BustersAgent.getAction(self, gameState)

    def chooseAction(self, gameState):
        # Delegate the actual move selection to the keyboard handler.
        return KeyboardAgent.getAction(self, gameState)
from distanceCalculator import Distancer
from game import Actions
from game import Directions
class GreedyBustersAgent(BustersAgent):
    "An agent that charges the closest ghost."

    def registerInitialState(self, gameState):
        "Pre-computes the distance between every two points."
        BustersAgent.registerInitialState(self, gameState)
        self.distancer = Distancer(gameState.data.layout, False)

    def chooseAction(self, gameState):
        """
        First computes the most likely position of each ghost that has
        not yet been captured, then chooses an action that brings
        Pacman closest to the closest ghost (according to mazeDistance!).
        """
        pacmanPosition = gameState.getPacmanPosition()
        legal = [a for a in gameState.getLegalPacmanActions()]
        livingGhosts = gameState.getLivingGhosts()
        # i+1 offset: index 0 of livingGhosts is Pacman himself.
        livingGhostPositionDistributions = \
            [beliefs for i, beliefs in enumerate(self.ghostBeliefs)
             if livingGhosts[i+1]]
        "*** YOUR CODE HERE ***"
        "first lets find the most likely position of each of the remaining ghosts."
        # Argmax over each belief distribution; ties go to the last position
        # iterated because of the >= comparison.
        # NOTE(review): .iteritems() is Python 2 only -- this file will not run
        # under Python 3 as written; confirm the target interpreter.
        ghostpositions = []
        #print livingGhostPositionDistributions
        for distribution in livingGhostPositionDistributions:
            pos = (0.0,0.0)
            value = -float('inf')
            for spot, prob in distribution.iteritems():
                if prob >= value:
                    value = prob
                    pos = spot
            #print probposition
            ghostpositions.append(pos)
        #print ghostpositions
        "now use these positions to find the action that brings packman toward the closest ghost"
        # For each likely ghost position, find the legal action minimizing the
        # maze distance from Pacman's successor position; keep the overall best.
        actionPair = (float('inf'), Directions.STOP)
        for ghostPos in ghostpositions:
            newactionPair = min((self.distancer.getDistance(ghostPos, Actions.getSuccessor(pacmanPosition,a)), a) for a in legal)
            actionPair = min(actionPair, newactionPair)
        return actionPair[1]
|
[
"wsnedaker@berkeley.edu"
] |
wsnedaker@berkeley.edu
|
18f607375d344ca11cc2a0c33fc9166c84602bde
|
58f314bc2df12c3c3b1ce7eacd5baaf60193008b
|
/tests/song_test.py
|
de5dcf007fcd024d43318168e73a2645656fb53c
|
[] |
no_license
|
portypy/Caraoke_bar_w2
|
e75fe57d357fe6bebf5b9b6372cbf0e6ec357796
|
3010052e1c371b345be31c7ce72ffdb48fc47658
|
refs/heads/main
| 2023-01-13T13:33:17.367773
| 2020-11-28T23:41:31
| 2020-11-28T23:41:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
import unittest
from src.song import Song
class TestSong(unittest.TestCase):
    """Unit tests for the Song value object (title, artist, duration)."""

    def setUp(self):
        # Fresh fixtures before every test.
        self.song_1 = Song("Gotta Go", "Agnostic Front", 3.2)
        self.song_2 = Song("On My Radio", "Selecter", 3.52)
        self.song_3 = Song("Divorce a I'ltalienne", "Mungo's Hifi", 3.46)

    def test_song_has_title(self):
        self.assertEqual("Gotta Go", self.song_1.title)

    def test_song_has_artist_name(self):
        self.assertEqual("Agnostic Front", self.song_1.artist)

    def test_song_has_duration(self):
        # assertIsInstance is the idiomatic type check and yields a clearer
        # failure message than comparing isinstance(...) against True.
        self.assertIsInstance(self.song_2.duration, float)
|
[
"macdorphoto@yahoo.com"
] |
macdorphoto@yahoo.com
|
5edaa1b154eb40102fe6ec6a4a37b893c4eab07f
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/hv572GaPtbqwhJpTb_2.py
|
8e0bb6a39e996aa650ed4adf5f67abcc31d4539a
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,831
|
py
|
"""
In this challenge, you must think about words as elastics. What happens when
you stretch an elastic by applying a constant traction force at both ends? Every
part (or letter, in this case) of the elastic will expand, with the minimum
expansion at the ends, and the maximum expansion in the center.
If the word has an odd length, the effective central character of the word
will be the pivot that splits the word into two halves.
"ABC" -> Left = "A" | Center = "B" | Right = "C"
If the word has an even length, you will consider two parts of equal length,
with the last character of the left half and the first character of the right
half being the center.
"ABCD" -> Left = "AB" | Right = "CD"
You will represent the expansion of a letter repeating it as many times as its
numeric position (so counting the indexes from/to 1, and not from 0 as usual)
in its half, with a crescent order in the left half and a decrescent order in
the right half.
Word = "ANNA"
Left = "AN"
Right = "NA"
Left = "A" * 1 + "N" * 2 = "ANN"
Right = "N" * 2 + "A" * 1 = "NNA"
Word = Left + Right = "ANNNNA"
If the word has an odd length, the pivot (the central character) will be the
peak (as to say, the highest value) that delimits the two halves of the word.
Word = "KAYAK"
Left = "K" * 1 + "A" * 2 = "KAA"
Pivot = "Y" * 3 = "YYY"
Right = "A" * 2 + "K" * 1 = "AAK"
Word = Left + Pivot + Right = "KAAYYYAAK"
Given a `word`, implement a function that returns the elasticized version of
the word as a string.
### Examples
elasticize("ANNA") ➞ "ANNNNA"
elasticize("KAYAK") ➞ "KAAYYYAAK"
elasticize("X") ➞ "X"
### Notes
* For words with less than three characters, the function must return the same word (no traction appliable).
* Remember, into the left part characters are counted from 1 to the end, and, in reverse order until 1 is reached, into the right.
"""
def elasticize(word):
    """Return the "elasticized" version of *word*.

    Each character is repeated according to its 1-based position within its
    half: counting up from the left edge towards the middle, and back down
    from the middle to the right edge.  For an odd-length word the central
    character is the shared peak of both halves.

    >>> elasticize("ANNA")
    'ANNNNA'
    >>> elasticize("KAYAK")
    'KAAYYYAAK'
    >>> elasticize("X")
    'X'

    Words shorter than three characters are returned unchanged.
    """
    n = len(word)
    if n < 3:
        # No traction applicable.
        return word
    # The original computed the split point with an index-walking loop; for
    # both parities it is simply n // 2: the left half covers [0, n // 2),
    # the right half (including the pivot of an odd word) covers the rest.
    mid = n // 2
    left = ''.join(word[i] * (i + 1) for i in range(mid))
    right = ''.join(word[i] * (n - i) for i in range(mid, n))
    return left + right
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
c100093dfec1709874cac7a3d2d395b9a4d1626c
|
b5bc88b6fc90a9910387ae32e4152859eae57116
|
/src/lib/trains/base_trainer.py
|
bcedf49fb5c5b7a1a918dc83bbaac8e7261c850c
|
[
"MIT"
] |
permissive
|
Frankhe303/GGNet
|
f0344f005bbb9cfa869d62980751df8ad9789ba4
|
5fd113711960200929b979724f2d9b5647b4719e
|
refs/heads/main
| 2023-04-13T23:53:07.135492
| 2021-04-16T01:38:54
| 2021-04-16T01:38:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,008
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import torch
from progress.bar import Bar
from models.data_parallel import DataParallel
from utils.utils import AverageMeter
class ModleWithLoss(torch.nn.Module):
    """Wrap a network and its loss so one forward call yields both.

    (Name typo preserved: external callers reference ``ModleWithLoss``.)
    """

    def __init__(self, model, loss, opt):
        super(ModleWithLoss, self).__init__()
        self.model = model
        self.loss = loss
        self.opt = opt

    def forward(self, batch):
        """Run the model on batch['input']; return (last output, loss, stats)."""
        outputs = self.model(batch['input'])
        total_loss, stats = self.loss(outputs, batch)
        return outputs[-1], total_loss, stats
class BaseTrainer(object):
    # Generic train/validation loop shared by task-specific trainers.
    # Subclasses must implement _get_losses() and save_result().

    def __init__(
        self, opt, model, optimizer=None):
        self.opt = opt
        self.optimizer = optimizer
        # loss_stats: names of the per-loss scalars to average and display.
        self.loss_stats, self.loss = self._get_losses(opt)
        self.model_with_loss = ModleWithLoss(model, self.loss, self.opt)

    def set_device(self, gpus, chunk_sizes, device):
        # Move the model (wrapped in DataParallel for multi-GPU) and the
        # optimizer's state tensors (e.g. momentum buffers) to `device`.
        if len(gpus) > 1:
            self.model_with_loss = DataParallel(
                self.model_with_loss, device_ids=gpus,
                chunk_sizes=chunk_sizes).to(device)
        else:
            self.model_with_loss = self.model_with_loss.to(device)
        for state in self.optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device=device, non_blocking=True)

    def run_epoch(self, phase, epoch, data_loader):
        # Run one epoch; phase is 'train' or anything else for eval mode.
        # Returns (dict of averaged loss stats + 'time' in minutes, results).
        model_with_loss = self.model_with_loss
        if phase == 'train':
            model_with_loss.train()
        else:
            # Evaluate on a single device: unwrap the DataParallel module.
            if len(self.opt.gpus) > 1:
                model_with_loss = self.model_with_loss.module
            model_with_loss.eval()
            torch.cuda.empty_cache()

        opt = self.opt
        results = {}
        data_time, batch_time = AverageMeter(), AverageMeter()
        avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
        # opt.num_iters < 0 means "use the whole loader".
        num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
        bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
        end = time.time()
        for iter_id, batch in enumerate(data_loader):
            if iter_id >= num_iters:
                break
            data_time.update(time.time() - end)

            # Move every tensor in the batch (except metadata) to the device.
            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=opt.device, non_blocking=True)
            output, loss, loss_stats = model_with_loss(batch)
            # .mean() collapses the per-replica losses returned by DataParallel.
            loss = loss.mean()
            if phase == 'train':
                self.optimizer.zero_grad()
                loss.backward()
                # Gradient sanity check: abort on parameters with no grad,
                # warn on all-zero grads (likely a detached branch).
                for key, value in model_with_loss.named_parameters():
                    if value.grad is None:
                        print(key)
                        assert False
                    elif (value.grad == 0).all():
                        print(key, "none")
                self.optimizer.step()
            batch_time.update(time.time() - end)
            end = time.time()

            Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(
                epoch, iter_id, num_iters, phase=phase,
                total=bar.elapsed_td, eta=bar.eta_td)
            for l in avg_loss_stats:
                avg_loss_stats[l].update(
                    loss_stats[l].mean().item(), batch['input'].size(0))
                Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)
            if not opt.hide_data_time:
                Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \
                    '|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
            if opt.print_iter > 0:
                if iter_id % opt.print_iter == 0:
                    print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
            else:
                bar.next()
            if opt.test:
                self.save_result(output, batch, results)
            # Drop references so large tensors can be freed before next batch.
            del output, loss, loss_stats

        bar.finish()
        ret = {k: v.avg for k, v in avg_loss_stats.items()}
        # Wall-clock epoch time in minutes.
        ret['time'] = bar.elapsed_td.total_seconds() / 60.
        return ret, results

    def save_result(self, output, batch, results):
        # Subclass hook: store per-sample predictions into `results`.
        raise NotImplementedError

    def _get_losses(self, opt):
        # Subclass hook: return (list of stat names, loss module).
        raise NotImplementedError

    def val(self, epoch, data_loader):
        return self.run_epoch('val', epoch, data_loader)

    def train(self, epoch, data_loader):
        return self.run_epoch('train', epoch, data_loader)
|
[
"973162258@qq.com"
] |
973162258@qq.com
|
7d39d82e21a62f66317e371a926cb296d6850163
|
687684850a677f9cfd40077d036a9e25abb6ed51
|
/pms/core/migrations/0001_initial.py
|
4261d4b42f9d61542c04c42383f3c42fb9323791
|
[] |
no_license
|
PhoenixCSCD/pms_backend
|
0f0889a70f58c477d5dc2ee8feec2c0d348e6276
|
bbf08354a0cc7d98f63408be460ae0522dedf96e
|
refs/heads/production
| 2022-12-11T01:08:41.065726
| 2020-07-23T10:25:01
| 2020-07-23T10:25:01
| 244,187,846
| 0
| 1
| null | 2022-12-08T10:57:17
| 2020-03-01T16:56:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,114
|
py
|
# Generated by Django 3.0.8 on 2020-07-18 10:37
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial schema: Allergy, Branch, Drug and a custom User
    # model, all with UUID primary keys.  Do not hand-edit field definitions;
    # generate a follow-up migration instead.

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='Allergy',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Branch',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Drug',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=250)),
                ('selling_price', models.DecimalField(decimal_places=2, max_digits=19)),
                ('cost_price_per_pack', models.DecimalField(decimal_places=2, max_digits=19)),
                ('quantity_per_pack', models.IntegerField(default=1)),
            ],
        ),
        # Custom user model: email is the unique identifier; branches is a
        # many-to-many link to Branch.
        migrations.CreateModel(
            name='User',
            fields=[
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('email', models.CharField(max_length=50, unique=True)),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
                ('date_of_birth', models.DateField()),
                ('phone_number', models.CharField(max_length=100)),
                ('is_staff', models.BooleanField(default=False)),
                ('avatar', models.URLField(null=True)),
                ('branches', models.ManyToManyField(related_name='users', to='core.Branch')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
[
"developer.akabojohnkennedy@gmail.com"
] |
developer.akabojohnkennedy@gmail.com
|
4f0706689cac0ecb6adf7fe4ca0138f3f97c9ef1
|
7a77660d3205640c049cd2802954faaa819ce5b3
|
/Visualize.py
|
e329f25aede26bc2afcd8f91e1d8ea46ddc593b3
|
[] |
no_license
|
jagan-hazard/Convolutional-Neural-Network
|
b197c2e0277751a874a2bf00f752e5cd0e0faf31
|
7fb24b9a8578a69cbfa52f1a3cb896532265669b
|
refs/heads/master
| 2020-03-20T08:04:24.625616
| 2018-06-14T04:00:20
| 2018-06-14T04:00:20
| 137,298,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,490
|
py
|
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.models import model_from_json
import numpy as np
from keras.utils.np_utils import to_categorical
from keras.utils import plot_model
import matplotlib.pyplot as plt
import math
import cv2
import os
# dimensions of our images.
img_width, img_height = 150, 150

# Training/validation layout expected by flow_from_directory below:
# one subfolder per class under each of these roots.
train_data_dir = '/home/jagan/Desktop/visualize/data/train'
validation_data_dir = '/home/jagan/Desktop/visualize/data/validation'
nb_train_samples = 3000
nb_validation_samples = 600
epochs = 1
batch_size = 150

# Keras backends disagree on channel ordering; build the input shape to
# match the configured data format.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
# Four Conv/ReLU/MaxPool stages (32 -> 64 -> 128 -> 256 filters), then a
# 256-unit dense layer with dropout and a single sigmoid output for
# binary classification.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2 )
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
# Stream batches from the class-per-subfolder directory layout.
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
# Train; `history` records per-epoch loss/accuracy used by the plots below.
history=model.fit_generator(
    train_generator,
    epochs=epochs,
    steps_per_epoch=nb_train_samples // batch_size,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
#history = model.fit(epochs, batch_size)
# serialize model to JSON, weights to HDF5
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights('iter_20_16_cnn32_64_128_fc256.h5')
print("Saved model to disk")
#plot_model(model, to_file='model.png')
#print(history.history.keys())
# summarize history for accuracy (line plot: train vs validation)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# summarize history for accuracy (marker-only variant:
# blue squares = train, green triangles = validation)
x=history.history['acc']
y=history.history['val_acc']
#plt.plot(history.history['acc'])
#plt.plot(history.history['val_acc'])
plt.plot(x,'bs',y,'g^')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# summarize history for loss (marker-only variant)
a=history.history['loss']
b=history.history['val_loss']
#plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.plot(a,'bs',b,'g^')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
# Training with callbacks
# Model introspection -- these expressions are evaluated for reference only;
# their values are discarded except model.summary(), which prints.
model.summary()
model.get_config()
model.layers[0].get_config()
model.layers[0].input_shape
model.layers[0].output_shape
model.layers[0].get_weights()
np.shape(model.layers[0].get_weights()[0])
model.layers[0].trainable
#hist = model.fit(X_train, y_train, batch_size=16, nb_epoch=num_epoch, verbose=1, validation_data=(X_test, y_test),callbacks=callbacks_list)
# Load a single probe image, resize to the network input, normalize to
# [0, 1] and add a batch axis so it can be fed through the trained model.
test_image = cv2.imread('/home/jagan/Desktop/1.jpg') #provide the image we want to visualize at each layer
#test_image=cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
test_image=cv2.resize(test_image,(150,150))
test_image = np.array(test_image)
test_image = test_image.astype('float32')
test_image /= 255
print (test_image.shape)
test_image= np.expand_dims(test_image, axis=0)
#test_image=np.rollaxis(test_image,2,0)
print (test_image.shape)
# Predicting the test image
print((model.predict(test_image)))
print(model.predict_classes(test_image))
def get_featuremaps(model, layer_idx, test_image):
    """Return the activations of layer ``layer_idx`` for ``test_image``,
    evaluated with learning_phase=0 (test mode)."""
    fetch_activations = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[layer_idx].output])
    return fetch_activations([test_image, 0])
# ---------------------------------------------------------------------------
# Feature-map visualisation.
# Layer / filter indices to inspect (zero-indexed).  These were left as
# blank fill-in placeholders ("layer_num=") in the original, which is a
# SyntaxError; default to the first layer / first filter so the script
# parses and runs out of the box.
# ---------------------------------------------------------------------------
layer_num = 0   # layer number to visualise (zero indexing)
filter_num = 0  # filter number within that layer (zero indexing)

activations = get_featuremaps(model, int(layer_num), test_image)
print(np.shape(activations))
feature_maps = activations[0][0]
print(np.shape(feature_maps))

# Single filter: draw one activation map and save it to disk.
fig = plt.figure(figsize=(30, 30))
plt.imshow(feature_maps[:, :, filter_num])  # add cmap='gray' for grayscale
plt.savefig("feature_maps:{}".format(layer_num))
#plt.savefig("featurmemaps-layer-{}".format(layer_num) + "-filternum-{}".format(filter_num)+'.jpg')

# All filters: tile every activation map of the layer in a square grid.
num_of_featuremaps = feature_maps.shape[2]
fig = plt.figure(figsize=(30, 30))
plt.title("featuremaps-layer-{}".format(layer_num))
subplot_num = int(np.ceil(np.sqrt(num_of_featuremaps)))
c = int(num_of_featuremaps)
print(c)
for i in range(c):
    ax = fig.add_subplot(subplot_num, subplot_num, i + 1)
    #ax.imshow(output_image[0,:,:,i],interpolation='nearest' ) #to see the first filter
    ax.imshow(feature_maps[:, :, i])  # add cmap='gray' for grayscale
    plt.xticks([])
    plt.yticks([])
    plt.tight_layout()
plt.show()
fig.savefig("featuremaps-layer-{}".format(layer_num) + '.jpg')
|
[
"noreply@github.com"
] |
noreply@github.com
|
07c1baec5353d639e78ede2a77b169e7d80091d1
|
a5dd79505b3e9d0c089f623d479aac75da47a426
|
/Scatterplot Matrix.py
|
84372143a36c5f711f8b684da4d0ebe8cc459554
|
[] |
no_license
|
BsRam07/Data-Visualization---Python
|
e0cbbaa32baeaea4697a9cfcf4393ece7a3c636c
|
d4978b7699589e54e4c8ca9147b2d4b979bad80e
|
refs/heads/master
| 2020-04-25T20:16:38.595310
| 2019-02-28T05:34:44
| 2019-02-28T05:34:44
| 173,047,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as stats
import seaborn as sns

sns.set()
# Load the iris sample data set bundled with seaborn.
df = sns.load_dataset("iris")
# Scatterplot matrix of every numeric column pair, coloured by species.
# Fix: `size` was renamed to `height` in seaborn 0.9 and later removed,
# so the original call fails on current seaborn versions.
sns.pairplot(df, hue="species", height=2.5)
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
bd5348b75265914180c074a798ffa40de4204954
|
126a8cc805589b9313e9a67b1f50eefb24b6370f
|
/arp_mon_win.py
|
2a19c4dcce8abe9d093b6e56d0cd45dd756f74a1
|
[] |
no_license
|
stevery/coco
|
73bb3a8bc2e4ccfd1c924cb289fefeadf86720a5
|
9341f4d86faa6fd2abbbe346e4ca04ce2c973b55
|
refs/heads/master
| 2021-08-16T09:00:31.449415
| 2017-11-19T13:21:16
| 2017-11-19T13:21:16
| 110,894,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
#-*- encoding:utf8 -*-
"""Poll `arp -a` every 30 seconds and warn when a MAC address appears on
more than one IP within the same interface section -- a crude
ARP-spoofing heuristic for Windows `arp` output (CRLF, blank-line
separated interface sections)."""
import re
import subprocess
from time import sleep

while True:
    # Fix: on Python 3 `check_output` returns bytes and the Python 2
    # `print` statements were SyntaxErrors -- decode once, use print().
    arp_r = subprocess.check_output(["arp", "-a"],
                                    stderr=subprocess.STDOUT,
                                    shell=True).decode("utf-8", errors="replace")
    my_arp = {}
    # Sections are separated by a blank CRLF line; the first line of each
    # section names the interface by its IPv4 address.
    for section in arp_r.split('\r\n\r\n'):
        rows = section.strip().split('\r\n')
        header = re.search(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", rows[0])
        if not header:
            continue
        interface = header.group()
        my_arp[interface] = {}
        # Skip the column-header line; each entry is "<ip> <mac> <type>".
        for row in rows[2:]:
            try:
                fields = row.strip().split()
                ip, mac = fields[0], fields[1]
                if mac in my_arp[interface]:
                    # Same MAC already bound to another IP -> possible spoof.
                    print('arp spoofing found {}:{}'.format(ip, mac))
                    print(my_arp[interface][mac])
                else:
                    my_arp[interface][mac] = ip
            except Exception:
                # Malformed/short rows are ignored, as in the original.
                pass
    sleep(30)
|
[
"triptokyw@gmail.com"
] |
triptokyw@gmail.com
|
bafe2f617364ca4a66f1e271cba1e72f7d29aa53
|
2e46b786bd17c27f794b56c505b774fadd1ee7d4
|
/vente.py
|
27bcfdf60f1a9ab04c3ac6e59c65e96026426985
|
[] |
no_license
|
aniskchaou/PRODUCTION-ERP-MODULE
|
73c6193be5eade1beddafdc9204109ac654e88a7
|
f50f6f7193c3bd8ae8911dbe4e51579bfe77082f
|
refs/heads/master
| 2023-04-17T01:32:24.665057
| 2021-04-24T22:04:41
| 2021-04-24T22:04:41
| 351,488,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,260
|
py
|
# -*- coding: utf-8 -*-
import sys
import openerp
from openerp import models, fields, api, _
from openerp import tools
from datetime import date
from datetime import datetime
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
import re
import base64
from openerp.exceptions import except_orm, Warning, RedirectWarning
#----------------------------------------------------------
# taux_tva
#----------------------------------------------------------
class taux_tva(models.Model):
    """VAT-rate master data.

    Stores the available VAT percentages.  Invariants enforced by
    create()/write(): at most one record may be flagged as the default,
    and each percentage value must be unique.
    """
    _name = 'taux.tva'
    _rec_name = 'taux_tva'

    # Customers that use this rate (inverse of vente.client.taux_tva_id).
    client_ids = fields.One2many('vente.client', 'taux_tva_id', 'Clients')
    taux_tva = fields.Float('Taux TVA', required=True)  # VAT percentage
    default = fields.Boolean('Défaut')                  # default-rate flag

    @api.model
    def create(self, values):
        # Only one record may carry the default flag.
        if values['default'] == True:
            obj_ids = self.search([('default', '=', True)])
            if len(obj_ids) > 0:
                raise Warning(_('Erreur!'),
                              _('Il faut un seul valeur par défaut'))
        # The VAT percentage must be unique.
        taux_tva_count = self.search_count([('taux_tva', '=', values['taux_tva'])])
        if taux_tva_count > 0:
            raise Warning(_('Erreur!'),
                          _('( %s ) : Cette valeur existe déja')% (values['taux_tva']))
        obj_id = super(taux_tva, self).create(values)
        return obj_id

    @api.multi
    def write(self, values):
        # Only one record may carry the default flag.
        if values.get("default", False) == True:
            obj_ids = self.search([('default', '=', True)])
            if len(obj_ids) > 0:
                raise Warning(_('Erreur!'),
                              _('Il faut un seul valeur par défaut'))
        # The VAT percentage must stay unique when it is being changed.
        if values.get("taux_tva", False) != False and values.get("taux_tva", False) != self.taux_tva:
            taux_tva_count = self.search_count([('taux_tva', '=', values.get("taux_tva", False))])
            if taux_tva_count > 0:
                raise Warning(_('Erreur!'),
                              _('( %s ) : Cette valeur existe déja')% (values.get("taux_tva", False)))
        obj_id = super(taux_tva, self).write(values)
        return obj_id
#----------------------------------------------------------
# article_commande_rel
#----------------------------------------------------------
class article_commande_rel(models.Model):
    """Order line: one article on a customer order, with the ordered
    quantity, a per-line deadline and computed delivered/reserved
    quantities and delivery progress."""

    @api.one
    @api.depends('commande_id', 'article_id')
    def _get_quantite_livre(self):
        # Sum the quantities of every delivery note (bon.livraison)
        # issued for this order/article pair.
        for rec in self:
            qte = 0
            bl_ids = self.env['bon.livraison'].search([('commande_id', '=', rec.commande_id.id),
                                                       ('article_id', '=', rec.article_id.id)])
            for bl in bl_ids:
                qte += bl.quantite
            self.quantite_livre = qte

    @api.one
    @api.depends('commande_id', 'article_id')
    def _get_quantite_reserve(self):
        # Sum the quantities of every reservation note (bon.reservation)
        # issued for this order/article pair.
        for rec in self:
            qte = 0
            br_ids = self.env['bon.reservation'].search([('commande_id', '=', rec.commande_id.id),
                                                         ('article_id', '=', rec.article_id.id)])
            for br in br_ids:
                qte += br.quantite
            self.quantite_reserve = qte

    @api.one
    @api.depends('quantite', 'quantite_livre')
    def _get_progress(self):
        # Delivery progress as a percentage of the ordered quantity.
        if self.quantite > 0 and self.quantite_livre > 0:
            self.progress = self.quantite_livre / self.quantite * 100
        else:
            self.progress = 0

    _name = "article.commande.rel"

    article_id = fields.Many2one('production.article', 'Article', ondelete='cascade', required=True)
    commande_id = fields.Many2one('production.commande', 'Commande', ondelete='cascade', required=True)
    quantite = fields.Float('Quantité', required=True)      # ordered quantity
    # Unit of measure, mirrored read-only from the article.
    unite = fields.Selection([('u','U'),
                              ('kg','Kg'),
                              ('m2','m²'),
                              ('m','m')], related='article_id.unite', readonly=True, string='Unite')
    date_limit = fields.Date('Date limite', required=True)  # line deadline
    quantite_livre = fields.Float(compute='_get_quantite_livre', string='Qte_Livré')
    quantite_reserve = fields.Float(compute='_get_quantite_reserve', string='Qte_Rés')
    progress = fields.Float(compute='_get_progress', string='Progression')
    stock_non_reserve = fields.Float(string='Stk_Non_Rés', related='article_id.stock_non_reserve')

    @api.multi
    def creer_of(self):
        """Open a pre-filled manufacturing-order form for this line."""
        # A manufacturing order may only be created while the order is
        # still in the 'nonplanifie' (not planned) state.
        if self.commande_id.state != 'nonplanifie':
            raise Warning(_('Erreur!'),
                          _('OF est dejà Planifié'))
        return {
            'name': _("Ordre fabrication"),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'production.ordre.fabrication',
            'view_id': False,
            'context': {
                'default_commande_id': self.commande_id.id,
                'default_article_sortant': self.article_id.id,
                # NOTE(review): 'default_quantite' appears twice in this
                # dict literal; the second occurrence below wins.
                'default_quantite': self.quantite,
                'default_date_fin': self.date_limit,
                'default_line_commande_id': self.id,
                'default_famille_id':self.article_id.famille_id.id,
                'default_quantite':self.quantite
            },
        }

    @api.multi
    def creer_bon_reservation(self):
        """Open a pre-filled reservation-note form for this line."""
        # Reservations require the order to have been started.
        if self.commande_id.state == 'planifie':
            raise Warning(_('Erreur!'),
                          _('La commande n\'est pas encore démarré'))
        return {
            'name': _("Bon de réservation"),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'bon.reservation',
            'view_id': False,
            'context': {
                'default_client_id': self.commande_id.client_id.id,
                'default_commande_id': self.commande_id.id,
                'default_article_id': self.article_id.id,
                'default_quantite_commande': self.quantite
            },
        }

    @api.multi
    def creer_bon_livraison(self):
        """Open a pre-filled delivery-note form for this line."""
        # Deliveries require the order to have been started.
        if self.commande_id.state == 'planifie':
            raise Warning(_('Erreur!'),
                          _('La commande n\'est pas encore démarré'))
        return {
            'name': _("Bon de livraison"),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'bon.livraison',
            'view_id': False,
            'context': {
                'default_client_id': self.commande_id.client_id.id,
                'default_commande_id': self.commande_id.id,
                'default_article_id': self.article_id.id,
                'default_quantite_commande': self.quantite
            },
        }
#----------------------------------------------------------
# production_commande
#----------------------------------------------------------
class production_commande(models.Model):
    """Customer order: header data, order lines, linked manufacturing
    orders and reservation/delivery notes, with the state workflow
    nonconfirme -> nonplanifie -> (planifie) -> demarre -> termine."""

    @api.one
    @api.depends('state')
    def _check_color(self):
        # Kanban colour looked up from the color.status mapping for the
        # current state; 0 when no mapping exists.
        for rec in self:
            color = 0
            color_value = self.env['color.status'].search([('state', '=', rec.state)], limit=1).color
            if color_value:
                color = color_value
            self.member_color = color

    # Workflow button: start the order (requires at least one line).
    @api.one
    def action_demarrer_commande(self):
        if self.article_commande_ids:
            self.write({'state': 'demarre'})
        else:
            raise Warning(_('Erreur!'),
                          _('Cette commande (%s) ne contient aucun article')% (self.num_commande))

    # Workflow button: confirm the order.
    @api.one
    def action_confirmer_commande(self):
        self.write({'state': 'nonplanifie'})

    # Workflow button: close the order.
    @api.one
    def action_terminer_commande(self):
        self.write({'state': 'termine'})

    _name = 'production.commande'
    _rec_name = 'num_commande'

    member_color = fields.Integer(compute='_check_color', string='Color')
    of_ids = fields.One2many('production.ordre.fabrication', 'commande_id', 'Ordres de fabrication')
    num_commande = fields.Char('Num commande', required=True)   # unique order number
    client_id = fields.Many2one('vente.client', 'Client', required=True, ondelete='cascade')
    date_creation = fields.Date('Date création', required=True, default= lambda *a:datetime.now().strftime('%Y-%m-%d'))
    date_limit_cmd = fields.Date('Date limite', required=True)  # order deadline
    article_commande_ids = fields.One2many('article.commande.rel', 'commande_id', 'Articles')
    state = fields.Selection([('nonconfirme','Non Confirmé'),('nonplanifie','Non Planifié'),('planifie','Planifié'),
                              ('demarre','Demarré'),
                              ('termine','Terminé')], 'Etat', readonly=True, default='nonconfirme')
    bon_livraison_ids = fields.One2many('bon.livraison', 'commande_id', 'Bons de livraiosn')
    bon_reservation_ids = fields.One2many('bon.reservation', 'commande_id', 'Bons de réservation')

    @api.model
    def create(self, values):
        """Create an order after validating: unique order number, creation
        date <= deadline, distinct article lines, line deadlines inside
        the order window, strictly positive quantities."""
        # The order number must be unique.
        if self.env['production.commande'].search_count([('num_commande', '=', values['num_commande'])]) > 0:
            raise Warning(_('Erreur!'),
                          _('Numéro commande existe déjà [ %s ].')% (values['num_commande']))
        # Creation date must not be later than the order deadline.
        if values['date_creation'] > values['date_limit_cmd']:
            raise Warning(_('Erreur!'),
                          _('Il faut que : Date création <= Date limite'))
        obj_id = super(production_commande, self).create(values)
        # Each article may appear on at most one line.
        ids = []
        for obj in self.browse(obj_id.id):
            for line in obj.article_commande_ids:
                if line.article_id.id in ids:
                    raise Warning(_('Erreur!'),
                                  _("Même article ajouté plusieurs fois : %s") % line.article_id.code_article)
                ids.append(line.article_id.id)
        # Fetch the order lines for per-line validation.
        article_lines = self.env['article.commande.rel'].search([('commande_id', '=', obj_id.id)])
        for l in article_lines:
            # Line deadline must fall within [creation date, order deadline].
            if l.date_limit > values['date_limit_cmd'] or l.date_limit < values['date_creation']:
                raise Warning(_('Erreur!'),
                              _('Les dates des lignes articles doivent êtres dans [ %s , %s].\n %s qui est séléctionnée')% (values['date_creation'], values['date_limit_cmd'], l.date_limit))
            # Quantities must be strictly positive.
            if float(l.quantite) <= 0:
                raise Warning(_('Erreur!'),
                              _('La quantité doit être supérieur à zero'))
        return obj_id

    @api.multi
    def creer_of(self):
        """Open a manufacturing-order form pre-linked to this order."""
        return {
            'name': _("Ordre fabrication"),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'production.ordre.fabrication',
            'view_id': False,
            'context': {
                'default_commande_id': self.id,
            },
        }

    @api.multi
    def write(self, values):
        """Apply changes, then re-run the same validations as create()."""
        obj_id=super(production_commande,self).write(values)
        for obj in self:
            # The order number must stay unique (checked with raw SQL).
            self.env.cr.execute('select * from production_commande where num_commande = %s',(obj.num_commande,))
            lines = self.env.cr.dictfetchall()
            if len(lines) > 1:
                raise Warning(_('Erreur!'),
                              _('Numéro commande existe déjà [ %s ].')% (obj.num_commande))
            # Creation date must not be later than the order deadline.
            if obj.date_creation > obj.date_limit_cmd:
                raise Warning(_('Erreur!'),
                              _('Il faut que : Date création <= Date limite'))
            # Each article may appear on at most one line.
            ids = []
            for line in obj.article_commande_ids:
                if line.article_id.id in ids:
                    raise Warning(_('Erreur!'),
                                  _("Même article ajouté plusieurs fois : %s") % line.article_id.code_article)
                ids.append(line.article_id.id)
            # Fetch the order lines for per-line validation.
            article_lines = self.env['article.commande.rel'].search([('commande_id', '=', obj.id)])
            for l in article_lines:
                # Line deadline must fall within [creation date, order deadline].
                if l.date_limit > obj.date_limit_cmd or l.date_limit < obj.date_creation:
                    raise Warning(_('Erreur!'),
                                  _('Les dates des lignes articles doivent êtres dans [ %s , %s].\n %s qui est séléctionnée')% (obj.date_creation, obj.date_limit_cmd, l.date_limit))
                # Quantities must be strictly positive.
                if float(l.quantite) <= 0:
                    raise Warning(_('Erreur!'),
                                  _('La quantité doit être supérieur à zero'))
        return obj_id
#----------------------------------------------------------
# bon_reservation
#----------------------------------------------------------
class bon_reservation(models.Model):
    """Reservation note: reserves a quantity of an article from stock for
    a started customer order.  create()/write()/unlink() keep the
    article's stock_reserve counter in sync with the note."""

    @api.one
    @api.depends('commande_id', 'article_id')
    def _get_quantite_commande(self):
        # Ordered quantity for this order/article pair (from the order line).
        qte = 0
        if self.commande_id and self.article_id:
            self.quantite_commande = self.env['article.commande.rel'].search([('article_id', '=', self.article_id.id),
                                                                              ('commande_id', '=', self.commande_id.id)],
                                                                             limit=1).quantite

    @api.one
    @api.depends('commande_id', 'article_id')
    def _get_quantite_livre(self):
        # Total quantity already delivered for this order/article pair.
        qte = 0
        if self.commande_id and self.article_id:
            bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', self.commande_id.id),
                                                                  ('article_id', '=', self.article_id.id)])
            for bl in bon_livraison_ids:
                qte += bl.quantite
            self.quantite_livre = qte

    @api.one
    @api.depends('commande_id', 'article_id')
    def _get_quantite_reserve(self):
        # Total quantity already reserved for this order/article pair.
        qte = 0
        if self.commande_id and self.article_id:
            bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', self.commande_id.id),
                                                                      ('article_id', '=', self.article_id.id)])
            for br in bon_reservation_ids:
                qte += br.quantite
            self.quantite_reserve = qte

    @api.one
    @api.depends('quantite_commande', 'quantite_reserve')
    def _get_progress_reserve_commande(self):
        # Reserved quantity as a percentage of the ordered quantity.
        if self.quantite_commande > 0 and self.quantite_reserve > 0:
            self.progress_reserve_commande = self.quantite_reserve / self.quantite_commande * 100
        else:
            self.progress_reserve_commande = 0

    _name = 'bon.reservation'

    code_bon = fields.Char('Code bon :', readonly=True)   # sequence-generated code
    date_bon = fields.Date('Date bon', required=True, default= lambda *a:datetime.now().strftime('%Y-%m-%d'))
    client_id = fields.Many2one('vente.client', 'Code client', ondelete='cascade', required=True, domain=[('id', 'in', [])])
    # Only started orders ('demarre') can take reservations.
    commande_id = fields.Many2one('production.commande', 'Code commande', ondelete='cascade', required=True,
                                  domain="[('state', '=', 'demarre')]" )
    article_id = fields.Many2one('production.article', 'Code article', ondelete='cascade', required=True)
    quantite = fields.Float('Quantité ', required=True)   # quantity to reserve
    #ajouter qte satisfaite=
    remarque = fields.Text('Remarque')
    quantite_commande = fields.Float(compute='_get_quantite_commande', string='Quantité commandé')
    quantite_livre = fields.Float(compute='_get_quantite_livre', string='Quantité livré')
    quantite_reserve = fields.Float(compute='_get_quantite_reserve', string='Quantité réservé')
    stock_disponible = fields.Float('Stock disponible', related='article_id.stock_disponible')
    stock_non_reserve = fields.Float('Stock non réservé', related='article_id.stock_non_reserve')
    unite = fields.Selection([('u','U'),
                              ('kg','Kg'),
                              ('m2','m²'),
                              ('m','m')], related='article_id.unite', readonly=True, string='Unite')
    progress_reserve_commande = fields.Float(compute='_get_progress_reserve_commande', string='Progression quantité réservé')

    @api.model
    def fields_view_get(self, view_id=None, view_type=False, toolbar=False, submenu=False):
        # Start article_id with an empty domain; onchange_commande_id
        # widens it to the selected order's articles.
        res = super(bon_reservation, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        for field in res['fields']:
            if field == 'article_id':
                res['fields'][field]['domain'] = [('id','in', [])]
        return res

    @api.model
    def create(self, values):
        """Validate the reservation and increase the article's reserved
        stock; also generates the sequential reservation code."""
        # Quantity must be strictly positive.
        if values['quantite'] <= 0:
            raise Warning(_('Erreur!'),
                          _('La quantité doit étre supérieur strictement à zero ( %s )')% (values['quantite']))
        # Cannot reserve more than the unreserved stock.
        article_obj = self.env['production.article'].browse(values['article_id'])
        if values['quantite'] > article_obj.stock_non_reserve:
            raise Warning(_('Erreur!'),
                          _('La quantité à réservé est supérieur à la quantité stock disponible'))
        # Look up the ordered quantity from the order line.
        values['quantite_commande'] = self.env['article.commande.rel'].search([('article_id', '=', values['article_id']),
                                                                               ('commande_id', '=', values['commande_id'])],
                                                                              limit=1).quantite
        # Sum the quantity already reserved for this order/article pair.
        bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', values['commande_id']),
                                                                  ('article_id', '=', values['article_id'])])
        qte_reserve = 0
        for b in bon_reservation_ids:
            qte_reserve += b.quantite
        # Total reserved must not exceed the ordered quantity.
        qte_reserve_total = qte_reserve + values['quantite']
        if qte_reserve_total > values['quantite_commande']:
            raise Warning(_('Erreur!'),
                          _('La quantité à réservé est supérieur à la quantité demandé :\n \
                          (qantite_à_réservé : %s / quantite_demandé : %s)')% (qte_reserve_total, values['quantite_commande']))
        # Increase the article's reserved stock.
        article_obj.stock_reserve += values['quantite']
        # Generate the sequential reservation code.
        values['code_bon'] = self.env['ir.sequence'].get('bon.reservation')
        new_id = super(bon_reservation, self).create(values)
        return new_id

    @api.multi
    def write(self, values):
        """Re-validate and re-balance the reserved stock when the article
        and/or the reserved quantity changes."""
        nouv_article = values.get('article_id', None)
        nouv_quantite = values.get('quantite', None)
        ancien_article_obj = self.env['production.article'].browse(self.article_id.id)
        if nouv_article:
            nouv_article_obj = self.env['production.article'].browse(nouv_article)
            # A new quantity is supplied together with the new article.
            if nouv_quantite:
                # Quantity must be strictly positive.
                if nouv_quantite <= 0:
                    raise Warning(_('Erreur!'),
                                  _('La quantité doit étre supérieur strictement à zero ( %s )')% (nouv_quantite))
                # Cannot reserve more than the new article's unreserved stock.
                if nouv_quantite > nouv_article_obj.stock_non_reserve:
                    raise Warning(_('Erreur!'),
                                  _('La quantité à réservé est supérieur à la quantité stock disponible'))
                # Release the old article's reservation, charge the new one.
                ancien_article_obj.stock_reserve -= self.quantite
                nouv_article_obj.stock_reserve += nouv_quantite
            else:  # same quantity, different article
                # Cannot reserve more than the new article's unreserved stock.
                if self.quantite > nouv_article_obj.stock_non_reserve:
                    raise Warning(_('Erreur!'),
                                  _('La quantité à réservé est supérieur à la quantité stock disponible'))
                # Release the old article's reservation, charge the new one.
                ancien_article_obj.stock_reserve -= self.quantite
                nouv_article_obj.stock_reserve += self.quantite
        else:
            # Same article, possibly a new quantity.
            if nouv_quantite:
                # Quantity must be strictly positive.
                if nouv_quantite <= 0:
                    raise Warning(_('Erreur!'),
                                  _('La quantité doit étre supérieur strictement à zero ( %s )')% (nouv_quantite))
                # Only the delta needs to fit in the unreserved stock.
                if (nouv_quantite - self.quantite) > ancien_article_obj.stock_non_reserve:
                    raise Warning(_('Erreur!'),
                                  _('La quantité à réservé est supérieur à la quantité stock disponible'))
                # Adjust the reserved stock by the delta.
                ancien_article_obj.stock_reserve += nouv_quantite - self.quantite
        obj_id=super(bon_reservation, self).write(values)
        return obj_id

    @api.multi
    def unlink(self):
        """Release the reserved stock before deleting the note(s)."""
        for rec in self:
            article_obj = self.env['production.article'].browse(rec.article_id.id)
            article_obj.stock_reserve -= rec.quantite
        return super(bon_reservation, self).unlink()

    @api.onchange('commande_id')
    def onchange_commande_id(self):
        """Restrict article_id to the selected order's articles and copy
        the order's customer into client_id."""
        res = {}
        ids = []
        default_commande = self._context.get('default_commande_id', False)
        default_article = self._context.get('default_article_id', False)
        if self.commande_id:
            # Clear the article unless it was pre-set via context defaults.
            if default_article == False:
                self.article_id = []
            if default_commande:
                if self.commande_id.id != default_commande:
                    self.article_id = []
            # Collect the articles belonging to the selected order.
            for ligne in self.commande_id.article_commande_ids:
                ids.append(ligne.article_id.id)
            # Mirror the order's customer.
            self.client_id = self.commande_id.client_id
        else:  # no order selected
            self.article_id = []
        res['domain'] = {'article_id': [('id', 'in', ids)]}
        return res
#----------------------------------------------------------
# bon_livraison
#----------------------------------------------------------
class bon_livraison(models.Model):
    """Delivery note: ships a previously reserved quantity of an article
    for a started customer order.  create()/write()/unlink() keep both
    the physical stock (stock_reel) and the reserved stock
    (stock_reserve) of the article in sync."""

    @api.one
    @api.depends('commande_id', 'article_id')
    def _get_quantite_commande(self):
        # Ordered quantity for this order/article pair (from the order line).
        qte = 0
        if self.commande_id and self.article_id:
            self.quantite_commande = self.env['article.commande.rel'].search([('article_id', '=', self.article_id.id),
                                                                              ('commande_id', '=', self.commande_id.id)],
                                                                             limit=1).quantite

    @api.one
    @api.depends('commande_id', 'article_id')
    def _get_quantite_livre(self):
        # Total quantity already delivered for this order/article pair.
        qte = 0
        if self.commande_id and self.article_id:
            bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', self.commande_id.id),
                                                                  ('article_id', '=', self.article_id.id)])
            qte = 0
            for bl in bon_livraison_ids:
                qte += bl.quantite
            self.quantite_livre = qte

    @api.one
    @api.depends('commande_id', 'article_id')
    def _get_quantite_reserve(self):
        # Total quantity already reserved for this order/article pair.
        qte = 0
        if self.commande_id and self.article_id:
            bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', self.commande_id.id),
                                                                      ('article_id', '=', self.article_id.id)])
            qte = 0
            for br in bon_reservation_ids:
                qte += br.quantite
            self.quantite_reserve = qte

    @api.one
    @api.depends('quantite_commande', 'quantite_livre')
    def _get_progress_livre_commande(self):
        # Delivered quantity as a percentage of the ordered quantity.
        if self.quantite_commande > 0 and self.quantite_livre > 0:
            self.progress_livre_commande = self.quantite_livre / self.quantite_commande * 100
        else:
            self.progress_livre_commande = 0

    @api.one
    @api.depends('quantite_commande', 'quantite_reserve')
    def _get_progress_reserve_commande(self):
        # Reserved quantity as a percentage of the ordered quantity.
        if self.quantite_commande > 0 and self.quantite_reserve > 0:
            self.progress_reserve_commande = self.quantite_reserve / self.quantite_commande * 100
        else:
            self.progress_reserve_commande = 0

    _name = 'bon.livraison'

    code_bon = fields.Char('Code bon :', readonly=True)   # sequence-generated code
    date_bon = fields.Date('Date bon', required=True, default= lambda *a:datetime.now().strftime('%Y-%m-%d'))
    client_id = fields.Many2one('vente.client', 'Code client', ondelete='cascade', required=True, domain=[('id', 'in', [])])
    # Only started orders ('demarre') can be delivered.
    commande_id = fields.Many2one('production.commande', 'Code commande', ondelete='cascade', required=True,
                                  domain="[('state', '=', 'demarre')]" )
    article_id = fields.Many2one('production.article', 'Code article', ondelete='cascade', required=True)
    quantite = fields.Float('Quantité', required=True)    # quantity to deliver
    quantite_commande = fields.Float(compute='_get_quantite_commande', string='Quantité commandé')
    quantite_commande2 = fields.Float('Quantité commandé', related='quantite_commande')
    quantite_livre = fields.Float(compute='_get_quantite_livre', string='Quantité livré')
    quantite_reserve = fields.Float(compute='_get_quantite_reserve', string='Quantité réservé')
    stock_disponible = fields.Float('Stock disponible', related='article_id.stock_disponible')
    stock_non_reserve = fields.Float('Stock non réservé', related='article_id.stock_non_reserve')
    unite = fields.Selection([('u','U'),
                              ('kg','Kg'),
                              ('m2','m²'),
                              ('m','m')], related='article_id.unite', readonly=True, string='Unite')
    progress_reserve_commande = fields.Float(compute='_get_progress_reserve_commande', string='Progression quantité réservé')
    progress_livre_commande = fields.Float(compute='_get_progress_livre_commande', string='Progression quantité livré')

    @api.model
    def fields_view_get(self, view_id=None, view_type=False, toolbar=False, submenu=False):
        # Start article_id with an empty domain; onchange_commande_id
        # widens it to the selected order's articles.
        res = super(bon_livraison, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        for field in res['fields']:
            if field == 'article_id':
                res['fields'][field]['domain'] = [('id','in', [])]
        return res

    @api.model
    def create(self, values):
        """Validate the delivery against existing reservations, then
        decrease both the physical and the reserved stock."""
        # Quantity must be strictly positive.
        if values['quantite'] <= 0:
            raise Warning(_('Erreur!'),
                          _('La quantité doit étre supérieur strictement à zero ( %s )')% (values['quantite']))
        # Sum the quantity reserved for this order/article pair.
        bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', values['commande_id']),
                                                                  ('article_id', '=', values['article_id'])])
        qte_res = 0
        for b in bon_reservation_ids:
            qte_res += b.quantite
        # A delivery requires an existing reservation.
        if qte_res == 0:
            raise Warning(_('Erreur!'),
                          _('Aucun quantité réservé dans le stock'))
        # Sum the quantity already delivered.
        bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', values['commande_id']),
                                                              ('article_id', '=', values['article_id'])])
        qte_livre = 0
        for b in bon_livraison_ids:
            qte_livre += b.quantite
        # Total delivered must not exceed the reserved quantity.
        qte_livre_total = qte_livre + values['quantite']
        if qte_livre_total > qte_res:
            raise Warning(_('Erreur!'),
                          _('La quantité à livrer est supérieur à la quantité réservé:\n \
                          (quantite_à_livré : %s / quantite_réservé : %s)')% (qte_livre_total, qte_res))
        # Generate the sequential delivery code.
        values['code_bon'] = self.env['ir.sequence'].get('bon.livraison')
        # Delivering consumes both physical and reserved stock:
        # stock_reel -= qte ; stock_reserve -= qte
        article_obj = self.env['production.article'].browse(values['article_id'])
        if article_obj:
            article_obj.stock_reel -= values['quantite']
            article_obj.stock_reserve -= values['quantite']
            # Check the minimum-stock threshold.
            article_obj.verifier_stock()
        new_id = super(bon_livraison, self).create(values)
        return new_id

    @api.multi
    def write(self, values):
        """Re-validate and re-balance stock when the article and/or the
        delivered quantity changes."""
        commande = values.get('commande_id', None)
        if commande == None:
            commande = self.commande_id.id
        nouv_article = values.get('article_id', None)
        nouv_quantite = values.get('quantite', None)
        ancien_article_obj = self.env['production.article'].browse(self.article_id.id)
        if nouv_article:
            # Sum the reserved quantity for the NEW article on this order.
            bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', commande),
                                                                      ('article_id', '=', nouv_article)])
            qte_res = 0
            for b in bon_reservation_ids:
                qte_res += b.quantite
            # Sum the quantity already delivered for the new article.
            bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', commande),
                                                                  ('article_id', '=', nouv_article)])
            qte_livre = 0
            for b in bon_livraison_ids:
                qte_livre += b.quantite
            nouv_article_obj = self.env['production.article'].browse(nouv_article)
            # A new quantity is supplied together with the new article.
            if nouv_quantite:
                # Quantity must be strictly positive.
                if nouv_quantite <= 0:
                    raise Warning(_('Erreur!'),
                                  _('La quantité doit étre supérieur strictement à zero ( %s )')% (nouv_quantite))
                # Total delivered must not exceed the reserved quantity.
                qte_livre_total = qte_livre + nouv_quantite
                if qte_livre_total > qte_res:
                    raise Warning(_('Erreur!'),
                                  _('La quantité à livrer est supérieur à la quantité réservé:\n \
                                  (quantite_à_livré : %s / quantite_réservé : %s)')% (qte_livre_total, qte_res))
                # Return stock to the old article, take it from the new one.
                ancien_article_obj.stock_reel += self.quantite
                ancien_article_obj.stock_reserve += self.quantite
                nouv_article_obj.stock_reel -= nouv_quantite
                nouv_article_obj.stock_reserve -= nouv_quantite
            else:  # same quantity, different article
                # Total delivered must not exceed the reserved quantity.
                qte_livre_total = qte_livre + self.quantite
                if qte_livre_total > qte_res:
                    raise Warning(_('Erreur!'),
                                  _('La quantité à livrer est supérieur à la quantité réservé:\n \
                                  (quantite_à_livré : %s / quantite_réservé : %s)')% (qte_livre_total, qte_res))
                # Return stock to the old article, take it from the new one.
                ancien_article_obj.stock_reel += self.quantite
                ancien_article_obj.stock_reserve += self.quantite
                nouv_article_obj.stock_reel -= self.quantite
                nouv_article_obj.stock_reserve -= self.quantite
        else:
            # Same article, possibly a new quantity.
            if nouv_quantite:
                # Quantity must be strictly positive.
                if nouv_quantite <= 0:
                    raise Warning(_('Erreur!'),
                                  _('La quantité doit étre supérieur strictement à zero ( %s )')% (nouv_quantite))
                # Sum the reserved quantity for this order/article pair.
                bon_reservation_ids = self.env['bon.reservation'].search([('commande_id', '=', commande),
                                                                          ('article_id', '=', self.article_id.id)])
                qte_res = 0
                for b in bon_reservation_ids:
                    qte_res += b.quantite
                # Sum the quantity already delivered.
                bon_livraison_ids = self.env['bon.livraison'].search([('commande_id', '=', commande),
                                                                      ('article_id', '=', self.article_id.id)])
                qte_livre = 0
                for b in bon_livraison_ids:
                    qte_livre += b.quantite
                # Only re-check the reservation ceiling when increasing.
                if nouv_quantite > self.quantite:
                    qte_livre_total = qte_livre + nouv_quantite - self.quantite
                    if qte_livre_total > qte_res:
                        raise Warning(_('Erreur!'),
                                      _('La quantité à livrer est supérieur à la quantité réservé:\n \
                                      (quantite_à_livré : %s / quantite_réservé : %s)')% (qte_livre_total, qte_res))
                # Adjust stock by the quantity delta.
                ancien_article_obj.stock_reel += self.quantite - nouv_quantite
                ancien_article_obj.stock_reserve += self.quantite - nouv_quantite
        obj_id=super(bon_livraison,self).write(values)
        return obj_id

    @api.multi
    def unlink(self):
        """Return the delivered quantity to stock before deleting."""
        for rec in self:
            article_obj = self.env['production.article'].browse(rec.article_id.id)
            article_obj.stock_reel += rec.quantite
            article_obj.stock_reserve += rec.quantite
        return super(bon_livraison, self).unlink()

    @api.onchange('commande_id')
    def onchange_commande_id(self):
        """Restrict article_id to the selected order's articles and copy
        the order's customer into client_id."""
        res = {}
        ids = []
        default_commande = self._context.get('default_commande_id', False)
        default_article = self._context.get('default_article_id', False)
        if self.commande_id:
            # Clear the article unless it was pre-set via context defaults.
            if default_article == False:
                self.article_id = []
            if default_commande:
                if self.commande_id.id != default_commande:
                    self.article_id = []
            # Collect the articles belonging to the selected order.
            for ligne in self.commande_id.article_commande_ids:
                ids.append(ligne.article_id.id)
            # Mirror the order's customer.
            self.client_id = self.commande_id.client_id
        else:  # no order selected
            self.article_id = []
        res['domain'] = {'article_id': [('id', 'in', ids)]}
        return res
|
[
"kchaouanis26@gmail.com"
] |
kchaouanis26@gmail.com
|
424a153fb67403733012e88be8f95f8f6783f4bc
|
4872375eeb0b2a45c0d3046bbfb5cd2d202b2295
|
/quiz.py
|
e89862b18df06fdbf7fe361b76ea3a36a1613f18
|
[
"MIT"
] |
permissive
|
ash018/discordQuizBot
|
ee3aae7171220f39bd9a0bb057c2fa5eab017dd5
|
b00441553bbbeeab2c4da0264eeed8480a33c3a1
|
refs/heads/master
| 2020-04-13T09:00:18.410702
| 2018-12-25T17:05:28
| 2018-12-25T17:05:28
| 163,098,967
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,308
|
py
|
# -*- coding: utf-8 -*-
"""
Quiz / Question classes for quizbot.
@author: drkatnz
"""
import asyncio
import random
import re
import os
#todo: probably need to remove punctuation from answers
class Quiz:
    """State machine for a channel-bound trivia quiz run by a discord client.

    Questions are loaded from every file in the ``quizdata`` directory at
    construction time.  One quiz runs at a time, bound to the channel it was
    started in; timed hints are posted until somebody answers, the question
    is skipped, or a player reaches the win limit.
    """

    def __init__(self, client, win_limit=10, hint_time=30):
        # initialises the quiz
        self.__running = False          # True while a quiz is in progress
        self.current_question = None    # Question currently awaiting an answer
        self._win_limit = win_limit     # score needed to win outright
        self._hint_time = hint_time     # seconds between successive hints
        self._questions = []            # pool of unasked Question objects
        self._asked = []                # questions used in the current quiz
        self.scores = {}                # player name -> correct answers
        self._client = client
        self._quiz_channel = None
        self._cancel_callback = True    # suppresses stale hint callbacks

        # load in some questions
        datafiles = os.listdir('quizdata')
        for df in datafiles:
            filepath = 'quizdata' + os.path.sep + df
            self._load_questions(filepath)
            print('Loaded: ' + filepath)
        print('Quiz data loading complete.\n')

    def _load_questions(self, question_file):
        """Parse one quiz-data file and append its questions to the pool.

        Format: ``key: value`` lines (category/question/answer/regexp),
        questions separated by blank lines; ``#`` starts a comment line.
        """
        with open(question_file, encoding='utf-8', errors='replace') as qfile:
            lines = qfile.readlines()

        question = None
        category = None
        answer = None
        regex = None
        position = 0
        while position < len(lines):
            line = lines[position].strip()
            if line.startswith('#'):
                # comment line: skip
                position += 1
                continue
            if line == '':  # blank line terminates a question block
                # add question (only if both question and answer were seen)
                if question is not None and answer is not None:
                    q = Question(question=question, answer=answer,
                                 category=category, regex=regex)
                    self._questions.append(q)
                # reset everything
                question = None
                category = None
                answer = None
                regex = None
                position += 1
                continue
            # Value starts after the first ':'.  Bug fix: the original
            # computed find(':') on the raw line but sliced the stripped
            # line, which broke on indented input.
            value = line[line.find(':') + 1:].strip()
            key = line.lower()
            if key.startswith('category'):
                category = value
            elif key.startswith('question'):
                question = value
            elif key.startswith('answer'):
                answer = value
            elif key.startswith('regexp'):
                regex = value
            # else ignore unknown keys
            position += 1
        # Flush a trailing question when the file lacks a final blank line
        # (the original silently dropped it).
        if question is not None and answer is not None:
            self._questions.append(Question(question=question, answer=answer,
                                            category=category, regex=regex))

    def started(self):
        """Return True while a quiz is running."""
        return self.__running

    def question_in_progress(self):
        """Return True while a question is awaiting an answer.

        Bug fix: the original referenced ``self.__current_question``, a
        name-mangled attribute that never exists (the real attribute is
        ``current_question``), so it always raised AttributeError.
        """
        return self.current_question is not None

    async def _hint(self, hint_question, hint_number):
        """Sleep, then post hint *hint_number* for *hint_question*.

        Recurses up to hint 5, after which the question is skipped.  The
        callback aborts silently if the question changed or was cancelled
        while sleeping.
        """
        if self.__running and self.current_question is not None:
            await asyncio.sleep(self._hint_time)
            if (self.current_question == hint_question
                    and self._cancel_callback == False):
                if hint_number >= 5:
                    # Out of hints: skip to the next question and stop.
                    # Bug fix: the original fell through here and
                    # immediately posted a hint for the *new* question.
                    await self.next_question(self._channel)
                    return
                hint = self.current_question.get_hint(hint_number)
                await self._client.send_message(self._channel, 'Hint {}: {}'.format(hint_number, hint), tts=True)
                if hint_number < 5:
                    await self._hint(hint_question, hint_number + 1)

    async def start(self, channel):
        """Begin a quiz in *channel* after a 10 second countdown."""
        if self.__running:
            # don't start again
            await self._client.send_message(channel,
                'Quiz already started in channel {}, you can stop it with !stop or !halt'.format(self._channel.name), tts=True)
        else:
            await self.reset()
            self._channel = channel
            await self._client.send_message(self._channel, '@here Quiz starting in 10 seconds...', tts=True)
            await asyncio.sleep(10)
            self.__running = True
            await self.ask_question()

    async def reset(self):
        """Stop any running quiz and return all state to its initial value."""
        if self.__running:
            # stop
            await self.stop()
        # reset the scores and question pools
        self.current_question = None
        self._cancel_callback = True
        self.__running = False
        # Return the asked questions to the pool.  Bug fix: the original
        # used append(), nesting the whole list as one element.
        self._questions.extend(self._asked)
        self._asked = []
        self.scores = {}

    async def stop(self):
        """Stop the running quiz, revealing the answer and the scores."""
        if self.__running:
            # print results, then stop the quiz
            await self._client.send_message(self._channel, 'Quiz stopping.', tts=True)
            if self.current_question is not None:
                await self._client.send_message(self._channel,
                    'The answer to the current question is: {}'.format(self.current_question.get_answer()), tts=True)
            await self.print_scores()
            self.current_question = None
            self._cancel_callback = True
            self.__running = False
        else:
            # NOTE(review): self._channel is unset until start() has run at
            # least once, so a !stop before any quiz raises AttributeError.
            await self._client.send_message(self._channel, 'No quiz running, start one with !ask or !quiz', tts=True)

    async def ask_question(self):
        """Pick a random unasked question, post it, and start the hint timer."""
        if self.__running:
            # grab a random question and move it to the asked list
            qpos = random.randint(0, len(self._questions) - 1)
            self.current_question = self._questions[qpos]
            self._questions.remove(self.current_question)
            self._asked.append(self.current_question)
            await self._client.send_message(self._channel,
                'Question {}: {}'.format(len(self._asked), self.current_question.ask_question()), tts=True)
            self._cancel_callback = False
            await self._hint(self.current_question, 1)

    async def next_question(self, channel):
        """Skip the current question (only honoured from the quiz channel)."""
        if self.__running:
            if channel == self._channel:
                await self._client.send_message(self._channel,
                    'Moving onto next question. The answer I was looking for was: {}'.format(self.current_question.get_answer()), tts=True)
                self.current_question = None
                self._cancel_callback = True
                await self.ask_question()

    async def answer_question(self, message):
        """Check *message* against the current question and score it."""
        if self.__running and self.current_question is not None:
            if message.channel != self._channel:
                # Bug fix: the original used ``pass`` here, so answers from
                # unrelated channels were still accepted.
                return
            if self.current_question.answer_correct(message.content):
                # record success
                self._cancel_callback = True
                if message.author.name in self.scores:
                    self.scores[message.author.name] += 1
                else:
                    self.scores[message.author.name] = 1
                await self._client.send_message(self._channel,
                    'Well done, {}, the correct answer was: {}'.format(message.author.name, self.current_question.get_answer()), tts=True)
                self.current_question = None
                # check win
                if self.scores[message.author.name] == self._win_limit:
                    await self.print_scores()
                    await self._client.send_message(self._channel, '{} has won! Congratulations.'.format(message.author.name), tts=True)
                    # Bug fix: extend, not append (see reset()).
                    self._questions.extend(self._asked)
                    self._asked = []
                    self.__running = False
                elif len(self._asked) % 5 == 0:
                    # show a scoreboard every five questions
                    await self.print_scores()
                await self.ask_question()

    async def print_scores(self):
        """Post each player's score and announce the current leader(s)."""
        if self.__running:
            await self._client.send_message(self._channel, 'Current quiz results:', tts=True)
        else:
            await self._client.send_message(self._channel, 'Most recent quiz results:', tts=True)
        highest = 0
        for name in self.scores:
            await self._client.send_message(self._channel, '{}:\t{}'.format(name, self.scores[name]), tts=True)
            if self.scores[name] > highest:
                highest = self.scores[name]
        if len(self.scores) == 0:
            await self._client.send_message(self._channel, 'No results to display.', tts=True)
        # everyone tied at the top score counts as a leader
        leaders = []
        for name in self.scores:
            if self.scores[name] == highest:
                leaders.append(name)
        if len(leaders) > 0:
            if len(leaders) == 1:
                await self._client.send_message(self._channel, 'Current leader: {}'.format(leaders[0]), tts=True)
            else:
                await self._client.send_message(self._channel, 'Print leaders: {}'.format(leaders), tts=True)
class Question:
    """A single quiz question with its expected answer."""

    def __init__(self, question, answer, category=None, author=None, regex=None):
        self.question = question
        self.answer = answer
        self.author = author
        self.regex = regex
        self.category = category
        self._hints = 0

    def ask_question(self):
        """Return the display form: "(category) [Posed by author. ]question"."""
        label = self.category if self.category is not None else 'General'
        text = '({}) '.format(label)
        if self.author is not None:
            text += 'Posed by {}. '.format(self.author)
        return text + self.question

    def answer_correct(self, answer):
        """True when *answer* matches, via regex if one is set, else
        case-insensitive string comparison (both sides stripped)."""
        if self.regex is not None:
            return re.fullmatch(self.regex.strip(), answer.strip()) is not None
        return answer.lower().strip() == self.answer.lower().strip()

    def get_hint(self, hint_number):
        """Reveal the first *hint_number* characters of every 5-char group;
        spaces are always shown, hidden letters become '-'."""
        revealed = []
        for idx, ch in enumerate(self.answer):
            if idx % 5 < hint_number:
                revealed.append(ch)
            elif ch == ' ':
                revealed.append(' ')
            else:
                revealed.append('-')
        return ''.join(revealed)

    def get_answer(self):
        """Return the expected answer verbatim."""
        return self.answer
|
[
"sadatakash018@gmail.com"
] |
sadatakash018@gmail.com
|
1c37e0a39fb706b42f03597283cf1a50dd03b413
|
f9888153e33dc29324d868ca7f1f0ec14b6b4bd4
|
/aireal/roof/view_results.py
|
e0d9c783b3c96475a711cee90d1d2ea8e98a11d0
|
[] |
no_license
|
gsvigruha/images
|
a029a51bf7bbcc5384ddb34c26e52eaf98261e04
|
6aac3deca36b09049f9f403ba438fdb7a98ee92e
|
refs/heads/master
| 2021-07-21T06:34:57.383519
| 2020-05-25T01:32:20
| 2020-05-25T01:32:20
| 173,651,473
| 0
| 0
| null | 2020-05-25T01:32:21
| 2019-03-04T01:18:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,163
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
from matplotlib.pyplot import figure
import tensorflow as tf
from images.aireal.roof.image_loader import LABELS, TRAIN, BATCH_SIZE
def feature_iter_1(test_file_list, model_file):
    """Run the saved Keras model in *model_file* over each tile in
    *test_file_list* and return the stacked, squeezed predictions.

    For every base filename, the RGB tile and its matching CIR
    (colour-infrared) tile are read from TRAIN, scaled to [0, 1], and
    concatenated channel-wise into one 6-channel input.
    NOTE(review): TF1 Session/graph API; one Session serves the whole
    list and is closed before returning.
    """
    sess = tf.Session('', tf.Graph())
    with sess.graph.as_default():
        train_images=[]
        model = tf.keras.models.load_model(model_file)
        for filename in test_file_list:
            # RGB tile, decoded and normalised to [0, 1].
            rgb_image = tf.read_file(TRAIN + filename + ".jpg")
            rgb_image_decoded = tf.image.decode_jpeg(rgb_image, channels=3)
            rgb_image_decoded = tf.to_float(rgb_image_decoded) / 255.0
            # The CIR tile's name differs only in this dataset prefix.
            cir_fn = tf.strings.regex_replace(filename, '2005050310033_78642723578549', '2005050310034_78642723578549_CIR')
            cir_image = tf.read_file(TRAIN + cir_fn + ".jpg")
            cir_image_decoded = tf.image.decode_jpeg(cir_image, channels=3)
            cir_image_decoded = tf.to_float(cir_image_decoded) / 255.0
            # 6-channel input: RGB + CIR stacked along the channel axis.
            train_image_decoded = tf.concat([rgb_image_decoded, cir_image_decoded], axis=2)
            # Materialise a batch of one, then predict with the Keras model.
            input_tf = sess.run(tf.stack([train_image_decoded], axis=0))
            train_images.append(model.predict(input_tf))
        sess.close()
    return np.squeeze(np.stack(train_images, axis=0))
def feature_iter_2(test_file_list, model_file, output_dirs):
    """Second-stage inference: like feature_iter_1, but the previous
    stage's single-channel outputs (one per directory in *output_dirs*)
    are appended as extra input channels before prediction.
    """
    sess = tf.Session('', tf.Graph())
    with sess.graph.as_default():
        train_images=[]
        model = tf.keras.models.load_model(model_file)
        for filename in test_file_list:
            # RGB tile, normalised to [0, 1].
            rgb_image = tf.read_file(TRAIN + filename + ".jpg")
            rgb_image_decoded = tf.image.decode_jpeg(rgb_image, channels=3)
            rgb_image_decoded = tf.to_float(rgb_image_decoded) / 255.0
            # Matching colour-infrared tile.
            cir_fn = tf.strings.regex_replace(filename, '2005050310033_78642723578549', '2005050310034_78642723578549_CIR')
            cir_image = tf.read_file(TRAIN + cir_fn + ".jpg")
            cir_image_decoded = tf.image.decode_jpeg(cir_image, channels=3)
            cir_image_decoded = tf.to_float(cir_image_decoded) / 255.0
            feature_tensors = [rgb_image_decoded, cir_image_decoded]
            # One extra channel per previous-stage output directory.
            for output_dir in output_dirs:
                prev_image = tf.read_file(TRAIN + output_dir + filename + "_roof_output.png")
                prev_image_decoded = tf.image.decode_jpeg(prev_image, channels=1)
                prev_image_decoded = tf.to_float(prev_image_decoded) / 255.0
                feature_tensors.append(prev_image_decoded)
            train_image_decoded = tf.concat(feature_tensors, axis=2)
            input_tf = sess.run(tf.stack([train_image_decoded], axis=0))
            train_images.append(model.predict(input_tf))
        sess.close()
    return np.squeeze(np.stack(train_images, axis=0))
def show(test_file_list, y):
    """Plot, per test file, a row of three panels: the prediction y[i]
    (greyscale), the source photo, and the ground-truth shape overlay.

    NOTE(review): reads images from a hard-coded absolute path.
    """
    f = figure(num=None, figsize=(16, 32), dpi=80, facecolor='w', edgecolor='k')
    N = len(test_file_list)
    for i in range(0, N):
        name = test_file_list[i]
        print(name)
        # Panel 1: model output as a [0, 1] greyscale image.
        f.add_subplot(N,3,i*3+1)
        plt.imshow(np.squeeze(y[i]), cmap='gray', vmin=0, vmax=1)
        # Panel 2: original aerial photo.
        img=mpimg.imread('/home/gsvigruha/aireal/Classification/'+name+'.jpg')
        f.add_subplot(N,3,i*3+2)
        plt.imshow(img)
        # Panel 3: annotated shapes (ground truth).
        img_s=mpimg.imread('/home/gsvigruha/aireal/Classification/'+name+'_shapes.png')
        f.add_subplot(N,3,i*3+3)
        plt.imshow(img_s)
    plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
fce8102b6fabca507bec27dbceb55dbf1eaf4c0c
|
87e80e3e91dbb23b857b8fd44427bb03e3a0be29
|
/API/decision.py
|
38eb0adc8f8a33d738917e1901d6ca2c0503017e
|
[
"Unlicense"
] |
permissive
|
ClementRoyer/TwitchAFK-API
|
fc68da62bd6393d155a4ff6523f886636db0aec5
|
92164d776930bd6c8e371a5d2b8ef7fe07b76ea9
|
refs/heads/master
| 2023-01-22T09:00:07.838758
| 2020-12-07T22:01:59
| 2020-12-07T22:01:59
| 318,622,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
import time
def betStrategy(driver, bet):
    """Decide which side to bet on and how much.

    Picks the side with the higher odds and stakes 5% of the current
    balance (bet.amount, a digit string possibly containing spaces),
    capped at 250 000.  *driver* is accepted for interface compatibility
    but unused.
    """
    pick = "A" if bet.coteA > bet.coteB else "B"  # take the most advantageous odds
    balance = int("".join(bet.amount.split()))    # strip thousand-separator spaces
    stake = min(round(balance * 0.05), 250000)    # 5% of balance, capped at 250 000
    return {
        "choice": pick,
        "amount": stake
    }
|
[
"clement.royer@epitech.eu"
] |
clement.royer@epitech.eu
|
f382e321982d7046239348d2a2c1037f961777ea
|
012d619f174805e5aef31d3a51e5954542e5f270
|
/Dj030101/Dj030101/urls.py
|
81417811b7d92bc96bd871880e68821a0346eb75
|
[] |
no_license
|
thunderdrum/self-service-supermarket
|
8174a591bb91f5e67b86a51af56784b458788cf1
|
6e98511e37f09547f7f625626abd11fdedc41ab9
|
refs/heads/master
| 2022-04-01T21:43:21.988989
| 2020-01-23T03:05:57
| 2020-01-23T03:05:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
""" Dj030101 URL Configuration
The 'urlpatterns' list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from shop import views as shop_views
urlpatterns = [
    path('admin/', admin.site.urls),
    # Public pages.
    path('', shop_views.index, name='index'),
    path('login/', shop_views.login, name='login'),
    # Cashier workflow: build a sale, correct it, take payment.
    path('cashier/', shop_views.cashier, name='cashier'),
    path('cashier/add_product/', shop_views.add_product, name="add_product"),
    path('cashier/delete_product/', shop_views.delete_product, name="delete_product"),
    path('cashier/get_return/', shop_views.get_return_money, name="get_return"),
    path('cashier/cancel/', shop_views.cashier_cancel, name="cashier_cancel"),
    path('cashier/submit/', shop_views.cashier_submit, name="cashier_submit"),
    # Back-office reporting.
    path('main/', shop_views.main, name='main'),
    path('main/sales_query/', shop_views.sales_query, name='sales_query'),
    path('main/salesdetail_query/', shop_views.salesdetail_query, name='salesdetail_query'),
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
d26c162667d757caa31ccc5aa285f4f67e0e43f7
|
597e9361ba5508e1252a595791c7739ab7b7bf6d
|
/venv/Scripts/pip-script.py
|
550cd4c7602fded3c3fff7c55bfebf6c8340020c
|
[] |
no_license
|
Shivani-781/Sorting_Algorithms_Python_Implementation
|
ec1d2598322665f94a059a3ac8bc2074db814a56
|
ddaa2b47b6ed7fe9e65e259d47fa3a73d5f255ad
|
refs/heads/master
| 2022-12-06T14:41:42.282435
| 2020-08-29T18:58:34
| 2020-08-29T18:58:34
| 291,290,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
#!"C:\Users\Shivani Chauhan\PycharmProjects\Sorting_Algorithms_Python_Implementation\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# Auto-generated setuptools wrapper: normalise argv[0] (drop the
# "-script.py"/".exe" suffix Windows launchers add), then hand control
# to pip's console_scripts entry point and exit with its return code.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
|
[
"shivanichauhan781@gmail.com"
] |
shivanichauhan781@gmail.com
|
78a5d26431e1c5228b1257e275fe51fbf8ab3863
|
09f49c5dae6a3abe0e8fc15437539f93dd3844fc
|
/common/vp_mail/publish_notes.py
|
a457bebf4a684419b1e93db8b223bf0679e9116b
|
[] |
no_license
|
JAZimmermann/samples
|
aeacfdc8281862c299ec950a2621e57dadc3110a
|
969c90d764f1df266aeea7c47cac02ef93a3d43d
|
refs/heads/master
| 2021-01-11T08:07:21.672933
| 2016-09-22T05:03:57
| 2016-09-22T05:03:57
| 68,860,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,507
|
py
|
#
# Copyright (c) [2014] John Zimmermann
#
# $URL$
# $Date: 2014-09-04$
# $Revision: 1.0$
# $Author: johnz $
#
class PublishNotes(object):
    '''
    main class for gathering publish notes from user by providing
    a prompt dialog based on what application they are in, ie. maya or mobu
    '''
    # NOTE(review): this module targets a Python 2 interpreter embedded in
    # the DCC applications (see the print statement in _get_mobu_notes).

    def __init__(self):
        '''
        initialize instance
        '''
        self._win_title = "Publish Notes"
        self._win_msg = "Enter publish notes / updates."
        self._application = None  # detected host app name, or "mobu"
        self.notes = None         # user-entered notes; stays None on cancel

        # attempt to determine current application
        self._determine_application()

    def _determine_application(self):
        '''
        attempt to determine what is current application before
        proceeding to get notes input from user
        '''
        if not self._check_for_maya() and not self._check_for_mobu():
            raise ImportError("Unable to ascertain / import current "
                              + "application as Maya or MotionBuilder. "
                              + "Make sure application is correct.")

        # NOTE(review): mc.about(application=True) may return more than the
        # bare word "maya" -- confirm this comparison against a live session.
        if self._application.lower() == "maya":
            self._get_maya_notes()
        if self._application.lower() == "mobu":
            self._get_mobu_notes()

    def _check_for_maya(self):
        '''
        try to determine if current application is maya related
        '''
        found = False
        try:
            import maya.cmds as mc
            self._application = mc.about(query=True, application=True)
            found = True
        except:
            # NOTE(review): bare except deliberately swallows the ImportError
            # outside Maya, but it also hides any other failure.
            pass
        return found

    def _check_for_mobu(self):
        '''
        try to determine if current application is motionbuilder related
        '''
        found = False
        try:
            import re
            from pyfbsdk import FBSystem
            # Match "motionbuilder" anywhere in the application path.
            mobu_patt = re.compile("motionbuilder", re.IGNORECASE)
            if mobu_patt.search(FBSystem().ApplicationPath):
                self._application = "mobu"
                found = True
        except:
            # NOTE(review): bare except -- same caveat as _check_for_maya.
            pass
        return found

    def _get_maya_notes(self):
        '''
        prompt for and retrieve publish notes from user in maya
        '''
        import maya.cmds as mc
        confirm = mc.promptDialog(
            title=self._win_title,
            messageAlign="center",
            message=self._win_msg,
            button=["OK", "Cancel"],
            defaultButton="OK",
            cancelButton="Cancel",
            dismissString="Cancel"
        )

        # Only read the text back if the user confirmed the dialog.
        if confirm == "OK":
            self.notes = mc.promptDialog(query=True, text=True)

    def _get_mobu_notes(self):
        '''
        prompt for and retrieve publish notes from user in mobu
        '''
        from pyfbsdk import FBMessageBoxGetUserValue, FBPopupInputType
        cancelBtn = 0
        confirm, notes = FBMessageBoxGetUserValue(self._win_title,
                                                  self._win_msg,
                                                  "",
                                                  FBPopupInputType.kFBPopupString,
                                                  "Ok", "Cancel", None,
                                                  1, cancelBtn)
        # Python 2 print statement (debug output).
        print confirm, notes
        # FBMessageBoxGetUserValue returns 1 when "Ok" was pressed.
        if confirm == 1:
            self.notes = notes
|
[
"john.zimm.zimmermann@gmail.com"
] |
john.zimm.zimmermann@gmail.com
|
327169a1cb6be4099ccb7f13fab70dfa92f4742e
|
7deda84f7a280f5a0ee69b98c6a6e7a2225dab24
|
/Receptionist/migrations/0027_package_manage_reception.py
|
45248c462110a952feffbb09a7008787a2c97129
|
[] |
no_license
|
Cornex-Inc/Coffee
|
476e30f29412373fb847b2d518331e6c6b9fdbbf
|
fcd86f20152e2b0905f223ff0e40b1881db634cf
|
refs/heads/master
| 2023-01-13T01:56:52.755527
| 2020-06-08T02:59:18
| 2020-06-08T02:59:18
| 240,187,025
| 0
| 0
| null | 2023-01-05T23:58:52
| 2020-02-13T05:47:41
|
Python
|
UTF-8
|
Python
| false
| false
| 549
|
py
|
# Generated by Django 2.1.15 on 2020-05-19 15:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: links each package_manage row to a
    # Reception through a new non-nullable FK.  default=0 only back-fills
    # existing rows during the migration; preserve_default=False drops the
    # default from the model afterwards.

    dependencies = [
        ('Receptionist', '0026_package_manage_grouping'),
    ]

    operations = [
        migrations.AddField(
            model_name='package_manage',
            name='reception',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.DO_NOTHING, to='Receptionist.Reception'),
            preserve_default=False,
        ),
    ]
|
[
"khm4321@naver.com"
] |
khm4321@naver.com
|
36dc2128246ab9955f721cf3b4751c1493ded948
|
17c90beebbe2551255eacd009e0033f738d265f0
|
/python/02-Linux_loop_update/full-app-bootload.py
|
ac2b7454de4b926c7dc551cdd65f9435cda9c36a
|
[] |
no_license
|
xiangliangliang/python
|
533e766da1551f37695b44df8bbbddd807ede63c
|
8519c53582ddb82ec106d96b2a47e61258498825
|
refs/heads/master
| 2020-04-17T20:41:27.536570
| 2019-02-12T07:45:49
| 2019-02-12T07:45:49
| 166,916,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,401
|
py
|
'''
为确保程序运行,请执行以下操作
1. 使Linux的串口放在第一个tab
2. 使升级设备处于第二个tab
3. 使电源串口处于第三个tab
'''
# $language = "python"
# $interface = "1.0"
import re
import time
import datetime
import string
import random
# Log file name: the current timestamp with every non-digit replaced by '_'.
filename = re.sub(r'[^0-9]', '_', str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), 0)
# Ask the user (SecureCRT dialog) where the run log should be written.
file_path = crt.Dialog.FileOpenDialog(title='Please select a text file', defaultFilename=filename+'_log.log',filter = 'Log Files (*.log)|*.log')
def main():
    # Firmware upgrade soak test driven through SecureCRT tabs:
    #   tab 1 = Linux shell that pushes images, tab 2 = device under test,
    #   tab 3 = programmable DC power supply (OUT1 = on, OUT0 = off).
    # NOTE(review): upgrade_SW has 10 entries but check_version only 9, and
    # the random indices below can reach 10 -- IndexError is possible.
    upgrade_SW = ['app_16.bin','app_17.bin','app_18.bin','app_19.bin','app_20.bin','app_22.bin','app_23.bin','app_24.bin','app_25.bin','app_26.bin']
    upgrade_FULL_SW = ['app_full_16.bin','app_full_17.bin','app_full_18.bin','app_full_19.bin','app_full_20.bin','app_full_22.bin','app_full_23.bin','app_full_24.bin','app_full_25.bin','app_full_26.bin']
    upgrade_boot=['boot_8M.bin','boot_8M.bin']  # NOTE(review): never used
    check_version = ['00.01.24','00.01.24','00.01.24','00.01.24','00.01.24','00.01.24','00.01.24','00.01.24','00.01.24']
    #check_version = ['00.01.16','00.01.17','00.01.18','00.01.19','00.01.20','00.01.21','00.01.22','00.01.23','00.01.24']
    crt.Screen.Synchronous = False
    # NOTE(review): i is the running log-line counter, but it is clobbered
    # by the "for i in range(8)" loop below, and check_version[i] overflows
    # once i exceeds 8.
    i=0
    boot_count=0
    while 1:
        initialTab = crt.GetScriptTab()
        tab_1 = crt.GetTab(1)
        tab_1.Activate()
        tab_1.Screen.Send('\r\n')
        #tab_1.Screen.Send("./sample_upgrade "+sw+ '\r\n') # -------------- edit as needed
        # Alternate 8M/4M bootloader images.  NOTE(review): boot_count is
        # only incremented in the even branch, so after the first pass it
        # stays odd and "boot_4M.bin" is sent every time.
        if (boot_count%2) == 0:
            tab_1.Screen.Send("boot_8M.bin" + '\r\n') # upgrade the 8M bootloader
            boot_count = boot_count +1
            filep = open(file_path, 'a+')  # NOTE(review): never closed
            filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[i]+' PASS'+'\r\n')
            i = i+1
        else:
            tab_1.Screen.Send("boot_4M.bin" + '\r\n')
            filep = open(file_path, 'a+')
            filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[i]+' PASS'+'\r\n')
            i = i+1
        time.sleep(4)
        # Power-cycle the platform via the DC supply on tab 3.
        tab_3 = crt.GetTab(3)
        tab_3.Activate()
        time.sleep(2)
        tab_3.Screen.Send('\r\n')
        tab_3.Screen.Send('\r\n')
        on = str.upper('out')+'1'
        off = str.upper('out')+'0'
        tab_3.Screen.Send(off +'\r\n\r')
        time.sleep(5)
        tab_3.Screen.Send(on +'\r\n\r')
        for i in range(8) : # ----------- upgrade a random app image 8 times
            # upgrade the app
            tab_1 = crt.GetTab(1)
            tab_1.Activate()
            tab_1.Screen.Send('\r\n')
            time.sleep(2)
            #tab_1.Screen.Send("./sample_upgrade "+sw+ '\r\n') # -------------- edit as needed
            # NOTE(review): randint(0,10) can return 10 -> IndexError on upgrade_SW[a].
            a = random.randint(0,10)
            tab_1.Screen.Send(upgrade_SW[a]+ '\r\n') # -------------- upgrade the app
            time.sleep(2) # -------------- edit as needed
            # Reboot the platform; the DC supply's serial port must be tab 3.
            tab_3 = crt.GetTab(3)
            tab_3.Activate()
            time.sleep(2)
            tab_3.Screen.Send('\r\n')
            tab_3.Screen.Send('\r\n')
            on = str.upper('out')+'1'
            off = str.upper('out')+'0'
            tab_3.Screen.Send(off +'\r\n\r')
            time.sleep(5)
            tab_3.Screen.Send(on +'\r\n\r')
            # Open the C201-D serial port; it must be tab 2.
            tab_2 = crt.GetTab(2)
            tab_2.Activate()
            time.sleep(5)
            tab_2.Screen.Send('\r\n')
            tab_2.Screen.Send('getVersion'+'\r\n')
            version_result = tab_2.Screen.WaitForString('command_getVersion',5)
            if version_result == 1:
                # First 8 characters of the reply hold the version string.
                current_sw = tab_2.Screen.ReadString('CPU0').strip()
                current_sw = current_sw[:8]
                #crt.Dialog.MessageBox(current_sw)
                time.sleep(2)
                #crt.Dialog.MessageBox(check_version)
            else:
                crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
                time.sleep(1)
                break
                # NOTE(review): unreachable after break; 'WEnd' is leftover VBScript.
                return
                WEnd
            if (current_sw == check_version[a]):
                filep = open(file_path, 'a+')
                filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[i]+' PASS'+'\r\n')
                i = i+1
            else:
                filep = open(file_path, 'a+')
                filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[i]+' Fail'+'\r\n')
                i = i+1
                crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
                time.sleep(1)
                break
                # NOTE(review): unreachable after break.
                return
                WEnd
        # Upgrade to a fixed app image and verify it reports 00.01.25.
        tab_1 = crt.GetTab(1)
        tab_1.Activate()
        tab_1.Screen.Send('\r\n')
        #tab_1.Screen.Send("./sample_upgrade "+sw+ '\r\n') # -------------- edit as needed
        tab_1.Screen.Send("app_25.bin" + '\r\n') # upgrade app_25.bin
        time.sleep(2) # ------ edit as needed
        tab_3 = crt.GetTab(3)
        tab_3.Activate()
        time.sleep(2)
        tab_3.Screen.Send('\r\n')
        tab_3.Screen.Send('\r\n')
        on = str.upper('out')+'1'
        off = str.upper('out')+'0'
        tab_3.Screen.Send(off +'\r\n\r')
        time.sleep(5)
        tab_3.Screen.Send(on +'\r\n\r')
        tab_2 = crt.GetTab(2)
        tab_2.Activate()
        time.sleep(5)
        tab_2.Screen.Send('\r\n')
        tab_2.Screen.Send('getVersion'+'\r\n')
        version_result = tab_2.Screen.WaitForString('command_getVersion',5)
        if version_result == 1:
            current_sw = tab_2.Screen.ReadString('CPU0').strip()
            current_sw = current_sw[:8]
            #crt.Dialog.MessageBox(current_sw)
            time.sleep(2)
            #crt.Dialog.MessageBox(check_version)
        else:
            crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
            time.sleep(1)
            break
            # NOTE(review): unreachable after break.
            return
            WEnd
        if (current_sw == '00.01.25'):
            filep = open(file_path, 'a+')
            filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+'00.01.25 app'+' PASS'+'\r\n')
            i = i+1
        else:
            filep = open(file_path, 'a+')
            filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+'00.01.25 app'+' Fail'+'\r\n')
            i = i+1
            crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
            time.sleep(1)
            break
            # NOTE(review): unreachable after break.
            return
        # Upgrade with one random full image and verify its version.
        tab_1 = crt.GetTab(1)
        tab_1.Activate()
        tab_1.Screen.Send('\r\n')
        #tab_1.Screen.Send("./sample_upgrade "+sw+ '\r\n') # -------------- edit as needed
        # NOTE(review): TypeError -- should be random.randint(0, 10)
        # (and even then 10 would overflow upgrade_FULL_SW/check_version).
        ij=random.randint(0.10)
        tab_1.Screen.Send(upgrade_FULL_SW[ij] + '\r\n') # upgrade one random full image .bin
        time.sleep(2) # -------------- edit as needed
        tab_3 = crt.GetTab(3)
        tab_3.Activate()
        time.sleep(2)
        tab_3.Screen.Send('\r\n')
        tab_3.Screen.Send('\r\n')
        on = str.upper('out')+'1'
        off = str.upper('out')+'0'
        tab_3.Screen.Send(off +'\r\n\r')
        time.sleep(5)
        tab_3.Screen.Send(on +'\r\n\r')
        tab_2 = crt.GetTab(2)
        tab_2.Activate()
        time.sleep(5)
        tab_2.Screen.Send('\r\n')
        tab_2.Screen.Send('getVersion'+'\r\n')
        version_result = tab_2.Screen.WaitForString('command_getVersion',5)
        if version_result == 1:
            current_sw = tab_2.Screen.ReadString('CPU0').strip()
            current_sw = current_sw[:8]
            #crt.Dialog.MessageBox(current_sw)
            time.sleep(2)
            #crt.Dialog.MessageBox(check_version)
        else:
            crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
            time.sleep(1)
            break
            # NOTE(review): unreachable after break.
            return
            WEnd
        if (current_sw == check_version[ij]):
            filep = open(file_path, 'a+')
            filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[ij]+' PASS'+'\r\n')
            i = i+1
        else:
            filep = open(file_path, 'a+')
            filep.write(str(i)+' '+str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+' '+check_version[ij]+' Fail'+'\r\n')
            i = i+1
            crt.Dialog.MessageBox("版本升级失败,请终止升级","session",32|3)
            time.sleep(1)
            break
            # NOTE(review): unreachable after break.
            return
main()
|
[
"284604666@qq.com"
] |
284604666@qq.com
|
a734b3373cd26121facac575215b185ffb3f1f82
|
caa70852a42cc70ef81573539da1f3efedc9d0e5
|
/venv/bin/easy_install
|
94a82250d36e04e5e6e7dafdd4e3c20e0b4119cd
|
[] |
no_license
|
nejelnejel/bubble_sort_2
|
62aa4977f606ae2d6055c4cad9853393d415c93a
|
7fa37e55aa1ce7dd9b422688ad6a3b2b87283ed3
|
refs/heads/master
| 2020-11-26T12:01:03.276878
| 2019-12-19T14:38:08
| 2019-12-19T14:38:08
| 229,065,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
#!/home/rent/PycharmProjects/sphinx/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# Auto-generated setuptools wrapper: normalise argv[0] (drop the
# "-script.py"/".exe" suffix), then hand control to the easy_install
# console entry point and exit with its return code.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
|
[
"nejelnejel@gmail.com"
] |
nejelnejel@gmail.com
|
|
845f77bc8d39737647f4a55d183df4f8f7afdbf3
|
43aeee48c1f6fc468a43f9bb0d4edae8ee0dbee1
|
/LPTW-SRC/例3_21.py
|
8430bd36f542e524ac1f1798a936dc9eba351ed6
|
[] |
no_license
|
wiky2/mytestproject
|
f694cf71dd3031e4597086f3bc90d246c4b26298
|
e7b79df6304476d76e87f9e8a262f304b30ca312
|
refs/heads/master
| 2021-09-07T20:54:19.569970
| 2018-02-28T23:39:00
| 2018-02-28T23:39:00
| 100,296,844
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
# Read judges' scores, drop the highest and the lowest, print the average.

# Keep prompting until the user enters an integer judge count greater than 2.
while True:
    try:
        n = int(input('请输入评委人数:'))
        if n <= 2:
            print('评委人数太少,必须多于2个人。')
        else:
            # A valid count was entered: leave the loop.
            break
    except ValueError:
        # Non-numeric input: prompt again.  (Bug fix: the original said
        # ``Pass`` -- a NameError that crashed the script on bad input.)
        pass

# Holds every judge's score.
scores = []
for i in range(n):
    # Keep prompting until the score is a number between 0 and 100.
    while True:
        try:
            score = input('请输入第{0}个评委的分数:'.format(i+1))
            # Convert the string to a float.
            score = float(score)
            # Ensure the score lies in [0, 100] (assert raises on failure).
            assert 0 <= score <= 100
            scores.append(score)
            # Valid input: move on to the next judge.
            break
        except (ValueError, AssertionError):
            # Narrowed from a bare except so Ctrl-C still interrupts.
            print('分数错误')

# Compute and remove the highest and lowest scores.
highest = max(scores)
lowest = min(scores)
scores.remove(highest)
scores.remove(lowest)

# Average of the remaining scores, rounded to 2 decimal places.
finalScore = round(sum(scores)/len(scores), 2)
formatter = '去掉一个最高分{0}\n去掉一个最低分{1}\n最后得分{2}'
print(formatter.format(highest, lowest, finalScore))
|
[
"jerry_136510@aliyun.com"
] |
jerry_136510@aliyun.com
|
729ab8bbd28235101a138824f2811f233210925f
|
a4f6e1caef32ddfd45fdf5475fbec24020e8be19
|
/KNN from scratch for mushroom dataset.py
|
4451375f94947ed573902870c40da4a363849fcd
|
[] |
no_license
|
rishabkr/KNN-classifier-from-scratch-for-Mushroom-Dataset
|
1c4ef8740ad63f7c5c8b0a7774478d15de654c6a
|
3fe49a331ac45346c719ff8ca433838fe3605b66
|
refs/heads/master
| 2020-12-29T16:11:21.315946
| 2020-02-06T10:41:59
| 2020-02-06T10:41:59
| 238,664,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,587
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pprint import pprint
import random
import math
from collections import Counter
class KNNClassifier:
    """k-nearest-neighbour classifier for the letter-encoded mushroom data.

    CSV rows carry the label in column 0 and letter-valued features in the
    remaining columns; letters are mapped to their ordinal codes and '?'
    cells are replaced by the column mode.
    """

    def __init__(self):
        pass

    def train_test_split(self, dataframe, test_size):
        """Randomly split *dataframe* into (train, test) frames.

        *test_size* may be an int (row count) or a float (proportion of
        the frame's length).
        """
        if isinstance(test_size, float):  # test size passed as a proportion
            test_size = round(test_size * len(dataframe))
        # Pick random row indices for the test set; the rest is training.
        indexes = dataframe.index.tolist()
        test_indices = random.sample(population=indexes, k=test_size)
        test_dataframe = dataframe.loc[test_indices]
        train_dataframe = dataframe.drop(test_indices)
        return train_dataframe, test_dataframe

    def minkowski(self, test_value, p):
        """Distance from every training row to *test_value*.

        p == 2 returns the *squared* Euclidean distance (no sqrt -- rank
        order is all KNeighbors needs); p == 1 returns the Manhattan
        distance.  Any other p returns None (as in the original).
        """
        if p == 2:
            return np.sum((self.train_values - test_value) ** 2, axis=1)
        elif p == 1:
            return np.sum(abs(self.train_values - test_value), axis=1)

    def KNeighbors(self, k, test_value, p=2):
        """Indices of the *k* training rows closest to *test_value*."""
        distance = self.minkowski(test_value, p)
        return np.argsort(distance)[:k]

    def find_majority(self, k_index):
        """Most common training label among the neighbour indices *k_index*."""
        ans = Counter(self.train_labels[k_index]).most_common()
        return ans[0][0]

    @staticmethod
    def _clean(df):
        """Replace '?' by the column mode and letters by their ordinals."""
        # a -> 97 ... z -> 122, matching the original hand-written mapping.
        letters = {chr(code): code for code in range(ord('a'), ord('z') + 1)}
        for column in df.columns:
            df[column] = df[column].replace(to_replace="?", value=df[column].mode()[0])
        for column in df.columns:
            df[column] = df[column].replace(to_replace=letters)
        return df

    def train(self, train_path):
        """Load *train_path*, clean it, and hold out 30% for validation.

        Sets train_values/train_labels and val_values/val_labels.
        """
        df = self._clean(pd.read_csv(train_path, header=None))
        df = df.apply(pd.to_numeric)
        train_df, val_df = self.train_test_split(df, 0.3)
        train_digits = np.array(train_df.to_numpy())
        val_digits = np.array(val_df.to_numpy())
        # Column 0 is the label; the rest are features.
        self.train_values = train_digits[:, 1:]
        self.train_labels = train_digits[:, 0]
        self.val_values = val_digits[:, 1:]
        self.val_labels = val_digits[:, 0]

    def predict(self, test_path):
        """Classify every row of *test_path* with k=5; returns letters.

        Bug fix: the original body contained a dangling ``if i % 10 == 0:``
        whose only statement was commented out -- a syntax error.
        """
        df_test = self._clean(pd.read_csv(test_path, header=None))
        test_vals = np.array(df_test.to_numpy())
        prediction = []
        for row in test_vals:
            k_index = self.KNeighbors(5, row)
            prediction.append(self.find_majority(k_index))
        # Decode numeric labels back to their letter form.
        return [chr(label) for label in prediction]
if __name__ == '__main__':
knn = KNNClassifier()
knn.train("train.csv")
preds = knn.predict("test.csv")
print("Done Testing")
df_labels=pd.read_csv("test_labels.csv", header=None)
label_vals=df_labels.iloc[:, 0].to_numpy()
label_vals=np.array(label_vals)
#print(preds.shape)
#print(label_vals.shape)
preds=np.array(preds)
acc = np.sum(preds == label_vals)/preds.shape[0]
print(acc)
# df_test_labels=pd.read_csv("test_labels.csv",header=None)
# # In[254]:
# for column in df_test_labels.columns:
# df_test_labels[column]=df_test_labels[column].replace(to_replace=letters)
# # In[255]:
# test_vals=df_test.to_numpy()
# test_vals=np.array(test_vals)
# label_vals=df_test_labels.to_numpy()
# label_vals=np.array(label_vals)
# # In[256]:
# def KNeighbors(k,train_values,train_labels,test_value):
# neighbors=[]
# train_length=train_values.shape[0]
# distance=np.sum((train_values - test_value)**2,axis=1)
# k_neighbors=np.argsort(distance)
# k_neighbors=k_neighbors[:k]
# return k_neighbors
# # In[257]:
# def find_majority(k_index,train_labels):
# from collections import Counter
# ans = Counter(train_labels[k_index]).most_common()
# return ans[0][0]
# # In[258]:
# predictions=[]
# length=test_vals.shape[0]
# for i in range(length):
# k_index=KNeighbors(6,train_values,train_labels,test_vals[i])
# result=find_majority(k_index,train_labels)
# predictions.append(result)
# predictions
# # In[ ]:
# # In[259]:
# cnt=0
# for i in range(0,length):
# if(predictions[i]==label_vals[i]):
# cnt+=1
# print(cnt/label_vals.shape[0])
# # In[260]:
# from sklearn.neighbors import KNeighborsClassifier
# classifier=KNeighborsClassifier(n_neighbors=6,metric='minkowski',p=2)
# classifier.fit(train_values,train_labels)
# # In[261]:
# y_pred=classifier.predict(test_vals)
# # In[262]:
# cnt=0
# for i in range(0,length):
# if(y_pred[i]==label_vals[i]):
# cnt+=1
# print(cnt/label_vals.shape[0])
# # In[281]:
# from sklearn.metrics import confusion_matrix
# y_pred=y_pred.reshape(1000,1)
|
[
"noreply@github.com"
] |
noreply@github.com
|
864bc49ad51f2d4f534f07456b571d961588a63d
|
766ada1da22829e7a0e56dfe56139d75c68c0d1d
|
/traffic_simulation.py
|
bd62b43bc09c26954ef9decc567c516c894cdf98
|
[] |
no_license
|
grizax/traffic-simulation
|
5f31341490058eaefe5b5db636f4deeadb220381
|
425ef24fed0164ee61037cd9d3b207cb208d00bf
|
refs/heads/master
| 2021-01-18T03:31:40.834686
| 2015-02-02T13:40:02
| 2015-02-02T13:40:02
| 30,035,613
| 0
| 0
| null | 2015-01-29T18:52:01
| 2015-01-29T18:52:01
| null |
UTF-8
|
Python
| false
| false
| 1,836
|
py
|
"""TO DO: Still needs a lot of work with matplotlib, stats, reporting, and ipython notebook"""
import numpy as np
import matplotlib.pyplot as plt
from traffic.simulation import Simulation
from traffic.road import Road
from traffic.car import Car
def multiple_simulations(num_simulations=100):
    """Run ``num_simulations`` independent simulations.

    Returns a tuple of (all final track rows stacked, one row of final
    car speeds per simulation).
    """
    # 30 columns: one final speed per car (car_factory builds 30 cars).
    output_car_speeds = np.array([]).reshape(0, 30)
    # Seed row so np.append has a compatible shape; removed after the
    # loop. Assumes each track row has length 1000 -- TODO confirm
    # against Road.track.
    output_tracks = np.array(np.zeros((1, 1000)))
    for _ in range(num_simulations):
        track_results, car_speeds = one_simulation()
        output_car_speeds = np.vstack([output_car_speeds, [car_speeds]])
        output_tracks = np.append(output_tracks, track_results, axis=0)
    # Drop the zero seed row added before the loop.
    output_tracks = np.delete(output_tracks, 0, 0)
    return output_tracks, output_car_speeds
def one_simulation(time=60):
    """Run one simulation for ``time`` ticks.

    Returns (track occupancy from the last tick, list of each car's
    final speed). NOTE(review): with time <= 0 the loop never runs and
    ``output`` is unbound, raising NameError.
    """
    car_list = car_factory()
    sim = Simulation()
    for _ in range(time):
        output = loop(car_list, sim)
    car_speeds = [car.speed for car in car_list]
    return output, car_speeds
def loop(cars, simulation):
    """Advance the simulation one tick on a fresh Road.

    Applies driving rules, resolves collisions, places the cars on the
    road, accelerates them, and returns the road's occupancy track.
    """
    the_road = Road()
    simulation.driving_rules(cars)
    simulation.car_collision(cars)
    simulation.drive_cars(the_road, cars)
    simulation.accelerate_cars(cars)
    return the_road.track
def car_factory(car_fleet=30):
    """Build ``car_fleet`` Cars spaced 33 positions apart.

    Successive cars get starting offsets 33, 66, ... (30 cars span a
    1000-unit track). Returns the cars in placement order.
    """
    car_list = []
    # Idiom fix: the original wrote `for car in range(...)` and then
    # immediately rebound `car = Car()`, shadowing the loop variable.
    for index in range(1, car_fleet + 1):
        car = Car()
        # Offset from the car's default location -- presumably 0;
        # TODO confirm in Car.__init__.
        car.location += index * 33
        car_list.append(car)
    return car_list
def reporting():
    """Run the full experiment, plot the tracks, and return a
    recommended speed (mean + one std of final speeds, metric units).
    """
    track_results, speed = multiple_simulations()
    # NOTE(review): metric_conversion is called on the class itself, so
    # it is presumably a static/class method -- confirm in Simulation.
    speed_mean = Simulation.metric_conversion(np.mean(speed))
    speed_std = Simulation.metric_conversion(np.std(speed))
    rec_speed = speed_mean + speed_std
    plotting(track_results)
    return rec_speed
def plotting(track_results):
    """Render the stacked track rows as a space-time raster image."""
    x = track_results
    plt.imshow(x, cmap="binary_r", interpolation="gaussian")
    plt.show()
reporting()
|
[
"ndbfoster@gmail.com"
] |
ndbfoster@gmail.com
|
7bdb37872ffc8f66ece48618e51d91c6e015762c
|
d8e9a2dfedaace3b96a7a4c3d105c06950b52e3a
|
/profesores/admin.py
|
6a0b853bfda8230e3084aa45aac60fe562f2d11b
|
[] |
no_license
|
HeribertoLara/DjangoTareaSem4
|
a55ee33427407564588a7933fd80ea1f2661859d
|
c3c0c50f8cf81c352603f803a727c2d201e90bd2
|
refs/heads/master
| 2023-01-20T03:28:14.173094
| 2020-12-02T05:05:10
| 2020-12-02T05:05:10
| 315,838,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from django.contrib import admin
from profesores.models import Profesor
# Register your models here.
admin.site.register(Profesor)
|
[
"ing.quimico.heriberto.lara@gmil.com"
] |
ing.quimico.heriberto.lara@gmil.com
|
8c9c989a540b08b69f3d5efe25cb428555c4f3ac
|
f756d72da9a7a8b05399d7982ad83ab71170e3ce
|
/test_scraping.py
|
04dcbb7f21c523a5a0aa983b920849402b7cdba3
|
[] |
no_license
|
polmuz/GoGetWeb
|
c0fddda946d3950fc606af3b1bd0148f88589723
|
36a369c648e61c953437a2d7ee1a8017d7bb5636
|
refs/heads/master
| 2021-01-15T15:25:54.672987
| 2016-08-22T21:22:45
| 2016-08-22T21:22:45
| 63,362,626
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
# -*- coding: utf-8 -*-
import unittest
from unittest.mock import patch, MagicMock
from scraping import get_webpage_content, extract_xpaths, Not200Exception
class TestGetWebpageContent(unittest.TestCase):
    """Unit tests for scraping.get_webpage_content with requests mocked."""

    @patch('scraping.requests.get')
    def test_base(self, requests_get_mock):
        # A 200 response yields the raw response body unchanged.
        expected_content = b'<h1>Chuck Norris</h1>'
        requests_get_mock.return_value = MagicMock(
            content=expected_content,
            status_code=200
        )
        content = get_webpage_content("http://test.url")
        self.assertEqual(expected_content, content)

    @patch('scraping.requests.get')
    def test_status_not_ok(self, requests_get_mock):
        # Any non-200 status must raise Not200Exception.
        requests_get_mock.return_value = MagicMock(
            content=b'Not Found',
            status_code=404
        )
        with self.assertRaises(Not200Exception):
            content = get_webpage_content("http://test.url")

    @patch('scraping.requests.get')
    def test_requests_exception(self, requests_get_mock):
        # Network-level errors raised by requests must propagate.
        requests_get_mock.side_effect = Exception("Requests Exception!")
        with self.assertRaises(Exception):
            content = get_webpage_content("http://test.url")
class TestExtractXpath(unittest.TestCase):
    """Unit tests for scraping.extract_xpaths."""

    def test_base(self):
        # A single matching node yields its text under the given key.
        content = """
        <html>
        <body>
        <h1>Title!</h1>
        <p>Bla</p>
        </body>
        </html>
        """
        xpaths = {
            "title": "//h1"
        }
        extracted = extract_xpaths(content, xpaths)
        self.assertEqual(extracted, {"title": "Title!"})

    def test_multiple_elements(self):
        # Multiple matches are joined with newlines.
        content = """
        <html>
        <body>
        <h1>Title!</h1>
        <p>Bla</p>
        <p>Ble</p>
        </body>
        </html>
        """
        xpaths = {
            "description": "//p"
        }
        extracted = extract_xpaths(content, xpaths)
        self.assertEqual(extracted, {"description": "Bla\nBle"})
|
[
"pablomouzo@gmail.com"
] |
pablomouzo@gmail.com
|
d50c94eacde9d6b5811b845d34524432308590f4
|
b7d4bd854e0052c6f7ee8b8a42fa1145de76a61f
|
/src/7_3/7_3_test.py
|
89c48b13edd8b0a319c02587782f62dc7ed53ce0
|
[] |
no_license
|
WeslyG/labs
|
b22f61512293a2e00545b7ee02df1bf4a62961f1
|
976c06a3b2e10082638ae6b0cf55b8400fe2d4ab
|
refs/heads/master
| 2022-12-25T21:16:24.604494
| 2020-10-01T12:32:15
| 2020-10-01T12:32:15
| 297,427,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from s7_3 import number_is_even
def test_s7_3():
    """number_is_even: odd -> False; even, including 0, -> True."""
    assert number_is_even(7) is False
    assert number_is_even(6) is True
    assert number_is_even(0) is True
|
[
"weslyg22@gmail.com"
] |
weslyg22@gmail.com
|
8046f0cbdb65b170b5dabaff186ad81fb6a24843
|
f3a73a2db52b9ec1b052d5f8de362991a19af1ba
|
/singletone_decorator.py
|
031b40a63e1ebe570022ea0cac4e4b52af249363
|
[] |
no_license
|
mikaevnikita/python
|
ffc9fe9a295f3291379482d08f72bac8fd3d98c1
|
ddd7a4f51ad1d1433b41c7e58db2227f41d19400
|
refs/heads/master
| 2021-09-02T02:27:26.029084
| 2017-12-29T18:39:40
| 2017-12-29T18:39:40
| 115,745,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
import functools
def singleton(cls):
    """Decorator that makes ``cls`` produce one shared instance.

    The first call constructs the object; every later call returns the
    same object and ignores its arguments.
    """
    cached = None

    @functools.wraps(cls)
    def get_instance(*args, **kwargs):
        nonlocal cached
        if cached is None:
            cached = cls(*args, **kwargs)
        return cached

    return get_instance
@singleton
class Noop:
    # Empty marker class used only to demonstrate the decorator.
    pass

# Both calls return the cached instance: the printed ids are equal.
a=Noop()
b=Noop()
print(id(a),id(b))
|
[
"n.v.mikaev@gmail.com"
] |
n.v.mikaev@gmail.com
|
2b4c7736ef6d059602290437ae4a47765822bc7a
|
c247035975b8ca12eff9cfe8bac7c57402d1e728
|
/Unit10/T10.7.py
|
f0b0092787214e70119d976390273dd4cd573d7e
|
[] |
no_license
|
xlxwalex/pythonew
|
cf896707a72cbb7fa44c8fa9e504e400628e3ddd
|
ff3601641f695ce01888518b970eccc457b4a238
|
refs/heads/master
| 2021-01-22T21:48:47.038598
| 2017-03-21T15:53:47
| 2017-03-21T15:54:22
| 85,477,387
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
#10.7
import requests
import bs4
import time
def baiduclock():
    """Hit www.baidu.com repeatedly for 30 seconds and report how many
    requests returned HTTP 200.
    """
    tim = 0
    agent = {'User-agent': 'Mozilla/5.0'}
    # Bug fix: time.clock() was removed in Python 3.8;
    # time.perf_counter() is the monotonic replacement.
    timer = time.perf_counter()
    while time.perf_counter() - timer <= 30:
        print(time.perf_counter() - timer)
        httpbaidu = requests.get('http://www.baidu.com', headers=agent)
        if httpbaidu.status_code == 200:
            tim += 1
    # Bug fix: the summary was printed inside the loop's else branch,
    # so it ran once per *failed* request and never after a fully
    # successful run. Report once, after the 30 s window.
    print("30s内爬虫访问了百度{}次".format(tim))
baiduclock()
|
[
"xlxw@xlxw.org"
] |
xlxw@xlxw.org
|
3d0f58b74138d3d783dd0a71510afd2354a9ac4e
|
243eddaee6dff4551da9c10f725d8828e13840ac
|
/get_premium/apps.py
|
d24dcc77204cd467ed804330c4f12a0a7f693080
|
[
"MIT"
] |
permissive
|
BarunBlog/Link_People
|
46b0c2c141ae042b481893aee869977755790dc8
|
1ffd07bc5b31a715133c99efbbb478efe18d632b
|
refs/heads/master
| 2023-01-24T04:54:13.545951
| 2020-12-03T05:56:33
| 2020-12-03T05:56:33
| 304,888,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
from django.apps import AppConfig
class GetPremiumConfig(AppConfig):
name = 'get_premium'
|
[
"bhattacharjeebarun25@gmail.com"
] |
bhattacharjeebarun25@gmail.com
|
ee4433ee5b0534fc569bbd443da946bf6e41e51e
|
93857d07c3391a1a10843bc67bb0b7ae93edca97
|
/Mysql_CodeGen.py
|
92e33cfa3392dc71a9dcf7d6ed9db329f2df3c01
|
[] |
no_license
|
xumingxsh/Mysql_CodeGen
|
dffd3ff59f9b35c616fd85ab6296134333eb7293
|
4d43ad50cd994df2ba478a0695cda690460381b6
|
refs/heads/master
| 2021-01-10T01:20:43.885279
| 2016-02-28T12:47:52
| 2016-02-28T12:47:52
| 52,719,962
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,731
|
py
|
# 徐敏荣
# 2014-04-18
# 整理,重新编写
# 该代码主要实现从MySQL中读取数据库表结构,并自动生成某些有规律的代码
import MySQLdb
from StringIO import StringIO
from string import Template
#表结构
class TableInfo:
    """Basic metadata for one database table."""

    def __init__(self):
        self.table_Name = ""  # table name (English identifier)
        self.table_Comment = ""  # table comment / Chinese display name
        # Lowercased table name, used for case-insensitive lookups.
        # (The original comment said "owning user", but DBGencode
        # assigns table_Name.lower() here.)
        self.nameLower = ""
# 扩展表结构
class TableEx:
    """A TableInfo plus its column and primary-key column lists."""

    def __init__(self):
        self.base = None  # TableInfo for this table
        self.columns = []  # ColumnInfo objects in schema order
        self.keys = []  # subset of columns flagged as primary key
# 列结构
class ColumnInfo:
    """Metadata for one column, loaded from information_schema.COLUMNS."""

    def __init__(self):
        self.table_Name = ""  # owning table name
        self.column_Name = ""  # column name
        self.data_Type = ""  # MySQL data type (e.g. int, varchar)
        self.column_Default = ""  # default value, if any
        self.is_Nullable = ""  # nullability flag ('NO' means NOT NULL)
        self.column_Comment = ""  # column comment / display name
        self.charLen = None  # character max length; None for non-text
        self.len = None  # data length (not populated in this file)
        self.isKey = 0  # 1 when the column is part of the primary key
        self.extra = ""  # e.g. 'auto_increment'
        self.precision=None  # numeric precision (decimal/double/float)
        self.scale=None  # numeric scale
# 表结构的SQL辅助类,主要用于生成SQL语句
class TableExForSQL:
def __init__(self, tb):
self.tb = tb # 使用的表
# 打印表结构信息
def show(self):
print '%s\t%s' % (self.tb.base.table_Name, self.tb.base.table_Comment)
print "---------------------------------------"
for i in self.tb.columns:
nullStr = "\t not null "
if i.is_Nullable.lower() != 'no':
nullStr = ""
print '%s\t%s%s\t%s\t%s' % (i.column_Name, i.data_Type,self.getColumnLen(i),nullStr, i.column_Comment)
# 打印列结构信息
# i-列结构
def columnSQL(self, column):
nullStr = "\t not null " # 是否允许为空
if column.is_Nullable.lower() != 'no':
nullStr = " "
defStr = "\t default " # 默认值
if column.column_Default == '' or\
column.column_Default == None or \
column.column_Default == '0000-00-00 00:00:00':
defStr = ""
else:
defStr += " " + column.column_Default + " "
comStr = "" # 注释信息
if column.column_Comment != '' and column.column_Comment != None:
comStr = "\t comment '" + column.column_Comment + "'"
autoCreat = "" # 是否是自增字段
if column.extra == "auto_increment":
autoCreat = "\t auto_increment "
return '\t' + column.column_Name + '\t' + column.data_Type + self.getColumnLen(column) + nullStr + autoCreat + defStr + comStr
# 数据库表创建语句
def createSQL(self):
print "/*%s\t%s*/" % (self.tb.base.table_Name, self.tb.base.table_Comment) # 注释信息
print "create table %s\n(" % self.tb.base.table_Name
# 打印列语句
for column in self.tb.columns:
print self.columnSQL(column) + ','
# 打印主键语句
key = ''
for i in self.tb.keys:
if key != '':
key += ','
key += i.column_Name
key = '\tprimary key (' + key + ')'
print key + "\n);\n"
# 打印注释信息
if self.tb.base.table_Comment != '' and self.tb.base.table_Comment != None:
print "alter table %s comment '%s';" % (self.tb.base.table_Name, self.tb.base.table_Comment)
# 表移除SQL
def dropSQL(self):
print "drop table if exists %s;" % (self.tb.base.table_Name)
# 单条记录查询
def getSQL(self):
cls = ""
for i in self.tb.columns:
if cls != "\t":
cls += "\n\t,"
cls += i.column_Name.lower()
print "\tSelect " + cls + " From " + self.tb.base.table_Name
# 添加列
def addColumns(self):
for i in self.tb.columns:
print "alter table %s add %d " % (self.tb.base.table_Name, self.column(i))
# 获得列长度
def getColumnLen(self, i):
if i.data_Type == "decimal"\
or i.data_Type == "double" \
or i.data_Type == "float":
if i.scale is None:
return "(%d)"%(i.precision)
else:
return "(%d,%d)"%(i.precision, i.scale)
if i.data_Type == "text":
return ''
if i.charLen != None and i.charLen != "":
return '(%d)'%i.charLen
return ''
# 获得列数据类型
def getParamType_Java_MyBatis(self, i):
if i.data_Type == "int"\
or i.data_Type == "decimal"\
or i.data_Type == "double" \
or i.data_Type == "smallint" \
or i.data_Type == "tinyint" \
or i.data_Type == "float":
return "NUMERIC"
if i.data_Type == "timestamp":
return "TIME"
return "VARCHAR"
# 添加记录SQL(适用于mybatis)
def insertSQL_Java_MyBatis(self):
params = ""
values = ""
for i in self.tb.columns:
if i.extra == "auto_increment" or\
i.column_Comment == "CURRENT_TIMESTAMP comment":
continue
if params != "":
params += "\t, "
else:
params += "\t"
params += i.column_Name.lower() + "\n"
if values != "":
values += "\t, "
else:
values += "\t"
t = Template('#{${name},jdbcType=${type}}\n')
values += t.substitute(name=i.column_Name.lower(), type=self.getParamType_Java_MyBatis(i))
print "\tInsert Into %s(\n%s\t) Values (\n%s\t)" % (self.tb.base.table_Name,params,values)
# 编辑记录SQL(适用于mybatis)
def updateSQL_Java_MyBatis(self):
params = ""
for i in self.tb.columns:
if i.extra == "auto_increment" or i.column_Comment == "CURRENT_TIMESTAMP comment":
continue
if params != "":
params += "\t, "
else:
params += "\t"
t = Template('${name} = #{${name},jdbcType=${type}}\n')
params += t.substitute(name=i.column_Name.lower(), type=self.getParamType_Java_MyBatis(i))
values = ""
for i in self.tb.keys:
if values != "":
values += "\t, "
else:
values += "\t"
t = Template('${name} = #{${name},jdbcType=${type}}\n')
values += t.substitute(name=i.column_Name.lower(), type=self.getParamType(i))
print "\tUpdate %s SET \n%s\t Where %s " % (self.tb.base.table_Name, params, values)
TableExForSQL.insertSQL = insertSQL_Java_MyBatis
TableExForSQL.updateSQL = updateSQL_Java_MyBatis
TableExForSQL.getParamType_Java_MyBatis = getParamType_Java_MyBatis
class TableExForMapper:
def __init__(self, tb):
self.tb = tb
self.sql = TableExForSQL(tb)
def insert(self):
print '<insert id="add" parameterType="' + self.tb.base.table_Name.capitalize() + 'PO">'
self.sql.insertSQL()
print '</insert>'
def update(self):
print '<update id="update" parameterType="' + self.tb.base.table_Name.capitalize() + 'PO">'
self.sql.updateSQL()
print '</update>'
def selectList(self):
print '<select id="getList" parameterType="' + self.tb.base.table_Name.capitalize() + \
'QO" resultType="' + self.tb.base.table_Name.capitalize() + 'VO">'
self.sql.getSQL()
print "\t LIMIT #{recordStart, jdbcType=NUMERIC},#{rows, jdbcType=NUMERIC}"
print '</select>'
class TableExForJava:
def __init__(self, tb):
self.tb = tb
def createPO(self):
propertys = ""
for i in self.tb.columns:
typ = "String"
if i.data_Type == "int":
typ = "int"
if i.data_Type == "timestamp":
typ = "Date"
if i.column_Comment != '' and i.column_Comment != None:
print "\t/**"
print "\t*" + i.column_Comment
print "\t*/"
print "\tprivate " + typ + " " + i.column_Name.lower() + ";"
t = Template("\tpublic ${type} get ${nameU}() {\n"
"\t\treturn this.${name};\n"\
"\t}\n\n"\
"\tpublic void set ${nameU}(${type} ${name}) {\n"\
"\t\tthis.${name} = ${name};\n"\
"\t}\n\n")
#propertys += "\tpublic " + typ + " get" + i.column_Name.lower().capitalize() + "() {\n"
#propertys += "\t\treturn this." + i.column_Name.lower() + ";\n"
#propertys += "\t}\n\n"
#propertys += "\tpublic void set" + i.column_Name.lower().capitalize() + "(" + typ + " " + i.column_Name.lower() + " ) {\n"
#propertys += "\t\tthis." + i.column_Name.lower() + " = " + i.column_Name.lower() + ";\n"
#propertys += "\t}\n\n"
propertys = t.ssubstitute(type=typ, nameU=i.column_Name.lower().capitalize(), name=i.column_Name.lower())
print ""
if i.data_Type != "timestamp":
continue
print "\tprivate String " + i.column_Name.lower() + "Str;"
propertys += "\tpublic String get" + i.column_Name.lower().capitalize() + "Str() {\n"
propertys += "\t\treturn TypeCommon.ConvertToString(this." + i.column_Name.lower() + ");\n"
propertys += "\t}\n\n"
print propertys
def dataGridColums(self):
for i in self.tb.columns:
comment = i.column_Name
if i.column_Comment != '' and i.column_Comment != None:
comment = i.column_Comment
print '\t\t <th field="' + i.column_Name.lower() + '" width="100px">' + comment + '</th>'
class DBGencode:
def __init__(self, host, port, db, user, pwd):
self.host = host
self.port = port
self.user = user
self.pwd = pwd
self.db = db
self.con = MySQLdb.connect(host=self.host,port=self.port,\
db='information_schema',user=self.user,passwd=self.pwd,\
charset="gbk")
cur = self.con.cursor()
cur.execute("select table_Name, table_Comment from tables where TABLE_SCHEMA='" + self.db + "'")
self.tables=[]
self.tableExs=[]
self.columns=[]
for i in cur.fetchall():
t = TableInfo()
t.table_Name = i[0]
t.nameLower = t.table_Name.lower()
arr = i[1].split(";")
if len(arr) > 1:
t.table_Comment = arr[0]
else:
t.table_Comment = ""
self.tables.append(t)
cur.execute("select Table_Name, Column_Name," \
+ "Data_Type,Column_Default,Is_Nullable,Column_Comment,"\
"CHARACTER_MAXIMUM_LENGTH, COLUMN_KEY, extra,NUMERIC_PRECISION,NUMERIC_SCALE from COLUMNS where TABLE_SCHEMA='"\
+ self.db + "' ")
for i in cur.fetchall():
c = ColumnInfo()
c.table_Name = i[0]
c.column_Name = i[1]
c.data_Type = i[2]
c.column_Default = i[3]
c.is_Nullable = i[4]
c.column_Comment = i[5]
c.charLen = i[6]
if i[7] == 'PRI':
c.isKey = 1
c.extra = i[8]
c.precision = i[9]
c.scale = i[10]
self.columns.append(c)
for i in self.tables:
tb = TableEx()
tb.base = i
for it in self.columns:
if it.table_Name.lower() != i.table_Name.lower():
continue
tb.columns.append(it)
if it.isKey == 1:
tb.keys.append(it)
self.tableExs.append(tb)
def showTables(self):
for i in self.tables:
#print str(i)
print '%s\t%s' % (i.table_Name, i.table_Comment)
#print i.table_Comment
def showColumns(self):
for i in self.columns:
print '%s\t%s\t%s' % (i.column_Name, i.data_Type,i.column_Comment)
def getTable(self, name):
nameLw = name.lower()
for i in self.tableExs:
if i.base.nameLower == nameLw:
return i
return None
def showTable(self, name):
tb = self.getTable(name)
if tb == None:
print u"没有查找到数据库表:" + name
return
sql = TableExForSQL(tb)
sql.show()
def showDataBase(self):
for i in self.tableExs:
sql = TableExForSQL(i)
sql.show()
print ""
print ""
def showCreateSQLs(self):
for i in self.tableExs:
sql = TableExForSQL(i)
sql.createSQL()
print ""
print ""
def dropSQLs(self):
for i in self.tableExs:
sql = TableExForSQL(i)
sql.dropSQL()
def insertSQLs(self):
for i in self.tableExs:
sql = TableExForSQL(i)
print ""
print ""
sql.insertSQL()
def updateSQLs(self):
for i in self.tableExs:
sql = TableExForSQL(i)
print ""
print ""
sql.updateSQL()
def sqls(self):
for i in self.tableExs:
sql = TableExForSQL(i)
print ""
print i.base.table_Name
print "----------------------------"
print u"添加语句"
sql.insertSQL()
print ""
print u"更新语句"
sql.updateSQL()
print ""
print u"查询语句"
sql.getSQL()
print ""
print u"添加列"
sql.addColumns()
def insertXMLs(self):
for i in self.tableExs:
mapper = TableExForMapper(i)
print ""
print ""
mapper.insert()
def updateXMLs(self):
for i in self.tableExs:
mapper = TableExForMapper(i)
print ""
print ""
mapper.update()
def XMLs(self):
for i in self.tableExs:
mapper = TableExForMapper(i)
print ""
print i.base.table_Name
print "----------------------------"
print u"添加语句"
mapper.insert()
print ""
print u"更新语句"
mapper.update()
print ""
print u"查询语句"
mapper.selectList()
def javas(self):
for i in self.tableExs:
jv = TableExForJava(i)
print ""
print i.base.table_Name
print "----------------------------"
print u"PO属性"
jv.createPO()
print ""
print u"列表列"
jv.dataGridColums()
def createSQLs(self):
for i in seinsertSQLslf.tableExs:
sql = TableExForSQL(i)
mylookup = TemplateLookup(directories=['docs'],\
module_directory='tmp/mako_modules', collection_size=500, output_encoding='utf-8', encoding_errors='replace')
mytemplate = mylookup.get_template('createSQL.sql')
print mytemplate.render(table=i, tb=i.base, sql=sql)
code=DBGencode("127.0.0.1", 3306, "ivsm", "root", "root")
code.insertSQLs()
code.showCreateSQLs()
|
[
"xumingxsh21@126.com"
] |
xumingxsh21@126.com
|
b8992e4670a1ea151e7a9491438ccf75e9e869fb
|
accf5e4bb9b0d0b0fe2a1ef900fcb9c726f664ba
|
/Network/MixAttNet.py
|
c142ac0ad2bf20b933716f7501d4068ada17799e
|
[] |
no_license
|
pdedumast/FetalCPSeg
|
43eab35dc3379e69818fa9e203f83442e4a4e8c6
|
713dc1b88ed42e4e5cdbc5b876449660e533cccb
|
refs/heads/master
| 2022-11-19T20:47:29.161663
| 2020-07-28T09:43:23
| 2020-07-28T09:43:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,306
|
py
|
import torch
from torch import nn
from torch.nn import functional as F
def convolution_block(in_chan, out_chan, ksize=3, pad=1, stride=1, bias=False):
    """Conv3d -> BatchNorm3d -> PReLU, packaged as one Sequential."""
    conv = nn.Conv3d(in_chan, out_chan, kernel_size=ksize, padding=pad,
                     stride=stride, bias=bias)
    return nn.Sequential(conv, nn.BatchNorm3d(out_chan), nn.PReLU())
def up_sample3d(x, t, mode="trilinear"):
    """Interpolate ``x`` so its spatial dims match those of ``t``.

    Only the dims after (batch, channel) are taken from ``t``; corners
    are not aligned.
    """
    spatial = t.shape[2:]
    return F.interpolate(x, size=spatial, mode=mode, align_corners=False)
class ResStage(nn.Module):
    """3D residual stage: two 3x3x3 convs plus a projected shortcut.

    The shortcut always passes through a 1x1x1 conv + BN, so the
    in/out channel counts and the stride may differ freely.
    """
    def __init__(self, in_chan, out_chan, stride=1):
        super(ResStage, self).__init__()
        # First conv applies the stride; second keeps the resolution.
        self.conv1 = convolution_block(in_chan, out_chan, stride=stride)
        self.conv2 = nn.Sequential(
            nn.Conv3d(out_chan, out_chan, kernel_size=3, padding=1),
            nn.BatchNorm3d(out_chan)
        )
        self.non_linear = nn.PReLU()
        # Projection shortcut matching the main path's channels/stride.
        self.down_sample = nn.Sequential(
            nn.Conv3d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm3d(out_chan))

    def forward(self, x):
        out = self.conv2(self.conv1(x))
        shortcut = self.down_sample(x)
        # PReLU is applied after the residual sum.
        out = self.non_linear(out + shortcut)
        return out
def down_stage(in_chan, out_chan):
    """1x1x1 channel projection: bias-free Conv3d -> BN -> PReLU."""
    layers = [
        nn.Conv3d(in_chan, out_chan, kernel_size=1, bias=False),
        nn.BatchNorm3d(out_chan),
        nn.PReLU(),
    ]
    return nn.Sequential(*layers)
class MixBlock(nn.Module):
    """Multi-scale conv block: four parallel kernels (3, 5, 7, 9), each
    producing out_chan // 4 channels, concatenated and PReLU'd.

    NOTE(review): out_chan must be divisible by 4, otherwise the
    concatenated output carries fewer than out_chan channels.
    """
    def __init__(self, in_chan, out_chan):
        super(MixBlock, self).__init__()
        self.conv1 = nn.Conv3d(in_chan, out_chan // 4, 3, padding=1, bias=False)
        self.conv3 = nn.Conv3d(in_chan, out_chan // 4, 5, padding=2, bias=False)
        self.conv5 = nn.Conv3d(in_chan, out_chan // 4, 7, padding=3, bias=False)
        self.conv7 = nn.Conv3d(in_chan, out_chan // 4, 9, padding=4, bias=False)
        self.bn1 = nn.BatchNorm3d(out_chan // 4)
        self.bn3 = nn.BatchNorm3d(out_chan // 4)
        self.bn5 = nn.BatchNorm3d(out_chan // 4)
        self.bn7 = nn.BatchNorm3d(out_chan // 4)
        self.nonlinear = nn.PReLU()

    def forward(self, x):
        # Padding keeps every branch at the input's spatial size.
        k1 = self.bn1(self.conv1(x))
        k3 = self.bn3(self.conv3(x))
        k5 = self.bn5(self.conv5(x))
        k7 = self.bn7(self.conv7(x))
        return self.nonlinear(torch.cat((k1, k3, k5, k7), dim=1))
class Attention(nn.Module):
    """Residual spatial-attention refinement block.

    Two stacked MixBlocks produce a sigmoid attention map; the input is
    re-weighted by the map and added to a normalized shortcut of itself.
    """

    def __init__(self, in_chan, out_chan):
        super(Attention, self).__init__()
        self.mix1 = MixBlock(in_chan, out_chan)
        self.conv1 = nn.Conv3d(out_chan, out_chan, kernel_size=1)
        self.mix2 = MixBlock(out_chan, out_chan)
        self.conv2 = nn.Conv3d(out_chan, out_chan, kernel_size=1)
        self.norm1 = nn.BatchNorm3d(out_chan)
        self.norm2 = nn.BatchNorm3d(out_chan)
        self.relu = nn.PReLU()

    def forward(self, x):
        """Return (refined features, attention map); both match x's shape."""
        shortcut = x
        mix1 = self.conv1(self.mix1(x))
        mix2 = self.mix2(mix1)
        # torch.sigmoid replaces F.sigmoid, which has been deprecated
        # and removed from torch.nn.functional in recent PyTorch.
        att_map = torch.sigmoid(self.conv2(mix2))
        out = self.norm1(x * att_map) + self.norm2(shortcut)
        return self.relu(out), att_map
def out_stage(in_chan, out_chan):
    """Prediction head: 3x3x3 conv + BN + PReLU followed by a 1x1x1
    conv that reduces the features to a single output channel."""
    layers = (
        nn.Conv3d(in_chan, out_chan, kernel_size=3, padding=1),
        nn.BatchNorm3d(out_chan),
        nn.PReLU(),
        nn.Conv3d(out_chan, 1, kernel_size=1),
    )
    return nn.Sequential(*layers)
class MixAttNet(nn.Module):
    """Mixed-attention segmentation network with deep supervision.

    A five-stage residual encoder feeds a U-Net-style decoder. Each
    decoder scale is projected to 16 channels, upsampled to input
    resolution, refined by an Attention block, and the four refined
    maps are fused by a final prediction head. In training mode the
    forward pass returns the fused logits plus eight auxiliary logit
    maps (four attention outputs, four plain decoder outputs); in eval
    mode it returns sigmoid probabilities of the fused logits.
    """
    def __init__(self):
        super(MixAttNet, self).__init__()
        self.init_block = convolution_block(1, 16)
        # Encoder: four stride-2 stages halve the resolution each time.
        self.enc1 = ResStage(16, 16, 1)
        self.enc2 = ResStage(16, 32, 2)
        self.enc3 = ResStage(32, 64, 2)
        self.enc4 = ResStage(64, 128, 2)
        self.enc5 = ResStage(128, 128, 2)
        # Decoder: each stage consumes skip + upsampled deeper features.
        self.dec4 = ResStage(128+128, 64)
        self.dec3 = ResStage(64+64, 32)
        self.dec2 = ResStage(32+32, 16)
        self.dec1 = ResStage(16+16, 16)
        # Project every decoder scale into a shared 16-channel space.
        self.down4 = down_stage(64, 16)
        self.down3 = down_stage(32, 16)
        self.down2 = down_stage(16, 16)
        self.down1 = down_stage(16, 16)
        self.mix1 = Attention(16, 16)
        self.mix2 = Attention(16, 16)
        self.mix3 = Attention(16, 16)
        self.mix4 = Attention(16, 16)
        # 1x1x1 heads for the deep-supervision logit maps.
        self.mix_out1 = nn.Conv3d(16, 1, kernel_size=1)
        self.mix_out2 = nn.Conv3d(16, 1, kernel_size=1)
        self.mix_out3 = nn.Conv3d(16, 1, kernel_size=1)
        self.mix_out4 = nn.Conv3d(16, 1, kernel_size=1)
        self.down_out1 = nn.Conv3d(16, 1, kernel_size=1)
        self.down_out2 = nn.Conv3d(16, 1, kernel_size=1)
        self.down_out3 = nn.Conv3d(16, 1, kernel_size=1)
        self.down_out4 = nn.Conv3d(16, 1, kernel_size=1)
        # Fused head over the four concatenated attention outputs.
        self.out = out_stage(16*4, 64)

    def forward(self, x):
        x = self.init_block(x)
        enc1 = self.enc1(x)
        enc2 = self.enc2(enc1)
        enc3 = self.enc3(enc2)
        enc4 = self.enc4(enc3)
        enc5 = self.enc5(enc4)
        # Decoder path with skip connections (concat along channels).
        dec4 = self.dec4(
            torch.cat((enc4, up_sample3d(enc5, enc4)), dim=1))
        dec3 = self.dec3(
            torch.cat((enc3, up_sample3d(dec4, enc3)), dim=1))
        dec2 = self.dec2(
            torch.cat((enc2, up_sample3d(dec3, enc2)), dim=1))
        dec1 = self.dec1(
            torch.cat((enc1, up_sample3d(dec2, enc1)), dim=1))
        # Bring every decoder scale to input resolution.
        down1 = up_sample3d(self.down1(dec1), x)
        down4 = up_sample3d(self.down4(dec4), x)
        down3 = up_sample3d(self.down3(dec3), x)
        down2 = up_sample3d(self.down2(dec2), x)
        down_out1 = self.down_out1(down1)
        down_out2 = self.down_out2(down2)
        down_out3 = self.down_out3(down3)
        down_out4 = self.down_out4(down4)
        # Attention refinement per scale (attention maps are discarded).
        mix1, att1 = self.mix1(down1)
        mix2, att2 = self.mix2(down2)
        mix3, att3 = self.mix3(down3)
        mix4, att4 = self.mix4(down4)
        mix_out1 = self.mix_out1(mix1)
        mix_out2 = self.mix_out2(mix2)
        mix_out3 = self.mix_out3(mix3)
        mix_out4 = self.mix_out4(mix4)
        out = self.out(torch.cat((mix1, mix2, mix3, mix4), dim=1))
        if self.training:
            return out, mix_out1, mix_out2, mix_out3, mix_out4, down_out1, down_out2, down_out3, down_out4
        else:
            return torch.sigmoid(out)
if __name__ == '__main__':
net = MixAttNet().cuda()
torch.save(net.state_dict(), "MixAttNet.pth.gz")
|
[
"noreply@github.com"
] |
noreply@github.com
|
2e6274aeecb4abc551445e441ed62ced40c33285
|
6ae058253aeff9ee9d4a166bab1c6898fb2fa042
|
/hackerrank/artificial-intelligence/document-classification/document-classification.py
|
d58f437f054a7f3600a34b2252cff4f05a152261
|
[
"MIT"
] |
permissive
|
gnuaha7/programming-problems
|
054136b840a8323ca6d5c20e579dc63e19543138
|
3ed43b306c19718f00bf77ed191e7a3f2ba8da57
|
refs/heads/master
| 2021-01-11T02:56:34.788550
| 2016-09-04T19:38:53
| 2016-09-04T19:38:53
| 70,878,741
| 0
| 0
| null | 2016-10-14T06:00:42
| 2016-10-14T06:00:42
| null |
UTF-8
|
Python
| false
| false
| 1,457
|
py
|
# https://www.hackerrank.com/challenges/document-classification
import sys
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.cross_validation import cross_val_score
def load_corpus(f, has_classes=True):
    """Read a corpus from the open file object ``f``.

    The first line holds the document count; each following line is
    either "<class> <text>" (``has_classes=True``) or raw text.
    Returns a ``(documents, classes)`` tuple; ``classes`` stays empty
    when absent. Trailing newlines are preserved on each document.
    """
    num_docs = int(f.readline())
    # Idiom fix: the original reused the loop index `i` as the result
    # of line.find(' '), shadowing it mid-iteration.
    docs, classes = [], []
    for _ in range(num_docs):
        line = f.readline()
        if has_classes:
            sep = line.find(' ')
            classes.append(int(line[:sep]))
            docs.append(line[sep + 1:])
        else:
            docs.append(line)
    return (docs, classes)
def train_classifier(corpus):
    """Fit a TF-IDF + logistic-regression (SGD) pipeline.

    ``corpus`` is the (documents, classes) tuple from load_corpus.
    NOTE(review): the file imports sklearn.cross_validation, which was
    removed in scikit-learn 0.20 (now model_selection); `n_iter` was
    likewise renamed `max_iter` -- this code targets an old sklearn.
    """
    model = Pipeline([
        ('tfidf', TfidfVectorizer(stop_words='english')),
        ('classifier', SGDClassifier(loss='log', penalty='none', n_iter=100)),
    ])
    # scores = cross_val_score(model, corpus[0], corpus[1], cv=10, n_jobs=-1)
    # print('CV score:', np.mean(scores))
    model.fit(corpus[0], corpus[1])
    return model
if __name__ == '__main__':
np.random.seed(sum(map(ord, 'document-classification')))
with open('trainingdata.txt') as f:
training_data = load_corpus(f, has_classes=True)
classifier = train_classifier(training_data)
test_data = load_corpus(sys.stdin, has_classes=False)
classes = classifier.predict(test_data[0])
print('\n'.join(str(class_) for class_ in classes))
|
[
"yasserglez@gmail.com"
] |
yasserglez@gmail.com
|
c7e4005fc61db2b565fdd6d8e80b1c661ea470d3
|
94bd08e95ae0c31973f500a7bab3aa5378f7ec7b
|
/snippets/migrations/0003_auto_20190429_0033.py
|
8228906cde11f126db33ecf58304050ad10be0a8
|
[] |
no_license
|
freddiemo/django_rest_framework_3_sample
|
eec2f3315e9c79ca3af6aa39423337606eb3aca8
|
ca299957f8a7e666c31d71be74028e18392f65dc
|
refs/heads/master
| 2020-05-17T03:14:45.963863
| 2019-04-29T17:23:27
| 2019-04-29T17:23:27
| 183,473,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Generated by Django 2.2 on 2019-04-29 00:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('snippets', '0002_auto_20190429_0026'),
]
operations = [
migrations.AlterField(
model_name='snippet',
name='highlighted',
field=models.TextField(),
),
]
|
[
"freddiejmo@gmail.com"
] |
freddiejmo@gmail.com
|
afa9a1d0944e4af29df98932dd9113870175e138
|
3ac0a169aa2a123e164f7434281bc9dd6373d341
|
/singleNumber.py
|
4a7b92101b0350685936c92368994f2cf80679bc
|
[] |
no_license
|
sfeng77/myleetcode
|
02a028b5ca5a0354e99b8fb758883902a768f410
|
a2841fdb624548fdc6ef430e23ca46f3300e0558
|
refs/heads/master
| 2021-01-23T02:06:37.569936
| 2017-04-21T20:31:06
| 2017-04-21T20:31:06
| 85,967,955
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
class Solution(object):
    """LeetCode 136: every element appears twice except one."""

    def singleNumber(self, nums):
        """Return the lone non-duplicated value in ``nums``.

        XOR-folds the list: pairs cancel (a ^ a == 0), leaving the
        single number. O(n) time, O(1) space.

        :type nums: List[int]
        :rtype: int
        """
        acc = 0
        for value in nums:
            acc ^= value
        return acc
|
[
"sfeng77@gmail.com"
] |
sfeng77@gmail.com
|
d8756586064d46abf0b01f2f255a4408170c98ca
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j19485-4225/sdB_GALEX_J19485-4225_lc.py
|
ad5e79f01dd4bec1f067eebd2a8c3dee9507a2f5
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[297.137792,-42.429325], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_GALEX_J19485-4225 /sdB_GALEX_J19485-4225_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
36ff37b0f61328d72dfe6e5c252ff5a249ce364e
|
4c3d19edd4e7e6325fd131134a28f5e0e78e1221
|
/accounts/user/registration.py
|
510bfe1fe853619048311613240ddecd78d0acf9
|
[] |
no_license
|
carpancan/producthunt
|
1e8e6e0793e24d049a5a95f84341fe0d977bbc79
|
ee51d0d6bf26f34dd4849c26603e9d0c43e45c54
|
refs/heads/master
| 2023-05-03T23:52:16.889611
| 2022-02-10T10:49:13
| 2022-02-10T10:49:13
| 234,498,693
| 0
| 0
| null | 2023-04-21T20:44:59
| 2020-01-17T07:51:30
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
from django.contrib.auth.models import User
from .exceptions import CustomExceptions
class Register:
    """Registers new Django auth users, refusing duplicate usernames."""

    __new_user_dto = None

    def create_user(self, new_user_dto):
        """Validate uniqueness, then persist the user described by `new_user_dto`.

        Raises CustomExceptions.UserExistsException when the username is taken.
        """
        self.__initialize(new_user_dto)
        self.__check_if_user_exists()
        return self.__persist_user()

    def __initialize(self, new_user_dto):
        self.__new_user_dto = new_user_dto

    def __check_if_user_exists(self):
        # EAFP: a missing user raises DoesNotExist, which is the happy path here.
        try:
            User.objects.get(username=self.__new_user_dto.get_username())
        except User.DoesNotExist:
            return False
        raise CustomExceptions.UserExistsException('User already in use')

    def __persist_user(self):
        return User.objects.create_user(
            username=self.__new_user_dto.get_username(),
            password=self.__new_user_dto.get_password()
        )
class NewUserDto:
    """Read-only view over submitted registration fields, minus the CSRF token."""

    __register_data = None

    def __init__(self, request):
        self.__initialize(request)

    def __initialize(self, request):
        self.__register_data = self.__prepare_register_data(request)

    def __prepare_register_data(self, request):
        # Keep every submitted field except the CSRF token.
        return {
            field: value
            for field, value in request.items()
            if 'csrfmiddlewaretoken' not in field
        }

    def get_username(self):
        return self.__register_data.get('username')

    def get_password(self):
        return self.__register_data.get('password')

    def get_password_confirm(self):
        return self.__register_data.get('password_confirm')
|
[
"carlos.paniagua@sngular.team"
] |
carlos.paniagua@sngular.team
|
ecc65f736adcdf7ef2646a8f02c86e70de9ca226
|
1528b9eff25b03adf3c24229d08555aeaea5ac2b
|
/Client/Libraries/__init__.py
|
eb5d1888bc5f5b22e95fdccb22debe6f4fd1fcf0
|
[] |
no_license
|
Erez-Atir/Kahoot
|
b0cb40c0fefa86af6668a00be48807314955aa05
|
7fe5ba5df7e1ce0a7d10a6e84049b94043e2d42b
|
refs/heads/master
| 2021-08-17T02:13:32.473433
| 2020-05-24T19:39:58
| 2020-05-24T19:39:58
| 184,925,432
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,221
|
py
|
# Connection state; filled in below once (and if) a game server is found.
IP = None
my_socket = None
import os
import sys
# Local helper modules live under ./files; make them importable.
sys.path.insert(0, os.getcwd()+'/files')
sys.dont_write_bytecode = True
import ServerDitection
import socket
import pygame
import textbox
import subprocess
# RGB palette used by the fallback "no game running" screen.
RED = (204, 0, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREEN = (0, 153, 0)
BLUE = (53, 119, 252)
PURPLE = (176, 71, 246)
GREY = (85, 77, 77)
ORANGE = (255, 181, 30)
def resfix(x=None, y=None):
    """
    Scale design-time coordinates (laid out on a 1500x800 canvas) to the
    actual display resolution (globals WIDTH / HEIGHT).

    :param x: the x coordinate or a Pygame image. If sent None means only y need a conversion
    :param y: the y coordinate. can be not sent for only x conversion.
    :return: The new coordinates on the new screen with the same proportions. Tuple for (x,y). int for only one number.
    """
    global WIDTH, HEIGHT
    if x is not None:
        if type(x) == type(42):
            if y is not None:
                return int(x/1500.*WIDTH), int(y/800.*HEIGHT)
            return int(x/1500.*WIDTH)
        else:
            # NOTE(review): this branch (x is a Pygame image) computes the
            # image size but then returns None — `sizee` is never used.
            # Looks unfinished; confirm intent before relying on it.
            sizee = x.get_rect().size
            return
    if y is not None:
        return int(y/800.*HEIGHT)
    return None
if True:
    IP = ServerDitection.server_scout()
if IP:
    # A game server was found: open a TCP connection to it on port 23.
    my_socket = socket.socket()
    my_socket.connect((IP, 23))
else:
    # No server found: show a full-screen "No Game Running!" page with an
    # EXIT button until the user quits (window X, ESC, or clicking EXIT).
    screen = pygame.display.set_mode((1500, 800))
    WIDTH, HEIGHT = pygame.display.Info().current_w, pygame.display.Info().current_h
    pygame.display.set_caption("Kaboot")
    screen.fill(PURPLE)
    a = textbox.OutputBox(screen=screen, text="No Game\nRunning!", size=resfix(650, 750), place=resfix(825, 0), color=None,
                          border_width=0, border_color=None, text_color=RED, font="files\\montserrat\\Montserrat-Black.otf")
    b = textbox.OutputBox(screen=screen, text="Ask your teacher to run a game and then try again", size=resfix(650, 750), place=resfix(825, 0), color=None,
                          border_width=0, border_color=None, text_color=BLACK, font="files\\montserrat\\Montserrat-Black.otf")
    c = textbox.OutputBox(screen=screen, text=" EXIT ", size=resfix(310, 100), place=resfix((825+(825+650))/2-310/2, 600), color=WHITE,
                          border_width=0, border_color=BLACK, text_color=BLACK, font="files\\montserrat\\Montserrat-Black.otf")
    img = pygame.transform.scale(pygame.image.load("files\\sadog.jpg"), (int(WIDTH*1.066), HEIGHT))
    finish = False
    while not finish:
        mouse = pygame.mouse.get_pos()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:#user presses the X
                exit()
            if event.type == pygame.KEYDOWN:
                # If pressed key is ESC quit program
                if event.key == pygame.K_ESCAPE:
                    exit()
        # Hover detection over the EXIT button (coordinates mirror c's layout);
        # a left click while hovering terminates the program.
        if resfix((825+(825+650))/2-310/2+310) > mouse[0] > resfix((825+(825+650))/2-310/2) and resfix(None, 600+100) > mouse[1] > resfix(None, 600):
            c.border_width = 5
            if pygame.mouse.get_pressed()[0]:
                sys.exit()
        else:
            c.border_width = 0
        screen.blit(img, (0, 0))
        a.draw()
        b.draw()
        c.draw()
        pygame.display.flip()
|
[
"50252440+Erez-Atir@users.noreply.github.com"
] |
50252440+Erez-Atir@users.noreply.github.com
|
be016283897b8b97fcd923c3c66271b85639e383
|
10d98fecb882d4c84595364f715f4e8b8309a66f
|
/rl_metrics_aaai2021/utils.py
|
fdb1f66a5371b5960ba1746220fe5dec986ad621
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
afcarl/google-research
|
51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42
|
320a49f768cea27200044c0d12f394aa6c795feb
|
refs/heads/master
| 2021-12-02T18:36:03.760434
| 2021-09-30T20:59:01
| 2021-09-30T21:07:02
| 156,725,548
| 1
| 0
|
Apache-2.0
| 2018-11-08T15:13:53
| 2018-11-08T15:13:52
| null |
UTF-8
|
Python
| false
| false
| 7,577
|
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
import collections
from absl import logging
import numpy as np
from rl_metrics_aaai2021 import bisimulation
from rl_metrics_aaai2021 import d_delta
from rl_metrics_aaai2021 import d_delta_star
from rl_metrics_aaai2021 import discrete_bisimulation
from rl_metrics_aaai2021 import discrete_lax_bisimulation
from rl_metrics_aaai2021 import lax_bisimulation
# A metric implementation: the class to instantiate plus its LaTeX plot label.
MetricData = collections.namedtuple('metric_data', ['constructor', 'label'])
# Aggregate statistics gathered while computing a metric on an MDP.
MDPStats = collections.namedtuple(
    'MDPStats', ['time', 'num_iterations', 'min_gap', 'avg_gap', 'max_gap'])
# Dictionary mapping metric name to constructor and LaTeX label.
METRICS = {
    'bisimulation':
        MetricData(bisimulation.Bisimulation, r'$d^{\sim}$'),
    'discrete_bisimulation':
        MetricData(discrete_bisimulation.DiscreteBisimulation, r'$e^{\sim}$'),
    'lax_bisimulation':
        MetricData(lax_bisimulation.LaxBisimulation, r'$d^{\sim_{lax}}$'),
    'discrete_lax_bisimulation':
        MetricData(discrete_lax_bisimulation.DiscreteLaxBisimulation,
                   r'$e^{\sim_{lax}}$'),
    # d_delta_<n> variants differ only in their number of sampling steps n.
    'd_delta_1':
        MetricData(d_delta.DDelta1, r'$d_{\Delta1}$'),
    'd_delta_5':
        MetricData(d_delta.DDelta5, r'$d_{\Delta5}$'),
    'd_delta_10':
        MetricData(d_delta.DDelta10, r'$d_{\Delta10}$'),
    'd_delta_15':
        MetricData(d_delta.DDelta15, r'$d_{\Delta15}$'),
    'd_delta_20':
        MetricData(d_delta.DDelta20, r'$d_{\Delta20}$'),
    'd_delta_50':
        MetricData(d_delta.DDelta50, r'$d_{\Delta50}$'),
    'd_delta_100':
        MetricData(d_delta.DDelta100, r'$d_{\Delta100}$'),
    'd_delta_500':
        MetricData(d_delta.DDelta500, r'$d_{\Delta500}$'),
    'd_delta_1000':
        MetricData(d_delta.DDelta1000, r'$d_{\Delta1000}$'),
    'd_delta_5000':
        MetricData(d_delta.DDelta5000, r'$d_{\Delta5000}$'),
    'd_Delta_star':
        MetricData(d_delta_star.DDeltaStar, r'$d_{\Delta^*}$'),
}
def value_iteration(env, tolerance, verbose=False):
  """Run value iteration on env.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.
    tolerance: float, error tolerance used to exit loop.
    verbose: bool, whether to print verbose messages.

  Returns:
    Numpy array with V* and Q*.
  """
  values = np.zeros(env.num_states)
  q_values = np.zeros((env.num_states, env.num_actions))
  num_iterations = 0
  error = 2 * tolerance
  while error > tolerance:
    updated_values = values.copy()
    for state in range(env.num_states):
      # Bellman backup against the *previous* sweep's value estimates.
      for action in range(env.num_actions):
        expected_next = np.matmul(env.transition_probs[state, action, :],
                                  values)
        q_values[state, action] = (
            env.rewards[state, action] + env.gamma * expected_next)
      updated_values[state] = q_values[state].max()
    error = np.max(np.abs(updated_values - values))
    values = updated_values
    num_iterations += 1
    if num_iterations % 1000 == 0 and verbose:
      logging.info('Error after %d iterations: %f', num_iterations, error)
  if verbose:
    logging.info('Found V* in %d iterations', num_iterations)
    logging.info(values)
  return values, q_values
def q_value_iteration(env, tolerance):
  """Run Q-value iteration on env.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.
    tolerance: float, error tolerance used to exit loop.

  Returns:
    Numpy array with Q*.
  """
  q_values = np.zeros((env.num_states, env.num_actions))
  error = tolerance * 2
  while error > tolerance:
    # Track the largest update over the *whole* sweep. The previous code
    # recomputed `error` for each (s, a) pair, so the loop condition only
    # saw the last pair's change and could terminate before all entries
    # had converged.
    error = 0.
    for s in range(env.num_states):
      for a in range(env.num_actions):
        old_q_value = q_values[s, a]
        q_values[s, a] = (
            env.rewards[s, a] + env.gamma *
            np.matmul(env.transition_probs[s, a, :], np.max(q_values, axis=1)))
        error = max(error, abs(old_q_value - q_values[s, a]))
  return q_values
def policy_iteration(env, tolerance, verbose=False):
  """Run policy iteration on env.

  Alternates in-place policy evaluation sweeps with greedy policy
  improvement until the policy no longer changes.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.
    tolerance: float, evaluation stops when the value function change is less
      than the tolerance.
    verbose: bool, whether to print verbose messages.

  Returns:
    Numpy array with V*
  """
  values = np.zeros(env.num_states)
  # Random policy
  policy = np.ones((env.num_states, env.num_actions)) / env.num_actions
  policy_stable = False
  i = 0
  while not policy_stable:
    # Policy evaluation
    # Gauss-Seidel style: values[s] is updated in place, so later states in
    # the same sweep already see the new estimates.
    while True:
      delta = 0.
      for s in range(env.num_states):
        v = np.sum(env.rewards[s, :] * policy[s, :] + env.gamma * policy[s, :] *
                   np.matmul(env.transition_probs[s, :, :], values))
        delta = max(delta, abs(v - values[s]))
        values[s] = v
      if delta < tolerance:
        break
    # Policy improvement
    policy_stable = True
    for s in range(env.num_states):
      old = policy[s].copy()
      g = np.zeros(env.num_actions, dtype=float)
      for a in range(env.num_actions):
        g[a] = (
            env.rewards[s, a] +
            env.gamma * np.matmul(env.transition_probs[s, a, :], values))
      # Spread probability uniformly over all actions tied for the maximum.
      greed_actions = np.argwhere(g == np.amax(g))
      for a in range(env.num_actions):
        if a in greed_actions:
          policy[s, a] = 1 / len(greed_actions)
        else:
          policy[s, a] = 0
      if not np.array_equal(policy[s], old):
        policy_stable = False
    i += 1
    if i % 1000 == 0 and verbose:
      logging.info('Error after %d iterations: %f', i, delta)
  if verbose:
    logging.info('Found V* in %d iterations', i)
    logging.info(values)
  return values
def q_policy_iteration(env, tolerance, verbose=False):
  """Run policy iteration on env, tracking Q-values instead of V.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.
    tolerance: float, evaluation stops when the value function change is less
      than the tolerance.
    verbose: bool, whether to print verbose messages.

  Returns:
    Numpy array with Q*.
  """
  q_values = np.zeros((env.num_states, env.num_actions))
  # Random policy
  policy = np.ones((env.num_states, env.num_actions)) / env.num_actions
  policy_stable = False
  i = 0
  while not policy_stable:
    # Policy evaluation
    # In-place sweeps: q_values[s] is overwritten as the sweep progresses.
    while True:
      delta = 0.
      for s in range(env.num_states):
        v = env.rewards[s, :] + env.gamma * np.matmul(
            env.transition_probs[s, :, :], np.sum(q_values * policy, axis=1))
        delta = max(delta, np.max(abs(v- q_values[s])))
        q_values[s] = v
      if delta < tolerance:
        break
    # Policy improvement
    policy_stable = True
    for s in range(env.num_states):
      old = policy[s].copy()
      # Uniform probability over all actions tied for the best Q-value.
      greedy_actions = np.argwhere(q_values[s] == np.amax(q_values[s]))
      for a in range(env.num_actions):
        if a in greedy_actions:
          policy[s, a] = 1 / len(greedy_actions)
        else:
          policy[s, a] = 0
      if not np.array_equal(policy[s], old):
        policy_stable = False
    i += 1
    if i % 1000 == 0 and verbose:
      logging.info('Error after %d iterations: %f', i, delta)
  if verbose:
    logging.info('Found V* in %d iterations', i)
    logging.info(q_values)
  return q_values
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
98c8776b814a794b9aa792a3f3fc1eb2fc895e1c
|
ce6c84c99fc6efa26faececb6aa637b15d417271
|
/SUB/lib/automl.py
|
a5095e2c0eba303d43d381f6a5cf4224d72f1715
|
[] |
no_license
|
freekode1ko/DataScienceJourney
|
879568b084177e2518875b03c4bcec09178ecf3b
|
9eb9c63135b8682ee3cddf2500c388af989627ed
|
refs/heads/master
| 2020-04-20T22:27:24.910331
| 2019-02-04T20:03:20
| 2019-02-04T20:03:20
| 169,140,055
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
import os
import pandas as pd
import numpy as np
from lib.util import timeit, Config
from lib.read import read_df
from lib.preprocess import preprocess
from lib.model import train, predict, validate
from typing import Optional
class AutoML:
    """End-to-end pipeline wrapper: read data, preprocess, train, predict.

    All shared state (task, mode, dtypes, temp dirs, ...) lives in
    `self.config`, which can be persisted via save()/load().
    """

    def __init__(self, model_dir: str):
        os.makedirs(model_dir, exist_ok=True)
        self.config = Config(model_dir)

    def train(self, train_csv: str, mode: str):
        """Fit the model on `train_csv`; `mode` selects the task type."""
        self.config["task"] = "train"
        self.config["mode"] = mode
        self.config.tmp_dir = self.config.model_dir + "/tmp"
        os.makedirs(self.config.tmp_dir, exist_ok=True)
        frame = read_df(train_csv, self.config)
        preprocess(frame, self.config)
        target = frame["target"]
        features = frame.drop("target", axis=1)
        train(features, target, self.config)

    def predict(self, test_csv: str, prediction_csv: str) -> (pd.DataFrame, Optional[np.float64]):
        """Predict `test_csv`, write `prediction_csv`, score if targets exist."""
        self.config["task"] = "predict"
        self.config.tmp_dir = os.path.dirname(prediction_csv) + "/tmp"
        os.makedirs(self.config.tmp_dir, exist_ok=True)
        line_ids = []
        predictions = []
        # Stream the test set in chunks so arbitrarily large files fit in memory.
        for chunk in pd.read_csv(
                test_csv,
                encoding="utf-8",
                low_memory=False,
                dtype=self.config["dtype"],
                parse_dates=self.config["parse_dates"],
                chunksize=self.config["nrows"]
        ):
            line_ids += list(chunk["line_id"])
            preprocess(chunk, self.config)
            predictions += list(predict(chunk, self.config))
        result = pd.DataFrame({"line_id": line_ids, "prediction": predictions})
        result.to_csv(prediction_csv, index=False)
        # By convention, ground truth sits next to the test file as "test-target*".
        target_csv = test_csv.replace("test", "test-target")
        score = validate(result, target_csv, self.config["mode"]) if os.path.exists(target_csv) else None
        return result, score

    @timeit
    def save(self):
        self.config.save()

    @timeit
    def load(self):
        self.config.load()
|
[
"noreply@github.com"
] |
noreply@github.com
|
fca6a52051658560f6a522ad47c53141010c8e4c
|
1396656a60be72e0dbe42a70750a7b775bad40bc
|
/CodeWars/GreatestWarrior.py
|
9b9eb5a08f15cd46f085a42b4158a3a2d56bf188
|
[] |
no_license
|
dianayuying/Python
|
f3b4bf9d2f9866869f811f9f327f1ccdc0de40a9
|
37d2e5b87261b4cc2d05d4e8aeacee2cea87cda2
|
refs/heads/master
| 2023-04-27T07:23:20.339945
| 2018-12-23T20:26:49
| 2018-12-23T20:26:49
| 162,921,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,446
|
py
|
"""
Create a class called Warrior which calculates and keeps track of their level and skills, and ranks them as the warrior they've proven to be.
Business Rules:
A warrior starts at level 1 and can progress all the way to 100.
A warrior starts at rank "Pushover" and can progress all the way to "Greatest".
The only acceptable range of rank values is "Pushover", "Novice", "Fighter", "Warrior", "Veteran", "Sage", "Elite", "Conqueror", "Champion", "Master", "Greatest".
Warriors will compete in battles. Battles will always accept an enemy level to match against your own.
With each battle successfully finished, your warrior's experience is updated based on the enemy's level.
The experience earned from the battle is relative to what the warrior's current level is compared to the level of the enemy.
A warrior's experience starts from 100. Each time the warrior's experience increases by another 100, the warrior's level rises to the next level.
A warrior's experience is cumulative, and does not reset with each rise of level. The only exception is when the warrior reaches level 100, with which the experience stops at 10000
At every 10 levels, your warrior will reach a new rank tier. (ex. levels 1-9 falls within "Pushover" tier, levels 80-89 fall within "Champion" tier, etc.)
A warrior cannot progress beyond level 100 and rank "Greatest".
Battle Progress Rules & Calculations:
If an enemy level does not fall in the range of 1 to 100, the battle cannot happen and should return "Invalid level".
Completing a battle against an enemy with the same level as your warrior will be worth 10 experience points.
Completing a battle against an enemy who is one level lower than your warrior will be worth 5 experience points.
Completing a battle against an enemy who is two levels lower or more than your warrior will give 0 experience points.
Completing a battle against an enemy who is one level higher or more than your warrior will accelarate your experience gaining. The greater the difference between levels, the more experinece your warrior will gain. The formula is 20 * diff * diff where diff equals the difference in levels between the enemy and your warrior.
However, if your warrior is at least one rank lower than your enemy, and at least 5 levels lower, your warrior cannot fight against an enemy that strong and must instead return "You've been defeated".
Every successful battle will also return one of three responses: "Easy fight", "A good fight", "An intense fight". Return "Easy fight" if your warrior is 2 or more levels higher than your enemy's level. Return "A good fight" if your warrior is either 1 level higher or equal to your enemy's level. Return "An intense fight" if your warrior's level is lower than the enemy's level.
"""
class Warrior():
    """Tracks a warrior's level, rank and experience per the kata rules."""

    # Rank tier for each block of ten levels (level // 10 -> rank name).
    ranking = {0: "Pushover", 1: "Novice", 2: "Fighter", 3: "Warrior", 4: "Veteran",
               5: "Sage", 6: "Elite", 7: "Conqueror", 8: "Champion", 9: "Master", 10: "Greatest"}

    def __init__(self):
        self.level = 1
        self.rank = "Pushover"
        self.experience = 100
        self.achievements = []

    def training(self, desc_list):
        """Complete a training [description, gained_exp, min_level]."""
        description, gained, minimum_level = desc_list
        if minimum_level > self.level:
            return "Not strong enough"
        self.achievements.append(description)
        self.experience += gained
        self.__update_progress()
        return description

    def battle(self, enemy_level):
        """Fight an enemy of `enemy_level`; returns the outcome message."""
        if enemy_level < 1 or enemy_level > 100:
            return "Invalid level"
        gap = enemy_level - self.level
        if gap == 0:
            self.experience += 10
        elif gap == -1:
            self.experience += 5
        elif gap > 0:
            # A fight is lost outright against a foe 5+ levels up AND at
            # least one rank tier above.
            outranked = int(enemy_level / 10) >= int(self.level / 10) + 1
            if gap >= 5 and outranked:
                return "You've been defeated"
            self.experience += 20 * gap * gap
        self.__update_progress()
        if gap <= -2:
            return "Easy fight"
        if gap == -1 or gap == 0:
            return "A good fight"
        return "An intense fight"

    def __update_progress(self):
        # Experience caps at 10000 (level 100, rank "Greatest").
        self.experience = min(self.experience, 10000)
        self.level = int(self.experience / 100)
        self.rank = Warrior.ranking[int(self.level / 10)]
|
[
"diana.yuying@gmail.com"
] |
diana.yuying@gmail.com
|
d8b576d9d8573f3e6f58e5e34ae445177ff8f207
|
62c9d736470c3f535de4fe5be56fea3334a081c4
|
/scripts/download-data.py
|
9ab361bf04e11aa1c721bea228c4e375dc378f4c
|
[] |
no_license
|
abachman/interactive-spaces-1
|
646f002b8136d054224c555cfb96cfa64b61babf
|
494e8dd82dc3848ad01a583453bd4f94aaff4508
|
refs/heads/master
| 2022-03-17T06:31:46.245542
| 2019-11-20T16:19:08
| 2019-11-20T16:19:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
# download all data from the 4 MICAVIBE feeds
import sys
import os
import re
import time
import json
import re
from io import StringIO
if sys.version_info < (3, 0):
print("make sure you're using python3 or python version 3.0 or higher")
os.exit(1)
import urllib.parse
import http.client
def parse_next_value(instr):
    """Extract the URL tagged rel="next" from an HTTP Link header.

    A Link header carries one or more comma-separated links, each shaped
    '<url>; rel="..."'. The previous implementation searched the
    'rel="next"' fragment itself for '<url>' — which never contains one —
    so it always hit the TypeError fallback and returned None, silently
    stopping pagination after the first page.

    Args:
        instr: raw Link header value, or None when the header is absent.

    Returns:
        The next-page URL string, or None when there is no next link.
    """
    if not instr:
        return None
    for link in instr.split(','):
        parts = [part.strip() for part in link.split(';')]
        if any(re.match('rel="next"', part) for part in parts):
            # The URL is the '<...>' token, by convention the first part.
            found = re.search("<(.+)>", parts[0])
            if found:
                return found[1]
            print('no URL found in link header', link)
    return None
def download(url, out_file, label):
    """Fetch one page of feed data and append it to `out_file` as CSV rows.

    Writes one 'created_epoch,value' line per record, logs progress, and
    returns the next-page URL parsed from the Link header (None when this
    was the last page or the page was empty).
    """
    source = urllib.parse.urlparse(url)
    conn = http.client.HTTPSConnection(source.hostname, source.port)
    conn.request("GET", url)
    response = conn.getresponse()
    body = response.read()
    body_json = json.loads(body)
    if len(body_json) > 0:
        for record in body_json:
            # NOTE(review): `ts` is assigned but never used.
            ts = record['created_epoch']
            line = "{},{}\n".format(record['created_epoch'], record['value'])
            out_file.write(line)
        # `record` here is the last record of the loop above.
        print(
            "< {} {} ending on {} {} ({} total)".format(
                len(body_json),
                label,
                record['id'], record['created_at'],
                response.getheader('X-Pagination-Total')
            )
        )
        return parse_next_value(response.getheader('Link'))
    return None
def get_all_data(url, file_path, label):
    """Follow pagination starting at `url`, then write all rows to `file_path`.

    Pages are buffered in memory and flushed to disk in one write once the
    last page has been fetched; requests are throttled to one per second.
    """
    buffer = StringIO()
    page = url
    while page:
        page = download(page, buffer, label)
        if page:
            time.sleep(1)
    with open(file_path, 'w') as out_file:
        out_file.write(buffer.getvalue())
    buffer.close()
if __name__ == "__main__":
    #
    # https://io.adafruit.com/api/v2/mica_ia/feeds/mood/data
    # https://io.adafruit.com/api/v2/mica_ia/feeds/split-motion/data
    # https://io.adafruit.com/api/v2/mica_ia/feeds/sound/data
    # https://io.adafruit.com/api/v2/mica_ia/feeds/sound-2/data
    #
    # Each entry: (human label, feed URL, destination CSV path).
    destination = "/var/www/app/shared/data/"
    collections = (
        ("Mood", "https://io.adafruit.com/api/v2/mica_ia/feeds/mood/data", destination + 'mood.csv'),
        ("Motion", "https://io.adafruit.com/api/v2/mica_ia/feeds/split-motion/data", destination + 'motion.csv'),
        ("Sound 1", "https://io.adafruit.com/api/v2/mica_ia/feeds/sound/data", destination + 'sound-1.csv'),
        ("Sound 2", "https://io.adafruit.com/api/v2/mica_ia/feeds/sound-2/data", destination + 'sound-2.csv'),
    )
    # Download every feed in sequence, one CSV file per feed.
    for label, url, filepath in collections:
        print("---------------------------------------------------------")
        print(time.time(), "getting", url, "into", filepath)
        print("---------------------------------------------------------")
        get_all_data(url, filepath, label)
|
[
"adam.bachman@gmail.com"
] |
adam.bachman@gmail.com
|
30c051b3af9691fb1d09fd55b0cfda9348103df1
|
c1d7cb2489d19fca3bcb1627c2be62745af9c075
|
/Section-4/Source Code/iterator_example.py
|
0b71e1f52c7984d5d05a5d3c6adc8b71f92e7fb7
|
[] |
no_license
|
Yasser-Abbass/Python-From-Scratch
|
01c13726ff94fba8796b80eca5c4d02c93d4b658
|
4973535fa8252a4de0755290964f418e708b21fd
|
refs/heads/master
| 2022-12-11T10:05:27.826773
| 2020-09-10T21:09:55
| 2020-09-10T21:09:55
| 294,151,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
import sys
class Fib:
    """Iterator yielding the Fibonacci numbers strictly below `num`."""

    def __init__(self, num):
        self.num = num      # exclusive upper bound for yielded values
        self.a = 0          # current Fibonacci number
        self.b = 1          # next Fibonacci number
        self.result = 0     # last value handed out

    def __iter__(self):
        return self

    def __next__(self):
        # Guard clause: stop as soon as the current value reaches the bound.
        if self.a >= self.num:
            raise StopIteration
        self.result = self.a
        self.a, self.b = self.b, self.a + self.b
        return self.result
if __name__ == "__main__":
    # The exclusive upper bound comes from the first CLI argument.
    x = int(sys.argv[1])
    y = Fib(x)
    results = []
    for result in y:
        results.append(result)
    print(results)
|
[
"mongo.yasso@gmail.com"
] |
mongo.yasso@gmail.com
|
8cafbf132ca8eb8b86bedf45f6b404078bcc3054
|
1f127d9c25b2a3ff842019fffeaad4e8ff861ca7
|
/Articles/models.py
|
83b8b3da0b6e63692281463036ccb144aa4d55c0
|
[
"MIT"
] |
permissive
|
Hady-Eslam/Articles_Analyzing
|
b8caa49b5b21589e8ec5b101e5a52c92f747ff3e
|
885232db89cec88fc39e8260e7fde4241f4d7280
|
refs/heads/master
| 2021-07-09T01:43:18.097163
| 2020-07-21T09:42:26
| 2020-07-21T09:42:26
| 156,908,808
| 0
| 2
| null | 2020-07-21T09:42:28
| 2018-11-09T19:30:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
from django.db import models
from Python import init
class Posts(models.Model):
    """An article post; the author is referenced by email, not a ForeignKey."""
    # Field lengths are centralized in the shared `init` constants module.
    User_Email = models.CharField(max_length=init.Email_Len)
    ArticleTitle = models.CharField(max_length=init.ArticleTitle_Len)
    Article = models.CharField(max_length=init.Article_Len)
    Tags = models.CharField(max_length=init.ArticleTags_Len, default='')
    # Presumably a soft-delete flag — confirm against the views. NOTE(review):
    # no default declared, so every create must supply a value explicitly.
    Deleted = models.BooleanField()
    Date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return 'User Email Who Write The Post : ' + str(self.User_Email)
class LikesDisLikes(models.Model):
    """A like/dislike vote by a user on a post (Status presumably True=like)."""
    # NOTE(review): lengths are hard-coded (110 / 11) instead of using the
    # `init` constants like Posts/Comments — consider unifying.
    User_Email = models.CharField(max_length=110)
    # NOTE(review): stored as CharField here but IntegerField in Comments;
    # presumably both reference Posts.id — confirm, and consider a ForeignKey.
    Post_id = models.CharField(max_length=11)
    Status = models.BooleanField()
    Date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return 'User : ' + str(self.User_Email) + ' Like Or DisLike Post : ' + str(self.Post_id)
class Comments(models.Model):
    """A user comment attached to a post (post referenced by integer id)."""
    Post_id = models.IntegerField()
    User_Email = models.CharField(max_length=init.Email_Len)
    Comment = models.CharField(max_length=init.Comment_Len)
    Date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return 'User : ' + str(self.User_Email) + ' Comment in Post : ' + str(self.Post_id)
|
[
"abdoaslam000@gmail.com"
] |
abdoaslam000@gmail.com
|
da4534207fbc24e56d3c2408862c3063b04a07fc
|
c2d018005ea56960a23faf173e4999bf3802eff6
|
/todo_app/settings.py
|
32db07f5ce7985623784e775068493a9dc80417f
|
[] |
no_license
|
AhmedMoustafaa/to-do-app
|
fb3a8c5188715559440b17649c0dba063f5cebd3
|
d9486f230dd93564b73a41e10f880718792aabd3
|
refs/heads/master
| 2023-08-12T02:31:40.391955
| 2020-06-07T16:13:31
| 2020-06-07T16:13:31
| 270,361,568
| 0
| 0
| null | 2021-09-22T19:14:31
| 2020-06-07T16:16:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
"""
Django settings for todo_app project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j!h1e6_v68(_xfc19mtq49c!-c62g1w^^3un6wqhmc6qv6da96'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# local apps
'todo.apps.TodoConfig',
'users.apps.UsersConfig',
# third parties
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
|
[
"brian.laggy@gmail.com"
] |
brian.laggy@gmail.com
|
22aa0e448f5799c401421eb60b5b4237dab843a8
|
e655fafdf3a675d917135f333d6fa3d7fb9c59f6
|
/Exercícios/ex047.py
|
d29906501bb3ed21ba27de51cd130cdf17b1a1f1
|
[] |
no_license
|
Sabrina-AP/python-curso_em_video
|
7b595242a174f10e7e903cb73c8ea395d01ba609
|
43fef2a0aa67d2a67181bc5b73fb3b5de163b1d9
|
refs/heads/main
| 2023-03-25T22:52:42.811127
| 2021-03-23T17:49:44
| 2021-03-23T17:49:44
| 350,805,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
#contagem de pares
for i in range(1,51):
if i%2==0:
print(i, end=' ')
print('Acabou')
|
[
"noreply@github.com"
] |
noreply@github.com
|
b7f12b68859daa4b1cac219a0c23a17281619f5b
|
6ebd192d228c04152ea914a3130d1e34226ac412
|
/tests/unit/generate/test_context.py
|
eb565ca18a0832a2a5d8b33495d7e1aba7d2833d
|
[
"MIT"
] |
permissive
|
STAMP-project/camp
|
3acfa6746e30914e159735305328ef3ccc51eabe
|
e8652ddf3e2e84ffbf2b9dff3fb5ee678b209246
|
refs/heads/master
| 2022-12-23T09:35:43.045284
| 2021-11-18T15:56:05
| 2021-11-18T15:56:05
| 114,260,055
| 10
| 11
|
MIT
| 2022-12-16T02:41:51
| 2017-12-14T14:28:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,347
|
py
|
#
# CAMP
#
# Copyright (C) 2017 -- 2019 SINTEF Digital
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from unittest import TestCase
from camp.entities.model import Model, Component, Service, Feature, Variable, Goals
from camp.generate import Context
from ozepy import start_over
class LoadedContextIncludes(TestCase):
    """Verifies that loading the metamodel and a model into a Context exposes
    every expected symbol (metaclasses, services, features, components and
    qualified variable/value names)."""
    def setUp(self):
        # Reset ozepy's global state so each test starts from a clean slate.
        start_over()
        self._context = Context()
        # One component providing one service/feature, with a single
        # string variable "memory" taking two possible values.
        components = [
            Component("c1",
                      provided_services=[Service("S1")],
                      provided_features=[Feature("F1")],
                      variables=[Variable("memory",
                                          str,
                                          values=["1GB",
                                                  "2GB"])])
        ]
        self._model = Model(components,
                            Goals(services=[Service("S1")]))
        self._context.load_metamodel()
        self._context.load_model(self._model)
    def test_all_metaclasses(self):
        for each in self.EXPECTED_CLASSES:
            self.assertIn(each, self._context)
    # Metaclass names the metamodel must always register.
    EXPECTED_CLASSES = ["Value", "Variable", "Service",
                        "Feature", "Component", "CInstance"]
    def test_all_services(self):
        for each in self._model.services:
            self.assertIn(each.name, self._context)
    def test_all_features(self):
        for each in self._model.features:
            self.assertIn(each.name, self._context)
    def test_all_components(self):
        for each in self._model.components:
            self.assertIn(each.name, self._context)
    def test_all_variables_with_qualified_names(self):
        # Variables are registered as "<component>_<variable>".
        for each_component in self._model.components:
            for each_variable in each_component.variables:
                qualified_name = "_".join([each_component.name, each_variable.name])
                self.assertIn(qualified_name, self._context)
    def test_all_values_slots_with_qualified_names(self):
        # Value slots are registered as "<component>_0_<variable>".
        for each_component in self._model.components:
            for each_variable in each_component.variables:
                qualified_name = "_".join([each_component.name, "0", each_variable.name])
                self.assertIn(qualified_name, self._context)
|
[
"franck.chauvel@gmail.com"
] |
franck.chauvel@gmail.com
|
7a3484ca24eee71aa63e4e1eb0f4a392f1f4784a
|
41b4702e359e3352116eeecf2bdf59cb13c71cf2
|
/contextual_bcq/rand_param_envs/mujoco_py/mjlib.py
|
8f2cf8a780c82d64a893cfd22c85aaf7d6219ce8
|
[] |
no_license
|
CaralHsi/Multi-Task-Batch-RL
|
b0aad53291c1713fd2d89fa4fff4a85c98427d4d
|
69d29164ab7d82ec5e06a929ed3b96462db21853
|
refs/heads/master
| 2022-12-22T19:23:45.341092
| 2020-10-01T00:05:36
| 2020-10-01T00:05:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,301
|
py
|
from ctypes import *
import os
from .util import *
from .mjtypes import *
from rand_param_envs.mujoco_py import config
path_prefix = config.mjpro_path
if sys.platform.startswith("darwin"):
libfile = os.path.join(path_prefix, "bin/libmujoco131.dylib")
elif sys.platform.startswith("linux"):
libfile = os.path.join(path_prefix, "bin/libmujoco131.so")
elif sys.platform.startswith("win"):
libfile = os.path.join(path_prefix, "bin/mujoco131.lib")
else:
raise RuntimeError("Unrecognized platform %s" % sys.platform)
if not os.path.exists(libfile):
raise RuntimeError("Missing path: %s. (HINT: you should have unzipped the mjpro131.zip bundle without modification.)" % libfile)
mjlib = cdll.LoadLibrary(os.path.abspath(libfile))
# --- Model XML (de)serialization -------------------------------------------
# Declare argtypes/restype so ctypes marshals arguments correctly; the
# prototypes mirror the MuJoCo 1.31 C API.
mjlib.mj_loadXML.argtypes = [String, String, c_char_p, c_int]
mjlib.mj_loadXML.restype = POINTER(MJMODEL)
mjlib.mj_saveXML.argtypes = [String, POINTER(MJMODEL), String]
mjlib.mj_saveXML.restype = c_int
#mjlib.mj_printSchema.argtypes = [String, String, c_int, c_int, c_int]
#mjlib.mj_printSchema.restype = c_int
# --- License activation and simulation stepping ----------------------------
mjlib.mj_activate.argtypes = [String]
mjlib.mj_activate.restype = c_int
mjlib.mj_step.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step.restype = None
mjlib.mj_step1.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step1.restype = None
mjlib.mj_step2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step2.restype = None
mjlib.mj_forward.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_forward.restype = None
#mjlib.mj_inverse.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_inverse.restype = None
#mjlib.mj_forwardSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_forwardSkip.restype = None
#mjlib.mj_inverseSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_inverseSkip.restype = None
#mjlib.mj_sensor.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_sensor.restype = None
#mjlib.mj_energy.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_energy.restype = None
#mjlib.mj_defaultSolRefImp.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mj_defaultSolRefImp.restype = None
#mjlib.mj_defaultOption.argtypes = [POINTER(mjOption)]
#mjlib.mj_defaultOption.restype = None
#mjlib.mj_defaultVisual.argtypes = [POINTER(mjVisual)]
#mjlib.mj_defaultVisual.restype = None
#mjlib.mj_copyModel.argtypes = [POINTER(MJMODEL), POINTER(MJMODEL)]
#mjlib.mj_copyModel.restype = POINTER(MJMODEL)
#mjlib.mj_saveModel.argtypes = [POINTER(MJMODEL), String, c_int, POINTER(None)]
#mjlib.mj_saveModel.restype = None
#mjlib.mj_loadModel.argtypes = [String, c_int, POINTER(None)]
#mjlib.mj_loadModel.restype = POINTER(MJMODEL)
# --- Model/data lifecycle: allocate, reset, and free mjModel/mjData --------
mjlib.mj_deleteModel.argtypes = [POINTER(MJMODEL)]
mjlib.mj_deleteModel.restype = None
#mjlib.mj_sizeModel.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_sizeModel.restype = c_int
mjlib.mj_makeData.argtypes = [POINTER(MJMODEL)]
mjlib.mj_makeData.restype = POINTER(MJDATA)
#mjlib.mj_copyData.argtypes = [POINTER(MJDATA), POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_copyData.restype = POINTER(MJDATA)
mjlib.mj_resetData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_resetData.restype = None
#mjlib.mj_stackAlloc.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_stackAlloc.restype = POINTER(c_double)
mjlib.mj_deleteData.argtypes = [POINTER(MJDATA)]
mjlib.mj_deleteData.restype = None
#mjlib.mj_resetCallbacks.argtypes = []
#mjlib.mj_resetCallbacks.restype = None
#mjlib.mj_setConst.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_setConst.restype = None
#mjlib.mj_printModel.argtypes = [POINTER(MJMODEL), String]
#mjlib.mj_printModel.restype = None
#mjlib.mj_printData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), String]
#mjlib.mj_printData.restype = None
#mjlib.mju_printMat.argtypes = [POINTER(c_double), c_int, c_int]
#mjlib.mju_printMat.restype = None
#mjlib.mj_fwdPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdPosition.restype = None
#mjlib.mj_fwdVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdVelocity.restype = None
#mjlib.mj_fwdActuation.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdActuation.restype = None
#mjlib.mj_fwdAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdAcceleration.restype = None
#mjlib.mj_fwdConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdConstraint.restype = None
#mjlib.mj_Euler.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_Euler.restype = None
#mjlib.mj_RungeKutta.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_RungeKutta.restype = None
#mjlib.mj_invPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invPosition.restype = None
#mjlib.mj_invVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invVelocity.restype = None
#mjlib.mj_invConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invConstraint.restype = None
#mjlib.mj_compareFwdInv.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_compareFwdInv.restype = None
#mjlib.mj_checkPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkPos.restype = None
#mjlib.mj_checkVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkVel.restype = None
#mjlib.mj_checkAcc.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkAcc.restype = None
#mjlib.mj_kinematics.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_kinematics.restype = None
#mjlib.mj_comPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comPos.restype = None
#mjlib.mj_tendon.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_tendon.restype = None
#mjlib.mj_transmission.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_transmission.restype = None
#mjlib.mj_crb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_crb.restype = None
#mjlib.mj_factorM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_factorM.restype = None
#mjlib.mj_backsubM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM.restype = None
#mjlib.mj_backsubM2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM2.restype = None
#mjlib.mj_comVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comVel.restype = None
#mjlib.mj_passive.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_passive.restype = None
#mjlib.mj_rne.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_rne.restype = None
#mjlib.mj_rnePostConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_rnePostConstraint.restype = None
#mjlib.mj_collision.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_collision.restype = None
#mjlib.mj_makeConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_makeConstraint.restype = None
#mjlib.mj_projectConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_projectConstraint.restype = None
#mjlib.mj_referenceConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_referenceConstraint.restype = None
#mjlib.mj_isPyramid.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isPyramid.restype = c_int
#mjlib.mj_isSparse.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isSparse.restype = c_int
#mjlib.mj_mulJacVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacVec.restype = None
#mjlib.mj_mulJacTVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacTVec.restype = None
#mjlib.mj_jac.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jac.restype = None
#mjlib.mj_jacBody.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBody.restype = None
#mjlib.mj_jacBodyCom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBodyCom.restype = None
#mjlib.mj_jacGeom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacGeom.restype = None
#mjlib.mj_jacSite.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacSite.restype = None
#mjlib.mj_jacPointAxis.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacPointAxis.restype = None
# mj_name2id(model, type, name) -> object id (or -1 if not found).
mjlib.mj_name2id.argtypes = [POINTER(MJMODEL), c_int, String] # The middle term is a mjtObj (an enum) in C.
mjlib.mj_name2id.restype = c_int
#mjlib.mj_id2name.argtypes = [POINTER(MJMODEL), mjtObj, c_int]
#mjlib. mj_id2name.restype = ReturnString
#mjlib.else:
#mjlib. mj_id2name.restype = String
#mjlib. mj_id2name.errcheck = ReturnString
#mjlib.mj_fullM.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_fullM.restype = None
#mjlib.mj_mulM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulM.restype = None
#mjlib.mj_applyFT.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, POINTER(c_double)]
#mjlib.mj_applyFT.restype = None
#mjlib.mj_objectVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), mjtByte]
#mjlib.mj_objectVelocity.restype = None
#mjlib.mj_objectAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), mjtByte]
#mjlib.mj_objectAcceleration.restype = None
#mjlib.mj_contactForce.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_contactForce.restype = None
#mjlib.mj_integratePos.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mj_integratePos.restype = None
#mjlib.mj_normalizeQuat.argtypes = [POINTER(MJMODEL), POINTER(c_double)]
#mjlib.mj_normalizeQuat.restype = None
#mjlib.mj_local2Global.argtypes = [POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_local2Global.restype = None
#mjlib.mj_getTotalmass.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_getTotalmass.restype = c_double
#mjlib.mj_setTotalmass.argtypes = [POINTER(MJMODEL), c_double]
#mjlib.mj_setTotalmass.restype = None
#mjlib.mj_version.argtypes = []
#mjlib.mj_version.restype = c_double
# --- Abstract visualization (mjv_*): geoms, lights, camera control ----------
mjlib.mjv_makeObjects.argtypes = [POINTER(MJVOBJECTS), c_int]
mjlib.mjv_makeObjects.restype = None
mjlib.mjv_freeObjects.argtypes = [POINTER(MJVOBJECTS)]
mjlib.mjv_freeObjects.restype = None
mjlib.mjv_defaultOption.argtypes = [POINTER(MJVOPTION)]
mjlib.mjv_defaultOption.restype = None
#mjlib.mjv_defaultCameraPose.argtypes = [POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_defaultCameraPose.restype = None
mjlib.mjv_defaultCamera.argtypes = [POINTER(MJVCAMERA)]
mjlib.mjv_defaultCamera.restype = None
mjlib.mjv_setCamera.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVCAMERA)]
mjlib.mjv_setCamera.restype = None
mjlib.mjv_updateCameraPose.argtypes = [POINTER(MJVCAMERA), c_double]
mjlib.mjv_updateCameraPose.restype = None
#mjlib.mjv_convert3D.argtypes = [POINTER(c_double), POINTER(c_double), c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert3D.restype = None
#mjlib.mjv_convert2D.argtypes = [POINTER(c_double), mjtMouse, c_double, c_double, c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert2D.restype = None
mjlib.mjv_moveCamera.argtypes = [c_int, c_float, c_float, POINTER(MJVCAMERA), c_float, c_float]
mjlib.mjv_moveCamera.restype = None
#mjlib.mjv_moveObject.argtypes = [mjtMouse, c_float, c_float, POINTER(MJVCAMERAPOSE), c_float, c_float, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_moveObject.restype = None
mjlib.mjv_mousePerturb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
mjlib.mjv_mousePerturb.restype = None
#mjlib.mjv_mouseEdit.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_mouseEdit.restype = None
mjlib.mjv_makeGeoms.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS), POINTER(MJVOPTION), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
mjlib.mjv_makeGeoms.restype = None
mjlib.mjv_makeLights.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS)]
mjlib.mjv_makeLights.restype = None
# --- OpenGL rendering (mjr_*) ----------------------------------------------
mjlib.mjr_overlay.argtypes = [MJRRECT, c_int, c_int, String, String, POINTER(MJRCONTEXT)]
mjlib.mjr_overlay.restype = None
#mjlib.mjr_rectangle.argtypes = [c_int, MJRRECT, c_double, c_double, c_double, c_double, c_double, c_double, c_double, c_double]
#mjlib.mjr_rectangle.restype = None
#mjlib.mjr_finish.argtypes = []
#mjlib.mjr_finish.restype = None
#mjlib.mjr_text.argtypes = [String, POINTER(MJRCONTEXT), c_int, c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_text.restype = None
#mjlib.mjr_textback.argtypes = [String, POINTER(MJRCONTEXT), c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_textback.restype = None
#mjlib.mjr_textWidth.argtypes = [String, POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_textWidth.restype = c_int
# --- Renderer options and render-context lifecycle --------------------------
mjlib.mjr_defaultOption.argtypes = [POINTER(MJROPTION)]
mjlib.mjr_defaultOption.restype = None
mjlib.mjr_defaultContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_defaultContext.restype = None
#mjlib.mjr_uploadTexture.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_uploadTexture.restype = None
mjlib.mjr_makeContext.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
mjlib.mjr_makeContext.restype = None
mjlib.mjr_freeContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_freeContext.restype = None
mjlib.mjr_render.argtypes = [c_int, MJRRECT, POINTER(MJVOBJECTS), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
mjlib.mjr_render.restype = None
#mjlib.mjr_select.argtypes = [MJRRECT, POINTER(MJVOBJECTS), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
#mjlib.mjr_select.restype = c_int
#mjlib.mjr_showOffscreen.argtypes = [c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showOffscreen.restype = None
#mjlib.mjr_showBuffer.argtypes = [POINTER(c_ubyte), c_int, c_int, c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showBuffer.restype = None
#mjlib.mjr_getOffscreen.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getOffscreen.restype = None
#mjlib.mjr_getBackbuffer.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getBackbuffer.restype = None
#mjlib.
#mjlib.
#mjlib.mju_error.argtypes = [String]
#mjlib.mju_error.restype = None
#mjlib.mju_error_i.argtypes = [String, c_int]
#mjlib.mju_error_i.restype = None
#mjlib.mju_error_s.argtypes = [String, String]
#mjlib.mju_error_s.restype = None
#mjlib.mju_warning.argtypes = [String]
#mjlib.mju_warning.restype = None
#mjlib.mju_warning_i.argtypes = [String, c_int]
#mjlib.mju_warning_i.restype = None
#mjlib.mju_warning_s.argtypes = [String, String]
#mjlib.mju_warning_s.restype = None
#mjlib.mju_clearHandlers.argtypes = []
#mjlib.mju_clearHandlers.restype = None
#mjlib.mju_malloc.argtypes = [c_size_t]
#mjlib.mju_malloc.restype = POINTER(None)
#mjlib.mju_free.argtypes = [POINTER(None)]
#mjlib.mju_free.restype = None
#mjlib.mj_warning.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_warning.restype = None
#mjlib.mju_zero3.argtypes = [POINTER(c_double)]
#mjlib.mju_zero3.restype = None
#mjlib.mju_copy3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_copy3.restype = None
#mjlib.mju_scl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_scl3.restype = None
#mjlib.mju_add3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_add3.restype = None
#mjlib.mju_sub3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_sub3.restype = None
#mjlib.mju_addTo3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_addTo3.restype = None
#mjlib.mju_addToScl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addToScl3.restype = None
#mjlib.mju_addScl3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addScl3.restype = None
#mjlib.mju_normalize3.argtypes = [POINTER(c_double)]
#mjlib.mju_normalize3.restype = c_double
#mjlib.mju_norm3.argtypes = [POINTER(c_double)]
#mjlib.mju_norm3.restype = c_double
#mjlib.mju_dot3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dot3.restype = c_double
#mjlib.mju_dist3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dist3.restype = c_double
#mjlib.mju_rotVecMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMat.restype = None
#mjlib.mju_rotVecMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMatT.restype = None
#mjlib.mju_cross.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_cross.restype = None
#mjlib.mju_zero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_zero.restype = None
#mjlib.mju_copy.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_copy.restype = None
#mjlib.mju_scl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_scl.restype = None
#mjlib.mju_add.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_add.restype = None
#mjlib.mju_sub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_sub.restype = None
#mjlib.mju_addTo.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_addTo.restype = None
#mjlib.mju_addToScl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addToScl.restype = None
#mjlib.mju_addScl.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addScl.restype = None
#mjlib.mju_normalize.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_normalize.restype = c_double
#mjlib.mju_norm.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_norm.restype = c_double
#mjlib.mju_dot.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_dot.restype = c_double
#mjlib.mju_mulMatVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatVec.restype = None
#mjlib.mju_mulMatTVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatTVec.restype = None
#mjlib.mju_transpose.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_transpose.restype = None
#mjlib.mju_mulMatMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMat.restype = None
#mjlib.mju_mulMatMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMatT.restype = None
#mjlib.mju_sqrMat.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int, POINTER(c_double), c_int]
#mjlib.mju_sqrMat.restype = None
#mjlib.mju_mulMatTMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatTMat.restype = None
#mjlib.mju_transformSpatial.argtypes = [POINTER(c_double), POINTER(c_double), mjtByte, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_transformSpatial.restype = None
#mjlib.mju_rotVecQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecQuat.restype = None
#mjlib.mju_negQuat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_negQuat.restype = None
#mjlib.mju_mulQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuat.restype = None
#mjlib.mju_mulQuatAxis.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuatAxis.restype = None
#mjlib.mju_axisAngle2Quat.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_axisAngle2Quat.restype = None
#mjlib.mju_quat2Vel.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quat2Vel.restype = None
#mjlib.mju_quat2Mat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quat2Mat.restype = None
#mjlib.mju_mat2Quat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mat2Quat.restype = None
#mjlib.mju_derivQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_derivQuat.restype = None
#mjlib.mju_quatIntegrate.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quatIntegrate.restype = None
#mjlib.mju_quatZ2Vec.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quatZ2Vec.restype = None
#mjlib.mju_cholFactor.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_double, c_double, POINTER(c_double)]
#mjlib.mju_cholFactor.restype = c_int
#mjlib.mju_cholBacksub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_cholBacksub.restype = None
#mjlib.mju_eig3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_eig3.restype = c_int
#mjlib.mju_muscleFVL.argtypes = [c_double, c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_muscleFVL.restype = c_double
#mjlib.mju_musclePassive.argtypes = [c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_musclePassive.restype = c_double
#mjlib.mju_pneumatic.argtypes = [c_double, c_double, c_double, POINTER(c_double), c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_pneumatic.restype = c_double
#mjlib.mju_encodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_encodePyramid.restype = None
#mjlib.mju_decodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_decodePyramid.restype = None
#mjlib.mju_springDamper.argtypes = [c_double, c_double, c_double, c_double, c_double]
#mjlib.mju_springDamper.restype = c_double
#mjlib.mju_min.argtypes = [c_double, c_double]
#mjlib.mju_min.restype = c_double
#mjlib.mju_max.argtypes = [c_double, c_double]
#mjlib.mju_max.restype = c_double
#mjlib.mju_sign.argtypes = [c_double]
#mjlib.mju_sign.restype = c_double
#mjlib.mju_round.argtypes = [c_double]
#mjlib.mju_round.restype = c_int
#mjlib.mju_type2Str.argtypes = [c_int]
#mjlib. mju_type2Str.restype = ReturnString
#mjlib.else:
#mjlib. mju_type2Str.restype = String
#mjlib. mju_type2Str.errcheck = ReturnString
#mjlib.mju_str2Type.argtypes = [String]
#mjlib.mju_str2Type.restype = mjtObj
#mjlib.mju_warningText.argtypes = [c_int]
#mjlib. mju_warningText.restype = ReturnString
#mjlib.else:
#mjlib. mju_warningText.restype = String
#mjlib. mju_warningText.errcheck = ReturnString
#mjlib.mju_isBad.argtypes = [c_double]
#mjlib.mju_isBad.restype = c_int
#mjlib.mju_isZero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_isZero.restype = c_int
|
[
"jil021@eng.ucsd.edu"
] |
jil021@eng.ucsd.edu
|
2acbc2e004d4d067218de078794ec2dd281455fd
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/cosmos/azure-mgmt-cosmosdb/generated_samples/cosmos_db_sql_container_create_update.py
|
4eb9b7c581d3ad5045f9f14afe3e0ab5a7f5f6c1
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,434
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cosmosdb
# USAGE
python cosmos_db_sql_container_create_update.py
Before running the sample, set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create or update a Cosmos DB SQL container with the example settings
    (client-side encryption policy, indexing policy, hash partition key,
    unique-key policy, default TTL, and last-writer-wins conflict resolution).

    Requires AZURE_CLIENT_ID / AZURE_TENANT_ID / AZURE_CLIENT_SECRET in the
    environment for DefaultAzureCredential.
    """
    client = CosmosDBManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    # begin_* returns an ARM long-running-operation poller; .result() blocks
    # until the container create/update completes.
    response = client.sql_resources.begin_create_update_sql_container(
        resource_group_name="rg1",
        account_name="ddb1",
        database_name="databaseName",
        container_name="containerName",
        create_update_sql_container_parameters={
            "location": "West US",
            "properties": {
                "options": {},
                "resource": {
                    # Always-encrypted column policy: deterministic encryption on /path.
                    "clientEncryptionPolicy": {
                        "includedPaths": [
                            {
                                "clientEncryptionKeyId": "keyId",
                                "encryptionAlgorithm": "AEAD_AES_256_CBC_HMAC_SHA256",
                                "encryptionType": "Deterministic",
                                "path": "/path",
                            }
                        ],
                        "policyFormatVersion": 2,
                    },
                    "conflictResolutionPolicy": {"conflictResolutionPath": "/path", "mode": "LastWriterWins"},
                    "defaultTtl": 100,
                    "id": "containerName",
                    # Range-index every path for both strings and numbers.
                    "indexingPolicy": {
                        "automatic": True,
                        "excludedPaths": [],
                        "includedPaths": [
                            {
                                "indexes": [
                                    {"dataType": "String", "kind": "Range", "precision": -1},
                                    {"dataType": "Number", "kind": "Range", "precision": -1},
                                ],
                                "path": "/*",
                            }
                        ],
                        "indexingMode": "consistent",
                    },
                    "partitionKey": {"kind": "Hash", "paths": ["/AccountNumber"]},
                    "uniqueKeyPolicy": {"uniqueKeys": [{"paths": ["/testPath"]}]},
                },
            },
            "tags": {},
        },
    ).result()
    print(response)
# x-ms-original-file: specification/cosmos-db/resource-manager/Microsoft.DocumentDB/stable/2023-04-15/examples/CosmosDBSqlContainerCreateUpdate.json
# Run the sample only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
856b7d69a10ecf05d2a77cc576da385d6056ddd8
|
1ab2c3dbe3b8323c9167236160af263daca0ec5d
|
/maxmara_image_hashes.py
|
66371f9a6bec550085bf0e756cc82256f430e9eb
|
[
"MIT"
] |
permissive
|
Ziyu-Chen/image_hashing
|
d2a79ff610bf5bdfb35a451a05d99bdf95bb64ec
|
d48443b59959f2f785b864908e9b0979de59fe7a
|
refs/heads/master
| 2022-11-30T01:04:41.453363
| 2020-07-30T01:55:01
| 2020-07-30T01:55:01
| 274,356,173
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
import imagehash
import pandas as pd
import os
from PIL import Image
from collections import defaultdict
from square_crop import square_crop
# Source directory of images and the CSV output path (written into the same
# directory as the images).
directory_path = '/Users/Ziyu/OneDrive - Clarivate Analytics/Desktop/weekendmaxmara_images/'
data_path = '/Users/Ziyu/OneDrive - Clarivate Analytics/Desktop/weekendmaxmara_images/hashes.csv'

# Column name -> list of values; converted to a DataFrame at the end.
data_dict = defaultdict(list)
for image_name in os.listdir(directory_path):
    image_path = directory_path + image_name
    try:
        with Image.open(image_path) as image:
            # Crop to a square first so every hash algorithm sees the same
            # aspect ratio.
            image = square_crop(image)
            ahash = imagehash.average_hash(image)
            dhash = imagehash.dhash(image)
            phash = imagehash.phash(image)
            whash = imagehash.whash(image)
            # Append all five columns together so the rows stay aligned.
            data_dict['image_name'].append(image_name)
            data_dict['ahash'].append(ahash)
            data_dict['dhash'].append(dhash)
            data_dict['phash'].append(phash)
            data_dict['whash'].append(whash)
            print('Finished No. %s' % image_name)
    # Best-effort: skip anything that is not a readable image (e.g. the CSV
    # output itself or hidden files). NOTE(review): this also hides genuine
    # failures — consider logging the exception.
    except Exception:
        continue

data = pd.DataFrame(data_dict)
data.to_csv(data_path)
|
[
"zc839@nyu.edu"
] |
zc839@nyu.edu
|
a1bc32ee0d27ba4faf285733b776292f4fc063d1
|
be3263f52e4c7b76d1d1d2afa81317967f8b4105
|
/coursea_course/mini-project_week-06.py
|
aff116685a5636f34a3c5d4f4513d5dcd15d9cbd
|
[] |
no_license
|
eastmanjoe/python_bucket
|
b1724ba035928ec6dc5364db33f9c23ea85c5fbb
|
0df4e77e415716dec9d059c592b287024b2cdac5
|
refs/heads/master
| 2021-01-17T08:54:15.115953
| 2018-08-09T15:39:13
| 2018-08-09T15:39:13
| 5,634,366
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,147
|
py
|
#!/usr/bin/env python
# URL for assignment template
# http://www.codeskulptor.org/#examples-blackjack_template.py
# URL for completed assignment
# http://www.codeskulptor.org/#user38_vITLjG598O_0.py
# http://www.codeskulptor.org/#user38_vITLjG598O_1.py
# http://www.codeskulptor.org/#user38_vITLjG598O_2.py
# http://www.codeskulptor.org/#user38_vITLjG598O_3.py
# Copy and paste below the line into CodeSkulptor
'''
- Card Class Testing: http://www.codeskulptor.org/#examples-card_template.py
- Hand Class Testing: http://www.codeskulptor.org/#examples-hand_template.py
- Draw Class Testing: http://www.codeskulptor.org/#examples-deck_template.py
'''
#------------------------------------------------------------------------------
'''
Mini-project - Week 06
Blackjack: The Game
'''
# Mini-project #6 - Blackjack
import simplegui
import random
# When True, deal/hit/stand print both hands and the score to the console.
DEBUG = False

# load card sprite - 936x384 - source: jfitz.com
# Sprite sheet layout: 13 rank columns x 4 suit rows of 72x96 cards.
CARD_SIZE = (72, 96)
CARD_CENTER = (36, 48)
card_images = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/cards_jfitz.png")

CARD_BACK_SIZE = (72, 96)
CARD_BACK_CENTER = (36, 48)
card_back = simplegui.load_image("http://storage.googleapis.com/codeskulptor-assets/card_jfitz_back.png")

# initialize some useful global variables
in_play = False   # True while a round is live (between deal() and its resolution)
outcome = ""      # status message drawn on the canvas
score = 0         # running win/loss tally

# define globals for cards
SUITS = ('C', 'S', 'H', 'D')
RANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')
# Aces are valued 1 here; Hand.get_value adds 10 for a "soft" ace when it fits.
VALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}
# define card class
class Card:
    """A playing card identified by single-character suit and rank."""

    def __init__(self, suit, rank):
        # Validate against the global SUITS/RANKS tuples; an invalid pair
        # yields a None/None card with a console warning instead of raising.
        if (suit in SUITS) and (rank in RANKS):
            self.suit = suit
            self.rank = rank
        else:
            self.suit = None
            self.rank = None
            print "Invalid card: ", suit, rank

    def __str__(self):
        return self.suit + self.rank

    def get_suit(self):
        return self.suit

    def get_rank(self):
        return self.rank

    def draw(self, canvas, pos):
        # Locate this card's tile in the sprite sheet: rank picks the column,
        # suit picks the row; then blit it with its top-left corner at pos.
        card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank),
                    CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit)
                    )
        canvas.draw_image(
            card_images, card_loc, CARD_SIZE,
            [pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE
        )
# define hand class
class Hand:
    """A blackjack hand: an ordered list of cards with soft-ace scoring."""

    def __init__(self):
        self.hand = []            # cards in deal order
        self.value = 0            # last computed hard value (aces as 1)
        self.ace_in_hand = False  # sticky flag: set once an ace is seen

    def __str__(self):
        names = [' ' + str(card) for card in self.hand]
        return 'Hand contains' + ''.join(names)

    def add_card(self, card):
        self.hand.append(card)

    def get_value(self):
        # Score every ace as 1 first; if the hand holds an ace and crediting
        # one of them as 11 (i.e. +10) does not bust, use the soft total.
        self.value = 0
        for card in self.hand:
            rank = card.get_rank()
            self.value += VALUES[rank]
            if rank == 'A':
                self.ace_in_hand = True
        if self.ace_in_hand and self.value + 10 <= 21:
            return self.value + 10
        return self.value

    def draw(self, canvas, pos):
        # Draw the cards left to right with a 5-pixel gap between them.
        x, y = pos[0], pos[1]
        for card in self.hand:
            card.draw(canvas, [x, y])
            x += CARD_SIZE[0] + 5
# define deck class
class Deck:
    """A deck built from the cartesian product of the global SUITS and RANKS."""

    def __init__(self):
        # Suit-major order: all ranks of the first suit, then the next, etc.
        self.deck = []
        for suit in SUITS:
            for rank in RANKS:
                self.deck.append(Card(suit, rank))

    def shuffle(self):
        # In-place Fisher-Yates shuffle from the standard library.
        random.shuffle(self.deck)

    def deal_card(self):
        # The back of the list is the top of the deck.
        return self.deck.pop()

    def __str__(self):
        names = [' ' + str(card) for card in self.deck]
        return 'Deck contains' + ''.join(names)
#define event handlers for buttons
def deal():
    """Start a new round: penalize an abandoned round, then deal two cards
    each from a freshly shuffled deck."""
    global outcome, in_play, dealer_hand, player_hand, game_deck, score

    #if new cards dealt while game in play, player automatically loses
    if in_play:
        score -= 1

    # always start a new game with a new deck
    game_deck = Deck()
    game_deck.shuffle()

    player_hand = Hand()
    dealer_hand = Hand()

    # Alternate the deal: player, dealer, player, dealer.
    player_hand.add_card(game_deck.deal_card())
    dealer_hand.add_card(game_deck.deal_card())
    player_hand.add_card(game_deck.deal_card())
    dealer_hand.add_card(game_deck.deal_card())

    if DEBUG:
        print 'Dealer', dealer_hand
        print 'Player', player_hand

    outcome = 'Hit or Stand?'
    in_play = True
def hit():
    """Deal one card to the player; the player busts (and loses a point)
    above 21."""
    global outcome, in_play, score, player_hand

    # if the hand is in play, hit the player
    if in_play:
        player_hand.add_card(game_deck.deal_card())
        if player_hand.get_value() > 21:
            outcome = "Player Busted"
            in_play = False
            score -= 1

    if DEBUG:
        print 'Dealer', dealer_hand
        print 'Player', player_hand
        print 'Outcome:', outcome
        print 'Score:', score
def stand():
    """Play out the dealer's hand (hit until 17 or more) and settle the
    round; the dealer wins ties."""
    global outcome, in_play, score, player_hand

    # if hand is in play, repeatedly hit dealer until his hand has value 17 or more
    while in_play:
        if dealer_hand.get_value() > 21:
            outcome = "Dealer Busted"
            in_play = False
            score += 1
        elif dealer_hand.get_value() >= 17:
            # Dealer stands; compare hands. >= means the dealer wins ties.
            in_play = False
            if dealer_hand.get_value() >= player_hand.get_value():
                outcome = "Dealer WINS !"
                score -= 1
            elif dealer_hand.get_value() < player_hand.get_value():
                outcome = "Player WINS !"
                score += 1
        else:
            dealer_hand.add_card(game_deck.deal_card())

    if DEBUG:
        print 'Dealer', dealer_hand
        print 'Player', player_hand
        print 'Outcome:', outcome
        print 'Score:', score
# draw handler
def draw(canvas):
    """Render the table each frame: title, outcome line, both hands, and score.

    While the round is in play the dealer's first card is covered with the
    card-back image so the hole card stays hidden.
    """
    canvas.draw_text('BlackJack', [170, 75], 72, 'Black')
    canvas.draw_text(outcome, [190, 125], 48, 'Red')
    canvas.draw_text("Dealer's Hand", [170, 175], 32, 'Black')
    dealer_hand.draw(canvas, [50, 200])
    if in_play:
        # cover the dealer's hole card while the player is still acting
        canvas.draw_image(
            card_back, CARD_BACK_CENTER, CARD_BACK_SIZE,
            [50 + CARD_BACK_CENTER[0], 200 + CARD_BACK_CENTER[1]],
            CARD_BACK_SIZE
        )
    canvas.draw_text("Player's Hand", [170, 375], 32, 'Black')
    player_hand.draw(canvas, [50, 400])
    if not in_play:
        # round over: prompt for a new deal
        canvas.draw_text('New Deal?', [50, 460], 48, 'Green')
    canvas.draw_text('Score', [400, 550], 32, 'Black')
    canvas.draw_text(str(score), [425, 590], 48, 'Black')
# initialization frame
frame = simplegui.create_frame("Blackjack", 600, 600)
frame.set_canvas_background("Green")
#create buttons and canvas callback
frame.add_button("Deal", deal, 200)
frame.add_button("Hit", hit, 200)
frame.add_button("Stand", stand, 200)
frame.set_draw_handler(draw)
# get things rolling
# NOTE(review): deal() reads the globals in_play, score and outcome --
# presumably initialized earlier in the file; confirm before reordering.
player_hand = Hand()
dealer_hand = Hand()
deal()
frame.start()
|
[
"eastman.joseph@gmail.com"
] |
eastman.joseph@gmail.com
|
6a6aa3aae9981033e00458a70b5c856684bf33a6
|
8276a999272873c655b3a7c2bed0f3fb50a9c029
|
/Google_Cloud_Vision_Cv2.py
|
99045e6e7dfef858380f5bf4a50e24a51556ea74
|
[] |
no_license
|
varul29/Google_Cloud_Vision
|
589cce47e34df6eaffdd63607a4a46d57b815f28
|
1c0801fdc936d85caa197b6823aaba39dfcd57b8
|
refs/heads/master
| 2020-05-25T06:19:34.665203
| 2019-05-20T15:27:36
| 2019-05-20T15:27:36
| 187,666,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
import io
import os
import cv2
from google.cloud import vision
def non_ascii(text):
    """Return *text* with every non-ASCII or non-alphanumeric character removed.

    Keeps only characters that are both ASCII (ord < 128) and alphanumeric;
    accents, punctuation and whitespace are all dropped.
    """
    # BUG FIX: the original one-liner had a stray ']' after `text`, which made
    # this function a SyntaxError. Filtering in the generator also reads more
    # clearly than mapping unwanted characters to ''.
    return ''.join(c for c in text if ord(c) < 128 and c.isalnum()).strip()
# Point the client library at the service-account key file.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'apikey.json'

# The name of the image file to annotate (Change the line below 'image_path.jpg' ******)
path = os.path.join(os.path.dirname(__file__), '3.jpg') # Your image path from current directory

client = vision.ImageAnnotatorClient()
image = cv2.imread(path)

# Encode the frame using CV2 functions
success, encoded_image = cv2.imencode('.jpg', image)
content2 = encoded_image.tobytes()

# OCR Image to text process
image_cv2 = vision.types.Image(content=content2)
response = client.text_detection(image=image_cv2)
texts = response.text_annotations
# BUG FIX: the helper defined above is `non_ascii` (there is no
# `non_ascii_remove`), and the Vision response field is the singular
# `full_text_annotation`, whose `.text` attribute holds the full OCR string.
full_text = non_ascii(response.full_text_annotation.text)
print('Full Text:', full_text)
|
[
"noreply@github.com"
] |
noreply@github.com
|
cccd15128c4434b0606787b763c34be908546eb4
|
fe34bc1f4177753b26cfe48d38f93739dc2439c6
|
/unpickler/_nbdev.py
|
3f0f417dd44f1cc6e01032b3226c18edc27f02ef
|
[
"Apache-2.0"
] |
permissive
|
muellerzr/unpickler
|
0d9a4cc17cd8f4cf11d40775efa4624e866158cb
|
94c464abe8463f25f4b89d3770cfdfd347d87d83
|
refs/heads/master
| 2023-02-02T20:05:42.990577
| 2020-12-16T00:34:23
| 2020-12-16T00:34:23
| 321,793,188
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
# Public API of this generated index module (doc_url is intentionally not exported).
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
# Maps each exported symbol to the notebook that defines it.
index = {"get_files": "00_core.ipynb",
         "UnpicklerModule": "00_core.ipynb"}
# Library modules generated from the notebooks.
modules = ["core.py"]
doc_url = "https://muellerzr.github.io/unpickler/"
git_url = "https://github.com/muellerzr/unpickler/tree/master/"
def custom_doc_links(name): return None
|
[
"muellerzr@gmail.com"
] |
muellerzr@gmail.com
|
246673571092c9cb746b6d104d3951ebd7995526
|
d7aee9bee25dc3c1665fa5f0eb735d0ad3eb78f1
|
/tests/test_parser.py
|
cfa1428f879ce72c29e67b6aa61d0e75564c3354
|
[] |
no_license
|
hariton27sy/exeparser
|
bd1572c45b2ea0a6c663c12a6bab843d3bd6064d
|
b5eb7e5bd13c43645db62be45f202a30dbb11ced
|
refs/heads/master
| 2022-12-23T11:56:58.437613
| 2022-12-14T19:34:01
| 2022-12-14T19:34:01
| 200,896,027
| 0
| 0
| null | 2020-02-07T18:21:07
| 2019-08-06T17:29:42
|
Python
|
UTF-8
|
Python
| false
| false
| 6,769
|
py
|
import os
import sys
import unittest
# Repository root (one level above tests/); added to sys.path so the
# `core.exefile` import below resolves regardless of the working directory.
PARENT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          os.path.pardir)
sys.path.append(PARENT_DIR)
import core.exefile as x
def full_path(path):
    """Resolve *path*, given relative to the repository root, against PARENT_DIR."""
    resolved = os.path.join(PARENT_DIR, path)
    return resolved
class TestOnFileQoobExe(unittest.TestCase):
    """Parser checks against the sample binary examples/qoob.exe."""
    # Fixture path, relative to the repo root (resolved by full_path).
    path = 'examples/qoob.exe'
    def setUp(self):
        # A fresh parse per test keeps the cases independent.
        self.file = x.ExeFile(full_path(self.path))
    def test_rva_to_raw(self):
        # RVA 0x13000 maps to raw file offset 0x6e00 in this binary.
        expected = 0x6e00
        res = self.file.rva_to_raw(0x13000)
        self.assertEqual(expected, res[1])
    def test_rva_to_raw2(self):
        # An offset within the same region shifts the raw address equally.
        expected = 0x6e02
        res = self.file.rva_to_raw(0x13002)
        self.assertEqual(expected, res[1])
    def test_resources(self):
        # The resource directory stringifies as an indented type/name tree.
        expected = ('+ root\n| + ICON\n| | + 1\n| + RCDATA\n| | + 2\n| | + 8\n'
                    '| | + 10\n| | + 17\n| | + 18\n| | + 20\n| | + 21\n'
                    '| | + 30\n| | + 101\n| | + 102\n| | + 103\n| | + 104\n'
                    '| + GROUP_ICON\n| | + 1\n| + VERSION\n| | + 1')
        actual = str(self.file.resources())
        self.assertEqual(expected, actual)
    def test_no_export_table(self):
        # This binary exports nothing, so the table stringifies to ''.
        actual = self.file.export_table()
        self.assertEqual('', str(actual))
    def test_relocations(self):
        # No relocation data present in this binary.
        actual = self.file.relocations()
        self.assertIsNone(actual)
    def test_raw_section_header(self):
        # Raw bytes of section index 2, compared verbatim.
        expected = (b'<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                    b'\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00'
                    b'\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00'
                    b'\x00\x00\x00\x00\x00\x00\x00|p@\x00\x00\x00\x00\x00\x00'
                    b'\x00\x00\x00\x00\x00\x00\x00')
        actual = b"".join(self.file.raw_section_data(2))
        self.assertEqual(expected, actual)
class TestOnFileApp2Exe(unittest.TestCase):
    """Parser checks against the .NET sample binary examples/App2.exe."""

    path = 'examples/App2.exe'  # relative to the repo root

    def setUp(self):
        # A fresh parse per test keeps the cases independent.
        self.file = x.ExeFile(full_path(self.path))

    def test_relocations(self):
        want = ('BASE RELOCATIONS:\n 0x2000 RVA 12 SizeOfBlock 2 Count '
                'of relocations\n 0x0006F4 HIGHLOW\n 0x000000 ABSOULUTE')
        self.assertEqual(want, str(self.file.relocations()))

    def test_import_table(self):
        # The only import descriptor should reference mscoree.dll.
        wanted_fields = {
            'originalFirstThunk': 9927,
            'timeDateStamp': 0,
            'forwarderChain': 0,
            'name': "mscoree.dll",
            'firstThunk': 8192,
        }
        table = self.file.import_table().table
        self.assertEqual(1, len(table))
        entry = table[0]
        for field_name, value in wanted_fields.items():
            self.assertEqual(value, entry[field_name])
        self.assertEqual(1, len(entry['functions']))

    def test_resources(self):
        want = "+ root\n| + VERSION\n| | + 1\n| + MANIFEST\n| | + 1"
        self.assertEqual(want, str(self.file.resources()))
class TestOnFileFirefox2Exe(unittest.TestCase):
    """Parser checks against the real-world binary examples/firefox2.exe."""
    path = 'examples/firefox2.exe'
    def setUp(self):
        # A fresh parse per test keeps the cases independent.
        self.file = x.ExeFile(full_path(self.path))
    def test_data_directory(self):
        # All 16 directory entries, kept as raw (address, size) byte pairs.
        expected = [(b'\x05\x04\x04\x00', b'\xcf\x07\x00\x00'),
                    (b'\xd4\x0b\x04\x00', b'h\x01\x00\x00'),
                    (b'\x00\x80\x04\x00', b'\xb0%\x03\x00'),
                    (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
                    (b'\x00\x80\x07\x00', b' \x1e\x00\x00'),
                    (b'\x00\xb0\x07\x00', b"p'\x00\x00"),
                    (b'j\xfb\x03\x00', b'\x1c\x00\x00\x00'),
                    (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
                    (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
                    (b'\xd4\xcc\x03\x00', b'\x18\x00\x00\x00'),
                    (b'\xb8\xb0\x03\x00', b'\xa0\x00\x00\x00'),
                    (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
                    (b'\x8c\x12\x04\x00', b'P\x05\x00\x00'),
                    (b'\xa4\x00\x04\x00', b'\xe0\x00\x00\x00'),
                    (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00'),
                    (b'\x00\x00\x00\x00', b'\x00\x00\x00\x00')]
        actual = self.file.optional_header['dataDirectory']
        self.assertEqual(16, len(actual))
        self.assertEqual(expected, actual)
    def test_export_table(self):
        # The expected dump is large, so it lives in a fixture file.
        with open(full_path('tests/firefox2_expected/exportTable.txt')) as f:
            expected = f.read()
        actual = self.file.export_table()
        self.assertEqual(expected, str(actual))
    def test_dependents(self):
        # Every DLL the binary imports from, compared as a set.
        expected = {
            "ADVAPI32.dll",
            "KERNEL32.dll",
            "MSVCP140.dll",
            "VCRUNTIME140.dll",
            "api-ms-win-crt-convert-l1-1-0.dll",
            "api-ms-win-crt-environment-l1-1-0.dll",
            "api-ms-win-crt-filesystem-l1-1-0.dll",
            "api-ms-win-crt-heap-l1-1-0.dll",
            "api-ms-win-crt-locale-l1-1-0.dll",
            "api-ms-win-crt-math-l1-1-0.dll",
            "api-ms-win-crt-runtime-l1-1-0.dll",
            "api-ms-win-crt-stdio-l1-1-0.dll",
            "api-ms-win-crt-string-l1-1-0.dll",
            "api-ms-win-crt-time-l1-1-0.dll",
            "api-ms-win-crt-utility-l1-1-0.dll",
            "mozglue.dll",
            "ntdll.dll"
        }
        actual = self.file.import_table().get_dependencies()
        self.assertEqual(expected, set(actual))
        # That no repeats
        self.assertEqual(len(set(actual)), len(actual))
    def test_get_when_resource_is_png(self):
        resources = self.file.resources()
        # A leaf node known to contain PNG data (checked via the magic bytes).
        resource = resources.table.elements[0].elements[11].elements[0]
        actual = self.file.get_resource(resource)
        self.assertEqual(b"\x89PNG", actual[:4])
class TestCommonRaises(unittest.TestCase):
    """Error handling shared by all inputs: missing files and broken headers."""

    def test_file_not_found(self):
        with self.assertRaises(FileNotFoundError):
            x.ExeFile(full_path('WrongPath/nofile.exe'))

    def test_wrong_file_format(self):
        # A non-PE file is rejected on the missing DOS signature.
        with self.assertRaises(x.BrokenFileError) as ctx:
            x.ExeFile(full_path('index.py'))
        self.assertIn('Broken file. No "MZ" in begin', str(ctx.exception))

    def test_no_mz_signature(self):
        with self.assertRaises(x.BrokenFileError) as ctx:
            x.ExeFile(full_path('examples/NoMZSignature.exe'))
        self.assertIn('Broken file. No "MZ" in begin', str(ctx.exception))

    def test_no_pe_signature(self):
        # Valid DOS header but the PE signature is missing.
        with self.assertRaises(x.BrokenFileError) as ctx:
            x.ExeFile(full_path('examples/NoPESignature.exe'))
        self.assertIn('Broken File. No "PE\\0\\0" in begin of PEHeader',
                      str(ctx.exception))
if __name__ == "__main__":
    # Allow running this test module directly: python tests/test_parser.py
    unittest.main()
|
[
"hariton27sy@gmail.com"
] |
hariton27sy@gmail.com
|
6f87b92696de2420ba9b14956ac1d08db4e16a86
|
bc6c0cda914c23e80921793eb0ce71c45202ada4
|
/src/endoexport/export.py
|
66f3970d48311c18dc3f984c553dd2e423f77298
|
[
"MIT"
] |
permissive
|
karlicoss/endoexport
|
a2221799113a12b400e298dea8d95559926de138
|
98c8805cbcc00187822737ef32c2e0434c4f450e
|
refs/heads/master
| 2023-04-04T09:56:57.716411
| 2023-03-15T02:19:15
| 2023-03-15T02:22:45
| 230,617,833
| 3
| 0
|
MIT
| 2023-03-15T02:22:46
| 2019-12-28T14:05:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
#!/usr/bin/env python3
import argparse
import json
from .exporthelpers.export_helper import Json
import endoapi
def get_json(**params) -> Json:
    """Fetch every workout as raw JSON from Endomondo.

    *params* are the credentials/token accepted by the endoapi client.
    """
    client = endoapi.endomondo.Endomondo(**params)
    # None means "no limit": download the full workout history.
    return client.get_workouts_raw(None)
Token = str


def login(email: str) -> Token:
    """Interactively obtain an Endomondo API token for *email*.

    Prompts for the password on stdin, logs in, prints the token and
    returns it.
    """
    print(f"Logging in as {email}")
    password = input('Your password: ')
    session = endoapi.endomondo.Endomondo(email=email, password=password)
    token = session.token
    print('Your token:')
    print(token)
    return token
def make_parser():
    """Build the CLI parser: shared export params plus the --login flag."""
    from .exporthelpers.export_helper import setup_parser, Parser

    parser = Parser("Tool to export your personal Endomondo data")
    # TODO exports -- need help for each param?
    setup_parser(parser=parser, params=['email', 'token'])
    parser.add_argument('--login', action='store_true', help='''
This will log you in and give you the token (you'll need your password).
You only need to do it once, after that just store the token and use it.
''')
    return parser
def main() -> None:
    """CLI entry point: either perform interactive login or dump all workouts."""
    # TODO add logger configuration to export_helper?
    # TODO autodetect logzero?
    args = make_parser().parse_args()
    params = args.params
    if args.login:
        # Login-only mode: print the token and exit.
        login(email=params['email'])
        return
    # Export mode: fetch everything and hand the serialized JSON to the dumper.
    data = get_json(**params)
    serialized = json.dumps(data, indent=1, ensure_ascii=False)
    args.dumper(serialized)
if __name__ == '__main__':
    # Allow running the module directly as a script.
    main()
|
[
"karlicoss@gmail.com"
] |
karlicoss@gmail.com
|
d10c3fb59eb602e7a438fe8b8b7ccca52fcc45d2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_syphon.py
|
1ef3547d3d666728720ba4bfc26206b8a9d76bc4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
#calss header
class _SYPHON():
def __init__(self,):
self.name = "SYPHON"
self.definitions = [u'a siphon noun ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.