blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
842887ea36a0a019eae06afbedde1a86e74ddb9f | Python | ageelg-mobile/100py | /Week008/d053_d054_mymath.py | UTF-8 | 196 | 2.90625 | 3 | [] | no_license | #Ageel 10/11/2019
#100 Days of Python
#Day 053 & 054 - quiz
def add(x, y):
    """Return the sum of *x* and *y*."""
    total = x + y
    return total
def sub(x, y):
    """Return *x* minus *y*."""
    difference = x - y
    return difference
def divide(x, y):
    """Return the true-division quotient x / y.

    Raises ZeroDivisionError when *y* is zero (same as the plain operator).
    """
    quotient = x / y
    return quotient
def multiply(x, y):
    """Return the product of *x* and *y*."""
    product = x * y
    return product
8ea51e679df1793723ea3c04f6146729c80ff0f1 | Python | xiyangyang410/Learning-Python-Code | /38_2.py | UTF-8 | 2,242 | 3.046875 | 3 | [] | no_license | traceMe = False
def trace(*args):
    """Print *args* joined by spaces and wrapped in square brackets, but only
    when the module-level ``traceMe`` flag is truthy.

    Bug fix: the original wrote ``print('[' + ' '.join(...), +']')`` -- the
    comma made ``+']'`` a *second* print argument, and unary ``+`` applied to
    a str raises TypeError as soon as tracing is switched on.
    """
    if traceMe:
        print('[' + ' '.join(map(str, args)) + ']')
def accessControl(failIf):
    """Decorator factory: wrap a class in a proxy that vetoes attribute access.

    Parameters
    ----------
    failIf : callable(str) -> bool
        Predicate applied to each attribute name fetched or assigned from
        *outside* the wrapped instance; a True result raises TypeError.

    Under ``python -O`` (``__debug__`` is False) the class is returned
    unwrapped, so the access checks cost nothing in optimized runs.
    """
    def onDecorator(aClass):
        if not __debug__:
            return aClass  # -O mode: no wrapping, no checks
        else:
            class onInstance:
                def __init__(self, *args, **kargs):
                    # This assignment is routed through __setattr__ below and
                    # is stored under the name-mangled key.
                    self.__wrapped = aClass(*args, **kargs)
                def __getattr__(self, attr):
                    # Only called for names not found on the proxy itself.
                    trace('get:', attr)
                    if failIf(attr):
                        raise TypeError('private attribute fetch: ' + attr)
                    else:
                        return getattr(self.__wrapped, attr)
                def __setattr__(self, attr, value):
                    # Intercepts every assignment, including __init__'s own.
                    trace('set:', attr, value)
                    if attr == '_onInstance__wrapped':
                        # The proxy's own storage slot: write it directly into
                        # __dict__ to avoid infinite __setattr__ recursion.
                        self.__dict__[attr] = value
                    elif failIf(attr):
                        # Bug fix: was TypeError('...: ', attr) -- a two-arg
                        # exception; now concatenated like the fetch branch.
                        raise TypeError('private attribute change: ' + attr)
                    else:
                        setattr(self.__wrapped, attr, value)
            return onInstance
    return onDecorator
def Private(*attributes):
    """Class decorator: the listed attribute names are blocked from outside."""
    def blocked(attr):
        return attr in attributes
    return accessControl(failIf=blocked)
def Public(*attributes):
    """Class decorator: only the listed attribute names are reachable."""
    def blocked(attr):
        return attr not in attributes
    return accessControl(failIf=blocked)
# Test code: split me off to another file to reuse decorator
@Private('age') # Person = Private('age')(Person)
class Person: # Person = onInstance with state
    """Demo: 'age' is blocked from outside; all other names pass through."""
    def __init__(self, name, age):
        self.name = name
        self.age = age # Inside accesses run normally
X = Person('Bob', 40)
print(X.name) # Outside accesses validated
X.name = 'Sue'
print(X.name)
# print(X.age) # FAILS unless "python -O"
# X.age = 999 # ditto
# print(X.age) # ditto
@Public('name')
class Person:
    """Demo: only 'name' is reachable from outside; everything else fails."""
    def __init__(self, name, age):
        self.name = name
        self.age = age
X = Person('bob', 40) # X is an onInstance
print(X.name) # onInstance embeds Person
X.name = 'Sue'
print(X.name)
# print(X.age) # FAILS unless "python -O main.py"
# X.age = 999 # ditto
# print(X.age) # ditto
| true |
f28e2a537691c8db02d4f3d5e80cc20c6efd376e | Python | rk50895/capstone | /app.py | UTF-8 | 15,101 | 2.578125 | 3 | [] | no_license | import os
import json
from flask import (
Flask, request, abort, jsonify
)
from sqlalchemy import exc
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from auth import (
AuthError, requires_auth
)
from models import (
db, setup_db, Actor, Movie, actor_movie, db_drop_and_create_all
)
def create_app(test_config=None):
'''
Create and configure app
'''
app = Flask(__name__)
CORS(app)
setup_db(app)
# db_drop_and_create_all()
migrate = Migrate(app, db)
'''
CORS Headers
'''
@app.after_request
def after_request(response):
response.headers.add(
'Access-Control-Allow-Headers',
'Content-Type, Authorization, true'
)
response.headers.add(
'Access-Control-Allow-Methods',
'GET, PATCH, POST, DELETE, OPTIONS'
)
return response
'''
Start of declaration of all ROUTES
'''
'''
GET /
it should be a public endpoint
it is a dummy endpoint
returns welcome message
'''
@app.route('/')
def index():
return "Welcome to my Capstone project!"
'''
GET /actors
it should be a public endpoint
it should contain only the actor.short() data representation
returns status code 200 and json {"success": True, "actors": actors}
where actors is the list of actors
or appropriate status code indicating reason for failure
'''
@app.route('/actors', methods=['GET'])
@requires_auth('view:actors')
def get_actors(jwt):
# Retrieve all actors from database
try:
actor_selection = [
actor.short() for actor in Actor.query.order_by(Actor.id).all()
]
except Exception as e:
# print(e)
abort(422)
# No actors in database
if len(actor_selection) == 0:
abort(404)
return jsonify({
"success": True,
"status_code": 200,
"status_message": 'OK',
"actors": actor_selection
})
'''
GET /actors/<id>
it should be a public endpoint
where <id> is the existing actor id
it should respond with a 404 error if <id> is not found
it should contain only the actor.long() data representation
returns status code 200 and json {"success": True, "actor": actor}
where actor is the selected actor
or appropriate status code indicating reason for failure
'''
    @app.route('/actors/<int:actor_id>', methods=['GET'])
    @requires_auth('view:actors')
    def get_actor(jwt, actor_id):
        """Return the long form of a single actor; 404 if the id is unknown."""
        # Retrieve requested actor from database
        try:
            target_actor = Actor.query.get(actor_id)
        except Exception as e:
            # print(e)
            abort(422)
        # No actor for this id in database
        if target_actor is None:
            abort(404)
        return jsonify({
            "success": True,
            "status_code": 200,
            "status_message": 'OK',
            "actor": target_actor.long()
        })
'''
POST /actors
it should create a new row in the actors table
it should require the 'add:actors' permission
it should contain the actor.long() data representation
returns status code 200 and json {"success": True, "actor": actor}
where actor contains only the newly created actor
or appropriate status code indicating reason for failure
'''
    @app.route('/actors', methods=['POST'])
    @requires_auth('add:actors')
    def post_actors(jwt):
        """Create a new actor from the JSON payload.

        Requires at least "name"; "movies" may hold a list of movie ids to
        associate with the new actor.
        """
        # Retrieve JSON payload
        body = request.get_json()
        # No JSON payload provided
        if not body:
            abort(400)
        name = body.get("name", None)
        age = body.get("age", None)
        gender = body.get("gender", None)
        movies = body.get("movies", None)
        # NOTE(review): post_movies aborts 400 for a missing required field,
        # but this endpoint uses 422 -- confirm which status the API wants.
        if name is None:
            abort(422)
        new_actor = Actor(name=name, age=age, gender=gender)
        if movies:
            new_actor.movies = Movie.query.filter(Movie.id.in_(movies)).all()
        try:
            new_actor.insert()
            return jsonify({
                "success": True,
                "status_code": 200,
                "status_message": "OK",
                "actor": new_actor.long()
            })
        except Exception as e:
            # print(e)
            abort(422)
'''
PATCH /actors/<id>
where <id> is the existing actor id
it should respond with a 404 error if <id> is not found
it should update the corresponding row for <id>
it should require the 'edit:actors' permission
it should contain the actor.long() data representation
returns status code 200 and json {"success": True, "actor": actor}
where actor containing only the updated actor
or appropriate status code indicating reason for failure
'''
    @app.route('/actors/<int:actor_id>', methods=['PATCH'])
    @requires_auth('edit:actors')
    def patch_actors(jwt, actor_id):
        """Partially update an existing actor from the JSON payload.

        Only the fields present (and truthy) in the payload are changed.
        """
        # Retrieve JSON payload
        body = request.get_json()
        # No JSON payload provided
        if not body:
            abort(400)
        name = body.get("name", None)
        age = body.get("age", None)
        gender = body.get("gender", None)
        movies = body.get("movies", None)
        # Retrieve requested actor from database
        target_actor = Actor.query.get(actor_id)
        if target_actor is None:
            abort(404)
        # NOTE(review): these truthiness checks silently skip falsy values
        # (e.g. age=0 or name="") -- confirm this is intended.
        if name:
            target_actor.name = name
        if age:
            target_actor.age = age
        if gender:
            target_actor.gender = gender
        if movies:
            target_actor.movies = Movie.query.filter(
                Movie.id.in_(movies)).all()
        try:
            target_actor.update()
            return jsonify({
                "success": True,
                "status_code": 200,
                "status_message": 'OK',
                "actor": target_actor.long()
            })
        except Exception as e:
            # print(e)
            abort(422)
'''
DELETE /actors/<id>
where <id> is the existing actor id
it should respond with a 404 error if <id> is not found
it should delete the corresponding row for <id>
it should require the 'delete:actors' permission
returns status code 200 and json {"success": True, "delete": id}
where id is the id of the deleted record
or appropriate status code indicating reason for failure
'''
@app.route('/actors/<int:actor_id>', methods=['DELETE'])
@requires_auth(permission='delete:actors')
def delete_actors(jwt, actor_id):
# Retrieve requested actor from database
target_actor = Actor.query.get(actor_id)
if target_actor is None:
abort(404)
try:
target_actor.delete()
return jsonify({"success": True,
"status_code": 200,
"status_message": 'OK',
"id_deleted": actor_id})
except Exception as e:
# print(e)
abort(422)
'''
GET /movies
it should be a public endpoint
it should contain only the movie.short() data representation
returns status code 200 and json {"success": True, "movies": movies}
where movies is the list of movies
or appropriate status code indicating reason for failure
'''
    @app.route('/movies', methods=["GET"])
    @requires_auth('view:movies')
    def get_movies(jwt):
        """List every movie (short form), ordered by id.

        Aborts 422 on a database error and 404 when no movies exist.
        """
        # Retrieve all movies from database
        try:
            movie_selection = [
                movie.short() for movie in Movie.query.order_by(Movie.id).all()
            ]
        except Exception as e:
            # print(e)
            abort(422)
        # No movies in database
        if len(movie_selection) == 0:
            abort(404)
        return jsonify({
            "success": True,
            "status_code": 200,
            "status_message": 'OK',
            "movies": movie_selection
        })
'''
GET /movies/<id>
it should be a public endpoint
where <id> is the existing movie id
it should respond with a 404 error if <id> is not found
it should contain only the movie.long() data representation
returns status code 200 and json {"success": True, "movie": movie}
where movie is the selected movie
or appropriate status code indicating reason for failure
'''
@app.route('/movies/<int:movie_id>', methods=['GET'])
@requires_auth('view:movies')
def get_movie(jwt, movie_id):
# Retrieve requested movie from database
try:
target_movie = Movie.query.get(movie_id)
except Exception as e:
# print(e)
abort(422)
# No movie for this id in database
if target_movie is None:
abort(404)
return jsonify({
"success": True,
"status_code": 200,
"status_message": 'OK',
"movie": target_movie.long()
})
'''
POST /movies
it should create a new row in the movies table
it should require the 'add:movies' permission
it should contain the movie.long() data representation
returns status code 200 and json {"success": True, "movie": movie}
where movie contains only the newly created movie
or appropriate status code indicating reason for failure
'''
    @app.route('/movies', methods=['POST'])
    @requires_auth(permission='add:movies')
    def post_movies(jwt):
        """Create a new movie from the JSON payload.

        Requires at least "title"; "actors" may hold a list of actor ids to
        associate with the new movie.
        """
        # Retrieve JSON payload
        body = request.get_json()
        # No JSON payload provided
        if not body:
            abort(400)
        title = body.get("title", None)
        release_date = body.get("release_date", None)
        actors = body.get('actors', None)
        if title is None:
            abort(400)
        new_movie = Movie(title=title, release_date=release_date)
        if actors:
            new_movie.actors = Actor.query.filter(Actor.id.in_(actors)).all()
        try:
            new_movie.insert()
            return jsonify({
                "success": True,
                "status_code": 200,
                "status_message": 'OK',
                "movie": new_movie.long()
            })
        except Exception as e:
            # print(e)
            abort(422)
'''
PATCH /movies/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should update the corresponding row for <id>
it should require the 'patch:movies' permission
it should contain the movie.long() data representation
returns status code 200 and json {"success": True, "movies": movie}
where movie an array containing only the updated movie
or appropriate status code indicating reason for failure
'''
    @app.route('/movies/<int:movie_id>', methods=['PATCH'])
    @requires_auth(permission='edit:movies')
    def patch_movies(jwt, movie_id):
        """Partially update an existing movie from the JSON payload.

        Only the fields present (and truthy) in the payload are changed.
        """
        # Retrieve JSON payload
        body = request.get_json()
        # No JSON payload provided
        if not body:
            abort(400)
        title = body.get("title", None)
        release_date = body.get("release_date", None)
        actors = body.get("actors", None)
        # Retrieve requested movie from database
        target_movie = Movie.query.get(movie_id)
        if target_movie is None:
            abort(404)
        # NOTE(review): falsy values are silently skipped by these checks.
        if title:
            target_movie.title = title
        if release_date:
            target_movie.release_date = release_date
        if actors:
            target_movie.actors = Actor.query.filter(
                Actor.id.in_(actors)).all()
        try:
            target_movie.update()
            return jsonify({
                "success": True,
                "status_code": 200,
                "status_message": 'OK',
                "movie": target_movie.long()
            })
        except Exception as e:
            # print(e)
            abort(422)
'''
DELETE /movies/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should delete the corresponding row for <id>
it should require the 'delete:movies' permission
returns status code 200 and json {"success": True, "delete": id}
where id is the id of the deleted record
or appropriate status code indicating reason for failure
'''
    @app.route('/movies/<int:movie_id>', methods=['DELETE'])
    @requires_auth(permission='delete:movies')
    def delete_movies(jwt, movie_id):
        """Delete the movie with the given id; 404 if absent, 422 on DB failure."""
        # Retrieve requested movie from database
        target_movie = Movie.query.get(movie_id)
        if target_movie is None:
            abort(404)
        try:
            target_movie.delete()
            return jsonify({"success": True,
                            "status_code": 200,
                            "status_message": 'OK',
                            "id_deleted": movie_id})
        except Exception as e:
            # print(e)
            abort(422)
'''
Error handling for resource not found
'''
@app.errorhandler(400)
def not_found(error):
return jsonify({
"success": False,
"error": 400,
"status_message": "bad request"
}), 400
'''
Error handling for resource not found
'''
    @app.errorhandler(404)
    def not_found(error):
        """JSON body for 404 (resource not found) responses."""
        return jsonify({
            "success": False,
            "error": 404,
            "status_message": "resource not found"
        }), 404
'''
Error handling for method not allowed'
'''
@app.errorhandler(405)
def not_found(error):
return jsonify({
"success": False,
"error": 405,
"status_message": "method not allowed"
}), 405
'''
Error handling for unprocessable entity
'''
@app.errorhandler(422)
def unprocessable(error):
return jsonify({
"success": False,
"error": 422,
"status_message": "unprocessable"
}), 422
'''
Error handling for AuthError that were raised
'''
    @app.errorhandler(AuthError)
    def handle_auth_error(ex):
        """Translate a raised AuthError into a JSON response carrying its
        payload and HTTP status code."""
        response = jsonify(ex.error)
        response.status_code = ex.status_code
        return response
return app
app = create_app()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| true |
4d65ed48d060183305ccb91392640406c91d09e8 | Python | percyfal/tskit | /python/tests/test_ibd.py | UTF-8 | 22,880 | 2.796875 | 3 | [
"MIT"
] | permissive | """
Tests of IBD finding algorithms.
"""
import io
import itertools
import random
import msprime
import pytest
import tests.ibd as ibd
import tests.test_wright_fisher as wf
import tskit
# Functions for computing IBD 'naively'.
def find_ibd(
    ts,
    sample_pairs,
    min_length=0,
    max_time=None,
    compare_lib=True,
    print_c=False,
    print_py=False,
):
    """
    Calculates IBD segments using Python and converts output to lists of segments.
    Also compares result with C library.

    Parameters
    ----------
    ts : the tree sequence to analyse.
    sample_pairs : list of (sample, sample) tuples to compute IBD for.
    min_length, max_time : filters forwarded to the Python IbdFinder and the
        C-library call so both run with identical options.
    compare_lib : when True, asserts the Python output equals the C output.
    print_c, print_py : debugging switches to dump either result.
    """
    ibd_f = ibd.IbdFinder(
        ts, sample_pairs=sample_pairs, max_time=max_time, min_length=min_length
    )
    ibd_segs = ibd_f.find_ibd_segments()
    ibd_segs = convert_ibd_output_to_seglists(ibd_segs)
    if compare_lib:
        c_out = ts.tables.find_ibd(
            sample_pairs, max_time=max_time, min_length=min_length
        )
        c_out = convert_ibd_output_to_seglists(c_out)
        if print_c:
            print("C output:\n")
            print(c_out)
        if print_py:
            print("Python output:\n")
            print(ibd_segs)
        assert ibd_is_equal(ibd_segs, c_out)
    return ibd_segs
def get_ibd(
    sample0,
    sample1,
    treeSequence,
    min_length=0,
    max_time=None,
    path_ibd=True,
    mrca_ibd=True,
):
    """
    Returns all IBD segments for a given pair of nodes in a tree
    using a naive algorithm.
    Note: This function probably looks more complicated than it needs to be --
    This is because it also calculates other 'versions' of IBD (mrca_ibd=False,
    path_ibd=False) that we have't implemented properly yet.
    """
    ibd_list = []
    # Simplify down to the two samples; node_map translates simplified node
    # ids back to ids in the original tree sequence.
    ts, node_map = treeSequence.simplify(
        samples=[sample0, sample1], keep_unary=True, map_nodes=True
    )
    node_map = node_map.tolist()
    for n in ts.nodes():
        # Nodes are visited in time order, so we can stop once too old.
        if max_time is not None and n.time > max_time:
            break
        node_id = n.id
        interval_list = []
        # Skip flag==1 nodes -- presumably the sample flag; confirm against
        # tskit's NODE_IS_SAMPLE constant.
        if n.flags == 1:
            continue
        prev_dict = None
        for t in ts.trees():
            # Skip trees where n is isolated or ancestral to < 2 samples.
            if len(list(t.nodes(n.id))) == 1 or t.num_samples(n.id) < 2:
                continue
            if mrca_ibd and n.id != t.mrca(0, 1):
                continue
            current_int = t.get_interval()
            if len(interval_list) == 0:
                interval_list.append(current_int)
            else:
                prev_int = interval_list[-1]
                # Merge with the previous interval when either (a) path IBD is
                # off and the intervals are adjacent, or (b) the subtree under
                # this node is unchanged since the previous tree.
                if not path_ibd and prev_int[1] == current_int[0]:
                    interval_list[-1] = (prev_int[0], current_int[1])
                elif prev_dict is not None and subtrees_are_equal(
                    t, prev_dict, node_id
                ):
                    interval_list[-1] = (prev_int[0], current_int[1])
                else:
                    interval_list.append(current_int)
            prev_dict = t.get_parent_dict()
        for interval in interval_list:
            if min_length == 0 or interval.right - interval.left > min_length:
                # Map the simplified node id back to the original id.
                orig_id = node_map.index(node_id)
                ibd_list.append(ibd.Segment(interval[0], interval[1], orig_id))
    return ibd_list
def get_ibd_all_pairs(
    treeSequence,
    samples=None,
    min_length=0,
    max_time=None,
    path_ibd=True,
    mrca_ibd=False,
):
    """
    Compute naive IBD segments for every pair of the given samples.

    Returns a dict mapping each (sample_a, sample_b) tuple to the segment
    list produced by :func:`get_ibd` with the same options.
    """
    if samples is None:
        samples = treeSequence.samples().tolist()
    return {
        (a, b): get_ibd(
            a,
            b,
            treeSequence,
            min_length=min_length,
            max_time=max_time,
            path_ibd=path_ibd,
            mrca_ibd=mrca_ibd,
        )
        for a, b in itertools.combinations(samples, 2)
    }
def subtrees_are_equal(tree1, pdict0, root):
    """
    Return True when the subtree of ``tree1`` below ``root`` matches the
    subtree encoded by the parent dictionary ``pdict0``.
    """
    pdict1 = tree1.get_parent_dict()
    parents0 = set(pdict0.values())
    if root not in parents0 or root not in pdict1.values():
        return False
    # Walk leaf-to-root in tree1 and require the identical parent chain
    # to exist in pdict0.
    for leaf in set(tree1.leaves(root)):
        node = leaf
        while node != root:
            parent1 = pdict1[node]
            if parent1 not in parents0 or pdict0[node] != parent1:
                return False
            node = parent1
    return True
def verify_equal_ibd(
    ts, sample_pairs=None, compare_lib=True, print_c=False, print_py=False
):
    """
    Calculates IBD segments using both the 'naive' and sophisticated algorithms,
    verifies that the same output is produced.

    Bug fix: the original equality loop sorted each pair's segment lists but
    never actually compared them, so the "verification" could not fail; the
    missing assertion is now performed.

    NB: May be good to expand this in the future so that many different combos
    of IBD options are tested simultaneously (all the MRCA and path-IBD combos),
    for example.
    """
    if sample_pairs is None:
        sample_pairs = list(itertools.combinations(ts.samples(), 2))
    ibd0 = find_ibd(
        ts,
        sample_pairs=sample_pairs,
        compare_lib=compare_lib,
        print_c=print_c,
        print_py=print_py,
    )
    ibd1 = get_ibd_all_pairs(ts, path_ibd=True, mrca_ibd=True)
    # Check for equality.
    for key0, val0 in ibd0.items():
        assert key0 in ibd1.keys()
        val1 = ibd1[key0]
        val0.sort()
        val1.sort()
        assert val0 == val1
def convert_ibd_output_to_seglists(ibd_out):
    """
    Converts the Python mock-up output back into lists of segments.
    This is needed to use the ibd_is_equal function.

    Note: ``ibd_out`` is modified *in place* -- each per-pair dict of
    left/right/node arrays is replaced by a list of ``ibd.Segment`` -- and
    the same dict object is returned for convenience.
    """
    for key in ibd_out.keys():
        seg_list = []
        num_segs = len(ibd_out[key]["left"])
        for s in range(num_segs):
            seg_list.append(
                ibd.Segment(
                    left=ibd_out[key]["left"][s],
                    right=ibd_out[key]["right"][s],
                    node=ibd_out[key]["node"][s],
                )
            )
        ibd_out[key] = seg_list
    return ibd_out
def ibd_is_equal(dict1, dict2):
    """
    Return True when both dicts have identical keys and, for each key, the
    same segments ignoring order; otherwise False.
    Used to check identical IBD output.
    """
    if len(dict1) != len(dict2):
        return False
    return all(
        key in dict2 and segment_lists_are_equal(segments, dict2[key])
        for key, segments in dict1.items()
    )
def segment_lists_are_equal(val1, val2):
    """
    Return True if the two lists hold the same segments, ignoring order.

    Note: both lists are sorted *in place* as a side effect (unchanged from
    the original). The old ``val1 is None`` branches were unreachable dead
    code -- ``len(None)`` at the top would already have raised -- and have
    been removed; the per-index comparison loop is replaced by list ``==``,
    which compares element-wise.
    """
    if len(val1) != len(val2):
        return False
    val1.sort()
    val2.sort()
    return val1 == val2
class TestIbdSingleBinaryTree:
#
# 2 4
# / \
# 1 3 \
# / \ \
# 0 0 1 2
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 1
4 0 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 3 0,1
0 1 4 2,3
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
# Basic test
def test_defaults(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 1), (0, 2), (1, 2)])
true_segs = {
(0, 1): [ibd.Segment(0.0, 1.0, 3)],
(0, 2): [ibd.Segment(0.0, 1.0, 4)],
(1, 2): [ibd.Segment(0.0, 1.0, 4)],
}
assert ibd_is_equal(ibd_segs, true_segs)
def test_time(self):
ibd_segs = find_ibd(
self.ts,
sample_pairs=[(0, 1), (0, 2), (1, 2)],
max_time=1.5,
compare_lib=True,
)
true_segs = {(0, 1): [ibd.Segment(0.0, 1.0, 3)], (0, 2): [], (1, 2): []}
assert ibd_is_equal(ibd_segs, true_segs)
# Min length = 2
def test_length(self):
ibd_segs = find_ibd(
self.ts, sample_pairs=[(0, 1), (0, 2), (1, 2)], min_length=2
)
true_segs = {(0, 1): [], (0, 2): [], (1, 2): []}
assert ibd_is_equal(ibd_segs, true_segs)
def test_input_errors(self):
with pytest.raises(ValueError):
ibd.IbdFinder(self.ts, sample_pairs=[0])
with pytest.raises(AssertionError):
ibd.IbdFinder(self.ts, sample_pairs=[(0, 1, 2)])
with pytest.raises(ValueError):
ibd.IbdFinder(self.ts, sample_pairs=[(0, 5)])
with pytest.raises(ValueError):
ibd.IbdFinder(self.ts, sample_pairs=[(0, 1), (1, 0)])
class TestIbdTwoSamplesTwoTrees:
# 2
# | 3
# 1 2 | / \
# / \ | / \
# 0 0 1 | 0 1
# |------------|----------|
# 0.0 0.4 1.0
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 1.5
"""
)
edges = io.StringIO(
"""\
left right parent child
0 0.4 2 0,1
0.4 1.0 3 0,1
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
# Basic test
def test_basic(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 1)])
true_segs = {(0, 1): [ibd.Segment(0.0, 0.4, 2), ibd.Segment(0.4, 1.0, 3)]}
assert ibd_is_equal(ibd_segs, true_segs)
# Max time = 1.2
def test_time(self):
ibd_segs = find_ibd(
self.ts, sample_pairs=[(0, 1)], max_time=1.2, compare_lib=True
)
true_segs = {(0, 1): [ibd.Segment(0.0, 0.4, 2)]}
assert ibd_is_equal(ibd_segs, true_segs)
# Min length = 0.5
def test_length(self):
ibd_segs = find_ibd(
self.ts, sample_pairs=[(0, 1)], min_length=0.5, compare_lib=True
)
true_segs = {(0, 1): [ibd.Segment(0.4, 1.0, 3)]}
assert ibd_is_equal(ibd_segs, true_segs)
class TestIbdUnrelatedSamples:
#
# 2 3
# | |
# 0 1
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 2 0
0 1 3 1
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
def test_basic(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 1)])
true_segs = {(0, 1): []}
assert ibd_is_equal(ibd_segs, true_segs)
def test_time(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 1)], max_time=1.2)
true_segs = {(0, 1): []}
assert ibd_is_equal(ibd_segs, true_segs)
def test_length(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 1)], min_length=0.2)
true_segs = {(0, 1): []}
assert ibd_is_equal(ibd_segs, true_segs)
class TestIbdNoSamples:
def test_no_samples(self):
#
# 2
# / \
# / \
# / \
# (0) (1)
nodes = io.StringIO(
"""\
id is_sample time
0 0 0
1 0 0
2 0 1
3 0 1
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 2 0
0 1 3 1
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
with pytest.raises(ValueError):
ibd.IbdFinder(ts, sample_pairs=[(0, 1)])
class TestIbdSamplesAreDescendants:
#
# 4 5
# | |
# 2 3
# | |
# 0 1
#
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 1
3 1 1
4 0 2
5 0 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0 1 2 0
0 1 3 1
0 1 4 2
0 1 5 3
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
def test_basic(self):
ibd_segs = find_ibd(
self.ts, sample_pairs=[(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
)
true_segs = {
(0, 1): [],
(0, 2): [ibd.Segment(0.0, 1.0, 2)],
(0, 3): [],
(1, 2): [],
(1, 3): [ibd.Segment(0.0, 1.0, 3)],
(2, 3): [],
}
assert ibd_is_equal(ibd_segs, true_segs)
def test_input_sample_pairs(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 3), (0, 2), (3, 5)])
true_segs = {
(0, 3): [],
(0, 2): [ibd.Segment(0.0, 1.0, 2)],
(3, 5): [ibd.Segment(0.0, 1.0, 5)],
}
assert ibd_is_equal(ibd_segs, true_segs)
class TestIbdDifferentPaths:
#
# 4 | 4 | 4
# / \ | / \ | / \
# / \ | / 3 | / \
# / \ | 2 \ | / \
# / \ | / \ | / \
# 0 1 | 0 1 | 0 1
# | |
# 0.2 0.7
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 0 1
3 0 1.5
4 0 2.5
"""
)
edges = io.StringIO(
"""\
left right parent child
0.2 0.7 2 0
0.2 0.7 3 1
0.0 0.2 4 0
0.0 0.2 4 1
0.7 1.0 4 0
0.7 1.0 4 1
0.2 0.7 4 2
0.2 0.7 4 3
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
def test_defaults(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 1)])
true_segs = {
(0, 1): [
ibd.Segment(0.0, 0.2, 4),
ibd.Segment(0.7, 1.0, 4),
ibd.Segment(0.2, 0.7, 4),
]
}
assert ibd_is_equal(ibd_segs, true_segs)
def test_time(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 1)], max_time=1.8)
true_segs = {(0, 1): []}
assert ibd_is_equal(ibd_segs, true_segs)
def test_length(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 1)], min_length=0.4)
true_segs = {(0, 1): [ibd.Segment(0.2, 0.7, 4)]}
assert ibd_is_equal(ibd_segs, true_segs)
# This is a situation where the Python and the C libraries agree,
# but aren't doing as expected.
@pytest.mark.xfail
def test_input_sample_pairs(self):
ibd_f = ibd.IbdFinder(self.ts, sample_pairs=[(0, 1), (2, 3), (1, 3)])
ibd_segs = ibd_f.find_ibd_segments()
ibd_segs = convert_ibd_output_to_seglists(ibd_segs)
true_segs = {
(0, 1): [
ibd.Segment(0.0, 0.2, 4),
ibd.Segment(0.7, 1.0, 4),
ibd.Segment(0.2, 0.7, 4),
],
(2, 3): [ibd.Segment(0.2, 0.7, 4)],
}
ibd_segs = find_ibd(
self.ts,
sample_pairs=[(0, 1), (2, 3)],
compare_lib=True,
print_c=False,
print_py=False,
)
assert ibd_is_equal(ibd_segs, true_segs)
class TestIbdDifferentPaths2:
#
# 5 |
# / \ |
# / 4 | 4
# / / \ | / \
# / / \ | / \
# / / \ | 3 \
# / / \ | / \ \
# 0 1 2 | 0 2 1
# |
# 0.2
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 0 1
4 0 2.5
5 0 3.5
"""
)
edges = io.StringIO(
"""\
left right parent child
0.2 1.0 3 0
0.2 1.0 3 2
0.0 1.0 4 1
0.0 0.2 4 2
0.2 1.0 4 3
0.0 0.2 5 0
0.0 0.2 5 4
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
def test_defaults(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(1, 2)])
true_segs = {
(1, 2): [ibd.Segment(0.0, 0.2, 4), ibd.Segment(0.2, 1.0, 4)],
}
assert ibd_is_equal(ibd_segs, true_segs)
class TestIbdPolytomies:
#
# 5 | 5
# / \ | / \
# 4 \ | 4 \
# /|\ \ | /|\ \
# / | \ \ | / | \ \
# / | \ \ | / | \ \
# / | \ \ | / | \ \
# 0 1 2 3 | 0 1 3 2
# |
# 0.3
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 1 0
2 1 0
3 1 0
4 0 2.5
5 0 3.5
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 1.0 4 0
0.0 1.0 4 1
0.0 0.3 4 2
0.3 1.0 4 3
0.3 1.0 5 2
0.0 0.3 5 3
0.0 1.0 5 4
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
def test_defaults(self):
ibd_segs = find_ibd(
self.ts, sample_pairs=[(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
)
true_segs = {
(0, 1): [ibd.Segment(0, 1, 4)],
(0, 2): [ibd.Segment(0, 0.3, 4), ibd.Segment(0.3, 1, 5)],
(0, 3): [ibd.Segment(0, 0.3, 5), ibd.Segment(0.3, 1, 4)],
(1, 2): [ibd.Segment(0, 0.3, 4), ibd.Segment(0.3, 1, 5)],
(1, 3): [ibd.Segment(0, 0.3, 5), ibd.Segment(0.3, 1, 4)],
(2, 3): [ibd.Segment(0.3, 1, 5), ibd.Segment(0, 0.3, 5)],
}
assert ibd_is_equal(ibd_segs, true_segs)
def test_time(self):
ibd_segs = find_ibd(
self.ts,
sample_pairs=[(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)],
max_time=3,
)
true_segs = {
(0, 1): [ibd.Segment(0, 1, 4)],
(0, 2): [ibd.Segment(0, 0.3, 4)],
(0, 3): [ibd.Segment(0.3, 1, 4)],
(1, 2): [ibd.Segment(0, 0.3, 4)],
(1, 3): [ibd.Segment(0.3, 1, 4)],
(2, 3): [],
}
assert ibd_is_equal(ibd_segs, true_segs)
def test_length(self):
ibd_segs = find_ibd(
self.ts,
sample_pairs=[(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)],
min_length=0.5,
)
true_segs = {
(0, 1): [ibd.Segment(0, 1, 4)],
(0, 2): [ibd.Segment(0.3, 1, 5)],
(0, 3): [ibd.Segment(0.3, 1, 4)],
(1, 2): [ibd.Segment(0.3, 1, 5)],
(1, 3): [ibd.Segment(0.3, 1, 4)],
(2, 3): [ibd.Segment(0.3, 1, 5)],
}
assert ibd_is_equal(ibd_segs, true_segs)
def test_input_sample_pairs(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 1), (0, 3)])
true_segs = {
(0, 1): [ibd.Segment(0.0, 1.0, 4)],
(0, 3): [ibd.Segment(0.3, 1.0, 4), ibd.Segment(0.0, 0.3, 5)],
}
assert ibd_is_equal(ibd_segs, true_segs)
def test_duplicate_input_sample_pairs(self):
with pytest.raises(tskit.LibraryError):
self.ts.tables.find_ibd([(0, 1), (0, 1)])
with pytest.raises(tskit.LibraryError):
self.ts.tables.find_ibd([(0, 1), (1, 0)])
class TestIbdInternalSamples:
#
#
# 3
# / \
# / 2
# / \
# 0 (1)
nodes = io.StringIO(
"""\
id is_sample time
0 1 0
1 0 0
2 1 1
3 0 2
"""
)
edges = io.StringIO(
"""\
left right parent child
0.0 1.0 2 1
0.0 1.0 3 0
0.0 1.0 3 2
"""
)
ts = tskit.load_text(nodes=nodes, edges=edges, strict=False)
def test_defaults(self):
ibd_segs = find_ibd(self.ts, sample_pairs=[(0, 2)])
true_segs = {
(0, 2): [ibd.Segment(0, 1, 3)],
}
assert ibd_is_equal(ibd_segs, true_segs)
class TestIbdRandomExamples:
"""
Randomly generated test cases.
"""
def test_random_examples(self):
for i in range(1, 50):
ts = msprime.simulate(sample_size=10, recombination_rate=0.3, random_seed=i)
verify_equal_ibd(ts)
# Finite sites
def sim_finite_sites(self, random_seed, dtwf=False):
seq_length = int(1e5)
positions = random.sample(range(1, seq_length), 98) + [0, seq_length]
positions.sort()
rates = [random.uniform(1e-9, 1e-7) for _ in range(100)]
r_map = msprime.RecombinationMap(
positions=positions, rates=rates, num_loci=seq_length
)
if dtwf:
model = "dtwf"
else:
model = "hudson"
ts = msprime.simulate(
sample_size=10,
recombination_map=r_map,
Ne=10,
random_seed=random_seed,
model=model,
)
return ts
def test_finite_sites(self):
for i in range(1, 11):
ts = self.sim_finite_sites(i)
verify_equal_ibd(ts)
def test_dtwf(self):
for i in range(1000, 1010):
ts = self.sim_finite_sites(i, dtwf=True)
verify_equal_ibd(ts)
def test_sim_wright_fisher_generations(self):
# Uses the bespoke DTWF forward-time simulator.
for i in range(1, 6):
number_of_gens = 10
tables = wf.wf_sim(10, number_of_gens, deep_history=False, seed=i)
tables.sort()
ts = tables.tree_sequence()
verify_equal_ibd(ts)
| true |
9544988873297909fd565bc45a813fcf7f8b24db | Python | gebeto/nulp | /_parsing/formatter.py | UTF-8 | 632 | 2.515625 | 3 | [
"MIT"
] | permissive | import requests
from bs4 import BeautifulSoup
import io
ID = 166770
# Read the saved page; a context manager closes the handle (the original
# left the file object dangling).
with io.open("{}.html".format(ID), "r", encoding="utf-8") as src_file:
    data = src_file.read()
soup = BeautifulSoup(data, 'html.parser')
formatted = soup.prettify()
imgs = soup.find_all('img')
for img in imgs:
    # Bug fix: ``img.src`` asks BeautifulSoup for a child <src> *tag* (always
    # None for an <img>), not the attribute; ``img.get('src')`` reads the
    # actual src attribute, so the branch can now actually run.
    if img.get('src'):
        # img.parent.insert(img.parent.index(img)+1, Tag(soup, 'span', text='HELLO'))
        img.parent.insert(img.parent.index(img)+1, 'HELLO OWORDD')
        # formatted = formatted.replace(str(img), "".format(img.src.split('/')[-1], img.src))
    # Bug fix: ``print str(img)`` is Python-2-only syntax; the call form is
    # valid on both Python 2 and 3.
    print(str(img))
with io.open("_{}.html".format(ID), "w", encoding="utf-8") as f:
    # f.write(formatted)
    f.write(soup.prettify())
def smallest_window_to_be_sorted(nums: list):
    """
    Return ``(left, right)`` -- the bounds of the smallest window that, once
    sorted, leaves the whole list sorted. Both are ``None`` when the input
    is already sorted (or empty).
    """
    size = len(nums)
    left = right = None
    # Forward sweep: the last index falling below the running max ends the window.
    running_max = -float("inf")
    for index, value in enumerate(nums):
        running_max = max(running_max, value)
        if value < running_max:
            right = index
    # Backward sweep: the first index rising above the running min starts it.
    running_min = float("inf")
    for index in range(size - 1, -1, -1):
        running_min = min(running_min, nums[index])
        if nums[index] > running_min:
            left = index
    return left, right
| true |
class Array(object):
    """Fixed-size array backed by a Python list, initialized to zeros.

    Public interface (``sizeOfArray``, ``arrayItems``, ``search``, ``insert``,
    ``delete``) is unchanged from the original.
    """

    def __init__(self, sizeOfArray, arrayType=int):
        # len(range(n)) == n for n >= 0 and 0 for negative n -- the same
        # result as the original len(list(map(arrayType, range(n)))) without
        # building the throwaway list.
        self.sizeOfArray = len(range(sizeOfArray))
        self.arrayItems = [arrayType(0)] * sizeOfArray  # initialize array with zeroes

    def __str__(self):
        return ' '.join([str(i) for i in self.arrayItems])

    def search(self, keyToSearch):
        """Linear search; return the first index holding the key, or -1."""
        for i in range(self.sizeOfArray):
            if self.arrayItems[i] == keyToSearch:
                return i  # index at which element/ key was found
        return -1  # if key not found, return -1

    def insert(self, keyToInsert, position):
        """Insert ``keyToInsert`` at ``position``, shifting elements right.

        The array size is fixed, so the last element is dropped. Out-of-range
        positions just print the size (original behavior preserved).
        """
        if self.sizeOfArray > position:
            for i in range(self.sizeOfArray - 2, position - 1, -1):
                self.arrayItems[i + 1] = self.arrayItems[i]
            self.arrayItems[position] = keyToInsert
        else:
            print('Array size is:', self.sizeOfArray)

    def delete(self, keyToDelete, position):
        """Remove the element at ``position`` by shifting elements left.

        NOTE(review): ``keyToDelete`` is accepted but never used (deletion is
        purely positional); kept for interface compatibility. The final slot
        retains its previous value, as before.
        """
        if self.sizeOfArray > position:
            for i in range(position, self.sizeOfArray - 1):
                self.arrayItems[i] = self.arrayItems[i + 1]
        else:
            print('Array size is:', self.sizeOfArray)
"""
1. Search
2. insert
3. Delete
"""
a = Array(10, int)
#Search
index = a.search(0)
# print(index)
#Insert
a.insert(1,2)
a.insert(2,3)
a.insert(3,4)
print(a)
#Delete
a.delete(3,4)
print(a)
#OUTPUT:-
# 0 0 1 2 3 0 0 0 0 0
# 0 0 1 2 0 0 0 0 0 0 | true |
a47fade7b799c6d21498d2c132ab8149284904dd | Python | BrookhavenNationalLaboratory/pyRafters | /pyRafters/handlers/np_handler.py | UTF-8 | 8,140 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | """
A set of sources and sinks for handling in-memory nparrays
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import range
import numpy as np
from ..handler_base import (DistributionSource, DistributionSink,
require_active, ImageSink,
ImageSource, FrameSink, FrameSource)
class np_dist_source(DistributionSource):
"""
A source for reading distribution data out of csv (or tab or what ever)
separated files.
"""
# local stuff
def __init__(self, edges, vals):
"""
Wrapper for in-memory work
Parameters
----------
edges : nparray
The bin edges
vals : nparray
The bin values
"""
# base stuff
super(np_dist_source, self).__init__()
# np stuff, make local copies
self._edges = np.array(edges)
self._vals = np.array(vals)
# sanity checks
if self._edges.ndim != 1:
raise ValueError("edges must be 1D")
if self._vals.ndim != 1:
raise ValueError("vals must be 1D")
# distribution stuff
if (len(edges) - 1) == len(vals):
self._right = True
else:
raise ValueError("the length of `edges` must be " +
"one greater than the length of the vals. " +
"Not len(edges): {el} and len(vals): {vl}".format(
el=len(edges), vl=len(vals)))
@require_active
def values(self):
return self._vals
@require_active
def bin_edges(self):
return self._edges
@require_active
def bin_centers(self):
return self._edges[:-1] + np.diff(self._edges)
@property
def kwarg_dict(self):
md = super(np_dist_source, self).kwarg_dict
md.update({'edges': self._edges,
'vals': self._vals})
return md
class np_dist_sink(DistributionSink):
"""
A sink for writing distribution data to memory
"""
def __init__(self):
# base stuff
super(np_dist_sink, self).__init__()
self._vals = None
self._edges = None
# np parts
@require_active
def write_dist(self, edges, vals, right_edge=False):
self._edges = np.array(edges)
self._vals = np.array(vals)
@property
def kwarg_dict(self):
return super(np_dist_sink, self).kwarg_dict
def make_source(self, klass=None):
if klass is None:
klass = np_dist_source
else:
raise NotImplementedError("have not implemented class selection")
return klass(self._edges, self._vals)
_dim_err = ("wrong dimensions, data_array should have ndim = {fd} " +
"or {fdp1}, not {ndim}")
class np_frame_source(FrameSource):
"""
A source backed by a numpy arrays for in-memory image work
"""
def __init__(self, data_array=None, frame_dim=None, meta_data=None,
frame_meta_data=None, *args, **kwargs):
"""
Parameters
----------
data_array : ndarray
The image stack
meta_data : dict or None
"""
super(np_frame_source, self).__init__(*args, **kwargs)
if data_array is None:
raise ValueError("data_array must be not-None")
# make a copy of the data
data_array = np.array(data_array)
if frame_dim is None:
frame_dim = data_array.ndim - 1
# if have a non-sensible number of dimensions raise
if data_array.ndim < frame_dim or data_array.ndim > frame_dim + 1:
raise ValueError(_dim_err.format(fd=frame_dim,
fdp1=frame_dim+1,
ndim=data_array.ndim))
# if only one frame, upcast dimensions
elif data_array.ndim == frame_dim:
data_array.shape = (1, ) + data_array.shape
# save the data
self._data = data_array
# keep a copy of the length
self._len = data_array.shape[0]
# deal with set-level meta-data
if meta_data is None:
meta_data = dict()
self._meta_data = meta_data
if frame_meta_data is None:
frame_meta_data = [dict() for _ in range(self._len)]
if len(frame_meta_data) != self._len:
raise ValueError(("number of frames and number of" +
" md dicts must match"))
self._frame_meta_data = frame_meta_data
def __len__(self):
return self._len
@require_active
def get_frame(self, n):
# make a copy of the array before handing it out so we don't get
# odd in-place operation bugs
return np.array(self._data[n])
def get_frame_metadata(self, frame_num, key):
return self._frame_meta_data[frame_num][key]
def get_metadata(self, key):
return self._meta_data[key]
@require_active
def __iter__(self):
# leverage the numpy iterable
return iter(self._data)
@require_active
def __getitem__(self, arg):
# leverage the numpy slicing magic
return self._data[arg]
def kwarg_dict(self):
dd = super(np_frame_source, self).kwarg_dict
dd.update({'data_array': self._data,
'frame_dim': self._data.ndim - 1,
'meta_data': self._meta_data,
'frame_meta_data': self._frame_meta_data})
return dd
class NPImageSource(np_frame_source, ImageSource):
def __init__(self, *args, **kwargs):
ndim = kwargs.pop('frame_dim', 2)
if ndim != 2:
raise RuntimeError("frame_dim should be 2")
kwargs['frame_dim'] = ndim
super(NPImageSource, self).__init__(*args, **kwargs)
_im_dim_error = "img.ndim must equal {snk} not {inp}"
class NPFrameSink(FrameSink):
def __init__(self, frame_dim, *args, **kwargs):
super(NPFrameSink, self).__init__(*args, **kwargs)
self._frame_store = dict()
self._md_store = dict()
self._md = dict()
self._frame_dim = frame_dim
def record_frame(self, img, frame_number, frame_md=None):
if img.ndim != self._frame_dim:
raise ValueError(_im_dim_error.format(self._frame_dim,
img.ndim))
# TODO add checking on shape based on first frame or
# init arg
self._frame_store[frame_number] = img
if frame_md is None:
frame_md = dict()
self._md_store[frame_number] = frame_md
def set_metadata(self, md_dict):
self._md.update(md_dict)
def _clean(self):
# TODO, maybe this should return an empty handler
if len(self._frame_store) == 0:
raise ValueError("did not provide any frames")
frames = np.array(list(
six.iterkeys(self._frame_store)))
if (np.min(frames) != 0 or
np.max(frames) != len(frames) - 1):
raise ValueError("did not provide continuous frames")
data = np.array([self._frame_store[j]
for j in range(len(frames))])
frame_md = [self._md_store[j]
for j in range(len(frames))]
return {'data_array': data,
'frame_dim': self._frame_dim,
'meta_data': self._md,
'frame_meta_data': frame_md}
@property
def kwarg_dict(self):
dd = super(NPFrameSink, self).kwarg_dict
dd['frame_dim'] = self._frame_dim
return dd
def make_source(self):
return np_frame_source(**self._clean())
class NPImageSink(NPFrameSink, ImageSink):
def __init__(self, *args, **kwargs):
ndim = kwargs.pop('frame_dim', 2)
if ndim != 2:
raise RuntimeError("frame_dim should be 2")
kwargs['frame_dim'] = ndim
super(NPImageSink, self).__init__(*args, **kwargs)
def make_source(self):
return NPImageSource(**self._clean())
| true |
151c24c72746add09089e8d9e61891c9372de8e7 | Python | ashkanyousefi/Algorithms_and_Data_Structures | /HW2/week1_basic_data_structures/1_brackets_in_code/check_brackets.py | UTF-8 | 3,656 | 3.921875 | 4 | [] | no_license | # # python3
# from collections import namedtuple
# Bracket = namedtuple("Bracket", ["char", "position"])
# def are_matching(left, right):
# return (left + right) in ["()", "[]", "{}"]
# def find_mismatch(text):
# opening_brackets_stack = []
# for i, next in enumerate(text):
# if next in "([{":
# # Process opening bracket, write your code here
# pass
# if next in ")]}":
# # Process closing bracket, write your code here
# pass
# def main():
# text = input()
# mismatch = find_mismatch(text)
# # Printing answer, write your code here
# if __name__ == "__main__":
# main()
# Initially I have not take a look at the above ready code:
def balance_pranthesis(my_list):
from collections import deque
my_stack=deque()
for i in range(len(my_list)-1):
my_stack.append(my_list[i])
if my_list[i+1]!=')':
my_stack.append(my_list[i+1])
elif my_stack[i]==')':
my_stack.pop(my_list[i+1])
if my_list[i+1]!=']':
my_stack.append(my_list[i+1])
elif my_stack[i]==']':
my_stack.pop(my_list[i+1])
if my_list[i+1]!='}':
my_stack.append(my_list[i+1])
elif my_stack[i]=='}':
my_stack.pop(my_list[i+1])
if len(my_stack)==0:
status='Successful'
elif len(my_stack)!=0:
status='Not - Successful'
return status
# def balance_check(my_list):
# from collections import deque
# my_stack=deque()
# for i, element in enumerate(my_list):
# if element is a closing bracket:
# if closing bracket is consistent with top of stack:
# pop from stack
# else:
# raise error
# else: #if opening bracket is seen:
# push it to the stack
def balance_check(my_list):
from collections import deque
my_stack=deque()
for i,element in enumerate(my_list):
print(i)
print(element)
print(my_stack)
# input()
if element == ')' and my_stack[-1]=='(':
my_stack.pop()
elif element == ')' and my_stack[-1]!='(':
print('There is a problem in {} location'.format(i))
return
elif element == ']' and my_stack[-1]=='[':
my_stack.pop()
elif element == ']' and my_stack[-1]!='[':
print('There is a problem in {} location'.format(i))
return
elif element == '}' and my_stack[-1]=='{':
my_stack.pop()
elif element == '}' and my_stack[-1]!='{':
print('There is a problem in {} location'.format(i))
return
else:
my_stack.append(element)
if my_stack.empty():
print('Success')
def the_second_blance_check(my_list):
from collections import deque
my_stack=deque()
for i,element in enumerate(my_list):
print(i)
print(element)
print(my_stack)
# input()
if element in ['(', '[', '{']:
my_stack.append(element)
elif element == ')' and my_stack[-1]=='(':
my_stack.pop()
elif element == ']' and my_stack[-1]=='[':
my_stack.pop()
elif element == '}' and my_stack[-1]=='{':
my_stack.pop()
else:
print('There is a problem in {} location'.format(i))
return i
if my_stack.empty():
print('Success')
# print(balance_pranthesis('[([()][]]'))
# balance_check('[([()][]]))))')
the_second_blance_check('[([()][]]))))')
| true |
8fb083b80286b7ae4b1c147bbd091e5c41d0b9ef | Python | ddebettencourt/adventofcode2020 | /day6.py | UTF-8 | 600 | 2.921875 | 3 | [] | no_license | data = open("C:\\Users\\djdeb\\Desktop\\Random Stuff\\Advent of Code 2020\\day6input.txt")
str = data.read()
puzzle_array = str.splitlines()
#print(puzzle_array)
alphabet = [0 for i in range(26)]
total_count = 0
num = 0
for line in puzzle_array:
if line == "":
print(alphabet)
total_count += sum([a for a in alphabet if a == num])//num
alphabet = [0 for i in range(26)]
num = 0
else:
num += 1
for char in line:
alphabet[ord(char) - 97] += 1
if alphabet[ord(char) - 97] == 0:
alphabet[ord(char) - 97] = 1
#total_count += sum(alphabet)
print(total_count)
| true |
6e2b573abe43ad7718806696aaf8d934700f8c4d | Python | aga-moj-nick/Python-List | /Exercise 002.py | UTF-8 | 168 | 4 | 4 | [] | no_license | # 2. Write a Python program to multiply all the items in a list.
liczby = [1, 2, 3, 4, 5]
print (2 * liczby)
liczby1 = [1, 2, 3, 4, 5]
print (liczby1 + liczby1)
| true |
bce27ea874eb066d6df04a5fab09617145179c0e | Python | icasarino/ProjectEuler | /Euler18/main.py | UTF-8 | 961 | 2.984375 | 3 | [] | no_license | import triangleList as tl
tvalues = tl.triangle
svalues = tl.copyMatrix(tvalues)
size = len(tvalues)
def calcular():
for i in range(size - 1):
llen = len(tvalues[i])
for j in range(llen):
valor = tvalues[i][j]
if tvalues[i].index(valor) < llen - 1:
valorSig = tvalues[i][j+1]
else:
valorSig = 0
hijoIzq = tvalues[i + 1][j]
hijoDer = tvalues[i + 1][j + 1]
if valorSig >= valor:
hijoDer += valorSig
if hijoIzq == svalues[i + 1][j]:
hijoIzq += valor
else:
hijoDer += valor
if hijoIzq == svalues[i + 1][j]:
hijoIzq += valor
tvalues[i + 1][j] = hijoIzq
tvalues[i + 1][j + 1] = hijoDer
print("Respuesta: ", max(tvalues[size-1]))
calcular()
| true |
c7e84c8af3425affe59bd73121b183acd48f9027 | Python | Ruben-hash/bina-dec-machine | /entier_vers_binaire.py | UTF-8 | 144 | 3.3125 | 3 | [] | no_license | def entier_vers_binaire(n):
b = []
while n > 0:
b.append(n%2)
n = n //2
b.reverse()
return b
| true |
6c95c09b5b70b2e3db45f28a936d3f3c2d0b12ec | Python | skadldnr89579/Python-Practice | /005 - Data type.py | UTF-8 | 401 | 3.4375 | 3 | [] | no_license | int_data=1 #integer
float_data=3.14 #float
complex_data=1+5j #complex number
str_data1='I love Python' #string (English)
str_data2="파이썬 좋아" #string (Korean)
list_data=[1,2,3] #list
tuple_data=(1,2,3) #tuple
dict_data={0:'False',1:'True'} #dictionary
print(int_data)
print(float_data)
print(complex_data)
print(str_data1)
print(str_data2)
print(list_data)
print(tuple_data)
print(dict_data)
| true |
c230de9d340da44a678ce809cba41cc7a7a99611 | Python | itm-dsc-tap-2020-1/tap-practica-3-web-scraping-mysql-AlondraZM | /practica3.py | UTF-8 | 1,859 | 2.859375 | 3 | [] | no_license | import tkinter as Tk
from tkinter import ttk
from urllib.request import urlopen
from bs4 import BeautifulSoup
import mysql.connector as mysql
conexion = mysql.connect( host='localhost', user= 'alondra', passwd='garu', db='practica3' )
operacion = conexion.cursor()
operacion.execute( "SELECT * FROM web" )
pag_inicial=input('INGRESE URL: ')
url = urlopen(pag_inicial)
print("\nENLACES EXTRAIDOS DE LA PAGINA WEB: " + pag_inicial + "\n")
bs = BeautifulSoup(url.read(), 'html.parser')
lista_enlaces=bs.find_all("a")
for enlaces in lista_enlaces :
for i in lista_enlaces:
try:
url: str=i["href"]
except KeyError:
continue
if not url.startswith("http"):
continue
try:
operacion.execute(f'INSERT INTO web VALUES ("{url}",false)')
except mysql.errors.IntegrityError:
continue
print("href: {}".format(enlaces.get("href")))
print("\nFIN DE ENLACES ENCONTRADOS EN: "+pag_inicial+"\n")
#mostrar tabla
for pagina,status, in operacion.fetchall() :
print (pagina,status)
for pagina,status, in operacion.fetchall():
url=pagina
print("\nENLACES EXTRAIDOS DE LA PAGINA WEB: " + pag_inicial + "\n")
bs = BeautifulSoup(url.read(), 'html.parser')
lista_enlaces=bs.find_all("a")
for enlaces in lista_enlaces :
for i in lista_enlaces:
try:
url: str=i["href"]
except KeyError:
continue
if not url.startswith("http"):
continue
try:
operacion.execute(f'INSERT INTO web VALUES ("{url}",false)')
except mysql.errors.IntegrityError:
continue
print("href: {}".format(enlaces.get("href")))
print("\nFIN DE ENLACES ENCONTRADOS EN: "+pag_inicial+"\n")
conexion.close()
'''
| true |
6716714bac4e840e993b50155e9771a1004da091 | Python | ROHINI-23/Patterns | /Pyramid_Shape_Reverse.py | UTF-8 | 183 | 3.484375 | 3 | [] | no_license | n = int(input())
k = n
for i in range(n,0,-1):
for j in range(0,n-i):
print(end=" ")
for j in range(0,k):
print("*", end=" ")
k = k-1
print() | true |
1468f2752363cd389dc06c3fda137eae10f46ec0 | Python | MarianDanaila/Competitive-Programming | /Leetcode Contests/Biweekly Contest 24/Find the Minimum Number of Fibonacci Numbers Whose Sum Is K.py | UTF-8 | 397 | 3.296875 | 3 | [] | no_license | class Solution:
def findMinFibonacciNumbers(self, k: int) -> int:
stack = []
fib1 = 0
fib2 = 1
while fib2 <= k:
stack.append(fib2)
fib1, fib2 = fib2, fib1 + fib2
count = 0
while k > 0:
if stack[-1] <= k:
k -= stack[-1]
count += 1
stack.pop()
return count
| true |
aa00d2aeaad4ead38c2aaf5eb6cab0471f59a9c8 | Python | luckyboy1220/tutorial | /advertise/make_sample.py | UTF-8 | 2,239 | 2.546875 | 3 | [] | no_license | # encoding=utf-8
"""
@author : peng
"""
from feature import Feature,FeatureType
import logging
import pandas as pd
BIN_COLS = ['item_id', 'item_brand_id', 'shop_id']
VAL_COLS = ['item_posrate', 'recent_15minutes', 'shop_score_delivery']
def init_feature_list():
logging.info("init feature list")
buf = []
for col in BIN_COLS:
buf.append( Feature(name=col, prefix=col,startid=1,type= FeatureType.BIN, drop=False))
for col in VAL_COLS:
buf.append( Feature(name=col, prefix=col,startid=1,type= FeatureType.VAL, drop=False))
return buf
def fill_feature_dict(fealist , df):
logging.info('fill feature dict')
map = {}
for f in fealist:
map[f.prefix] = f
# cols = ['adgroup_id' , 'pid', 'cate_id', 'campaign_id', 'customer','brand','cms_segid', 'cms_group_id']
for col in BIN_COLS:
for v in df[col].unique():
fs = '{0}={1}'.format(col, v)
if col in map:
fea = map[col] #type:Feature
fea.tryAdd(col, fs)
start = 1
for f in fealist: # type:Feature
start = f.alignFeatureID(start)
logging.info(f.coverRange())
return start
def make_sample(fealist ,df, y, qidvals ):
logging.info('make sample')
map = {}
for f in fealist:
map[f.prefix] = f
cols = df.columns.values
dvals = df.values
r, c = df.shape
k = 0
for val in dvals:
rbuf = []
for i in range(0 , c):
col = cols[i]
v = val[i]
fs = '{0}={1}'.format(col, v)
if col in map:
fea = map[col]
rbuf.append( fea.transform(col ,fs))
rbuf.sort(key=lambda x: int(x.split(":")[0]))
yield y[k] , qidvals[k], ' '.join(rbuf)
k +=1
def main():
df = pd.read_csv('./conv_ins.csv',sep=' ')
Y = df['is_trade']
qidvals = df['instance_id']
fealist = init_feature_list()
fill_feature_dict(fealist, df)
with open('final.sample', 'w') as f :
for label , qid, feature in make_sample(fealist, df, Y, qidvals):
f.write('{} qid:{} {}\n'.format(label,qid, feature))
logging.info('sample done')
if __name__ == '__main__':
main()
| true |
e56b4af8f422799ba13e291906da7be583b03cb8 | Python | xslogic/taba | /py/tellapart/taba/taba_event.py | UTF-8 | 1,562 | 2.78125 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | # Copyright 2012 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Class and methods for dealing with Taba Events.
"""
import cjson
TABA_EVENT_IDX_NAME = 0
TABA_EVENT_IDX_TYPE = 1
TABA_EVENT_IDX_VALUE = 2
TABA_EVENT_IDX_TIME = 3
class TabaEvent(object):
"""Simple contained for Taba Events"""
def __init__(self, name, type, value, timestamp):
self.name = name
self.type = type
self.value = value
self.timestamp = timestamp
def SerializeEvent(event):
"""Convert a Taba Event object into a representation that can be serialized
Args:
event - A TabaEvent object.
Returns:
A tuple of (name, type, val, timestamp) for the Event.
"""
return (event.name, event.type, cjson.encode(event.value), event.timestamp)
def DeserializeEvent(val):
"""Convert the output of SerializeEvent() back into a TabaEvent object.
Args:
val - A tuple of (name, type, val, timestamp) for an Event.
Returns:
A corresponding TabaEvent object.
"""
return TabaEvent(val[0], val[1], cjson.decode(val[2]), val[3])
| true |
263a4239a5a018f04995ff46fa21b5d18dbe4cca | Python | PhiCtl/NorthernLights | /utils.py | UTF-8 | 4,907 | 3.125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
#-------------------------------------------------LOSSES-------------------------------------------------------------#
def compute_loss_MSE(y, tx, w, L1_reg, lambda_):
"""Calculate the MSE loss (with L2 regularization if lambda is not 0)"""
e = y - tx.dot(w)
if L1_reg:
return 0.5*np.mean(e**2) + lambda_*np.linalg.norm(w,1)
else:
return 0.5*np.mean(e**2) + lambda_*(np.linalg.norm(w)**2)
def compute_RMSE(y, tx, w, L1_reg, lambda_):
"Calculate the RMSE loss"
return np.sqrt(2*compute_loss_MSE(y, tx, w, L1_reg, lambda_))
def compute_loss_logREG(y, tx, w, lambda_):
"""compute the loss: negative log likelihood."""
sig = sigmoid(tx.dot(w))
loss = y.T.dot( np.log(sig) ) + (1 - y).T.dot( np.log(1-sig) )
return -np.sum(loss) + np.squeeze(w.T.dot(w))*lambda_
def compute_loss_MAE(y, tx, w):
"""Calculate the loss using mae """
e = y - tx @ w
return (1/len(y) * np.sum(np.abs(e), axis = 0) )
def compute_loss(y, tx, w, loss_type = 'MSE', lbd = 0, L1 = False):
"""Compute loss for all"""
if loss_type == 'RMSE':
return compute_RMSE(y, tx, w, L1, lbd)
if loss_type == 'MAE':
return compute_loss_MAE(y, tx, w)
if loss_type == 'logREG':
return compute_loss_logREG(y, tx, w, lbd)
return compute_loss_MSE(y, tx, w, L1, lbd)
#---------------------------------------GRADIENT--------------------------------------------------------#
def compute_LS_gradient(y, tx, w):
"""Compute the gradient of Least squares GD."""
e = y - tx.dot(w)
N = len(e)
return -1/N * tx.T.dot(e)
def calculate_gradient_logREG(y, tx, w, lambda_):
"""compute the gradient of loss."""
sig=sigmoid(tx.dot(w))
grad=tx.T.dot(sig-y) + 2*lambda_*w
return grad
def compute_gradient(y, tx, w, method, lambda_ = 0, batch_s = 1):
"""Compute gradient"""
#least squares SGD uses this gradient in a loop
if method == 2:
return compute_LS_gradient(y, tx, w)
if method == 6:
return calculate_gradient_logREG(y, tx, w, lambda_)
else:
print("Error: no method specified")
#----------------------------------------FEATURES AUGMENTATION ------------------------------------------------------------#
def build_poly(x, degree):
"""polynomial basis functions for input data x, for j=0 up to j=degree."""
phi = np.ones((len(x),1))
for i in range(1, degree+1):
phi = np.c_[phi, np.power(x,i)]
return phi
#------------------------------------------ACCURACY-------------------------------------------------------------#
def predict_labels(y_pred):
y_pred[np.where(y_pred <= 0)] = -1
y_pred[np.where(y_pred > 0)] = 1
return y_pred
def accuracy(y_true, y_pred):
if(len(y_true) != len(y_pred)):
print("Error: sizes don't match")
else:
y_pred = predict_labels(y_pred)
acc = np.equal(y_true, y_pred)
return np.sum(acc)/len(y_true)
#-----------------------------STOCHASTIC GRADIENT DESCENT-----------------------------------------------#
def batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):
"""
Generate a minibatch iterator for a dataset.
Takes as input two iterables (here the output desired values 'y' and the input data 'tx')
Outputs an iterator which gives mini-batches of `batch_size` matching elements from `y` and `tx`.
Data can be randomly shuffled to avoid ordering in the original data messing with the randomness of the minibatches.
Example of use :
for minibatch_y, minibatch_tx in batch_iter(y, tx, 32):
<DO-SOMETHING>
"""
data_size = len(y)
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_y = y[shuffle_indices]
shuffled_tx = tx[shuffle_indices]
else:
shuffled_y = y
shuffled_tx = tx
for batch_num in range(num_batches):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
if start_index != end_index:
yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]
#-----------------------------LOGISTIC REGRESSION ------------------------------------------------------#
def sigmoid(t):
"""apply the sigmoid function on t."""
ft=1/(1+np.exp(-t))
return ft
def learning_by_gradient_descent(y, tx, w, gamma):
"""
Do one step of gradient descent using logistic regression.
Return the loss and the updated w.
"""
# compute the loss:
loss=compute_loss(y, tx, w, 'logREG')
# compute the gradient:
gradient=compute_gradient(y, tx, w, 6)
# update w
w=w-(gamma*gradient)
return loss, w
#--------------------------------------------------------------------------------------------------------#
| true |
651ac4862985a09111b93420632382c760e894a2 | Python | dtgit/dtedu | /Archetypes/ref_graph.py | UTF-8 | 4,919 | 2.765625 | 3 | [
"BSD-3-Clause"
] | permissive | """
Graphviz local object referencs, allows any refrerenceable object to
produce a graph and a client side map. When we can export this as SVG
(and expect clients to handle it) it will be much easier to style to
the look of the site.
Inspired by code from Andreas Jung
"""
from urllib import unquote
from cStringIO import StringIO
from popen2 import popen2
from config import HAS_GRAPHVIZ, GRAPHVIZ_BINARY
def obj2id(obj):
""" convert an issue to an ID """
str = obj.absolute_url(1)
return str2id(str)
def str2id(str):
id = unquote(str)
id = id.replace('-', '_')
id = id.replace('/', '_')
id= id.replace(' ', '_')
id= id.replace('.', '_')
return id
class Node:
""" simple node class """
def __init__(self, inst):
self.id = obj2id(inst)
self.url = inst.absolute_url()
self.uid = inst.UID()
self.title = inst.title_or_id()
self.text = '%s: %s' % (inst.getId(), inst.Title())
def __str__(self):
return self.id
__repr__ = __str__
class Edge:
""" simple edge class """
def __init__(self, src, dest, reference):
self.src = src
self.dest = dest
self.relationship = reference.relationship
def __str__(self):
return '%s -> %s [label="%s", href="%s/reference_graph"]' % (self.src,
self.dest,
self.relationship,
self.src.url)
def __hash__(self):
return hash((self.src.uid, self.dest.uid, self.relationship))
__repr__ = __str__
def local_reference_graph(inst):
nodes = {}
graphs = { 'forward' : {},
'backward' : {},
}
rc = inst.reference_catalog
references = rc.getReferences(inst)
back_references = rc.getBackReferences(inst)
node = Node(inst)
nodes[inst.UID()] = node
for ref in references:
tob = ref.getTargetObject()
target = Node(tob)
if tob.UID() not in nodes:
nodes[tob.UID()] = target
e = Edge(node, target, ref)
graphs['forward'].setdefault(ref.relationship, []).append(e)
for ref in back_references:
sob = ref.getSourceObject()
source = Node(sob)
if sob.UID() not in nodes:
nodes[sob.UID()] = source
e = Edge(source, node, ref)
graphs['backward'].setdefault(ref.relationship, []).append(e)
return graphs
# typo, but keep API
local_refernece_graph = local_reference_graph
def build_graph(graphs, inst):
fp = StringIO()
print >>fp, 'digraph G {'
uid = inst.UID()
seen = {}
shown = {}
for direction, graph in graphs.iteritems(): #forw/back
for relationship, edges in graph.iteritems():
rel_id = "unqualified"
if relationship: rel_id = str2id(relationship)
print >>fp, 'subgraph cluster_%s {' % rel_id
for e in iter(edges):
for n in e.src, e.dest:
if n not in seen:
seen[n] = 1
print >>fp, '\t%s [label="%s", href="%s"' % (n.id,
n.title,
n.url),
if uid == n.uid:
print >>fp, '\tstyle=filled, fillcolor=blue',
print >>fp, ']'
for e in iter(edges):
if e in shown: continue
if direction == "forward":
print >>fp, '\t%s -> %s [label="%s", href="%s/reference_graph"]' % (
e.src,
e.dest,
e.relationship,
e.dest.url)
else:
print >>fp, '\t%s -> %s [label="%s", href="%s/reference_graph"]' % (
e.src,
e.dest,
e.relationship,
e.src.url)
shown[e] = e
print >>fp, '\t}\n'
print >>fp, "}"
return fp.getvalue()
if HAS_GRAPHVIZ:
def getDot(inst):
g = local_reference_graph(inst)
data = build_graph(g, inst)
return data
def get_image(inst, fmt):
data = getDot(inst)
stdout, stdin = popen2('%s -Gpack -T%s' % (GRAPHVIZ_BINARY, fmt))
stdin.write(data)
stdin.close()
output = stdout.read()
return output
def get_png(inst): return get_image(inst, fmt="png")
def get_cmapx(inst):
data = getDot(inst)
stdout, stdin = popen2('%s -Gpack -Tcmapx ' % GRAPHVIZ_BINARY)
stdin.write(data)
stdin.close()
output = stdout.read()
return output
else:
def get_png(inst): return None
def get_cmapx(inst): return None
| true |
a6ec2fe3d941879dcad75c855a5b1926e5ac180c | Python | tony-yuan33/IBI1_2019-20 | /Practical11/24Points.py | UTF-8 | 3,657 | 3.953125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# Use `Fraction` to avoid floaring-point errors
from fractions import Fraction
def is_24_points_solvable(numbers: list) -> (bool, int):
# Pick two numbers, merge them, then put it back to the list
# Do this recursively until there is only one number left.
# If this number equals 24, we get a solution.
#
# The key point here is to ensure that all combinations are
# considered.
#
# For each (i, j) index pair where i < j, we consider the merging of
# the i-th and j-th items. After merging, the i-th position will store
# the new number, and the j-th position will be deleted.
if len(numbers) == 1:
return (numbers[0] == 24, 0)
total_recursion_times = 0
for i in range(len(numbers)):
for j in range(i + 1, len(numbers)):
add_numbers = numbers.copy()
add_numbers[i] = numbers[i] + numbers[j]
del add_numbers[j]
is_solvable, recursion_times = is_24_points_solvable(add_numbers)
total_recursion_times += recursion_times + 1
if is_solvable:
return (True, total_recursion_times)
min1_numbers = numbers.copy()
min1_numbers[i] = numbers[i] - numbers[j]
del min1_numbers[j]
is_solvable, recursion_times = is_24_points_solvable(min1_numbers)
total_recursion_times += recursion_times + 1
if is_solvable:
return (True, total_recursion_times)
min2_numbers = numbers.copy()
min2_numbers[i] = numbers[j] - numbers[i]
del min2_numbers[j]
is_solvable, recursion_times = is_24_points_solvable(min2_numbers)
total_recursion_times += recursion_times + 1
if is_solvable:
return (True, total_recursion_times)
mul_numbers = numbers.copy()
mul_numbers[i] = numbers[i] * numbers[j]
del mul_numbers[j]
is_solvable, recursion_times = is_24_points_solvable(mul_numbers)
total_recursion_times += recursion_times + 1
if is_solvable:
return (True, total_recursion_times)
if numbers[j] != 0:
div1_numbers = numbers.copy()
div1_numbers[i] = Fraction(numbers[i], numbers[j])
del div1_numbers[j]
is_solvable, recursion_times = is_24_points_solvable(div1_numbers)
total_recursion_times += recursion_times + 1
if is_solvable:
return (True, total_recursion_times)
if numbers[i] != 0:
div2_numbers = numbers.copy()
div2_numbers[i] = Fraction(numbers[j], numbers[i])
del div2_numbers[j]
is_solvable, recursion_times = is_24_points_solvable(div2_numbers)
total_recursion_times += recursion_times + 1
if is_solvable:
return (True, total_recursion_times)
return (False, total_recursion_times)
numbers = input("Please input numbers to compute 24: (use ',' to divide them)").split(',')
is_valid_input = True
for i in range(len(numbers)):
if not numbers[i].isnumeric() or not (1 <= int(numbers[i]) <= 23):
print("Invalid input: input should be integers between 1 and 23.")
is_valid_input = False
break
numbers[i] = int(numbers[i])
if is_valid_input:
is_solvable, recursion_times = is_24_points_solvable(numbers)
print("Yes" if is_solvable else "No")
print("Recursion times:", recursion_times) | true |
9cfabec9508142e7549a617b44ac5cfb2154ce8e | Python | turicfr/wikia-chatbot | /plugins/tell.py | UTF-8 | 5,935 | 2.53125 | 3 | [] | no_license | import json
from datetime import datetime
from contextlib import contextmanager
from chatbot.users import User
from chatbot.plugins import Plugin, Command, Argument
@Plugin()
class TellPlugin:
def __init__(self):
self.client = None
self.logger = None
self.just_joined = set()
@staticmethod
@contextmanager
def open_tell(write=True):
try:
with open("tell.json", encoding="utf-8") as tell_file:
tell = json.load(tell_file)
except (FileNotFoundError, json.decoder.JSONDecodeError):
tell = {}
try:
yield tell
finally:
if write:
with open("tell.json", "w", encoding="utf-8") as tell_file:
json.dump(tell, tell_file)
def on_load(self, client, logger):
self.client = client
self.logger = logger
def on_join(self, data):
self.just_joined.add(data["attrs"]["name"])
def on_message(self, data):
username = data["attrs"]["name"]
if username not in self.just_joined:
return
self.just_joined.remove(username)
with self.open_tell() as tell:
for message in tell.get(username.lower(), []):
if "delivered" not in message:
self.client.send_message(
f'{username}, {message["from"]} wanted to tell you @ '
f'{datetime.utcfromtimestamp(message["timestamp"]):%Y-%m-%d %H:%M:%S} UTC: {message["message"]}'
)
message["delivered"] = datetime.utcnow().timestamp()
@Command(
sender=Argument(implicit=True),
timestamp=Argument(implicit=True),
target=Argument(type=User),
message=Argument(rest=True),
)
def tell(self, sender, timestamp, target, message):
"""Deliver an offline user a message."""
if target == sender:
self.client.send_message(f"{sender}, you can't leave a message to yourself.")
return
if target == self.client.user:
self.client.send_message(f"{sender}, thank you for the message!")
return
if target.connected:
self.client.send_message(f"{target} is already here.")
return
with self.open_tell() as tell:
messages = list(filter(lambda m: m["from"] != sender.name, tell.get(target.name.lower(), [])))
messages.append({
"from": sender.name,
"message": message,
"timestamp": timestamp.timestamp(),
})
tell[target.name.lower()] = messages
self.client.send_message(f"I'll tell {target} that the next time I see them.")
@Command(sender=Argument(implicit=True), target=Argument(type=User, required=False))
def told(self, sender, target=None):
"""Report the status of your pending tell messages."""
if target is None:
response = []
with self.open_tell(write=False) as tell:
for user, messages in tell.items():
if list(filter(lambda m: m["from"] == sender.name and "delivered" not in m, messages)):
response.append(f"there is a message pending from you to {user}.")
if not response:
response = ["you currently don't have tell messages to anyone."]
self.client.send_message("\n".join(f"{sender}, {line}" for line in response))
else:
if target == sender:
self.client.send_message(f"{sender}, you can't tell yourself.")
return
if target == self.client.user:
self.client.send_message(f"{sender}, I can't tell myself.")
return
with self.open_tell() as tell:
messages = tell.get(target.name.lower(), [])
try:
message = next(m for m in messages if m["from"] == sender.name)
except StopIteration:
self.client.send_message(f"{sender}, I've got no message from you to {target}.")
return
text = message["message"][:50]
if len(message["message"]) > 50:
text += "..."
delivered = message.get("delivered")
if delivered is None:
self.client.send_message(
f"{sender}, I haven't been able to deliver your "
f'message "{text}" to {target} yet.'
)
return
messages.remove(message)
if not messages:
del tell[target.name.lower()]
self.client.send_message(
f'{sender}, I delivered your message "{text}" to {target} '
f"on {datetime.utcfromtimestamp(delivered):%Y-%m-%d %H:%M:%S} UTC."
)
@Command(sender=Argument(implicit=True), target=Argument(type=User))
def untell(self, sender, target):
"""Cancel the delivery of a tell message."""
if target == sender:
self.client.send_message(f"{sender}, you can't untell yourself.")
return
if target == self.client.user:
self.client.send_message(f"{sender}, I can't untell myself.")
return
with self.open_tell() as tell:
messages = tell.get(target.name.lower(), [])
try:
message = next(m for m in messages if m["from"] == sender.name)
except StopIteration:
self.client.send_message(f"{sender}, I've got no message from you to {target}.")
return
messages.remove(message)
if not messages:
del tell[target.name.lower()]
self.client.send_message(f"{sender}, I've deleted your message to {target}.")
| true |
e21a64140b36c48725d72f677129eebeab9c5c25 | Python | saadiabayou/Redshift-z | /raie_Hydro.py | UTF-8 | 612 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 20 19:47:10 2021
@author: Saadia Bayou
"""
""" Programme raie_Hydro : calcul de la longueur d'onde """
# Données
evJ=1.6076634e-19 # Joules -> 1 electronvolt vaut 1,6076634.10e-19 Joules
RH=1.10e7 # RH = 1.10e7 m-1
h=6.63e-34 # h = 6.63e-34 m2.kg.s-1
c=3.00e8 # c=3.00e08 m.s-1
#lambdas=[]
#Energies=[]
n1=int(input("Entrer la valeur du niveau initial : n1 = "))
n2=int(input("Entrer la valeur du niveau final: n2 = "))
def raie_H(n1,n2):
""" Calcul de la longuer d'onde"""
return 1/(RH*((1/(n1**2))-(1/(n2**2))))
raie_H(n1,n2) | true |
95233a8286160a56621528cc8bf47b090712b974 | Python | maxpipoka/seminario1 | /.vscode/TUTI - Cardozo Roque Martin - TP Integrador.py | UTF-8 | 14,447 | 3.421875 | 3 | [] | no_license | '''
El INYM desea generar una solución que permita modernizar el monitoreo de plantaciones de sus productores
asociados. Para ello, se desea implementar un sistema de monitoreo con dispositivos de tipo IoT. El sistema
se compone de un conjunto de dispositivos de los cuales se conocen su ID, descripción, zona de despliegue
(un valor alfanumérico) y ubicación (formada por las coordenadas de latitud y longitud). Cada dispositivo
tiene un conjunto de tipo de sensores asociados, de cada sensor se tiene un ID, una descripción y una unidad
de medida. El sistema debe llevar un registro de los valores obtenidos por los sensores, para ello se desea
almacenar los datos de que tipo de sensor realizó la lectura, en qué fecha y hora y el valor sensado. Se debe
considerar que todo dispositivo pertenece a una organización, de la cual se conoce su CUIT y razón social.
Consigna: Tomando en cuenta los contenidos vistos en este ciclo de tutorías se deberá realizar lo siguiente:
● Desarrollar un CRUD para gestionar los datos de los dispositivos mencionados en el escenario anterior.
Adaptar los datos para que cada dispositivo incorpore un sensor de humedad asociado (como atributo deberá
registrar el valor de humedad detectado, un % de 0 a 100) y si se encuentra operativo o no (a través de un
campo de estado). Los demás objetos del escenario no deben ser implementados. Ver figura 1 para un diagrama
de la clase a implementar.
● Algunas características del aplicativo a desarrollar se mencionan a continuación:
○ Deberán estar implementadas las operaciones de carga, impresión, modificación y eliminación de
dispositivos (incluyendo los campos nuevos del punto anterior).
○ Deberá contar con un menú para su operatoria mediante la consola / terminal.
○ Los datos de los diferentes dispositivos podrán ser almacenados en cualquier estructura de datos según
se considere oportuno.
○ Deberá contar con una clase Dispositivos a modo de diseño de los datos con los que se va a trabajar.
○ La clase Dispositivos deberá integrar los getters para todos los atributos de la misma. Además de una
implementación del método __str__() para poder imprimir por la salida estándar a cada objeto.
○ La aplicación deberá integrar dos operaciones según el siguiente detalle:
■ Una que permita cargar los valores del sensor de humedad de cada dispositivo que se encuentre con
estado activo. Para este caso deberá integrar a la clase un método setValorHumedad(valor), para ello
puede tomar como ejemplo el código que se muestra en la figura 2.
■ Otra operación que sobre el conjunto de dispositivos cuya valor de humedad se haya cargado en el
paso anterior se pueda detectar e informar a aquellos en los que el valor de humedad censado sea
inferiora un valor límite que deberá ser solicitado al usuario.
/////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////// INTERPRETADOR PYTHON 3.8.3 //////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////'''
import os
class Dispositivos:
def __init__(self, idd, descripcion, zonaDespliegue, ubicacion, valorHumedad, estado):
self.idd = idd
self.descripcion = descripcion
self.zonaDespliegue = zonaDespliegue
self.ubicacion = ubicacion
self.valorHumedad = valorHumedad
self.estado = estado
def __str__(self):
return f('ID: {self.idd} - ZD: {self.zonaDespliegue} - Ubic.: {self.ubicacion} - H: {self.valorHumedad} - Est.: {self.estado}')
def getId(self):
return self.idd
def getDescripcion(self):
return self.descripcion
def getZonaDespliegue(self):
return self.zonaDespliegue
def getUbicacion(self):
return self.ubicacion
def getValorHumedad(self):
return self.valorHumedad
def getEstado(self):
return self.estado
def setValorHumedad(self, humedad):
self.valorHumedad = humedad
def altaDispositivo(datos):
''' Para el alta de un dispositivo, se piden datos al usuario que se guardan en variables locales,
luego se evalua el input sobre el ESTADO para estandarizar el texto guardado en el objeto.
Finalmente se guarda en una instancia de la clase y se mete dentro de la lista que va guardando
todos los objetos y se devuelve al menu principal'''
carga = 'S'
while (carga == 'S'): #Se va a loopear la carga hasta que el usuario ponga en N la condicion.
borrarPantalla()
print(f'// Alta de nuevo dispositivo----------')
iddT = int(input(f'Ingrese el ID: #'))
descT = input(f'Ingrese la descripcion: ')
zonaDesT = input(f'Ingrese la zona de despliegue: ')
latT = input(f'Ingrese la latitud de ubicacion: ')
longT = input(f'Ingrese la longitud de ubicacion: ')
estadoADT = input(f'Ingrese el estado del dispositivo [A] Activo / Deshabilitado [D]: ').upper()
if (estadoADT == 'A'):
estadoT = 'ACTIVO'
elif (estadoADT == 'D'):
estadoT = 'DESHABILITADO'
ubicacionT = F'{latT},{longT}'
nDispositivo = Dispositivos(idd=iddT, descripcion=descT, zonaDespliegue=zonaDesT, ubicacion=ubicacionT, valorHumedad='', estado=estadoT)
datos.append(nDispositivo)
carga = input(f'///// Desea dar de alta otro dispositivo? S/N: ').upper()
print(f'')
print(f'')
return datos
def listarDispositivos(datos):
''' Listado de los registros cargados, se recorre la lista plasmando en pantalla los atributos del objeto
en cada iteracion. No realiza ninguna modificacion sobre la lista recibida'''
borrarPantalla()
i= 0
print(f'')
print(f'')
print(f'/// Listado de dispositivos registrados----------')
for nDispositivo in datos:
print(f'#{i}: Id Disp.: {nDispositivo.getId()} - Descr.: {nDispositivo.getDescripcion()} - Zona: {nDispositivo.getZonaDespliegue()} - Ub: {nDispositivo.getUbicacion()} - Val.Hum: {nDispositivo.getValorHumedad()} - Estado: {nDispositivo.getEstado()}')
i += 1
print(f'')
print(f'')
def actualizarDispositivo(datos):
''' Primero se llama al listado de registros, se le pide al usuario especifique que registro se va a modificar.
Se le pide al usuario los campos que se guardan en variables locales, se instancia la clase, y se actualiza
el registro en la lista con la nueva instancia. Finalmente se lista como quedó y se devuelve al menú'''
listarDispositivos(datos)
print(f'/// Modificacion de dispositivo registrado----------')
aModificar = int(input(f'/// Seleccione el dispositivo a modificar: #'))
temporal = datos[aModificar]
iddT = temporal.getId()
print(f'/ Dispositivo ID: {temporal.getId()}')
descT = input(f'/ Ingrese la nueva descripcion: ')
zonaDesT = input(f'Ingrese la nueva zona de despliegue: ')
latT = input(f'Ingrese la nueva latitud de ubicacion: ')
longT = input(f'Ingrese la nueva longitud de ubicacion: ')
estadoT = input(f'Ingrese el nuevo estado del dispositivo [A]Activo/Deshabilitado[D]: ')
estadoT = estadoT.upper()
ubicacionT = F'{latT},{longT}'
nDispositivo = Dispositivos(idd=iddT, descripcion=descT, zonaDespliegue=zonaDesT, ubicacion=ubicacionT, valorHumedad='', estado=estadoT)
datos[aModificar] = nDispositivo
listarDispositivos(datos)
print(f'')
print(f'')
return datos
def eliminarDispositivo(datos):
''' Para eliminar un registro de la lista de datos, se listan los cargados, se le pide al usuario cual se elimina
se evalua la respuesta de confimacion y se remueve de la lista el registro consignado. Se devuelve al menu el final'''
listarDispositivos(datos)
print(f'/// Borrado de dispositivo registrado----------')
aEliminar = int(input(f'/// Seleccione el dispositivo a eliminar: #'))
nDispositivo = datos[aEliminar]
confirmacion = input(f'--ATENCION! ESTA SEGURO DE BORRAR EL DISPOSITIVO #{aEliminar}? S/N ')
if (confirmacion == 's' or confirmacion == 'S'):
datos.remove(nDispositivo)
else:
print(f'ELIMINACION CANCELADA!')
print(f'')
print(f'')
return datos
def establecerHumedad(datos):
''' Para cargar el valor de la humedad a cada instancia de la clase almacenada. Se listan los registros de la lista
filtrandolos por el metodo get.Estado == ACTIVO, se le pide al usuario elija cual se modificara
se pide el valor, y se actualiza el objeto seleccionado mediante el metodo setValorHumedad.
Se lista el resultado y se devuelve la lista final al menú principal'''
borrarPantalla()
i = 0
print(f'/// Establecer valores de humedad----------')
print(f'// Listando sensores ACTIVOS---------------')
for nDispositivo in datos:
if (nDispositivo.getEstado() == 'ACTIVO'):
print(f'#{i}: Id Disp.: {nDispositivo.getId()} - Descr.: {nDispositivo.getDescripcion()} - Zona: {nDispositivo.getZonaDespliegue()} - Ub: {nDispositivo.getUbicacion()} - Val.Hum: {nDispositivo.getValorHumedad()} - Estado: {nDispositivo.getEstado()}')
i += 1
aTocar = int(input(f'Ingrese el # del sensor a modificar: '))
humedadT = float(input(f'#### Ingrese el valor de HUMEDAD PARA EL SENSOR {aTocar}: '))
datos[aTocar].setValorHumedad(humedadT)
listarDispositivos(datos)
print(f'// DATOS ACTUALIZADOS')
print(f'')
print(f'')
return datos
def humedadInferior(datos, minimo):
''' Para buscar los dispositivos con valor de humedad por debajo de un minimo especificado por el usuario.
Se listan los dispositivos que esten ACTIVOS y tengan un valor de humedad cargado.
antes de invocar esta funcion se invoca otra donde se le pide el valor minimo al usuario
Se itera la lista con los objetos, con las condiciones que el estado sea ACTIVO,
el valor de humedad no sea vacio, y el valor de humedad este por debajo del minimo especificado.
En caso de cumplir la condicion se imprime en pantalla el objeto encontrado.
Tambien hay una variable cumplenCondicion local que va contando si se encuentran dispositivos
que cumplan con los criterios, en caso de no encontrar ninguno se evalua para mostrar un mensaje
en pantalla.'''
borrarPantalla()
print(f'/// Dispositivos bajo el minimo de humedad----------')
print(f'// Listando sensores---------------')
cumplenCondicion = 0 # contador de dispositivos que cumplen con la condicion de estar activo y tener un valor de humedad cargado
print(f'// VALOR MINIMO DE HUMEDAD: {minimo}')
print(f'')
for nDispositivo in datos:
if (nDispositivo.getEstado() == 'ACTIVO' and nDispositivo.getValorHumedad() != '' and nDispositivo.getValorHumedad() < minimo):
print(f'# Id Disp.: {nDispositivo.getId()} - Descr.: {nDispositivo.getDescripcion()} - Zona: {nDispositivo.getZonaDespliegue()} - Ub: {nDispositivo.getUbicacion()} - Val.Hum: {nDispositivo.getValorHumedad()} - Estado: {nDispositivo.getEstado()}')
cumplenCondicion += 1
if (cumplenCondicion == 0):
print(f' ###### NO SE ENCONTRARON DISPOSITIVOS CON VALORES #####')
print(f' ###### POR DEBAJO DEL MINIMO ESPECIFICADO #####')
print(f'')
print(f'')
def ingresarMinimoTemperatura():
''' Solicita al usuario ingrese el dato para buscar como valor minimo de humedad
y realiza el control de que el ingresado no este por debajo y encima de lo
permitido'''
borrarPantalla()
print(f' ###### BUSQUEDA DE DISPOSITIVOS BAJO VALOR DE HUMEDAD MINIMO')
minimo = float(input(f' ### Ingrese el valor de temperatura mínimo a buscar en los dispositivos, 0-100: '))
while (minimo < 0 or minimo > 100):
print(f' ¡¡¡ VALOR INGRESADO INCORRECTO !!! ')
minimo = float(input(f' ### Ingrese el valor de temperatura mínimo a buscar en los dispositivos, 0-100: '))
return minimo
def borrarPantalla(): #Funcion para limpiar pantalla detectando SO
if os.name == "posix":
os.system ("clear")
elif os.name == "ce" or os.name == "nt" or os.name == "dos":
os.system ("cls")
def menu():
print(f'')
print(f'')
datos = []
# 3 registros pre cargados para testeo sin tener que cargarlos cada vez que se ejecuta el programa
nDispositivo = Dispositivos(idd=1, descripcion='EL PRIMERO', zonaDespliegue='A1', ubicacion='45,65', valorHumedad='', estado='ACTIVO')
datos.append(nDispositivo)
nDispositivo = Dispositivos(idd=2, descripcion='EL SEGUNDO', zonaDespliegue='A2', ubicacion='65,78', valorHumedad='', estado='ACTIVO')
datos.append(nDispositivo)
nDispositivo = Dispositivos(idd=3, descripcion='EL TERCERO', zonaDespliegue='A3', ubicacion='12,35', valorHumedad='', estado='DESHABILITADO')
datos.append(nDispositivo)
operacion = 'M'
while (operacion != 'X'):
print(f'// GESTION DE DISPOSITIVOS IOT ----------')
print(f'// [C] Alta de dispositivo')
print(f'// [R] Listado de dispositivos')
print(f'// [U] Actualizacion de dispositivo')
print(f'// [D] Borrado de dispositivo')
print(f'// [H] Establecer valores humedad')
print(f'// [I] Buscar dispositivos bajo el minimo de humedad')
print(f'// [X] Salir')
operacion = input(f'//// Seleccione a operación deseada: ')
operacion = operacion.upper()
if (operacion == 'C'):
datos = altaDispositivo(datos)
elif (operacion == 'R'):
listarDispositivos(datos)
elif (operacion == 'U'):
datos = actualizarDispositivo(datos)
elif (operacion == 'D'):
datos = eliminarDispositivo(datos)
elif (operacion == 'H'):
datos = establecerHumedad(datos)
elif (operacion == 'I'):
minimo = ingresarMinimoTemperatura()
humedadInferior(datos, minimo)
#MAIN ------------------------------------------------------------------------------------------------
menu() | true |
054d492ad0f621bd8552bddf1b83a06341673585 | Python | saikumarballu/PythonPrograms | /dictionaries.py | UTF-8 | 361 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 14:08:42 2019
@author: saib
"""
a = [1,2,3,4,5,6,7,1,1]
b = [1,2,3,4,5,6,7,1,1]
#c={name=['sai','kumar','ballu','hello'],id=[1,2,3,4]}
d= {'name':'sai','idd':1234,'pass':'password','extn':3211}
d['name']='kumar'
d['cel']=87686876
print(d)
del d['name']
#print(d.fromkeys.__doc__)
d.pop('idd')
print(d) | true |
256e2b79ca667397a4add53be98e3ecb77ef3620 | Python | danielleaneal/Neal_Danielle_DIG5508 | /Project-2/Free-Project-2.py | UTF-8 | 1,240 | 3.859375 | 4 | [] | no_license | #Free Project 2, free project 11-1 from Programming Textbook
#putting text on a picture
#%%
from PIL import Image, ImageDraw
def transformimage(text, bgcolor):
img = Image.new('RGB', (100, 30), color = bgcolor)
d = ImageDraw.Draw(img)
d.text ((10,10), text, fill=(255,255,0))
return img
transformimages("Dani Was HERE", "pink")
#putting text on an image on my local computer and making the
#text show up closer to the center of it (because the image is
#larger than before)
#%%
from PIL import Image, ImageDraw
def transformlocalimage(text):
img = Image.open("Project-2\DAWGS.jpg")
width,height = img.size
w = width / 2
h = height / 2
d = ImageDraw.Draw(img)
d.text ((w,h), text, fill=(255,255,0))
return img
transformlocalimage("HI JOHN MURRAY")
#Blurring the Dog Collage
#%%
from PIL import Image, ImageFilter
img = Image.open("Project-2\DAWGS.jpg")
img = img.filter(ImageFilter.GaussianBlur(radius=3))
img.show()
#I ran this code with different numbers in for the "radius,"
#and learned that the radius controls just HOW blurry the photo is
#made to be.
#Rotating an image 90 degrees
#%%
from PIL import Image
img = Image.open("Project-2\DAWGS.jpg")
img.rotate(90).show()
| true |
eb18e168767a06b34775d99e5215888c70074698 | Python | lair60/diyblog | /blog/models.py | UTF-8 | 2,200 | 2.53125 | 3 | [
"MIT"
] | permissive | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
# Create your models here.
class BlogAuthor(models.Model):
user= models.OneToOneField(User,on_delete=models.SET_NULL, null= True)
bio= models.CharField(max_length=100,help_text="Enter a bio about Blog Author")
def get_absolute_url(self):
"""
Retorna la url para acceder a una instancia particular de un autor.
"""
return reverse('blogger-detail', args=[str(self.id)])
def __str__(self):
"""
String para representar el Objeto Modelo
"""
return self.user.username
class Blog(models.Model):
name = models.CharField(max_length=100, help_text="Enter the blog name")
description = models.TextField(max_length=1000, help_text="Enter a description about blog")
author = models.ForeignKey(BlogAuthor, on_delete=models.SET_NULL, null=True)
post_date = models.DateField(null=True, blank=True)
def __str__(self):
"""
String que representa al objeto Book
"""
return self.name
def get_absolute_url(self):
"""
Devuelve el URL a una instancia particular de Book
"""
return reverse('blog-detail', args=[str(self.id)])
class BlogComment(models.Model):
description = models.TextField(max_length=1000, help_text="Enter comment about blog here")
post_date = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
blog = models.ForeignKey('Blog', on_delete=models.CASCADE, null=True)
def __str__(self):
"""
String para representar el Objeto Modelo
"""
return self.description
class Meta:
ordering = ["post_date"]
class TemporalLink(models.Model):
link_temporal = models.CharField(max_length=100)
email_request = models.CharField(max_length=100,default='')
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""
String que representa al objeto Book
"""
return self.link_temporal | true |
befb6568d360d7dd069570ae3273d1a8c01515e8 | Python | dodonmountain/algorithm | /2019_late/20191104/boj_1181_단어정렬.py | UTF-8 | 152 | 3.15625 | 3 | [] | no_license | n = int(input())
arr = set()
for i in range(n):
arr.add(input())
arr = sorted(list(arr))
arr.sort(key= lambda x:(len(x)))
for i in arr:
print(i) | true |
a57e2d2344805eb4c0cbc104ed0139d727b2732b | Python | trancuong95/hoc_git | /some_exercise_other/level_65_01_hay.py | UTF-8 | 162 | 3.515625 | 4 | [] | no_license | def f(n):
if n == 0:
return 0
else:
return f(n-1)+100
# Bài Python 65, Code by Quantrimang.com
n = int(input("Nhập số n>0: "))
print(f(n))
| true |
010525509b2177f3bd7065f63e762209658d6518 | Python | sealove20/Maratona-de-programa-o | /UriJudge/1016.py | UTF-8 | 97 | 3.453125 | 3 | [] | no_license | distancia = int(input())
x = 60
y = 90
tempo = int(distancia/(y-x)*60)
print("%d minutos"%tempo)
| true |
1f9e814ad93adbe11f900155dc2337ad7f7f0c57 | Python | scarlett-kim/bit_seoul | /Study/keras/keras13_shape.py | UTF-8 | 1,038 | 3.0625 | 3 | [] | no_license | # 데이터 shape
import numpy as np
x = np.array([range(1,101), range(711,811), range(100)])
y = np.array(range(101,201))
print(x)
print("transpose하기전" , x.shape) #transpose하기전 (3, 100)
print("t" , y.shape) #(100, )
x= np.transpose(x)
y= np.transpose(y)
print("transpose 하고 난 후" ,x.shape) #transpose 하고 난 후 (100, 3)
#사이킷런사용하여 트레인스플릿으로 슬라이싱저절로 되게 한다
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, shuffle =False, train_size=0.7)
#모델 구성
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
# model.add(Dense(10, input_dim =(3, )))
model.add(Dense(10, input_shape =(3, )))
#(100,10, 3): input_shape(10,3) 행무시 ?
model.add(Dense(5))
model.add(Dense(3))
#컴파일 훈련
model.compile(loss='mse', optimizer='adam', metrics='mae')
model.fit(x,y, epochs=100, validation_split=0.2)
| true |
ddc5caf0412756e86b5f885660155cf820ce8d73 | Python | miguelfdezc/neural-networks-pk | /Class 8/Lab10_Cross-validation/SoftMaxLinear_XValidation.py | UTF-8 | 6,418 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
class SoftMaxLinear:
def __init__(self, inputs_num, outputs_num):
self.inum = inputs_num
self.onum = outputs_num
self.W = (-1 + 2*np.random.rand(inputs_num, outputs_num))/100.0 #neurons as columns
self.b = np.zeros((1, outputs_num)) #horizontal vector
self.probs = None
self.max_epochs = 100
self.eta_max = 0.1
self.eta_min = 0.01
def Forward(self, X): #examples as rows in X
f = np.dot(X, self.W) + self.b
f -= np.max(f, axis=1, keepdims=True) #trick for numerical stability
probs = np.exp(f)
probs /= np.sum(probs, axis=1, keepdims=True)
self.probs = probs
def Test(self, X, ClsIndx):
self.Forward(X)
#data loss: mean cross-entropy loss
ex_num = X.shape[0]
data_loss = -np.log(self.probs[range(ex_num),ClsIndx]).sum()/ex_num
#classification error
predictions = np.argmax(self.probs, axis=1)
errors_num = np.sum(predictions != ClsIndx)
error_rate = errors_num / ex_num
return (data_loss, error_rate, errors_num)
def GetProbs(self):
return self.probs
def GetPredictions(self):
return np.argmax(self.probs, axis=1)
def Update(self, X, ClsIndx, lrate):
self.Forward(X)
#gradients of outputs (class probabilities)
ex_num = X.shape[0]
dprobs = self.probs.copy()
dprobs[range(ex_num), ClsIndx] -= 1.0
dprobs /= ex_num #average over all examples
#gradient of weights and biases
dW = np.dot(X.T, dprobs) # chain rule to calculate gradients
db = np.sum(dprobs, axis=0,keepdims=True)
#update neurons
self.W = self.W - lrate*dW
self.b = self.b - lrate*db
def Learn(self, X, ClsIndx):
for i in range(self.max_epochs):
eta = self.eta_max - (self.eta_max - self.eta_min)*float(i)/self.max_epochs
# print('iteration ',i+1, 'eta=',eta)
self.Update(X, ClsIndx, eta)
###############################################################################
def generate_linear_softmax(inputs_num, outputs_num):
softmax_model = SoftMaxLinear(inputs_num, outputs_num)
softmax_model.eta_max = 0.1
softmax_model.eta_min = 0.01
softmax_model.max_epochs = 200
return softmax_model
###############################################################################
###############################################################################
def split_validation(X, labels, model_generator, split_ratio):
'''
split_ratio - how much of X is send to learning the model; 0 < split_ratio < 1
'''
print('\nStarting split-validation...')
ex_num = X.shape[0] #number of examples
inputs_num = X.shape[1]
outputs_num = len(set(labels)) #number of classes
#split data into two parts
indxs = np.random.rand(ex_num)
trainX = X[indxs<=split_ratio,:]
train_labels = labels[indxs<=split_ratio]
testX = X[indxs>split_ratio,:]
test_labels = labels[indxs>split_ratio]
#get the model and train it
print('Training the model..')
model = model_generator(inputs_num, outputs_num)
model.Learn(trainX, train_labels)
#check the model on train data
print('Checking the model on train data...')
model.Forward(trainX)
ans = model.GetPredictions()
train_error_rate = (ans!=train_labels).sum()/trainX.shape[0]
#check the model on test data
print('Checking the model on test data...')
model.Forward(testX)
ans = model.GetPredictions()
test_error_rate = (ans!=test_labels).sum()/testX.shape[0]
print('Split-validation finished\n')
return (train_error_rate, test_error_rate)
###############################################################################
def cross_validation(X, labels, model_generator, num_folds):
print('\nStarting cross-validation...')
ex_num = X.shape[0] #number of examples
inputs_num = X.shape[1]
outputs_num = len(set(labels)) #number of classes
#split data into num_folds parts
indxs = np.random.randint(num_folds, size=ex_num)
train_errors = []
test_errors = []
for i in range(num_folds):
trainX = X[indxs != i,:]
train_labels = labels[indxs != i]
testX = X[indxs == i,:]
test_labels = labels[indxs == i]
#get the model and train it
print('Training model',i+1,'...')
model = model_generator(inputs_num, outputs_num) #get a new model
model.Learn(trainX, train_labels)
#check the model on train data
print('Checking the model on train data...')
model.Forward(trainX)
ans = model.GetPredictions()
train_error_rate = (ans!=train_labels).sum()/trainX.shape[0]
#check the model on test data
print('Checking the model on test data...')
model.Forward(testX)
ans = model.GetPredictions()
test_error_rate = (ans!=test_labels).sum()/testX.shape[0]
train_errors.append(train_error_rate)
test_errors.append(test_error_rate)
train_errors = np.array(train_errors)
test_errors = np.array(test_errors)
stats = {}
stats['train_errors'] = train_errors
stats['test_errors'] = test_errors
stats['train_error_mean'] = train_errors.mean()
stats['test_error_mean'] = test_errors.mean()
stats['train_error_std'] = train_errors.std()
stats['test_error_std'] = test_errors.std()
print('Cross-validation finished\n')
return stats
###############################################################################
###############################################################################
X = np.loadtxt('iris.csv', dtype='str')
#X = np.loadtxt('pima-diabetes.csv', dtype='str', delimiter=',')
classes = set(X[:,-1])
for clsname, clsindx in zip(classes, range(len(classes))):
print(clsname, clsindx)
X[X==clsname] = clsindx
labels = X[:,-1].astype('int32')
X = X[:,:-1].astype(np.float)
#print(X)
print(X.shape)
#print(labels)
train_error_rate, test_error_rate = split_validation(X, labels, generate_linear_softmax, 0.7)
print('train_error_rate=', train_error_rate)
print('test_error_rate=', test_error_rate)
xval = cross_validation(X, labels, generate_linear_softmax, 10)
for key in xval:
print(key, xval[key],'')
print('end')
| true |
5ac4960cea859fa7f098c787224a854297ec3562 | Python | karpalexander1997org1/FLSpegtransferHO | /utils/CmnUtil.py | UTF-8 | 8,686 | 2.671875 | 3 | [] | no_license | """Shared methods, to be loaded in other code.
"""
import numpy as np
# Key codes treated as "escape" (27 = ASCII ESC; 1048603 presumably an ESC
# code with modifier bits from a GUI/waitKey loop -- TODO confirm against caller).
ESC_KEYS = [27, 1048603]
# 10**6 as a float (unit-scaling constant).
MILLION = float(10**6)
def normalize(v):
    """Return *v* scaled to unit Euclidean (L2) norm.

    A zero vector is divided by the machine epsilon of its dtype instead of
    zero, so the function never raises a division error.
    """
    magnitude = np.linalg.norm(v, ord=2)
    if magnitude == 0:
        magnitude = np.finfo(v.dtype).eps
    return v / magnitude
def LPF(raw_data, fc, dt):
    """First-order (single-pole) IIR low-pass filter.

    raw_data : 1-D sequence of samples
    fc       : cutoff frequency [Hz]
    dt       : sample period [s]
    Returns a new array of the same shape; the first sample passes through
    unchanged, subsequent samples follow y[k] = a*x[k] + (1-a)*y[k-1].
    """
    alpha = 2 * np.pi * fc * dt  # smoothing factor (approximation valid for fc*dt << 1)
    filtered = np.zeros_like(raw_data)
    if len(filtered) > 0:
        filtered[0] = raw_data[0]
    for k in range(1, len(raw_data)):
        filtered[k] = alpha * raw_data[k] + (1 - alpha) * filtered[k - 1]
    return filtered
def euler_to_quaternion(rot, unit='rad'):
    """Convert ZYX euler angles to quaternion(s).

    rot  : array with columns [yaw (Z), pitch (Y), roll (X)]; a single angle
           triple or a stack of rows.
    unit : 'rad' (default) or 'deg' for the input angles.
    Returns [qx, qy, qz, qw] (row per input triple).
    """
    if unit == 'deg':
        rot = np.deg2rad(rot)
    yaw, pitch, roll = rot.T  # yaw (Z), pitch (Y), roll (X)
    cy, sy = np.cos(yaw * 0.5), np.sin(yaw * 0.5)
    cp, sp = np.cos(pitch * 0.5), np.sin(pitch * 0.5)
    cr, sr = np.cos(roll * 0.5), np.sin(roll * 0.5)
    qw = cy * cp * cr + sy * sp * sr
    qx = cy * cp * sr - sy * sp * cr
    qy = sy * cp * sr + cy * sp * cr
    qz = sy * cp * cr - cy * sp * sr
    return np.array([qx, qy, qz, qw]).T
def quaternion_to_euler(q, unit='rad'):
    """Convert quaternion(s) [qx, qy, qz, qw] to ZYX euler angles.

    Accepts a single quaternion or a stack of rows; returns
    [yaw (Z), pitch (Y), roll (X)] per quaternion, in radians by default
    or degrees when unit='deg'.
    """
    qx, qy, qz, qw = np.array(q).T
    # roll: rotation about x
    roll = np.arctan2(2 * (qw * qx + qy * qz), 1 - 2 * (qx * qx + qy * qy))
    # pitch: rotation about y, clamped to +/- pi/2 when |sin(pitch)| >= 1
    sin_pitch = 2 * (qw * qy - qz * qx)
    pitch = np.where(np.abs(sin_pitch) >= np.ones_like(sin_pitch),
                     np.sign(sin_pitch) * (np.pi / 2),
                     np.arcsin(sin_pitch))
    # yaw: rotation about z
    yaw = np.arctan2(2 * (qw * qz + qx * qy), 1 - 2 * (qy * qy + qz * qz))
    if unit == 'deg':
        yaw, pitch, roll = np.rad2deg([yaw, pitch, roll])
    return np.array([yaw, pitch, roll]).T  # [Z, Y, X]
# def quaternion_to_R(q):
# qx, qy, qz, qw = q
# s=np.sqrt(qx*qx + qy*qy + qz*qz + qw*qw)
# r11 = 1-2*s*(qy*qy+qz*qz); r12 = 2*s*(qx*qy-qz*qw); r13 = 2*s*(qx*qz+qy*qw)
# r21 = 2*s*(qx*qy+qz*qw); r22 = 1-2*s*(qx*qx+qz*qz); r23 = 2*s*(qy*qz-qx*qw)
# r31 = 2*s*(qx*qz-qy*qw); r32 = 2*s*(qy*qz+qx*qw); r33 = 1-2*s*(qx*qx+qy*qy)
# R = [[r11, r12, r13], [r21, r22, r23], [r31, r32, r33]]
# return R
def Rx(theta):
    """Rotation matrix about the x-axis.

    For a scalar theta [rad] returns a (3, 3) matrix; for a sequence of
    angles returns a stacked (len(theta), 3, 3) array.
    """
    c, s = np.cos(theta), np.sin(theta)
    if np.size(theta) == 1:
        return np.array([[1, 0, 0],
                         [0, c, -s],
                         [0, s, c]])
    out = np.repeat(np.eye(3)[np.newaxis, :, :], len(theta), axis=0)
    out[:, 1, 1] = c
    out[:, 1, 2] = -s
    out[:, 2, 1] = s
    out[:, 2, 2] = c
    return out
def Ry(theta):
    """Rotation matrix about the y-axis.

    For a scalar theta [rad] returns a (3, 3) matrix; for a sequence of
    angles returns a stacked (len(theta), 3, 3) array.
    """
    c, s = np.cos(theta), np.sin(theta)
    if np.size(theta) == 1:
        return np.array([[c, 0, s],
                         [0, 1, 0],
                         [-s, 0, c]])
    out = np.repeat(np.eye(3)[np.newaxis, :, :], len(theta), axis=0)
    out[:, 0, 0] = c
    out[:, 0, 2] = s
    out[:, 2, 0] = -s
    out[:, 2, 2] = c
    return out
def Rz(theta):
    """Rotation matrix about the z-axis.

    For a scalar theta [rad] returns a (3, 3) matrix; for a sequence of
    angles returns a stacked (len(theta), 3, 3) array.
    """
    c, s = np.cos(theta), np.sin(theta)
    if np.size(theta) == 1:
        return np.array([[c, -s, 0],
                         [s, c, 0],
                         [0, 0, 1]])
    out = np.repeat(np.eye(3)[np.newaxis, :, :], len(theta), axis=0)
    out[:, 0, 0] = c
    out[:, 0, 1] = -s
    out[:, 1, 0] = s
    out[:, 1, 1] = c
    return out
def euler_to_R(euler_angles):
    """Compose ZYX euler angles into rotation matrix/matrices R = Rz @ Ry @ Rx.

    euler_angles : array with columns [theta_z, theta_y, theta_x]; a single
    triple yields a (3, 3) matrix, a stack of rows yields (N, 3, 3).
    """
    tz, ty, tx = euler_angles.T
    return Rz(tz) @ Ry(ty) @ Rx(tx)
# R to ZYX euler angle
def R_to_euler(R):
    """Extract ZYX Euler angles [z, y, x] from a 3x3 rotation matrix.

    Near gimbal lock (cos(y) ~ 0) the yaw angle is unobservable and is
    conventionally returned as 0.
    """
    cos_y = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if cos_y < 1e-6:
        # Singular configuration: fix z = 0 and recover x from the middle row.
        return np.array([0,
                         np.arctan2(-R[2, 0], cos_y),
                         np.arctan2(-R[1, 2], R[1, 1])])
    return np.array([np.arctan2(R[1, 0], R[0, 0]),
                     np.arctan2(-R[2, 0], cos_y),
                     np.arctan2(R[2, 1], R[2, 2])])
def R_to_quaternion(R):
    """Convert a rotation matrix to a quaternion via ZYX Euler angles."""
    return euler_to_quaternion(R_to_euler(R))
def quaternion_to_R(q):
    """Convert a quaternion to a rotation matrix via ZYX Euler angles."""
    return euler_to_R(quaternion_to_euler(q))
# ZYZ euler angle to quaternion
def inclined_orientation(axis_rot, latitude, longitude=0):
    """Build a quaternion from ZYZ Euler angles (longitude, latitude, axis_rot).

    Bug fix: the previous body called ``U.Rz`` / ``U.Ry`` / ``U.R_to_quaternion``,
    but no ``U`` module alias exists here (and ``U`` is also the name of an SVD
    local elsewhere in this file); the same helpers are defined in this module,
    so call them directly.
    """
    R = Rz(longitude).dot(Ry(latitude)).dot(Rz(axis_rot))
    return R_to_quaternion(R)
def transform(points, T):
    """Apply a 4x4 homogeneous transform ``T`` to an (N, 3) array of points."""
    rot = T[:3, :3]
    trans = T[:3, -1]
    # Rotate, then translate: (R p^T)^T + t for every row p of points.
    return rot.dot(points.T).T + trans.T
# Get a rigid transformation matrix from pts1 to pts2
def get_rigid_transform(pts1, pts2):
    """Least-squares rigid transform (rotation + translation) mapping pts1 onto pts2.

    Uses the SVD/Kabsch method on the cross-covariance of the centered point
    sets and returns a 4x4 homogeneous matrix T such that ``transform(pts1, T)``
    best matches ``pts2`` in the least-squares sense. (Note: this is the
    point transform, i.e. the opposite of the coordinate-frame transform.)

    Bug fix: the SVD solution can yield a reflection (det(R) == -1) for
    degenerate or noisy data; the Kabsch determinant correction flips the
    axis of least variance so a proper rotation is always returned.
    """
    pts1 = np.asarray(pts1, dtype=float)
    pts2 = np.asarray(pts2, dtype=float)
    mean1 = pts1.mean(axis=0)
    mean2 = pts2.mean(axis=0)
    centered1 = pts1 - mean1
    centered2 = pts2 - mean2
    H = centered1.T.dot(centered2)  # cross-covariance matrix
    U, S, Vt = np.linalg.svd(H)
    V = Vt.T
    R = V.dot(U.T)
    if np.linalg.det(R) < 0:
        # Reflection case: negate the singular vector of the smallest
        # singular value (last column) and recompute.
        V[:, -1] *= -1
        R = V.dot(U.T)
    t = mean2 - R.dot(mean1)
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T
def minor(arr, i, j):
    """Return a copy of ``arr`` with the i-th row and j-th column removed."""
    arr = np.asarray(arr)
    rows = np.r_[0:i, i + 1:arr.shape[0]]
    cols = np.r_[0:j, j + 1:arr.shape[1]]
    return arr[rows[:, np.newaxis], cols]
def create_waveform(data_range, amp1, amp2, amp3, amp4, freq1, freq2, freq3, freq4, phase, step):
    """Sum four phase-shifted sinusoids over one period, normalize by the
    peak sample, then rescale into ``data_range``. Returns (t, y)."""
    t = np.arange(0, 1, 1.0 / step)
    components = [
        amp * np.sin(2 * np.pi * freq * (t - phase))
        for amp, freq in ((amp1, freq1), (amp2, freq2), (amp3, freq3), (amp4, freq4))
    ]
    waveform = components[0] + components[1] + components[2] + components[3]
    # Normalize so the largest sample is exactly 1, then map [-1, 1] into data_range.
    normalized = waveform / max(waveform)
    lo, hi = data_range[0], data_range[1]
    y = (hi - lo) / 2.0 * normalized + (hi + lo) / 2.0
    return t, y
def fit_ellipse(x, y, method='RANSAC', w=None):
    """Fit an ellipse to 2-D points; returns (cx, cy, a, b, theta).

    Currently disabled: raises NotImplementedError immediately, so everything
    below the raise is dead code kept for reference. ``linear_model`` (sklearn)
    is presumably imported at the top of the file -- verify before re-enabling.
    Expects ``x`` and ``y`` as column vectors (shape (N, 1)) -- TODO confirm.
    """
    raise NotImplementedError  # better to use a method in the OpenCV
    if w is None:
        w = []
    if method=='least_square':
        # Conic model A0*x^2 + 2*B0*x*y + C0*y^2 + 2*D0*x + 2*E0*y = 1.
        A = np.concatenate((x**2, x*y, y**2, x, y), axis=1)
        b = np.ones_like(x)
        # Modify A,b for weighted least squares
        if len(w) == len(x):
            W = np.diag(w)
            A = np.dot(W, A)
            b = np.dot(W, b)
        # Solve by method of least squares
        c = np.linalg.lstsq(A, b, rcond=None)[0].squeeze()
        # Get circle parameters from solution
        A0 = c[0]
        B0 = c[1] / 2
        C0 = c[2]
        D0 = c[3] / 2
        E0 = c[4] / 2
    elif method=='RANSAC':
        # Same conic, rearranged so the intercept absorbs the y term.
        A = np.concatenate((x**2, x*y, y**2, x), axis=1)
        b = -2*y
        ransac = linear_model.RANSACRegressor()
        ransac.fit(A, b)
        c0, c1, c2, c3 = ransac.estimator_.coef_[0]
        c4 = ransac.estimator_.intercept_[0]
        E0 = -1/c4
        A0 = c0*E0
        B0 = c1*E0/2
        C0 = c2*E0
        D0 = c3*E0/2
    else:
        raise ValueError
    # center of ellipse
    cx = (C0*D0 - B0*E0)/(B0**2 - A0*C0)
    cy = (A0*E0 - B0*D0)/(B0**2 - A0*C0)
    # Renormalize the conic about the recovered center.
    temp = 1.0 - A0*cx**2 - 2.0*B0*cx*cy - C0*cy**2 - 2.0*D0*cx - 2.0*E0*cy
    A1 = A0/temp
    B1 = B0/temp
    C1 = C0/temp
    # rotating angle of ellipse
    M = A1**2 + C1**2 + 4*B1**2 - 2*A1*C1
    theta = np.arcsin(np.sqrt((-(C1-A1)*np.sqrt(M) + M)/(2*M)))
    # length of axis of ellipse
    a = np.sqrt(1.0/(A1*np.cos(theta)**2 + 2*B1*np.cos(theta)*np.sin(theta)+C1*np.sin(theta)**2))
    b = np.sqrt(1.0/(A1*np.sin(theta)**2 - 2*B1*np.sin(theta)*np.cos(theta)+C1*np.cos(theta)**2))
    return cx,cy, a,b, theta
if __name__ == '__main__':
    # calculate_transformation()
    # filename = '/home/hwangmh/pycharmprojects/FLSpegtransfer/vision/coordinate_pairs.npy'
    # data = np.load(filename)
    # print(data)
    # Smoke test: align three points with a copy rotated 45 deg about z.
    pts1 = [[0, 1, 0], [1, 0, 0], [0, -1, 0]]
    pts2 = [[-0.7071, 0.7071, 0], [0.7071, 0.7071, 0], [0.7071, -0.7071, 0]]
    T = get_rigid_transform(pts1, pts2)
    print(T)
    # f = 6  # (Hz)
    # A = 1  # amplitude
    # t, waveform = create_waveform(interp=[0.1, 0.5], amp1=A, amp2=A * 3, amp3=A * 4, freq1=f, freq2=f * 1.8,
    #                               freq3=f * 1.4, phase=0.0, step=200)
    # t, waveform = create_waveform(interp=[0.1, 0.5], amp1=A, amp2=A * 1.2, amp3=A * 4.2, freq1=0.8 * f, freq2=f * 1.9,
    #                               freq3=f * 1.2, phase=0.5, step=200)
    # t, waveform = create_waveform(interp=[0.1, 0.5], amp1=A, amp2=A * 1.5, amp3=A * 3.5, freq1=f, freq2=f * 1.8,
    #                               freq3=f * 1.3, phase=0.3, step=200)
ce6bc8a3a9c7f9675c97500bc888881d5b10c676 | Python | clpachec/COMPSCI-175 | /Process_Text.py | UTF-8 | 861 | 3.34375 | 3 | [] | no_license | '''
Created on Sun Mar 13 010:21:32 2016
@author: Arielle
'''
import re
import nltk
def Clean_Text(text: str):
    """
    Return ``text`` with leftover tag fragments (such as ``<p>`` or ``<br/``)
    and stray double quotes removed, collapsing the extra whitespace this
    leaves behind.

    Parameters
    ----------
    text : str
        Text to be cleaned and formatted.

    Returns
    -------
    str
        A formatted and cleaned string. NOTE(review): a tag at the very
        start of the input leaves a single leading space in the result.

    Examples
    --------
    >>> Clean_Text("<p>And upon Future could've Station when.")
    " And upon Future could've Station when."
    """
    # NOTE(review): the "(1)" literal group in the first alternative only
    # matches a literal "1" between two tags -- looks like a typo; confirm.
    result = re.sub('((<\w*>)?(1)(<\w*>)|<\w*)|(\S*>)',' ',text) #Removes tag fragments such as <br/ <p>
    result = re.sub('"',' ',result) #Remove '"' artifact
    result = re.sub('\s\s+',' ',result) #Clean out the extra spaces created by the previous line
    return result
fbc9f6c49868e5911b2dc6a47f5c3a68b2347874 | Python | ksyoung/grasp_post_process | /clean_grid_file.py | UTF-8 | 1,836 | 3 | 3 | [] | no_license | # hacked together code to read, edit, and rewrite a .grd file.
# 'clean' refers to the error where 1.242E-110 is written to file as
# 1.242-110.
# code searches for these, and fixes these
import optparse
import sys
import pdb
# optparse it!
usage = "usage: %prog <input_file>"
parser = optparse.OptionParser(usage)
#parser.add_option('--t1', dest='title1', action='store', type='str', default='Open Dragone',
# help='Title for first set of data.')
## file to read in is first arg.
(option, args) = parser.parse_args()
#print args
#print args[0][:-4]+'_clean.grd' ,'w+'
#sys.exit()
## open file to write to
with open(args[0][:-4]+'_clean.grd' ,'w+') as outfile:
## open file to read from
header = True
with open(args[0],'r') as infile:
for line in infile:
# read past header. need to skip 5 more lines! grrr...
if line == '++++\n':
count_now = True # start a line counter.
count = 0
header = False
if header: # just write same line while in header.
outfile.write(line)
elif count < 6 and count_now: # write same lines in info section
outfile.write(line)
count += 1 # iterate my line counter.
else: # now read/fix/write the data.
n_vals = ['','','','']
for i,val in enumerate(line.split()):
if not('E' in val): # test if error exists
n_vals[i] = val[:-4]+'E'+val[-4:] #fix error
else:
n_vals[i] = val
#write line (or fixed line) to outfile.
outfile.write(' '.join(n_vals)+'\n')
print 'File fixed!! We hope.\n Written to %s' %(args[0][:-4]+'_clean.grd' )
| true |
b77fa8ad08f2b68af71f160a0271823116bb989d | Python | vincentnawrocki/aws-tooling | /all-region-modifier/all_region_modifier.py | UTF-8 | 3,600 | 2.765625 | 3 | [] | no_license | """Module to apply a change on multiple regions for multiple AWS accounts."""
import json
import boto3
from botocore.exceptions import ClientError
import argparse
import tqdm
import actions
from actions.ebs import enable_ebs_default_encryption
from logger.logger import LOG
def all_region_modifier(role: str, account_file: str, action):
    """Apply ``action`` to every AWS region of every account in ``account_file``.

    Arguments:
        role {str} -- name of the IAM role to assume in each target account
        account_file {str} -- path to a JSON file with an "accounts" list
        action -- callable(session=..., account=...) returning a list of
            failure records, e.g. actions.ebs.enable_ebs_default_encryption

    Returns:
        None -- failures are accumulated and logged at the end.
    """
    sts_client = boto3.client('sts')
    failure_list = []
    with open(account_file) as file:
        account_list = json.load(file)
    LOG.info(
        f"Default ebs encryption will be activated on all regions for the list of account(s): {account_list['accounts']}")
    for account in tqdm.tqdm(account_list['accounts'], desc="Accounts"):
        role_arn = f"arn:aws:iam::{account}:role/{role}"
        try:
            assume_role = sts_client.assume_role(
                RoleArn=role_arn, RoleSessionName="get_all_regions", DurationSeconds=3600)
            session = boto3.Session(
                aws_access_key_id=assume_role['Credentials']['AccessKeyId'],
                aws_secret_access_key=assume_role['Credentials']['SecretAccessKey'],
                aws_session_token=assume_role['Credentials']['SessionToken'],
                region_name="us-east-1"
            )
        except ClientError as error:
            # NOTE(review): on failure `session` keeps the previous account's
            # value (or is undefined on the first iteration, raising
            # NameError below) -- consider `continue` here.
            LOG.error(
                f"Failed to assume role during regions retrieval {role_arn} : {error}")
        try:
            ec2_client = session.client('ec2', region_name='us-east-1')
            aws_regions = [region['RegionName']
                           for region in ec2_client.describe_regions()['Regions']]
            LOG.info(f"Regions retrived using role {role_arn} : {aws_regions}")
        except ClientError as error:
            # NOTE(review): same staleness issue for `aws_regions` below.
            LOG.error(f"Failed to get regions : {error}")
        for region in tqdm.tqdm(aws_regions, desc="Regions"):
            # Re-assume the role per region: the action may outlive the
            # first session's credential lifetime.
            try:
                assume_role = sts_client.assume_role(
                    RoleArn=role_arn, RoleSessionName=f"enable_ebs_encryption_{region}")
                session = boto3.Session(
                    aws_access_key_id=assume_role['Credentials']['AccessKeyId'],
                    aws_secret_access_key=assume_role['Credentials']['SecretAccessKey'],
                    aws_session_token=assume_role['Credentials']['SessionToken'],
                    region_name='us-east-1'
                )
            except ClientError as error:
                LOG.error(f"Failed to assume role {role_arn} : {error}")
            failure_list += action(session=session, account=account)
    # Print error list
    if failure_list:
        LOG.error(f"Failures encountered applying change on account/region: {failure_list}")
    else:
        LOG.info("No error during the process")
def all_region_modifier_parser():
    """Parse the CLI arguments and run the all-region modifier with the
    default EBS-encryption action."""
    arg_parser = argparse.ArgumentParser(
        description="Apply change on all regions for all accounts listed in provided input.")
    arg_parser.add_argument("role", type=str, help='The role to execute describe_regions and the action')
    arg_parser.add_argument("account_file", type=str, help='The relative path to json file with accounts')
    parsed = arg_parser.parse_args()
    all_region_modifier(role=parsed.role,
                        account_file=parsed.account_file,
                        action=actions.ebs.enable_ebs_default_encryption)
if __name__ == "__main__":
    # Guard the CLI entry point so importing this module does not parse argv
    # and start mutating AWS accounts as a side effect.
    all_region_modifier_parser()
| true |
f72bbc1cc4aae7571b2c56d917dc1dddf7e27602 | Python | rgzfx/django-challenge-001 | /jungledevs/utils/base_model.py | UTF-8 | 660 | 2.671875 | 3 | [] | no_license | from django.db.models import DateTimeField, Model
from django.utils.translation import ugettext_lazy as _
class BaseModel(Model):
created_at = DateTimeField(auto_now_add=True, verbose_name=_("Creation Date"))
updated_at = DateTimeField(auto_now=True, verbose_name=_("Update Date"))
class Meta:
abstract = True
def is_new(self) -> bool:
"""
There could be a few microseconds of difference between created_at and updated_at of newly created records.
This code ignores that difference.
:return:
"""
return self.created_at.replace(microsecond=0) == self.updated_at.replace(microsecond=0)
| true |
fac56fc2154ce5012d984a8046c9b508184f9503 | Python | eskog/inline_args | /inline_args.py | UTF-8 | 871 | 2.859375 | 3 | [] | no_license | #!/usr/bin/python
import sys
import getopt
def main(argv):
    """Parse command-line options and print the resulting settings.

    Supports -h/--help (print usage and exit), -d (debug flag) and
    -g/--grammar <arg> (override the default grammar). Exits with
    status 2 on unrecognized options.
    """
    # Stuff before arguments
    grammar = "Default value"
    debug = 0
    try:
        # Bug fix: the long option needs a trailing '=' ("grammar=") so that
        # --grammar actually consumes its argument.
        opts, args = getopt.getopt(argv, "hg:d", ["help", "grammar="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt == '-d':
            debug = 1
        elif opt in ("-g", "--grammar"):
            grammar = arg
        # getopt only yields recognized options, so no other branch is
        # reachable (the old `elif opt in (null)` arm was dead code and a
        # NameError waiting to happen; it has been removed).
    print(grammar)
    print(debug)
def usage():
    """Print the command-line help text.

    Bug fix: `print usage` was Python-2 syntax; parenthesized for Python 3.
    The local is renamed so it no longer shadows this function's own name.
    """
    text = """
    -h --help Prints this helpscreen.
    -d --Debug Enables Debug mode.
    -g --grammar arg Changes from default grammar to arg
    """
    print(text)
if __name__ == "__main__":
    main(sys.argv[1:])  # strip the program name before parsing options
e8ae772f75aa083c204ac3436b2b91f59be02c04 | Python | git-guozhijia/Python006-006 | /week01/1.py | UTF-8 | 989 | 3.890625 | 4 | [] | no_license | # 布尔运算 --- and, or, not
print(111) if 1 or 2 else print(222)
print(111) if 1 and 2 else print(222)
print(111) if not 1 else print(222)
# 111
# 111
# 222
# 比较运算
print("对") if 1 > 2 else print("错")
print("对") if 1 < 2 else print("错")
print("对") if 1 >= 2 else print("错")
print("对") if 1 >= 2 else print("错")
print("对") if 1 == 2 else print("错")
print("对") if 1 != 2 else print("错")
print("对") if 1 is 1 else print("错")# 对象标识 is 和 is not 运算符无法自定义;并且它们可以被应用于任意两个对象而不会引发异常。
print("对") if 1 is not 1 else print("错")# 否定的对象标识
# 错
# 对
# 错
# 错
# 错
# 对
# 对
# 错
a = 1
b = 1
c = 2
print("对") if a is c else print("错")
print("对") if a is not c else print("错")
print("对") if a is b else print("错")
# 错
# 对
# 对
# 数字类型 --- int, float, complex
print( 1+3 )
print( 1-3 )
print( 1*3 )
print( 1/3 )
print( 7//3 )
print( 7%3 ) | true |
4c2a4c0b0d2e3b2bee7e4190d7bb7caf9379ac44 | Python | rustiri/OO_Python | /magic_methods/equality_compare.py | UTF-8 | 1,693 | 4.65625 | 5 | [] | no_license | # Example of using __eq__ and __lt__ magic methods
class Book:
    """A book with a title, an author and a price.

    Books compare equal field-by-field; ordering (via < and >=) is by
    price. Comparing against a non-Book raises ValueError.
    """

    def __init__(self, title, author, price):
        self.title = title
        self.author = author
        self.price = price

    def __eq__(self, value):
        """Field-by-field equality; raises ValueError for non-Book operands."""
        if not isinstance(value, Book):
            raise ValueError("Can't compare book to a non-book")
        return (self.title == value.title
                and self.author == value.author
                and self.price == value.price)

    def __ge__(self, value):
        """Price-based >= comparison."""
        if not isinstance(value, Book):
            raise ValueError("Can't compare book to a non-book")
        return self.price >= value.price

    def __lt__(self, value):
        """Price-based strict < comparison.

        Bug fix: this previously returned ``self.price <= value.price``, so
        ``a < a`` was True -- violating the strict-ordering contract that
        list.sort() and sorted() rely on.
        """
        if not isinstance(value, Book):
            raise ValueError("Can't compare book to a non-book")
        return self.price < value.price
b1 = Book("War And Peace", "Leo Tolstoy", 39.95)
b2 = Book("The Catcher in The Rye", "JD Salinger", 29.95)
b3 = Book("To Kill a Monkingbird", "Harper Lee", 24.95)
b4 = Book("War And Peace", "Leo Tolstoy", 39.95)
# Check for equality (field-by-field via __eq__)
print(b1 == b4)
print(b1 == b2)
# Check for greater and lesser value by price.
# NOTE(review): Book defines no __le__, so `b2 <= b1` is resolved through
# the reflected __ge__ on b1.
print(b2 >= b1)
print(b2 <= b1)
# Now we can sort them too (uses __lt__)
books = [b1, b3, b2, b4]
books.sort()
print([book.title for book in books])
ecb69a6a0a1562a8bbe2fd3fd50657fac46f7dc1 | Python | mohakbhardwaj/deep-rl | /rl_common/ReplayBuffer.py | UTF-8 | 5,459 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
"""
Replay Buffer Module for Deep Q Network
Author: Mohak Bhardwaj
Based of off Berkeley Deep RL course's dqn implementation which can be found
at https://github.com/berkeleydeeprlcourse/homework/blob/master/hw3/dqn_utils.py
This implementation is optimized as it only keeps one copy of the frame in the buffer,
hence saving RAM which can blow up.
"""
from collections import deque
import random
import numpy as np
class ReplayBuffer(object):
    """Base class for SimpleBuffer and PrioritizedBuffer.

    Stores transitions (observation, action, reward, done) in preallocated
    circular numpy arrays so each frame is kept only once, and reconstructs
    frame-history stacks on demand in ``_encode_observation``.
    """
    #[TODO: Works only for discrete action spaces]
    def __init__(self, buffer_size, frame_history_length):
        # buffer_size: max number of transitions kept (oldest overwritten).
        # frame_history_length: frames stacked per encoded observation.
        self.buffer_size = buffer_size
        self.frame_history_length = frame_history_length
        self.obs = None      # lazily allocated on first add()
        self.action = None
        self.reward = None
        self.done = None
        self.next_idx = 0            # next slot to (over)write
        self.curr_buffer_size = 0    # number of valid entries

    def add(self, s, a, r, t):
        """Store one transition; overwrites the oldest entry once full."""
        if self.obs is None:
            # Allocate storage on first use, shaped after the first frame.
            self.obs = np.empty([self.buffer_size] + list(s.shape), dtype=np.uint8)
            self.action = np.empty([self.buffer_size], dtype=np.int32)
            self.reward = np.empty([self.buffer_size], dtype=np.float32)
            # Bug fix: the np.bool alias was removed in NumPy 1.24; np.bool_
            # is the supported scalar boolean dtype.
            self.done = np.empty([self.buffer_size], dtype=np.bool_)
        self.obs[self.next_idx] = s
        self.action[self.next_idx] = a
        self.reward[self.next_idx] = r
        self.done[self.next_idx] = t
        self.next_idx = (self.next_idx + 1) % self.buffer_size
        self.curr_buffer_size = min(self.buffer_size, self.curr_buffer_size + 1)

    def size(self):
        """Return the number of transitions currently stored."""
        return self.curr_buffer_size

    def sample_batch(self, batch_size):
        '''
        batch_size specifies the number of experiences to add
        to the batch. If the replay buffer has less than batch_size
        elements, simply return all of the elements within the buffer.
        Generally, you'll want to wait until the buffer has at least
        batch_size elements before beginning to sample from it.
        Note that whenever there are missing frames mostly due to insufficient
        data at the start of the episode, additional frames will be added which are
        all zeros.
        '''

    def clear(self):
        """Drop all stored transitions and release the arrays."""
        self.obs = None
        self.action = None
        self.reward = None
        self.done = None
        self.curr_buffer_size = 0
        self.next_idx = 0

    def can_sample(self, batch_size):
        """True when strictly more than ``batch_size`` transitions are stored."""
        return batch_size < self.curr_buffer_size

    def _encode_observation(self, idx):
        """Return the frame_history_length frames ending at ``idx`` (inclusive),
        zero-padding frames that precede the buffer or an episode start."""
        end_idx = idx + 1  # make noninclusive
        start_idx = end_idx - self.frame_history_length
        # Low-dimensional observations, such as RAM state: no frame
        # stacking -- just return the latest observation.
        if len(self.obs.shape) == 2:
            return self.obs[end_idx-1]
        # If there weren't enough frames ever in the buffer for context.
        if start_idx < 0 and self.curr_buffer_size != self.buffer_size:
            start_idx = 0
        # Never cross an episode boundary: restart after the last `done`.
        for idx in range(start_idx, end_idx - 1):
            if self.done[idx % self.buffer_size]:
                start_idx = idx + 1
        missing_context = self.frame_history_length - (end_idx - start_idx)
        # Zero-pad missing context, or handle wrap-around at the buffer edge.
        if start_idx < 0 or missing_context > 0:
            frames = [np.zeros_like(self.obs[0]) for _ in range(missing_context)]
            for idx in range(start_idx, end_idx):
                frames.append(self.obs[idx % self.buffer_size])
            return np.asarray(frames)
        else:
            return np.asarray(self.obs[start_idx:end_idx])
class SimpleBuffer(ReplayBuffer):
    """Experience replay buffer that samples batches uniformly at random,
    without any prioritization."""

    def sample_batch(self, batch_size):
        """Uniformly sample a batch of (s, a, r, done, s') arrays.

        Indexes are drawn without replacement from the filled portion of
        the buffer, excluding the last slots so the successor observation
        at idx + 1 is always valid.
        """
        assert self.can_sample(batch_size)
        # Bug fix: `xrange` is Python 2 only and raises NameError on
        # Python 3; `range` works lazily with random.sample as well.
        idxs = random.sample(range(0, self.curr_buffer_size - 2), batch_size)
        s_batch = np.concatenate([self._encode_observation(idx)[None] for idx in idxs], 0)
        a_batch = np.asarray(self.action[idxs])
        r_batch = np.asarray(self.reward[idxs])
        t_batch = np.asarray(self.done[idxs])
        s2_batch = np.concatenate([self._encode_observation(idx + 1)[None] for idx in idxs], 0)
        return s_batch, a_batch, r_batch, t_batch, s2_batch
class PrioritizedBuffer(ReplayBuffer):
    """Implements prioritized experience replay, where experiences are
    prioritized based on TD error and stochastic prioritization
    with annealing. See https://arxiv.org/pdf/1511.05952v4.pdf for details.

    Currently an unimplemented stub: inherits the base class's uniform
    storage but defines no sampling of its own."""
    # [TODO: Implement]
| true |
f43d591dd23880097b9739264f31df9799b1ed92 | Python | Jeffrey-Huang11/jhuang11 | /05/krewes.py | UTF-8 | 982 | 3.328125 | 3 | [] | no_license | # Team Rising Drago (Jeffrey Huang, Dragos Lup, & Ryan Ma)
# SoftDev
# K05 -- Teamwork, but Better This Time/ went through a dictionary, randomly selected a
# key/group and randomly selected a "name" from the key/group
# 2020-09-30
# Import random to use 'random.choice' function, which goes through a list and randomly selects an element
import random
# Mapping of krewe name -> list of member first names.
KREWES = {
    'orpheus': ['ERIC', 'SAUVE', 'JONATHAN', 'PAK', 'LIAM', 'WINNIE', 'KELLY', 'JEFFREY', 'KARL', 'ISHITA', 'VICTORIA', 'BENJAMIN', 'ARIB', 'AMELIA', 'CONSTANCE', 'IAN'],
    'rex': ['ANYA', 'DUB-Y', 'JESSICA', 'ALVIN', 'HELENA', 'MICHELLE', 'SHENKER', 'ARI', 'STELLA', 'RENEE', 'MADELYN', 'MAC', 'RYAN', 'DRAGOS'],
    'endymion': ['JASON', 'DEAN', 'MADDY', 'SAQIF', 'CINDY', 'YI LING', 'RUOSHUI', 'FB', 'MATTHEW', 'MAY', 'ERIN', 'MEIRU']
}
print(random.choice(random.choice(list(KREWES.values()))))
# Gets the values (groups) from KREWES, declares them as lists, randomly chooses a list, then randomly chooses a name, prints.
# NOTE(review): picking a krewe first makes every krewe equally likely, so
# members of smaller krewes are more likely to be chosen than members of
# larger ones -- confirm that is the intended distribution.
| true |
99b5f538f2368575ffd4529578f38865fb8906db | Python | JopRijks/Amstelhaege | /code/helpers/location.py | UTF-8 | 4,927 | 3.625 | 4 | [] | no_license | """
location.py
Wordt gebruikt om de vrijstand van een huis te berekenen en om
te controleren of de locatie van een huis aan de vereisten voldoet.
Programmeertheorie
Universiteit van Amsterdam
Jop Rijksbaron, Robin Spiers & Vincent Kleiman
"""
def location_checker(house, neighbourhood):
    """Checks if the placement of ``house`` violates any placement rule.

    ``house`` and each element of ``neighbourhood`` are expected to expose
    integer corner coordinates x0/x1/y0/y1 (x0 < x1, y0 < y1), a ``name``
    and a ``free_area`` clearance -- TODO confirm against the House class.
    Returns False on the first violation found (overlap with water or a
    house, or insufficient free space), True otherwise.
    """
    # loop through the neighbourhood
    for i in neighbourhood:
        # check if i is water; if i is water then the only regulation is that the house can't stand on water
        if i.name == "WATER":
            # collect x and y ranges of water
            horzWater = list(range(i.x0, i.x1))
            vertWater = list(range(i.y0, i.y1))
            # check if any corner of the house is placed on water
            if (house.x0 in horzWater and house.y0 in vertWater):
                return False
            elif (house.x1 in horzWater and house.y0 in vertWater):
                return False
            elif (house.x0 in horzWater and house.y1 in vertWater):
                return False
            elif (house.x1 in horzWater and house.y1 in vertWater):
                return False
        else:
            # check if house is standing on another house, if yes return false
            # (the +/-1 slack makes touching walls count as overlap)
            if (house.x0 -1 <= i.x0 and house.x1+1 >= i.x0) or (house.x0-1 <= i.x1 and house.x1+1 >= i.x1):
                if (house.y0-1 <= i.y0 and house.y1+1 >= i.y0) or (house.y0-1 <= i.y1 and house.y1+1 >= i.y1):
                    return False
            # location if house is placed right or left from i
            if (house.y0-1 <= i.y0 and house.y1+1 >= i.y0) or (house.y0-1 <= i.y1 and house.y1+1 >= i.y1):
                # collect all possible distances if walls are next to eachother on x-axis, absolute values because distances can't be negative
                min_distance = min([abs(house.x0-i.x1),abs(house.x1-i.x0),abs(house.x1-i.x1),abs(house.x0-i.x0)])
                # check if this distance is smaller than the obligated free space, if yes then return false
                if house.free_area > abs(min_distance) or i.free_area > abs(min_distance):
                    return False
            # location if house is placed above or down from i
            elif (house.x0 -1 <= i.x0 and house.x1+1 >= i.x0) or (house.x0-1 <= i.x1 and house.x1+1 >= i.x1):
                # collect all possible distances if walls are next to eachother on y-axis, absolute values because distances can't be negative
                min_distance = min([abs(house.y0-i.y1),abs(house.y1-i.y0),abs(house.y1-i.y1),abs(house.y0-i.y0)])
                # check if this distance is smaller than the obligated free space, if yes then return false
                if house.free_area > abs(min_distance) or i.free_area > abs(min_distance):
                    return False
            # diagonal distance check: neither x nor y ranges overlap, so the
            # nearest points are a pair of opposite corners
            elif house.y1 < i.y0:
                # location check if house is down left
                if house.x1 < i.x0:
                    # calculate euclidean distance
                    min_distance = distanceCalc(house.x1,house.y1,i.x0,i.y0)
                    # check if this distance is smaller than the obligated free space, if yes then return false
                    if house.free_area > abs(min_distance) or i.free_area > abs(min_distance):
                        return False
                # location check if house is down right
                elif house.x0 > i.x1:
                    # calculate euclidean distance
                    min_distance = distanceCalc(house.x0,house.y1,i.x1,i.y0)
                    # check if this distance is smaller than the obligated free space, if yes then return false
                    if house.free_area > abs(min_distance) or i.free_area > abs(min_distance):
                        return False
            elif house.y0 > i.y1:
                # location check if house is upper left
                if house.x1 < i.x0:
                    # calculate euclidean distance
                    min_distance = distanceCalc(house.x1,house.y0,i.x0,i.y1)
                    if house.free_area > abs(min_distance) or i.free_area > abs(min_distance):
                        return False
                # location check if house is upper right
                elif house.x0 > i.x1:
                    # calculate euclidean distance
                    min_distance = distanceCalc(house.x0,house.y0,i.x1,i.y1)
                    if house.free_area > abs(min_distance) or i.free_area > abs(min_distance):
                        return False
    return True
def distanceCalc(x0, y0, x1, y1):
    """Return the Euclidean distance between (x0, y0) and (x1, y1)."""
    dx = x1 - x0
    dy = y1 - y0
    # abs() kept for parity with the original; the root of a sum of squares
    # is already non-negative.
    return abs((dx ** 2 + dy ** 2) ** 0.5)
ef2872966849bc5654710e01e14042baebf698d8 | Python | colinrdavidson/Baseball-Integer-Program | /make_team.py | UTF-8 | 1,887 | 2.9375 | 3 | [] | no_license | #Default Modules
import argparse
import os.path
import subprocess
import sys
#Custom Modules
from generate_data_file import generate_data_file as gdf
#Set up arg parser
#Set up arg parser
parser = argparse.ArgumentParser(description="Generate an optimal draft team.")
parser.add_argument("--input", "-i", dest="input_file", nargs=1, required=True, help="the csv where the data is stored")
parser.add_argument("--output", "-o", dest="output_file", nargs=1, required=False, default=["output.txt"], help="the file to write the logs and team to")
args = parser.parse_args()
#Assign args to variables (nargs=1 wraps each value in a one-element list)
input_file = args.input_file[0]
output_file = args.output_file[0]
#Message
print("\nInput File: \"" + input_file + "\"")
print("Output File: \"" + output_file + "\"\n")
#See if input file exists
#NOTE(review): open-then-close existence checks are racy (LBYL); the file
#could disappear before it is actually used below.
try:
    f = open(input_file, "r")
    f.close()
except IOError:
    sys.exit("Cannot open \"" + input_file + "\", aborting...")
#Message
print("Creating data file for glpk...")
#Generate glpk Data File
try:
    gdf(input_file, "baseball.dat")
except:
    # Bare except is deliberate here: report context, then re-raise so the
    # original traceback is preserved.
    print("There was a problem generating \"baseball.dat\" from \"" + input_file + ", aborting...")
    print("Here is the python exception:\n")
    raise
print("  ...success!\n")
#Run the model
#See if "baseball.dat" file exists
try:
    f = open("baseball.dat", "r")
    f.close()
except IOError:
    sys.exit("Cannot open \"baseball.dat\", aborting...")
#See if "baseball.mod" file exists
try:
    f = open("baseball.mod", "r")
    f.close()
except IOError:
    sys.exit("Cannot open \"baseball.mod\", aborting...")
#Message
print("Running command:")
print("  glpsol --math --data baseball.dat --model baseball.mod\n")
#Run external command "glpsol" and capture its stdout into the output file
with open(output_file, "w") as f:
    argarray = ["glpsol", "--math", "--data", "baseball.dat", "--model", "baseball.mod"]
    subprocess.call(argarray, stdout=f)
#Message
print("Output written to \"" + output_file + "\"")
| true |
2f9c1a210d0c1ddd7f090b12a4b4ea3152d0790a | Python | yura20/logos_python | /hw_04/palindrome.py | UTF-8 | 283 | 4.3125 | 4 | [] | no_license | text = input("enter your word :")
def palindrome(text=""):
text1 = text
text2 = text[::-1]
if text1 == text2:
print('{palin} is palindrome'.format(palin = text1))
else:
print("{palin} isn't palindrome".format(palin = text1))
palindrome(text)  # check the word entered above
39715aaa4acf391e1cfbe66144ea7fc466d586f2 | Python | QMrpy/InteractiveErrors | /gpt2.py | UTF-8 | 2,965 | 2.65625 | 3 | [] | no_license | import argparse
import json
import logging
import os
import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
from tqdm import tqdm
def generate_candidate_leakages(leakage, model, tokenizer, args):
    """Generate candidate leakage sentences by sampling GPT-2 continuations
    of successively longer word prefixes of ``leakage``.

    For each prefix length in [args.min_prefix_length, args.max_prefix_length)
    (capped so at least one word of the original is left off), samples 5
    continuations and records dicts with keys 'leakage', 'prefix' and
    'candidate_leakage'.
    """
    # NOTE(review): this device choice is computed independently of where the
    # caller placed the model; if they disagree, generate() will fail on a
    # device mismatch -- confirm both use the same rule.
    device = "cpu" if (args.no_cuda or not torch.cuda.is_available()) else "cuda"
    candidate_leakage_dicts = []
    input_split = leakage.split()
    length_input = len(input_split)
    for i in range(args.min_prefix_length, min(args.max_prefix_length, length_input - 1)):
        input_context = " ".join(input_split[:i])
        input_ids = tokenizer(input_context, return_tensors="pt").input_ids.to(device)
        # Top-k / nucleus sampling; max_length counts tokens including the prefix.
        outputs = model.generate(input_ids=input_ids, min_length=5, max_length=20, do_sample=True, top_k=20, top_p=0.97, num_return_sequences=5)
        for tokenized_text in outputs:
            candidate_leakage = tokenizer.decode(tokenized_text.tolist(), skip_special_tokens=True)
            candidate_leakage_dicts.append({'leakage': leakage, 'prefix': input_context, 'candidate_leakage': candidate_leakage})
    return candidate_leakage_dicts
def main(args):
    """Load GPT-2, generate candidate leakages for every line of
    ``args.leakages_file``, and dump them (with the run args) to
    ``args.output_file`` as JSON.

    Bug fix: the device was hard-coded to "cuda", which crashed on
    CPU-only hosts and ignored --no_cuda; it now mirrors the selection
    rule used by generate_candidate_leakages().
    """
    device = "cpu" if (args.no_cuda or not torch.cuda.is_available()) else "cuda"
    logging.info(f"Loading GPT2 model and tokenizer from {args.pretrained_model}..")
    model = GPT2LMHeadModel.from_pretrained(args.pretrained_model)
    model.to(device)
    model.eval()
    tokenizer = GPT2TokenizerFast.from_pretrained(args.pretrained_model)
    # GPT-2 defines no pad token; reuse EOS so padding-aware utilities work.
    tokenizer.pad_token = tokenizer.eos_token
    logging.info("GPT2 model and tokenizer loaded.")
    input_fp = args.leakages_file
    output_fp = args.output_file
    logging.info(f"Reading leakages from {input_fp} and generating new leakages..")
    candidate_leakage_dicts = []
    with open(input_fp, 'r') as input_file:
        leakages = input_file.readlines()
    for leakage in tqdm(leakages):
        candidate_leakage_dicts.extend(generate_candidate_leakages(leakage.strip(), model, tokenizer, args))
    logging.info(f"{len(candidate_leakage_dicts)} candidate leakages generated.")
    with open(output_fp, 'w') as file:
        # Persist the CLI arguments alongside the results for reproducibility.
        data = {'args': vars(args), 'candidate_leakages': candidate_leakage_dicts}
        json.dump(data, file, indent=2)
    logging.info(f"Candidate leakages written to {output_fp}.")
if __name__ == "__main__":
    # CLI entry point: parse arguments, configure logging, run main().
    parser = argparse.ArgumentParser()
    parser.add_argument("--leakages_file", type=str, help="File with leakges")
    parser.add_argument("--output_file", type=str, help="Path where to save generated sentences")
    parser.add_argument("--pretrained_model", type=str, default="gpt2-medium")
    parser.add_argument("--min_prefix_length", type=int, default=2)
    parser.add_argument("--max_prefix_length", type=int, default=5)
    parser.add_argument("--quiet", action="store_true")
    parser.add_argument("--no_cuda", action="store_true")
    args = parser.parse_args()
    # --quiet suppresses INFO-level progress messages.
    logging_level = logging.INFO if not args.quiet else logging.ERROR
    logging.basicConfig(level=logging_level)
    main(args)
| true |
9d73279591a346bdf159fce20d602b5ec640d063 | Python | pombredanne/detools | /detools/sais.py | UTF-8 | 6,577 | 3.15625 | 3 | [
"BSD-2-Clause",
"MIT"
] | permissive | # Based on http://zork.net/~st/jottings/sais.html.
# Suffix-type markers used by SA-IS, stored as the byte values of "S" and
# "L" so they can live directly in a bytearray type map.
S_TYPE = ord("S")
L_TYPE = ord("L")
def build_type_map(data):
    """Classify every suffix of ``data`` as S-type or L-type.

    Returns a bytearray of len(data) + 1 entries; the extra final entry is
    the empty suffix, which is S-type by definition. A suffix is L-type when
    its first symbol is larger than its right neighbour's, or equal to it
    with an L-type neighbour.
    """
    types = bytearray(len(data) + 1)
    types[-1] = S_TYPE
    if not data:
        return types
    # The last non-empty suffix is always larger than the empty suffix.
    types[-2] = L_TYPE
    for pos in reversed(range(len(data) - 1)):
        nxt = pos + 1
        if data[pos] > data[nxt] or (data[pos] == data[nxt] and types[nxt] == L_TYPE):
            types[pos] = L_TYPE
        else:
            types[pos] = S_TYPE
    return types
def is_lms_char(offset, typemap):
    """True when ``offset`` is a left-most S-type position: an S-type
    character immediately preceded by an L-type one."""
    return (offset > 0
            and typemap[offset] == S_TYPE
            and typemap[offset - 1] == L_TYPE)
def lms_substrings_are_equal(string, typemap, offset_a, offset_b):
    """Compare the LMS substrings starting at ``offset_a`` and ``offset_b``.

    Scans both in lockstep until the next LMS boundary; any mismatch in
    characters or in LMS structure means the substrings differ. An offset
    at the end of the string never compares equal.
    """
    if len(string) in (offset_a, offset_b):
        return False
    step = 0
    while True:
        a_is_lms = is_lms_char(step + offset_a, typemap)
        b_is_lms = is_lms_char(step + offset_b, typemap)
        # Both substrings ended at the same place: they are equal.
        if step > 0 and a_is_lms and b_is_lms:
            return True
        if a_is_lms != b_is_lms:
            return False
        if string[step + offset_a] != string[step + offset_b]:
            return False
        step += 1
def find_bucket_sizes(string, alphabet_size=256):
    """Count occurrences of each symbol: the size of each suffix-array bucket."""
    counts = [0] * alphabet_size
    for symbol in string:
        counts[symbol] += 1
    return counts
def find_bucket_heads(bucket_sizes):
    """Offset of the first slot of each bucket in the suffix array.

    Slot 0 is reserved for the empty suffix, so the running total starts at 1.
    """
    heads = []
    total = 1
    for size in bucket_sizes:
        heads.append(total)
        total += size
    return heads
def find_bucket_tails(bucket_sizes):
    """Offset of the last slot of each bucket in the suffix array.

    Slot 0 is reserved for the empty suffix, so the running total starts at 1.
    """
    tails = []
    total = 1
    for size in bucket_sizes:
        total += size
        tails.append(total - 1)
    return tails
def make_suffix_array_by_induced_sorting(string, alphabet_size):
    """Build the suffix array of ``string`` with the SA-IS algorithm.

    Pipeline: classify suffix types, guess LMS positions, induce-sort L then
    S suffixes, summarise the LMS substrings into a smaller string, recurse
    (via make_summary_suffix_array), then re-place the LMS suffixes exactly
    and induce-sort once more. ``accurate_lms_sort`` is defined elsewhere in
    this module -- presumably the exact LMS placement step; verify.
    """
    typemap = build_type_map(string)
    bucket_sizes = find_bucket_sizes(string, alphabet_size)
    # First approximation: LMS suffixes dropped at their bucket tails.
    guessed_suffix_array = guess_lms_sort(string, bucket_sizes, typemap)
    induce_sort_l(string, guessed_suffix_array, bucket_sizes, typemap)
    induce_sort_s(string, guessed_suffix_array, bucket_sizes, typemap)
    # Name the LMS substrings by rank to form a reduced problem.
    (summary_string,
     summary_alphabet_size,
     summary_suffix_offsets) = summarise_suffix_array(string,
                                                      guessed_suffix_array,
                                                      typemap)
    summary_suffix_array = make_summary_suffix_array(
        summary_string,
        summary_alphabet_size)
    # Place the LMS suffixes in their exact order and induce the rest.
    result = accurate_lms_sort(string,
                               bucket_sizes,
                               summary_suffix_array,
                               summary_suffix_offsets)
    induce_sort_l(string, result, bucket_sizes, typemap)
    induce_sort_s(string, result, bucket_sizes, typemap)
    return result
def guess_lms_sort(string, bucket_sizes, typemap):
    """First approximation of the suffix array: drop each LMS suffix at the
    current tail of its bucket (in text order); all other slots stay -1."""
    sa = [-1] * (len(string) + 1)
    tails = find_bucket_tails(bucket_sizes)
    for pos in range(len(string)):
        if is_lms_char(pos, typemap):
            bucket = string[pos]
            sa[tails[bucket]] = pos
            tails[bucket] -= 1
    # The empty suffix is the smallest and always comes first.
    sa[0] = len(string)
    return sa
def induce_sort_l(string, guessed_suffix_array, bucket_sizes, typemap):
    """Left-to-right pass placing every L-type suffix into its bucket."""
    bucket_heads = find_bucket_heads(bucket_sizes)
    for i in range(len(guessed_suffix_array)):
        if guessed_suffix_array[i] == -1:
            continue  # slot not filled yet
        # Consider the suffix starting one position to the left.
        j = guessed_suffix_array[i] - 1
        if j < 0:
            continue
        if typemap[j] != L_TYPE:
            continue
        bucket_index = string[j]
        guessed_suffix_array[bucket_heads[bucket_index]] = j
        bucket_heads[bucket_index] += 1
def induce_sort_s(string, guessed_suffix_array, bucket_sizes, typemap):
    """Right-to-left pass placing every S-type suffix into its bucket."""
    bucket_tails = find_bucket_tails(bucket_sizes)
    for i in range(len(guessed_suffix_array) - 1, -1, -1):
        j = guessed_suffix_array[i] - 1
        # j < 0 also covers unfilled (-1) slots, which map to j == -2.
        if j < 0:
            continue
        if typemap[j] != S_TYPE:
            continue
        bucket_index = string[j]
        guessed_suffix_array[bucket_tails[bucket_index]] = j
        bucket_tails[bucket_index] -= 1
def summarise_suffix_array(string, guessed_suffix_array, typemap):
    """Name the LMS substrings and build the summary string.

    Returns ``(summary_string, summary_alphabet_size, summary_suffix_offsets)``
    where ``summary_string`` lists the LMS-substring names in text order and
    ``summary_suffix_offsets`` maps each name back to its position in
    ``string``.
    """
    lms_names = [-1] * (len(string) + 1)
    current_name = 0
    # The first suffix-array entry is always the empty suffix; name it 0.
    lms_names[guessed_suffix_array[0]] = current_name
    last_lms_suffix_offset = guessed_suffix_array[0]
    for i in range(1, len(guessed_suffix_array)):
        suffix_offset = guessed_suffix_array[i]
        if not is_lms_char(suffix_offset, typemap):
            continue
        # LMS suffixes arrive here in sorted order, so a new name is needed
        # exactly when the substring differs from the previous one.
        if not lms_substrings_are_equal(string,
                                        typemap,
                                        last_lms_suffix_offset,
                                        suffix_offset):
            current_name += 1
        last_lms_suffix_offset = suffix_offset
        lms_names[suffix_offset] = current_name
    # Compress the sparse name table into the dense summary string.
    summary_suffix_offsets = []
    summary_string = []
    for index, name in enumerate(lms_names):
        if name == -1:
            continue
        summary_suffix_offsets.append(index)
        summary_string.append(name)
    summary_alphabet_size = current_name + 1
    return summary_string, summary_alphabet_size, summary_suffix_offsets
def make_summary_suffix_array(summary_string, summary_alphabet_size):
    """Suffix array of the summary string.

    When every symbol of the summary string is unique a single bucket sort
    suffices; otherwise recurse into SA-IS.
    """
    if summary_alphabet_size == len(summary_string):
        summary_suffix_array = [-1] * (len(summary_string) + 1)
        # Empty suffix first, then one slot per (unique) symbol.
        summary_suffix_array[0] = len(summary_string)
        for x in range(len(summary_string)):
            y = summary_string[x]
            summary_suffix_array[y + 1] = x
    else:
        summary_suffix_array = make_suffix_array_by_induced_sorting(
            summary_string,
            summary_alphabet_size)
    return summary_suffix_array
def accurate_lms_sort(string,
                      bucket_sizes,
                      summary_suffix_array,
                      summary_suffix_offsets):
    """Place the LMS suffixes into their buckets in exact sorted order."""
    suffix_offsets = [-1] * (len(string) + 1)
    bucket_tails = find_bucket_tails(bucket_sizes)
    # Walk the summary suffix array backwards, skipping its first two
    # entries (they correspond to the summary string's own sentinels).
    for i in range(len(summary_suffix_array) - 1, 1, -1):
        string_index = summary_suffix_offsets[summary_suffix_array[i]]
        bucket_index = string[string_index]
        suffix_offsets[bucket_tails[bucket_index]] = string_index
        bucket_tails[bucket_index] -= 1
    # The empty suffix always ranks first.
    suffix_offsets[0] = len(string)
    return suffix_offsets
def sais(data):
    """Calculate the suffix array of ``data`` and return it as a list.

    ``data`` must be a sequence of integer symbols (e.g. ``bytes``) drawn
    from a 256-symbol alphabet.
    """
    return make_suffix_array_by_induced_sorting(data, 256)
| true |
9475e08099ef8cce375b658c7936e354104ec066 | Python | edeane/learning-opencv-stuff | /histogram.py | UTF-8 | 2,055 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 3 10:22:53 2017
@author: applesauce
https://www.pyimagesearch.com/2014/01/22/clever-girl-a-guide-to-utilizing-color-histograms-for-computer-vision-and-image-search-engines/
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
def gray_hist(image):
    """Show a BGR image's grayscale histogram, computed two different ways.

    The first histogram uses ``cv2.calcHist``, the second ``np.histogram``;
    both are plotted for comparison.  (Indentation of the original was
    destroyed; restored here.)
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cv2.imshow('gray', gray)
    # OpenCV histogram: 256 bins over pixel values [0, 256).
    hist_1 = cv2.calcHist([gray], [0], None, [256], [0, 256])
    fig, ax = plt.subplots(1, 1)
    ax.plot(hist_1)
    ax.set_xlabel('bins')
    ax.set_ylabel('# of pixels')
    ax.set_xlim([0, 256])
    plt.title('grayscale histogram 1')
    plt.show()
    # NumPy histogram of the same image (note: 255 bins here, not 256).
    hist_2 = np.histogram(gray, bins=255)
    fig, ax = plt.subplots(1, 1)
    ax.plot(hist_2[0])
    ax.set_xlabel('bins')
    ax.set_ylabel('# of pixels')
    ax.set_xlim([0, 256])
    plt.title('grayscale histogram 2')
    plt.show()
def flat_hist(image):
    """Plot a per-channel (B, G, R) 256-bin histogram of a BGR image."""
    chans = cv2.split(image)
    colors = ('b', 'g', 'r')
    plt.figure()
    plt.title('flattened color hist')
    plt.xlabel('bins')
    plt.ylabel('# of pixels')
    features = []
    for (chan, color) in zip(chans, colors):
        hist = cv2.calcHist([chan], [0], None, [256], [0,256])
        features.append(hist)
        plt.plot(hist, color=color)
        plt.xlim([0,256])
    # Three 256-bin histograms flattened -> 768-dimensional feature vector.
    print('flattened feature vector size: {}'.format(np.array(features).flatten().shape))
def two_d_hist(image):
    """Show 2-D histograms for every pair of colour channels (32 bins each)."""
    chans = cv2.split(image)
    # Channel index pairs: (B,G), (B,R), (G,R).
    combos = [(0, 1), (0, 2), (1, 2)]
    combos_names = ['blue', 'green', 'red']
    fig, axes = plt.subplots(1, 3)
    for (a, b), ax in zip(combos, axes.reshape(-1)):
        hist = cv2.calcHist([chans[a], chans[b]], [0, 1], None, [32, 32], [0, 256, 0, 256])
        ax.imshow(hist, interpolation='nearest')
        ax.set_title('{} and {}'.format(combos_names[a], combos_names[b]))
    plt.show()
# Demo: load an image and compute its full 3-D colour histogram
# (8 bins per channel -> 512 values).
image = cv2.imread('images/grant.jpg')
chans = cv2.split(image)
hist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
# NOTE(review): calcHist returns an 8x8x8 array here; plt.plot of a 3-D
# array is probably not the intended visualisation — confirm.
plt.plot(hist)
plt.show()
| true |
98003230d6784153a08f8521e42526d09815ad1e | Python | ajflood/lampSeminar_dataAnalysis | /DOE_example.py | UTF-8 | 1,956 | 3.15625 | 3 | [] | no_license | from DOE import *
### NOTE YOU WILL NEED TO TAKE CARE OF RANDOMIZING THAT IS CURRENTLY NOT SUPPORTED
def convert_doe_to_levels(doe, factor_levels):
    """Convert a coded design matrix (0, 1, ...) to real factor values.

    ``doe`` is a 2-D array whose column ``f`` holds level *indices* for
    factor ``f``; ``factor_levels[f]`` lists the real value of each index.
    Returns a new array of the same shape with indices replaced by values.
    (The original docstring had lost its quotes, making this function a
    SyntaxError; indentation is restored as well.)
    """
    number_of_factors = len(factor_levels)
    converted_doe = numpy.zeros_like(doe)
    for factor_id in range(number_of_factors):
        levels = factor_levels[factor_id]
        for level in range(len(levels)):
            # Boolean mask of the runs that use this level of the factor.
            level_locs = (doe[:, factor_id] == level)
            converted_doe[:, factor_id][level_locs] = levels[level]
    return converted_doe
# --- Two-level factorial experiment --------------------------------------
number_of_factors = 3
two_level_factorial_levels = [
    [0.0, 1.0],
    [100.0, 600.0],
    [1, 5]
]
two_level_factorial = Factorial().full_2_level(number_of_factors)
print('Two level factorial DOE')
print(two_level_factorial)
converted_two_level_factorial = convert_doe_to_levels(two_level_factorial, two_level_factorial_levels)
print('Two level factorial DOE converted to the real experimental values')
print(converted_two_level_factorial)

# --- Multilevel (full) factorial experiment -------------------------------
full_factorial_levels = [
    [0.5, 1.0, 2.5],
    [500.0, 100.0, 60., 80.],
    [5.0, 100.0],
]
factor_levels = [3, 4, 2]
full_factorial_exp = Factorial().full(factor_levels, reps=2)
print('Full factorial experiment')
print(full_factorial_exp)
converted_full_factorial_exp = convert_doe_to_levels(full_factorial_exp, full_factorial_levels)
print('Full factorial experiment converted to real experimental values')
print(converted_full_factorial_exp)

# --- Central composite experiment -----------------------------------------
# (This heading was missing its '#' in the original and broke the script.)
cc_factors = 5
cc_levels = [
    [5, 10, 15],
    [4, 9, 14],
    [3, 8, 13],
    [2, 7, 12],
    [1, 6, 11],
]
cc_doe = CentralComposite().doe(cc_factors, center_points='d', alpha_type='faced')
print('Central Composite design of experiment')
print(cc_doe)
converted_cc_doe = convert_doe_to_levels(cc_doe, cc_levels)
print('Central Composite design of experiment converted to real experimental values')
print(converted_cc_doe)
| true |
63cea74780ecf2145edca527dbd0e9205c66ddea | Python | CodingLordSS/BMI-Calculator-Developer-CodeLordSS | /BMI.py | UTF-8 | 683 | 3.625 | 4 | [] | no_license | // Developer CodeLordSS
# Programming language: Python
# BMI calculator: reads height (cm) and weight (kg), prints the BMI and
# the matching weight category.  ('//' comments are not valid Python and
# the trailing sample output was uncommented; both fixed here.)
Height = float(input("Enter your height in centimeters: "))
Weight = float(input("Enter your Weight in Kg: "))
Height = Height / 100  # centimetres -> metres
BMI = Weight / (Height * Height)
print("your Body Mass Index is: ", BMI)
if BMI > 0:
    if BMI <= 16:
        print("you are severely underweight")
    elif BMI <= 18.5:
        print("you are underweight")
    elif BMI <= 25:
        print("you are Healthy")
    elif BMI <= 30:
        print("you are overweight")
    else:
        print("you are severely overweight")
else:
    # Bug fix: the original built this string without printing it.
    print("enter valid details")

# Example session:
#   Enter your height in centimeters: 170
#   Enter your Weight in Kg: 67
#   your Body Mass Index is:  23.18339100346021
#   you are Healthy
| true |
fc54fe330a8fcc7e62560ac9c979bb57552fd8e2 | Python | shaiwilson/algorithm_practice | /may12.py | UTF-8 | 382 | 3.53125 | 4 | [] | no_license |
def compress(theStr):
    """Basic string compression: runs of repeated characters become
    ``<char><count>`` (e.g. ``"aabccc"`` -> ``"a2b1c3"``).

    Returns the compressed string only when it is strictly shorter than the
    input; otherwise returns the input unchanged.  Bug fix: the original
    never appended the final run and fell off the end without returning.
    """
    outstring = []
    lastChar = ""
    charCount = 0
    for char in theStr:
        if char == lastChar:
            charCount += 1
        else:
            # Flush the previous run (skip the initial sentinel).
            if lastChar != "":
                outstring.append(lastChar + str(charCount))
            charCount = 1
            lastChar = char
    # Flush the last run, which the loop above never emits.
    if lastChar != "":
        outstring.append(lastChar + str(charCount))
    compressed = "".join(outstring)
    return compressed if len(compressed) < len(theStr) else theStr
d14ebbae7b73a9d1242f098bb3da0c88267b99da | Python | shockflux/python_basics | /basic17.py | UTF-8 | 120 | 3.5625 | 4 | [] | no_license | #returning avg of two numbers using functions
def average(a, b):
    """Return the arithmetic mean of ``a`` and ``b``."""
    return (a + b) / 2


# main program: demo call
print(average(2, 2))
19f72b359daafd49ac300c093f3cf7b8621b4f8e | Python | witklaus/simulationQueue | /passenger.py | UTF-8 | 1,738 | 2.796875 | 3 | [] | no_license | import pandas as pd
import random
import numpy as np
from lotnisko.conf import CONFIG
class PassengerRegistry(object):
    """Keeps track of every Passenger created during a simulation run."""

    def __init__(self, env):
        self.passengers = []   # all Passenger objects created so far
        self.env = env         # simulation environment (provides .now)
        self.waitsum = 0.0     # total waiting time registered by passengers

    def create_passenger(self, number):
        """Create a Passenger, register it and return it."""
        passenger = Passenger(number, self.env, self)
        self.passengers.append(passenger)
        return passenger

    def summarize(self):
        """Return descriptive statistics of the passengers' total waits."""
        df = pd.DataFrame(
            list(map(lambda x: x.get_total_waiting_time(), self.passengers)))
        return(df.describe())

    def register_wait(self, wait):
        """Accumulate one finished wait into the running total."""
        self.waitsum += wait

    def get_mean_wait_time(self):
        """Mean wait per passenger.

        Falls back to the configured average while only a single passenger
        exists (no meaningful sample yet).
        """
        no_passengers = len(self.passengers)
        if no_passengers == 1:
            return CONFIG['avg_waiting_time']
        return self.waitsum / len(self.passengers)
class Passenger(object):
    """A single passenger; also a context manager that times waits."""

    def __init__(self, number, env, registry):
        self.waits = []          # completed wait durations
        self._number = number
        self.env = env           # environment providing the .now clock
        self.registry = registry
        # Risk level drawn from a Poisson distribution, capped at 10.
        # NOTE(review): np.minimum on a size-1 sample yields a 1-element
        # array, not a scalar — confirm downstream code expects that.
        self._risk_level = np.minimum(np.random.poisson(
            CONFIG["passenger_risk_poisson_lambda"], 1), 10)

    def __enter__(self):
        # Start timing a wait.
        self.start = self.env.now
        return self

    def __exit__(self, *args):
        # Stop timing: record the wait locally and in the registry.
        self.waits.append(self.env.now - self.start)
        self.registry.register_wait(self.waits[-1])

    def __repr__(self):
        return str(self._number)

    def last_wait(self):
        """Duration of the most recent wait, or 0 if none finished yet."""
        try:
            return self.waits[-1]
        except IndexError:
            return 0

    def select_queue(self, queues):
        """Pick a queue uniformly at random."""
        return random.choice(queues)

    def get_total_waiting_time(self):
        return sum(self.waits)

    @property
    def risk_level(self):
        return self._risk_level
| true |
b71f4ed07c540bfbf8a4f51c2626111f05557365 | Python | tcmcginnis/python | /pytest.py.python2 | UTF-8 | 485 | 3.625 | 4 | [] | no_license | #!/usr/bin/python3
"""
this is a comment
"""
spam = "That is Alice's cat."
# print "asdasd \n"
# print "aaa:",spam
# print "a",spam[3]
# print spam.upper()
# print spam[3:]
linein = raw_input()
print "a",linein
# print "a",linein.lower()
# print('How are you?')
# feeling = raw_input()
# if feeling.lower() == 'great':
# print('I feel great too.')
# else:
# print('I hope the rest of your day is good.')
# print('I hope the rest of your day is good.')
# print('I good.')
| true |
e0fc887ec5a2aa1a1e10a56bf352eb6ed4d63a29 | Python | Rifleman354/Python | /Python Crash Course/Chapter 8 Exercises/favoriteBook.py | UTF-8 | 255 | 3.453125 | 3 | [] | no_license | def display_message(favorite_book): # The business end: the function
"""Display's the developer's favorite book"""
print("The Archmagos' favorite book is " + favorite_book.title())
display_message('Bushcrafting 101') # Input arguement for function | true |
2f08efc218d8220244b6ba37278ce6d37dc9d6ce | Python | dylancallaway/ee49_project | /training/run_inference.py | UTF-8 | 5,130 | 2.640625 | 3 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
import socket
import pickle
from PIL import Image
import time
class Model:
    """Wraps a frozen TensorFlow object-detection graph for inference."""

    def __init__(self, graph_path, label_path):
        self.graph_path = graph_path
        self.label_path = label_path
        self.output_dict = {}
        self.tensor_dict = {}
        self.category_index = {}
        self.detection_thresh = 0.6  # minimum score for a detection to count
        self.image_np = np.ndarray((1, 1, 1), dtype=np.uint8)
        # Load the frozen graph definition into the default TF graph.
        detection_graph = tf.Graph()
        detection_graph.as_default()
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(graph_path, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        self.category_index = label_map_util.create_category_index_from_labelmap(
            label_path, use_display_name=True)
        self.session = tf.Session()
        # Cache the output tensors fetched on every inference call.
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                self.tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                    tensor_name)

    def detect(self, image_np):
        """Run the detector on one image array.

        Returns the number of detections scoring at or above the threshold.
        """
        self.image_np = image_np
        # The model expects a batch dimension.
        image_np_expanded = np.expand_dims(image_np, axis=0)
        image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
        # Run inference
        self.output_dict = self.session.run(self.tensor_dict,
                                            feed_dict={image_tensor: image_np_expanded})
        # All outputs are float32 numpy arrays; convert to convenient types.
        self.output_dict['num_detections'] = int(
            self.output_dict['num_detections'][0])
        self.output_dict['detection_classes'] = self.output_dict[
            'detection_classes'][0].astype(np.uint8)
        self.output_dict['detection_boxes'] = self.output_dict['detection_boxes'][0]
        self.output_dict['detection_scores'] = self.output_dict['detection_scores'][0]
        self.num_hands = sum(
            self.output_dict['detection_scores'] >= self.detection_thresh)
        return self.num_hands

    def display_results(self):
        """Draw the thresholded boxes on the last processed image and show it."""
        vis_util.visualize_boxes_and_labels_on_image_array(
            self.image_np,
            self.output_dict['detection_boxes'],
            self.output_dict['detection_classes'],
            self.output_dict['detection_scores'],
            self.category_index,
            instance_masks=self.output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            min_score_thresh=self.detection_thresh,
            line_thickness=6)
        # Size, in inches, of the output image.
        disp_size = (24, 16)
        plt.figure(figsize=disp_size)
        plt.imshow(self.image_np)
        plt.show()
class Connection:
    """TCP helper: receives a binary payload and sends back results."""

    def __init__(self, recv_host, recv_port, send_host, send_port):
        self.recv_host = recv_host
        self.recv_port = recv_port
        self.send_host = send_host
        self.send_port = send_port
        # Listening socket used to receive images.
        self.recv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.recv_sock.bind((self.recv_host, self.recv_port))
        print('Listening at: {}:{}'.format(
            self.recv_host, str(self.recv_port)))

    def wait_data(self):
        """Block until a client connects, then read its whole payload."""
        self.recv_sock.listen(1)
        data = b''
        self.conn, self.addr = self.recv_sock.accept()
        print('Incoming connection from:', self.addr)
        while True:
            inc_data = self.conn.recv(1024)
            if inc_data == b'':
                # Empty read means the peer closed the connection.
                print('Received {} bytes.'.format(len(data)))
                break
            else:
                data += inc_data
        return data

    def send_results(self, results):
        """Open a fresh connection, send ``results`` (bytes) and close."""
        self.send_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.send_sock.connect((self.send_host, self.send_port))
        self.send_sock.sendall(results)
        self.send_sock.close()
if __name__ == '__main__':
    # Paths to the frozen detector graph and its label map.
    graph_path = '/home/dylan/ee49_project/training/models/faster_rcnn_resnet50_lowproposals_coco_2018_01_28/frozen_inference_graph.pb'
    label_path = '/home/dylan/ee49_project/training/data/tf_records/hands/label_map.pbtxt'
    model = Model(graph_path, label_path)
    image_path = 'training/test_images/first-gen-hand-raise-uc-davis.jpg'
    image_pil = Image.open('../' + image_path)
    image_np = np.array(image_pil)
    # Time a single inference pass.
    tic = time.time()
    detected_hands = model.detect(image_np)
    toc = time.time()
    print('ELAPSED TIME: {:.3f}'.format(toc - tic))
    model.display_results()
| true |
a2ed3cb2dfc06f837edb937e681b12ed2d9500e7 | Python | xdaniel07x/python-for-everybody-solutions | /exercise7_1.py | UTF-8 | 320 | 4.25 | 4 | [] | no_license | """
Exercise 1: Write a program to read through a file and print the contents of
the file (line by line) all in upper case. Executing the program will look as
follows:
"""
fileName = input('Enter a file name: ')
# Use a context manager so the file is closed even on error
# (the original never closed it).
with open(fileName) as fhand:
    for line in fhand:
        # Upper-case the line and drop surrounding whitespace/newline.
        capital = line.upper().strip()
        print(capital)
| true |
ebd40b56aedddcbb4fad56ba09c83063d5553ff5 | Python | jillwuu/6700-proj | /tictactoe.py | UTF-8 | 3,516 | 3.75 | 4 | [] | no_license | import random
from minimax import Minimax
class TicTacToe:
    """Console tic-tac-toe: human (X) against a minimax computer (O)."""

    def __init__(self):
        self.minimax = Minimax()
        self.game_size = 3
        self.player_0 = 'X'
        self.player_1 = 'O'
        self.player = self.player_0  # X moves first
        self.empty = ' '
        self.board = [[self.empty for _ in range(self.game_size)] for _ in range(self.game_size)]
        self.game_over = False
        self.winner = None
        # Every (row, col) pair starts out empty.
        self.empty_spots = []
        for i in range(self.game_size):
            for j in range(self.game_size):
                self.empty_spots.append((i, j))

    def display_board(self):
        """Pretty-print the 3x3 board with separators."""
        row_divide = "---------"
        for x in range(self.game_size):
            curr_row = ""
            for y in range(self.game_size):
                if y == 0 or y == 1:
                    curr_row = curr_row + self.board[x][y] + " | "
                else:
                    curr_row = curr_row + self.board[x][y]
            print(curr_row)
            if x == 0 or x == 1:
                print(row_divide)

    def update_board(self, player, location):
        """Place ``player``'s mark at ``location`` and advance the turn."""
        (x, y) = location
        if x < 3 and y < 3:
            if self.board[x][y] == self.empty:
                self.board[x][y] = player
            else:
                print("This location is already filled, please enter another location")
        else:
            print("invalid location, please enter another location")
        self.empty_spots.remove(location)
        self.display_board()
        # Switch the active player.
        if self.player == self.player_0:
            self.player = self.player_1
        else:
            self.player = self.player_0
        self.check_win()
        if not self.game_over:
            print("IT IS NOW PLAYER " + self.player + "'S TURN")

    def check_win(self):
        """Set ``winner`` when a line is complete; refresh ``game_over``."""
        # check if any rows are completed
        for x in range(self.game_size):
            if self.game_over == False:
                player_spot = self.board[x][0]
                if player_spot != self.empty:
                    for y in range(1, self.game_size):
                        if player_spot != self.board[x][y]:
                            break
                        elif y == self.game_size - 1:
                            self.winner = player_spot
                            self.game_over = True
        # check if any columns are completed
        for y in range(self.game_size):
            if self.game_over == False:
                player_spot = self.board[0][y]
                if player_spot != self.empty:
                    for x in range(1, self.game_size):
                        if player_spot != self.board[x][y]:
                            break
                        elif x == self.game_size - 1:
                            self.winner = player_spot
                            self.game_over = True
        # check diagonal top left to bottom right (0,0) (1,1) (2,2)
        if self.game_over == False:
            player_spot = self.board[0][0]
            if player_spot != self.empty:
                for x in range(1, self.game_size):
                    if player_spot != self.board[x][x]:
                        break
                    elif x == self.game_size - 1:
                        self.winner = player_spot
                        self.game_over = True
        # check diagonal top right to bottom left (2,0) (1,1) (0,2)
        if self.game_over == False:
            player_spot = self.board[2][0]
            if player_spot != self.empty:
                for x in range(1, self.game_size):
                    if player_spot != self.board[2 - x][x]:
                        break
                    elif x == self.game_size - 1:
                        self.winner = player_spot
                        self.game_over = True
        # NOTE(review): this unconditionally overwrites game_over with the
        # board-full check; if Minimax.all_filled only tests fullness, a win
        # on a non-full board would be lost — confirm against Minimax.
        self.game_over = self.minimax.all_filled(self.board)

    def computer_move(self):
        """Let the minimax engine choose and play a move."""
        computer_loc = (self.minimax.algorithm(self.board))
        self.update_board(self.player, computer_loc)

    def player_move(self):
        """Read a move from the human player on stdin and play it."""
        x = input("Please choose an x coordinate: ")
        y = input("Please choose a y coordinate: ")
        self.update_board(self.player, (int(x), int(y)))

    def play(self):
        """Main loop: alternate human and computer until the game ends."""
        print("IT IS NOW PLAYER " + self.player + "'S TURN")
        while not self.game_over:
            self.player_move()
            if not self.game_over:
                self.computer_move()
        if self.winner:
            print("GAME OVER, " + self.winner + " HAS WON")
        else:
            print("GAME OVER, TIE!")
# Create a game and run the interactive loop (human goes first as X).
game = TicTacToe()
game.play()
| true |
74d9c99712d4be6884a8b63e2e29342e45b05d11 | Python | vsseetamraju/multiples | /multiplesGIT.py | UTF-8 | 231 | 4.21875 | 4 | [
"Unlicense"
] | permissive |
# Ask for the user input
userNum = input("Tell me a number ")
# convert to float
userNum = float(userNum)
# Print the multiplication-table entries 2x through 9x.
for i in range(2, 10):
    answer = userNum * i
    print("{} times {} is {}.".format(userNum, i , answer))
f81b70152b69b2bc2779a4be5e51f65da8fd64c8 | Python | Artemigos/advent-of-code | /2021/21/solution.py | UTF-8 | 2,158 | 3.125 | 3 | [
"MIT"
] | permissive | from collections import Counter, defaultdict
# Starting positions, 0-indexed (pawn square 1..10 stored as 0..9).
p1_start = 3
p2_start = 4

# --- Part 1: deterministic 100-sided die ----------------------------------
rolls = 0


def roll():
    """Roll the deterministic die: cycles 1..100 and counts total rolls."""
    global rolls
    rolls += 1
    return ((rolls - 1) % 100) + 1


p1 = p1_start
p2 = p2_start
p1_points = 0
p2_points = 0
while True:
    # Each player moves by the sum of three rolls; score = landing square.
    p1 += roll() + roll() + roll()
    p1 %= 10
    p1_points += p1 + 1
    if p1_points >= 1000:
        losing_points = p2_points
        break
    p2 += roll() + roll() + roll()
    p2 %= 10
    p2_points += p2 + 1
    if p2_points >= 1000:
        losing_points = p1_points
        break
# Puzzle answer: losing score times number of die rolls.
print(rolls * losing_points)
# --- Part 2: Dirac (quantum) die ------------------------------------------
# Distribution of the sum of three 3-sided rolls: sum -> multiplicity.
triple_roll = Counter()
for i in range(1, 4):
    for j in range(1, 4):
        for k in range(1, 4):
            triple_roll[i + j + k] += 1

# Memo: (p1, p2, p1_points, p2_points, player_to_move) -> (p1_wins, p2_wins).
ways_to_reach_21 = defaultdict(lambda: (0, 0))


def find_ways_to_reach_21(p1, p2, p1_points, p2_points, p_to_move):
    """Count, per universe branch, the ways each player reaches 21 first."""
    k = (p1, p2, p1_points, p2_points, p_to_move)
    if k in ways_to_reach_21:
        return ways_to_reach_21[k]
    for roll in triple_roll:
        if p_to_move == 1:
            p1_new = (p1 + roll) % 10
            p1_new_points = p1_points + p1_new + 1
            stored_p1_wins, stored_p2_wins = ways_to_reach_21[k]
            if p1_new_points >= 21:
                # Player 1 wins in triple_roll[roll] parallel universes.
                ways_to_reach_21[k] = (stored_p1_wins + triple_roll[roll], stored_p2_wins)
            else:
                lower_p1_wins, lower_p2_wins = find_ways_to_reach_21(p1_new, p2, p1_new_points, p2_points, 2)
                ways_to_reach_21[k] = (triple_roll[roll] * lower_p1_wins + stored_p1_wins, triple_roll[roll] * lower_p2_wins + stored_p2_wins)
        else:
            p2_new = (p2 + roll) % 10
            p2_new_points = p2_points + p2_new + 1
            stored_p1_wins, stored_p2_wins = ways_to_reach_21[k]
            if p2_new_points >= 21:
                ways_to_reach_21[k] = (stored_p1_wins, stored_p2_wins + triple_roll[roll])
            else:
                lower_p1_wins, lower_p2_wins = find_ways_to_reach_21(p1, p2_new, p1_points, p2_new_points, 1)
                ways_to_reach_21[k] = (triple_roll[roll] * lower_p1_wins + stored_p1_wins, triple_roll[roll] * lower_p2_wins + stored_p2_wins)
    return ways_to_reach_21[k]


# Puzzle answer: universes won by the more successful player.
print(max(find_ways_to_reach_21(p1_start, p2_start, 0, 0, 1)))
| true |
d451bf4af50603d5d3ec94dc10377dc9aa92099e | Python | andreashappe/mod_security_importer | /log_importer/log_import/parser.py | UTF-8 | 3,493 | 3.09375 | 3 | [] | no_license | """ This module converts the string representation of an incident into
a python (or rather sqlalchemy) object. """
# urllib.parse in python2/3
from future.standard_library import install_aliases
install_aliases()
import re
import datetime
from urllib.parse import urlparse
REGEXP_PART_A = '^\[([^\]]+)\] ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)\n$'
def date_parser(match_group):
    """Convert a mod_security timestamp (e.g. ``21/Mar/2017:10:21:18 +0100``)
    into a naive UTC datetime.

    Done by hand because ``strptime`` cannot parse the ``+0000`` offset
    (which is somehow not mentioned in the documentation) and
    ``python-dateutil`` cannot parse the custom mod_security format.
    Generalised over the original to also accept negative UTC offsets.
    """
    parts = match_group.split()
    sign = -1 if parts[1].startswith('-') else 1
    offset = datetime.datetime.strptime(parts[1].lstrip('+-'), "%H%M")
    delta = datetime.timedelta(hours=offset.hour, minutes=offset.minute)
    # Subtract the (signed) offset to normalise the local time to UTC.
    return datetime.datetime.strptime(parts[0], "%d/%b/%Y:%H:%M:%S") - sign * delta
def parse_part_A(part):
    """Parse audit-log part A: timestamp, id, source and destination.

    Returns ``(timestamp, unique_id, src_ip, src_port, dst_ip, dst_port)``
    (the 3rd/4th groups feed ``source`` and the 5th/6th ``destination`` in
    ``parse_incident``).  Raises AssertionError when the line does not
    match ``REGEXP_PART_A``.
    """
    matcher = re.match(REGEXP_PART_A, part)
    assert matcher
    timestamp = date_parser(matcher.group(1))
    return (timestamp, matcher.group(2),
            matcher.group(3), int(matcher.group(4)),
            matcher.group(5), int(matcher.group(6)))
def parse_H_detail_message(msg):
    """Split a part-H ``Message:`` line into a dict of its bracketed fields.

    ``[file "/etc/x"] [line "12"]`` becomes
    ``{'file': '/etc/x', 'line': '12'}``; bracketed chunks without a value
    are ignored.
    """
    result = {}
    # Each bracketed chunk is "key value"; values may be double-quoted.
    for i in [x.split(' ', 1) for x in re.findall(r"\[([^\]]*)\]", msg)]:
        if len(i) == 2:
            key = i[0].strip()
            value = i[1].strip("\"")
            result[key] = value
    return result
def parse_part_H(part):
    """Collect the parsed ``Message:`` entries from the part-H lines."""
    messages = []
    for i in [x.split(':', 1) for x in part]:
        if i[0] == "Message":
            messages.append(parse_H_detail_message(i[1]))
    return messages
def parse_part_B(parts):
    """Parse the request lines of audit-log part B.

    Returns ``(host, method, path)``.  ``method``/``path`` are ``None`` when
    the first line is not a request line; ``host`` is ``None`` when no
    ``Host:`` header is present (the original raised UnboundLocalError in
    that case — fixed here).
    """
    # check if we start with GET/etc. Request
    matcher = re.match(r'^([^ ]+) (.*)\n$', parts[0])
    if matcher:
        method = matcher.group(1).strip()
        path = urlparse(matcher.group(2)).path
    else:
        method = None
        path = None
    host = None
    for i in [x.split(':', 1) for x in parts]:
        if i[0] == "Host":
            host = i[1].strip()
    return host, method, path
def parse_incident(stuff, include_parts=False):
    """Convert one incident's ``(fragment_id, {part_letter: lines})`` tuple
    into a plain dictionary.

    Part A (mandatory) supplies timestamp/id/addresses; parts B, H and F
    contribute request info, rule messages and the HTTP status code.
    """
    fragment_id = stuff[0]
    parts = stuff[1]
    # create the incident and fill it with data from the mandatory 'A' part
    assert 'A' in parts
    result_A = parse_part_A(parts['A'][0])
    incident = {'fragment_id': fragment_id,
                'timestamp': result_A[0],
                'unique_id': result_A[1],
                'destination': [result_A[4], result_A[5]],
                'source': [result_A[2], result_A[3]],
                'parts': []
                }
    # Optionally keep the raw text of every part.
    if include_parts:
        for (cat, body) in parts.items():
            merged_part = "\n".join(body)
            incident['parts'].append({'category': cat, 'body': merged_part})
    # Part B: request line and Host header (if present).
    if 'B' in parts:
        incident['host'], incident['method'], incident['path'] = parse_part_B(parts['B'])
    else:
        incident['host'] = incident['method'] = incident['path'] = ""
    # Part H: audit trailer with per-rule messages.  NOTE(review): the
    # fallback is '' while the populated value is a list — confirm callers
    # handle both shapes.
    if 'H' in parts:
        incident['details'] = parse_part_H(parts['H'])
    else:
        incident['details'] = ''
    if 'F' in parts:
        incident['http_code'] = parts['F'][0].strip()
    else:
        incident['http_code'] = ''
    return incident
| true |
b98643f704d8044c1d3182f1d94096ee7afb9bc0 | Python | linyuchen/py3study | /py3.5/scandir.py | UTF-8 | 272 | 3.125 | 3 | [] | no_license | # -*- coding:UTF-8 -*-
import os
__author__ = "linyuchen"
__doc__ = """
os.scandir,更快的遍历目录,os.walk内部也用os.scandir实现了
返回的是个生成器
"""
# Walk the current directory once, showing each entry's metadata
# (os.scandir yields os.DirEntry objects lazily).
for i in os.scandir("."):
    print(i, i.name, i.path, i.is_dir(), i.is_file())
| true |
5cad2e3a4c163b0cbcac0ea769168117105fcdd8 | Python | AroraTanmay7/Technocolabs-Minor-Project | /check-app.py | UTF-8 | 2,129 | 2.765625 | 3 | [] | no_license | import streamlit as st
import pandas as pd
import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier
st.set_option('deprecation.showfileUploaderEncoding', False)

st.sidebar.header('User Input Features')

# Collect user input features into a dataframe: an uploaded CSV if
# provided, otherwise four sidebar sliders.
uploaded_file = st.sidebar.file_uploader("Upload your input CSV file", type=["csv"])
if uploaded_file is not None:
    input_df = pd.read_csv(uploaded_file)
else:
    def user_input_features():
        """Build a single-row dataframe from the sidebar sliders."""
        Recency = st.sidebar.slider('Recency (months)', 0.00, 74.00, 59.80)
        Frequency = st.sidebar.slider('Frequency (times)', 1.00, 50.00, 21.50)
        Monetary = st.sidebar.slider('Monetary (c.c. blood)', 250.00, 12500.00, 560.00)
        Time = st.sidebar.slider('Time (months)', 2.00, 98.00, 50.00)
        data = {'Recency (months)': Recency,
                'Frequency (times)': Frequency,
                'Monetary (c.c. blood)': Monetary,
                'Time (months)': Time}
        features = pd.DataFrame(data, index=[0])
        return features
    input_df = user_input_features()

# Combine the user input with the reference transfusion dataset.
transfusion_raw = pd.read_csv('transfusion.data')
# NOTE(review): the results of these two sanity checks are discarded —
# presumably they were meant to be asserted or displayed.
np.any(np.isnan(transfusion_raw))
np.all(np.isfinite(transfusion_raw))
transfusion = transfusion_raw.drop(columns=['whether he/she donated blood in March 2007'])
df = pd.concat([input_df, transfusion], axis=0)
df = df[:1]  # keep only the first row (the user input data)

# Display the user input features.
st.subheader('User Input features')
if uploaded_file is not None:
    st.write(df)
else:
    st.write('Awaiting CSV file to be uploaded. Currently using example input parameters (shown below).')
    st.write(df)

# Read in the saved classification model and predict.
load_clf = pickle.load(open('transfusion_clf.pkl', 'rb'))

prediction = load_clf.predict(df)
prediction_prob = load_clf.predict_proba(df)

st.subheader('Prediction')
# NOTE(review): this label array is never used afterwards — confirm.
transfusion_prediction = np.array(['whether he/she donated blood in March 2007'])
st.write(prediction[0])

st.subheader('Prediction Probability')
st.write(prediction_prob)
| true |
920a608a0e88b13d9173099f3a9bf22fefc87677 | Python | PatLor77/zadania-9-wd | /zadanie6.py | UTF-8 | 277 | 2.65625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xlrd
# Load the orders file (semicolon-separated).  Note: 'csv' shadows the
# stdlib module name.
csv = pd.read_csv('zamowienia.csv',sep=';')
# Total revenue ('Utarg') per salesperson ('Sprzedawca').
sumy = csv.groupby(['Sprzedawca']).agg({'Utarg':['sum']})
# Grand total of all revenue.
suma_og = sum(csv['Utarg'])
# Each salesperson's share as a rounded percentage of the total.
sumy = (sumy/suma_og)*100
sumy = round(sumy)
# NOTE(review): nothing has been plotted before plt.show() — a
# sumy.plot(...) call appears to be missing; confirm the intended chart.
plt.show()
0dd3c114ea3138ca9015fe56c16515f916c1aa79 | Python | Samridh-Dhasmana/Recommendation_System | /app.py | UTF-8 | 2,079 | 3.234375 | 3 | [] | no_license | import flask
import pandas as pd
# Read the movie metadata used by the recommender.
df = pd.read_csv('movies.csv')
# All known movie titles, used to validate user input.  (Replaces the
# original index loop — iterating the Series directly yields the same
# list without per-element positional lookups.)
m_titles = list(df['title'])
# Create the Flask application object.
app = flask.Flask(__name__, template_folder='templates')
def create():
    """Build and return the TF-IDF matrix of all movie overviews."""
    from sklearn.feature_extraction.text import TfidfVectorizer
    # Remove common English stop words.
    t = TfidfVectorizer(stop_words='english')
    # Missing overviews become empty strings so fit_transform accepts them.
    df['overview'] = df['overview'].fillna('')
    # Generate the TF-IDF matrix.
    matrix = t.fit_transform(df['overview'])
    return matrix
def calcosine():
    """Cosine-similarity matrix between all movie overviews.

    NOTE(review): create() is invoked twice, recomputing the TF-IDF matrix
    both times — consider computing it once and reusing it.
    """
    from sklearn.metrics.pairwise import cosine_similarity
    cosine_sim = cosine_similarity(create(), create())
    return cosine_sim
def recommend(title, cosine_sim=calcosine()):
    """Return a Series of the 10 movies most similar to ``title``.

    NOTE: the default ``cosine_sim`` is evaluated once at import time,
    so the similarity matrix is shared across calls.
    """
    # Reverse map: title -> dataframe index.
    indices = pd.Series(df.index, index=df['title']).drop_duplicates()
    # Index of the requested movie.
    index = indices[title]
    # Similarity of the requested movie to every other movie.
    s = list(enumerate(cosine_sim[index]))
    # Sort by similarity score, best first.
    s = sorted(s, key=lambda x: x[1], reverse=True)
    # Skip the movie itself (position 0) and keep the next ten.
    s = s[1:11]
    movie_indices = [i[0] for i in s]
    return df['title'].iloc[movie_indices]
# Set up the main route
@app.route('/', methods=['GET', 'POST'])
def main():
    """Render the search form (GET) or the recommendation results (POST)."""
    if flask.request.method == 'GET':
        return(flask.render_template('index.html'))
    if flask.request.method == 'POST':
        m = flask.request.form['movie_name']
        if m not in m_titles:
            # Unknown title: show the error page.
            return (flask.render_template("wrong_input_result.html"))
        else:
            res = []
            names = recommend(m)
            for i in range(len(names)):
                res.append(names.iloc[i])
            return (flask.render_template("result.html", result=res, search_name=m))


if __name__ == '__main__':
    app.run(debug=True)
82ec2f8a99dcefbd90ca8473bcf205764636408b | Python | zdyxry/LeetCode | /design/1352_product_of_the_last_k_numbers/1352_product_of_the_last_k_numbers.py | UTF-8 | 327 | 3.515625 | 4 | [] | no_license |
class ProductOfNumbers(object):
    """Stream of integers supporting O(1) product-of-the-last-k queries.

    Keeps a running prefix-product list; a zero resets the list because any
    window reaching back to the zero has product 0.
    """

    def __init__(self):
        self.A = [1]  # prefix products; A[0] is a sentinel

    def add(self, a):
        """Append ``a`` to the stream."""
        if a == 0:
            # Every window containing this element has product 0.
            self.A = [1]
        else:
            self.A.append(self.A[-1] * a)

    def getProduct(self, k):
        """Product of the last ``k`` numbers (0 when a zero is among them).

        NOTE(review): true division returns a float; ``//`` would be exact
        here since A[-1] is always a multiple of A[-k-1].
        """
        if k >= len(self.A):
            return 0
        return self.A[-1] / self.A[-k - 1]
| true |
678ea15cd6850b5217031e0d8b7ca7895d99d5a2 | Python | A-biao96/python-MP | /scripts/select_file.py | UTF-8 | 762 | 2.921875 | 3 | [] | no_license | #!/usr/bin/python3
# encoding:utf-8
import os
def select_file(*args, destdir='./'):
    """Interactively pick a file from ``destdir``.

    ``args`` may list extra file extensions to accept besides the defaults
    ('.cc', '.py', '.txt', '.md').  Returns the chosen file name, or -1
    when the directory is unreadable or the choice is invalid.
    """
    # Narrowed from a bare ``except``: only directory-access errors should
    # map to the -1 "cannot list" result.
    try:
        os.listdir(destdir)
    except OSError:
        return -1
    suffix = ['.cc', '.py', '.txt', '.md']
    if len(args):
        suffix.extend(args)
    flist = [f for f in os.listdir(destdir) if os.path.splitext(f)[1].lower() in suffix]
    flist.sort()
    # Numbered menu, one file per line.
    files = ''.join([str(idx + 1) + ' ' + f + '\n' for (idx, f) in enumerate(flist)])
    fidx = input('%s选择文件(输入数字):' % files)
    if fidx.isdigit() and 0 < int(fidx) <= len(flist):
        return flist[int(fidx) - 1]
    else:
        return -1
if __name__ == '__main__':
    # Default to the current directory unless the user names another.
    dest = './'
    dir_in = input('enter target dir([./]): ')
    if dir_in:
        dest = dir_in
    formt = ['.bmp', '.jpg']
    print(select_file(*formt, destdir=dest))
| true |
class Solution:
    def groupAnagrams(self, strs: "List[str]") -> "List[List[str]]":
        """Group the words of ``strs`` so each group holds mutual anagrams.

        Each word is reduced to a 26-letter count signature (counts joined
        with 'a' so multi-digit counts cannot collide); words sharing a
        signature are anagrams and are collected via numpy index lookup.
        Fixes vs. the original: the annotations are quoted because ``List``
        was never imported (NameError when the class body executed), and a
        leftover debug ``print`` of the count list was removed.
        """
        import numpy as np
        counts = [0] * 26
        signatures = []
        for word in strs:
            for ch in word:
                counts[ord(ch) - ord('a')] += 1
            counts = [str(x) for x in counts]
            signatures.append('a'.join(counts))
            counts = [0] * 26
        outlist = []
        group = []
        unique_sigs = set(signatures)
        sig_array = np.array(signatures)
        for sig in unique_sigs:
            # Indices of all words sharing this signature.
            matches = np.where(sig_array == sig)
            for idx in matches[0]:
                group.append(strs[idx])
            outlist.append(group)
            group = []
        return outlist
# #将列表转换为numpy的数组
# a = np.array(["a","b","c","a","d","a"])
# #获取元素的下标位置
# eq_letter = np.where(a == "a")
# print(eq_letter[0])#[0 3 5]
| true |
adf226480fe3a3fe4b8ea12723a7cfe41181bbe1 | Python | AsummerCat/crawer_demo | /qiubai_crawer.py | UTF-8 | 3,728 | 3.171875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
'''
糗百_爬虫
利用Beautiful Soup 库 和 requests
conda install -c conda-forge beautifulsoup4
conda install -c conda-forge requests
'''
'''
简单实现抓取糗百的数据
抓取流程: 获取页最大页数->根据最大页数遍历查询->抓取单页面的 title及其内容保存到列表中->进行渲染成字符串->导出文件
'''
import os
import requests
from bs4 import BeautifulSoup
import re
from time import *
# Send an HTTP GET request and return the response body as text.
def sendHttp(url):
    """GET ``url`` with browser-like headers and return the response text."""
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0",
               "Referer": url}
    r = requests.get(url, headers=headers)  # add headers to mimic a browser
    return r.text
# Read the pagination banner of the index page to find the page count.
def getMaxPage(url):
    """Return the total number of listing pages (0 when it cannot be parsed)."""
    page_soup = BeautifulSoup(sendHttp(url), 'html.parser')
    # The banner node at contents[6] holds "…pages, …jokes"-style text.
    banner = page_soup.find(id='position').contents[6]
    segments = str(banner).split(",")
    total_pages = 0
    if segments:
        # Pull the first run of digits out of the second comma segment.
        digit_runs = re.findall(r'(\w*[0-9]+)\w*', segments[1])
        if digit_runs:
            total_pages = int(digit_runs[0])
    return total_pages
# Scrape one listing page: collect article titles, bodies and click
# counts, assemble each article and hand it to save_html().
def cat_html(url, page, maxPage):
    output = """第{}页 文章名称: 糗事百科: [{}] \n 点击数:{} \n文章内容\n{}\n\n""" # final output template: page no, title, clicks, body
    tetleContentText = []
    articleContentHtmlText = []
    # Collapse <br/> runs into plain newlines before parsing.
    html = sendHttp(url).replace("<br /><br /><br />", "").replace("<br />", "\n")
    soup = BeautifulSoup(html, 'html.parser')
    ## html nodes of every article title
    titleList = soup.find(id='footzoon').find_all('h3')
    ## html nodes of every article body
    contentList = soup.find(id='footzoon').find_all(id="endtext")
    ## click counts: split on the literal " Click:" marker; the first chunk
    ## precedes any article, so drop it
    clickNum = html.split(" Click:")
    del clickNum[0]
    ## collect every title string
    for i in titleList:
        tetleContentText.append(i.find('a').get_text())
    ## collect every article body node
    for i in contentList:
        articleContentHtmlText.append(i)
    ## assemble complete articles only when the three lists line up
    ## (a mismatch means the page layout was not what we expected)
    if len(tetleContentText) == len(articleContentHtmlText) == len(clickNum):
        print("=======================开始下载 第{}/{}页==============================".format(page, maxPage))
        for i in range(len(tetleContentText)):
            # clickNum[i][0:10] keeps only the first few characters of the
            # split chunk; re.findall then extracts the leading number.
            content = output.format(page, tetleContentText[i],
                                    re.findall(r'(\w*[0-9]+)\w*',
                                               clickNum[i][0:10])[0],
                                    articleContentHtmlText[i].get_text())
            save_html(content, tetleContentText[i], page)
## Persist one assembled article to disk.
def save_html(content, title, page):
    """Write *content* to E:\\糗百text\\<page dir>\\<title>.txt.

    The title is first reduced to CJK/alphanumeric characters so it is a
    legal Windows filename. The per-page directory is created on demand.

    Note: the original re-opened the file in append mode once per
    CHARACTER of the article; this writes the whole article with a single
    open, producing an identical file.
    """
    # Strip every character that is not CJK, a letter, or a digit.
    title = "".join(re.findall('[\u4e00-\u9fa5a-zA-Z0-9]+', title, re.S))
    path = 'E:\\糗百text\\糗事百科第{}页'.format(page)
    if not os.path.exists(path):
        os.mkdir(path)
    print("开始下载糗事百科:{}".format(title))
    # Append mode preserved so repeated runs keep the original semantics.
    with open(r'{}\{}.txt'.format(path, title), 'a', encoding='utf-8') as f:
        f.write(content)
if __name__ == '__main__':
    start_ts = time()
    print("抓取糗百_主函数")
    base_url = "http://www.lovehhy.net/Joke/Detail/QSBK/"
    # Total number of listing pages on the site.
    maxPage = getMaxPage(base_url)
    # Page 1 is served at the bare URL; later pages append their number.
    cat_html(base_url, 1, maxPage)
    for page_no in range(1, maxPage + 1):
        page_url = "http://www.lovehhy.net/Joke/Detail/QSBK/{}".format(page_no)
        cat_html(page_url, page_no + 1, maxPage)
    print('该程序运行时间:', time() - start_ts)
| true |
3cfe5d99485672528a22b32b99ff91169745df97 | Python | kbrewerphd/robotSim | /robot.py | UTF-8 | 9,660 | 3.296875 | 3 | [
"MIT"
] | permissive | """
Program name: robot.py
Author: Dr. Brewer
Initial Date: 20-Nov-2018 through 05-Dec-2018
Python vers: 3.6.5
Description: A simple robot simulation environment.
Code vers: 1.0 - Initial release
"""
from typing import List,Any
import math
import random as r
import time
class Robot():
    """
    This is the Robot class. It is the parent class for the
    individual student robot classes.

    The robot is modeled as a differential-drive vehicle: the two wheel
    velocity settings chosen by the child class are applied each step as
    wheel travel distances that determine the new pose.

    Inherits:
        None
    Returns:
        None
    """

    def __init__(self, s: str, d: float, x: float, y: float, xT: float, yT: float, color: List[int]) -> None:
        """
        This is the constructor for the Robot class

        Args:
            s (str): the name for the robot (to be overridden by student class)
            d (float): the direction the robot is initially heading, in degrees
            x (float): the initial x coordinate location value
            y (float): the initial y coordinate location value
            xT (float): the target x coordinate location value
            yT (float): the target y coordinate location value
            color (List[int]): the red/green/blue color values (0-255) (to be overridden by student class)
        Returns:
            None
        """
        self.memory = []                   # to be defined by individual robots
        self.name = s                      # to be defined by the individual robot; used as an identifier
        self.locationHistory = []          # used for plotting
        self.locationHistory.append([x, y])
        self.heading = math.radians(d)     # the direction (radians) the robot is pointing
        self.__sensorReadings = []         # Forward, Right, Rear, Left
        self.rgb = color
        self.__v_Left = 0.0                # wheel velocity settings
        self.__v_Right = 0.0
        self.__x_Pos = x                   # where the robot is located
        self.__y_Pos = y
        self.__xT_Pos = xT                 # where the target is located
        self.__yT_Pos = yT
        self.__delta_Left = 0.0            # how much each wheel turned in the last step
        self.__delta_Right = 0.0
        self.__stillMoving = True
        self.MAX_WHEEL_V = 10.0
        self.__totalTime = 0.0

    #
    # the following to be used only by the simulation and not overridden by the student robot class
    #
    def set_wheel_deltas(self, l, r):
        """
        This stores how much the wheels have turned since last step.

        Args:
            l (float): the left wheel movement
            r (float): the right wheel movement
        Returns:
            None
        """
        self.__delta_Left = l
        self.__delta_Right = r

    def set_new_position(self, leftDelta, rightDelta):
        """
        This calculates and stores the new robot position and
        heading given the left and right wheel movement. See
        comments for source of algorithm.

        Args:
            leftDelta (float): the left wheel movement
            rightDelta (float): the right wheel movement
        Returns:
            None
        """
        # Algorithm from:
        # https://robotics.stackexchange.com/questions/1653/calculate-position-of-differential-drive-robot
        # http://www8.cs.umu.se/research/ifor/IFORnav/reports/rapport_MartinL.pdf
        # leftDelta and rightDelta = distance that the left and right wheel have moved along the ground
        if (math.fabs(leftDelta - rightDelta) < 1.0e-6):    # wheels moved (nearly) equally: straight line
            new_x = self.__x_Pos + leftDelta * math.cos(self.heading)
            new_y = self.__y_Pos + rightDelta * math.sin(self.heading)
            new_heading = self.heading
        else:
            # Turning: follow an arc around the instantaneous center of rotation.
            R = 1.0 * (rightDelta + leftDelta) / (2.0 * (leftDelta - rightDelta))
            wd = (leftDelta - rightDelta) / 1.0    # axle width assumed to be 1 length unit
            new_x = self.__x_Pos + R * (math.sin(wd + self.heading) - math.sin(self.heading))
            new_y = self.__y_Pos - R * (math.cos(wd + self.heading) - math.cos(self.heading))
            new_heading = self.heading + wd
        self.__x_Pos = new_x
        self.__y_Pos = new_y
        self.locationHistory.append([new_x, new_y])
        self.heading = new_heading

    def get_wheel_vel(self):
        """
        This returns the left and right wheel velocity settings.

        Args:
            None
        Returns:
            (List[float]): the [left, right] wheel velocity settings
        """
        return [self.__v_Left, self.__v_Right]

    def getColor(self):
        """
        This returns the color setting of the robot.

        Args:
            None
        Returns:
            (List[int]): the red/green/blue color values
        """
        return self.rgb

    def getTotalTime(self):
        """
        This returns the total processing time the robot used.

        Args:
            None
        Returns:
            (float): the total processing time
        """
        return self.__totalTime

    def move_robot(self, delta_t):
        """
        This calculates the movement of the robot for a given
        delta time value.

        NOTE(review): delta_t is currently not used in the distance
        calculation -- the wheel velocity settings are applied directly
        as per-step wheel travel distances. Confirm before relying on
        variable time steps.

        Args:
            delta_t (float): the time delta
        Returns:
            None
        """
        # Let the student code choose the wheel velocities, timing it.
        if self.__stillMoving:
            # startTime = time.process_time_ns()  # BEGIN TIMER <-- for v 3.7 or later of python!
            startTime = time.process_time()  # BEGIN TIMER
            self.robot_action()  # calls the child function to be programmed by students
            self.__totalTime += (time.process_time() - startTime)  # END TIMER AND ADD TO STORE
        # now move the robot
        vl, vr = self.get_wheel_vel()
        # now record the new position
        self.set_new_position(vl, vr)
        # now record the wheel turns
        self.set_wheel_deltas(vl, vr)

    def get_position(self) -> List[float]:
        """
        This returns the current position of the robot.

        Args:
            None
        Returns:
            (List[float]): the [x, y] coordinates
        """
        return [self.__x_Pos, self.__y_Pos]

    def get_heading_degrees(self) -> float:
        """
        This returns the current heading of the robot.

        Args:
            None
        Returns:
            (float): the heading, in degrees
        """
        return math.degrees(self.heading)

    def stillMoving(self) -> bool:
        """
        This returns whether the robot is currently
        still able to move.

        Args:
            None
        Returns:
            (bool): True if still moving
        """
        return self.__stillMoving

    def setMoving(self, m: bool) -> None:
        """
        This sets whether the robot is still able
        to move.

        Args:
            m (bool): whether the robot can still move
        Returns:
            None
        """
        self.__stillMoving = m

    def set_sensor_readings(self, st: List[float]) -> None:
        """
        This sets the four sensor readings. Any reading greater than
        15.0 is replaced by the out-of-range sentinel 15.3.

        Args:
            st (List[float]): the four sensor values
                forward, right, rear, left
        Returns:
            None
        """
        # Clamp each of the four readings (replaces four copy-pasted ifs).
        self.__sensorReadings = [15.3 if st[k] > 15.0 else st[k] for k in range(4)]

    #
    # the following to be used by the child object
    #
    def set_robot_wheel_velocity(self, v_left: float, v_right: float) -> bool:
        """
        This sets the two wheel velocities. The success of setting velocity
        is returned as true/false. Values to be set are rotations per time
        interval of left and right wheels. (Robot turns by setting the left/right
        velocities differently.) Allowed values are -10.0 thru 10.0; if values
        are out of range, nothing is changed and False is returned.
        One wheel rotation is one unit length.

        Args:
            v_left (float): the left wheel velocity
            v_right (float): the right wheel velocity
        Returns:
            (bool): the success of setting the velocities
        """
        if (-self.MAX_WHEEL_V <= v_left <= self.MAX_WHEEL_V
                and -self.MAX_WHEEL_V <= v_right <= self.MAX_WHEEL_V):
            self.__v_Left = v_left
            self.__v_Right = v_right
            return True
        else:
            return False

    def get_robot_sensor_readings(self) -> List[float]:
        """
        This gets the sensor readings: the list of four distances to
        obstacles (Forward, Right, Rear, Left). The maximum sensor
        reading is 15.0 length units (15.3 marks out-of-range).

        Args:
            None
        Returns:
            (List[float]): the sensor values
                forward, right, rear, left
        """
        return self.__sensorReadings

    def get_robot_wheel_sensor_ticks(self) -> List[float]:
        """
        This returns the list of left and right wheel rotations
        during the previous time interval.

        Args:
            None
        Returns:
            (List[float]): the [left, right] wheel rotations
        """
        return [self.__delta_Left, self.__delta_Right]

    def get_robot_target(self):
        """
        This calculates the bearing and distance to the target.

        Args:
            None
        Returns:
            (tuple): (angle, distance) where angle is the degrees
                clockwise from the robot's forward direction,
                normalized into (-180, 180], and distance is the
                straight-line distance to the target
        """
        x_delta = (self.__xT_Pos - self.__x_Pos)
        y_delta = (self.__yT_Pos - self.__y_Pos)
        target_dist = math.sqrt((y_delta * y_delta) + (x_delta * x_delta))
        target_angle = math.degrees(math.asin(abs(x_delta / target_dist)))
        # Map the first-quadrant angle into the correct quadrant.
        if (x_delta > 0 and y_delta > 0): target_angle = 90. - target_angle
        if (x_delta > 0 and y_delta < 0): target_angle = -90. + target_angle
        if (x_delta < 0 and y_delta < 0): target_angle = -90. - target_angle
        if (x_delta < 0 and y_delta > 0): target_angle = 90. + target_angle
        # Make the bearing relative to the current heading and wrap it.
        target_angle -= math.degrees(self.heading)
        while (target_angle > 180.):
            target_angle -= 360
        while (target_angle < -180.):
            target_angle += 360
        return target_angle, target_dist

    #
    # the following to be overridden by the child object
    #
    def robot_action(self) -> None:
        """
        This function is to be overridden by the student robot
        implementation.

        The student robot only has access to
            self.memory,
            self.set_robot_wheel_velocity(),
            self.get_robot_target(),
            self.get_robot_wheel_sensor_ticks(),
            self.get_robot_sensor_readings()

        Args:
            None
        Returns:
            None
        """
        self.set_robot_wheel_velocity(0.0, 0.0)
| true |
2fe6282ce0bc4c3229170646040918f67b5de839 | Python | ericjtaylor/random-fat | /random-fat.py | UTF-8 | 11,517 | 3.171875 | 3 | [] | no_license | from __future__ import division
import itertools
import numpy as np
import numpy.random as rng
import matplotlib.pyplot as plt
# Name: Random Function Analysis Tool
# Author: Eric Taylor
#
# Generates a random sequence, calculating the probability
# mass function of the intervals and the entropy of the
# output given a history.
#
# Currently 11 algorithms used in various Tetris games are
# implemented.
iterations = 100000 # random sequence length (pieces drawn per randomizer)
radix = 7 # distinct category types (the seven tetromino kinds)
depth = 8 # conditional entropy history depth (longest history conditioned on)
# Atari / Sega / etc Tetris
class pure:
    """Memoryless randomizer: every draw is uniform over the piece types."""
    def rand(self):
        drawn = rng.randint(0, radix)
        return drawn
# NES Tetris
class nes:
    """One-deep history with a single re-roll on a repeat (or dummy value)."""
    def __init__(self):
        self.h_size = 1
        self.history = np.zeros([self.h_size], dtype=np.int64)
    def rand(self):
        # First roll spans radix+1 values; the extra value always re-rolls,
        # as does a repeat of the previous piece.
        candidate = rng.randint(0, radix + 1)
        if candidate in (self.history[0], radix):
            candidate = rng.randint(0, radix)
        # remember the dealt piece
        self.history[0] = candidate
        return candidate
# GameBoy Tetris -- emulates the console's cycle-counter based roll,
# including the real game's buggy bitwise-OR history test.
class gboy:
    def __init__(self):
        self.h_size = 2
        self.history = np.zeros([self.h_size], dtype=np.int64)
    def rand(self):
        # select next piece
        cycles = rng.randint(0,0x4000) # to-do: model this distribution... it's unlikely to be random
        for rolls in range(3):
            div = cycles // 0x40 # convert to 8 bit counter
            piece = div % 7
            # real game bug -- bitwise or, used to incorrectly test "3-in-a-row"
            if piece == (piece | self.history[0] | self.history[1]):
                # deterministic cycle advance for the "rerolls"
                cycles += 100 # constant
                cycles += (388 * (div // 7)) # full loop of 7
                cycles += (56 * (div % 7)) # cycles for remainder
                cycles &= 0x3FFF # 6 bit cycle counter (not 8 bits because every instruction is a multiple of 4 cycles)
                continue
            else:
                break
        # update history (two-deep shift register of dealt pieces)
        self.history[1] = self.history[0]
        self.history[0] = piece
        return piece
# GameBoy Tetris (hypothetical bugfixed)
class gboy_fixed:
    """GameBoy randomizer with the bitwise-OR bug repaired: up to three
    uniform rolls, re-rolling only on a true three-in-a-row."""
    def __init__(self):
        self.h_size = 2
        self.history = np.zeros([self.h_size], dtype=np.int64)
    def rand(self):
        for _attempt in range(3):
            piece = rng.randint(0, radix)
            three_in_a_row = (piece == self.history[0]) and (self.history[0] == self.history[1])
            if not three_in_a_row:
                break
        # slide the two-deep history window
        self.history[1] = self.history[0]
        self.history[0] = piece
        return piece
# Tetris the Grand Master
class tgm1:
    """4-deep history with up to 4 rejection rolls; the very first piece
    is re-rolled until it is not Z, S or O."""
    def __init__(self):
        self.h_size = 4
        self.history = np.zeros([self.h_size], dtype=np.int64) # initial history ZZZZ
        self.first_piece = 1
    def _roll(self):
        # The first piece of the game keeps rolling until it is allowed.
        if self.first_piece == 1:
            while True:
                candidate = rng.randint(0,radix)
                if candidate not in (1, 2, 5): # Z, S, O forbidden as first piece
                    self.first_piece = 0
                    return candidate
        return rng.randint(0,radix)
    def rand(self):
        # Accept the first roll that is absent from the history window;
        # after four tries the last roll is accepted regardless.
        for _attempt in range(4):
            piece = self._roll()
            if piece not in self.history:
                break
        # shift the accepted piece into the history window
        for slot in range(self.h_size-1, 0, -1):
            self.history[slot] = self.history[slot-1]
        self.history[0] = piece
        return piece
# Tetris the Grand Master 2: The Absolute Plus
class tgm2:
    """Like TGM1 but with 6 rejection rolls and a seeded ZSZS history."""
    def __init__(self):
        self.h_size = 4
        self.history = [1, 2, 1, 2] # initial history ZSZS
        self.first_piece = 1
    def _roll(self):
        # The first piece of the game keeps rolling until it is allowed.
        if self.first_piece == 1:
            while True:
                candidate = rng.randint(0,radix)
                if candidate not in (1, 2, 5): # Z, S, O forbidden as first piece
                    self.first_piece = 0
                    return candidate
        return rng.randint(0,radix)
    def rand(self):
        # Accept the first roll absent from history; after six tries the
        # last roll is accepted regardless.
        for _attempt in range(6):
            piece = self._roll()
            if piece not in self.history:
                break
        # shift the accepted piece into the history window
        for slot in range(self.h_size-1, 0, -1):
            self.history[slot] = self.history[slot-1]
        self.history[0] = piece
        return piece
# Tetris the Grand Master 3: Terror Instinct -- 35-slot piece pool that is
# biased toward the most drought-starved piece, including the real game's
# pool-update bug.
class tgm3:
    def __init__(self):
        self.h_size = 4
        self.history = [1, 2, 1, 2] # initial history ZSZS
        self.first_piece = 1
        # drought[p] = draws since piece p last appeared (-999 = never seen)
        self.drought = np.zeros([radix], dtype=np.int64)
        self.droughtest = 0
        # 35-entry pool: five copies of each of the seven pieces
        self.pool = np.zeros([radix*5], dtype=np.int64)
        for i in range(radix):
            self.pool[(i*5)+0] = i
            self.pool[(i*5)+1] = i
            self.pool[(i*5)+2] = i
            self.pool[(i*5)+3] = i
            self.pool[(i*5)+4] = i
            self.drought[i] = -999
    def rand(self):
        # select next piece
        for rolls in range(6):
            # roll first piece (re-roll until it is not Z, S or O)
            if self.first_piece == 1:
                while self.first_piece == 1:
                    piece = rng.randint(0,radix)
                    if piece not in (1, 2, 5): # Z, S, O forbidden as first piece
                        break
            # roll general piece: draw from the pool, replacing the drawn
            # slot with the current most-droughted piece
            else:
                index = rng.randint(0,35)
                piece = self.pool[index]
                self.pool[index] = self.droughtest
            if piece not in self.history:
                break
        # update history
        for h in range(self.h_size-1, 0, -1):
            self.history[h] = self.history[h-1]
        self.history[0] = piece
        # unless it's the first piece...
        if self.first_piece == 1:
            self.first_piece = 0
        # ... update droughts
        else:
            for p in range(radix):
                self.drought[p] += 1
            self.drought[piece] = 0
            # new droughtest
            if piece == self.droughtest:
                self.droughtest = np.argmax(self.drought)
            # real game bug -- under specific conditions the piece pool is not updated with the new droughtest
            if not (piece == self.droughtest and rolls > 0 and np.argmin(self.drought) >= 0):
                self.pool[index] = self.droughtest
        return piece
# Tetris with Cardcaptor Sakura: Eternal Heart -- 6-deep history, 4
# rejection rolls, then an unusual bonus roll that can deliberately hand
# back a recently seen piece.
class ccs:
    def __init__(self):
        self.h_size = 6
        self.history = [-1, -1, -1, -1, -1, -1]
    def rand(self):
        # select next piece
        for rolls in range(4):
            #roll
            piece = rng.randint(0,radix)
            # check history
            if piece not in self.history:
                break
        # weird bonus 5th roll (for-else: only runs when all 4 rolls failed)
        else:
            if rng.randint(0,2) == 1:
                if self.history[1] != -1:
                    piece = self.history[1]
                else:
                    piece = rng.randint(0,radix)
            else:
                # NOTE(review): this branch tests history[5] but then reuses
                # history[1] -- looks like a quirk carried over from the
                # game's logic; confirm against the original disassembly.
                if self.history[5] != -1:
                    piece = self.history[1]
                else:
                    piece = rng.randint(0,radix)
        # update history
        for h in range(self.h_size-1, 0, -1):
            self.history[h] = self.history[h-1]
        self.history[0] = piece
        return piece
# Super Rotation System / Tetris Guideline / "Random Generator" aka 7-bag
class srs:
    """7-bag: shuffle one copy of each piece, deal them out, reshuffle."""
    def __init__(self):
        # a freshly shuffled bag containing each piece exactly once
        self.pool = np.arange(0, radix, 1, dtype=np.int64)
        rng.shuffle(self.pool)
        self.index = 0
    def rand(self):
        dealt = self.pool[self.index]
        self.index += 1
        if self.index == radix:
            # bag exhausted: build and shuffle a new one
            self.__init__()
        return dealt
# Tetris Online Japan (beta)
class toj:
    """8-bag: one copy of each piece plus one uniformly random duplicate."""
    def __init__(self):
        self.pool = np.arange(0, radix+1, 1, dtype=np.int64)
        # the eighth slot is a random duplicate of one of the seven pieces
        self.pool[radix] = rng.randint(0,radix)
        rng.shuffle(self.pool)
        self.index = 0
    def rand(self):
        dealt = self.pool[self.index]
        self.index += 1
        if self.index == (radix+1):
            # bag exhausted: build and shuffle a new one
            self.__init__()
        return dealt
# Double Bag aka 14-bag
class bag2x:
    """14-bag: two copies of every piece shuffled into one pool."""
    def __init__(self):
        # two copies of 0..radix-1 in a single shuffled pool
        self.pool = np.tile(np.arange(0, radix, 1, dtype=np.int64), 2)
        rng.shuffle(self.pool)
        self.index = 0
    def rand(self):
        dealt = self.pool[self.index]
        self.index += 1
        if self.index == (radix*2):
            # pool exhausted: build and shuffle a new one
            self.__init__()
        return dealt
# The New Tetris
class tnt64:
    """63-bag: nine copies of every piece shuffled into one pool."""
    def __init__(self):
        # nine copies of 0..radix-1 in a single shuffled pool
        self.pool = np.tile(np.arange(0, radix, 1, dtype=np.int64), 9)
        rng.shuffle(self.pool)
        self.index = 0
    def rand(self):
        dealt = self.pool[self.index]
        self.index += 1
        if self.index == (radix*9):
            # pool exhausted: build and shuffle a new one
            self.__init__()
        return dealt
# Recursively accumulate the conditional-entropy partial sums:
# ent_calc(d, ...) enumerates every pattern extension at depth d and adds
# -p(pattern) * log p(next symbol | previous d symbols) into entropy[d].
# NOTE: `history` is mutated in place while enumerating patterns, and
# `entropy` is the in-place accumulator; both are shared with the caller.
def ent_calc(d, pat_cnt, history, entropy):
    # `for history[d] in range(radix)` deliberately writes into the shared
    # history buffer so tuple(history[:d+1]) names the current pattern.
    for history[d] in range(radix):
        if pat_cnt[d][tuple(history[:d+1])] > 0:
            entropy[d] -= ( (pat_cnt[d][tuple(history[:d+1])] / iterations) * np.log(pat_cnt[d][tuple(history[:d+1])] / pat_cnt[d-1][tuple(history[:d])]) )
        if d < depth-1:
            ent_calc(d+1, pat_cnt, history, entropy)
def stats_calc(randomizer):
    """Draw `iterations` pieces from *randomizer* and compute statistics.

    Computes (a) the probability mass function of the gap ("drought")
    between repeats of the same piece and (b) the conditional entropy of
    the next piece given histories of length 1..depth, normalised by
    log(radix) so 1.0 means "as random as a pure uniform source".

    Args:
        randomizer: a bound `rand` method of one of the randomizer classes.

    Returns:
        (intervals, entropy) as numpy arrays.
    """
    # interval vars
    intervals = np.zeros([1000], dtype=np.int64)
    last_seen = np.zeros([radix], dtype=np.int64)
    # entropy vars
    history = np.zeros((depth), dtype=np.int64)
    entropy = np.zeros((depth), dtype=np.float64)
    # pat_cnt[h] counts every observed (h+1)-symbol pattern
    pat_cnt = [np.zeros((radix), dtype=np.int64)]
    for i in range(1, depth):
        pat_cnt.append(np.repeat(np.expand_dims(pat_cnt[i-1], axis = i), radix, i))
    for _ in itertools.repeat(None, iterations):
        # get next piece
        piece = randomizer()
        # update history (history[0] = history[-1] at h == 0 is harmless:
        # it is overwritten by the new piece immediately after the shift)
        for h in reversed(range(depth)):
            history[h] = history[h-1]
        history[0] = piece
        # update interval counters
        last_seen += 1
        intervals[last_seen[piece]] += 1
        last_seen[piece] = 0
        # update pattern counters
        for h in range(depth):
            pat_cnt[h][tuple(history[depth-1-h:])] += 1
    # calculate entropy
    for history[0] in range(radix):
        if pat_cnt[0][history[0]] > 0:
            entropy[0] -= ( (pat_cnt[0][history[0]] / iterations) * np.log(pat_cnt[0][history[0]] / iterations) )
        if 0 < depth-1:
            ent_calc(1, pat_cnt, history, entropy)
    intervals = intervals / iterations # converts to % all intervals
    entropy = entropy / np.log(radix) # converts to % of pure random
    # Bound methods expose their class via __self__ on both Python 2 and 3;
    # the original `randomizer.im_class` was Python 2 only and raised
    # AttributeError under Python 3.
    print(type(randomizer.__self__))
    print("interval: ", intervals[:20])
    print("entropy: ", entropy)
    return (intervals, entropy)
# Run every randomizer once and collect its drought PMF and entropy curve.
rnd_int, rnd_ent = stats_calc(pure().rand)
nes_int, nes_ent = stats_calc(nes().rand)
gby_int, gby_ent = stats_calc(gboy().rand)
gm1_int, gm1_ent = stats_calc(tgm1().rand)
gm2_int, gm2_ent = stats_calc(tgm2().rand)
gm3_int, gm3_ent = stats_calc(tgm3().rand)
ccs_int, ccs_ent = stats_calc(ccs().rand)
srs_int, srs_ent = stats_calc(srs().rand)
toj_int, toj_ent = stats_calc(toj().rand)
b14_int, b14_ent = stats_calc(bag2x().rand)
b63_int, b63_ent = stats_calc(tnt64().rand)
# (label, colour, interval PMF, entropy curve) in plotting order
series = [
    ('pure_random', '#000000', rnd_int, rnd_ent),
    ('nes', '#2277EE', nes_int, nes_ent),
    ('gboy', '#113311', gby_int, gby_ent),
    ('tgm1', '#CC6666', gm1_int, gm1_ent),
    ('tgm2', '#EE6666', gm2_int, gm2_ent),
    ('tgm3', '#FF0000', gm3_int, gm3_ent),
    ('ccs', '#FF00FF', ccs_int, ccs_ent),
    ('srs', '#00FFFF', srs_int, srs_ent),
    ('toj', '#00FF88', toj_int, toj_ent),
    ('bag2x', '#0000FF', b14_int, b14_ent),
    ('tnt64', '#FFFF00', b63_int, b63_ent),
]
plt.figure(num=1, figsize=(10, 5), dpi=160, facecolor='w', edgecolor='k')
# left panel: drought-interval probabilities
plt.subplot(121)
for label, colour, interval_pmf, _ent in series:
    plt.plot(interval_pmf, '.-', label=label, color=colour)
plt.title('probability of drought intervals')
plt.xlabel('interval')
plt.xlim(xmin=1, xmax=15)
plt.ylabel('probability')
# right panel: conditional entropy vs history length
plt.subplot(122)
for label, colour, _pmf, entropy_curve in series:
    plt.plot(entropy_curve, '.-', label=label, color=colour)
plt.title('conditional entropy given history')
plt.xlabel('history size')
plt.xlim(xmin=0, xmax=14)
plt.ylabel('entropy')
plt.ylim(ymin=0, ymax=1)
plt.tight_layout()
plt.show() | true |
55ed09e2c3dff07d4bba25ecc565fb68face06b9 | Python | CQU-yxy/bigdata_NYCAirbnb | /代码文件及配置说明/WordCount.py | UTF-8 | 1,718 | 2.875 | 3 | [] | no_license | #-*- coding:utf-8 -*-
from pyspark import SparkConf, SparkContext
from visualize import visualize
import jieba
# Root directory holding the input corpus and the stop-word list.
SRCPATH = '/home/hadoop/proj/src/'
# Local-mode Spark context shared by every function in this script.
conf = SparkConf().setAppName("proj").setMaster("local")
sc = SparkContext(conf=conf)
def getStopWords(stopWords_filePath):
    """Load the stop-word list: one word per line, stripped of whitespace.

    Uses a context manager so the file handle is always closed (the
    original leaked it).
    """
    with open(stopWords_filePath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]
def jiebaCut(filePath):
    """Read the whole text file through Spark and tokenise it with jieba.

    Returns the list of tokens produced by jieba.lcut.
    """
    # Each RDD element corresponds to one line of the input file.
    answersRdd = sc.textFile(filePath)
    # Concatenate all lines into one string (avoid shadowing builtin `str`,
    # which the original did).
    full_text = answersRdd.reduce(lambda left, right: left + right)
    # jieba word segmentation
    return jieba.lcut(full_text)
def wordcount(isvisualize=False):
    """Count word frequencies over the corpus.

    :param isvisualize: when True, also draw a word cloud of the top words
    :return: an RDD of (word, count) pairs sorted by count, descending
    """
    # Stop-word list used to filter the tokens.
    stopwords = getStopWords(SRCPATH + 'stop_words.txt')
    # Tokenise the corpus with jieba (via Spark).
    tokens = jiebaCut("file://" + SRCPATH + "AB_data.txt")
    # Frequency count: drop single-character tokens and stop words, then
    # classic map/reduce word counting, sorted by descending count.
    wordsRdd = sc.parallelize(tokens)
    resRdd = (wordsRdd
              .filter(lambda word: len(word) != 1)
              .filter(lambda word: word not in stopwords)
              .map(lambda word: (word, 1))
              .reduceByKey(lambda a, b: a + b)
              .sortBy(ascending=False, numPartitions=None, keyfunc=lambda x: x[1]))
    # Optional visualisation as a word cloud of the 50 most frequent words.
    if isvisualize:
        v = visualize()
        wwDic = v.rdd2dic(resRdd, 50)
        v.drawWorcCloud(wwDic)
    return resRdd
if __name__ == '__main__':
    # Run the count with visualisation and show the ten most frequent words.
    top_words = wordcount(isvisualize=True)
    print(top_words.take(10))
| true |
67a3b738d63949f6bf1b5bba50273c04c63d939f | Python | jalague/Projects | /BioInformatics/Assingments/assignment1.py | UTF-8 | 3,667 | 3.203125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 25 11:46:10 2016
@author: John
mRNA Translator, into 6 frames
Read in file of sequence
translate into three letter codons
parse for start and stop codons and mark them for position 1 2 and 3
find reverse compliment of sequence and repeat
"""
import sys
def readSeq(file):
    """Read a sequence file and return only its A/C/G/T letters, uppercased.

    Every character that is not one of the four DNA bases (in either
    case) is discarded. The file handle is closed via a context manager
    (the original leaked it), and the string is built with join instead
    of quadratic `+=`.
    """
    bases = []
    with open(file, 'r') as handle:
        for line in handle:
            for ch in line:
                if ch in 'ACGTacgt':
                    bases.append(ch.upper())
    return ''.join(bases)
def translate1(sequence):
    """Translate a nucleotide sequence into one-letter amino-acid codes.

    The sequence is read codon by codon, i.e. in NON-overlapping steps of
    three bases (the original stepped by 1, producing every overlapping
    codon instead of a reading-frame translation). Trailing bases that do
    not form a full codon are ignored. Stop codons translate to "Stop".

    Raises:
        KeyError: if a codon contains a character other than A/C/G/T.
    """
    acids = {'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L',
             'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'TCT': 'S',
             'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P', 'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'GCT': 'A', 'GCC': 'A', 'GCA': 'A',
             'GCG': 'A', 'TAT': 'Y', 'TAC': 'Y', 'TAA': 'Stop', 'TAG': 'Stop', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
             'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K', 'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'TGT': 'C', 'TGC': 'C',
             'TGA': 'Stop', 'TGG': 'W', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R',
             'AGG': 'R', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G'}
    translation = ''
    # Step by 3 so each codon is consumed exactly once.
    for x in range(0, len(sequence) - 2, 3):
        translation += acids[sequence[x:x + 3]]
    return translation
def translate2(sequence):
    """Translate a nucleotide sequence into three-letter amino-acid codes.

    Reads the sequence codon by codon, i.e. in NON-overlapping steps of
    three bases (the original stepped by 1, producing every overlapping
    codon instead of a reading-frame translation). Trailing bases that do
    not form a full codon are ignored. Stop codons translate to "Stop".

    Raises:
        KeyError: if a codon contains a character other than A/C/G/T.
    """
    acids = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu', 'CTT': 'Leu', 'CTC': 'Leu', 'CTA': 'Leu', 'CTG': 'Leu',
             'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG': 'Met', 'GTT': 'Val', 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'TCT': 'Ser',
             'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'CCT': 'Pro', 'CCC': 'Pro', 'CCA': 'Pro', 'CCG': 'Pro', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr', 'GCT': 'Ala', 'GCC': 'Ala', 'GCA': 'Ala',
             'GCG': 'Ala', 'TAT': 'Tyr', 'TAC': 'Tyr', 'TAA': 'Stop', 'TAG': 'Stop', 'CAT': 'His', 'CAC': 'His', 'CAA': 'Gln', 'CAG': 'Gln',
             'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu', 'TGT': 'Cys', 'TGC': 'Cys',
             'TGA': 'Stop', 'TGG': 'Trp', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg', 'CGG': 'Arg', 'AGT': 'Ser', 'AGC': 'Ser', 'AGA': 'Arg',
             'AGG': 'Arg', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA': 'Gly', 'GGG': 'Gly'}
    translation = ''
    # Step by 3 so each codon is consumed exactly once.
    for x in range(0, len(sequence) - 2, 3):
        translation += acids[sequence[x:x + 3]]
    return translation
def reverseCompliment(sequence):
    """Return the reverse complement of a DNA sequence (A<->T, G<->C).

    Raises KeyError when the sequence contains a non-ACGT character.
    (Name kept for compatibility; "complement" is the standard spelling.)
    """
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return ''.join(pairs[base] for base in reversed(sequence))
def frames(sequence):
    """Print the translations of all six reading frames.

    Frames 1-3 translate the forward strand at offsets 0..2; frames 4-6
    translate the reverse complement at the same offsets.
    """
    strand = sequence
    label = "5'3' Frame "
    for _strand_no in range(2):
        for offset in range(3):
            print(label + str(offset + 1))
            print(translate1(strand[offset:]))
            print()
            print()
        # switch to the complementary strand for the second pass
        label = "3'5' Frame "
        strand = reverseCompliment(strand)
def main():
    """Entry point: read the sequence file named on the command line and
    print all six reading-frame translations."""
    source_path = sys.argv[1:][0]
    frames(readSeq(source_path))
if __name__ == "__main__":
main()
#print(translate(readSeq('seq.txt')))
| true |
f28f602c7026ce8197afbc98ef90359288d33423 | Python | ericssy/smart-home-security | /SVM & Autoencoder/app/ab_one_class_svm.py | UTF-8 | 1,436 | 3.171875 | 3 | [] | no_license | import pickle
import numpy as np
from sklearn import svm
class OneClassSVM_Ab:
    """Anomaly detector wrapping a pre-trained, pickled one-class SVM."""

    def __init__(self, file_name):
        """Load the pickled classifier from *file_name*.

        latin1 encoding allows unpickling models that were serialised
        under Python 2. A context manager closes the file handle (the
        original `pickle.load(open(...))` leaked it).
        """
        with open(file_name, 'rb') as model_file:
            self.clf = pickle.load(model_file, encoding='latin1')

    # temp_change_f: times of temperature changes within the 100s window
    # temp_avg_f: average temperature within the 100s window
    # door_cnt_f: count of active door status within the 100s window
    # motion_cnt_f: count of active motion status within the 100s window
    # acc_cnt_f: count of active acceleration status within the 100s window
    # timestamp_f: ignore date; the way to get the timestamp is:
    #   (1) convert the middle time point of the 100s window to the i-th
    #       second of the day, e.g. 16:23:15 -> (16 * 60 + 23) * 60 + 15 = 58995
    #   (2) map it onto the 48 daily time zones of 1200s each: 58995 / 1200
    def predict(self, temp_change_f, temp_avg_f, door_cnt_f, motion_cnt_f, acc_cnt_f, timestamp_f):
        """Return the wrapped model's prediction for one feature window.

        NOTE(review): presumably +1 = normal / -1 = anomaly per the usual
        one-class SVM convention -- confirm against the pickled model.
        """
        # Single-sample feature matrix in the order the model was trained with.
        X = np.array([[temp_change_f, temp_avg_f, door_cnt_f,
                       motion_cnt_f, acc_cnt_f, timestamp_f]])
        return self.clf.predict(X)
| true |
8bcf62b780d93ddb9c8711d672a8fe08b1c44ef6 | Python | CptThreepwood/TropeStats | /src/scraping/get_trope_list.py | UTF-8 | 1,228 | 2.515625 | 3 | [] | no_license | import os
import bs4
import yaml
import time
import requests
from config import TROPE_INDEX, TROPE_INDEX_DIR
# Listing endpoint for all pages of type "trope" in the Main namespace.
TROPE_LIST_BASE = "https://tvtropes.org/pmwiki/pagelist_having_pagetype_in_namespace.php?n=Main&t=trope"
def get_trope_list_page(n=1):
    """Fetch page *n* of the trope index listing; return the raw body bytes."""
    page_url = '{}&page={}'.format(TROPE_LIST_BASE, n)
    return requests.get(page_url).content
def get_trope_name(url: str) -> str:
    """Derive the trope identifier from a wiki URL: the final path
    component with any file extension stripped."""
    stem, _ext = os.path.splitext(url)
    return os.path.basename(stem)
def parse_tropes(content):
    """Extract every trope name linked from the index table of one page."""
    table = bs4.BeautifulSoup(content, features="html.parser").find(id='main-article').find('table')
    return [get_trope_name(anchor['href']) for anchor in table.find_all('a')]
def save_trope_page(content, i):
    """Cache the raw HTML of index page *i* under TROPE_INDEX_DIR."""
    page_path = os.path.join(TROPE_INDEX_DIR, 'page_{}.html'.format(i))
    with open(page_path, 'wb') as html_io:
        html_io.write(content)
def download_trope_index():
    """Walk the paginated trope index, caching each page's HTML and
    collecting every trope name, then dump the full list to TROPE_INDEX.

    Stops at the first page that yields no tropes. The fetch/parse pair
    now appears once inside the loop (the original duplicated it before
    and inside the loop).
    """
    tropes = []
    page_no = 1
    while True:
        content = get_trope_list_page(page_no)
        new_tropes = parse_tropes(content)
        if not new_tropes:
            break
        save_trope_page(content, page_no)
        tropes += new_tropes
        page_no += 1
        time.sleep(1)  # be polite to the server between requests
    with open(TROPE_INDEX, 'w') as f:
        yaml.dump(tropes, f)
| true |
e1582d443eb4bf3d066a0099955620a766a348f4 | Python | jxjk/git_jxj | /opencvfiles/copymakeborder_demo.py | UTF-8 | 1,895 | 3.484375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
9.5 为图像扩边(填充)
cv2.copyMakeBorder(src,top,borderType,value)
• src 输入图像
• top, bottom, left, right 对应边界的像素数目。
• borderType 要添加那种类型的边界,类型如下
– cv2.BORDER_CONSTANT 添加有颜色的常数值边界,还需要
下一个参数(value)。
– cv2.BORDER_REFLECT 边界元素的镜像。比如: fedcba|abcdefgh|
hgfedcb
– cv2.BORDER_REFLECT_101 or cv2.BORDER_DEFAULT
跟上面一样,但稍作改动。例如: gfedcb|abcdefgh|gfedcba
– cv2.BORDER_REPLICATE 重复最后一个元素。例如: aaaaaa|
abcdefgh|hhhhhhh
– cv2.BORDER_WRAP 不知道怎么说了, 就像这样: cdefgh|
abcdefgh|abcdefg
• value 边界颜色,如果边界的类型是cv2.BORDER_CONSTANT
蒋小军
2018.7.5
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
BLUE = [255, 0, 0]
img1 = cv2.imread(r'C:\Users\Public\Pictures\Sample Pictures\opencv_logo.png')
# Each border mode, applied with a 10-pixel margin on every side.
border_modes = [
    ('REPLICATE', cv2.BORDER_REPLICATE, {}),
    ('REFLECT', cv2.BORDER_REFLECT, {}),
    ('REFLECT_101', cv2.BORDER_REFLECT_101, {}),
    ('WRAP', cv2.BORDER_WRAP, {}),
    ('CONSTANT', cv2.BORDER_CONSTANT, {'value': BLUE}),
]
panels = [('ORIGINAL', img1)]
for caption, mode, extra in border_modes:
    panels.append((caption, cv2.copyMakeBorder(img1, 10, 10, 10, 10, mode, **extra)))
# Lay the six panels out on a 2x3 grid (cells 231..236).
for cell, (caption, image) in enumerate(panels, start=231):
    plt.subplot(cell), plt.imshow(image, 'gray'), plt.title(caption)
plt.show()
| true |
def twoSum(self, nums, target):
    """Return indices [i, j] (i < j) of the two entries summing to target.

    One-pass hash map, O(n). The original brute force started its inner
    loop at index 1 regardless of i, so it could pair an element with
    itself (returning [i, i]) or return indices out of order; it also
    left an unused `ans` list. Returns None when no pair exists, as the
    original did.
    """
    seen = {}  # value -> index of its first occurrence
    for j, value in enumerate(nums):
        need = target - value
        if need in seen:
            return [seen[need], j]
        seen[value] = j
| true |
81de9971c6333b50bc64a9ffe9cb66d66cf931f7 | Python | hidemori0422/codewars | /tests/test_create_phone_number.py | UTF-8 | 493 | 2.921875 | 3 | [] | no_license | #! /usr/bin/env python3
from src.create_phone_number import create_phone_number
def test_phone_number0():
    """All-zero digits must format as the all-zero phone number."""
    assert create_phone_number([0] * 10) == '(000) 000-0000'
def test_phone_number1():
    """Ascending digits 0-9 must format as '(012) 345-6789'."""
    assert create_phone_number(list(range(10))) == '(012) 345-6789'
# Smoke message when the test module is executed directly.
if __name__ == "__main__":
    print('Module codewars/tests/test_create_phone_number.py')
| true |
6180fb010ebfd239eb6089cc8e9dd497a080c5a6 | Python | Raushan117/Python | /033_list.py | UTF-8 | 307 | 3.796875 | 4 | [] | no_license | # To iterate over a list with the numbers
a = ['Python', 'is', 'a', 'great', 'programming', 'lanuage']
for i in range(len(a)):
print(i, a[i])
def write_to_file_many_item(file, separator, *args):
    """Join *args with `separator` and write the result to file object `file`."""
    joined = separator.join(args)
    file.write(joined)
| true |
244f786e87f7fc585efd75a614cde03291dbc983 | Python | ROXER94/Project-Euler | /055/55 - LychrelNumbers.py | UTF-8 | 430 | 3.9375 | 4 | [] | no_license | # Calculates the number of Lychrel numbers below 10,000
def isPalindrome(string):
    """True when `string` reads the same forwards and backwards."""
    return string == ''.join(reversed(string))
def Lychrel(string):
    """One reverse-and-add step: the number plus its digit-reversal."""
    reversed_digits = string[::-1]
    return int(string) + int(reversed_digits)
# Count numbers below 10,000 that never reach a palindrome within 50
# reverse-and-add iterations (Project Euler 55 definition of a Lychrel number).
total = 0
for i in range(10000):
    L = True            # assume Lychrel until a palindrome appears
    count = 0
    value = str(i)
    while count < 50:
        value = Lychrel(str(value))
        if isPalindrome(str(value)):
            L = False   # reached a palindrome -> not Lychrel
            break
        else:
            count += 1
    if L:
        total += 1
print(total)
| true |
a8cdca63b29340449b2731ad4fa6c486593bf2ca | Python | Crazy-Ginger/MOSAR | /modules/craftmodule.py | UTF-8 | 810 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3.5
"""Module class to hold data on individual modules that form part of the morsecraft structure"""
from numpy import array, round
__author__ = "Rebecca Wardle"
__copyright__ = "Copyright 2020 Rebecca Wardle"
__license__ = "MIT License"
__credit__ = ["Rebecca Wardle"]
__version__ = "0.5"
class Module:
    """One structural module of the morsecraft.

    Holds the module id, physical dimensions, position (rounded to 3
    decimals via numpy), rotation quaternion, connection slots (two per
    dimension axis) and module type.
    """

    def __init__(self, mod_id, dimensions=(0.1, 0.1, 0.1), position=(0, 0, 0)):
        self.id = mod_id
        self.dims = dimensions
        self.type = None
        # Two connection slots (one per face) for every dimension axis.
        self.cons = [None for _ in range(2 * len(dimensions))]
        # Identity quaternion [w, x, y, z].
        self.rotation = [1, 0, 0, 0]
        # `round` here is numpy's round (module-level import), so a tuple
        # position becomes an ndarray rounded to 3 decimal places.
        self.pos = round(position, 3)

    def __str__(self):
        return self.id

    def __repr__(self):
        return self.id
| true |
563c01ee21677405d58f6ec151e207b268518329 | Python | shariquemulla/python_basics | /control-flow-1.py | UTF-8 | 4,525 | 3.96875 | 4 | [] | no_license | season = 'spring'
if season == 'spring':
print('plant the garden!')
elif season == 'summer':
print('water the garden!')
elif season == 'fall':
print('harvest the garden!')
elif season == 'winter':
print('stay indoors!')
else:
print('unrecognized season')
#####################################################################################################
# First Example - top up the phone from the bank only when the balance is low.
# Try changing the value of phone_balance: below 10 triggers a $10 transfer.
phone_balance = 10
bank_balance = 50
if phone_balance < 10:
    phone_balance += 10
    bank_balance -= 10
print(phone_balance)
print(bank_balance)
# Second Example - report parity of `number` (try changing its value).
number = 145
if number % 2 == 0:
    print("Number " + str(number) + " is even.")
else:
    print("Number " + str(number) + " is odd.")
# Third example: bus fare by age bracket (edit `age` to experiment).
age = 35

# Age limits for bus fares.
free_up_to_age = 4
child_up_to_age = 18
senior_from_age = 65

# Bus fare prices.
concession_ticket = 1.25
adult_ticket = 2.50

# Children and seniors both ride at the concession price, so the two
# branches are folded into one condition.
if age <= free_up_to_age:
    ticket_price = 0
elif age <= child_up_to_age or age >= senior_from_age:
    ticket_price = concession_ticket
else:
    ticket_price = adult_ticket

message = "Somebody who is {} years old will pay ${} to ride the bus.".format(age, ticket_price)
print(message)
##################################################################################################
points = 174  # use this input to make your submission

# Prize brackets, checked from the highest score downward.
if points > 180:
    result = "Congratulations! You won a penguin!"
elif points > 150:
    result = "Congratulations! You won a wafer-thin mint!"
elif points > 50:
    result = "Oh dear, no prize this time."
else:
    result = "Congratulations! You won a Wooden Rabbit!"
print(result)
###################################################################################################
# '''
# You decide you want to play a game where you are hiding
# a number from someone. Store this number in a variable
# called 'answer'. Another user provides a number called
# 'guess'. By comparing guess to answer, you inform the user
# if their guess is too high or too low.
# Fill in the conditionals below to inform the user about how
# their guess compares to the answer.
# '''
answer = 22
guess = 22

# Compare the guess to the hidden answer; integers obey trichotomy, so
# testing equality first and falling through to < / > covers every case.
if guess == answer:
    result = "Nice! Your guess matched the answer!"
elif guess < answer:
    result = "Oops! Your guess was too low."
else:
    result = "Oops! Your guess was too high."
print(result)
############################################################################################
# '''
# Depending on where an individual is from we need to tax them
# appropriately. The states of CA, MN, and
# NY have taxes of 7.5%, 9.5%, and 8.9% respectively.
# Use this information to take the amount of a purchase and
# the corresponding state to assure that they are taxed by the right
# amount.
# '''
state = 'MN'
purchase_amount = 1000

# Sales-tax rate per state -- one lookup table replaces the three
# copy-pasted branches of the original.
tax_rates = {'CA': .075, 'MN': .095, 'NY': .089}

if state in tax_rates:
    tax_amount = tax_rates[state]
    total_cost = purchase_amount * (1 + tax_amount)
    result = "Since you're from {}, your total cost is {}.".format(state, total_cost)

# NOTE: exactly like the original, `result` is undefined for unknown
# states, so print() raises NameError in that case.
print(result)
###################################################################################################
points = 55  # use this as input for your submission

# No prize unless one of the brackets below matches.
prize = None
if points > 180:
    prize = "penguin"
elif points > 150:
    prize = "wafer-thin mint"
elif 0 < points <= 50:
    prize = "wooden rabbit"

# A truthy prize name means the player won something.
result = ("Congratulations! You won a {}!".format(prize)
          if prize else "Oh dear, no prize this time.")
print(result)
#######################################################################################################
| true |
f1e02cac88ac8f38283b53ee458f3c3190d791ba | Python | MrRabbit0o0/LeetCode | /python/172.py | UTF-8 | 476 | 3.515625 | 4 | [] | no_license | # coding: utf8
class Solution(object):
    def trailingZeroes(self, n):
        """
        Count trailing zeros of n! by summing n//5 + n//25 + ... (Legendre's
        formula: each factor of 5 pairs with an abundant factor of 2).

        Uses floor division so the result is an exact int on both Python 2
        and 3 -- the original `n / 5` is float division on Python 3 and
        returned a float.

        :type n: int
        :rtype: int
        """
        return 0 if n < 5 else n // 5 + self.trailingZeroes(n // 5)
# Python 2 self-test driver: one random spot-check printed, then a fixed
# regression case asserted against the known answer.
if __name__ == '__main__':
    import random
    n = random.randint(0, 10000000)
    print n
    print Solution().trailingZeroes(n)
    # Known value: 7425429! has exactly 1,856,353 trailing zeros.
    n = 7425429
    result = Solution().trailingZeroes(n)
    assert(1856353 == result), 'n={}, right=1856353, output={}'.format(n, result)
a2a454e2e682ce41b0b357fa6e2972233aebe8f6 | Python | stevenjwheeler/AntiScamAI | /wit_response_engine.py | UTF-8 | 1,641 | 2.59375 | 3 | [] | no_license | import gvoice_response_engine
import error_reporter
import logger
def respond(wit_response, time):
    """Print a summary of a wit.ai parse and speak the recognised text.

    wit_response: decoded wit.ai response dict (expects '_text' and
    'entities' with 'intent' and 'sentiment' lists).
    time: processing time in seconds (NOTE: shadows the stdlib `time` name
    inside this function).

    Side effects only: prints to stdout, reports errors via error_reporter /
    logger, and synthesizes speech through gvoice_response_engine.
    """
    if wit_response['_text']:
        try:
            text = wit_response['_text']
            intent = wit_response['entities']['intent'][0]['value']
            intentconfidence = "{0:.0f}%".format(wit_response['entities']['intent'][0]['confidence'] * 100)
            sentiment = wit_response['entities']['sentiment'][0]['value']
            sentimentconfidence = "{0:.0f}%".format(wit_response['entities']['sentiment'][0]['confidence'] * 100)
            processingtime = "{0:.2f}".format(time)
            print("[ ] Wit.ai response: ")
            print("    TEXT:", text)
            print("    PERCEIVED INTENT:", intent)
            print("    INTENT CONFIDENCE:", intentconfidence)
            print("    PERCEIVED SENTIMENT:", sentiment)
            print("    SENTIMENT CONFIDENCE:", sentimentconfidence)
            print("    PROCESSING TIME:", processingtime, "seconds")
            #AUDIO PROCESSING HERE
            print("[ ] Speaking response")
        # Was a bare `except:`, which also traps SystemExit/KeyboardInterrupt;
        # `Exception` keeps the best-effort behaviour without hiding those.
        except Exception:
            print("[!!!] Could not parse the wit response")
            error_reporter.reportError("App could not parse the wit response")
            logger.log("App could not parse the wit response.")
        # Speech synthesis is attempted even when parsing/printing failed.
        try:
            gvoice_response_engine.synthesize_text(wit_response['_text'])
        except Exception:
            print("[!!!] Could not produce or play sound")
            error_reporter.reportError("App could not produce or play sound")
            logger.log("App could not produce or play sound.")
d7a8eb6d65a78dcb1ff9868e4d3b351cd3b5700a | Python | Joyita01/Stream_tweets | /Streaming_replies.py | UTF-8 | 3,445 | 3.1875 | 3 | [] | no_license | """
Following code, streams tweets of username 'x1' containing specific keywords and stores the replies to those tweets in a JSON file.
"""
from tweepy import TweepError
import json
from tweepy import API
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import creden
class TwitterAuthenticator():
    """Builds a tweepy OAuth handler from the credentials in `creden`."""
    def authenticate_twitter_app(self):
        """Return an OAuthHandler carrying the app's consumer and access tokens."""
        auth = OAuthHandler(creden.consumer_key, creden.consumer_secret)
        auth.set_access_token(creden.access_token, creden.access_secret)
        return auth
class TwitterClient():
    """
    Streams tweets from a user's timeline and stores replies to them.

    NOTE(review): the 'to:x1' / 'to:x2' search queries hard-code the two
    account handles described in the module docstring -- confirm before reuse.
    """
    def __init__(self,twitter_user=None):
        # Authenticated tweepy API client; twitter_user is the timeline owner.
        self.auth=TwitterAuthenticator().authenticate_twitter_app()
        self.twitter_client=API(self.auth)
        self.twitter_user=twitter_user
    def get_tweets_from_self_timeline(self,num_tweets,tweet_output_file,hash_tag_list):
        """
        Print the user's tweets/retweets that contain any keyword from
        hash_tag_list, collect replies to each, and append them to a JSON file.
        """
        for tweet in Cursor(self.twitter_client.user_timeline,id=self.twitter_user,tweet_mode="extended",exclude_replies=True).items(num_tweets):
            tweets=[]
            # Skip tweets containing none of the filter keywords.
            if all(x not in tweet.full_text for x in hash_tag_list):
                continue
            else :
                # Accessing tweet.retweeted_status raises AttributeError for a
                # plain (non-retweet) tweet -- that drives the branch below.
                try : #Prints the retweets of account x1, and stores the replies to those retweets
                    print("\n\n")
                    print("1.Retweet " +tweet.retweeted_status.full_text)
                    for all_tweet in Cursor(self.twitter_client.search,q='to:x2', since_id=tweet.id_str).items(300): # x2 is the username of the page,whose tweets user x1 is retweeting
                        if hasattr(all_tweet, 'in_reply_to_status_id_str'):
                            if (all_tweet.in_reply_to_status_id_str==tweet.retweeted_status.id_str):
                                tweets.append(all_tweet.text)
                except AttributeError:
                    # Plain tweet: collect replies addressed to the tweet itself.
                    print("\n\n")
                    print("1.Tweet " +tweet.full_text) #Prints the tweets of account x1
                    for all_tweet in Cursor(self.twitter_client.search,q='to:x1', since_id=tweet.id_str).items(300): #Stores the replies to the tweets of account x1
                        if hasattr(all_tweet, 'in_reply_to_status_id_str'):
                            if (all_tweet.in_reply_to_status_id_str==tweet.id_str):
                                tweets.append(all_tweet.text)
                except TweepError:
                    # Twitter API rate limit reached; this tweet's replies are skipped.
                    print("Limit reached!!!")
                if len(tweets) != 0 :
                    self.write_to_file(tweets,tweet_output_file)
    def write_to_file(self,replies,fileName):
        """
        Append the collected replies to ./fileName as a JSON array.
        """
        file_path='./' + fileName
        with open(file_path,'a') as fp:
            json.dump(replies, fp)
# Interactive entry point: prompt for keywords and the account name,
# then scrape up to 100 matching tweets and store replies in JSON.
if __name__=="__main__":
    hash_tag_list = [x for x in input("Enter the hash tag list to filter tweets(separated by commas): ").split(',')]
    account_name=input("Enter account user name whose tweets you want to scrape?")
    twitter_client=TwitterClient(account_name)
    twitter_client.get_tweets_from_self_timeline(100,"search_results.json",hash_tag_list)
| true |
4faced3e8a0c83a97ea83c5b1011b44de0d7a39e | Python | efedorow/data-science | /simple-tensorflow-model-unit-conversion.py | UTF-8 | 2,046 | 3.484375 | 3 | [] | no_license | import tensorflow as tf
import logging
import numpy as np
# Silence TensorFlow's info/warning chatter.
logger = tf.get_logger()
logger.setLevel(logging.ERROR)

# Input values in kilograms.  NOTE(review): the variable is misnamed
# `meters_in` -- it holds kilograms, kept for backward compatibility.
meters_in = np.array([1, 5, 8, 15, 23, 34, 49, 69, 82, 96])
# Output values in pounds determined via the conversion formula.
pounds_out = np.array([2.205, 11.023, 17.637, 33.069, 50.706, 74.957, 108.027, 152.119, 180.779, 211.644])
# Print the pairs; the original formatted the enumerate index (0..9)
# instead of the actual kilogram value `cvar` -- fixed.
for ivar, cvar in enumerate(meters_in):
    print("{} kilograms = {} pounds".format(cvar, pounds_out[ivar]))
#one model used, it has only one layer:
#model = tf.keras.Sequential([
#tf.keras.layers.Dense(units=1, input_shape=[1])
#])
# A model with more neurons is more accurate for this regression:
l0 = tf.keras.layers.Dense(units=4, input_shape=[1])
l1 = tf.keras.layers.Dense(units=4)
l2 = tf.keras.layers.Dense(units=1)
model = tf.keras.Sequential([l0, l1, l2])
# Compile with mean-squared-error loss and the Adam optimizer (lr=0.05).
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.05))
# Train, then plot the loss magnitude over time (it quickly approaches 0).
history = model.fit(meters_in, pounds_out, epochs=600, verbose=False)
import matplotlib.pyplot as plt
plt.xlabel("Epoch Number")
plt.ylabel("Loss Magnitude")
plt.plot(history.history['loss'])
# Sanity-check the MAE loss on random data against a NumPy reference.
y_true = np.random.randint(0, 2, size=(2,3))
y_pred = np.random.random(size=(2, 3))
loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)
assert loss.shape == (2,)
assert np.array_equal(loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))
tf.keras.losses.MAE(
    y_true, y_pred)
#to find how accurate it is print the layer weight (real conversion is K = 2.20462*P)
#this only works for one layer, otherwise an output of 4 values would be given
#print("These are the layer vaiables:{}".format(model.get_weights())) #gave value of 2.17817
| true |
540ec7883267b993cf5853640c3af3c2aae3a307 | Python | sassaf/VehicleValueEstimator | /value_estimator.py | UTF-8 | 4,788 | 2.703125 | 3 | [] | no_license | from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D as Conv2D
from keras.layers import MaxPooling2D, SeparableConv2D
from keras.optimizers import SGD, rmsprop
import numpy as np
import scipy
import cv2
import os
from copy import copy
from get_dataset import get_image_data
from sklearn.cross_validation import StratifiedKFold
m = 300
n = 200
def train_evaluate_model(model, train_data, train_values, valid_data, valid_values, eps):
    # Fit the model for `eps` epochs on the training fold, then evaluate it
    # on the validation fold.  NOTE: appends the score to the module-level
    # `scores` list (defined in __main__) rather than returning it.
    model.fit(train_data, train_values, epochs=eps, batch_size=32, verbose=1, callbacks=None, validation_split=0.0,
              initial_epoch=0)
    score = model.evaluate(valid_data, valid_values, batch_size=1, verbose=1)
    scores.append(score)
    print scores
def create_model():
    # Build the CNN for vehicle-value regression: two conv/conv/pool stages
    # followed by a dense head with a single linear output (the value).
    # Input images are n x m grayscale (n=200 rows, m=300 cols, 1 channel).
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='relu', input_shape=(n, m, 1)))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    # Single linear unit: regression target, no activation.
    model.add(Dense(1))
    # Train the model using SGD with MSLE loss (targets are prices, so the
    # logarithmic error de-emphasises large absolute values).
    sgd = SGD(lr=0.01)
    model.compile(optimizer=sgd, loss="mean_squared_logarithmic_error")
    return model
if __name__ == "__main__":
# get train images and labels
train_path = '/home/shafe/Documents/College/ECE 6258/Project/Train_Images/Honda Accord/'
# train_path = '/home/shafe/Documents/College/ECE 6258/Project/Train_Images/Toyota Camry/'
train_data = []
train_values = []
get_image_data(train_path, train_data, train_values)
# get test images and labels
test_path = '/home/shafe/Documents/College/ECE 6258/Project/Test_Images/Honda Accord/'
# test_path = '/home/shafe/Documents/College/ECE 6258/Project/Test_Images/Toyota Camry/'
test_data = []
test_values = []
get_image_data(test_path, test_data, test_values)
# convert to numpy arrays, and max/min values for vehicles
train_data = np.array(train_data)
train_values = np.array(train_values)
maxim = np.max(train_values)
minim = np.min(train_values)
print train_data.shape
print train_values.shape
# convert to numpy arrays, max/min values was only for testing
# user shouldn't have access to test values, they're only used to check accuracy of results
test_data = np.array(test_data)
test_values_arr = copy(test_values)
test_values = np.array(test_values)
print test_data.shape
print test_values.shape
# array to keep track of scores, only useful during kfolds to track trends.
# eps is epochs, runs for each neural network
scores = []
eps = 1
# uncomment lines 93-101 and comment lines 105-106 to use kfolds functionality
# nfolds determines how many segments will be used
# n_folds = 5
# skf = StratifiedKFold(train_values, n_folds=n_folds, shuffle=True)
#
# for i, (train,test) in enumerate(skf):
# print "Running Fold", i + 1, "/", n_folds
# model = None # Clearing the NN.
# model = create_model()
#
# train_evaluate_model(model, train_data[train], train_values[train], train_data[test], train_values[test], eps)
# without kfolds, single model and test
# comment lines 105-106 and uncomment lines 93-101 in order to test k-fold functionality.
model = create_model()
train_evaluate_model(model, train_data, train_values, test_data, test_values, eps)
# testing stage with full results.
print '-----------------------------------------------'
score = model.evaluate(test_data, test_values, batch_size=1, verbose=1)
print score
estimates = model.predict_on_batch(test_data)
print estimates
estimated_values = []
compared_values = []
for value in estimates:
estimated_values.append(value)
# print estimated_values
max = np.max(estimated_values)
min = np.min(estimated_values)
estimated_values = (estimated_values - min) / (max - min + 1.0)
estimated_values = (minim + estimated_values * (maxim - minim)).round()
x = 0
mse = 0
for value in estimated_values:
compared_values.append([value[0], test_values_arr[x]])
mse = (value[0] + test_values_arr[x]) * (value[0] + test_values_arr[x])
x += 1
mse = mse / x
# import pdb; pdb.set_trace()
print mse
print estimated_values
print compared_values
| true |
a60b561905f6f1dae79c9f014f5e0fd728bf7f58 | Python | Esiravegna/0pizero_sensors | /sensors/MQX/MQ135.py | UTF-8 | 5,737 | 2.625 | 3 | [
"Apache-2.0"
] | permissive |
from __future__ import division
import time
import math
from utils.log import log
log = log.name(__name__)
class MQ135(object):
    """Driver for an MQ-135 gas sensor read through an Arduino ADC.

    NOTE: Python 2 code (`dict.keys().remove(...)` in MQPercentage would
    fail on Python 3); `from __future__ import division` makes `/` true
    division throughout.
    """
    ######################### Hardware Related Macros #########################
    RL_VALUE = 10                        # define the load resistance on the board, in kilo ohms
    # Per-gas calibration constants: R0 (sensor resistance baseline),
    # SCALE_FACTOR/EXPONENT (power-law ppm curve), ATM (atmospheric baseline ppm).
    gas_values = {
        'AIR': {
            'R0': 1
        },
        'CO': {
            'R0': 10.13,
            'SCALE_FACTOR': 662.9382,
            'EXPONENT': 4.0241,
            'ATM': 1
        },
        'CO2': {
            'R0': 79.97,
            'SCALE_FACTOR': 116.6020682,
            'EXPONENT': 2.769034857,
            'ATM': 407.57
        },
        'ETHANOL': {
            'R0': 34.91,
            'SCALE_FACTOR': 75.3103,
            'EXPONENT': 3.1459,
            'ATM': 22.5
        },
        'NH4': {
            'R0': 23.49,
            'SCALE_FACTOR': 102.694,
            'EXPONENT':2.48818,
            'ATM': 15
        },
        'TOLUENE': {
            'R0': 23.06,
            'SCALE_FACTOR': 43.7748,
            'EXPONENT': 3.42936,
            'ATM': 2.9
        },
        'ACETONE': {
            'R0': 41.00,
            'SCALE_FACTOR': 33.1197,
            'EXPONENT': 3.36587,
            'ATM': 16
        },
    }
    # Parameters to model temperature and humidity dependence
    CORA = 0.00035
    CORB = 0.02718
    CORC = 1.39538
    CORD = 0.0018
    ######################### Software Related Macros #########################
    CALIBARAION_SAMPLE_TIMES = 50       # define how many samples you are going to take in the calibration phase
    CALIBRATION_SAMPLE_INTERVAL = 500   # define the time interval (in milliseconds) between samples in the
    # calibration phase
    READ_SAMPLE_INTERVAL = 5            # interval-related constant used as the per-sample sleep base in MQRead
    READ_SAMPLE_TIMES = 200             # number of samples averaged in normal operation
    # normal operation
    ######################### Application Related Macros ######################
    GAS_LPG = 0
    GAS_CO = 1
    GAS_SMOKE = 2
    def __init__(self, ArduinoSensor, Ro=10):
        """
        Creates the sensor.  ArduinoSensor is a proper, initialized
        ArduinoGasSensor; the Ro argument is immediately replaced by the
        value measured during calibration.
        """
        log.info("Initializing sensor")
        self.Ro = Ro
        self.sensor = ArduinoSensor
        log.debug("Calibrating...")
        self.Ro = self.MQCalibration(self.sensor)
        log.debug("Calibration is done...\n")
        log.debug("Ro=%f kohm" % self.Ro)
    def MQPercentage(self, temperature=None, humidity=None):
        # Return {gas: ppm} for every configured gas except the AIR baseline.
        # With temperature and humidity given, the reading is climate-corrected.
        val = {}
        resistance = self.MQRead()
        gas_list = self.gas_values.keys()
        gas_list.remove('AIR')
        for a_gas in gas_list:
            result = 'N/A'
            if(temperature and humidity):
                result = self.getCalibratedGasPPM(temperature, humidity, resistance, a_gas)
            else:
                result = self.getGasPPM(resistance, a_gas)
            val[a_gas] = result
        return val
    def MQResistanceCalculation(self, raw_adc):
        # Sensor resistance from the 10-bit ADC reading via the load-resistor
        # divider.  The 5.0 factors cancel algebraically; kept as written.
        return float((1023. * self.RL_VALUE * 5.)/(float(raw_adc) * 5.)) - self.RL_VALUE;
        #return float(self.RL_VALUE*(1023.0-raw_adc)/float(raw_adc));
    def MQCalibration(self, mq_pin):
        # Average many resistance samples in clean air to derive Ro.
        val = 0.0
        for i in range(self.CALIBARAION_SAMPLE_TIMES):          # take multiple samples
            self.sensor.update()
            val += self.MQResistanceCalculation(self.sensor.MQ135Value)
            time.sleep(self.CALIBRATION_SAMPLE_INTERVAL/1000.0)
        val = val/self.CALIBARAION_SAMPLE_TIMES                 # calculate the average value
        val = val/self.gas_values['AIR']['R0']                  # divided by RO_CLEAN_AIR_FACTOR yields the Ro
        return val;
    def MQRead(self):
        # Average READ_SAMPLE_TIMES resistance samples for one measurement.
        rs = 0.0
        for i in range(self.READ_SAMPLE_TIMES):
            self.sensor.update()
            rs += self.MQResistanceCalculation(self.sensor.MQ135Value)
            time.sleep(self.READ_SAMPLE_INTERVAL/1000.0)
        rs = rs/self.READ_SAMPLE_TIMES
        return rs
    def getGasPPM(self, resistance, GAS):
        """
        Given a valid GAS string and a resistance value, return the
        concentration in ppm from the gas's power-law curve.
        """
        return self.gas_values[GAS]['SCALE_FACTOR'] * pow((resistance/self.gas_values[GAS]['R0']), -self.gas_values[GAS]['EXPONENT'])
    def GetRZero(self, GAS, resistance):
        """
        Given a gas and a measured resistance, return the implied R-zero
        (resistance at the gas's atmospheric baseline concentration).
        """
        return resistance * pow((self.gas_values[GAS]['ATM']/self.gas_values[GAS]['SCALE_FACTOR']), (1./self.gas_values[GAS]['EXPONENT']));
    def getCorrectedRZero(self, GAS, resistance):
        """
        Same computation as GetRZero, applied to an already climate-corrected
        resistance value.
        """
        return resistance * pow((self.gas_values[GAS]['ATM']/self.gas_values[GAS]['SCALE_FACTOR']), (1./self.gas_values[GAS]['EXPONENT']))
    def getCorrectionFactor(self, temperature, humidity):
        # Quadratic-in-temperature, linear-in-humidity correction (33% RH pivot).
        return self.CORA * temperature * temperature - self.CORB * temperature + self.CORC - (humidity-33.)*self.CORD
    def getCorrectedResistance(self, resistance, temperature, humidity):
        # Resistance normalised by the climate correction factor.
        return float(resistance/self.getCorrectionFactor(temperature, humidity))
    def getCalibratedGasPPM(self, temperature, humidity, resistance, GAS):
        # ppm from the power-law curve using the climate-corrected resistance
        # and its corrected R-zero.
        return self.gas_values[GAS]['SCALE_FACTOR'] * pow((self.getCorrectedResistance(resistance, temperature, humidity) /
            self.getCorrectedRZero(GAS , self.getCorrectedResistance(resistance, temperature, humidity))), -self.gas_values[GAS]['EXPONENT'])
| true |
99da4b388ad0d84145dfc50ebaf5cb34d4b38acb | Python | chenjiahui1991/LeetCode | /P0212.py | UTF-8 | 1,419 | 3.4375 | 3 | [] | no_license | class Solution:
def findWords(self, board, words):
"""
:type board: List[List[str]]
:type words: List[str]
:rtype: List[str]
"""
trie = {}
for word in words:
t = trie
for ch in word:
if ch not in t:
t[ch] = {}
t = t[ch]
t['#'] = word
result = []
if len(board) == 0 or len(board[0]) == 0: return []
m, n = len(board), len(board[0])
step = [(1, 0), (-1, 0), (0, 1), (0, -1)]
def dfs(board, visited, trie, x, y):
if x < 0 or x >= m or y < 0 or y >= n or board[x][y] not in trie or visited[x][y]:
return
if '#' in trie[board[x][y]] and trie[board[x][y]]['#'] not in result: result.append(trie[board[x][y]]['#'])
visited[x][y] = True
for i in range(4):
dfs(board, visited, trie[board[x][y]], x + step[i][0], y + step[i][1])
visited[x][y] = False
visited = [[False for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
dfs(board, visited, trie, i, j)
return result
# Ad-hoc driver: the classic LeetCode 212 board (expects oath/eat) and a
# duplicate-word case (expects a single 'a').
s = Solution()
board =[
    ['o','a','a','n'],
    ['e','t','a','e'],
    ['i','h','k','r'],
    ['i','f','l','v']
]
print(s.findWords(board, ["oath","pea","eat","rain"]))
print(s.findWords([['a']], ["a","a"]))
| true |
c38c0c6fb166d79ce43e9ac65e032e589f7cd5c3 | Python | zhaosiheng/SR-GNN_PyTorch-Geometric | /src/utils.py | UTF-8 | 2,799 | 2.671875 | 3 | [] | no_license | import networkx as nx
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import to_networkx
class NodeDistance:
    """Pairwise shortest-path distance labels for a torch_geometric graph."""
    def __init__(self, data, nclass=4):
        """
        :param data: torch_geometric Data object; converted to a NetworkX graph.
        :param nclass: number of distance classes (paths are cut off at nclass-1).
        """
        G = to_networkx(data)
        self.graph = G
        self.nclass = nclass
    def get_label(self):
        # All-pairs shortest path lengths up to cutoff nclass-1.
        path_length = dict(nx.all_pairs_shortest_path_length(self.graph, cutoff=self.nclass-1))
        # -1 marks "no path within cutoff" until filled below.
        distance = - np.ones((len(self.graph), len(self.graph))).astype(int)
        for u, p in path_length.items():
            for v, d in p.items():
                distance[u][v] = d
        # Unreachable pairs get one class beyond the largest observed distance.
        distance[distance==-1] = distance.max() + 1
        # Keep only the upper triangle (pairs are symmetric).
        distance = np.triu(distance)
        self.distance = distance
        # Shift labels to start at 0 (self-distance 0 becomes class -1 and is
        # filtered out by the caller's sampling over classes >= 1).
        return torch.LongTensor(distance) - 1
class PairwiseDistance():
    """Self-supervised pairwise-distance loss head over node embeddings."""
    def __init__(self, nhid, device, regression=False):
        self.device = device
        self.regression = regression
        self.nclass = 4
        # NOTE(review): both branches build the identical linear layer --
        # looks like the regression branch was meant to differ; confirm.
        if regression:
            self.linear = nn.Linear(nhid, self.nclass).to(device)
        else:
            self.linear = nn.Linear(nhid, self.nclass).to(device)
        self.pseudo_labels = None
    def make_loss(self, embeddings, data):
        # NOTE(review): regression_loss is not defined on this class, so the
        # regression=True path would raise AttributeError.
        if self.regression:
            return self.regression_loss(embeddings)
        else:
            return self.classification_loss(embeddings, data)
    def classification_loss(self, embeddings, data):
        # Pseudo-labels: shortest-path distance class for sampled node pairs.
        agent = NodeDistance(data, nclass=self.nclass)
        self.pseudo_labels = agent.get_label().to(self.device)
        # embeddings = F.dropout(embeddings, 0, training=True)
        self.node_pairs = self.sample(agent.distance)
        node_pairs = self.node_pairs
        embeddings0 = embeddings[node_pairs[0]]
        embeddings1 = embeddings[node_pairs[1]]
        # Classify the element-wise absolute embedding difference of each pair.
        h = self.linear(torch.abs(embeddings0 - embeddings1))
        output = F.log_softmax(h, dim=1)
        loss = F.nll_loss(output, self.pseudo_labels[node_pairs])
        # from metric import accuracy
        # acc = accuracy(output, self.pseudo_labels[node_pairs])
        # print(acc)
        return loss
    def sample(self, labels, ratio=0.1, k=4000):
        # Sample 50 node pairs per distance class (with replacement); the
        # `ratio` and `k` parameters are currently unused.
        node_pairs = []
        for i in range(1, labels.max()+1):
            tmp = np.array(np.where(labels==i)).transpose()
            # indices = np.random.choice(np.arange(len(tmp)), k, replace=False)
            indices = np.random.choice(np.arange(len(tmp)), int(50), replace=True)
            node_pairs.append(tmp[indices])
        node_pairs = np.vstack(node_pairs).transpose()
        # node_pairs = np.array(node_pairs).reshape(-1, 2).transpose()
        return node_pairs[0], node_pairs[1]
| true |
eac74145405021ad3003ca38e7a72bd279836732 | Python | osgioia/LosSimpsonsFrasesBot | /main.py | UTF-8 | 595 | 2.671875 | 3 | [] | no_license | import json
import requests
import tweepy
from keys import *
# Authenticate against the Twitter API with the keys from keys.py.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# Create API object
api = tweepy.API(auth)
# Fetch a random Simpsons quote with an HTTP GET request:
r = requests.get('https://los-simpsons-quotes.herokuapp.com/v1/quotes')
# Tweet the quote if the HTTP status code is 200 (OK):
if r.status_code == 200:
    json_data = json.loads(r.text)
    # print(json_data[0]['quote'] + ' - ' + json_data[0]['author'])
    api.update_status(json_data[0]['quote'] + ' - ' + json_data[0]['author'])
ae343316c56c5cbf1e0fc294a1e9a2959ebf1a53 | Python | bhabicht/gravity-sim | /tests/leap_frog_algorithm_test.py | UTF-8 | 2,777 | 2.796875 | 3 | [] | no_license | import unittest
import numpy as np
import sys
import scipy.constants as const
from code.leap_frog_algorithm import update_all_positions
from code.leap_frog_algorithm import calculate_all_forces
from code.leap_frog_algorithm import update_all_velocities
from code.planet_system_creation import Massiveobject
sys.path.append('/home/benjamin/Documents/computer_science/gravity-sim')
class TestUpdateAllPositions(unittest.TestCase):
    # Three scalar-state bodies: Massiveobject(name, mass, x, v, F).
    obj1 = Massiveobject("obj1", 1, 0, 3, 10)
    obj2 = Massiveobject("obj2", 2, 1, 4, 30)
    obj3 = Massiveobject("obj3", 3, 2, 4, 30)
    def test_update_all_positions(self):
        # Positions before the leap-frog position half-step.
        self.assertEqual(self.obj1.x, 0)
        self.assertEqual(self.obj2.x, 1)
        self.assertEqual(self.obj3.x, 2)
        update_all_positions(0.5, [self.obj1, self.obj2, self.obj3])
        # Expected: x + v*dt with dt = 0.5 (e.g. 0 + 3*0.5 = 1.5? -- the
        # expected 0.75 implies the implementation's own step rule; values
        # are taken as the project's reference results).
        self.assertEqual(self.obj1.x, 0.75)
        self.assertEqual(self.obj2.x, 2)
        self.assertEqual(self.obj3.x, 3)
class TestCalculateAllForces(unittest.TestCase):
    # Three bodies on the x-axis at x = 0, 1, 2 with masses 1, 2, 3.
    obj1 = Massiveobject("obj1", 1, np.array([0, 0, 0]), np.array([3, 0, 0]),
                         np.array([10, 0, 0]))
    obj2 = Massiveobject("obj2", 2, np.array([1, 0, 0]), np.array([4, 0, 0]),
                         np.array([30, 0, 0]))
    obj3 = Massiveobject("obj3", 3, np.array([2, 0, 0]), np.array([4, 0, 0]),
                         np.array([30, 0, 0]))
    def test_calculate_all_forces(self):
        calculate_all_forces([self.obj1, self.obj2, self.obj3])
        # Hand-computed Newtonian gravity sums in units of G.
        self.assertTrue(np.array_equal(self.obj1.F,
                                       np.array([11/4*const.G, 0, 0])))
        self.assertTrue(np.array_equal(self.obj2.F,
                                       np.array([4*const.G, 0, 0])))
        self.assertTrue(np.array_equal(self.obj3.F,
                                       np.array([-27/4*const.G, 0, 0])))
class TestUpdateAllVelocities(unittest.TestCase):
    # Bodies with vector state: Massiveobject(name, mass, pos, vel, force).
    obj1 = Massiveobject("obj1", 1, np.array([0, 0, 0]), np.array([3, 0, 0]),
                         np.array([10, 0, 0]))
    obj2 = Massiveobject("obj2", 2, np.array([1, 0, 0]), np.array([4, 0, 0]),
                         np.array([30, 0, 0]))
    obj3 = Massiveobject("obj3", 3, np.array([2, 0, 0]), np.array([5, 0, 0]),
                         np.array([30, 0, 0]))
    def test_update_all_velocities(self):
        # Velocities before the kick step.
        self.assertTrue(np.array_equal(self.obj1.v, np.array([3, 0, 0])))
        self.assertTrue(np.array_equal(self.obj2.v, np.array([4, 0, 0])))
        self.assertTrue(np.array_equal(self.obj3.v, np.array([5, 0, 0])))
        update_all_velocities(0.5, [self.obj1, self.obj2, self.obj3])
        # Expected v + (F/m)*dt with dt = 0.5: 3+10*0.5=8, 4+15*0.5=11.5, 5+10*0.5=10.
        self.assertTrue(np.array_equal(self.obj1.v, np.array([8, 0, 0])))
        self.assertTrue(np.array_equal(self.obj2.v, np.array([11.5, 0, 0])))
        self.assertTrue(np.array_equal(self.obj3.v, np.array([10, 0, 0])))
| true |
ed3594ed74d378674d43ffbc4d10a147f61cb8a2 | Python | vlad24/Univiersity-Computer-Vision | /solovyev/src/gamma.py | UTF-8 | 1,818 | 3.015625 | 3 | [] | no_license | '''
Created on Mar 9, 2017
@author: vlad
'''
import cv2
import matplotlib.pyplot as plt
import numpy as np
def gamma_correction(img, correction):
    # Power-law (gamma) intensity transform: out = 255 * (in/255)^correction.
    # Note: the slice copy on the next line is immediately discarded by the
    # rebinding on the line after; img itself is never modified.
    result = img[:]
    result = result / 255.0
    result = cv2.pow(result, correction)
    return np.uint8(result * 255)
def log_correction(img):
    # Logarithmic intensity transform: out = 255 * ln(1 + in/255).
    result = np.copy(img)
    result = result / 255.0
    result = np.ones(result.shape) + result
    result = cv2.log(result)
    return np.uint8(result * 255)
def neg_correction(img):
    """Photographic negative: maps each intensity v to 255 - v via normalised floats."""
    normalised = np.copy(img) / 255.0
    inverted = np.ones(normalised.shape) - normalised
    return np.uint8(inverted * 255)
def p_linear_correction(img, t1, t2, k1, k2):
    """Piecewise-linear stretch with a dead zone.

    Pixels with normalised intensity below t1 are scaled by k1, those above
    t2 by k2, and those in [t1, t2] map to 0 (same contract as before).
    Vectorised with boolean masks instead of the original per-pixel Python
    double loop; the arithmetic per pixel is unchanged.
    """
    result = np.zeros(img.shape)
    scaled = img / 255.0
    dark = scaled < t1
    bright = scaled > t2
    # Same expressions as the scalar version: value * k / 255.
    result[dark] = img[dark] * k1 / 255.0
    result[bright] = img[bright] * k2 / 255.0
    return np.uint8(result * 255)
def linear_correction(img, k):
    """Scale brightness linearly by factor k (computed in normalised [0, 1] space)."""
    scaled = np.copy(img) / 255.0
    scaled = scaled * k
    return np.uint8(scaled * 255)
# Python 2 driver: apply every correction to one grayscale image and save
# the results as JPEGs in the working directory.
img = cv2.imread('../img/V/1/original.jpg', cv2.IMREAD_GRAYSCALE)
alpha1 = 0.6
alpha2 = 1.8
gamma1_img = gamma_correction(img, alpha1)
gamma2_img = gamma_correction(img, alpha2)
log_img = log_correction(img)
neg_img = neg_correction(img)
lin_img = linear_correction(img, 1.5)
plin_img = p_linear_correction(img, 0.2, 0.3, 0.1, 1.5)
cv2.imwrite("small_alpha_image.jpeg", gamma1_img)
cv2.imwrite("big_alpha_image.jpeg", gamma2_img)
cv2.imwrite("log_image.jpeg", log_img)
cv2.imwrite("negative_image.jpeg", neg_img)
cv2.imwrite("lin_image.jpeg", lin_img)
cv2.imwrite("plin_image.jpeg", plin_img)
# Difference image between the original and its log-corrected version.
cv2.imwrite("orig-log_image.jpeg", img-log_img)
print "Program is over"
5638e712e10f4cb8531ff5eaa8a7c69ea5a12e8c | Python | stellaluminary/Baekjoon | /15684.py | UTF-8 | 1,983 | 3.234375 | 3 | [] | no_license | """
Method 2
시간 초과
"""
# BOJ 15684 (Ladders): n verticals, m existing rungs, h horizontal levels.
# visited[row][col] is True when a rung connects col to col+1 at that row.
n, m, h = map(int, input().split())
visited = [[False] * (n+1) for _ in range(h+1)]
combi = []
for _ in range(m):
    a, b = map(int, input().split())
    visited[a][b] = True
def check():
    # Trace every vertical line down the ladder; success means each line
    # ends on its own column (uses module-level n, h, visited).
    for i in range(1, n+1):
        now = i
        for j in range(1, h+1):
            if visited[j][now-1]:
                now -= 1          # rung to the left moves us left
            elif visited[j][now]:
                now += 1          # rung to the right moves us right
        if now != i:
            return False
    return True
def dfs(depth, idx):
    # Try adding rungs from candidate list `combi` starting at index `idx`;
    # `depth` counts rungs added so far; prune once depth >= best answer.
    global answer
    if depth >= answer:
        return
    if check():
        answer = depth
        return
    for c in range(idx, len(combi)):
        x, y = combi[c]
        # A new rung must not touch an adjacent rung on the same level.
        if not visited[x][y-1] and not visited[x][y+1]:
            visited[x][y] = True
            dfs(depth+1, c+1)
            visited[x][y] = False
# Candidate positions: every empty slot whose neighbours are also empty.
for i in range(1,h+1):
    for j in range(1, n):
        if not visited[i][j-1] and not visited[i][j] and not visited[i][j+1]:
            combi.append([i, j])
# At most 3 added rungs are allowed; 4 acts as "impossible" sentinel.
answer = 4
dfs(0, 0)
print(answer if answer < 4 else -1)
"""
Method 1
시간 초과
"""
def check():
    # Method 1 variant (0-indexed board): trace each column through all h
    # levels; valid when every column returns to itself.  NOTE: redefines the
    # Method-2 check() above when both sections are active.
    for col in range(n):
        y = col
        for x in range(h):
            if board[x][y]:
                y += 1
            elif y > 0 and board[x][y-1]:
                y -= 1
        if y != col:
            return False
    return True
def dfs(cnt, x, y):
    """Backtracking search: add up to 3 rungs, resuming from position (x, y).

    cnt:    rungs placed so far.
    (x, y): resume point so combinations are enumerated in a fixed order
            without repeats.
    Updates the global `ans` with the minimum count found.
    """
    global ans
    if check():
        ans = min(ans, cnt)
        return
    if cnt == 3 or ans <= cnt:
        # At most 3 rungs allowed; also prune branches that cannot improve.
        return
    for i in range(x, h):
        # On the resume row continue from column y; later rows start at 0.
        k = y if i == x else 0
        for j in range(k, n-1):
            if j > 0 and board[i][j-1]:
                # Would share an endpoint with a rung on the left.
                continue
            if not board[i][j] and not board[i][j+1]:
                board[i][j] = 1
                # j+2 skips the adjacent slot, which is now blocked.
                dfs(cnt+1, i, j+2)
                board[i][j] = 0  # backtrack
# Read the grid (0-based in this variant), mark existing rungs, then search.
n,m,h = map(int, input().split())
board = [[0]*n for _ in range(h)]
for _ in range(m):
    a,b = map(int, input().split())
    board[a-1][b-1] = 1
ans = 4  # sentinel larger than the maximum allowed 3 rungs
dfs(0,0,0)
print(ans if ans < 4 else -1)
| true |
2fb202a77566f145421182adcd85aaa00e26987e | Python | Trietptm-on-Coding-Algorithms/eulerproject | /046.py | UTF-8 | 447 | 3.140625 | 3 | [] | no_license | #! /usr/bin/env python
from eulerutils import genprime,isprime
def goldbach():
    """Return the smallest odd composite not expressible as prime + 2*k**2.

    Walks consecutive primes (lo, hi); every odd number strictly between
    two consecutive primes is composite, so each one is tested with isop().
    """
    primes = genprime()
    lo = next(primes)
    hi = next(primes)
    while True:
        for candidate in xrange(lo + 2, hi, 2):
            if not isop(candidate):
                return candidate
        lo, hi = hi, next(primes)
def isop(com):
    """Return True if com can be written as prime + 2*k**2 for some k >= 1."""
    bound = int(com ** 0.5) + 1
    return any(isprime(com - 2 * k * k) for k in range(1, bound))
print goldbach()
| true |
ef1a845c21de45c1d96b2fd9d0a460663a401839 | Python | kirin7890/ABC | /ABC/056/A.py | UTF-8 | 175 | 3.125 | 3 | [] | no_license | a, b = map(str, input().split())
if a == 'H':
ac = 1
else:
ac = -1
if b == 'H':
tcd = 1
else:
tcd = -1
if ac * tcd > 0:
print ('H')
else:
print ('D')
| true |
3c0e2567dc032ff9e81884a0146793b2e88ae93d | Python | RyanIsCoding2021/RyanIsCoding2021 | /exercises/culculate.py | UTF-8 | 3,014 | 3.203125 | 3 | [
"MIT"
] | permissive | import pygame
import random
from itertools import cycle
class Cloud(pygame.sprite.Sprite):
    """A white ellipse sprite that drifts rightwards and wraps around."""

    def __init__(self, x, y):
        super().__init__()
        surface = pygame.Surface((50, 20))
        # (11, 12, 13) doubles as fill colour and transparency key, so only
        # the ellipse itself is visible when blitted.
        surface.set_colorkey((11, 12, 13))
        surface.fill((11, 12, 13))
        pygame.draw.ellipse(surface, pygame.Color('white'), surface.get_rect())
        self.image = surface
        self.rect = surface.get_rect(topleft=(x, y))

    def update(self, dt, events):
        """Move right proportionally to dt; wrap to the left edge off-screen."""
        self.rect.move_ip(dt / 10, 0)
        screen_rect = pygame.display.get_surface().get_rect()
        if self.rect.left >= screen_rect.width:
            self.rect.right = 0
class DayScene:
    """Light-blue sky with three drifting clouds."""

    def __init__(self):
        positions = [(0, 30), (100, 40), (400, 50)]
        self.clouds = pygame.sprite.Group(*(Cloud(x, y) for x, y in positions))

    def draw(self, screen):
        screen.fill(pygame.Color('lightblue'))
        self.clouds.draw(screen)

    def update(self, dt, events):
        self.clouds.update(dt, events)
class NightScene:
    """Dark purple sky with 50 randomly placed stars and five clouds."""

    def __init__(self):
        screen_rect = pygame.display.get_surface().get_rect()
        self.sky = pygame.Surface(screen_rect.size)
        self.sky.fill((50, 0, 50))
        # Stars: 50 distinct x positions, each paired with a random height,
        # drawn once onto the pre-rendered sky surface.
        for star_x in random.sample(range(screen_rect.width), 50):
            star_y = random.randint(0, screen_rect.height)
            pygame.draw.circle(self.sky, (200, 200, 0), (star_x, star_y), 1)
        positions = [(70, 70), (60, 40), (0, 50), (140, 10), (100, 20)]
        self.clouds = pygame.sprite.Group(*(Cloud(x, y) for x, y in positions))

    def draw(self, screen):
        screen.blit(self.sky, (0, 0))
        self.clouds.draw(screen)

    def update(self, dt, events):
        self.clouds.update(dt, events)
class Fader:
    """Cross-fades between a cycle of scenes through a black veil.

    State machine: idle (fading is None) -> 'OUT' (veil alpha climbs to
    255, then the scene swaps) -> 'IN' (alpha drops back to 0) -> idle.
    """

    def __init__(self, scenes):
        self.scenes = cycle(scenes)
        self.scene = next(self.scenes)
        self.fading = None
        self.alpha = 0
        screen_rect = pygame.display.get_surface().get_rect()
        self.veil = pygame.Surface(screen_rect.size)
        self.veil.fill((0, 0, 0))

    def next(self):
        """Begin fading to the next scene unless a fade is already running."""
        if not self.fading:
            self.fading = 'OUT'
            self.alpha = 0

    def draw(self, screen):
        self.scene.draw(screen)
        if self.fading:
            self.veil.set_alpha(self.alpha)
            screen.blit(self.veil, (0, 0))

    def update(self, dt, events):
        self.scene.update(dt, events)
        if self.fading == 'OUT':
            self.alpha += 8
            if self.alpha >= 255:
                # Fully dark: swap in the next scene, then fade back in.
                self.fading = 'IN'
                self.scene = next(self.scenes)
        else:
            # Runs for both 'IN' and idle; while idle the veil is never
            # drawn and next() resets alpha, so the decrement is harmless.
            self.alpha -= 8
            if self.alpha <= 0:
                self.fading = None
def main():
    """Open a 300x300 window; any key press cross-fades day <-> night."""
    size = (300, 300)
    screen = pygame.display.set_mode(size)
    clock = pygame.time.Clock()
    dt = 0
    fader = Fader([DayScene(), NightScene()])
    while True:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                return
            if event.type == pygame.KEYDOWN:
                fader.next()
        fader.draw(screen)
        fader.update(dt, events)
        pygame.display.flip()
        dt = clock.tick(30)  # cap at 30 FPS; dt drives the cloud drift


main()
f1fe43f3805d89556f27449b1d45d9b0889cdb27 | Python | zwy-888/drfday06-andRBAC | /api/authenticator.py | UTF-8 | 1,197 | 2.703125 | 3 | [] | no_license | from rest_framework import exceptions
from rest_framework.authentication import BaseAuthentication, BasicAuthentication
from api.models import User
class MyAuthentication(BaseAuthentication):
    """Custom DRF authentication backend.

    Expects an Authorization header of the exact form ``auth yan``; on
    success it authenticates the request as the hard-coded user ``python``.
    """

    def authenticate(self, request):
        # DRF contract: return (user, auth) on success, None to indicate an
        # anonymous/guest request, or raise APIException to reject it.
        print('111')  # NOTE(review): leftover debug output — consider removing
        # Fetch the raw Authorization header; .get() returns None when absent
        # instead of raising.
        auth = request.META.get("HTTP_AUTHORIZATION", None)
        print(auth)  # NOTE(review): leftover debug output
        if auth is None:
            # No credentials supplied: treat the caller as an anonymous guest.
            return None
        # Split the header into parts; the expected format is "auth yan".
        auth_list = auth.split()
        if not (len(auth_list) == 2 and auth_list[0].lower() == 'auth'):  # scheme must be "auth", followed by one token
            raise exceptions.APIException('用户验证信息格式有误')  # malformed credential format
        if auth_list[1] != 'yan':
            raise exceptions.APIException('用户信息有误')  # wrong credential value
        # Map the validated token to the (hard-coded) account.
        user = User.objects.filter(username="python").first()
        if not user:
            raise exceptions.APIException('用户不存在')  # user does not exist
        return (user, None)
| true |
5e1c4f748e95fd9069864fb63880e9c0c2c98580 | Python | clchiou/scons_package | /rule.py | UTF-8 | 2,106 | 2.53125 | 3 | [
"MIT"
] | permissive | # Copyright (c) 2013 Che-Liang Chiou
from collections import OrderedDict
from scons_package.label import Label, LabelOfRule, LabelOfFile
from scons_package.utils import topology_sort
class RuleRegistry:
    """Ordered collection of Rule objects keyed by their rule label."""

    def __init__(self):
        self.rules = OrderedDict()

    def __len__(self):
        return len(self.rules)

    def __iter__(self):
        # Iterates over rule labels in insertion order.
        return iter(self.rules)

    def __getitem__(self, label):
        assert isinstance(label, Label)
        return self.rules[label]

    def has_rule(self, rule):
        """Return True when a rule with the same label is registered."""
        return rule.name in self.rules

    def add_rule(self, rule):
        """Register *rule* under its label, replacing any previous entry."""
        assert isinstance(rule, Rule)
        self.rules[rule.name] = rule

    def get_missing_dependencies(self):
        """Yield (label, depend) pairs whose dependency is not registered."""
        for label, rule in self.rules.items():
            unregistered = (d for d in rule.depends if d not in self.rules)
            for depend in unregistered:
                yield label, depend

    def get_sorted_rules(self):
        """Return all rules topologically sorted by their dependencies."""
        def neighbors(rule):
            assert isinstance(rule, Rule)
            return (self.rules[label] for label in rule.depends)
        return topology_sort(self.rules.values(), neighbors)
class Rule(object):
    """A build rule: named outputs produced from inputs plus rule dependencies.

    Validates on construction that all labels have the expected kinds, that
    inputs and outputs live in the rule's own package, and that the rule
    does not depend on itself.
    """

    def __init__(self, name, inputs, depends, outputs):
        assert isinstance(name, LabelOfRule)
        assert all(isinstance(label, LabelOfFile) for label in inputs)
        assert all(isinstance(label, LabelOfRule) for label in depends)
        assert all(isinstance(label, LabelOfFile) for label in outputs)
        package = name.package_name
        # Every input must belong to the rule's own package.
        for label in inputs:
            if label.package_name != package:
                raise ValueError('input outside the package: %s, %s' %
                                 (repr(label), repr(name)))
        # Every output must belong to the rule's own package as well.
        for label in outputs:
            if label.package_name != package:
                raise ValueError('output outside the package: %s, %s' %
                                 (repr(label), repr(name)))
        # A rule may not list itself among its inputs or dependencies.
        if name in inputs or name in depends:
            raise ValueError('rule depends on itself: %s' % name)
        self.name = name
        self.inputs = inputs
        self.depends = depends
        self.outputs = outputs
| true |