blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b067b03d0c05d9f84c455443240226322a4c4fc2 | Python | Marco2018/leetcode | /leetcode118.py | UTF-8 | 605 | 3.046875 | 3 | [] | no_license | class Solution:
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
if n==0:
return []
if n==1:
return [[1]]
if n==2:
return [[1],[1,1]]
res=[[1],[1,1]]
for i in range(2,n):
line=[1]
for j in range(1,i):
temp=res[i-1][j]+res[i-1][j-1]
line.append(temp)
line.append(1)
res.append(line)
return res
# Quick manual check: print Pascal's triangle with 5 rows.
solver = Solution()
num_rows = 5
print(solver.generate(num_rows))
adfbabd8b18733a9d3abaac1529231e37335f712 | Python | Trooper2123/logica_com_python | /python/lista1/circulo_esfera.py | UTF-8 | 121 | 3.578125 | 4 | [] | no_license | raio = float(input("Valor do raio:"))
# Area of a circle: pi * r^2 (pi approximated as 3.14).
# NOTE(review): the label says "Valor do raio" (radius value) but the value
# printed is the circle's *area* -- the label looks wrong; confirm intent.
print(f"Valor do raio:{3.14*raio**2}")
# 4 * pi * r^2 is the *surface area* of a sphere.  If the exercise wanted
# the sphere's volume, the formula would be (4/3) * pi * r^3 -- TODO confirm.
print(f"Valor da esfera:{4*3.14*raio**2}")
| true |
c612767fe49e3d5ca0368c1d7a27a93ce877d1a4 | Python | PatrickVienne/PythonAssessment | /questions/q5.py | UTF-8 | 1,200 | 3.828125 | 4 | [
"MIT"
] | permissive | #############################
# whats the difference (1)? #
#############################
# (1) tuple (immutable, ordered) vs list (mutable, ordered) vs set
# (mutable, unordered, unique elements).
a = (1, 2, 3, "12")
b = [1, 2, 3, "12"]
c = {1, 2, 3, "12"}
#############################
# whats the difference (2)? #
#############################
# (2) generator expression (lazy, single-pass) vs list comprehension
# (eager, fully materialised in memory).
d = (a for a in range(10) if a % 2 == 0)
e = [a for a in range(10) if a % 2 == 0]
#############################
# whats the difference (3)? #
#############################
def get_lines_f():
    # Generator: the file is opened only when first iterated and lines are
    # yielded one at a time while the `with` block stays open.
    with open(__file__, "r") as f:
        for line in f:
            yield line
f = get_lines_f()
def get_lines_g():
    # Eager: reads the whole file into a list and closes it immediately.
    with open(__file__, "r") as f:
        return f.readlines()
g = get_lines_g()
#############################
# whats the difference (4)? #
#############################
# (4) Python 2 only: raw_input() returns a str, input() eval()s the typed
# text.  In Python 3 raw_input is gone and input() returns a str.
h = raw_input()
i = input()
#############################
# whats the difference (5)? #
#############################
# (5) Python 2: range() builds a full list, xrange() is lazy.  Python 3
# only has range(), which is lazy (xrange raises NameError there).
j = range(10)
k = xrange(10)
#############################
# whats the difference (6)? #
#############################
# (6) sorted() returns a *new* sorted list; list.sort() sorts in place
# and returns None.
my_list = [1, 10, 2, 20, 5, 40]
l = sorted(my_list)
# or
my_list.sort()
k = my_list
#############################
# whats the difference (7)? #
#############################
# whats the difference of an array and a list?
# (7) array.array stores unboxed homogeneous C values; a list holds
# references to arbitrary Python objects.
| true |
e97a7b44414244b0f22c7cb1efabeee4c09871c1 | Python | geekan/scrapy-general-spider | /misc/common.py | UTF-8 | 942 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive |
from collections import OrderedDict
from misc.log import *
# Make sure css rules have only one root.
def extract_items_from_list(list_item):
    """Flatten ``{key: [mapping, ...]}`` into a flat list of string dicts.

    For every mapping found in any of the value lists, each field's
    iterable of strings is joined with ``'|'``; field order is preserved
    via OrderedDict.
    """
    extracted = []
    for _, records in list_item.items():
        for record in records:
            ordered = OrderedDict(record)
            extracted.append(
                {field: '|'.join(parts) for field, parts in ordered.items()}
            )
    return extracted
# Make sure css rules have only one root.
def process_items_from_list(list_item, func):
    """Apply ``func`` to every record in a ``{key: [record, ...]}`` mapping.

    Each record is logged via ``info`` before being processed.  Returns an
    (always empty) list, kept so existing callers that expect a list keep
    working.

    Bug fix: ``dict.iteritems()`` is Python 2 only (AttributeError on
    Python 3); use ``items()`` as the sibling ``extract_items_from_list``
    already does.
    """
    items = []
    for _, records in list_item.items():
        for record in records:
            info(record)  # log the raw record before handing it to func
            func(record)
    return items
46e2eee74a44ce1b41ae28a5876d621946ee48a0 | Python | veverkap/food_challenge | /meatsweatsweb/app/rectangle.py | UTF-8 | 537 | 3.953125 | 4 | [] | no_license | class Rectangle:
    def __init__(self, pt1, pt2):
        """Create an axis-aligned rectangle from two opposite corner points.

        pt1, pt2 -- (x, y) tuples; any two opposite corners are accepted,
        normalisation into left/top/right/bottom happens in set_points().
        """
        self.set_points(pt1, pt2)
def set_points(self, pt1, pt2):
(x1, y1) = pt1
(x2, y2) = pt2
self.left = min(x1, x2)
self.top = min(y1, y2)
self.right = max(x1, x2)
self.bottom = max(y1, y2)
def overlaps(self, other):
"""Return true if a rectangle overlaps this rectangle."""
return (self.right > other.left and self.left < other.right and
self.top < other.bottom and self.bottom > other.top)
| true |
5e301816fbd9f8356af3fc676d87cb4f51390e0b | Python | calebwhite0322/KultureKiwibot | /LikeBot.py | UTF-8 | 1,538 | 3.0625 | 3 | [] | no_license |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
class KultureBot:
    """Minimal Instagram automation bot built on Selenium.

    Logs in with the supplied credentials, opens a hashtag feed and likes
    a number of posts.  Timing relies on fixed sleeps, so very slow page
    loads can still break the flow.
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password
        # NOTE: chromedriver path is hard-coded; chromedriver.exe must
        # live at C:\chromedriver.exe for the browser to start.
        self.bot = webdriver.Chrome(executable_path="C:\\chromedriver.exe")
    def login(self):
        """Sign in to Instagram with the stored credentials."""
        driver = self.bot
        driver.get("https://www.instagram.com/accounts/login")
        time.sleep(3)
        driver.find_element_by_name('username').send_keys(self.username)
        driver.find_element_by_name('password').send_keys(self.password + Keys.RETURN)
        time.sleep(3)
    def search_hashtag(self, hashtag):
        """Open the explore page for *hashtag*."""
        self.bot.get('https://www.instagram.com/explore/tags/' + hashtag)
    def like_photos(self, amount):
        """Like *amount* posts, stepping through the feed one post at a time."""
        driver = self.bot
        time.sleep(2)
        # Open the first thumbnail so the pagination arrow becomes available.
        driver.find_element_by_class_name('v1Nh3').click()
        liked = 1
        while liked <= amount:
            time.sleep(4)
            driver.find_element_by_class_name('fr66n').click()
            driver.find_element_by_class_name('coreSpriteRightPaginationArrow').click()
            liked += 1
    def task_completed(self):
        """Report completion and navigate back to the bot's own profile."""
        print("Task Completed")
        self.bot.get('https://instagram.com/' + self.username)
# Interactive driver: prompt for credentials, then run the like workflow.
session = KultureBot(input("Enter Username: "), input("Enter Password: "))
print("Logging in...")
session.login()
session.search_hashtag(input("Enter Target Hashtag: "))
session.like_photos(float(input("Number of Likes: ")))
session.task_completed()
031b3ab4e443a19b25847ef8b05eab8ee014b293 | Python | barjinderpaul/Programming | /python/codewars.py | UTF-8 | 319 | 2.765625 | 3 | [] | no_license | def printer_error(s):
    # Codewars "printer errors": valid colours are 'a'..'m'; any character
    # in 'n'..'z' counts as a printing error.
    count = 0
    for ch in s:
        if ch in "nopqrstuvwxyz":
            count+=1
    #print("error_printers(s) => \""+str(count)+"/"+str(len(s))+"\"")
    # The result is reported as "errors/total length", e.g. "3/56".
    stringg = str(count)+"/"+str(len(s))
    return stringg
# Expected output for this sample: "3/56" (only the trailing "xyz" are errors).
print(printer_error("aaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbmmmmmmmmmmmmmmmmmmmxyz"))
a0073769067731e4dd2edebd6dd65ff7ee93a631 | Python | 100ideas/schema | /py/tests/test_code_parsing.py | UTF-8 | 10,849 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | import typing
from stencila.schema.code_parsing import CodeChunkParseResult, annotation_name_to_schema, CodeChunkParser
from stencila.schema.types import Variable, IntegerSchema, CodeChunk, Function, Parameter, SchemaTypes, StringSchema, \
BooleanSchema, NumberSchema, ArraySchema, TupleSchema
ASSIGNMENT_CODE = """
# this code assigns variables
a = 5
b = 6
a = 7
c: int = 8
c = 9
c: int = 10
def test_func():
d = 4
"""
FUNCTION_CODE = """
# this code defines functions with various types of arguments
def basic():
return 1
def standard_args(a, b, c):
return 2
def variable_args(d, e, *args, **kwargs):
return 3
def default_args(f = 1, g = 'foo'):
return 4
def annotated_types(h: int, j: str = 'bar') -> bool:
return True
def named_constants(t = True, f = False, n = None):
return False
def function_defaults(v = somefunc()):
return 0
def basic(): # don't add it twice
return 2
"""
USES_CODE = """
# this code uses a lot of variables in different ways
a + b
d + e + f
g - h
i - j - k
l / m
n / o / p
q * r
s * t * u
v or w
x and y
z | aa
bb ^ cc
dd & ee
ff.gg
hh[ii]
jj.kk.ll
"""
OPEN_CODE = """
# this code has calls to open(), both in the main level and in defined functions
f = open('read1') # no mode, assumed to be read
with open('read2', 'r') as f: # open with ContextManager and mode
a = f.read()
open('write', 'w') # is a write, don't include
open('readwrite', 'r+') # read and write, include file
open('unknownmode', r) # variable mode, don't know what it is, skip
open(file='kwread') # kwargs testing
open('kwread2', mode='r')
open(file='kwread3', mode='r')
open(file='kwread4', mode='r+')
open('kwwrite', mode='w')
open(file='kwwrite2', mode='w')
open(file=1, mode='w') # should never happen but needed for full code coverage
open(v) # don't know the actual file name since it's a variable, don't include
open(v.y)
open(v['y'])
def open_func():
open('readinfunc') # should traverse into function defs to find opens
"""
IMPORT_CODE = """
from abc import deff
from foo.bar import baz
import rex
"""
COLLECTIONS_CODE = """
# this code assigns collections with variables, so they must be in the 'uses' result
[a, b, c, {d: e, f: g, h: somefunc(i)}]
[j, k, {l: m, n: o[p][q]}, anotherfunc(r)]
(s, t, {u: v}, [w, x, lastfunc(y[z])])
{'foo': 'bar'} # shouldn't be in 'uses'
"""
SLICES_CODE = """
# this code checks for parsing of different ways of slicing array to 'uses'
a[b]
a[c:d]
a[d:e:f]
"""
ALTERS_CODE = """
# this code alters properties/items on existing variables
g: SomeType = SomeType()
g.i = 10 # should not be moved to alters
a.b.c = '123'
a.d = '456'
c['d'][3] = 456
h[5][6] = 9
d[f] = 4
f.g = 5 # f should be moved to alters
"""
AUG_ASSIGNMENT_CODE = """
# augmented assignment is e.g. increment/decrement (a += 1 / b -= 1)
a += 1
b -= func_call(c)
d[f] += 1
g.h -= 10
"""
CONDITIONAL_CODE = """
# test out ifs and while
if a > b:
c = 1
elif d < e < f:
g = 2
else:
h = 3
while i:
j = 4
else:
k = 5
"""
FOR_CODE = """
for a in range(10):
print("{}".format(d))
else:
print(e)
"""
EXCEPT_CODE = """
try:
a = 3
except ValueError as f:
b = 4
except:
c = 5
else:
d = 6
finally:
e = 7
"""
def parse_code(code: str) -> CodeChunkParseResult:
    """Wrap *code* in a CodeChunk and run it through a fresh parser."""
    parser = CodeChunkParser()
    return parser.parse(CodeChunk(code))
def check_result_fields_empty(result: CodeChunkParseResult, non_empty_fields: typing.List[str]) -> None:
    """Assert every list-typed field of *result* is empty, except those named.

    Non-list fields (e.g. ``error``) are ignored entirely.
    """
    for field_name, field_value in result._asdict().items():
        if field_name not in non_empty_fields and isinstance(field_value, list):
            assert not field_value
def check_parameter(p: Parameter, name: str, required: bool, default: typing.Any,
                    schema: typing.Optional[typing.Type[SchemaTypes]]) -> None:
    """Assert that parameter *p* has the expected name/required/default.

    When *schema* is given, additionally assert the parameter's schema is
    an instance of that schema class.
    """
    expected = (name, required, default)
    assert (p.name, p.required, p.default) == expected
    if schema is not None:
        assert isinstance(p.schema, schema)
def test_variable_parsing() -> None:
    """Check ASSIGNMENT_CODE parsing.

    Assignments without annotations land in ``assigns``; annotated
    assignments and function definitions land in ``declares``.  Reassigned
    variables are recorded once, and assignments inside function bodies
    are not recorded at all.
    """
    parse_result = parse_code(ASSIGNMENT_CODE)
    # Expect exactly two declarations: the annotated `c` and the def.
    assert len(parse_result.declares) == 2
    assert type(parse_result.declares[0]) == Variable
    assert parse_result.declares[0].name == 'c'
    assert type(parse_result.declares[0].schema) == IntegerSchema
    assert type(parse_result.declares[1]) == Function  # The correctness of parsing the function is tested elsewhere
    assert parse_result.assigns == ['a', 'b']
    check_result_fields_empty(parse_result, ['declares', 'assigns'])
def test_function_def_parsing():
    """Check FUNCTION_CODE parsing: one Function per def (duplicates kept
    once), with parameters, defaults, *args/**kwargs flags and annotated
    types extracted correctly."""
    parse_result = parse_code(FUNCTION_CODE)
    # Declarations come back in source order; unpack them by name.
    basic, standard_args, variable_args, default_args, annotated_types, named_constants, function_defaults = \
        parse_result.declares
    for fn in parse_result.declares:
        assert isinstance(fn, Function)
        # Only `annotated_types` declares a return annotation.
        if fn != annotated_types:
            assert fn.returns is None
    assert basic.name == 'basic'
    assert len(basic.parameters) == 0
    assert standard_args.name == 'standard_args'
    assert len(standard_args.parameters) == 3
    check_parameter(standard_args.parameters[0], 'a', True, None, None)
    check_parameter(standard_args.parameters[1], 'b', True, None, None)
    check_parameter(standard_args.parameters[2], 'c', True, None, None)
    assert variable_args.name == 'variable_args'
    assert len(variable_args.parameters) == 4
    check_parameter(variable_args.parameters[0], 'd', True, None, None)
    check_parameter(variable_args.parameters[1], 'e', True, None, None)
    # *args is marked `repeats`, **kwargs is marked `extends`.
    check_parameter(variable_args.parameters[2], 'args', False, None, None)
    assert variable_args.parameters[2].repeats is True
    assert not variable_args.parameters[2].extends
    check_parameter(variable_args.parameters[3], 'kwargs', False, None, None)
    assert not variable_args.parameters[3].repeats
    assert variable_args.parameters[3].extends is True
    assert default_args.name == 'default_args'
    assert len(default_args.parameters) == 2
    check_parameter(default_args.parameters[0], 'f', False, 1, None)
    check_parameter(default_args.parameters[1], 'g', False, 'foo', None)
    assert annotated_types.name == 'annotated_types'
    assert len(annotated_types.parameters) == 2
    assert isinstance(annotated_types.returns, BooleanSchema)
    check_parameter(annotated_types.parameters[0], 'h', True, None, IntegerSchema)
    check_parameter(annotated_types.parameters[1], 'j', False, 'bar', StringSchema)
    assert named_constants.name == 'named_constants'
    assert len(named_constants.parameters) == 3
    check_parameter(named_constants.parameters[0], 't', False, True, None)
    check_parameter(named_constants.parameters[1], 'f', False, False, None)
    check_parameter(named_constants.parameters[2], 'n', False, None, None)
    # A default that is a function call is kept as a non-required
    # parameter with no static default value.
    assert function_defaults.name == 'function_defaults'
    assert len(function_defaults.parameters) == 1
    check_parameter(function_defaults.parameters[0], 'v', False, None, None)
    check_result_fields_empty(parse_result, ['declares'])
def test_uses_parsing():
    """Every variable read by USES_CODE (operators, attribute bases,
    subscript bases) ends up in ``uses`` and nothing else is recorded."""
    parse_result = parse_code(USES_CODE)
    check_result_fields_empty(parse_result, ['uses'])
    # Note: 'gg', 'kk'/'ll' are attribute names, not variables, so they
    # are absent; only the attribute/subscript bases count as uses.
    uses = ['a', 'b', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
            'w', 'x', 'y', 'z', 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'hh', 'ii', 'jj']
    assert sorted(uses) == sorted(parse_result.uses)
def test_parsing_error():
    """Invalid source is reported as a SyntaxError in the result's error
    field rather than raising."""
    parse_result = parse_code('this is invalid python++ code')
    assert parse_result.error.kind == 'SyntaxError'
    # NOTE(review): this exact message text comes from CPython's ast module
    # and can change between interpreter versions.
    assert parse_result.error.message == 'invalid syntax (<unknown>, line 1)'
def test_reads_parsing():
    """Files opened for reading (including inside function defs and via
    keyword arguments) are collected into ``reads``; writes, variable
    modes and variable filenames are excluded.

    Consistency fix: use the ``parse_code`` helper like every other test
    instead of constructing the parser and chunk by hand (the helper does
    exactly ``CodeChunkParser().parse(CodeChunk(code))``).
    """
    parse_result = parse_code(OPEN_CODE)
    filenames = ['read1', 'read2', 'readwrite', 'kwread', 'kwread2', 'kwread3', 'kwread4', 'readinfunc']
    assert sorted(filenames) == sorted(parse_result.reads)
def test_imports_parsing():
    """Both `import x` and `from x.y import z` forms are collected as the
    imported module paths."""
    parse_result = parse_code(IMPORT_CODE)
    assert ['abc', 'foo.bar', 'rex'] == sorted(parse_result.imports)
    check_result_fields_empty(parse_result, ['imports'])
def test_collections_parsing():
    """Variables referenced inside list/tuple/set/dict literals (keys,
    values, call arguments, nested subscripts) are all recorded as uses;
    literal-only collections contribute nothing."""
    parse_result = parse_code(COLLECTIONS_CODE)
    # COLLECTIONS_CODE references exactly the single letters a..z.
    assert [chr(c) for c in range(ord('a'), ord('z') + 1)] == sorted(parse_result.uses)
    check_result_fields_empty(parse_result, ['uses'])
def test_slices_parsing():
    """Plain index, start:stop and start:stop:step slice expressions all
    register both the sliced variable and the slice components as uses."""
    parse_result = parse_code(SLICES_CODE)
    assert [chr(c) for c in range(ord('a'), ord('f') + 1)] == sorted(parse_result.uses)
    check_result_fields_empty(parse_result, ['uses'])
def test_alters_parsing():
    """Assigning to an attribute or item of an existing variable records
    the base variable in ``alters`` (unless it was just declared in the
    same chunk, like `g`)."""
    parse_result = parse_code(ALTERS_CODE)
    assert ['a', 'c', 'd', 'f', 'h'] == sorted(parse_result.alters)
    assert len(parse_result.declares) == 1
    assert parse_result.declares[0].name == 'g'
    check_result_fields_empty(parse_result, ['alters', 'declares'])
def test_aug_assignment_parsing():
    """Augmented assignment (+=, -=) marks the target as altered; names
    appearing on the right-hand side or as subscript keys are uses."""
    parse_result = parse_code(AUG_ASSIGNMENT_CODE)
    assert ['a', 'b', 'd', 'g'] == sorted(parse_result.alters)
    assert ['c', 'f'] == sorted(parse_result.uses)
    check_result_fields_empty(parse_result, ['alters', 'uses'])
def test_conditional_code_parsing():
    """Conditions of if/elif/while are uses; assignments in every branch
    (including else clauses) are collected as assigns."""
    parse_result = parse_code(CONDITIONAL_CODE)
    assert ['a', 'b', 'd', 'e', 'f', 'i'] == sorted(parse_result.uses)
    assert ['c', 'g', 'h', 'j', 'k'] == sorted(parse_result.assigns)
    check_result_fields_empty(parse_result, ['assigns', 'uses'])
def test_for_parsing():
    """The for-loop target is an assign; names used in the body and in the
    loop's else clause are uses."""
    parse_result = parse_code(FOR_CODE)
    assert ['a'] == parse_result.assigns
    assert ['d', 'e'] == sorted(parse_result.uses)
    check_result_fields_empty(parse_result, ['assigns', 'uses'])
def test_except_parsing():
    """Assignments inside try/except/else/finally bodies are all collected
    as assigns (the `as f` exception alias is not)."""
    parse_result = parse_code(EXCEPT_CODE)
    assert ['a', 'b', 'c', 'd', 'e'] == sorted(parse_result.assigns)
    check_result_fields_empty(parse_result, ['assigns', 'uses'])
def test_annotation_parsing():
    """Each supported builtin type name maps to its schema class; None
    (no annotation) maps to None."""
    assert annotation_name_to_schema(None) is None
    assert isinstance(annotation_name_to_schema('bool'), BooleanSchema)
    assert isinstance(annotation_name_to_schema('str'), StringSchema)
    assert isinstance(annotation_name_to_schema('int'), IntegerSchema)
    assert isinstance(annotation_name_to_schema('float'), NumberSchema)
    assert isinstance(annotation_name_to_schema('list'), ArraySchema)
    assert isinstance(annotation_name_to_schema('tuple'), TupleSchema)
| true |
210a97984b84cb6b7304a761dfebd9a23503d6de | Python | owkin/FLamby | /flamby/benchmarks/benchmark_utils.py | UTF-8 | 19,861 | 2.625 | 3 | [
"MIT"
] | permissive | import copy
import random
import time
import numpy as np
import pandas as pd
import torch
from opacus import PrivacyEngine
from torch.utils.data import DataLoader as dl
from tqdm import tqdm
from flamby.utils import evaluate_model_on_tests
def set_seed(seed):
    """Seed every RNG the benchmarks rely on (python, numpy, torch).

    The plain ``random`` module must be seeded too because albumentations
    draws from it.

    Parameters
    ----------
    seed : int
        Seed value applied to all three generators.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
def fill_df_with_xp_results(
    df,
    perf_dict,
    hyperparams,
    method_name,
    columns_names,
    results_file,
    dump=True,
    pooled=False,
):
    """Append one result row per test to the results dataframe.

    Parameters
    ----------
    df : pd.DataFrame
        The dataframe of results accumulated so far.
    perf_dict : dict
        Maps test names to metric values.
    hyperparams : dict
        Hyperparameters used; each key becomes (or fills) a column.
    method_name : str
        The name of the training method, stored in the "Method" column.
    columns_names : list[str]
        The column names of the dataframe (missing ones are filled with
        None by prepare_dict).
    results_file : str
        Path of the CSV to write when *dump* is True.
    dump : bool
        Whether to write the updated dataframe to disk. Defaults to True.
    pooled : bool
        If True, *perf_dict* must hold a single entry, which is renamed
        "Pooled Test" to distinguish it from the first local test.

    Returns
    -------
    pd.DataFrame
        A new dataframe with the appended rows (the input *df* object is
        not modified, but *perf_dict* is mutated in the pooled case).
    """
    # Work on the dataframe as a list of row dicts so rows can be appended.
    perf_lines_dicts = df.to_dict("records")
    if pooled:
        assert (
            len(perf_dict) == 1
        ), "Your pooled perf dict has multiple keys this is impossible."
        # NOTE: this renames the single key *in place*, mutating the
        # caller's perf_dict.
        perf_dict["Pooled Test"] = perf_dict.pop(list(perf_dict)[0])
    for k, v in perf_dict.items():
        perf_lines_dicts.append(
            prepare_dict(
                keys=columns_names,
                allow_new=True,
                Test=k,
                Metric=v,
                Method=method_name,
                # We add the hyperparameters used
                **hyperparams,
            )
        )
    # We update csv and save it when the results are there
    df = pd.DataFrame.from_dict(perf_lines_dicts)
    if dump:
        df.to_csv(results_file, index=False)
    return df
def find_xps_in_df(df, hyperparameters, sname, num_updates):
    """Return the dataframe index of experiments matching the given
    hyperparameters for strategy *sname* with *num_updates* updates.

    Parameters
    ----------
    df : pd.DataFrame
        The dataframe of experiments.
    hyperparameters : dict
        Keys are columns of the dataframe; values are used to filter it.
    sname : str
        The FL strategy name; must be one of
        ["FedAvg", "Scaffold", "FedProx", "Cyclic", "FedAdam",
        "FedAdagrad", "FedYogi", "FedAvgFineTuning"].
    num_updates : int
        The number of batch updates used in the strategy.

    Returns
    -------
    pd.Index
        Rows matching all hyperparameters AND the Method column.
    """
    # This is very ugly but this is the only way I found to accomodate float
    # and objects equality in a robust fashion
    # The non-robust version would be simpler but it doesn't handle floats well
    # index_of_interest = df.loc[
    #     (df["Method"] == (sname + str(num_updates)))
    #     & (
    #         df[list(hyperparameters)] == pd.Series(hyperparameters)
    #     ).all(axis=1)
    # ].index
    assert all(
        [e in df.columns for e in list(hyperparameters)]
    ), "Some hyperparameters provided are not included in the dataframe"
    assert sname in [
        "FedAvg",
        "Scaffold",
        "FedProx",
        "Cyclic",
        "FedAdam",
        "FedAdagrad",
        "FedYogi",
        "FedAvgFineTuning",
    ], f"Strategy name {sname} not recognized."
    found_xps = df[list(hyperparameters)]
    # Different types of data need different matching strategy
    found_xps_numerical = found_xps.select_dtypes(exclude=[object])
    col_numericals = found_xps_numerical.columns
    col_objects = [c for c in found_xps.columns if not (c in col_numericals)]
    # Special cases for boolean parameters
    if "deterministic_cycle" in found_xps_numerical.columns:
        found_xps_numerical["deterministic_cycle"] = (
            found_xps_numerical["deterministic_cycle"].fillna(0.0).astype(float)
        )
    # Numerical columns are compared with np.isclose so float rounding and
    # NaNs (equal_nan=True) do not break exact matching.
    if len(col_numericals) > 0:
        bool_numerical = np.all(
            np.isclose(
                found_xps_numerical,
                pd.Series(
                    {
                        k: float(hyperparameters[k])
                        for k in list(hyperparameters.keys())
                        if k in col_numericals
                    }
                ),
                equal_nan=True,
            ),
            axis=1,
        )
    else:
        # No numerical hyperparameter columns: accept every row.
        bool_numerical = np.ones((len(df.index), 1)).astype("bool")
    # Object columns are compared on their string representations.
    if len(col_objects):
        bool_objects = found_xps[col_objects].astype(str) == pd.Series(
            {
                k: str(hyperparameters[k])
                for k in list(hyperparameters.keys())
                if k in col_objects
            }
        )
    else:
        bool_objects = np.ones((len(df.index), 1)).astype("bool")
    # We filter on the Method we want
    bool_method = df["Method"] == (sname + str(num_updates))
    # A row matches only if it passes all three masks (intersection).
    index_of_interest_1 = df.loc[pd.DataFrame(bool_numerical).all(axis=1)].index
    index_of_interest_2 = df.loc[pd.DataFrame(bool_objects).all(axis=1)].index
    index_of_interest_3 = df.loc[pd.DataFrame(bool_method).all(axis=1)].index
    index_of_interest = index_of_interest_1.intersection(
        index_of_interest_2
    ).intersection(index_of_interest_3)
    return index_of_interest
def init_data_loaders(
    dataset,
    pooled=False,
    batch_size=1,
    num_workers=1,
    num_clients=None,
    batch_size_test=None,
    collate_fn=None,
):
    """Build the train/test DataLoader(s) for the pooled or federated setting.

    Parameters
    ----------
    dataset : callable
        Dataset class accepting ``center``, ``train`` and ``pooled`` kwargs.
    pooled : bool
        If True return a single pooled (train, test) pair; otherwise one
        pair of loader lists with an entry per client.
    batch_size, num_workers, collate_fn :
        Forwarded to every DataLoader.
    num_clients : int, optional
        Required in the non-pooled case.
    batch_size_test : int, optional
        Test batch size; defaults to *batch_size*.

    Returns
    -------
    (list[DataLoader], list[DataLoader]) when ``pooled`` is False, or
    (DataLoader, DataLoader) when ``pooled`` is True.
    """
    if not pooled and num_clients is None:
        raise ValueError("num_clients must be specified for the non-pooled data")
    if batch_size_test is None:
        batch_size_test = batch_size

    def make_loader(ds, train):
        # Only the training split is shuffled; test order stays deterministic.
        return dl(
            ds,
            batch_size=batch_size if train else batch_size_test,
            shuffle=train,
            num_workers=num_workers,
            collate_fn=collate_fn,
        )

    if pooled:
        return (
            make_loader(dataset(train=True, pooled=True), True),
            make_loader(dataset(train=False, pooled=True), False),
        )
    training_dls = [
        make_loader(dataset(center=i, train=True, pooled=False), True)
        for i in range(num_clients)
    ]
    test_dls = [
        make_loader(dataset(center=i, train=False, pooled=False), False)
        for i in range(num_clients)
    ]
    return training_dls, test_dls
def prepare_dict(keys, allow_new=False, **kwargs):
    """Build a dict containing every name in *keys* (defaulting to None)
    overlaid with the given keyword values.

    When ``allow_new`` is False (the default) every kwarg must already be
    one of *keys*, otherwise an AssertionError is raised.
    """
    if not allow_new:
        unknown = [key for key in kwargs if key not in keys]
        assert not unknown, (
            "Some of the keys given were not found in the existsing columns;"
            f"keys: {kwargs.keys()}, columns: {keys}"
        )
    return {**dict.fromkeys(keys), **kwargs}
def get_logfile_name_from_strategy(dataset_name, sname, num_updates, args):
    """Build an explicit log-file basename from the strategy settings.

    Learning-rate-style hyperparameters are abbreviated to the initials of
    their words (e.g. ``server_learning_rate`` -> ``slr``); ``mu`` and
    ``deterministic_cycle`` keep their full names.  All other entries of
    *args* are ignored.
    """
    parts = [f"{dataset_name}-{sname}-num-updates{num_updates}"]
    for key, value in args.items():
        if key in ("learning_rate", "server_learning_rate"):
            initials = "".join(word[0] for word in str(key).split("_"))
            parts.append(initials + str(value))
        if key in ("mu", "deterministic_cycle"):
            parts.append(str(key) + str(value))
    return "-".join(parts)
def evaluate_model_on_local_and_pooled_tests(
    m, local_dls, pooled_dl, metric, evaluate_func, return_pred=False
):
    """Evaluate *m* on the per-client test loaders and on the pooled loader.

    Parameters
    ----------
    m : torch.nn.Module
        Model under evaluation.
    local_dls : list[torch.utils.data.DataLoader]
        One test loader per client.
    pooled_dl : torch.utils.data.DataLoader
        The pooled test loader.
    metric : callable
        Metric forwarded to *evaluate_func*.
    evaluate_func : callable
        Evaluation routine ``(model, dataloaders, metric, return_pred)``
        returning a perf dict, or a ``(perf, y_true, y_pred)`` triple when
        ``return_pred`` is True.
    return_pred : bool
        Whether ground truths and predictions are returned as well.

    Returns
    -------
    tuple
        ``(local_perf, pooled_perf, y_true, y_pred, y_true_pooled,
        y_pred_pooled)``; the last four entries are None unless
        ``return_pred`` is True.
    """
    local_out = evaluate_func(m, local_dls, metric, return_pred=return_pred)
    pooled_out = evaluate_func(m, [pooled_dl], metric, return_pred=return_pred)
    # When predictions are requested the evaluator returns triples that
    # need unpacking; otherwise the outputs are the perf dicts themselves.
    if return_pred:
        perf_dict, y_true_dict, y_pred_dict = local_out
        pooled_perf_dict, y_true_pooled_dict, y_pred_pooled_dict = pooled_out
    else:
        perf_dict, pooled_perf_dict = local_out, pooled_out
        y_true_dict = y_pred_dict = y_true_pooled_dict = y_pred_pooled_dict = None
    print("Per-center performance:")
    print(perf_dict)
    print("Performance on pooled test set:")
    print(pooled_perf_dict)
    return (
        perf_dict,
        pooled_perf_dict,
        y_true_dict,
        y_pred_dict,
        y_true_pooled_dict,
        y_pred_pooled_dict,
    )
def train_single_centric(
    global_init,
    train_dl,
    use_gpu,
    name,
    opt_class,
    learning_rate,
    loss_class,
    num_epochs,
    dp_target_epsilon=None,
    dp_target_delta=None,
    dp_max_grad_norm=None,
    seed=None,
):
    """Train a copy of *global_init* on *train_dl*, optionally with
    (epsilon, delta)-differential privacy via opacus.

    Parameters
    ----------
    global_init : torch.nn.Module
        The initialized model; it is deep-copied, never trained in place.
    train_dl : torch.utils.data.DataLoader
        The dataloader to use for training.
    use_gpu : bool
        Whether to move the model and batches to CUDA.
    name : str
        The name of the method to display (currently unused in the body).
    opt_class : torch.optim
        Callable ``(params, lr) -> torch.optim.Optimizer``.
    learning_rate : float
        The learning rate of the optimizer.
    loss_class : torch.nn.modules.loss._Loss
        Callable returning a pytorch loss.
    num_epochs : int
        The number of epochs to train for.
    dp_target_epsilon : float, optional
        Target epsilon of the DP guarantee; all three dp_* arguments must
        be given together or a ValueError is raised.
    dp_target_delta : float, optional
        Target delta of the DP guarantee.
    dp_max_grad_norm : float, optional
        Maximum L2 norm of per-sample gradients used for DP clipping.
    seed : int, optional
        Seed for the DP noise generator; defaults to the current time.

    Returns
    -------
    torch.nn.Module
        The trained model (wrapped by opacus when DP is enabled).
    """
    # DP is enabled only when all three DP arguments are provided.
    apply_dp = (
        (dp_target_epsilon is not None)
        and (dp_max_grad_norm is not None)
        and (dp_target_delta is not None)
    )
    # Providing only a subset of the DP arguments is a configuration error.
    if (not apply_dp) and (dp_target_epsilon is not None):
        raise ValueError("Missing argument for DP")
    if (not apply_dp) and (dp_max_grad_norm is not None):
        raise ValueError("Missing argument for DP")
    if (not apply_dp) and (dp_target_delta is not None):
        raise ValueError("Missing argument for DP")
    device = "cpu"
    model = copy.deepcopy(global_init)
    if use_gpu:
        model.cuda()
        device = "cuda"
    bloss = loss_class()
    opt = opt_class(model.parameters(), lr=learning_rate)
    if apply_dp:
        seed = seed if seed is not None else int(time.time())
        privacy_engine = PrivacyEngine()
        # put model in train mode if not already the case
        model.train()
        # Opacus wraps model, optimizer and dataloader so that per-sample
        # gradients are clipped and calibrated noise is added.
        model, opt, train_dl = privacy_engine.make_private_with_epsilon(
            module=model,
            optimizer=opt,
            data_loader=train_dl,
            epochs=num_epochs,
            target_epsilon=dp_target_epsilon,
            target_delta=dp_target_delta,
            max_grad_norm=dp_max_grad_norm,
            noise_generator=torch.Generator(device).manual_seed(seed),
        )
    grad_norm_history = []
    for _ in tqdm(range(num_epochs)):
        for X, y in train_dl:
            if use_gpu:
                # use GPU if requested and available
                X = X.cuda()
                y = y.cuda()
            opt.zero_grad()
            y_pred = model(X)
            loss = bloss(y_pred, y)
            loss.backward()
            opt.step()
            # Sums the per-parameter gradient norms (not the global L2 norm
            # of the concatenated gradient vector).
            grad_norm = 0
            for param in model.parameters():
                if param.grad is not None:
                    grad_norm += torch.linalg.norm(param.grad)
            # NOTE(review): grad_norm_history is accumulated but never used
            # or returned -- confirm whether it should be logged or dropped.
            grad_norm_history.append(grad_norm)
    return model
def init_xp_plan(
    num_clients,
    nlocal,
    single_centric_baseline=None,
    strategy=None,
    compute_ensemble_perf=False,
):
    """Decide which experiments (single-centric baselines and/or the FL
    strategy) should be run, given the CLI options.

    Parameters
    ----------
    num_clients : int
        The number of available clients.
    nlocal : int
        The index of the chosen client (only used when
        ``single_centric_baseline == "Local"``).
    single_centric_baseline : str, optional
        "Pooled" or "Local"; when given, run only that baseline and skip
        the strategies (and ensemble performance).
    strategy : str, optional
        When given, run only this strategy and skip all baselines (and
        ensemble performance).
    compute_ensemble_perf : bool
        Whether ensemble performance was requested; it is forced off when
        a single baseline or a strategy is selected.

    Returns
    -------
    tuple(dict, bool, bool)
        ``do_baselines`` (which baselines to run), ``do_strategy``, and
        the possibly-downgraded ``compute_ensemble_perf``.

    Raises
    ------
    ValueError
        If ensemble performance is requested while not all local models
        would be trained, or if the chosen local client does not exist.
    """
    do_strategy = True
    # By default run every baseline: the pooled model plus one per client.
    do_baselines = {"Pooled": True}
    for i in range(num_clients):
        do_baselines[f"Local {i}"] = True
    # Single client baseline computation
    if single_centric_baseline is not None:
        if compute_ensemble_perf:
            print(
                "WARNING: by providing the argument single_centric_baseline"
                " you will not be able to compute ensemble performance."
            )
        compute_ensemble_perf = False
        # Disable everything, then re-enable only the requested baseline.
        do_baselines = {"Pooled": False}
        for i in range(num_clients):
            do_baselines[f"Local {i}"] = False
        if single_centric_baseline == "Pooled":
            do_baselines[single_centric_baseline] = True
        elif single_centric_baseline == "Local":
            assert nlocal in range(num_clients), "The client you chose does not exist"
            do_baselines[single_centric_baseline + " " + str(nlocal)] = True
        # If we do a single-centric baseline we don't do the strategies
        do_strategy = False
    # if we give a strategy we compute only the strategy and not the baselines
    if strategy is not None:
        if compute_ensemble_perf:
            print(
                "WARNING: by providing a strategy argument you will"
                " not be able to compute ensemble performance."
            )
        compute_ensemble_perf = False
        for k, _ in do_baselines.items():
            do_baselines[k] = False
    # Ensemble performance needs predictions from every local model.
    do_all_local = all([do_baselines[f"Local {i}"] for i in range(num_clients)])
    if compute_ensemble_perf and not (do_all_local):
        raise ValueError(
            "Cannot compute ensemble performance if training on only one local"
        )
    return do_baselines, do_strategy, compute_ensemble_perf
def ensemble_perf_from_predictions(
y_true_dicts, y_pred_dicts, num_clients, metric, num_clients_test=None
):
"""_summary_
Parameters
----------
y_true_dicts : dict
The ground truth dicts for all clients
y_pred_dicts :dict
The prediction array for all models and clients.
num_clients : int
The number of clients
metric : callable
(torch.Tensor, torch.Tensor) -> [0, 1.]
num_clients_test: int
When testing on pooled.
Returns
-------
dict
A dict with the predictions of all ensembles
"""
print("Computing ensemble performance")
ensemble_perf = {}
if num_clients_test is None:
num_clients_test = num_clients
for testset_idx in range(num_clients_test):
# Small safety net
for model_idx in range(1, num_clients):
assert (
y_true_dicts[f"Local {0}"][f"client_test_{testset_idx}"]
== y_true_dicts[f"Local {model_idx}"][f"client_test_{testset_idx}"]
).all(), "Models in the ensemble have different ground truths"
# Since they are all the same we use the first one
# for this specific tests as the ground truth
ensemble_true = y_true_dicts["Local 0"][f"client_test_{testset_idx}"]
# Accumulating predictions
ensemble_pred = y_pred_dicts["Local 0"][f"client_test_{testset_idx}"]
for model_idx in range(1, num_clients):
ensemble_pred += y_pred_dicts[f"Local {model_idx}"][
f"client_test_{testset_idx}"
]
ensemble_pred /= float(num_clients)
ensemble_perf[f"client_test_{testset_idx}"] = metric(
ensemble_true, ensemble_pred
)
return ensemble_perf
def set_dataset_specific_config(dataset_name, compute_ensemble_perf=False, use_gpu=True):
    """Return the dataset-specific evaluation function, test batch size
    and a possibly-downgraded ensemble-performance flag.

    Parameters
    ----------
    dataset_name : str
        A flamby dataset identifier (e.g. "fed_lidc_idri", "fed_kits19",
        "fed_ixi", or any other dataset using the default evaluator).
    compute_ensemble_perf : bool
        Whether ensemble performance was requested; forced to False for
        the three datasets below because their evaluators do not return
        predictions.
    use_gpu : bool
        Whether evaluation should run on GPU.

    Returns
    -------
    tuple(callable, int or None, bool)
        ``(evaluate_func, batch_size_test, compute_ensemble_perf)``; a
        None batch size means "use the caller's default".
    """
    # Instantiate all train and test dataloaders required including pooled ones
    if dataset_name == "fed_lidc_idri":
        batch_size_test = 1
        from flamby.datasets.fed_lidc_idri import evaluate_dice_on_tests_by_chunks
        # Wrap the dice evaluator so it matches the common
        # (m, test_dls, metric, use_gpu, return_pred) signature.
        def evaluate_func(m, test_dls, metric, use_gpu=use_gpu, return_pred=False):
            dice_dict = evaluate_dice_on_tests_by_chunks(m, test_dls, use_gpu)
            if return_pred:
                # No per-sample predictions available for this dataset.
                return dice_dict, None, None
            return dice_dict
        compute_ensemble_perf = False
    elif dataset_name == "fed_kits19":
        from flamby.datasets.fed_kits19 import evaluate_dice_on_tests
        batch_size_test = 2
        def evaluate_func(m, test_dls, metric, use_gpu=use_gpu, return_pred=False):
            dice_dict = evaluate_dice_on_tests(m, test_dls, metric, use_gpu)
            if return_pred:
                return dice_dict, None, None
            return dice_dict
        compute_ensemble_perf = False
    elif dataset_name == "fed_ixi":
        batch_size_test = 1
        evaluate_func = evaluate_model_on_tests
        compute_ensemble_perf = False
    else:
        # Default: generic evaluator, batch size decided by the caller.
        batch_size_test = None
        evaluate_func = evaluate_model_on_tests
    return evaluate_func, batch_size_test, compute_ensemble_perf
| true |
1ce97a3f2786eeac01f27e06a2388a02e13761f0 | Python | heliosPy/hrmanagement | /hrm/manager/utils.py | UTF-8 | 1,056 | 2.515625 | 3 | [] | no_license | from datetime import date
from django.shortcuts import redirect
from .models import RecuirtmentModel
from applicant.models import ApplicationFormModel
today = date.today()
def check_regestration_ends(x):
    """Return True when the recruitment identified by ``x`` (the
    op_code taken from the URL) exists and its application deadline
    has already passed."""
    expired_posts = RecuirtmentModel.objects.filter(lastdate_apply__lt=today)
    try:
        match = expired_posts.get(op_code=x)
    except RecuirtmentModel.DoesNotExist:
        # Either the recruitment does not exist or it is still open.
        return False
    return bool(match)
def check_recu_applic(x, y):
    """Return True when both the application ``x`` and the recruitment
    ``y`` exist in the database.

    Parameters
    ----------
    x : application primary key (ApplicationFormModel.id).
    y : recruitment op_code (RecuirtmentModel.op_code).
    """
    try:
        RecuirtmentModel.objects.get(op_code=y)
        ApplicationFormModel.objects.get(id=x)
    except Exception:
        # Narrowed from a bare ``except:`` so that system-exiting
        # exceptions are not swallowed; any lookup failure
        # (DoesNotExist, bad key type, ...) means the pair is invalid.
        return False
    return True
def check_manager(user):
    """Redirect ``user`` according to their manager status.

    Returns a redirect to 'manager:home' for managers and to 'home'
    for users without an employee profile.

    NOTE(review): when the user has a profile but ``is_manager`` is
    falsy, the function falls through and returns None -- confirm that
    callers expect this.
    """
    try:
        if user.employee_profile.is_manager:
            # Logged-in managers are sent to the manager home page.
            return redirect('manager:home')
    except Exception:
        # Narrowed from a bare ``except:``; typically raised when the
        # user has no employee_profile at all.
        return redirect('home')
8b650cdc7930b867a21cf93b676807a53e166804 | Python | zamfiralina/AuctioX | /Backend/Functions/login.py | UTF-8 | 1,335 | 2.984375 | 3 | [] | no_license | import time
from Backend.DBController.DBConnection import DBConnection
from Backend.Functions.unicodeHash import unicodeHash
def login(username: str, password: str, db_conn: DBConnection, activeUsers: dict) -> bytes :
    """
    Checks the user id and SHA256 of the pw against the DB.
    Adds the tuple (user id, SHA256(user id + pw + str(time.asctime()))) to the active users dict.
    Returns the hash computed above.
    """
    # SECURITY(review): the username is interpolated straight into the
    # SQL string, which allows SQL injection.  Switch to a
    # parameterized query once DBConnection.execute supports bind
    # variables.
    result = db_conn.execute(f"SELECT COUNT(*) FROM SITE_USERS WHERE USERNAME LIKE '{username}' AND PASSWORD LIKE '{unicodeHash(password)}'")
    # (Debug prints that leaked the password hash to stdout were
    # removed here.)
    if result == [(0,)]:
        return "LOGINFAIL".encode()
    else:
        # Session token: hash of the credentials salted with the login
        # time, so two logins of the same user get distinct tokens.
        userHash = unicodeHash(username + password + str(time.asctime()))
        activeUsers[userHash] = username
        return ("LOGINSUCCESS?" + userHash).encode()
def logout(userHash: str, activeUsers: dict) -> bytes:
    """Remove ``userHash`` from the active-session dict.

    Returns b'LOGOUTSUCCESS' when the session existed and was removed,
    b'LOGOUTFAIL' when no such session is active.
    """
    try:
        del activeUsers[userHash]
    except KeyError:
        # Narrowed from a bare ``except:``; the only expected failure
        # is an unknown or already-expired session token.
        return b'LOGOUTFAIL'
    return b'LOGOUTSUCCESS'
| true |
e5f995799f165483299864199ea79e83d54f8d55 | Python | ksjk2165/pythoSeleniummail | /energy.py | UTF-8 | 2,229 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python3
import sys
sys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
expectedSongName='Save Your Tears'
def mailnotification():
    """Send a hard-coded notification e-mail through Gmail's SMTP
    server.

    NOTE(review): the sender address, password and recipient are blank
    placeholders and must be filled in before this can work.
    """
    print("emailing................")
    email = "" # the sender Gmail address
    password = ""
    send_to_email = "" # the recipient address
    subject = "The Song has just Started!!!! Start the RAdio"
    message = "The song mood has just started on the Energie.at call them right now at 06766060701 "
    # Build a multipart MIME message with a plain-text body.
    msg = MIMEMultipart()
    msg["From"] = email
    msg["To"] = send_to_email
    msg["Subject"] = subject
    msg.attach(MIMEText(message, 'plain'))
    # Gmail requires STARTTLS on port 587 before authenticating.
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.starttls()
    server.login(email, password)
    text = msg.as_string()
    server.sendmail(email, send_to_email, text)
    server.quit()
    print('Mail Sent')
if __name__ == '__main__':
    # Headless Chrome setup (sandbox/shm flags are needed when running
    # inside a container).
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    # NOTE(review): the ``chrome_options`` keyword is deprecated in
    # newer Selenium releases in favour of ``options``.
    browser = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
    browser.get('https://energy.at/wien')
    assert "ENERGY" in browser.title
    # Dismiss the cookie-consent popup before reading the page.
    acceptCookie = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@id="cookiePopup"]/div/div[1]/div[1]/label')))
    acceptCookie.click()
    try:
        # Poll the "now playing" track title until the wanted song
        # (expectedSongName) comes up, then send the notification mail.
        while True:
            songname = WebDriverWait(browser, 5).until(EC.presence_of_element_located((By.XPATH, '//*[@class="trackTitle"]')))
            if expectedSongName.lower() in songname.text.lower():
                print("Song Found")
                mailnotification()
                break
            browser.refresh()
    except TimeoutException:
        # The track-title element did not appear within 5 seconds.
        print("No element found")
    print("finished")
    browser.close()
| true |
1099f4c0626a57ee3844833f0ea03c2b0f2afac9 | Python | smohsinali/smac2JSON | /pjson.py | UTF-8 | 6,569 | 2.609375 | 3 | [] | no_license | from itertools import product
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter,NumericalHyperparameter, Constant, \
IntegerHyperparameter, NormalIntegerHyperparameter, NormalFloatHyperparameter
from ConfigSpace.conditions import EqualsCondition, NotEqualsCondition, \
InCondition, AndConjunction, OrConjunction, ConditionComponent
from ConfigSpace.forbidden import ForbiddenEqualsClause, \
ForbiddenAndConjunction, ForbiddenInClause, AbstractForbiddenComponent, MultipleValueForbiddenClause
from math import log
import pyparsing
import six
import json
pp_param_name = pyparsing.Word(pyparsing.alphanums + "_" + "-" + "@" + "." + ":" + ";" + "\\" + "/" + "?" + "!" +
"$" + "%" + "&" + "*" + "+" + "<" + ">")
def build_categorical(param):
    """Translate a categorical hyperparameter into its JSON description.

    NOTE(review): the type tag "categrical" looks like a typo for
    "categorical" but is kept verbatim for downstream compatibility.
    """
    spec = {
        "type": "categrical",
        "values": list(param.choices),
        "default": param.default,
    }
    return [param.name, spec]
def build_constant(param):
    """Translate a constant hyperparameter into its JSON description."""
    spec = {"type": "constant", "value": param.value}
    return [param.name, spec]
def build_continuous(param):
    """Translate a numerical hyperparameter into its JSON description.

    Normal distributions are first converted to their uniform
    counterparts; integer-ness and log-scaling are encoded as the
    strings "true"/"false".
    """
    if type(param) in (NormalIntegerHyperparameter,
                       NormalFloatHyperparameter):
        param = param.to_uniform()
    is_integer = "true" if param.__class__.__name__ == 'UniformIntegerHyperparameter' else "false"
    on_log_scale = "true" if param.log is True else "false"
    spec = {
        "type": "continuous",
        "range": [param.lower, param.upper],
        "log-scale": on_log_scale,
        "default": param.default,
        "integer": is_integer,
    }
    return [param.name, spec]
def build_condition(condition):
    """Convert a ConfigSpace condition into a flat description list.

    Parameters
    ----------
    condition : ConditionComponent
        An EqualsCondition or an In-style condition (AndConjunction is
        not supported).

    Returns
    -------
    list
        [child_name, parent_name, parent_type, value_or_values] where
        parent_type is 'categorical', 'continuous' or 'integer'.

    Raises
    ------
    TypeError
        If ``condition`` is not a ConditionComponent.
    NotImplementedError
        For AndConjunction conditions.
    """
    if not isinstance(condition, ConditionComponent):
        raise TypeError("build_condition must be called with an instance of "
                        "'%s', got '%s'" %
                        (ConditionComponent, type(condition)))
    # Find out the type of the parent hyperparameter; anything that is
    # neither categorical nor a uniform float is treated as an integer.
    pType = "integer"
    if condition.parent.__class__.__name__ == 'CategoricalHyperparameter':
        pType = 'categorical'
    if condition.parent.__class__.__name__ == 'UniformFloatHyperparameter':
        pType = 'continuous'
    # (A leftover debug print of the condition type and an unused SMAC
    # template string were removed here.)
    if isinstance(condition, AndConjunction):
        raise NotImplementedError("This is not yet implemented!")
    if type(condition).__name__ == "EqualsCondition":
        return [condition.child.name, condition.parent.name, pType, condition.value]
    return [condition.child.name, condition.parent.name, pType, [value for value in condition.values]]
def build_forbidden(clause):
    """Serialize a forbidden clause as "{name=value, ...}".

    Only ForbiddenEqualsClause and ForbiddenAndConjunction are
    supported.

    Raises
    ------
    TypeError
        If ``clause`` is not an AbstractForbiddenComponent.
    NotImplementedError
        For unsupported clause types.
    """
    if not isinstance(clause, AbstractForbiddenComponent):
        raise TypeError("build_forbidden must be called with an instance of "
                        "'%s', got '%s'" %
                        (AbstractForbiddenComponent, type(clause)))
    if not isinstance(clause, (ForbiddenEqualsClause, ForbiddenAndConjunction)):
        # BUGFIX: the two format arguments were not wrapped in a single
        # tuple, so building this message raised a TypeError instead of
        # the intended NotImplementedError.
        raise NotImplementedError("SMAC cannot handle '%s' of type %s" %
                                  (str(clause), type(clause)))
    retval = six.StringIO()
    retval.write("{")
    # Join all literal sub-clauses as "name=value" pairs; tell() > 1
    # means something was already written after the opening brace.
    dlcs = clause.get_descendant_literal_clauses()
    for dlc in dlcs:
        if retval.tell() > 1:
            retval.write(", ")
        retval.write("%s=%s" % (dlc.hyperparameter.name, dlc.value))
    retval.write("}")
    retval.seek(0)
    return retval.getvalue()
def write(configuration_space):
    """Serialize a ConfigurationSpace to a javascript file ('data.js').

    Every hyperparameter becomes an entry in a dict (see the build_*
    helpers); each condition is recorded on both sides as "dependsOn"
    (on the child) and "affects" (on the parent).  The result is
    written to data.js as ``var data_js = {...}``.

    Raises
    ------
    TypeError
        If ``configuration_space`` is not a ConfigurationSpace, or a
        hyperparameter has an unknown type.
    ValueError
        If a hyperparameter name is not a legal IRACE name.
    """
    if not isinstance(configuration_space, ConfigurationSpace):
        raise TypeError("pcs_parser.write expects an instance of %s, "
                        "you provided '%s'" % (ConfigurationSpace,
                                               type(configuration_space)))
    param_lines_dict = dict()
    for hyperparameter in configuration_space.get_hyperparameters():
        # Check if the hyperparameter names are valid IRACE names!
        try:
            pp_param_name.parseString(hyperparameter.name)
        except pyparsing.ParseException:
            raise ValueError(
                "Illegal hyperparameter name for IRACE: %s" % hyperparameter.name)
        if isinstance(hyperparameter, NumericalHyperparameter):
            param_vars = build_continuous(hyperparameter)
            param_lines_dict.update({param_vars[0]: param_vars[1]})
        elif isinstance(hyperparameter, CategoricalHyperparameter):
            param_vars = build_categorical(hyperparameter)
            param_lines_dict.update({param_vars[0]: param_vars[1]})
        elif isinstance(hyperparameter, Constant):
            param_vars = build_constant(hyperparameter)
            param_lines_dict.update({param_vars[0]: param_vars[1]})
        else:
            raise TypeError("Unknown type: %s (%s)" % (
                type(hyperparameter), hyperparameter))
    for condition in configuration_space.get_conditions():
        condition_vars = build_condition(condition)  # [child, parent, ptype, vals]
        child = condition_vars[0]
        parent = condition_vars[1]
        pType = condition_vars[2]
        vals = condition_vars[3]
        # An EqualsCondition yields a single value; normalize to a list.
        if type(vals) == str:
            vals = [vals]
        if "dependsOn" in param_lines_dict[child]:
            # BUGFIX: ``.append[...]`` (a subscript on the bound method)
            # raised a TypeError; the method must be *called*.
            param_lines_dict[child]["dependsOn"].append({parent: {"type": pType, "values": vals}})
        else:
            param_lines_dict[child]["dependsOn"] = [{parent: {"type": pType, "values": vals}}]
        if "affects" in param_lines_dict[parent]:
            param_lines_dict[parent]["affects"].append(child)
        else:
            param_lines_dict[parent]["affects"] = [child]
    with open('data.js', 'w') as outfile:
        dump = json.dumps(param_lines_dict, sort_keys=True, indent=4)
        outfile.write("var data_js = " + dump)
| true |
1003f6ece2dcac0eb4bed09c253bc75889c5da6f | Python | LukeBluett/PasswordManager | /src/passwordmanager/main.py | UTF-8 | 1,781 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env python3.4
from Account_Information import *
from Create_Password import *
from Database_Handler import *
def main():
    """Drive the password-manager workflow: obtain a password, collect
    the account details and persist everything to the sqlite database.

    NOTE: this file uses Python-2 constructs (raw_input, bare print).
    """
    title('* Password Manager *')
    choice = option_selection()
    password = get_password(choice)
    account = raw_input('Enter in Account: ')
    description = raw_input('Enter in Description: ')
    info = Account_Information(password, account, description)
    database = Database_Handler('database.sqlite', 'Password_Database')
    database.open_connection()
    database.create_table()
    # BUGFIX: get_description was passed as a bound method instead of
    # being called, so the method object ended up in the database.
    database.insert_information(info.get_password(), info.get_account(), info.get_description())
    database.close_connection()
    print
    print(info.to_string())
def title(title):
    # Print ``title`` framed above and below by a line of asterisks.
    # NOTE(review): the bare ``print`` statements emit blank lines
    # under Python 2 but are no-ops under Python 3, even though the
    # shebang names python3.4 -- confirm the intended interpreter.
    print
    print('*' * len(title))
    print(title)
    print('*' * len(title))
    print
def option_selection():
    """Show the password-source menu and return the user's selection."""
    menu = ('Select an option:\n\n'
            '\t(1)Enter it yourself\n'
            '\t(2)Generate by random charactors\n'
            '\t(3)Generate by random words\n\n'
            'Option(1, 2, 3 or quit())? ')
    return input(menu)
def get_password(choice):
    # Resolve ``choice`` (1-3) into a password string; any other value
    # re-prompts via retry().
    create_password = Create_Password(True, False, False, False)
    password = ''
    print
    if choice == 1:
        # Manual entry.
        password = raw_input('Enter in password: ')
    elif choice == 2:
        # Random characters of the requested length.
        length = int(input('Enter in length: '))
        password = create_password.random_charactors(length)
    elif choice == 3:
        # NOTE(review): option 3 advertises "random words" but calls
        # the same random_charactors() as option 2 -- looks like a
        # copy-paste slip; confirm against the Create_Password API.
        length = int(input('Enter in length: '))
        password = create_password.random_charactors(length)
    else:
        print('\nError: Invalid option \nPlease try again\n')
        password = retry()
    return password
def retry():
    """Re-run the menu and return a password for the new selection."""
    return get_password(option_selection())
# Script entry point.
if __name__=='__main__':
    main()
| true |
53d3550b7116f08a52bbea60415e4306221dc17f | Python | hathas07/FUNS | /projet-CM-Etudiant.py | UTF-8 | 1,504 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 15:47:08 2019
@author: Hassen Drira
"""
from Shadock import Exemple
from Shadock import Diagonalisation
from Shadock import stationnaire
from Shadock import simulation
from Shadock import stochastique
from Shadock import puits
from Shadock import Puissance
from Shadock import Convergence
import numpy as np
#P = np.array([ [5/6 , 1/12, 1/12], [ 1/4, 1/2, 1/4] , [ 1/4, 0, 3/4] ])
#pi0 = [1 , 0, 0]
num = 1 # num between 1 and 4: selects the example chain
P,pi0 = Exemple(num)
n = 10000
print("\nRegime stationaire :\n",stationnaire(P))
D, V = Diagonalisation(P)
print("\nValeurs Propres :\n",D)
print("\nConvergence :",Convergence (P))
print("\nMatrice à la puissance",n,":\n",Puissance(P,n))
Ouiounon = np.allclose(P,Puissance(P,1),rtol=0.001)
print("\nComparaison entre P et P^1:",Ouiounon)
# Initially in good health (all mass on state 0)
pi0 = [1 , 0, 0]
print("\nP :\n",P)
print("pi0",pi0)
# NOTE(review): this run only simulates 20 steps but pi[49] and pi[99]
# are read below -- likely an IndexError; confirm what simulation()
# returns for its last two arguments.
[t,pi] = simulation(P,pi0,1,20)
print('pi[5]',pi[4])
print('pi[10]',pi[9])
print('pi[20]',pi[19])
print('pi[50]',pi[49])
print('pi[100]',pi[99])
# Different initial distribution
pi0 = [0.4 , 0.2, 0.4]
[t,pi] = simulation(P,pi0,1,100)
print('pi[5]',pi[4])
print('pi[10]',pi[9])
print('pi[20]',pi[19])
print('pi[50]',pi[49])
print('pi[100]',pi[99])
# Different initial distribution
pi0 = [0.1 , 0.4, 0.5]
[t,pi] = simulation(P,pi0,1,100)
print('pi[5]',pi[4])
print('pi[10]',pi[9])
print('pi[20]',pi[19])
print('pi[50]',pi[49])
print('pi[100]',pi[99])
# limit(pi) = [0.6 0.1 0.3]
23579cc0635b01756dbcf5219935a7b5ea56e2ba | Python | konfer/PythonTrain | /src/train/test/ChangeAbleVariable.py | UTF-8 | 117 | 3.015625 | 3 | [] | no_license | #coding:utf-8
def foo(*a):
    # Print every positional argument on its own line
    # (Python 2 ``print`` statement -- this file is Python 2 syntax).
    for i in a:
        print i
foo(3,4,5)
b=("sss",78,"de",89)
foo(b)   # the tuple is passed as a single argument
foo(*b)  # the * operator unpacks the tuple into four arguments
0b234c30f9a2076f58c7e7f619a04e71da80e674 | Python | artcheng/eular | /39.py | UTF-8 | 342 | 2.96875 | 3 | [] | no_license | import math
from utility import *
ct = {}
# For every perimeter below 1000, count right triangles with integer
# sides (Python 2 syntax throughout).
for c in range(2, 1000):
    for a in range (1, c):
        # b^2 = c^2 - a^2, written factored.
        b_sqr = (c+a)*(c-a)
        if isSquare(b_sqr):
            b = math.sqrt(b_sqr)
            if b>=c:
                continue
            print a, b, c
            cc = a+b+c
            if cc < 1000:
                ct[cc] = ct.get(cc, 0) +1
# Find the perimeter with the largest number of solutions.
m = 0
cc = 0
for c in ct:
    if ct[c] > m:
        m = ct[c]
        cc = c
print cc, m
| true |
f77f63652d5ccd8e0882d1be1f9bf5eaeb9423f5 | Python | zutmkr/Studia | /praca_inz/pole.py | UTF-8 | 267 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
class Pole:
    # Grid cell used by a path-finding search (A*-style g/h costs --
    # presumably; confirm against the search code that uses it).
    def __init__(self,x,y,otwarty):
        self.x = x # row
        self.y = y # column
        self.g = 0  # cost accumulated so far (set externally)
        self.h = 0  # heuristic cost (set externally)
        self.suma = 0  # presumably g + h; set externally
        self.otwarty = otwarty  # "open" flag
        self.aktualny = False  # "current" flag
| true |
c8aec8e03facae6a90861672cba01c261d1423d7 | Python | hhk86/Barra | /basic function/makeStkEX.py | UTF-8 | 2,991 | 2.828125 | 3 | [] | no_license | import cx_Oracle
import numpy as np
import pandas as pd
from makeTradeCalendar import getTradeCalendar
from makeDailyUniverse import makeDailyUniverse
class OracleSql(object):
    '''
    Oracle database access helper, used as a context manager.
    '''
    def __init__(self):
        '''
        Initialize the connection parameters (host, port, schema,
        credentials).
        '''
        self.host, self.oracle_port = '18.210.64.72', '1521'
        self.db, self.current_schema = 'tdb', 'wind'
        self.user, self.pwd = 'reader', 'reader'
    def __enter__(self):
        self.conn = self.__connect_to_oracle()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.conn.close()
    def __connect_to_oracle(self):
        # Build the DSN "host:port/db" and open the connection;
        # returns None when the connection attempt fails.
        dsn = self.host + ':' + self.oracle_port + '/' + self.db
        try:
            connection = cx_Oracle.connect(self.user, self.pwd, dsn, encoding="UTF-8", nencoding="UTF-8")
            connection.current_schema = self.current_schema
            print('连接oracle数据库')
        except Exception:
            print('不能连接oracle数据库')
            connection = None
        return connection
    def query(self, sql):
        '''
        Run a query and return the result as a pandas DataFrame.
        '''
        return pd.read_sql(sql, self.conn)
    def execute(self, sql):
        '''
        Execute an upstream statement (INSERT, UPDATE, ...) and commit.
        '''
        self.conn.cursor().execute(sql)
        self.conn.commit()
def makeStkEX(start_date: str, end_date: str, daily_universe=None) -> pd.DataFrame:
    '''
    Query each stock's ex-right/ex-dividend status for every day in the
    date range (both the start and the end date included).

    There are six ex-right/ex-dividend categories: dividend, share
    reform, seasoned offering, rights issue, reverse split and Null.

    :param start_date: str, start date, "YYYYMMDD"
    :param end_date: str, end date, "YYYYMMDD"
    :param daily_universe: optional pre-built daily universe DataFrame;
        rebuilt from the trade calendar when None.
    :return: pd.DataFrame, columns = [tradeday - str, ticker - str, ex_right_dividend - bool, ex_type - str]
    '''
    sql = \
        '''
        SELECT
            EX_DATE,
            S_INFO_WINDCODE,
            EX_TYPE
        FROM
            AShareEXRightDividendRecord
        WHERE
            EX_DATE >= {0}
            AND EX_DATE <= {1}
        '''.format(start_date, end_date)
    with OracleSql() as oracle:
        EX = oracle.query(sql)
    EX.columns = ["tradeday", "ticker", "ex_type"]
    EX["ex_right_dividend"] = True
    EX = EX[["tradeday", "ticker", "ex_right_dividend", "ex_type"]]
    # Build the universe day by day when the caller did not supply one.
    if daily_universe is None:
        tradedays = getTradeCalendar(start_date, end_date)
        daily_universe = pd.DataFrame([])
        for tradeday in tradedays:
            daily_universe = daily_universe.append(makeDailyUniverse(tradeday))
    # Left-join the EX records; rows without an event get False below.
    daily_universe = pd.merge(daily_universe, EX, on=["tradeday", "ticker"], how="left")
    daily_universe["ex_right_dividend"].fillna(False, inplace=True)
    daily_universe.sort_values(by=["tradeday", "ticker"], inplace=True)
    daily_universe.index = list(range(daily_universe.shape[0]))
    return daily_universe
| true |
b74a50fa64c76bbec1a3be7f5939f2714b3a14f3 | Python | EricWangyz/Exercises | /Exam4Job/shopee0215/ttttt.py | UTF-8 | 380 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/2/15 14:40
# @Author : Eric Wang
# @File : ttttt.py
import sys
# data = [1, 3, 5, 23, 67, 135, 456]
for line in sys.stdin:
    # Each stdin line presumably looks like "[1, 3, 5]": the slice
    # drops the leading "[" and the trailing "]\n".
    size = len(line)
    print(type(line))
    line = line[1:size-2]
    print(line)
    # Parse the remaining comma-separated integers.
    data = list(map(int,line.split(", ")))
    print(data)
| true |
a037618f735a09aba7f2a0e31b4ca7524bb97e27 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_54/189.py | UTF-8 | 454 | 3.03125 | 3 | [] | no_license | #!/usr/bin/python
## Interpreter: Python 2.6.5
from fractions import gcd
T = int(raw_input())
# Python 2 script.  Note: T is reused as the period inside the loop;
# this is harmless because range(1, T + 1) is evaluated once.
for c in range(1, T + 1) :
    args = raw_input().split()
    N = int(args[0])
    t = [ int(args[i + 1]) for i in range(N) ]
    # Differences between consecutive event times.
    dt = [ abs(t[i + 1] - t[i]) for i in range(N - 1) ]
    # The period must divide every difference, so take their gcd.
    T = reduce(gcd, dt)
    maxt = max(t)
    if maxt % T == 0 :
        ans = 0
    else :
        # Wait until the next multiple of the period.
        ans = (maxt / T + 1) * T - maxt
    print "Case #{0}: {1}".format(c, ans)
| true |
54bcb74f77c6bbb0fd96c1653679a4659f08e5b6 | Python | didwns7347/algotest | /알고리즘문제/asdfasdfzcvzxcv.py | UTF-8 | 794 | 3.15625 | 3 | [] | no_license | import sys
from collections import deque
# n vertices, m edges, v = start vertex.
n,m,v=map(int,input().split())
# Visited flags for BFS (checkb) and DFS (checkd), 1-indexed.
checkb=[0 for x in range(n+1)]
checkd=[0 for x in range(n+1)]
# Undirected adjacency lists.
g=[[] for _ in range(n+1)]
for x in range(m):
    a,b=map(int,sys.stdin.readline().split())
    g[a].append(b)
    g[b].append(a)
# Visit neighbours in ascending order.
for x in g:
    x.sort()
out1=[v]
out2=[v]
checkd[v]=1
checkb[v]=1
def dfs(v):
    """Depth-first traversal from ``v``; visit order is appended to the
    module-level list ``out1``."""
    checkd[v] = 1
    for neighbour in g[v]:
        if checkd[neighbour] == 0:
            checkd[neighbour] = 1
            out1.append(neighbour)
            dfs(neighbour)
def bfs(v):
    """Breadth-first traversal from ``v``; visit order is appended to
    the module-level list ``out2``.

    Relies on the module-level graph ``g`` and visited flags ``checkb``
    (the start node is already marked by the setup code).
    """
    q = deque([v])
    # (Removed a dead ``t = 1001`` initialisation: t was always
    # overwritten by the first popleft.)
    while q:
        t = q.popleft()
        for x in g[t]:
            if checkb[x] == 0:
                checkb[x] = 1
                out2.append(x)
                q.append(x)
dfs(v)
bfs(v)
# Print the DFS order, then the BFS order, space-separated.
for x in out1:
    print(x,end=' ')
print()
for y in out2:
    print(y,end=' ')
| true |
79bfb0503311e65264935909ca9eab4796635c39 | Python | rramr/fa-python | /4. OOP/Third tasks/Task 1.py | UTF-8 | 1,833 | 3.9375 | 4 | [] | no_license | class People:
    def __init__(self, name, age) :
        # Store the person's name and age.
        self.name = name
        self.age = age
    def __str__(self) :
        # Human-readable description: "Имя: <name>, Возраст: <age>".
        return f'Имя: {self.name}, Возраст: {self.age}'
def info(self):
print (self.__class__.__name__ + ': ' + str(self))
class Worker(People):
    """A person employed in a position (``post``) with a salary."""

    def __init__(self, name, age, post, salary):
        super().__init__(name, age)
        self.post = post
        self.salary = salary

    def __str__(self):
        base = super().__str__()
        return f'{base}, Должность: {self.post}, Зарплата: {self.salary}'
class Teacher(Worker):
    """A worker who teaches a mutable list of disciplines."""

    # Default disciplines every new teacher starts with.
    _disciplines = ['Математика', 'Физика']

    def __init__(self, name, age, post, salary):
        super().__init__(name, age, post, salary)
        # BUGFIX: add_dis/delete_dis used to mutate the *class-level*
        # list, so changes made on one teacher leaked into all others.
        # Each instance now works on its own copy of the defaults.
        self._disciplines = list(self._disciplines)

    def __str__(self):
        return f'{super().__str__()}'

    def add_dis(self, dis):
        """Add a discipline and print the updated list."""
        self._disciplines.append(dis)
        print(self._disciplines)

    def delete_dis(self, dis):
        """Remove a discipline and print the updated list."""
        self._disciplines.remove(dis)
        print(self._disciplines)
p1 = People('Chung', 21)
p2 = People('Adkins', 32)
w1 = Worker('Mcfarland', 48, 'Garden worker', 12321)
w2 = Worker('Gilbert', 28, 'Truck loader', 12321)
t1 = Teacher('Fowler', 36, 'Teacher', 45800)
t2 = Teacher('Mcfarland', 53, 'Professor', 89990)
members = [p1, p2, w1, w2, t1, t2]
# Print information about every person
print("\nВывод инормации о каждом человеке\n")
for member in members:
    member.info()
# Print the names of everyone younger than 30
print("\nВывод фамилий тех, кто моложе 30 лет\n")
for member in members:
    if(int(member.age) < 30):
        print(member.name)
print("\n")
t1.add_dis('Информатика')
t1.delete_dis('Физика')
e1a5e5e06495eed0956688613d6d1e2714984dba | Python | rschroer/allhomeworks | /HW03/PyParagraph/main.py | UTF-8 | 803 | 3.96875 | 4 | [] | no_license | import os
import re
#user enters the file name (joined under the raw_data folder)
input_file=os.path.join("raw_data", input("Please type the filename in the raw data folder: "))
#initial variables
paragraph_text=""
words=[]
#read the whole file, joining lines with spaces
with open(input_file,"r") as paragraph:
    paragraph_text=paragraph.read().replace('\n', ' ')
#split into sentences, words and letters
sentences=re.split("(?<=[.!?]) +", paragraph_text)
words=re.split(r"\s+", paragraph_text)
# NOTE(review): len() of the full text counts spaces and punctuation
# too, so "Average Letter Count" is only approximate.
letters=len(paragraph_text)
#this counts the words and prints them out
print(f"Paragraph Analysis")
print(f"-----------------")
print(f"Approximate Word Count: {len(words)}")
print(f"Approximate Sentence Count: {len(sentences)}")
print(f"Average Letter Count: {format(letters/len(words),'.2f')}")
print(f"Average Sentence Length: {format(len(words)/len(sentences),'.2f')}")
d56d392f2c834ddc00de8830a5520515c0c6f17d | Python | MiaoPaSiPython/LearnPython | /learn-python-code/books/简明Python教程/ds_reference.py | UTF-8 | 843 | 3.75 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2020/7/3 15:11
# @Author : yuhui.Mr
# @Email : 1299824045@qq.com
# @File : ds_reference.py
# @Software: PyCharm
print('Simple Assignment')
shoplist = ['apple', 'mango', 'carrot', 'banana']
# mylist is just another name pointing at the same object
mylist = shoplist
# I bought the first item, so remove it from the list
del shoplist[0]
print('shoplist is', shoplist)
print('mylist is', mylist)
# Notice that shoplist and mylist both print the same list without
# 'apple', which confirms that they refer to the same object
print('Copy by making a full slice')
# Make a copy of the list by taking a full slice
mylist = shoplist[:]
# Remove the first item
del mylist[0]
print('shoplist is', shoplist)
print('mylist is', mylist)
# Notice that the two lists now differ
9355993b20c99745cd8f849d6b382e619ebe0067 | Python | VanLiuZhi/tf_gpu | /to.py | UTF-8 | 1,384 | 3 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
# Linear model y = x*W + b pinned to the first GPU (TF1 graph mode).
with tf.device('/gpu:0'):
    x = tf.placeholder(tf.float32, [None, 1])
    W = tf.Variable(tf.zeros([1, 1]))
    b = tf.Variable(tf.zeros([1]))
    y = tf.matmul(x, W) + b
y_ = tf.placeholder(tf.float32, [None, 1])
# Sum-of-squares loss minimised by plain gradient descent.
cost = tf.reduce_sum(tf.pow((y_ - y), 2))
train_step = tf.train.GradientDescentOptimizer(0.00001).minimize(cost)
init = tf.initialize_all_variables()
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess.run(init)
for i in range(100):
    # xs = np.array([[i]])
    xs = np.array([[20]])
    # ys = np.array([[3 * i]])
    ys = np.array([[40]])
    print(xs)
    feed = {x: xs, y_: ys}
    # Training adjusts from the previous step's result, so the same
    # sample can be fed repeatedly until the output matches
    # expectations: e.g. 400 iterations on this one sample reach the
    # expected fit while 100 fall short; with varied data (swap in the
    # commented lines above) 100 iterations are enough.
    sess.run(train_step, feed_dict=feed)
    print("After%diteration:" % i)
    print("W:%f" % sess.run(W))
    print("b:%f" % sess.run(b))
# In this experiment gradient descent is fed a single sample per step.
# Common variants: 1. stochastic gradient descent (one sample at a
# time) 2. mini-batch (a chunk of samples) 3. batch (all samples).
fcb7b52a6d0991f6fc14ffeda67d70db71bb04fd | Python | JhoanRodriguez/holbertonschool-higher_level_programming | /0x0B-python-input_output/9-add_item.py | UTF-8 | 443 | 2.75 | 3 | [] | no_license | #!/usr/bin/python3
"""
This file contains a function that adds
all arguments to a python list and saves
to a file
"""
import sys
# Import the save/load helpers from their digit-prefixed module files
# (a regular ``import`` statement cannot handle names that start with
# a digit).
save_to_json_file = __import__("7-save_to_json_file").save_to_json_file
load_from_json_file = __import__("8-load_from_json_file").load_from_json_file
filename = "add_item.json"
# Start from the stored list when the file exists and parses,
# otherwise from an empty list.
try:
    new = load_from_json_file(filename)
except Exception:
    new = []
# Append all command-line arguments and persist the result.
new = new + sys.argv[1:]
save_to_json_file(new, filename)
| true |
bd604c13ccb7a3a4c749422c0204fa71d3d40cd2 | Python | matt-rowlinson/NCAS_CVAO | /code/ozone_trends_all.py | UTF-8 | 2,820 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 10:31:45 2019
Script to examine different deseasonalisation techniques.
@author: ee11mr
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('seaborn-darkgrid')
plt.rcParams['figure.figsize'] = (7, 7)
filepath = '/users/mjr583/scratch/NCAS_CVAO/CVAO_datasets/'
savepath = '/users/mjr583/scratch/NCAS_CVAO/plots/'
filen = filepath+'20191007_CV_Merge.csv'
df = pd.read_csv(filen, index_col=0,dtype={'Airmass':str})
df.index = pd.to_datetime(df.index,format='%d/%m/%Y %H:%M')
cols = list(df)
# Mask non-positive values in every column that supports the
# comparison (non-numeric columns are skipped by the except).
for col in cols:
    try:
        df[col] = df[col].loc[~(df[col] <= 0.)]
    except:
        pass
## in test case just use Ozone data for now
start_year='2007'
years = np.arange(2007, 2020)
data = df['O3'][start_year:]
# Blank out summer 2009 (also done for the second analysis below).
data['2009-07-01' : '2009-09-30'] = np.nan
nyears = len((data.resample('Y').mean()).index.year)
## method three - subtracting monthly average for each observation
mean = data.mean()
std = data.std()
monmean = data.groupby(data.index.month).mean()
deseas_factor = monmean / std
# Deseasonalise: shift every observation by (overall mean - the mean
# of its calendar month).
ds3=np.zeros(len(data))
for n,m in enumerate(data.index.month):
    ds3[n] = data[n] + (mean - monmean[m])
ds3 = pd.DataFrame(ds3[:])
ds3.index = pd.to_datetime(df[start_year:].index,format='%d/%m/%Y')
# Monthly summary statistics of the deseasonalised series.
Mean = ds3.resample('M').mean()[0]
Max = ds3.resample('M').max()[0]
Min = ds3.resample('M').min()[0]
per75 = ds3.resample('M').quantile(.75)[0]
per25 = ds3.resample('M').quantile(.25)[0]
labels = ['Observations','Mean','Max','Min','75th percentile','25th percentile']
plt.rcParams['figure.figsize'] = (8, 8)
fig = plt.figure()
data_ = data.resample('M').median()
ds = [data_, Mean,Max, Min, per75, per25]
# One panel per statistic with a least-squares trend line overlaid.
for i in range(len(ds)):
    ax = fig.add_subplot(len(ds),1,i+1)
    x = data_.index[-len(ds[i]):]
    plt.plot(data_.index, data_, color='darkgrey',linestyle='--')
    ax.plot(x, ds[i])
    ax.set_title(labels[i])
    ax.set_ylabel('$O_3$ (ppbv)')
    from scipy.stats.stats import pearsonr
    # Fit a degree-1 polynomial over the finite samples only.
    y = np.array(ds[i])
    xx = np.arange(len(x))
    idx=np.isfinite(y)
    z = np.polyfit(xx[idx],y[idx],1)
    p = np.poly1d(z)
    pcc, xxxx = pearsonr(xx[idx],y[idx])
    # NOTE(review): z[0] is the slope per month and *1e3 converts
    # ppb -> ppt, yet the label reads 'ppt yr^-1' -- check the units.
    ax.set_title(labels[i]+': '+str(np.round(z[0]*1e3,2))+' ppt $yr^{-1}$')
    ax.plot(x,p(xx),'r--')
plt.xlim(data.index[0], data.index[-1])
#plt.ylim(13,44)
plt.tight_layout()
plt.savefig(savepath+'/trends_ozone.png')
plt.close()
# Inter-seasonal amplitude (max - min of the monthly means) per year.
data = df['O3']['2007':'2018']
data['2009-07-01' : '2009-09-30'] = np.nan
nyears = len((data.resample('Y').mean()).index.year)
data_ = data.resample('M').mean()
interseas=[] ; i=0 ; j=12
for y in years[:-1]:
    Ma = np.nanmax(data_[i:j])
    Mi = np.nanmin(data_[i:j])
    interseas.append(Ma-Mi)
    i=i+12 ; j=j+12
4c78bf104d6b8e55833576457678135447a2cdb9 | Python | theS3b/TM-1.0 | /TM 1.0/python interface/playAgainstAi.py | UTF-8 | 2,178 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 17:15:19 2019
@author: seb
"""
import time
import os
from detectPieces import get_move, get_board_map
from clientConnexion import SocketConnexion
from movePiece import set_magnet_on, set_magnet_off
from ledControl import *
from talking import saying_win, saying_lost
from makeAiMove import makeAiMove
def playAgainstAi(conn, magnet):
    """Run one human-vs-AI chess game on the physical board.

    Waits for the board to be in the start position, then loops:
    read the player's move, send it to the AI over ``conn`` and execute
    the AI's reply with the magnet.  Returns True when the game
    finished normally, False when the player asked for the menu or an
    error occurred.
    """
    try:
        haswon = False
        first = True
        while not haswon:
            # Wait until every piece sits in the initial-position bitmap.
            while get_board_map() != 0xFFFF00000000FFFF:
                time.sleep(1)
            big_light_on()
            wait_for_big_activation()
            big_light_off()
            print("[+] Beginning to play")
            # blinking led to start
            while True:
                move = get_move(not first)
                time.sleep(1) # leave time in case of roque
                if first:
                    first = False
                if move == "MENU":
                    conn.send_data("ME")
                    return False
                # Sending move
                print("[*] Sending move :", move)
                conn.send_data(move)
                # Receiving move from AI
                ai_move = conn.recv_data()
                if ai_move == "BADMOVE":
                    print("[*] Bad move played.")
                    big_light_on()
                    wait_for_big_activation()
                    big_light_off()
                    continue
                print("[+] Received :", ai_move)
                if ai_move == "WI":
                    print("[++] You won !")
                    saying_win()
                    break
                elif ai_move[:2] == "PR":
                    print("[*] Promotion from the black player.")
                    ai_move = ai_move[:2]
                haswon = makeAiMove(ai_move, magnet)
                if haswon:
                    saying_lost()
                    break
            set_magnet_off() # security
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer silently swallowed; they now
        # propagate after the finally block runs.
        return False
    finally:
        # Always de-energize the magnet on every exit path (success,
        # error, menu request or Ctrl-C) so it can never be left on.
        set_magnet_off()
    return True
389facd7544292711c2741c4f19cfa087a74a1e9 | Python | joan-kii/Automate-boring-stuff-with-Python | /Chapter 17/prettifiedStopwatch.py | UTF-8 | 1,338 | 3.5 | 4 | [] | no_license | #!python3
#prettifiedStopwatch.py Pues eso, un stopwatch.
import time, pyperclip
# Tell the user how the stopwatch works.
print('\nPulsa ENTER para comenzar. Después, pulsa ENTER de nuevo para parar el reloj en cada vuelta. Para salir, pulsa Ctrl + C.')
# The count starts when the user presses ENTER.
input()
print('¡Vamos!')
tiempoInicio = time.time()
ultimoTiempo = tiempoInicio
numVuelta = 1
# On every ENTER, update the lap time, the total time and the lap
# number; Ctrl + C stops the stopwatch.
try:
    while True:
        input()
        # Round to 2 decimal places.
        tiempoVuelta = round(time.time() - ultimoTiempo, 2)
        tiempoTotal = round(time.time() - tiempoInicio, 2)
        # Left- and right-justify the fields respectively.
        numVueltaStr = str(numVuelta).ljust(2)
        tiempoVueltaStr = str(tiempoVuelta).rjust(5)
        tiempoTotalStr = str(tiempoTotal).rjust(5)
        # Print the lap number and timings.
        print((f'Lap # {numVueltaStr}: {tiempoVueltaStr} ({tiempoTotalStr})'), end='')
        numVuelta += 1
        ultimoTiempo = time.time()
    # NOTE(review): this line sits outside the while loop, and the loop
    # only exits via KeyboardInterrupt, so the clipboard copy is never
    # reached -- it probably belongs inside the loop body.
    pyperclip.copy(round(tiempoTotal, 2))
# Stop the stopwatch.
except KeyboardInterrupt:
    print('\n¡Fin!\n')
d757743b34b32efc2915112b35b4a24262fc40fc | Python | alvinoc/programming-lab | /linhas_cruzadas.py | UTF-8 | 693 | 3.46875 | 3 | [] | no_license | def mergeSort(array):
    inv = 0
    # Arrays of length 0/1 are already sorted and hold no inversions.
    if len(array) > 1:
        mid = len(array) // 2
        L = array[:mid]
        R = array[mid:]
        # Count inversions inside each half recursively (the recursive
        # calls also sort the halves in place).
        inv += mergeSort(L)
        inv += mergeSort(R)
        # Sentinels let the merge loop run without bounds checks.
        L.append(float("inf"))
        R.append(float("inf"))
        j, k = 0, 0
        for i in range(len(array)):
            if L[j] <= R[k]:
                array[i] = L[j]
                j += 1
            else:
                array[i] = R[k]
                k += 1
                # Every element still pending in L (sentinel excluded)
                # is greater than R[k]: each forms one inversion.
                inv += len(L) - j - 1
    return inv
def main():
    """Read a count line and a line of integers from stdin, then print
    the number of inversions (sorting the values as a side effect)."""
    _count = int(input())
    values = [int(token) for token in input().split()]
    print(mergeSort(values))
main()
c334f8a214506511b963a1fd9efb8721047a389b | Python | acadien/lazyPlot | /smoothing.py | UTF-8 | 976 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
from numpy import *
from scipy import weave
from scipy.weave import converters
#uses a gaussian smooth convoluted with finite differences to get an absurdly smooth line but with edge effects
superSmoothCode="""
double pre=0.3989422804014327/sigma;
double dx,xmus;
for(int a=0;a<N;a++){
for(int b=0;b<N;b++){
if(b==0)
  dx = xs[b+1]-xs[b];
if(b==N-1)
  dx = xs[b]-xs[b-1];
if(b>1 && b<N-1)
  dx = (xs[b+1]-xs[b-1])/2.0;
xmus = (xs[a]-xs[b])/sigma;
smoothys[a] += pre * exp( xmus * xmus * -0.5) * ys[b] * dx;
}}
"""
def superSmooth(xs,ys,sigma=0.1):
    """Gaussian-kernel smooth of ys sampled at xs, evaluated by the
    inline C snippet above.

    NOTE(review): depends on scipy.weave, which is Python-2 only.
    """
    N=len(ys)
    smoothys=zeros(N)
    xs=array(xs)
    ys=array(ys)
    weave.inline(superSmoothCode,['xs','ys','N','smoothys','sigma'])
    return smoothys
#1D data
def windowAvg(a,n=11,option='same'):
    """Moving average of ``a`` using a length-``n`` boxcar kernel.

    ``option`` is passed straight through to numpy.convolve
    ('same' by default).
    """
    kernel = ones(n) / n
    return convolve(a, kernel, option)
| true |
b832c262c4b21d7da555b9147bb18b421435b917 | Python | MiMoText/roman18 | /Python-Scripts/archive/hyphen/trennung_auflösen_part2.py | UTF-8 | 2,942 | 3.0625 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | import glob
import os.path
def line_reader(document):
    """Read ``document`` (UTF-8) and return its lines with surrounding
    whitespace stripped.

    BUGFIX: the file handle was previously never closed; the ``with``
    block now guarantees it.
    """
    with open(document, 'r', encoding='utf8') as handle:
        return [line.strip() for line in handle.readlines()]
'''this script deals with hyphen at the end of a line, if there are xml-tags as well
here we have three different cases: with <hi>-element, line-breaks-</C> and both:<hi> and </C>
as input we take files from previous step: "trennung_auflösen". We took a big range for the case of linebreak, because
it may occur, that we have footnotes at the and of a page with linebreak.'''
def hyphen_tags(inhalt):
new_tail = []
tail =[]
old_tail =[]
process = False
for number, line in enumerate(inhalt):
if process:
line = inhalt[number].replace('<hi rend="italic">'+ word_tail, '')
if '-<C/>' in line:
for i in range(number+1,number+30):
if inhalt[i].startswith('<O/>'):
word_suf = inhalt[i]
old_tail.append(word_suf)
word_tail = inhalt[i].split(' ')[0].replace('<O/>','')
new_line =word_suf.replace(word_tail+' ', '')
tail.append(new_line)
line = line.replace('<C/>','')+word_tail+'<C/>'
process= False
break
if line.endswith('-</hi>'):
next_line= inhalt[number+1]
line = line.replace('-</hi>', '-')
next_line = next_line.replace('<hi rend="italic">', '',1)
word_tail = next_line.split(' ')[0]
line = line+word_tail
line = line.replace('</p></hi>','</hi></p>')
process = True
if line.endswith('-</hi><C/>'):
print(line,inhalt[number+2])
for i in range(number+1,number+10):
if inhalt[i].startswith('<O/>'):
word_tail = inhalt[i].split(' ')[1].replace('rend="italic">','')
line = line.replace('</hi><C/>','')+word_tail+'<C/>'
#print(word_tail+str(number)+inhalt[1-10])
process = True
new_tail.append(line)
text = '\n'.join(new_tail)
return old_tail,tail, text
def replacement(old_data, new_data, file):
for index, replaced_word in enumerate(old_data):
file = file.replace(replaced_word, new_data[index])
return file
def main():
for document in glob.glob("C:/Users/yulya/PycharmProjects/TEI-XML/xmls_for_plain1/*.xml" ):
roman = line_reader(document)
roman = hyphen_tags(roman)
roman = replacement(roman[0], roman[1], roman[2])
save_path = 'C:/Users/yulya/PycharmProjects/TEI-XML/xmls_for_plain2/'
name = os.path.basename(document)
fullname = os.path.join(save_path, name)
fa = open(fullname, 'w', encoding="utf8")
fa.write(roman)
fa.close
if __name__ == "__main__":
main() | true |
86e22cdce841585174471efdc61aad25b3a1017f | Python | samtx/ecen760 | /Friedman_Sam_hw2.py | UTF-8 | 8,561 | 3.53125 | 4 | [] | no_license | # Sam Friedman
# 10/9/2018
# HW 2
# ECEN 760
from __future__ import print_function
import sys
class Graph(object):
"""
Graph object that has sets of nodes and edges
"""
def __init__(self, edges=set(), nodes=set()):
self.parents = {}
self.children = {}
self.nodes = nodes
if len(nodes) > 0:
self.size = len(nodes)
else:
self.size = 0
self.edges = edges
self.build_parent_and_child_sets()
def add_node(self, node):
"""
Add node to graph
"""
if node not in self.nodes:
self.size += 1
self.nodes.add((node))
else:
pass
def build_parent_and_child_sets(self):
"""
Loop through set of edges and
- add nodes to Graph object
- create Graph.parents, Graph.children dictionaries
"""
for edge in self.edges:
u, v = edge
self.add_node(u)
self.add_node(v)
if v not in self.parents:
self.parents.update({v: set(u)})
else:
self.parents[v].add(u)
if u not in self.children:
self.children.update({u: set(v)})
else:
self.children[u].add(v)
def get_parents(self, node):
"""
Return parents of node
"""
if node in self.parents:
return self.parents[node]
else:
return set()
def get_children(self, node):
"""
Return children of node
"""
if node in self.children:
return self.children[node]
else:
return set()
def is_active(self, a, b, Z):
"""
Verify if there exists an active trail from node a to b given evidence set Z
"""
# Find set Y of all nodes that are d_separated from a
Y = self.d_separated(a, Z)
# If b is in Y then there is no active trail
return not (b in Y)
def d_separated(self, X, Z):
"""
Find the set Y of all d-separated nodes such that
d-sep_G(X indep Y | Z) is True
Uses the Reachable() algorithm 3.1 in PGM book
"""
# Find all reachable nodes from node X given Z
R = self.reachable(X, Z)
# Return nodes in G that are not in R and not in Z
dsep_nodes = self.nodes - R - Z
return dsep_nodes
def reachable(self, X, Z):
"""
Find all nodes that are reachable via an active trail from the given
node and evidence set.
Based on Algorithm 3.1, page 75, in PGM book
inputs:
X: starting node
Z: set of observations
outputs:
R: set of nodes reachable via active trail
"""
# Phase 1: insert all ancestors of Z into A
L = Z.copy() # set L to be the set of observations
A = set() # set A to be the empty set
while len(L) != 0:
Y = L.pop() # remove an item from the set
if Y not in A:
L = L.union(self.get_parents(Y)) # Y's parents need to be visited
A = A.union(Y) # Y is ancestor of evidence
# Phase 2: traverse active trails starting from node
L = {(X, 'up')}
V = set() # (node, direction) marked as visited
R = set() # Nodes reachable via active trail
while len(L) != 0:
# select some (Y, dr) from L
(Y, dr) = L.pop()
if (Y, dr) not in V:
if Y not in Z:
R = R | {Y} # Y is reachable
V = V | {(Y, dr)} # mark (Y, dr) as visited
if dr == 'up' and Y not in Z:
# trail up through Y active if Y not in Z
for z in self.get_parents(Y):
L = L.union({(z, 'up')}) # Y's parents to be visited from bottom
for z in self.get_children(Y):
L = L.union({(z, 'down')}) # Y's children to be visited from top
elif dr == 'down':
# trails down through Y
if Y not in Z:
# downward trails to Y's children are active
for z in self.get_children(Y):
L = L.union({(z, 'down')}) # Y's children to be visited from top
if Y in A:
# v-structure trails are active
for z in self.get_parents(Y):
L = L.union({(z, 'up')}) # Y's parents to be visited from bottom
return R
def read_file(fname):
"""
Read problem file, generate graph and questions
"""
g_list = [] # list of graphs
q_list = [] # list of queries
edges = set()
queries = []
V, M, Q = 0, 0, 0
with open(fname) as f:
for raw_line in f:
# split the line into components separated by whitespace
line = raw_line.split()
# skip '#' as comment
if line[0][0] == '#':
continue
# New Graph description
elif all([x.isdigit() for x in line]):
edges = set()
queries = []
V, M, Q = [int(x) for x in line]
# V: number of nodes
# M: number of edges
# Q: number of queries
# Edges
elif all([x.isalpha() for x in line]) and len(edges) < M:
u, v = line
edges = edges | {(u, v)}
# Queries
elif line[1] == '|' and len(queries) < Q:
y = line[0] # source node Y
z = {x for x in line[2:]} # evidence set Z
queries.append((y, z))
# Create Graph object
if (len(edges) == M) and (len(queries) == Q):
G = Graph(edges=edges)
# Validate graph
err = False
if len(G.nodes) != V:
print('Not the correct number of nodes')
err = True
if len(G.edges) != M:
print('Not the correct number of edges')
err = True
if err:
return
g_list.append(G)
q_list.append(queries)
return g_list, q_list
if __name__ == "__main__":
if len(sys.argv) > 1:
# read filename as argument
fname = sys.argv[1]
g_list, q_list = read_file(fname)
# Run queries from file
for i in range(len(g_list)):
G = g_list[i] # graph
for j in range(len(q_list[i])):
q = q_list[i][j] # query
X, Z = q
# Evaluate query
dsep_nodes = G.d_separated(X, Z)
# print results to stdout
if not dsep_nodes:
out_str = 'None'
else:
out_str = ""
for x in dsep_nodes:
out_str += str(x) + " "
print(out_str)
else:
# Build Graph from homework 1, problem 3
edges = {('A', 'D'), ('B', 'D'), ('D', 'G'), ('D', 'H'), ('G', 'K'), ('H', 'K'),
('H', 'E'), ('C', 'E'), ('E', 'I'), ('F', 'I'), ('F', 'J'), ('I', 'L'),
('J', 'M')}
G = Graph(edges=edges)
# Check answers to HW 1, Problem 3
print('Check answers from HW 1, Problem 3:')
# parts 3(a)-(e)
queries = {
'a': ('A', 'J', {'G', 'L'}),
'b': ('A', 'C', {'L'}),
'c': ('G', 'L', {'D'}),
'd': ('G', 'L', {'D', 'K', 'M'}),
'e': ('B', 'F', {'C', 'G', 'L'})
}
for i in ['a', 'b', 'c', 'd', 'e']:
q = queries[i]
out_str = '(3{}) Active trail from {} to {} given {}? {}'.format(
i,
q[0],
q[1],
q[2],
G.is_active(q[0], q[1], q[2]))
print(out_str)
# parts 3(f)-(g)
queries.update({
'f': ('A', {'K', 'E'}),
'g': ('B', {'L'})
})
for i in ['f', 'g']:
q = queries[i]
out_str = '(3{}) Nodes d-separated from {} given {} = {}'.format(
i,
q[0],
q[1],
G.d_separated(q[0], q[1]))
print(out_str) | true |
7ec0bc8e758f0d49847a230d67a4aaaa64787eb2 | Python | vladokovac/aoc-2018 | /2/day2.py | UTF-8 | 1,786 | 3.671875 | 4 | [] | no_license | import array
def main():
with open("2.txt") as input_file:
input_lines = input_file.readlines()
input_lines = [x.strip() for x in input_lines]
two_repeating_letters = 0
three_repeating_letters = 0
# part 1
for input_word in input_lines:
letter_count = array.array('I', (0 for i in range(0, 26)))
for character in input_word:
letter_count[ord(character) - 97] += 1
has_two_repeating_letters = False
has_three_repeating_letters = False
for count in letter_count:
if not has_two_repeating_letters and count == 2:
two_repeating_letters += 1
has_two_repeating_letters = True
elif not has_three_repeating_letters and count == 3:
three_repeating_letters += 1
has_three_repeating_letters = True
checksum = two_repeating_letters * three_repeating_letters
print(checksum)
# part 2
for i in range(0, len(input_lines)):
for j in range(0, len(input_lines)):
if i == j:
continue
distance = calculate_string_distance(input_lines[i], input_lines[j])
if distance == 1:
common_characters = [c for c in input_lines[i] if c in input_lines[j]]
print(i, j, ''.join(common_characters))
break
def calculate_string_distance(first_string, second_string):
if len(first_string) == 0:
return len(second_string)
if len(second_string) == 0:
return len(first_string)
if first_string[0] == second_string[0]:
distance = 0
else:
distance = 1
return distance + calculate_string_distance(first_string[1:], second_string[1:])
if __name__ == "__main__":
main()
| true |
4f065f9533426e8228da05204da0612a7fb8fe98 | Python | ztaylor2/cracking-the-coding-interview | /chapter_2/CTCI_2_2.py | UTF-8 | 371 | 3.421875 | 3 | [
"MIT"
] | permissive | """."""
def kth_to_last(k, node):
"""."""
curr_node = node
kth_to_last_nodes = []
for _ in range(k):
if not curr_node.next:
raise ValueError('k larger than list')
curr_node = curr_node.next
while curr_node:
kth_to_last_nodes.append(curr_node.val)
curr_node = curr_node.next
return kth_to_last_nodes
| true |
264c1aa812385f57611eafe4e841a568d77f0d7b | Python | upple/BOJ | /src/15000/15351.py3.py | UTF-8 | 258 | 3.125 | 3 | [
"MIT"
] | permissive | import sys
n=int(input())
for p in range(n):
str=sys.stdin.readline()
ans=0
for ch in str:
if ch.isalpha():
ans+= ord(ch)-ord('A')+1
if ans==100:
print("PERFECT LIFE")
else:
print(ans) | true |
0bc595a662a4f4263f21d056eef04cca5659bb72 | Python | mckayav3/FinTech | /Python_Project/Module_2/cli.py | UTF-8 | 411 | 2.8125 | 3 | [] | no_license | import fire
import random
def clothes_picker(pants=False):
shirts_list = ["solid blue", "red striped", "purple and green tie dye","black dress shirt"]
pants_list = ["Black dress pants", "Gray sweatpants","Khakis"]
if pants:
return random.choice(shirts_list), random.choice(pants_list)
return random.choice(shirts_list)
if __name__ == '__main__':
fire.Fire(clothes_picker) | true |
c93fe2e9420703a5e5a709b22c933b03a009cf31 | Python | code-evince/Competitive-Programming-3-The-New-Lower-Bound-of-Programming-Contests | /Introduction/Getting Started : The Easy Problems/Super Easy/12250 - Language Detection.py | UTF-8 | 363 | 3.734375 | 4 | [] | no_license | i=1
while(True):
text = input()
if(text == '#'):
break
language = {"HELLO":"ENGLISH","HOLA":"SPANISH","HALLO":"GERMAN","BONJOUR":"FRENCH","CIAO":"ITALIAN","ZDRAVSTVUJTE":"RUSSIAN"}
if(text in language):
print("Case {}: {}".format(i,language[text]))
i+=1
else:
print("Case {}: UNKNOWN".format(i))
i=i+1
| true |
ba7da7ff6188638ec66e56c415f856d5174d9318 | Python | nick95a/Python_Practice | /Deque.py | UTF-8 | 1,352 | 4.34375 | 4 | [] | no_license | class Deque:
'''
Deque class based on the built-in list datatype in Python
'''
def __init__(self):
'''
Creates an empty container in the form of a list
'''
self.deque = []
def pushBack(self, item):
'''
The method pushes the item argument provided to the back of the deque
:param item:
:return:
'''
self.deque.append(item)
def pushFront(self, item):
'''
The method pushes the item argument provided to the front of the deque.
This is peculiar to the deque ADT and distinguishes it from the queue for example
:param item:
:return:
'''
self.deque.insert(0, item)
def popBack(self):
'''
The method removes an item from the back of the deque.
:return:
'''
self.deque.pop()
def popFront(self):
'''
The method removes an item from the front of the deque.
:return:
'''
self.deque.pop(0)
def isEmpty(self):
'''
Method checks where the existing deque is empty.
:return:
'''
return len(self.deque) == 0
def getSize(self):
'''
Method returns the number of items present in the deque.
:return:
'''
return len(self.deque)
| true |
f4a123989efe488f2c7ad79d1887c59c84de8dab | Python | renekm/ReneEGebara | /ep3.py | UTF-8 | 2,678 | 3.140625 | 3 | [] | no_license | 3# -*- coding: utf-8 -*-
"""
Created on Wed Apr 20 08:58:03 2016
@author: Rene Martinez
"""
class Jogo:
def __init__(self):
self.M = [[1,2,3], [4,5,6], [7,8,9]]
self.jogadas=0
def recebe_jogada (self, linha, coluna):
if self.jogadas %2 == 0:
self.M[linha][coluna] == "O"
self.jogadas += 1
else:
self.M[linha][coluna] == "X"
self.jogadas += 1
def verifica_ganhador(self):
if self.jogadas >= 4 and self.jogadas<9:
if self.M[0][0] == "X" and self.M[0][1] == "X" and self.M[0][2] == "X":
return 1
elif self.M[1][0] == "X" and self.M[1][1] == "X" and self.M[1][2] == "X":
return 1
elif self.M[2][0] == "X" and self.M[2][1] == "X" and self.M[2][2] == "X":
return 1
elif self.M[0][0] == "X" and self.M[1][1] == "X" and self.M[2][2] == "X":
return 1
elif self.M[0][0] == "X" and self.M [1][0] == "X" and self.M[2][0] == "X":
return 1
elif self.M[0][1] == "X" and self.M[1][1] == "X" and self.M[2][1] == "X":
return 1
elif self.M[0][2] == "X" and self.M[1][2] == "X" and self.M[2][2] == "X":
return 1
elif self.M[0][2] == "X" and self.M[1][1] == "X" and self.M[2][0] == "X":
return 1
elif self.M[0][0] == "O" and self.M[0][1] == "O" and self.M[0][2] == "O":
return 2
elif self.M[1][0] == "O" and self.M[1][1] == "O" and self.M[1][2] == "O":
return 2
elif self.M[2][0] == "O" and self.M[2][1] == "O" and self.M[2][2] == "O":
return 2
elif self.M[0][0] == "O" and self.M[1][1] == "O" and self.M[2][2] == "O":
return 2
elif self.M[0][2] == "O" and self.M[1][1] == "O" and self.M[2][0] == "O":
return 2
elif self.M[0][0] == "O" and self. M [1][0] == "O" and self.M[2][0] == "O":
return 2
elif self.M[0][1] == "O" and self.M[1][1] == "O" and self.M[2][1] == "O":
return 2
elif self.M[0][2] == "O" and self.M[1][2] == "O" and self.M[2][2] == "O":
return 2
elif self.jogadas == 9:
return 0
else:
return -1
def limpa_jogadas(verifica_ganhador):
a = verifica_ganhador()
if a == 1 or a == 2 or a == 0:
self.M = [[1,2,3], [4,5,6], [7,8,9]]
self.jogadas=0
| true |
e69045b1622cb68aeb7baa418c026ef244d8bde6 | Python | jzm-123/test | /Distributed_instagram_spider/util/backup/test_sleep.py | UTF-8 | 133 | 2.859375 | 3 | [] | no_license | import random
import time
second = random.randint(0,60)
print('before:',time.time())
time.sleep(second)
print('after:',time.time())
| true |
13d01048ff3fd092cdeb06304c8151d042557836 | Python | JIANGWQ2017/Algorithm | /Leetcode/LC5.py | UTF-8 | 656 | 3.59375 | 4 | [] | no_license | class Solution:
def longestPalindrome(self, s: str) -> str:
res= ""
for i in range(len(s)):
temp = self.findPalindromic(i,i,s)
if len(temp)>len(res):
res = temp
for i in range(len(s)-1):
temp = self.findPalindromic(i,i+1,s)
if len(temp)>len(res):
res = temp
return res
def findPalindromic(self,left,right,s):
while s[left] == s[right]:
if left>0 and right<len(s)-1:
left -= 1
right += 1
else:
return s[left:right+1]
return s[left+1:right] | true |
19118170b29eff731ab3b709ddc2c0d5a9192ad6 | Python | toddcblank/pitboss | /pokerroom/payouts.py | UTF-8 | 1,050 | 3.171875 | 3 | [] | no_license | PAYOUTS = {
0: [0],
1: [1],
2: [2],
3: [2, 1],
4: [3, 1],
5: [3, 2],
6: [3.5, 2.5],
7: [3.5, 2, 1.5],
8: [4, 2.5, 1.5],
9: [4.5, 3, 1.5],
10: [5, 3, 2],
11: [5.5, 3.5, 2],
12: [6.5, 3.5, 2],
13: [6.5, 4, 2.5],
14: [6, 4, 2.5, 1.5],
15: [7, 4, 2.5, 1.5],
16: [7.5, 4.5, 2.5, 1.5],
17: [8, 4.5, 3, 1.5],
18: [8, 5, 3, 2],
19: [8.5, 5.5, 3, 2],
20: [9, 5.5, 3.5, 2],
21: [9.5, 6, 3.5, 2],
22: [10.5, 6, 3.5, 2]
}
def getPrizeForPlace(players, place, buyin):
if place > len(PAYOUTS[players]):
return 0
return PAYOUTS[players][place - 1] * buyin
def getPoyPointsForPlace(numberOfPlayers, place, cashed):
defaultPoints = 7.5
topNinePoints = [100, 70, 50, 44, 38, 33, 28, 24, 20]
multiplier = numberOfPlayers/9.0
if not cashed:
multiplier *= .1
pointsForPlace = defaultPoints
if len(topNinePoints) > place - 1:
pointsForPlace = topNinePoints[place - 1]
return int(pointsForPlace * multiplier)
| true |
71a61f9a660db84c994e03f01357eff309da487e | Python | sssv587/PythonFullStackStudy | /day07_dict/tuple01.py | UTF-8 | 1,823 | 4.78125 | 5 | [] | no_license | '''
总结列表:
list
1、定义
l = [] 空列表
l = ['aaa']
2.符号
+ ----> 合并 [] + []
* ----> [] * n
in ----> a in [] False / True
not in ---->
is 地址是否相等
not is
3.系统中给列表提供的函数
len(list) ----> int
sorted(list) ----> 排序
max(list) ----> 最大值
min(list) ----> 最小值
list(list) ----> 转换为list类型
enumeate(list) ----> index,value
4.列表自身的函数:
添加函数:
append() 末尾添加
extend() 末尾添加一组元素
insert() 指定位置插入
删除:
del list[index]
remove(obj) 删除指定的元素,如果指定元素不存在则报异常
pop() 队列 FIFO 栈 FILO 默认删除最后一个元素
clear() 清空元素
其他:
count() 指定元素个数
sort() 排序
reverse() 反转
算法:
选择排序:
冒泡排序:
'''
'''
元祖:
类似列表(当成容器)
特点:
1.定义的符号:()
2.元祖中的内容不可修改
3.关键字:tuple
列表 元祖
[] ()
[1] (1,)
[1,2] (1,2)
'''
t1 = ()
print(type(t1)) # <class 'tuple'>
t2 = ('hello',)
print(type(t2))
t3 = ('aa','bb')
print(type(t3))
#
t4 = (3,4,5,1,2,3,4,5,6)
# 增删改 查
import random
list1=[]
for i in range(10):
ran = random.randint(1,20)
list1.append(ran)
print(list1)
# tuple() list()
t5 = tuple(list1)
print(t5)
# 查询:下标index 切片 [:]
print(t5[0])
print(t5[-1])
print(t5[2:-3])
print(t5[::-1])
# 最大值 最小值
print(max(t5))
print(min(t5))
# 求和
print(sum(t5))
# 求长度
print(len(t5))
# 元祖中的函数:
# index() ----> 个数
# count() ----> 下标
print(t5.count(1)) # 个数
print(t5.index(1)) # 从t5这个元祖中找出4的下标位置,没有报错 ValueError: tuple.index(x): x not in tuple
| true |
cbc78f881084b4c11fba6575883b662c88b04ca4 | Python | oaifaye/pyfirst | /nlp/_03_word2vec/__init__.py | UTF-8 | 6,903 | 2.609375 | 3 | [] | no_license | #utf-8
'''
https://www.cnblogs.com/Lin-Yi/p/9007259.html
'''
from gensim.models import word2vec
from gensim.models.word2vec import LineSentence
import jieba
import pymysql
class MyWord2vec():
'''
# 配置词向量的维度
num_features = 1000
# 保证被考虑的词汇的频度
min_word_count = 5
# 并行计算使用cpu核心数量
num_workers = 2
# 定义训练词向量的上下文窗口大小
context = 5
downsapling = 1e-3
'''
def __init__(self,num_features = 1000,min_word_count = 5,num_workers = 2,context = 5,
downsapling = 1e-3,stop_words='hlt_stop_words.txt',model_path=None,txt_path = None):
# 配置词向量的维度
self.num_features = num_features
# 保证被考虑的词汇的频度
self.min_word_count = min_word_count
# 并行计算使用cpu核心数量
self.num_workers = num_workers
# 定义训练词向量的上下文窗口大小
self.context = context
# 下采样
self.downsapling = downsapling
self.stop_words = stop_words
self.txt_path = txt_path
self.model_path = model_path
#获取停用词
def getstopwords(self):
# step 1 读取停用词
stop_words = []
with open(self.stop_words,encoding='utf-8') as f:
line = f.readline()
while line:
stop_words.append(line[:-1])
line = f.readline()
stop_words = set(stop_words)
return stop_words
def train(self):
stop_words = self.getstopwords()
sentences = []
with open(self.txt_path,encoding='utf-8') as f:
line = f.readline()
while line:
nostopwords = list(jieba.cut(line,cut_all=False))
line_words = []
for word in nostopwords:
if word not in stop_words:
line_words.append(word)
sentences.append(line_words)
line = f.readline()
print('开始word2vec...')
# 训练词向量模型
model = word2vec.Word2Vec(sentences=sentences,
workers=self.num_workers,
size=self.num_features,
min_count=self.min_word_count,
window=self.context,
sample=self.downsapling)
# 这个设定代表当前训练好的词向量为最终版, 也可以加速模型训练的速度
# model.init_sims(replace=True)
print('开始保存模型...')
model.save(self.model_path)
print('训练完成...')
def test(self,words,topn=10):
model = word2vec.Word2Vec.load(self.model_path)
wordslink = []
for w in words:
# print(w,':',model.wv.similar_by_word(w, topn = topn))
link = model.wv.similar_by_word(w, topn = topn)
wordslink.append(link)
return wordslink
#生成词频的排名 wordcount_file:生成的词频文件
def wordsorder(self,wordcount_file,insert2mysql=False):
# step 1 读取停用词
stop_words = self.getstopwords()
word_lst = []
with open(self.txt_path,encoding='utf-8') as f:
line = f.readline()
while line:
nostopwords = list(jieba.cut(line,cut_all=False))
for word in nostopwords:
if word not in stop_words:
word_lst.append(word)
line = f.readline()
word_dict= {}
with open(wordcount_file,'w') as wf2: #打开文件
for item in word_lst:
if item not in word_dict: #统计数量
word_dict[item] = 1
else:
word_dict[item] += 1
orderList=list(word_dict.values())
orderList.sort(reverse=True)
if insert2mysql:
db = pymysql.connect("10.0.251.50","root","1234qwer","real_calc_20180410",charset='utf8' )
cursor = db.cursor(pymysql.cursors.DictCursor)
for i in range(len(orderList)):
for key in word_dict:
if word_dict[key]==orderList[i] and key != '\n':
try:
wf2.write(key+' '+str(word_dict[key])+'\n') #写入txt文档
if insert2mysql:
cursor.execute("INSERT INTO tj_words (word, count, list_order, canshow) VALUES ('%s', %s, 1, -1);" % (key,str(word_dict[key])))
except Exception as e:
print(e)
word_dict[key]=0
if insert2mysql:
cursor.close()
def insertWordsLinkByNum(self,limit,wordscount = 20):
db = pymysql.connect("10.0.251.50","root","1234qwer","real_calc_20180410",charset='utf8' )
cursor = db.cursor(pymysql.cursors.DictCursor)
sql = "select * from tj_words a order by a.count desc limit %s" % (limit)
cursor.execute(sql)
rows = cursor.fetchall()
words = []
for row in rows:
words.append(row['word'])
self.insertWordsLinkByWords(words,wordscount)
def insertWordsLinkByWords(self,words,wordscount= 20):
wordslinks = self.test(words, wordscount)
print('开始插入数据库...')
print(wordslinks)
db = pymysql.connect("10.0.251.50","root","1234qwer","real_calc_20180410",charset='utf8' )
cursor = db.cursor(pymysql.cursors.DictCursor)
for i in range(len(words)):
print(i)
word = words[i]
wordslink = wordslinks[i]
for j in range(len(wordslink)):
wl = wordslink[j]
sql = "INSERT INTO tj_words_link (word, link_word, weight, list_order) VALUES ('%s', '%s', %s, %s)" % (word,wl[0],wl[1],str(j+1))
cursor.execute(sql)
cursor.close()
myWord2vec = MyWord2vec(model_path='model_file/word2vec.model',txt_path = 'news-all.txt')
#生成词频的排名
# myWord2vec.wordsorder('wordCount1.txt',True)
#执行训练
# myWord2vec.train()
words = ['习近平','总书记','政府','公司','天津','北方网','中国','服务','天津','建设',
'文化','创新','孩子','平台','建设','经济','科技','智能','数据','城市',
'未来','北京','社会','健康','学习','未来','集团','环境','社区','产业']
#测试
# myWord2vec.test(words,15)
#插入词关系
myWord2vec.insertWordsLinkByNum(1000,40) | true |
8eabb1b246360c5bd0ad410b2939871bdb46aa8e | Python | EliteGirls/Camp2017 | /Girls10/Rhodaline and Linah/ohlo.py | UTF-8 | 481 | 3.140625 | 3 | [] | no_license | c=1
while c ==1:
c=input("press 1 to continue or any key to exit")
if c!=1:
break
score=input("please enter your score")
if score <=100 and score >80:
print "A"
elif score<=79 and score >70:
print "B"
elif score<=69 and score >60:
print "C"
elif score<=59 and score >50:
print "D"
elif score<=49 and score >45:
print "E"
else:
print "F"
| true |
b67007a5fd35f73dd715c753ed595c0d2ed45a24 | Python | kmui2/Rule-Game-server | /python/client-socket.py | UTF-8 | 1,736 | 3 | 3 | [] | no_license | #!/usr/bin/python
#----------------------------------------------------------------------
#-- This is a sample Python program that plays a game with a socket-based
#-- Game Server
#--
#-- Usage:
#-- client-socket.py host port rule-filet nPieces
#-- e.g.
#-- client-socket.py localhost 7501 game-data/rules/rules-01.txt 5
#----------------------------------------------------------------------
import subprocess, sys, re, random, json
import gameLoop
import socket
#game='game-data/rules/rules-01.txt'
host=sys.argv[1]
port=int(sys.argv[2])
game=sys.argv[3]
# nPieces = '5'
#-- this is a string, not a number!
nPieces = sys.argv[4]
sys.stdout.write("Port="+repr(port)+" Rule file=" + game +", #pieces=" + nPieces+"\n")
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "Socket successfully created"
except socket.error as err:
print "socket creation failed with error %s" %(err)
try:
host_ip = socket.gethostbyname(host)
except socket.gaierror:
# this means could not resolve the host
print "there was an error resolving the host"
sys.exit()
# connecting to the server
try:
sock.connect((host_ip, port))
except socket.error as err:
print "socket connect failed with error %s" %err
sys.exit()
print "the socket has successfully connected to host=" + host_ip+ " on port=" + repr(port)
#-- see https://docs.python.org/2/library/socket.html
#-- https://www.linuxtopia.org/online_books/programming_books/python_programming/python_ch36s06.html
rfile = sock.makefile('rb')
wfile = sock.makefile('wb')
msg = "GAME \"" + game + "\" " + nPieces
print "Sending: " + msg + "\n"
wfile.write(msg + "\n")
wfile.flush()
gameLoop.mainLoop(rfile, wfile)
| true |
a57d0b54b77d0f67651cbf7353fe3371eed67814 | Python | vp1961/Parser | /HTMLparser/models.py | UTF-8 | 2,503 | 2.578125 | 3 | [] | no_license | from django.db import models
import uuid
import requests
from datetime import datetime, timedelta, timezone
from lxml import html
import threading
class Task(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
url = models.URLField(max_length=100, verbose_name='URL')
minutes = models.PositiveIntegerField(default=0, verbose_name='минуты', blank=True)
seconds = models.PositiveIntegerField(default=0, verbose_name='секунды', blank=True)
start_time = models.DateTimeField(verbose_name='время старта')
title = models.CharField(max_length=100, verbose_name='заголовок')
code = models.CharField(max_length=100, verbose_name='кодировка')
header = models.CharField(max_length=100, blank=True, verbose_name='H1')
report = models.CharField(max_length=100)
is_done = models.BooleanField(verbose_name='выполнено')
def save(self):
self.start_time = datetime.now(timezone.utc) + timedelta(minutes=self.minutes, seconds=self.seconds)
self.is_done = False
self.report = ''
self.title = ''
self.code = ''
self.header = ''
models.Model.save(self, Task)
all_time = self.minutes*60 + self.seconds
task_timer = threading.Timer(all_time, self.parse, args=())
task_timer.daemon = True
task_timer.start()
def parse(self):
result = {}
try:
response = requests.get(self.url)
except requests.exceptions.RequestException as error:
result['report'] = error
if not result:
if response.status_code == 200:
tree = html.fromstring(response.text)
titles = tree.xpath('//title/text()')
encodes = tree.xpath('//meta[@charset]/@charset')
headers = tree.xpath('//h1/text()')
result['title'] = titles[0].strip() if titles else ''
result['code'] = encodes[0].strip() if encodes else response.encoding
result['header'] = ' /// '.join(map(lambda x: x.strip(), headers))
result['report'] = 'ОК({})'.format(response.status_code)
else:
result['report'] = 'FAIL({})'.format(response.status_code)
result['is_done'] = True
result['start_time'] = datetime.now(timezone.utc)
Task.objects.filter(url=self.url).update(**result)
def __str__(self):
return self.url
| true |
ba8dcdf2ff23c0dc10190ae16a95f7f4924efac6 | Python | ByteHackr/Image_Processing_Practice | /Basics/Noise.py | UTF-8 | 862 | 2.859375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 10 09:57:44 2019
@author: BILU
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
import random
img = np.array([])
img = cv2.imread('Scan1.jpg',1)
#img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
rows, columns, dim = img.shape
p = 0.05
output = np.zeros(shape=(rows,columns,dim),dtype = int)
for i in range(rows):
for j in range(columns):
r = random.random()
if r < p/2:
output[i][j] = 0
elif r < p:
output[i][j] = 255
else:
output[i][j] = img[i][j]
plt.subplot(1,2,1)
plt.imshow(img)
plt.title('Original Image')
plt.subplot(1,2,2)
plt.imshow(output)
plt.title('Noise Image')
plt.show()
#output = cv2.cvtColor(output,cv2.COLOR_BGR2RGB)
cv2.imwrite('Noise_image.jpg',output) | true |
d88cae5f0f7eba722869280fcf081f4d422cf822 | Python | LONG990122/PYTHON | /第一阶段/2. Python01/day05/exercise/07_even.py | UTF-8 | 358 | 4.25 | 4 | [] | no_license | # 练习:
# 输入一个整数用begin绑定,再输入一个整数用end绑定,打印出从begin~end(包含end)的所有偶数
# (建议用continue语句跳过奇数)
begin = int(input("请输入一个开始整数: "))
end = int(input("请输入一个结束整数: "))
for x in range(begin, end):
if x % 2 == 1:
continue
print(x)
| true |
8da7de21f47f498b846679e9648398e774912759 | Python | srishti88/spy_chat | /message.py | UTF-8 | 1,757 | 3.046875 | 3 | [] | no_license | import sys
from termcolor import colored, cprint
from friends import *
from datetime import datetime
from steganography.steganography import Steganography
#using steganography library to encrypt
def encrypt_message():
input_image = raw_input("please select an image to encode: ")
input_message = raw_input("please enter a message you would like to encrypt: ")
friend.chat.append(input_message + " " + str(datetime.now()))
out_image = raw_input("plese enter the specific name of the encoded image : ")
Steganography.encode(input_image,out_image,input_message)
output = "message encoded"
print output
# encrypt_message()
#stegnography to decrypt message
def decrypt_message():
image_path = raw_input("please select a image to decode : ")
try: #used exception handaling
decrypted_message = Steganography.decode(image_path)
text = colored(decrypted_message, 'black')
print text
if len(decrypted_message) > 100:
exit(0)
else:
number_of_words = len(decrypted_message)
if "SOS" in decrypted_message.upper():
print("Save our souls")
elif "SAVE ME" in decrypted_message.upper():
print("need immediate help")
return decrypted_message
except:
print("image doesnot contain any secert message")
def send_message():
friend_choice =int( select_a_friend())
print ("your message has been successfully send to %s ")%(friends['name'][friend_choice -1])
def read_chat_history():
read_for = select_a_friend()
for chat in friend.chat:
print ("Your message is : " + chat)
print (colored(friend.name,'red'),colored(chat,'black'),colored(datetime,'blue'))
| true |
15f3f5e11c5ff222b1aa2978a90b04a5450c2a18 | Python | Aswinpkrishnan94/Fabulous-Python | /Python/Day 9/Secret_Auction.py | UTF-8 | 1,038 | 3.515625 | 4 | [
"MIT"
] | permissive | # importing clear function
from replit import clear
# To display logo
from art import logo
print(logo)
# State variables
bids = {}
bidding_finished = False
# Bidding Process. Each bidder and their bid amount is stored. Continues until bidding is bidding_finished
while not bidding_finished:
name = input("What is your name?: ")
price = int(input("What is your bid?: $"))
bids[name] = price
should_continue = input("Are there any other bidders? Type 'yes or 'no'.\n")
if should_continue == "no":
bidding_finished = True
highest_bidder(bids)
elif should_continue == "yes":
def highest_bidder(bidding_record):
highest_bid = 0
winner = ""
# bidding_record = name, bid amount
for bidder in bidding_record:
# Check for highest bidder
bid_amount = bidding_record[bidder]
if bid_amount > highest_bid:
highest_bid = bid_amount
winner = bidder
print(f"The winner is {winner} with a bid of ${highest_bid}") | true |
c0d8d42059694d8e77018124c7c3bb351326cbe9 | Python | AleksandraZv/Netology | /Homework_2.5/2.5_homework.py | UTF-8 | 631 | 2.578125 | 3 | [] | no_license | import os
import subprocess
def lets_convert():
cur_dir = os.path.dirname(__file__)
path = os.path.join(cur_dir, 'Source/')
try:
os.makedirs('Result/')
except OSError:
pass
folder = os.listdir(path)
for pic in folder:
# convert = (convert, os.path.join(cur_dir, 'Source', pic), '-resize', '200',
# os.path.join(cur_dir, 'Result', pic)]
convert = r'C:\Program Files\ImageMagick-7.0.5-Q16\magick.exe {} -resize 200 {}'.format(os.path.join('Source', pic), os.path.join('Result', pic))
print(convert)
subprocess.run(convert)
lets_convert()
| true |
25969b638b30d5f6d94e5040332e795053e99b44 | Python | fuston05/Data-Structures | /queue/queue.py | UTF-8 | 6,551 | 4.4375 | 4 | [] | no_license | """
A queue is a data structure whose primary purpose is to store and
return elements in First In First Out order.
1. Implement the Queue class using an array as the underlying storage structure.
Make sure the Queue tests pass.
2. Re-implement the Queue class, this time using the linked list implementation
as the underlying storage structure.
Make sure the Queue tests pass.
3. What is the difference between using an array vs. a linked list when
implementing a Queue?
An array-backed queue must either shift every remaining element forward on each
dequeue (O(n)) or maintain extra head-index bookkeeping, whereas a linked list
with head and tail references gives O(1) enqueue and dequeue — at the cost of
one extra pointer per element and non-contiguous storage.
Stretch: What if you could only use instances of your Stack class to implement the Queue?
What would that look like? How many Stacks would you need? Try it!
"""
class Node:
    """A single element of a singly linked list."""

    def __init__(self, value, next=None):
        self.value = value
        self.next_node = next

    def get_value(self):
        """Return the payload stored in this node."""
        return self.value

    def get_next(self):
        """Return the successor node, or None at the end of the list."""
        return self.next_node

    def set_next(self, new_next):
        """Repoint this node's successor reference at `new_next`."""
        self.next_node = new_next
class LinkedList:
    """Singly linked list with head and tail pointers.

    `add_to_tail` and `remove_head` are O(1); `remove_tail`, `contains`
    and `get_max` are O(n) because nodes carry no back reference.
    """
    def __init__(self):
        # the first Node in the LinkedList
        self.head = None
        # the last Node in the LinkedList
        self.tail = None

    def add_to_tail(self, data):
        """Append `data` at the end of the list. O(1)."""
        new_node = Node(data)
        if not self.head and not self.tail:
            # empty list: the new node is both head and tail
            self.head = new_node
            self.tail = new_node
        else:
            self.tail.set_next(new_node)
            self.tail = new_node

    def remove_tail(self):
        """Remove the last node and return its data (None if empty). O(n)."""
        if self.tail is None:
            return None
        data = self.tail.get_value()
        if self.head is self.tail:
            # only one node: list becomes empty
            self.head = None
            self.tail = None
        else:
            # walk from the head to the node right before the tail
            current = self.head
            while current.get_next() != self.tail:
                current = current.get_next()
            # BUGFIX: sever the link to the removed node. Previously the old
            # tail stayed linked, so contains()/get_max() still saw it.
            current.set_next(None)
            self.tail = current
        return data

    def remove_head(self):
        """Remove the first node and return its data (None if empty). O(1)."""
        if self.head is None:
            return None
        data = self.head.get_value()
        if self.head is self.tail:
            self.head = None
            self.tail = None
        else:
            self.head = self.head.get_next()
        return data

    def contains(self, data):
        """Return True if `data` is stored somewhere in the list. O(n)."""
        current = self.head
        while current is not None:
            if current.get_value() == data:
                return True
            current = current.get_next()
        return False

    def get_max(self):
        """Return the largest value in the list (None if empty). O(n)."""
        if self.head is None:
            return None
        max_so_far = self.head.get_value()
        current = self.head.get_next()
        while current is not None:
            if current.get_value() > max_so_far:
                max_so_far = current.get_value()
            current = current.get_next()
        return max_so_far
# *****************************************************************
# class Queue:
# def __init__(self):
# self.size = 0
# self.storage = LinkedList()
# def __len__(self):
# return self.size
# def enqueue(self, value):
# self.storage.add_to_tail(value)
# self.size += 1
# def dequeue(self):
# if self.size > 0:
# popped = self.storage.remove_head()
# self.size -= 1
# return popped
# list implementation
class Queue:
    """FIFO queue backed by a Python list (newest item sits at index 0)."""

    def __init__(self):
        self.size = 0
        self.storage = []

    def __len__(self):
        return self.size

    def enqueue(self, value):
        """Add `value` at the back of the queue."""
        self.storage.insert(0, value)
        self.size += 1

    def dequeue(self):
        """Remove and return the oldest value, or None when empty."""
        if not self.size:
            return None
        self.size -= 1
        return self.storage.pop()
| true |
f25fb1816ebcf27a19d8b308d0a8aee23a628b04 | Python | krisfris/pyfongo | /pyfongo/__init__.py | UTF-8 | 10,490 | 2.671875 | 3 | [
"MIT"
] | permissive | import os
import shutil
from bson import ObjectId, json_util
from collections import namedtuple
from operator import itemgetter
from itertools import islice
from atomicwrites import atomic_write
from pymongo import ASCENDING, DESCENDING, errors # noqa
InsertOneResult = namedtuple('InsertOneResult', ['inserted_id'])
InsertManyResult = namedtuple('InsertManyResult', ['inserted_ids'])
def _project(doc, projection):
"""Return new doc with items filtered according to projection."""
def _include_key(key, projection):
for k, v in projection.items():
if key == k:
if v == 0:
return False
elif v == 1:
return True
else:
raise ValueError('Projection value must be 0 or 1.')
if projection and key != '_id':
return False
return True
return {k: v for k, v in doc.items() if _include_key(k, projection)}
def _match(doc, query):
"""Decide whether doc matches query."""
for k, v in query.items():
if doc.get(k, object()) != v:
return False
return True
def _iter_docs(col, query, projection, sort, skip, limit):
docs = (_project(x, projection) for x in col._iter_col()
if _match(x, query))
# Apply sort
if sort is not None:
s = list(docs)
for key, direction in reversed(sort):
s = sorted(s, key=itemgetter(key),
reverse=True if direction == -1 else False)
docs = iter(s)
# Apply skip and limit
docs = islice(docs, skip, (skip+limit) if limit else None)
return docs
class Cursor:
    """Lazy result iterator for a Collection query, mimicking pymongo's Cursor.

    The query is not executed until the first iteration step; sort/skip/limit
    may only be configured before that point.
    """
    # NOTE(review): the {} defaults are mutable default arguments; harmless
    # here because the cursor never mutates query/projection.
    def __init__(self, collection, query={}, projection={}, sort=None,
                 skip=0, limit=0):
        self._col = collection
        self._query = query
        self._projection = projection
        self._sort = sort
        self._skip = skip
        self._limit = limit
        # _docs stays None until the query is executed; it doubles as the
        # "has this cursor been used yet?" flag.
        self._docs = None
    def _execute(self):
        # Materialize the underlying iterator with the options chosen so far.
        self._docs = _iter_docs(self._col, self._query, self._projection,
                                self._sort, self._skip, self._limit)
    def _check_okay_to_chain(self):
        # Chaining option setters after iteration started is an error.
        if self._docs is not None:
            raise errors.InvalidOperation('cannot set options after executing query')
    def __iter__(self):
        return self
    def __next__(self):
        # First access triggers the actual query execution.
        if self._docs is None:
            self._execute()
        return next(self._docs)
    def sort(self, key_or_list, direction=None):
        """Sorts this cursor's results.

        Takes either a single key and a direction, or a list of (key,
        direction) pairs. The key(s) must be an instance of ``(str,
        unicode)``, and the direction(s) must be one of
        (:data:`~pymongo.ASCENDING`,
        :data:`~pymongo.DESCENDING`). Raises
        :class:`~pymongo.errors.InvalidOperation` if this cursor has
        already been used. Only the last :meth:`sort` applied to this
        cursor has any effect.
        """
        self._check_okay_to_chain()
        if isinstance(key_or_list, str):
            # Single key: default to ascending order.
            if direction is None:
                direction = 1
            self._sort = [(key_or_list, direction)]
        elif isinstance(key_or_list, list):
            self._sort = key_or_list
        else:
            raise TypeError('key_or_list has invalid type')
        return self
    def skip(self, n):
        """Skips the first `n` results of this cursor.

        Raises TypeError if skip is not an instance of int. Raises
        InvalidOperation if this cursor has already been used. The last `n`
        applied to this cursor takes precedence.
        """
        if not isinstance(n, int):
            raise TypeError('skip must be an int')
        self._check_okay_to_chain()
        self._skip = n
        return self
    def limit(self, n):
        """Limits the number of results to be returned by this cursor.

        Raises TypeError if limit is not an instance of int. Raises
        InvalidOperation if this cursor has already been used. The
        last `n` applied to this cursor takes precedence. A limit
        of ``0`` is equivalent to no limit.
        """
        if not isinstance(n, int):
            raise TypeError('n must be an int')
        self._check_okay_to_chain()
        self._limit = n
        return self
    def count(self, with_limit_and_skip=False):
        """Get the size of the results set for this query.

        Returns the number of documents in the results set for this query. Does
        not take :meth:`limit` and :meth:`skip` into account by default - set
        `with_limit_and_skip` to ``True`` if that is the desired behavior.
        """
        # Counting runs a fresh unsorted iteration; it does not consume the
        # cursor's own iterator.
        if with_limit_and_skip:
            docs = _iter_docs(self._col, self._query, self._projection,
                              None, self._skip, self._limit)
        else:
            docs = _iter_docs(self._col, self._query, self._projection,
                              None, None, None)
        return sum(1 for _ in docs)
class Collection:
    """One collection stored as a directory of JSON files (one file per insert)."""
    def __init__(self, path):
        self._path = path
        os.makedirs(path, exist_ok=True)
    def _iter_col(self):
        # Stream every document from every JSON file in this collection's dir.
        for filename in os.listdir(self._path):
            path = os.path.join(self._path, filename)
            with open(path) as f:
                docs = json_util.loads(f.read())
            yield from docs
    def find(self, query={}, projection={}):
        """Return a lazy Cursor over matching documents."""
        return Cursor(self, query, projection)
    def find_one(self, query={}, projection={}):
        """Return the first matching document, or None when there is none."""
        try:
            return next(self.find(query, projection))
        except StopIteration:
            return None
    def distinct(self, key):
        """Return the distinct values stored under `key` (order unspecified)."""
        values = set()
        for doc in self.find():
            if key in doc:
                values.add(doc[key])
        return list(values)
    def insert_one(self, doc):
        """Insert one document, assigning an ObjectId when missing."""
        if '_id' not in doc:
            doc['_id'] = ObjectId()
        # Create new file for now, TODO change this later
        path = os.path.join(self._path, str(doc['_id']) + '.json')
        with atomic_write(path) as f:
            f.write(json_util.dumps([doc]))
        return InsertOneResult(doc['_id'])
    def insert_many(self, docs):
        """Insert several documents into a single JSON file (named after the
        first document's _id)."""
        for doc in docs:
            if '_id' not in doc:
                doc['_id'] = ObjectId()
        # Create new file for now, TODO change this later
        path = os.path.join(self._path, str(docs[0]['_id']) + '.json')
        with atomic_write(path) as f:
            f.write(json_util.dumps(docs))
        return InsertManyResult([doc['_id'] for doc in docs])
    def update_one(self, query, update):
        """Apply the $set fields of `update` to matching docs.

        NOTE(review): this only examines the FIRST file in the directory,
        rewrites it unconditionally, updates every match in that file (not
        just one), and returns without a result object -- confirm intent.
        """
        for filename in os.listdir(self._path):
            path = os.path.join(self._path, filename)
            with open(path) as f:
                docs = json_util.loads(f.read())
            for doc in docs:
                if _match(doc, query):
                    for k, v in update['$set'].items():
                        doc[k] = v
            with atomic_write(path, overwrite=True) as f:
                f.write(json_util.dumps(docs))
            return # TODO return correct value
        return # TODO return correct value
    def update_many(self, query, update):
        """Apply the $set fields of `update` to every matching document.

        NOTE(review): like update_one, only the first file is processed
        before returning -- matches in later files are never updated.
        """
        for filename in os.listdir(self._path):
            path = os.path.join(self._path, filename)
            with open(path) as f:
                docs = json_util.loads(f.read())
            matched = False
            for doc in docs:
                if _match(doc, query):
                    matched = True
                    for k, v in update['$set'].items():
                        doc[k] = v
            if matched:
                with atomic_write(path, overwrite=True) as f:
                    f.write(json_util.dumps(docs))
            return # TODO return correct value
    def delete_one(self, query):
        """Delete at most one matching document (first file that contains one)."""
        for filename in os.listdir(self._path):
            path = os.path.join(self._path, filename)
            with open(path) as f:
                docs = json_util.loads(f.read())
            new_docs = []
            matched_count = 0
            for doc in docs:
                # Keep everything except the first match in this file.
                if _match(doc, query) and matched_count == 0:
                    matched_count += 1
                    continue
                else:
                    new_docs.append(doc)
            if matched_count > 0:
                with atomic_write(path, overwrite=True) as f:
                    f.write(json_util.dumps(new_docs))
                return # TODO return correct value
        return # TODO return correct value
    def delete_many(self, query):
        """Delete every matching document.

        NOTE(review): every file is rewritten even when nothing matched.
        """
        for filename in os.listdir(self._path):
            path = os.path.join(self._path, filename)
            with open(path) as f:
                docs = json_util.loads(f.read())
            docs = [x for x in docs if not _match(x, query)]
            with atomic_write(path, overwrite=True) as f:
                f.write(json_util.dumps(docs))
        return # TODO return correct value
class Database:
    """A directory full of collections; mirrors pymongo's Database object."""

    def __init__(self, path):
        self._path = path
        os.makedirs(path, exist_ok=True)

    def collection_names(self):
        """Names of the collections that exist on disk."""
        return os.listdir(self._path)

    def __getattr__(self, attr):
        # db.some_name lazily materializes a Collection directory.
        return Collection(os.path.join(self._path, attr))

    __getitem__ = __getattr__
class FongoClient:
    """Filesystem-backed stand-in for pymongo's MongoClient.

    Attribute (or item) access yields a Database rooted under this
    client's base directory.
    """

    def __init__(self, path):
        self._path = path

    def database_names(self):
        """List the databases that currently exist on disk."""
        return os.listdir(self._path)

    def drop_database(self, name):
        """Delete the named database directory and everything inside it."""
        target = os.path.join(self._path, name)
        shutil.rmtree(target)

    def __getattr__(self, attr):
        return Database(os.path.join(self._path, attr))

    __getitem__ = __getattr__
class PyFongo:
    """This class is for flask apps that use flask_pymongo."""

    def init_app(self, app):
        """Wire up the client and database from the app's FONGO_* config keys."""
        client = FongoClient(app.config['FONGO_PATH'])
        self._cx = client
        self._db = client[app.config['FONGO_DBNAME']]

    @property
    def cx(self):
        """The underlying FongoClient."""
        return self._cx

    @property
    def db(self):
        """The configured Database instance."""
        return self._db
if __name__ == '__main__':
    # Smoke test: build a throwaway store, insert two docs, read one back.
    import tempfile
    with tempfile.TemporaryDirectory() as tmpdir:
        print('using tmpdir', tmpdir)
        class App:
            # Mimics a Flask app object: PyFongo.init_app only reads .config.
            config = dict(FONGO_DBNAME='hello', FONGO_PATH=tmpdir)
        fongo = PyFongo()
        fongo.init_app(App())
        r = fongo.db.dataset_data.insert_one({'hello': 'world'})
        r = fongo.db.dataset_data.insert_one({'hello': 'peter'})
        r = fongo.db.dataset_data.find_one()
        print(r)
| true |
cddef8737e6ec9fb2b79b1bf18225adf07e0f1a3 | Python | kangmihee/EX_python | /py_hypo_tensor/pack/ten30rnn.py | UTF-8 | 881 | 3.171875 | 3 | [] | no_license | # RNN - sequence data로 자연어에 대해 이전문자를 참조하여 다음문자를 예측
import tensorflow as tf
import numpy as np
# test1 : 1,1,4
# data = np.array([[[1,0,0,0]]], dtype=np.float32)
# print(data.shape)
# test2 : one-hot encoding한 여러개 사용 - 1,2,3,4,5
one_hot = [[[1,0,0,0,0],[0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1]]]
data = np.array(one_hot, dtype=np.float32)
hidden_size = 2 # 출력 수
#cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden_size)
cell = tf.nn.rnn_cell.LSTMCell(num_units=hidden_size) # cell 생성
outputs, states = tf.nn.dynamic_rnn(cell, data, dtype=tf.float32)
print(outputs.shape) # (1:batch size, 1:sequence수, 2:출력수)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
#print(sess.run(outputs))
print(outputs.eval()) # [[[ 0.04574287 -0.03133253]]]
| true |
3929b075ebcc095bde15c1e090c1ca4536996c95 | Python | tazbingor/EzPascal | /test.py | UTF-8 | 533 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 测试
from interpreter import Interpreter
from lexer import Lexer
def main():
    """REPL: read expressions, run them through the interpreter, print results."""
    while True:
        try:
            try:
                text = raw_input('ezpas> ')  # Python 2
            except NameError:
                text = input('ezpas> ')      # Python 3
        except EOFError:
            break
        if not text:
            continue
        lexer = Lexer(text)
        interpr = Interpreter(lexer)
        result = interpr.expr()
        # BUGFIX: `print result` is Python-2-only syntax, breaking the
        # raw_input/input fallback above under Python 3; parenthesized print
        # works on both interpreters.
        print(result)


if __name__ == '__main__':
    main()
| true |
2255a449b45d7f823d80e151c54290b7e1714524 | Python | Lakhanbukkawar/Python_programs | /FunctionTotakeNameAndDisplayMessage.py | UTF-8 | 88 | 3.296875 | 3 | [] | no_license | def name(x):
return x
a=input("enter the name")
print("happy birthday",name(a))
| true |
bf754f39b9de1abd54afd78dfc0fdf4162003c97 | Python | bojone/small_norb | /main.py | UTF-8 | 512 | 2.515625 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
from smallnorb.dataset import SmallNORBDataset
plt.ion()
if __name__ == '__main__':
# Initialize the dataset from the folder in which
# dataset archives have been uncompressed
dataset = SmallNORBDataset(dataset_root='./smallnorb/')
# Dump all images to disk
dataset.export_to_jpg(export_dir='smallnorb_export')
# Explore random examples of the training set
# to show how data look like
dataset.explore_random_examples(dataset_split='train')
| true |
49117f6a26e7c9c351025c115e666583bdb56d51 | Python | luanhsd/librarysort_aaed | /sorts.py | UTF-8 | 5,870 | 3.421875 | 3 | [] | no_license | def bubblesort(array):
status = True
compare = 0
moves = 0
for i in range(len(array)):
for j in range(1, len(array) - i):
if array[j] < array[j - 1]:
array[j], array[j - 1] = array[j - 1], array[j]
moves += 3
status = False
compare += 1
if status:
break
return compare, moves
def insertionsort(array):
    """Sort `array` in place with insertion sort.

    Returns (comparisons, moves). BUGFIX: comparisons are now counted once
    per element inspection inside the shifting loop -- previously at most one
    comparison per outer iteration was recorded, regardless of how far the
    element travelled. The redundant i == 0 iteration is also skipped so its
    two no-op moves are no longer counted.
    """
    compare = 0
    moves = 0
    for i in range(1, len(array)):
        aux = array[i]
        moves += 1  # copy the element being inserted into the temporary
        j = i - 1
        while j >= 0:
            compare += 1  # one comparison per data inspection
            if aux < array[j]:
                array[j + 1] = array[j]
                moves += 1
                j -= 1
            else:
                break
        array[j + 1] = aux
        moves += 1  # write the element into its final slot
    return compare, moves
def selectionsort(array):
    """Sort `array` in place with selection sort.

    Returns (comparisons, moves). BUGFIX: every inner-loop inspection now
    counts as a comparison (previously only successful ones did), and the
    no-op self swap when the minimum is already in place is skipped instead
    of being billed 3 moves.
    """
    compare = 0
    moves = 0
    for i in range(len(array) - 1):
        min_idx = i
        for j in range(i + 1, len(array)):
            compare += 1  # count the inspection whether or not it hits
            if array[j] < array[min_idx]:
                min_idx = j
        if min_idx != i:  # skip the pointless swap-with-itself
            array[i], array[min_idx] = array[min_idx], array[i]
            moves += 3
    return compare, moves
# Module-level instrumentation counters shared by heapify/heapsort.
compare_heap = 0
moves_heap = 0


def heapify(array, n, i):
    """Sift array[i] down within the heap array[:n]; updates the counters."""
    global compare_heap
    global moves_heap
    largest = i
    left = 2 * i + 1
    right = 2 * i + 2
    if left < n:
        compare_heap += 1  # count every inspection, not only the hits
        if array[largest] < array[left]:
            largest = left
    if right < n:
        compare_heap += 1
        if array[largest] < array[right]:
            largest = right
    if largest != i:
        array[i], array[largest] = array[largest], array[i]
        moves_heap += 3
        heapify(array, n, largest)


def heapsort(array):
    """Sort `array` in place with heap sort.

    Returns (comparisons, moves) for THIS call. BUGFIX: the global counters
    are now reset at the start of each call -- previously a second heapsort
    call returned the accumulated totals of every previous call.
    """
    global compare_heap
    global moves_heap
    compare_heap = 0
    moves_heap = 0
    n = len(array)
    # Build a max heap bottom-up.
    for i in range(n // 2 - 1, -1, -1):
        heapify(array, n, i)
    # Repeatedly move the max to the end and restore the heap property.
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        moves_heap += 3
        heapify(array, i, 0)
    return compare_heap, moves_heap
def mergesort(array):
    """Sort `array` in place with merge sort.

    Returns (comparisons, moves). BUGFIX: the counts of the recursive calls
    are now accumulated -- previously only the top-level merge was counted.
    """
    compare = 0
    moves = 0
    if len(array) > 1:
        mid = len(array) // 2
        left = array[:mid]
        right = array[mid:]
        c, m = mergesort(left)
        compare += c
        moves += m
        c, m = mergesort(right)
        compare += c
        moves += m
        # Merge the two sorted halves back into `array`.
        i = j = k = 0
        while i < len(left) and j < len(right):
            compare += 1
            if left[i] < right[j]:
                array[k] = left[i]
                moves += 1
                i += 1
            else:
                array[k] = right[j]
                moves += 1
                j += 1
            k += 1
        while i < len(left):  # drain leftovers
            array[k] = left[i]
            moves += 1
            i += 1
            k += 1
        while j < len(right):
            array[k] = right[j]
            moves += 1
            j += 1
            k += 1
    return compare, moves
def quicksort(array, inicio=0, fim=None):
    """Sort array[inicio:fim+1] in place with quicksort (Lomuto partition).

    Returns (comparisons, moves). BUGFIX: counts from the recursive calls
    are now accumulated -- previously they were silently discarded and only
    the top-level partition's counts were returned.
    """
    compare = 0
    moves = 0
    if fim is None:
        fim = len(array) - 1
    if inicio < fim:
        p, c, m = partition(array, inicio, fim)
        compare += c
        moves += m
        c, m = quicksort(array, inicio, p - 1)
        compare += c
        moves += m
        c, m = quicksort(array, p + 1, fim)
        compare += c
        moves += m
    return compare, moves


def partition(array, inicio, fim):
    """Lomuto partition around array[fim].

    Returns (pivot_index, comparisons, moves); each swap counts 3 moves.
    """
    compare = 0
    moves = 0
    pivot = array[fim]
    i = inicio
    for j in range(inicio, fim):
        compare += 1
        if array[j] <= pivot:
            array[j], array[i] = array[i], array[j]
            i = i + 1
            moves += 3
    # Place the pivot into its final position.
    array[i], array[fim] = array[fim], array[i]
    moves += 3
    return i, compare, moves
def librarysort(array):
    """Library sort (gapped insertion sort) over a copy with interleaved gaps.

    Returns (comparisons, moves). The input list itself is NOT modified;
    sorting happens inside the gapped auxiliary list.
    """
    compare = 0
    moves = 0
    length = len(array)
    # Build the gapped list (None = empty slot) and seed it with the input,
    # one element per odd index.
    auxList = [None]*(length << 1)
    for i in range(length):
        auxList[2*i+1] = array[i]
    a, b = 1, 2
    for i in range(length):
        # Each round doubles the portion [a, b) of elements being inserted.
        a <<= 1
        b <<= 1
        for j in range(a, min(b, length+1)):
            p = 2*j-1
            s = auxList[p]
            # Binary search for the insertion window [x, y] within the gaps.
            x, y = 0, p
            while y-x > 1:
                c = (x+y) >> 1
                if auxList[c] != None:
                    if auxList[c] < s:
                        x = c
                    else:
                        y = c
                    compare += 1
                else:
                    # Midpoint fell on a gap: probe the nearest occupied
                    # neighbors on both sides before deciding.
                    e, f = c-1, c+1
                    while auxList[e] == None:
                        e -= 1
                    while auxList[f] == None:
                        f += 1
                    if auxList[e] > s:
                        y = e
                    elif auxList[f] < s:
                        compare += 1
                        x = f
                    else:
                        compare += 1
                        x, y = e, f
                        break
                    compare += 1
            if y-x > 1:
                # A gap exists inside the window: drop the element there.
                auxList[(x+y) >> 1] = s
                moves += 1
            else:
                if auxList[x] != None:
                    if auxList[x] > s:
                        y = x
                        compare += 1
                    # No gap: shift elements right until a free slot absorbs s.
                    while s != None:
                        auxList[y], s = s, auxList[y]
                        y += 1
                        moves += 3
                else:
                    auxList[x] = s
                    moves += 1
            auxList[p] = None
        if b > length:
            break
        if i < length-1:
            # Rebalance: spread the occupied slots out to restore the gaps.
            s = p
            while s >= 0:
                if auxList[s] != None:
                    auxList[s], auxList[p] = None, auxList[s]
                    p -= 2
                    moves += 3
                s -= 1
    return compare, moves
if __name__ == '__main__':
    # Small smoke test for the sorting routines above.
    arr = [12, 11, 13, 5, 6, 7]
    print("Given array is", end="\n")
    print(arr)
    # BUGFIX: heapsort sorts in place and returns (comparisons, moves); the
    # old code printed that counter tuple under the "Sorted array" label.
    counters = heapsort(arr)
    print("Sorted array is: ", end="\n")
    print(arr)
    print("(comparisons, moves):", counters)
| true |
f4174d741e9383afa4a2df4bc081dd2eaf96d639 | Python | omerkap/bulboard | /runners/screen_usages_orchestrator.py | UTF-8 | 3,054 | 2.890625 | 3 | [] | no_license | import logging
import time
import threading
import cPickle
import pickle
from screen_usages.abstract_screen_usage import AbstractScreenUsage
class ScreenUsagesOrchestrator(threading.Thread):
    """Background thread that repeatedly asks the active "runner" for the
    next frame and draws it on the screen driver.

    NOTE(review): this code is Python-2 specific -- it uses `cPickle`,
    subscripts `dict.keys()` and pickles binary data through text-mode
    files; all three fail under Python 3.
    """
    # NOTE(review): runners={} is a mutable default argument shared across
    # instances -- safe only as long as callers always pass their own dict.
    def __init__(self, sr_driver, screen_scroll_delay=0.2, runners={}):
        super(ScreenUsagesOrchestrator, self).__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._sr_driver = sr_driver
        self._screen_scroll_delay = screen_scroll_delay
        self._current_frame = None
        for runner_name, runner in runners.items():
            assert isinstance(runner, AbstractScreenUsage)
        self._runners = runners
        # default runner is the first one
        self._active_runner = self._runners[self._runners.keys()[0]]
        self._active_runner_name = self._runners.keys()[0]
        try:
            self._logger.info('trying to load previously saved state')
            self.load_state_from_file()
        except Exception as ex:
            # Missing/corrupt state file is non-fatal: keep the default runner.
            self._logger.exception(ex)
        self._should_run = False
    def set_active_runner(self, runner_name):
        """Switch to the named runner; unknown names are logged, not raised."""
        self._logger.info('setting active runner to: {}'.format(runner_name))
        try:
            self._active_runner = self._runners[runner_name]
            self._active_runner_name = runner_name
            self._logger.info('runner {} was set successfully'.format(runner_name))
        except KeyError:
            self._logger.error('runner {} not found, runners names: {}'.format(runner_name, self._runners.keys()))
    def get_active_runner(self):
        return self._active_runner
    def kill_runner(self):
        # Flips the flag checked by run(); the loop exits after its next sleep.
        self._should_run = False
    def calc_next_frame(self):
        return self._active_runner.get_next_step()
    def get_current_frame(self):
        return self._current_frame
    def save_state_to_file(self):
        """Persist the active runner's name and serialized state to disk."""
        with open('orchestrator_back_up_file.pickle', 'w') as f:
            current_runner_data = self._active_runner.serialize_state()
            container = dict()
            container['runner_data'] = current_runner_data
            container['runner_name'] = self._active_runner_name
            cPickle.dump(container, f, pickle.HIGHEST_PROTOCOL)
        self._logger.info('saved state to file')
    def load_state_from_file(self):
        """Restore the active runner and its state from the backup file."""
        with open('orchestrator_back_up_file.pickle', 'r') as f:
            container = cPickle.load(f)
            self._active_runner_name = container['runner_name']
            self._active_runner = self._runners[self._active_runner_name]
            self._active_runner.load_state(container['runner_data'])
            self._logger.info('loaded state, active runner: {}'.format(self._active_runner_name))
    def run(self):
        """Thread body: draw frames until kill_runner() clears the flag."""
        self._should_run = True
        while self._should_run:
            try:
                self._current_frame = self.calc_next_frame()
                self._sr_driver.draw(pic=self._current_frame)
            except Exception as ex:
                # One bad frame must not kill the display loop.
                self._logger.exception(ex)
            time.sleep(self._screen_scroll_delay)
| true |
e44c9531bae2e0d65d67b005e3832363d0c646af | Python | srafi1/introcs2finalproject | /writepokedex.py | UTF-8 | 2,254 | 2.609375 | 3 | [] | no_license | def getids():
try:
idfile = open('data/csv/pokemon_species.csv', 'rU')
s = idfile.read()
except:
return {}
s = s.split('\n')
s = s[1:-1]
ids = {}
for i in s:
i = i.split(',')
ids[i[0]] = i[1]
return ids
def gettypes():
    """Map type id -> type name from the types CSV.

    Returns {} when the file cannot be read. BUGFIX: the file handle is
    closed via a context manager (it used to leak) and the long-deprecated
    'rU' mode (removed in Python 3.11) is replaced by 'r'.
    """
    try:
        with open('data/csv/types.csv', 'r') as typefile:
            s = typefile.read()
    except:
        # Preserve original best-effort behavior on any read problem.
        return {}
    s = s.split('\n')
    s = s[1:-3]  # drop header row and the trailing shadow/unknown types
    d = {}
    for i in s:
        i = i.split(',')
        d[i[0]] = i[1]
    return d
def idtotype(li):
    """Translate a list of type ids into type names via gettypes()."""
    types = gettypes()
    out = []
    for i in li:
        out.append(types[i])
    return out
def getpoketypes():
    """Map pokemon id -> list of type names (1 or 2 entries).

    NOTE(review): mode 'rU' was removed in Python 3.11; on modern Python
    open() raises and this always returns {}. The file handle is also
    never closed.
    """
    try:
        typefile = open('data/csv/pokemon_types.csv', 'rU')
        s = typefile.read()
    except:
        return {}
    s = s.split('\n')
    # Stop before the alternate-form rows (ids >= 10001).
    last = s.index('10001,14,1')
    s = s[1:last]
    d = {}
    for i in s:
        i = i.split(',')
        if i[0] in d:
            d[i[0]] = d[i[0]] + [i[1]]
        else:
            d[i[0]] = [i[1]]
    for i in d:
        d[i] = idtotype(d[i])
    return d
def getstats():
    """Map pokemon id -> list of 6 base-stat strings.

    NOTE(review): `last` is computed but unused -- getpoketypes() truncates
    at that marker (`s[1:last]`) while this function keeps `s[1:-1]`, so
    alternate forms (ids >= 10001) ARE included here. Confirm which
    behavior is intended. Same 'rU'/unclosed-handle caveats as above.
    """
    try:
        f = open('data/csv/pokemon_stats.csv', 'rU')
        s = f.read()
    except:
        return {}
    s = s.split('\n')
    last = s.index('10001,1,50,0')
    s = s[1:-1]
    d = {}
    for i in s:
        i = i.split(',')
        if i[0] in d:
            d[i[0]] = d[i[0]] + [i[2]]
        else:
            d[i[0]] = [i[2]]
    return d
def getpokeinfo():
    """Join ids, types and stats into rows of
    [id, name, type1, type2-or-'', hp, atk, def, spatk, spdef, speed],
    sorted by numeric id."""
    ids = getids()
    poketypes = getpoketypes()
    stats = getstats()
    li = []
    for i in ids:
        poke = []
        poke.append(int(i))
        poke.append(ids[i])
        poke.append(poketypes[i][0])
        if len(poketypes[i]) > 1:
            poke.append(poketypes[i][1])
        else:
            poke.append('')  # single-typed pokemon get an empty second slot
        poke.append(stats[i][0])
        for j in range(1,6):
            poke.append(stats[i][j])
        li.append(poke)
    li.sort()
    return li
def writedata():
    """Write the joined pokedex rows to poketypes.csv.

    NOTE(review): `print 'done'` makes this file Python-2-only.
    """
    f = open('poketypes.csv', 'w')
    out = getpokeinfo()
    # First row: id written without a leading newline.
    f.write(str(out[0][0]))
    for i in out[0][1:]:
        f.write(',' + i)
    for i in out[1:]:
        f.write('\n' + str(i[0]))
        for j in i[1:]:
            f.write(',' + j)
    f.close()
    print 'done'
| true |
e3e3daddaaa1d74acc530254c3f1c83428837954 | Python | abstractlyZach/python_design_patterns | /command_pattern/assignment/actions/appliance.py | UTF-8 | 632 | 3.28125 | 3 | [] | no_license | import logging
class Appliance(object):
    """A named appliance with an on/off state.

    Switching into the state it is already in raises an Exception;
    successful transitions are reported through the logging module.
    """

    def __init__(self, name):
        self._name = name
        self._is_on = False

    def on(self):
        """Turn the appliance on; raise if it is already on."""
        if self._is_on:
            raise Exception('{} is already on.'.format(self._name))
        logging.info('%s has been turned on.' % self._name)
        self._is_on = True

    def off(self):
        """Turn the appliance off; raise if it is already off."""
        if not self._is_on:
            raise Exception('{} is already off.'.format(self._name))
        logging.info('%s has been turned off.' % self._name)
        self._is_on = False

    @property
    def is_on(self):
        """Current power state."""
        return self._is_on
7058664e862323923e67f3285127bfa4c9dcdd1f | Python | dominicwhite/cherryblossoms | /darksky_stuff.py | UTF-8 | 2,478 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 07:10:32 2019
@author: dominic
"""
import csv
import datetime
import os
import pandas as pd
import requests
KEY = os.environ.get("DARKSKY_KEY")
tidal_basin_lat = 38.883995
tidal_basin_long = -77.038976
def format_darksky_url(dtime, lat=38.883995, long=-77.038976):
    """Build (and echo) a Dark Sky Time Machine URL for a datetime/location."""
    stamp = dtime.strftime("%Y-%m-%dT%H:%M:%S")
    print(stamp)
    url = (
        f"https://api.darksky.net/forecast/{KEY}/{lat},{long},{stamp}"
        "?exclude=hourly"
    )
    print(url)
    return url
def parse_darksky_response(response_json):
    """Extract high/low/midpoint temperatures from one Dark Sky daily record."""
    hi = response_json["temperatureHigh"]
    lo = response_json["temperatureLow"]
    # tempAv is the midpoint between the two extremes.
    return {"tempHi": hi, "tempLo": lo, "tempAv": lo + (hi - lo) / 2}
#print(parse_darksky_response(rj))
def save_2019_data():
    """Fetch daily temperature history for 2019-to-date and write it to CSV.

    Makes one Dark Sky API request per day (network-bound); output goes to
    data/2019_weather_hist.csv.
    """
    one_day = datetime.timedelta(days=1)
    start2019 = datetime.datetime(2019, 1, 1, 12)  # noon, to sit inside the day
    # print(start2019.strftime("%Y-%m-%dT%H:%M:%S"))
    rdate = start2019
    hist_data2019 = []
    while rdate < datetime.datetime.now():
        endpoint = format_darksky_url(rdate)
        print(rdate)
        r = requests.get(endpoint)
        rj = r.json()
        # The Time Machine response carries exactly one daily record.
        temps = parse_darksky_response(rj["daily"]["data"][0])
        temps['date'] = rdate.strftime("%Y-%m-%d")
        hist_data2019.append(temps)
        rdate += one_day
    # print(hist_data2019)
    with open("data/2019_weather_hist.csv", "w") as f:
        writer = csv.DictWriter(f, ["date", "tempHi", "tempLo", "tempAv"])
        writer.writeheader()
        writer.writerows(hist_data2019)
#save_2019_data()
def save_forecast():
    """Fetch the daily forecast for the Tidal Basin and write it to a dated CSV."""
    lat = 38.883995
    long = -77.038976
    api_base = f"https://api.darksky.net/forecast/{KEY}/{lat},{long}"
    get_params = "?exclude=hourly"
    endpoint = api_base + get_params
    print(endpoint)
    r = requests.get(endpoint)
    rj = r.json()
    forecast = []
    for day in rj["daily"]["data"]:
        temps = parse_darksky_response(day)
        # Each record's date comes from its unix timestamp, local time.
        day_dt = datetime.datetime.fromtimestamp(day["time"])
        temps['date'] = day_dt.strftime("%Y-%m-%d")
        forecast.append(temps)
    todays_date = datetime.datetime.now().strftime("%Y-%m-%d")
    with open("data/forecast_" + todays_date + ".csv", "w") as f:
        writer = csv.DictWriter(f, ["date", "tempHi", "tempLo", "tempAv"])
        writer.writeheader()
        writer.writerows(forecast)
save_forecast()
| true |
8086847b50d75c35516ddad1ec88bf6bed1a3b83 | Python | xiaoqi2019/python14 | /week_6/class_0222/task_02.py | UTF-8 | 510 | 3.4375 | 3 | [] | no_license | #-*-coding:utf-8-*-
#@Time :2019/2/25 18:12
#@Author:xiaoqi
#@File :task_02.py
# 2:思考:分别将我们学过的数据类型 int float boolean str list tuple dict
# 写到每个单元格里面,观察,你通过openpyxl操作后拿到的数据分别是是什么类型。
from openpyxl import load_workbook
wb=load_workbook('python_16.xlsx')
sheet=wb['Sheet1']
for i in range(1,sheet.max_row+1):
res=sheet.cell(i,1).value
print(res)
print(type(res))
# value=sheet.cell(row,column).value
| true |
f6a2bc125693249d86f2622d8477a45ca605338a | Python | dzheleznyakov/PythonMegaCourse | /s10more_on_functions/concat.py | UTF-8 | 130 | 2.9375 | 3 | [] | no_license | def concat(s1, s2='ccc'):
return s1 + s2
print(concat('aaa', 'bbb'))
print(concat(s2='aaa', s1='bbb'))
print(concat('aaa'))
| true |
304d3f065a836270ff58910d8f1e47cefddc940f | Python | shwotherspoon/misc-code-things | /min_edit_dist.py | UTF-8 | 1,104 | 3.59375 | 4 | [
"MIT"
] | permissive | import numpy as np
def compute_med(s1, s2, ins_cost=1, del_cost=1, sub_cost=1):
'''
Compute the minimum edit distance (MED) between string 1 (s1) and
string 2 (s2)
'''
target = s1
source = s2
target_len = len(target)+1 # num cols
source_len = len(source)+1 # num rows
matr = np.zeros((source_len, target_len))
matr[0,0] = 0
# initialize first row and first column
for row in range(matr.shape[0]):
matr[row,0] = row
for col in range(matr.shape[1]):
matr[0,col] = col
# fill in the rest of the table
for row in range(1,source_len):
for col in range(1,target_len):
ins_dist = matr[row, col-1] + ins_cost
del_dist = matr[row-1, col] + del_cost
if source[row-1] == target[col-1]:
sub_dist = matr[row-1, col-1]
else:
sub_dist = matr[row-1, col-1] + sub_cost
dist = min([ins_dist, del_dist, sub_dist])
matr[row,col] = dist
print(matr)
med = matr[source_len-1, target_len-1]
return med
def main():
    # Demo: edit distance between two similar brand names.
    s1 = 'honda'
    s2 = 'hyundai'
    med = compute_med(s1, s2)
    print(med)
if __name__=="__main__":
    main()
| true |
4905c87ccb8988204df91febc2da760b1642d19f | Python | hirosuzuki/procon | /atcoder/abc076/a.py | UTF-8 | 58 | 2.78125 | 3 | [] | no_license | R = int(input())
G = int(input())
r = 2 * G - R
print(r)
| true |
f0ac73a42bd0b039fb0d98f45c38e74b7a94c068 | Python | baixf-xyz/raspberry_pi | /face.py | UTF-8 | 3,966 | 2.609375 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
from picamera import PiCamera
from aip import AipFace
import urllib.request
import RPi.GPIO as GPIO
import base64
import time
import cv2
import pymysql.cursors
import sys
import datetime
#打开数据库连接
conn=pymysql.connect(host='localhost',user='root',passwd='123456',db='rapberry',port=3306)
#使用cursor()方法获取操作游标
cur=conn.cursor()
#获取当前时间
dt=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
#百度人脸识别API账号信息
APP_ID = '15777534'
API_KEY = 'iRCGkO9cV0iZOC4tNM4pjsIR'
SECRET_KEY ='WAaSwk2alCMc5fqefijAGOS9YEHLL4ga'
client = AipFace(APP_ID, API_KEY, SECRET_KEY)#创建一个客户端用以访问百度云
#图像编码方式
IMAGE_TYPE='BASE64'
camera = PiCamera()#定义一个摄像头对象,首先使用picamera模块当中的PiCamera方法创建返回一个camera的对象
#用户组
GROUP = 'raspberry_pi'
#初始化相机并获取对原始相机捕获的引用
#照相函数
def getimage():
#摄像界面为1024*768
camera.resolution = (1024, 768) #设置图像的width和height
camera.saturation = 80 # 设置图像视频的饱和度
camera.framerate = 32 #这里可能用的Fraction是一个分数模块来存储分数1/6,保证分数运算的精度,记得调用模块
time.sleep(0.1)#让相机预热
camera.start_preview()#开始摄像
time.sleep(2)
camera.capture('faceimage.jpg')#拍照并保存
time.sleep(2)
#对图片的格式进行转换
def transimage():
f = open('faceimage.jpg','rb')
img = base64.b64encode(f.read())
return img
#上传到百度api进行人脸检测
def go_api(image):
result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP);#在百度云人脸库中寻找有没有匹配的人脸
if result['error_msg'] == 'SUCCESS':#如果成功了
name = result['result']['user_list'][0]['user_id']#获取名字
score = result['result']['user_list'][0]['score']#获取相似度
print("相似度%d!" %score)
if score > 80:#如果相似度大于80
if name == 'linhaitao':
#print("欢迎%s !" % name)
print("欢迎林海涛")
time.sleep(3)
if name == 'mayun':
#print("欢迎%s !" % name)
print("欢迎马云")
time.sleep(3)
if name == "liyanhong":
#print("欢迎%s !" % name)
print("欢迎李彦宏")
if name == "zhaoliying":
#print("欢迎%s !" % name)
print("欢迎赵丽颖")
else:
print("对不起,我不认识你!我不能给你开门!!!")
name = 'Unknow'
return 0
current_time = time.asctime(time.localtime(time.time()))#获取当前时间
#写入mysql
try:
#执行sql语句
cur.execute("insert into person values('%s','%s')"%(name, dt))
cur.close()
#提交到数据库执行
conn.commit()
print("插入成功")
except:
#发生错误时回滚
conn.rollback()
print("插入失败")
finally:
#关闭数据库链接
conn.close()
return 1
if result['error_msg'] == 'pic not has face':
print('检测不到人脸,请把人脸对准摄像头重试,谢谢!')
time.sleep(2)
return 0
#主函数
if __name__ == '__main__':
while True:
print('准备开始人脸识别')
if True:
getimage()#拍照
img = transimage()#转换照片格式
res = go_api(img)#将转换了格式的图片上传到百度云
if(res == 1):#是人脸库中的人
print("开门")
else:
print("关门")
print('稍等三秒进入下一个')
time.sleep(3)
| true |
94772fdd33d9d0078c3e835528bdc359ffd18b63 | Python | hack4impact-uiuc/globalgiving-tool | /microservices/conftest.py | UTF-8 | 593 | 2.515625 | 3 | [] | no_license | import pytest
import sys, os
"""
Contains methods used before pytest collects all the tests within the microservices directory. Adds the correct directory such that
all of the test imports in each microservice will be found and the test cases will run.
"""
def pytest_sessionstart(session):
# Add microservice directory to path for test case imports to work
sys.path.append(os.path.realpath(os.path.dirname(__file__)))
def pytest_sessionfinish(session, exitstatus):
    """Pytest hook run after the session: return sys.path to its original state."""
    microservices_dir = os.path.realpath(os.path.dirname(__file__))
    sys.path.remove(microservices_dir)
| true |
595980326b35e63178642f0f816bdf2c14edaf83 | Python | KratosMultiphysics/Kratos | /applications/DEMApplication/tests/test_erase_particles.py | UTF-8 | 2,998 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | import os
import KratosMultiphysics as Kratos
from Kratos import Logger
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.DEMApplication.DEM_analysis_stage as dem_analysis
# This test consists in a system with a single already existing particle and an inlet that injects a few
# particles during the simulation, which consists in letting the particle fall under gravity.
# The bounding box, which has its bottom placed at z=0 is set to mark the particles to be erased when they
# cross this limit. Depending on the delay imposed on the destruction of the particles after they are marked,
# a different number of particles is recovered at the end of the simulation (more delay should lead
# to equal ot greater number of particles at the end).
debug_mode = False
class TestDEMEraseParticles(dem_analysis.DEMAnalysisStage):
    """DEM analysis stage that records how many particles survive the run."""
    @staticmethod
    def StaticGetMainPath():
        # Test data lives in 'erase_particles_test_files' next to this file.
        return os.path.join(os.path.dirname(os.path.realpath(__file__)), "erase_particles_test_files")
    def GetMainPath(self):
        return self.StaticGetMainPath()
    def Finalize(self):
        # Capture the surviving particle count before the base class finalizes.
        self.number_of_particles_by_the_end = len(self.spheres_model_part.GetElements())
        parent_return = super().Finalize()  # NOTE(review): return value is never used
class TestDEMEraseParticlesWithDelay(KratosUnittest.TestCase):
    """Checks that a longer destruction delay leaves equal or more particles alive."""
    def setUp(self):
        # Defaults; each test passes its own parameter file to RunTest.
        self.parameters_file_name = 'ProjectParametersDEMWithNoDelay.json'
        self.path = TestDEMEraseParticles.StaticGetMainPath()
        self.analysis = TestDEMEraseParticles
    def test_erase_particles_no_delay(self):
        project_parameters_file_name = 'ProjectParametersDEMWithNoDelay.json'
        expected_number_of_particles = 27
        self.RunTest(project_parameters_file_name, expected_number_of_particles)
    def test_erase_particles_little_delay(self):
        project_parameters_file_name = 'ProjectParametersDEMWithLittleDelay.json'
        expected_number_of_particles = 27
        self.RunTest(project_parameters_file_name, expected_number_of_particles)
    def test_erase_particles_with_delay(self):
        # More delay should keep more particles (29 vs 27 above).
        project_parameters_file_name = 'ProjectParametersDEMWithDelay.json'
        expected_number_of_particles = 29
        self.RunTest(project_parameters_file_name, expected_number_of_particles)
    def RunTest(self, project_parameters_file_name, expected_number_of_particles):
        """Run one DEM analysis with the given parameters and assert the final particle count."""
        parameters_file_path = os.path.join(self.path, project_parameters_file_name)
        model = Kratos.Model()
        with open(parameters_file_path, 'r') as parameter_file:
            project_parameters = Kratos.Parameters(parameter_file.read())
        analysis = self.analysis(model, project_parameters)
        analysis.Run()
        self.assertEqual(expected_number_of_particles, analysis.number_of_particles_by_the_end)
if __name__ == "__main__":
    # Quiet the Kratos logger unless debugging was enabled at the top of the file.
    if debug_mode:
        Logger.GetDefaultOutput().SetSeverity(Logger.Severity.INFO)
    else:
        Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
    KratosUnittest.main()
| true |
d144d2dcd7b2571f2d5dba57de5295c6d6241096 | Python | Ercion/learning_python | /ordered_dict_example.py | UTF-8 | 1,732 | 2.859375 | 3 | [] | no_license | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
import re
from collections import OrderedDict
# Matches "<name>.<extension> <size>b", capturing name, extension and size.
# (Raw string avoids invalid-escape warnings for \w, \d, \s.)
line_pattern = re.compile(r'^([\w\W]+)\.([\w\d]+)\s(\d+)b$')
'''
selected_file_types={
'music':['mp3','acc','flac'],
'images':['jpg','bmp','gif'],
'movies':['mp4','avi','mkv'],
'other':['7z','txt','zip','exe']
}
'''
def get_line_match(line):
    """Return the regex match for a "<name>.<ext> <size>b" line, or False.

    Bug fix: the original evaluated line_pattern.match(line) twice per call;
    a match object is truthy, so `or False` gives the same result with a
    single match.
    """
    return line_pattern.match(line) or False
def solution(S):
    """Sum file sizes per category and render one "<category> <total>b" line each.

    Categories appear in fixed order: music, images, movies, other. Lines
    that do not match the file pattern, or report a size of 0, are skipped.
    """
    totals = OrderedDict((("music", 0), ("images", 0), ("movies", 0), ("other", 0)))
    music_exts = ('mp3', 'acc', 'flac')
    image_exts = ('jpg', 'bmp', 'gif')
    movie_exts = ('mp4', 'avi', 'mkv')
    for entry in S.split('\n'):
        matched = get_line_match(entry)
        if not matched:
            continue
        size = int(matched.group(3))
        if size <= 0:
            continue
        extension = matched.group(2)
        if extension in music_exts:
            totals['music'] += size
        elif extension in image_exts:
            totals['images'] += size
        elif extension in movie_exts:
            totals['movies'] += size
        else:
            totals['other'] += size
    return ''.join("{0} {1}b\n".format(category, total) for category, total in totals.items())
if __name__=="__main__":
    # Smoke test with a mixed listing: music, movies, text, executables and an unknown type.
    results = solution("my.song.mp3 11b\nburcu.bu 300b\ngreatSong.flac 1000b\nnot3.txt 5b\nvideo.mp4 200b\ngame.exe 100b\nmov!e.mkv 10000b")
    print(results)
| true |
8a605edd7b40bb5fbb205c5df6d134d3baa7ac57 | Python | aalexx-S/picklewrapper | /pickleutils.py | UTF-8 | 1,893 | 3.75 | 4 | [] | no_license | import os
try:
import cPickle as pickle
except:
import pickle
class PickleUtils:
    """Thin facade over `pickle` bound to a single backing file.

    All reads and writes go through the file name given at construction
    time; the file and its contents are created on first write.

    Attributes:
        PICKLE_NAME: path of the file written to and read from.
    """
    def __init__(self, filename):
        """Bind this helper to *filename* (the full path of the pickle file)."""
        self.PICKLE_NAME = filename
    def pickle_check(self):
        """Return True when the backing file already exists, False otherwise."""
        return os.path.isfile(self.PICKLE_NAME)
    def pickle_read(self):
        """Return every object stored in the file, in the order it was written.

        Example result: ['Test', {'a': 1, 'b': 2}]
        """
        loaded = []
        with open(self.PICKLE_NAME, 'rb') as handle:
            while True:
                try:
                    loaded.append(pickle.load(handle))
                except EOFError:
                    return loaded
    def pickle_write(self, list_of_objects):
        """Overwrite the file with *list_of_objects*, one pickle frame per item.

        Any previous content of the file is discarded.
        """
        with open(self.PICKLE_NAME, 'wb') as handle:
            for obj in list_of_objects:
                pickle.dump(obj, handle)
| true |
1990d6395a9e588c4f803d0897e318e1a9a289e2 | Python | PravinAlhat/Splinter-Automation | /Tests/testRadioButtons.py | UTF-8 | 1,402 | 2.578125 | 3 | [] | no_license | from Splinter_Project.Page._PracticePage import practicePage
import unittest
import pytest
class testClass(unittest.TestCase):
    """Ordered UI tests that drive the Splinter practice page in one shared browser."""
    # Single shared page object: the same browser session serves every test.
    _test_Obj = practicePage()
    @classmethod
    def setUpClass(cls) -> None:
        # Start from a clean browser before the whole suite runs.
        cls._test_Obj.browserClose()
        cls._test_Obj.openApplication()
    def setUp(self) -> None:
        print('<<===========Test method execution has been started========>>')
    @pytest.mark.run(order=1)
    def test_RadBtns(self):
        self._test_Obj.workingWithRadioButtons('BMW','Benz')
    @pytest.mark.run(order=2)
    def test_DropDown(self):
        self._test_Obj.workingWithDropDown('cars','honda')
    @pytest.mark.run(order=4)
    def test_mulSelect(self):
        self._test_Obj.workingWithMultiSelect('Apple','Peach')
    @pytest.mark.run(order=3)
    def test_checkBox(self):
        self._test_Obj.workingWithCheckBox('cars','benz','honda')
    @pytest.mark.run(order=5)
    def test_switchWindow(self):
        self._test_Obj.workingWithSwitchWindow()
    @pytest.mark.run(order=6)
    def test_newWindowNavigation(self):
        self._test_Obj.navigateToTab()
    def tearDown(self) -> None:
        print('<<=========Test method execution has been finished===========>>')
    @classmethod
    def tearDownClass(cls) -> None:
        cls._test_Obj.browserClose()
        print('All the tests have been successfully executed and browser is closed')
| true |
8b0c7c9066b31846da241d10818e8a3d4cc85938 | Python | ekaone/raspberrypi | /Nema17.py | UTF-8 | 1,028 | 3 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time
# Use physical board pin numbering rather than BCM channel numbers.
GPIO.setmode(GPIO.BOARD) #read the pin as board instead of BCM pin
LinearActuatorDir = 33
LinearActuatorStepPin = 35
LinearActuatorEnable = 36
GPIO.setwarnings(False)
GPIO.setup(LinearActuatorDir, GPIO.OUT)
GPIO.setup(LinearActuatorStepPin, GPIO.OUT)
GPIO.setup(LinearActuatorEnable, GPIO.OUT)
FastSpeed = 0.00045 #Change this depends on your stepper motor
LowSpeed = 0.00045
Speed = FastSpeed
GPIO.output(LinearActuatorEnable, GPIO.HIGH)
# Drive 5 * 200 full steps (five revolutions of a 200-step NEMA17) in each
# direction, forever. Each step is one high/low pulse on the step pin.
while True:
    print ("Move Backward")
    for i in range (5*200):
        GPIO.output(LinearActuatorDir, 0)
        GPIO.output(LinearActuatorStepPin, 1)
        time.sleep(LowSpeed)
        GPIO.output(LinearActuatorStepPin, 0)
        time.sleep(LowSpeed)
    # NOTE(review): indentation was lost upstream; "Moving" may originally have
    # printed inside the step loop above.
    print ("Moving")
    time.sleep(1)
    print ("Move Forward")
    for i in range (5*200):
        GPIO.output(LinearActuatorDir, GPIO.HIGH)
        GPIO.output(LinearActuatorStepPin, GPIO.HIGH)
        time.sleep(FastSpeed)
        GPIO.output(LinearActuatorStepPin, GPIO.LOW)
        time.sleep(FastSpeed)
time.sleep(1) | true |
39301331b8b70f380c0f786d6d60ffd2281feeab | Python | hayden-williams/ECE4012 | /autoV1.py | UTF-8 | 9,603 | 2.625 | 3 | [] | no_license | # Authors: Stephen Hayden Williams and Edgardo Marchand
# Date Created: 18 Oct 2017
# Date Revised: 18 Oct 2017
# A very basic TurtleBot script that moves TurtleBot forward, bumper paused the movement for 2 sec. Press CTRL + C to stop. To run:
# On TurtleBot:
# roslaunch turtlebot_bringup minimal.launch
# On work station:
# python goStraight.py
import rospy
import sys
import roslib
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from geometry_msgs.msg import Twist#, Pose
from nav_msgs.msg import Odometry
from cmath import *
#from obstacle_detect import obstacle_detect
#from tf2_msgs.msg import TFMessage
#import tf
class GoStraight():
    """Drive a TurtleBot forward while steering around obstacles seen in the depth image.

    Python 2 / ROS node: subscribes to odometry and the depth camera,
    publishes velocity commands at 10 Hz until shutdown.

    NOTE(review): the original file's indentation was lost; the nesting of the
    obstacle-avoidance logic below is a best-effort reconstruction — confirm
    against the original source.
    """
    desired = 10 # should never naturally be 10, this was to give bot time to get correct error
    thetaError = 0
    kTurn = 0.5
    #obstacle = obstacle_detect()
    #Threshold for detecting object in a zone
    #Zones go from left to right on image
    # Distance in mm
    z_thresh = 1000
    z_threshCorner = z_thresh
    ZoneList = np.array([0,0,0,0,0,0])
    count = 0
    x = 0
    y = 0
    magnitude = 0
    def __init__(self):
        """Set up the ROS node, subscriptions and publisher, then run the drive loop."""
        # initiliaze
        rospy.init_node('GoStraight', anonymous=False)
        #obs = obstacle_detect()
        # tell user how to stop TurtleBot
        rospy.loginfo("To stop TurtleBot CTRL + C")
        # What function to call when you ctrl + c
        rospy.on_shutdown(self.shutdown)
        #rospy.Subscriber("/mobile_base/events/bumper",BumperEvent,self.BumperEventCallback)
        #rospy.Subscriber("/mobile_base/events/wheel_drop",WheelDropEvent,self.WheelDropEventCallback)
        rospy.Subscriber('odom',Odometry,self.Orientation)
        # Initialize bridge
        self.bridge = CvBridge()
        # Subscribe to depth sensor and get raw image
        self.depth_sub = rospy.Subscriber("/camera/depth/image_raw",Image,self.callback)
        #self.depth_sub = rospy.Subscriber("/camera/depth_registered/image_raw",Image,self.callback_depth)
        # Publish to navigation to move robot
        # may need rospy.spin();
        # Create a publisher which can "talk" to TurtleBot and tell it to move
        # Tip: You may need to change cmd_vel_mux/input/navi to /cmd_vel if you're not using TurtleBot2
        self.cmd_vel = rospy.Publisher('cmd_vel_mux/input/navi', Twist, queue_size=10)
        #TurtleBot will stop if we don't keep telling it to move. How often should we tell it to move? 10 HZ
        self.r = rospy.Rate(10);
        # Twist is a datatype for velocity
        move_cmd = Twist()
        # let's go forward at 0.2 m/s
        move_cmd.linear.x = 0.0
        # let's turn at 0 radians/s
        move_cmd.angular.z = 0
        # as long as you haven't ctrl + c keeping doing...
        while not rospy.is_shutdown():
            #rospy.loginfo(np.absolute(self.thetaError))
            if (np.absolute(self.thetaError) < 0.25):
                self.count = 0
            #rospy.loginfo("obstacle " + str(self.ZoneList))
            if (np.sum(self.ZoneList) == 0):
                # No obstacle in any zone: drive straight, correcting heading
                # (or hold still until the first odometry fix arrives).
                if self.desired == 10:
                    move_cmd.linear.x = 0.0
                    move_cmd.angular.z = 0
                else:
                    move_cmd.linear.x = 0.2
                    move_cmd.angular.z = self.kTurn*self.thetaError
            else:
                # Obstacle somewhere: pick a turn based on which zones are occupied.
                if (self.ZoneList[0] == 0 and self.ZoneList[1] == 0 and self.ZoneList[2] == 0 and self.ZoneList[3] != 0):
                    #rospy.loginfo("inside else")
                    #soft left
                    move_cmd.linear.x = 0.2
                    move_cmd.angular.z = 0.5
                elif (self.ZoneList[0] != 0 and self.ZoneList[1] == 0 and self.ZoneList[2] == 0 and self.ZoneList[3] == 0):
                    #soft right
                    move_cmd.linear.x = 0.2
                    move_cmd.angular.z = -0.5
                elif (self.ZoneList[0] == 0 and self.ZoneList[1] != 0 and self.ZoneList[2] != 0 and self.ZoneList[3] == 0):
                    if (self.ZoneList[1] > self.ZoneList[2]):
                        #Hard Right
                        move_cmd.linear.x = 0.2
                        move_cmd.angular.z = -0.75
                    else:
                        move_cmd.linear.x = 0.2
                        move_cmd.angular.z = 0.7
                        #Hard Leff
                elif((self.ZoneList[0]==0 and self.ZoneList[2] !=0) or (self.ZoneList[0] == 0 and self.ZoneList[1] !=0 and self.ZoneList[3] != 0)):
                    move_cmd.linear.x = 0.2
                    move_cmd.angular.z = 0.7
                    # Hard Left
                elif((self.ZoneList[1] != 0 and self.ZoneList[3] ==0) or (self.ZoneList[0] != 0 and self.ZoneList[2] != 0 and self.ZoneList[3] == 0)):
                    move_cmd.linear.x = 0.2
                    move_cmd.angular.z = -0.75
                else:
                    # Blocked ahead: spin in place, first left, then right, then give up.
                    self.count = self.count + 1
                    if self.count == 1 :
                        move_cmd.linear.x = 0.0
                        move_cmd.angular.z = 0
                        self.cmd_vel.publish(move_cmd)
                        self.r.sleep()
                        while (np.sum(self.ZoneList) != 0 and np.absolute(self.thetaError) < 1.57):
                            move_cmd.angular.z = 0.5
                            self.cmd_vel.publish(move_cmd)
                            self.r.sleep()
                    elif self.count == 2 :
                        move_cmd.linear.x = 0.0
                        move_cmd.angular.z = 0
                        self.cmd_vel.publish(move_cmd)
                        self.r.sleep()
                        while (np.sum(self.ZoneList)!=0):
                            move_cmd.angular.z = -0.5
                            self.cmd_vel.publish(move_cmd)
                            self.r.sleep()
                    else:
                        rospy.loginfo("I cant make it around! Help Mommy")
                        move_cmd.linear.x = 0.0
                        move_cmd.angular.z = 0
                        self.cmd_vel.publish(move_cmd)
            # publish the velocity
            self.cmd_vel.publish(move_cmd)
            # wait for 0.1 seconds (10 HZ) and publish again
            self.r.sleep()
    def Orientation(self,data):
        """Odometry callback: record heading error relative to the start heading."""
        qz = data.pose.pose.orientation.z
        qw = data.pose.pose.orientation.w
        current = qw + qz*1j
        if self.desired == 10:
            # First fix: remember the starting orientation (as a squared complex
            # quaternion component) and the starting position.
            self.desired = (qw + qz*1j)**2
            self.xstart = data.pose.pose.position.x
            self.ystart = data.pose.pose.position.y
        else:
            error = self.desired/(current**2)
            self.thetaError = phase(error)
        #desired = 1 + 0*1j
        #thetaDesired = 0
        #desired = cos(thetaDesired)+sin(thetaDesired)*1j
        #error = desired/current
        #thetaError = phase(error)
        #rospy.loginfo("qz: %f qw: %f"%(qz, qw))
        #thetaZ = qz/sqrt(1-(qw*qw))
        #euler = self.tf.transformations.euler_from_quaternion(quaternion)
        #yaw = euler[2]
        #rospy.loginfo("theta = %f"%(self.thetaError))
        self.x = data.pose.pose.position.x - self.xstart
        self.y = data.pose.pose.position.y - self.ystart
        self.magnitude = sqrt(self.x **2 + self.y **2)
        rospy.loginfo(self.magnitude)
    def callback(self,data):
        """Depth-image callback: split the frame into four vertical zones and
        count the near pixels in each, storing the counts in self.ZoneList."""
        try:
            # Get Image and find size of image
            self.depth_image = self.bridge.imgmsg_to_cv2(data, "passthrough")
            rows, col, channels = self.depth_image.shape #grey scale channel is 1, rgb is 3
            # Find Center of Image
            cR = np.int(np.round(rows/2))
            cC = np.int(np.round(col/2))
            colFrac = np.int(np.round(.25*col))
            self.mask = np.zeros((rows,col))
            self.mask[cR-rows*.25:rows,0:col] = 5
            self.mask = np.uint16(self.mask)
            self.mask = cv2.inRange(self.mask,np.array(4,dtype = "uint16"),np.array(6,dtype = "uint16"))
            min_z= np.array(100, dtype = "uint16") #bgr
            max_z= np.array(self.z_thresh, dtype = "uint16")
            self.mask2 = cv2.inRange(self.depth_image, min_z, max_z)
            #Combination of masks
            self.mask3 = cv2.bitwise_and(self.mask,self.mask, mask= self.mask2)
            min_zCorn= np.array(100, dtype = "uint16") #bgr
            max_zCorn= np.array(self.z_threshCorner, dtype = "uint16")
            self.maskCorner = cv2.inRange(self.depth_image, min_zCorn, max_zCorn)
            self.maskZone1 = np.zeros((rows,col))
            self.maskZone1[0:rows,0:np.round(col/4)] = 5
            self.maskZone1 = np.uint16(self.maskZone1)
            self.maskZone1 = cv2.inRange(self.maskZone1,np.array(4,dtype = "uint16"),np.array(6,dtype = "uint16"))
            self.Zone1 = cv2.bitwise_and(self.maskCorner,self.maskCorner, mask= self.maskZone1)
            self.maskZone2 = np.zeros((rows,col))
            self.maskZone2[0:rows,np.round(col/4)+1:np.round(col/2)] = 5
            self.maskZone2 = np.uint16(self.maskZone2)
            self.maskZone2 = cv2.inRange(self.maskZone2,np.array(4,dtype = "uint16"),np.array(6,dtype = "uint16"))
            self.Zone2 = cv2.bitwise_and(self.mask3,self.mask3, mask= self.maskZone2)
            self.maskZone3 = np.zeros((rows,col))
            self.maskZone3[0:rows,np.round(col/2)+1:cC+np.round(col/4)] = 5
            self.maskZone3 = np.uint16(self.maskZone3)
            self.maskZone3 = cv2.inRange(self.maskZone3,np.array(4,dtype = "uint16"),np.array(6,dtype = "uint16"))
            self.Zone3 = cv2.bitwise_and(self.mask3,self.mask3, mask= self.maskZone3)
            self.maskZone4 = np.zeros((rows,col))
            self.maskZone4[0:rows,cC+np.round(col/4)+1:col] = 5
            self.maskZone4 = np.uint16(self.maskZone4)
            self.maskZone4 = cv2.inRange(self.maskZone4,np.array(4,dtype = "uint16"),np.array(6,dtype = "uint16"))
            self.Zone4 = cv2.bitwise_and(self.mask3,self.mask3, mask= self.maskZone4)
            sumZone1 = np.sum(self.Zone1 / 255)
            #rospy.loginfo("sum of Zone1 is " + str(sumZone1))
            sumZone2 = np.sum(self.Zone2 / 255)
            #rospy.loginfo("sum of Zone2 is " + str(sumZone2))
            sumZone3 = np.sum(self.Zone3 / 255)
            #rospy.loginfo("sum of Zone3 is " + str(sumZone3))
            sumZone4 = np.sum(self.Zone4 / 255)
            #rospy.loginfo("sum of Zone4 is " + str(sumZone4))
            self.ZoneList = np.array([sumZone1, sumZone2, sumZone3, sumZone4])
            #rospy.loginfo("Zone List is "+ str(self.ZoneList))
            self.r.sleep()
        except CvBridgeError, e:
            print e
    def shutdown(self):
        """Stop the robot and give it time to receive the zero-velocity command."""
        # stop turtlebot
        rospy.loginfo("Stop TurtleBot")
        # a default Twist has linear.x of 0 and angular.z of 0. So it'll stop TurtleBot
        self.cmd_vel.publish(Twist())
        # sleep just makes sure TurtleBot receives the stop command prior to shutting down the script
        rospy.sleep(1)
if __name__ == '__main__':
    try:
        GoStraight()
    # NOTE(review): a bare except also swallows programming errors; catching
    # rospy.ROSInterruptException (plus KeyboardInterrupt) would be safer.
    except:
        rospy.loginfo("GoStraight node terminated.")
| true |
8b921259874a9d6e04f1aa59d1f5718ad0183673 | Python | JasonJOCKKY/CS4820-jtnfx | /assignments/assignment 5/createAssignment_2_test.py | UTF-8 | 809 | 2.734375 | 3 | [] | no_license | import pytest
import System
import json
# Login as a professor and create assignment in the course that the professor does not teach
def test_createAssignment_2(grading_system):
    """The system must reject creating an assignment in a course the professor does not teach."""
    username = 'saab'
    password = 'boomr345'
    course = 'databases'
    newAssignment = 'assignment000'
    newDueDate = '5/10/21'
    grading_system.login(username, password)
    grading_system.usr.create_assignment(newAssignment, newDueDate, course)
    assert json_courses()[course]['assignments'][newAssignment]['due_date'] is None # The professor should not be able to do this
@pytest.fixture
def grading_system():
    """Provide a fresh grading System with its data loaded, one per test."""
    gradingSystem = System.System()
    gradingSystem.load_data()
    return gradingSystem
def json_courses():
with open('Data/courses.json') as f:
courses = json.load(f)
return courses | true |
961303bc91935b93d2116f0b22f6f5620cbf7d15 | Python | abdullahelnajjar/FirstPythonProject | /Python Exercises/read a file.py | UTF-8 | 368 | 3.09375 | 3 | [] | no_license | with open('names.txt', 'r') as open_file:
    # Count how many times each (stripped) line appears in names.txt.
    content = {}
    for line in open_file:
        line = line.strip()
        if line in content:
            content[line] += 1
        else:
            content.update({line: 1})
    print(content)
'''
with open('names2.txt', 'a+') as open_file:
for count in range(1,10):
open_file.write('Zizo %d \n' % count) ''' | true |
22873b2cd76bf2caa6c97232c0d7b019f50696ff | Python | KleyLima/condo_manager | /source/dao/pessoa_dao.py | UTF-8 | 1,011 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from source.dao.models.pessoa import Pessoa
# Inserting imovel at Database
# NOTE(review): the comment above says "imovel" (property) but this function
# inserts a person record.
def insert_pessoa(nome, email, cpf, nacio, fone, nasc, tipo, sexo):
    """Create a Pessoa row from the given fields and persist it.

    Returns the result of Pessoa.save().
    """
    pessoa = Pessoa(
        name = nome,
        email = email,
        cpf = cpf,
        nacionality = nacio,
        phone = fone,
        birthday = nasc,
        costumer_type = tipo,
        sex = sexo
    )
    print("Inserting new person...")
    return pessoa.save()
# Searches the names of "locador" (landlord) records only.
def select_by_name_locador():
    """Return a "locador" record with a non-empty name.

    NOTE(review): .get() returns a single record, although the comment above
    promises all landlords — confirm whether .get() should be removed.
    """
    by_name = Pessoa.select().where(Pessoa.name != "", Pessoa.costumer_type == "locador").get()
    return by_name
# Searches the names of "locatario" (tenant) records only.
def select_by_name_locatario():
    """Return a "locatario" record with a non-empty name.

    NOTE(review): .get() returns a single record, not all tenants — confirm intent.
    """
    by_name = Pessoa.select().where(Pessoa.name != "", Pessoa.costumer_type == "locatario").get()
    return by_name
# Realiza a busca de todos os nomes sem excessao.
def select_all():
by_name = Pessoa.select().where(Pessoa.name != "").get()
return by_name | true |
ba5d9bd7f69dbb204d7b71d830c14d6b7cf6cdf5 | Python | AryaStar/Data-Mining-for-Cybersecurity | /Project/2019/5/code/sent_seg.py | UTF-8 | 1,435 | 2.84375 | 3 | [
"MIT"
] | permissive | '''
author:Fr3ya
date:20191205
function: 对每一条语句分句
'''
from nltk.tokenize import sent_tokenize
import re
import pymysql
# Connect to the database.
# NOTE(review): credentials are hard-coded in source; move them to config/env.
db = pymysql.connect("127.0.0.1",
                     "root",
                     "123456",
                     "Apollo",
                     use_unicode=True,
                     charset="utf8mb4"
                     )
cursor = db.cursor()
# Pull every forum post once, at import time.
sql = "select user,content,time,url from forums"
cursor.execute(sql)
results = cursor.fetchall()
def seg_sentence(data):
    """Split *data* into sentences; any URL inside a sentence is replaced by '. '."""
    reg_http = r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
    pattern_http = re.compile(reg_http)
    result = []
    for i in sent_tokenize(data):
        result.append(re.sub(pattern_http, '. ', i))
    return result
def write2mysql(user,content,time,url):
    """Insert one sentence row into the `dataset` table and commit.

    NOTE(review): the SQL is assembled with %-formatting; escape_string is not
    a full defence against injection — prefer cursor.execute(sql, params).
    The '%d' placeholder also requires `time` to be an int.
    """
    sql2 = "insert into dataset(time,content,usr,url) values('%d','%s','%s','%s')" %(time,pymysql.escape_string(content),\
                                                                                     pymysql.escape_string(user),\
                                                                                     pymysql.escape_string(url))
    cursor.execute(sql2)
    db.commit()
def main():
    """Split every fetched forum post into sentences and store each sentence."""
    for row in results:
        user = row[0]
        content = row[1]
        time = row[2]
        url = row[3]
        contents = seg_sentence(content)
        for i in contents:
            write2mysql(user, i, time, url)
if __name__ == '__main__':
main() | true |
d782eb14160767132273a8b62bd5d943fd924c21 | Python | AshaS1999/ASHA_S_rmca_s1_A | /ASHA_PYTHON/17-02-2021/C01Q13.py | UTF-8 | 255 | 3.8125 | 4 | [] | no_license | input_string = input("Enter a list element separated by comma ")
# Split the comma-separated values captured in `input_string` above,
# echo each item, then show the first and last entries.
items = input_string.split(',')  # renamed from `list` to stop shadowing the built-in
print("The entered list is")
for item in items:  # iterate directly instead of range(len(...))
    print(item)
print("the first and last colour is\n")
print("%s %s" % (items[0], items[-1]))
# NOTE(review): the stray `r` statement on the next line is a typo that raises
# NameError at runtime — it should be deleted.
r | true |
4752a0e26a0f02d74d3c979fcac9d17e0f49c32d | Python | Mianto/handwriting | /backend/utilities/basic_info/get_contact_number.py | UTF-8 | 1,119 | 3.015625 | 3 | [] | no_license | import json
import re
import os
def contact_number(json_dict):
    """Extract every 10-digit contact number from the OCR text in *json_dict*.

    :param json_dict: Vision-API-style response with textAnnotations
    :return: list of 10-character phone-number strings (empty on error)
    """
    description = json_dict['textAnnotations'][0]['description']
    description = description.replace('\n', '$')
    try:
        candidates = re.findall(r'[\+\(]?[1-9][0-9 .\-\(\)]{8,}[0-9]', description)
        return ten_digit_contact_number(candidates)
    except Exception as e:
        print("Exception" + str(e))
        return []
def remove_space(li):
    """Return a copy of *li* with all spaces removed from every string.

    Bug fix: the original called str.replace but discarded the result
    (strings are immutable), so it returned the list unchanged.
    """
    return [x.replace(" ", "") for x in li]
def ten_digit_contact_number(li):
    """Keep only the entries of *li* that are exactly 10 characters long."""
    return [candidate for candidate in li if len(candidate) == 10]
def patient_contact_number(written_number, blank_number):
    """Return the contact numbers on the written page that are not on the blank page.

    param: contact numbers from the prescribed page, contact numbers from the blank page
    return: the patient's contact numbers (written-page list unchanged when
            either input is empty)
    """
    if written_number and blank_number:
        # li = remove_space(li)
        return list(set(written_number) - set(blank_number))
    return written_number
if __name__ == "__main__":
pass
| true |
7edc6e63386aad9fcc5be1a47d9e709312a6ddbd | Python | dnath/RamseyCoin | /admin-tool-v2/http_utils.py | UTF-8 | 2,670 | 3.046875 | 3 | [] | no_license | import httplib
import json
class ResponseInfo:
    """Value holder for an HTTP response: status code, headers and payload.

    Built either from an HTTPResponse-like object or, with no argument,
    as an error placeholder (status -1, no payload).
    """
    def __init__(self, response=None):
        """Copy status, headers and body out of *response* when one is given."""
        self.headers = {}
        if not response:
            self.status = -1
            self.payload = None
        else:
            self.status = response.status
            for name, value in response.getheaders():
                self.headers[name] = value
            self.payload = response.read()
class HttpClient:
    """Minimal HTTPS client that sends bearer-token-authenticated requests.

    (Python 2 code: relies on httplib and its positional body argument.)
    """
    def __init__(self, hostname, port, apikey):
        self.hostname = hostname
        self.port = port
        self.apikey = apikey
    def send(self, method, path, json_payload=None):
        """Issue *method* on *path*; return a ResponseInfo (status -1 wraps transport errors)."""
        conn = httplib.HTTPSConnection('{0}:{1}'.format(self.hostname, self.port))
        headers = { 'Authorization' : 'Bearer {0}'.format(self.apikey) }
        response_info = None
        try:
            # Only POST/PUT carry a JSON body.
            if method == 'POST' or method == 'PUT':
                headers['Content-type'] = 'application/json'
                conn.request(method, path, json_payload, headers=headers)
            else:
                conn.request(method, path, headers=headers)
            response = conn.getresponse()
            return ResponseInfo(response)
        except Exception as ex:
            # Transport-level failure: wrap the error text in an empty ResponseInfo.
            response_info = ResponseInfo()
            response_info.payload = str(ex)
            return response_info
        finally:
            # Best-effort close; ignore errors on an already-broken connection.
            try:
                conn.close()
            except Exception:
                pass
def process_response(response, expected_status=200):
    """Return the payload when *response* has *expected_status*; otherwise print
    a human-readable diagnostic and return None.

    Tries to decode the backend's JSON error envelope (errorMessage/errorCode)
    where one may be present. (Python 2 code: uses print statements.)
    """
    if response.status == expected_status:
        return response.payload
    elif response.status == 401:
        print 'Authorization failure! Please verify your API key.'
    elif response.status == 404:
        try:
            error = json.loads(response.payload)
            print '[EC {1}] {0}'.format(error['errorMessage'], error['errorCode'])
        except Exception:
            print 'Requested resource does not exist.'
    elif response.status == -1:
        print 'Error while contacting the backend server: {0}.'.format(response.payload)
    else:
        try:
            error = json.loads(response.payload)
            print '[EC {1}] {0}'.format(error['errorMessage'], error['errorCode'])
        except Exception:
            print 'Backend server returned error: Status={0}; Message={1}'.format(response.status, response.payload)
    return None
| true |
37c13a99486e7adbc89490b871b06a9b69e20185 | Python | kumarjeetray/Programs | /Python/UniqueColors.py | UTF-8 | 494 | 2.6875 | 3 | [] | no_license | def minimumColors(n,s,v,i,j,count):
    # Greedy scan: extend the current group while the value spread stays below s;
    # each completed group is one colour, printed as count-1 at the end.
    l=len(v)
    ma=max(v)  # NOTE(review): unused
    if j>=l:
        print(count-1)
        return
    if v[0]==min(v) and v[l-1]==max(v) and v[l-1] - v[0] < s:
        print('1')
    #print(i,j,v[i],v[j])
    # NOTE(review): v[j+1] can raise IndexError when j is the last index; the
    # original indentation was lost upstream, so the nesting here is a best guess.
    while v[j]-v[i]>=s and v[j+1]-v[i]<s:
        #print(j)
        j=j+1
    count=count+1
    i=j
    j=i+1
    minimumColors(n,s,v,i,j,count)
# Read n and s, then the colour values, and print the number of colours needed.
n,s=[int(x) for x in input().split()]
v=[int(i) for i in input().split()]
i=0
j=1
count=0
minimumColors(n,s,v,i,j,count)
| true |
93e972cc713619588f6044b5788f67409b068152 | Python | rystills/CryptoChat | /NS_DH/Bob.py | UTF-8 | 2,775 | 2.546875 | 3 | [] | no_license | from main import generate_nonce, nonceSubtract, diffieHellman, encoder, decoder, sendMessage, receiveMessage, namePrint, chatDataHandler
import sympy, random, sys, threading
try: import simplejson as json
except ImportError: import json
sys.path.insert(0, 'DES/'); import DES
import socket
def main():
    """Bob's side of the handshake: get a key from the KDC, authenticate Alice,
    derive the session key K_AB, then chat over DES-encrypted messages."""
    TCP_IP = '127.0.0.1'
    SERV_PORT = 5005
    BOB_PORT = 5004
    BUFFER_SIZE = 4096  # NOTE(review): unused in this function
    #connect to KDC to establish bobKey
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((TCP_IP, SERV_PORT))
    #clients are aware of each others' usernames
    alice = "Alice"
    bob = "Bob"
    s.send(bob.encode("utf-8"))
    bobKey = diffieHellman(s, False)
    s.close()
    bobNonce = generate_nonce()
    bobNoncePrime = generate_nonce()
    namePrint(bob,"bobKey established with server as " + str(bobKey))
    #prepare for a connection from Alice
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((TCP_IP, BOB_PORT))
    s.listen(1)
    conn, addr = s.accept()
    #2. Bob responds with a nonce encrypted under his key with the Server
    namePrint(bob,"step 2: send Alice noncePrime")
    msg = receiveMessage(conn)
    msg.append(bobNoncePrime)
    encryptedMsg = DES.encrypt(DES.tobits(encoder.encode(msg)),bobKey)
    sendMessage(conn, encryptedMsg)
    #6. Bob sends Alice a nonce encrypted under K_AB to show that he has the key.
    namePrint(bob,"step 6: send Alice nonce encrypted under K_AB")
    toBob = receiveMessage(conn)
    decryptedAliceMsg = DES.frombits(DES.decrypt(toBob,bobKey))
    decryptedAlice = decoder.decode(decryptedAliceMsg)
    #verify that bobNoncePrime made it back
    if (decryptedAlice[1] != bobNoncePrime):
        namePrint(bob,"Error: Bob Nonce Prime has been corrupted! Aborting in case of attack.")
        sys.exit()
    bobKab = decryptedAlice[2]
    newMsg = [bobNonce]
    encryptedNewMsg = DES.encrypt(DES.tobits(encoder.encode(newMsg)),bobKab)
    sendMessage(conn, encryptedNewMsg)
    #8. Bob see's that Alice's computation was correct. Hurray, we're ready to chat!
    namePrint(bob,"step 8: verify Alice nonce operation result")
    encryptedNewMsg = receiveMessage(conn)
    decryptedAliceMsg = DES.frombits(DES.decrypt(encryptedNewMsg,bobKab))
    decryptedAlice = decoder.decode(decryptedAliceMsg)
    if (decryptedAlice[0] == nonceSubtract(bobNonce)):
        namePrint(bob,"Success! Time to chat!")
    else:
        namePrint(bob,"Error: Did not receive Bob Nonce - 1 from Alice")
        sys.exit()
    #time to chat!
    # Incoming messages are handled on a background thread; this thread reads
    # stdin and sends each line encrypted under K_AB.
    threading.Thread(target=chatDataHandler, args = (conn, alice, bobKab)).start()
    while (True):
        sendMessage(conn,DES.encrypt(DES.tobits(encoder.encode(input(""))),bobKab))
if __name__ == "__main__":
main() | true |
28ff35f863ae7e376d72b64a4ac797ca8518783a | Python | JetSimon/Advent-of-Code-2017 | /Day 11/day11.py | UTF-8 | 604 | 3.609375 | 4 | [] | no_license | from math import sqrt
def getInput():
    """Return the comma-separated hex-grid steps read from input.txt.

    Bug fixes: the original never closed the file handle, and the last token
    of each line kept its trailing newline (so e.g. "sw\n" never matched any
    direction in the walk below). The context manager and strip() fix both.
    """
    out = []
    with open('input.txt', 'r') as f:
        for line in f:
            out += line.strip().split(",")
    return out
# Walk the hex grid: n/s move a full cell, the four diagonals half a cell each,
# so abs(x) + abs(y) is the hex distance from the origin. `best` tracks the
# furthest distance ever reached (AoC 2017 day 11 part 2).
steps = getInput()
x = 0
y = 0
best = 0
for step in steps:
    if step == "n":
        y+=1
    elif step == "s":
        y-=1
    elif step == "ne":
        x+=0.5
        y+=0.5
    elif step == "se":
        x+=0.5
        y-=0.5
    elif step == "nw":
        x-=0.5
        y+=0.5
    elif step == "sw":
        x-=0.5
        y-=0.5
    if abs(x)+abs(y) > best:
        best = abs(x)+abs(y)
print(x,y)
print(abs(x)+abs(y))
print("best: ", best) | true |
3dc6397eb9544a8c494a5bc404f15e1499956050 | Python | tinguen/Currency-bot | /db.py | UTF-8 | 1,229 | 2.71875 | 3 | [] | no_license | import mysql.connector
my_db = mysql.connector.connect(
host="localhost",
user="xxxxx",
passwd="xxxxx"
)
my_cursor = my_db.cursor()
def get_currency(chat_id):
    """Return the stored base currency for *chat_id*, or None when unset.

    Security fix: the original interpolated chat_id straight into the SQL
    string; a parameterized query lets the driver escape the value.
    """
    my_cursor.execute(
        "SELECT currency FROM `bot`.`base_currency` WHERE chat_id=%s", (chat_id,)
    )
    row = my_cursor.fetchone()
    if row is None:
        return row
    return row[0]
def set_currency(chat_id, currency):
    """Insert or update the base currency for *chat_id*, then commit.

    Security fix: both statements now use driver-side parameter binding
    instead of str.format interpolation.
    """
    if get_currency(chat_id) is not None:
        my_cursor.execute(
            "UPDATE `bot`.`base_currency` SET currency=%s WHERE chat_id=%s",
            (currency, chat_id),
        )
    else:
        sql = "INSERT INTO `bot`.`base_currency` (chat_id, currency) VALUES (%s, %s)"
        values = (chat_id, currency)
        my_cursor.execute(sql, values)
    my_db.commit()
def add_fav(chat_id, curr1, curr2):
    """Store one favourite currency pair for *chat_id* and commit."""
    insert_statement = "INSERT INTO `bot`.`favorite` (chat_id, curr1, curr2) VALUES (%s, %s, %s)"
    my_cursor.execute(insert_statement, (chat_id, curr1, curr2))
    my_db.commit()
def get_fav(chat_id):
    """Return all favourite rows stored for *chat_id* as a list of tuples.

    Security fix: parameterized query instead of str.format interpolation.
    The manual fetchone() loop is replaced by fetchall(), which returns
    every remaining row in order.
    """
    my_cursor.execute("SELECT * FROM `bot`.`favorite` WHERE chat_id=%s", (chat_id,))
    return list(my_cursor.fetchall())
| true |
ba86b7af479cfb273bbdfebd2230ff1ad2320936 | Python | chaelivieira/SSW567HW2a | /TestTriangle.py | UTF-8 | 3,538 | 3.5625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Updated Jan 21, 2018
The primary goal of this file is to demonstrate a simple unittest implementation
@author: jrr
@author: rk
"""
import unittest
from Triangle import classifyTriangle
# This code implements the unit test functionality
# https://docs.python.org/3/library/unittest.html has a nice description of the framework
class TestTriangles(unittest.TestCase):
    """Unit tests covering every classification classifyTriangle can return:
    Right, Equilateral, Isoceles, Scalene, NotATriangle and InvalidInput."""
    # define multiple sets of tests as functions with names that begin
    def testRightTriangleA(self):
        self.assertEqual(classifyTriangle(3,4,5),'Right','3,4,5 is a Right triangle')
    def testRightTriangleB(self):
        self.assertEqual(classifyTriangle(5,3,4),'Right','5,3,4 is a Right triangle')
    def testRightTriangleC(self):
        self.assertEqual(classifyTriangle(5,4,3),'Right','5,4,3 is a Right triangle')
    def testEquilateralTrianglesA(self):
        self.assertEqual(classifyTriangle(1,1,1),'Equilateral','1,1,1 should be equilateral')
    def testEquilateralTrianglesB(self):
        self.assertEqual(classifyTriangle(2,2,2),'Equilateral','2,2,2 should be equilateral')
    def testIsocelesA(self):
        self.assertEqual(classifyTriangle(1,2,2),'Isoceles','1,2,2 should be isoceles')
    def testIsocelesB(self):
        self.assertEqual(classifyTriangle(2,2,1),'Isoceles','2,2,1 should be isoceles')
    def testIsocelesC(self):
        self.assertEqual(classifyTriangle(3,2,2),'Isoceles','3,2,2 should be isoceles')
    def testNotATriangleA(self):
        # 1 + 1 < 2: violates the triangle inequality.
        self.assertEqual(classifyTriangle(1,2,1),'NotATriangle','1,2,1 should be NotATriangle')
    def testNotATriangleB(self):
        self.assertEqual(classifyTriangle(1,2,3),'NotATriangle','1,2,3 should be NotATriangle')
    def testNotATriangleC(self):
        self.assertEqual(classifyTriangle(10,20,30),'NotATriangle','10,20,30 should be NotATriangle')
    def testScaleneA(self):
        self.assertEqual(classifyTriangle(7,9,8),'Scalene','7,9,8 should be scalene')
    def testScaleneB(self):
        self.assertEqual(classifyTriangle(7,8,9),'Scalene','7,8,9 should be scalene')
    def testScaleneC(self):
        self.assertEqual(classifyTriangle(9,8,7),'Scalene','9,8,7 should be scalene')
    def testBadInputA(self):
        self.assertEqual(classifyTriangle("a","a","a"),'InvalidInput','a,a,a should be InvalidInput')
    def testBadInputB(self):
        self.assertEqual(classifyTriangle(-1,-1,-1),'InvalidInput','-1,-1,-1 should be InvalidInput')
    def testBadInputC(self):
        # Side lengths above 200 are rejected by the implementation's range check.
        self.assertEqual(classifyTriangle(200,201,200),'InvalidInput','200,201,200 should be InvalidInput')
    def testBadInputD(self):
        self.assertEqual(classifyTriangle(100,"a",100),'InvalidInput','100,a,100 should be InvalidInput')
if __name__ == '__main__':
    print('Running unit tests')
    # Smoke-print the classifier over the same fixtures the test methods use,
    # then hand control over to the unittest runner.
    smoke_cases = (
        (3, 4, 5), (5, 3, 4), (5, 4, 3),
        (1, 1, 1), (2, 2, 2),
        (1, 2, 2), (2, 2, 1), (3, 2, 2),
        (1, 2, 1), (1, 2, 3), (10, 20, 30),
        (7, 9, 8), (7, 8, 9), (9, 8, 7),
        ("a", "a", "a"), (-1, -1, -1),
        (200, 201, 200), (100, "a", 100),
    )
    for a, b, c in smoke_cases:
        print(classifyTriangle(a, b, c))
    unittest.main()
| true |
3bbb35d6713d7d1b7f677b430e72b0c58772306e | Python | iRnx/Tabela-de-Times | /Montar uma tabela.py | UTF-8 | 627 | 3.703125 | 4 | [
"MIT"
# Reads team records (name, wins, draws, losses) until the user stops,
# then prints them as an aligned table.
lista1 = list()
lista2 = list()
while True:
    # Collect one team record as [name, wins, draws, losses].
    lista1.append(str(input('Nome: ')))
    lista1.append(int(input('Vitória: ')))
    lista1.append(int(input('Empate: ')))
    lista1.append(int(input('Derrota: ')))
    lista2.append(lista1[:])  # store a copy so clearing lista1 below is safe
    lista1.clear()
    resp = ' '
    while resp not in 'SN':
        # BUG FIX: the original took [0] of the stripped answer, which raised
        # IndexError when the user just pressed Enter; keep prompting until a
        # non-empty answer is given.
        entrada = str(input('Gostaria de Continuar? [S/N] ')).strip().upper()
        if entrada:
            resp = entrada[0]
    if resp == 'N':
        break
# Header row and one formatted row per recorded team.
print(f'{"Nome":<11} {"Vitória":<12} {"Empate":<11} {"Derrota":<11}')
for c in lista2:
    nome, vitoria, empate, derrota = c
    print(f'{nome:<14} {vitoria:<12} {empate:<11} {derrota}')
| true |
d388ea6e622e350d2f7f95203c4ec24bdbd8c225 | Python | meir367612/File_Project_oop | /Word.py | UTF-8 | 318 | 2.515625 | 3 | [] | no_license | from SuperFile import SuperFile
class WordFile(SuperFile):
    """Word-document variant of SuperFile; all state and rendering are inherited."""

    def __init__(self, name: str, content: str, who_created: str, description: str, file_size: int):
        # Delegate every field straight to the base-class initialiser.
        super().__init__(name, content, who_created, description, file_size)

    def __str__(self):
        # Render exactly as the base class does.
        return super().__str__()
| true |
b624e919f64a0c98be62441d66d7f35f809eae57 | Python | jorson-chen/thu-network-topology-discovery | /map.py | UTF-8 | 3,250 | 2.515625 | 3 | [] | no_license | import os
import re
import networkx as nx
from networkx.readwrite import json_graph
import csv
import json
import sys
import signal
from optparse import OptionParser
from netaddr import IPNetwork, IPAddress
# Initialize the command line parser instance
# ---- command-line interface and shared state --------------------------------
parser = OptionParser()
parser.add_option("-i", "--input", dest="in_filename",
                  help="read from file for ip to traceroute")
parser.add_option("-o", "--output", dest="out_filename",
                  help="write to file the json for d3js visualization")
parser.add_option("-s", "--subnet", dest="network_subnet",
                  help="subnet for the d3.js nodes")
parser.add_option("--skip", metavar="N", dest="operate_every_n", default=0, help="Traceroute every N ips")
(options, args) = parser.parse_args()
# Get parsed command line argument
in_filename = options.in_filename
out_filename = options.out_filename
network_subnet = options.network_subnet
operate_every_n = int(options.operate_every_n)
basename = os.path.basename(in_filename)
ip_network = IPNetwork(network_subnet)  # hops inside this subnet get a named group
# Read a series of ips to be traceroute'ed
ip_addrs = csv.reader(open(in_filename,'r'))
# Counter that is used for traceroute every n ips
counter_for_skip = 1
# Temporary file to store the traceroute result
tmp_filename = 'csv%s_tmp.txt' % (os.path.splitext(basename)[0])
G = nx.Graph()  # one node per observed hop IP; edges join consecutive hops
def generate_json():
    """Serialise the hop graph G to d3.js node-link JSON at out_filename.

    When both well-known local gateway addresses are present they are
    tagged with group 'local'; the traceroute scratch file is removed
    before the JSON is written.
    """
    if '10.200.200.200' in G and '10.0.2.2' in G:
        nx.set_node_attributes(G,
            {
                '10.200.200.200':{'group':'local'},
                '10.0.2.2':{'group':'local'},
            }
        )
    # NOTE(review): raises FileNotFoundError if no traceroute ever ran and the
    # scratch file was never created -- confirm that is acceptable here.
    os.remove(tmp_filename)
    nx_graph_dict = json_graph.node_link_data(G)
    os.makedirs(os.path.dirname(out_filename), exist_ok=True)
    with open(out_filename,"w+") as json_save_to_file:
        nx_graph_json = json.dump(nx_graph_dict,json_save_to_file)
# Main loop: traceroute every Nth address from the input CSV and merge the
# observed hop sequences into the shared graph G.
for raw_ip in ip_addrs:
    # Only perform traceroute every n ips, as specified in the cmd argument
    if(counter_for_skip < operate_every_n):
        counter_for_skip += 1
        continue
    else:
        # Reset counter
        counter_for_skip = 1
    ip = raw_ip[0]  # first CSV column holds the target address
    print('tracert ' + ip, end="... ")
    # Prevent OS buffering, otherwise done is printed before os.system(command)
    sys.stdout.flush()
    command = "traceroute -I %s > %s" % (ip,tmp_filename)
    status_code = os.system(command)
    # Ctrl+C pressed
    if status_code == 2:
        generate_json()
        sys.exit(0)
    print('done')
    # Parse the traceroute output: one hop per line, its IP in parentheses.
    with open(tmp_filename, 'r') as reader:
        reader.readline() # Read off heading
        current_ip = ''
        previous_ip = None
        line = reader.readline()
        while( line ):
            match = re.search(r"\(([\d.]+)\)", line)
            if match:
                current_ip = match.group(1)
                if current_ip not in G:
                    # Group 0 by default; hops inside the watched subnet get
                    # the subnet string as their group label.
                    group = 0
                    if IPAddress(current_ip) in ip_network:
                        group = network_subnet
                    G.add_node(current_ip,group=group)
                if previous_ip:
                    G.add_edge(previous_ip, current_ip)
                previous_ip = current_ip
            line = reader.readline()
generate_json()
| true |
9da548be860e0bf7a9df69e9df59003c80c980cb | Python | philprobinson84/RPi | /camera/timelapse/cam_timeLapse_Threaded_upload.py | UTF-8 | 1,578 | 2.796875 | 3 | [
"Artistic-2.0"
] | permissive | #!/usr/bin/env python2.7
import time
import os
from subprocess import call
import sys
class Logger(object):
    """Tee writer that duplicates stdout output into logfile.log.

    An instance is meant to replace ``sys.stdout`` so everything the script
    prints also lands in a persistent, append-mode log file.
    """

    def __init__(self):
        # Keep a handle on the real stdout so output still reaches the console.
        self.terminal = sys.stdout
        self.log = open("logfile.log", "a")

    def write(self, message):
        # Mirror every chunk of text to both destinations.
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # BUG FIX: anything that replaces sys.stdout must honour the
        # file-object protocol; without flush(), any sys.stdout.flush()
        # (including interpreter shutdown) raises AttributeError.
        self.terminal.flush()
        self.log.flush()
# Route all print output through the tee logger (console + logfile.log).
sys.stdout = Logger()
# Target period, in seconds, of one full upload cycle.
UPLOAD_INTERVAL = 60
def upload_file(inpath, outpath):
    """Push one local file to Dropbox via the dropbox_uploader helper script."""
    uploader = "/home/pi/Dropbox-Uploader/dropbox_uploader.sh"
    uploadCmd = "{0} upload {1} {2}".format(uploader, inpath, outpath)
    call([uploadCmd], shell=True)
# Upload cycle: push the newest still and clip to Dropbox roughly every
# UPLOAD_INTERVAL seconds, accounting for how long the uploads themselves take.
while True:
    # record start_time
    start_time = time.time()
    # initiate the upload process
    inpath = "/home/pi/timelapse/latest/latest.jpg"
    outpath = "latest.jpg"
    if os.path.exists(inpath):
        upload_file(inpath,outpath)
        print "uploadThread: uploaded %s to %s" % (inpath,outpath)
    else:
        print "uploadThread: file %s does not exist, skipping" % (inpath)
    # same dance for the rendered time-lapse video
    inpath = "/home/pi/timelapse/latest/latest.mp4"
    outpath = "latest.mp4"
    if os.path.exists(inpath):
        upload_file(inpath,outpath)
        print "uploadThread: uploaded %s to %s" % (inpath,outpath)
    else:
        print "uploadThread: file %s does not exist, skipping" % (inpath)
    # record end_time
    end_time = time.time()
    # determine elapsed time
    elapsed_time = end_time - start_time
    # determine how long to sleep
    sleep_time = UPLOAD_INTERVAL - elapsed_time
    # check for negative sleep request!
    if (sleep_time < 1):
        print "uploadThread: sleep_time < 1!!! (%s)" % sleep_time
        sleep_time = 1
    # sleep
    print "uploadThread: sleeping for %s seconds" % sleep_time
    time.sleep(sleep_time)
| true |
a6aee1aeaab9fc5f19edaa44708637b51a3e0adf | Python | Klaudia67/IMDb | /Project/Python/IMDb.py~ | UTF-8 | 2,028 | 2.625 | 3 | [] | no_license | import omdb
import re
# Load the local movie database dump once at start-up; txt()/txt2() compare
# user input against this text.
with open('Baza', 'r') as myfile:
    data=myfile.read()
def txt():
    """Print the query if it equals the whole 'Baza' text, else 'false'.

    `ment` is the Tk StringVar bound to the title entry; `data` holds the
    full contents of the 'Baza' file loaded at start-up.
    """
    # Idiom fix: a list comprehension was used purely for its print side
    # effects, and the StringVar was read twice; use a plain conditional.
    mtext = ment.get()
    if mtext == data:
        print(mtext)
    else:
        print("false")
def txt2():
    """Print the query if it occurs anywhere in the 'Baza' file, else 'false'."""
    # Fixes: comprehension-for-side-effect idiom, the StringVar being read
    # twice, and the file-handle leak from the bare open('Baza').read().
    query = ment.get()
    with open('Baza') as baza:
        contents = baza.read()
    if query in contents:
        print(query)
    else:
        print("false")
# Query the OMDb web API for the movie whose title and year were typed in.
def txt3():
    # NOTE(review): omdb.get performs a network request; `ment`/`ment1` are
    # the Tk StringVars for title and year created below.
    movie=omdb.get(title=ment.get(), year=ment1.get(), tomatoes=True, fullplot=True)
    print(movie)
from tkinter import *
from tkinter import ttk
# --- GUI construction: a movie-search form laid out on a grid ---------------
root=Tk()
# One StringVar per input field: title, year, genre, rating, popularity, profit.
ment=StringVar()
ment1=StringVar()
ment2=StringVar()
ment3=StringVar()
ment4=StringVar()
ment5=StringVar()
sortvar = StringVar()
sort = ttk.Combobox(root, textvariable=sortvar)
# Column-0 labels, one per form row.
Title = Label(root, text="Find your movie!")
Title1 = Label(root, text="Full title")
Title2 = Label(root, text="Part of the title")
Genre = Label(root, text="Genre")
Rating = Label(root, text="Rating")
Popularity = Label(root, text="Popularity")
Profit = Label(root, text="Profit")
Sort = Label(root, text="Sort by")
Title.grid(row=0, columnspan=2)
Title1.grid(row=1, sticky=E)
Title2.grid(row=2, sticky=E)
Genre.grid(row=3, sticky=E)
Rating.grid(row=4, sticky=E)
Popularity.grid(row=5, sticky=E)
Profit.grid(row=6, sticky=E)
Sort.grid(row=7, sticky=E)
# Column-1 entry widgets, each bound to one of the StringVars above.
entry1 = Entry(root, textvariable=ment)
entry2 = Entry(root, textvariable=ment1)
entry3 = Entry(root, textvariable=ment2)
entry4 = Entry(root, textvariable=ment3)
entry5 = Entry(root, textvariable=ment4)
entry6 = Entry(root, textvariable=ment5)
# NOTE(review): entry7 duplicates the 'sort' combobox and is never placed on
# the grid -- it looks like dead code.
entry7 = ttk.Combobox(root, textvariable=sortvar)
sort['values'] = ('Rating', 'Title', 'Release date', 'Popularity', 'Profit')
entry1.grid(row=1, column=1)
entry2.grid(row=2, column=1)
entry3.grid(row=3, column=1)
entry4.grid(row=4, column=1)
entry5.grid(row=5, column=1)
entry6.grid(row=6, column=1)
sort.grid(row=7, column=1)
# Decorative image spanning the right-hand side of the form.
Image = PhotoImage(file="camera.png")
label = Label(root, image=Image)
label.grid(row=1, column=2, columnspan=2, rowspan=6, sticky= W+E+N+S)
# OK button triggers the OMDb lookup (txt3).
button1=Button(text="OK", command=txt3, fg="red")
#button1=Button(text="OK", command=txt2, fg="red")
button1.grid(columnspan=3)
root.mainloop() | true |
22ee69c379c0903d08b23bb64c4760b86d1542b7 | Python | Lain-progressivehouse/atCoder | /atCoder/abc140.py | UTF-8 | 1,228 | 2.828125 | 3 | [
"MIT"
def p_a():
    """ABC140 A: print the cube of the input integer."""
    side = int(input())
    print(side * side * side)
def p_b():
    """ABC140 B: total satisfaction from eating dishes in the given order."""
    _ = int(input())  # N is implied by the list lengths
    order = [int(token) - 1 for token in input().split()]  # dish indices, 0-based
    satisfaction = list(map(int, input().split()))
    bonus = list(map(int, input().split()))
    total = sum(satisfaction[dish] for dish in order)
    # bonus[i] applies whenever dish i+1 is eaten immediately after dish i
    total += sum(bonus[prev] for prev, cur in zip(order, order[1:]) if cur == prev + 1)
    print(total)
def p_c():
    """ABC140 C: maximum possible sum of A given the N-1 adjacent-max bounds B."""
    _ = int(input())  # N; the bounds list itself has N-1 entries
    bounds = list(map(int, input().split()))
    best = [bounds[0]]
    best += [min(left, right) for left, right in zip(bounds, bounds[1:])]
    best.append(bounds[-1])
    print(sum(best))
def p_d():
    """ABC140 D: maximise happy neighbours using at most K reversals."""
    n, k = map(int, input().split())
    crowd = input()
    # A person is happy when they face someone looking the same way,
    # i.e. each equal adjacent pair contributes one happy person.
    happy = sum(1 for left, right in zip(crowd, crowd[1:]) if left == right)
    remaining = n - happy - 1  # boundaries still available to merge
    for _ in range(k):
        if remaining > 1:
            happy += 2
            remaining -= 2
        elif remaining == 1:
            happy += 1
            remaining -= 1
    print(happy)
"""
未AC
"""
def p_e():
N = int(input())
P = list(map(int, input().split()))
n = N - 1
ans = n * (n + 1) * (2 * n + 1) // 6
for i in range(N):
if not (P[i] == i + 1 or P[i] == N - i):
ans -= 1
print(ans)
if __name__ == '__main__':
    p_e()  # runs only the problem-E attempt (flagged 未AC by the author)
| true |
d8a6e8f75c9128d2e9761670751d4c65ee5ae0b4 | Python | sherholz/render_scripts | /pyscripts/pytools/inset.py | UTF-8 | 6,855 | 2.609375 | 3 | [] | no_license | '''
Created on 20.11.2015
@author: Jirka
'''
import OpenEXR as oe
import Imath
import os
import numpy
import fnmatch
import sys
from PIL import Image
def toSRGB( val, ev ):
    """Apply an exposure of `ev` stops to a linear value and sRGB-encode it."""
    exposed = val * pow( 2, ev )
    # Standard sRGB transfer: linear segment below the breakpoint,
    # gamma-style curve above it.
    if exposed <= 0.0031308:
        return exposed * 12.92
    alpha = 0.055
    return (1 + alpha) * pow( exposed, 1/2.4 ) - alpha
def isReal( v ):
    """Return True iff float(v) is a finite number (rejects NaN and +/-inf)."""
    f = float(v)
    if f != f:  # NaN is the only value unequal to itself
        return False
    return f != float('+inf') and f != float('-inf')
#We suppose that pixels are indexed from 1
def createInset( inputFilePath, outputFilePath, x, y, x1, y1, ev, scale ):
    """Cut the rectangle (x,y)-(x1,y1) out of an EXR image.

    Writes the crop as <output>.exr plus a tone-mapped (sRGB, exposure `ev`)
    .png enlarged `scale` times with nearest-neighbour resampling.
    Passing x1 == y1 == -1 copies the whole image instead of cropping.
    Coordinates are 1-based on input and converted to 0-based internally.
    """
    #inputFilePath = r"d:\Jirka\projects\importance_gitrepo\results\final\pt\CoronaBenchmark\CoronaBenchmark_guided_adrrs_nosplit.exr"
    #outputFilePath = r"d:\Jirka\projects\importance_gitrepo\results\final\pt\CoronaBenchmark\insent_test.exr"
    copyImage = False
    print "x: " ,x, " y: ", y
    print "x1: " ,x1, " y1: ", y1
    if x1 ==-1 and y1 ==-1:
        copyImage = True
    #We suppose that pixels are indexed from 1 but internally from 0
    x = x - 1
    x1 = x1 - 1
    y = y - 1
    y1 = y1 - 1
    # NOTE(review): w/h are assigned twice; the exclusive (x1 - x) variant wins.
    w = x1 - x + 1
    h = y1 - y + 1
    w = x1 - x
    h = y1 - y
    print "w: " ,w, " h: ", h
    print "copyImage:" , copyImage
    print "Scale: ", scale
    if not copyImage and (w <= 0 or h <= 0):
        print("[ERROR] Cannot create the specified inset. Invalid input range.")
        exit(-1)
    resimg_r = None
    resimg_g = None
    resimg_b = None
    # Only .exr inputs are supported.
    if not (os.path.isfile(inputFilePath) and fnmatch.fnmatch(inputFilePath, "*.exr")):
        print("[ERROR] Cannot create inset from file {}".format( inputFilePath ))
        exit( -1 )
    print('[INFO] Inset from file "%s"' % inputFilePath)
    img = oe.InputFile( inputFilePath )
    #(r, g, b) = img.channels("RGB", Imath.PixelType(Imath.PixelType.FLOAT))
    #pt = Imath.PixelType(Imath.PixelType.HALF)
    #print(img.header())
    dw = img.header()[ 'dataWindow' ]
    size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
    #(rc, gc, bc) = img.channels("RGB", pt)
    #(rc, gc, bc) = img.channels("RGB")
    # Raw per-channel pixel bytes, decoded as 32-bit floats below.
    (rc, gc, bc) = img.channels("RGB", Imath.PixelType(Imath.PixelType.FLOAT))
    channelType = numpy.float32
    if copyImage:
        # NOTE(review): width/height look swapped here (size is (width, height));
        # the copy branch later compensates by swapping indices again.
        h = size[0]
        w = size[1]
    r = numpy.fromstring(rc, dtype = channelType)
    g = numpy.fromstring(gc, dtype = channelType)
    b = numpy.fromstring(bc, dtype = channelType)
    r.shape = (size[1], size[0]) # Numpy arrays are (row, col)
    g.shape = (size[1], size[0]) # Numpy arrays are (row, col)
    b.shape = (size[1], size[0]) # Numpy arrays are (row, col)
    """ Create the output image according to size of an first observed image """
    if resimg_r == None and not copyImage:
        resimg_r = numpy.array([0]*w*h, dtype = numpy.float32)
        resimg_g = numpy.array([0]*w*h, dtype = numpy.float32)
        resimg_b = numpy.array([0]*w*h, dtype = numpy.float32)
        resimg_r.shape = (h, w) # Numpy arrays are (row, col)
        resimg_g.shape = (h, w) # Numpy arrays are (row, col)
        resimg_b.shape = (h, w) # Numpy arrays are (row, col)
    """ Check the image """
    # Warn about any NaN/inf pixels before cropping.
    for j in range(0, size[1]):
        for i in range(0, size[0]):
            if not isReal( r[j, i] ) or not isReal( g[j, i] ) or not isReal( b[j, i] ):
                print("[WARN] pixel at [%d, %d] = (%f, %f, %f) is nor real" % (i, j, r[j, i], g[j, i], b[j, i]))
    #resimg_r[y, x] = resimg_r[y, x] + inv_gamma(r[y, x])
    #resimg_g[y, x] = resimg_g[y, x] + inv_gamma(g[y, x])
    #resimg_b[y, x] = resimg_b[y, x] + inv_gamma(b[y, x])
    """ Add the image to the result image"""
    print "WW: ", w, "HH: ",h
    if not copyImage:
        #resimg_r = r[y:y+h,x:x+w]
        #resimg_g = g[y:y+h,x:x+w]
        #resimg_b = b[y:y+h,x:x+w]
        # Crop: rows are y..y+h-1, columns x..x+w-1.
        resimg_r = r[y:y+h,x:x+w]
        resimg_g = g[y:y+h,x:x+w]
        resimg_b = b[y:y+h,x:x+w]
    else:
        resimg_r = r
        resimg_g = g
        resimg_b = b
    dataR = resimg_r.ravel().tostring()
    dataG = resimg_g.ravel().tostring()
    dataB = resimg_b.ravel().tostring()
    outputFileName = outputFilePath
    if not fnmatch.fnmatch(outputFilePath, "*.exr"):
        outputFileName = outputFilePath + ".exr"
    outputHeader = img.header()
    if not copyImage:
        # Cropped output gets a fresh header sized to the inset.
        tmpHeader = oe.Header( w, h )
        #outputHeader[ 'displayWindow' ] = tmpHeader[ 'displayWindow' ]
        #outputHeader[ 'dataWindow' ] = tmpHeader[ 'dataWindow' ]
        outputHeader = tmpHeader
    outExr = oe.OutputFile( outputFileName, outputHeader )
    outExr.writePixels({'R' : dataR, 'G' : dataG, 'B' : dataB})
    print("[INFO] Inset was written to file %s" % outputFileName)
    """ Create its tone-mapped .png version """
    prefix, dummy = os.path.splitext( outputFileName )
    outputFileName = prefix + ".png"
    print "resimg_r: ", numpy.shape(resimg_r), "\t w: ", w, "\t h: ",h
    if copyImage:
        # Whole-image branch: w/h were swapped above, so indices swap here too.
        im = Image.new( "RGB", (h, w) )
        pix = im.load()
        for i in range( 0, w ):
            for j in range( 0, h ):
                c = ( resimg_r[i,j ], resimg_g[i,j], resimg_b[i,j ] )
                c = tuple( map( lambda c: min( 255, int( round( toSRGB(c,ev) * 255 ) ) ), c ) )
                pix[j, i ] = ( c )
        im = im.resize((h*scale,w*scale),Image.NEAREST)
        try:
            im.save(outputFileName)
        except IOError:
            print("[WARN] Cannot create the image '{}'".format( outputFileName ))
        print("[INFO] Tone-mapped version was written to file %s" % outputFileName)
    else:
        # Crop branch: tone-map each pixel and upscale by `scale`.
        im = Image.new( "RGB", (w, h) )
        pix = im.load()
        for i in range( 0, w ):
            for j in range( 0, h ):
                c = ( resimg_r[j,i ], resimg_g[j,i], resimg_b[j,i ] )
                c = tuple( map( lambda c: min( 255, int( round( toSRGB(c,ev) * 255 ) ) ), c ) )
                pix[i, j ] = ( c )
        im = im.resize((w*scale,h*scale),Image.NEAREST)
        try:
            im.save(outputFileName)
        except IOError:
            print("[WARN] Cannot create the image '{}'".format( outputFileName ))
        print("[INFO] Tone-mapped version was written to file %s" % outputFileName)
if __name__ == '__main__':
    #createInset( "reference.exr", "inset", 0, 0, 1023, 575 )
    #exit()
    # Fixes: the usage branch previously fell through without exiting;
    # `len( sys.argv > 7 )` compared a list to an int before calling len();
    # the coordinates were passed as strings even though createInset does
    # arithmetic on them; and the mandatory `scale` argument was never
    # supplied, which was a guaranteed TypeError.
    if len(sys.argv) < 7:
        print("Usage: <inputFile> <outputFile> <x> <y> <x1> <y1> [ev] [scale]")
        print("\t\twhere x, y are coordinates of the upper left corner while x1, y1 of the lower right corner")
        print("\t\tand ev is an optional exposure value of the tone-mapped (sRGB) .png inset.")
        exit(-1)
    if len(sys.argv) > 7:
        ev = float(sys.argv[7])
    else:
        ev = 0.0
    if len(sys.argv) > 8:
        scale = int(sys.argv[8])
    else:
        scale = 1
    createInset(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv[4]),
                int(sys.argv[5]), int(sys.argv[6]), ev, scale)
| true |
b56941f591a00870cbd4d49fb1f0219e9125778b | Python | revan7/pv-simulator | /src/messaging/senders/RabbitMQBroker.py | UTF-8 | 1,396 | 3.21875 | 3 | [] | no_license | import json
import logging
import pika
logger = logging.getLogger(__name__)  # module-level logger for broker events
class RabbitMQBroker:
    """Minimal RabbitMQ publisher.

    Lazily opens a blocking connection on first use and pushes
    JSON-serialised messages onto a named queue.

    Attributes
    ----------
    queue_address: str
        Hostname of the RabbitMQ server.
    queue_name: str
        Name of the queue messages are published to.
    """

    def __init__(self, queue_address, queue_name):
        self.channel = None  # created on demand by start_connection()
        self.queue_address = queue_address
        self.queue_name = queue_name

    def start_connection(self):
        """Open a blocking connection and declare the target queue."""
        parameters = pika.ConnectionParameters(host=self.queue_address)
        connection = pika.BlockingConnection(parameters)
        self.channel = connection.channel()
        self.channel.queue_declare(queue=self.queue_name)

    def send_message(self, message):
        """
        Publish *message* to the queue, connecting first if needed.

        :param message: Payload to send; it must be something json.dumps
            can serialise.
        :return:
        """
        logger.info("Attempting to send message...")
        if self.channel is None:
            logger.info('Starting connection !')
            self.start_connection()
        payload = json.dumps(message)
        self.channel.basic_publish(exchange='', routing_key=self.queue_name, body=payload)
        logger.info("Message sent.")
| true |
bcb458ab697f53c547d3872decab2aaa3377e7d8 | Python | princewang1994/work | /MachineLearning/greg.py | UTF-8 | 866 | 2.859375 | 3 | [] | no_license | #!/usr/bin/python
import re
def generateData(f):
    """Parse up to 2000 lines of 'word<spaces>definition' from file `f`.

    Returns a dict mapping the leading word to everything after the first
    run of spaces (trailing newline included).
    """
    entries = {}
    parsed = 0
    for line in open(f):
        if parsed >= 2000:
            break
        idx = 0
        # Scan past the key, then past the separating run of spaces.
        while line[idx] != ' ':
            idx += 1
        key_end = idx
        while line[idx] == ' ':
            idx += 1
        entries[line[0:key_end]] = line[idx:]
        parsed += 1
    return entries
def analyze():
    """Tokenise every definition from ee_dic.txt and count token frequencies."""
    definitions = generateData('ee_dic.txt')
    tokens_by_word = {}
    frequencies = {}
    # Split on any character that is not a letter (the '^' inside the class
    # is kept from the original pattern and is matched literally).
    splitter = re.compile(r'[^A-Z^a-z]')
    for key in definitions:
        tokens_by_word[key] = splitter.split(definitions[key])
        for token in tokens_by_word[key]:
            frequencies[token] = frequencies.get(token, 0) + 1
    return frequencies
def pearson(w1, w2):
    """Pearson correlation coefficient between two count dicts.

    Only keys present in both `w1` and `w2` contribute. Returns a float in
    [-1, 1], or 0 when there are no shared keys or either side has zero
    variance.

    BUG FIX: the original body referenced `si`, `n` and `sqrt` that were
    never defined anywhere, so every call raised NameError; this restores
    the intended shared-items computation.
    """
    from math import sqrt
    # Pair up the counts for keys the two dicts share.
    si = {k: (w1[k], w2[k]) for k in w1 if k in w2}
    n = len(si)
    if n == 0:
        return 0
    s1 = sum(v[0] for v in si.values())
    s2 = sum(v[1] for v in si.values())
    sum1Sq = sum(v[0] ** 2 for v in si.values())
    sum2Sq = sum(v[1] ** 2 for v in si.values())
    pSum = sum(v[0] * v[1] for v in si.values())
    num = pSum - (s1 * s2 / n)
    den = sqrt((sum1Sq - s1 ** 2 / n) * (sum2Sq - s2 ** 2 / n))
    if den == 0:
        return 0
    return num / den
| true |
b8b169bace6ac198fc503d1f5d556e2dc4683a21 | Python | KimEklund13/SeleniumWD-with-Python3x | /basicsSyntax/multiple_lists.py | UTF-8 | 278 | 4.34375 | 4 | [] | no_license | """
Iterating over multiple lists
"""
l1 = [1, 2, 3]
l2 = [6, 7, 8, 20, 30, 40]
# zip() pairs the lists element by element and stops at the shorter one,
# so this loop runs len(l1) == 3 times.
for left, right in zip(l1, l2):
    print(left)   # item from the first list
    print(right)  # item from the second list
168a8392fba11f97173cd96e9ae1d0cbff8be59c | Python | chadsten/advent-of-code | /2018/day-1/main.py | UTF-8 | 1,730 | 3.46875 | 3 | [] | no_license | # get values for freq changes
import json
from pprint import pprint
# Part 1: the final frequency is simply the sum of every change.
with open('C:/Users/chadsten/source/repos/advent-of-code/2018/day-1/data.json') as data_file:
    data = json.load(data_file)  # {"freq": [...]} with int()-convertible 'value' fields
## determine the end frequency after applying all modifiers in the
freq = 0 # base frequency
for f in data['freq']:
    freq = freq + int(f['value']) # simply loop to add all values
print(str(freq) + " is the end frequency.") # output final frequency
# Part 2: replay the change list repeatedly until a running frequency value
# is seen a second time.
## determine first frequency that has happened twice
occ = {} # occurence map: frequency value -> times seen so far
freq = 0 # base frequency
occ[freq] = 1 # set starting frequency to have occured once
hit = None # we have not had a hit (frequency occuring twice)
present = dict() # result of the latest buildData pass
def buildData(occ, freq):
    """Run one pass over the change list, updating seen-counts in `occ`.

    Returns a dict with 'freq' (frequency after the pass, or at the point a
    repeat was found) and 'number' (the occurrence count that triggered the
    stop, or 'fail' when the whole pass completed without any repeat).
    """
    outcome = dict()
    for change in data['freq']:
        freq = freq + int(change['value'])
        # Count this frequency; a first sighting starts from zero.
        occ[freq] = occ.get(freq, 0) + 1
        if occ[freq] > 1:
            outcome['number'] = occ[freq]
            break
    else:
        # The loop ran to completion: no repeat found during this pass.
        outcome['number'] = 'fail'
    outcome['freq'] = freq
    return outcome
# Keep replaying passes, carrying the occurrence map and running frequency
# forward, until buildData reports a repeated value.
while hit is None: # if no 2nd occurence has been detected yet
    present = buildData(occ, freq) # attempt to find 2nd occurence, carrying over data from previous passes
    freq = present['freq'] # set new frequency for next pass
    if present['number'] != 'fail': # if it's not our default fail, we have a match
        hit = 1
print(str(present['freq']) + " is the first frequency to be repeated.") # output first repeated frequency
| true |