blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bc7678824ba9db0c3d9276047145e72e2fd548f5 | Python | rushirg/30-day-leetcoding-challenge | /april-leetcoding-challenge/week-5/check-if-a-string-is-a-valid-sequence-from-root-to-leaves-path-in-a-binary-tree.py | UTF-8 | 1,016 | 3.5 | 4 | [] | no_license | """
Week 5 - Problem 2
Check If a String Is a Valid Sequence from Root to Leaves Path in a Binary Tree
https://leetcode.com/explore/featured/card/30-day-leetcoding-challenge/532/week-5/3315/
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def isValidSequence(self, root: TreeNode, arr: List[int]) -> bool:
        """Return True iff ``arr`` spells out a complete root-to-leaf path."""
        total = len(arr)

        def walk(node, pos):
            # Walked off the tree or past the sequence: this branch fails.
            if node is None or pos >= total:
                return False
            if node.val != arr[pos]:
                return False
            if node.left is None and node.right is None:
                # A leaf matches only when it consumes the final element.
                return pos == total - 1
            return walk(node.left, pos + 1) or walk(node.right, pos + 1)

        if root is None:
            # An empty tree can only match an empty sequence.
            return total == 0
        return walk(root, 0)
| true |
54e3334ca0c3ab9523be4b879389fd0dd8422926 | Python | gvasold/papilotte | /tests/server/test_configuration_configfile.py | UTF-8 | 6,645 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | """Test parsing an validation of config file via read_configfile.
"""
import papilotte.configuration
import pytest
import tempfile
import toml
from papilotte.connectors import pony as mockconnector
from papilotte.exceptions import ConfigurationError
def test_read_configfile(configfile, configuration):
    """Reading a complete, valid config file must yield all expected values."""
    parsed = papilotte.configuration.read_configfile(configfile)
    # The validator injects the resolved connector module into the result.
    configuration['connector']['connector_module'] = mockconnector
    for section, options in configuration.items():
        for key, expected in options.items():
            assert parsed[section][key] == expected, "Missmatch for [{}][{}]".format(section, key)
def test_invalid_port(configuration, tmp_path):
    """A non-numeric port value must raise ConfigurationError."""
    configuration['server']['port'] = 'foo'
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('port')
def test_empty_port(configuration, tmp_path):
    """A missing or None port must fall back to the default port 5000."""
    cfg_path = tmp_path / 'papilotte.toml'
    # Case 1: the port key is removed entirely.
    del configuration['server']['port']
    cfg_path.write_text(toml.dumps(configuration))
    parsed = papilotte.configuration.read_configfile(cfg_path)
    assert parsed['server']['port'] == 5000
    # also value None must lead to default port
    configuration['server']['port'] = None
    cfg_path.write_text(toml.dumps(configuration))
    parsed = papilotte.configuration.read_configfile(cfg_path)
    assert parsed['server']['port'] == 5000
def test_invalid_host(configuration, tmp_path):
    """An empty host string must be rejected with a length error."""
    configuration['server']['host'] = ''
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('length')
    assert excinfo.match('host')
def test_invalid_debug(configuration, tmp_path):
    """A non-boolean debug value must be rejected."""
    configuration['server']['debug'] = 'foo'
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('expected boolean')
    assert excinfo.match('debug')
def test_invalid_loglevel(configuration, tmp_path):
    """An unknown logLevel name must be rejected."""
    configuration['logging']['logLevel'] = 'foo'
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('not a valid value')
    assert excinfo.match('logLevel')
def test_missing_logfile(configuration, tmp_path):
    """If logTo is 'file', an empty logFile must raise ConfigurationError."""
    configuration['logging']['logTo'] = 'file'
    configuration['logging']['logFile'] = ''
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match("'logFile' must be configured")
def test_invalid_api_compliance_level(configuration, tmp_path):
    """A complianceLevel above the maximum of 2 must be rejected."""
    configuration['api']['complianceLevel'] = 99
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('value must be at most 2 ')
    assert excinfo.match('complianceLevel')
def test_invalid_api_base_path(configuration, tmp_path):
    """An empty basePath must be rejected with a length error."""
    configuration['api']['basePath'] = ''
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('length of value must be at least 1')
    assert excinfo.match('basePath')
def test_invalid_api_spec_file(configuration, tmp_path):
    """A specFile pointing to a non-existing path must be rejected."""
    configuration['api']['specFile'] = '/foo/bar.yml'
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('not a file')
    assert excinfo.match('specFile')
def test_invalid_api_max_size(configuration, tmp_path):
    """maxSize must be a positive integer: reject 0 and non-int values."""
    cfg_path = tmp_path / 'papilotte.toml'
    # Case 1: below the minimum.
    configuration['api']['maxSize'] = 0
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('value must be at least ')
    assert excinfo.match('maxSize')
    # Case 2: wrong type.
    configuration['api']['maxSize'] = "foo"
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('expected int')
    assert excinfo.match('maxSize')
def test_invalid_api_formats(configuration, tmp_path):
    """formats must be a non-empty list: reject [] and non-list values."""
    cfg_path = tmp_path / 'papilotte.toml'
    # Case 1: empty list.
    configuration['api']['formats'] = []
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('value must be at least ')
    assert excinfo.match('formats')
    # Case 2: wrong type.
    configuration['api']['formats'] = 'foo'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('expected list')
    assert excinfo.match('formats')
def test_invalid_config_name(configuration, tmp_path):
    """Unknown keys in a validated section must be rejected."""
    configuration['server']['foo'] = 'bar'
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    with pytest.raises(ConfigurationError) as excinfo:
        papilotte.configuration.read_configfile(cfg_path)
    assert excinfo.match('extra keys not allowed')
def test_connector_config_is_not_validated(configuration, tmp_path):
    """Arbitrary keys in the connector section must pass through unvalidated."""
    configuration['connector']['foo'] = 'bar'
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    parsed = papilotte.configuration.read_configfile(cfg_path)
    assert parsed['connector']['foo'] == 'bar'
def test_max_log_file_size(configuration, tmp_path):
    """A human-readable size like '3M' must be expanded to bytes."""
    configuration['logging']['maxLogFileSize'] = "3M"
    cfg_path = tmp_path / 'papilotte.toml'
    cfg_path.write_text(toml.dumps(configuration))
    parsed = papilotte.configuration.get_configuration(cfg_path)
    assert parsed['logging']['maxLogFileSize'] == 3 * 1024 * 1024
| true |
959e6e97377dede8d5c831c99636ae4aa5f4c8cc | Python | yashlab/node_projs | /gita_bot/verse_process.py | UTF-8 | 8,868 | 2.75 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
from bs4 import BeautifulSoup
import imgkit,datetime
import random,json
def get_quote(url):
    '''
    Fetch one Bhagavad Gita verse (English page plus its Hindi variant).

    Note: Credits for the absolutely amazing content goes to https://bhagavadgita.io/.
    It wouldn't have been possible without them.

    :param url: English verse page URL, ending in a trailing slash
                (the Hindi page is derived by appending 'hi/').
    :return: (parsed English soup, dict of verse parts keyed '0'..'5')
    '''
    # print('ENG URL:{}'.format(url))
    url_hindi = url + 'hi/'
    # print('HIN URL:{}'.format(url_hindi))
    # NOTE(review): no timeout / status check on these requests — a failed
    # fetch surfaces later as an IndexError in find_all()[0]; confirm intended.
    content = requests.get(url)
    content_hindi = requests.get(url_hindi)
    english_tea = BeautifulSoup(content.content,'html.parser') # parse response in html
    hindi_tea = BeautifulSoup(content_hindi.content,'html.parser') # parse response in html
    # type(tea.find_all('div',{"class":'sanskrit-text'}))
    # Scrape the individual verse components by their CSS class names.
    verse = english_tea.find_all(class_ = 'verse-sanskrit')[0].get_text()
    verse_transliteration = english_tea.find_all(class_ = 'verse-transliteration')[0].get_text()
    verse_word_meanings_eng = english_tea.find_all(class_='verse-word')[0].get_text()
    verse_meanings_eng = english_tea.find_all(class_='verse-meaning')[0].get_text()
    verse_word_meanings_hindi = hindi_tea.find_all(class_='verse-word')[0].get_text()
    verse_meanings_hindi = hindi_tea.find_all(class_='verse-meaning')[0].get_text()
    # Keys: 0=Sanskrit, 1=transliteration, 2=Hindi meaning, 3=English meaning,
    # 4=English word meanings, 5=Hindi word meanings.
    return english_tea, {'0':verse,
                         '1':verse_transliteration,
                         '2':verse_meanings_hindi,
                         '3':verse_meanings_eng,
                         '4':verse_word_meanings_eng,
                         '5':verse_word_meanings_hindi}
# In[3]:
def htm_2_img(verse):
    """Render the verse dict into 'verse_html.html' and rasterize it to
    quote.jpeg / quote.png via imgkit (requires wkhtmltoimage)."""
    import random  # NOTE(review): shadows the module-level import; harmless but redundant
    # Random card background and Krishna artwork picked per invocation.
    background_colors = ['#ffb703','#E072A4','#ef476f','#43aa8b','#264653','#2A9D8F','#E9C46A','#F4A261']
    krishna_pics = ['./images/k'+str(i)+'.svg' for i in range(1,6)]
    # HTML template: verse['0']=Sanskrit, ['1']=transliteration,
    # ['2']=Hindi translation, ['3']=English translation.
    html_cont = """
    <!DOCTYPE html>
    <html>
    <head>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
    <meta name="charset" content="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=0">
    <style>
    .section-head{
    font-weight: bold;
    text-align: center;
    font-size: 25px;
    }
    .sanskrit-text{
    text-align: center;
    font-size: 1.6rem;
    font-weight: bolder;
    padding-bottom: 10px;
    background-color: #ffb703;
    }
    .transliter{
    text-align: center;
    font-style: italic;
    font-size: 1.6rem;
    padding-bottom: 10px;
    font-family: 'Noto Serif';
    }
    .grid-container {
    display: grid;
    grid-template-columns: 550px 550px;
    padding: 10px;
    margin-top: 10px;
    align-content: center;
    }
    .grid-item {
    bottom: 50%;
    padding: 10px;
    text-align: center;
    border-radius: 0.8em;
    }
    .grid-title {
    border: 2px solid #00743f;
    border-radius: 0.8em;
    padding: 5px;
    font-size: 1.2em;
    text-align: center;
    grid-column-start: 1;
    grid-column-end: 3;
    }
    *
    {
    background-color:""" + str(random.choice(background_colors)) + """;
    }
    .body_style {
    width: 1200px;
    height: 600px;
    border: 2px solid #f9f9f9;
    border-radius: 0.8em;
    align-self: center;
    }
    hr {
    width: 25%;
    height: 2px;
    align-self: center;
    background-color: #ef476f;
    border:0px
    }
    </style>
    </head>
    <body class="body_style">
    <section id = 'shlok'>
    <div class="grid-container">
    <div class="grid-title" style = "color: #e05330;">||<u>श्लोक</u>|| </div>
    <div class="grid-item sanskrit-text"> """ + verse['0'].rstrip().replace('\n','<br>') + """
    </div>
    <hr>
    <div class="grid-item transliter"> """ + verse['1'].rstrip().replace('\n','<br>') + """
    </div>
    </div>
    </section>
    <section>
    <div class="grid-container">
    <div class="grid-title">
    <b style="color: #1DA1F2;font-size: 18px;">Translation</b>
    </div>
    <div class="grid-item" style="font-size: 18px;">
    <div style="color: #dc2f02;"> <b> English</b></div> """ + verse['3'] + """
    </div>
    <hr>
    <div class="grid-item" style="font-size: 18px;">
    <div style="color: #dc2f02;"> <b> हिंदी</b></div> """ + verse['2'] + """
    </div>
    </div>
    <div style="text-align: center;">
    <img src='""" + random.choice(krishna_pics) + """' alt="" style="height: 50px; width: 50px;">
    </div>
    </section>
    </body>
    </html>
    """
    with open('verse_html.html','w') as f:
        f.write(html_cont)
        f.close()  # redundant inside 'with', but harmless
    # Rasterize the same HTML into both output formats.
    imgkit.from_file('verse_html.html','quote.jpeg')
    imgkit.from_file('verse_html.html','quote.png')
# In[4]:
def meaning_img(verse):
    """Render the word-meaning columns (verse['4']=English, verse['5']=Hindi)
    into 'word_mean.html' and rasterize to meaning.jpeg / meaning.png."""
    html_cont = """
    <!DOCTYPE html>
    <html>
    <head>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
    <meta name="charset" content="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1, user-scalable=0">
    <style>
    .grid-container {
    display: inline-grid;
    align-content: center;
    grid-template-columns: 300px 300px;
    background-color: #ffcad4;
    }
    .grid-item {
    bottom: 50%;
    padding: 10px;
    text-align: center;
    border-radius: 0.8em;
    font-size: 1.3rem;
    border:2px solid #cc2936;
    margin-top: 5px;
    /* width: 50%; */
    }
    </style>
    </head>
    <body>
    <div class = 'grid-container'>
    <div class="grid-item" style="grid-column-end:3; grid-column-start: 1;"> <b> Word Meanings || शब्दार्थ </b> </div>
    <span>
    <div class="grid-item">""" + str(verse['4'].rstrip().replace('; ','<br>')) + """
    </div>
    <div class="grid-item"> """ + str(verse['5'].replace('(','<br>(')) + """
    </div>
    </span>
    </div>
    </body>
    </html>
    """
    with open('word_mean.html','w') as f:
        f.write(html_cont)
        f.close()  # redundant inside 'with', but harmless
    imgkit.from_file('word_mean.html','meaning.jpeg')
    imgkit.from_file('word_mean.html','meaning.png')
# In[5]:
#obatain next url
def new_url(content):
    """Extract the 'next verse' link from a parsed verse page and append it,
    datestamped, to verse_register.txt."""
    # The second warning-styled button on the page is the "next" navigation link.
    next_url = 'https://bhagavadgita.io'+str(content.find_all(class_ = 'btn btn-warning btn-rounded waves-effect waves-light')[1].get('href'))
    print('The next URL is : '+next_url)
    # A 'page' link means the chapter ended: jump to verse 1 of the next chapter.
    if (next_url.find('page')!=-1):
        # NOTE(review): 'url' is read from module/global scope here but is not
        # defined anywhere in this file — confirm the caller sets it before use.
        next_url = url
        next_url = next_url[:-3] + '1/'
        temp = next_url.split('/')
        # Path component [-4] is the chapter number; bump it by one.
        temp[-4] = str(int(url.split('/')[-4]) + 1)
        next_url = '/'.join(temp)
        print(next_url)
    with open('verse_register.txt','a') as obj:
        obj.write("{}->'{}'\n".format(datetime.date.today(),str(next_url)))
        print('''Written to file: {}->'{}'\n '''.format(datetime.date.today(),str(next_url)))
        obj.close()  # redundant inside 'with', but harmless
# In[6]:
def verse_details(verse, url):
    """Attach chapter metadata (looked up in chap_details.json) to *verse*.

    Path component [-4] of the URL is the chapter number and [-2] the verse
    number; the chapter title comes from the local JSON lookup table.
    """
    with open('chap_details.json', 'r') as handle:
        chapters = json.load(handle)
    parts = url.split('/')
    chapter_number = parts[-4]
    verse.update({
        'chap_num': chapter_number,
        'chap': chapters[chapter_number],
        'number': parts[-2],
    })
    return verse
| true |
871e74d5805d294de7a5a2eefbf5959c7d1c9ef5 | Python | Neuroschemata/Toy-RNN | /typeset.py | UTF-8 | 1,421 | 3.734375 | 4 | [] | no_license | def terminal_print(matrix_layout):
    """
    Prints a 'matrix' of 'text' using ascii symbols.
    Each value maps to a shading glyph from blank (near 0) to full block
    (near 1); '-' and '+' mark values below 0 or above 1 respectively.
    :@param matrix_layout: a matrix of floats from [0, 1]
    """
    for indx, mat in enumerate(matrix_layout):
        # Row-index gutter, e.g. ' 3⎜', printed without a newline.
        print('{:2d}⎜'.format(indx), end='')
        for val in mat:
            # Shading ramp: the higher the value, the denser the glyph.
            # (The stray trailing commas create discarded tuples — a py2
            # print-statement habit; they have no effect in py3.)
            if val < 0.0: print('-', end='')
            elif val < .15: print(' ', end=''),
            elif val < .35: print('░', end=''),
            elif val < .65: print('▒', end=''),
            elif val < .85: print('▓', end=''),
            elif val <= 1.: print('█', end=''),
            else: print('+', end='')
        print('⎜')
def print_CTC_decoding(symbls):
    """
    Build helpers for showing a CTC "decoding".

    CTC collapse: drop the blank label (index == len(symbls)) and merge
    consecutive duplicates.

    :@param symbls: list of symbols
    :return: (print_fn, length_fn) pair of helper functions
    """
    blank = len(symbls)

    def _collapse(labels):
        # Keep a label iff it is not the blank and differs from its predecessor.
        kept = []
        for pos, lab in enumerate(labels):
            if lab != blank and (pos == 0 or lab != labels[pos - 1]):
                kept.append(lab)
        return kept

    def lbl_print(labels):
        collapsed = _collapse(labels)
        print(collapsed, " ".join(symbls[l] for l in collapsed))

    def lbl_len(labels):
        return len(_collapse(labels))

    return lbl_print, lbl_len
| true |
9b78048d9370d06b88b36b5a92646c000405a64b | Python | mstump/mashumaro | /tests/test_json.py | UTF-8 | 511 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | import json
from dataclasses import dataclass
from typing import List
from mashumaro import DataClassJSONMixin
def test_to_json():
    """Serializing a mixin dataclass must match plain json.dumps output."""
    @dataclass
    class DataClass(DataClassJSONMixin):
        x: List[int]
    expected = json.dumps({'x': [1, 2, 3]})
    assert DataClass([1, 2, 3]).to_json() == expected
def test_from_json():
    """Deserializing plain json.dumps output must rebuild the dataclass."""
    @dataclass
    class DataClass(DataClassJSONMixin):
        x: List[int]
    payload = json.dumps({'x': [1, 2, 3]})
    assert DataClass.from_json(payload) == DataClass([1, 2, 3])
| true |
d7d8caf97f7c42a56a44e54d6a37fbcd5f6c8f18 | Python | davidjamesmoss/handout | /handout/inline_image_extension.py | UTF-8 | 1,199 | 2.640625 | 3 | [] | no_license | import os
import base64
from markdown.treeprocessors import Treeprocessor
from markdown.extensions import Extension
from markdown.util import etree
class InlineImageProcessor(Treeprocessor):
    """Markdown tree processor that replaces <img> sources with inline
    base64 data URIs, wrapping each image in a <figure> (with an optional
    <figcaption> built from the alt text)."""

    def run(self, doc):
        """Rewrite every //img element of the parsed document in place."""
        for image in doc.findall('.//img'):
            filepath = '/app/src/{}'.format(image.get('src'))
            fname, fext = os.path.splitext(filepath)
            try:
                # Read as bytes: base64 operates on raw data, and the text-mode
                # read used previously fails on binary images under Python 3.
                with open(filepath, 'rb') as fp:
                    b64 = base64.b64encode(fp.read()).decode('ascii')
            except IOError:
                # Missing or unreadable image: leave this element untouched.
                continue
            figure = etree.Element('figure')
            new_image = etree.Element('img')
            # fext[1:] strips the leading dot to form e.g. 'image/png'.
            new_image.set('src', 'data:image/{};base64,{}'.format(fext[1:], b64))
            figure.insert(0, new_image)
            alt = image.get('alt')
            if alt:
                caption = etree.Element('figcaption')
                caption.text = alt
                figure.insert(1, caption)
            # Replace the original element's content with the new figure.
            image.clear()
            image.append(figure)
class InlineImageExtension(Extension):
    """Markdown extension that registers InlineImageProcessor after the
    inline processors have run."""

    def extendMarkdown(self, md, md_globals):
        processor = InlineImageProcessor(md)
        md.treeprocessors.add('imgext', processor, '>inline')
| true |
cabd492ad7e872e3ad46b6db1b84de01eba7ae37 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_136/775.py | UTF-8 | 637 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env pypy
import sys, os
def solving(case, c, f, x):
    """Solve one GCJ 'Cookie Clicker Alpha' case and write the answer.

    c: farm cost, f: extra rate per farm, x: target cookies.
    Greedily buys farms while doing so lowers the projected finish time,
    then writes 'Case #N: <time>' to the global `outfile`.
    """
    rate = 2          # base cookie production rate
    spent_on_farms = 0  # time spent accumulating cookies for farms so far
    best = x / rate
    candidate = best
    # Stop as soon as buying one more farm would not improve the finish time.
    while best >= candidate:
        best = candidate
        spent_on_farms += c / rate
        rate += f
        candidate = spent_on_farms + x / rate
    outfile.write("Case #%d: %s\n" % (case, "%.7f" % best))
# Output file is named after the input file with a '.out' suffix.
outfile = open("%s.out" % sys.argv[1], "w")
with open(sys.argv[1]) as infile:
    # First line holds the case count; each following line is 'C F X'.
    for case, values in enumerate(infile.readlines()[1:]):
        solving(case + 1, *map(float, values.split()))
outfile.close()
| true |
895300d6f59b33e985b39a14685138a3693d39c1 | Python | akashhebbar/flask-crud | /db.py | UTF-8 | 313 | 2.609375 | 3 | [] | no_license | import sqlite3
# One-off setup script: creates the SQLite database file (if absent) and the
# task-record table.  Running it twice fails because the table already exists.
con=sqlite3.connect("taskrecord.db")
print("Database created")
# Date_t defaults to the insertion timestamp; aid is the autoincrementing key.
con.execute("create table record(Date_t TEXT DEFAULT CURRENT_TIMESTAMP,aid INTEGER PRIMARY KEY,qid INTEGER NOT NULL,State INTEGER NOT NULL,Amount INTEGER,Reason TEXT,Task_Count FLOAT )")
print("Table created successfully")
con.close()
| true |
242f66d8f9d4a3a86b9782bbbcbf4238a4930861 | Python | Intelligent-AWE/DeepHealth | /Code/DH-1.py | UTF-8 | 10,205 | 2.53125 | 3 | [] | no_license | import tensorflow as tf
import random
import numpy as np
import time
from tensorflow.contrib.layers import flatten
import Data_DH_1
seq_length = 1024   # length of one input sequence (samples per example)
line_num = 1000     # number of lines to read from the raw data files
# Data
X_data, y_data = Data_DH_1.data_read(seq_length, line_num)
X_data = Data_DH_1.data_embedding(X_data, seq_length)
print("Total data volume: {}".format(len(X_data)))
# Shuffle
# Zip features and labels together so one shuffle keeps them aligned.
Data = list(zip(X_data, y_data))
random.shuffle(Data)
X_data, y_data = zip(*Data)
X_data, y_data = np.array(X_data), np.array(y_data)
# Data split
# 70% train / 20% validation / 10% test.
# NOTE(review): the '-1' on each upper bound drops one sample at every split
# boundary — confirm this off-by-one is intentional.
X_train, y_train = X_data[0:int(len(X_data)*0.7)-1], y_data[0:int(len(y_data)*0.7)-1]
X_valuate, y_valuate = X_data[int(len(X_data)*0.7):int(len(X_data)*0.9)-1], y_data[int(len(X_data)*0.7):int(len(X_data)*0.9)-1]
X_test, y_test = X_data[int(len(X_data)*0.9):len(X_data)-1], y_data[int(len(X_data)*0.9):len(y_data)-1]
print("Train data volume: {}".format(len(X_train)), "Valuate data volume: {}".format(len(X_valuate)), "Teat data volume: {}".format(len(X_test)))
# Hyper-parameters
batch_size = 128
lr = 0.0001
# NOTE(review): '/' yields a float under Python 3, yet hidden_units is used as
# a tensor dimension below — presumably this code targets Python 2; verify.
hidden_units = seq_length / 8
maxlen = 8          # sequence is viewed as `maxlen` chunks of `hidden_units`
num_blocks = 3      # number of stacked attention blocks
num_epochs = 300
num_heads = 8       # attention heads per block
dropout_rate = 0.1
lambda_loss_amount = 0.0015  # L2 regularization weight
# Modules
def normalize(inputs, epsilon=1e-8, scope="ln", reuse=None):
    """Layer normalization over the last axis with learned gamma/beta.

    epsilon guards the division when the variance is ~0.
    """
    with tf.variable_scope(scope, reuse=reuse):
        inputs_shape = inputs.get_shape()
        params_shape = inputs_shape[-1:]  # one gamma/beta per feature
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        beta = tf.Variable(tf.zeros(params_shape))
        gamma = tf.Variable(tf.ones(params_shape))
        normalized = (inputs - mean) / ((variance + epsilon) ** (.5))
        outputs = gamma * normalized + beta
        return outputs
def multihead_attention(queries, keys, num_units=None, num_heads=8, dropout_rate=dropout_rate,
                        is_training=True, causality=False, scope="multihead_attention", reuse=None):
    """Scaled dot-product multi-head attention with residual + layer norm.

    Heads are formed by splitting the feature axis and stacking along the
    batch axis.  With causality=True, positions may only attend to earlier
    positions.  Returns a tensor shaped like `queries`.
    """
    with tf.variable_scope(scope, reuse=reuse):
        # Set the fall back option for num_units
        if num_units is None:
            num_units = queries.get_shape().as_list()[-1]
        # Linear projections
        Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)
        K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
        V = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
        print(Q.shape, K.shape, V.shape)
        # Split and concat: feature axis -> num_heads chunks stacked on batch axis
        Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)
        K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)
        V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)
        print(Q_.shape, K_.shape, V_.shape)
        # Multiplication: raw attention scores Q.K^T
        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))
        print(outputs.shape)
        # Scale by sqrt(d_k)
        outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)
        # Key Masking: large negative score for all-zero (padding) key positions
        key_masks = tf.sign(tf.reduce_sum(tf.abs(keys), axis=-1))
        key_masks = tf.tile(key_masks, [num_heads, 1])
        key_masks = tf.tile(tf.expand_dims(key_masks, 1), [1, tf.shape(queries)[1], 1])
        paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
        outputs = tf.where(tf.equal(key_masks, 0), paddings, outputs)
        print(outputs.shape)
        # Causality = Future blinding (lower-triangular mask)
        if causality:
            diag_vals = tf.ones_like(outputs[0, :, :])
            tril = tf.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense()
            masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1])
            paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
            outputs = tf.where(tf.equal(masks, 0), paddings, outputs)
        # Activation: scores -> attention weights
        outputs = tf.nn.softmax(outputs)
        # Query Masking: zero out rows for all-zero (padding) query positions
        query_masks = tf.sign(tf.reduce_sum(tf.abs(queries), axis=-1))
        query_masks = tf.tile(query_masks, [num_heads, 1])
        query_masks = tf.tile(tf.expand_dims(query_masks, -1), [1, 1, tf.shape(keys)[1]])
        outputs *= query_masks
        # Dropouts
        outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
        # Weighted sum
        outputs = tf.matmul(outputs, V_)
        # Restore shape: undo the head split
        outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)
        # Residual connection
        outputs += queries
        # Normalize
        outputs = normalize(outputs)
        return outputs
def feedforward(inputs, num_units, scope="multihead_attention", reuse=None):
    """Position-wise feed-forward net (two 1x1 conv layers) with residual
    connection and layer norm.  num_units = [inner_width, output_width];
    output_width must equal the input feature size for the residual add.
    (NOTE(review): the default scope name duplicates the attention scope —
    presumably a copy-paste leftover; confirm variable scoping is as intended.)
    """
    with tf.variable_scope(scope, reuse=reuse):
        # Inner layer (ReLU expansion)
        params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        print(outputs.shape)
        # Readout layer (linear projection back to the input width)
        params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1,
                  "activation": None, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        print(outputs.shape)
        # Residual connection
        outputs += inputs
        # Normalize
        outputs = normalize(outputs)
        return outputs
def attention_block(inputs):
    """One encoder block: self-attention (queries == keys == inputs) followed
    by a position-wise feed-forward sub-layer with 4x inner expansion."""
    enc = multihead_attention(queries=inputs,
                              keys=inputs,
                              num_units=hidden_units,
                              num_heads=num_heads,
                              dropout_rate=0.1,
                              is_training=True,
                              causality=False)
    enc = feedforward(enc, num_units=[4 * hidden_units, hidden_units])
    return enc
def linear(seq_len, inputs):
    """Final classifier head: flatten the block output and project the
    `seq_len` features onto 6 class logits."""
    logits = flatten(inputs)
    fc_W = tf.Variable(tf.truncated_normal(shape=(seq_len, 6), mean=0, stddev=0.1))
    fc_b = tf.Variable(tf.zeros(6))
    logits = tf.matmul(logits, fc_W) + fc_b
    return logits
def one_hot_encoding(y_):
    """One-hot encode a column vector of class indexes into float rows.

    e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
    Returns FLOATS (rows of the 6x6 identity matrix).
    """
    num_classes = 6
    flat = np.array(y_.reshape(len(y_)), dtype=np.int32)
    # Indexing the identity matrix by label picks out one-hot rows.
    return np.eye(num_classes)[flat]
# Graph inputs: x is (batch, maxlen, hidden_units); y is one-hot over 6 classes.
x = tf.placeholder(tf.float32, [None, maxlen, hidden_units])
y = tf.placeholder(tf.int32, [None, 6])
time_start = time.time()
# Blocks: three stacked encoder blocks, each in its own variable scope.
with tf.variable_scope("num_blocks_1"):
    enc1 = attention_block(x)
with tf.variable_scope("num_blocks_2"):
    enc2 = attention_block(enc1)
with tf.variable_scope("num_blocks_3"):
    enc3 = attention_block(enc2)
pred = linear(seq_length, enc3)
# L2 regularization over every trainable variable.
l2 = lambda_loss_amount * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)
loss_operation = tf.reduce_mean(cross_entropy) + l2
training_operation = tf.train.AdamOptimizer(learning_rate = lr).minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
# Bookkeeping accumulators for metrics and timings.
test_losses = []
test_accuracies = []
valuate_accuracies = []
valuate_losses = []
train_losses = []
train_accuracies = []
confusion_matrixes = []
train_time, val_time, test_time = [], [], []
def evaluate(X_data, y_data):
    """Run the graph over X_data/y_data in batches and return the
    example-weighted (mean accuracy, mean loss).  y_data must already be
    one-hot encoded; uses the default TF session."""
    num_examples = len(X_data)
    total_accuracy = 0
    total_loss = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, batch_size):
        batch_x, batch_y = X_data[offset:offset+batch_size], y_data[offset:offset+batch_size]
        loss, acc = sess.run(
            [loss_operation, accuracy_operation],
            feed_dict={
                x: batch_x,
                y: batch_y,
            }
        )
        # Weight batch metrics by the (possibly short final) batch size.
        total_accuracy += (acc * len(batch_x))
        total_loss += (loss * len(batch_x))
    return total_accuracy / num_examples, total_loss / num_examples
# ---- Training session: run num_epochs over the training set, validating
# after each epoch, then checkpoint the model. ----
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(num_epochs):
        for offset in range(0, num_examples, batch_size):
            end = offset + batch_size
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            _, loss, acc = sess.run(
                [training_operation, loss_operation, accuracy_operation],
                feed_dict={
                    x: batch_x,
                    y: one_hot_encoding(batch_y)
                }
            )
            train_accuracies.append(acc)
            train_losses.append(loss)
        # Per-epoch validation pass (timed).
        validation_time_start = time.time()
        valuate_accuracy, valuate_loss = evaluate(X_valuate, one_hot_encoding(y_valuate))
        validation_time_end = time.time()
        val_time.append(validation_time_end-validation_time_start)
        valuate_accuracies.append(valuate_accuracy)
        valuate_losses.append(valuate_loss)
        print("EPOCH {} ...".format(i + 1))
        print("Valuate Accuracy = {:.4f}".format(valuate_accuracy), "Valuate Loss = {:.4f}".format(valuate_loss), "Validation time = {:.3f}".format(validation_time_end-validation_time_start))
        print()
    saver.save(sess, './DH1-{}'.format(seq_length))
    print("Model saved")
    time_end = time.time()
    train_time.append(time_end-time_start)
    print("The time consumption of training stage = {:.3f}".format(time_end-time_start))
precision, recall, f1_score = [], [], []
# ---- Test session: restore the latest checkpoint and evaluate the whole
# test split in a single run. ----
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('./'))
    test_time_start = time.time()
    one_hot_prediction, loss, final_acc = sess.run(
        [pred, loss_operation, accuracy_operation],
        feed_dict={
            x: X_test,
            y: one_hot_encoding(y_test),
        }
    )
    test_time_end = time.time()
    test_time.append(test_time_end - test_time_start)
    test_accuracies.append(final_acc)
    test_losses.append(loss)
    print("The Final Test Accuracy = {:.5f}".format(final_acc))
    print("The time consumption of test = {:.3f}".format(test_time_end - test_time_start))
b7d7625d1787487d8a355148a53e635c504291b6 | Python | diegopnh/AulaPy2 | /Aula 15/Ex066.py | UTF-8 | 277 | 4.21875 | 4 | [] | no_license | number = count = sumNumber = 0
while True:
if number == 999:
break
sumNumber = sumNumber + number
number = int(input ('Escreva um número inteiro (999 para parar): '))
count += 1
print(f'Foram digitados {count} números e a soma deles é {sumNumber}') | true |
712e05845cd181c2fe4647dbce19cc86bccec90a | Python | MrJustPeachy/CodingChallenges | /Codequest/2016/Prob04.py | UTF-8 | 562 | 3.8125 | 4 | [] | no_license | # Import file
# Anagram checker: input file holds the case count on the first line, then one
# 'word1|word2' pair per line.
filename = 'Prob04.in.txt'
with open(filename) as file:
    test_cases = int(file.readline().strip())
    while test_cases > 0:
        word1, word2 = file.readline().strip().split('|')
        # Anagrams must use exactly the same letters with the same multiplicity.
        # The previous one-directional membership test wrongly accepted pairs
        # like 'aab|abb' or 'ab|abc'; comparing sorted letters is exact.
        # Identical words are (as before) not counted as anagrams.
        anagram = sorted(word1) == sorted(word2) and word1 != word2
        if anagram:
            print('%s|%s = ANAGRAM' % (word1, word2))
        else:
            print('%s|%s = NOT AN ANAGRAM' % (word1, word2))
        test_cases -= 1
667af20e423c0818623887ff2c2c58f9ca1320c9 | Python | lichao20000/python_spider | /base/class_demo/06_shibengtuji_02.py | UTF-8 | 631 | 3.421875 | 3 | [] | no_license | # coding: utf-8
from gun_class import Gun
class Soldier:
    """A soldier who may carry a gun; firing loads 50 rounds and shoots."""

    def __init__(self, name):
        self.name = name
        # A fresh recruit starts unarmed.
        self.gun = None

    def fire(self):
        """Shout the battle cry and fire, or complain when unarmed."""
        # Step 1: without a gun there is nothing to fire.
        if self.gun is None:
            print('[%s] 还没有枪!' % self.name)
            return
        # Step 2: shout the battle cry.
        print('冲啊...[%s]' % self.name)
        # Steps 3-4: load the magazine, then shoot.
        self.gun.add_bullet(50)
        self.gun.shoot()
if __name__ == '__main__':
    # Demo: arm a soldier with an AK47 and fire once.
    xusanduo = Soldier('许三多')
    gun_ak47 = Gun('AK47')
    xusanduo.gun = gun_ak47
    xusanduo.fire()
| true |
fe7d44495183e830b57bb19d1763a32a17e44684 | Python | Bobstin/AutomatedBrewery | /automatedbrewery/AutomatedBreweryUI.py | UTF-8 | 2,576 | 2.5625 | 3 | [
"MIT"
] | permissive | import sys
from PyQt5 import QtCore, QtGui, QtWidgets, uic
import pyqtgraph
from multiprocessing import Pipe
import threading
import time
import RTD
import PID
import HeatControl
qtCreatorFile = "./UI/AutomatedBreweryUI/DashboardLarge.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
class MyApp(QtWidgets.QMainWindow, Ui_MainWindow):
    """Brewery dashboard window: starts the heat controller and PID loops on
    background threads and plots their readings via Qt signals."""
    # Signals marshal (x, y[, kettle]) samples from worker threads to the GUI thread.
    tempGraphSignal = QtCore.pyqtSignal(float,float)
    heatGraphSignal = QtCore.pyqtSignal(float,float,str)
    # One pipe shared by heat controller and PID (class-level, created once).
    heatConn, PIDConn = Pipe()
    def __init__(self):
        # Configure pyqtgraph colors before widgets are built.
        pyqtgraph.setConfigOption('background', 'w')
        pyqtgraph.setConfigOption('foreground', 'k')
        super(MyApp, self).__init__()
        # Accumulated plot data (temperature and heat-setting series).
        self.tempx = []
        self.tempy = []
        self.heatx = []
        self.heaty = []
        self.tempGraphSignal.connect(self.tempGraph)
        self.heatGraphSignal.connect(self.heatGraph)
        self.setupUi(self)
        self.show()
        # Worker threads; they never exit on their own and are not joined.
        heatThread = threading.Thread(target = self.startHeatControl, args=(self.heatConn,self.heatGraphSignal))
        PIDThread = threading.Thread(target = self.startPID, args=(self.PIDConn,self.tempGraphSignal))
        heatThread.start()
        PIDThread.start()
    def startHeatControl(self,heatConn,heatGraphSignal):
        """Thread target: run the heat controller on the MLT kettle."""
        HeatCtrl = HeatControl.HeatController(pipeConn=heatConn, heatGraphSignal=heatGraphSignal)
        HeatCtrl.kettle = "MLT"
    def startPID(self,PIDConn,tempGraphSignal):
        """Thread target: configure the RTD sensor and PID, then auto-tune."""
        #Sets up the RTD (MAX31865 over bit-banged SPI; BCM pin numbers)
        cs_pin = 8
        clock_pin = 11
        data_in_pin = 9
        data_out_pin = 10
        rtd = RTD.MAX31865(cs_pin, clock_pin, data_in_pin, data_out_pin, units='f')
        time.sleep(5)  # let the sensor settle before first reads
        #Sets up the PID
        inputSource = rtd
        inputAttributeName = 'temp'
        pid = PID.PID(inputSource,inputAttributeName)
        pid.outputPipeConn = PIDConn
        pid.outputMin = 0
        pid.outputMax = 100
        pid.cycleTime = 2000
        pid.semiAutoValue = 10
        pid.outputAttributeName = 'heatSetting'
        pid.mode = 'SemiAuto'
        pid.tempGraphSignal = tempGraphSignal
        #pid.run()
        # Auto-tune parameters (see PID.autoTune signature for semantics).
        outputStartValue = 50
        outputChange = 50
        expectedNoiseAmplitude = 1
        steadyRequirementTime = 30*1000
        triggerDelta = 2
        lookBackTime = 10000
        requiredAccuracy=0.02
        pid.autoTune(outputStartValue,outputChange,expectedNoiseAmplitude, steadyRequirementTime, triggerDelta, lookBackTime, requiredAccuracy)
    def tempGraph(self, x, y):
        """Slot: append one temperature sample and redraw graph1."""
        self.tempx.append(x)
        self.tempy.append(y)
        self.graph1.plot(self.tempx,self.tempy,clear=True)
    def heatGraph(self, x, y, kettle):
        """Slot: append one heat-setting sample and redraw graph2."""
        self.heatx.append(x)
        self.heaty.append(y)
        self.graph2.plot(self.heatx,self.heaty,clear=True)
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = MyApp()
    sys.exit(app.exec_())
    # NOTE(review): unreachable — sys.exit raises before these run, and
    # heatProcess/PIDProcess are never defined (the workers are threads).
    heatProcess.join()
    PIDProcess.join()
| true |
f4b7f69741e787b88ec2b409a0a82329297a7060 | Python | widegren/competitive-programming | /advent-of-code/2016/day11/day11.py | UTF-8 | 318 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
"""
Python solution for Advent of Code Day 11: Radioisotope Thermoelectric Generators
"""
__author__ = "Emelie Widegren"
__email__ = "emeliewide@gmail.com"
if __name__ == '__main__':
print(sum(2 * sum([2, 4, 4, 0][:x]) for x in range(1, 4)) - 3)
print(sum(2 * sum([6, 4, 4, 0][:x]) for x in range(1, 4)) - 3)
| true |
a41b847623f0c4105e653381e4c1bab75790ef79 | Python | moroso/kernel | /user/build-disk | UTF-8 | 1,130 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
import struct
# On-disk image layout: MAGIC, a count word, fixed-size header records, then
# the concatenated file contents.
MAGIC = 'MROF'.encode()
FILENAME_LEN = 40
HEADER_FMT = "<i"
STRUCT_FMT = "<ii%ds" % FILENAME_LEN

def make_headers(files, contents):
    """Build one (offset, length, encoded-name) record per file.

    Offsets are absolute positions of each file's data within the image.
    """
    # Data begins right after the magic, the count word, and all header records.
    cursor = (len(MAGIC) + struct.calcsize(HEADER_FMT) +
              struct.calcsize(STRUCT_FMT) * len(files))
    records = []
    for filename, data in zip(files, contents):
        records.append((cursor, len(data), filename.encode()))
        cursor += len(data)
    return records
def output(f, headers, contents):
f.write(MAGIC)
f.write(struct.pack(HEADER_FMT, len(headers)))
for header in headers:
f.write(struct.pack(STRUCT_FMT, *header))
for data in contents:
f.write(data)
def main(args):
if args[1] == '-o':
target = args[2]
files = args[3:]
else:
target = None
files = args[1:]
files.sort()
contents = [open(s, "rb").read() for s in files]
headers = make_headers(files, contents)
out = open(target, "wb") if target else sys.stdout.buffer
output(out, headers, contents)
out.close()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| true |
619d9699a755f137e66229541e600a03545ed7e2 | Python | AlexanderBeahm/enigma | /main.py | UTF-8 | 493 | 2.546875 | 3 | [] | no_license | import sys
import base64
import cli_processor
import re
def main(args):
result_string = args.string
for x in range(args.iterations):
if(args.mode == 'decode'):
result_string = re.sub('\(.*\)', '', base64.b64decode(result_string).decode('utf-8'))
else:
result_string = base64.b64encode(result_string.encode('utf-8')).decode('utf-8')
print(result_string)
args = sys.argv
instructions = cli_processor.process_cli_args(args)
main(instructions) | true |
ed79666f8929c38e5dd2c6f8d3922a5c2521377d | Python | mion/stevejs | /server.py | UTF-8 | 471 | 2.671875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import json
from search_engine import SearchEngine
from flask import Flask, request
app = Flask(__name__)
engine = SearchEngine()
@app.route("/search")
def search():
query = request.args.get('query')
result = engine.search(query)
return json.dumps(result)
if __name__ == "__main__":
print ' * Loading data into search engine...'
engine.load()
print ' * Success: {} records loaded'.format(len(engine.db))
app.run()
| true |
6011b503abb9f835728981e0087c25f9cb182f73 | Python | yzh119/propositional-theorem-prover | /prover/solver.py | UTF-8 | 11,058 | 2.84375 | 3 | [] | no_license | from .ast import *
queue = []
def tautology(seq):
assert isinstance(seq, Sequent)
atom_l_arr = []
atom_r_arr = []
for form in seq.pre.forms:
if isinstance(form, Atom):
atom_l_arr.append(form)
for form in seq.post.forms:
if isinstance(form, Atom):
atom_r_arr.append(form)
if set(atom_l_arr) & set(atom_r_arr):
return True
return False
class Tree(object):
def __init__(self, pos, parent=None):
self.pos = pos
self.childs = []
self.rule = []
self.satis = []
self.parent = parent
self.chd_idx = -1
def add_child(self, rule, node1=None, node2=None):
assert node1
if node2:
self.childs.append((node1, node2))
self.satis.append([0, 0])
else:
self.childs.append(node1)
self.satis.append(0)
self.rule.append(rule)
def trace(self):
if self.chd_idx == -1:
print('{}: {}, by rule {} -> {}'.format(self.pos, queue[self.pos][0], 'P1 ', 'true'))
else:
imps = self.childs[self.chd_idx]
binary = isinstance(imps, tuple)
if binary:
imps = (imps[0].pos, imps[1].pos)
else:
imps = imps.pos
print('{}: {}, by rule {} -> {}'.format(self.pos, queue[self.pos][0], self.rule[self.chd_idx], imps))
if binary:
self.childs[self.chd_idx][0].trace()
self.childs[self.chd_idx][1].trace()
else:
self.childs[self.chd_idx].trace()
def update(self):
if not self.parent:
return True
else:
flag = False
for i, item in enumerate(self.parent.childs):
if isinstance(item, tuple):
if item[0] == self:
self.parent.satis[i][0] = 1
elif item[1] == self:
self.parent.satis[i][1] = 1
if sum(self.parent.satis[i]) == 2:
self.parent.chd_idx = i
flag = True
break
else:
if item == self:
self.parent.satis[i] += 1
self.parent.chd_idx = i
flag = True
break
if flag:
return self.parent.update()
def bfs_solver(seq):
ret = False
global queue
root = Tree(0)
queue.append((seq, root))
head = 0
while head < len(queue):
now = queue[head][0]
t = queue[head][1]
if tautology(now):
if t.update():
ret = True
break
else:
# Rule P2a
pre_list = now.pre.forms
post_list = now.post.forms
for form in post_list:
if isinstance(form, Formula):
if form.conn == Connective.NEG:
new_pre_list = pre_list[:]
new_post_list = post_list[:]
new_post_list.remove(form)
new_pre_list.append(form.get_op(1))
new_t = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list), String(new_post_list)), new_t))
t.add_child('P2a', node1=new_t)
# Rule P2b
for form in pre_list:
if isinstance(form, Formula):
if form.conn == Connective.NEG:
new_pre_list = pre_list[:]
new_pre_list.remove(form)
new_post_list = post_list[:]
new_post_list.append(form.get_op(1))
new_t = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list), String(new_post_list)), new_t))
t.add_child('P2b', node1=new_t)
# Rule P3a
for form in post_list:
if isinstance(form, Formula):
if form.conn == Connective.AND:
new_post_list_1 = post_list[:]
new_post_list_2 = post_list[:]
new_post_list_1.remove(form)
new_post_list_2.remove(form)
new_post_list_1.append(form.get_op(0))
new_post_list_2.append(form.get_op(1))
new_t_1 = Tree(len(queue), parent=t)
queue.append((Sequent(now.pre, String(new_post_list_1)), new_t_1))
new_t_2 = Tree(len(queue), parent=t)
queue.append((Sequent(now.pre, String(new_post_list_2)), new_t_2))
t.add_child('P3a', node1=new_t_1, node2=new_t_2)
# Rule P3b
for form in pre_list:
if isinstance(form, Formula):
if form.conn == Connective.AND:
new_pre_list = pre_list[:]
new_pre_list.remove(form)
new_pre_list.append(form.get_op(0))
new_pre_list.append(form.get_op(1))
new_t = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list), now.post), new_t))
t.add_child('P3b', node1=new_t)
# Rule P4a
for form in post_list:
if isinstance(form, Formula):
if form.conn == Connective.OR:
new_post_list = post_list[:]
new_post_list.remove(form)
new_post_list.append(form.get_op(0))
new_post_list.append(form.get_op(1))
new_t = Tree(len(queue), parent=t)
queue.append((Sequent(now.pre, String(new_post_list)), new_t))
t.add_child('P4a', node1=new_t)
# Rule P4b
for form in pre_list:
if isinstance(form, Formula):
if form.conn == Connective.OR:
new_pre_list_1 = pre_list[:]
new_pre_list_2 = pre_list[:]
new_pre_list_1.remove(form)
new_pre_list_2.remove(form)
new_pre_list_1.append(form.get_op(0))
new_pre_list_2.append(form.get_op(1))
new_t_1 = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list_1), now.post), new_t_1))
new_t_2 = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list_2), now.post), new_t_2))
t.add_child('P4b', node1=new_t_1, node2=new_t_2)
# Rule P5a
for form in post_list:
if isinstance(form, Formula):
if form.conn == Connective.IMP:
new_pre_list = pre_list[:]
new_post_list = post_list[:]
new_post_list.remove(form)
new_pre_list.append(form.get_op(0))
new_post_list.append(form.get_op(1))
new_t = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list), String(new_post_list)), new_t))
t.add_child('P5a', node1=new_t)
# Rule P5b
for form in pre_list:
if isinstance(form, Formula):
if form.conn == Connective.IMP:
new_pre_list_1 = pre_list[:]
new_pre_list_2 = pre_list[:]
new_post_list = post_list[:]
new_pre_list_1.remove(form)
new_pre_list_2.remove(form)
new_pre_list_1.append(form.get_op(1))
new_post_list.append(form.get_op(0))
new_t_1 = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list_1), now.post), new_t_1))
new_t_2 = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list_2), String(new_post_list)), new_t_2))
t.add_child('P5b', node1=new_t_1, node2=new_t_2)
# Rule P6a
for form in post_list:
if isinstance(form, Formula):
if form.conn == Connective.IFF:
new_pre_list_1 = pre_list[:]
new_pre_list_2 = pre_list[:]
new_post_list_1 = post_list[:]
new_post_list_2 = post_list[:]
new_post_list_1.remove(form)
new_post_list_2.remove(form)
new_pre_list_1.append(form.get_op(0))
new_post_list_1.append(form.get_op(1))
new_pre_list_2.append(form.get_op(1))
new_post_list_2.append(form.get_op(0))
new_t_1 = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list_1), String(new_post_list_1)), new_t_1))
new_t_2 = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list_2), String(new_post_list_2)), new_t_2))
t.add_child('P6a', node1=new_t_1, node2=new_t_2)
# Rule P6b
for form in pre_list:
if isinstance(form, Formula):
if form.conn == Connective.IFF:
new_pre_list_1 = pre_list[:]
new_pre_list_2 = pre_list[:]
new_post_list_2 = post_list[:]
new_pre_list_1.remove(form)
new_pre_list_2.remove(form)
new_pre_list_1.append(form.get_op(0))
new_pre_list_1.append(form.get_op(1))
new_post_list_2.append(form.get_op(0))
new_post_list_2.append(form.get_op(1))
new_t_1 = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list_1), now.post), new_t_1))
new_t_2 = Tree(len(queue), parent=t)
queue.append((Sequent(String(new_pre_list_2), String(new_post_list_2)), new_t_2))
t.add_child('P6b', node1=new_t_1, node2=new_t_2)
head += 1
"""
if hit != -1:
ret = True
history.append(('P1', queue[hit][0]))
while queue[hit][-1] != -1:
prev = queue[hit][-1]
history.append((queue[hit][1], queue[prev][0]))
hit = prev
"""
if ret:
root.trace()
return ret
| true |
091b596b3a024d2cf63b531ab1402ae0749c4f78 | Python | Zeryoshka/avito-geo | /serdis_client/__init__.py | UTF-8 | 3,765 | 3.390625 | 3 | [] | no_license | '''
Client for python serdis database
'''
from typing import List, Tuple
import requests
from .utils import is_valid_key
class Serdis():
'''
Object, for connection and working with serdis
'''
def __init__(self, host: str = '127.0.0.1', port: int = 5000):
'''
Consructor of Serdis class
'''
self._address = f'http://{host}:{port}/'
def ping(self) -> (str):
'''
Method for PING query
'''
value = requests.get(self._address+'ping').text
return value
def get(self, key: str) -> (Tuple[str, str]):
'''
Method for getting values by key (GET)
Returns: Tuple of
value(str) if key is available None if key is not available
message(str)
'''
key = str(key)
if not is_valid_key(key):
return None, 'invalid key'
data_json = requests.post(
self._address + 'get', json={'KEY': key}
).json()
return data_json['value'], data_json['message']
def set(self, key: str, value: str, ttl: int = None) -> (Tuple[bool, str]):
'''
Method for setting values (SET)
ttl - live time in seconds
Returns: Tuple of
is_created - True(bool) if can create or update value by key,
or False if can not
message - str
'''
key = str(key)
value = str(value)
if not is_valid_key(key):
return False, 'invalid key'
query = {
'KEY': key,
'VALUE': value
}
if ttl is not None:
query['TTL'] = int(ttl)
data_json = requests.post(self._address + 'set', json=query).json()
return data_json['is_created'], data_json['message']
def lset(self, key: str, value: List[str], ttl: int = None) -> (bool):
'''
Method for setting lists of values (SET)
ttl - live time in seconds
Returns: Tuple of
is_created: True(bool) if can create or update list by key,
or False if can not
message: (str) error or ok message
'''
key = str(key)
value = list(map(str, value))
if not is_valid_key(key):
return False, 'invalid key'
query = {
'KEY': key,
'VALUE': value
}
if ttl is not None:
query['TTL'] = int(ttl)
data_json = requests.post(self._address + 'lset', json=query).json()
return data_json['is_created'], data_json['message']
def lget(self, key: str) -> (List[str]):
'''
Method for getting values by key (GET)
Returns:
values lists(list of str) if key is available
None if key is not available
'''
key = str(key)
if not is_valid_key(key):
return None
data_json = requests.post(
self._address + 'lget', json={'KEY': key}
).json()
return data_json['value'], data_json['message']
def keys(self) -> (List[str]):
'''
Method for getting keys list (KEYS)
Returns:
keys: List of str
'''
return requests.get(self._address+'keys').json()['keys']
def delete(self, key) -> (Tuple[bool, str]):
'''
Method for deleting key (DEL)
Returns: Tuple of
is_deleted: (bool) True if deleted and False if didn't deleted
message: (str) ERROR or Ok message
'''
if not is_valid_key(key):
return False, 'invalid key'
data_json = requests.delete(
self._address + 'del', json={'KEY': key}
).json()
return data_json['is_deleted'], data_json['message']
| true |
12a3a983ae38047090ba6deaf6e6b2fd771c7dad | Python | Lucas20029/GetYeskyBeauties | /yeskyImg/SingleBeauty.py | UTF-8 | 1,535 | 2.53125 | 3 | [] | no_license | #coding:utf-8
import urllib
import urllib2
import re #正则表达式
def GetContentByURL(url, IsUtf8):
try:
request = urllib2.Request(url)
response=urllib2.urlopen(request)
content = response.read()
if (IsUtf8==True):
return content.decode('utf-8')
return content
except urllib2.URLError, e:
if hasattr(e,"code"):
print "ex with code"
print e.code
if hasattr(e,"reason"):
print "ex with code"
print e.reason
#打开一个人的详情页后,获取这个人所有的图片链接地址
def FindLinksInSingle(url):
content= GetContentByURL(url,False)
pattern = re.compile('<div id="scroll".*?<ul>(.*?)</ul>', re.S) #定位UL下面的li
items = re.findall(pattern, content)
pattern = re.compile('<a href="(.*?)" target', re.S) #取出每个li里面的链接
links = re.findall(pattern, items[0])
return links
#FindLinksInSingle("http://pic.yesky.com/293/108578293.shtml")
#从一个图片的详情页里面,获取该页的大图
def GetLargeImageOfPage(url):
content = GetContentByURL(url,False)
pattern =re.compile('<div class="l_effect_img_mid".*?<img src="(.*?)" alt',re.S)
imgLink =re.findall(pattern,content)[0]
return imgLink
#保存到本地
def SaveImgUrlToFile(imgUrl, newfilePath):
urllib.urlretrieve(imgUrl,newfilePath)
SaveImgUrlToFile("http://dynamic-image.yesky.com/740x-/uploadImages/2017/048/41/N0C382VD858Z_800x1200.jpg","E:/SpyImg/1.jpg") | true |
0ba986daf57a0cae6aca357e1aa3200159bfa037 | Python | Zorrander/cogrob-tut-hri | /franka_tut_reasoning/src/franka_tut_reasoning/planner.py | UTF-8 | 7,134 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | #! /usr/bin/env python
import abc
import networkx as nx
import rospy
import matplotlib.pyplot as plt
MAP_STEP_ARG={
'CartesianMotion': 'location_target',
'GripperMotion': 'width_target',
'JointAction': 'angle_goal'
}
class Planner(object):
"""docstring for Planner."""
__metaclass__ = abc.ABCMeta
def __init__(self):
super(Planner, self).__init__()
self.graph = nx.DiGraph()
def add_node(self, name, x, agent="Human"):
"""adds a new step"""
params = x.pop(-1)
#moves = [move.split('_')[0] for move in x]
self.graph.add_node(name, value=x, attributes=params, generator=self.generator_move(x, params), executor=agent, done=False)
def attribute_node_to_human(self, node):
nx.set_node_attributes(self.graph, {node : {'executor' : 'Human'}})
def attribute_node_to_robot(self, node):
nx.set_node_attributes(self.graph, {node : {'executor' : 'Robot'}})
def complete_step(self, node):
nx.set_node_attributes(self.graph, {node : {'done' : 'True'}})
def generator_move(self, moves, params):
for action in moves:
print "Generating move for {}".format(action)
if not action.startswith("NullAction"):
#yield (action, params[MAP_STEP_ARG.get(action)])
yield(action)
else:
yield(action)
def print_graph(self):
print list(self.graph.nodes(data=True))
def draw_graph(self):
print "drawing"
pos=nx.spring_layout(self.graph)
human_nodes = [n for n, d in self.graph.nodes.data() if d['executor']=="Human"]
robot_nodes = [n for n, d in self.graph.nodes.data() if d['executor']=="Robot"]
nx.draw_networkx_nodes(self.graph, pos, nodelist=human_nodes, node_color='r', node_size=500, alpha=0.8, label="Handled by humans")
nx.draw_networkx_nodes(self.graph, pos, nodelist=robot_nodes, node_color='b', node_size=500, alpha=0.8, label="Handled by the robot")
nx.draw_networkx_edges(self.graph, pos, edgelist=self.graph.edges(), width=8, alpha=0.5, edge_color='g')
labels = {n: n for n, d in self.graph.nodes.data()}
nx.draw_networkx_labels(self.graph, pos, labels, font_size=16)
plt.show()
@abc.abstractmethod
def initialize(self, steps, constraints):
"""Create groups of steps depending on the strategy"""
@abc.abstractmethod
def decide_next_move(self):
"""Decision making strategy"""
class AutomatedPlanner(Planner):
"""docstring for AutomatedPlanner."""
def __init__(self):
super(AutomatedPlanner, self).__init__()
def initialize(self, steps, constraints):
rospy.logdebug("initialize graph ({})".format(steps))
s = []
for step, tasks in steps:
self.add_node(step, tasks)
s.append(step)
for c in constraints:
if c:
self.graph.add_edge(*c)
self.print_graph()
for n in s:
for e in self.graph.edges():
if e[1] == n:
s.pop(s.index(n))
self.current_step = s[0]
self.draw_graph()
self.print_graph()
def decide_next_move(self):
"""Decides which action to take next"""
current_node = self.graph.nodes.data()[self.current_step]
if not current_node['done']:
try:
return next(current_node['generator'])
except StopIteration:
self.current_step = self.find_next_step()
if self.current_step:
current_node = self.graph.nodes.data()[self.current_step]
return next(current_node['generator'])
def find_next_step(self):
nodes = [n[0] for n in self.graph.nodes.data() if not n[1]['done']]
print nodes
followers = []
for e in self.graph.edges():
if e[1] in nodes:
nodes.pop(nodes.index(e[1]))
if e[0] == self.current_step:
followers.append(e)
if nodes:
return nodes[0]
else:
if followers:
return followers[0][1]
class InteractivePlanner(Planner):
"""docstring for InteractivePlanner."""
def __init__(self):
super(InteractivePlanner, self).__init__()
def initialize(self, steps, constraints):
rospy.logdebug("initialize graph ({})".format(steps))
s = []
working_agent = 'Human'
for step, tasks in steps:
self.add_node(step, tasks)
s.append(step)
for c in constraints:
if c:
print ("CONSTRAINTS : {}".format(c))
if working_agent == 'Robot':
self.attribute_node_to_human(c[1])
working_agent = 'Human'
else:
self.attribute_node_to_robot(c[1])
working_agent = 'Robot'
self.graph.add_edge(*c)
self.current_step = self.find_first_step()
self.draw_graph()
self.print_graph()
def find_first_step(self):
"""Identifies the first action required by the robot"""
poll = []
possible_first = True
for edge1 in self.graph.edges():
for edge2 in self.graph.edges():
if edge1[0] == edge2[1]:
possible_first = False
if possible_first:
poll.append(edge1[0])
print "Possible first : {}".format(poll)
return poll[0]
def decide_next_move(self):
"""Decides which action to take next"""
try:
if self.current_step:
current_node = self.graph.nodes.data()[self.current_step]
print "CURRENT NODE : {}".format(current_node)
if (current_node['executor'] == 'Human'):
return (False, True)
else:
return (next(current_node['generator']), False)
else:
return (False, False)
except StopIteration:
current_node['done'] = True
self.find_next_step()
if self.current_step:
current_node = self.graph.nodes.data()[self.current_step]
if (current_node['executor'] == 'Human'):
return (False, True)
else:
return (next(current_node['generator']), False)
else:
return (False, False)
def find_next_step(self):
has_next = False
self.complete_step(self.current_step)
for e in self.graph.edges():
if e[0] == self.current_step:
print "EDGE : {} == > {}".format(e[0], e[1])
has_next = e[1]
self.current_step = has_next
class CollaborativePlanner(Planner):
"""docstring for CollaborativePlanner."""
def __init__(self, ):
super(CollaborativePlanner, self).__init__()
def decide_next_move(self):
"""Decides which action to take next"""
pass
| true |
e2b663c9aaabf6afa7ef545ff6a5ea078b75c0dc | Python | Aasthaengg/IBMdataset | /Python_codes/p02420/s914281861.py | UTF-8 | 290 | 3 | 3 | [] | no_license | from sys import stdin
while True:
s = stdin.readline().rstrip()
if s == "-":
break
else:
m = int(stdin.readline().rstrip())
for i in range(m):
h = int(stdin.readline().rstrip())
s = s[h:]+s[:h]
else:
print(s)
| true |
9270644f5089f0d12957365b982b5a258275d0e9 | Python | farbodtaymouri/RuleDeviance | /Temporary.py | UTF-8 | 1,926 | 2.828125 | 3 | [
"MIT"
] | permissive | #This module is for temporary methods
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
#Transition System plot
def TS_plot(log1,log2, merg=True):
'''
:param log1: a ist of lists : log1 = [['a', 'e','f','d']] * 10 + [['b', 'a', 'c']] * 4
:param log2: a list of lists: [['c', 'b','a','f','a']] * 10 + [['b', 'a', 'c','b','a']] *1
:param merg: Plotting together or separate
:return:
'''
#Plotting two logs together
if merg == True:
log= log1+log2
uniq_event =list(set([k for t in log for k in t]))
#Create adjuncy matrix
adj_matrix = np.zeros((len(uniq_event),len(uniq_event)))
#filling the matrix
for t in log:
for i in range(len(t)-1):
row_ind = uniq_event.index(t[i])
col_ind = uniq_event.index(t[i+1])
adj_matrix[row_ind][col_ind]=1
#Creating a directed graph
G = nx.DiGraph()
#Adding node
#G.add_nodes_from(uniq_event)
# nodes=[]
# for e in uniq_event:
# nodes.append(dot.node(e))
#adding edges
temp=[]
for t in log:
for i in range(len(t) - 1):
if([t[i],t[i+1]] not in temp):
#print "Temmp:",temp
temp.append((t[i], t[i + 1]))
#G.add_edge(t[i],t[i+1])
G.add_edges_from(temp)
nx.draw(G)
plt.show()
return G
# # Graphviz must be installed in order to save output in pdf format
# if platform.system() == "Windows":
# os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# elif platform.system() == "Linux":
# if ('graphviz' not in sys.modules):
# os.system('sudo apt-get install graphviz')
#
# dot.render('test-output/'+time.strftime("%m/%d/%Y, %H%M%S") +".pdf", view=True)
########################################################################## | true |
a0825074211a5f08f3548a6c90c7f566127f6203 | Python | TahjidEshan/webScraper | /scraper.py | UTF-8 | 2,083 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from bs4 import BeautifulSoup
from urllib2 import urlopen
import re
import pandas
import os
def scrapper(driver, lists):
finalSet = []
for list in lists:
driver.get(list)
valueSet = []
nameVal = driver.find_element_by_tag_name('h2')
#print (nameVal.text)
# sleep(2)
name = nameVal.text.encode("utf-8")
valueSet.append(name)
soup = BeautifulSoup(urlopen(list), "lxml")
country = soup.findAll('td', attrs={'class': 'label'})
for element in country:
if 'Country:' in element.get_text():
countryName = element.find_next('td')
# print(countryName.get_text())
valueSet.append(countryName.get_text().encode("utf-8"))
emails = set(re.findall(
r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", urlopen(list).read(), re.I))
email = " "
if not emails:
email = " "
else:
email = emails.pop()
valueSet.append(email.encode("utf-8"))
#print (email)
finalSet.append(valueSet)
driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 'w')
return finalSet
def main():
driver = webdriver.Chrome()
mainUrl = "https://www.unodc.org/ngo/list.jsp"
driver.get(mainUrl)
assert "List" in driver.title
elems = driver.find_elements_by_xpath("//a[@href]")
lists = []
for elem in elems:
value = elem.get_attribute("href")
if 'showSingleDetailed' in value:
lists.append(value)
finalSet = scrapper(driver, lists)
finalSet = pandas.DataFrame(finalSet)
finalSet.columns = ['Name', 'Country', 'Email']
if not os.path.exists('data.csv'):
finalSet.to_csv('data.csv', index=False)
else:
with open('data.csv', 'a') as f:
finalSet.to_csv(f, header=False, index=False)
driver.close()
if __name__ == '__main__':
main()
| true |
94c015fa079de2239d82fcf75d904a5b9168be1b | Python | JaneliaSciComp/Neuroptikon | /Source/Layouts/OrthogonalPathLayout/path_routing.py | UTF-8 | 24,574 | 2.515625 | 3 | [
"BSD-2-Clause"
] | permissive | # Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
from display.layout import Layout
import numpy as N
import pyheapq
from heapset import HeapSet
import os
class VisibleMap:
    """
    Discrete occupancy grid over which edge paths are routed with A*.
    
    Each grid node stores up to two occupier display IDs (a primary occupier
    and one routed path crossing it); empty slots hold -1.  neighbor_nodes()
    generates the successor nodes for the search, optionally "hopping" over
    occupied nodes up to maxHops steps in a single direction.
    """
    
    def __init__(self, display, nodeDims, allowDiagonalNeighbors):
        """
        display: object exposing viewDimensions (2 or 3) and visibleWithId().
        nodeDims: grid size, one entry per view dimension; integral floats
                  (as produced by the caller's ceil'ed division) are accepted.
        allowDiagonalNeighbors: if True, diagonal grid steps are allowed.
        """
        self.display = display
        # Two occupier slots per node, initialized to -1 (empty).  Dimensions
        # may arrive as integral floats, which modern NumPy rejects, so coerce.
        self._occupiedNodes = N.ones([int(dim) for dim in nodeDims] + [2], N.int_) * -1
        self._distances = {}    # cache of Euclidean distances keyed by sorted absolute deltas
        self.allowDiagonalNeighbors = allowDiagonalNeighbors
        self.maxHops = 20    # how far neighbor_nodes() may tunnel through occupied nodes
        if self.allowDiagonalNeighbors:
            # TODO: better hop selection when this is enabled
            if self.display.viewDimensions == 2:
                self.offsets = [N.array((x, y)) for x in range(-1, 2) for y in range(-1, 2) if x != 0 or y != 0]
            elif self.display.viewDimensions == 3:
                self.offsets = [N.array((x, y, z)) for x in range(-1, 2) for y in range(-1, 2) for z in range(-1, 2) if x != 0 or y != 0 or z != 0]
        else:
            if self.display.viewDimensions == 2:
                self.offsets = [N.array((x, y)) for x, y in [(-1, 0), (1, 0), (0, -1), (0, 1)]]
            elif self.display.viewDimensions == 3:
                # Bug fix: these tuples have three components and must be unpacked
                # as (x, y, z); the previous "for x, y in ..." raised ValueError
                # and referenced an undefined z.
                self.offsets = [N.array((x, y, z)) for x, y, z in [(-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)]]
    
    def copy(self):
        """Return a new VisibleMap with the same settings and a copy of the occupancy grid."""
        # Strip the trailing occupier-slot axis from the shape; __init__ re-appends
        # it.  (Previously the full shape was passed, allocating a wrongly-shaped
        # array that was immediately discarded by the copy below.)
        newMap = VisibleMap(self.display, self._occupiedNodes.shape[:-1], self.allowDiagonalNeighbors)
        newMap._occupiedNodes = self._occupiedNodes.copy()
        return newMap
    
    def heuristic_estimate_of_distance(self, start, goal):
        """A* heuristic: the (admissible) straight-line distance from start to goal."""
        return self.dist_between(start, goal)
    
    def neighbor_nodes(self, node, edge):
        """
        Generate (neighbor, hoppedNeighbors) pairs reachable from node.
        
        For each direction the walk advances up to maxHops grid nodes, hopping
        over occupied nodes where permitted, and yields the first free node
        reached together with the list of occupied nodes hopped on the way.
        """
        node = N.array(node)
        # Both path end points are loop-invariant; fetch them once up front.
        (pathStart, pathEnd) = edge.pathEndPoints()
        # TODO: only allow orthogonal hops?
        for offset in self.offsets:
            try:
                neighbor = node.copy()
                hoppedNeighbors = []
                lastNeighbors = []
                for hopCount_ in range(1, self.maxHops + 1):
                    neighbor += offset
                    if min(neighbor) < 0:
                        # Don't go outside of the grid.  (Overflow past the upper
                        # bound is caught by the IndexError handler below.)
                        break
                    neighborVisibles = self.nodeOccupiers(tuple(neighbor))
                    if not any(neighborVisibles):
                        # Nobody there, it's a valid neighbor.
                        yield (tuple(neighbor), hoppedNeighbors)
                        break
                    hoppedNeighbors.append(tuple(neighbor))
                    if not neighborVisibles[0].isPath() and neighborVisibles[0] not in pathStart.ancestors() and neighborVisibles[0] not in pathEnd.ancestors():
                        # Don't hop into a node's space.
                        break
                    if any(set(neighborVisibles).intersection(set(lastNeighbors))):
                        # Don't hop along a parallel path.
                        break
                    lastNeighbors = neighborVisibles
            except IndexError:
                pass
    
    def dist_between(self, start, goal):
        """Return the Euclidean distance between two grid nodes, with caching."""
        dims = len(goal)
        assert len(start) == dims
        # Distances are symmetric in the per-axis deltas, so cache by the
        # sorted tuple of absolute differences.
        key = [abs(goal[dim] - start[dim]) for dim in range(dims)]
        key = tuple(sorted(key))
        if key in self._distances:
            distance = self._distances[key]
        else:
            distance = sum([dim ** 2 for dim in key]) ** 0.5
            self._distances[key] = distance
        return distance
    
    def setNodeOccupier(self, node, visible):
        """Set the primary occupier of node (None to clear), resetting the secondary slot."""
        self._occupiedNodes[tuple(node) + (0,)] = -1 if visible == None else visible.displayId
        self._occupiedNodes[tuple(node) + (1,)] = -1
    
    def addNodeOccupier(self, node, visible):
        """Record visible in the node's secondary occupier slot."""
        self._occupiedNodes[tuple(node) + (1,)] = visible.displayId
    
    def nodeOccupiers(self, node):
        """Return the list of visibles occupying node (empty if unoccupied)."""
        nodeOccupiers = []
        id0, id1 = self._occupiedNodes[tuple(node)]
        if id0 >= 0:
            nodeOccupiers += [self.display.visibleWithId(id0)]
        if id1 >= 0:
            nodeOccupiers += [self.display.visibleWithId(id1)]
        return nodeOccupiers
def reconstruct_path(came_from, current_node):
    """
    Rebuild the routed path that ends at current_node.
    
    came_from maps each visited node to (previous_node, hopped_neighbors).
    The walk back stops at the start node, which has no entry in came_from
    and is therefore not included in the result.
    
    Returns a list of (node, hopped_neighbors) tuples in start-to-end order.
    """
    steps = []
    node = current_node
    while node in came_from:
        previous, hopped = came_from[node]
        steps.append((node, hopped))
        node = previous
    steps.reverse()
    return steps
class HeapItem:
    """
    A candidate routing step, ordered for use in a priority queue.
    
    Items sort by (f_score, g_score, h_score, node), so the heap always
    pops the node with the lowest estimated total path cost first.
    Comparisons against objects of any other type are False by design
    (see type_check), except for !=, which is the negation of ==.
    """
    
    def __init__(self, node, goal, a_map, g_score):
        """
        node: the grid node this item represents.
        goal: the grid node being routed towards.
        a_map: map providing heuristic_estimate_of_distance(node, goal).
        g_score: distance from the start along the best known path to node.
        """
        self.node = node
        # g_score: distance from start along optimal path.
        self.g_score = g_score
        # h_score: the heuristic estimate of the distance to goal.
        self.h_score = a_map.heuristic_estimate_of_distance(node, goal)
        # f_score: estimated total distance from start to goal through node.
        self.f_score = self.h_score + self.g_score
    
    def as_tuple(self):
        """Return the sort key: (f_score, g_score, h_score, node)."""
        return (self.f_score, self.g_score, self.h_score, self.node)
    
    def __hash__(self):
        return self.as_tuple().__hash__()
    
    def __repr__(self):
        return str(self.as_tuple())
    
    def type_check(self, other):
        """Return True if other is also a HeapItem; ordering against other types is False."""
        return type(self) == type(other)
    
    def __lt__(self, other):
        return self.type_check(other) and self.as_tuple().__lt__(other.as_tuple())
    
    def __le__(self, other):
        return self.type_check(other) and self.as_tuple().__le__(other.as_tuple())
    
    def __eq__(self, other):
        return self.type_check(other) and self.as_tuple().__eq__(other.as_tuple())
    
    def __ne__(self, other):
        # Bug fix: the previous implementation returned False for objects of a
        # different type, so a HeapItem compared "not unequal" (i.e. equal) to
        # anything that wasn't a HeapItem.  Delegate to __eq__ so that != is
        # always the exact negation of ==.
        return not self.__eq__(other)
    
    def __gt__(self, other):
        return self.type_check(other) and self.as_tuple().__gt__(other.as_tuple())
    
    def __ge__(self, other):
        return self.type_check(other) and self.as_tuple().__ge__(other.as_tuple())
class PathRoutingLayout(Layout):
    @classmethod
    def name(cls):
        """
        Return the localized, human-readable name of this layout.
        
        NOTE(review): gettext is not imported in this module; presumably it is
        installed as a builtin (gettext.install()) by the application -- confirm.
        """
        return gettext('Path Routing')
    @classmethod
    def canLayoutDisplay(cls, display):
        """Return True if this layout can be applied to the display (2D views only)."""
        return display.viewDimensions == 2
    def __init__(self, nodeSpacing = None, objectPadding = 0.0, crossingPenalty = 5.0, turningPenalty = 5.0, allowDiagonalPaths = True, *args, **keywordArgs):
        """
        Create a path routing layout.
        
        nodeSpacing: distance between routing-grid nodes, in world units; None
                     derives it from the tightest per-port spacing of the
                     visibles being laid out.
        objectPadding: extra clearance added around each visible when marking
                       the grid as occupied.
        crossingPenalty, turningPenalty: cost penalties (by name, applied when a
                       route crosses another path or changes direction --
                       their use is outside this view, confirm in layoutDisplay).
        allowDiagonalPaths: if True, the routing grid also connects diagonal
                            neighbors.
        Remaining positional/keyword arguments are forwarded to Layout.__init__.
        """
        Layout.__init__(self, *args, **keywordArgs)
        # TODO: determine these values automatically based on visible spacing and connection counts
        self.nodeSpacing = nodeSpacing
        self.objectPadding = objectPadding
        self.crossingPenalty = crossingPenalty
        self.turningPenalty = turningPenalty
        self.allowDiagonalPaths = allowDiagonalPaths
def layoutDisplay(self, display):
# Calculate the bounds of every non-path visible.
# TODO: warn the user if the layout is going to take a while? Or provide progress with cancel?
centerPositions = {}
minPositions = {}
maxPositions = {}
viewDimensions = display.viewDimensions
minBound = N.ones(viewDimensions) * 1e300
maxBound = N.ones(viewDimensions) * -1e300
edges = []
ports = {}
minPortSpacing = 1e300
for visibles in display.visibles.itervalues():
for visible in visibles:
if visible.isPath():
if not visible.pathIsFixed():
(startPoint, endPoint) = visible.pathEndPoints()
edgeLength = ((N.array(endPoint.worldPosition()) - N.array(startPoint.worldPosition())) ** 2).sum()
edges.append((edgeLength, visible))
else:
position = visible.worldPosition()
if viewDimensions == 2:
position = N.array((position[0], position[1]))
else:
position = N.array(position)
centerPositions[visible] = position
size = visible.worldSize()
if self.nodeSpacing == None:
minPorts = len(visible.connectedPaths)
if minPorts > 0:
if viewDimensions == 2:
portSpace = 2.0 * size[0] + 2.0 * size[1]
else:
portSpace = size[0] * size[1] + size[0]* size[2] + size[1] * size[2]
portSpacing = portSpace / minPorts / 2.0
if portSpacing < minPortSpacing:
minPortSpacing = portSpacing
if viewDimensions == 2:
if visible.shape() == 'capsule':
size = (size[0] / 2.0, size[1])
size = N.array((size[0] / 2.0 + self.objectPadding, size[1] / 2.0 + self.objectPadding))
else:
if visible.shape() == 'capsule':
size = (size[0] / 2.0, size[1], size[2] / 2.0)
size = N.array((size[0] / 2.0 + self.objectPadding, size[1] / 2.0 + self.objectPadding, size[2] / 2.0 + self.objectPadding))
minPositions[visible] = position - size
maxPositions[visible] = position + size
for dim in range(viewDimensions):
minBound[dim] = min(minPositions[visible][dim], minBound[dim])
maxBound[dim] = max(maxPositions[visible][dim], maxBound[dim])
ports[visible] = []
if self.nodeSpacing != None:
nodeSpacing = self.nodeSpacing
else:
nodeSpacing = minPortSpacing
# Determine the bounds of all nodes and the mapping scale.
minBound -= nodeSpacing * len(edges) / 2 + (minBound % nodeSpacing)
maxBound += nodeSpacing * len(edges) / 2 + nodeSpacing - (maxBound % nodeSpacing)
mapSize = (maxBound - minBound) / nodeSpacing
# Build the node map
nodeMap = VisibleMap(display, mapSize, self.allowDiagonalPaths)
for visible in minPositions.iterkeys():
minMap = N.ceil((minPositions[visible] - minBound) / nodeSpacing)
maxMap = N.ceil((maxPositions[visible] - minBound) / nodeSpacing)
for x in range(int(minMap[0]) - 1, int(maxMap[0]) + 1):
for y in range(int(minMap[1]) - 1, int(maxMap[1]) + 1):
if viewDimensions == 2:
xOut = x < minMap[0] or x == maxMap[0]
yOut = y < minMap[1] or y == maxMap[1]
if xOut != yOut:
ports[visible].append((x, y))
if not any(visible.children):
nodeMap.setNodeOccupier((x, y), visible)
else:
for z in range(int(minMap[2]) - 1, int(maxMap[2]) + 1):
if x < minMap[0] or x == maxMap[0] or y < minMap[1] or y == maxMap[1] or z < minMap[2] or z == maxMap[2]:
ports[visible].append((x, y, z))
elif not any(visible.children) :
nodeMap.setNodeOccupier((x, y, z), visible)
#nodeMap.show()
# TODO: pre-assign ports? or at least port edges?
# Route each edge starting with the shortest and finishing with the longest.
edges.sort()
totalEdgeLength = 0.0
penalty = 1.0
for edgeLength, edge in edges:
totalEdgeLength += edgeLength * penalty
penalty += 0.1
edgeCount = 0
edgeLengthRouted = 0.0
penalty = 1.0
edgeMidPoints = {}
for edgeLength, edge in edges:
edgeLengthRouted += edgeLength * penalty
penalty += 0.1
if not display.updateProgress('Routing paths...', edgeLengthRouted / totalEdgeLength):
edgeMidPoints.clear()
break
edgeCount += 1
(pathStart, pathEnd) = edge.pathEndPoints()
startName = '???' if not pathStart.client or not pathStart.client.abbreviation else pathStart.client.abbreviation
endName = '???' if not pathEnd.client or not pathEnd.client.abbreviation else pathEnd.client.abbreviation
if 'DEBUG' in os.environ:
print 'Routing path from ' + startName + ' to ' + endName + ' (' + str(edgeCount) + ' of ' + str(len(edges)) + ')'
# TODO: weight the search based on any previous path
# Make a copy of the map to hold our tentative routing. Once the actual route is determined this map will be discarded and the main map will be updated.
# TODO: is this really necessary?
edgeMap = nodeMap.copy()
openHeap = HeapSet() # priority queue of potential steps in the route
openDict = {} # the set of tentative nodes to be evaluated
closedSet = set([]) # the set of blocked nodes
came_from = {} # tracks the route to each visited node
# Aim for the center of the end visible and allow the edge to travel to any unused goal port.
goal = tuple(N.ceil(((centerPositions[pathEnd]) - minBound) / nodeSpacing))
goalPorts = ports[pathEnd]
for goalPort in goalPorts:
if edgeMap.nodeOccupiers(goalPort)[0] == pathEnd:
edgeMap.setNodeOccupier(goalPort, None)
# Seed the walk with all unused ports on the starting visible.
for startPort in ports[pathStart]:
if edgeMap.nodeOccupiers(startPort)[0] == pathStart:
startItem = HeapItem(startPort, goal, edgeMap, edgeMap.dist_between(startPort, goal))
openHeap.append(startItem)
openDict[startPort] = startItem
closedSet.add(startPort)
while any(openHeap):
x = pyheapq.heappop(openHeap)
if x.node in goalPorts:
# The goal has been reached. Build the path in world space and update the global map.
path = []
prevNode = None
for node, hoppedNeighbors in reconstruct_path(came_from, x.node): #[:-1]:
nodeMap.setNodeOccupier(node, edge)
for hoppedNeighbor in hoppedNeighbors:
nodeMap.addNodeOccupier(hoppedNeighbor, edge)
prevNode = node
pathPoint = tuple(N.array(node) * nodeSpacing + minBound)
path += [(pathPoint[0], pathPoint[1], 0.0 if len(pathPoint) == 2 else pathPoint[2])]
# Combine consecutive path segments with the same slope.
for index in range(len(path) - 2, 0, -1):
delta0 = N.array(path[index + 1]) - N.array(path[index])
delta1 = N.array(path[index]) - N.array(path[index - 1])
sameSlope = True
for dim in range(1, viewDimensions):
slope0 = 1e300 if delta0[0] == 0.0 else delta0[dim] / delta0[0]
slope1 = 1e300 if delta1[0] == 0.0 else delta1[dim] / delta1[0]
if abs(slope0 - slope1) > 0.00001:
sameSlope = False
break
if sameSlope:
del path[index]
edgeMidPoints[edge] = path
del edgeMap
break
del openDict[x.node]
closedSet.add(x.node)
neighbornodes = []
for node_y, hoppedNeighbors in edgeMap.neighbor_nodes(x.node, edge):
# This block of code gets executed at least hundreds of thousands of times so it needs to be seriously tight.
# Start with the distance between the nodes.
g_score = x.g_score + edgeMap.dist_between(x.node, node_y)
# Penalize crossing over other edges.
g_score += len(hoppedNeighbors) * self.crossingPenalty
# Penalize turning.
if x.node in came_from:
prevNode = came_from[x.node][0]
delta0 = x.node[0] - prevNode[0]
delta1 = node_y[0] - x.node[0]
if (delta0 < 0) != (delta1 < 0) or (delta0 > 0) != (delta1 > 0):
g_score += self.turningPenalty
else:
delta0 = x.node[1] - prevNode[1]
delta1 = node_y[1] - x.node[1]
if (delta0 < 0) != (delta1 < 0) or (delta0 > 0) != (delta1 > 0):
g_score += self.turningPenalty
elif viewDimensions == 3:
delta0 = x.node[2] - prevNode[2]
delta1 = node_y[2] - x.node[2]
if (delta0 < 0) != (delta1 < 0) or (delta0 > 0) != (delta1 > 0):
g_score += self.turningPenalty
neighbornodes.append((g_score, node_y, hoppedNeighbors))
#better sort here than update the heap ..
neighbornodes.sort()
for tentative_g_score, node_y, hoppedNeighbors in neighbornodes:
if node_y in closedSet:
continue
oldy = openDict.get(node_y, None)
y = HeapItem(node_y, goal, edgeMap, tentative_g_score)
# openDict[node_y] = y
# came_from[node_y] = (x.node, hoppedNeighbors)
# edgeMap.setNodeOccupier(node_y, edge)
# for hoppedNeighbor in hoppedNeighbors:
# edgeMap.addNodeOccupier(hoppedNeighbor, edge)
if oldy == None:
openDict[node_y] = y
came_from[node_y] = (x.node, hoppedNeighbors)
pyheapq.heappush(openHeap, y)
elif tentative_g_score < oldy.g_score:
openDict[node_y] = y
came_from[node_y] = (x.node, hoppedNeighbors)
pyheapq.updateheapvalue(openHeap, openHeap.index(oldy), y)
#edgeMap.show()
if 'edgeMap' in dir():
print '\tCould not find route from ' + startName + ' to ' + endName
#nodeMap.show()
for edge, midPoints in edgeMidPoints.iteritems():
edge.setPathMidPoints(midPoints)
#import pyheapq
#
#from heapset import HeapSet
#from copy import copy
#
#class NotImplemented(Exception):
# pass
#
#
#class Map:
# def __init__(self):
# pass
# def heuristic_estimate_of_distance(self, start,goal):
# raise NotImplemented
# def neighbor_nodes(self, x):
# raise NotImplemented
#
# def dist_between(self, x, y):
# raise NotImplemented
#
#def reconstruct_path(came_from, current_node):
# if current_node in came_from:
# p = reconstruct_path(came_from,came_from[current_node])
# return p + [current_node]
# else:
# return []
#
#
#class HeapItem:
# def __init__(self,y,goal, a_map, g_score):
#
# self.node = y
#
# """ g_score = Distance from start along optimal path."""
# self.g_score = g_score
#
# """h_score the heuristic estimates of the distances to goal"""
# self.h_score = a_map.heuristic_estimate_of_distance(y, goal)
#
# """f_score Estimated total distance from start to goal through y."""
# self.f_score = self.h_score + self.g_score
#
# def as_tuple(self):
# return (self.f_score, self.g_score, self.h_score, self.node)
#
# def __hash__(self):
# return self.as_tuple().__hash__()
#
# def __repr__(self):
# return str(self.as_tuple())
#
# def type_check(self,other):
# return type(self) == type(other)
# def __lt__(self, other):
# return self.type_check(other) and self.as_tuple().__lt__(other.as_tuple())
# def __le__(self, other):
# return self.type_check(other) and self.as_tuple().__le__(other.as_tuple())
# def __eq__(self, other):
# return self.type_check(other) and self.as_tuple().__eq__(other.as_tuple())
# def __ne__(self, other):
# return self.type_check(other) and self.as_tuple().__ne__(other.as_tuple())
# def __gt__(self, other):
# return self.type_check(other) and self.as_tuple().__gt__(other.as_tuple())
# def __ge__(self, other):
# return self.type_check(other) and self.as_tuple().__ge__(other.as_tuple())
#
#
#
#
#def A_star(start, goal, a_map):
# """
# start = the start node in a_map
# goal = the goal node in a_map
# a_map = a object that should inherit the Map class
#
# returns a tuple (path, connections, uptated) where:
# path is the optimal path (as a list of points) from the start to the goal. empty if not found,
#
# connections and updated are for debugging (remove them from code if too slow..,):
# connections is the came_from dictionary and
# uptated is the set of the connections, which were uptated from the heap
# """
#
# """The set of nodes already evaluated."""
# closedset = set([])
#
#
# firstItem = HeapItem(start,goal, a_map, 0.0)
#
#
#
# """
# openDict is the set of tentative nodes to be evaluated
# containing just the initial node
#
# scoreHeap is used as priority queue for next steps.
# """
#
# scoreHeap = HeapSet([firstItem])
# openDict = {start:firstItem}
#
#
# """ the second last node in the shortest path from start node"""
# came_from = {}
#
# """this is the set of points which were uptated when they were in the heap
# this is used only to debug the algorithm. remove if slows too much"""
# updateset = set([])
#
# while any(scoreHeap): # is not empty
# """
# the node in openset having
# the lowest (f_score,g_score,h_score, position) value (f_score means the most ...)
# """
# x = pyheapq.heappop(scoreHeap)
#
# if x.node == goal:
# return [start] + reconstruct_path(came_from,goal), came_from, updateset
#
# del openDict[x.node]
# closedset.add(x.node)
#
# neighbornodes = [
# (x.g_score + a_map.dist_between(x.node, node_y),node_y )
# for node_y in a_map.neighbor_nodes(x.node)
# ]
# #better sort here than update the heap ..
# neighbornodes.sort()
#
#
# for tentative_g_score, node_y in neighbornodes:
#
# if node_y in closedset:
# continue
#
#
# oldy = openDict.get(node_y,None)
# y = copy(oldy)
#
# y = HeapItem(node_y, goal, a_map, tentative_g_score)
#
# if oldy == None:
# openDict[node_y] = y
# came_from[node_y] = x.node
#
# pyheapq.heappush(scoreHeap, y)
#
# elif tentative_g_score < oldy.g_score:
# updateset.add( (node_y, came_from[node_y]) )
#
# openDict[node_y] = y
# came_from[node_y] = x.node
#
# pyheapq.updateheapvalue(scoreHeap, scoreHeap.index(oldy), y)
#
#
# return [], came_from, updateset
| true |
e316d8d29ca87333599f0292ba9d737e20f2c870 | Python | ryuoujisinta/othero | /lib/mcts.py | UTF-8 | 16,051 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Monte-Carlo Tree Search
"""
import time
import numpy as np
from lib import game, model, game_c
import torch.nn.functional as F
import torch.multiprocessing as mp
class MCTS:
    """
    Class keeps statistics for every state encountered during the search.

    All tables are keyed by the integer-encoded board state (``state_int``)
    and hold one entry per action: ``game.BOARD_SIZE ** 2`` board moves plus
    one trailing "pass" move.
    """
    def __init__(self, c_puct=1.0):
        # Exploration constant of the PUCT selection score.
        self.c_puct = c_puct
        # count of visits, state_int -> [N(s, a)]
        self.visit_count = {}
        # total value of the state's action, state_int -> [W(s, a)]
        self.value = {}
        # average value of actions, state_int -> [Q(s, a)]
        self.value_avg = {}
        # prior probability of actions, state_int -> [P(s,a)]
        self.probs = {}
        # NOTE(review): never read or updated anywhere in this class.
        self.for_time = 0
        # Paths (lists of state_ints) traversed by past searches; consumed
        # by clear_subtrees() to prune statistics between moves.
        self.subtrees = []
        # Cache of net evaluations: (state_int, rotation) -> (value, probs).
        self.visited_net_results = {}
    def clear_subtrees(self, state):
        """
        Prune statistics, keeping only data reachable from ``state``.

        Every stored path is first advanced by one ply (its root is
        dropped); paths that no longer start at ``state`` are discarded and
        statistics of states absent from all retained paths are removed.
        """
        retain_list = [state]
        new_subtrees = []
        # First pass: advance every stored path by one ply.
        for subtree in self.subtrees:
            if len(subtree) > 1:
                new_subtrees.append(subtree[1:])
            else:
                subtree.clear()
                del subtree
        self.subtrees.clear()
        self.subtrees = new_subtrees.copy()
        new_subtrees.clear()
        # Second pass: keep only paths rooted at the new state.
        for subtree in self.subtrees:
            if subtree[0] == state:
                new_subtrees.append(subtree)
                retain_list.extend(subtree[1:])
            else:
                subtree.clear()
                del subtree
        retain_list = list(set(retain_list))
        self.subtrees.clear()
        self.subtrees = new_subtrees
        state_list = list(self.visit_count.keys()).copy()
        # Drop statistics of every state that is no longer reachable.
        for s in state_list:
            if s not in retain_list:
                self.visit_count.pop(s)
                self.value.pop(s)
                self.value_avg.pop(s)
                self.probs.pop(s)
    def clear(self):
        """Drop all statistics, priors and stored subtrees."""
        self.visit_count.clear()
        self.value.clear()
        self.value_avg.clear()
        self.probs.clear()
        [subtree.clear() for subtree in self.subtrees]
        self.subtrees.clear()
    def __len__(self):
        # Number of expanded (net-evaluated) states in the tree.
        return len(self.value)
    def find_leaf(self, state_int, player, root_mask):
        """
        Traverse the tree until the end of game or leaf node
        :param state_int: root node state
        :param player: player to move
        :param root_mask: additive mask (-inf on illegal root moves) applied
            to the selection score at the root only
        :return: tuple of (value, leaf_state, player, states, actions)
        1. value: None if leaf node, otherwise equals to the game outcome for the player at leaf
        2. leaf_state: state_int of the last state
        3. player: player at the leaf node
        4. states: list of states traversed
        5. list of actions taken
        """
        states = []
        actions = []
        cur_state = state_int
        cur_player = player
        value = None
        pass_count = 0
        while not self.is_leaf(cur_state):
            states.append(cur_state)
            counts = self.visit_count[cur_state]
            total_sqrt = np.sqrt(counts.sum())
            probs = self.probs[cur_state]
            values_avg = self.value_avg[cur_state]
            # Decode the board once per step; it is needed both for masking
            # below and for the legality check in the selection loop.
            # FIX: previously this was only assigned in the non-root branch,
            # so the root iteration hit the legality check with ``cur_field``
            # unbound (UnboundLocalError).
            cur_field, _ = game_c.decode_binary(cur_state)
            # choose action to take, in the root node add the Dirichlet noise to the probs
            if cur_state == state_int:
                noises = np.random.dirichlet([0.03] * (game.BOARD_SIZE ** 2 + 1))
                probs = 0.75 * probs + 0.25 * noises
                score = values_avg + self.c_puct * probs * total_sqrt / (1 + counts)
                score += root_mask
            else:
                # select moves that maximise an upper confident bound
                score = values_avg + self.c_puct * probs * total_sqrt / (1 + counts)
                # suppress pass move
                score[-1] = -10.
                score[:-1][cur_field != 2] = -np.inf
            # Take the best-scoring action that is actually legal.
            while True:
                action = score.argmax()
                if game_c.is_possible_move_f(cur_field, cur_player, action):
                    break
                score[action] = -np.inf
            actions.append(action)
            cur_state, cur_field = game_c.move_f(cur_field, cur_player, action)
            if action == game.BOARD_SIZE ** 2:
                pass_count += 1
            else:
                pass_count = 0
            cur_player = 1-cur_player
            # Two consecutive passes, or a full board, ends the game.
            if pass_count == 2 or (cur_field != 2).all():
                value = game_c.calc_result_f(cur_field, cur_player)
                break
        return value, cur_state, cur_player, states, actions
    def is_leaf(self, state_ints):
        """A state is a leaf while it has no stored prior (not expanded)."""
        return state_ints not in self.probs
    def search_batch_test(self, count, batch_size, state_int, player, net, device="cpu"):
        """
        Test variant of :meth:`search_batch` with the mini-batch logic
        inlined.

        NOTE(review): the inner loop iterates ``count`` times as well, so the
        ``batch_size`` argument is unused here.
        """
        cur_field, _ = game_c.decode_binary(state_int)
        root_mask = np.zeros(game.BOARD_SIZE ** 2 + 1)
        root_mask[:-1][cur_field != 2] = -np.inf
        empty_states = game.empty_states(cur_field)
        for action in empty_states:
            if not game_c.is_possible_move_f(cur_field, player, action):
                root_mask[action] = -np.inf
        for _ in range(count):
            backup_queue = []
            expand_queue = []
            planned = set()
            for i in range(count):
                value, leaf_state, leaf_player, states, actions = self.find_leaf(state_int, player, root_mask)
                self.subtrees.append(states)
                # end of the game
                if value is not None:
                    backup_queue.append((value, states, actions))
                # encounter leaf node which is not end of the game
                else:
                    # avoid duplication of leaf state
                    if leaf_state not in planned:
                        planned.add(leaf_state)
                        expand_queue.append((leaf_state, states, actions))
                    else:
                        states.clear()
                        self.subtrees.pop()
            del planned
            # do expansion of nodes
            if expand_queue:
                expand_states = []
                keys = self.visited_net_results.keys()
                new_expand_queue = []
                existed_expand_queue = []
                value_list = []
                prob_list = []
                rotate_list = []
                new_rotate_list = []
                # Split leaves into cached evaluations and ones that still
                # need a network forward pass (under a random symmetry).
                for leaf_state, states, actions in expand_queue:
                    rotate_num = np.random.randint(8)
                    if (leaf_state, rotate_num) in keys:
                        existed_expand_queue.append((leaf_state, states, actions))
                        rotate_list.append(rotate_num)
                        value, prob = self.visited_net_results[(leaf_state, rotate_num)]
                        value_list.append(value)
                        prob_list.append(prob)
                    else:
                        new_expand_queue.append((leaf_state, states, actions))
                        new_rotate_list.append(rotate_num)
                        leaf_state_lists = game_c.decode_binary(leaf_state)
                        expand_states.append(leaf_state_lists)
                expand_queue = [*existed_expand_queue, *new_expand_queue]
                rotate_list.extend(new_rotate_list)
                if len(new_rotate_list) == 0:
                    values = value_list
                    probs = prob_list
                else:
                    batch_v = model.state_lists_to_batch(expand_states,
                                                         device,
                                                         new_rotate_list)
                    logits_v, values_v = net(batch_v)
                    probs_v = F.softmax(logits_v, dim=1)
                    values = values_v.data.cpu().numpy()[:, 0]
                    probs = probs_v.data.cpu().numpy()
                    values = [*value_list, *list(values)]
                    probs = [*prob_list, *list(probs)]
                expand_states.clear()
                # create the nodes
                for (leaf_state, states, actions), value, prob, rotate_num in zip(expand_queue, values, probs, rotate_list):
                    self.visit_count[leaf_state] = np.zeros(game.BOARD_SIZE ** 2 + 1, dtype=np.int32)
                    self.value[leaf_state] = np.zeros(game.BOARD_SIZE ** 2 + 1, dtype=np.float32)
                    self.value_avg[leaf_state] = np.zeros(game.BOARD_SIZE ** 2 + 1, dtype=np.float32)
                    # Undo the random symmetry on the board part of the prior.
                    prob_without_pass = prob[:-1].reshape([game.BOARD_SIZE, game.BOARD_SIZE])
                    prob_without_pass = game.multiple_transform(prob_without_pass, rotate_num, True)
                    self.probs[leaf_state] = np.concatenate([prob_without_pass.flatten(), [prob[-1]]])
                    backup_queue.append((value, states, actions))
                    self.visited_net_results[(leaf_state, rotate_num)] = (value, prob)
                rotate_list.clear()
            # perform backup of the searches
            for value, states, actions in backup_queue:
                # leaf state is not stored in states and actions, so the value of the leaf will be the value of the opponent
                cur_value = -value
                for state_int, action in zip(states[::-1], actions[::-1]):
                    self.visit_count[state_int][action] += 1
                    self.value[state_int][action] += cur_value
                    self.value_avg[state_int][action] = self.value[state_int][action] / self.visit_count[state_int][action]
                    cur_value = -cur_value
                actions.clear()
            backup_queue.clear()
    def search_batch(self, count, batch_size, state_int, player, net,
                     device="cpu"):
        """
        Run ``count`` mini-batches of MCTS searches from ``state_int``.

        The root legality mask is computed once and shared by all
        mini-batches.
        """
        cur_field, _ = game_c.decode_binary(state_int)
        root_mask = np.zeros(game.BOARD_SIZE ** 2 + 1)
        root_mask[:-1][cur_field != 2] = -np.inf
        empty_states = game.empty_states(cur_field)
        for action in empty_states:
            if not game_c.is_possible_move_f(cur_field, player, action):
                root_mask[action] = -np.inf
        for _ in range(count):
            self.search_minibatch(batch_size, state_int, player, net,
                                  root_mask, device)
    def search_minibatch(self, count, state_int, player, net, root_mask,
                         device="cpu"):
        """
        Perform several MCTS searches.

        ``count`` leaves are collected, expanded in one (cached or batched)
        network evaluation, then their values are backed up along the
        traversed paths with alternating sign.
        """
        backup_queue = []
        expand_queue = []
        planned = set()
        for i in range(count):
            value, leaf_state, leaf_player, states, actions = self.find_leaf(state_int, player, root_mask)
            self.subtrees.append(states)
            # end of the game
            if value is not None:
                backup_queue.append((value, states, actions))
            # encounter leaf node which is not end of the game
            else:
                # avoid duplication of leaf state
                if leaf_state not in planned:
                    planned.add(leaf_state)
                    expand_queue.append((leaf_state, states, actions))
                else:
                    states.clear()
                    self.subtrees.pop()
        del planned
        # do expansion of nodes
        if expand_queue:
            expand_states = []
            keys = self.visited_net_results.keys()
            new_expand_queue = []
            existed_expand_queue = []
            value_list = []
            prob_list = []
            rotate_list = []
            new_rotate_list = []
            # Split leaves into cached evaluations and ones that still need
            # a network forward pass (under a random symmetry).
            for leaf_state, states, actions in expand_queue:
                rotate_num = np.random.randint(8)
                if (leaf_state, rotate_num) in keys:
                    existed_expand_queue.append((leaf_state, states, actions))
                    rotate_list.append(rotate_num)
                    value, prob = self.visited_net_results[(leaf_state, rotate_num)]
                    value_list.append(value)
                    prob_list.append(prob)
                else:
                    new_expand_queue.append((leaf_state, states, actions))
                    new_rotate_list.append(rotate_num)
                    leaf_state_lists = game_c.decode_binary(leaf_state)
                    expand_states.append(leaf_state_lists)
            expand_queue = [*existed_expand_queue, *new_expand_queue]
            rotate_list.extend(new_rotate_list)
            if len(new_rotate_list) == 0:
                values = value_list
                probs = prob_list
            else:
                batch_v = model.state_lists_to_batch(expand_states,
                                                     device,
                                                     new_rotate_list)
                logits_v, values_v = net(batch_v)
                probs_v = F.softmax(logits_v, dim=1)
                values = values_v.data.cpu().numpy()[:, 0]
                probs = probs_v.data.cpu().numpy()
                values = [*value_list, *list(values)]
                probs = [*prob_list, *list(probs)]
            expand_states.clear()
            # create the nodes
            for (leaf_state, states, actions), value, prob, rotate_num in zip(expand_queue, values, probs, rotate_list):
                self.visit_count[leaf_state] = np.zeros(game.BOARD_SIZE ** 2 + 1, dtype=np.int32)
                self.value[leaf_state] = np.zeros(game.BOARD_SIZE ** 2 + 1, dtype=np.float32)
                self.value_avg[leaf_state] = np.zeros(game.BOARD_SIZE ** 2 + 1, dtype=np.float32)
                # Undo the random symmetry on the board part of the prior.
                prob_without_pass = prob[:-1].reshape([game.BOARD_SIZE, game.BOARD_SIZE])
                prob_without_pass = game.multiple_transform(prob_without_pass, rotate_num, True)
                self.probs[leaf_state] = np.concatenate([prob_without_pass.flatten(), [prob[-1]]])
                backup_queue.append((value, states, actions))
                self.visited_net_results[(leaf_state, rotate_num)] = (value, prob)
            rotate_list.clear()
            expand_queue.clear()
        # perform backup of the searches
        for value, states, actions in backup_queue:
            # leaf state is not stored in states and actions, so the value of the leaf will be the value of the opponent
            cur_value = -value
            for state_int, action in zip(states[::-1], actions[::-1]):
                self.visit_count[state_int][action] += 1
                self.value[state_int][action] += cur_value
                self.value_avg[state_int][action] = self.value[state_int][action] / self.visit_count[state_int][action]
                cur_value = -cur_value
            actions.clear()
        backup_queue.clear()
    def get_policy_value(self, state_int, tau=1):
        """
        Extract policy and action-values by the state
        :param state_int: state of the board
        :param tau: visit-count temperature; 0 means greedy (one-hot)
        :return: (probs, values)
        """
        counts = self.visit_count[state_int]
        if tau == 0:
            probs = np.zeros(game.BOARD_SIZE ** 2 + 1, dtype=np.float32)
            probs[np.argmax(counts)] = 1.0
        else:
            counts = counts ** (1.0 / tau)
            total = counts.sum()
            probs = counts / total
        values = self.value_avg[state_int]
        return probs, values
| true |
fe23b62d199427e3930a2c085b4bc6cb53ed6a94 | Python | weidwonder/python_codes | /pieces/threading_.py | UTF-8 | 7,029 | 2.8125 | 3 | [] | no_license | # coding=utf-8
from Queue import Queue
from functools import wraps
import threading
from concurrent.futures import ThreadPoolExecutor
def setup_thread_error_handler(handler):
    """Install ``handler`` as a process-wide thread error hook.

    Monkey-patches ``threading.Thread.run`` so that whenever any thread's
    target raises, ``handler()`` is invoked first and the exception is then
    re-raised (so default thread exception reporting still happens).

    :param handler: zero-argument callable invoked on any thread failure
    """
    original_run = threading.Thread.run

    def run_with_handler(self):
        try:
            original_run(self)
        except Exception:
            handler()
            raise

    threading.Thread.run = run_with_handler
def async(func):
    """Turn a function/method into a fire-and-forget asynchronous call.

    NOTE: ``async`` became a reserved keyword in Python 3.7, so this
    definition only parses on Python 2 (consistent with ``xrange`` used
    below in this module).

    :param func: function or method to wrap
    :return: wrapper that runs ``func`` on a new non-daemon thread and
        returns immediately (the wrapped call's return value is discarded)
    """
    @wraps(func)
    def async_func(*args, **kwargs):
        thread = threading.Thread(
            target=func,
            args=args,
            kwargs=kwargs
        )
        thread.start()
    return async_func
def asyncd(func):
    """Decorator: run ``func`` asynchronously on a daemon thread.

    The wrapped call returns immediately and its return value is discarded;
    the daemon thread will not keep the interpreter alive on shutdown.

    :param func: function or method to wrap
    :return: asynchronous wrapper
    """
    @wraps(func)
    def daemon_wrapper(*args, **kwargs):
        worker = threading.Thread(target=func, args=args, kwargs=kwargs)
        worker.daemon = True
        worker.start()
    return daemon_wrapper
def serialize(func):
    """Decorator: serialize every call to ``func``.

    Concurrent invocations are mutually exclusive: a ``NamedLock`` keyed by
    ``id(func)`` is held for the duration of each call, so the decorated
    function never runs in two threads at once.

    :param func: function to serialize
    :return: wrapped function
    """
    @wraps(func)
    def locked_call(*args, **kwargs):
        with NamedLock(id(func)):
            return func(*args, **kwargs)
    return locked_call
def django_mysql_serialize(func):
    """Serialize calls to ``func`` across threads AND across machines.

    Same contract as :func:`serialize`, but backed by django-mysql's
    ``Lock`` (a MySQL named lock), so mutual exclusion also holds between
    processes/hosts that share the same database.

    :param func: function to serialize
    :return: wrapped function
    """
    from django_mysql.locks import Lock
    @wraps(func)
    def serialized_func(*args, **kws):
        with Lock(str(id(func))):
            return func(*args, **kws)
    return serialized_func
def pooled_submit_not_block(num, default_rtn=None):
    """Decorator factory: run the decorated function on a shared thread pool.

    Submission never blocks: each call is handed to a
    ``ThreadPoolExecutor`` with ``num`` workers and immediately returns
    ``default_rtn``. The pool is exposed on the wrapper as ``.executor``
    and can be stopped through ``.shutdown``.

    :param num: maximum number of worker threads
    :param default_rtn: value returned by every wrapped call
    :return: single-argument decorator
    """
    pool = ThreadPoolExecutor(max_workers=num)

    def decorate(func):
        @wraps(func)
        def submit_call(*args, **kwargs):
            pool.submit(func, *args, **kwargs)
            return default_rtn

        submit_call.executor = pool
        submit_call.shutdown = pool.shutdown
        return submit_call

    return decorate
def pooled_submit_blocked(num, default_rtn=None):
    """Decorator factory: run the decorated function on a bounded ThreadPool.

    Unlike :func:`pooled_submit_not_block`, submission blocks once ``num``
    tasks are already in flight. Every wrapped call returns ``default_rtn``;
    the underlying pool is exposed as ``.executor`` and ``.shutdown`` joins
    it (waits for all submitted tasks).

    :param num: maximum number of concurrently running tasks
    :param default_rtn: value returned by every wrapped call
    :return: single-argument decorator
    """
    task_pool = ThreadPool(worker_limit=num)

    def decorate(func):
        @wraps(func)
        def blocking_submit(*args, **kwargs):
            task_pool.putting_task(func, *args, **kwargs)
            return default_rtn

        blocking_submit.executor = task_pool
        blocking_submit.shutdown = task_pool.join
        return blocking_submit

    return decorate


# Default pooling decorator (blocking flavour).
pooled = pooled_submit_blocked
class PooledObject(object):
    """Context-manager wrapper around an object borrowed from an ObjectPool.

    ``with pooled as obj:`` yields the raw object; when the ``with`` block
    exits (normally or with an exception) the object is put back into its
    pool's queue.
    """
    def __init__(self, pool, obj):
        """
        :param pool: the ObjectPool this object belongs to
        :param obj: the wrapped raw object
        """
        self.pool = pool
        self.obj = obj

    def __enter__(self):
        # Hand out the raw object for use inside the ``with`` block.
        return self.obj

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always return the object to the pool, even if the block raised.
        self.pool.objects.put(self.obj)
class ObjectPool(object):
    """A simple pool of pre-built objects backed by a FIFO queue.

    :meth:`get` borrows one object wrapped in a :class:`PooledObject`;
    leaving that wrapper's ``with`` block returns the object to the pool.
    """
    def __init__(self, objects, block=True, timeout=None):
        """
        :param objects: list of objects managed by the pool
        :param block: whether :meth:`get` blocks while the pool is empty
        :param timeout: seconds a blocking get may wait before raising
        """
        self._objects = objects
        self.block = block
        self.timeout = timeout
        self.objects = Queue(len(objects))
        for item in objects:
            self.objects.put(item)

    def get(self):
        """Borrow one object from the pool.

        :return: a PooledObject wrapper; use ``with pool.get() as obj:`` and
            the object is put back automatically when the block exits.
        """
        borrowed = self.objects.get(block=self.block, timeout=self.timeout)
        return PooledObject(self, borrowed)
class NamedLock(object):
    """A named lock: same-name locks are mutually exclusive, different
    names do not (usually) interfere.

    Names are hashed onto a fixed table of 64 shared
    ``threading.Condition`` objects, so two *different* names may
    occasionally collide on the same underlying lock (false sharing) --
    safe, but a potential contention point.
    """
    num_locks = 64
    # Shared lock table. ``range`` (instead of the Python-2-only
    # ``xrange``) keeps this class importable on Python 3 as well; on
    # Python 2 it merely builds a throwaway 64-element list.
    locks = [threading.Condition() for _ in range(num_locks)]

    def __init__(self, name):
        self.name = name
        # Map the name onto one of the shared conditions.
        self.index = hash(self.name) % self.num_locks
        self.cond = self.locks[self.index]

    def __getattr__(self, item):
        # Only reached when normal attribute lookup fails; delegate unknown
        # attributes (acquire, release, wait, ...) to the condition object.
        if item in self.__dict__:
            return self.__dict__[item]
        else:
            return getattr(self.cond, item)

    def __enter__(self):
        # Blocking acquire of the underlying condition's lock.
        return self.cond.acquire(1)

    def __exit__(self, *args):
        # Wake one waiter (requires the lock to be held), then release.
        self.cond.notify()
        return self.cond.release()
class ThreadPool:
    """A tiny hand-rolled thread pool with a bounded task queue.

    ``putting_task`` blocks while ``worker_limit`` tasks are in flight.
    Results of tasks submitted with a ``_result_id`` keyword are collected
    in ``result_dict``. Copied from
    https://github.com/weidwonder/async-iter/blob/master/async_iter/async_iter.py
    """
    class TreadPoolException(Exception):
        """Raised when a task is submitted after :meth:`join`."""
        pass

    class NULLKEY:
        """Sentinel meaning "no result id was supplied"."""
        pass

    def __init__(self, worker_limit):
        # Bounded queue: one ``True`` token per task currently in flight.
        self.task_queue = Queue(maxsize=worker_limit)
        self.result_dict = {}
        self.is_join = False

    def setup_func(self, key, func, *args, **kws):
        """Wrap ``func`` so the pool's bookkeeping always runs.

        If ``key`` is a real result id, the return value of ``func`` is
        stored in ``result_dict`` under that key.
        """
        store_result = key is not self.NULLKEY

        def task():
            try:
                result = func(*args, **kws)
                if store_result:
                    self.result_dict[key] = result
            finally:
                # Free one slot in the bounded queue, whether or not the
                # task raised.
                self.task_queue.get()
                self.task_queue.task_done()

        return task

    def putting_task(self, func, *args, **kws):
        """Submit a task; blocks while the pool is at capacity.

        Pass ``_result_id=<key>`` to have the return value stored in
        ``result_dict`` under that key.
        """
        if self.is_join:
            raise self.TreadPoolException('Thread pool is closed.')
        result_id = kws.pop('_result_id', self.NULLKEY)
        task = self.setup_func(result_id, func, *args, **kws)
        # Reserve one slot in the queue (blocks when the pool is full).
        self.task_queue.put(True)
        self.execute_task(task)

    def execute_task(self, task):
        """Run ``task`` on a fresh thread."""
        threading.Thread(target=task).start()

    def join(self):
        """Refuse new tasks and block until all queued tasks have finished."""
        self.is_join = True
        self.task_queue.join()
| true |
dd59e93507d1eea496b11a5455ea7d86371042a9 | Python | priyanka1027/assignment1 | /task2.py | UTF-8 | 1,043 | 2.84375 | 3 | [] | no_license | import pandas as pd
import sys
import os
import cProfile
def save_downsample(df,name):
    '''
    Desc: saves the downsampled data frame to a CSV file (index excluded)
    args:
        df: (pandas DataFrame) contains downsampled data
        name: (str) output filename
    return: None
    '''
    print("Saving file...",name)
    df.to_csv(name,index=False)
def downsample(df, lag=60):
    '''
    Desc: keeps every ``lag``-th row of ``df`` (rows 0, lag, 2*lag, ...)
    args:
        df: (pandas DataFrame) data sampled at the original rate
        lag: (int) sampling stride in rows
    return: (pandas DataFrame) downsampled copy with a fresh 0..n-1 index
    '''
    sampled_rows = [df.iloc[pos].values for pos in range(0, len(df), lag)]
    return pd.DataFrame(sampled_rows, columns=df.columns)
def runner():
    '''
    Desc: downsamples each raw CSV file and writes the result to a
          matching *Downsampled.csv file
    return: None
    '''
    # Map each input file to its downsampled output file.
    # FIX: the original mapping wrote detailTemp.csv to
    # detailVolDownsampled.csv and detailVol.csv to
    # detailTempDownsampled.csv; the pairs are now consistent
    # (<name>.csv -> <name>Downsampled.csv).
    mapping = {
        "detail.csv": "detailDownsampled.csv",
        "detailTemp.csv": "detailTempDownsampled.csv",
        "detailVol.csv": "detailVolDownsampled.csv",
    }
    for file in mapping:
        df = pd.read_csv(file)
        ndf = downsample(df)
        save_downsample(ndf, mapping[file])
# Entry point: run the downsampling pipeline.
if __name__=='__main__':
    runner()
# Profiling alternative: cProfile.run('runner()')
e3ba7241f890cc5957c35ae948827411ce9d00ea | Python | sunggeunkim/datastructure | /multithread/hit_counter_last_5min_01.py | UTF-8 | 591 | 3.375 | 3 | [] | no_license | class HitCounter:
def __init__(self):
self.times = [0] * 300
self.hits = [0] * 300
#Record hit
def hit(self, timestamp):
index = timestamp % 300
if times[index] != timestamp:
times[index] = timestamp
hits[index] = 1
else:
hits[index] += 1
#Return the number of hits in the past 5 minutes.
def getHits(self, timestamp):
total = 0
for i in range(300):
if timestamp - times[i] < 300:
total += hits[i]
return total
| true |
4db744d12f69d55c2445c4f8adb20591741accf7 | Python | manuel-rhdt/master-thesis | /python/gaussian_system/monte_carlo.py | UTF-8 | 1,363 | 2.734375 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
from scipy.special import logsumexp
from scipy.stats import multivariate_normal
from .system import System, time_matrix
def estimate_log_marginal_at(x, num_s, system: System, t):
    """Conditional log-likelihoods of responses under sampled signals.

    Draws ``num_s`` signal trajectories per response from the marginal
    signal distribution N(0, C_ss(t)) and evaluates log p(x | s) for each
    pair via the system's likelihood.

    :param x: response sample(s); reshaped to (num_x, 1, xdim) so it
        broadcasts against the signal samples
    :param num_s: number of signal samples drawn per response
    :param system: Gaussian system providing correlations and likelihood
    :param t: time matrix (see ``time_matrix``)
    :return: result of ``system.log_likelihood`` -- presumably shape
        (num_x, num_s); TODO confirm against System.log_likelihood
    """
    xdim = x.shape[-1]
    x = x.reshape((-1, 1, xdim))
    num_x = x.shape[0]
    marg_s = multivariate_normal(cov=system.corr_ss(t))
    s_samples = marg_s.rvs((num_x, num_s)).reshape((num_x, num_s, -1))
    return system.log_likelihood(x, s_samples, t)
def estimate_log_marginal(num_x: int, num_s: int, system: System, t):
    """Monte-Carlo estimate of log p(x) for sampled responses.

    Draws ``num_x`` responses from the marginal N(0, C_xx(t)); for each,
    the marginal likelihood is approximated by averaging over ``num_s``
    signal samples: log p(x) ~ logsumexp_i log p(x | s_i) - log(num_s)
    (the 1/num_s weight is passed via ``b``).

    :return: tuple ``(x_samples, log_marginal)`` with one log-marginal
        entry per response sample
    """
    marg_x = multivariate_normal(cov=system.corr_xx(t))
    x_samples = marg_x.rvs(num_x).reshape((num_x, 1, -1))
    log_likelihood = estimate_log_marginal_at(x_samples, num_s, system, t)
    return x_samples, logsumexp(log_likelihood, b=1/num_s, axis=-1)
def monte_carlo_sim(dim, delta_t, num_x: int, num_s, system: System):
    """Run one marginal-entropy Monte-Carlo estimate and tabulate it.

    :return: DataFrame with one row per response sample, carrying the
        simulation parameters and the per-sample entropy estimate
        (-log p(x)).
    """
    t = time_matrix(dim, delta_t)
    _, log_marginal = estimate_log_marginal(num_x, num_s, system, t)
    record = {
        "dim": dim,
        "delta_t": delta_t,
        "num_responses": 1,
        "num_signals": num_s,
        "marginal_entropy": -log_marginal,
    }
    return pd.DataFrame(record)
def estimate_marginal_entropy(num_x, num_s, sys: System, t):
    """Estimate the marginal response entropy -E[log p(x)]."""
    log_marginal = estimate_log_marginal(num_x, num_s, sys, t)[1]
    return -log_marginal.mean(axis=-1)
| true |
2850fbb832259f365a944f85e0f14ed31844c067 | Python | huanzi56/fc-db | /rds-mysql/python/index.py | UTF-8 | 1,295 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import logging
import pymysql
import os,sys
logger = logging.getLogger()
def getConnection():
    """Open a pymysql connection configured from MYSQL_* env variables.

    On any failure the error is logged and the process exits immediately
    (acceptable for a short-lived function-compute invocation).
    """
    try:
        return pymysql.connect(
            host=os.environ['MYSQL_HOST'],
            port=int(os.environ['MYSQL_PORT']),
            user=os.environ['MYSQL_USER'],
            passwd=os.environ['MYSQL_PASSWORD'],
            db=os.environ['MYSQL_DBNAME'],
            connect_timeout=5)
    except Exception as e:
        logger.error(e)
        logger.error("ERROR: Unexpected error: Could not connect to MySql instance.")
        sys.exit()
def conditionallyCreateUsersTable():
    """Create the ``users`` table if it does not already exist (idempotent DDL)."""
    try:
        conn = getConnection()
        with conn.cursor() as cursor:
            sql = """CREATE TABLE IF NOT EXISTS users (
                id VARCHAR(64) NOT NULL,
                name VARCHAR(128) NOT NULL,
                PRIMARY KEY(id))"""
            cursor.execute(sql)
        conn.commit()
    finally:
        conn.close()
def initializer(context):
    """Function Compute initializer hook: ensure the schema exists."""
    conditionallyCreateUsersTable()
def handler(event, context):
    """Function Compute entry point: upsert a demo row and read one back.

    :param event: invocation payload (unused)
    :param context: runtime context (unused)
    :return: the first row fetched from the ``users`` table
    """
    # FIX: acquire the connection *outside* the try block; previously a
    # failure inside getConnection() (it raises SystemExit) reached the
    # ``finally`` clause with ``conn`` unbound, masking the real error
    # with a NameError.
    conn = getConnection()
    try:
        with conn.cursor() as cursor:
            sql = "REPLACE INTO users (id, name) VALUES(%s, %s)"
            cursor.execute(sql, ('2', 'wan'))
            cursor.execute("SELECT * FROM users")
            result = cursor.fetchone()
            logger.info(result)
            return result
    finally:
        conn.close()
1dc5aca4a5ba1fa71cd9d3f72871160d8f8f481c | Python | adarean5/youtube_audio | /main.py | UTF-8 | 4,001 | 2.53125 | 3 | [] | no_license | from pytube import YouTube
import subprocess
from bs4 import BeautifulSoup
import requests
import os
import threading
import re
DOWNLOAD_PATH = os.path.expanduser("~") + "\\Music\\"
FFMPEG_PATH = "C:\\Users\\Jernej\\Documents\\Other\\ffmpeg\\bin\\ffmpeg.exe"
g_playlist_title = ""
def get_playlist_links(url):
    """Scrape a YouTube playlist page and collect its video links.

    Prints a numbered listing of the playlist items as a side effect.

    :param url: URL of the playlist page
    :return: tuple ``(playlist_links, playlist_title)`` where links are
        absolute ``/watch?...`` URLs

    NOTE(review): relies on server-rendered HTML (anchor tags with
    dir="ltr" and a trailing "- YouTube" in the page title); breaks if
    YouTube changes its markup.
    """
    source_code = requests.get(url).text
    soup = BeautifulSoup(source_code, 'html.parser')
    playlist_title = soup.find("title").contents[0]
    # Strip the "- YouTube" suffix from the page title.
    playlist_title = re.sub(r'- YouTube$', "", playlist_title).strip()
    domain = 'https://www.youtube.com'
    playlist_links = []
    print("Playlist items:")
    i = 1
    for link in soup.find_all("a", {"dir": "ltr"}):
        href = link.get('href')
        if href.startswith('/watch?'):
            print(" " + str(i) + ". " + link.string.strip())
            playlist_links.append(domain + href)
            i += 1
    print("")
    return playlist_links, playlist_title
def download_playlist(uri):
    """Download every item of a playlist (optionally only a prefix).

    Stores the playlist title in the module-global ``g_playlist_title``
    (read later by ``extract_mp3``), creates the target directory if
    needed, prompts on stdin for an optional end index, and spawns one
    download thread per video.

    :param uri: playlist URL
    """
    playlist, playlist_title = get_playlist_links(uri.strip())
    global g_playlist_title
    g_playlist_title = playlist_title
    if not os.path.exists(DOWNLOAD_PATH + playlist_title):
        print("Download directory does not exist, making one")
        os.makedirs(DOWNLOAD_PATH + playlist_title)
    howmany = input('End index (press Enter to download all): ')
    if howmany != "":
        playlist = playlist[:int(howmany)]
    threads = []
    for uri in playlist:
        t = threading.Thread(target=download_video, args=(uri, playlist_title))
        threads.append(t)
        t.start()
def download_video(uri, playlist_title):
    """Download one video, preferring 720p (itag 22) over 360p (itag 18).

    Registers ``extract_mp3`` as pytube's completion callback; skips the
    download when the target mp3 already exists in the playlist folder.

    :param uri: video watch URL
    :param playlist_title: name of the destination folder under DOWNLOAD_PATH
    """
    yt = YouTube(uri.strip())
    yt.register_on_complete_callback(extract_mp3)
    stream = yt.streams.get_by_itag(22)
    if stream is None:
        # Fall back to itag 18 when 720p is not available.
        stream = yt.streams.get_by_itag(18)
    if os.path.isfile(DOWNLOAD_PATH + playlist_title + "\\" + stream.default_filename.rpartition(".")[0] + ".mp3"):
        print("Item \"{}\" already exists, skipping download".format(yt.title))
    else:
        print("Starting download: " + yt.title)
        stream.download()
def extract_mp3(stream, file_handle):
    """pytube on-complete callback: transcode the downloaded video to mp3.

    Derives artist/title metadata from the video's filename (expected form
    "Artist - Title"), invokes ffmpeg, then deletes the source video file.
    """
    input_name = file_handle.name
    output_name = DOWNLOAD_PATH + g_playlist_title + "\\" + stream.default_filename.rpartition(".")[0] + ".mp3"
    artist_title = stream.default_filename.rpartition(".")[0].split("-")
    artist = ""
    if len(artist_title) == 2:
        artist = artist_title[0].strip()
        title = artist_title[1]
    else:
        title = artist_title[0]
    # Strip featured-artist suffixes and any bracketed annotations.
    title = title.split(" ft. ")[0]
    title = title.split(" ft ")[0]
    title = title.split(" feat ")[0]
    title = re.sub(r'\(.*\)', "", title)
    title = re.sub(r'\[.*\]', "", title)
    title = title.strip()
    print("Done downloading {0}".format(input_name.split("\\")[-1]))
    print("Extracting mp3 for {}".format(input_name.split("\\")[-1]))
    # BUG FIX: build the command as an argument list instead of one manually
    # quoted string — no shell parsing, and file names containing quote
    # characters no longer break the invocation.
    command = [
        FFMPEG_PATH,
        "-loglevel", "panic",
        "-i", input_name,
        "-vn", "-ar", "44100", "-ac", "2", "-ab", "192k", "-f", "mp3",
        "-metadata", "title={}".format(title),
        "-metadata", "artist={}".format(artist),
        output_name,
    ]
    # subprocess.run blocks until ffmpeg exits (replaces Popen + wait).
    subprocess.run(command)
    print("Done extracting {0}".format(input_name.split("\\")[-1]))
    print("")
    file_handle.close()
    try:
        os.remove(input_name)
    except OSError as e:
        # The conversion succeeded; failing to delete the source is non-fatal.
        print("{}".format(e))
def main():
    """Prompt for a YouTube link and dispatch to playlist or single download."""
    uri = input('Link: ')
    if not uri:
        return
    if "playlist?" in uri:
        download_playlist(uri)
        return
    # Single-video downloads land in a generic "YT Downloader" folder.
    global g_playlist_title
    fallback_dir = "YT Downloader"
    target = DOWNLOAD_PATH + fallback_dir
    if not os.path.exists(target):
        print("Download directory does not exist, making one")
        os.makedirs(target)
    g_playlist_title = fallback_dir
    download_video(uri, fallback_dir)
if __name__ == "__main__":
main()
| true |
171a0c6304f6ae2c15c4b807936891ae75fcd326 | Python | jabbalaci/FuDep | /fd_miner_v3.py | UTF-8 | 19,126 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python3
"""
The TANE alg. It finds FDs in a dataset.
It finds minimal, non-redundant FDs.
It uses stripped partitions.
It uses the C+ set for RHS candidates.
This Python version is not very fast but it was very
useful for prototyping.
Author: Laszlo Szathmary, jabba.laci@gmail.com
Year: 2017
"""
import sys
from pathlib import Path
from pprint import pprint
# ANSI terminal escape codes used to color the validity marks below.
RED = '\033[1;31m'
GREEN = '\033[0;32m'
NO_COLOR = '\033[0m'
#
# Pre-rendered colored check mark (valid FD) and cross (invalid FD).
OK = '{c}✔{nc}'.format(c=GREEN, nc=NO_COLOR)
NOT_OK = '{c}✘{nc}'.format(c=RED, nc=NO_COLOR)
class FileFormatException(Exception):
    """Raised when the input dataset file is malformed or inconsistent."""
class Columns:
    """Maps between attribute names and their 1-based integer identifiers."""

    def __init__(self, attr_names):
        """Assign consecutive IDs 1..n to the given attribute names.

        Example: ['a', 'b', 'c'] -> IDs [1, 2, 3] and {1: 'a', 2: 'b', 3: 'c'}.
        """
        self.attr_names = attr_names
        self.attr_ids = [i + 1 for i in range(len(attr_names))]
        self.set_attr_ids = set(self.attr_ids)
        self.d = {attr_id: name for attr_id, name in zip(self.attr_ids, attr_names)}

    def to_str(self, attrs):
        """Render a tuple of attribute IDs, e.g. (1, 2), as a string like "ab"."""
        return "".join(self.d[attr_id] for attr_id in attrs)

    def __str__(self):
        """Debug representation: the raw attribute-name list."""
        return str(self.attr_names)
class Partitions:
    """Stores the stripped partition and error value e(X) of each attribute set."""

    def __init__(self, columns, table):
        """Compute the stripped partition of every single attribute.

        self.d maps an attribute-set tuple (column IDs) to a set of
        equivalence classes, each a tuple of 1-based row IDs; singleton
        classes are dropped ("stripped").  self.error_value stores, for each
        set X, the numerator of e(X) = (||PI'_X|| - |PI'_X|) / r, where the
        row count r is a constant and therefore omitted (e.g. 5 instead of 5/8).
        """
        self.columns = columns
        self.table = table
        self.d = {}
        self.error_value = {}
        for col in range(len(table[0])):
            partition_id = columns.attr_ids[col]
            groups = {}
            # Group 1-based row IDs by the value found in this column.
            for row_id, row in enumerate(table, start=1):
                groups.setdefault(row[col], []).append(row_id)
            # Keep only equivalence classes with at least two rows (stripping).
            self.d[(partition_id,)] = {
                tuple(rows) for rows in groups.values() if len(rows) > 1
            }
        for key, value in self.d.items():
            self.error_value[key] = self.calculate_error_value(value)

    def calculate_error_value(self, value):
        """Return ||PI'|| - |PI'| for the stripped partition *value*."""
        return sum(len(eq_class) for eq_class in value) - len(value)

    def register(self, key, value):
        """Record the partition of a new attribute set plus its error value."""
        self.d[key] = value
        self.error_value[key] = self.calculate_error_value(value)

    def show(self):
        """Debug helper: dump every partition together with its error value."""
        for key, value in self.d.items():
            print("{k}: {v} [e: {e}]".format(k=key, v=value, e=self.error_value[key]))
class Stripper:
    """
    Calculate the product of two stripped partitions
    """
    def __init__(self, num_rows):
        # Scratch buffers reused across calls:
        # T[row_id] -> label of the set_a equivalence class containing row_id
        #              (0 = row belongs to a stripped/singleton class);
        # S[label]  -> accumulates the rows of one product class.
        # Both are sized num_rows + 1 because row IDs are 1-based.
        self.T = [0] * (num_rows + 1)
        self.S = []
        for i in range(num_rows + 1):
            self.S.append([])
    def stripped_product(self, set_a, set_b):
        """Return the stripped product of the two partitions (PI_a * PI_b)."""
        T = self.T
        S = self.S
        cnt = 0
        res = set()
        # Pass 1: label every row with the id of its set_a class.
        for eq_class in set_a:
            cnt += 1
            for val in eq_class:
                T[val] = cnt
        #
        # Pass 2: for each set_b class, bucket its rows by set_a label; a
        # bucket with >= 2 rows is a class of the product (singletons stripped).
        for eq_class in set_b:
            for val in eq_class:
                if T[val]:
                    key = T[val]
                    S[key].append(val)
            #
            # Harvest and clear the buckets touched by this set_b class so the
            # shared S buffer is clean for the next class.
            for val in eq_class:
                if T[val]:
                    key = T[val]
                    if len(S[key]) > 1:
                        res.add(tuple(S[key]))
                    S[key].clear()
            #
        #
        # Reset the labels so the scratch buffers are clean for the next call.
        for eq_class in set_a:
            for val in eq_class:
                T[val] = 0
        #
        return res
class Dataset:
    """
    Represent the input dataset.
    For the sake of simplicity, the maximal number of attributes is limited to 26
    (it's the number of characters in the English alphabet).
    """
    # Auto-generated attribute names when no .cols file is supplied.
    alphabet = "abcdefghijklmnopqrstuvwxyz"

    def __init__(self, fname):
        """Load the CSV file *fname* and precompute all mining helpers."""
        self.fname = fname
        self.auto_attr_names = False
        self.attr_names = None  # set by read_dataset()
        self.table = []  # a matrix, representing the dataset
        self.read_dataset(fname)
        self.columns = Columns(self.attr_names)
        self.partitions = Partitions(self.columns, self.table)
        self.num_columns = len(self.table[0])
        self.num_rows = len(self.table)
        self.table = []  # clear it, it's not needed anymore (free memory)
        self.valid_rules = ValidRules()
        self.stripper = Stripper(self.num_rows)

    def parse_line(self, line):
        """
        Parse a line and return the attr. values in a list.
        """
        return [w.strip() for w in line.split(',')]

    def read_column_names(self, fpath):
        """Read the single comma-separated line of attribute names from *fpath*."""
        with fpath.open() as f:
            line = f.readline().rstrip("\n")
        return self.parse_line(line)

    def read_dataset(self, fname):
        """
        Read the dataset in, which must be a CSV file.
        If you want to specify the names of the attributes, provide
        a file next to the csv file with the extension .cols , which
        contains just one line, the name of the attributes, separated by a comma.
        """
        auto_cols = True
        csv = Path(fname)
        cols = Path(csv.parent, csv.stem + ".cols")
        if cols.is_file():
            auto_cols = False
            self.attr_names = self.read_column_names(cols)
        #
        with csv.open() as f:
            for line in f:
                line = line.rstrip("\n")
                # Skip blank lines and comment lines.
                if not line or line.startswith('#'):
                    continue
                self.table.append(self.parse_line(line))
        # endwith
        if auto_cols:
            length = len(self.table[0])
            if length > len(Dataset.alphabet):
                # BUG FIX: the placeholder {n} needs a keyword argument; the
                # original passed it positionally, which raised KeyError
                # instead of the intended FileFormatException message.
                raise FileFormatException("Error: the dataset may have max. {n} attributes!".format(n=len(Dataset.alphabet)))
            self.attr_names = list(Dataset.alphabet[:length])
        #
        self.verify_dataset()

    def verify_dataset(self):
        """
        Check if there's any problem with the dataset (e.g. a row has more or less
        attr. values then the other rows).
        """
        length = len(self.attr_names)
        cnt = 0
        for row in self.table:
            cnt += 1
            if len(row) != length:
                raise FileFormatException("Error: number of attributes in row {n} should be {m}!".format(n=cnt, m=length))
        #
        if len(self.attr_names) != len(set(self.attr_names)):
            raise FileFormatException("Error: the attribute names must be unique (no duplicates)!")

    def show(self):
        """
        Visualizing the dataset.
        """
        print(self.columns)
        print("-" * 20)
        for row in self.table:
            print(row)
class ValidRules:
    """Accumulates the functional dependencies that were found to hold."""

    def __init__(self):
        # Rules are kept in discovery order.
        self.rules = []

    def add_rule(self, rule):
        """Record one valid rule."""
        self.rules.append(rule)

    def show(self):
        """Print every valid rule followed by a summary count."""
        print("Minimal, non-redundant FDs:")
        print()
        for found in self.rules:
            print(found)
        print()
        print("Number of minimal, non-redundant FDs:", len(self.rules))
class Rule:
    """A functional dependency ("rule") of the form lhs -> rhs, e.g. ab -> c."""

    def __init__(self, lhs, rhs, db):
        """Store the two sides (tuples of column IDs) and the owning dataset.

        A freshly created rule is marked invalid; the caller flips the flag
        once the dependency has been verified to hold.
        """
        self.lhs = lhs
        self.rhs = rhs
        self.db = db
        self.valid = False

    def __repr__(self):
        """Render the rule as "lhs -> rhs" plus a colored validity mark."""
        mark = OK if self.valid else NOT_OK
        return "{0} -> {1} ({2})".format(
            self.db.columns.to_str(self.lhs),
            self.db.columns.to_str(self.rhs),
            mark,
        )
class Row:
    """
    We use a levelwise approach. When working with a level, we have
    rows in it. This class represents such a row.
    """
    def __init__(self, attrs, level): # attrs is a tuple
        """
        What does a row contain?
        - an attr. set, e.g. xy (actually, we store the column IDs)
        - partition of the attr. set
        - list of rules generated from the attr. set
        """
        self.attrs = attrs
        self.db = level.db
        self.level = level
        self.partition = None # will be set later
        self.c_plus = set() # C+ value, represented as a set
        # Single attributes already have their partition precomputed in Partitions.
        if len(attrs) == 1:
            self.partition = self.db.partitions.d[attrs]
        #
        self.rules = [] # will be set later
    def compute_c_plus(self):
        """
        C+ set for RHS candidates.
        """
        attrs = self.attrs
        prev_level = self.level.prev_level
        if len(attrs) in [1, 2]: # 1 or 2
            # At levels 1 and 2 the candidate set is the full attribute set R.
            self.c_plus = set(self.db.columns.set_attr_ids) # we make a copy of it (not a reference!)
        else:
            # Otherwise C+(X) is the intersection of C+ over all (|X|-1)-subsets.
            res = None
            for i in range(len(attrs)):
                sub = attrs[:i] + attrs[i+1:]
                if res is None:
                    res = prev_level.c_plus_dict[sub]
                    continue
                #
                res = res.intersection(prev_level.c_plus_dict[sub])
            #
            self.c_plus = res
    def is_rule_valid(self, rule):
        """
        Is the given rule valid?
        It's based on Lemma 3 of the TANE research paper (page 7).
        """
        # lhs -> rhs holds iff e(lhs) == e(lhs ∪ rhs); self.attrs is lhs ∪ rhs.
        part_a = self.db.partitions.error_value[rule.lhs]
        part_b = self.db.partitions.error_value[self.attrs]
        return part_a == part_b
    def set_partition(self, row_a, row_b):
        """
        The current row's attr. set is generated by joining row_a and row_b.
        To get the partition of the current row, it's enough to join the
        partitions of row_a and row_b.
        When ready, we also register it in the Partitions class because we'll need
        it later.
        """
        set_a = row_a.partition
        set_b = row_b.partition
        #
        res = self.db.stripper.stripped_product(set_a, set_b)
        #
        self.partition = res
        # register it in class Partitions too
        self.db.partitions.register(self.attrs, res)
    def generate_rules(self):
        """
        generate the rules (and check which rules are valid)
        """
        if len(self.attrs) > 1:
            # Only attributes still in C+ may appear on the right-hand side.
            rhs_candidate_set = set(self.attrs).intersection(self.c_plus)
            for i in range(len(self.attrs)):
                attr = self.attrs[i]
                if attr in rhs_candidate_set:
                    li = list(self.attrs)
                    rhs = attr
                    del li[i]
                    # Candidate rule: (attrs without attr) -> attr
                    rule = Rule(tuple(li), (rhs,), self.db)
                    rule.valid = self.is_rule_valid(rule)
                    if rule.valid:
                        self.db.valid_rules.add_rule(rule)
                        # TANE pruning: drop rhs and all of R \ X from C+.
                        self.c_plus.remove(rhs)
                        r_minus_x = self.db.columns.set_attr_ids.difference(set(self.attrs))
                        # NOTE(review): the loop variable `attr` below shadows
                        # the outer `attr`; harmless here because it is the last
                        # use in this iteration, but worth renaming.
                        for attr in r_minus_x:
                            try:
                                self.c_plus.remove(attr)
                            except:
                                pass # no error if we want to remove an element that is not present in the set
                        #
                    #
                    self.rules.append(rule)
    @classmethod
    def join(cls, row_a, row_b, level):
        """
        Apriori's join method. If they are joinable, return a new Row
        object, otherwise return None.
        """
        # Two rows join iff they share the same (length-1)-prefix.
        length = len(row_a.attrs)
        pre_a = row_a.attrs[:length-1]
        pre_b = row_b.attrs[:length-1]
        if pre_a == pre_b:
            return Row(row_a.attrs + row_b.attrs[-1:], level)
        #
        return None
    def __str__(self):
        """
        Visualizing the row.
        """
        attrs = self.db.columns.to_str(self.attrs)
        # show_partition = True
        # Toggle these flags to include the partition / C+ set in the output.
        show_partition = False
        show_c_plus = False
        return "{a} {p}{cp} {r}".format(a=attrs,
                                        p=self.partition if show_partition else "",
                                        cp=self.c_plus if show_c_plus else "",
                                        r=self.rules)
class Level:
    """
    Represents a level during the levelwise exploration.
    """
    def __init__(self, length, db, prev_level):
        """
        length: length of the attr. sets on this level
        prev_level: If it's level i, then prev_level is level (i-1)
        It contains a list of Row objects.
        If this is level 1, then initialize it with the attributes of the dataset.
        If it's level 2, 3, ..., then use the previous level to generate the attr. sets.
        """
        # Drop rows of the previous level whose C+ became empty before joining.
        if prev_level:
            prev_level.prune()
        #
        self.length = length
        self.db = db
        self.prev_level = prev_level
        self.rows = []
        self.set_of_itemsets = set()
        self.c_plus_dict = {} # dictionary, where key: itemset, value: C+ value of the itemset
        if length == 1:
            self.initialize_level_1()
        else:
            self.create_level_from_prev_level()
        #
        # Cache each row's C+ so the next level can intersect them quickly.
        for row in self.rows:
            self.c_plus_dict[row.attrs] = row.c_plus
    def prune(self):
        """
        Remove rows whose C+ value is empty.
        """
        self.rows = [row for row in self.rows if len(row.c_plus) > 0]
    def initialize_level_1(self):
        """
        Add the attributes to the level (1-long attr. sets).
        """
        for attr_id in self.db.columns.attr_ids:
            row = Row((attr_id,), self)
            row.compute_c_plus()
            self.rows.append(row)
            self.set_of_itemsets.add((attr_id,))
    def create_level_from_prev_level(self):
        """
        Using the idea of Apriori's join, populate the rows using the
        attr. sets of the previous level.
        Rows in prev_level whose C+ value was empty have already been pruned.
        """
        length = len(self.prev_level.rows)
        for i in range(0, length-1):
            row_i = self.prev_level.rows[i]
            # if not row_i.c_plus: # if it's an empty set: skip it
            #     continue
            for j in range(i+1, length):
                row_j = self.prev_level.rows[j]
                # if not row_j.c_plus: # if it's an empty set: skip it
                #     continue
                new_row = Row.join(row_i, row_j, self)
                if new_row:
                    itemset = new_row.attrs
                    # Apriori pruning: keep only itemsets all of whose subsets survived.
                    if self.prev_level.contains_all_subsets_of(itemset):
                        new_row.compute_c_plus()
                        new_row.set_partition(row_i, row_j)
                        new_row.generate_rules()
                        self.rows.append(new_row)
                        self.set_of_itemsets.add(itemset)
                else:
                    # NOTE(review): this early break assumes rows are ordered so
                    # that the first non-joinable partner ends the prefix run —
                    # confirm the ordering invariant holds after prune().
                    break
    def contains_all_subsets_of(self, t):
        """
        t is a tuple representing an itemset (or attribute set)
        Generate the 1-size smaller subsets of t and check if the table
        contains all these subsets.
        Example: t = abc. Generate ab, ac, and bc, and check if they are
        in the table.
        Level 1 table contains all the attributes.
        Level 2 table contains 2-long attr. sets that are generated from the 1-long attributes.
        No need to check the tables at Level 1 and 2.
        Start this pruning process with Level 3.
        """
        if len(t) >= 3:
            for i in range(len(t)):
                sub = t[:i] + t[i+1:]
                if sub not in self.set_of_itemsets:
                    return False
                #
            #
        #
        return True
    def show(self):
        """
        Visualizing the level.
        """
        print("-" * 30)
        print("Level", self.length)
        for row in self.rows:
            print(row)
    def is_empty(self):
        """
        Empty if it has no rows.
        """
        return len(self.rows) == 0
def levelwise_exploration(db):
    """Run the levelwise (Apriori-style) search over attribute-set sizes.

    Builds level 1 from the single attributes, then derives each level from
    the previous one, stopping as soon as a level comes out empty.
    """
    size = 1
    level = Level(size, db, None)
    level.show()
    while True:
        size += 1
        level = Level(size, db, level)
        if level.is_empty():
            break
        level.show()
    print("=" * 30)
def main():
    """Entry point: load the dataset named on the command line and mine FDs."""
    if len(sys.argv) < 2:
        print("Error: provide an input file!", file=sys.stderr)
        exit(1)
    fname = sys.argv[1]
    db = Dataset(fname)
    levelwise_exploration(db)
    db.valid_rules.show()
##############################################################################
# Script entry point: run the miner only when executed directly, not on import.
if __name__ == "__main__":
    main()
| true |
1ddae89b6ed05b2e0759fd8c5849a5f85b191832 | Python | Rifat951/PythonExercises | /Dictionary/ex6.3.py | UTF-8 | 1,015 | 4.71875 | 5 | [] | no_license | # 6-3. Glossary: A Python dictionary can be used to model an actual dictionary
# However, to avoid confusion, let’s call it a glossary
# • Think of five programming words you’ve learned about in the previous
# chapters Use these words as the keys in your glossary, and store their
# meanings as values
# • Print each word and its meaning as neatly formatted output You might
# print the word followed by a colon and then its meaning, or print the word
# on one line and then print its meaning indented on a second line Use the
# newline character (\n) to insert a blank line between each word-meaning
# pair in your output
# Decorative banner before the glossary output.
print("\n\n")
print("####################")
print("\n\n")

# Glossary of programming terms: term -> meaning.
glossary = {
    'If-else': "represents conditional statements",
    'loop': "write the statement without copy and pasting it everytime... tadaaa!"
}

# Print each term on its own line, its meaning below it, then a blank gap.
for term, meaning in glossary.items():
    print(f"{term} ")
    print(f"{meaning}")
    print("\n")
| true |
478e3bd51d6161deee83313a0dcd8fcd59f1dd39 | Python | poralo/food-crops | /Lunch.py | UTF-8 | 1,533 | 2.8125 | 3 | [] | no_license | import argparse
from FoodCropsDataset import FoodCropsDataset
from FoodCropFactory import FoodCropFactory
from CommodityGroup import CommodityGroup
from IndicatorGroup import IndicatorGroup
from Unit import Unit
class Lunch :
    """Command-line demo: load the FeedGrains dataset, filter it with the
    optional CLI arguments and print the matching measurements.

    NOTE(review): everything below executes at class-definition time, i.e.
    as a side effect of importing this module.
    """
    # Load the 'FeedGrainsLight' dataset.
    f = FoodCropFactory()
    fcd = FoodCropsDataset(f)
    fcd.load("FeedGrainsLight.csv")
    # CommodityGroup.SORGHUM, IndicatorGroup.PRICES
    # Optional command-line arguments used to filter the data.
    parser=argparse.ArgumentParser(description='Tri à appliquer sur les données food-crops')
    parser.add_argument("-cg","--CommodityGroup", type=str, nargs='?')
    parser.add_argument("-ig","--IndicatorGroup", type=str, nargs='?')
    parser.add_argument("-gl","--geographicalLocation", type=str, nargs='?')
    parser.add_argument("-u","--Unit", type=str, nargs='?')
    args, unknown = parser.parse_known_args() # parse_known_args tolerates missing/unknown arguments
    # Map the string arguments onto the enumeration members.
    cg = None
    ig = None
    if args.CommodityGroup :
        try :
            cg = CommodityGroup[args.CommodityGroup]
        except :
            cg = "Unknown CommodityGroup"
    if args.IndicatorGroup :
        try :
            ig = IndicatorGroup[args.IndicatorGroup]
        except :
            ig = "Unknown IndicatorGroup"
    # Display the list of matching measurements.
    liste = fcd.findMeasurements(cg,ig,args.geographicalLocation,args.Unit)
    for n in liste :
        print(n.describe())
| true |
ee1059d9feaf0786da2eaac39f41ea30bd0d8347 | Python | camjhirsh/compassion.ai | /app.py | UTF-8 | 1,057 | 2.78125 | 3 | [] | no_license | ### Streamlit App ###
# Imports
import streamlit as st
from pages import home
from pages import survey
from pages import results
from pages import about
# Layout Style
st.set_page_config(page_title="compassion.ai", page_icon=":heart:", initial_sidebar_state='auto')
# Sidebar
# Image and Header
from PIL import Image
# Sidebar logo, scaled down to 100x100 pixels.
image = Image.open('images/logo_2.png').resize(size=(100, 100))
c1, c2, c3 = st.beta_columns(3)
with c2:
    st.sidebar.image(image) # caption="We Are compassion.ai :)")
# Navigation Pane
st.sidebar.title("Navigation")
navigation = st.sidebar.radio(label="Go To:", options=['Home', 'Survey', 'Results'])
# Contact Pane
st.sidebar.title("Contact Us")
sign = st.sidebar.button("Sign In")
about_us = st.sidebar.button("About Us")
# Run All Pages
# "About Us" takes precedence: while its button press is active, the
# navigation selection is suppressed by the `not about_us` guards below.
if about_us:
    about.display_about()
if navigation == 'Home' and not about_us:
    home.home_page()
elif navigation == 'Survey' and not about_us:
    survey.generate_survey()
elif navigation == 'Results' and not about_us:
    results.display_results()
| true |
996029749e4674bbb6541d3ddf680a88c077471a | Python | jagadeshwarrao/programming | /whatdistance.py | UTF-8 | 126 | 2.765625 | 3 | [] | no_license | def solve(x1,y1,x2,y2):
# write your program from here
d=float(math.sqrt((pow(x2-x1,2)+pow(y2-y1,2))))
return d | true |
364b24159fe3ad58149d5bc6468da5e6ba69aa64 | Python | ElenaTest-dev/test-repository | /Task 19/product_page.py | UTF-8 | 1,286 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
import time
class ProductPage:
    """Page object for a shop's product page; methods return self for chaining."""
    def __init__(self, driver):
        # Shared WebDriver plus a 15-second explicit wait used by all steps.
        self.driver = driver
        self.wait = WebDriverWait(driver, 15)
    def get_duck_size_values_from_list(self):
        """Wait until the add-to-cart button is clickable, then cache the size <select> elements."""
        element = self.wait.until(EC.element_to_be_clickable((By.NAME, "add_cart_product")))
        self.selects = self.driver.find_elements_by_css_selector("#box-product tr:nth-child(1) select")
        return self
    def choose_duck_size_from_list(self):
        """Open the first size drop-down and pick the "Small" option."""
        self.driver.find_element_by_css_selector("#box-product tr:nth-child(1) select").click()
        select1 = Select(self.driver.find_element_by_css_selector("#box-product tr:nth-child(1) select"))
        select1.select_by_visible_text("Small")
        return self
    def add_to_cart_product_and_check_quantity(self, quantity):
        """Click add-to-cart, then wait until the cart counter shows *quantity*."""
        self.driver.find_element_by_name("add_cart_product").click()
        element = self.wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, "span.quantity"), str(quantity)))
        return self
| true |
99468e5c248925378f03b93707e3063fd29b0a2f | Python | adit-gajjar/AdventOfCode2015 | /day3/day3_pt2.py | UTF-8 | 722 | 3.203125 | 3 | [] | no_license | directions = open('input.txt', 'r').read()
# Advent of Code 2015 day 3 part 2: Santa and Robo-Santa alternate moves;
# count how many distinct houses receive at least one present.
visited_houses = {'0,0'}
# [x, y] coordinates, santa and robo santa position
current_positions = [[0, 0], [0, 0]]
santas_turn = True
for direction in directions:
    # Pick whichever mover's turn it is; the list entry is mutated in place.
    current_position = current_positions[0] if santas_turn else current_positions[1]
    if direction == '^':
        current_position[1] += 1
    elif direction == 'v':
        current_position[1] -= 1
    elif direction == '>':
        current_position[0] += 1
    elif direction == '<':
        current_position[0] -= 1
    else:
        print("invalid direction")
    # Track visits as "x,y" strings so positions are hashable set members.
    visited_houses.add("{x},{y}".format(x=current_position[0], y=current_position[1]))
    santas_turn = not santas_turn
print(len(visited_houses))
| true |
333adf5e2bd08600c1f9acd7782b988a2a1634c7 | Python | CS7591/Python-Classes | /2. Python Intermediate/4. Classes/6. OOP part 6.py | UTF-8 | 1,668 | 4.4375 | 4 | [] | no_license | # Sixth - Class / Static Methods -------------------------------------------------------------------------------------
class Employee:
    """Toy employee record used to demonstrate class and static methods."""

    # Shared, class-level state.
    raise_amount = 1.04
    number_of_employees = 0

    def __init__(self, first, last, salary):
        self.first_name = first
        self.last_name = last
        self.salary = salary
        self.email = f'{first.lower()}_{last.lower()}@company.com'
        # Count every instance ever created on the class itself.
        Employee.number_of_employees += 1

    def full_name(self):
        """Print the employee's full name."""
        print(f'Employee full name: {self.first_name} {self.last_name}')

    def apply_raise(self):
        """Raise the salary by the class-level rate (ignores per-instance overrides)."""
        self.salary *= Employee.raise_amount

    @classmethod
    def set_new_raise(cls, amount):
        """Change the raise rate on the class, affecting all instances at once."""
        cls.raise_amount = amount

    @staticmethod
    def is_even(number):
        """Return True when *number* is even."""
        return number % 2 == 0
# Main Program ---------------------------------------------------------------------------------------------------------
# Instances of the class Employee
employee_1 = Employee('Paul', 'Newman', 10000)
employee_2 = Employee('Rod', 'Stewart', 20000)
# Class attribute (variable)
# Instance lookups fall back to the class attribute, so all three match here.
print(Employee.raise_amount)
print(employee_1.raise_amount)
print(employee_2.raise_amount)
# Access class method
# set_new_raise rebinds the class attribute, so every instance sees 1.10.
Employee.set_new_raise(1.10)
print(Employee.raise_amount)
print(employee_1.raise_amount)
print(employee_2.raise_amount)
# employee_1.raise_amount = 1.20
# print(employee_1.raise_amount)
# Employee.set_new_raise(1.10)
# print(Employee.raise_amount)
# print(employee_1.raise_amount)
# print(employee_2.raise_amount)
# Static methods are callable from the class or from instances alike.
print(Employee.is_even(2))
print(employee_1.is_even(3))
print(employee_2.is_even(4))
c58adca2dcca8b74a3b096b82dbd7199a0e1bbd8 | Python | justintuduong/CodingDojo-Python | /Python/OOP/bank_account.py | UTF-8 | 1,188 | 4.03125 | 4 | [] | no_license | class BankAccount:
    def __init__(self, int_rate, balance):
        """Create an account with an interest rate (a fraction, e.g. 0.01) and a starting balance."""
        self.int_rate = int_rate
        self.balance = balance
def deposit(self, amount):
self.balance += amount
print(f'Your current balance is: $ {self.balance}')
return self
def withdraw(self, amount):
if self.balance - amount < 0:
print("Insufficient funds: Charging a $5 fee")
self.balance - 5
self.balance -= amount
print(f'Your current balance is: $ {self.balance}')
return self
def display_account_info(self):
print(f'Your current balance is: $ {self.balance}')
return self
def yield_interest(self):
int = 0
if self.balance > 0:
int = self.balance * self.int_rate
print(f'Interest accrued will be of $ {int}')
return self
# Demo: each mutator returns the account object, so the calls chain fluently.
justin = BankAccount(1/100, 1000)
jack = BankAccount(2/100, 500)
# NOTE(review): print() here shows the BankAccount object's default repr, not
# the balance — the balance lines come from the prints inside the methods.
print(justin.deposit(50).deposit(60).deposit(70).withdraw(100).yield_interest().display_account_info())
print(jack.deposit(100).deposit(2000).deposit(3000).withdraw(200).withdraw(200).withdraw(200).withdraw(200).yield_interest().display_account_info())
| true |
9077e3d2113d8b3a18e67454b9124ba9fa155077 | Python | a-toms/evo_game | /core/tests/species_test.py | UTF-8 | 1,495 | 3.40625 | 3 | [] | no_license | import unittest
from core.species import Species
class TestSpecies(unittest.TestCase):
    """Unit tests for the Species model: hunger, feeding and string formatting."""
    def setUp(self):
        # Fresh Species instance before every test.
        self.species = Species()
    def test_is_hungry__returns_true_when_hungry(self):
        # Hungry: food eaten is below the population size.
        self.species.population = 2
        self.species.food_eaten = 1
        self.assertEqual(
            True,
            self.species.is_hungry
        )
    def test_is_hungry__returns_false_when_not_hungry(self):
        # Fully fed: food eaten equals the population size.
        self.species.population = 2
        self.species.food_eaten = 2
        self.assertEqual(
            False,
            self.species.is_hungry
        )
    def test_eat_food(self):
        self.assertEqual(
            0,
            self.species.food_eaten
        )
        self.species.eat_food(4)
        self.assertEqual(
            4,
            self.species.food_eaten
        )
    def test_update_species_population_and_food_eaten_after_feeding(self):
        # After feeding, the population shrinks to the fed count and the
        # food counter resets to zero.
        self.species.population = 4
        self.species.food_eaten = 2
        self.species.update_species_population_and_food_eaten_after_feeding()
        self.assertEqual(
            [2, 0],
            [self.species.population, self.species.food_eaten]
        )
    def test_str(self):
        self.species.population = 2
        self.species.body_size = 3
        self.species.food_eaten = 2
        self.species.traits.add('Trait 1')
        self.assertEqual(
            'Species: Pop 2 - Body size 3 - Food eaten 2 - Traits {\'Trait 1\'}',
            str(self.species)
        )
| true |
7c360680b0ade2bdd83608393d42cb462c8f998e | Python | henryeherman/elixys | /server/config/ExportDatabase.py | UTF-8 | 3,395 | 2.75 | 3 | [] | no_license | """ExportDatabase
Exports the contents of the database to the specified directory
"""
import os
import json
import sys
sys.path.append("/opt/elixys/database/")
sys.path.append("/opt/elixys/core/")
from DBComm import DBComm
from SequenceManager import SequenceManager
# NOTE(review): Python 2 syntax (print statements) — run with python2 only.
if __name__ == '__main__':
    # Make sure we have a command line argument
    if len(sys.argv) != 2:
        print "Target direcory argument not found. Usage:"
        print "    python ExportDatabase.py [targetdirectory]"
        print ""
        exit()
    # Create the database layer and sequence manager
    pDBComm = DBComm()
    pDBComm.Connect()
    pSequenceManager = SequenceManager(pDBComm)
    # Create the directory if it doesn't exist
    sTargetDirectory = sys.argv[1]
    if not os.path.isdir(sTargetDirectory):
        print "Target direcory \"" + sTargetDirectory + "\" not found, creating..."
        os.mkdir(sTargetDirectory)
    # Create the database object
    pDatabase = {}
    pDatabase["type"] = "database"
    pDatabase["roles"] = []
    pDatabase["users"] = []
    pDatabase["savedsequences"] = []
    pDatabase["runhistory"] = []
    # Add the user roles
    print "Exporting roles..."
    pRoles = pDBComm.GetAllRoles("System")
    for pRole in pRoles:
        pDatabase["roles"].append({"type":"role",
            "name":pRole["name"],
            "flags":pRole["flags"]})
    # Add the users
    # NOTE(review): password hashes are written to a plaintext export file —
    # confirm this is acceptable for the deployment's threat model.
    print "Exporting users..."
    pUsers = pDBComm.GetAllUsers("System")
    for pUser in pUsers:
        sPasswordHash = pDBComm.GetUserPasswordHash("System", pUser["username"])
        pDatabase["users"].append({"type":"user",
            "username":pUser["username"],
            "firstname":pUser["firstname"],
            "lastname":pUser["lastname"],
            "passwordhash":sPasswordHash,
            "role":pUser["accesslevel"],
            "email":pUser["email"],
            "phone":pUser["phone"],
            "messagelevel":pUser["messagelevel"]})
    # Add the saved sequences
    print "Exporting saved sequences..."
    pSequences = pDBComm.GetAllSequences("System", "Saved")
    nCount = 1
    for pSequence in pSequences:
        # Format the filename
        sFilenameShort = "SavedSequence" + str(nCount) + ".seq"
        sFilenameLong = os.path.join(sTargetDirectory, sFilenameShort)
        pDatabase["savedsequences"].append({"type":"sequence",
            "filename":sFilenameShort})
        nCount += 1
        # Export the sequence
        pSequenceManager.ExportSequence("System", pSequence["id"], sFilenameLong)
    # Add the run history
    print "Exporting run history..."
    pSequences = pDBComm.GetAllSequences("System", "History")
    nCount = 1
    for pSequence in pSequences:
        # Format the filename
        sFilenameShort = "RunHistory" + str(nCount) + ".seq"
        sFilenameLong = os.path.join(sTargetDirectory, sFilenameShort)
        pDatabase["runhistory"].append({"type":"sequence",
            "filename":sFilenameShort})
        nCount += 1
        # Export the sequence
        pSequenceManager.ExportSequence("System", pSequence["id"], sFilenameLong)
    # Save the database file
    sDatabaseFile = os.path.join(sTargetDirectory, "Database.dat")
    # NOTE(review): the file handle is never closed/flushed explicitly —
    # consider pDatabaseFile.close() (or a with-statement) before exiting.
    pDatabaseFile = open(sDatabaseFile, "w")
    sDatabaseJSON = json.dumps(pDatabase)
    pDatabaseFile.write(sDatabaseJSON)
    # Complete
    pDBComm.Disconnect()
    print "Done"
| true |
b5195b6fd6665c7952aff13107a3e76a49e9ebfa | Python | hmumick/FinalProject18 | /PygameVisuals (File With the Most Updated Pygame Code, Using Git Correctly).py | UTF-8 | 5,848 | 2.890625 | 3 | [] | no_license | import pygame
pygame.init()
display_width= 880
display_height= 600
black= (0,0,0)
white= (255,255,255)
red= (255,0,0)
yellow= (255,209,0)
green= (0,255,0)
darkblue= (0,0,255)
lightblue= (0,255,255)
purple= (189,0,255)
lightgreen= (0,255,158)
orange= (255,162,0)
brown= (129,90,6)
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Magnet Monopoly')
BackgroundIMG = pygame.image.load("monopolyboard.png")
gameDisplay.blit(BackgroundIMG, [0,0])
# Load the two player-token sprites. NOTE(review): `gameDisplay` and pygame
# init happen earlier in this file (above this chunk) -- not visible here.
hatIMG = pygame.image.load("hat.png")
carIMG = pygame.image.load("car.png")
# Both tokens start on the "Start" square (bottom-right corner of the board).
Start = (750, 520)
gameDisplay.blit(hatIMG, (Start))
gameDisplay.blit(carIMG, (Start))
# --- Car-token pixel coordinates for every board square, walking
# --- counter-clockwise from Start. The blit calls are kept commented out;
# --- presumably the token is drawn only on the square it occupies.
PintoCar = (650, 520)
#gameDisplay.blit(carIMG, (PintoCar))
Chance1Car = (580, 520)
#gameDisplay.blit(carIMG, (Chance1Car))
ArnoldCar = (510, 520)
#gameDisplay.blit(carIMG, (ArnoldCar))
Tax1Car = (440, 520)
#gameDisplay.blit(carIMG, (Tax1Car))
MakerStationCar = (370, 520)
#gameDisplay.blit(carIMG, (MakerStationCar))
RaiteCar = (300, 520)
#gameDisplay.blit(carIMG, (RaiteCar))
Chance2Car = (230, 520)
#gameDisplay.blit(carIMG, (Chance2Car))
GuptaCar = (160, 520)
#gameDisplay.blit(carIMG, (GuptaCar))
WickCar = (90, 520)
#gameDisplay.blit(carIMG, (WickCar))
LOPCar = (0, 520)
#gameDisplay.blit(carIMG, (LOPCar))
TenanbaumCar = (0, 440)
#gameDisplay.blit(carIMG, (TenanbaumCar))
Chance3Car = (0, 390)
#gameDisplay.blit(carIMG, (Chance3Car))
NowakoskiCar = (0, 340)
#gameDisplay.blit(carIMG, (NowakoskiCar))
McMenaminCar = (0, 290)
#gameDisplay.blit(carIMG, (McMenaminCar))
GymCar = (0, 240)
#gameDisplay.blit(carIMG, (GymCar))
MansfieldCar = (0,190)
#gameDisplay.blit(carIMG, (MansfieldCar))
Chance4Car = (0,140)
#gameDisplay.blit(carIMG, (Chance4Car))
ValverdeCar = (0,90)
#gameDisplay.blit(carIMG, (ValverdeCar))
MejiaCar = (0,40)
#gameDisplay.blit(carIMG, (MejiaCar))
FreeParkingCar = (0,0)
#gameDisplay.blit(carIMG, (FreeParkingCar))
SansiCar = (90,0)
#gameDisplay.blit(carIMG, (SansiCar))
ValleyCar = (160,0)
#gameDisplay.blit(carIMG, (ValleyCar))
Chance5Car = (230, 0)
#gameDisplay.blit(carIMG, (Chance5Car))
DraeselCar = (300,0)
#gameDisplay.blit(carIMG, (DraeselCar))
LabCar = (370,0)
#gameDisplay.blit(carIMG, (LabCar))
WeisserCar = (440, 0)
#gameDisplay.blit(carIMG, (WeisserCar))
FangCar = (510,0)
#gameDisplay.blit(carIMG, (FangCar))
Chance6Car = (580,0)
#gameDisplay.blit(carIMG, (Chance6Car))
GersteinCar = (650,0)
#gameDisplay.blit(carIMG, (GersteinCar))
GoToLopCar = (750, 0)
# fixed commented-out blit: the variable is GoToLopCar (was GoToLOPCar)
#gameDisplay.blit(carIMG, (GoToLopCar))
OConnorCar = (750, 40)
#gameDisplay.blit(carIMG, (OConnorCar))
LiuCar = (750, 90)
#gameDisplay.blit(carIMG, (LiuCar))
Chance7Car = (750, 140)
#gameDisplay.blit(carIMG, (Chance7Car))
MoskowitzCar = (750, 190)
#gameDisplay.blit(carIMG, (MoskowitzCar))
AuditStationCar = (750, 240)
#gameDisplay.blit(carIMG,(AuditStationCar))
Chance8Car = (750, 290)
#gameDisplay.blit(carIMG, (Chance8Car))
GuidanceCar = (750, 340)
#gameDisplay.blit(carIMG, (GuidanceCar))
Tax2Car = (750, 390)
#gameDisplay.blit(carIMG, (Tax2Car))
RafalowskiCar = (750, 440)
#gameDisplay.blit(carIMG, (RafalowskiCar))
# --- Hat-token pixel coordinates for the same board squares. The hat is
# --- offset ~20px from the car position on each square so both tokens can
# --- share a square without overlapping.
PintoHat = (670, 520)
#gameDisplay.blit(hatIMG, (PintoHat))
Chance1Hat = (600, 520)
#gameDisplay.blit(hatIMG, (Chance1Hat))
ArnoldHat = (530, 520)
#gameDisplay.blit(hatIMG, (ArnoldHat))
Tax1Hat = (460, 520)
#gameDisplay.blit(hatIMG, (Tax1Hat))
MakerStationHat = (390, 520)
#gameDisplay.blit(hatIMG, (MakerStationHat))
RaiteHat = (320, 520)
# fixed copy-paste error in the commented-out blit (was Chance1Hat)
#gameDisplay.blit(hatIMG, (RaiteHat))
Chance2Hat = (250, 520)
#gameDisplay.blit(hatIMG, (Chance2Hat))
GuptaHat = (180, 520)
#gameDisplay.blit(hatIMG, (GuptaHat))
WickHat = (110, 520)
#gameDisplay.blit(hatIMG, (WickHat))
LOPHat = (0, 520)
#gameDisplay.blit(hatIMG, (LOPHat))
TenanbaumHat = (0, 455)
#gameDisplay.blit(hatIMG, (TenanbaumHat))
Chance3Hat = (0, 410)
#gameDisplay.blit(hatIMG, (Chance3Hat))
NowakoskiHat = (0, 365)
#gameDisplay.blit(hatIMG, (NowakoskiHat))
McMenaminHat = (0, 320)
#gameDisplay.blit(hatIMG, (McMenaminHat))
GymStationHat = (0,275)
#gameDisplay.blit(hatIMG, (GymStationHat))
MansfieldHat = (0, 225)
#gameDisplay.blit(hatIMG, (MansfieldHat))
Chance4Hat = (0, 180)
#gameDisplay.blit(hatIMG, (Chance4Hat))
ValverdeHat = (0,135)
#gameDisplay.blit(hatIMG, (ValverdeHat))
MejiaHat = (0, 90)
#gameDisplay.blit(hatIMG, (MejiaHat))
FreeParkingHat = (0, 0)
#gameDisplay.blit(hatIMG, (FreeParkingHat))
SansiHat = (110, 0)
#gameDisplay.blit(hatIMG, (SansiHat))
ValleyHat = (180, 0)
#gameDisplay.blit(hatIMG, (ValleyHat))
Chance5Hat = (250, 0)
#gameDisplay.blit(hatIMG, (Chance5Hat))
DraeselHat = (320, 0)
#gameDisplay.blit(hatIMG, (DraeselHat))
LabHat = (390, 0)
#gameDisplay.blit(hatIMG, (LabHat))
WeisserHat = (460, 0)
#gameDisplay.blit(hatIMG, (WeisserHat))
FangHat = (530, 0)
#gameDisplay.blit(hatIMG, (FangHat))
Chance6Hat = (600,0)
#gameDisplay.blit(hatIMG, (Chance6Hat))
GersteinHat = (670, 0)
#gameDisplay.blit(hatIMG, (GersteinHat))
GoToLOPHat = (750, 0)
#gameDisplay.blit(hatIMG, (GoToLOPHat))
OConnorHat = (750, 90)
#gameDisplay.blit(hatIMG, (OConnorHat))
LiuHat = (750, 135)
#gameDisplay.blit(hatIMG, (LiuHat))
Chance7Hat = (750, 180)
#gameDisplay.blit(hatIMG, (Chance7Hat))
MoskowitzHat = (750, 225)
#gameDisplay.blit(hatIMG, (MoskowitzHat))
AuditStationHat = (750, 275)
#gameDisplay.blit(hatIMG, (AuditStationHat))
Chance8Hat = (750, 320)
#gameDisplay.blit(hatIMG, (Chance8Hat))
GuidanceHat = (750, 365)
# fixed copy-paste error in the commented-out blit (was Chance3Hat)
#gameDisplay.blit(hatIMG, (GuidanceHat))
Tax2Hat = (750, 410)
#gameDisplay.blit(hatIMG, (Tax2Hat))
RafalowskiHat = (750, 455)
#gameDisplay.blit(hatIMG, (RafalowskiHat))
# Main event loop: run until the user closes the window.
# FIX: pygame.time.Clock is a class -- the original bound the class itself
# instead of creating an instance, so re-enabling clock.tick(60) below
# would have raised a TypeError.
clock = pygame.time.Clock()
crashed = False
while not crashed:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            crashed = True
        # NOTE(review): per-event debug echo; placement inside the event
        # loop reconstructed from the flattened source -- confirm.
        print(event)
    pygame.display.update()
    #clock.tick(60)  # re-enable to cap the frame rate at 60 FPS
pygame.quit()
| true |
efc9d0c53d1cbbfb64c9e7691e6007a7f39c0852 | Python | gsurma/twitter_data_parser | /twitter_account_data_parser.py | UTF-8 | 2,579 | 2.53125 | 3 | [
"MIT"
] | permissive | import tweepy
import csv
import os.path
import config
from helpers import Helpers
class Main:
    """Python 2 scraper: fetches Twitter account metadata for every
    configured user via tweepy, writes one CSV per user, then merges all
    per-user files into a single combined CSV.
    """

    # Module-wide authenticated tweepy session built from config credentials.
    auth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret)
    auth.set_access_token(config.access_key, config.access_secret)
    api = tweepy.API(auth)
    # Number of account-data API requests issued so far (rate-limit bookkeeping).
    account_data_requests = 0

    def main_loop(self):
        # Fetch and persist data for each user, then aggregate the results.
        list_of_users = Helpers.get_list_of_users(file_path=config.users_file)
        for user in list_of_users:
            print "Getting data for " + user
            # NOTE(review): `method=` receives the *result* of the call
            # (always 1), not a callable -- presumably the helper only adds
            # it to the request counter and sleeps at the quota; confirm
            # against helpers.py.
            self.account_data_requests = Helpers.get_data_for_user_and_sleep_if_necessary(request_counter=self.account_data_requests,
                                                                                          request_limit=config.account_data_request_quota,
                                                                                          method=Main.get_account_data_for_user(self.api, user))
        # NOTE(review): aggregation assumed to run once after all users;
        # the flattened indentation of this source is ambiguous here.
        self.save_account_data_for_all_users()

    @staticmethod
    def get_account_data_for_user(api, user):
        # Fetch one user's profile and save it. Errors are logged and
        # swallowed (best-effort scrape). Always reports 1 request consumed.
        try:
            account_data = api.get_user(screen_name=user)
            Main.prepare_and_save_account_data_for_user(account_data, user)
        except Exception, e:
            print e
            pass
        return 1

    @staticmethod
    def prepare_and_save_account_data_for_user(data, user):
        # Flatten the tweepy User object into the column order declared by
        # config.account_data_headers and write it to the per-user file.
        prepared_data = [data.friends_count,
                         data.followers_count,
                         data.favourites_count,
                         data.geo_enabled, data.listed_count,
                         data.verified, data.created_at,
                         data.location, data.id,
                         data.name]
        Helpers.save_data_to_file(data=prepared_data,
                                  file_path=Helpers.get_file_path_for_user(user=user,
                                                                           sufix=config.account_data_name),
                                  headers=config.account_data_headers,
                                  mode=config.write_mode,
                                  single=True)

    def save_account_data_for_all_users(self):
        # Walk the output folder and collect the single data row (line
        # index 1, i.e. right after the header) from every per-user CSV.
        account_data = []
        for root, dirs, files in os.walk(config.output_folder):
            for file in files:
                if not (file.startswith('.')):
                    folder_name = file.split(config.account_data_name, 1)[0][:-1]
                    file_name = config.output_folder + "/" + folder_name + "/" + file
                    if os.path.isfile(file_name):
                        with open(file_name, config.read_or_write_mode) as f:
                            reader = csv.reader(f)
                            for index, item in enumerate(reader):
                                if index == 1:
                                    account_data.append(item)
        self.save_account_data_to_file(file_path=config.account_data_file,
                                       data=account_data)

    def save_account_data_to_file(self, file_path, data):
        # Write the aggregated rows under the standard header set.
        print "Saving " + str(len(data)) + " entries to " + file_path
        Helpers.save_data_to_file(data=data,
                                  file_path=file_path,
                                  headers=config.account_data_headers,
                                  mode=config.write_mode,
                                  single=False)
if __name__ == '__main__':
Main().main_loop() | true |
163b00199621162a6779dda099cfe6f60a08a16f | Python | costagguilherme/python-desafios | /exercicios/condições/desafio44.py | UTF-8 | 850 | 3.84375 | 4 | [
"MIT"
] | permissive | p = float(input('INFORME O VALOR DO PRODUTO R$'))
# Payment-option exercise: apply a discount or surcharge to the base
# price `p` (read from input just above this block).
a = p - (0.1 * p)           # option 1: cash/cheque -> 10% off
b = p - (0.05 * p)          # option 2: card, single payment -> 5% off
pcomjuros = p + ( p * 0.2)  # option 4: 3+ installments -> 20% surcharge
print('Qual a opção de pagamento ?')
print(''' OPÇÕES DE PAGAMENTO
[ 1 ] Á vista em dinheiro/cheque.
[ 2 ] Á vista no cartão.
[ 3 ] Em até 2x no cartão.
[ 4 ] 3x ou mais no cartão. ''')
e = int(input('Qual a sua opção?'))
if e == 1:
    print(f'O valor do produto ficará R${a} ')
elif e == 2:
    print(f'O valor do produtado ficará R${b}')
elif e == 3:
    # option 3 (2 installments): price unchanged
    print(f'O preço do produto se manterá em R${p}')
elif e == 4:
    vezes = int(input('Em quantas vezes você quer parcelar? '))
    print(f'O preço do produto será R${pcomjuros}')
    prest = pcomjuros / vezes
    print(f'As prestações serão no valor de R${prest} por mês em {vezes} meses')
else:
print('OPÇÃO INVALIDA DE PAGAMENTO, TENTE NOVAMENTE!') | true |
0dce3e5b824e1050c88108cfcd3bfad1c49929fb | Python | fufuninja/GIS-Borderize | /geocoding.py | UTF-8 | 294 | 3.015625 | 3 | [] | no_license | import csv
from geopy.geocoders import Nominatim
# Read "lat, lon" strings from the first column of latlong2.csv and
# reverse-geocode each with Nominatim (OpenStreetMap), printing addresses.
# FIX: the input file was opened and never closed; a context manager now
# releases the handle as soon as the rows have been read.
with open('latlong2.csv') as f:
    csv_f = csv.reader(f)
    address = [row[0] for row in csv_f]
del address[0]  # drop the header row
geolocator = Nominatim()
# NOTE(review): the 18..53 slice is hard-coded -- presumably resuming a
# partially completed run; confirm before reuse.
for x in range(18, 54):
    location = geolocator.reverse(address[x], timeout=200)
    print(location.address)
| true |
9068fa447ad37753addb54b68bf7efbc97932e7f | Python | BNU-LeiWen/Input-output | /compute_change.py | UTF-8 | 3,366 | 2.671875 | 3 | [] | no_license | import os
import pickle
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict, Counter
from utils.data import min_max_norm_for_vec, load_industry_infos
def load_data(path):
    """Load one pickled country file.

    Returns a pair: the min-max normalized 'ss' matrix and the
    share-normalized total-output weights taken from the last row
    (columns 0-32) of the 'origin_data' table.
    """
    with open(path, 'r') as handle:
        payload = pickle.load(handle)
    flows = payload["origin_data"]
    # The final row of the flow table holds total output per industry.
    weights = flows[flows.shape[0] - 1, 0:33]
    weights = weights / np.sum(weights)
    return min_max_norm_for_vec(payload['ss']), weights
def define_change(arr1, arr2, weights):
    """Per-industry movement between two 2-D embeddings plus a weighted
    country-level change coefficient.

    Returns (result, coefficient) where result[i] holds
    [dx, dy, dx^2, dy^2, euclidean distance] for industry i and the
    coefficient is the distance weighted by the industry weights.
    """
    assert arr1.shape == arr2.shape
    n = len(arr1)
    result = np.zeros((n, 5))
    for i in range(n):
        dx = arr1[i, 0] - arr2[i, 0]
        dy = arr1[i, 1] - arr2[i, 1]
        result[i, 0] = dx
        result[i, 1] = dy
        result[i, 2] = np.square(dx)
        result[i, 3] = np.square(dy)
        result[i, 4] = np.sqrt(np.square(dx) + np.square(dy))
    # Country-level change: weighted sum of per-industry distances.
    coefficent = 0
    for i in range(n):
        coefficent += result[i, 4] * weights[i]
    return result, coefficent
def define_distance(arr1):
    """Euclidean norm of each industry's 2-D coordinate (change magnitude)."""
    result = np.zeros(len(arr1))
    for i, row in enumerate(arr1):
        result[i] = np.sqrt(np.square(row[0]) + np.square(row[1]))
    return result
if __name__ == '__main__':
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    data_dir = os.path.join(BASE_DIR, 'pickle')
    industry_info = load_industry_infos(os.path.join(BASE_DIR, 'files', 'abb_label.csv'))
    # Country codes come from the file names; presumably files are named
    # '<country>_<year>.<ext>' (see the splits below) -- confirm on disk.
    countries = sorted(set([i.split('_')[0] for i in os.listdir(data_dir)]))
    years = ['1995', '2011']
    # fetch data: per country, the normalized embedding and weights for
    # each of the two comparison years
    data_dic = {}
    for country in countries:
        data_dic[country] = []
        for data_file in sorted(os.listdir(data_dir)):
            cny = os.path.splitext(data_file)[0].split("_")[0]
            yer = os.path.splitext(data_file)[0].split("_")[1]
            if cny == country and yer in years:
                data_norm, weights = load_data(os.path.join(data_dir, data_file))
                data_dic[country].append({yer: [data_norm, weights]})
    # define change and rank industries by distance, largest first
    t = []
    result = {}
    industry = {}
    for ckey in countries:
        print len(data_dic[ckey])
        # index 0/1 = first/second year (files were scanned sorted)
        arr1 = data_dic[ckey][0].values()[0][0]
        arr2 = data_dic[ckey][1].values()[0][0]
        weight = data_dic[ckey][1].values()[0][1]
        scores, tscores = define_change(arr1,arr2,weight)
        t.append(tscores)
        idx = sorted(range(len(scores)), key=lambda k:scores[:, 4][k])
        inlabel = [industry_info['abb'][i] for i in idx][::-1]
        result[ckey] = {"scores":scores, "rank":inlabel}
        industry[ckey] = inlabel[:10]
    # # rank countries which have undergone dramatic change
    # idx_c = sorted(range(len(t)), key=lambda k:t[k])
    # rank_c = [countries[i] for i in idx_c][::-1]
    # print rank_c
    # Count how often each industry appears in any country's top-10.
    counter_list = []
    for row in industry.values():
        for cnt in row:
            counter_list.append(cnt)
    c = Counter(counter_list)
    print "first 10 industries which dramatic changed from %s to %s" %(years[0], years[1])
    print c
| true |
bac5daf9a0ed51dfd3e2e448d3b262be6f32a243 | Python | code-camp-ksw/Ecursius | /Items/fish.py | UTF-8 | 200 | 2.546875 | 3 | [] | no_license | from . import base
class Fish(base.Food):
    """Fish food item: base price 13, restores 300 saturation."""

    def __init__(self, data):
        # "fish" is the item identifier passed to the base Food class.
        super().__init__(data, "fish")
        self.name = "Fish"
        self.base_price = 13
        self.saturation = 300
| true |
ac325473be9e93dceeea9d4c4234efd439b3e0d4 | Python | slaurent22/lss-tools | /lss/merge.py | UTF-8 | 3,348 | 3.078125 | 3 | [] | no_license | from argparse import Namespace
from typing import Dict, Tuple
from .splits import Splits
from .timeutil import format_from_milliseconds, format_time_tuple, to_milliseconds, TimeTuple
# Attempt id -> recorded time tuple (TimeTuple comes from .timeutil).
TimeDict = Dict[int, TimeTuple]
# (time_a_ms, time_b_ms, sum_ms) for one attempt across the two merged
# segments (see __combine_time_dicts_helper__ below).
CombinedTimeEntry = Tuple[int, int, int]
CombinedTimeDict = Dict[int, CombinedTimeEntry]
def display_time_dict(time_dict: TimeDict):
    """Print every attempt id alongside its formatted time."""
    for time_id, time_tuple in time_dict.items():
        print(time_id, format_time_tuple(time_tuple))
def _combined_time_sort_key_(combined_entry: Tuple[int, CombinedTimeEntry]):
    """Sort key for (id, entry) pairs: the summed time, entry[2]."""
    times = combined_entry[1]
    return times[2]
def display_combined_time_dict(combined_dict: CombinedTimeDict, time_a_label: str, time_b_label: str):
    """Print a table of combined attempt times sorted by their sum.

    Attempts missing either segment time (recorded as 0) are skipped.
    """
    format_str = '{:<15}{:<20}{:<20}{:<15}'
    print(format_str.format('Attempt id', time_a_label, time_b_label, 'Sum'))
    for time_id, (time_a, time_b, time_sum) in sorted(combined_dict.items(), key=_combined_time_sort_key_):
        if time_a == 0 or time_b == 0:
            continue
        print(format_str.format(time_id,
                                format_from_milliseconds(time_a),
                                format_from_milliseconds(time_b),
                                format_from_milliseconds(time_sum)))
def __combine_time_dicts_helper__(combined, time_dict_a, time_dict_b, invert=False):
    """Fill `combined` with (a_ms, b_ms, sum_ms) for every attempt of
    time_dict_a, pairing it with the matching attempt in time_dict_b
    (None when absent). With invert=True the first two slots are swapped
    so entries stay in (a, b) order regardless of iteration direction.
    """
    for time_id, time_tuple_a in time_dict_a.items():
        time_tuple_b = time_dict_b.get(time_id)
        time_a = to_milliseconds(time_tuple_a)
        time_b = to_milliseconds(time_tuple_b)
        time_sum = time_a + time_b
        if invert:
            combined[time_id] = (time_b, time_a, time_sum)
        else:
            combined[time_id] = (time_a, time_b, time_sum)
def combine_time_dicts(time_dict_a: TimeDict, time_dict_b: TimeDict):
    """Merge two per-attempt time dicts into one CombinedTimeDict.

    The second (inverted) pass covers attempts present only in dict b;
    attempts present in both are simply rewritten with identical values.
    """
    combined: CombinedTimeDict = {}
    for first, second, flipped in ((time_dict_a, time_dict_b, False),
                                   (time_dict_b, time_dict_a, True)):
        __combine_time_dicts_helper__(combined, first, second, invert=flipped)
    return combined
def merge(splits: Splits, args: Namespace):
    """Merge the segment named args.merge_point into the one that follows
    it and print the combined per-attempt times, sorted by their sum."""
    print('Merging on "{}"'.format(args.merge_point))
    segments = list(splits.segments)
    matches = [(index, seg) for (index, seg) in enumerate(segments) if seg.name == args.merge_point]
    if len(matches) != 1:
        # Ambiguous or missing segment name: bail out with a message.
        print('Expected to find 1 matching segment, found {} instead'.format(len(matches)))
        return
    merge_index, merge_segment = matches[0]
    next_segment = segments[merge_index + 1]
    print('Merging into "{}"'.format(next_segment.name))
    print('Sorting by merged Golds. The top time is the proper combined gold, from a single run')
    print()
    combined = combine_time_dicts(merge_segment.get_game_time_dict(),
                                  next_segment.get_game_time_dict())
    display_combined_time_dict(combined, args.merge_point, next_segment.name)
| true |
aa0028c505e9ec33395d075bb1dfe77b047921d5 | Python | OsvaldoSalomon/PythonCourses | /02SecondCourse/Assignments/Section03/Assignment05.py | UTF-8 | 565 | 4.34375 | 4 | [] | no_license | """
Define a function called key_list_items that can accept an unlimited number
of lists along with another argument. The function should return the second to last item
in the specific list specified by the user of the function.
Example:
For example, the code below function call should return: jan
key_list_items("people", things=['book','tv'], people=['pete','mike','jan','tom'])
"""
def key_list_items(key, **kwargs):
    """Print AND return the second-to-last item of the list passed under
    the keyword argument named by `key`.

    FIX: the assignment asks for the value to be *returned*; the original
    only printed it. The print is kept for backward compatibility.
    """
    keys = kwargs[key]
    print(keys[-2])
    return keys[-2]
key_list_items("people", things=['book', 'tv'], people=['pete', 'mike', 'jan', 'tom'])
| true |
496d80bcbd132b26f957fccca29a5ba1236e258c | Python | hairuo/LearnAByteOfPython | /module_using_sys.py | UTF-8 | 492 | 3.65625 | 4 | [] | no_license | import sys
# To avoid typing the `sys.` prefix each time, `from sys import argv`
# could be used instead.
print('The command line arguments are: ')
for i in sys.argv:
    print(i)
print('\n\nThe PYTHONPATH is', sys.path, '\n')
import os
print(os.getcwd())
# In general, prefer plain `import` statements over `from ... import ...`:
# this avoids name clashes in your program and keeps it more readable.
from math import sqrt
print("Square root of 16 is", sqrt(16))
| true |
4934f5c3c2c5077c130f27e76c6f47dccd760cad | Python | CarpeDiemJR/VatExperiments | /model.py | UTF-8 | 3,882 | 2.75 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
from VatExperiments.generateData import *
from VatExperiments.vat import *
from VatExperiments.denseNets import get_model
# 28x28 single-channel input images (presumably MNIST-style digits --
# see the (1, 7) digit classes in __main__; confirm in generateData).
input_shape = (28, 28, 1)
# Default VAT perturbation-search hyperparameters (see vat.py).
num_power_iterations=2
xi=8.0
epsilon=1e-6
# Shared dense network used by train/train_step below.
model = get_model(input_shape=input_shape)
def labels2binary(labels, dict):
    """Remap label values in place using `dict` (str(old_label) -> new
    value) and return the same array.

    NOTE: the parameter `dict` shadows the builtin; the name is kept for
    interface compatibility.
    """
    mapping = dict
    for key, value in mapping.items():
        labels[labels == int(key)] = value
    return labels
def logits2pred(logits):
    """Class predictions (argmax over softmax) as an (N, 1) column tensor."""
    probabilities = tf.nn.softmax(logits)
    max_loc = tf.math.argmax(probabilities, axis=1)
    return tf.reshape(max_loc, (-1, 1))
def train(images, labels, dict, epochs=2, batch_size=32, num_power_iterations=2, xi=8.0,
          epsilon=1e-6, verbal=False):
    """Train the shared model with VAT over epochs of shuffled batches.

    `dict` maps str(original label) -> binary class; NaN labels mark
    unlabeled samples (handled inside train_step).
    FIX: `verbal` is now propagated to train_step -- the original passed
    verbal=True unconditionally, so per-step statistics printed even when
    the caller requested quiet training.
    """
    # convert the labels to binary classes in place
    labels = labels2binary(labels, dict)
    # prepare training
    optimizer = tf.keras.optimizers.RMSprop(1e-4)
    counter = 0
    print("Start Training...")
    print("%d batches in %d epoch(s) to go ..." % (epochs*(images.shape[0]//batch_size), epochs))
    for epoch in range(epochs):
        print("----epoch {}-----".format(epoch))
        batches = make_batch(images, labels, batch_size=batch_size)
        for batch in batches:
            counter += 1
            if verbal:
                print(">>>>>>training iteration no.%d"% counter)
            x_train, y_train = batch
            train_step(x_train, y_train, optimizer=optimizer, num_power_iterations=num_power_iterations,
                       xi=xi, epsilon=epsilon, verbal=verbal)
def train_step(x, y, optimizer, num_power_iterations=num_power_iterations, xi=xi, epsilon=epsilon, verbal=False):
    """One optimization step of VAT training on a mixed batch.

    NaN entries in y mark unlabeled samples: the adversarial perturbation
    `d` is computed over the whole batch, while the supervised part of the
    loss only sees the labeled subset.
    """
    # extract unlabeled
    unlabel_index = np.isnan(y).reshape(-1)
    x_labeled = x[~unlabel_index]
    y_labeled = y[~unlabel_index]
    x_unlabeled = x[unlabel_index]
    # get a good 'r': the virtual adversarial perturbation for the batch
    d = generative_virtual_adversarial_perturbation(x, model, num_power_iterations=num_power_iterations, xi=xi,
                                                    epsilon=epsilon)
    # create stats for the verbose report below
    num_label0 = sum(y_labeled==0)
    num_label1 = sum(y_labeled==1)
    num_unlabel = x_unlabeled.shape[0]
    # gradient descent with rmsprop
    weights = model.trainable_weights
    with tf.GradientTape() as tape:
        labeled_logits = model(x_labeled)
        labeled_logits = tf.nn.softmax(labeled_logits)
        # probability of class 1 for the labeled samples
        labeled_pred = labeled_logits[:, 1]
        # batch accuracy (%) on the labeled subset, reporting only
        accuracy = (sum(logits2pred(labeled_logits).numpy() == y_labeled) / x_labeled.shape[0]) * 100.0
        logit = model(x)
        logit_r = model(x + d)
        # custom_loss combines the supervised loss with the VAT
        # consistency term between clean and perturbed logits
        loss = custom_loss(y_labeled, labeled_pred, logit, logit_r)
    grad = tape.gradient(loss, weights)
    optimizer.apply_gradients(zip(grad, weights))
    if verbal:
        print("    loss:", loss.numpy())
        print("    acc: %.2f"% accuracy)
        print(" label 0: %d" % num_label0)
        print(" label 1: %d" % num_label1)
        print("no label: %d" % num_unlabel)
if __name__ == "__main__":
label_class = (1, 7)
class_dict = {str(label_class[0]): 0, str(label_class[1]): 1}
ib = 0.8
save_dir = "References"
images, labels = get_data(image_class=label_class, imbalance_rate=ib, shadow=.6)
train(images, labels, class_dict, verbal=True, epochs=2)
test_images, test_labels = get_test(label_class)
test_logit = model(test_images)
test_pred = logits2pred(test_logit)
test_labels = labels2binary(test_labels, class_dict)
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
import os
print(test_pred.numpy().reshape(-1))
cm = confusion_matrix(test_labels, test_pred.numpy())
print(cm)
cmp = ConfusionMatrixDisplay(cm, display_labels=label_class)
cmp.plot(cmap=plt.cm.Blues)
plt.title("Confusion Matrix with imbalence rate {}".format(ib))
plt.savefig(os.path.join(save_dir, "test_confusion_matrix_with_"+str(ib)+".pdf"))
plt.show()
| true |
c7a4f45cb60dc6d94b4683597a6647ce172b4d2d | Python | cyrilvincent/math | /demo_module.py | UTF-8 | 111 | 2.875 | 3 | [] | no_license | #import math
# from math import sqrt
import math
def sqrt(x):
    """Demo: a local `sqrt` that shadows the name but does not affect
    calls made through the `math.` module prefix."""
    answer = "Toto"
    return answer
# math.sqrt still resolves to the real function, not the local one above.
res = math.sqrt(4)
print(res) | true |
7c2ebf8483e33f090b81d7058c9de5951c81e75d | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_78/148.py | UTF-8 | 976 | 2.875 | 3 | [] | no_license | import sys
import functools
def toInt(x):
    """Coerce x to int (used as a map() callback when parsing lines)."""
    value = int(x)
    return value
def new_matrix(L,C,num):
    """Build an L-row by C-column matrix with every cell set to num.
    Rows are independent lists (safe to mutate individually)."""
    return [[num] * C for _ in range(L)]
# Google Code Jam solution (Python 2): redirect stdout to mandar.txt and
# read test cases from a fixed relative input path.
file = open('mandar.txt','w')
sys.stdout = file
# NOTE: shadows the builtin `input` with the input file handle.
input = open('../../input/a.in')
CASES = int(input.readline())
for case in range(CASES):
    # Each case is three space-separated ints N, D, G.
    # NOTE(review): their problem-specific meaning comes from the contest
    # statement, which is not visible here.
    args = map(toInt,input.readline().rstrip().split(' '))
    N=args[0]
    D=args[1]
    G=args[2]
    fD = float(D)/100
    res='Broken'
    if D<100 and G==100:
        res='Broken'
    elif D>0 and G==0:
        res='Broken'
    elif D==0:
        res='Possible'
    elif fD*N<1:
        res='Broken'
    else:
        # Count up to two factors of 5 (resp. 2) in D. Note d5 is reset to
        # D/5 (not d5/5) on each pass; since the loop caps the count at 2
        # this still distinguishes 0, 1 and 2+ factors correctly.
        fac5=0
        d5=D
        while fac5<2:
            if d5%5==0:
                d5=D/5
                fac5+=1
            else:
                break
        fac2=0
        d2=D
        while fac2<2:
            if d2%2==0:
                d2=D/2
                fac2+=1
            else:
                break
        # 100 / (detected factors) -> smallest N for which D% is realizable
        mult5 = 5**(2-fac5)
        mult2 = 2**(2-fac2)
        if N>=mult2*mult5:
            res = 'Possible'
        else:
            res='Broken'
    print 'Case #'+str(case+1)+': '+res
| true |
c2ca6ee785414a210010dae8b04181710f76379f | Python | jpverkamp/advent-of-code | /2015/day-10/part-1.py | UTF-8 | 534 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
def look_and_say(seq):
    """One look-and-say step, e.g. '1211' -> '111221'.

    Each run of identical characters is replaced by "<run length><char>".
    FIX: an empty input now returns '' -- the original unconditionally
    appended the final run and produced '0None' for empty sequences.
    (Also removed the unused `index` variable.)
    """
    result = ''
    count = 0
    buffer = None
    for c in seq:
        if c == buffer:
            count += 1
        else:
            # flush the previous run, if any
            if buffer:
                result += '{}{}'.format(count, buffer)
            count = 1
            buffer = c
    if buffer is not None:
        result += '{}{}'.format(count, buffer)
    return result
def repeat(f, n, seq):
    """Apply f to seq n times, feeding each result back in, and return
    the final value (seq unchanged when n == 0)."""
    for _ in range(n):
        seq = f(seq)
    return seq
print(len(repeat(look_and_say, int(sys.argv[2]), sys.argv[1])))
| true |
1f328147baaf3cd6637128acf586d966ea359683 | Python | vencabot/monster_collecting_jrpg | /mockups/version_4/test_rules_4.py | UTF-8 | 10,784 | 2.90625 | 3 | [] | no_license | import battle_4
class Rage(battle_4.DynamicRule):
    """After-rule: whenever the watched unit loses HP, it gains attack.

    The boost scales with severity: +1 (<3), +2 (<8), +3 otherwise.
    """

    def __init__(self, ruleset, target_unit, severity):
        super().__init__(
            ruleset, "Rage", "after", ["attack_increase"], severity,
            None, None)
        self.target_unit = target_unit

    def will_trigger_on(self, dynamic_event):
        # Any HP reduction suffered by the watched unit.
        if (
            dynamic_event.target is self.target_unit
            and dynamic_event.attr_name == "hp"
            and dynamic_event.new_value < dynamic_event.old_value):
            return True
        return False

    def trigger(self, dynamic_event):
        if self.severity < 3:
            attack_boost = 1
            trigger_message = (
                f"{self.target_unit.unit_name} is getting peeved!")
        elif self.severity < 8:
            attack_boost = 2
            trigger_message = (
                f"{self.target_unit.unit_name} is getting angry!")
        else:
            attack_boost = 3
            trigger_message = (
                f"{self.target_unit.unit_name} is getting mighty "
                "pissed off!")
        print(trigger_message)
        # Apply the boost through the rules engine so other rules can react.
        self.target_unit.update_w_rules(
            "atk", self.target_unit.atk + attack_boost,
            dynamic_event.perpetrated_by, dynamic_event.with_ability,
            dynamic_event.at_effectiveness, self)
class Invincible(battle_4.DynamicRule):
    """Before-rule: cancels any HP loss on the watched unit by replacing
    the incoming HP value with the unit's current HP."""

    def __init__(self, ruleset, target_unit, severity):
        super().__init__(
            ruleset, "Invincible", "before", ["damage_decrease"],
            severity, None, None)
        self.target_unit = target_unit

    def will_trigger_on(self, dynamic_event):
        # Any HP reduction aimed at the watched unit.
        if (
            dynamic_event.target is self.target_unit
            and dynamic_event.attr_name == "hp"
            and dynamic_event.new_value < dynamic_event.old_value):
            return True
        return False

    def trigger(self, dynamic_event):
        print(
            f"{self.target_unit.unit_name} is impervious to "
            f"{dynamic_event.perpetrated_by.unit_name}'s attack!")
        # Rewrite the event so HP stays unchanged.
        dynamic_event.replace_value(self.target_unit.hp, self)
class Hench(battle_4.DynamicRule):
    """Before-rule: doubles the damage the watched unit deals with its
    own (non-rule-triggered) attacks; taunts on a failed damage attempt."""

    def __init__(self, ruleset, target_unit, severity):
        super().__init__(
            ruleset, "Hench", "before", ["damage_increase"], severity,
            None, None)
        self.target_unit = target_unit

    def will_trigger_on(self, dynamic_event):
        # HP damage dealt by the watched unit itself (not by another rule).
        if (
            dynamic_event.perpetrated_by is self.target_unit
            and dynamic_event.original_event.triggering_rule is None
            and dynamic_event.attr_name == "hp"
            and dynamic_event.new_value < dynamic_event.old_value):
            return True
        return False

    def trigger(self, dynamic_event):
        print(
            f"{self.target_unit.unit_name} is hench'd out! Their "
            "damage doubles!")
        # Recompute the target's HP as if twice the damage was dealt.
        old_damage = dynamic_event.old_value - dynamic_event.new_value
        new_hp_value = dynamic_event.target.hp - old_damage * 2
        dynamic_event.replace_value(new_hp_value, self)

    def fail(self, dynamic_event):
        # Called when the rule did not trigger: taunt only when this unit's
        # own damage attempt ended up dealing no damage.
        original_event = dynamic_event.original_event
        if (
            dynamic_event.perpetrated_by is self.target_unit
            and original_event.triggering_rule is None
            and dynamic_event.attr_name == "hp"
            and original_event.new_value < original_event.old_value
            and dynamic_event.new_value >= dynamic_event.old_value):
            print(
                f"{self.target_unit.unit_name} is hench'd out! But "
                "they failed to do damage!")
class AndOne(battle_4.DynamicRule):
    """Before-rule: adds one extra point of damage to the watched unit's
    own damaging attacks."""

    def __init__(self, ruleset, target_unit, severity):
        super().__init__(
            ruleset, "And One", "before", ["damage_increase"],
            severity, None, None)
        self.target_unit = target_unit

    def will_trigger_on(self, dynamic_event):
        # HP damage dealt by the watched unit itself (not by another rule).
        if (
            dynamic_event.perpetrated_by is self.target_unit
            and dynamic_event.original_event.triggering_rule is None
            and dynamic_event.attr_name == "hp"
            and dynamic_event.new_value < dynamic_event.old_value):
            return True
        return False

    def trigger(self, dynamic_event):
        print(
            f"And one! {self.target_unit.unit_name} gets extra damage!")
        # One more HP off the already-computed new value.
        dynamic_event.replace_value(dynamic_event.new_value - 1, self)

    def fail(self, dynamic_event):
        # Taunt when this unit attempted damage but none landed.
        original_event = dynamic_event.original_event
        if (
            dynamic_event.perpetrated_by is self.target_unit
            and dynamic_event.attr_name == "hp"
            and original_event.new_value < original_event.old_value
            and dynamic_event.new_value >= dynamic_event.old_value):
            print(
                f"And- wha...? {self.target_unit.unit_name} failed to "
                "do damage!")
class Persistence(battle_4.DynamicRule):
    """After-rule: when the watched unit's own damage attempt ends up
    dealing no damage, it gains +1 attack instead."""

    def __init__(self, ruleset, target_unit, severity):
        super().__init__(
            ruleset, "Persistence", "after", ["attack_increase"],
            severity, None, None)
        self.target_unit = target_unit

    def will_trigger_on(self, dynamic_event):
        # The original event was a damage attempt by this unit, but the
        # final event no longer reduces HP (damage was cancelled).
        original_event = dynamic_event.original_event
        if (
            dynamic_event.perpetrated_by is self.target_unit
            and original_event.triggering_rule is None
            and dynamic_event.attr_name == "hp"
            and original_event.new_value < original_event.old_value
            and dynamic_event.new_value >= dynamic_event.old_value):
            return True
        return False

    def trigger(self, dynamic_event):
        print(
            f"{self.target_unit.unit_name} failed to do damage, but "
            f"they won't give up! {self.target_unit.unit_name} gained "
            "extra attack power!")
        self.target_unit.update_w_rules(
            "atk", self.target_unit.atk + 1, self.target_unit,
            dynamic_event.with_ability, dynamic_event.at_effectiveness,
            self)
class OldManGenes(battle_4.DynamicRule):
    """After-rule on a *leader*: when the leader's AP drops below 5,
    every unit in their party loses 1 attack."""

    def __init__(self, ruleset, target_leader, severity):
        super().__init__(
            ruleset, "Old Man Genes", "after", ["attack_decrease"],
            severity, None, None)
        self.target_leader = target_leader

    def will_trigger_on(self, dynamic_event):
        # AP crossing the 5 threshold downward (was >=5, now <5).
        if (
            dynamic_event.target is self.target_leader
            and dynamic_event.attr_name == "ap"
            and dynamic_event.old_value >= 5
            and dynamic_event.new_value < 5):
            return True
        return False

    def trigger(self, dynamic_event):
        print(
            f"{self.target_leader.leader_name}'s entire party has the "
            "old man genes! They're all drowsy. Attack power down! T_T")
        for battle_unit in self.target_leader.party:
            battle_unit.update_w_rules(
                "atk", battle_unit.atk - 1,
                dynamic_event.perpetrated_by,
                dynamic_event.with_ability,
                dynamic_event.at_effectiveness, self)
class Poison(battle_4.DynamicRule):
    """After-rule: the poisoned unit takes `severity` damage after each
    ability action it performs itself.

    NOTE(review): unlike the other rules here, __init__ takes severity
    before target_unit -- watch the argument order at call sites.
    """

    def __init__(self, ruleset, severity, target_unit):
        super().__init__(
            ruleset, "Poison", "after", ["hp_damage"], severity, None,
            None)
        self.target_unit = target_unit

    def will_trigger_on(self, dynamic_event):
        # Any non-rule-triggered ability use by the poisoned unit.
        if (
            dynamic_event.perpetrated_by is self.target_unit
            and not dynamic_event.triggering_rule
            and dynamic_event.with_ability):
            return True
        return False

    def trigger(self, dynamic_event):
        print(
            f"{self.target_unit.unit_name} suffered the effects of "
            f"Poison! They're taking {self.severity} damage!")
        self.target_unit.update_w_rules(
            "hp", self.target_unit.hp - self.severity, self.target_unit,
            dynamic_event.with_ability, dynamic_event.at_effectiveness,
            self)
class SealRule(battle_4.DynamicRule):
    """Before-rule (severity 10): neutralizes every effect produced by a
    specific target rule by rolling its value changes back."""

    def __init__(
            self, ruleset, initiated_by, with_ability, target_rule):
        super().__init__(
            ruleset, "Seal Rule", "before", [], 10, initiated_by,
            with_ability)
        self.target_rule = target_rule

    def will_trigger_on(self, dynamic_event):
        # Only events produced by the sealed rule.
        if (
            dynamic_event.triggering_rule is self.target_rule):
            return True
        return False

    def trigger(self, dynamic_event):
        # Walk back through the chain of replacements made by the sealed
        # rule to recover the value before it interfered.
        rolled_back_event = dynamic_event
        while (
                rolled_back_event.replaces
                and (
                    rolled_back_event.replaces.triggering_rule
                    is self.target_rule)):
            rolled_back_event = rolled_back_event.replaces
        dynamic_event.replace_value(rolled_back_event.old_value, self)
        print(f"The effect of {self.target_rule.rule_name} was sealed!")
class RuleFade(battle_4.DynamicRule):
    """After-rule: each time the target rule fires, this rule's severity
    drops by one; at zero both the target rule and this fade rule are
    removed from the ruleset."""

    def __init__(
            self, ruleset, severity, initiated_by, with_ability,
            target_rule):
        super().__init__(
            ruleset, "Rule Fade", "after", [], severity, initiated_by,
            with_ability)
        self.target_rule = target_rule

    def will_trigger_on(self, dynamic_event):
        # Only events produced by the fading rule.
        if (
            dynamic_event.triggering_rule is self.target_rule):
            return True
        return False

    def trigger(self, dynamic_event):
        print(f"{self.target_rule.rule_name} has faded slightly!")
        # Decrement our own severity through the rules engine.
        self.update_w_rules(
            "severity", self.severity - 1, self.initiated_by,
            self.with_ability, "normal", self)
        if self.severity == 0:
            print(f"{self.target_rule.rule_name} has faded completely!")
            # Remove the faded rule, then this fade rule itself, each via
            # a separate ruleset update so other rules can observe both.
            new_rules = self.ruleset.rules.copy()
            new_rules.remove(self.target_rule)
            self.ruleset.update_w_rules(
                "rules", new_rules.copy(), self.initiated_by,
                self.with_ability, "normal", self)
            new_rules.remove(self)
            self.ruleset.update_w_rules(
                "rules", new_rules.copy(), self.initiated_by,
                self.with_ability, "normal", self)
| true |
0d160039023408a2643c3674ebb305cb51c0f905 | Python | wagnerf42/pyquizz | /data/lessons/6. tables de hachage/uniques.py | UTF-8 | 290 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env python3
# Using a set,
# return a list containing a single copy of each element of the input list.
# For example, [1, 2, 2, 1, 3] could return [1, 2, 3].
def uniques(vecteur):
    # Set iteration order is arbitrary, hence "could" return above.
    return list(set(vecteur))## return
###20
print(uniques([1, 3, 5, 3, 1, 2]))
| true |
949015adf5fffe38f3ea63c5bce2e4395c322ff0 | Python | PaulFaith/LearnPython | /Extras/json/myjson.py | UTF-8 | 306 | 2.84375 | 3 | [
"CC0-1.0"
] | permissive | import json
class Myjson:
    """Minimal JSON file wrapper: `read` parses the wrapped file, `write`
    dumps the wrapped object into it."""

    def __init__(self,file,new):
        self._file = file
        self._new = new

    def read(self):
        """Return the parsed JSON content of the wrapped file."""
        with open(self._file) as doc:
            data = json.load(doc)
        return data

    def write(self):
        """Serialize the wrapped object into the wrapped file.

        FIX: the original referenced self.file / self.new, which are never
        set (__init__ stores self._file / self._new), so write() always
        raised AttributeError.
        """
        with open(self._file, "w") as doc:
            json.dump(self._new, doc)
        return doc
| true |
8048cfd6dc3741727cd07cc5325b70c95febaa0f | Python | ItsJasonPan/MiniProjects | /Tetris.py | UTF-8 | 9,938 | 3.40625 | 3 | [] | no_license | # Updated Animation Starter Code
from tkinter import *
####################################
# customize these functions
####################################
def init(data):
    """Set up a fresh game: board dimensions, an all-"blue" (empty)
    board, the first falling piece, the score and the game-over flag."""
    (data.rows, data.cols, data.cellSize, data.margin) = gameDimensions()
    data.board = [["blue" for _ in range(data.cols)] for _ in range(data.rows)]
    newFallingPiece(data)
    data.score = 0
    data.gameover = False
def mousePressed(event, data):
    """Mouse input is unused in this game; intentionally a no-op."""
    pass
def keyPressed(event, data):
    """Handle player input: 's' spawns a new piece, arrow keys move the
    falling piece when legal, Up rotates (with rollback if illegal)."""
    import copy
    if data.gameover:
        return
    if event.char == "s":
        newFallingPiece(data)
    # Translation keys: apply the shift only when the target cells are free.
    moves = {"Down": (1, 0), "Left": (0, -1), "Right": (0, 1)}
    if event.keysym in moves:
        drow, dcol = moves[event.keysym]
        if fallingPieceIsLegal(data, drow, dcol):
            moveFallingPiece(data, drow, dcol)
    if event.keysym == "Up":
        # Save the current shape and anchor so an illegal rotation can
        # be undone.
        saved_cells = copy.deepcopy(data.fallingPiece[0])
        saved_position = data.fallingPieceRow, data.fallingPieceCol
        rotateFallingPiece(data)
        if not fallingPieceIsLegal(data, 0, 0):
            data.fallingPiece = saved_cells, data.fallingPiece[1]
            data.fallingPieceRow, data.fallingPieceCol = saved_position
def timerFired(data):
    """One timer tick: drop the piece by a row, or lock it in place when
    it can no longer move down."""
    if data.gameover:
        return
    if fallingPieceIsLegal(data, 1, 0):
        moveFallingPiece(data, 1, 0)
    else:
        placeFallingPiece(data)
def drawCell(row, col, color, canvas, data):
canvas.create_rectangle(col * data.cellSize + data.margin,
row * data.cellSize + data.margin,
(col + 1) * data.cellSize + data.margin,
(row + 1) * data.cellSize + data.margin,
fill=color, width=3)
def drawBoard(canvas, data):
    """Render every cell of the board in its stored color."""
    for row in range(data.rows):
        for col in range(data.cols):
            drawCell(row, col, data.board[row][col], canvas, data)
def tetrisPiece(data):
    """Attach the seven standard tetromino shapes and their colors to
    `data`. Shapes are lists of rows of booleans (True = occupied cell);
    data.tetrisPieceColors[i] is the color of data.tetrisPieces[i]."""
    T, F = True, False
    iPiece = [[T, T, T, T]]
    jPiece = [[T, F, F],
              [T, T, T]]
    lPiece = [[F, F, T],
              [T, T, T]]
    oPiece = [[T, T],
              [T, T]]
    sPiece = [[F, T, T],
              [T, T, F]]
    tPiece = [[F, T, F],
              [T, T, T]]
    zPiece = [[T, T, F],
              [F, T, T]]
    data.tetrisPieces = [iPiece, jPiece, lPiece, oPiece,
                         sPiece, tPiece, zPiece]
    data.tetrisPieceColors = ["red", "yellow", "magenta",
                              "pink", "cyan", "green", "orange"]
def newFallingPiece(data):
    """Spawn a random piece centered at the top of the board; flag game
    over when any of its occupied spawn cells is already taken."""
    tetrisPiece(data)
    import random
    randomIndex = random.randint(0, len(data.tetrisPieces) - 1)
    shape = data.tetrisPieces[randomIndex]
    data.fallingPiece = (shape, data.tetrisPieceColors[randomIndex])
    data.fallingPieceRow = 0
    # Center the piece horizontally on the board.
    data.fallingPieceCol = len(data.board[0]) // 2 - len(shape[0]) // 2
    # Game over if the spawn area is not entirely empty ("blue").
    for row in range(len(shape)):
        for col in range(len(shape[0])):
            if shape[row][col]:
                r, c = data.fallingPieceRow, data.fallingPieceCol
                if data.board[row + r][col + c] != "blue":
                    data.gameover = True
def drawFallingPiece(canvas, data):
    """Draw the falling piece at its current board position."""
    # draw falling piece cell by cell (skip the False cells of its bounding box)
    for row in range(len(data.fallingPiece[0])):
        for col in range(len(data.fallingPiece[0][0])):
            if data.fallingPiece[0][row][col]:
                color = data.fallingPiece[1]
                drawCell(row + data.fallingPieceRow,
                         col + data.fallingPieceCol,
                         color, canvas, data)
def moveFallingPiece(data, drow, dcol):
    """Shift the falling piece's anchor by (drow, dcol); no legality check here."""
    data.fallingPieceRow = data.fallingPieceRow + drow
    data.fallingPieceCol = data.fallingPieceCol + dcol
def fallingPieceIsLegal(data, drow, dcol):
    """Return True if the piece, shifted by (drow, dcol), stays on the board
    and covers only empty ("blue") cells.

    Fix: also reject negative row indices. The original only bounded
    nextRow from above, so a negative nextRow (possible after rotating a
    long piece near the top) wrapped around via Python negative indexing
    and could mis-report legality.
    """
    for row in range(len(data.fallingPiece[0])):
        for col in range(len(data.fallingPiece[0][0])):
            if data.fallingPiece[0][row][col]:
                nextRow = data.fallingPieceRow + row + drow
                nextCol = data.fallingPieceCol + col + dcol
                # out of bounds in any direction -> illegal
                if (nextRow < 0 or nextRow > data.rows - 1 or
                        nextCol < 0 or nextCol > data.cols - 1):
                    return False
                # occupied cell -> illegal ("blue" marks an empty cell)
                elif data.board[nextRow][nextCol] != "blue":
                    return False
    return True
def rotateFallingPiece(data):
    """Rotate the falling piece 90 degrees counterclockwise about its center."""
    shape, color = data.fallingPiece
    oldRows, oldCols = len(shape), len(shape[0])
    # column c of the old shape, read right-to-left, becomes row r of the new one
    rotated = [[shape[c][oldCols - 1 - r] for c in range(oldRows)]
               for r in range(oldCols)]
    # keep the piece visually centered while its bounding box swaps dimensions
    data.fallingPieceRow += int(oldRows / 2 - oldCols / 2)
    data.fallingPieceCol += int(oldCols / 2 - oldRows / 2)
    data.fallingPiece = (rotated, color)
def placeFallingPiece(data):
    """Freeze the falling piece onto the board, spawn a new one, clear full rows."""
    piece, color = data.fallingPiece
    for row in range(len(piece)):
        for col in range(len(piece[0])):
            if piece[row][col] == True:
                # place color on the original board when
                # the falling piece is interrupted
                rowBoard = row + data.fallingPieceRow
                colBoard = col + data.fallingPieceCol
                data.board[rowBoard][colBoard] = color
    # generate a new falling piece
    newFallingPiece(data)
    # check if any rows can be removed
    removeFullRows(data)
def removeFullRows(data):
    """Delete completed rows, refill from the top, and award (cleared)**2 points."""
    # a row is complete exactly when it contains no empty ("blue") cell
    survivors = [r for r in data.board if "blue" in r]
    cleared = len(data.board) - len(survivors)
    # rebuild in place: fresh blank rows on top, surviving rows below
    data.board[:] = [["blue"] * data.cols for _ in range(cleared)] + survivors
    # score grows quadratically with the number of lines cleared at once
    data.score += cleared ** 2
def drawScore(canvas, data):
    """Show the current score centered near the top of the window."""
    # NOTE(review): text= receives a tuple, so tkinter renders it with tuple
    # formatting -- probably meant "Score: {}".format(data.score); confirm
    canvas.create_text(data.width/2, 15, text=("Score:", data.score),
                       fill="dark blue", font="30")
def drawGameOver(canvas, data):
    """Overlay a maroon banner with 'Game Over!' text across the board."""
    # draw text and background when game over
    canvas.create_rectangle(data.margin, data.height * 2/5,
                            data.width - data.margin, data.height/2,
                            fill="maroon")
    canvas.create_text(data.width/2, data.height/2, text="Game Over!",
                       fill="white", font="Chalkduster 28 bold", anchor=S)
def redrawAll(canvas, data):
    """Full-frame repaint: background, board, falling piece, score, banner."""
    canvas.create_rectangle(0, 0, data.width, data.height, fill="orange")
    drawBoard(canvas, data)
    drawFallingPiece(canvas, data)
    drawScore(canvas, data)
    if data.gameover:
        drawGameOver(canvas, data)
####################################
# use the run function as-is
####################################
def run(width=300, height=300):
    """Standard tkinter animation scaffold: wires mouse, key and timer events
    to the handlers above and blocks in the Tk main loop until close."""
    def redrawAllWrapper(canvas, data):
        canvas.delete(ALL)
        canvas.create_rectangle(0, 0, data.width, data.height,
                                fill='white', width=0)
        redrawAll(canvas, data)
        canvas.update()
    def mousePressedWrapper(event, canvas, data):
        mousePressed(event, data)
        redrawAllWrapper(canvas, data)
    def keyPressedWrapper(event, canvas, data):
        keyPressed(event, data)
        redrawAllWrapper(canvas, data)
    def timerFiredWrapper(canvas, data):
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # pause, then call timerFired again
        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.width = width
    data.height = height
    data.timerDelay = 200 # milliseconds
    root = Tk()
    root.resizable(width=False, height=False) # prevents resizing window
    init(data)
    # create the root and the canvas
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.configure(bd=0, highlightthickness=0)
    canvas.pack()
    # set up events
    root.bind("<Button-1>", lambda event:
                            mousePressedWrapper(event, canvas, data))
    root.bind("<Key>", lambda event:
                            keyPressedWrapper(event, canvas, data))
    timerFiredWrapper(canvas, data)
    # and launch the app
    root.mainloop()  # blocks until window is closed
    print("bye!")
def gameDimensions():
    """Return the fixed board geometry as (rows, cols, cellSize, margin)."""
    return (25,   # rows
            13,   # cols
            30,   # cell edge length, pixels
            40)   # outer margin, pixels
def playTetris():
    """Entry point: size the window from the board geometry and launch the game."""
    rows, cols, cellSize, margin = gameDimensions()
    # calculate the board size and start the game!
    height = rows * cellSize + 2 * margin
    width = cols * cellSize + 2 * margin
    run(width, height)
playTetris()
| true |
b749e6111b7e87fdc5fc97ab09b5d0cbeab68031 | Python | pokemonStudyGroup/abstractAlgebra | /backup_infrastructure/controller.py | UTF-8 | 1,213 | 2.640625 | 3 | [] | no_license | from flask import Flask, request, render_template
from os import system
from subprocess import check_output
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page with the captured command output.

    Fix: read /tmp/output through a context manager so the file handle is
    closed deterministically instead of leaking until garbage collection.
    """
    with open('/tmp/output', 'r') as f:
        output = f.read()
    return render_template('index.html', output=output)
@app.route('/refresh')
def refresh():
    """Wake the background sleeper by killing the PID recorded in /tmp/sleep_pid."""
    # os.system returns the shell's exit status; 0 means the kill succeeded
    exit_code = system('kill $(cat /tmp/sleep_pid)')
    status_str = 'STATUS: {}'.format(exit_code)
    return render_template('refresh.html', status=status_str)
@app.route('/set_interval')
def set_interval():
    """Report the current refresh interval, or update it and trigger a refresh.

    Without an ``interval`` query parameter, shows the configured interval
    and the seconds remaining until the sleeper next wakes; with one, the
    new value is persisted to /tmp/interval and the sleeper is restarted.

    Fix: both /tmp/interval accesses now use context managers so the file
    handles are closed promptly instead of leaking until GC.
    """
    new_interval = request.args.get('interval', None)
    if new_interval is None:
        # ask ps how long the sleeper (PID in /tmp/sleep_pid) has been running
        get_running_time = \
            "ps -eo pid,etime | grep $(cat /tmp/sleep_pid) | awk '{print $2}'"
        running_time_str = \
            check_output(["bash", "-c", get_running_time]).decode().strip()
        # NOTE(review): etime is parsed as MM:SS; runs over an hour would
        # produce HH:MM:SS and break this split -- confirm intervals stay short
        minutes = int(running_time_str.split(':')[0])
        seconds = int(running_time_str.split(':')[1])
        running_time = minutes * 60 + seconds
        with open('/tmp/interval', 'r') as f:
            interval = int(f.read().strip())
        return render_template(
            'set_interval.html',
            interval=interval,
            expiration=(interval - running_time))
    with open('/tmp/interval', 'w') as f:
        f.write(new_interval)
    return refresh()
| true |
bf2c209ed814adbdaeb998d43141ff195d1f92b6 | Python | Winni8/python-project | /py_workspace/test_case/tests_report/Test_report.py | UTF-8 | 570 | 2.984375 | 3 | [] | no_license | # _*_ coding:utf-8 _*_
# @Author :cjj
# @time :2018/6/27 9:46
# @File :Test_report.py
# @Software :PyCharm
import unittest
class TestReport(unittest.TestCase):
    """Toy test case with passing assertions, used to exercise report generation."""

    def setUp(self):
        # nothing to prepare
        pass

    def test_a(self):
        first = 2
        second = 4
        self.assertNotEqual(first, second, "a不等于b则OK")

    def test_b(self):
        print("hello report")

    def test_c(self):
        needle = "python"
        haystack = "where is the python ?,I'm here!"
        # identity check: two distinct string objects are never the same object
        self.assertIsNot(needle, haystack, "c在b中")

    def test_d(self):
        pass

    def tearDown(self):
        # nothing to clean up
        pass
| true |
dbd411250bf83581f0bc1e2691ec2fe9deb75e61 | Python | sayyss/QC-simulator | /gates.py | UTF-8 | 237 | 2.8125 | 3 | [] | no_license | import numpy as np
# Identity Matrix
iden = np.identity(2)
# X(NOT) gate
x_gate = np.array([[0, 1],
[1, 0]])
# Hadamard gate
h_gate = np.array([[1/np.sqrt(2), 1/np.sqrt(2)],
[1/np.sqrt(2), -1/np.sqrt(2)]])
| true |
7564efefabccee43421bad41a85313f3d35d7c2c | Python | yangsenwxy/pandacancer_kaggle | /contribs/kappa_rounder.py | UTF-8 | 1,499 | 3.140625 | 3 | [
"MIT"
] | permissive | # SOURCE : https://www.kaggle.com/naveenasaithambi/optimizedrounder-improved
import pandas as pd
import scipy as sp
import numpy as np
from functools import partial
from sklearn.metrics import cohen_kappa_score
class OptimizedRounder_v2(object):
    """Learn cutoff thresholds that bin continuous predictions into ordinal
    classes by maximizing quadratic-weighted Cohen's kappa."""

    def __init__(self, num_class):
        # thresholds are produced by fit(); 0 is just the unfitted placeholder
        self.coef_ = 0
        self.num_class = num_class

    def _kappa_loss(self, coef, X, y):
        """Negative quadratic kappa of binning X at the given cutoffs."""
        bins = [-np.inf] + list(np.sort(coef)) + [np.inf]
        binned = pd.cut(X, bins, labels=list(range(self.num_class)))
        return -cohen_kappa_score(y, binned, weights='quadratic')

    def fit(self, X, y):
        """Nelder-Mead search for the cutoffs minimizing _kappa_loss."""
        objective = partial(self._kappa_loss, X=X, y=y)
        start = [i + 1 / 2 for i in range(0, self.num_class - 1)]
        self.coef_ = sp.optimize.minimize(objective, start,
                                          method='Nelder-Mead')

    def predict(self, X):
        """Bin X with the fitted cutoffs; returns categorical class labels."""
        bins = [-np.inf] + list(np.sort(self.coef_['x'])) + [np.inf]
        return pd.cut(X, bins, labels=list(range(self.num_class)))

    def coefficients(self):
        """Return the optimized cutoff array."""
        return self.coef_['x']
# opt=OptimizedRounder_v2(5)
# x = np.array([0.2, 0.4, 0.55, 0.49, 1.2, 1.1, 1.9, 2.3, 3.3, 4.3])
# y = np.array([0, 1, 0, 1, 2, 2, 2, 3, 3, 4])
#
# opt.fit(x, y)
# preds = pd.cut(x, [-np.inf] + list(np.sort([0.3, 1.5, 2.5, 3.5])) + [np.inf], labels=[0, 1, 2, 3, 4])
# kappa = cohen_kappa_score(y, preds, weights='quadratic')
# print(preds)
# print(kappa)
# print(opt.coefficients())
# print(opt.predict(x)) | true |
71928a2591d9e0394bf2bbcf28a3dc06c78af02f | Python | arelmelamed/Simple-Yoda-generator | /main.py | UTF-8 | 755 | 3.171875 | 3 | [] | no_license | from tkinter import *
window = Tk()
window.title("Yoda Generator")
window.minsize(width=500, height=300)
# yellow window with generous padding around the widgets
window.config(padx=100, pady=50, bg="yellow")
def master_yoda():
    """Rewrite the input text Yoda-style: words in reverse order.

    Fix: the original inserted the reversed *list* straight into the Text
    widget, which Tk renders as a Tcl list (braces around multi-word items)
    rather than plain text; join the words back into a normal
    space-separated string before inserting.
    """
    text_box_two.delete("1.0", END)
    source_text = text_box_one.get("1.0", END)
    yoda_text = " ".join(source_text.split()[::-1])
    text_box_two.insert("1.0", yoda_text)
# heading label
my_label = Label(text="Convert to Yoda", font=("David", 24, "bold"))
my_label.grid(column=1, row=2)
my_label.config(bg="green")
# input box (top) and output box (bottom)
text_box_one = Text(height=5, width=50)
text_box_one.grid(column=1, row=3)
text_box_two = Text(height=5, width=50)
text_box_two.grid(column=1, row=4)
# button that triggers the conversion
convert_button = Button(text="Convert to Yoda", command=master_yoda)
convert_button.grid(column=2, row=3)
window.mainloop()
e273a0f7e4f0c2bbebe64f83ed28141515374264 | Python | Chanzhln/mlproject | /Code/data_process.py | UTF-8 | 10,391 | 2.796875 | 3 | [] | no_license | import urllib, json, csv, sys, os, pickle, random
from constant import *
import Queue
reload(sys)
sys.setdefaultencoding('utf-8')
def sanctify(text):
    """Normalize a CSV field: trim surrounding spaces, drop '(...)' segments,
    and strip single/double quote characters.

    Fixes: the original indexed ret[0] / ret[-1] directly and crashed with
    IndexError on empty input, on all-space input, and whenever removing an
    unclosed '(' left the string empty.
    """
    ret = text.lstrip(' ')
    # repeatedly cut out parenthesised chunks; an unclosed '(' (or a ')'
    # appearing before it) truncates the string at the '('
    while ret.find('(') != -1:
        left = ret.find('(')
        right = ret.find(')')
        if right > left:
            ret = ret[:left] + ret[right + 1:]
        else:
            ret = ret[:left]
    ret = ret.replace('\'', '')
    ret = ret.replace('\"', '')
    return ret.rstrip(' ')
def save(dObj, sFilename):
"""Given an object and a file name, write the object to the file using pickle."""
f = open(sFilename, "w")
p = pickle.Pickler(f)
p.dump(dObj)
f.close()
def load(sFilename):
    """Given a file name, load and return the object stored in the file.

    Fix: open the file in binary mode ('rb') to match pickle's byte stream,
    and use a context manager so the handle is always closed.
    """
    with open(sFilename, "rb") as f:
        return pickle.Unpickler(f).load()
def make_classification():
    """Bucket imdbRating into a letter grade (S/A/B/C/F), writing Movie_Class.csv.

    NOTE(review): writer.writerow sits inside the `count > 0` branch, so the
    header row is dropped from the output file -- confirm this is intended.
    """
    finput = '../Data/Movie_Data.csv'
    foutput = '../Data/Movie_Class.csv'
    csv_read = open(finput,'r')
    csv_write = open(foutput,'w')
    reader = csv.reader(csv_read)
    writer = csv.writer(csv_write)
    count = 0
    for line in reader:
        if count > 0:
            idx = attribute_index['imdbRating']
            rating = (float)(line[idx])
            # grade boundaries: >=8.5 S, [7.5,8.5) A, [6.5,7.5) B, [5.5,6.5) C, else F
            if rating >= 8.5:
                level = 'S'
            elif rating >= 7.5 and rating < 8.5:
                level = 'A'
            elif rating >= 6.5 and rating < 7.5:
                level = 'B'
            elif rating >= 5.5 and rating < 6.5:
                level = 'C'
            else:
                level = 'F'
            print line[idx],level
            line[idx] = level
            writer.writerow(line)
        count += 1
    csv_read.close()
    csv_write.close()
def remove_column():
    """Copy train.csv to removed-train.csv, dropping columns 0, 14, 15 and 16."""
    finput = '../Data/train-test/train.csv'
    foutput = '../Data/train-test/removed-train.csv'
    csv_read = open(finput,'r')
    csv_write = open(foutput,'w')
    reader = csv.reader(csv_read)
    writer = csv.writer(csv_write)
    # indices of the columns to drop
    remove_lst = [0,14,15,16]
    row = 0  # NOTE(review): unused
    for line in reader:
        tmp = []
        for i in range(0,len(line)):
            if i not in remove_lst:
                tmp.append(line[i])
        writer.writerow(tmp)
    csv_read.close()
    csv_write.close()
def gather(finput, gather_list,fileName):
    """Average imdbRating (column 17) per distinct value of the given columns.

    Builds {value: [mean_rating, count]} over the columns in gather_list,
    pickles it to ../Data/pkl/<fileName>, and returns the dict. Missing
    markers ('N/A', '?') are skipped.
    """
    #finput = '../Data/Movie_Data_1990-now.csv'
    gather_dict = {}
    csv_read = open(finput,'r')
    reader = csv.reader(csv_read)
    #gather_list = [5]
    rating = 17
    row = 0
    for line in reader:
        if row > 0:
            for idx in gather_list:
                if line[idx] == 'N/A' or line[idx] == '?':
                    continue
                if gather_dict.has_key(line[idx]):
                    gather_dict[line[idx]][0] += (float)(line[rating])
                    gather_dict[line[idx]][1] += 1
                else:
                    gather_dict[line[idx]] = [(float)(line[rating]),1]
        row += 1
    # turn each accumulated sum into a mean
    for key in gather_dict:
        gather_dict[key][0] = (float)(gather_dict[key][0])/(float)(gather_dict[key][1])
        #print key,gather_dict[key]
    path = '../Data/pkl/' + fileName
    save(gather_dict,path)
    return gather_dict
def clean():
    """Run sanctify() over every cell of Movie_Data.csv -> Movie_Data_clean.csv."""
    finput = '../Data/Movie_Data.csv'
    foutput = '../Data/Movie_Data_clean.csv'
    csv_read = open(finput,'r')
    csv_write = open(foutput,'w')
    reader = csv.reader(csv_read)
    writer = csv.writer(csv_write)
    for line in reader:
        data = line
        for i in range(0,len(data)):
            data[i] = sanctify(data[i])
        writer.writerow(data)
    csv_read.close()
    csv_write.close()
def topK(finput, foutput, k, valve):
    """Print the k highest-mean-rating entries with more than `valve`
    occurrences, using a min-heap of size k.

    NOTE(review): foutput is accepted but unused -- the final save() is
    commented out.
    """
    pq = Queue.PriorityQueue()
    #finput = '../Data/pkl/Director.pkl'
    #foutput = '../'
    dic = load(finput)
    #print dic
    for key in dic:
        if pq.qsize() < k and dic[key][1] > valve:
            pq.put((dic[key][0],[key,dic[key]]))
        elif pq.qsize() == k:
            # heap is full: replace the current minimum if this entry is better
            top = pq.get()
            if top[1][1][0] < dic[key][0] and dic[key][1] > valve:
                pq.put((dic[key][0],[key,dic[key]]))
            else:
                pq.put(top)
    top_lst = []
    while pq.empty() == False:
        tmp = pq.get()[1]
        top_lst.append(tmp)
        print tmp
    #save(top_lst,foutput)
def numeric():
    """Replace categorical movie fields with their mean-rating encodings.

    Loads the per-category mean-rating dicts produced by gather(), maps each
    categorical cell of Movie_Class.csv to its mean rating, and writes
    Movie_Data_numeric.csv. The header row passes through unchanged.

    Fixes: `data[i] == '?'` was a no-op comparison where an assignment was
    intended, and the Year branch tested column 2 (already consumed by the
    Genre branch, so it was dead code) instead of column 1, meaning years
    were never encoded.
    """
    director = load('../Data/pkl/Director.pkl')
    genre = load('../Data/pkl/Genre.pkl')
    actor = load('../Data/pkl/Actors.pkl')
    write = load('../Data/pkl/Writer.pkl')
    year = load('../Data/pkl/Year.pkl')
    finput = '../Data/Movie_Class.csv'
    foutput = '../Data/Movie_Data_numeric.csv'
    csv_read = open(finput,'r')
    csv_write = open(foutput,'w')
    reader = csv.reader(csv_read)
    writer = csv.writer(csv_write)
    row = 0
    for line in reader:
        data = line
        if row > 0:
            for i in range(0,len(data)):
                if data[i] == '?' or data[i] == 'N/A':
                    data[i] = '?'  # normalize both missing markers
                elif i in [2,3,4]: #Genre
                    data[i] = genre[data[i]][0]
                elif i in [9,10,11,12]: #Actors
                    data[i] = actor[data[i]][0]
                elif i in [6,7,8]: #Writer
                    data[i] = write[data[i]][0]
                elif i in [5]: #Director
                    data[i] = director[data[i]][0]
                elif i in [1]: # Year
                    data[i] = year[data[i]][0]
        writer.writerow(data)
        row += 1
    csv_read.close()
    csv_write.close()
def filter():
    """Keep only rows whose Year (column 1) is in 2006-2010; header passes through.

    NOTE(review): shadows the built-in filter().
    """
    finput = '../Data/Movie_Data.csv'
    foutput = '../Data/Year/Movie_Data_2006-2010.csv'
    csv_read = open(finput,'r')
    csv_write = open(foutput,'w')
    reader = csv.reader(csv_read)
    writer = csv.writer(csv_write)
    row = 0
    for line in reader:
        if row == 0 or (float)(line[1]) in [2006, 2007,2008,2009,2010]:
            writer.writerow(line)
        row += 1
    csv_read.close()
    csv_write.close()
'''
filter()
gather([9,10,11,12],'Actors.pkl')
gather([6,7,8],'Writer.pkl')
gather([1],'Year.pkl')
'''
def check():
    """Debug helper: dump every (key, value) of the pickled Director stats."""
    obj = load('Director.pkl')
    for key in obj:
        print key,obj[key]
def pick():
    """Sample 1000 random data rows of Movie_Class_numeric.csv into PS4.csv
    (header row is copied first)."""
    finput = '../Data/Movie_Class_numeric.csv'
    foutput = '../Data/PS4.csv'
    csv_read = open(finput,'r')
    csv_write = open(foutput,'w')
    reader = csv.reader(csv_read)
    writer = csv.writer(csv_write)
    row = 0
    data = []
    for line in reader:
        if row == 0:
            writer.writerow(line)
        else:
            data.append(line)
        row += 1
    ps4 = random.sample(data,1000)
    for sample in ps4:
        writer.writerow(sample)
    csv_read.close()
    csv_write.close()
#check()
#clean()
def p_analysis():
    """Per-year (2006-2010) analysis: rebuild Director.pkl and print its top 3."""
    # NOTE(review): 'dir' shadows the built-in
    dir = '../Data/Year/Movie_Data_'#'../Data/Year/Movie_Data_2015.csv'
    for year in range(2006,2011):
        print year
        finput = dir + str(year) + '.csv'
        gather(finput, [5], 'Director.pkl')
        topK('../Data/pkl/Director.pkl','',3,1)
#p_analysis()
#filter()
'''
filter()
gather('../Data/Year/Movie_Data_2006-2010.csv', [2,3,4],'Genre.pkl')
gather('../Data/Year/Movie_Data_2006-2010.csv', [5],'Director.pkl')
gather('../Data/Year/Movie_Data_2006-2010.csv', [6,7,8],'Writer.pkl')
gather('../Data/Year/Movie_Data_2006-2010.csv', [9,10,11,12],'Actors.pkl')
'''
#topK('../Data/pkl/Actors.pkl','',5, 2)
#pick()
def year_changes():
    """Write per-year mean ratings (1990-2016) as two rows of year_changes.csv."""
    gather_dict = gather('../Data/Movie_Data.csv',[1], 'Year.pkl')
    foutput = '../Data/year_changes.csv'
    csv_write = open(foutput,'w')
    writer = csv.writer(csv_write)
    year = []
    imdb = []
    for y in range(1990,2017):
        year.append(y)
        imdb.append(gather_dict[str(y)][0])
    # first row: years; second row: mean ratings
    writer.writerow(year)
    writer.writerow(imdb)
    csv_write.close()
    print imdb
def split_data():
    """Split Movie_Class_numeric.csv into test.csv (3427 random rows) and
    train.csv (the rest); the header row is written to both files.

    NOTE(review): `line not in test` is an O(n) list scan per row, making
    the split quadratic in the number of rows.
    """
    finput = '../Data/Movie_Class_numeric.csv'
    csv_total = open(finput,'r')
    csv_train= open('../Data/train.csv','w')
    csv_test = open('../Data/test.csv','w')
    train = []
    test = []
    total = []
    row = 0
    reader = csv.reader(csv_total)
    train_writer = csv.writer(csv_train)
    test_writer = csv.writer(csv_test)
    for line in reader:
        if row == 0:
            train.append(line)
            test.append(line)
        else:
            total.append(line)
        row += 1
    test += random.sample(total, 3427)
    for line in test:
        test_writer.writerow(line)
    for line in total:
        if line not in test:
            train.append(line)
    for line in train:
        train_writer.writerow(line)
    csv_total.close()
    csv_train.close()
    csv_test.close()
def step_train():
    """Write train_1.csv .. train_10.csv containing growing random subsets
    (1000, 2000, ..., 10000 rows) of train.csv, with missing markers
    normalized to '?' and every other feature cell (all but the last
    column) cast to float."""
    finput = '../Data/train-test/train.csv'
    fdir = '../Data/train-test/train_'
    csv_read = open(finput,'r')
    reader = csv.reader(csv_read)
    title = []
    examples = []
    row = 0
    for line in reader:
        if row == 0:
            title = line
        else:
            examples.append(line)
        row+=1
    print 'total number: ',len(examples)
    for i in range(1,11):
        foutput = fdir + str(i) + '.csv'
        csv_write = open(foutput, 'w')
        writer = csv.writer(csv_write)
        writer.writerow(title)
        num = 10000 * i / 10
        part_examples = random.sample(examples, num)
        print "Number of part example: ", len(part_examples)
        for line in part_examples:
            # NOTE(review): this inner loop reuses the name i; harmless here
            # (the outer for rebinds it each iteration) but confusing
            for i in range(0,len(line)-1):
                if line[i] == '?' or line[i] == 'N/A':
                    line[i] = '?'
                else:
                    line[i] = (float)(line[i])
            writer.writerow(line)
        csv_write.close()
def useful():
    """Normalize test.csv -> test-2.csv: missing markers become '?', all other
    feature cells (every column but the last) are cast to float."""
    finput = '../Data/train-test/test.csv'
    foutput = '../Data/train-test/test-2.csv'
    csv_read = open(finput, 'r')
    csv_write = open(foutput, 'w')
    reader = csv.reader(csv_read)
    writer = csv.writer(csv_write)
    row = 0
    for line in reader:
        if row == 0:
            writer.writerow(line)
        else:
            for i in range(0,len(line)-1):
                if line[i] == '?' or line[i] == 'N/A':
                    line[i] = '?'
                else:
                    line[i] = (float)(line[i])
            writer.writerow(line)
        row += 1
    csv_read.close()
    csv_write.close()
useful()
| true |
c421f60af34ee1bf090eb73da11359c32d17ea63 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2620/60727/235699.py | UTF-8 | 147 | 3.265625 | 3 | [] | no_license | num = int(input())
for i in range(0,num):
sum = 0
ran = int(input())
for j in range(1,ran+1):
sum = pow(j,5)+sum
print(sum) | true |
f7f7cff728cbf1f442469266efb9658fd4231178 | Python | barua-anik/integrify_assignments | /Python exercises/recursiveFunction.py | UTF-8 | 1,087 | 4.125 | 4 | [] | no_license | def function(n):
if n > 7:
print("Course Passed.")
elif n < 5:
print('The current value of n is: {}\n'.format(n))
n += 1
return function(n)
else:
print('The current value of n is: {}\n'.format(n))
n += 1
return function(n)
def val_input(n):
    """Accept only integers from 0 through 10 inclusive."""
    return isinstance(n, int) and 0 <= n <= 10
def get_num():
    """Prompt until the user enters an integer in [0, 10]."""
    try:
        n = int(input("Please only enter whole numbers between 0 and 10: "))
        while not val_input(n):
            n = int(input("Please only enter whole numbers between 0 and 10: "))
        return n
    except ValueError:
        # non-numeric input: report and start over (recursively)
        print("Invalid entry, try again.")
        return get_num()
def run_program():
    """Ask for confirmation, then read a number and start the countdown."""
    # NOTE(review): call_func[0] raises IndexError if the user just hits Enter
    call_func = input("Would you like to run the program? Y/N ").lower()
    while not (call_func[0] == 'y' or call_func[0] == 'n'):
        call_func = input("Please select whether you would like to continue. Enter \"Y\" or \"N\" ").lower()
    if call_func[0] == 'y':
        n = get_num()
        function(n)
        # loop back for another round after the countdown finishes
        run_program()
    else:
        return 0
run_program() | true |
8b9e63db88aface83b48cc312d3bfa7dfc049a29 | Python | 87crosby/Automated-Scripts | /util/helper_functions.py | UTF-8 | 1,609 | 2.5625 | 3 | [] | no_license | from util.constants import (
BESTBUY_PRODUCT_QUERIES,
COMBO_WEBHOOK_ID,
SPECIFIC_CHANNEL_WEBHOOK_ID,
)
import random
def print_in_stock_msg(item_name):
    """Print a framed 'in stock' notice for the given item."""
    banner = "++++++++++++++++++++++++"
    print(banner)
    print("{} is in stock.".format(item_name))
    print(banner)
def print_out_of_stock_msg(item_name):
    """Print a framed 'out of stock' notice for the given item."""
    banner = "------------------------"
    print(banner)
    print("{} is no longer in stock".format(item_name))
    print(banner)
def get_specific_webhook_id(item_number):
    """Return the per-product Discord webhook id, formatted with item_number."""
    return SPECIFIC_CHANNEL_WEBHOOK_ID.format(item_number)
def get_combo_webhook_id(item_number):
    """Return the combo-channel Discord webhook id, formatted with item_number."""
    return COMBO_WEBHOOK_ID.format(item_number)
def get_check_interval():
    """Random polling delay (seconds) in [3, 6], to avoid a fixed request cadence."""
    low, high = 3, 6
    return random.uniform(low, high)
def get_bestbuy_product_discord_info(name):
    """Map a Best Buy product name to its Discord query config by model number."""
    product_list = ["3070", "3080", "3090", "5600"]  # TODO: get this programatically
    for product in product_list:
        if product in name:
            return BESTBUY_PRODUCT_QUERIES[product]
    else:
        # for/else: this branch runs only when no model matched above,
        # so unknown products fall back to the 3070 config
        return BESTBUY_PRODUCT_QUERIES["3070"]  # TODO: error handle this later
def get_product_info_from_name(name, keywords):
    """Return (model, discord_channel) for the *last* keyword found in name.

    NOTE(review): raises UnboundLocalError when no keyword matches --
    callers are expected to pass a name containing one of the keywords.
    """
    for candidate in keywords:
        if candidate in name:
            product_model = candidate
    if product_model in ("6800", "6900"):
        channel = "big-navi"
    elif product_model in ("5600X", "5800X", "5900X", "5950X"):
        channel = "zen-3"
    else:
        channel = product_model
    return product_model, channel
def get_product_brand(name):
    """The brand is assumed to be the first whitespace-separated word."""
    tokens = name.split()
    return tokens[0]
| true |
ea3e589927304050e95f69a1e205e5a7db720c71 | Python | JrdnVan/notes | /cs3331/lab03/WebServer.py | UTF-8 | 2,180 | 3.59375 | 4 | [] | no_license | """
(i) create a connection socket when contacted by a client (browser). x
(ii) receive HTTP request from this connection. Your server should only process GET request. You may assume that only GET requests will be received. x
(iii) parse the request to determine the specific file being requested. x
(iv) get the requested file from the server's file system. x
(v) create an HTTP response message consisting of the requested file preceded by header lines. x
(vi) send the response over the TCP connection to the requesting browser. x
(vii) If the requested file is not present on the server, the server should send an HTTP ?404 Not Found? message back to the client. x
(viii) the server should listen in a loop, waiting for next request from the browser. x
"""
import sys
from socket import *

# create a connection socket when contacted by a client (browser).
s = socket()
s.bind(('127.0.0.1', int(sys.argv[1])))
s.listen(1)

# the server should listen in a loop, waiting for next request from the browser.
while 1:
    # receive HTTP request from this connection (GET requests only)
    c = s.accept()[0]
    # parse the request line: path component with the leading '/' stripped
    fn = c.recv(1024).split()[1][1:]
    try:
        # get the requested file from the server's file system.
        # Fix: the original wrote `f.close` without parentheses, so the
        # handle was never actually closed; use a context manager instead.
        with open(fn, "rb") as f:
            response = f.read()
        c.send("HTTP/1.1 200 OK\r\n".encode())
        # advertise a content type for the two kinds of files we serve
        if "html".encode() in fn:
            c.send("Content-Type: text/html\r\n\r\n".encode())
        elif "png".encode() in fn:
            c.send("Content-Type: image/png\r\n\r\n".encode())
        # send the response over the TCP connection to the requesting browser.
        c.send(response)
    except FileNotFoundError:
        # unknown path -> 404
        c.send("HTTP/1.1 404 File Not Found\r\n".encode())
    finally:
        # Fix: the original closed the connection only on the 404 path,
        # leaking one socket per successful response.
        c.close()
dc7ea62066501a9639b8ba82712c1dac345d6d7d | Python | gonced8/python-miscellaneous | /graph.py | UTF-8 | 1,420 | 2.65625 | 3 | [] | no_license | import numpy as np
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import sys
import subprocess
if len(sys.argv)>1:
delimiter=None
skiprows=0
ls='.'
for elem in sys.argv[1:]:
if elem.find('delimiter=')!=-1:
delimiter=elem[elem.find('=')+1:]
elif elem.find('skiprows=')!=-1:
skiprows=int(elem[elem.find('=')+1:])
elif elem.find('ls=')!=-1:
ls=elem[elem.find('=')+1:]
elif elem.find('=')==-1:
filepath = elem
data = np.loadtxt(filepath, delimiter=delimiter, skiprows=skiprows)
data = np.transpose(data)
for i in range(1, data.shape[0]):
plt.figure()
plt.plot(data[0], data[i], ls, markersize=1., linewidth=1.)
#filename = "fig_temp%d.png" %i
#plt.savefig(filename, bbox_inches='tight')
plt.show()
'''
for i in range(1, data.shape[0]):
bashCommand = "open fig_temp%d.png" %i
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
for i in range(1, data.shape[0]):
bashCommand = "rm fig_temp%d.png" %i
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
'''
else:
print("graph <filename> <delimiter=> <skiprows=> <ls=>")
| true |
b20eedcd552ed43d2fe7df487daa2eaf071749cf | Python | bbaattaahh/acss | /auxiliary_scripts/test_display_results/test_display_results.py | UTF-8 | 341 | 2.59375 | 3 | [] | no_license | from DisplayClassification import DisplayClassification
import time
display_classification = DisplayClassification(image_size=(960, 1200), letter_pixel_high=35)
for i in range(1,100):
display_classification.add_new_result(i, "Jani a kompon")
time.sleep(2)
display_classification.display_actual()
display_classification.kill()
| true |
b54d63323964e41d8a22a6348192592e8912bfe4 | Python | Aasthaengg/IBMdataset | /Python_codes/p03361/s056405997.py | UTF-8 | 601 | 2.921875 | 3 | [] | no_license | H, W = map(int, input().split())
S = [""]*H
for i in range(H):
S[i] = list(input())
for i in range(H):
for j in range(W):
if S[i][j] == "#":
if i > 0:
if S[i-1][j] == "#":
continue
if j > 0:
if S[i][j-1] == "#":
continue
if j < W-1:
if S[i][j+1] == "#":
continue
if i < H-1 :
if S[i+1][j] == "#":
continue
print("No")
exit()
print("Yes") | true |
066c092459e23e6aac5866cdd3756991611ee08c | Python | hyperledger/aries-cloudagent-python | /aries_cloudagent/vc/ld_proofs/crypto/key_pair.py | UTF-8 | 1,097 | 2.890625 | 3 | [
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] | permissive | """Base key pair class."""
from abc import ABC, abstractmethod, abstractproperty
from typing import List, Optional, Union
class KeyPair(ABC):
    """Base key pair class.

    Fix: ``abc.abstractproperty`` has been deprecated since Python 3.3; the
    documented replacement is stacking ``@property`` over ``@abstractmethod``,
    which is used here. Subclasses that already override these names with
    ``@property`` remain compatible.
    """

    @abstractmethod
    async def sign(self, message: Union[List[bytes], bytes]) -> bytes:
        """Sign message(s) using key pair."""

    @abstractmethod
    async def verify(
        self, message: Union[List[bytes], bytes], signature: bytes
    ) -> bool:
        """Verify message(s) against signature using key pair."""

    @property
    @abstractmethod
    def has_public_key(self) -> bool:
        """Whether key pair has a public key.

        Public key is required for verification, but can be set dynamically
        in the verification process.
        """

    @property
    @abstractmethod
    def public_key(self) -> Optional[bytes]:
        """Getter for the public key bytes.

        Returns:
            bytes: The public key
        """

    @abstractmethod
    def from_verification_method(self, verification_method: dict) -> "KeyPair":
        """Create new key pair class based on the passed verification method."""
| true |
aeda1498c330142c5f57be906cfb7c5caf0c138a | Python | jackmiking/leetcode | /SetToZeros.py | UTF-8 | 1,672 | 3.375 | 3 | [] | no_license | '''
https://leetcode-cn.com/explore/interview/card/top-interview-questions-medium/29/array-and-strings/76/
'''
from typing import List
class Solution:
    """In-place 'set matrix zeroes' using the first row/column as markers
    (O(1) extra space)."""

    def setZeroes(self, matrix: List[List[int]]) -> None:
        """Zero out every row and column of `matrix` that contains a zero."""
        m = len(matrix)
        if m < 1:
            return
        n = len(matrix[0])
        # remember whether the first row / first column themselves need zeroing
        first_cell_zero = matrix[0][0] == 0
        first_col_zero = any(matrix[i][0] == 0 for i in range(1, m))
        first_row_zero = any(matrix[0][j] == 0 for j in range(1, n))
        # use row 0 and column 0 as zero markers for the inner cells
        for i in range(1, m):
            for j in range(1, n):
                if matrix[i][j] == 0:
                    matrix[i][0] = 0
                    matrix[0][j] = 0
        # zero inner rows/columns according to the markers
        for i in range(1, m):
            if matrix[i][0] == 0:
                for j in range(n):
                    matrix[i][j] = 0
        for j in range(1, n):
            if matrix[0][j] == 0:
                for i in range(m):
                    matrix[i][j] = 0
        # finally handle the first column and first row themselves
        if first_cell_zero or first_col_zero:
            for i in range(m):
                matrix[i][0] = 0
        if first_cell_zero or first_row_zero:
            for j in range(n):
                matrix[0][j] = 0
if __name__ == '__main__':
    # quick manual check against two sample matrices
    s = Solution()
    m = [[0,0,0,5],[4,3,1,4],[0,1,1,4],[1,2,1,3],[0,0,1,1]]
    s.setZeroes(m)
    print(m)
    m=[[-4,-2147483648,6,-7,0],[-8,6,-8,-6,0],[2147483647,2,-9,-6,-10]]
    s.setZeroes(m)
    print(m)
a149ee5f0aa3bc9678688ba0d1380fd5aa16a613 | Python | rajgaur98/titanic-survival-prediction | /titanic.py | UTF-8 | 2,494 | 2.703125 | 3 | [] | no_license | import numpy as np
import scipy.optimize as op
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
df = pd.read_csv('train.csv')
# sentinel for missing values (kept numeric so the model can digest it)
df.fillna(-99999, inplace=True)
df.drop(['PassengerId', 'Name'], axis = 1, inplace = True)
# encode sex as 1 = male, 0 = female
df.replace('male', 1, inplace = True)
df.replace('female', 0, inplace = True)
def pre_pro(df):
    """Label-encode the Ticket, Cabin and Embarked columns in place.

    Each distinct value is mapped to the index of its first appearance in
    the column; the missing-value sentinel -99999 is left untouched.
    Returns the same (mutated) DataFrame.

    Fixes: collapses three copy-pasted per-column loops into one, and
    writes cells via ``DataFrame.at`` instead of chained indexing
    (``df[col][ind] = ...``), which triggers SettingWithCopyWarning and is
    not guaranteed to write through to the frame.
    """
    for column in ('Ticket', 'Cabin', 'Embarked'):
        seen = []  # value -> code is its position in this first-seen list
        for ind in df.index:
            val = df.at[ind, column]
            if val == -99999:
                continue  # keep the missing sentinel as-is
            if val not in seen:
                seen.append(val)
            df.at[ind, column] = seen.index(val)
    return df
df = pre_pro(df)
# feature matrix; preprocessing.scale standardizes each column
X_train = np.array(df[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch','Ticket', 'Fare', 'Cabin', 'Embarked']])
m, n = np.shape(X_train)
print(X_train[:10][:])
#X_train = np.hstack((np.ones((m,1)), X_train ))
X_train = preprocessing.scale(X_train)
y_train = np.array(df[['Survived']])
# apply the same cleaning/encoding pipeline to the test set
df1 = pd.read_csv('test.csv')
df1.fillna(-99999, inplace=True)
df1.drop(['PassengerId', 'Name'], axis = 1, inplace = True)
df1.replace('male', 1, inplace = True)
df1.replace('female', 0, inplace = True)
df1 = pre_pro(df1)
# NOTE(review): pre_pro fits its encodings per file, so a category may map
# to different codes in train vs. test -- confirm this is acceptable
X_test = np.array(df1[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']])
i, j = np.shape(X_test)
#X_test = np.hstack((np.ones((i,1)), X_test ))
X_test = preprocessing.scale(X_test)
clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial')
clf.fit(X_train, y_train)
y_test = clf.predict(X_test)
y_test = y_test.reshape(i, )
print(clf.score(X_train, y_train))
# test-set PassengerIds are 892..1309
passenger_id = [i for i in range(892,1310)]
pred = {'PassengerId': passenger_id,
        'Survived': y_test}
df2 = pd.DataFrame(pred, columns = ['PassengerId', 'Survived'])
export_csv = df2.to_csv(r'output.csv', index = None, header = True)
| true |
71d7b88c660081a2fbade26f70d6e49a2fc1135e | Python | roarem/pyser | /bdag/bdag.py | UTF-8 | 1,720 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import datetime as dt
import os
END = '\033[0m'
REDbg = '\033[41m'
REDfg = '\033[31m'
GREENbg = '\033[42m'
GREENfg = '\033[32m'
YELLOfg = '\033[33m'
BLACKfg = '\033[30m'
BLINK = '\033[5m'
class bdager:
    """Loads birthdays from bdag/datoer.csv and formats a colorized countdown."""
    def __init__(self):
        path = os.getcwd()
        with open(path+'/bdag/datoer.csv','r') as datoer:
            alle = []
            idag = dt.date.today()
            for line in datoer:
                # CSV row: name,day,month,year
                templine = line.strip().split(',')
                navn = templine[0]
                dag = int(templine[1])
                maaned = int(templine[2])
                aar = int(templine[3])
                datoen = dt.date(aar,maaned,dag)
                # days until this year's occurrence (negative if already past)
                # NOTE(review): .replace(year=...) raises ValueError for Feb 29
                d_igjen = (datoen.replace(year=idag.year) - idag).days
                alle.append({'navn':navn,'bdag':datoen,'d_igjen':d_igjen})
            # soonest birthday first
            self.alle = sorted(alle, key=lambda k: k['d_igjen'])
    def igjen(self):
        """Return a colorized multi-line summary of days until each birthday."""
        siste = ''
        # initial value is dead: every branch below reassigns ut_streng
        ut_streng = '{0:10} {1:3} dager {2}\n'
        for pers in self.alle:
            digjen = pers['d_igjen']
            # red = already passed, blinking yellow = within 5 days,
            # blinking = today, green = further out
            if digjen<0:
                ut_streng = '{0:10} {1:3} dager '+REDfg+'siden'+END
            elif digjen >0 and digjen <5:
                ut_streng = '{0:10} {1:3} dager '+BLINK+YELLOfg+'igjen'+END
            elif digjen==0:
                ut_streng = BLINK+'{0:10} {1:3} dager igjen'+END
            else:
                ut_streng = '{0:10} {1:3} dager '+GREENfg+'igjen'+END
            ut_streng += '\n'
            siste += ut_streng.format(pers['navn'],abs(pers['d_igjen']))
        return siste
if __name__=='__main__':
    # quick manual check: print the countdown table
    dager = bdager()
    print(dager.igjen())
| true |
25282868ae1b82a4f4a2b083bfe43c2b5694f0ff | Python | bond400apm/3stateCA | /lib/tristateca.py | UTF-8 | 1,636 | 3.4375 | 3 | [] | no_license | import random
import numpy as np
from matplotlib import pyplot as plt
#Convert decimal number to binary or ternary
def decimal_converter(Number,state=2):
states = 8
if state == 3:
states = 9
Converted = str(np.base_repr(Number,state))
Converted_length = len(Converted)
if Converted_length != states:
padding = states - Converted_length
Converted = '0'*padding + Converted
return Converted
#Create the look up table for a certain rule
def look_up_table(Rule):
neighborhoods = [
(2,2),(2,1),(2,0),
(1,2),(1,1),(1,0),
(0,2),(0,1),(0,0)
]
Neighbor = decimal_converter(Rule,3)
Table = {}
for i in range(9):
key = neighborhoods[i]
value = Neighbor[i]
Table.update({key:value})
return Table
#This is the class of Cells
class CellularAutomata(object):
#This stores the configure, rule number inforation
def __init__(self,Rule_number,initial_condition):
self.initial = initial_condition
self.Rule = Rule_number
self.current_config = initial_condition
self._length = len(initial_condition)
self.config = [initial_condition]
#This tells the Cellular how to evolve
def evolve(self):
last_figure = self.current_config
figure_store = []
for i in range(self._length):
neighbor = (last_figure[i-1],last_figure[i])
value = int(look_up_table(self.Rule)[neighbor])
figure_store.append(value)
self.current_config = figure_store
self.config.append(self.current_config)
| true |
a9892c2c35606038220dd5b2bd8a4e1a6c18eedb | Python | Sunnyleave/chenye_1 | /CRH.py | UTF-8 | 4,423 | 2.65625 | 3 | [] | no_license | import csv
import pickle
import numpy as np
import time
# Claims tensor indexed [source, entity, attribute]; -1 marks a missing claim.
# NOTE: pickle.load executes arbitrary code — only load trusted local files.
data_mat_const = pickle.load(open('Hospital_dataset.pickle', 'rb')).astype(np.int64)
# Gold-standard labels (entity x attribute) used only for evaluation.
ground_truth = np.asarray(list(csv.reader(open('Complications - Hospital - Encoded.csv'))), dtype=int)
# numS sources, numE entities, numA attributes.
numS, numE, numA = data_mat_const.shape[0], data_mat_const.shape[1], data_mat_const.shape[2]
max_it = 5  # number of truth/weight update iterations
def evaluate(truth_val, n_answered):
    """Fraction of cells where truth_val disagrees with ground_truth,
    normalised over the answered entities times numA attributes."""
    mismatches = np.count_nonzero(truth_val - ground_truth)
    return mismatches / (n_answered * numA)
    # Alternative normalisation over every entity:
    # return np.count_nonzero(truth_val - ground_truth) / (numE * numA)
if __name__ == '__main__':
    # CRH-style truth discovery: alternately re-estimate per-source weights w
    # and the per-(entity, attribute) truth table from the claims tensor.
    # Parameters
    # NOTE: time.clock() was removed in Python 3.8 — this script needs <= 3.7.
    start = time.clock()
    w = np.ones(shape=(numS,))                 # per-source reliability weights
    num_Claims = np.ones(shape=(numS,), dtype=np.int64)
    truth = np.zeros(shape=(numE, numA), dtype=np.int64)
    claim_confident = [[1.0 for a in range(numA)] for e in range(numE)]
    num_answered = 0
    num_errors = 0
    total_claims = 0
    # Count num_Claims
    for s in range(numS):
        for e in range(numE):
            if not np.all(data_mat_const[s][e] == -1):
                num_Claims[s] += 1
    # knowledge_pairs = [(0, 1), (8, 9)]
    #knowledge_pairs = [(0, 1)]
    # Voting Initialization
    # Initial truth estimate: per-cell majority vote over non-missing claims.
    for e in range(numE):
        if np.max(data_mat_const[:, e]) > 0:
            for a in range(numA):
                claim_list = data_mat_const[:, e, a].tolist()
                claim_list = np.asarray([x for x in claim_list if x != -1])
                truth[e][a] = np.argmax(np.bincount(claim_list))
            num_answered += 1
    # Count num_Claims
    # (Actually counts raw claim errors vs. ground truth for later recall.)
    for s in range(numS):
        for e in range(numE):
            if not np.all(data_mat_const[s][e] == -1):
                for a in range(numA):
                    if data_mat_const[s][e][a] != ground_truth[e][a]:
                        num_errors += 1
                    total_claims += 1
    # Evaluate the result
    print('Error rate: {0}'.format(evaluate(truth, num_answered)))
    print('Data Error rate: {0}'.format(num_errors / total_claims))
    # Iteratively solve the problem
    for it in range(max_it):
        print('Iteration: {0}'.format(it + 1))
        data_mat = np.copy(data_mat_const)
        num_changed = 0
        num_change2_true = 0
        # Update Weight
        w_old = w  # NOTE(review): w_old is assigned but never read
        score1 = np.zeros(shape=(numS,))
        # Calculate all the costs
        # score1[s] = number of cells where source s disagrees with truth.
        for e in range(numE):
            for a in range(numA):
                for s in range(numS):
                    score1[s] += int((truth[e, a] != data_mat[s, e, a]))
                    # print(truth[e, a], data_mat[s, e, a], int((truth[e, a] != data_mat[s, e, a])))
        score1 = score1 / num_Claims
        # CRH weight update: low disagreement rate -> large weight.
        # 1e-300 avoids log(0); 0.00001 keeps the best source's weight > 0.
        w = -np.log(score1 / max(score1) + 1e-300) + 0.00001
        # Update Truth
        # For each cell, pick the claim value with the largest total weight.
        for e in range(numE):
            for a in range(numA):
                claim_list = data_mat[:, e, a]
                claim_list_raw = data_mat_const[:, e, a]
                claim_species = np.unique(claim_list, return_index=False)
                wk = np.zeros(shape=(claim_species.shape[0],))
                for k in range(len(claim_species)):
                    wk[k] = np.sum((claim_list == claim_species[k]).astype(int) * w) # Change here if partial coverage
                claim_confident[e][a] = [claim_species, wk]
                # Select most confident claim
                most_confident_claim = 0
                most_confident_confidence = -1
                for ii in range(len(claim_confident[e][a][0])):
                    if claim_confident[e][a][0][ii] != -1:
                        if most_confident_confidence < claim_confident[e][a][1][ii]:
                            most_confident_confidence = claim_confident[e][a][1][ii]
                            most_confident_claim = claim_confident[e][a][0][ii]
                truth[e, a] = most_confident_claim
                for ii in range(len(claim_list_raw)):
                    if claim_list_raw[ii] != -1 and claim_list_raw[ii] != most_confident_claim:
                        num_changed += 1
                        if most_confident_claim == ground_truth[e, a]:
                            num_change2_true += 1
    end = time.clock()
    print(end - start)
    # NOTE(review): ZeroDivisionError if the final iteration changed nothing.
    print('Precision: {0}'.format(num_change2_true / num_changed))
    print('Recall: {0}'.format(num_change2_true / num_errors))
    # Evaluate the result
    print('Error rate: {0}'.format(evaluate(truth, num_answered)))
    print(w)
| true |
8764b86606a94e8e4f0f95dc0a313a13917bba63 | Python | intermezzio/DataAnalytics | /ReadingData.py | UTF-8 | 12,133 | 4.125 | 4 | [] | no_license | def first_question():
    '''
    Function to retrieve data for First Question
    Returns the number of people in the United States who speak each language
    Returns in form of dictionary with the key as uppercase language (String) and the value as the number of speakers (float)
    Ex. {"FRENCH",3000.0}
    '''
    #openDataFile
    # NOTE(review): the handle is only closed at the end of the function, so it
    # leaks if parsing raises — consider a with-statement as in third_question.
    datafile = open('2009-2013-languages-spoken-at-home-ability-to-speak-english-united-states.csv','r')
    data = datafile.readlines()
    #lines to collect data from (hard-coded row indices for this CSV release)
    languages=[8,13,18,22,95,104,105]
    Southeast_Asian_languages=[106,108,109,110,132]
    Indian_languages=[55,56,57,58,122,123,124,125,127,128]
    Native_American_languages=[181,182]
    #declare dictionaries
    languages_data={}
    Southeast_Asian_languages_data={}
    Indian_languages_data={}
    Native_American_languages_data={}
    #retrieve all speakers
    for i in range(8,183): # Omit header lines
        if i in languages:
            language, speakers = line_parser1(data,i)
            languages_data[language] = speakers
        if i in Southeast_Asian_languages:
            language, speakers = line_parser1(data,i)
            Southeast_Asian_languages_data[language] = speakers
        if i in Indian_languages:
            language, speakers = line_parser1(data,i)
            Indian_languages_data[language] = speakers
        if i in Native_American_languages:
            language, speakers = line_parser1(data,i)
            Native_American_languages_data[language] = speakers
    #consolidate Southeast Asian Languages into a single aggregate entry
    total_speakers = 0
    for language in Southeast_Asian_languages_data:
        total_speakers += float(Southeast_Asian_languages_data[language])
    languages_data['SOUTHEAST ASIAN LANGUAGES'] = total_speakers
    #consolidate Indian Languages
    total_speakers = 0
    for language in Indian_languages_data:
        total_speakers += float(Indian_languages_data[language])
    languages_data['INDIAN LANGUAGES'] = total_speakers
    #consolidate Native American Languages
    total_speakers = 0
    for language in Native_American_languages_data:
        total_speakers += float(Native_American_languages_data[language])
    languages_data['NATIVE AMERICAN LANGUAGES'] = total_speakers
    datafile.close()
    return languages_data
def line_parser1(data,i):
    '''
    Function to parse data for first question
    Returns a String and a float
    String language = uppercase language
    Float speakers = number of people who speak that language
               (first comma-separated field containing '.00'; 0 if none found)
    '''
    #split data
    lst = data[i].split(',')
    # The speaker count is the first field containing '.00'.  The original
    # unbounded while-True scan raised IndexError on rows with no such field;
    # a bounded loop returns 0 instead.
    speakers = 0
    for field in lst:
        if '.00' in field:
            speakers = float(field)
            break
    # Normalise the language name: uppercase, drop surrounding quotes and a
    # trailing dot, and cut any parenthesised qualifier, e.g. "(incl. Patois)".
    language = lst[0].upper()
    language = language.strip('\"')
    language = language.strip('.')
    if '(' in language:
        language = language[:language.find('(')]
    language = language.strip()
    return language, speakers
def second_question():
    '''
    Function to retrieve data for Second Question
    Returns the percentage of people in the United States who speak English well out of all bilingual people
    Returns in form of dictionary with the key as uppercase language (String) and the value as the percentage of speakers (float)
    Ex. {"FRENCH",50.0}
    '''
    # Open the data file with a context manager so the handle is closed even
    # if parsing raises (the original leaked it on error); this also matches
    # the style of third_question/fourth_question.
    with open('2009-2013-languages-spoken-at-home-ability-to-speak-english-united-states.csv','r') as datafile:
        data = datafile.readlines()
    #declare dictionaries
    total_speakers = first_question()
    speak_english_well_data = {}
    Southeast_well_data={}
    Indian_well_data={}
    Native_American_well_data={}
    #lines to collect data from (hard-coded row indices for this CSV release)
    languages=[8,13,18,22,95,104,105]
    Southeast_Asian_languages=[106,108,109,110,132]
    Indian_languages=[55,56,57,58,122,123,124,125,127,128]
    Native_American_languages=[181,182]
    #retrieve number who speak english well
    for i in range(8,183): # Omit header lines
        if i in languages:
            language, speakers = line_parser2(data,i)
            speak_english_well_data[language] = speakers
        if i in Southeast_Asian_languages:
            language, speakers = line_parser2(data,i)
            Southeast_well_data[language] = speakers
        if i in Indian_languages:
            language, speakers = line_parser2(data,i)
            Indian_well_data[language] = speakers
        if i in Native_American_languages:
            language, speakers = line_parser2(data,i)
            Native_American_well_data[language] = speakers
    #consolidate each grouped language family into a single aggregate entry
    speak_english_well_data['SOUTHEAST ASIAN LANGUAGES'] = sum(
        float(v) for v in Southeast_well_data.values())
    speak_english_well_data['INDIAN LANGUAGES'] = sum(
        float(v) for v in Indian_well_data.values())
    speak_english_well_data['NATIVE AMERICAN LANGUAGES'] = sum(
        float(v) for v in Native_American_well_data.values())
    #make dictionary of percentages (speak-well count / total speakers * 100)
    percentages = {}
    for language in total_speakers:
        percent = speak_english_well_data[language] / (total_speakers[language] * 0.01)
        percentages[language] = percent
    return percentages
def line_parser2(data,i):
    '''
    Function to parse data for second question
    Returns a String and a float
    String language = uppercase language
    Float speakers = number of people who speak that language speak English well
               (read two columns after the first field containing '.00';
                0 if no such field, or if the row is too short)
    '''
    #split data
    lst = data[i].split(',')
    # The "speak English well" count sits two columns after the first numeric
    # (total-speakers) field.  The original unbounded while-True scan raised
    # IndexError on rows with no '.00' field; a bounded loop returns 0 instead.
    speakers = 0
    for n, field in enumerate(lst):
        if '.00' in field:
            if n + 2 < len(lst):
                speakers = float(lst[n+2])
            break
    # Normalise the language name exactly as in line_parser1.
    language = lst[0].upper()
    language = language.strip('\"')
    language = language.strip('.')
    if '(' in language:
        language = language[:language.find('(')]
    language = language.strip()
    return language, speakers
def third_question():
    '''
    Function to retrieve data for Third Question
    For each state, computes 100 minus the percentage of bilingual people who
    speak English well.
    NOTE(review): the original docstring claimed this returns the percentage
    who DO speak English well, but the formula computes the complement —
    confirm which is intended.
    Returns in form of dictionary with the key as state (String) and the value as a percentage (float)
    Ex. {"New Jersey",50.0}
    '''
    #list of states (each has a matching <state>.csv data file)
    states = ['Alabama','Alaska','Arizona','Arkansas','California','Colorado','Connecticut','Delaware','District_of_Columbia',
        'Florida','Georgia','Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana','Maine','Maryland',
        'Massachusetts','Michigan','Minnesota','Mississippi','Missouri','Montana','Nebraska','Nevada','New_Hampshire',
        'New_Jersey','New_Mexico','New_York','North_Carolina','North_Dakota','Ohio','Oklahoma','Oregon','Pennsylvania',
        'Puerto_Rico','Rhode_Island','South_Carolina','South_Dakota','Tennessee','Texas','Utah','Vermont','Virginia',
        'Washington','West_Virginia','Wisconsin','Wyoming']
    bilingual_counts = {}
    well_counts = {}
    for state in states:
        #openDataFile
        with open(state + '.csv','r') as datafile:
            lines = datafile.readlines()
        #human-readable state name (underscores -> spaces)
        pretty_name = state.replace('_',' ')
        #get bilingual count and speak-English-well count for this state
        bilingual, well = bilingual_people_by_state(lines, False)
        bilingual_counts[pretty_name] = bilingual
        well_counts[pretty_name] = well
    #calculate percentages
    percentages = {}
    for name in bilingual_counts:
        percentages[name] = 100 - (well_counts[name] / (bilingual_counts[name] * 0.01))
    return percentages
def fourth_question():
    '''
    Function to retrieve data for Fourth Question
    Returns the percentage of people in each state who speak a second language
    Returns in form of dictionary with the key as state (String) and the value as the percentage of speakers (float)
    Ex. {"New Jersey",75.0}
    '''
    #list of states (each has a matching <state>.csv data file)
    states = ['Alabama','Alaska','Arizona','Arkansas','California','Colorado','Connecticut','Delaware','District_of_Columbia',
        'Florida','Georgia','Hawaii','Idaho','Illinois','Indiana','Iowa','Kansas','Kentucky','Louisiana','Maine','Maryland',
        'Massachusetts','Michigan','Minnesota','Mississippi','Missouri','Montana','Nebraska','Nevada','New_Hampshire',
        'New_Jersey','New_Mexico','New_York','North_Carolina','North_Dakota','Ohio','Oklahoma','Oregon','Pennsylvania',
        'Puerto_Rico','Rhode_Island','South_Carolina','South_Dakota','Tennessee','Texas','Utah','Vermont','Virginia',
        'Washington','West_Virginia','Wisconsin','Wyoming']
    bilingual_counts = {}
    populations = {}
    for state in states:
        #openDataFile
        with open(state + '.csv','r') as datafile:
            lines = datafile.readlines()
        #human-readable state name (underscores -> spaces)
        pretty_name = state.replace('_',' ')
        #get bilingual count and total population for this state
        bilingual, population = bilingual_people_by_state(lines, True)
        bilingual_counts[pretty_name] = bilingual
        populations[pretty_name] = population
    #calculate percentages
    percentages = {}
    for name in bilingual_counts:
        percentages[name] = bilingual_counts[name] / (populations[name] * 0.01)
    return percentages
def bilingual_people_by_state(data, total):
    '''
    Function to return the number of bilingual people by state, and a second value that depends on total
    Returns two floats
    Float = number of bilingual people in the state
    If total = True, the second float = total population of state
    If total = False, the second float = total people who speak English well
    '''
    #split data (row 7 holds the bilingual counts for the state)
    lst = data[7].split(',')
    #keep track of number of bilingual people and second variable
    speakers = 0
    speakers2 = 0
    n = 0 #counter
    #get number of bilingual people (first field containing '.00')
    while True:
        if '.00' in lst[n]:
            speakers = float(lst[n])
            n=0
            if not total:
                #number of people who speak english well
                # NOTE(review): n was just reset to 0, so this always reads
                # column 3 regardless of where the count was found — confirm
                # lst[3] (rather than the found index + 3) is intended.
                speakers2 = float(lst[n+3])
            break
        else:
            n += 1
    n = 0
    if total:
        #total number of people (row 5 holds the population)
        lst = data[5].split(',')
        while True:
            if '.00' in lst[n]:
                speakers2 = float(lst[n])
                n=0
                break
            else:
                n += 1
        n = 0
    return speakers, speakers2
dc4b04bd0f957d555ebebc3784156e8250e14b59 | Python | Azure/azure-sdk-for-python | /sdk/cosmos/azure-cosmos/samples/index_management.py | UTF-8 | 30,131 | 2.515625 | 3 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import azure.cosmos.documents as documents
import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.exceptions as exceptions
from azure.cosmos.partition_key import PartitionKey
import requests
import traceback
import urllib3
from requests.utils import DEFAULT_CA_BUNDLE_PATH as CaCertPath
import config
# Account/connection settings pulled from the local config module.
HOST = config.settings['host']
MASTER_KEY = config.settings['master_key']
DATABASE_ID = config.settings['database_id']
CONTAINER_ID = config.settings['container_id']
# All demo containers are hash-partitioned on the document id.
PARTITION_KEY = PartitionKey(path='/id', kind='Hash')
# A typical container has the following properties within it's indexingPolicy property
# indexingMode
# automatic
# includedPaths
# excludedPaths
#
# We can toggle 'automatic' to either be True or False depending upon whether we want to have indexing over all columns by default or not.
#
# We can provide options while creating documents. indexingDirective is one such,
# by which we can tell whether it should be included or excluded in the index of the parent container.
# indexingDirective can be either 'Include', 'Exclude' or 'Default'
# To run this Demo, please provide your own CA certs file or download one from
# http://curl.haxx.se/docs/caextract.html
# Setup the certificate file in .pem format.
# If you still get an SSLError, try disabling certificate verification and suppress warnings
def obtain_client():
    """Build a CosmosClient for the configured account.

    SSL certificate verification is disabled (SSLCaCerts = False) and the
    corresponding urllib3 warnings are suppressed; to verify instead, set
    SSLCaCerts to a CA bundle path such as CaCertPath.
    """
    policy = documents.ConnectionPolicy()
    policy.SSLConfiguration = documents.SSLConfiguration()
    # Try to setup the cacert.pem
    # policy.SSLConfiguration.SSLCaCerts = CaCertPath
    # Else, disable verification
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    policy.SSLConfiguration.SSLCaCerts = False
    return cosmos_client.CosmosClient(HOST, MASTER_KEY, "Session", connection_policy=policy)
# Query for Entity / Entities
def query_entities(parent, entity_type, id = None):
    """Fetch databases/containers/documents from *parent*.

    With id=None, returns the full list for the given entity_type; with an id,
    returns the single matching entity or None if not exactly one matched.
    Unknown entity types yield None (or a TypeError when an id is supplied),
    matching the original behaviour.
    """
    by_id_query = {
            "query": "SELECT * FROM r WHERE r.id=@id",
            "parameters": [
                { "name":"@id", "value": id }
            ]
        }
    results = None
    try:
        if entity_type == 'database':
            results = list(parent.list_databases()) if id is None else list(parent.query_databases(by_id_query))
        elif entity_type == 'container':
            results = list(parent.list_containers()) if id is None else list(parent.query_containers(by_id_query))
        elif entity_type == 'document':
            results = list(parent.read_all_items()) if id is None else list(parent.query_items(by_id_query))
    except exceptions.AzureError as e:
        print("The following error occurred while querying for the entity / entities ", entity_type, id if id != None else "")
        print(e)
        raise
    if id is None:
        return results
    if len(results) == 1:
        return results[0]
    return None
def create_database_if_not_exists(client, database_id):
    """Return a database proxy for *database_id*, creating the database if needed.

    Fix: the original swallowed CosmosResourceExistsError with ``pass`` and
    therefore returned None when the database was created concurrently —
    despite the function's name promising a usable database.  Now a proxy is
    returned in that race as well.
    """
    try:
        database = query_entities(client, 'database', id = database_id)
        if database == None:
            return client.create_database(id=database_id)
        else:
            return client.get_database_client(database_id)
    except exceptions.CosmosResourceExistsError:
        # Created concurrently by someone else: still hand back a proxy.
        return client.get_database_client(database_id)
def delete_container_if_exists(db, container_id):
    """Best-effort container delete: a missing container is silently ignored.

    A 400 (bad request, e.g. malformed container link) is reported and
    re-raised; other HTTP errors are swallowed, matching the original.
    """
    try:
        db.delete_container(container_id)
    except exceptions.CosmosResourceNotFoundError:
        return
    except exceptions.CosmosHttpResponseError as e:
        if e.status_code == 400:
            print("Bad request for container link", container_id)
            raise
        return
    print(f'Container with id \'{container_id}\' was deleted')
def print_dictionary_items(dict):
    """Print each key/value pair with keys left-padded to 15 chars, then a blank line."""
    for key, value in dict.items():
        print(f"{key:<15}", value)
    print()
def fetch_all_databases(client):
    """List every database on the account and pretty-print each one's properties."""
    all_databases = query_entities(client, 'database')
    print("-" * 41)
    print("-" * 41)
    for db_props in all_databases:
        print_dictionary_items(db_props)
        print("-" * 41)
def query_documents_with_custom_query(container, query_with_optional_parameters, message = "Document(s) found by query: "):
    """Run a cross-partition query, print *message* and every hit, and return the hits.

    Returns None when the document is missing or the query hits an excluded
    path (HTTP 400); other HTTP errors propagate.  Always prints a trailing
    blank line.
    """
    try:
        matches = list(container.query_items(query_with_optional_parameters, enable_cross_partition_query=True))
        print(message)
        for item in matches:
            print(item)
        return matches
    except exceptions.CosmosResourceNotFoundError:
        print("Document doesn't exist")
    except exceptions.CosmosHttpResponseError as e:
        if e.status_code != 400:
            raise
        # Can occur when we are trying to query on excluded paths
        print("Bad Request exception occurred: ", e)
    finally:
        print()
def explicitly_exclude_from_index(db):
    """ The default index policy on a DocumentContainer will AUTOMATICALLY index ALL documents added.
    There may be scenarios where you want to exclude a specific doc from the index even though all other
    documents are being indexed automatically.
    This method demonstrates how to use an index directive to control this

    :param db: DatabaseProxy in which the demo container is (re)created.
    """
    try:
        delete_container_if_exists(db, CONTAINER_ID)
        # Create a container with default index policy (i.e. automatic = true)
        created_Container = db.create_container(id=CONTAINER_ID, partition_key=PARTITION_KEY)
        print(created_Container)
        print("\n" + "-" * 25 + "\n1. Container created with index policy")
        properties = created_Container.read()
        print_dictionary_items(properties["indexingPolicy"])
        # Create a document and query on it immediately.
        # Will work as automatic indexing is still True
        doc = created_Container.create_item(body={ "id" : "doc1", "orderId" : "order1" })
        print("\n" + "-" * 25 + "Document doc1 created with order1" + "-" * 25)
        print(doc)
        query = {
                "query": "SELECT * FROM r WHERE r.orderId=@orderNo",
                "parameters": [ { "name":"@orderNo", "value": "order1" } ]
            }
        query_documents_with_custom_query(created_Container, query)
        # Now, create a document but this time explicitly exclude it from the container using IndexingDirective
        # Then query for that document
        # Should NOT find it, because we excluded it from the index
        # BUT, the document is there and doing a ReadDocument by Id will prove it
        doc2 = created_Container.create_item(
            body={ "id" : "doc2", "orderId" : "order2" },
            indexing_directive=documents.IndexingDirective.Exclude
        )
        print("\n" + "-" * 25 + "Document doc2 created with order2" + "-" * 25)
        print(doc2)
        query = {
                "query": "SELECT * FROM r WHERE r.orderId=@orderNo",
                "parameters": [ { "name":"@orderNo", "value": "order2" } ]
            }
        query_documents_with_custom_query(created_Container, query)
        # Point-read works even though doc2 is absent from the index.
        docRead = created_Container.read_item(item="doc2", partition_key="doc2")
        print("Document read by ID: \n", docRead["id"])
        # Cleanup
        db.delete_container(created_Container)
        print("\n")
    except exceptions.CosmosResourceExistsError:
        print("Entity already exists")
    except exceptions.CosmosResourceNotFoundError:
        print("Entity doesn't exist")
def use_manual_indexing(db):
    """The default index policy on a DocumentContainer will AUTOMATICALLY index ALL documents added.
    There may be cases where you can want to turn-off automatic indexing and only selectively add only specific documents to the index.
    This method demonstrates how to control this by setting the value of automatic within indexingPolicy to False

    :param db: DatabaseProxy in which the demo container is (re)created.
    """
    try:
        delete_container_if_exists(db, CONTAINER_ID)
        # Create a container with manual (instead of automatic) indexing
        created_Container = db.create_container(
            id=CONTAINER_ID,
            indexing_policy={"automatic" : False},
            partition_key=PARTITION_KEY
        )
        properties = created_Container.read()
        print(created_Container)
        print("\n" + "-" * 25 + "\n2. Container created with index policy")
        print_dictionary_items(properties["indexingPolicy"])
        # Create a document
        # Then query for that document
        # We should find nothing, because automatic indexing on the container level is False
        # BUT, the document is there and doing a ReadDocument by Id will prove it
        doc = created_Container.create_item(body={ "id" : "doc1", "orderId" : "order1" })
        print("\n" + "-" * 25 + "Document doc1 created with order1" + "-" * 25)
        print(doc)
        query = {
                "query": "SELECT * FROM r WHERE r.orderId=@orderNo",
                "parameters": [ { "name":"@orderNo", "value": "order1" } ]
            }
        query_documents_with_custom_query(created_Container, query)
        # Point-read succeeds despite the document not being indexed.
        docRead = created_Container.read_item(item="doc1", partition_key="doc1")
        print("Document read by ID: \n", docRead["id"])
        # Now create a document, passing in an IndexingDirective saying we want to specifically index this document
        # Query for the document again and this time we should find it because we manually included the document in the index
        doc2 = created_Container.create_item(
            body={ "id" : "doc2", "orderId" : "order2" },
            indexing_directive=documents.IndexingDirective.Include
        )
        print("\n" + "-" * 25 + "Document doc2 created with order2" + "-" * 25)
        print(doc2)
        query = {
                "query": "SELECT * FROM r WHERE r.orderId=@orderNo",
                "parameters": [ { "name":"@orderNo", "value": "order2" } ]
            }
        query_documents_with_custom_query(created_Container, query)
        # Cleanup
        db.delete_container(created_Container)
        print("\n")
    except exceptions.CosmosResourceExistsError:
        print("Entity already exists")
    except exceptions.CosmosResourceNotFoundError:
        print("Entity doesn't exist")
def exclude_paths_from_index(db):
    """The default behavior is for Cosmos to index every attribute in every document automatically.
    There are times when a document contains large amounts of information, in deeply nested structures
    that you know you will never search on. In extreme cases like this, you can exclude paths from the
    index to save on storage cost, improve write performance and also improve read performance because the index is smaller
    This method demonstrates how to set excludedPaths within indexingPolicy

    :param db: DatabaseProxy in which the demo container is (re)created.
    """
    try:
        delete_container_if_exists(db, CONTAINER_ID)
        doc_with_nested_structures = {
            "id" : "doc1",
            "foo" : "bar",
            "metaData" : "meta",
            "subDoc" : { "searchable" : "searchable", "nonSearchable" : "value" },
            "excludedNode" : { "subExcluded" : "something", "subExcludedNode" : { "someProperty" : "value" } }
        }
        container_to_create = { "id" : CONTAINER_ID ,
                             "indexingPolicy" :
                             {
                                 "includedPaths" : [ {'path' : "/*"} ], # Special mandatory path of "/*" required to denote include entire tree
                                 "excludedPaths" : [ {'path' : "/metaData/*"}, # exclude metaData node, and anything under it
                                                     {'path' : "/subDoc/nonSearchable/*"}, # exclude ONLY a part of subDoc
                                                     {'path' : "/\"excludedNode\"/*"} # exclude excludedNode node, and anything under it
                                                   ]
                             }
                          }
        print(container_to_create)
        print(doc_with_nested_structures)
        # Create a container with the defined properties
        # The effect of the above IndexingPolicy is that only id, foo, and the subDoc/searchable are indexed
        created_Container = db.create_container(
            id=container_to_create['id'],
            indexing_policy=container_to_create['indexingPolicy'],
            partition_key=PARTITION_KEY
        )
        properties = created_Container.read()
        print(created_Container)
        print("\n" + "-" * 25 + "\n4. Container created with index policy")
        print_dictionary_items(properties["indexingPolicy"])
        # The effect of the above IndexingPolicy is that only id, foo, and the subDoc/searchable are indexed
        doc = created_Container.create_item(body=doc_with_nested_structures)
        print("\n" + "-" * 25 + "Document doc1 created with nested structures" + "-" * 25)
        print(doc)
        # Querying for a document on either metaData or /subDoc/subSubDoc/someProperty > fail because these paths were excluded and they raise a BadRequest(400) Exception
        query = {"query": "SELECT * FROM r WHERE r.metaData=@desiredValue", "parameters" : [{ "name":"@desiredValue", "value": "meta" }]}
        query_documents_with_custom_query(created_Container, query)
        query = {"query": "SELECT * FROM r WHERE r.subDoc.nonSearchable=@desiredValue", "parameters" : [{ "name":"@desiredValue", "value": "value" }]}
        query_documents_with_custom_query(created_Container, query)
        query = {"query": "SELECT * FROM r WHERE r.excludedNode.subExcludedNode.someProperty=@desiredValue", "parameters" : [{ "name":"@desiredValue", "value": "value" }]}
        query_documents_with_custom_query(created_Container, query)
        # Querying for a document using foo, or even subDoc/searchable > succeed because they were not excluded
        query = {"query": "SELECT * FROM r WHERE r.foo=@desiredValue", "parameters" : [{ "name":"@desiredValue", "value": "bar" }]}
        query_documents_with_custom_query(created_Container, query)
        query = {"query": "SELECT * FROM r WHERE r.subDoc.searchable=@desiredValue", "parameters" : [{ "name":"@desiredValue", "value": "searchable" }]}
        query_documents_with_custom_query(created_Container, query)
        # Cleanup
        db.delete_container(created_Container)
        print("\n")
    except exceptions.CosmosResourceExistsError:
        print("Entity already exists")
    except exceptions.CosmosResourceNotFoundError:
        print("Entity doesn't exist")
def range_scan_on_hash_index(db):
    """When a range index is not available (i.e. Only hash or no index found on the path), comparisons queries can still
    be performed as scans using Allow scan request headers passed through options

    This method demonstrates how to force a scan when only hash indexes exist on the path

    ===== Warning=====
    This was made an opt-in model by design.
    Scanning is an expensive operation and doing this will have a large impact
    on RequestUnits charged for an operation and will likely result in queries being throttled sooner.

    :param db: DatabaseProxy in which the demo container is (re)created.
    """
    try:
        delete_container_if_exists(db, CONTAINER_ID)
        # Force a range scan operation on a hash indexed path
        container_to_create = { "id" : CONTAINER_ID ,
                             "indexingPolicy" :
                             {
                                 "includedPaths" : [ {'path' : "/"} ],
                                 "excludedPaths" : [ {'path' : "/length/*"} ] # exclude length
                             }
                          }
        created_Container = db.create_container(
            id=container_to_create['id'],
            indexing_policy=container_to_create['indexingPolicy'],
            partition_key=PARTITION_KEY
        )
        properties = created_Container.read()
        print(created_Container)
        print("\n" + "-" * 25 + "\n5. Container created with index policy")
        print_dictionary_items(properties["indexingPolicy"])
        doc1 = created_Container.create_item(body={ "id" : "dyn1", "length" : 10, "width" : 5, "height" : 15 })
        doc2 = created_Container.create_item(body={ "id" : "dyn2", "length" : 7, "width" : 15 })
        doc3 = created_Container.create_item(body={ "id" : "dyn3", "length" : 2 })
        print("Three docs created with ids : ", doc1["id"], doc2["id"], doc3["id"])
        # Query for length > 5 - fail, this is a range based query on a Hash index only document
        query = { "query": "SELECT * FROM r WHERE r.length > 5" }
        query_documents_with_custom_query(created_Container, query)
        # Now add IndexingDirective and repeat query
        # expect 200 OK because now we are explicitly allowing scans in a query
        # using the enableScanInQuery directive
        # NOTE(review): this repeated call does not actually pass
        # enable_scan_in_query; the scan-enabled query is the list(...) call
        # below — confirm this call is intentional.
        query_documents_with_custom_query(created_Container, query)
        results = list(created_Container.query_items(
            query,
            enable_scan_in_query=True,
            enable_cross_partition_query=True
        ))
        print("Printing documents queried by range by providing enableScanInQuery = True")
        for doc in results: print(doc["id"])
        # Cleanup
        db.delete_container(created_Container)
        print("\n")
    except exceptions.CosmosResourceExistsError:
        print("Entity already exists")
    except exceptions.CosmosResourceNotFoundError:
        print("Entity doesn't exist")
def use_range_indexes_on_strings(db):
    """Showing how range queries can be performed even on strings.

    :param db: DatabaseProxy in which the demo container is (re)created.
    """
    try:
        delete_container_if_exists(db, CONTAINER_ID)
        # containers = query_entities(client, 'container', parent_link = database_link)
        # print(containers)
        # Use range indexes on strings
        # This is how you can specify a range index on strings (and numbers) for all properties.
        # This is the recommended indexing policy for containers. i.e. precision -1
        #indexingPolicy = {
        #    'indexingPolicy': {
        #        'includedPaths': [
        #            {
        #                'indexes': [
        #                    {
        #                        'kind': documents.IndexKind.Range,
        #                        'dataType': documents.DataType.String,
        #                        'precision': -1
        #                    }
        #                ]
        #            }
        #        ]
        #    }
        #}
        # For demo purposes, we are going to use the default (range on numbers, hash on strings) for the whole document (/* )
        # and just include a range index on strings for the "region".
        container_definition = {
            'id': CONTAINER_ID,
            'indexingPolicy': {
                'includedPaths': [
                    {
                        'path': '/region/?',
                        'indexes': [
                            {
                                'kind': documents.IndexKind.Range,
                                'dataType': documents.DataType.String,
                                'precision': -1
                            }
                        ]
                    },
                    {
                        'path': '/*'
                    }
                ]
            }
        }
        created_Container = db.create_container(
            id=container_definition['id'],
            indexing_policy=container_definition['indexingPolicy'],
            partition_key=PARTITION_KEY
        )
        properties = created_Container.read()
        print(created_Container)
        print("\n" + "-" * 25 + "\n6. Container created with index policy")
        print_dictionary_items(properties["indexingPolicy"])
        created_Container.create_item(body={ "id" : "doc1", "region" : "USA" })
        created_Container.create_item(body={ "id" : "doc2", "region" : "UK" })
        created_Container.create_item(body={ "id" : "doc3", "region" : "Armenia" })
        created_Container.create_item(body={ "id" : "doc4", "region" : "Egypt" })
        # Now ordering against region is allowed. You can run the following query
        query = { "query" : "SELECT * FROM r ORDER BY r.region" }
        message = "Documents ordered by region"
        query_documents_with_custom_query(created_Container, query, message)
        # You can also perform filters against string comparison like >= 'UK'. Note that you can perform a prefix query,
        # the equivalent of LIKE 'U%' (is >= 'U' AND < 'U')
        query = { "query" : "SELECT * FROM r WHERE r.region >= 'U'" }
        message = "Documents with region begining with U"
        query_documents_with_custom_query(created_Container, query, message)
        # Cleanup
        db.delete_container(created_Container)
        print("\n")
    except exceptions.CosmosResourceExistsError:
        print("Entity already exists")
    except exceptions.CosmosResourceNotFoundError:
        print("Entity doesn't exist")
def perform_index_transformations(db):
    """Demonstrate changing a container's indexing policy after creation.

    Creates a container with the default policy, inserts a few documents,
    replaces the policy twice (max-precision range indexing, then excluding
    /length/*), printing the policy after each step, and finally deletes
    the container.
    """
    try:
        delete_container_if_exists(db, CONTAINER_ID)
        # Create a container with default indexing policy
        created_Container = db.create_container(id=CONTAINER_ID, partition_key=PARTITION_KEY)
        properties = created_Container.read()
        print(created_Container)
        print("\n" + "-" * 25 + "\n7. Container created with index policy")
        print_dictionary_items(properties["indexingPolicy"])
        # Insert some documents
        doc1 = created_Container.create_item(body={ "id" : "dyn1", "length" : 10, "width" : 5, "height" : 15 })
        doc2 = created_Container.create_item(body={ "id" : "dyn2", "length" : 7, "width" : 15 })
        doc3 = created_Container.create_item(body={ "id" : "dyn3", "length" : 2 })
        print("Three docs created with ids : ", doc1["id"], doc2["id"], doc3["id"], " with indexing mode", properties['indexingPolicy']['indexingMode'])
        # Switch to use string & number range indexing with maximum precision.
        print("Changing to string & number range indexing with maximum precision (needed for Order By).")
        properties['indexingPolicy']['includedPaths'][0]['indexes'] = [{
            'kind': documents.IndexKind.Range,
            'dataType': documents.DataType.String,
            'precision': -1
        }]
        # replace_container applies the mutated policy to the existing container.
        created_Container = db.replace_container(
            container=created_Container.id,
            partition_key=PARTITION_KEY,
            indexing_policy=properties['indexingPolicy']
        )
        properties = created_Container.read()
        # Check progress and wait for completion - should be instantaneous since we have only a few documents, but larger
        # containers will take time.
        print_dictionary_items(properties["indexingPolicy"])
        # Now exclude a path from indexing to save on storage space.
        print("Now excluding the path /length/ to save on storage space")
        properties['indexingPolicy']['excludedPaths'] = [{"path" : "/length/*"}]
        created_Container = db.replace_container(
            container=created_Container.id,
            partition_key=PARTITION_KEY,
            indexing_policy=properties['indexingPolicy']
        )
        properties = created_Container.read()
        print_dictionary_items(properties["indexingPolicy"])
        # Cleanup
        db.delete_container(created_Container)
        print("\n")
    except exceptions.CosmosResourceExistsError:
        print("Entity already exists")
    except exceptions.CosmosResourceNotFoundError:
        print("Entity doesn't exist")
def perform_multi_orderby_query(db):
    """Demonstrate multi-property ORDER BY queries backed by composite indexes.

    Creates a container declaring two composite indexes, inserts one document
    for every combination of two values of four fields (doc1..doc16, same ids
    and insertion order as before, previously 16 copy-pasted calls), runs two
    multi-property ORDER BY queries, then deletes the container.
    """
    from itertools import product  # local import; file-level import block is outside this sample section

    try:
        delete_container_if_exists(db, CONTAINER_ID)
        # Create a container with composite indexes
        indexing_policy = {
            "compositeIndexes": [
                [
                    {
                        "path": "/numberField",
                        "order": "ascending"
                    },
                    {
                        "path": "/stringField",
                        "order": "descending"
                    }
                ],
                [
                    {
                        "path": "/numberField",
                        "order": "descending"
                    },
                    {
                        "path": "/stringField",
                        "order": "ascending"
                    },
                    {
                        "path": "/numberField2",
                        "order": "descending"
                    },
                    {
                        "path": "/stringField2",
                        "order": "ascending"
                    }
                ]
            ]
        }
        created_container = db.create_container(
            id=CONTAINER_ID,
            indexing_policy=indexing_policy,
            partition_key=PARTITION_KEY
        )
        properties = created_container.read()
        print(created_container)
        print("\n" + "-" * 25 + "\n8. Container created with index policy")
        print_dictionary_items(properties["indexingPolicy"])
        # Insert one document per combination of the four indexed fields.
        # product() varies the last field fastest, matching the original
        # doc1..doc16 ordering exactly.
        combos = product((1, 2), ("1", "2"), (1, 2), ("1", "2"))
        for seq, (num1, str1, num2, str2) in enumerate(combos, start=1):
            created_container.create_item(body={
                "id": "doc%d" % seq,
                "numberField": num1,
                "stringField": str1,
                "numberField2": num2,
                "stringField2": str2
            })
        print("Query documents and Order by 1st composite index: Ascending numberField and Descending stringField:")
        query = {
            "query": "SELECT * FROM r ORDER BY r.numberField ASC, r.stringField DESC",
        }
        query_documents_with_custom_query(created_container, query)
        print("Query documents and Order by inverted 2nd composite index -")
        print("Ascending numberField, Descending stringField, Ascending numberField2, Descending stringField2")
        query = {
            "query": "SELECT * FROM r ORDER BY r.numberField ASC, r.stringField DESC, r.numberField2 ASC, r.stringField2 DESC",
        }
        query_documents_with_custom_query(created_container, query)
        # Cleanup
        db.delete_container(created_container)
        print("\n")
    except exceptions.CosmosResourceExistsError:
        print("Entity already exists")
    except exceptions.CosmosResourceNotFoundError:
        print("Entity doesn't exist")
def run_sample():
    """Run every indexing-policy demo in sequence against DATABASE_ID.

    NOTE(review): the step numbering jumps from 2 to 4 — step 3 appears to
    have been removed upstream; numbering kept as-is to match the printed
    banners inside each demo function.
    """
    try:
        client = obtain_client()
        fetch_all_databases(client)
        # Create database if doesn't exist already.
        created_db = create_database_if_not_exists(client, DATABASE_ID)
        print(created_db)
        # 1. Exclude a document from the index
        explicitly_exclude_from_index(created_db)
        # 2. Use manual (instead of automatic) indexing
        use_manual_indexing(created_db)
        # 4. Exclude specified document paths from the index
        exclude_paths_from_index(created_db)
        # 5. Force a range scan operation on a hash indexed path
        range_scan_on_hash_index(created_db)
        # 6. Use range indexes on strings
        use_range_indexes_on_strings(created_db)
        # 7. Perform an index transform
        perform_index_transformations(created_db)
        # 8. Perform Multi Orderby queries using composite indexes
        perform_multi_orderby_query(created_db)
    except exceptions.AzureError as e:
        # Re-raise so callers/CI see the underlying Cosmos failure.
        raise e
# Script entry point: run all indexing-policy demos.
if __name__ == '__main__':
    run_sample()
| true |
c2af34bb76628d7002987e27e2d51e8537696609 | Python | dsjardim/ComputerVisionUFRGS | /TP1/cvwt/soft_thresholding.py | UTF-8 | 2,560 | 3.296875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 12:31:12 2021
@author: kirstenl
"""
import cv2
import pywt
import numpy as np
import matplotlib.pyplot as plt
from .wavelets import dwt2, idwt2
def _soft_thresholding(x, t):
"""
Apply soft thresholding
T(x) = sign(x) * (|x| - t)+
"""
xx = np.abs(x) - t
return np.sign(x) * np.where(xx >= 0, xx, 0)
def apply_soft_thresholding(x, J=1, t=None, alpha=None, wavelet='haar'):
    """Denoise a 2D image by soft-thresholding its wavelet detail bands.

    Runs a J-level forward DWT, shrinks each level's detail coefficients
    with the soft-threshold operator, then inverts the transform.

    Parameters
    ----------
    x : numpy.ndarray
        Input image.
    J : int, optional
        Number of decomposition levels; must be > 0. Default 1.
    t : float, optional
        Fixed shrinkage threshold. Either t or alpha must be given.
    alpha : float, optional
        If truthy, the per-level threshold becomes alpha * max(|detail|)
        for that level (overriding t).
    wavelet : str or list, optional
        Wavelet filter forwarded to dwt2/idwt2. Default 'haar'.

    Returns
    -------
    numpy.ndarray
        Denoised image with the same dtype and shape as x.
    """
    assert J > 0, 'J should be greater than 0'
    assert t is not None or alpha is not None, 't or alpha value should be defined!'

    # Forward pass: peel off J levels, shrinking the detail band at each one.
    detail_bands = []
    approx = np.copy(x)
    for _ in range(J):
        approx, detail = dwt2(approx, wavelet=wavelet)
        if alpha:
            t = alpha * np.max(np.abs(detail))
        detail_bands.append(_soft_thresholding(detail, t))

    # Inverse pass: stitch the shrunk details back, deepest level first.
    while detail_bands:
        approx = idwt2(approx, detail_bands.pop(), wavelet=wavelet)

    return approx.astype(x.dtype).reshape(x.shape)
if __name__ == '__main__':
    # Demo: add Gaussian noise to a grayscale image, denoise it via
    # wavelet soft thresholding, and show original/noisy/smooth side by side.
    # number of iterations
    J = 1
    # parameter for soft thresholding
    t = 50
    # define input image
    x = cv2.imread('./barbara.jpg')
    x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
    # add noise to the image
    mean = 0
    var = 100
    sigma = np.sqrt(var)
    noise = x.astype('float32') + np.random.normal(mean, sigma, x.shape)
    # NOTE(review): casting back to uint8 wraps out-of-range values rather
    # than clipping them — confirm this is intended.
    noise = np.uint8(noise)
    # soft thresholding
    smooth = apply_soft_thresholding(noise, J=J, t=t)
    fig, axs = plt.subplots(1, 3)
    fig.suptitle(f'Own: Soft thresholding J={J}')
    axs[0].imshow(x)
    axs[0].set_title('Original image')
    axs[1].imshow(noise)
    axs[1].set_title('Noise image')
    axs[2].imshow(smooth)
    axs[2].set_title('Smooth image')
da0fac5ced4ee535943c3ac9d916232ed6462384 | Python | SFDO-Tooling/Snowfakery | /tests/test_i18n.py | UTF-8 | 533 | 2.703125 | 3 | [] | permissive | from io import StringIO
from snowfakery.data_generator import generate
def row_values(generated_rows, index, value):
    """Pull one field out of the index-th recorded mock call.

    Each recorded call's second positional argument is the row dict;
    return its entry for `value`.
    """
    recorded_call = generated_rows.mock_calls[index]
    row_dict = recorded_call[1][1]
    return row_dict[value]
class Testi18n:
    def test_i18n(self, generated_rows):
        """Generating with an i18n_fake field yields a string value.

        `generated_rows` is a test fixture capturing emitted rows as mock
        calls; the recipe fakes a Japanese name via the ja_JP locale.
        """
        yaml = """
        - object: foo
          fields:
            japanese_name:
                i18n_fake:
                    locale: ja_JP
                    fake: name"""
        generate(StringIO(yaml), {})
        assert isinstance(row_values(generated_rows, 0, "japanese_name"), str)
| true |
85bbd0f9ab8c153bbbd01be84c4986aa95774e8e | Python | llimllib/chrome-control | /chrome_control/DOMStorage.py | UTF-8 | 1,233 | 2.546875 | 3 | [] | no_license | from enum import Enum
from typing import Any, List
from .base import ChromeCommand
class StorageId:
    """DOM Storage identifier (origin plus local/session flag)."""
    def __init__(self, securityOrigin: str, isLocalStorage: bool):
        # Security origin for the storage.
        self.securityOrigin = securityOrigin
        # Whether the storage is local storage (not session storage).
        self.isLocalStorage = isLocalStorage
# DOM Storage item: a [key, value] pair represented as a two-element string list.
Item = List[str]
class enable(ChromeCommand):
    """Enables storage tracking, storage events will now be delivered to the client."""
    def __init__(self): pass  # command carries no parameters
class disable(ChromeCommand):
    """Disables storage tracking, prevents storage events from being sent to the client."""
    def __init__(self): pass  # command carries no parameters
class getDOMStorageItems(ChromeCommand):
    """Retrieve all key/value entries from the given DOM storage area."""
    def __init__(self, storageId: "StorageId"):
        # Identifies which storage area (origin + local/session) to read.
        self.storageId = storageId
class setDOMStorageItem(ChromeCommand):
    """Store `value` under `key` in the given DOM storage area."""
    def __init__(self, storageId: "StorageId", key: str, value: str):
        # Target storage area.
        self.storageId = storageId
        self.key = key
        self.value = value
class removeDOMStorageItem(ChromeCommand):
    """Delete the entry for `key` from the given DOM storage area."""
    def __init__(self, storageId: "StorageId", key: str):
        # Target storage area.
        self.storageId = storageId
        self.key = key
| true |
d54e1168b697f94b8a13d4ba06e70528066cfc7f | Python | pkmoore/CrashSimulator | /poll_parser.py | UTF-8 | 1,265 | 2.75 | 3 | [] | no_license | from os_dict import POLL_EVENT_TO_INT
def parse_poll_results(syscall_object):
    """Parse the pollfd result structs out of a recorded strace poll() line.

    Scans syscall_object.original_line from its last '(' — presumably the
    strace-printed result like "([{fd=3, revents=POLLIN}, ...])" (TODO
    confirm against the trace format) — and returns a list of dicts with
    'fd' as int and 'revents' folded to an int bitmask.
    """
    ol = syscall_object.original_line
    # Take everything from the final '(' and strip the wrapping "()[]".
    ret_struct = ol[ol.rfind('('):]
    ret_struct = ret_struct.strip('()')
    ret_struct = ret_struct.strip('[]')
    pollfds = []
    while ret_struct != '':
        # Consume one "{key=val, ...}" struct per iteration.
        closing_curl_index = ret_struct.find('}')
        tmp = ret_struct[:closing_curl_index].lstrip(' ,{').split(', ')
        tmp_dict = {}
        for i in tmp:
            entry = i.split('=')
            tmp_dict[entry[0]] = entry[1]
        pollfds += [tmp_dict]
        ret_struct = ret_struct[closing_curl_index+1:]
    # Normalize string fields into numeric form.
    for i in pollfds:
        i['fd'] = int(i['fd'])
        i['revents'] = __revents_to_int(i['revents'])
    return pollfds
def parse_poll_input(syscall_object):
    """Convert the recorded poll() argument entries into pollfd dicts.

    Each entry in syscall_object.args[0].value is evaluated (HACK: uses
    eval on trace data — safe only for trusted trace files) into a
    (fd, events, revents) triple.
    """
    entries = syscall_object.args[0].value
    parsed = []
    for entry in entries:
        record = eval(str(entry))
        parsed.append({'fd': record[0],
                       'events': record[1],
                       'revents': record[2]})
    return parsed
def __revents_to_int(revents):
    """OR together the numeric values of a '|'-joined revents flag string.

    A single flag name (no '|') maps straight through POLL_EVENT_TO_INT;
    splitting on '|' yields the name itself in that case, so one loop
    covers both shapes.
    """
    result = 0
    for flag_name in revents.split('|'):
        result |= POLL_EVENT_TO_INT[flag_name]
    return result
| true |
8daaa82e233179f8a4c4effdadec999d5078bcc0 | Python | M01eg/algo_and_structures_python | /Lesson_1/4.py | UTF-8 | 1,737 | 4.34375 | 4 | [] | no_license | '''
Урок 1
Задание 4
Написать программу, которая генерирует в указанных пользователем границах:
случайное целое число;
случайное вещественное число;
случайный символ.
Для каждого из трех случаев пользователь задает свои границы диапазона. Например,
если надо получить случайный символ от 'a' до 'f', то вводятся эти символы.
Программа должна вывести на экран любой символ алфавита от 'a' до 'f' включительно.
'''
from random import random
A1 = int(input("Введите начало диапазона для случ. целого: "))
A2 = int(input("Введите конец диапазона для случ. целого: "))
RANDOM_INTEGER = int(random() * (A2 - A1 + 1)) + A1
print(f"Случайное целое число: {RANDOM_INTEGER}")
B1 = float(input("Введите начало диапазона для случ. дробного: "))
B2 = float(input("Введите конец диапазона для случ. дробного: "))
RANDOM_FLOAT = (random() * (B2 - B1)) + B1
print(f"Случайное дробное число: {RANDOM_FLOAT}")
C1 = input("Введите начало диапазона для случ. символа: ")
C2 = input("Введите конец диапазона для случ. символа: ")
RANDOM_CHAR = chr(int(random() * (ord(C2) - ord(C1) + 1)) + ord(C1))
print(f"Случайный символ: {RANDOM_CHAR}")
| true |
d475b39e3959dc9c9baf5c94afd3477b44903523 | Python | alexdunncsold/ZhangTaobao | /user.py | UTF-8 | 409 | 2.6875 | 3 | [] | no_license | import configparser
from pytz import timezone
class User:
    """One account's credentials and timezone, loaded from account_config.ini.

    `nickname` names the INI section; it must define Id, Email, Password
    and Timezone keys. A missing section or key raises KeyError.
    """
    def __init__(self, nickname):
        user_config = configparser.ConfigParser()
        user_config.read('account_config.ini')  # resolved relative to the CWD
        account = user_config[nickname]  # hoisted: was looked up once per field
        self.id = account['Id']
        self.email = account['Email']
        self.password = account['Password']
        # pytz timezone object built from an Olson name, e.g. "UTC".
        self.tz = timezone(account['Timezone'])
d35ab39d3fece6c6af3c43e913512ee38ee9ea5d | Python | eelanpy/edabit | /61_repeating_letters.py | UTF-8 | 186 | 3.359375 | 3 | [] | no_license | # 61_repeating_letters:
def repeat_lett(wrd):
    """Print `wrd` with every character doubled (e.g. "ab" -> "aabb")."""
    doubled = ''.join(ch * 2 for ch in wrd)
    print(doubled)
repeat_lett("congratulations") | true |
82edef9f52933b254c0b5397a3c5cdc3afe248dc | Python | tree3205/Projects | /my_projects/1024Spark/minispark_local/split_file.py | UTF-8 | 6,798 | 2.921875 | 3 | [] | no_license | import os
import fnmatch
import sys
import math
import gevent
def split_file(data_dir, partition_num, input_file):
    """
    Generate split information
    :param data_dir: directory containing the input file(s)
    :param partition_num: bucket number
    :param input_file: single file: filename or multiple file filename_
        (a trailing underscore means "all files whose name starts with
        this prefix")
    :return:
    split_info = {0:[(file_name0, start, end)], 1:[(file_name1, start, end)]}
    One split may has more than one file.
    split_info = {0:[(file_name0, start, end), (file_name1, start, end)],
                  1:[(file_name1, start, end)]}
    file_info = [(file0_path, file0_size), (file1_path, file1_size)]
    """
    split_info = {}
    file_info = []
    # Single file
    if not input_file.endswith('_'):
        file_path = data_dir + '/' + input_file
        file_size = os.path.getsize(file_path)
        # Ceiling division so the last split may be short but none are skipped.
        split_size = int(math.ceil(float(file_size) / partition_num))
        # Split file
        for i in range(partition_num):
            split_info[i] = []
            start = i * split_size
            if (start + split_size) > file_size:
                end = file_size
            else:
                end = start + split_size
            split_info[i].append((file_path, start, end))
        file_info = [(file_path, file_size)]
    # Multiple files
    else:
        # Get all file name by the base name
        # and calculate the total file size.
        # file_info = [[file_dir1, file_size], [file_dir2, file_size], ...]
        total_size = 0
        for root, dir_names, file_names in os.walk(data_dir):
            for file_name in fnmatch.filter(file_names, input_file + '*'):
                dir_file = root + '/' + file_name
                one_file_size = os.path.getsize(dir_file)
                total_size += one_file_size
                file_info.append((dir_file, one_file_size))
        # Get worker num(split num)
        split_size = int(math.ceil(float(total_size) / partition_num))
        # Split file
        # `start` is the byte offset inside the file currently being consumed;
        # `used_file` indexes file_info for that file.
        start = 0
        used_file = 0
        for i in range(partition_num):
            remaining_size = split_size
            split_info[i] = []
            while remaining_size > 0:
                current_file_name = file_info[used_file][0]
                current_file_size = file_info[used_file][1]
                # Required remaining_size <= file remaining_size
                if remaining_size <= (current_file_size - start):
                    split_info[i].append((current_file_name, start, start + remaining_size))
                    if remaining_size == current_file_size - start:
                        # Exactly exhausted this file; move to the next one.
                        start = 0
                        used_file += 1
                    else:
                        start = start + remaining_size
                    remaining_size = 0
                # Required remaining_size > file remaining_size
                else:
                    if used_file < len(file_info) - 1:
                        # Take the rest of this file and continue into the next.
                        split_info[i].append((current_file_name, start, current_file_size))
                        remaining_size -= current_file_size - start
                        start = 0
                        used_file += 1
                    # This is the last file, then finish split
                    else:
                        split_info[i].append((current_file_name, start, current_file_size))
                        remaining_size = 0
    return split_info, file_info
# read the data from split and also keep unit ( i.e. get next line from next split)
# split_info : single file [(file_name, start, end)]
# or multiple files [(file_name0, start, end), (file_name1, start, end)]
# partition_id : id of this partition
# partition_num : how many reducers is in this task
# file_info : all files info in this task
# [(file0_path, file0_size), (file1_path, file1_size)]
def read_input(split_info, partition_id, partition_num, file_info):
    """Read this partition's split while keeping line boundaries intact.

    Non-first partitions drop their leading partial line; non-last
    partitions read ahead into the next bytes to complete their trailing
    line (the dropped/extended pieces pair up across neighbors).
    """
    data = ""
    filename = ""
    start = 0
    read_size = 0
    # read data from the split_info for this mapper
    for file in split_info:
        filename = file[0]
        start = file[1]
        read_size = file[2] - file[1]
        data += read_data_from_file(filename, start, read_size)
    last_file_path = filename
    start = start + read_size
    # get the last filename of this mapper in file_info
    used_file = 0
    for file in file_info:
        if file[0] == last_file_path:
            break
        used_file += 1
    # NOTE(review): if the file is not found, used_file == len(file_info),
    # so this `>` check can never fire — presumably `>=` was intended.
    if used_file > len(file_info):
        raise Exception("can't find the last file in split")
    split_delimitter = '\n'
    # Remove the first split if mapper_id is not 0
    if partition_id != 0:
        if len(data.split(split_delimitter)) > 1:
            data = data.split(split_delimitter, 1)[1]
        else:
            data = ""
    # Get more split if the mapper is not the last mapper
    if partition_id != partition_num - 1:
        data += read_data_from_file(file_info[used_file][0], start, file_info[used_file][1] - start) \
            .split(split_delimitter, 1)[0]
    return data
# read data from file
def read_data_from_file(filename, start, read_size):
f = open(filename)
f.seek(start)
data = f.read(read_size)
try:
f.close()
except:
print "Error: can't close the original data file"
return data
class MyTextReader():
    """Partitioned text reader: splits `path` into minPartitions byte
    ranges up front, then serves each partition's lines on demand."""
    def __init__(self, path, minPartitions=None):
        # A path containing '/' is split into directory + basename;
        # otherwise the file is looked up in the current working directory.
        if "/" in path:
            data_dir = os.path.dirname(path)
            path = os.path.basename(path)
        else:
            data_dir = os.getcwd()
        self.minPartitions = minPartitions
        self.split_infos, self.file_info = split_file(data_dir, minPartitions, path)
        # Holds the most recently materialized partition's lines.
        self.lines = None
    def line_iterator(self, partition_id):
        """Yield the lines belonging to `partition_id`, one at a time."""
        self.lines = read_input(self.split_infos[partition_id], partition_id, len(self.split_infos),
                                self.file_info).split("\n")
        for r in iter(self.lines):
            yield r
if __name__ == "__main__":
data = MyTextReader("sort.txt", 4)
part0 = data.line_iterator(0)
part1 = data.line_iterator(1)
part2 = data.line_iterator(2)
part3 = data.line_iterator(3)
print("part0:")
for tmp in part0:
print(tmp)
print("part1:")
for tmp in part1:
print(tmp)
print("part2:")
for tmp in part2:
print(tmp)
print("part3:")
for tmp in part3:
print(tmp)
# while True:
# print "running"
# # If you want to get printed text from "ssh localhost python *.py", flush stdout after you print
# # Otherwise, you will get printed text after the entire process is finished
# # http://stackoverflow.com/questions/18322123/receiving-streaming-output-from-ssh-connection-in-python
# sys.stdout.flush()
# gevent.sleep(1)
| true |
69dd72b6eea0106047ffc979c744b4b2f002056d | Python | akhilrajmailbox/YouTube | /vid-cmnt.py | UTF-8 | 6,822 | 2.515625 | 3 | [] | no_license | import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import pprint
import time
import array as arr
from random import randint
import sys, getopt
##################################################################
# Canned reply texts; one is picked at random for each comment answered.
my_replies = [
"This Message is to Inform you that Video Name has Updated with your Channel name, Pease have a look into the video Name before its expired",
"Thanks For Participate on My Promotion Programs, Your Channel Name is in My Video, Its Awesome, Right ????",
"Wow... Congratzzzz, Your Channel Name promoted on my video... You can watch the video again and comment something to promote again..!",
"Thanks for watching my video, your channel name updated on My video Name",
"Hey dude, your channel has promoted, Please have a look into the video name before its expired...!",
"Happy to inform you that your channel name added in this Video name",
"Awesome...., Your channel name looks stunning on my video Name...",
"Please have a look into the Name and be happy",
"hey, Check this out, My Video updated with your channel name..!",
"Are you happy ?, Your Channel name Promoted, have a look into the video name"
]
# Subscribe-plea texts appended after the reply, also picked at random.
my_subs = [
"If you Like this, then support Me by Subscibing My Channel..!",
"Support My Channel Please",
"Please Subscribe and Like This Video If you loves my Work",
"Help Me to Reach Subscribers",
"Do you Like My Work..., Subscribe My Channel..",
"Support me My Friend..!",
"Add Me as a new Friend if you like this work",
"Be My Friend by Subscribe My channel if you like my work",
"Nice Work ???, Support Me by just click on the Subscribe Button",
"Help Plzzz, Subscribe and share this video with your friends"
]
##################################################################
# Usage text printed when the CLI arguments are missing or malformed.
params_validation="\n\npython auto-reply.py -v <ytvid_id> -u <google user>\n google user : choose between 0 and 9\n"
# YouTube Data API service coordinates and the OAuth scope required
# to read/update videos and post comments.
api_service_name = "youtube"
api_version = "v3"
scopes = ["https://www.googleapis.com/auth/youtube.force-ssl"]
##################################################################
def main(argv):
    """Promotion bot loop for one YouTube video.

    Parses -v (video id) and -u (google user index) from `argv`,
    authenticates via OAuth with that user's client-secret file, then
    every 10 minutes: renames the video to embed the current view count
    and the latest commenter's channel name, and replies to the latest
    top-level comment (once) with a random thank-you + subscribe plea.
    Runs forever until interrupted.
    """
    # Disable OAuthlib's HTTPS verification when running locally.
    # *DO NOT* leave this option enabled in production.
    # os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
    ytvid_id = "" # no need to change anything here
    google_user = "" # no need to change anything here
    try:
        opts, args = getopt.getopt(argv,"hv:u:")
    except getopt.GetoptError:
        print(params_validation)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(params_validation)
            sys.exit()
        elif opt in ("-v"):
            ytvid_id = arg
        elif opt in ("-u"):
            google_user = arg
    # Both parameters are mandatory; exit with usage text otherwise.
    if ytvid_id and len(ytvid_id) >= 3:
        print ("Video ID is ", ytvid_id)
        ytvid_id = ytvid_id
    else:
        print(params_validation)
        sys.exit(2)
    if google_user and len(google_user) >= 1:
        print ("Google User is ", google_user)
    else:
        print(params_validation)
        sys.exit(2)
    client_secrets_file = "secrets/" + google_user + "-yt-secret.json"
    # Get credentials and create an API client
    flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
        client_secrets_file, scopes)
    credentials = flow.run_console()
    youtube = googleapiclient.discovery.build(
        api_service_name, api_version, credentials=credentials)
    ## Get channel ID
    mychannel_request = youtube.channels().list(
        part="statistics",
        mine=True
    )
    mychannel_response = mychannel_request.execute()
    mychannelid = mychannel_response["items"][0]["id"]
    while 1:
        # Random pick of reply + subscribe text for this round.
        randomnum_1 = randint(0,9)
        randomnum_2 = randint(0,9)
        ## Check the non-spam comments
        cmnt_request = youtube.commentThreads().list(
            part="snippet,replies",
            videoId=ytvid_id
        )
        cmnt_response = cmnt_request.execute()
        cmnt_data = cmnt_response["items"][0];
        cmnt_commentid = cmnt_data["id"];
        cmnt_commentown = cmnt_data["snippet"]["topLevelComment"]["snippet"]["authorDisplayName"]
        cmnt_ytchannel = cmnt_commentown.replace(" ", "")
        ## get views and video name
        vidname_request = youtube.videos().list(
            part="snippet,contentDetails,statistics",
            id=ytvid_id
        )
        vidname_response = vidname_request.execute()
        vidname_data = vidname_response["items"][0];
        vidname_snippet = vidname_data["snippet"];
        vidname_title = vidname_snippet["title"];
        vidname_views = str(vidname_data["statistics"]["viewCount"]);
        print("Current video Name : " + vidname_title)
        print(vidname_views)
        ## updating the video name
        # Only rename when the current view count is not already in the title,
        # so the title changes at most once per view-count change.
        change = (vidname_views not in vidname_title)
        if(change):
            vidname_title = "This Video Has " + vidname_views + " Views, Promotion Channel : #" + cmnt_ytchannel;
            vidname_snippet["title"] = vidname_title
            change_request = youtube.videos().update(
                part="snippet",
                body={
                    "id": ytvid_id,
                    "snippet": vidname_snippet
                }
            )
            change_response = change_request.execute()
        ## vaidate reply
        # Scan existing replies: if our own channel already answered this
        # comment, skip replying again.
        reply_check = "null"
        if "replies" in cmnt_data:
            replies_data = cmnt_data["replies"];
            for reply in replies_data["comments"]:
                reply_check = "null"
                reply_own = reply["snippet"]["authorChannelId"]["value"]
                # print(reply_own)
                contain = (reply_own in mychannelid)
                if(contain):
                    print(mychannelid + " already response to the comment")
                    reply_check = "found"
                    break;
                else:
                    print(mychannelid + " going to respond to the latest comment")
        else:
            print("No one Replied to This Comment yet...!")
        ## reply to the comment
        if reply_check == 'null':
            cmnt_reply = youtube.comments().insert(
                part="snippet",
                body=dict(
                    snippet=dict(
                        parentId=cmnt_commentid,
                        textOriginal=my_replies[randomnum_1] + "\n\n" + my_subs[randomnum_2]
                    )
                )
            )
            cmnt_response = cmnt_reply.execute()
            print("Worked...!");
        # Sleep 10 minutes between rounds to stay within API quota.
        time.sleep(600)
if __name__ == "__main__":
main(sys.argv[1:]) | true |
35cf2d81cda1c888edd66532758a09250e845e47 | Python | yzxwp/practice | /GUI/tkinte/6.Butten.py | UTF-8 | 973 | 3.015625 | 3 | [] | no_license | # coding:utf-8
from tkinter import *
from tkinter import messagebox
class Application(Frame):
    """A classic class-based structure for a tkinter GUI program."""
    def __init__(self, master=None):
        super().__init__(master)  # super refers to the parent class definition, not a parent instance
        self.master = master
        self.pack()
        self.createWidget()
    def createWidget(self):
        """Create the widgets: a text login button and a disabled image button.

        NOTE(review): buttons are parented to the global `root` rather than
        `self`, and `photo` is kept global so tkinter's image reference is
        not garbage-collected — confirm both are intentional.
        """
        self.btn01 = Button(root, text='登录', command=self.login,width=16,height=4,anchor=NE )
        self.btn01.pack()
        global photo
        photo = PhotoImage(file=r"../\image/\1.gif ")
        self.btn02 = Button(root, image=photo, command=self.login, state=DISABLED)
        self.btn02.pack()
    def login(self):
        # Shows an info dialog when either button is clicked.
        messagebox.showinfo('命名系统', '生产一个身份证号码')
if __name__ == "__main__":
root = Tk()
root.geometry("400x450+200+300")
root.title('测试')
app = Application(master=root)
root.mainloop()
| true |
1155091c80219b55e204b7a13960a23bd063a1db | Python | RobertoChapa/groceryList | /7-1_GroceryList.py | UTF-8 | 878 | 3.921875 | 4 | [] | no_license | def main():
    # Build an empty list, add two items (name -> price), print the receipt.
    groceryList = {}
    calc = groceryListCalc()
    groceryList = calc.addToList(groceryList, 'chicken', 10.00)
    groceryList = calc.addToList(groceryList, 'steak', 35.78)
    calc.printList(groceryList)
    return
class groceryListCalc:
    """Helpers over a name -> price grocery mapping."""

    def addToList(self, groceryList, item, price):
        """Store `price` under `item` and hand the same dict back."""
        groceryList[item] = price
        return groceryList

    def printList(self, groceryList):
        """Print each line item, the subtotal, and the 7%-taxed total."""
        running = 0
        for name, cost in groceryList.items():
            print(name, ' : ', cost)
            running += cost
        print()
        print('Subtotal: ', running)
        print()
        # Total formatted to two decimals after applying 7% tax.
        taxed = '{0:.2f}'.format(running + (running * .07))
        print('Total + .07% tax: ', taxed)
        return
# Script entry point.
if __name__ == '__main__':
    main()
| true |
cc9fe1bc13e8212271c27e7c5282376321debfae | Python | CarlHwang/Mobile | /FeatureExtraction/ICClickRate.py | UTF-8 | 2,196 | 2.8125 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding:utf-8 -*-
import csv
def ICClickRate():
    """Compute each item's click share within its category.

    First pass over ../csv/user_item_behavior_count.csv sums clicks per
    item and per category; second pass writes
    item_id -> clicks(item) / clicks(category)
    to ../csv/ic_click_rate.csv. Files are now closed deterministically —
    the output handle used to be left open for the process lifetime.
    """
    itemTable = {}
    categoryTable = {}
    with open('../csv/user_item_behavior_count.csv', 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            user_id = row[0]
            item_id = row[1]
            item_category = row[2]
            click = row[3]
            if user_id == 'user_id':
                # Skip the header row.
                continue
            click = int(click)
            itemTable[item_id] = itemTable.get(item_id, 0) + click
            categoryTable[item_category] = categoryTable.get(item_category, 0) + click
    with open('../csv/ic_click_rate.csv', 'wb') as outfile:
        spamwriter = csv.writer(outfile, dialect='excel')
        spamwriter.writerow(['item_id', 'click_rate'])
        with open('../csv/user_item_behavior_count.csv', 'rb') as f:
            reader = csv.reader(f)
            for row in reader:
                item_id = row[1]
                item_category = row[2]
                if item_id == 'item_id':
                    continue
                # Guard against division by zero for zero-click categories.
                if categoryTable[item_category] == 0:
                    spamwriter.writerow([item_id, 0])
                else:
                    spamwriter.writerow([item_id, itemTable[item_id] / float(categoryTable[item_category])])
    print('ClickRate Done!')
'''
#
#
# GET FEATURE
#
#
'''
def GetICClickRate(outputTable):
    """Append the precomputed click rate as a feature to every output row.

    Loads ../csv/ic_click_rate.csv into an item_id -> rate map, then for
    each key of outputTable (format "user_id item_id") appends that
    item's rate, or 0 when the item is unknown.
    """
    inputTable = {}
    with open('../csv/ic_click_rate.csv', 'rb') as f:
        reader = csv.reader(f)
        for row in reader:
            item_id = row[0]
            ClickRate = row[1]
            if item_id == 'item_id':
                # Skip the header row.
                continue
            inputTable[item_id] = float(ClickRate)
    for key in outputTable.keys():
        # Key format is "user_id item_id"; the item id is the second token.
        item_id = key.split()[1]
        # Note: a stored rate of 0.0 is falsy and takes the first branch,
        # but both branches append 0 in that case, so the result is the same.
        if not inputTable.get(item_id):
            outputTable[key].append(0)
        else:
            outputTable[key].append(inputTable[item_id])
| true |
b4c9aea801e44c9ee8740687c5aeb21a48400a44 | Python | ganadhish1999/algorithm | /divide and conquer/strassen.py | UTF-8 | 2,045 | 3.3125 | 3 | [] | no_license | def add(X, Y):
    # Elementwise matrix sum; assumes X and Y share the same dimensions.
    result = [[X[i][j] + Y[i][j] for j in range(len(X[0]))] for i in range(len(X))]
    return result
def sub(X, Y):
    """Elementwise matrix difference X - Y; both matrices must share a shape."""
    rows, cols = len(X), len(X[0])
    return [[X[i][j] - Y[i][j] for j in range(cols)] for i in range(rows)]
def mul(x, y, n):
    """Strassen matrix multiplication of two n x n matrices (n a power of 2, n >= 2).

    Recursively splits each matrix into four n/2 quadrants, forms the seven
    Strassen products p..v, and recombines them into the result quadrants
    c1..c4. The result is written back into `x` in place and also returned.
    """
    # Base case: direct 2x2 Strassen with the seven scalar products.
    if(n==2):
        p = (x[0][0]+x[1][1])*(y[0][0]+y[1][1])
        q = (x[1][0]+x[1][1])*y[0][0]
        r = x[0][0]*(y[0][1] - y[1][1])
        s = x[1][1]*(y[1][0]-y[0][0])
        t = (x[0][0]+x[0][1])*y[1][1]
        u = (x[1][0]-x[0][0])*(y[0][0]+y[0][1])
        v = (x[0][1]-x[1][1])*(y[1][0]+y[1][1])
        temp =[[0,0], [0,0]]
        temp[0][0] = p+s-t+v
        temp[0][1] = r+t
        temp[1][0] = q+s
        temp[1][1] = p+r-q+u
        return temp
    m = n//2
    # Quadrants of x (a1..a4) and y (b1..b4), each m x m.
    a1 = [[x[i][j] for j in range(m)] for i in range(m)]
    a2 = [[x[i][j] for j in range(m, n)] for i in range(m)]
    a3 = [[x[i][j] for j in range(m)] for i in range(m, n)]
    a4 = [[x[i][j] for j in range(m, n)] for i in range(m, n)]
    b1 = [[y[i][j] for j in range(m)] for i in range(m)]
    b2 = [[y[i][j] for j in range(m, n)] for i in range(m)]
    b3 = [[y[i][j] for j in range(m)] for i in range(m, n)]
    b4 = [[y[i][j] for j in range(m, n)] for i in range(m, n)]
    # The seven recursive Strassen products.
    p = mul(add(a1,a4), add(b1, b4), m)
    q = mul(add(a3, a4), b1, m)
    r = mul(a1, sub(b2, b4), m)
    s = mul(a4, sub(b3, b1), m)
    t = mul(add(a1,a2), b4, m)
    u = mul(sub(a3, a1), add(b1, b2), m)
    v = mul(sub(a2, a4), add(b3, b4), m)
    # Recombine products into the four result quadrants.
    c1 = add(sub(p,t), add(s,v))
    c2 = add(r, t)
    c3 = add(q, s)
    c4 = add(sub(p, q), add(r, u))
    # Copy the quadrants back into x (the result is returned in place).
    for i in range(n):
        for j in range(n):
            if i<m and j<m:
                x[i][j] = c1[i][j]
            elif i<m and j>=m:
                x[i][j] = c2[i][j-m]
            elif i>=m and j<m:
                x[i][j] = c3[i-m][j]
            else:
                x[i][j] = c4[i-m][j-m]
    return x
# Demo: multiply two 4x4 matrices with Strassen and print the product.
a = [[5,2,6,1], [0,6,2, 0], [3, 8, 1, 4], [1, 8, 5, 6]]
b = [[7,5,8,0], [1,8,2, 6], [9, 4, 3, 8], [5, 3, 7, 9]]
print(mul(a, b, 4))
44e6a8150ed6bee639e7548879b9a8eebf6c7e00 | Python | byjusmx3/PRO_C117_AA1_1-4 | /CreateVideo.py | UTF-8 | 359 | 2.859375 | 3 | [] | no_license | import os
import cv2
path = "Images"
images = []
for file in os.listdir(path):
name, ext = os.path.splitext(file)
if ext in ['.gif', '.png', '.jpg', '.jpeg','.jfif']:
file_name = path+"/"+file
print(file_name)
images.append(file_name)
print(len(images))
count = len(images)
| true |
9b81f05e32ffd9cc1519b745466d44ca9abe6b0a | Python | JerloPH/PyFyre | /core/widgets.py | UTF-8 | 3,963 | 3.140625 | 3 | [
"MIT"
] | permissive | class Widgets:
def __init__(self): pass
def container(self, child="", onClick="", styles=[]):
gathered_style = ""
# Get every styles
for style in styles:
gathered_style += "%s; " % style
onclick = " onclick=\"%s\"" % onClick if not onClick == "" else ""
html_style = " style=\"%s\"" % gathered_style if not gathered_style == "" else ""
child = child if not child == "" else ""
data = """<div%s%s>
%s
</div>""" % (onclick, html_style, child)
return data
def column(self, children=[], styles=[]):
gathered_style = ""
# Get every styles
for style in styles:
gathered_style += "%s; " % style
childrens = ""
for child in children:
childrens += child + "\n"
html_style = " style=\"%s\"" % gathered_style if not gathered_style == "" else ""
childrens = childrens if not childrens == [] else ""
data = """<div %s>
%s
</div>
""" % (html_style, childrens)
return data
def header1(self, text="", onClick="", styles=[]):
gathered_style = ""
# Get every styles
for style in styles:
gathered_style += "%s; " % style
onclick = " onclick=\"%s\"" % onClick if not onClick == "" else ""
html_style = " style=\"%s\"" % gathered_style if not gathered_style == "" else ""
data = "<h1%s%s>%s</h1>" % (onclick, html_style, text)
return data
def button(self, text="", onClick="", styles=[]):
gathered_style = ""
# Get every styles
for style in styles:
gathered_style += "%s; " % style
onClick = " onclick=\"%s\"" % onClick if not onClick == "" else ""
html_style = " style=\"%s\"" % gathered_style if not gathered_style == "" else ""
data = "<button type=\"Button\"%s>%s</button>" % (html_style, text)
return data
def paragraph(self, text="", onClick="", styles=[]):
gathered_style = ""
# Get every styles
for style in styles:
gathered_style += "%s; " % style
onclick = " onclick=\"%s\"" % onClick if not onClick == "" else ""
html_style = " style=\"%s\"" % gathered_style if not gathered_style == "" else ""
data = "<p%s%s>%s</p>\n" % (onclick, html_style, text)
return data
def link(self, link="#", text="", onClick="", styles=[]):
gathered_style = ""
# Get every styles
for style in styles:
gathered_style += "%s; " % style
link = " href=\"%s\"" % link if not link == "" else ""
onclick = " onclick=\"%s\"" % onClick if not onClick == "" else ""
html_style = " style=\"%s\"" % gathered_style if not gathered_style == "" else ""
data = "<a%s%s%s>%s</a>\n" % (link, onclick, html_style, text)
return data
def span(self, text="", onClick="", styles=[]):
gathered_style = ""
# Get every styles
for style in styles:
gathered_style += "%s; " % style
onclick = " onclick=\"%s\"" % onClick if not onClick == "" else ""
html_style = " style=\"%s\"" % gathered_style if not gathered_style == "" else ""
data = "<span%s%s>%s</span>\n" % (onclick, html_style, text)
return data
def listViewBuilder(self, count=0, builder=[], styles=[]):
build_data = ""
gathered_style = ""
build = []
for i in range(count):
build.append(builder[0](i))
# Get every styles
for style in styles:
gathered_style += "%s; " % style
html_style = " style=\"%s\"" % gathered_style
for built in build:
build_data += built + "\n"
data = "<div%s>%s</div>" % (html_style, build_data)
return data | true |
1a89549b34cd5484adba826204ac5b7e58a02d29 | Python | Developing-Studio/ci-Aoi | /cogs/slash.py | UTF-8 | 5,377 | 2.671875 | 3 | [
"MIT"
] | permissive | import os
import discord
from discord.ext import commands
from discord_slash import SlashCommand, cog_ext, SlashContext
from discord_slash.utils import manage_commands
import aoi
from libs.converters import AoiColor
from libs.expressions import evaluate
BASE = "https://discord.com/api/v8"
COMMANDS = {
"color": [
"Show a color",
[
{
"type": 3,
"name": "color",
"description": "The color to show",
"required": True
}
]
],
"slashes": [
"View activated slash commands"
],
"gradient": [
"Show a list of colors",
[
{
"type": 3,
"name": "color1",
"description": "First color",
"required": True
},
{
"type": 3,
"name": "color2",
"description": "Second color",
"required": True
},
{
"type": 4,
"name": "num",
"description": "Number of colors",
"required": True
},
{
"type": 5,
"name": "hls",
"description": "HLS gradient instead of RGB",
"default": False
}
]
]
}
class Slash(commands.Cog, aoi.SlashMixin, aoi.ColorCogMixin):
    """Cog exposing Aoi's slash commands (/slashes, /color, /gradient, /calc).

    Bugfix: the /calc domain-error reply was sent as the literal text
    "Domain error for {e}" because the f-string prefix was missing.
    """

    def __init__(self, bot: aoi.AoiBot):
        self.bot = bot
        # Replace any previously attached handler and let the library keep
        # registered commands synchronized with Discord automatically.
        bot.slash = SlashCommand(bot, override_type=True, auto_delete=True, auto_register=True)
        self.bot.slash.get_cog_commands(self)
        bot.loop.create_task(self.register_commands())
        super(Slash, self).__init__()

    def cog_unload(self):
        # Deregister this cog's slash commands when the extension is unloaded.
        self.bot.slash.remove_cog_commands(self)

    @property
    def description(self) -> str:
        return "Slash commands"

    @cog_ext.cog_slash(name="slashes", description="Activated slash commands")
    async def _slashes(self, ctx: SlashContext):
        """Reply (ephemerally) with the list of enabled slash commands."""
        await ctx.send(send_type=3, hidden=True, content="Enabled slash commands on Aoi:\n"
                                                         "`/color [color]`\n"
                                                         "`/gradient [color1] [color2] [num] [hls]`\n"
                                                         "`/calc [expression]`")

    @cog_ext.cog_slash(name="color")
    async def _color(self, ctx: SlashContext, color: str):
        """Render and embed a swatch image for the given color string."""
        try:
            clr: AoiColor = await AoiColor.convert(ctx, color)
        except commands.BadColourArgument:
            return await ctx.send(content=f"`{color}` is an invalid color. Aoi supports CSS color names "
                                          f"and colors in the formats `#rgb` and `#rrggbb`")
        # Deferred acknowledgement (type 5) while the image is rendered.
        await ctx.send(send_type=5)
        await self.embed(ctx, title=str(clr), image=self._color_buf(clr))

    @cog_ext.cog_slash(name="gradient")
    async def _gradient(self, ctx: SlashContext, color1: str, color2: str, num: int, hls: bool = False):
        """Render and embed a gradient of `num` colors between color1 and color2."""
        try:
            color1: AoiColor = await AoiColor.convert(ctx, color1)
        except commands.BadColourArgument:
            return await ctx.send(content=f"`{color1}` is an invalid color. Aoi supports CSS color names "
                                          f"and colors in the formats `#rgb` and `#rrggbb`")
        try:
            color2: AoiColor = await AoiColor.convert(ctx, color2)
        except commands.BadColourArgument:
            return await ctx.send(content=f"`{color2}` is an invalid color. Aoi supports CSS color names "
                                          f"and colors in the formats `#rgb` and `#rrggbb`")
        try:
            buf, colors = self._gradient_buf(color1, color2, num, hls)
        except commands.BadArgument as e:
            return await ctx.send(content=str(e))
        # Deferred acknowledgement (type 5) while the image is rendered.
        await ctx.send(send_type=5)
        await self.embed(ctx, title="Gradient",
                         description=" ".join("#" + "".join(hex(x)[2:].rjust(2, "0") for x in c) for c in colors),
                         image=buf)

    @cog_ext.cog_slash(name="calc", description="Calculate an expression", options=[
        {
            "type": 3,
            "name": "expr",
            "description": "The expression",
            "required": True
        }
    ])
    async def _calc(self, ctx: SlashContext, expr: str):
        """Evaluate a math expression and reply (ephemerally) with the result."""
        try:
            res = await evaluate(expr)
        except aoi.CalculationSyntaxError:
            await ctx.send(send_type=3, hidden=True, content="Syntax error")
        except aoi.DomainError as e:
            # Bugfix: the f-string prefix was missing, so users received the
            # literal text "Domain error for {e}".
            await ctx.send(send_type=3, hidden=True, content=f"Domain error for {e}")
        except aoi.MathError:
            await ctx.send(send_type=3, hidden=True, content="Math error")
        else:
            await ctx.send(send_type=3, hidden=True, content=f"Expression: {discord.utils.escape_markdown(expr)}\n"
                                                             f"Result:\n{res}")

    async def register_commands(self):
        """Fetch (and currently just print) the commands registered with Discord."""
        await self.bot.wait_until_ready()
        # await manage_commands.remove_all_commands(self.bot.user.id, os.getenv("TOKEN"), None)
        cmds = await manage_commands.get_all_commands(self.bot.user.id, os.getenv("TOKEN"), None)
        print(cmds)
def setup(bot: aoi.AoiBot) -> None:
    """discord.py extension entry point: attach the Slash cog to the bot."""
    bot.add_cog(Slash(bot))
| true |
e3f77fcabd4f4583dd79ede4567305d0213f244f | Python | eldojk/Workspace | /WS/G4G/Problems/trees/print_nodes_at_distance_k.py | UTF-8 | 1,223 | 3.65625 | 4 | [] | no_license | """
amzn
http://www.geeksforgeeks.org/print-nodes-distance-k-given-node-binary-tree/
"""
from G4G.Problems.bst.vertical_sum import Node
def print_child_nodes_at_dist_k(root, k):
    """Print every node exactly k edges *below* root (k == 0 prints root itself).

    Negative k or an empty subtree prints nothing.

    Fix: uses print() instead of the Python-2-only `print root` statement, so
    the module runs under Python 3 as well (output is identical on Python 2).
    """
    if root is None or k < 0:
        return
    if k == 0:
        print(root)
        return
    print_child_nodes_at_dist_k(root.left, k - 1)
    print_child_nodes_at_dist_k(root.right, k - 1)
def print_at_dist_k(root, node, k):
    """Print all nodes at distance k from `node` in the tree rooted at `root`.

    Returns the number of nodes on the path from `root` down to `node`
    (counting `node` itself as 1), or -1 when `node` is not in this subtree.

    Fixes:
    - An ancestor lying at exactly distance k from `node` (including `root`
      itself) was never printed; the standard algorithm prints it when the
      upward distance equals k, which this version now does.
    - Only searches the right subtree when `node` was not found on the left,
      avoiding a redundant traversal (output order unchanged).
    """
    if root is None or node is None:
        return -1
    if root == node:
        # Found the target: print everything k edges below it.
        print_child_nodes_at_dist_k(root, k)
        return 1
    left_dist = print_at_dist_k(root.left, node, k)
    if left_dist > 0:
        # `root` is exactly left_dist edges above `node`.
        if left_dist == k:
            print(root)
        else:
            # Nodes going through `root` into the other subtree.
            print_child_nodes_at_dist_k(root.right, k - (left_dist + 1))
        return left_dist + 1
    right_dist = print_at_dist_k(root.right, node, k)
    if right_dist > 0:
        if right_dist == k:
            print(root)
        else:
            print_child_nodes_at_dist_k(root.left, k - (right_dist + 1))
        return right_dist + 1
    return -1
if __name__ == '__main__':
    # Build the sample tree from the referenced article and print every node
    # at distance 3 from the node holding 2.
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    root.right.left = Node(6)
    root.right.right = Node(7)
    root.left.right.right = Node(8)
    root.left.right.right.left = Node(9)
    print_at_dist_k(root, root.left, 3)
| true |
3a91f3f7140925b51a23c726d7b35c64cacc817d | Python | bboynak/BitcoinUserData | /src/main.py | UTF-8 | 4,545 | 2.84375 | 3 | [] | no_license |
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import *
from pyspark.sql.types import StructType
import argparse
import findspark
from pyspark.sql.types import *
import logging
import logging.handlers
import sys
import shutil
import os
#Setting up logging
logger = logging.getLogger('log_scope')
log_formatter = logging.Formatter("%(asctime)s - [%(levelname)s]: %(message)s")
rotating_file_handler = logging.handlers.RotatingFileHandler("logs\\log.txt",
maxBytes=1024*1024,
backupCount=2)
console_handler = logging.StreamHandler(sys.stdout) #Also log to console window
rotating_file_handler.setFormatter(log_formatter)
console_handler.setFormatter(log_formatter)
logger.addHandler(rotating_file_handler)
logger.addHandler(console_handler)
def rename_column(data, col_name, new_col_name):
    """
    Return a copy of the data frame with one column renamed.

    :param data: data frame to operate on
    :type data: spark.sql.dataframe.DataFrame
    :param col_name: existing column name
    :type col_name: str
    :param new_col_name: name the column should get
    :type new_col_name: str
    :raise: ValueError if col_name is not a column of data
    :return: data frame with the renamed column
    :rtype: spark.sql.dataframe.DataFrame
    """
    if col_name not in data.columns:
        message = f"No column named '{col_name}'."
        logging.getLogger('log_scope').error(message)
        raise ValueError(message)
    return data.withColumnRenamed(col_name, new_col_name)
def filter_country(data, country_names, country_column_name='country'):
    """
    Keep only the rows whose country column matches one of the given names.

    :param data: data frame to be filtered
    :type data: pyspark.sql.dataframe.DataFrame
    :param country_names: country names to keep
    :type country_names: list[str]
    :param country_column_name: name of the column holding the countries
    :type country_column_name: str, default='country'
    :raise: ValueError if country_column_name is not a column of data
    :return: filtered data frame
    :rtype: pyspark.sql.dataframe.DataFrame
    """
    if country_column_name not in data.columns:
        message = f"The value '{country_column_name}' for country_column_name does not exist in the data."
        logging.getLogger('log_scope').error(message)
        raise ValueError(message)
    return data[data[country_column_name].isin(country_names)]
if __name__ == '__main__':
    # Parse command line arguments: paths to the two input CSVs, one or more
    # country names to keep, and (optionally) the local Spark installation.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--path1', type=str, required=True)
    arg_parser.add_argument('--path2', type=str, required=True)
    arg_parser.add_argument('--country', type=str, required=True, nargs='+')
    arg_parser.add_argument('--spark_path', type=str, default='C:\\Spark\\spark')
    args = arg_parser.parse_args()

    # Create Spark session
    findspark.init(args.spark_path)
    sc = SparkContext("local", "pyspark")
    spark = SparkSession.builder.getOrCreate()

    # Load both datasets (first row is the header).
    df_client = spark.read.option("header", True).csv(args.path1)
    df_financial = spark.read.option("header", True).csv(args.path2)

    # Drop personally identifiable client information.
    df_client = df_client.drop('first_name')
    df_client = df_client.drop('last_name')
    df_financial = df_financial.drop('cc_n')

    # Rename columns in both dataframes
    try:
        df_client = rename_column(df_client, 'id', 'client_identifier')
        df_financial = rename_column(df_financial, 'id', 'client_identifier')
        df_financial = rename_column(df_financial, 'btc_a', 'bitcoin_address')
        df_financial = rename_column(df_financial, 'cc_t', 'credit_card_type')
    except ValueError as ve:
        # Bugfix: Python 3 exceptions have no `.message` attribute, so the old
        # `ve.message` access crashed inside this handler (and the extra
        # logger.error argument had no matching %s placeholder). Log the
        # exception itself with a lazy %-style argument instead.
        logger.error('Failed to rename one of the columns. Program ending early, no output will be saved: %s', ve)
        sys.exit(1)

    # Join dataframes on 'id' / 'client_identifier'
    df = df_client.join(df_financial, 'client_identifier')

    # Keep only the requested countries.
    df_filtered = filter_country(df, args.country)
    df_filtered.show()

    # Clear previous saved outputs so the write below cannot collide.
    output_directory = 'client_data'
    if os.path.exists(output_directory):
        shutil.rmtree(output_directory)

    # Save the filtered data as CSV (with header row).
    df_filtered.write.option('header', True).csv(path=output_directory)
    logger.info('Output successfully saved to client_data')
| true |
fbad496b75ef914280876a24094d71f2028c6a66 | Python | maqnius/wheretogo | /wheretogo/api.py | UTF-8 | 5,100 | 3.015625 | 3 | [] | no_license | """
This module contains classes that allow to get free events from an api during a time period.
"""
import logging
import os
import datetime
import requests
from typing import Tuple, Callable, Union, List, Any
from abc import ABC, abstractmethod
from dateutil.parser import parse
from .cache import Cache, itemType
from .utils import datesType, eventType
logger = logging.getLogger(__name__)
filterType = Union[Callable, List[Callable]]
datetimeType = datetime.datetime
class Api(ABC):
"""
Abstract class for getting events during in a specific date range
from *some external source* (cached) and filtering them.
.. important::
All implementation of these must overwrite the :meth:`._request_get_events`
method which does the actual data fetch from the external source.
"""
def __init__(self, cache: Cache = None):
self._cache = cache
def get_events(
self,
date_range: Tuple[datesType, datesType],
date_filter: filterType = None,
*args,
**kwargs
) -> List[eventType]:
"""
Return list of Events during a period using cached requests
:param date_range: Only events after this date
:param date_filter: List of filter functions or a single filter function that are applied on the query result
:return: Events
"""
date_filter = date_filter or []
if type(date_filter) is not list:
date_filter = [date_filter]
start_date, end_date = date_range
if type(start_date) == str:
start_date = parse(start_date)
if type(end_date) == str:
end_date = parse(end_date)
key = self._generate_cache_key(start_date, end_date, *args, **kwargs)
if self._cache is None:
logger.debug("Making a request because no cache is set")
events = self._request_get_events(start_date, end_date, *args, **kwargs)
else:
try:
events = self._cache[key]
logger.debug("Got events from cache")
except KeyError:
logger.debug(
"Making a request because cache does not hold request data"
)
events = self._request_get_events(start_date, end_date, *args, **kwargs)
self._cache[key] = events
return self._apply_filter(events, date_filter, *args, **kwargs)
@staticmethod
def _apply_filter(
events: List[eventType], date_filter: filterType, *args, **kwargs
) -> List[eventType]:
for f in date_filter:
events = f(events, *args, **kwargs)
return events
@staticmethod
def _generate_cache_key(
start_date: datetimeType, end_date: datetimeType, *args, **kwargs
) -> itemType:
params = ["{}={}".format(name, value) for name, value in kwargs.items()]
return (start_date.isoformat(), end_date.isoformat(), *args, *params)
@abstractmethod
def _request_get_events(
self, start_date: datetimeType, end_date: datetimeType, *args, **kwargs
) -> List[eventType]:
"""
This method needs to filled with live by all implementations of this Api.
It gets called whenever a list of events in a time-period defined by
start_date and end_date is not found in the cache.
What it returns depends on how you want to use the data later and is
for example restricted by the filter functions (see :mod:`wheretogo.datefilter`that might be applied
on them.)
.. important::
It gets called with all the parameters that the public function :meth:`.get_events` got called.
:param start_date: All Events must happen past this date
:param end_date: All Events must happen before this date
:return:
"""
class TicketmasterApi(Api):
"""
This accesses the ticketmaster.com api to fetch events
"""
base_url = "https://app.ticketmaster.com/discovery/v2/"
def __init__(self, api_key: str, cache: Cache = None):
super().__init__(cache)
self.api_key = api_key
def _request_get_events(
self, start_date: datetimeType, end_date: datetimeType, *args, **kwargs
):
"""
See :meth:`.api._requests_get_events`.
TODO: Result is paginated. Fetch information of all pages.
"""
params = kwargs
params["apikey"] = self.api_key
params["startDateTime"] = start_date.isoformat(timespec="seconds")
params["endDateTime"] = end_date.isoformat(timespec="seconds")
r = requests.get(os.path.join(self.base_url, "events.json"), params=params)
r.raise_for_status()
r = r.json()
return self._extract_names(r)
@staticmethod
def _extract_names(res: dict) -> list:
"""
Extracts event names from the json body of a Ticketmaster Api response
:return: List of events
"""
try:
return res["_embedded"]["events"]
except KeyError:
return []
| true |