blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
afd0e272f53f664ee3b9139fa436e06067281f3b | fe8da1d3efa5bcc5b9833bd1358275fb517f1060 | /facenet-pytorch/models/tensorflow2pytorch.py | 359e6d245ca30b8a48d0f5f7db120f22cd4d1dd8 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | permissive | juliagong/sketch2face | d57a95a4eea9bcdafff88f801cee7b0bb740049e | 40b7f1ee129dc0ff14c4d3a4e3479a7ee5439296 | refs/heads/master | 2023-05-27T08:22:12.110124 | 2020-06-30T02:51:15 | 2020-06-30T02:51:15 | 192,847,872 | 13 | 2 | NOASSERTION | 2023-05-22T22:16:07 | 2019-06-20T04:22:22 | Jupyter Notebook | UTF-8 | Python | false | false | 11,619 | py | from dependencies.facenet.src import facenet
from dependencies.facenet.src.models import inception_resnet_v1 as tf_mdl
import tensorflow as tf
import torch
import json
import os
from models.inception_resnet_v1 import InceptionResNetV1
def import_tf_params(tf_mdl_dir, sess):
    """Import tensorflow model from save directory.
    Arguments:
        tf_mdl_dir {str} -- Location of protobuf, checkpoint, meta files.
        sess {tensorflow.Session} -- Tensorflow session object.
    Returns:
        (list, list, list) -- Tuple of lists containing the layer names,
            parameter arrays as numpy ndarrays, parameter shapes.
    """
    print('\nLoading tensorflow model\n')
    facenet.load_model(tf_mdl_dir)
    print('\nGetting model weights\n')
    # NOTE(review): only *trainable* variables are exported here — assumes
    # every parameter needed downstream (incl. batch-norm stats consumed by
    # load_tf_batchNorm) shows up in tf.trainable_variables(); confirm.
    tf_layers = tf.trainable_variables()
    tf_params = sess.run(tf_layers)
    tf_shapes = [p.shape for p in tf_params]
    tf_layers = [l.name for l in tf_layers]
    # Dump a {variable_name: shape} manifest next to the checkpoint for debugging.
    with open(os.path.join(tf_mdl_dir, 'layer_description.json'), 'w') as f:
        json.dump({l: s for l, s in zip(tf_layers, tf_shapes)}, f)
    return tf_layers, tf_params, tf_shapes
def get_layer_indices(layer_lookup, tf_layers):
    """Match pytorch layer attributes to tensorflow variable indices.

    Arguments:
        layer_lookup {dict} -- Maps pytorch attribute names to lists whose
            first element is a (partial) tensorflow variable name; any
            further elements are carried through untouched.
        tf_layers {list} -- Tensorflow variable names.

    Returns:
        dict -- Same keys as *layer_lookup*; each value is the original list
            with one extra element appended: the list of indices of
            *tf_layers* entries containing the partial name.
    """
    matched = {}
    for attr_name, lookup in layer_lookup.items():
        inds = [idx for idx, var_name in enumerate(tf_layers) if lookup[0] in var_name]
        matched[attr_name] = lookup + [inds]
    return matched
def load_tf_batchNorm(weights, layer):
    """Copy tensorflow batch-norm parameters into an nn.BatchNorm module.

    Expected TF ordering is [beta, moving_mean, moving_variance]; no gamma
    is loaded — the scale (weight) is reset to ones instead.

    Arguments:
        weights {list} -- Tensorflow parameters.
        layer {torch.nn.Module} -- nn.BatchNorm.
    """
    beta, moving_mean, moving_var = weights[0], weights[1], weights[2]
    layer.bias.data = torch.tensor(beta).view(layer.bias.data.shape)
    layer.weight.data = torch.ones_like(layer.weight.data)
    layer.running_mean = torch.tensor(moving_mean).view(layer.running_mean.shape)
    layer.running_var = torch.tensor(moving_var).view(layer.running_var.shape)
def load_tf_conv2d(weights, layer):
    """Copy a tensorflow conv kernel (and optional bias) into nn.Conv2d.

    TF stores kernels as (H, W, in_channels, out_channels); pytorch wants
    (out_channels, in_channels, H, W), hence the permute. *weights* is
    either a bare kernel array or a [kernel, bias] pair.

    Arguments:
        weights {list|ndarray} -- Tensorflow parameters.
        layer {torch.nn.Module} -- nn.Conv2d.
    """
    if isinstance(weights, list):
        if len(weights) == 2:
            layer.bias.data = torch.tensor(weights[1]).view(layer.bias.data.shape)
        weights = weights[0]
    kernel = torch.tensor(weights).permute(3, 2, 0, 1)
    layer.weight.data = kernel.view(layer.weight.data.shape)
def load_tf_basicConv2d(weights, layer):
    """Load tensorflow weights into a grouped Conv2d+BatchNorm unit.

    The first array is the conv kernel; the remaining arrays are the
    batch-norm parameters.

    Arguments:
        weights {list} -- Tensorflow parameters.
        layer {torch.nn.Module} -- Object containing Conv2d+BatchNorm.
    """
    conv_weights, bn_weights = weights[0], weights[1:]
    load_tf_conv2d(conv_weights, layer.conv)
    load_tf_batchNorm(bn_weights, layer.bn)
def load_tf_linear(weights, layer):
    """Copy a tensorflow dense layer into nn.Linear.

    TF stores the kernel as (in_features, out_features); pytorch stores
    (out_features, in_features), hence the transpose. *weights* is either
    a bare kernel array or a [kernel, bias] pair.

    Arguments:
        weights {list|ndarray} -- Tensorflow parameters.
        layer {torch.nn.Module} -- nn.Linear.
    """
    if isinstance(weights, list):
        if len(weights) == 2:
            layer.bias.data = torch.tensor(weights[1]).view(layer.bias.data.shape)
        weights = weights[0]
    kernel = torch.tensor(weights).permute(1, 0)
    layer.weight.data = kernel.view(layer.weight.data.shape)
# High-level parameter-loading functions:
def load_tf_block35(weights, layer):
    """Load one Block35 module: branch0 (1 conv), branch1 (2 convs),
    branch2 (3 convs) — 4 arrays per conv+bn unit — plus the final
    2-array merge conv."""
    units = [
        layer.branch0,
        layer.branch1[0], layer.branch1[1],
        layer.branch2[0], layer.branch2[1], layer.branch2[2],
    ]
    for pos, unit in enumerate(units):
        load_tf_basicConv2d(weights[4 * pos:4 * pos + 4], unit)
    load_tf_conv2d(weights[24:26], layer.conv2d)
def load_tf_block17_8(weights, layer):
    """Load one Block17/Block8 module: branch0 (1 conv) + branch1 (3 convs),
    4 arrays each, plus the final 2-array merge conv."""
    units = [layer.branch0, layer.branch1[0], layer.branch1[1], layer.branch1[2]]
    for pos, unit in enumerate(units):
        load_tf_basicConv2d(weights[4 * pos:4 * pos + 4], unit)
    load_tf_conv2d(weights[16:18], layer.conv2d)
def load_tf_mixed6a(weights, layer):
    """Load the 16 Mixed_6a parameter arrays: branch0 (1 conv unit) and
    branch1 (3 conv units), 4 arrays per unit."""
    if len(weights) != 16:
        raise ValueError(f'Number of weight arrays ({len(weights)}) not equal to 16')
    load_tf_basicConv2d(weights[0:4], layer.branch0)
    for k in range(3):
        load_tf_basicConv2d(weights[4 + 4 * k:8 + 4 * k], layer.branch1[k])
def load_tf_mixed7a(weights, layer):
    """Load the 28 Mixed_7a parameter arrays: branch0 and branch1 with two
    conv units each, branch2 with three — 4 arrays per unit."""
    if len(weights) != 28:
        raise ValueError(f'Number of weight arrays ({len(weights)}) not equal to 28')
    chunks = [weights[k:k + 4] for k in range(0, 28, 4)]
    load_tf_basicConv2d(chunks[0], layer.branch0[0])
    load_tf_basicConv2d(chunks[1], layer.branch0[1])
    load_tf_basicConv2d(chunks[2], layer.branch1[0])
    load_tf_basicConv2d(chunks[3], layer.branch1[1])
    load_tf_basicConv2d(chunks[4], layer.branch2[0])
    load_tf_basicConv2d(chunks[5], layer.branch2[1])
    load_tf_basicConv2d(chunks[6], layer.branch2[2])
def load_tf_repeats(weights, layer, rptlen, subfun):
    """Split *weights* into consecutive chunks of *rptlen* arrays and load
    chunk i into the sub-module ``getattr(layer, str(i))`` via *subfun*."""
    if len(weights) % rptlen != 0:
        raise ValueError(f'Number of weight arrays ({len(weights)}) not divisible by {rptlen}')
    for block_idx in range(len(weights) // rptlen):
        chunk = weights[block_idx * rptlen:(block_idx + 1) * rptlen]
        subfun(chunk, getattr(layer, str(block_idx)))
def load_tf_repeat_1(weights, layer):
    """Load the repeated Block35 stack (26 arrays per block: 6 conv+bn units + merge conv)."""
    load_tf_repeats(weights, layer, 26, load_tf_block35)
def load_tf_repeat_2(weights, layer):
    """Load the repeated Block17 stack (18 arrays per block: 4 conv+bn units + merge conv)."""
    load_tf_repeats(weights, layer, 18, load_tf_block17_8)
def load_tf_repeat_3(weights, layer):
    """Load the repeated Block8 stack (same 18-array layout as Block17)."""
    load_tf_repeats(weights, layer, 18, load_tf_block17_8)
def test_loaded_params(mdl, tf_params, tf_layers):
"""Check each parameter in a pytorch model for an equivalent parameter
in a list of tensorflow variables.
Arguments:
mdl {torch.nn.Module} -- Pytorch model.
tf_params {list} -- List of ndarrays representing tensorflow variables.
tf_layers {list} -- Corresponding list of tensorflow variable names.
"""
tf_means = torch.stack([torch.tensor(p).mean() for p in tf_params])
for name, param in mdl.named_parameters():
pt_mean = param.data.mean()
matching_inds = ((tf_means - pt_mean).abs() < 1e-8).nonzero()
print(f'{name} equivalent to {[tf_layers[i] for i in matching_inds]}')
def compare_model_outputs(pt_mdl, sess, test_data):
    """Given some testing data, compare the output of pytorch and tensorflow models.
    Arguments:
        pt_mdl {torch.nn.Module} -- Pytorch model.
        sess {tensorflow.Session} -- Tensorflow session object.
        test_data {torch.Tensor} -- Pytorch tensor.
    """
    print('\nPassing test data through TF model\n')
    # Fetch the frozen graph's endpoints by their well-known tensor names.
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    # phase_train=False puts the TF graph in inference mode.
    feed_dict = {images_placeholder: test_data.numpy(), phase_train_placeholder: False}
    tf_output = torch.tensor(sess.run(embeddings, feed_dict=feed_dict))
    print(tf_output)
    print('\nPassing test data through PT model\n')
    # The TF graph consumes NHWC; the pytorch model expects NCHW, hence the permute.
    pt_output = pt_mdl(test_data.permute(0, 3, 1, 2))
    print(pt_output)
    # Frobenius norm of the element-wise difference over the whole batch.
    distance = (tf_output - pt_output).norm()
    print(f'\nDistance {distance}\n')
def load_tf_model_weights(mdl, layer_lookup, tf_mdl_dir):
    """Load tensorflow parameters into a pytorch model.
    Arguments:
        mdl {torch.nn.Module} -- Pytorch model.
        layer_lookup {[type]} -- Dictionary mapping pytorch attribute names to (partial)
            tensorflow variable names, and a function suitable for loading weights.
            Expects dict of the form {'attr': ['tf_name', function]}.
        tf_mdl_dir {str} -- Location of protobuf, checkpoint, meta files.
    """
    tf.reset_default_graph()
    with tf.Session() as sess:
        tf_layers, tf_params, tf_shapes = import_tf_params(tf_mdl_dir, sess)
        # info == [tf_name_prefix, loader_fn, matching_variable_indices];
        # the index list is appended by get_layer_indices.
        layer_info = get_layer_indices(layer_lookup, tf_layers)
        for layer_name, info in layer_info.items():
            print(f'Loading {info[0]}/* into {layer_name}')
            weights = [tf_params[i] for i in info[2]]
            layer = getattr(mdl, layer_name)
            info[1](weights, layer)
        # Heuristic mean-matching report, then a numeric TF-vs-PT comparison
        # on a random batch of 5 NHWC images.
        test_loaded_params(mdl, tf_params, tf_layers)
        compare_model_outputs(mdl, sess, torch.randn(5, 160, 160, 3).detach())
def tensorflow2pytorch():
    """Convert the two pretrained facenet TF checkpoints (VGGFace2 and
    CASIA-Webface) into pytorch state dicts.

    For each checkpoint three files are written next to the TF model dir:
    the full state dict, the logits only, and the features (logits stripped).
    """
    # Maps InceptionResNetV1 attribute -> [TF variable-name prefix, loader fn].
    layer_lookup = {
        'conv2d_1a': ['InceptionResnetV1/Conv2d_1a_3x3', load_tf_basicConv2d],
        'conv2d_2a': ['InceptionResnetV1/Conv2d_2a_3x3', load_tf_basicConv2d],
        'conv2d_2b': ['InceptionResnetV1/Conv2d_2b_3x3', load_tf_basicConv2d],
        'conv2d_3b': ['InceptionResnetV1/Conv2d_3b_1x1', load_tf_basicConv2d],
        'conv2d_4a': ['InceptionResnetV1/Conv2d_4a_3x3', load_tf_basicConv2d],
        'conv2d_4b': ['InceptionResnetV1/Conv2d_4b_3x3', load_tf_basicConv2d],
        'repeat_1': ['InceptionResnetV1/Repeat/block35', load_tf_repeat_1],
        'mixed_6a': ['InceptionResnetV1/Mixed_6a', load_tf_mixed6a],
        'repeat_2': ['InceptionResnetV1/Repeat_1/block17', load_tf_repeat_2],
        'mixed_7a': ['InceptionResnetV1/Mixed_7a', load_tf_mixed7a],
        'repeat_3': ['InceptionResnetV1/Repeat_2/block8', load_tf_repeat_3],
        'block8': ['InceptionResnetV1/Block8', load_tf_block17_8],
        'last_linear': ['InceptionResnetV1/Bottleneck/weights', load_tf_linear],
        'last_bn': ['InceptionResnetV1/Bottleneck/BatchNorm', load_tf_batchNorm],
        'logits': ['Logits', load_tf_linear],
    }

    print('\nLoad VGGFace2-trained weights and save\n')
    mdl = InceptionResNetV1(num_classes=8631).eval()
    _convert_and_save(mdl, layer_lookup, 'data/20180402-114759', 'vggface2')

    print('\nLoad CASIA-Webface-trained weights and save\n')
    mdl = InceptionResNetV1(num_classes=10575).eval()
    _convert_and_save(mdl, layer_lookup, 'data/20180408-102900', 'casia-webface')


def _convert_and_save(mdl, layer_lookup, tf_mdl_dir, data_name):
    """Load the TF checkpoint in *tf_mdl_dir* into *mdl*, then save
    '<dir>-<name>.pt', '...-logits.pt' and '...-features.pt'.

    Factored out of tensorflow2pytorch, which previously duplicated this
    block verbatim for each dataset.
    """
    load_tf_model_weights(mdl, layer_lookup, tf_mdl_dir)
    state_dict = mdl.state_dict()
    torch.save(state_dict, f'{tf_mdl_dir}-{data_name}.pt')
    torch.save(
        {
            'logits.weight': state_dict['logits.weight'],
            'logits.bias': state_dict['logits.bias'],
        },
        f'{tf_mdl_dir}-{data_name}-logits.pt'
    )
    state_dict.pop('logits.weight')
    state_dict.pop('logits.bias')
    torch.save(state_dict, f'{tf_mdl_dir}-{data_name}-features.pt')
| [
"juliaxgong@gmail.com"
] | juliaxgong@gmail.com |
6e8f312ce8d26da7d371c9bd295ee0598f010704 | 5cc1296f10af0d65691fd01a23221d6d85f4deff | /cotizacion/migrations/0009_auto_20150805_1400.py | f213b7ccb1a13cf363c1195baf3b10f04e54fea3 | [] | no_license | yusnelvy/mtvmcotizacion | e52b58fe8c50d3921d36490084de328c52e4e9ea | 07d2bd5f36350b149c16a0aa514bb610b0cd3e18 | refs/heads/master | 2016-09-05T23:31:15.800940 | 2015-11-07T13:12:30 | 2015-11-07T13:12:30 | 35,440,629 | 0 | 0 | null | 2015-12-18T16:16:23 | 2015-05-11T18:01:47 | JavaScript | UTF-8 | Python | false | false | 1,033 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration (2015-08-05): re-declare weight/capacity
    columns as DecimalField(max_digits=8, decimal_places=3)."""
    dependencies = [
        ('cotizacion', '0008_auto_20150727_1207'),
    ]
    operations = [
        migrations.AlterField(
            model_name='tiempo_carga',
            name='peso_max',
            field=models.DecimalField(blank=True, default=0.0, max_digits=8, decimal_places=3),
        ),
        migrations.AlterField(
            model_name='tiempo_carga',
            name='peso_min',
            field=models.DecimalField(blank=True, default=0.0, max_digits=8, decimal_places=3),
        ),
        migrations.AlterField(
            model_name='vehiculo',
            name='capacidad_peso',
            field=models.DecimalField(max_digits=8, decimal_places=3),
        ),
        migrations.AlterField(
            model_name='vehiculo',
            name='capacidad_volumen',
            field=models.DecimalField(max_digits=8, decimal_places=3),
        ),
    ]
| [
"yusnelvy@gmail.com"
] | yusnelvy@gmail.com |
ca2ae996f994c2921230d0d35c9f69c38a27eabf | 877c8c11143be609c3105d6f29a18f992cca5cfc | /0064_20190415_204400.py | 096608f9dc3ad2ee5fa514fd243a57b9cbb9e5c6 | [] | no_license | IvanaGyro/LeetCode-Answer | 8a0b0c56d7d751002f430f676280f6ee9dba1939 | aab9b4e6fda821763ce54e4f580a39c622971349 | refs/heads/master | 2020-05-01T17:53:15.651545 | 2020-04-18T11:49:20 | 2020-04-18T11:49:20 | 177,611,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
if not grid or not grid[0]:
return 0
if len(grid) > 1:
for i in range(1, len(grid)):
grid[i][0] += grid[i - 1][0]
if len(grid[0]) > 1:
for j in range(1, len(grid[0])):
grid[0][j] += grid[0][j - 1]
if len(grid) > 1 and len(grid[0]) > 1:
for i in range(1, len(grid)):
for j in range(1, len(grid[0])):
grid[i][j] += min(grid[i-1][j], grid[i][j-1])
return grid[-1][-1] | [
"iven00000000@gmail.com"
] | iven00000000@gmail.com |
0f606882ce06c84c1d476970b5e536632bad59e4 | b5aea44506bd21fcd7b259abb372d93cca5837a4 | /income_classification.py | 167d494b05429e52f77aec7108da545266c93fff | [] | no_license | programmingknowledege/MachineLearning | b5135c996c6c103852023a6a33f01d5d930baa17 | 37c523a3dd107fa8a22ef268a9ee9f64c90afaca | refs/heads/master | 2022-11-10T08:48:15.519752 | 2020-06-24T17:23:05 | 2020-06-24T17:23:05 | 274,725,891 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | import pandas as pd
from sklearn.preprocessing import LabelEncoder
# NOTE(review): absolute Windows path — only runs on the author's machine.
data=pd.read_csv("C:\\Users\\kunal\\Downloads\\income-classification\\income_evaluation.csv")
print(data.columns)
le_income=LabelEncoder()
# The CSV column name carries a leading space (" income"); encode it to ints.
data[" income"]=le_income.fit_transform(data[" income"])
print(data[" income"].unique())
"bafnakunal1998@gmail.com"
] | bafnakunal1998@gmail.com |
1591371310314083e2b8c2848fe393223b425cc9 | b50d4539a08d18839f7260dd5283eced8a40f932 | /project/urls.py | 29be03a36c2098adf0b01931dddd988f236ccb3f | [] | no_license | walid-brahim/django-class | 6b8739b1c370489182e2662d8f06e5c750ab65e5 | 80ad2264c09adb3fec287806be7b79a109abe9e2 | refs/heads/master | 2022-11-17T05:51:00.875416 | 2020-07-15T11:43:41 | 2020-07-15T11:43:41 | 279,705,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path , include
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('blog/', include('post.urls')),  # delegate /blog/* to the post app
]
| [
"wbrahim1899@gmail.com"
] | wbrahim1899@gmail.com |
d510a984109e30d272424766c0f4ceedc20d77e2 | ec5c35ac5163c4e81262a81a6a6c46667c01733d | /server/api.py | dfdfa338713c8c53b8fe3fb180871a407ed32b13 | [] | no_license | kotawiw/bytedance-exercise-2 | 27b32d81aa7e8040c1c8448acbe9c4ff20ff5b26 | 8db190487a6490ec852d8418d93ba62251a5437f | refs/heads/master | 2022-12-24T00:04:53.047395 | 2020-09-23T11:48:13 | 2020-09-23T11:48:13 | 297,948,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,826 | py | from flask import Blueprint, request, abort, jsonify
from flask import g
from server.auth import login_required
from server.models.users import User
from server.models.events import Event
from server.models.events import EventRegistration
bp = Blueprint("api", __name__, url_prefix="/api")
@bp.route("/events", methods=("GET",))
def query_events():
    """List events, paginated via ?offset= and ?limit= query parameters."""
    page_args = {
        "offset": request.args.get("offset", default=0, type=int),
        "limit": request.args.get("limit", default=10, type=int),
    }
    total, matching = Event.query_events(**page_args)
    payload = [event_output(evt) for evt in matching]
    return jsonify(totalCount=total, values=payload)
@bp.route("/events", methods=("POST",))
@login_required
def create_event():
    """Create a new event from the JSON body, owned by the logged-in user."""
    new_event = Event.create(g.user, request.json)
    return event_output(new_event)
@bp.route("/event/<string:event_id>", methods=("GET",))
def get_event(event_id):
    """Return one event by its public identifier, or 404 if unknown."""
    event = Event.by_identifier(event_id)
    if event:
        return event_output(event)
    return abort(404, 'Event not found')
@bp.route("/event/<string:event_id>/registrations", methods=("GET",))
def get_registrations(event_id):
    """List the registrations for an event (404 if the event is unknown)."""
    event = Event.by_identifier(event_id)
    if not event:
        return abort(404, 'Event not found')
    outputs = [registration_output(reg) for reg in EventRegistration.by_event(event)]
    return jsonify(outputs)
@bp.route("/event/<string:event_id>/registrations", methods=("PUT",))
def register_event(event_id):
    """Register the logged-in user for an event (404 unknown event, 401 anonymous)."""
    event = Event.by_identifier(event_id)
    if not event:
        return abort(404, 'Event not found')
    current_user = g.user
    if not current_user:
        return abort(401, 'Please login to register for an event')
    return registration_output(EventRegistration.register(event, current_user))
@bp.route("/event/<string:event_id>/registrations", methods=("DELETE",))
def unregister_event(event_id):
    """Remove the logged-in user's registration for an event.

    Checks, in order: authentication (401), event existence (404),
    registration existence (404).
    """
    current_user = g.user
    if not current_user:
        return abort(401, 'Please login to unregister for an event')
    event = Event.by_identifier(event_id)
    if not event:
        return abort(404, 'Event not found')
    registration = EventRegistration.by_event_user(event, current_user)
    if not registration:
        return abort(404, 'Event registration not found')
    EventRegistration.unregister(registration)
    return registration_output(registration)
def event_output(event: Event):
    """Serialize an Event into the JSON shape used by the API responses."""
    return {
        'id': event.identifier,
        'name': event.name,
        'location': event.location,
        'description': event.description,
        'startTimestamp': event.start_timestamp,
        'endTimestamp': event.end_timestamp,
    }
def registration_output(registration: EventRegistration):
    """Serialize a registration; currently exposes only the user's email.

    Performs one User lookup per registration.
    """
    # Todo: De-normalize registration info to include user email
    registrant = User.query.get(registration.user_id)
    return {'email': registrant.email}
| [
"you@example.com"
] | you@example.com |
fa46caa1f1f3f3becac7ffa22defea3a911bab75 | ade138f110f2a311a6d2b425be33b5691fe8bd11 | /src/regex/patterns.py | f07bf814760ef45cc07ba4723857880fd075dde6 | [] | no_license | colinbazzano/learning-python | 68f6c7435691834b7c6e2dd6ac43872b76c94aee | 118c51d8b4a16c571584457bbadf97041c23953d | refs/heads/master | 2022-12-10T12:59:52.122819 | 2020-03-22T17:26:29 | 2020-03-22T17:26:29 | 247,514,583 | 1 | 0 | null | 2022-12-08T03:51:47 | 2020-03-15T17:21:34 | Python | UTF-8 | Python | false | false | 543 | py | """patterns
patterns are a powerful way to use Regex
to learn more
regex101.com
to practice
https://regexone.com/
email validation r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
"""
import re

# Email shape: local part, '@', domain, '.', TLD (anchored both ends).
pattern = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
string = 'fake_email@email.org'

# Password: at least 8 characters drawn from letters, digits, or $%#@.
pattern2 = re.compile(r"[a-zA-Z0-9$%#@]{8,}")
password = 'difaj$wEDJO%sjdi'

a = pattern.search(string)
print(a)

check = pattern2.fullmatch(password)
print(check)
"colinbazzano@Colins-MacBook-Pro.local"
] | colinbazzano@Colins-MacBook-Pro.local |
b9950dd4f6bb688de78a9a92c88f0ae70755ed6e | 8f6a9ff4c63fd24d145088077d5da1c3e4caaa3a | /programming trade/easyhistory - download 备份修改/easyhistroy/history.py | ea5e4c058c768ff89e5b70d20e111adb96f0d2fc | [] | no_license | liaofuwei/pythoncoding | 6fd2afba0d27c4a4bbb4b2d321b3fa402a60d6fe | 966bd99459be933cf48287412a40e0c7a3d0b8e5 | refs/heads/master | 2021-07-15T10:34:57.701528 | 2017-10-10T05:27:13 | 2017-10-10T05:27:13 | 107,651,470 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,621 | py | # coding:utf-8
import os
import pandas as pd
import talib
class Indicator(object):
def __init__(self, stock_code, history):
self.stock_code = stock_code
self.history = history
def load_csv_files(self, path):
file_list = [f for f in os.listdir(path) if f.endswith('.csv')]
for stock_csv in file_list:
csv_ext_index_start = -4
stock_code = stock_csv[:csv_ext_index_start]
self.market[stock_code] = pd.read_csv(stock_csv, index_col='date')
def __getattr__(self, item):
def talib_func(*args, **kwargs):
str_args = ''.join(map(str, args))
if self.history.get(item + str_args) is not None:
return self.history
func = getattr(talib, item)
res_arr = func(self.history['close'].values, *args, **kwargs)
self.history[item + str_args] = res_arr
return self.history
return talib_func
class History(object):
def __init__(self, dtype='D', path='history'):
self.market = dict()
data_path = os.path.join(path, 'day', 'data')
self.load_csv_files(data_path)
def load_csv_files(self, path):
file_list = [f for f in os.listdir(path) if f.endswith('.csv')]
for stock_csv in file_list:
csv_ext_index_start = -4
stock_code = stock_csv[:csv_ext_index_start]
csv_path = os.path.join(path, stock_csv)
self.market[stock_code] = Indicator(stock_code, pd.read_csv(csv_path, index_col='date'))
def __getitem__(self, item):
return self.market[item]
| [
"459193023@qq.com"
] | 459193023@qq.com |
f0c5be2e54cfd6b4c05e9c5ed5ce3fd666c97b30 | ce2feffb3b8c3433eefb596aacbb9b73ff8bb3bb | /Desktop/TAGTOSHOP/Chatbot/qrcode-bot/fb_chatbot/models.py | e932d9c0e6e0c6d56aa0984ccaa6a386132d3f42 | [] | no_license | RamonRR93/MicrocontrollerPrisonProject | a6fe0fa8de6b05a490514bec3857918639afe138 | 3f7e65e29440744fa37e178737e18edc157c7f70 | refs/heads/master | 2020-12-24T09:56:22.248517 | 2017-09-29T14:50:38 | 2017-09-29T14:50:38 | 73,256,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class FacebookUser(models.Model):
    """Facebook profile data for a chatbot user, keyed by their Facebook id."""
    fb_id = models.IntegerField(primary_key=True)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    profile_pic = models.CharField(max_length=255)
    locale = models.CharField(max_length=255)
    gender = models.CharField(max_length=255)
    timezone = models.CharField(max_length=255)
    # Whether the profile fields above have been filled in (default False).
    has_fb_data = models.BooleanField(default=False)
    def __str__(self): # __unicode__ on Python 2
        return self.first_name
class QRCode(models.Model):
    """A generated QR code: a rendering style, its URL, and the encoded data."""
    style = models.CharField(max_length=255, default="Text")
    url = models.CharField(max_length=255)
    data = models.CharField(max_length=255)
    def __str__(self): # __unicode__ on Python 2
        # Bug fix: this model has no `name` field — the previous
        # `return self.name` raised AttributeError; use the payload instead.
        return self.data
class Scan(models.Model):
    """Records that a Facebook user scanned a given QR code."""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    fbuser = models.ForeignKey(FacebookUser, db_index=False)
    qrcode = models.ForeignKey(QRCode, db_index=False)
    # auto_now (not auto_now_add): updated on every save.
    date = models.DateTimeField(auto_now=True)
| [
"rr3088@columbia.edu"
] | rr3088@columbia.edu |
77e0b14f23eeea5597c04077de558897a24bc4d1 | d62ab6cb9243e790ac9537274f5bfa424b154d45 | /04-Flappy_bird_single/Brid_DQN.py | 7c1eec3fe592b3fdb4acf78eb6e18efab6b481d3 | [] | no_license | HeatedMajin/RL_experiment_note | cd06916b296f8ffcce96ebfe8250d14cf81ecf0b | 445a76c9c85895fe5bcda83cd0d37d7cbfd0107f | refs/heads/master | 2022-11-29T23:56:45.188342 | 2020-08-10T12:52:56 | 2020-08-10T12:52:56 | 278,829,649 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,562 | py | from collections import deque
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Bird_DQN(nn.Module):
    """Deep Q network for flappy bird.

    Given a stack of the three most recent 72x128 frames, the network
    outputs a Q value per action (two actions) and maintains an
    experience-replay memory of observed transitions.
    """
    # One blank 72x128 frame; three stacked copies form the initial state.
    empty_frame = np.zeros((72, 128), dtype=np.float32)
    empty_state = np.stack((empty_frame, empty_frame, empty_frame), axis=0)
    def __init__(self, epsilon, mem_size):
        """epsilon -- probability of taking a random (exploration) action.
        mem_size -- maximum number of transitions kept in the replay memory.
        """
        super(Bird_DQN, self).__init__()
        self.epsilon = epsilon
        self.actions_num = 2
        self.buildDQN()
        self.trainable = None  # set later via set_trainable()
        self.mem_size = mem_size
        self.replay_mem = deque()  # (state, action, reward, next_state, done) tuples
        self.time_step = 0
    def buildDQN(self):
        """Build the conv/fc layers mapping (N, 3, 72, 128) input to (N, 2) Q values."""
        # conv2 output feature map for a 72x128 input: 32 channels, 16x9 spatial.
        self.map_size = (32, 16, 9)
        self.conv1 = nn.Conv2d(3, 16, kernel_size=8, stride=4, padding=2).to(device)
        self.relu1 = nn.LeakyReLU(inplace=True).to(device)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=4, stride=2, padding=1).to(device)
        self.relu2 = nn.LeakyReLU(inplace=True).to(device)
        self.fc1 = nn.Linear(self.map_size[0] * self.map_size[1] * self.map_size[2], 128).to(device)
        self.relu3 = nn.LeakyReLU(inplace=True).to(device)
        self.fc2 = nn.Linear(128, self.actions_num).to(device)
    def set_trainable(self, trainable):
        """Record whether the network is currently being trained (flag only)."""
        self.trainable = trainable
    def set_initial_state(self, obs=None):
        """
        Set initial state
        state: initial state. if None, use `BrainDQN.empty_state`
        """
        if obs is None:
            self.current_state = Bird_DQN.empty_state
        else:
            # Two blank frames plus the first real observation.
            self.current_state = np.append(Bird_DQN.empty_state[1:, :, :], obs.reshape((1,) + obs.shape),
                                           axis=0)
    def forward(self, obs):
        """Return estimated Q values of shape (N, actions_num) for stacked frames."""
        # get Q estimation
        out = self.conv1(obs)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.relu2(out)
        out = out.view(out.size()[0], -1)
        out = self.fc1(out)
        out = self.relu3(out)
        out = self.fc2(out)
        return out
    def optimal_action(self):  # greedy choose (exploitation)
        """One-hot action with the highest predicted Q for current_state."""
        state = self.current_state
        state_var = Variable(torch.from_numpy(state), requires_grad=False).unsqueeze(0).to(device)
        q_values = self.forward(state_var)
        _, actions_index = torch.max(q_values, dim=1)
        action_index = actions_index.data[0]
        action = np.zeros(self.actions_num, dtype=np.float32)
        action[action_index] = 1
        return action
    def random_action(self):  # random choose (exploration)
        """Random one-hot action; index 0 is picked 80% of the time."""
        action = np.zeros(self.actions_num, dtype=np.float32)
        action_index = 0 if np.random.random() < 0.8 else 1
        action[action_index] = 1
        return action
    def take_action(self):
        """Epsilon-greedy: explore with probability self.epsilon, else exploit."""
        if np.random.random() < self.epsilon:
            return self.random_action()
        else:
            return self.optimal_action()
    def store_trans(self, action, reward, next_obs, finish):
        """Append one transition to the replay memory and advance current_state.

        finish -- True when the episode ended; the state is then reset.
        """
        next_state = np.append(self.current_state[1:, :, :], next_obs.reshape((1,) + next_obs.shape), axis=0)
        self.replay_mem.append((self.current_state, action, reward, next_state, finish))
        if (len(self.replay_mem) > self.mem_size):
            # Evict the oldest transition once capacity is exceeded.
            self.replay_mem.popleft()
        if (finish):
            self.set_initial_state()
        else:
            self.current_state = next_state
    def increase_timestep(self):
        """Advance the step counter (presumably driven by the training loop — confirm)."""
        self.time_step += 1
| [
"a145926@163.com"
] | a145926@163.com |
c02a678107f5e807bc54b95fb1bc038e46931756 | f338eb32c45d8d5d002a84798a7df7bb0403b3c4 | /DQM/DTMonitorModule/test/DTkFactValidation_1_TEMPL_cfg.py | 28873b4aebd3900356c5f720350f92f2c2e3d464 | [] | permissive | wouf/cmssw | 0a8a8016e6bebc611f1277379e12bef130464afb | 60da16aec83a0fc016cca9e2a5ed0768ba3b161c | refs/heads/CMSSW_7_3_X | 2022-06-30T04:35:45.380754 | 2015-05-08T17:40:17 | 2015-05-08T17:40:17 | 463,028,972 | 0 | 0 | Apache-2.0 | 2022-02-24T06:05:30 | 2022-02-24T06:05:26 | null | UTF-8 | Python | false | false | 3,607 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
# Route 'resolution' messages to stdout; suppress everything below ERROR.
process.MessageLogger = cms.Service("MessageLogger",
    debugModules = cms.untracked.vstring('resolutionTest_step1',
        'resolutionTest_step2',
        'resolutionTest_step3'),
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string('ERROR'),
        default = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        resolution = cms.untracked.PSet(
            limit = cms.untracked.int32(10000000)
        ),
        noLineBreaks = cms.untracked.bool(True)
    ),
    categories = cms.untracked.vstring('resolution'),
    destinations = cms.untracked.vstring('cout')
)
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
# GLOBALTAGTEMPLATE / RUNPERIODTEMPLATE / etc. are placeholders substituted
# by the submission script before cmsRun is invoked.
process.GlobalTag.globaltag = "GLOBALTAGTEMPLATE"
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("RecoLocalMuon.Configuration.RecoLocalMuonCosmics_cff")
process.source = cms.Source("PoolSource",
    debugFlag = cms.untracked.bool(True),
    debugVebosity = cms.untracked.uint32(10),
    fileNames = cms.untracked.vstring()
)
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
# Override the DTTtrigRcd record with a local sqlite ttrig payload.
process.calibDB = cms.ESSource("PoolDBESSource",
    process.CondDBSetup,
    timetype = cms.string('runnumber'),
    toGet = cms.VPSet(cms.PSet(
        record = cms.string('DTTtrigRcd'),
        tag = cms.string('ttrig')
    )),
    connect = cms.string('sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_MUONCALIB/DTCALIB/RUNPERIODTEMPLATE/ttrig/ttrig_DUMPDBTEMPL_RUNNUMBERTEMPLATE.db'),
    authenticationMethod = cms.untracked.uint32(0)
)
# Prefer the sqlite payload above over the conditions from the global tag.
process.es_prefer_calibDB = cms.ESPrefer('PoolDBESSource','calibDB')
# if read from RAW
process.load("EventFilter.DTRawToDigi.dtunpacker_cfi")
process.eventInfoProvider = cms.EDFilter("EventCoordinatesSource",
    eventInfoFolder = cms.untracked.string('EventInfo/')
)
process.DTkFactValidation = cms.EDAnalyzer("DTCalibValidation",
    # Write the histos on file
    OutputMEsInRootFile = cms.bool(True),
    # Label to retrieve 2D segments from the event
    segment2DLabel = cms.untracked.string('dt2DSegments'),
    OutputFileName = cms.string('residuals.root'),
    # Label to retrieve 4D segments from the event
    segment4DLabel = cms.untracked.string('dt4DSegments'),
    debug = cms.untracked.bool(False),
    # Label to retrieve RecHits from the event
    recHits1DLabel = cms.untracked.string('dt1DRecHits')
)
# Keep only the MEtoEDMConverter products in the output file.
process.FEVT = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('drop *',
        'keep *_MEtoEDMConverter_*_*'),
    fileName = cms.untracked.string('DQM.root')
)
process.load("DQMServices.Components.MEtoEDMConverter_cff")
process.dummyProducer = cms.EDProducer("ThingWithMergeProducer")
# if read from RAW
#process.firstStep = cms.Sequence(process.muonDTDigis*process.dt1DRecHits*process.dt2DSegments*process.dt4DSegments*process.DTkFactValidation)
# Unpack RAW, run DT local reco, then the validation analyzer and ME->EDM conversion.
process.firstStep = cms.Sequence(process.dummyProducer + process.muonDTDigis*process.dt1DRecHits*process.dt2DSegments*process.dt4DSegments*process.DTkFactValidation*process.MEtoEDMConverter)
#process.firstStep = cms.Sequence(process.dummyProducer + process.dt1DRecHits*process.dt2DSegments*process.dt4DSegments*process.DTkFactValidation*process.MEtoEDMConverter)
process.p = cms.Path(process.firstStep)
process.outpath = cms.EndPath(process.FEVT)
process.DQM.collectorHost = ''
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
a03b1ddee9bd276eb940bb87b27069497c127011 | b5c33e768a9845ae242026cb2a85ec4f073c4aa4 | /2_create-topic-model/sparkutils.py | ea2bc40e68ad0e69fe65c29a9bff4ba918b2b465 | [] | no_license | htrc/ACS-samayoa | 560e292db3d90fa1f4c1228d3239a7933863e4fb | 3a4652eef53e86e5ac6e566a6fdf62da88525854 | refs/heads/master | 2023-05-14T20:07:16.282745 | 2021-06-03T20:01:43 | 2021-06-03T20:01:43 | 362,940,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | import sys
from typing import Union
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
def config_spark(app_name: str, num_cores: int = None) -> SparkSession:
    """Build (or reuse) a SparkSession with the given application name.

    If num_cores is provided, the session runs in local mode restricted to
    that many cores; otherwise the master is left to the environment.
    """
    conf = SparkConf().setAppName(app_name)
    if num_cores is not None:
        conf.setMaster(f"local[{num_cores}]")
    return SparkSession.builder.config(conf=conf).getOrCreate()
def stop_spark_and_exit(spark: Union[SparkSession, SparkContext], exit_code: int = 0):
    """Stop the given Spark session/context, then terminate the process.

    sys.exit runs in the finally block so the process exits with
    `exit_code` even if spark.stop() raises.
    """
    try:
        spark.stop()
    finally:
        sys.exit(exit_code)
| [
"capitanu@illinois.edu"
] | capitanu@illinois.edu |
269ba1cfc40017b082ebff0bdbc3320c353476f2 | febd53417e07f52dc4a717cf241967be6e977913 | /DataRendering/structures.py | de84e5e948b5a1f2fe43f99a24c790b2bebcbaad | [] | no_license | DynamicsAndNeuralSystems/DevelopingMouse | 1d2d9d1e14fcee22f1032876e37fdd09334fd64f | 551d68ade7e522a62534293d004e24dc05ccb804 | refs/heads/master | 2023-03-21T09:25:22.200676 | 2021-03-19T02:24:54 | 2021-03-19T02:24:54 | 93,137,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,026 | py | # import sys
# print('\n'.join(sys.path))
from allensdk.api.queries.rma_api import RmaApi
import pandas as pd
import os
api = RmaApi()  # shared Allen Brain Atlas RMA query client used by all helpers below
MOUSE_GRAPH_ID = 17  # ontology graph id — presumably the developing-mouse atlas; adult mouse uses graph 1
def getStructureInfo(structure_level, other_criteria):
    """Fetch developing-mouse Structure records at one ontology level.

    Returns the API rows as a pandas DataFrame; `other_criteria` is appended
    verbatim to the RMA criteria string.
    """
    criteria = ('[graph_id$eq%d]' % MOUSE_GRAPH_ID) \
        + ('[st_level$eq%d]' % structure_level) \
        + str(other_criteria)
    rows = api.model_query('Structure', criteria=criteria, num_rows='all')
    return pd.DataFrame(rows)
def getStructureInfo_AdultMouse():
    """Fetch all adult-mouse (graph 1) Structure records as a DataFrame."""
    rows = api.model_query('Structure',
                           criteria='[graph_id$eq1]',
                           num_rows='all')
    return pd.DataFrame(rows)
def getCentreCoordinates_DevMouse(structure_level):
    """Fetch centre coordinates of developing-mouse structures at one level."""
    criteria = 'structure' \
        + ('[st_level$eq%d]' % structure_level) \
        + ('[graph_id$eq%d]' % MOUSE_GRAPH_ID)
    rows = api.model_query('StructureCenter', criteria=criteria, num_rows='all')
    return pd.DataFrame(rows)
def getCentreCoordinates_AdultMouse():
    """Fetch centre coordinates of adult-mouse (graph 1) structures."""
    rows = api.model_query('StructureCenter',
                           criteria='structure[graph_id$eq1]',
                           num_rows='all')
    return pd.DataFrame(rows)
def getAcronymPath(structure_level, other_criteria):
    """Fetch OntologyNode rows (acronym paths) for developing-mouse structures.

    `other_criteria` is appended verbatim to the RMA criteria string.
    """
    criteria = 'structure' \
        + ('[st_level$eq%d]' % structure_level) \
        + ('[graph_id$eq%d]' % MOUSE_GRAPH_ID) \
        + str(other_criteria)
    rows = api.model_query('OntologyNode', criteria=criteria, num_rows='all')
    return pd.DataFrame(rows)
def main():
    """Download structure metadata, centre coordinates and acronym paths
    from the Allen API and save them as CSVs under Data/API/Structures.

    NOTE(review): the backslash-continued criteria strings embed the leading
    indentation of each continuation line into the string itself; the RMA API
    apparently tolerates this — confirm before reformatting.
    """
    print('hi')
    #os.chdir(r'D:\Data\DevelopingAllenMouseAPI-master\Git') # user input the Git directory as on their computer here
    # download level 5 structures of developing mouse
    # Each [parent_structure_id$ne...] clause excludes one unwanted parent structure.
    other_criteria_level5 = '[parent_structure_id$ne126651574]\
    [parent_structure_id$ne126651586]\
    [parent_structure_id$ne126651606]\
    [parent_structure_id$ne126651618]\
    [parent_structure_id$ne126651642]\
    [parent_structure_id$ne126651654]\
    [parent_structure_id$ne126651670]\
    [parent_structure_id$ne126651682]\
    [parent_structure_id$ne126651698]\
    [parent_structure_id$ne126651710]\
    [parent_structure_id$ne126651730]\
    [parent_structure_id$ne126651742]\
    [parent_structure_id$ne126651758]\
    [parent_structure_id$ne126651770]\
    [parent_structure_id$ne126651790]\
    [parent_structure_id$ne126651810]\
    [parent_structure_id$ne126651830]\
    [parent_structure_id$ne126651854]\
    [parent_structure_id$ne126651874]\
    [parent_structure_id$ne126651898]\
    [parent_structure_id$ne126651918]\
    [parent_structure_id$ne126651942]\
    [parent_structure_id$ne126651962]\
    [parent_structure_id$ne126651982]\
    [parent_structure_id$ne126652002]\
    [parent_structure_id$ne126652022]\
    [parent_structure_id$ne17651]\
    [parent_structure_id$ne126652042]'
    structures=getStructureInfo(structure_level=5, other_criteria=other_criteria_level5)
    STRUCTURE_LEVEL = 5
    # specify the directories
    abs_dir = os.path.dirname(__file__)
    rel_dir = os.path.join(abs_dir, '..','Data','API','Structures')
    data = os.path.join(rel_dir, 'structureData_level%d.csv' % STRUCTURE_LEVEL)
    structures.to_csv(data)
    # download level 3 structures pf developing mouse
    other_criteria_level3 = '[parent_structure_id$ne126651566]\
    [parent_structure_id$ne126651634]\
    [parent_structure_id$ne126651722]\
    [parent_structure_id$ne126651786]\
    [parent_structure_id$ne126651850]\
    [parent_structure_id$ne126651894]\
    [parent_structure_id$ne126651938]'
    structures=getStructureInfo(structure_level=3, other_criteria=other_criteria_level3)
    STRUCTURE_LEVEL = 3
    data = os.path.join(rel_dir, 'structureData_level%d.csv' % STRUCTURE_LEVEL)
    structures.to_csv(data)
    # download adult mouse structure info
    structures = getStructureInfo_AdultMouse()
    data = os.path.join(rel_dir, 'structureData_adult.csv')
    structures.to_csv(data)
    # Download coordinates of centre of developing mouse structures
    structure_centers=getCentreCoordinates_DevMouse(structure_level=5)
    STRUCTURE_LEVEL = 5
    data = os.path.join(rel_dir, 'structureCenters_level%d.csv' % STRUCTURE_LEVEL)
    structure_centers.to_csv(data)
    # Download coordinates of centre of adult mouse structures
    structure_centers_adult=getCentreCoordinates_AdultMouse()
    data = os.path.join(rel_dir, 'structureCenters_adult.csv')
    structure_centers_adult.to_csv(data)
    # download acronym path for developing mouse
    OntologyNode=getAcronymPath(structure_level=5, other_criteria=other_criteria_level5)
    STRUCTURE_LEVEL = 5
    data = os.path.join(rel_dir, 'AcronymPath_level%d.csv' % STRUCTURE_LEVEL)
    OntologyNode.to_csv(data)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"lauhoiyangladys@gmail.com"
] | lauhoiyangladys@gmail.com |
33889b44e80246dbdece176c1b56e7f53f3baae2 | f64a580208cfd7fa332dc1df9cb9e776f9581216 | /jobportal/views.py | df0025f809db53e36768749963ad5a71e1de3618 | [] | no_license | pravsp/job-portal | 66f0bb3051643da6af73a5ea3328cd61ad22254a | 84c2a18a1ce54374b107de18d73f2184565f1658 | refs/heads/master | 2022-12-23T09:09:38.763835 | 2020-10-01T16:45:32 | 2020-10-01T16:45:32 | 299,570,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from jobportal import app
from flask import jsonify
@app.route('/')
def home():
    """Root endpoint: return a JSON welcome message."""
    payload = {'message': 'Welcome to rusteez!!!'}
    return jsonify(payload)
"praveen.kumar.sp@gmail.com"
] | praveen.kumar.sp@gmail.com |
1a24d817c3f129cc65696bef11cd602b33c56141 | edde0db6d7df34dce6b68b64bd88b382300d5661 | /021.py | 1a4a7697905445a6524d39963a4f8134345e59b0 | [] | no_license | Trietptm-on-Coding-Algorithms/eulerproject | 2af378c2464130e097e410ca981d11e8de743d7b | c794ae2528a516bd62069d59f9375210e4376969 | refs/heads/master | 2021-01-14T02:29:38.582949 | 2013-04-05T05:03:44 | 2013-04-05T05:03:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | #! /usr/bin/env python
def memo(func):
    """Memoising decorator: cache results keyed on positional arguments."""
    cache = {}
    def wrapper(*args):
        try:
            return cache[args]
        except KeyError:
            result = cache[args] = func(*args)
            return result
    return wrapper
@memo
def divisors(n):
    """Return the proper divisors of n (all divisors <= n//2) in increasing order.

    Uses trial division up to sqrt(n) instead of scanning every integer up to
    n//2 — this function is called ~20000 times by the amicable-pair search
    below, so the O(sqrt n) form matters. Output is identical to the original
    O(n) scan for every integer input (empty list for n < 2).
    """
    if n < 2:
        return []
    found = {1}
    k = 2
    while k * k <= n:
        if n % k == 0:
            found.add(k)
            found.add(n // k)  # the complementary divisor, always <= n//2
        k += 1
    return sorted(found)
def d(n):
    """Sum of the proper divisors of n."""
    total = 0
    for divisor in divisors(n):
        total += divisor
    return total
#print d(284),d(220) #220,284
# Project Euler 21: sum all amicable numbers below 10000.
res = []
for i in range(1,10000):
    if i in res: continue
    j = d(i)
    # (i, j) is an amicable pair when d(i) == j, d(j) == i and i != j.
    if i!=j and d(j) == i:
        res.append(i)
        res.append(j)
print sum(res)
| [
"kennel209@gmail.com"
] | kennel209@gmail.com |
4869b01a3ceb8a2fe7ccd3a7f8289efc0306cd9a | 4b8100df8c29a3676f3ed3f9cf4ce19f83fac6d5 | /images/urls.py | 038957d622f8315d93fcfe38b4d4c39f1b274bb3 | [] | no_license | mtali/bookmark | bc96818764e18865ef18142ab075d512cd028cfa | f568e2463599b7d4333074d4dc0de28ca67f76ce | refs/heads/master | 2021-04-12T01:45:48.934820 | 2018-03-18T19:43:34 | 2018-03-18T19:43:34 | 125,758,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from django.urls import path
from . import views
app_name = 'images'
urlpatterns = [
path('', views.image_list, name='list'),
path('create/', views.image_create, name='images'),
path('detail/<int:id>/<slug:slug>/', views.image_detail, name='detail'),
path('like/', views.image_like, name='like'),
path('ranking/', views.image_ranking, name='image_ranking'),
]
| [
"emmanuelmtali@yahoo.com"
] | emmanuelmtali@yahoo.com |
0282347967f78e46d8770816ad02f6e2e4a5cc3d | 1f2494e856352873edba6dd24dcd5736a6c05351 | /pystart/pystart/spiders/Hue.py | 530c13f5d2cf578f79eadaa8e8868f7c8f2b2221 | [] | no_license | namvh145/Nam-Scott | c0f5ae4b2e0e9cabe7b7896d5a2153ac2b5e7cbc | a9c7c84f85bb3cecd622ec07f44e05efdf955c50 | refs/heads/master | 2021-07-04T06:54:24.790942 | 2021-05-11T09:36:02 | 2021-05-11T09:36:02 | 234,663,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | import re
import scrapy
class QuangTri(scrapy.Spider):
    """Scrape attraction names and descriptions from a toplist.vn page.

    NOTE(review): the class is named QuangTri but the spider name and the
    target URL are for Hue — presumably copied from a sibling spider; confirm.
    """
    name = "Hue"
    start_urls = [
        "https://toplist.vn/top-list/diem-du-lich-noi-tieng-xu-hue-382.htm"
    ]
    def parse(self, response, **kwargs):
        # All headings and body text nodes, in document order.
        address = ".media-body li::text , .media-body p::text , strong::text , .item_dsp_row .media-heading::text"
        source = response.css(address).extract()
        # Headings only — used as delimiters between attractions.
        title = response.css(".item_dsp_row .media-heading::text").extract()
        n = len(source)
        name = ""
        description = ""
        for i in range(n):
            if source[i] in title:
                # A heading starts a new attraction: reset the description buffer.
                name = source[i]
                # Consume the heading so an identical body line later is not
                # mistaken for a new attraction.
                title.remove(name)
                description = ""
            else:
                if (i + 1 < n and source[i + 1] in title) or i + 1 == n:
                    # Last text node of this attraction: append and emit the item.
                    description += source[i]
                    yield {
                        "Name": name,
                        "Description": description
                    }
                else:
                    description += source[i]
"noreply@github.com"
] | namvh145.noreply@github.com |
ff4ddb936b2b3872ee557d4c51e532d34037b10d | 38b5dff792caa9246b9cb0a92c74c713878f1a19 | /Python/0804_UniqueMorseCodeWords/uniqueMorseRepresentations.py | 3cfca6e6817a9ceae5e7bbc481b0cf325ad0971f | [] | no_license | mtmmy/Leetcode | 12b108f81a3b3cee4f2801353e8fc8e9ec5e791e | 75aef2f6c42aeb51261b9450a24099957a084d51 | refs/heads/master | 2020-12-02T06:43:22.419495 | 2019-04-07T18:30:45 | 2019-04-07T18:30:45 | 96,882,649 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | import string
class Solution:
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
morseCode = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
alphabet = string.ascii_lowercase
morseDict = dict(zip(alphabet, morseCode))
result = set()
for word in words:
codedMorse = ""
for c in word:
codedMorse += morseDict[c]
result.add(codedMorse)
return len(result) | [
"mtmmy@users.noreply.github.com"
] | mtmmy@users.noreply.github.com |
bc4df46eb7872b86c36c55931baed6b9e64e0e68 | 0b2590e8c6783a0ba9d811d4467e59241b037767 | /python3_programming_tricks/ch09/9-4.py | 7d550afaa409e34ca6df96184d15899b3b21c182 | [] | no_license | tuxnotes/python_notes | f070dffb46f34eefd341c47fdb2e414e4f85261c | f031490b3a208898a45fec67d83bf75f6ad91b8e | refs/heads/master | 2020-05-20T20:39:29.239620 | 2019-12-04T06:34:35 | 2019-12-04T06:34:35 | 185,746,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | import time
import logging
import random
def warn_timeout(timeout):
    """Decorator factory: log a warning when the wrapped call takes longer
    than `timeout` seconds.

    The returned wrapper exposes a `set_timeout(new_timeout)` attribute so
    the threshold can be changed after decoration.
    """
    # Since this takes an argument, it is a factory producing a decorator.
    def decorator(func):
        def wrap(*args, **kwargs):
            t0 = time.time()  # fixed: was `timem.time()`, a NameError at call time
            res = func(*args, **kwargs)
            used = time.time() - t0
            if used > timeout:
                logging.warning("%s: %s > %s", func.__name__, used, timeout)
            return res
        # Mutator for the threshold, exposed on the wrapped function.
        def set_timeout(new_timeout):
            # `timeout` is a free variable of the closure; Python 3's
            # `nonlocal` lets us rebind it.
            nonlocal timeout
            timeout = new_timeout
        wrap.set_timeout = set_timeout  # attach as a function attribute
        return wrap
    return decorator
@warn_timeout(1.5)
def f(i):
    """Demo function: prints its argument, then sleeps a random number of
    0.6 s intervals so some calls exceed the warning threshold."""
    print('in f [%s]' % i)
    while random.randint(0, 1):
        time.sleep(0.6)  # fixed: was `time.spleep`, an AttributeError

for i in range(30):
    f(i)

# Lower the threshold at runtime and run again to show set_timeout working.
f.set_timeout(1)
for i in range(30):
    f(i)
| [
"vodaka@126.com"
] | vodaka@126.com |
fb6f67a97a02f948ad2aa2760275229fce44c0f1 | 1abe01b916df738f6597fd3d754e0caf5bba82db | /Books/contact/views.py | 486198a07c7f7a2686fb8f898ce2ddaa145b8fda | [] | no_license | alegoriyas/Books | 58cb711b3b35ca9069802e0437f7a28a4e8db8f0 | b8fc2e0a5db88b921bd1b22965e95012feaadffd | refs/heads/master | 2022-12-24T10:08:09.362715 | 2020-10-08T08:31:06 | 2020-10-08T08:31:06 | 273,240,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | from django.shortcuts import render
from django.core.mail import send_mail
def index(request):
    """Render the contact index page, sending a notification email first."""
    send_mail(
        'Hello from SteblinaEE',
        'Hello there. This is an automated message.',
        'steblinaee@gmail.com',
        ['egorinkas@gmail.com'],
        fail_silently=False,
    )
    return render(request, 'contact/index.html')
def contact_form(request):
    """Handle the contact form.

    On POST: relay the submitted message by email and re-render the form,
    echoing the sender's name and address. Otherwise render the empty form.
    """
    if request.method != 'POST':
        return render(request, 'contact/contact_form.html', {})
    message_name = request.POST['message-name']
    message = request.POST['message']
    message_email = request.POST['message-email']
    # Relay the submission by email.
    send_mail(
        message_name,              # subject
        message,                   # message body
        message_email,             # from address
        ['egorinkas@gmail.com'],   # to address
    )
    context = {'message_name': message_name,
               'message_email': message_email}
    return render(request, 'contact/contact_form.html', context)
"alegoriyass@yandex.ru"
] | alegoriyass@yandex.ru |
5f90d9616705b9653e695c4d316f9a93a74866bd | 080ed9d2b5ff76c5613d8dd42cb4f33bd9b3a82e | /code/sheet_cleaner/sheet_processor.py | f41e8a41c99357f2914903acd8abb13763aa675a | [
"MIT"
] | permissive | garain/nCoV2019 | 2114b9031baa13a52a93c30e92995ccfb01ea075 | 9cd6829ada1fd193908b213e4716f536253dbbe4 | refs/heads/master | 2022-12-12T01:21:18.883025 | 2020-09-09T05:01:52 | 2020-09-09T05:01:52 | 260,449,278 | 2 | 0 | MIT | 2020-05-01T12:03:27 | 2020-05-01T12:03:26 | null | UTF-8 | Python | false | false | 6,509 | py | import logging
import os
from datetime import datetime
from typing import List
import configparser
import pandas as pd
from geocoding import csv_geocoder
from spreadsheet import GoogleSheet
from functions import (duplicate_rows_per_column, fix_na, fix_sex,
generate_error_tables, trim_df, values2dataframe)
class SheetProcessor:
    """Cleans case data from several Google Sheets, geocodes each row, and
    writes per-sheet error reports plus combined CSV output files.

    Files produced by process() are collected in self.for_github so that
    push_to_github() can commit them in one batch.
    """

    def __init__(self, sheets: List[GoogleSheet], geocoder: csv_geocoder.CSVGeocoder, config: configparser.ConfigParser):
        # Paths of CSVs produced by process(), later committed by push_to_github().
        self.for_github = []
        self.sheets = sheets
        self.geocoder = geocoder
        self.config = config

    def process(self):
        """Does all the heavy handling of spreadsheets, writing output to CSV files."""
        for s in self.sheets:
            logging.info("Processing sheet %s", s.name)

            ### Clean Private Sheet Entries. ###
            # note : private sheet gets updated on the fly and redownloaded to ensure continuity between fixes (granted its slower).
            range_ = f'{s.name}!A:AG'
            data = values2dataframe(s.read_values(range_))

            # Expand aggregated cases into one row each.
            logging.info("Rows before expansion: %d", len(data))
            if len(data) > 150000:
                logging.warning("Sheet %s has more than 150K rows, it should be split soon", s.name)
            data.aggregated_num_cases = pd.to_numeric(data.aggregated_num_cases, errors='coerce')
            data = duplicate_rows_per_column(data, "aggregated_num_cases")
            logging.info("Rows after expansion: %d", len(data))

            # Generate IDs for each row sequentially following the sheet_id-inc_int pattern.
            data['ID'] = s.base_id + "-" + pd.Series(range(1, len(data)+1)).astype(str)

            # Remove whitespace.
            data = trim_df(data)

            # Fix columns that can be fixed easily.
            data.sex = fix_sex(data.sex)

            # fix N/A => NA
            for col in data.select_dtypes("string"):
                data[col] = fix_na(data[col])

            # Regex fixes: apply automatic fixes back to the sheet, then
            # re-download so `data` reflects the corrected cells.
            fixable, non_fixable = generate_error_tables(data)
            if len(fixable) > 0:
                logging.info('fixing %d regexps', len(fixable))
                s.fix_cells(fixable)
                data = values2dataframe(s.read_values(range_))

            # ~ negates, here clean = data with IDs not in non_fixable IDs.
            clean = data[~data.ID.isin(non_fixable.ID)]
            clean = clean.drop('row', axis=1)
            # Fixed: sort_values returns a new frame; the result was previously
            # discarded (cf. the non_fixable sort below, which assigns).
            clean = clean.sort_values(by='ID')
            s.data = clean
            non_fixable = non_fixable.sort_values(by='ID')

            # Save error_reports
            # These are separated by Sheet.
            logging.info('Saving error reports')
            directory = self.config['FILES']['ERRORS']
            file_name = f'{s.name}.error-report.csv'
            error_file = os.path.join(directory, file_name)
            non_fixable.to_csv(error_file, index=False, header=True, encoding="utf-8")
            self.for_github.append(error_file)

        # Combine data from all sheets into a single datafile
        all_data = []
        for s in self.sheets:
            logging.info("sheet %s had %d rows", s.name, len(s.data))
            all_data.append(s.data)
        all_data = pd.concat(all_data, ignore_index=True)
        all_data = all_data.sort_values(by='ID')
        logging.info("all_data has %d rows", len(all_data))

        # Fill geo columns from the offline geocoder; rows it cannot match
        # keep whatever values they already had.
        geocode_matched = 0
        for i, row in all_data.iterrows():
            geocode = self.geocoder.geocode(row.city, row.province, row.country)
            if not geocode:
                continue
            geocode_matched += 1
            all_data.at[i, 'latitude'] = geocode.lat
            all_data.at[i, 'longitude'] = geocode.lng
            all_data.at[i, 'geo_resolution'] = geocode.geo_resolution
            all_data.at[i, 'location'] = geocode.location
            all_data.at[i, 'admin3'] = geocode.admin3
            all_data.at[i, 'admin2'] = geocode.admin2
            all_data.at[i, 'admin1'] = geocode.admin1
            all_data.at[i, 'admin_id'] = geocode.admin_id
            all_data.at[i, 'country_new'] = geocode.country_new
        logging.info("Geocode matched %d/%d", geocode_matched, len(all_data))
        logging.info("Top 10 geocode misses: %s", self.geocoder.misses.most_common(10))
        with open("geocode_misses.csv", "w") as f:
            self.geocoder.write_misses_to_csv(f)
        logging.info("Wrote all geocode misses to geocode_misses.csv")

        # Reorganize csv columns so that they are in the same order as when we
        # used to have those geolocation within the spreadsheet.
        # This is to avoid breaking latestdata.csv consumers.
        all_data = all_data[["ID","age","sex","city","province","country","latitude","longitude","geo_resolution","date_onset_symptoms","date_admission_hospital","date_confirmation","symptoms","lives_in_Wuhan","travel_history_dates","travel_history_location","reported_market_exposure","additional_information","chronic_disease_binary","chronic_disease","source","sequence_available","outcome","date_death_or_discharge","notes_for_discussion","location","admin3","admin2","admin1","country_new","admin_id","data_moderator_initials","travel_history_binary"]]

        # save
        logging.info("Saving files to disk")
        dt = datetime.now().strftime('%Y-%m-%dT%H%M%S')
        file_name = self.config['FILES']['DATA'].replace('TIMESTAMP', dt)
        latest_name = os.path.join(self.config['FILES']['LATEST'], 'latestdata.csv')
        all_data.to_csv(file_name, index=False, encoding="utf-8")
        all_data.to_csv(latest_name, index=False, encoding="utf-8")
        logging.info("Wrote %s, %s", file_name, latest_name)
        self.for_github.extend([file_name, latest_name])

    def push_to_github(self):
        """Pushes csv files created by Process to Github."""
        logging.info("Pushing to github")
        # Create script for uploading to github
        script = 'set -e\n'
        script += 'cd {}\n'.format(self.config['GIT']['REPO'])
        script += 'git pull origin master\n'
        for g in self.for_github:
            script += f'git add {g}\n'
        script += 'git commit -m "data update"\n'
        script += 'git push origin master\n'
        script += f'cd {os.getcwd()}\n'
        print(script)
        os.system(script)
| [
"thomas.brewer@childrens.harvard.edu"
] | thomas.brewer@childrens.harvard.edu |
d3169a5021ad5997b3bd8cbd3b310c67d027bb0e | 53356576265cd15a98837e0f7ba60ce2b6a8d687 | /getemails/pipelines.py | 8cc50f657318fe68120fd1b906966134090cffa1 | [] | no_license | cnoott/getemails | 87abd44533d315f8929b00dc55ee23f5d328e424 | 2465a01f058ff4d47b57bf5184537023d54fe834 | refs/heads/master | 2022-11-28T15:37:36.620549 | 2020-08-11T16:37:55 | 2020-08-11T16:37:55 | 286,796,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class GetemailsPipeline:
    """No-op item pipeline: every scraped item passes through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform yet; return the item so later pipelines see it.
        return item
| [
"liamamadio@gmail.com"
] | liamamadio@gmail.com |
03eb716acba6ddc2f77eb15d4d74e33f46e68cc5 | a9bbcbb4d6142c0b290ad7177d29085354d3a1b1 | /upstream/python-bitstring/test/test_bitstream.py | ebd92078b09f342167699b6492d4421e71d5e798 | [
"MIT"
] | permissive | nssllc/carver | 44caf9181824215e3c317485d93ad1ed92e0d2f6 | 89ce21ee8df216ce8485adb5f0b413050a4b3cce | refs/heads/master | 2021-01-13T01:41:23.561399 | 2011-10-26T13:03:22 | 2011-10-26T13:03:22 | 2,506,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135,708 | py | #!/usr/bin/env python
import unittest
import sys
sys.path.insert(0, '..')
import bitstring
import copy
import os
import collections
from bitstring import BitStream, ConstBitStream, pack
from bitstring.bitstore import ByteStore, offsetcopy
class FlexibleInitialisation(unittest.TestCase):
    """Tests for the tokenised-string forms of the BitStream constructor."""
    def testFlexibleInitialisation(self):
        # Whitespace inside 'uint:8=12'-style tokens should be ignored.
        a = BitStream('uint:8=12')
        c = BitStream(' uint : 8 = 12')
        self.assertTrue(a == c == BitStream(uint=12, length=8))
        a = BitStream(' int:2= -1')
        b = BitStream('int :2 = -1')
        c = BitStream(' int: 2 =-1 ')
        self.assertTrue(a == b == c == BitStream(int=-1, length=2))
    def testFlexibleInitialisation2(self):
        h = BitStream('hex=12')
        o = BitStream('oct=33')
        b = BitStream('bin=10')
        self.assertEqual(h, '0x12')
        self.assertEqual(o, '0o33')
        self.assertEqual(b, '0b10')
    def testFlexibleInitialisation3(self):
        for s in ['se=-1', ' se = -1 ', 'se = -1']:
            a = BitStream(s)
            self.assertEqual(a.se, -1)
        for s in ['ue=23', 'ue =23', 'ue = 23']:
            a = BitStream(s)
            self.assertEqual(a.ue, 23)
    def testMultipleStringInitialisation(self):
        # Comma-separated tokens concatenate into one stream.
        a = BitStream('0b1 , 0x1')
        self.assertEqual(a, '0b10001')
        a = BitStream('ue=5, ue=1, se=-2')
        self.assertEqual(a.read('ue'), 5)
        self.assertEqual(a.read('ue'), 1)
        self.assertEqual(a.read('se'), -2)
        b = BitStream('uint:32 = 12, 0b11') + 'int:100=-100, 0o44'
        self.assertEqual(b.read(32).uint, 12)
        self.assertEqual(b.read(2).bin, '11')
        self.assertEqual(b.read(100).int, -100)
class Reading(unittest.TestCase):
    """Tests for sequential reads (bits, bytes, Exponential-Golomb codes)."""
    def testReadBits(self):
        s = BitStream(bytes=b'\x4d\x55')
        self.assertEqual(s.read(4).hex, '4')
        self.assertEqual(s.read(8).hex, 'd5')
        self.assertEqual(s.read(1), [0])
        self.assertEqual(s.read(3).bin, '101')
        self.assertFalse(s.read(0))
    def testReadByte(self):
        s = BitStream(hex='4d55')
        self.assertEqual(s.read(8).hex, '4d')
        self.assertEqual(s.read(8).hex, '55')
    def testReadBytes(self):
        s = BitStream(hex='0x112233448811')
        self.assertEqual(s.read(3 * 8).hex, '112233')
        self.assertRaises(ValueError, s.read, -2 * 8)
        s.bitpos += 1
        self.assertEqual(s.read(2 * 8).bin, '1000100100010000')
    def testReadUE(self):
        self.assertRaises(bitstring.InterpretError, BitStream('')._getue)
        # The numbers 0 to 8 as unsigned Exponential-Golomb codes
        s = BitStream(bin='1 010 011 00100 00101 00110 00111 0001000 0001001')
        self.assertEqual(s.pos, 0)
        for i in range(9):
            self.assertEqual(s.read('ue'), i)
        self.assertRaises(bitstring.ReadError, s.read, 'ue')
    def testReadSE(self):
        # Signed Exponential-Golomb codes.
        s = BitStream(bin='010 00110 0001010 0001000 00111')
        self.assertEqual(s.read('se'), 1)
        self.assertEqual(s.read('se'), 3)
        self.assertEqual(s.readlist(3 * ['se']), [5, 4, -3])
class Find(unittest.TestCase):
    """Tests for forward searching, with and without byte alignment."""
    def testFind1(self):
        s = ConstBitStream(bin='0b0000110110000')
        self.assertTrue(s.find(BitStream(bin='11011'), False))
        self.assertEqual(s.bitpos, 4)
        self.assertEqual(s.read(5).bin, '11011')
        s.bitpos = 0
        self.assertFalse(s.find('0b11001', False))
    def testFind2(self):
        s = BitStream(bin='0')
        self.assertTrue(s.find(s, False))
        self.assertEqual(s.pos, 0)
        self.assertFalse(s.find('0b00', False))
        self.assertRaises(ValueError, s.find, BitStream(), False)
    def testFindWithOffset(self):
        s = BitStream(hex='0x112233')[4:]
        self.assertTrue(s.find('0x23', False))
        self.assertEqual(s.pos, 8)
    def testFindCornerCases(self):
        s = BitStream(bin='000111000111')
        self.assertTrue(s.find('0b000'))
        self.assertEqual(s.pos, 0)
        # A successful find from the match position stays put.
        self.assertTrue(s.find('0b000'))
        self.assertEqual(s.pos, 0)
        self.assertTrue(s.find('0b0111000111'))
        self.assertEqual(s.pos, 2)
        self.assertTrue(s.find('0b000', start=2))
        self.assertEqual(s.pos, 6)
        self.assertTrue(s.find('0b111', start=6))
        self.assertEqual(s.pos, 9)
        s.pos += 2
        self.assertTrue(s.find('0b1', start=s.pos))
    def testFindBytes(self):
        s = BitStream('0x010203040102ff')
        self.assertFalse(s.find('0x05', bytealigned=True))
        self.assertTrue(s.find('0x02', bytealigned=True))
        self.assertEqual(s.read(16).hex, '0203')
        self.assertTrue(s.find('0x02', start=s.bitpos, bytealigned=True))
        s.read(1)
        self.assertFalse(s.find('0x02', start=s.bitpos, bytealigned=True))
    def testFindBytesAlignedCornerCases(self):
        s = BitStream('0xff')
        self.assertTrue(s.find(s))
        self.assertFalse(s.find(BitStream(hex='0x12')))
        self.assertFalse(s.find(BitStream(hex='0xffff')))
    def testFindBytesBitpos(self):
        # A failed find must leave the position unchanged.
        s = BitStream(hex='0x1122334455')
        s.pos = 2
        s.find('0x66', bytealigned=True)
        self.assertEqual(s.pos, 2)
        s.pos = 38
        s.find('0x66', bytealigned=True)
        self.assertEqual(s.pos, 38)
    def testFindByteAligned(self):
        s = BitStream(hex='0x12345678')
        self.assertTrue(s.find(BitStream(hex='0x56'), bytealigned=True))
        self.assertEqual(s.bytepos, 2)
        s.pos = 0
        self.assertFalse(s.find(BitStream(hex='0x45'), bytealigned=True))
        s = BitStream('0x1234')
        s.find('0x1234')
        self.assertTrue(s.find('0x1234'))
        s += '0b111'
        s.pos = 3
        s.find('0b1', start=17, bytealigned=True)
        self.assertFalse(s.find('0b1', start=17, bytealigned=True))
        self.assertEqual(s.pos, 3)
    def testFindByteAlignedWithOffset(self):
        s = BitStream(hex='0x112233')[4:]
        self.assertTrue(s.find(BitStream(hex='0x23')))
    def testFindByteAlignedErrors(self):
        # Searching for an empty pattern is invalid.
        s = BitStream(hex='0xffff')
        self.assertRaises(ValueError, s.find, '')
        self.assertRaises(ValueError, s.find, BitStream())
class Rfind(unittest.TestCase):
    """Tests for reverse (right-to-left) searching."""
    def testRfind(self):
        a = BitStream('0b001001001')
        b = a.rfind('0b001')
        self.assertEqual(b, (6,))
        self.assertEqual(a.pos, 6)
        big = BitStream(length=100000) + '0x12' + BitStream(length=10000)
        found = big.rfind('0x12', bytealigned=True)
        self.assertEqual(found, (100000,))
        self.assertEqual(big.pos, 100000)
    def testRfindByteAligned(self):
        a = BitStream('0x8888')
        b = a.rfind('0b1', bytealigned=True)
        self.assertEqual(b, (8,))
        self.assertEqual(a.pos, 8)
    def testRfindStartbit(self):
        a = BitStream('0x0000ffffff')
        b = a.rfind('0x0000', start=1, bytealigned=True)
        self.assertEqual(b, ())
        self.assertEqual(a.pos, 0)
        b = a.rfind('0x00', start=1, bytealigned=True)
        self.assertEqual(b, (8,))
        self.assertEqual(a.pos, 8)
    def testRfindEndbit(self):
        a = BitStream('0x000fff')
        b = a.rfind('0b011', bytealigned=False, start=0, end=14)
        self.assertEqual(bool(b), True)
        b = a.rfind('0b011', False, 0, 13)
        self.assertEqual(b, ())
    def testRfindErrors(self):
        # Empty patterns and out-of-range start/end values are rejected.
        a = BitStream('0x43234234')
        self.assertRaises(ValueError, a.rfind, '', bytealigned=True)
        self.assertRaises(ValueError, a.rfind, '0b1', start=-99, bytealigned=True)
        self.assertRaises(ValueError, a.rfind, '0b1', end=33, bytealigned=True)
        self.assertRaises(ValueError, a.rfind, '0b1', start=10, end=9, bytealigned=True)
class Shift(unittest.TestCase):
    """Tests for the shift operators (<<, >>, <<=, >>=) on bit streams."""
    def testShiftLeft(self):
        s = BitStream('0b1010')
        t = s << 1
        self.assertEqual(s.bin, '1010')
        self.assertEqual(t.bin, '0100')
        t = t << 0
        self.assertEqual(t, '0b0100')
        # Over-shifting zeroes everything but keeps the length.
        t = t << 100
        self.assertEqual(t.bin, '0000')
    def testShiftLeftErrors(self):
        s = BitStream()
        self.assertRaises(ValueError, s.__lshift__, 1)
        s = BitStream('0xf')
        self.assertRaises(ValueError, s.__lshift__, -1)
    def testShiftRight(self):
        s = BitStream('0b1010')
        t = s >> 1
        self.assertEqual(s.bin, '1010')
        self.assertEqual(t.bin, '0101')
        q = s >> 0
        self.assertEqual(q, '0b1010')
        # q must be a copy: mutating it may not affect s.
        q.replace('0b1010', '')
        s = s >> 100
        self.assertEqual(s.bin, '0000')
    def testShiftRightErrors(self):
        s = BitStream()
        self.assertRaises(ValueError, s.__rshift__, 1)
        s = BitStream('0xf')
        self.assertRaises(ValueError, s.__rshift__, -1)
    def testShiftRightInPlace(self):
        s = BitStream('0xffff')[4:12]
        s >>= 1
        self.assertEqual(s, '0b01111111')
        s = BitStream('0b11011')
        s >>= 2
        self.assertEqual(s.bin, '00110')
        s >>= 100000000000000
        self.assertEqual(s.bin, '00000')
        s = BitStream('0xff')
        s >>= 1
        self.assertEqual(s, '0x7f')
        s >>= 0
        self.assertEqual(s, '0x7f')
    def testShiftRightInPlaceErrors(self):
        s = BitStream()
        self.assertRaises(ValueError, s.__irshift__, 1)
        s += '0b11'
        self.assertRaises(ValueError, s.__irshift__, -1)
    def testShiftLeftInPlace(self):
        s = BitStream('0xffff')
        t = s[4:12]
        t <<= 2
        self.assertEqual(t, '0b11111100')
        s = BitStream('0b11011')
        s <<= 2
        self.assertEqual(s.bin, '01100')
        s <<= 100000000000000000000
        self.assertEqual(s.bin, '00000')
        s = BitStream('0xff')
        s <<= 1
        self.assertEqual(s, '0xfe')
        s <<= 0
        self.assertEqual(s, '0xfe')
    def testShiftLeftInPlaceErrors(self):
        s = BitStream()
        self.assertRaises(ValueError, s.__ilshift__, 1)
        s += '0b11'
        self.assertRaises(ValueError, s.__ilshift__, -1)
class Replace(unittest.TestCase):
    """Tests for BitStream.replace: counts, byte alignment, ranges, and
    the effect replacements have on the stream's current position."""
    def testReplace1(self):
        a = BitStream('0b1')
        n = a.replace('0b1', '0b0', bytealigned=True)
        self.assertEqual(a.bin, '0')
        self.assertEqual(n, 1)
        n = a.replace('0b1', '0b0', bytealigned=True)
        self.assertEqual(n, 0)
    def testReplace2(self):
        a = BitStream('0b00001111111')
        n = a.replace('0b1', '0b0', bytealigned=True)
        self.assertEqual(a.bin, '00001111011')
        self.assertEqual(n, 1)
        n = a.replace('0b1', '0b0', bytealigned=False)
        self.assertEqual(a.bin, '00000000000')
        self.assertEqual(n, 6)
    def testReplace3(self):
        # Replacement values may be longer or shorter than the pattern.
        a = BitStream('0b0')
        n = a.replace('0b0', '0b110011111', bytealigned=True)
        self.assertEqual(n, 1)
        self.assertEqual(a.bin, '110011111')
        n = a.replace('0b11', '', bytealigned=False)
        self.assertEqual(n, 3)
        self.assertEqual(a.bin, '001')
    def testReplace4(self):
        a = BitStream('0x00114723ef4732344700')
        n = a.replace('0x47', '0x00', bytealigned=True)
        self.assertEqual(n, 3)
        self.assertEqual(a.hex, '00110023ef0032340000')
        a.replace('0x00', '', bytealigned=True)
        self.assertEqual(a.hex, '1123ef3234')
        a.replace('0x11', '', start=1, bytealigned=True)
        self.assertEqual(a.hex, '1123ef3234')
        a.replace('0x11', '0xfff', end=7, bytealigned=True)
        self.assertEqual(a.hex, '1123ef3234')
        a.replace('0x11', '0xfff', end=8, bytealigned=True)
        self.assertEqual(a.hex, 'fff23ef3234')
    def testReplace5(self):
        # The pattern and replacement arguments must not be mutated.
        a = BitStream('0xab')
        b = BitStream('0xcd')
        c = BitStream('0xabef')
        c.replace(a, b)
        self.assertEqual(c, '0xcdef')
        self.assertEqual(a, '0xab')
        self.assertEqual(b, '0xcd')
        a = BitStream('0x0011223344')
        a.pos = 12
        a.replace('0x11', '0xfff', bytealigned=True)
        self.assertEqual(a.pos, 8)
        self.assertEqual(a, '0x00fff223344')
    def testReplaceWithSelf(self):
        a = BitStream('0b11')
        a.replace('0b1', a)
        self.assertEqual(a, '0xf')
        a.replace(a, a)
        self.assertEqual(a, '0xf')
    def testReplaceCount(self):
        a = BitStream('0x223344223344223344')
        n = a.replace('0x2', '0x0', count=0, bytealigned=True)
        self.assertEqual(n, 0)
        self.assertEqual(a.hex, '223344223344223344')
        n = a.replace('0x2', '0x0', count=1, bytealigned=True)
        self.assertEqual(n, 1)
        self.assertEqual(a.hex, '023344223344223344')
        n = a.replace('0x33', '', count=2, bytealigned=True)
        self.assertEqual(n, 2)
        self.assertEqual(a.hex, '02442244223344')
        # A count larger than the number of matches replaces them all.
        n = a.replace('0x44', '0x4444', count=1435, bytealigned=True)
        self.assertEqual(n, 3)
        self.assertEqual(a.hex, '02444422444422334444')
    def testReplaceBitpos(self):
        a = BitStream('0xff')
        a.bitpos = 8
        a.replace('0xff', '', bytealigned=True)
        self.assertEqual(a.bitpos, 0)
        a = BitStream('0b0011110001')
        a.bitpos = 4
        a.replace('0b1', '0b000')
        self.assertEqual(a.bitpos, 8)
        a = BitStream('0b1')
        a.bitpos = 1
        a.replace('0b1', '0b11111', bytealigned=True)
        self.assertEqual(a.bitpos, 5)
        a.replace('0b11', '0b0', False)
        self.assertEqual(a.bitpos, 3)
        a.append('0b00')
        a.replace('0b00', '0xffff')
        self.assertEqual(a.bitpos, 17)
    def testReplaceErrors(self):
        a = BitStream('0o123415')
        self.assertRaises(ValueError, a.replace, '', '0o7', bytealigned=True)
        self.assertRaises(ValueError, a.replace, '0b1', '0b1', start=-100, bytealigned=True)
        self.assertRaises(ValueError, a.replace, '0b1', '0b1', end=19, bytealigned=True)
class SliceAssignmentWithStep(unittest.TestCase):
    """Tests for extended slice assignment, where the step scales the
    slice indices into bit positions (including negative steps)."""
    def testSetSliceStep(self):
        a = BitStream()
        a[0:0:12] = '0xabcdef'
        self.assertEqual(a.bytepos, 3)
        a[1:4:4] = ''
        self.assertEqual(a, '0xaef')
        self.assertEqual(a.bitpos, 4)
        a[1::8] = '0x00'
        self.assertEqual(a, '0xae00')
        self.assertEqual(a.bytepos, 2)
        a += '0xf'
        a[1::8] = '0xe'
        self.assertEqual(a, '0xaee')
        self.assertEqual(a.bitpos, 12)
        b = BitStream()
        b[0:100:8] = '0xffee'
        self.assertEqual(b, '0xffee')
        b[1:12:4] = '0xeed123'
        self.assertEqual(b, '0xfeed123')
        b[-100:2:4] = '0x0000'
        self.assertEqual(b, '0x0000ed123')
        a = BitStream('0xabcde')
        self.assertEqual(a[-100:-90:4], '')
        self.assertEqual(a[-100:-4:4], '0xa')
        a[-100:-4:4] = '0x0'
        self.assertEqual(a, '0x0bcde')
        # start > stop is invalid for a positive step.
        self.assertRaises(ValueError, a.__setitem__, slice(2, 0, 4), '0x33')
    def testSetSliceNegativeStep(self):
        a = BitStream('0x000000')
        a[1::-8] = '0x1122'
        self.assertEqual(a, '0x221100')
        a[-1:-3:-4] = '0xaeebb'
        self.assertEqual(a, '0x2211bbeea')
        a[-1::-8] = '0xffdd'
        self.assertEqual(a, '0xddff')
        self.assertRaises(ValueError, a.__setitem__, slice(3, 4, -1), '0x12')
        # Step of -1 reverses the bit order of the assigned value.
        b = BitStream('0x00')
        b[::-1] = '0b10001111'
        self.assertEqual(b, '0xf1')
    def testInsertingUsingSetItem(self):
        a = BitStream()
        a[0:0] = '0xdeadbeef'
        self.assertEqual(a, '0xdeadbeef')
        self.assertEqual(a.bytepos, 4)
        a[4:4:4] = '0xfeed'
        self.assertEqual(a, '0xdeadfeedbeef')
        self.assertEqual(a.bytepos, 4)
        a[14232:442232:0] = '0xa'
        self.assertEqual(a, '0xadeadfeedbeef')
        self.assertEqual(a.bitpos, 4)
        a.bytepos = 6
        a[0:0] = '0xff'
        self.assertEqual(a.bytepos, 1)
        a[8:0] = '0x00000'
        self.assertTrue(a.startswith('0xff00000adead'))
    def testSliceAssignmentBitPos(self):
        # Deleting/replacing before the position shifts it accordingly.
        a = BitStream('int:64=-1')
        a.pos = 64
        a[0:8] = ''
        self.assertEqual(a.pos, 0)
        a.pos = 52
        a[48:56] = '0x0000'
        self.assertEqual(a.pos, 64)
        a[10:10] = '0x0'
        self.assertEqual(a.pos, 14)
        a[56:68] = '0x000'
        self.assertEqual(a.pos, 14)
class Pack(unittest.TestCase):
    """Tests for the module-level pack() factory function."""

    def testPack1(self):
        """A mixed format string with positional values matches the
        equivalent literal BitStream; malformed tokens raise CreationError."""
        s = bitstring.pack('uint:6, bin, hex, int:6, se, ue, oct', 10, '0b110', 'ff', -1, -6, 6, '54')
        t = BitStream('uint:6=10, 0b110, 0xff, int:6=-1, se=-6, ue=6, oct=54')
        self.assertEqual(s, t)
        self.assertRaises(bitstring.CreationError, pack, 'tomato', '0')
        self.assertRaises(bitstring.CreationError, pack, 'uint', 12)
        self.assertRaises(bitstring.CreationError, pack, 'hex', 'penguin')
        self.assertRaises(bitstring.CreationError, pack, 'hex12', '0x12')

    def testPackWithLiterals(self):
        """pack() accepts pure token literals with no extra values."""
        s = bitstring.pack('0xf')
        self.assertEqual(s, '0xf')
        # Fixed: was `assertTrue(type(s), BitStream)`, which always passed
        # because the second argument was treated as the failure message.
        self.assertIs(type(s), BitStream)
        s = pack('0b1')
        self.assertEqual(s, '0b1')
        s = pack('0o7')
        self.assertEqual(s, '0o7')
        s = pack('int:10=-1')
        self.assertEqual(s, '0b1111111111')
        s = pack('uint:10=1')
        self.assertEqual(s, '0b0000000001')
        s = pack('ue=12')
        self.assertEqual(s.ue, 12)
        s = pack('se=-12')
        self.assertEqual(s.se, -12)
        s = pack('bin=01')
        self.assertEqual(s.bin, '01')
        s = pack('hex=01')
        self.assertEqual(s.hex, '01')
        s = pack('oct=01')
        self.assertEqual(s.oct, '01')

    def testPackWithDict(self):
        """Keyword arguments substitute for names in the format string."""
        a = pack('uint:6=width, se=height', height=100, width=12)
        w, h = a.unpack('uint:6, se')
        self.assertEqual(w, 12)
        self.assertEqual(h, 100)
        d = {}
        d['w'] = '0xf'
        d['300'] = 423
        d['e'] = '0b1101'
        a = pack('int:100=300, bin=e, uint:12=300', **d)
        x, y, z = a.unpack('int:100, bin, uint:12')
        self.assertEqual(x, 423)
        self.assertEqual(y, '1101')
        self.assertEqual(z, 423)

    def testPackWithDict2(self):
        """Positional and keyword substitutions can be mixed freely."""
        a = pack('int:5, bin:3=b, 0x3, bin=c, se=12', 10, b='0b111', c='0b1')
        b = BitStream('int:5=10, 0b111, 0x3, 0b1, se=12')
        self.assertEqual(a, b)
        a = pack('bits:3=b', b=BitStream('0b101'))
        self.assertEqual(a, '0b101')
        a = pack('bits:24=b', b=BitStream('0x001122'))
        self.assertEqual(a, '0x001122')

    def testPackWithDict3(self):
        """A keyword name and a literal value with the same spelling coexist."""
        s = pack('hex:4=e, hex:4=0xe, hex:4=e', e='f')
        self.assertEqual(s, '0xfef')
        s = pack('sep', sep='0b00')
        self.assertEqual(s, '0b00')

    def testPackWithDict4(self):
        """Arbitrary names in the format are looked up in the keyword dict,
        and may be reused multiple times."""
        s = pack('hello', hello='0xf')
        self.assertEqual(s, '0xf')
        s = pack('x, y, x, y, x', x='0b10', y='uint:12=100')
        t = BitStream('0b10, uint:12=100, 0b10, uint:12=100, 0b10')
        self.assertEqual(s, t)
        a = [1, 2, 3, 4, 5]
        s = pack('int:8, div,' * 5, *a, **{'div': '0b1'})
        t = BitStream('int:8=1, 0b1, int:8=2, 0b1, int:8=3, 0b1, int:8=4, 0b1, int:8=5, 0b1')
        self.assertEqual(s, t)

    def testPackWithLocals(self):
        """**locals() is a convenient source of keyword substitutions."""
        width = 352
        height = 288
        s = pack('uint:12=width, uint:12=height', **locals())
        self.assertEqual(s, '0x160120')

    def testPackWithLengthRestriction(self):
        """A token's explicit bit length must match the supplied value exactly."""
        s = pack('bin:3', '0b000')
        self.assertRaises(bitstring.CreationError, pack, 'bin:3', '0b0011')
        self.assertRaises(bitstring.CreationError, pack, 'bin:3', '0b11')
        self.assertRaises(bitstring.CreationError, pack, 'bin:3=0b0011')
        self.assertRaises(bitstring.CreationError, pack, 'bin:3=0b11')
        s = pack('hex:4', '0xf')
        self.assertRaises(bitstring.CreationError, pack, 'hex:4', '0b111')
        self.assertRaises(bitstring.CreationError, pack, 'hex:4', '0b11111')
        self.assertRaises(bitstring.CreationError, pack, 'hex:8=0xf')
        s = pack('oct:6', '0o77')
        self.assertRaises(bitstring.CreationError, pack, 'oct:6', '0o1')
        self.assertRaises(bitstring.CreationError, pack, 'oct:6', '0o111')
        self.assertRaises(bitstring.CreationError, pack, 'oct:3', '0b1')
        self.assertRaises(bitstring.CreationError, pack, 'oct:3=hello', hello='0o12')
        s = pack('bits:3', BitStream('0b111'))
        self.assertRaises(bitstring.CreationError, pack, 'bits:3', BitStream('0b11'))
        self.assertRaises(bitstring.CreationError, pack, 'bits:3', BitStream('0b1111'))
        self.assertRaises(bitstring.CreationError, pack, 'bits:12=b', b=BitStream('0b11'))

    def testPackNull(self):
        """Empty tokens and stray commas are ignored by pack() and unpack()."""
        s = pack('')
        self.assertFalse(s)
        s = pack(',')
        self.assertFalse(s)
        s = pack(',,,,,0b1,,,,,,,,,,,,,0b1,,,,,,,,,,')
        self.assertEqual(s, '0b11')
        s = pack(',,uint:12,,bin:3,', 100, '100')
        a, b = s.unpack(',,,uint:12,,,,bin:3,,,')
        self.assertEqual(a, 100)
        self.assertEqual(b, '100')

    def testPackDefaultUint(self):
        """A bare length token defaults to uint of that many bits."""
        s = pack('10, 5', 1, 2)
        a, b = s.unpack('10, 5')
        self.assertEqual((a, b), (1, 2))
        s = pack('10=150, 12=qee', qee=3)
        self.assertEqual(s, 'uint:10=150, uint:12=3')
        t = BitStream('100=5')
        self.assertEqual(t, 'uint:100=5')

    # NOTE: method name typo ('Defualt') kept so existing test selections
    # by name continue to work.
    def testPackDefualtUintErrors(self):
        """A negative value is rejected for the default-uint token form."""
        self.assertRaises(bitstring.CreationError, BitStream, '5=-1')

    def testPackingLongKeywordBitstring(self):
        """A long keyword bitstring round-trips through pack() unchanged."""
        s = pack('bits=b', b=BitStream(128000))
        self.assertEqual(s, BitStream(128000))

    def testPackingWithListFormat(self):
        """A list of tokens behaves the same as the comma-joined string."""
        f = ['bin', 'hex', 'uint:10']
        a = pack(','.join(f), '00', '234', 100)
        b = pack(f, '00', '234', 100)
        self.assertEqual(a, b)
class Unpack(unittest.TestCase):
    """Tests for BitStream.unpack()."""

    def testUnpack1(self):
        """unpack() reads from the start and does not move pos."""
        s = BitStream('uint:13=23, hex=e, bin=010, int:41=-554, 0o44332, se=-12, ue=4')
        s.pos = 11
        a, b, c, d, e, f, g = s.unpack('uint:13, hex:4, bin:3, int:41, oct:15, se, ue')
        self.assertEqual(a, 23)
        self.assertEqual(b, 'e')
        self.assertEqual(c, '010')
        self.assertEqual(d, -554)
        self.assertEqual(e, '44332')
        self.assertEqual(f, -12)
        self.assertEqual(g, 4)
        # pos is unchanged by unpack().
        self.assertEqual(s.pos, 11)

    def testUnpack2(self):
        """An unsized 'bits'/'uint' token swallows whatever remains after
        the sized tokens are accounted for."""
        s = BitStream('0xff, 0b000, uint:12=100')
        a, b, c = s.unpack('bits:8, bits, uint:12')
        self.assertEqual(type(s), BitStream)
        self.assertEqual(a, '0xff')
        self.assertEqual(type(s), BitStream)
        self.assertEqual(b, '0b000')
        self.assertEqual(c, 100)
        a, b = s.unpack(['bits:11', 'uint'])
        self.assertEqual(a, '0xff, 0b000')
        self.assertEqual(b, 100)

    def testUnpackNull(self):
        """Empty tokens in the unpack format are ignored."""
        s = pack('0b1, , , 0xf,')
        a, b = s.unpack('bin:1,,,hex:4,')
        self.assertEqual(a, '1')
        self.assertEqual(b, 'f')
class FromFile(unittest.TestCase):
    """Tests for bitstrings backed by files.

    Depends on fixture files 'smalltestfile' (bytes 0x0123456789abcdef)
    and 'test.m1v' being present in the working directory.
    """

    def testCreationFromFileOperations(self):
        """Mutating operations on a file-backed BitStream first copy the data."""
        s = BitStream(filename='smalltestfile')
        s.append('0xff')
        self.assertEqual(s.hex, '0123456789abcdefff')
        s = ConstBitStream(filename='smalltestfile')
        t = BitStream('0xff') + s
        self.assertEqual(t.hex, 'ff0123456789abcdef')
        s = BitStream(filename='smalltestfile')
        del s[:1]
        self.assertEqual((BitStream('0b0') + s).hex, '0123456789abcdef')
        s = BitStream(filename='smalltestfile')
        del s[:7 * 8]
        self.assertEqual(s.hex, 'ef')
        s = BitStream(filename='smalltestfile')
        s.insert('0xc', 4)
        self.assertEqual(s.hex, '0c123456789abcdef')
        s = BitStream(filename='smalltestfile')
        s.prepend('0xf')
        self.assertEqual(s.hex, 'f0123456789abcdef')
        s = BitStream(filename='smalltestfile')
        s.overwrite('0xaaa', 12)
        self.assertEqual(s.hex, '012aaa6789abcdef')
        s = BitStream(filename='smalltestfile')
        s.reverse()
        self.assertEqual(s.hex, 'f7b3d591e6a2c480')
        s = BitStream(filename='smalltestfile')
        del s[-60:]
        self.assertEqual(s.hex, '0')
        s = BitStream(filename='smalltestfile')
        del s[:60]
        self.assertEqual(s.hex, 'f')

    def testFileProperties(self):
        """All interpretation properties work on a file-backed stream."""
        s = ConstBitStream(filename='smalltestfile')
        self.assertEqual(s.hex, '0123456789abcdef')
        self.assertEqual(s.uint, 81985529216486895)
        self.assertEqual(s.int, 81985529216486895)
        self.assertEqual(s.bin, '0000000100100011010001010110011110001001101010111100110111101111')
        self.assertEqual(s[:-1].oct, '002215053170465363367')
        s.bitpos = 0
        self.assertEqual(s.read('se'), -72)
        s.bitpos = 0
        self.assertEqual(s.read('ue'), 144)
        self.assertEqual(s.bytes, b'\x01\x23\x45\x67\x89\xab\xcd\xef')
        self.assertEqual(s.tobytes(), b'\x01\x23\x45\x67\x89\xab\xcd\xef')

    def testCreationFromFileWithLength(self):
        """length/offset constructor arguments are validated against the file size."""
        s = ConstBitStream(filename='test.m1v', length=32)
        self.assertEqual(s.length, 32)
        self.assertEqual(s.hex, '000001b3')
        s = ConstBitStream(filename='test.m1v', length=0)
        self.assertFalse(s)
        self.assertRaises(bitstring.CreationError, BitStream, filename='smalltestfile', length=65)
        self.assertRaises(bitstring.CreationError, ConstBitStream, filename='smalltestfile', length=64, offset=1)
        # self.assertRaises(bitstring.CreationError, ConstBitStream, filename='smalltestfile', offset=65)
        f = open('smalltestfile', 'rb')
        # self.assertRaises(bitstring.CreationError, ConstBitStream, auto=f, offset=65)
        self.assertRaises(bitstring.CreationError, ConstBitStream, auto=f, length=65)
        self.assertRaises(bitstring.CreationError, ConstBitStream, auto=f, offset=60, length=5)

    def testCreationFromFileWithOffset(self):
        """An offset shifts the start of the stream within the file."""
        a = BitStream(filename='test.m1v', offset=4)
        self.assertEqual(a.peek(4 * 8).hex, '00001b31')
        b = BitStream(filename='test.m1v', offset=28)
        self.assertEqual(b.peek(8).hex, '31')

    def testFileSlices(self):
        """Stepped slicing works on file-backed data."""
        s = BitStream(filename='smalltestfile')
        t = s[-2::8]
        self.assertEqual(s[-2::8].hex, 'cdef')

    def testCreataionFromFileErrors(self):
        # NOTE: method name typo ('Creataion') kept for compatibility.
        """A missing file raises IOError."""
        self.assertRaises(IOError, BitStream, filename='Idonotexist')

    def testFindInFile(self):
        """find() works against file-backed data and sets the position."""
        s = BitStream(filename='test.m1v')
        self.assertTrue(s.find('0x160120'))
        self.assertEqual(s.bytepos, 4)
        s3 = s.read(3 * 8)
        self.assertEqual(s3.hex, '160120')
        s.bytepos = 0
        self.assertTrue(s._pos == 0)
        self.assertTrue(s.find('0x0001b2'))
        self.assertEqual(s.bytepos, 13)

    def testHexFromFile(self):
        """hex slicing and reassignment on a file-backed stream."""
        s = BitStream(filename='test.m1v')
        self.assertEqual(s[0:32].hex, '000001b3')
        self.assertEqual(s[-32:].hex, '000001b7')
        s.hex = '0x11'
        self.assertEqual(s.hex, '11')

    def testFileOperations(self):
        """Two streams over the same file maintain independent positions."""
        s1 = BitStream(filename='test.m1v')
        s2 = BitStream(filename='test.m1v')
        self.assertEqual(s1.read(32).hex, '000001b3')
        self.assertEqual(s2.read(32).hex, '000001b3')
        s1.bytepos += 4
        self.assertEqual(s1.read(8).hex, '02')
        self.assertEqual(s2.read(5 * 8).hex, '1601208302')
        s1.pos = s1.len
        try:
            s1.pos += 1
            self.assertTrue(False)
        except ValueError:
            pass

    def testFileBitGetting(self):
        """Indexing and any()/all() work on an offset file-backed slice."""
        s = ConstBitStream(filename='smalltestfile', offset=16, length=8)  # 0x45
        b = s[1]
        self.assertTrue(b)
        b = s.any(0, [-1, -2, -3])
        self.assertTrue(b)
        b = s.all(0, [0, 1, 2])
        self.assertFalse(b)

    def testVeryLargeFiles(self):
        # This uses an 11GB file which isn't distributed for obvious reasons
        # and so this test won't work for anyone except me!
        try:
            s = ConstBitStream(filename='11GB.mkv')
        except IOError:
            return
        self.assertEqual(s.len, 11743020505 * 8)
        self.assertEqual(s[1000000000:1000000100].hex, 'bdef7335d4545f680d669ce24')
        self.assertEqual(s[-4::8].hex, 'bbebf7a1')
class CreationErrors(unittest.TestCase):
    """Tests that invalid binary/hex assignments raise CreationError."""

    def testIncorrectBinAssignment(self):
        """Non-binary digits in a bin string are rejected."""
        s = BitStream()
        self.assertRaises(bitstring.CreationError, s._setbin_safe, '0010020')

    def testIncorrectHexAssignment(self):
        """Non-hex digits in a hex string are rejected."""
        s = BitStream()
        self.assertRaises(bitstring.CreationError, s._sethex, '0xabcdefg')
class Length(unittest.TestCase):
    """Tests for the len/length property and related constructor errors."""

    def testLengthZero(self):
        """An empty stream has length 0."""
        self.assertEqual(BitStream('').len, 0)

    def testLength(self):
        """A one-byte hex literal is 8 bits long."""
        self.assertEqual(BitStream('0x80').len, 8)

    def testLengthErrors(self):
        #TODO: Lots of new checks, for various inits which now disallow length and offset
        pass
        #self.assertRaises(ValueError, BitStream, bin='111', length=-1)
        #self.assertRaises(ValueError, BitStream, bin='111', length=4)

    def testOffsetLengthError(self):
        """A negative offset is rejected at construction time."""
        self.assertRaises(bitstring.CreationError, BitStream, hex='0xffff', offset=-1)
class SimpleConversions(unittest.TestCase):
    """Basic interpretation-property conversions (uint, int, hex)."""

    def testConvertToUint(self):
        self.assertEqual(BitStream('0x10').uint, 16)
        self.assertEqual(BitStream('0b000111').uint, 7)

    def testConvertToInt(self):
        self.assertEqual(BitStream('0x10').int, 16)
        # Two's-complement interpretation.
        self.assertEqual(BitStream('0b11110').int, -2)

    def testConvertToHex(self):
        self.assertEqual(BitStream(bytes=b'\x00\x12\x23\xff').hex, '001223ff')
        # A length not divisible by 4 cannot be shown as hex.
        s = BitStream('0b11111')
        self.assertRaises(bitstring.InterpretError, s._gethex)
class Empty(unittest.TestCase):
    """Behaviour of zero-length bitstrings."""

    def testEmptyBitstring(self):
        """An empty stream is falsy; reads and numeric interpretations fail."""
        s = BitStream()
        self.assertRaises(bitstring.ReadError, s.read, 1)
        self.assertEqual(s.bin, '')
        self.assertEqual(s.hex, '')
        self.assertRaises(bitstring.InterpretError, s._getint)
        self.assertRaises(bitstring.InterpretError, s._getuint)
        self.assertFalse(s)

    def testNonEmptyBitStream(self):
        """Any stream with at least one bit has a non-zero length."""
        s = BitStream(bin='0')
        self.assertFalse(not s.len)
class Position(unittest.TestCase):
    """Tests for the pos/bitpos/bytepos read-position properties."""

    def testBitPosition(self):
        """read() advances pos; reading past the end raises ReadError."""
        s = BitStream(bytes=b'\x00\x00\x00')
        self.assertEqual(s.bitpos, 0)
        s.read(5)
        self.assertEqual(s.pos, 5)
        s.pos = s.len
        self.assertRaises(bitstring.ReadError, s.read, 1)

    def testBytePosition(self):
        """bytepos is only defined when pos is on a byte boundary."""
        s = BitStream(bytes=b'\x00\x00\x00')
        self.assertEqual(s.bytepos, 0)
        s.read(10)
        self.assertRaises(bitstring.ByteAlignError, s._getbytepos)
        s.read(6)
        self.assertEqual(s.bytepos, 2)

    def testSeekToBit(self):
        """bitpos may be set anywhere in [0, len]; outside raises ValueError."""
        s = BitStream(bytes=b'\x00\x00\x00\x00\x00\x00')
        s.bitpos = 0
        self.assertEqual(s.bitpos, 0)
        self.assertRaises(ValueError, s._setbitpos, -1)
        self.assertRaises(ValueError, s._setbitpos, 6 * 8 + 1)
        s.bitpos = 6 * 8
        self.assertEqual(s.bitpos, 6 * 8)

    def testSeekToByte(self):
        s = BitStream(bytes=b'\x00\x00\x00\x00\x00\xab')
        s.bytepos = 5
        self.assertEqual(s.read(8).hex, 'ab')

    def testAdvanceBitsAndBytes(self):
        """pos and bitpos are the same property and support += / -=."""
        s = BitStream(bytes=b'\x00\x00\x00\x00\x00\x00\x00\x00')
        s.pos += 5
        self.assertEqual(s.pos, 5)
        s.bitpos += 16
        self.assertEqual(s.pos, 2 * 8 + 5)
        s.pos -= 8
        self.assertEqual(s.pos, 8 + 5)

    def testRetreatBitsAndBytes(self):
        a = BitStream(length=100)
        a.pos = 80
        a.bytepos -= 5
        self.assertEqual(a.bytepos, 5)
        a.pos -= 5
        self.assertEqual(a.pos, 35)
class Offset(unittest.TestCase):
    """Tests that the internal byte-offset of the datastore never affects
    the visible value of the stream."""

    def testOffset1(self):
        s = BitStream(bytes=b'\x00\x1b\x3f', offset=4)
        self.assertEqual(s.read(8).bin, '00000001')
        self.assertEqual(s.length, 20)

    def testOffset2(self):
        """offsetcopy() to any offset leaves the value unchanged."""
        s1 = BitStream(bytes=b'\xf1\x02\x04')
        s2 = BitStream(bytes=b'\xf1\x02\x04', length=23)
        for i in [1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1, 0, 7, 3, 5, 1, 4]:
            s1._datastore = offsetcopy(s1._datastore, i)
            self.assertEqual(s1.hex, 'f10204')
            s2._datastore = offsetcopy(s2._datastore, i)
            self.assertEqual(s2.bin, '11110001000000100000010')
class Append(unittest.TestCase):
    """Tests for append() and +."""

    def testAppend(self):
        s1 = BitStream('0b00000')
        s1.append(BitStream(bool=True))
        self.assertEqual(s1.bin, '000001')
        self.assertEqual((BitStream('0x0102') + BitStream('0x0304')).hex, '01020304')

    def testAppendSameBitstring(self):
        """Appending a stream to itself must not corrupt the data."""
        s1 = BitStream('0xf0')[:6]
        s1.append(s1)
        self.assertEqual(s1.bin, '111100111100')

    def testAppendWithOffset(self):
        """Appending works when the datastore has a non-zero bit offset."""
        s = BitStream(bytes=b'\x28\x28', offset=1)
        s.append('0b0')
        self.assertEqual(s.hex, '5050')
class ByteAlign(unittest.TestCase):
    """Tests for bytealign() and byte-aligned insertion."""

    def testByteAlign(self):
        """bytealign() advances pos to the next byte boundary."""
        s = BitStream(hex='0001ff23')
        s.bytealign()
        self.assertEqual(s.bytepos, 0)
        s.pos += 11
        s.bytealign()
        self.assertEqual(s.bytepos, 2)
        s.pos -= 10
        s.bytealign()
        self.assertEqual(s.bytepos, 1)

    def testByteAlignWithOffset(self):
        """bytealign() returns the number of bits skipped; a datastore
        offset must not influence the result."""
        s = BitStream(hex='0112233')
        s._datastore = offsetcopy(s._datastore, 3)
        bitstoalign = s.bytealign()
        self.assertEqual(bitstoalign, 0)
        self.assertEqual(s.read(5).bin, '00001')

    def testInsertByteAligned(self):
        s = BitStream('0x0011')
        s.insert(BitStream('0x22'), 8)
        self.assertEqual(s.hex, '002211')
        s = BitStream(0)
        s.insert(BitStream(bin='101'), 0)
        self.assertEqual(s.bin, '101')
class Truncate(unittest.TestCase):
    """Tests for truncation via del of leading/trailing slices."""

    def testTruncateStart(self):
        """del s[:n] removes bits from the start."""
        s = BitStream('0b1')
        del s[:1]
        self.assertFalse(s)
        s = BitStream(hex='1234')
        self.assertEqual(s.hex, '1234')
        del s[:4]
        self.assertEqual(s.hex, '234')
        del s[:9]
        self.assertEqual(s.bin, '100')
        del s[:2]
        self.assertEqual(s.bin, '0')
        self.assertEqual(s.len, 1)
        del s[:1]
        self.assertFalse(s)

    def testTruncateEnd(self):
        """del s[-n:] removes bits from the end."""
        s = BitStream('0b1')
        del s[-1:]
        self.assertFalse(s)
        s = BitStream(bytes=b'\x12\x34')
        self.assertEqual(s.hex, '1234')
        del s[-4:]
        self.assertEqual(s.hex, '123')
        del s[-9:]
        self.assertEqual(s.bin, '000')
        del s[-3:]
        self.assertFalse(s)
        s = BitStream('0b001')
        del s[:2]
        del s[-1:]
        self.assertFalse(s)
class Slice(unittest.TestCase):
    """Tests for plain (step-less) slicing."""

    def testByteAlignedSlice(self):
        s = BitStream(hex='0x123456')
        self.assertEqual(s[8:16].hex, '34')
        s = s[8:24]
        self.assertEqual(s.len, 16)
        self.assertEqual(s.hex, '3456')
        s = s[0:8]
        self.assertEqual(s.hex, '34')
        s.hex = '0x123456'
        self.assertEqual(s[8:24][0:8].hex, '34')

    def testSlice(self):
        s = BitStream(bin='000001111100000')
        s1 = s[0:5]
        s2 = s[5:10]
        s3 = s[10:15]
        self.assertEqual(s1.bin, '00000')
        self.assertEqual(s2.bin, '11111')
        self.assertEqual(s3.bin, '00000')
class Insert(unittest.TestCase):
    """Tests for insert()."""

    def testInsert(self):
        """insert() at current pos or an explicit position; pos ends up
        just after the inserted bits. Out-of-range positions raise."""
        s1 = BitStream(hex='0x123456')
        s2 = BitStream(hex='0xff')
        s1.bytepos = 1
        s1.insert(s2)
        self.assertEqual(s1.bytepos, 2)
        self.assertEqual(s1.hex, '12ff3456')
        s1.insert('0xee', 24)
        self.assertEqual(s1.hex, '12ff34ee56')
        self.assertEqual(s1.bitpos, 32)
        self.assertRaises(ValueError, s1.insert, '0b1', -1000)
        self.assertRaises(ValueError, s1.insert, '0b1', 1000)

    def testInsertNull(self):
        """Inserting an empty stream is a no-op."""
        # NOTE(review): chains off insert()'s return value; later bitstring
        # versions return None from mutating methods — confirm target version.
        s = BitStream(hex='0x123').insert(BitStream(), 3)
        self.assertEqual(s.hex, '123')

    def testInsertBits(self):
        one = BitStream(bin='1')
        zero = BitStream(bin='0')
        s = BitStream(bin='00')
        s.insert(one, 0)
        self.assertEqual(s.bin, '100')
        s.insert(zero, 0)
        self.assertEqual(s.bin, '0100')
        s.insert(one, s.len)
        self.assertEqual(s.bin, '01001')
        # Inserting a stream into itself must not corrupt the data.
        s.insert(s, 2)
        self.assertEqual(s.bin, '0101001001')
class Resetting(unittest.TestCase):
    """Tests for wholesale reassignment via the hex/bin properties."""

    def testSetHex(self):
        s = BitStream()
        s.hex = '0'
        self.assertEqual(s.hex, '0')
        s.hex = '0x010203045'
        self.assertEqual(s.hex, '010203045')
        self.assertRaises(bitstring.CreationError, s._sethex, '0x002g')

    def testSetBin(self):
        s = BitStream(bin="000101101")
        self.assertEqual(s.bin, '000101101')
        self.assertEqual(s.len, 9)
        s.bin = '0'
        self.assertEqual(s.bin, '0')
        self.assertEqual(s.len, 1)

    def testSetEmptyBin(self):
        """Assigning an empty bin string empties the stream."""
        s = BitStream(hex='0x000001b3')
        s.bin = ''
        self.assertEqual(s.len, 0)
        self.assertEqual(s.bin, '')

    def testSetInvalidBin(self):
        s = BitStream()
        self.assertRaises(bitstring.CreationError, s._setbin_safe, '00102')
class Overwriting(unittest.TestCase):
    """Tests for overwrite()."""

    def testOverwriteBit(self):
        s = BitStream(bin='0')
        s.overwrite(BitStream(bin='1'), 0)
        self.assertEqual(s.bin, '1')

    def testOverwriteLimits(self):
        s = BitStream(bin='0b11111')
        s.overwrite(BitStream(bin='000'), 0)
        self.assertEqual(s.bin, '00011')
        s.overwrite('0b000', 2)
        self.assertEqual(s.bin, '00000')

    def testOverwriteNull(self):
        """Overwriting with an empty stream changes nothing."""
        s = BitStream(hex='342563fedec')
        s2 = BitStream(s)
        s.overwrite(BitStream(bin=''), 23)
        self.assertEqual(s.bin, s2.bin)

    def testOverwritePosition(self):
        """With no position argument, overwrite() uses and advances pos."""
        s1 = BitStream(hex='0123456')
        s2 = BitStream(hex='ff')
        s1.bytepos = 1
        s1.overwrite(s2)
        self.assertEqual((s1.hex, s1.bytepos), ('01ff456', 2))
        s1.overwrite('0xff', 0)
        self.assertEqual((s1.hex, s1.bytepos), ('ffff456', 1))

    def testOverwriteWithSelf(self):
        s = BitStream('0x123')
        s.overwrite(s)
        self.assertEqual(s, '0x123')
class Split(unittest.TestCase):
    """Tests for the split() generator."""

    def testSplitByteAlignedCornerCases(self):
        """Empty source yields one empty piece; an empty delimiter is an
        error; an absent delimiter yields the whole stream."""
        s = BitStream()
        bsl = s.split(BitStream(hex='0xff'))
        self.assertEqual(next(bsl).hex, '')
        self.assertRaises(StopIteration, next, bsl)
        s = BitStream(hex='aabbcceeddff')
        delimiter = BitStream()
        bsl = s.split(delimiter)
        self.assertRaises(ValueError, next, bsl)
        delimiter = BitStream(hex='11')
        bsl = s.split(delimiter)
        self.assertEqual(next(bsl).hex, s.hex)

    def testSplitByteAligned(self):
        """Each piece after the first starts with the delimiter; pos is untouched."""
        s = BitStream(hex='0x1234aa1234bbcc1234ffff')
        delimiter = BitStream(hex='1234')
        bsl = s.split(delimiter)
        self.assertEqual([b.hex for b in bsl], ['', '1234aa', '1234bbcc', '1234ffff'])
        self.assertEqual(s.pos, 0)

    def testSplitByteAlignedWithIntialBytes(self):
        # NOTE: method name typo ('Intial') kept for compatibility.
        """Splitting with an explicit start does not disturb the position."""
        s = BitStream(hex='aa471234fedc43 47112233 47 4723 472314')
        delimiter = BitStream(hex='47')
        s.find(delimiter)
        self.assertEqual(s.bytepos, 1)
        bsl = s.split(delimiter, start=0)
        self.assertEqual([b.hex for b in bsl], ['aa', '471234fedc43', '47112233',
                                                '47', '4723', '472314'])
        self.assertEqual(s.bytepos, 1)

    def testSplitByteAlignedWithOverlappingDelimiter(self):
        s = BitStream(hex='aaffaaffaaffaaffaaff')
        bsl = s.split(BitStream(hex='aaffaa'))
        self.assertEqual([b.hex for b in bsl], ['', 'aaffaaff', 'aaffaaffaaff'])
class Adding(unittest.TestCase):
def testAdding(self):
s1 = BitStream(hex='0x0102')
s2 = BitStream(hex='0x0304')
s3 = s1 + s2
self.assertEqual(s1.hex, '0102')
self.assertEqual(s2.hex, '0304')
self.assertEqual(s3.hex, '01020304')
s3 += s1
self.assertEqual(s3.hex, '010203040102')
self.assertEqual(s2[9:16].bin, '0000100')
self.assertEqual(s1[0:9].bin, '000000010')
s4 = BitStream(bin='000000010') +\
BitStream(bin='0000100')
self.assertEqual(s4.bin, '0000000100000100')
s2p = s2[9:16]
s1p = s1[0:9]
s5p = s1p + s2p
s5 = s1[0:9] + s2[9:16]
self.assertEqual(s5.bin, '0000000100000100')
def testMoreAdding(self):
s = BitStream(bin='00') + BitStream(bin='') + BitStream(bin='11')
self.assertEqual(s.bin, '0011')
s = '0b01'
s += BitStream('0b11')
self.assertEqual(s.bin, '0111')
s = BitStream('0x00')
t = BitStream('0x11')
s += t
self.assertEqual(s.hex, '0011')
self.assertEqual(t.hex, '11')
s += s
self.assertEqual(s.hex, '00110011')
def testRadd(self):
s = '0xff' + BitStream('0xee')
self.assertEqual(s.hex, 'ffee')
def testTruncateAsserts(self):
s = BitStream('0x001122')
s.bytepos = 2
del s[-s.len:]
self.assertEqual(s.bytepos, 0)
s.append('0x00')
s.append('0x1122')
s.bytepos = 2
del s[:s.len]
self.assertEqual(s.bytepos, 0)
s.append('0x00')
def testOverwriteErrors(self):
s = BitStream(bin='11111')
self.assertRaises(ValueError, s.overwrite, BitStream(bin='1'), -10)
self.assertRaises(ValueError, s.overwrite, BitStream(bin='1'), 6)
self.assertRaises(ValueError, s.overwrite, BitStream(bin='11111'), 1)
def testDeleteBits(self):
s = BitStream(bin='000111100000')
s.bitpos = 4
del s[4:8]
self.assertEqual(s.bin, '00010000')
del s[4:1004]
self.assertTrue(s.bin, '0001')
def testDeleteBitsWithPosition(self):
s = BitStream(bin='000111100000')
del s[4:8]
self.assertEqual(s.bin, '00010000')
def testDeleteBytes(self):
s = BitStream('0x00112233')
del s[8:8]
self.assertEqual(s.hex, '00112233')
self.assertEqual(s.pos, 0)
del s[8:16]
self.assertEqual(s.hex, '002233')
self.assertEqual(s.bytepos, 0)
del s[:3:8]
self.assertFalse(s)
self.assertEqual(s.pos, 0)
def testGetItemWithPositivePosition(self):
s = BitStream(bin='0b1011')
self.assertEqual(s[0], True)
self.assertEqual(s[1], False)
self.assertEqual(s[2], True)
self.assertEqual(s[3], True)
self.assertRaises(IndexError, s.__getitem__, 4)
def testGetItemWithNegativePosition(self):
s = BitStream(bin='1011')
self.assertEqual(s[-1], True)
self.assertEqual(s[-2], True)
self.assertEqual(s[-3], False)
self.assertEqual(s[-4], True)
self.assertRaises(IndexError, s.__getitem__, -5)
def testSlicing(self):
s = ConstBitStream(hex='0123456789')
self.assertEqual(s[0:8].hex, '01')
self.assertFalse(s[0:0])
self.assertFalse(s[23:20])
self.assertEqual(s[8:12].bin, '0010')
self.assertEqual(s[8:20:4], '0x89')
def testNegativeSlicing(self):
s = ConstBitStream(hex='0x012345678')
self.assertEqual(s[:-8].hex, '0123456')
self.assertEqual(s[-16:-8].hex, '56')
self.assertEqual(s[-24:].hex, '345678')
self.assertEqual(s[-1000:-6:4], '0x012')
def testLen(self):
s = BitStream()
self.assertEqual(len(s), 0)
s.append(BitStream(bin='001'))
self.assertEqual(len(s), 3)
def testJoin(self):
s1 = BitStream(bin='0')
s2 = BitStream(bin='1')
s3 = BitStream(bin='000')
s4 = BitStream(bin='111')
strings = [s1, s2, s1, s3, s4]
s = BitStream().join(strings)
self.assertEqual(s.bin, '010000111')
def testJoin2(self):
s1 = BitStream(hex='00112233445566778899aabbccddeeff')
s2 = BitStream(bin='0b000011')
bsl = [s1[0:32], s1[4:12], s2, s2, s2, s2]
s = ConstBitStream().join(bsl)
self.assertEqual(s.hex, '00112233010c30c3')
bsl = [BitStream(uint=j, length=12) for j in range(10) for i in range(10)]
s = BitStream().join(bsl)
self.assertEqual(s.length, 1200)
def testPos(self):
s = BitStream(bin='1')
self.assertEqual(s.bitpos, 0)
s.read(1)
self.assertEqual(s.bitpos, 1)
def testWritingData(self):
strings = [BitStream(bin=x) for x in ['0', '001', '0011010010', '010010', '1011']]
s = BitStream().join(strings)
s2 = BitStream(bytes=s.bytes)
self.assertEqual(s2.bin, '000100110100100100101011')
s2.append(BitStream(bin='1'))
s3 = BitStream(bytes=s2.tobytes())
self.assertEqual(s3.bin, '00010011010010010010101110000000')
def testWritingDataWithOffsets(self):
s1 = BitStream(bytes=b'\x10')
s2 = BitStream(bytes=b'\x08\x00', length=8, offset=1)
s3 = BitStream(bytes=b'\x04\x00', length=8, offset=2)
self.assertTrue(s1 == s2)
self.assertTrue(s2 == s3)
self.assertTrue(s1.bytes == s2.bytes)
self.assertTrue(s2.bytes == s3.bytes)
def testVariousThings1(self):
hexes = ['12345678', '87654321', 'ffffffffff', 'ed', '12ec']
bins = ['001010', '1101011', '0010000100101110110110', '11', '011']
bsl = []
for (hex, bin) in list(zip(hexes, bins)) * 5:
bsl.append(BitStream(hex=hex))
bsl.append(BitStream(bin=bin))
s = BitStream().join(bsl)
for (hex, bin) in list(zip(hexes, bins)) * 5:
h = s.read(4 * len(hex))
b = s.read(len(bin))
self.assertEqual(h.hex, hex)
self.assertEqual(b.bin, bin)
def testVariousThings2(self):
s1 = BitStream(hex="0x1f08")[:13]
self.assertEqual(s1.bin, '0001111100001')
s2 = BitStream(bin='0101')
self.assertEqual(s2.bin, '0101')
s1.append(s2)
self.assertEqual(s1.length, 17)
self.assertEqual(s1.bin, '00011111000010101')
s1 = s1[3:8]
self.assertEqual(s1.bin, '11111')
def testVariousThings3(self):
s1 = BitStream(hex='0x012480ff')[2:27]
s2 = s1 + s1
self.assertEqual(s2.length, 50)
s3 = s2[0:25]
s4 = s2[25:50]
self.assertEqual(s3.bin, s4.bin)
def testPeekBit(self):
s = BitStream(bin='01')
self.assertEqual(s.peek(1), [0])
self.assertEqual(s.peek(1), [0])
self.assertEqual(s.read(1), [0])
self.assertEqual(s.peek(1), [1])
self.assertEqual(s.peek(1), [1])
s = BitStream(bytes=b'\x1f', offset=3)
self.assertEqual(s.len, 5)
self.assertEqual(s.peek(5).bin, '11111')
self.assertEqual(s.peek(5).bin, '11111')
s.pos += 1
self.assertRaises(bitstring.ReadError, s.peek, 5)
s = BitStream(hex='001122334455')
self.assertEqual(s.peek(8).hex, '00')
self.assertEqual(s.read(8).hex, '00')
s.pos += 33
self.assertRaises(bitstring.ReadError, s.peek, 8)
s = BitStream(hex='001122334455')
self.assertEqual(s.peek(8 * 2).hex, '0011')
self.assertEqual(s.read(8 * 3).hex, '001122')
self.assertEqual(s.peek(8 * 3).hex, '334455')
self.assertRaises(bitstring.ReadError, s.peek, 25)
def testAdvanceBit(self):
s = BitStream(hex='0xff')
s.bitpos = 6
s.pos += 1
self.assertEqual(s.bitpos, 7)
s.bitpos += 1
try:
s.pos += 1
self.assertTrue(False)
except ValueError:
pass
def testAdvanceByte(self):
s = BitStream(hex='0x010203')
s.bytepos += 1
self.assertEqual(s.bytepos, 1)
s.bytepos += 1
self.assertEqual(s.bytepos, 2)
s.bytepos += 1
try:
s.bytepos += 1
self.assertTrue(False)
except ValueError:
pass
def testRetreatBit(self):
s = BitStream(hex='0xff')
try:
s.pos -= 1
self.assertTrue(False)
except ValueError:
pass
s.pos = 5
s.pos -= 1
self.assertEqual(s.pos, 4)
def testRetreatByte(self):
s = BitStream(hex='0x010203')
try:
s.bytepos -= 1
self.assertTrue(False)
except ValueError:
pass
s.bytepos = 3
s.bytepos -= 1
self.assertEqual(s.bytepos, 2)
self.assertEqual(s.read(8).hex, '03')
def testCreationByAuto(self):
s = BitStream('0xff')
self.assertEqual(s.hex, 'ff')
s = BitStream('0b00011')
self.assertEqual(s.bin, '00011')
self.assertRaises(bitstring.CreationError, BitStream, 'hello')
s1 = BitStream(bytes=b'\xf5', length=3, offset=5)
s2 = BitStream(s1, length=1, offset=1)
self.assertEqual(s2, '0b0')
s = BitStream(bytes=b'\xff', offset=2)
t = BitStream(s, offset=2)
self.assertEqual(t, '0b1111')
self.assertRaises(TypeError, BitStream, auto=1.2)
def testCreationByAuto2(self):
s = BitStream('bin=001')
self.assertEqual(s.bin, '001')
s = BitStream('oct=0o007')
self.assertEqual(s.oct, '007')
s = BitStream('hex=123abc')
self.assertEqual(s, '0x123abc')
s = BitStream('bin:2=01')
self.assertEqual(s, '0b01')
for s in ['bin:1=01', 'bits:4=0b1', 'oct:3=000', 'hex:4=0x1234']:
self.assertRaises(bitstring.CreationError, BitStream, s)
def testInsertUsingAuto(self):
s = BitStream('0xff')
s.insert('0x00', 4)
self.assertEqual(s.hex, 'f00f')
self.assertRaises(ValueError, s.insert, 'ff')
def testOverwriteUsingAuto(self):
s = BitStream('0x0110')
s.overwrite('0b1')
self.assertEqual(s.hex, '8110')
s.overwrite('')
self.assertEqual(s.hex, '8110')
self.assertRaises(ValueError, s.overwrite, '0bf')
def testFindUsingAuto(self):
s = BitStream('0b000000010100011000')
self.assertTrue(s.find('0b101'))
self.assertEqual(s.pos, 7)
def testFindbytealignedUsingAuto(self):
s = BitStream('0x00004700')
self.assertTrue(s.find('0b01000111', bytealigned=True))
self.assertEqual(s.bytepos, 2)
def testAppendUsingAuto(self):
s = BitStream('0b000')
s.append('0b111')
self.assertEqual(s.bin, '000111')
s.append('0b0')
self.assertEqual(s.bin, '0001110')
def testSplitByteAlignedUsingAuto(self):
s = BitStream('0x000143563200015533000123')
sections = s.split('0x0001')
self.assertEqual(next(sections).hex, '')
self.assertEqual(next(sections).hex, '0001435632')
self.assertEqual(next(sections).hex, '00015533')
self.assertEqual(next(sections).hex, '000123')
self.assertRaises(StopIteration, next, sections)
def testSplitByteAlignedWithSelf(self):
s = BitStream('0x1234')
sections = s.split(s)
self.assertEqual(next(sections).hex, '')
self.assertEqual(next(sections).hex, '1234')
self.assertRaises(StopIteration, next, sections)
def testPrepend(self):
s = BitStream('0b000')
s.prepend('0b11')
self.assertEqual(s.bin, '11000')
s.prepend(s)
self.assertEqual(s.bin, '1100011000')
s.prepend('')
self.assertEqual(s.bin, '1100011000')
def testNullSlice(self):
s = BitStream('0x111')
t = s[1:1]
self.assertEqual(t._datastore.bytelength, 0)
def testMultipleAutos(self):
s = BitStream('0xa')
s.prepend('0xf')
s.append('0xb')
self.assertEqual(s, '0xfab')
s.prepend(s)
s.append('0x100')
s.overwrite('0x5', 4)
self.assertEqual(s, '0xf5bfab100')
def testReverse(self):
s = BitStream('0b0011')
s.reverse()
self.assertEqual(s.bin, '1100')
s = BitStream('0b10')
s.reverse()
self.assertEqual(s.bin, '01')
s = BitStream()
s.reverse()
self.assertEqual(s.bin, '')
def testInitWithConcatenatedStrings(self):
s = BitStream('0xff 0Xee 0xd 0xcc')
self.assertEqual(s.hex, 'ffeedcc')
s = BitStream('0b0 0B111 0b001')
self.assertEqual(s.bin, '0111001')
s += '0b1' + '0B1'
self.assertEqual(s.bin, '011100111')
s = BitStream(hex='ff0xee')
self.assertEqual(s.hex, 'ffee')
s = BitStream(bin='000b0b11')
self.assertEqual(s.bin, '0011')
s = BitStream(' 0o123 0O 7 0 o1')
self.assertEqual(s.oct, '12371')
s += ' 0 o 332'
self.assertEqual(s.oct, '12371332')
    def testEquals(self):
        """== compares bit content regardless of construction or offset."""
        s1 = BitStream('0b01010101')
        s2 = BitStream('0b01010101')
        self.assertTrue(s1 == s2)
        s3 = BitStream()
        s4 = BitStream()
        self.assertTrue(s3 == s4)
        self.assertFalse(s3 != s4)
        s5 = BitStream(bytes=b'\xff', offset=2, length=3)
        s6 = BitStream('0b111')
        self.assertTrue(s5 == s6)
    def testLargeEquals(self):
        """Equality works on multi-megabit streams and notices a single set bit."""
        s1 = BitStream(10000000)
        s2 = BitStream(10000000)
        s1.set(True, [0, 55, 53214, 5342111, 9999999])
        s2.set(True, [0, 55, 53214, 5342111, 9999999])
        self.assertEqual(s1, s2)
        s1.set(True, 8000000)
        self.assertNotEqual(s1, s2)
    def testNotEquals(self):
        """!= is the logical negation of ==."""
        s1 = BitStream('0b0')
        s2 = BitStream('0b1')
        self.assertTrue(s1 != s2)
        self.assertFalse(s1 != BitStream('0b0'))
    def testEqualityWithAutoInitialised(self):
        """== accepts auto-initialisable strings on either side."""
        a = BitStream('0b00110111')
        self.assertTrue(a == '0b00110111')
        self.assertTrue(a == '0x37')
        self.assertTrue('0b0011 0111' == a)
        self.assertTrue('0x3 0x7' == a)
        self.assertFalse(a == '0b11001000')
        self.assertFalse('0x3737' == a)
    def testInvertSpecialMethod(self):
        """~ returns a bit-flipped copy; double inversion is the identity."""
        s = BitStream('0b00011001')
        self.assertEqual((~s).bin, '11100110')
        self.assertEqual((~BitStream('0b0')).bin, '1')
        self.assertEqual((~BitStream('0b1')).bin, '0')
        self.assertTrue(~~s == s)
    def testInvertBitPosition(self):
        """~ leaves the source position alone and resets the copy's to zero."""
        s = ConstBitStream('0xefef')
        s.pos = 8
        t = ~s
        self.assertEqual(s.pos, 8)
        self.assertEqual(t.pos, 0)
    def testInvertSpecialMethodErrors(self):
        """Inverting an empty stream raises bitstring.Error."""
        s = BitStream()
        self.assertRaises(bitstring.Error, s.__invert__)
    def testJoinWithAuto(self):
        """join() accepts a mixture of strings and BitStreams."""
        s = BitStream().join(['0xf', '0b00', BitStream(bin='11')])
        self.assertEqual(s, '0b11110011')
    def testAutoBitStringCopy(self):
        """Constructing from another BitStream copies it; later edits don't leak."""
        s = BitStream('0xabcdef')
        t = BitStream(s)
        self.assertEqual(t.hex, 'abcdef')
        del s[-8:]
        self.assertEqual(t.hex, 'abcdef')
    def testMultiplication(self):
        """* and *= repeat the stream; factor 0 empties it and resets pos."""
        a = BitStream('0xff')
        b = a * 8
        self.assertEqual(b, '0xffffffffffffffff')
        b = 4 * a
        self.assertEqual(b, '0xffffffff')
        self.assertTrue(1 * a == a * 1 == a)
        c = a * 0
        self.assertFalse(c)
        a *= 3
        self.assertEqual(a, '0xffffff')
        a *= 0
        self.assertFalse(a)
        one = BitStream('0b1')
        zero = BitStream('0b0')
        mix = one * 2 + 3 * zero + 2 * one * 2
        self.assertEqual(mix, '0b110001111')
        q = BitStream()
        q *= 143
        self.assertFalse(q)
        q += [True, True, False]
        q.pos += 2
        q *= 0
        self.assertFalse(q)
        self.assertEqual(q.bitpos, 0)
    def testMultiplicationWithFiles(self):
        """*= works on file-backed streams and triples the length."""
        a = BitStream(filename='test.m1v')
        b = a.len
        a *= 3
        self.assertEqual(a.len, 3 * b)
    def testMultiplicationErrors(self):
        """Negative factors raise ValueError; non-int factors raise TypeError."""
        a = BitStream('0b1')
        b = BitStream('0b0')
        self.assertRaises(ValueError, a.__mul__, -1)
        self.assertRaises(ValueError, a.__imul__, -1)
        self.assertRaises(ValueError, a.__rmul__, -1)
        self.assertRaises(TypeError, a.__mul__, 1.2)
        self.assertRaises(TypeError, a.__rmul__, b)
        self.assertRaises(TypeError, a.__imul__, b)
    def testFileAndMemEquivalence(self):
        """ConstBitStream mmaps a file while BitStream copies it; bytes agree."""
        a = ConstBitStream(filename='smalltestfile')
        b = BitStream(filename='smalltestfile')
        self.assertTrue(isinstance(a._datastore._rawarray, bitstring.bits.MmapByteArray))
        self.assertTrue(isinstance(b._datastore._rawarray, bytearray))
        self.assertEqual(a._datastore.getbyte(0), b._datastore.getbyte(0))
        self.assertEqual(a._datastore.getbyteslice(1, 5), bytearray(b._datastore.getbyteslice(1, 5)))
    def testByte2Bits(self):
        """The BYTE_TO_BITS lookup table round-trips every byte value."""
        for i in range(256):
            s = BitStream(bin=bitstring.bits.BYTE_TO_BITS[i])
            self.assertEqual(i, s.uint)
            self.assertEqual(s.length, 8)
    def testBitwiseAnd(self):
        """& requires equal lengths and resets the result's position."""
        a = BitStream('0b01101')
        b = BitStream('0b00110')
        self.assertEqual((a & b).bin, '00100')
        self.assertEqual((a & '0b11111'), a)
        self.assertRaises(ValueError, a.__and__, '0b1')
        self.assertRaises(ValueError, b.__and__, '0b110111111')
        c = BitStream('0b0011011')
        c.pos = 4
        d = c & '0b1111000'
        self.assertEqual(d.pos, 0)
        self.assertEqual(d.bin, '0011000')
        d = '0b1111000' & c
        self.assertEqual(d.bin, '0011000')
    def testBitwiseOr(self):
        """| requires equal lengths and works with auto-initialised operands."""
        a = BitStream('0b111001001')
        b = BitStream('0b011100011')
        self.assertEqual((a | b).bin, '111101011')
        self.assertEqual((a | '0b000000000'), a)
        self.assertRaises(ValueError, a.__or__, '0b0000')
        self.assertRaises(ValueError, b.__or__, a + '0b1')
        a = '0xff00' | BitStream('0x00f0')
        self.assertEqual(a.hex, 'fff0')
    def testBitwiseXor(self):
        """^ requires equal lengths and works with auto-initialised operands."""
        a = BitStream('0b111001001')
        b = BitStream('0b011100011')
        self.assertEqual((a ^ b).bin, '100101010')
        self.assertEqual((a ^ '0b111100000').bin, '000101001')
        self.assertRaises(ValueError, a.__xor__, '0b0000')
        self.assertRaises(ValueError, b.__xor__, a + '0b1')
        a = '0o707' ^ BitStream('0o777')
        self.assertEqual(a.oct, '070')
    def testSplit(self):
        """split() yields sections starting at each delimiter, leaving pos alone."""
        a = BitStream('0b0 010100111 010100 0101 010')
        a.pos = 20
        subs = [i.bin for i in a.split('0b010')]
        self.assertEqual(subs, ['0', '010100111', '010100', '0101', '010'])
        self.assertEqual(a.pos, 20)
    def testSplitCornerCases(self):
        """split() with no delimiter match yields the whole (possibly empty) stream."""
        a = BitStream('0b000000')
        bsl = a.split('0b1', False)
        self.assertEqual(next(bsl), a)
        self.assertRaises(StopIteration, next, bsl)
        b = BitStream()
        bsl = b.split('0b001', False)
        self.assertFalse(next(bsl))
        self.assertRaises(StopIteration, next, bsl)
    def testSplitErrors(self):
        """Splitting on an empty delimiter raises ValueError (lazily, on next)."""
        a = BitStream('0b0')
        b = a.split('', False)
        self.assertRaises(ValueError, next, b)
    def testPositionInSlice(self):
        """Slicing returns a new stream with its own position reset to zero."""
        a = BitStream('0x00ffff00')
        a.bytepos = 2
        b = a[8:24]
        self.assertEqual(b.bytepos, 0)
    def testSliceWithOffset(self):
        """Slicing is correct when the underlying bytes carry a bit offset."""
        a = BitStream(bytes=b'\x00\xff\x00', offset=7)
        b = a[7:12]
        self.assertEqual(b.bin, '11000')
    def testSplitWithMaxsplit(self):
        """split(count=n) limits the number of sections returned."""
        a = BitStream('0xaabbccbbccddbbccddee')
        self.assertEqual(len(list(a.split('0xbb', bytealigned=True))), 4)
        bsl = list(a.split('0xbb', count=1, bytealigned=True))
        self.assertEqual((len(bsl), bsl[0]), (1, '0xaa'))
        bsl = list(a.split('0xbb', count=2, bytealigned=True))
        self.assertEqual(len(bsl), 2)
        self.assertEqual(bsl[0], '0xaa')
        self.assertEqual(bsl[1], '0xbbcc')
    def testSplitMore(self):
        """split(count=i) matches the first i results of an unlimited split."""
        s = BitStream('0b1100011001110110')
        for i in range(10):
            a = list(s.split('0b11', False, count=i))
            b = list(s.split('0b11', False))[:i]
            self.assertEqual(a, b)
        b = s.split('0b11', count=-1)
        self.assertRaises(ValueError, next, b)
    def testFindByteAlignedWithBits(self):
        """A bytealigned find of a sub-byte pattern still lands on a byte boundary."""
        a = BitStream('0x00112233445566778899')
        a.find('0b0001', bytealigned=True)
        self.assertEqual(a.bitpos, 8)
    def testFindStartbitNotByteAligned(self):
        """find(start=...) limits the search start; pos moves only on success."""
        a = BitStream('0b0010000100')
        found = a.find('0b1', start=4)
        self.assertEqual((found, a.bitpos), ((7,), 7))
        found = a.find('0b1', start=2)
        self.assertEqual((found, a.bitpos), ((2,), 2))
        found = a.find('0b1', bytealigned=False, start=8)
        self.assertEqual((found, a.bitpos), ((), 2))
    def testFindEndbitNotByteAligned(self):
        """find(end=...) limits the search end; pos moves only on success."""
        a = BitStream('0b0010010000')
        found = a.find('0b1', bytealigned=False, end=2)
        self.assertEqual((found, a.bitpos), ((), 0))
        found = a.find('0b1', end=3)
        self.assertEqual((found, a.bitpos), ((2,), 2))
        found = a.find('0b1', bytealigned=False, start=3, end=5)
        self.assertEqual((found, a.bitpos), ((), 2))
        found = a.find('0b1', start=3, end=6)
        self.assertEqual((found[0], a.bitpos), (5, 5))
    def testFindStartbitByteAligned(self):
        """Byte-aligned find honours a non-aligned start bit."""
        a = BitStream('0xff001122ff0011ff')
        a.pos = 40
        found = a.find('0x22', start=23, bytealigned=True)
        self.assertEqual((found, a.bytepos), ((24,), 3))
        a.bytepos = 4
        found = a.find('0x22', start=24, bytealigned=True)
        self.assertEqual((found, a.bytepos), ((24,), 3))
        found = a.find('0x22', start=25, bytealigned=True)
        self.assertEqual((found, a.pos), ((), 24))
        found = a.find('0b111', start=40, bytealigned=True)
        self.assertEqual((found, a.pos), ((56,), 56))
    def testFindEndbitByteAligned(self):
        """Byte-aligned find honours an end bit that truncates the match."""
        a = BitStream('0xff001122ff0011ff')
        found = a.find('0x22', end=31, bytealigned=True)
        self.assertFalse(found)
        self.assertEqual(a.pos, 0)
        found = a.find('0x22', end=32, bytealigned=True)
        self.assertTrue(found)
        self.assertEqual(a.pos, 24)
        self.assertEqual(found[0], 24)
def testFindStartEndbitErrors(self):
a = BitStream('0b00100')
self.assertRaises(ValueError, a.find, '0b1', bytealigned=False, start=-100)
self.assertRaises(ValueError, a.find, '0b1', end=6)
self.assertRaises(ValueError, a.find, '0b1', start=4, end=3)
b = BitStream('0x0011223344')
self.assertRaises(ValueError, a.find, '0x22', bytealigned=True, start=-100)
self.assertRaises(ValueError, a.find, '0x22', end=41, bytealigned=True)
    def testSplitStartbit(self):
        """split(start=...) restricts the region; bad bounds raise lazily."""
        a = BitStream('0b0010101001000000001111')
        bsl = a.split('0b001', bytealigned=False, start=1)
        self.assertEqual([x.bin for x in bsl], ['010101', '001000000', '001111'])
        b = a.split('0b001', start=-100)
        self.assertRaises(ValueError, next, b)
        b = a.split('0b001', start=23)
        self.assertRaises(ValueError, next, b)
        b = a.split('0b1', start=10, end=9)
        self.assertRaises(ValueError, next, b)
    def testSplitStartbitByteAligned(self):
        """Byte-aligned split with a non-aligned start bit."""
        a = BitStream('0x00ffffee')
        bsl = list(a.split('0b111', start=9, bytealigned=True))
        self.assertEqual([x.bin for x in bsl], ['1111111', '11111111', '11101110'])
    def testSplitEndbit(self):
        """split(end=...) restricts the region; end may truncate a delimiter."""
        a = BitStream('0b000010001001011')
        bsl = list(a.split('0b1', bytealigned=False, end=14))
        self.assertEqual([x.bin for x in bsl], ['0000', '1000', '100', '10', '1'])
        self.assertEqual(list(a[4:12].split('0b0', False)), list(a.split('0b0', start=4, end=12)))
        # Shouldn't raise ValueError
        bsl = list(a.split('0xffee', end=15))
        # Whereas this one will when we call next()
        bsl = a.split('0xffee', end=16)
        self.assertRaises(ValueError, next, bsl)
    def testSplitEndbitByteAligned(self):
        """Byte-aligned split where end falls just inside/outside the delimiter."""
        a = BitStream('0xff00ff')[:22]
        bsl = list(a.split('0b 0000 0000 111', end=19))
        self.assertEqual([x.bin for x in bsl], ['11111111', '00000000111'])
        bsl = list(a.split('0b 0000 0000 111', end=18))
        self.assertEqual([x.bin for x in bsl], ['111111110000000011'])
    def testSplitMaxSplit(self):
        """split(count=i) yields exactly i sections when enough matches exist."""
        a = BitStream('0b1' * 20)
        for i in range(10):
            bsl = list(a.split('0b1', count=i))
            self.assertEqual(len(bsl), i)
    def testPrependAndAppendAgain(self):
        """prepend shifts an existing position; append leaves it; offsets work."""
        c = BitStream('0x1122334455667788')
        c.bitpos = 40
        c.prepend('0b1')
        self.assertEqual(c.bitpos, 41)
        c = BitStream()
        c.prepend('0x1234')
        self.assertEqual(c.bytepos, 2)
        c = BitStream()
        c.append('0x1234')
        self.assertEqual(c.bytepos, 0)
        s = BitStream(bytes=b'\xff\xff', offset=2)
        self.assertEqual(s.length, 14)
        t = BitStream(bytes=b'\x80', offset=1, length=2)
        s.prepend(t)
        self.assertEqual(s, '0x3fff')
    def testFindAll(self):
        """findall() yields every (possibly overlapping) match position."""
        a = BitStream('0b11111')
        p = a.findall('0b1')
        self.assertEqual(list(p), [0, 1, 2, 3, 4])
        p = a.findall('0b11')
        self.assertEqual(list(p), [0, 1, 2, 3])
        p = a.findall('0b10')
        self.assertEqual(list(p), [])
        a = BitStream('0x4733eeff66554747335832434547')
        p = a.findall('0x47', bytealigned=True)
        self.assertEqual(list(p), [0, 6 * 8, 7 * 8, 13 * 8])
        p = a.findall('0x4733', bytealigned=True)
        self.assertEqual(list(p), [0, 7 * 8])
        a = BitStream('0b1001001001001001001')
        p = a.findall('0b1001', bytealigned=False)
        self.assertEqual(list(p), [0, 3, 6, 9, 12, 15])
        self.assertEqual(a.pos, 15)
    def testFindAllGenerator(self):
        """findall() is a lazy generator that can be consumed with next()."""
        a = BitStream('0xff1234512345ff1234ff12ff')
        p = a.findall('0xff', bytealigned=True)
        self.assertEqual(next(p), 0)
        self.assertEqual(next(p), 6 * 8)
        self.assertEqual(next(p), 9 * 8)
        self.assertEqual(next(p), 11 * 8)
        self.assertRaises(StopIteration, next, p)
    def testFindAllCount(self):
        """findall(count=i) limits results; a negative count raises lazily."""
        s = BitStream('0b1') * 100
        for i in [0, 1, 23]:
            self.assertEqual(len(list(s.findall('0b1', count=i))), i)
        b = s.findall('0b1', bytealigned=True, count=-1)
        self.assertRaises(ValueError, next, b)
    def testContains(self):
        """The in operator checks for a sub-stream without moving pos."""
        a = BitStream('0b1') + '0x0001dead0001'
        self.assertTrue('0xdead' in a)
        self.assertEqual(a.pos, 0)
        self.assertFalse('0xfeed' in a)
def testRepr(self):
max = bitstring.bits.MAX_CHARS
bls = ['', '0b1', '0o5', '0x43412424f41', '0b00101001010101']
for bs in bls:
a = BitStream(bs)
b = eval(a.__repr__())
self.assertTrue(a == b)
for f in [ConstBitStream(filename='test.m1v'),
ConstBitStream(filename='test.m1v', length=17),
ConstBitStream(filename='test.m1v', length=23, offset=23102)]:
f2 = eval(f.__repr__())
self.assertEqual(f._datastore._rawarray.source.name, f2._datastore._rawarray.source.name)
self.assertTrue(f2 == f)
a = BitStream('0b1')
self.assertEqual(repr(a), "BitStream('0b1')")
a += '0b11'
self.assertEqual(repr(a), "BitStream('0b111')")
a += '0b1'
self.assertEqual(repr(a), "BitStream('0xf')")
a *= max
self.assertEqual(repr(a), "BitStream('0x" + "f" * max + "')")
a += '0xf'
self.assertEqual(repr(a), "BitStream('0x" + "f" * max + "...') # length=%d" % (max * 4 + 4))
    def testPrint(self):
        """__str__ prefers hex form and truncates long streams with '...'."""
        s = BitStream(hex='0x00')
        self.assertEqual('0x' + s.hex, s.__str__())
        s = BitStream(filename='test.m1v')
        self.assertEqual('0x' + s[0:bitstring.bits.MAX_CHARS * 4].hex + '...', s.__str__())
        self.assertEqual(BitStream().__str__(), '')
    def testIter(self):
        """Iterating a stream yields booleans that reconstruct it exactly."""
        a = BitStream('0b001010')
        b = BitStream()
        for bit in a:
            b.append(ConstBitStream(bool=bit))
        self.assertEqual(a, b)
    def testDelitem(self):
        """del s[a:b] removes bits; deleting from an empty stream is a no-op."""
        a = BitStream('0xffee')
        del a[0:8]
        self.assertEqual(a.hex, 'ee')
        del a[0:8]
        self.assertFalse(a)
        del a[10:12]
        self.assertFalse(a)
    def testNonZeroBitsAtStart(self):
        """Appending an offset-stored stream keeps its logical bit value."""
        a = BitStream(bytes=b'\xff', offset=2)
        b = BitStream('0b00')
        b += a
        self.assertTrue(b == '0b0011 1111')
        #self.assertEqual(a._datastore.rawbytes, b'\xff')
        self.assertEqual(a.tobytes(), b'\xfc')
    def testNonZeroBitsAtEnd(self):
        """A non-byte-multiple stream pads with zeros in tobytes()."""
        a = BitStream(bytes=b'\xff', length=5)
        #self.assertEqual(a._datastore.rawbytes, b'\xff')
        b = BitStream('0b00')
        a += b
        self.assertTrue(a == '0b1111100')
        self.assertEqual(a.tobytes(), b'\xf8')
        self.assertRaises(ValueError, a._getbytes)
    #def testLargeOffsets(self):
        #a = BitStream('0xffffffff', offset=32)
        #self.assertFalse(a)
        #b = BitStream(bytes=b'\xff\xff\xff\xfd', offset=30, length=1)
        #self.assertEqual(b, '0b0')
        #o = BitStream(oct='123456707', offset=24)
        #self.assertEqual(o, '0o7')
        #d = BitStream(bytes=b'\x00\x00\x00\x00\x0f', offset=33, length=5)
        #self.assertEqual(d, '0b00011')
    def testNewOffsetErrors(self):
        """Negative offsets and offsets past the data raise CreationError."""
        self.assertRaises(bitstring.CreationError, BitStream, hex='ff', offset=-1)
        self.assertRaises(bitstring.CreationError, BitStream, '0xffffffff', offset=33)
    def testSliceStep(self):
        """Positive slice steps scale indices by 'step' bits per unit."""
        a = BitStream('0x3')
        b = a[::1]
        self.assertEqual(a, b)
        self.assertEqual(a[1:2:2], '0b11')
        self.assertEqual(a[0:1:2], '0b00')
        self.assertEqual(a[:1:3], '0o1')
        self.assertEqual(a[::4], a)
        self.assertFalse(a[::5])
        a = BitStream('0x0011223344556677')
        self.assertEqual(a[3:5:8], '0x3344')
        self.assertEqual(a[5::8], '0x556677')
        self.assertEqual(a[-1::8], '0x77')
        self.assertEqual(a[-2::4], '0x77')
        self.assertEqual(a[:-3:8], '0x0011223344')
        self.assertEqual(a[-1000:-3:8], '0x0011223344')
        a.append('0b1')
        self.assertEqual(a[5::8], '0x556677')
        self.assertEqual(a[5:100:8], '0x556677')
    def testSliceNegativeStep(self):
        """Negative slice steps reverse chunk order (e.g. [::-8] reverses bytes)."""
        a = BitStream('0o 01 23 45 6')
        self.assertEqual(a[::-3], '0o6543210')
        self.assertFalse(a[1:3:-6])
        self.assertEqual(a[2:0:-6], '0o4523')
        self.assertEqual(a[2::-6], '0o452301')
        b = a[::-1]
        a.reverse()
        self.assertEqual(b, a)
        b = BitStream('0x01020408') + '0b11'
        self.assertEqual(b[::-8], '0x08040201')
        self.assertEqual(b[::-4], '0x80402010')
        self.assertEqual(b[::-2], '0b11' + BitStream('0x20108040'))
        self.assertEqual(b[::-33], b[:33])
        self.assertEqual(b[::-34], b)
        self.assertFalse(b[::-35])
        self.assertEqual(b[-1:-3:-8], '0x0402')
    def testInsertionOrderAndBitpos(self):
        """Slice-assignment at [0:0] vs insert() leave pos at different places."""
        b = BitStream()
        b[0:0] = '0b0'
        b[0:0] = '0b1'
        self.assertEqual(b, '0b10')
        self.assertEqual(b.bitpos, 1)
        a = BitStream()
        a.insert('0b0')
        a.insert('0b1')
        self.assertEqual(a, '0b01')
        self.assertEqual(a.bitpos, 2)
    def testOverwriteOrderAndBitpos(self):
        """overwrite() writes at pos by default and advances it."""
        a = BitStream('0xff')
        a.overwrite('0xa')
        self.assertEqual(a, '0xaf')
        self.assertEqual(a.bitpos, 4)
        a.overwrite('0xb')
        self.assertEqual(a, '0xab')
        self.assertEqual(a.bitpos, 8)
        self.assertRaises(ValueError, a.overwrite, '0b1')
        a.overwrite('0xa', 4)
        self.assertEqual(a, '0xaa')
        self.assertEqual(a.bitpos, 8)
    def testInitSliceWithInt(self):
        """Assigning ints to slices/indices stores two's-complement bit patterns."""
        a = BitStream(length=8)
        a[:] = 100
        self.assertEqual(a.uint, 100)
        a[0] = 1
        self.assertEqual(a.bin, '11100100')
        a[1] = 0
        self.assertEqual(a.bin, '10100100')
        a[-1] = -1
        self.assertEqual(a.bin, '10100101')
        a[-3:] = -2
        self.assertEqual(a.bin, '10100110')
    def testInitSliceWithIntErrors(self):
        """Int slice-assignment raises ValueError when the value doesn't fit."""
        a = BitStream('0b0000')
        self.assertRaises(ValueError, a.__setitem__, slice(0, 4), 16)
        self.assertRaises(ValueError, a.__setitem__, slice(0, 4), -9)
        self.assertRaises(ValueError, a.__setitem__, 0, 2)
        self.assertRaises(ValueError, a.__setitem__, 0, -2)
    def testReverseWithSlice(self):
        """reverse(start, end) reverses only the given bit range."""
        a = BitStream('0x0012ff')
        a.reverse()
        self.assertEqual(a, '0xff4800')
        a.reverse(8, 16)
        self.assertEqual(a, '0xff1200')
        b = a[8:16]
        b.reverse()
        a[8:16] = b
        self.assertEqual(a, '0xff4800')
    def testReverseWithSliceErrors(self):
        """reverse() rejects negative, inverted, or out-of-range bounds."""
        a = BitStream('0x123')
        self.assertRaises(ValueError, a.reverse, -1, 4)
        self.assertRaises(ValueError, a.reverse, 10, 9)
        self.assertRaises(ValueError, a.reverse, 1, 10000)
    def testInitialiseFromList(self):
        """Lists initialise via per-element truthiness; lists compare and concat."""
        a = BitStream([])
        self.assertFalse(a)
        a = BitStream([True, False, [], [0], 'hello'])
        self.assertEqual(a, '0b10011')
        a += []
        self.assertEqual(a, '0b10011')
        a += [True, False, True]
        self.assertEqual(a, '0b10011101')
        a.find([12, 23])
        self.assertEqual(a.pos, 3)
        self.assertEqual([1, 0, False, True], BitStream('0b1001'))
        a = [True] + BitStream('0b1')
        self.assertEqual(a, '0b11')
    def testInitialiseFromTuple(self):
        """Tuples behave like lists for initialisation, replace and compare."""
        a = BitStream(())
        self.assertFalse(a)
        a = BitStream((0, 1, '0', '1'))
        self.assertEqual('0b0111', a)
        a.replace((True, True), [])
        self.assertEqual(a, (False, True))
    def testCut(self):
        """cut() yields equal-width chunks, honouring start/end/count."""
        a = BitStream('0x00112233445')
        b = list(a.cut(8))
        self.assertEqual(b, ['0x00', '0x11', '0x22', '0x33', '0x44'])
        b = list(a.cut(4, 8, 16))
        self.assertEqual(b, ['0x1', '0x1'])
        b = list(a.cut(4, 0, 44, 4))
        self.assertEqual(b, ['0x0', '0x0', '0x1', '0x1'])
        a = BitStream()
        b = list(a.cut(10))
        self.assertTrue(not b)
    def testCutErrors(self):
        """cut() rejects bad ranges, zero widths and negative counts (lazily)."""
        a = BitStream('0b1')
        b = a.cut(1, 1, 2)
        self.assertRaises(ValueError, next, b)
        b = a.cut(1, -2, 1)
        self.assertRaises(ValueError, next, b)
        b = a.cut(0)
        self.assertRaises(ValueError, next, b)
        b = a.cut(1, count=-1)
        self.assertRaises(ValueError, next, b)
    def testCutProblem(self):
        """Mutating the stream while iterating a materialised cut() is safe."""
        s = BitStream('0x1234')
        for n in list(s.cut(4)):
            s.prepend(n)
        self.assertEqual(s, '0x43211234')
    def testJoinFunctions(self):
        """join() interleaves the separator; an empty iterable gives empty."""
        a = BitStream().join(['0xa', '0xb', '0b1111'])
        self.assertEqual(a, '0xabf')
        a = BitStream('0b1').join(['0b0' for i in range(10)])
        self.assertEqual(a, '0b0101010101010101010')
        a = BitStream('0xff').join([])
        self.assertFalse(a)
    def testAddingBitpos(self):
        """+ produces a new stream with position reset to zero."""
        a = BitStream('0xff')
        b = BitStream('0x00')
        a.bitpos = b.bitpos = 8
        c = a + b
        self.assertEqual(c.bitpos, 0)
    def testIntelligentRead1(self):
        """read/readlist parse 'uint:n' and 'int:n' format tokens."""
        a = BitStream(uint=123, length=23)
        u = a.read('uint:23')
        self.assertEqual(u, 123)
        self.assertEqual(a.pos, a.len)
        b = BitStream(int=-12, length=44)
        i = b.read('int:44')
        self.assertEqual(i, -12)
        self.assertEqual(b.pos, b.len)
        u2, i2 = (a + b).readlist('uint:23, int:44')
        self.assertEqual((u2, i2), (123, -12))
    def testIntelligentRead2(self):
        """read/readlist parse exponential-Golomb 'ue' and 'se' tokens."""
        a = BitStream(ue=822)
        u = a.read('ue')
        self.assertEqual(u, 822)
        self.assertEqual(a.pos, a.len)
        b = BitStream(se=-1001)
        s = b.read('se')
        self.assertEqual(s, -1001)
        self.assertEqual(b.pos, b.len)
        s, u1, u2 = (b + 2 * a).readlist('se, ue, ue')
        self.assertEqual((s, u1, u2), (-1001, 822, 822))
    def testIntelligentRead3(self):
        """read/readlist parse 'hex:n' and 'bin:n' tokens (whitespace tolerant)."""
        a = BitStream('0x123') + '0b11101'
        h = a.read('hex:12')
        self.assertEqual(h, '123')
        b = a.read('bin: 5')
        self.assertEqual(b, '11101')
        c = '0b' + b + a
        b, h = c.readlist('bin:5, hex:12')
        self.assertEqual((b, h), ('11101', '123'))
    def testIntelligentRead4(self):
        """read parses 'oct:n' tokens."""
        a = BitStream('0o007')
        o = a.read('oct:9')
        self.assertEqual(o, '007')
        self.assertEqual(a.pos, a.len)
    def testIntelligentRead5(self):
        """read parses 'bits:n' tokens, returning BitStream slices."""
        a = BitStream('0x00112233')
        c0, c1, c2 = a.readlist('bits:8, bits:8, bits:16')
        self.assertEqual((c0, c1, c2), (BitStream('0x00'), BitStream('0x11'), BitStream('0x2233')))
        a.pos = 0
        c = a.read('bits:16')
        self.assertEqual(c, BitStream('0x0011'))
    def testIntelligentRead6(self):
        """Format tokens tolerate extra whitespace around the colon."""
        a = BitStream('0b000111000')
        b1, b2, b3 = a.readlist('bin :3, int: 3, int:3')
        self.assertEqual(b1, '000')
        self.assertEqual(b2, -1)
        self.assertEqual(b3, 0)
    def testIntelligentRead7(self):
        """Zero-length string reads succeed; zero-length int reads raise."""
        a = BitStream('0x1234')
        a1, a2, a3, a4 = a.readlist('bin:0, oct:0, hex:0, bits:0')
        self.assertTrue(a1 == a2 == a3 == '')
        self.assertFalse(a4)
        self.assertRaises(ValueError, a.read, 'int:0')
        self.assertRaises(ValueError, a.read, 'uint:0')
        self.assertEqual(a.pos, 0)
    def testIntelligentRead8(self):
        """Malformed format tokens raise ValueError."""
        a = BitStream('0x123456')
        for t in ['hex:1', 'oct:1', 'hex4', '-5', 'fred', 'bin:-2',
                  'uint:p', 'uint:-2', 'int:u', 'int:-3', 'ses', 'uee', '-14']:
            self.assertRaises(ValueError, a.read, t)
    def testIntelligentRead9(self):
        """An unsized 'intle' token consumes the rest of the stream."""
        a = BitStream('0xff')
        self.assertEqual(a.read('intle'), -1)
    def testFillerReads1(self):
        """One unsized token in a format consumes the remaining bits."""
        s = BitStream('0x012345')
        t = s.read('bits')
        self.assertEqual(s, t)
        s.pos = 0
        a, b = s.readlist('hex:8, hex')
        self.assertEqual(a, '01')
        self.assertEqual(b, '2345')
        self.assertTrue(isinstance(b, str))
        s.bytepos = 0
        a, b = s.readlist('bin, hex:20')
        self.assertEqual(a, '0000')
        self.assertEqual(b, '12345')
        self.assertTrue(isinstance(a, str))
    def testFillerReads2(self):
        """An unsized token combined with variable-length tokens is an error."""
        s = BitStream('0xabcdef')
        self.assertRaises(bitstring.Error, s.readlist, 'bits, se')
        self.assertRaises(bitstring.Error, s.readlist, 'hex:4, bits, ue, bin:4')
    def testIntelligentPeek(self):
        """peeklist parses format tokens without advancing pos."""
        a = BitStream('0b01, 0x43, 0o4, uint:23=2, se=5, ue=3')
        b, c, e = a.peeklist('bin:2, hex:8, oct:3')
        self.assertEqual((b, c, e), ('01', '43', '4'))
        self.assertEqual(a.pos, 0)
        a.pos = 13
        f, g, h = a.peeklist('uint:23, se, ue')
        self.assertEqual((f, g, h), (2, 5, 3))
        self.assertEqual(a.pos, 13)
    def testReadMultipleBits(self):
        """readlist accepts a list of plain bit counts."""
        s = BitStream('0x123456789abcdef')
        a, b = s.readlist([4, 4])
        self.assertEqual(a, '0x1')
        self.assertEqual(b, '0x2')
        c, d, e = s.readlist([8, 16, 8])
        self.assertEqual(c, '0x34')
        self.assertEqual(d, '0x5678')
        self.assertEqual(e, '0x9a')
    def testPeekMultipleBits(self):
        """peeklist accepts a list of bit counts and leaves pos unchanged."""
        s = BitStream('0b1101, 0o721, 0x2234567')
        a, b, c, d = s.peeklist([2, 1, 1, 9])
        self.assertEqual(a, '0b11')
        self.assertEqual(bool(b), False)
        self.assertEqual(bool(c), True)
        self.assertEqual(d, '0o721')
        self.assertEqual(s.pos, 0)
        a, b = s.peeklist([4, 9])
        self.assertEqual(a, '0b1101')
        self.assertEqual(b, '0o721')
        s.pos = 13
        a, b = s.peeklist([16, 8])
        self.assertEqual(a, '0x2234')
        self.assertEqual(b, '0x56')
        self.assertEqual(s.pos, 13)
    def testDifficultPrepends(self):
        """Repeated prepend equals repetition by multiplication."""
        a = BitStream('0b1101011')
        b = BitStream()
        for i in range(10):
            b.prepend(a)
        self.assertEqual(b, a * 10)
    def testPackingWrongNumberOfThings(self):
        """pack() raises when value counts don't match the format."""
        self.assertRaises(bitstring.CreationError, pack, 'bin:1')
        self.assertRaises(bitstring.CreationError, pack, '', 100)
    def testPackWithVariousKeys(self):
        """pack() keyword substitution works even with odd key names."""
        a = pack('uint10', uint10='0b1')
        self.assertEqual(a, '0b1')
        b = pack('0b110', **{'0b110': '0xfff'})
        self.assertEqual(b, '0xfff')
    def testPackWithVariableLength(self):
        """pack() resolves symbolic lengths from keyword arguments."""
        for i in range(1, 11):
            a = pack('uint:n', 0, n=i)
            self.assertEqual(a.bin, '0' * i)
    def testToBytes(self):
        """tobytes() pads a partial final byte with zero bits."""
        a = BitStream(bytes=b'\xab\x00')
        b = a.tobytes()
        self.assertEqual(a.bytes, b)
        for i in range(7):
            del a[-1:]
            self.assertEqual(a.tobytes(), b'\xab\x00')
        del a[-1:]
        self.assertEqual(a.tobytes(), b'\xab')
    def testToFile(self):
        """tofile() writes padded bytes that read back as expected."""
        a = BitStream('0x0000ff')[:17]
        f = open('temp_bitstring_unit_testing_file', 'wb')
        a.tofile(f)
        f.close()
        b = BitStream(filename='temp_bitstring_unit_testing_file')
        self.assertEqual(b, '0x000080')
        a = BitStream('0x911111')
        del a[:1]
        self.assertEqual(a + '0b0', '0x222222')
        f = open('temp_bitstring_unit_testing_file', 'wb')
        a.tofile(f)
        f.close()
        b = BitStream(filename='temp_bitstring_unit_testing_file')
        self.assertEqual(b, '0x222222')
        os.remove('temp_bitstring_unit_testing_file')
    #def testToFileWithLargerFile(self):
    #    a = BitStream(length=16000000)
    #    a[1] = '0b1'
    #    a[-2] = '0b1'
    #    f = open('temp_bitstring_unit_testing_file' ,'wb')
    #    a.tofile(f)
    #    f.close()
    #    b = BitStream(filename='temp_bitstring_unit_testing_file')
    #    self.assertEqual(b.len, 16000000)
    #    self.assertEqual(b[1], True)
    #
    #    f = open('temp_bitstring_unit_testing_file' ,'wb')
    #    a[1:].tofile(f)
    #    f.close()
    #    b = BitStream(filename='temp_bitstring_unit_testing_file')
    #    self.assertEqual(b.len, 16000000)
    #    self.assertEqual(b[0], True)
    #    os.remove('temp_bitstring_unit_testing_file')
    def testTokenParser(self):
        """tokenparser splits format strings into (name, length, value) tuples."""
        tp = bitstring.constbitstream.tokenparser
        self.assertEqual(tp('hex'), (True, [('hex', None, None)]))
        self.assertEqual(tp('hex=14'), (True, [('hex', None, '14')]))
        self.assertEqual(tp('se'), (False, [('se', None, None)]))
        self.assertEqual(tp('ue=12'), (False, [('ue', None, '12')]))
        self.assertEqual(tp('0xef'), (False, [('0x', None, 'ef')]))
        self.assertEqual(tp('uint:12'), (False, [('uint', 12, None)]))
        self.assertEqual(tp('int:30=-1'), (False, [('int', 30, '-1')]))
        self.assertEqual(tp('bits:10'), (False, [('bits', 10, None)]))
        self.assertEqual(tp('bits:10'), (False, [('bits', 10, None)]))
        self.assertEqual(tp('123'), (False, [('uint', 123, None)]))
        self.assertEqual(tp('123'), (False, [('uint', 123, None)]))
        self.assertRaises(ValueError, tp, 'hex12')
        self.assertEqual(tp('hex12', ('hex12',)), (False, [('hex12', None, None)]))
        self.assertEqual(tp('2*bits:6'), (False, [('bits', 6, None), ('bits', 6, None)]))
    def testAutoFromFileObject(self):
        """Open file objects auto-initialise streams and compare equal to bytes."""
        with open('test.m1v', 'rb') as f:
            s = ConstBitStream(f, offset=32, length=12)
            self.assertEqual(s.uint, 352)
            t = ConstBitStream('0xf') + f
            self.assertTrue(t.startswith('0xf000001b3160'))
            s2 = ConstBitStream(f)
            t2 = BitStream('0xc')
            t2.prepend(s2)
            self.assertTrue(t2.startswith('0x000001b3'))
            self.assertTrue(t2.endswith('0xc'))
            with open('test.m1v', 'rb') as b:
                u = ConstBitStream(bytes=b.read())
                self.assertEqual(u, f)
    def testFileBasedCopy(self):
        """Copies of file-backed streams are independent of the original."""
        with open('smalltestfile', 'rb') as f:
            s = BitStream(f)
            t = BitStream(s)
            s.prepend('0b1')
            self.assertEqual(s[1:], t)
            s = BitStream(f)
            t = copy.copy(s)
            t.append('0b1')
            self.assertEqual(s, t[:-1])
    def testBigEndianSynonyms(self):
        """intbe/uintbe are synonyms for int/uint at whole-byte lengths."""
        s = BitStream('0x12318276ef')
        self.assertEqual(s.int, s.intbe)
        self.assertEqual(s.uint, s.uintbe)
        s = BitStream(intbe=-100, length=16)
        self.assertEqual(s, 'int:16=-100')
        s = BitStream(uintbe=13, length=24)
        self.assertEqual(s, 'int:24=13')
        s = BitStream('uintbe:32=1000')
        self.assertEqual(s, 'uint:32=1000')
        s = BitStream('intbe:8=2')
        self.assertEqual(s, 'int:8=2')
        self.assertEqual(s.read('intbe'), 2)
        s.pos = 0
        self.assertEqual(s.read('uintbe'), 2)
    def testBigEndianSynonymErrors(self):
        """intbe/uintbe require whole-byte lengths everywhere."""
        self.assertRaises(bitstring.CreationError, BitStream, uintbe=100, length=15)
        self.assertRaises(bitstring.CreationError, BitStream, intbe=100, length=15)
        self.assertRaises(bitstring.CreationError, BitStream, 'uintbe:17=100')
        self.assertRaises(bitstring.CreationError, BitStream, 'intbe:7=2')
        s = BitStream('0b1')
        self.assertRaises(bitstring.InterpretError, s._getintbe)
        self.assertRaises(bitstring.InterpretError, s._getuintbe)
        self.assertRaises(ValueError, s.read, 'uintbe')
        self.assertRaises(ValueError, s.read, 'intbe')
    def testLittleEndianUint(self):
        """uintle interprets/creates little-endian unsigned ints."""
        s = BitStream(uint=100, length=16)
        self.assertEqual(s.uintle, 25600)
        s = BitStream(uintle=100, length=16)
        self.assertEqual(s.uint, 25600)
        self.assertEqual(s.uintle, 100)
        s.uintle += 5
        self.assertEqual(s.uintle, 105)
        s = BitStream('uintle:32=999')
        self.assertEqual(s.uintle, 999)
        self.assertEqual(s[::-8].uint, 999)
        s = pack('uintle:24', 1001)
        self.assertEqual(s.uintle, 1001)
        self.assertEqual(s.length, 24)
        self.assertEqual(s.read('uintle'), 1001)
    def testLittleEndianInt(self):
        """intle interprets/creates little-endian signed ints."""
        s = BitStream(int=100, length=16)
        self.assertEqual(s.intle, 25600)
        s = BitStream(intle=100, length=16)
        self.assertEqual(s.int, 25600)
        self.assertEqual(s.intle, 100)
        s.intle += 5
        self.assertEqual(s.intle, 105)
        s = BitStream('intle:32=999')
        self.assertEqual(s.intle, 999)
        self.assertEqual(s[::-8].int, 999)
        s = pack('intle:24', 1001)
        self.assertEqual(s.intle, 1001)
        self.assertEqual(s.length, 24)
        self.assertEqual(s.read('intle'), 1001)
    def testLittleEndianErrors(self):
        """uintle/intle require whole-byte lengths everywhere."""
        self.assertRaises(bitstring.CreationError, BitStream, 'uintle:15=10')
        self.assertRaises(bitstring.CreationError, BitStream, 'intle:31=-999')
        self.assertRaises(bitstring.CreationError, BitStream, uintle=100, length=15)
        self.assertRaises(bitstring.CreationError, BitStream, intle=100, length=15)
        s = BitStream('0xfff')
        self.assertRaises(bitstring.InterpretError, s._getintle)
        self.assertRaises(bitstring.InterpretError, s._getuintle)
        self.assertRaises(ValueError, s.read, 'uintle')
        self.assertRaises(ValueError, s.read, 'intle')
    def testStructTokens1(self):
        """struct-style '<'/'>' tokens map onto (u)int(be|le) of 8/16/32/64 bits."""
        self.assertEqual(pack('<b', 23), BitStream('intle:8=23'))
        self.assertEqual(pack('<B', 23), BitStream('uintle:8=23'))
        self.assertEqual(pack('<h', 23), BitStream('intle:16=23'))
        self.assertEqual(pack('<H', 23), BitStream('uintle:16=23'))
        self.assertEqual(pack('<l', 23), BitStream('intle:32=23'))
        self.assertEqual(pack('<L', 23), BitStream('uintle:32=23'))
        self.assertEqual(pack('<q', 23), BitStream('intle:64=23'))
        self.assertEqual(pack('<Q', 23), BitStream('uintle:64=23'))
        self.assertEqual(pack('>b', 23), BitStream('intbe:8=23'))
        self.assertEqual(pack('>B', 23), BitStream('uintbe:8=23'))
        self.assertEqual(pack('>h', 23), BitStream('intbe:16=23'))
        self.assertEqual(pack('>H', 23), BitStream('uintbe:16=23'))
        self.assertEqual(pack('>l', 23), BitStream('intbe:32=23'))
        self.assertEqual(pack('>L', 23), BitStream('uintbe:32=23'))
        self.assertEqual(pack('>q', 23), BitStream('intbe:64=23'))
        self.assertEqual(pack('>Q', 23), BitStream('uintbe:64=23'))
        self.assertRaises(bitstring.CreationError, pack, '<B', -1)
        self.assertRaises(bitstring.CreationError, pack, '<H', -1)
        self.assertRaises(bitstring.CreationError, pack, '<L', -1)
        self.assertRaises(bitstring.CreationError, pack, '<Q', -1)
    def testStructTokens2(self):
        """'@' tokens follow the native byte order (patched via sys.byteorder)."""
        endianness = sys.byteorder
        sys.byteorder = 'little'
        self.assertEqual(pack('@b', 23), BitStream('intle:8=23'))
        self.assertEqual(pack('@B', 23), BitStream('uintle:8=23'))
        self.assertEqual(pack('@h', 23), BitStream('intle:16=23'))
        self.assertEqual(pack('@H', 23), BitStream('uintle:16=23'))
        self.assertEqual(pack('@l', 23), BitStream('intle:32=23'))
        self.assertEqual(pack('@L', 23), BitStream('uintle:32=23'))
        self.assertEqual(pack('@q', 23), BitStream('intle:64=23'))
        self.assertEqual(pack('@Q', 23), BitStream('uintle:64=23'))
        sys.byteorder = 'big'
        self.assertEqual(pack('@b', 23), BitStream('intbe:8=23'))
        self.assertEqual(pack('@B', 23), BitStream('uintbe:8=23'))
        self.assertEqual(pack('@h', 23), BitStream('intbe:16=23'))
        self.assertEqual(pack('@H', 23), BitStream('uintbe:16=23'))
        self.assertEqual(pack('@l', 23), BitStream('intbe:32=23'))
        self.assertEqual(pack('@L', 23), BitStream('uintbe:32=23'))
        self.assertEqual(pack('@q', 23), BitStream('intbe:64=23'))
        self.assertEqual(pack('@Q', 23), BitStream('uintbe:64=23'))
        sys.byteorder = endianness
    def testNativeEndianness(self):
        """'@' packing agrees with the matching explicit byte-order token."""
        s = pack('@2L', 40, 40)
        if sys.byteorder == 'little':
            self.assertEqual(s, pack('<2L', 40, 40))
        else:
            self.assertEqual(sys.byteorder, 'big')
            self.assertEqual(s, pack('>2L', 40, 40))
def testStructTokens2(self):
s = pack('>hhl', 1, 2, 3)
a, b, c = s.unpack('>hhl')
self.assertEqual((a, b, c), (1, 2, 3))
s = pack('<QL, >Q \tL', 1001, 43, 21, 9999)
self.assertEqual(s.unpack('<QL, >QL'), [1001, 43, 21, 9999])
def testStructTokensMultiplicativeFactors(self):
s = pack('<2h', 1, 2)
a, b = s.unpack('<2h')
self.assertEqual((a, b), (1, 2))
s = pack('<100q', *range(100))
self.assertEqual(s.len, 100 * 64)
self.assertEqual(s[44:45:64].uintle, 44)
s = pack('@L0B2h', 5, 5, 5)
self.assertEqual(s.unpack('@Lhh'), [5, 5, 5])
def testStructTokensErrors(self):
for f in ['>>q', '<>q', 'q>', '2q', 'q', '>-2q', '@a', '>int:8', '>q2']:
self.assertRaises(bitstring.CreationError, pack, f, 100)
def testImmutableBitStreams(self):
a = ConstBitStream('0x012345')
self.assertEqual(a, '0x012345')
b = BitStream('0xf') + a
self.assertEqual(b, '0xf012345')
try:
a.append(b)
self.assertTrue(False)
except AttributeError:
pass
try:
a.prepend(b)
self.assertTrue(False)
except AttributeError:
pass
try:
a[0] = '0b1'
self.assertTrue(False)
except TypeError:
pass
try:
del a[5]
self.assertTrue(False)
except TypeError:
pass
try:
a.replace('0b1', '0b0')
self.assertTrue(False)
except AttributeError:
pass
try:
a.insert('0b11', 4)
self.assertTrue(False)
except AttributeError:
pass
try:
a.reverse()
self.assertTrue(False)
except AttributeError:
pass
try:
a.reversebytes()
self.assertTrue(False)
except AttributeError:
pass
self.assertEqual(a, '0x012345')
self.assertTrue(isinstance(a, ConstBitStream))
def testReverseBytes(self):
a = BitStream('0x123456')
a.byteswap()
self.assertEqual(a, '0x563412')
b = a + '0b1'
b.byteswap()
self.assertEqual('0x123456, 0b1', b)
a = BitStream('0x54')
a.byteswap()
self.assertEqual(a, '0x54')
a = BitStream()
a.byteswap()
self.assertFalse(a)
def testReverseBytes2(self):
a = BitStream()
a.byteswap()
self.assertFalse(a)
a = BitStream('0x00112233')
a.byteswap(0, 0, 16)
self.assertEqual(a, '0x11002233')
a.byteswap(0, 4, 28)
self.assertEqual(a, '0x12302103')
a.byteswap(start=0, end=18)
self.assertEqual(a, '0x30122103')
self.assertRaises(ValueError, a.byteswap, 0, 10, 2)
self.assertRaises(ValueError, a.byteswap, 0, -4, 4)
self.assertRaises(ValueError, a.byteswap, 0, 24, 48)
a.byteswap(0, 24)
self.assertEqual(a, '0x30122103')
a.byteswap(0, 11, 11)
self.assertEqual(a, '0x30122103')
def testCapitalsInPack(self):
    """pack() accepts upper-case keyword names and 0X/0B prefixes."""
    a = pack('A', A='0b1')
    self.assertEqual(a, '0b1')
    # 'fmt' rather than 'format' so the builtin isn't shadowed.
    fmt = 'bits:4=BL_OFFT, uint:12=width, uint:12=height'
    values = {'BL_OFFT': '0b1011', 'width': 352, 'height': 288}
    s = bitstring.pack(fmt, **values)
    self.assertEqual(s, '0b1011, uint:12=352, uint:12=288')
    a = pack('0X0, uint:8, hex', 45, '0XABcD')
    self.assertEqual(a, '0x0, uint:8=45, 0xabCD')
def testOtherCapitals(self):
    """0X / 0O / 0B literal prefixes are case-insensitive in auto initialisers."""
    a = ConstBitStream('0XABC, 0O0, 0B11')
    self.assertEqual(a, 'hex=0Xabc, oct=0, bin=0B11')
def testEfficientOverwrite(self):
    """overwrite() on a billion-bit stream and on offset slices."""
    big = BitStream(1000000000)
    big.overwrite([1], 123456)
    self.assertEqual(big[123456], True)
    big.overwrite('0xff', 1)
    self.assertEqual(big[0:4:8], '0x7f800000')
    # Overwriting from pos 0 consumes the whole target and moves pos.
    b = BitStream('0xffff')
    b.overwrite('0x0000')
    self.assertEqual(b, '0x0000')
    self.assertEqual(b.pos, 16)
    c = BitStream(length=1000)
    c.overwrite('0xaaaaaaaaaaaa', 81)
    self.assertEqual(c[81:81 + 6 * 8], '0xaaaaaaaaaaaa')
    self.assertEqual(len(list(c.findall('0b1'))), 24)
    # Same again, but on a stream with a non-zero bit offset.
    s = BitStream(length=1000)
    s = s[5:]
    s.overwrite('0xffffff', 500)
    s.pos = 500
    self.assertEqual(s.read(4 * 8), '0xffffff00')
    s.overwrite('0xff', 502)
    self.assertEqual(s[502:518], '0xffff')
def testPeekAndReadListErrors(self):
    """read()/peek() reject multi-token format strings and extra int args."""
    a = BitStream('0x123456')
    # Multi-token strings belong to readlist/peeklist, not read/peek.
    self.assertRaises(ValueError, a.read, 'hex:8, hex:8')
    self.assertRaises(ValueError, a.peek, 'hex:8, hex:8')
    # A second positional argument is a TypeError.
    self.assertRaises(TypeError, a.read, 10, 12)
    self.assertRaises(TypeError, a.peek, 12, 14)
    self.assertRaises(TypeError, a.read, 8, 8)
    self.assertRaises(TypeError, a.peek, 80, 80)
def testStartswith(self):
    """Prefix matching, including the empty-stream corner cases."""
    empty = BitStream()
    self.assertTrue(empty.startswith(BitStream()))
    self.assertFalse(empty.startswith('0b0'))
    a = BitStream('0x12ff')
    self.assertTrue(a.startswith('0x1'))
    self.assertTrue(a.startswith('0b0001001'))
    self.assertTrue(a.startswith('0x12ff'))
    # A prefix longer than the stream can never match.
    self.assertFalse(a.startswith('0x12ff, 0b1'))
    self.assertFalse(a.startswith('0x2'))
def testStartswithStartEnd(self):
    """startswith() honours the optional start/end bit positions."""
    s = BitStream('0x123456')
    self.assertTrue(s.startswith('0x234', 4))
    self.assertFalse(s.startswith('0x123', end=11))
    self.assertTrue(s.startswith('0x123', end=12))
    self.assertTrue(s.startswith('0x34', 8, 16))
    self.assertFalse(s.startswith('0x34', 7, 16))
    self.assertFalse(s.startswith('0x34', 9, 16))
    self.assertFalse(s.startswith('0x34', 8, 15))
def testEndswith(self):
    """Suffix matching, including the empty-stream corner cases."""
    empty = BitStream()
    self.assertTrue(empty.endswith(''))
    self.assertFalse(empty.endswith(BitStream('0b1')))
    a = BitStream('0xf2341')
    self.assertTrue(a.endswith('0x41'))
    self.assertTrue(a.endswith('0b001'))
    self.assertTrue(a.endswith('0xf2341'))
    # A suffix longer than the stream can never match.
    self.assertFalse(a.endswith('0x1f2341'))
    self.assertFalse(a.endswith('0o34'))
def testEndswithStartEnd(self):
    """endswith() honours the optional start/end bit positions."""
    s = BitStream('0x123456')
    self.assertTrue(s.endswith('0x234', end=16))
    self.assertFalse(s.endswith('0x456', start=13))
    self.assertTrue(s.endswith('0x456', start=12))
    self.assertTrue(s.endswith('0x34', 8, 16))
    self.assertTrue(s.endswith('0x34', 7, 16))
    self.assertFalse(s.endswith('0x34', 9, 16))
    self.assertFalse(s.endswith('0x34', 8, 15))
def testUnhashability(self):
    """Mutable BitStream objects must not be hashable or usable in sets.

    Bug fix: the original asserted ``hash([s])`` raises TypeError, but any
    list is unhashable, so the check passed without exercising ``s`` at all.
    Hashing the stream itself is what must fail (BitStream sets
    ``__hash__`` to None).
    """
    s = BitStream('0xf')
    self.assertRaises(TypeError, set, [s])
    self.assertRaises(TypeError, hash, s)
def testConstBitStreamSetCreation(self):
    """ConstBitStreams hash by value so sets de-duplicate them; BitStreams can't join."""
    members = [ConstBitStream(uint=i, length=7) for i in range(15)]
    s = set(members)
    self.assertEqual(len(s), 15)
    # An equal value is already present, so the set size must not change.
    s.add(ConstBitStream('0b0000011'))
    self.assertEqual(len(s), 15)
    self.assertRaises(TypeError, s.add, BitStream('0b0000011'))
def testConstBitStreamFunctions(self):
    """Every derivation from a ConstBitStream must itself be a ConstBitStream."""
    s = ConstBitStream('0xf, 0b1')
    self.assertEqual(type(s), ConstBitStream)
    t = copy.copy(s)
    self.assertEqual(type(t), ConstBitStream)
    a = s + '0o3'
    self.assertEqual(type(a), ConstBitStream)
    # Slicing, bitwise ops, shifts, repetition and reads all preserve the type.
    derived = [a[0:4],
               a[4:3],          # empty slice
               a[5:2:-1],
               ~a,
               a << 2,
               a >> 2,
               a * 2,
               a * 0,           # empty result
               a & ~a,
               a | ~a,
               a ^ ~a,
               a._slice(4, 4),
               a.read(4)]
    for b in derived:
        self.assertEqual(type(b), ConstBitStream)
def testConstBitStreamProperties(self):
    """Every value-setting property must be read-only on ConstBitStream."""
    a = ConstBitStream('0x123123')
    # (property name, value we try to assign) — assignment must always fail.
    attempts = [('hex', '0x234'),
                ('oct', '0o234'),
                ('bin', '0b101'),
                ('ue', 3453),
                ('se', -123),
                ('int', 432),
                ('uint', 4412),
                ('intle', 123),
                ('uintle', 4412),
                ('intbe', 123),
                ('uintbe', 4412),
                ('intne', 123),
                ('uintne', 4412),
                ('bytes', b'hello')]
    for name, value in attempts:
        with self.assertRaises(AttributeError):
            setattr(a, name, value)
def testConstBitStreamMisc(self):
    """+= rebinds a ConstBitStream rather than mutating; private _append/_prepend mutate."""
    a = ConstBitStream('0xf')
    alias = a
    a += '0xe'
    # The alias still sees the original object.
    self.assertEqual(alias, '0xf')
    self.assertEqual(a, '0xfe')
    c = BitStream(a)
    self.assertEqual(a, c)
    # The private mutators work even on the immutable class and keep its type.
    a = ConstBitStream('0b1')
    a._append(a)
    self.assertEqual(a, '0b11')
    self.assertEqual(type(a), ConstBitStream)
    a._prepend(a)
    self.assertEqual(a, '0b1111')
    self.assertEqual(type(a), ConstBitStream)
def testConstBitStreamHashibility(self):
    """Hashes depend only on bit content, never on the read position."""
    a = ConstBitStream('0x1')
    b = ConstBitStream('0x2')
    c = ConstBitStream('0x1')
    c.pos = 3
    # a and c are equal in value, so the set collapses them.
    s = set((a, b, c))
    self.assertEqual(len(s), 2)
    self.assertEqual(hash(a), hash(c))
def testConstBitStreamCopy(self):
    """copy.copy() shares the immutable datastore but not the read position."""
    a = ConstBitStream('0xabc')
    a.pos = 11
    b = copy.copy(a)
    b.pos = 4
    # Same underlying (immutable) data object...
    self.assertEqual(id(a._datastore), id(b._datastore))
    # ...but independent positions.
    self.assertEqual(a.pos, 11)
    self.assertEqual(b.pos, 4)
def testPython26stuff(self):
    """tobytes() and the .bytes property must return real bytes objects."""
    s = BitStream('0xff')
    self.assertTrue(isinstance(s.tobytes(), bytes))
    self.assertTrue(isinstance(s.bytes, bytes))
def testReadFromBits(self):
    """read() advances pos; slicing and indexing leave it alone."""
    a = ConstBitStream('0xaabbccdd')
    first_byte = a.read(8)
    self.assertEqual(first_byte, '0xaa')
    self.assertEqual(a[0:8], '0xaa')
    self.assertEqual(a[-1], True)
    a.pos = 0
    self.assertEqual(a.read(4).uint, 10)
class Set(unittest.TestCase):
    """Tests for BitStream.set(): setting and clearing individual bits."""

    def testSet(self):
        """set() with single positive and negative indices, plus range errors."""
        a = BitStream(length=16)
        a.set(True, 0)
        self.assertEqual(a, '0b10000000 00000000')
        a.set(1, 15)
        self.assertEqual(a, '0b10000000 00000001')
        b = a[4:12]
        b.set(True, 1)
        self.assertEqual(b, '0b01000000')
        b.set(True, -1)
        self.assertEqual(b, '0b01000001')
        b.set(1, -8)
        self.assertEqual(b, '0b11000001')
        self.assertRaises(IndexError, b.set, True, -9)
        self.assertRaises(IndexError, b.set, True, 8)

    def testFileBasedSetUnset(self):
        """set() works on streams backed by a file on disk."""
        a = BitStream(filename='test.m1v')
        a.set(True, (0, 1, 2, 3, 4))
        self.assertEqual(a[0:4:8], '0xf80001b3')
        a = BitStream(filename='test.m1v')
        a.set(False, (28, 29, 30, 31))
        self.assertTrue(a.startswith('0x000001b0'))

    def testSetList(self):
        """set() accepts any iterable of positions."""
        a = BitStream(length=18)
        a.set(True, range(18))
        self.assertEqual(a.int, -1)
        a.set(False, range(18))
        self.assertEqual(a.int, 0)

    def testUnset(self):
        """Clearing bits mirrors testSet, checked through inversion."""
        a = BitStream(length=16, int=-1)
        a.set(False, 0)
        self.assertEqual(~a, '0b10000000 00000000')
        a.set(0, 15)
        self.assertEqual(~a, '0b10000000 00000001')
        b = a[4:12]
        b.set(False, 1)
        self.assertEqual(~b, '0b01000000')
        b.set(False, -1)
        self.assertEqual(~b, '0b01000001')
        b.set(False, -8)
        self.assertEqual(~b, '0b11000001')
        self.assertRaises(IndexError, b.set, False, -9)
        self.assertRaises(IndexError, b.set, False, 8)

    def testSetWholeBitStream(self):
        """set() with no positions applies to every bit."""
        a = BitStream(14)
        a.set(1)
        self.assertTrue(a.all(1))
        a.set(0)
        self.assertTrue(a.all(0))
class Invert(unittest.TestCase):
    """Tests for invert() and the in-place logical operators."""

    def testInvertBits(self):
        """invert() flips the given positions, including negative indices."""
        a = BitStream('0b111000')
        a.invert(range(a.len))
        self.assertEqual(a, '0b000111')
        a.invert([0, 1, -1])
        self.assertEqual(a, '0b110110')

    def testInvertWholeBitStream(self):
        """invert() with no argument flips every bit."""
        a = BitStream('0b11011')
        a.invert()
        self.assertEqual(a, '0b00100')

    # In-place logical operators (|=, &=, ^=).

    def testIor(self):
        a = BitStream('0b1101001')
        a |= '0b1110000'
        self.assertEqual(a, '0b1111001')
        b = a[2:]
        c = a[1:-1]
        b |= c
        # c must be left untouched by the in-place OR on b.
        self.assertEqual(c, '0b11100')
        self.assertEqual(b, '0b11101')

    def testIand(self):
        a = BitStream('0b0101010101000')
        a &= '0b1111110000000'
        self.assertEqual(a, '0b0101010000000')
        # Also works on a file-backed stream with a bit offset.
        s = BitStream(filename='test.m1v', offset=26, length=24)
        s &= '0xff00ff'
        self.assertEqual(s, '0xcc0004')

    def testIxor(self):
        a = BitStream('0b11001100110011')
        a ^= '0b11111100000010'
        self.assertEqual(a, '0b00110000110001')
class AllAndAny(unittest.TestCase):
    """Tests for all()/any(), plus float handling, rotations, the bytes
    token, the private dedicated read functions, integer auto-initialisation
    and the copy-semantics of the operator vs in-place-operator forms."""

    def testAll(self):
        a = BitStream('0b0111')
        self.assertTrue(a.all(True, (1, 3)))
        self.assertFalse(a.all(True, (0, 1, 2)))
        self.assertTrue(a.all(True, [-1]))
        self.assertFalse(a.all(True, [0]))

    def testFileBasedAll(self):
        a = BitStream(filename='test.m1v')
        self.assertTrue(a.all(True, [31]))
        a = BitStream(filename='test.m1v')
        self.assertTrue(a.all(False, (0, 1, 2, 3, 4)))

    def testFileBasedAny(self):
        a = BitStream(filename='test.m1v')
        self.assertTrue(a.any(True, (31, 12)))
        a = BitStream(filename='test.m1v')
        self.assertTrue(a.any(False, (0, 1, 2, 3, 4)))

    def testAny(self):
        a = BitStream('0b10011011')
        self.assertTrue(a.any(True, (1, 2, 3, 5)))
        self.assertFalse(a.any(True, (1, 2, 5)))
        self.assertTrue(a.any(True, (-1,)))
        self.assertFalse(a.any(True, (1,)))

    def testAllFalse(self):
        a = BitStream('0b0010011101')
        self.assertTrue(a.all(False, (0, 1, 3, 4)))
        self.assertFalse(a.all(False, (0, 1, 2, 3, 4)))

    def testAnyFalse(self):
        a = BitStream('0b01001110110111111111111111111')
        self.assertTrue(a.any(False, (4, 5, 6, 2)))
        self.assertFalse(a.any(False, (1, 15, 20)))

    def testAnyEmptyBitstring(self):
        # any() over nothing is False for either polarity.
        a = ConstBitStream()
        self.assertFalse(a.any(True))
        self.assertFalse(a.any(False))

    def testAllEmptyBitStream(self):
        # all() over nothing is vacuously True for either polarity.
        a = ConstBitStream()
        self.assertTrue(a.all(True))
        self.assertTrue(a.all(False))

    def testAnyWholeBitstring(self):
        a = ConstBitStream('0xfff')
        self.assertTrue(a.any(True))
        self.assertFalse(a.any(False))

    def testAllWholeBitstring(self):
        a = ConstBitStream('0xfff')
        self.assertTrue(a.all(True))
        self.assertFalse(a.all(False))

    # Floating point tests.

    def testFloatInitialisation(self):
        """64-bit floats round-trip exactly; 32-bit only approximately."""
        for f in (0.0000001, -1.0, 1.0, 0.2, -3.1415265, 1.331e32):
            a = BitStream(float=f, length=64)
            a.pos = 6
            self.assertEqual(a.float, f)
            a = BitStream('float:64=%s' % str(f))
            a.pos = 6
            self.assertEqual(a.float, f)
            a = BitStream('floatbe:64=%s' % str(f))
            a.pos = 6
            self.assertEqual(a.floatbe, f)
            a = BitStream('floatle:64=%s' % str(f))
            a.pos = 6
            self.assertEqual(a.floatle, f)
            a = BitStream('floatne:64=%s' % str(f))
            a.pos = 6
            self.assertEqual(a.floatne, f)
            b = BitStream(float=f, length=32)
            b.pos = 6
            self.assertAlmostEqual(b.float / f, 1.0)
            b = BitStream('float:32=%s' % str(f))
            b.pos = 6
            self.assertAlmostEqual(b.float / f, 1.0)
            b = BitStream('floatbe:32=%s' % str(f))
            b.pos = 6
            self.assertAlmostEqual(b.floatbe / f, 1.0)
            b = BitStream('floatle:32=%s' % str(f))
            b.pos = 6
            self.assertAlmostEqual(b.floatle / f, 1.0)
            b = BitStream('floatne:32=%s' % str(f))
            b.pos = 6
            self.assertAlmostEqual(b.floatne / f, 1.0)
        # Assigning via the float property replaces the whole stream's value.
        a = BitStream('0x12345678')
        a.pos = 6
        a.float = 23
        self.assertEqual(a.float, 23.0)

    def testFloatInitStrings(self):
        for s in ('5', '+0.0001', '-1e101', '4.', '.2', '-.65', '43.21E+32'):
            a = BitStream('float:64=%s' % s)
            self.assertEqual(a.float, float(s))

    def testFloatPacking(self):
        a = pack('>d', 0.01)
        self.assertEqual(a.float, 0.01)
        self.assertEqual(a.floatbe, 0.01)
        self.assertEqual(a[::-8].floatle, 0.01)
        b = pack('>f', 1e10)
        self.assertAlmostEqual(b.float / 1e10, 1.0)
        c = pack('<f', 10.3)
        self.assertAlmostEqual(c.floatle / 10.3, 1.0)
        d = pack('>5d', 10.0, 5.0, 2.5, 1.25, 0.1)
        self.assertEqual(d.unpack('>5d'), [10.0, 5.0, 2.5, 1.25, 0.1])

    def testFloatReading(self):
        a = BitStream('floatle:64=12, floatbe:64=-0.01, floatne:64=3e33')
        x, y, z = a.readlist('floatle:64, floatbe:64, floatne:64')
        self.assertEqual(x, 12.0)
        self.assertEqual(y, -0.01)
        self.assertEqual(z, 3e33)
        a = BitStream('floatle:32=12, floatbe:32=-0.01, floatne:32=3e33')
        x, y, z = a.readlist('floatle:32, floatbe:32, floatne:32')
        self.assertAlmostEqual(x / 12.0, 1.0)
        self.assertAlmostEqual(y / -0.01, 1.0)
        self.assertAlmostEqual(z / 3e33, 1.0)
        a = BitStream('0b11, floatle:64=12, 0xfffff')
        a.pos = 2
        self.assertEqual(a.read('floatle:64'), 12.0)

    def testFloatErrors(self):
        a = BitStream('0x3')
        self.assertRaises(bitstring.InterpretError, a._getfloat)
        self.assertRaises(bitstring.CreationError, a._setfloat, -0.2)
        # Only 32- and 64-bit floats are supported.
        for l in (8, 10, 12, 16, 30, 128, 200):
            self.assertRaises(ValueError, BitStream, float=1.0, length=l)

    def testReadErrorChangesPos(self):
        """A failed read must leave pos where it was."""
        a = BitStream('0x123123')
        try:
            a.read('10, 5')
        except ValueError:
            pass
        self.assertEqual(a.pos, 0)

    def testRor(self):
        a = BitStream('0b11001')
        a.ror(0)
        self.assertEqual(a, '0b11001')
        a.ror(1)
        self.assertEqual(a, '0b11100')
        a.ror(5)
        self.assertEqual(a, '0b11100')
        a.ror(101)
        self.assertEqual(a, '0b01110')
        a = BitStream('0b1')
        a.ror(1000000)
        self.assertEqual(a, '0b1')

    def testRorErrors(self):
        a = BitStream()
        self.assertRaises(bitstring.Error, a.ror, 0)
        a += '0b001'
        self.assertRaises(ValueError, a.ror, -1)

    def testRol(self):
        a = BitStream('0b11001')
        a.rol(0)
        self.assertEqual(a, '0b11001')
        a.rol(1)
        self.assertEqual(a, '0b10011')
        a.rol(5)
        self.assertEqual(a, '0b10011')
        a.rol(101)
        self.assertEqual(a, '0b00111')
        a = BitStream('0b1')
        a.rol(1000000)
        self.assertEqual(a, '0b1')

    def testRolFromFile(self):
        a = BitStream(filename='test.m1v')
        l = a.len
        a.rol(1)
        self.assertTrue(a.startswith('0x000003'))
        self.assertEqual(a.len, l)
        self.assertTrue(a.endswith('0x0036e'))

    def testRorFromFile(self):
        a = BitStream(filename='test.m1v')
        l = a.len
        a.ror(1)
        self.assertTrue(a.startswith('0x800000'))
        self.assertEqual(a.len, l)
        self.assertTrue(a.endswith('0x000db'))

    def testRolErrors(self):
        a = BitStream()
        self.assertRaises(bitstring.Error, a.rol, 0)
        a += '0b001'
        self.assertRaises(ValueError, a.rol, -1)

    def testBytesToken(self):
        a = BitStream('0x010203')
        b = a.read('bytes:1')
        self.assertTrue(isinstance(b, bytes))
        self.assertEqual(b, b'\x01')
        x, y, z = a.unpack('4, bytes:2, uint')
        self.assertEqual(x, 0)
        self.assertEqual(y, b'\x10\x20')
        self.assertEqual(z, 3)
        s = pack('bytes:4', b'abcd')
        self.assertEqual(s.bytes, b'abcd')

    def testBytesTokenMoreThoroughly(self):
        a = BitStream('0x0123456789abcdef')
        a.pos += 16
        self.assertEqual(a.read('bytes:1'), b'\x45')
        self.assertEqual(a.read('bytes:3'), b'\x67\x89\xab')
        x, y, z = a.unpack('bits:28, bytes, bits:12')
        self.assertEqual(y, b'\x78\x9a\xbc')

    def testDedicatedReadFunctions(self):
        """The private _read* helpers read at an explicit position without moving pos."""
        a = BitStream('0b11, uint:43=98798798172, 0b11111')
        x = a._readuint(43, 2)
        self.assertEqual(x, 98798798172)
        self.assertEqual(a.pos, 0)
        x = a._readint(43, 2)
        self.assertEqual(x, 98798798172)
        self.assertEqual(a.pos, 0)
        a = BitStream('0b11, uintbe:48=98798798172, 0b11111')
        x = a._readuintbe(48, 2)
        self.assertEqual(x, 98798798172)
        self.assertEqual(a.pos, 0)
        x = a._readintbe(48, 2)
        self.assertEqual(x, 98798798172)
        self.assertEqual(a.pos, 0)
        a = BitStream('0b111, uintle:40=123516, 0b111')
        self.assertEqual(a._readuintle(40, 3), 123516)
        b = BitStream('0xff, uintle:800=999, 0xffff')
        self.assertEqual(b._readuintle(800, 8), 999)
        a = BitStream('0b111, intle:48=999999999, 0b111111111111')
        self.assertEqual(a._readintle(48, 3), 999999999)
        b = BitStream('0xff, intle:200=918019283740918263512351235, 0xfffffff')
        self.assertEqual(b._readintle(200, 8), 918019283740918263512351235)
        a = BitStream('0b111, floatbe:64=-5.32, 0xffffffff')
        self.assertEqual(a._readfloat(64, 3), -5.32)
        a = BitStream('0b111, floatle:64=9.9998, 0b111')
        self.assertEqual(a._readfloatle(64, 3), 9.9998)

    def testAutoInitWithInt(self):
        """A plain int initialiser means 'that many zero bits'."""
        a = BitStream(0)
        self.assertFalse(a)
        a = BitStream(1)
        self.assertEqual(a, '0b0')
        a = BitStream(1007)
        self.assertEqual(a, BitStream(length=1007))
        self.assertRaises(bitstring.CreationError, BitStream, -1)
        a = 6 + ConstBitStream('0b1') + 3
        self.assertEqual(a, '0b0000001000')
        a += 1
        self.assertEqual(a, '0b00000010000')
        self.assertEqual(ConstBitStream(13), 13)

    def testReadingProblems(self):
        a = BitStream('0x000001')
        b = a.read('uint:24')
        self.assertEqual(b, 1)
        a.pos = 0
        # Not enough data left for four whole bytes.
        self.assertRaises(bitstring.ReadError, a.read, 'bytes:4')

    def testAddVersesInPlaceAdd(self):
        """a + b makes a copy; a += b mutates BitStream but rebinds ConstBitStream."""
        a1 = ConstBitStream('0xabc')
        b1 = a1
        a1 += '0xdef'
        self.assertEqual(a1, '0xabcdef')
        self.assertEqual(b1, '0xabc')
        a2 = BitStream('0xabc')
        b2 = a2
        c2 = a2 + '0x0'
        a2 += '0xdef'
        self.assertEqual(a2, '0xabcdef')
        self.assertEqual(b2, '0xabcdef')
        self.assertEqual(c2, '0xabc0')

    def testAndVersesInPlaceAnd(self):
        a1 = ConstBitStream('0xabc')
        b1 = a1
        a1 &= '0xf0f'
        self.assertEqual(a1, '0xa0c')
        self.assertEqual(b1, '0xabc')
        a2 = BitStream('0xabc')
        b2 = a2
        c2 = a2 & '0x00f'
        a2 &= '0xf0f'
        self.assertEqual(a2, '0xa0c')
        self.assertEqual(b2, '0xa0c')
        self.assertEqual(c2, '0x00c')

    def testOrVersesInPlaceOr(self):
        a1 = ConstBitStream('0xabc')
        b1 = a1
        a1 |= '0xf0f'
        self.assertEqual(a1, '0xfbf')
        self.assertEqual(b1, '0xabc')
        a2 = BitStream('0xabc')
        b2 = a2
        c2 = a2 | '0x00f'
        a2 |= '0xf0f'
        self.assertEqual(a2, '0xfbf')
        self.assertEqual(b2, '0xfbf')
        self.assertEqual(c2, '0xabf')

    def testXorVersesInPlaceXor(self):
        a1 = ConstBitStream('0xabc')
        b1 = a1
        a1 ^= '0xf0f'
        self.assertEqual(a1, '0x5b3')
        self.assertEqual(b1, '0xabc')
        a2 = BitStream('0xabc')
        b2 = a2
        c2 = a2 ^ '0x00f'
        a2 ^= '0xf0f'
        self.assertEqual(a2, '0x5b3')
        self.assertEqual(b2, '0x5b3')
        self.assertEqual(c2, '0xab3')

    def testMulVersesInPlaceMul(self):
        a1 = ConstBitStream('0xabc')
        b1 = a1
        a1 *= 3
        self.assertEqual(a1, '0xabcabcabc')
        self.assertEqual(b1, '0xabc')
        a2 = BitStream('0xabc')
        b2 = a2
        c2 = a2 * 2
        a2 *= 3
        self.assertEqual(a2, '0xabcabcabc')
        self.assertEqual(b2, '0xabcabcabc')
        self.assertEqual(c2, '0xabcabc')

    def testLshiftVersesInPlaceLshift(self):
        a1 = ConstBitStream('0xabc')
        b1 = a1
        a1 <<= 4
        self.assertEqual(a1, '0xbc0')
        self.assertEqual(b1, '0xabc')
        a2 = BitStream('0xabc')
        b2 = a2
        c2 = a2 << 8
        a2 <<= 4
        self.assertEqual(a2, '0xbc0')
        self.assertEqual(b2, '0xbc0')
        self.assertEqual(c2, '0xc00')

    def testRshiftVersesInPlaceRshift(self):
        a1 = ConstBitStream('0xabc')
        b1 = a1
        a1 >>= 4
        self.assertEqual(a1, '0x0ab')
        self.assertEqual(b1, '0xabc')
        a2 = BitStream('0xabc')
        b2 = a2
        c2 = a2 >> 8
        a2 >>= 4
        self.assertEqual(a2, '0x0ab')
        self.assertEqual(b2, '0x0ab')
        self.assertEqual(c2, '0x00a')

    def testAutoFromBool(self):
        # bools are currently auto-converted via int: True -> '0b0', False -> ''.
        a = ConstBitStream() + True + False + True
        self.assertEqual(a, '0b00')
        # self.assertEqual(a, '0b101')
        # b = ConstBitStream(False)
        # self.assertEqual(b, '0b0')
        # c = ConstBitStream(True)
        # self.assertEqual(c, '0b1')
        # self.assertEqual(b, False)
        # self.assertEqual(c, True)
        # self.assertEqual(b & True, False)
class Bugs(unittest.TestCase):
    """Regression tests for previously fixed bugs, plus tests for
    multiplicative format factors, negative indexing, byteswap and the
    bracket-expansion mini-language."""

    def testBugInReplace(self):
        s = BitStream('0x00112233')
        l = list(s.split('0x22', start=8, bytealigned=True))
        self.assertEqual(l, ['0x11', '0x2233'])
        s = BitStream('0x00112233')
        s.replace('0x22', '0xffff', start=8, bytealigned=True)
        self.assertEqual(s, '0x0011ffff33')
        s = BitStream('0x0123412341234')
        s.replace('0x23', '0xf', start=9, bytealigned=True)
        self.assertEqual(s, '0x012341f41f4')

    def testTruncateStartBug(self):
        a = BitStream('0b000000111')[2:]
        a._truncatestart(6)
        self.assertEqual(a, '0b1')

    def testNullBits(self):
        """Empty strings for every initialiser keyword give empty streams."""
        s = ConstBitStream(bin='')
        t = ConstBitStream(oct='')
        u = ConstBitStream(hex='')
        v = ConstBitStream(bytes=b'')
        self.assertFalse(s)
        self.assertFalse(t)
        self.assertFalse(u)
        self.assertFalse(v)

    def testMultiplicativeFactorsCreation(self):
        s = BitStream('1*0b1')
        self.assertEqual(s, '0b1')
        s = BitStream('4*0xc')
        self.assertEqual(s, '0xcccc')
        s = BitStream('0b1, 0*0b0')
        self.assertEqual(s, '0b1')
        s = BitStream('0b1, 3*uint:8=34, 2*0o755')
        self.assertEqual(s, '0b1, uint:8=34, uint:8=34, uint:8=34, 0o755755')
        s = BitStream('0*0b1001010')
        self.assertFalse(s)

    def testMultiplicativeFactorsReading(self):
        s = BitStream('0xc') * 5
        a, b, c, d, e = s.readlist('5*4')
        self.assertTrue(a == b == c == d == e == 12)
        s = ConstBitStream('2*0b101, 4*uint:7=3')
        a, b, c, d, e = s.readlist('2*bin:3, 3*uint:7')
        self.assertTrue(a == b == '101')
        self.assertTrue(c == d == e == 3)

    def testMultiplicativeFactorsPacking(self):
        s = pack('3*bin', '1', '001', '101')
        self.assertEqual(s, '0b1001101')
        s = pack('hex, 2*se=-56, 3*uint:37', '34', 1, 2, 3)
        a, b, c, d, e, f = s.unpack('hex:8, 2*se, 3*uint:37')
        self.assertEqual(a, '34')
        self.assertEqual(b, -56)
        self.assertEqual(c, -56)
        self.assertEqual((d, e, f), (1, 2, 3))
        # This isn't allowed yet. See comment in tokenparser.
        #s = pack('fluffy*uint:8', *range(3), fluffy=3)
        #a, b, c = s.readlist('2*uint:8, 1*uint:8, 0*uint:8')
        #self.assertEqual((a, b, c), (0, 1, 2))

    def testMultiplicativeFactorsUnpacking(self):
        s = ConstBitStream('0b10111')
        a, b, c, d = s.unpack('3*bool, bin')
        self.assertEqual((a, b, c), (True, False, True))
        self.assertEqual(d, '11')

    def testPackingDefaultIntWithKeyword(self):
        s = pack('12', 100)
        self.assertEqual(s.unpack('12')[0], 100)
        s = pack('oh_no_not_the_eyes=33', oh_no_not_the_eyes=17)
        self.assertEqual(s.uint, 33)
        self.assertEqual(s.len, 17)

    def testInitFromIterable(self):
        # Fix: collections.Iterable moved to collections.abc in Python 3.3
        # and was removed from the collections top level in 3.10.  Look it
        # up via the abc submodule when present, falling back for Python 2.
        iterable_abc = getattr(collections, 'abc', collections)
        self.assertTrue(isinstance(range(10), iterable_abc.Iterable))
        s = ConstBitStream(range(12))
        self.assertEqual(s, '0x7ff')

    def testFunctionNegativeIndices(self):
        """Every positional method must accept negative start/end/pos."""
        # insert
        s = BitStream('0b0111')
        s.insert('0b0', -1)
        self.assertEqual(s, '0b01101')
        self.assertRaises(ValueError, s.insert, '0b0', -1000)
        # reverse
        s.reverse(-2)
        self.assertEqual(s, '0b01110')
        t = BitStream('0x778899abcdef')
        t.reverse(-12, -4)
        self.assertEqual(t, '0x778899abc7bf')
        # reversebytes
        t.byteswap(0, -40, -16)
        self.assertEqual(t, '0x77ab9988c7bf')
        # overwrite
        t.overwrite('0x666', -20)
        self.assertEqual(t, '0x77ab998666bf')
        # find
        found = t.find('0x998', bytealigned=True, start=-31)
        self.assertFalse(found)
        found = t.find('0x998', bytealigned=True, start=-32)
        self.assertTrue(found)
        self.assertEqual(t.pos, 16)
        t.pos = 0
        found = t.find('0x988', bytealigned=True, end=-21)
        self.assertFalse(found)
        found = t.find('0x998', bytealigned=True, end=-20)
        self.assertTrue(found)
        self.assertEqual(t.pos, 16)
        # findall
        s = BitStream('0x1234151f')
        l = list(s.findall('0x1', bytealigned=True, start=-15))
        self.assertEqual(l, [24])
        l = list(s.findall('0x1', bytealigned=True, start=-16))
        self.assertEqual(l, [16, 24])
        l = list(s.findall('0x1', bytealigned=True, end=-5))
        self.assertEqual(l, [0, 16])
        l = list(s.findall('0x1', bytealigned=True, end=-4))
        self.assertEqual(l, [0, 16, 24])
        # rfind
        found = s.rfind('0x1f', end=-1)
        self.assertFalse(found)
        found = s.rfind('0x12', start=-31)
        self.assertFalse(found)
        # cut
        s = BitStream('0x12345')
        l = list(s.cut(4, start=-12, end=-4))
        self.assertEqual(l, ['0x3', '0x4'])
        # split
        s = BitStream('0xfe0012fe1200fe')
        l = list(s.split('0xfe', bytealigned=True, end=-1))
        self.assertEqual(l, ['', '0xfe0012', '0xfe1200f, 0b111'])
        l = list(s.split('0xfe', bytealigned=True, start=-8))
        self.assertEqual(l, ['', '0xfe'])
        # startswith
        self.assertTrue(s.startswith('0x00f', start=-16))
        self.assertTrue(s.startswith('0xfe00', end=-40))
        self.assertFalse(s.startswith('0xfe00', end=-41))
        # endswith
        self.assertTrue(s.endswith('0x00fe', start=-16))
        self.assertFalse(s.endswith('0x00fe', start=-15))
        self.assertFalse(s.endswith('0x00fe', end=-1))
        self.assertTrue(s.endswith('0x00f', end=-4))
        # replace
        s.replace('0xfe', '', end=-1)
        self.assertEqual(s, '0x00121200fe')
        s.replace('0x00', '', start=-24)
        self.assertEqual(s, '0x001212fe')

    def testRotateStartAndEnd(self):
        a = BitStream('0b110100001')
        a.rol(1, 3, 6)
        self.assertEqual(a, '0b110001001')
        a.ror(1, start=-4)
        self.assertEqual(a, '0b110001100')
        a.rol(202, end=-5)
        self.assertEqual(a, '0b001101100')
        a.ror(3, end=4)
        self.assertEqual(a, '0b011001100')
        self.assertRaises(ValueError, a.rol, 5, start=-4, end=-6)

    def testByteSwapInt(self):
        s = pack('5*uintle:16', *range(10, 15))
        self.assertEqual(list(range(10, 15)), s.unpack('5*uintle:16'))
        swaps = s.byteswap(2)
        self.assertEqual(list(range(10, 15)), s.unpack('5*uintbe:16'))
        self.assertEqual(swaps, 5)
        s = BitStream('0xf234567f')
        swaps = s.byteswap(1, start=4)
        self.assertEqual(swaps, 3)
        self.assertEqual(s, '0xf234567f')
        s.byteswap(2, start=4)
        self.assertEqual(s, '0xf452367f')
        s.byteswap(2, start=4, end=-4)
        self.assertEqual(s, '0xf234567f')
        s.byteswap(3)
        self.assertEqual(s, '0x5634f27f')
        s.byteswap(2, repeat=False)
        self.assertEqual(s, '0x3456f27f')
        swaps = s.byteswap(5)
        self.assertEqual(swaps, 0)
        swaps = s.byteswap(4, repeat=False)
        self.assertEqual(swaps, 1)
        self.assertEqual(s, '0x7ff25634')

    def testByteSwapPackCode(self):
        s = BitStream('0x0011223344556677')
        swaps = s.byteswap('b')
        self.assertEqual(s, '0x0011223344556677')
        self.assertEqual(swaps, 8)
        swaps = s.byteswap('>3h', repeat=False)
        self.assertEqual(s, '0x1100332255446677')
        self.assertEqual(swaps, 1)

    def testByteSwapIterable(self):
        s = BitStream('0x0011223344556677')
        swaps = s.byteswap(range(1, 4), repeat=False)
        self.assertEqual(swaps, 1)
        self.assertEqual(s, '0x0022115544336677')
        swaps = s.byteswap([2], start=8)
        self.assertEqual(s, '0x0011224455663377')
        self.assertEqual(3, swaps)
        swaps = s.byteswap([2, 3], start=4)
        self.assertEqual(swaps, 1)
        self.assertEqual(s, '0x0120156452463377')

    def testByteSwapErrors(self):
        s = BitStream('0x0011223344556677')
        self.assertRaises(ValueError, s.byteswap, 'z')
        self.assertRaises(ValueError, s.byteswap, -1)
        self.assertRaises(ValueError, s.byteswap, [-1])
        self.assertRaises(ValueError, s.byteswap, [1, 'e'])
        self.assertRaises(ValueError, s.byteswap, '!h')
        self.assertRaises(ValueError, s.byteswap, 2, start=-1000)

    def testByteSwapFromFile(self):
        s = BitStream(filename='smalltestfile')
        swaps = s.byteswap('2bh')
        self.assertEqual(s, '0x0123674589abefcd')
        self.assertEqual(swaps, 2)

    def testBracketExpander(self):
        be = bitstring.bits.expand_brackets
        self.assertEqual(be('hello'), 'hello')
        self.assertEqual(be('(hello)'), 'hello')
        self.assertEqual(be('1*(hello)'), 'hello')
        self.assertEqual(be('2*(hello)'), 'hello,hello')
        self.assertEqual(be('1*(a, b)'), 'a,b')
        self.assertEqual(be('2*(a, b)'), 'a,b,a,b')
        self.assertEqual(be('2*(a), 3*(b)'), 'a,a,b,b,b')
        self.assertEqual(be('2*(a, b, 3*(c, d), e)'), 'a,b,c,d,c,d,c,d,e,a,b,c,d,c,d,c,d,e')

    def testBracketTokens(self):
        s = BitStream('3*(0x0, 0b1)')
        self.assertEqual(s, '0x0, 0b1, 0x0, 0b1, 0x0, 0b1')
        s = pack('2*(uint:12, 3*(7, 6))', *range(3, 17))
        a = s.unpack('12, 7, 6, 7, 6, 7, 6, 12, 7, 6, 7, 6, 7, 6')
        self.assertEqual(a, list(range(3, 17)))
        b = s.unpack('2*(12,3*(7,6))')
        self.assertEqual(a, b)

    def testPackCodeDicts(self):
        """The three struct-code dictionaries must share the same keys."""
        self.assertEqual(sorted(bitstring.bits.REPLACEMENTS_BE.keys()),
                         sorted(bitstring.bits.REPLACEMENTS_LE.keys()))
        self.assertEqual(sorted(bitstring.bits.REPLACEMENTS_BE.keys()),
                         sorted(bitstring.bits.PACK_CODE_SIZE.keys()))
        for key in bitstring.bits.PACK_CODE_SIZE:
            be = pack(bitstring.bits.REPLACEMENTS_BE[key], 0)
            le = pack(bitstring.bits.REPLACEMENTS_LE[key], 0)
            self.assertEqual(be.len, bitstring.bits.PACK_CODE_SIZE[key] * 8)
            self.assertEqual(le.len, be.len)

    # These tests don't compile for Python 3, so they're commented out to save me stress.
    #def testUnicode(self):
        #a = ConstBitStream(u'uint:12=34')
        #self.assertEqual(a.uint, 34)
        #a += u'0xfe'
        #self.assertEqual(a[12:], '0xfe')
        #a = BitStream('0x1122')
        #c = a.byteswap(u'h')
        #self.assertEqual(c, 1)
        #self.assertEqual(a, u'0x2211')

    #def testLongInt(self):
        #a = BitStream(4L)
        #self.assertEqual(a, '0b0000')
        #a[1:3] = -1L
        #self.assertEqual(a, '0b0110')
        #a[0] = 1L
        #self.assertEqual(a, '0b1110')
        #a *= 4L
        #self.assertEqual(a, '0xeeee')
        #c = a.byteswap(2L)
        #self.assertEqual(c, 1)
        #a = BitStream('0x11223344')
        #a.byteswap([1, 2L])
        #self.assertEqual(a, '0x11332244')
        #b = a*2L
        #self.assertEqual(b, '0x1133224411332244')
        #s = pack('uint:12', 46L)
        #self.assertEqual(s.uint, 46)
class UnpackWithDict(unittest.TestCase):
    """unpack() with token lengths supplied as keyword arguments."""

    def testLengthKeywords(self):
        a = ConstBitStream('2*13=100, 0b111')
        x, y, z = a.unpack('n, uint:m, bin:q', n=13, m=13, q=3)
        self.assertEqual(x, 100)
        self.assertEqual(y, 100)
        self.assertEqual(z, '111')

    def testLengthKeywordsWithStretch(self):
        """A bare 'bin' token stretches to fill whatever the keywords leave over."""
        a = ConstBitStream('0xff, 0b000, 0xf')
        x, y, z = a.unpack('hex:a, bin, hex:b', a=8, b=4)
        self.assertEqual(y, '000')

    def testUnusedKeyword(self):
        """Keywords not referenced by the format are simply ignored."""
        a = ConstBitStream('0b110')
        x, = a.unpack('bin:3', notused=33)
        self.assertEqual(x, '110')

    def testLengthKeywordErrors(self):
        a = pack('uint:p=33', p=12)
        self.assertRaises(ValueError, a.unpack, 'uint:p')
        self.assertRaises(ValueError, a.unpack, 'uint:p', p='a_string')
class ReadWithDict(unittest.TestCase):
    """readlist() with token lengths supplied as keyword arguments."""

    def testLengthKeywords(self):
        s = BitStream('0x0102')
        x, y = s.readlist('a, hex:b', a=8, b=4)
        self.assertEqual((x, y), (1, '0'))
        # Reading consumes the bits, so pos advances.
        self.assertEqual(s.pos, 12)
class PeekWithDict(unittest.TestCase):
    """peeklist() with token lengths supplied as keyword arguments."""

    def testLengthKeywords(self):
        s = BitStream('0x0102')
        x, y = s.peeklist('a, hex:b', a=8, b=4)
        self.assertEqual((x, y), (1, '0'))
        # Peeking never moves pos.
        self.assertEqual(s.pos, 0)
##class Miscellany(unittest.TestCase):
##
## def testNumpyInt(self):
## try:
## import numpy
## a = ConstBitStream(uint=numpy.uint8(5), length=3)
## self.assertEqual(a.uint, 5)
## except ImportError:
## # Not to worry
## pass
class BoolToken(unittest.TestCase):
    """Tests for the single-bit 'bool' token and the .bool property."""

    def testInterpretation(self):
        a = ConstBitStream('0b1')
        self.assertEqual(a.bool, True)
        self.assertEqual(a.read('bool'), True)
        self.assertEqual(a.unpack('bool')[0], True)
        b = ConstBitStream('0b0')
        self.assertEqual(b.bool, False)
        self.assertEqual(b.peek('bool'), False)
        self.assertEqual(b.unpack('bool')[0], False)

    def testPack(self):
        a = pack('bool=True')
        b = pack('bool=False')
        self.assertEqual(a.bool, True)
        self.assertEqual(b.bool, False)
        # Both real bools and the strings 'True'/'False' are accepted.
        c = pack('4*bool', False, True, 'False', 'True')
        self.assertEqual(c, '0b0101')

    def testAssignment(self):
        a = BitStream()
        a.bool = True
        self.assertEqual(a.bool, True)
        a.hex = 'ee'
        a.bool = False
        self.assertEqual(a.bool, False)
        a.bool = 'False'
        self.assertEqual(a.bool, False)
        a.bool = 'True'
        self.assertEqual(a.bool, True)
        a.bool = 0
        self.assertEqual(a.bool, False)
        a.bool = 1
        self.assertEqual(a.bool, True)

    def testErrors(self):
        self.assertRaises(bitstring.CreationError, pack, 'bool', 'hello')
        self.assertRaises(bitstring.CreationError, pack, 'bool=true')
        self.assertRaises(bitstring.CreationError, pack, 'True')
        self.assertRaises(bitstring.CreationError, pack, 'bool', 2)
        a = BitStream('0b11')
        self.assertRaises(bitstring.InterpretError, a._getbool)
        b = BitStream()
        # Bug fix: the original asserted on 'a' again, so the freshly created
        # empty stream 'b' (length 0, also not interpretable as bool) was
        # never exercised.
        self.assertRaises(bitstring.InterpretError, b._getbool)
        self.assertRaises(bitstring.CreationError, a._setbool, 'false')

    def testLengthWithBoolRead(self):
        # 'bool' takes no explicit length, not even 1.
        a = ConstBitStream('0xf')
        self.assertRaises(ValueError, a.read, 'bool:0')
        self.assertRaises(ValueError, a.read, 'bool:1')
        self.assertRaises(ValueError, a.read, 'bool:2')
class ReadWithIntegers(unittest.TestCase):
    """Plain integers given to read/peek/readlist mean 'that many bits'."""

    def testReadInt(self):
        a = ConstBitStream('0xffeedd')
        b = a.read(8)
        self.assertEqual(b.hex, 'ff')
        self.assertEqual(a.pos, 8)
        # peek returns the next bits but leaves pos alone.
        b = a.peek(8)
        self.assertEqual(b.hex, 'ee')
        self.assertEqual(a.pos, 8)
        b = a.peek(1)
        self.assertEqual(b, '0b1')
        b = a.read(1)
        self.assertEqual(b, '0b1')

    def testReadIntList(self):
        a = ConstBitStream('0xab, 0b110')
        b, c = a.readlist([8, 3])
        self.assertEqual(b.hex, 'ab')
        self.assertEqual(c.bin, '110')
class FileReadingStrategy(unittest.TestCase):
    """Check which backing datastore is chosen for file-based construction.

    Fix: the original opened 'smalltestfile' without ever closing the
    handle (a resource leak and a ResourceWarning under -W); both tests now
    use a context manager.
    """

    def testBitStreamIsAlwaysRead(self):
        """Mutable BitStreams must read the whole file into memory."""
        a = BitStream(filename='smalltestfile')
        self.assertTrue(isinstance(a._datastore, bitstring.bitstream.ByteStore))
        with open('smalltestfile', 'rb') as f:
            b = BitStream(f)
            self.assertTrue(isinstance(b._datastore, bitstring.bitstream.ByteStore))

    def testBitsIsNeverRead(self):
        """Immutable ConstBitStreams memory-map the file instead of reading it."""
        a = ConstBitStream(filename='smalltestfile')
        self.assertTrue(isinstance(a._datastore._rawarray, bitstring.bits.MmapByteArray))
        with open('smalltestfile', 'rb') as f:
            b = ConstBitStream(f)
            self.assertTrue(isinstance(b._datastore._rawarray, bitstring.bits.MmapByteArray))
class Count(unittest.TestCase):
    """Tests for count() of set and unset bits."""

    def testCount(self):
        a = ConstBitStream('0xf0f')
        self.assertEqual(a.count(True), 8)
        self.assertEqual(a.count(False), 4)
        # Empty stream counts zero of everything.
        b = BitStream()
        self.assertEqual(b.count(True), 0)
        self.assertEqual(b.count(False), 0)

    def testCountWithOffsetData(self):
        """Counting must be correct when the data has a non-zero bit offset."""
        a = ConstBitStream('0xff0120ff')
        b = a[1:-1]
        self.assertEqual(b.count(1), 16)
        self.assertEqual(b.count(0), 14)
class ZeroBitReads(unittest.TestCase):
    """Zero-length reads of numeric interpretations are rejected."""

    def testInteger(self):
        stream = ConstBitStream('0x123456')
        for token in ('uint:0', 'float:0'):
            self.assertRaises(bitstring.InterpretError, stream.read, token)
#class EfficientBitsCopies(unittest.TestCase):
#
# def testBitsCopy(self):
# a = ConstBitStream('0xff')
# b = ConstBitStream(a)
# c = a[:]
# d = copy.copy(a)
# self.assertTrue(a._datastore is b._datastore)
# self.assertTrue(a._datastore is c._datastore)
# self.assertTrue(a._datastore is d._datastore)
class InitialiseFromBytes(unittest.TestCase):
    """bytes/bytearray initialisers are treated as raw data, except where
    Python 2's str/bytes aliasing makes the positional form a format string."""

    def testBytesBehaviour(self):
        a = ConstBitStream(b'uint:5=2')
        b = ConstBitStream(b'')
        c = ConstBitStream(bytes=b'uint:5=2')
        if b'' == '':
            # Python 2: bytes IS str, so the positional b'...' argument is
            # parsed as a format string; the bytes= keyword stays raw data.
            self.assertEqual(a, 'uint:5=2')
            self.assertFalse(b)
            self.assertEqual(c.bytes, b'uint:5=2')
        else:
            # Python 3: both forms are raw byte data.
            self.assertEqual(a.bytes, b'uint:5=2')
            self.assertFalse(b)
            self.assertEqual(c, b'uint:5=2')

    def testBytearrayBehaviour(self):
        a = ConstBitStream(bytearray(b'uint:5=2'))
        b = ConstBitStream(bytearray(4))           # four zero bytes
        c = ConstBitStream(bytes=bytearray(b'uint:5=2'))
        # bytearray is never ambiguous: always raw data on both Python 2 and 3.
        self.assertEqual(a.bytes, b'uint:5=2')
        self.assertEqual(b, '0x00000000')
        self.assertEqual(c.bytes, b'uint:5=2')
class CoverageCompletionTests(unittest.TestCase):
    """Small cases added purely to exercise otherwise-uncovered branches."""

    def testUeReadError(self):
        s = ConstBitStream('0b000000001')   # truncated exp-Golomb ('ue') code
        self.assertRaises(bitstring.ReadError, s.read, 'ue')

    def testOverwriteWithSelf(self):
        s = BitStream('0b1101')
        s.overwrite(s)                      # overwriting with itself must be a no-op
        self.assertEqual(s, '0b1101')
| [
"dwindsor@networksecurityservicesllc.com"
] | dwindsor@networksecurityservicesllc.com |
ead64974c7f331db8bd57afaac4d4c6e4eb8d533 | 98a288ad0496c484a777d31ffaaa0cd8678e6452 | /plusfriend/urls.py | afa16ed72fcced5aec68292880dc2565ef9d9d58 | [] | no_license | WHWH10/Askbot_ex | 24224b78cfb3f5e5d8b15987075ff675396f3690 | 4bc0fb3f7c81470010617ffb927eaa886496a4b1 | refs/heads/master | 2021-06-26T06:46:26.077884 | 2017-09-11T07:47:35 | 2017-09-11T07:47:35 | 103,101,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from django.conf.urls import url
from . import views
# Webhook endpoints for a chat-bot integration (route names suggest the
# KakaoTalk "plus friend" bot API -- inferred from the app name, verify).
urlpatterns = [
    url(r'^keyboard$', views.on_init),                         # initial keyboard/menu request
    url(r'^friend$', views.on_added),                          # user added the bot
    url(r'^friend/(?P<user_key>[\w-]+)$', views.on_block),     # user blocked/removed the bot
    url(r'^chat_room/(?P<user_key>[\w-]+)$', views.on_leave),  # user left the chat room
    url(r'^message$', views.on_message),                       # incoming message
]
| [
"eyet010@gmail.com"
] | eyet010@gmail.com |
41ad7dfb509fba61890e0aea60cd3b110cef1c09 | 01362c32c4f28774d35eb040fce84e1bdc1dbe36 | /programming/migrations/0006_auto__add_field_film_picture__add_field_festival_picture__add_field_gi.py | f9215083aa7020ad553feb308aef4229099fee4b | [] | no_license | marcusvalentine/starandshadow | 90c434e04829248a69c6a4d9cf6b32eb395d42c9 | 93a74542bc5d949238232632ee85de66438f78bb | refs/heads/master | 2020-03-29T16:24:36.163141 | 2018-05-10T13:32:08 | 2018-05-10T13:32:08 | 150,112,777 | 0 | 0 | null | 2018-09-24T14:10:08 | 2018-09-24T14:10:07 | null | UTF-8 | Python | false | false | 17,082 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add a nullable ``picture`` FK to Film, Festival, Gig and Season, and
    backfill it from each row's legacy ``graphic`` image field.

    Fixes over the original revision: the identical backfill loop was
    duplicated four times (once per model) and used bare ``except:`` clauses;
    it is now a single helper with ``except Exception`` (still broad, because
    South's frozen ORM exceptions are awkward to name, but no longer able to
    swallow KeyboardInterrupt/SystemExit).
    """

    def forwards(self, orm):
        # Schema change: one nullable picture FK per event-like table.
        db.add_column('programming_film', 'picture', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['fileupload.Picture'], null=True, blank=True), keep_default=False)
        db.add_column('programming_festival', 'picture', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['fileupload.Picture'], null=True, blank=True), keep_default=False)
        db.add_column('programming_gig', 'picture', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['fileupload.Picture'], null=True, blank=True), keep_default=False)
        db.add_column('programming_season', 'picture', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['fileupload.Picture'], null=True, blank=True), keep_default=False)

        if not db.dry_run:
            Picture = orm['fileupload.Picture']

            def link_picture(event):
                # Point the new FK at the Picture row matching the legacy
                # ``graphic`` file, creating the Picture when none exists yet.
                # Rows with no graphic are left untouched (and not re-saved).
                if event.graphic.name != '':
                    try:
                        event.picture = Picture.objects.get(file=event.graphic.name)
                    except Exception:  # DoesNotExist (or duplicates): make a fresh Picture
                        p = Picture(file=event.graphic.name)
                        p.save()
                        event.picture = p
                    event.save()

            for model in (orm.Film, orm.Festival, orm.Gig, orm.Season):
                for event in model.objects.all():
                    link_picture(event)

    def backwards(self, orm):
        # Drop the four FK columns; the backfilled Picture rows are kept.
        db.delete_column('programming_film', 'picture_id')
        db.delete_column('programming_festival', 'picture_id')
        db.delete_column('programming_gig', 'picture_id')
        db.delete_column('programming_season', 'picture_id')

    # Frozen ORM state auto-generated by South -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'fileupload.picture': {
            'Meta': {'object_name': 'Picture'},
            'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'})
        },
        'programming.event': {
            'Meta': {'ordering': "['start']", 'object_name': 'Event'},
            'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'end': ('django.db.models.fields.TimeField', [], {}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['fileupload.Picture']", 'null': 'True', 'blank': 'True'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['programming.Programmer']"}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'programming.festival': {
            'Meta': {'ordering': "['start']", 'object_name': 'Festival'},
            'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'end': ('django.db.models.fields.DateTimeField', [], {}),
            'events': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['programming.Event']", 'symmetrical': 'False', 'blank': 'True'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'films': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['programming.Film']", 'symmetrical': 'False', 'blank': 'True'}),
            'gigs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['programming.Gig']", 'symmetrical': 'False', 'blank': 'True'}),
            'graphic': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['fileupload.Picture']", 'null': 'True', 'blank': 'True'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['programming.Programmer']"}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'programming.film': {
            'Meta': {'ordering': "['start']", 'object_name': 'Film'},
            'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'certificate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['programming.Rating']"}),
            'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'director': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'filmFormat': ('django.db.models.fields.CharField', [], {'default': "'UK'", 'max_length': '15'}),
            'graphic': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lang': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
            'length': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['fileupload.Picture']", 'null': 'True', 'blank': 'True'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['programming.Programmer']"}),
            'season': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['programming.Season']"}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'year': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'})
        },
        'programming.gig': {
            'Meta': {'ordering': "['start']", 'object_name': 'Gig'},
            'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'end': ('django.db.models.fields.TimeField', [], {}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'graphic': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['fileupload.Picture']", 'null': 'True', 'blank': 'True'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['programming.Programmer']"}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'programming.meeting': {
            'Meta': {'ordering': "['start']", 'object_name': 'Meeting'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['programming.Programmer']"}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'default': "'General Meeting'", 'max_length': '150'})
        },
        'programming.programmer': {
            'Meta': {'ordering': "['name']", 'object_name': 'Programmer'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'homePhone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mobilePhone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'photo': ('django.db.models.fields.files.ImageField', [], {'default': "'img/programmer/ron1-small.jpg'", 'max_length': '100', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'programming.rating': {
            'Meta': {'ordering': "['name']", 'object_name': 'Rating'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'largeImage': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'smallImage': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'})
        },
        'programming.season': {
            'Meta': {'ordering': "['start']", 'object_name': 'Season'},
            'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'end': ('django.db.models.fields.DateField', [], {}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'graphic': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['fileupload.Picture']", 'null': 'True', 'blank': 'True'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'programmer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['programming.Programmer']"}),
            'start': ('django.db.models.fields.DateField', [], {}),
            'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '150'})
        }
    }

    complete_apps = ['programming']
| [
"sjk@psimonkey.org.uk"
] | sjk@psimonkey.org.uk |
5d009ec1750156835ab05bd369cef58aeaed239e | b4c93bad8ccc9007a7d3e7e1d1d4eb8388f6e988 | /farmercoupon/migrations/0048_auto_20210322_1046.py | 45f4e7b616e00e32a923afc76da686935d36cabb | [] | no_license | flashdreiv/fis | 39b60c010d0d989a34c01b39ea88f7fc3be0a87d | b93277785d6ad113a90a011f7c43b1e3e9209ec5 | refs/heads/main | 2023-04-02T12:46:32.249800 | 2021-03-31T00:27:29 | 2021-03-31T00:27:29 | 343,431,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # Generated by Django 3.1.7 on 2021-03-22 02:46
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('farmercoupon', '0047_auto_20210321_1524'),
]
operations = [
migrations.AddField(
model_name='farmer',
name='crop',
field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[(1, 'Item title 2.1'), (2, 'Item title 2.2'), (3, 'Item title 2.3'), (4, 'Item title 2.4'), (5, 'Item title 2.5')], max_length=9, null=True),
),
migrations.AddField(
model_name='farmer',
name='land_area',
field=models.IntegerField(default=0),
),
]
| [
"dreivan.orprecio@gmail.com"
] | dreivan.orprecio@gmail.com |
b84f468a68c639807ccd982bd0207469b605c051 | d0571268b8b7fa8e6621d138217f6a98a418ca93 | /_1327/main/migrations/0005_add_anonymous_group.py | c8edc82430a8a8a4e4e7e3ee4d329e6563f138d4 | [
"MIT"
] | permissive | xasetl/1327 | 743a4a8b7c9b1984d3b8b434c4db4b6799a5ddb7 | 71a9d3adac0f01fb87612c24bb8d0f1b945cc703 | refs/heads/master | 2020-04-05T11:44:44.422539 | 2017-08-21T16:11:34 | 2017-08-21T16:11:34 | 35,056,504 | 2 | 0 | null | 2015-05-04T20:04:55 | 2015-05-04T20:04:55 | null | UTF-8 | Python | false | false | 596 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth.models import Group
from guardian.management import create_anonymous_user
from guardian.utils import get_anonymous_user
def add_anonymous_group(apps, schema_editor):
    """Create guardian's anonymous user and put it into a new "Anonymous" group.

    ``apps``/``schema_editor`` are the standard RunPython arguments; they are
    unused here.

    NOTE(review): this uses the live ``Group`` model and guardian helpers
    rather than historical models via ``apps.get_model()`` -- it only keeps
    working while those APIs match the current schema; verify on upgrades.
    """
    create_anonymous_user(None)
    group = Group.objects.create(name="Anonymous")
    user = get_anonymous_user()
    user.groups.add(group)
class Migration(migrations.Migration):
    """Data migration: create the "Anonymous" group for guardian's anonymous user."""

    dependencies = [
        ('main', '0004_add_university_network_group'),
    ]

    operations = [
        # No reverse function is supplied, so this migration is irreversible.
        migrations.RunPython(add_anonymous_group),
    ]
| [
"steffen.koette@gmail.com"
] | steffen.koette@gmail.com |
8dda7acc6045272a3c4408b6132cc59c2ad575dc | 97b5e5caedf0931b00fdce1df7bbdbad692bdd0b | /pyscf/cc/ec_ccsd_t.py | add7818fc1ad1071e15d9c022971f6ae9c6e6573 | [
"Apache-2.0"
] | permissive | seunghoonlee89/pyscf-ecCC-TCC | c08cd333d7bf9accfb340ad3c1397b5d8a354b26 | 2091566fb83c1474e40bf74f271be2ce4611f60c | refs/heads/main | 2023-08-21T08:22:12.188933 | 2021-10-21T17:54:50 | 2021-10-21T17:54:50 | 326,832,474 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,463 | py | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
clone of UCCSD(T) code for ecRCCSD(T)
'''
import time
import ctypes
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import _ccsd
def kernel(mycc, eris, coeff, t1=None, t2=None, verbose=logger.NOTE, ecTCCSD=False):
    """Compute the externally-corrected (ec) RCCSD(T) triples energy correction.

    The restricted amplitudes are expanded into unrestricted spin blocks
    (t1a == t1b, t2aa == t2bb) so the UCCSD(T)-style C drivers can be reused.
    For a closed-shell reference only the aaa and baa contractions are
    evaluated (bbb == aaa and abb == baa by spin symmetry; their code paths
    are retained upstream only as comments).

    Args:
        mycc:    CCSD object supplying max_memory, incore_complete, async_io
                 and the default t1/t2 amplitudes.
        eris:    integral container (ovoo, ovov, get_ovvv, fock, mo_energy, nocc).
        coeff:   external-correction data; ``coeff.exclude_t_ecCCSDt()`` is
                 invoked, and its Paaa/Pbaa arrays plus CAS partition sizes
                 (nocc_cas, nvir_cas, nocc_iact) are passed to the C drivers.
        t1, t2:  RCCSD amplitudes; default to mycc.t1 / mycc.t2.
        ecTCCSD: not implemented; raises NotImplementedError when True.

    Returns:
        Real part of the ec-(T) energy correction (also logged).

    NOTE(review): ``t2`` is transposed *in place* (``out=t2ab`` below) and the
    restoring transpose is commented out, so mycc.t2's buffer is left in
    transposed form after this call -- confirm callers do not reuse t2.
    """
    cpu1 = cpu0 = (time.clock(), time.time())
    log = logger.new_logger(mycc, verbose)
    mem_now = lib.current_memory()[0]
    max_memory = max(0, mycc.max_memory - mem_now)
    log.debug('max_memory %d MB (%d MB in use)', max_memory, mem_now)
    if t1 is None: t1 = mycc.t1
    if t2 is None: t2 = mycc.t2

    if ecTCCSD: raise NotImplementedError
    else: coeff.exclude_t_ecCCSDt()  # off-diagonal P-matrix, CAS space
    # Alternative P-matrix setups kept from development:
    #else: mycc.coeff.get_Pmat_ccsdt_cas() # full Pmat cas
    #else: mycc.coeff.get_Pmat_ccsdt() # full Pmat

    # CAS partition sizes and packed occupied pair/triple counts for the drivers.
    nocc_cas = int(coeff.nocc_cas)
    nvir_cas = int(coeff.nvir_cas)
    nocc_iact = int(coeff.nocc_iact)
    nocc2 = int(nocc_cas*(nocc_cas-1)/2)
    nocc3 = int(nocc_cas*(nocc_cas-1)*(nocc_cas-2)/6)

    # Spin-expand the restricted amplitudes: alpha and beta blocks coincide.
    t1a = t1
    t1b = t1
    t2ab = t2
    t2aa = t2 - t2.transpose(0,1,3,2)   # antisymmetrized same-spin block
    t2bb = t2aa

    nocca, nvira = t1.shape
    noccb, nvirb = t1.shape
    nmoa = nocca + nvira
    nmob = noccb + nvirb

    # Scratch file for the sorted integrals unless everything fits in core.
    if mycc.incore_complete:
        ftmp = None
    else:
        ftmp = lib.H5TmpFile()
    t1aT = t1a.T.copy()
    t1bT = t1aT
    t2aaT = t2aa.transpose(2,3,0,1).copy()
    t2bbT = t2aaT

    # (vo|oo) integrals in driver order, and the (vv|op) block sorted to disk/core.
    eris_vooo = numpy.asarray(eris.ovoo).transpose(1,3,0,2).conj().copy()
    eris_vvop = _sort_eri(mycc, eris, ftmp, log)
    cpu1 = log.timer_debug1('ecCCSD(T) sort_eri', *cpu1)

    dtype = numpy.result_type(t1a.dtype, t2aa.dtype, eris_vooo.dtype)
    et_sum = numpy.zeros(1, dtype=dtype)    # accumulated in place by the drivers
    mem_now = lib.current_memory()[0]
    max_memory = max(0, mycc.max_memory - mem_now)

    # aaa (same-spin) contraction, tiled over virtual-orbital blocks.
    bufsize = max(8, int((max_memory*.5e6/8-nocca**3*3*lib.num_threads())*.4/(nocca*nmoa)))
    log.debug('max_memory %d MB (%d MB in use)', max_memory, mem_now)
    orbsym = numpy.zeros(nocca, dtype=int)  # no point-group symmetry: single irrep
    contract = _gen_contract_aaa(t1aT, t2aaT, eris_vooo, eris.fock,
                                 eris.mo_energy, orbsym, coeff.Paaa,
                                 nocc_iact, nvir_cas, nocc3, log)
    # The contraction runs in a background thread while the main thread
    # prefetches the next integral tiles.
    with lib.call_in_background(contract, sync=not mycc.async_io) as ctr:
        for a0, a1 in reversed(list(lib.prange_tril(0, nvira, bufsize))):
            cache_row_a = numpy.asarray(eris_vvop[a0:a1,:a1], order='C')
            if a0 == 0:
                cache_col_a = cache_row_a
            else:
                cache_col_a = numpy.asarray(eris_vvop[:a0,a0:a1], order='C')
            # Diagonal tile (a-block with itself).
            ctr(et_sum, a0, a1, a0, a1, (cache_row_a,cache_col_a,
                                         cache_row_a,cache_col_a))

            for b0, b1 in lib.prange_tril(0, a0, bufsize/8):
                cache_row_b = numpy.asarray(eris_vvop[b0:b1,:b1], order='C')
                if b0 == 0:
                    cache_col_b = cache_row_b
                else:
                    cache_col_b = numpy.asarray(eris_vvop[:b0,b0:b1], order='C')
                # Off-diagonal tile (a-block with earlier b-block).
                ctr(et_sum, a0, a1, b0, b1, (cache_row_a,cache_col_a,
                                             cache_row_b,cache_col_b))
    cpu1 = log.timer_debug1('contract_aaa', *cpu1)
    et_aaa = et_sum[0]*0.5
    print('ecCCSD(T) aaa contribution =',et_sum[0]*0.5)

    # bbb contraction omitted: identical to aaa for a closed-shell (RHF)
    # reference (the UCCSD-style code path is kept only upstream, commented).

    # Cache t2abT in the t2ab buffer to reduce the memory footprint.  This
    # overwrites t2 in place (see the NOTE in the docstring).
    assert(t2ab.flags.c_contiguous)
    t2abT = lib.transpose(t2ab.copy().reshape(nocca*noccb,nvira*nvirb), out=t2ab)
    t2abT = t2abT.reshape(nvira,nvirb,nocca,noccb)

    # baa (mixed-spin) contraction.
    bufsize = int(max(12, (max_memory*.5e6/8-noccb*nocca**2*5)*.7/(nocca*nmob)))
    ts = t1aT, t1bT, t2aaT, t2abT
    fock = eris.fock
    vooo = eris_vooo
    contract = _gen_contract_baa(ts, vooo, fock, eris.mo_energy, orbsym,
                                 coeff.Pbaa, nocc_cas, nvir_cas, nocc_iact, nocc2, log)
    with lib.call_in_background(contract, sync=not mycc.async_io) as ctr:
        for a0, a1 in lib.prange(0, nvirb, int(bufsize/nvira+1)):
            cache_row_a = numpy.asarray(eris_vvop[a0:a1,:], order='C')
            cache_col_a = numpy.asarray(eris_vvop[:,a0:a1], order='C')
            for b0, b1 in lib.prange_tril(0, nvira, bufsize/6/2):
                cache_row_b = numpy.asarray(eris_vvop[b0:b1,:b1], order='C')
                cache_col_b = numpy.asarray(eris_vvop[:b0,b0:b1], order='C')
                ctr(et_sum, a0, a1, b0, b1, (cache_row_a,cache_col_a,
                                             cache_row_b,cache_col_b))
    cpu1 = log.timer_debug1('contract_baa', *cpu1)
    print('ecCCSD(T) baa contribution =',0.5*et_sum[0]-et_aaa)

    # abb contraction and the t2ab-restoring transpose omitted for the same
    # closed-shell reasons (kept only as comments upstream).

    et_sum *= .5
    if abs(et_sum[0].imag) > 1e-4:
        logger.warn(mycc, 'Non-zero imaginary part of ecCCSD(T) energy was found %s',
                    et_sum[0])
    et = et_sum[0].real
    mem_now = lib.current_memory()[0]
    max_memory = max(0, mycc.max_memory - mem_now)
    log.debug('max_memory %d MB (%d MB in use)', max_memory, mem_now)
    log.timer('ecCCSD(T)', *cpu0)
    log.note('ecCCSD(T) correction = %.15g', et)
    return et
def _gen_contract_aaa(t1T, t2T, vooo, fock, mo_energy, orbsym, paaa, nocc_iact, nvir_cas, nocc3, log):
    """Build the callback that accumulates the same-spin (aaa) triples energy.

    Symmetry bookkeeping and dtype dispatch are done once; the returned
    ``contract`` closure hands one (a0:a1, b0:b1) virtual-orbital tile of
    cached integrals to the C driver ``CCecccsd_t_aaa`` and adds its
    contribution into ``et_sum`` in place.
    """
    nvir, nocc = t1T.shape
    mo_energy = numpy.asarray(mo_energy, order='C')
    fvo = fock[nocc:,:nocc].copy()   # occupied-virtual Fock block for the driver
    cpu2 = [time.clock(), time.time()]
    # Point-group bookkeeping: per-irrep offsets for occupied orbitals,
    # virtual orbitals and occupied pairs (all-zero orbsym => one irrep).
    orbsym = numpy.hstack((numpy.sort(orbsym[:nocc]),numpy.sort(orbsym[nocc:])))
    o_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(orbsym[:nocc], minlength=8)))
    v_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(orbsym[nocc:], minlength=8)))
    o_sym = orbsym[:nocc]
    oo_sym = (o_sym[:,None] ^ o_sym).ravel()
    oo_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(oo_sym, minlength=8)))
    nirrep = max(oo_sym) + 1
    # The C driver expects 32-bit integer tables.
    orbsym = orbsym.astype(numpy.int32)
    o_ir_loc = o_ir_loc.astype(numpy.int32)
    v_ir_loc = v_ir_loc.astype(numpy.int32)
    oo_ir_loc = oo_ir_loc.astype(numpy.int32)
    dtype = numpy.result_type(t2T.dtype, vooo.dtype, fock.dtype)
    if dtype == numpy.complex:
        # Complex-valued driver not ported from the stock uccsd_t code.
        raise NotImplementedError
    else:
        drv = _ccsd.libcc.CCecccsd_t_aaa

    def contract(et_sum, a0, a1, b0, b1, cache):
        # NOTE: the argument order below must match the C prototype exactly.
        cache_row_a, cache_col_a, cache_row_b, cache_col_b = cache
        drv(et_sum.ctypes.data_as(ctypes.c_void_p),
            mo_energy.ctypes.data_as(ctypes.c_void_p),
            t1T.ctypes.data_as(ctypes.c_void_p),
            t2T.ctypes.data_as(ctypes.c_void_p),
            vooo.ctypes.data_as(ctypes.c_void_p),
            fvo.ctypes.data_as(ctypes.c_void_p),
            paaa.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_int(nocc), ctypes.c_int(nvir),
            ctypes.c_int(nocc_iact), ctypes.c_int(nvir_cas),
            ctypes.c_int(nocc3),
            ctypes.c_int(a0), ctypes.c_int(a1),
            ctypes.c_int(b0), ctypes.c_int(b1),
            ctypes.c_int(nirrep),
            o_ir_loc.ctypes.data_as(ctypes.c_void_p),
            v_ir_loc.ctypes.data_as(ctypes.c_void_p),
            oo_ir_loc.ctypes.data_as(ctypes.c_void_p),
            orbsym.ctypes.data_as(ctypes.c_void_p),
            cache_row_a.ctypes.data_as(ctypes.c_void_p),
            cache_col_a.ctypes.data_as(ctypes.c_void_p),
            cache_row_b.ctypes.data_as(ctypes.c_void_p),
            cache_col_b.ctypes.data_as(ctypes.c_void_p))
        cpu2[:] = log.timer_debug1('contract %d:%d,%d:%d'%(a0,a1,b0,b1), *cpu2)
    return contract
def _gen_contract_baa(ts, vooo, fock, mo_energy, orbsym,
                      pbaa, nocc_cas, nvir_cas, nocc_iact, nocc2, log):
    """Build the callback that accumulates the mixed-spin (baa) triples energy.

    For the closed-shell case all beta quantities alias the alpha ones
    (focka == fockb, vOoO == VoOo == vooo, mo_eb == mo_ea); the returned
    ``contract`` closure forwards one virtual-orbital tile to the C driver
    ``CCecccsd_t_baa`` and accumulates into ``et_sum`` in place.
    """
    t1aT, t1bT, t2aaT, t2abT = ts
    # Closed-shell aliases: beta data equals alpha data.
    focka = fock
    fockb = fock
    vOoO = vooo
    VoOo = vooo
    nvira, nocca = t1aT.shape
    nvirb, noccb = t1bT.shape
    mo_ea = numpy.asarray(mo_energy, order='C')
    mo_eb = mo_ea
    fvo = focka[nocca:,:nocca].copy()
    fVO = fockb[noccb:,:noccb].copy()
    cpu2 = [time.clock(), time.time()]
    dtype = numpy.result_type(t2aaT.dtype, vooo.dtype)
    if dtype == numpy.complex:
        # Complex-valued driver not ported from the stock uccsd_t code.
        raise NotImplementedError
    else:
        drv = _ccsd.libcc.CCecccsd_t_baa

    def contract(et_sum, a0, a1, b0, b1, cache):
        # NOTE: the argument order below must match the C prototype exactly.
        cache_row_a, cache_col_a, cache_row_b, cache_col_b = cache
        drv(et_sum.ctypes.data_as(ctypes.c_void_p),
            mo_ea.ctypes.data_as(ctypes.c_void_p),
            mo_eb.ctypes.data_as(ctypes.c_void_p),
            t1aT.ctypes.data_as(ctypes.c_void_p),
            t1bT.ctypes.data_as(ctypes.c_void_p),
            t2aaT.ctypes.data_as(ctypes.c_void_p),
            t2abT.ctypes.data_as(ctypes.c_void_p),
            vooo.ctypes.data_as(ctypes.c_void_p),
            vOoO.ctypes.data_as(ctypes.c_void_p),
            VoOo.ctypes.data_as(ctypes.c_void_p),
            fvo.ctypes.data_as(ctypes.c_void_p),
            fVO.ctypes.data_as(ctypes.c_void_p),
            pbaa.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_int(nocca), ctypes.c_int(noccb),
            ctypes.c_int(nvira), ctypes.c_int(nvirb),
            ctypes.c_int(nocc_cas), ctypes.c_int(nvir_cas),
            ctypes.c_int(nocc_iact), ctypes.c_int(nocc2),
            ctypes.c_int(a0), ctypes.c_int(a1),
            ctypes.c_int(b0), ctypes.c_int(b1),
            cache_row_a.ctypes.data_as(ctypes.c_void_p),
            cache_col_a.ctypes.data_as(ctypes.c_void_p),
            cache_row_b.ctypes.data_as(ctypes.c_void_p),
            cache_col_b.ctypes.data_as(ctypes.c_void_p))
        cpu2[:] = log.timer_debug1('contract %d:%d,%d:%d'%(a0,a1,b0,b1), *cpu2)
    return contract
def _sort_eri(mycc, eris, h5tmp, log):
    """Repack (ov|ov) and (ov|vv) integrals into a (nvir, nvir, nocc, nmo)
    array ``vvop`` used by the (T) contraction kernels.

    The result lives in memory when ``mycc.incore_complete`` (or no scratch
    file is given), otherwise in an HDF5 dataset inside ``h5tmp``.  Writes are
    handed to a background thread so the transpose of the next block overlaps
    disk I/O.
    """
    # NOTE(review): time.clock() was removed in Python 3.8 -- this line is
    # Python 2 era; confirm interpreter version before reuse.
    cpu1 = (time.clock(), time.time())
    nocc = eris.nocc
    nmo = eris.fock.shape[0]
    nvir = nmo - nocc
    # pick a dtype wide enough for both the amplitudes and the integrals
    if mycc.t2 is None:
        dtype = eris.ovov.dtype
    else:
        dtype = numpy.result_type(mycc.t2[0], eris.ovov.dtype)
    if mycc.incore_complete or h5tmp is None:
        eris_vvop = numpy.empty((nvir,nvir,nocc,nmo), dtype)
    else:
        eris_vvop = h5tmp.create_dataset('vvop', (nvir,nvir,nocc,nmo), dtype)
    # block size over the second virtual index, bounded by available memory
    max_memory = max(2000, mycc.max_memory - lib.current_memory()[0])
    max_memory = min(8000, max_memory*.9)
    blksize = min(nvir, max(16, int(max_memory*1e6/8/(nvir*nocc*nmo))))
    with lib.call_in_background(eris_vvop.__setitem__, sync=not mycc.async_io) as save:
        # double buffering: while `save` writes bufopv in the background,
        # the next iteration fills buf1 -- do not reorder the swap below
        bufopv = numpy.empty((nocc,nmo,nvir), dtype=dtype)
        buf1 = numpy.empty_like(bufopv)
        for j0, j1 in lib.prange(0, nvir, blksize):
            ovov = numpy.asarray(eris.ovov[:,j0:j1])
            ovvv = eris.get_ovvv(slice(None), slice(j0,j1))
            for j in range(j0,j1):
                # occupied part then virtual part of the p index
                bufopv[:,:nocc,:] = ovov[:,j-j0].conj()
                bufopv[:,nocc:,:] = ovvv[:,j-j0].conj()
                save(j, bufopv.transpose(2,0,1))
                bufopv, buf1 = buf1, bufopv
            ovov = ovvv = None
            cpu1 = log.timer_debug1('transpose %d:%d'%(j0,j1), *cpu1)
    return eris_vvop
if __name__ == '__main__':
    # Self-tests: compare the (T) kernel against known reference values.
    from pyscf import gto
    from pyscf import scf
    from pyscf import cc
    # --- closed-shell H2O / 6-31G: RCCSD amplitudes fed into the
    # unrestricted (T) kernel should reproduce the RCCSD(T) correction ---
    mol = gto.Mole()
    mol.atom = [
        [8 , (0. , 0.     , 0.)],
        [1 , (0. , -.757  , .587)],
        [1 , (0. , .757   , .587)]]
    mol.basis = '631g'
    mol.build()
    rhf = scf.RHF(mol)
    rhf.conv_tol = 1e-14
    rhf.scf()
    mcc = cc.CCSD(rhf)
    mcc.conv_tol = 1e-12
    mcc.ccsd()
    # spin-adapt the restricted amplitudes for the UCCSD kernel
    t1a = t1b = mcc.t1
    t2ab = mcc.t2
    t2aa = t2bb = t2ab - t2ab.transpose(1,0,2,3)
    mycc = cc.UCCSD(scf.addons.convert_to_uhf(rhf))
    eris = mycc.ao2mo()
    e3a = kernel(mycc, eris, (t1a,t1b), (t2aa,t2ab,t2bb))
    print(e3a - -0.00099642337843278096)  # should print ~0
    # --- open-shell system with random orbitals/amplitudes: a pure
    # regression check of the contraction against a stored number ---
    mol = gto.Mole()
    mol.atom = [
        [8 , (0. , 0.     , 0.)],
        [1 , (0. , -.757  , .587)],
        [1 , (0. , .757   , .587)]]
    mol.spin = 2
    mol.basis = '3-21g'
    mol.build()
    mf = scf.UHF(mol).run(conv_tol=1e-14)
    nao, nmo = mf.mo_coeff[0].shape
    numpy.random.seed(10)
    mf.mo_coeff = numpy.random.random((2,nao,nmo))
    numpy.random.seed(12)
    nocca, noccb = mol.nelec
    nmo = mf.mo_occ[0].size
    nvira = nmo - nocca
    nvirb = nmo - noccb
    t1a = .1 * numpy.random.random((nocca,nvira))
    t1b = .1 * numpy.random.random((noccb,nvirb))
    # antisymmetrize the same-spin amplitudes in both index pairs
    t2aa = .1 * numpy.random.random((nocca,nocca,nvira,nvira))
    t2aa = t2aa - t2aa.transpose(0,1,3,2)
    t2aa = t2aa - t2aa.transpose(1,0,2,3)
    t2bb = .1 * numpy.random.random((noccb,noccb,nvirb,nvirb))
    t2bb = t2bb - t2bb.transpose(0,1,3,2)
    t2bb = t2bb - t2bb.transpose(1,0,2,3)
    t2ab = .1 * numpy.random.random((nocca,noccb,nvira,nvirb))
    t1 = t1a, t1b
    t2 = t2aa, t2ab, t2bb
    mycc = cc.UCCSD(mf)
    eris = mycc.ao2mo(mf.mo_coeff)
    e3a = kernel(mycc, eris, [t1a,t1b], [t2aa, t2ab, t2bb])
    print(e3a - 9877.2780859693339)  # should print ~0
    # --- cross-check: the GHF-based (T) must give the same energy ---
    mycc = cc.GCCSD(scf.addons.convert_to_ghf(mf))
    eris = mycc.ao2mo()
    t1 = mycc.spatial2spin(t1, eris.orbspin)
    t2 = mycc.spatial2spin(t2, eris.orbspin)
    from pyscf.cc import gccsd_t_slow
    et = gccsd_t_slow.kernel(mycc, eris, t1, t2)
    print(et - 9877.2780859693339)  # should print ~0
| [
"slee89@caltech.edu"
] | slee89@caltech.edu |
17b8933019a86e9c2ffbcdaa3cb4a887f9d66940 | 7411152e1618fe463d170e78fc8df594de9ce88e | /web_scrapper.py | b22663f1377bfd8bab6de148fe86b01a03892fbe | [] | no_license | neramas1221/Bird_web_scrapper | 31aa7f92df2223966359d9235feb452ea3f0d78a | 11fb372932b5443f5586e0af7eb5ce88afbc25ce | refs/heads/master | 2020-04-01T14:54:06.181993 | 2019-03-19T13:43:48 | 2019-03-19T13:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,201 | py | import requests
from bs4 import BeautifulSoup
import time
import numpy as np
import pandas as pd
import re
# Scrape recording metadata and audio files from xeno-canto, one results
# page at a time, appending rows to a text file and downloading each mp3.
# NOTE(review): hard-coded local paths; start page presumably resumes an
# earlier run -- confirm before re-running.
page_counter = 8040 #8575
page_url = "https://www.xeno-canto.org/explore?dir=0&order=xc&pg="
download_url = "https://www.xeno-canto.org"
folder = "/media/neramas1221/Maxtor/sounds/"
row_Data = []
col_Data = []
cont = True
table_data = np.array([])
while cont:
    print("loop " + str(page_counter))
    row_Data = []
    # fetch one results page and locate its results table
    page = requests.get(page_url + str(page_counter))
    soup = BeautifulSoup(page.text, 'lxml')
    table = soup.find(class_="results")
    rows = table.find_all('tr')
    for row in range(1, len(rows)):  # row 0 is the header
        cols = rows[row].find_all('td')
        col_Data = []
        for col in range(1, len(cols)):  # column 0 is skipped
            # default cell text; blank cells become a single space
            if cols[col].contents !=[]:
                info = cols[col].contents[0]
            else:
                info = " "
            # prefer <p> text; otherwise an <a> -- either its href (for the
            # download link) or its link text
            if cols[col].find_all('p') != []:
                info = cols[col].find('p')
                info = info.contents[0]
            elif cols[col].find_all('a') != []:
                info = cols[col].find('a')
                if 'download' in str(info):
                    info = info['href']
                else:
                    info = info.contents[0]
            # star-rating widget: record the selected value, or a blank
            if cols[col].find_all(class_='rating') != []:
                if cols[col].find_all(class_='selected') != []:
                    section = cols[col].find(class_='selected')
                    rating = section.contents[0]
                    rating = rating.contents[0]
                    col_Data.append(rating)
                else:
                    col_Data.append(" ")
            info = " ".join(str(info).split())  # collapse whitespace runs
            col_Data.append(info)
        row_Data.append(col_Data)
    # append this page's rows as quoted, comma-separated lines
    # NOTE(review): len(cols) is the width of the LAST parsed row only --
    # verify every row has the same number of cells, otherwise this
    # truncates or overruns shorter/longer rows.
    f = open("/media/neramas1221/Maxtor/bird_data_text.txt", "a")
    for i in range(0, len(row_Data)):
        for j in range(0, len(cols)):
            row_Data[i][j] = re.sub('[!@#$",]', '', row_Data[i][j])
            f.write('"' + str(row_Data[i][j]) + '"' + ",")
        f.write("\n")
    f.close()
    # download each recording; column 11 holds the href, column 12 the id
    for i in range(0, len(row_Data)):
        print("Downloading...")
        if "img" not in (download_url + str(row_Data[i][11])):
            r = requests.get(download_url + str(row_Data[i][11]), stream=True)
            with open(folder+str(row_Data[i][12])+".mp3", 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                f.close()  # redundant: the with-block closes the file anyway
            print("...Done")
            time.sleep(0.1)  # be polite to the server
    # if page_counter == 1:
    #     table_data = np.array(row_Data)
    # else:
    #     table_data = np.vstack((table_data, row_Data))
    # a full page holds 30 results; a short page means the last page
    if len(row_Data) != 30:
        cont = False
    else:
        page_counter = page_counter + 1
#output = pd.DataFrame(table_data,columns = ['Common name','Length','Recordist','Date','Time','Country',
#                                            'Location','Elev. (m)','Type','Remarks','Rating','Download link',
#                                            'ID'])
#output.to_csv("data_set.csv")
# NOTE(review): table_data is never filled (the vstack code above is
# commented out), so these report an empty array.
print(table_data.size)
print(table_data.shape)
| [
"connorwheeler1997@gmail.com"
] | connorwheeler1997@gmail.com |
1a4bb46bd52734b636a9289f60533e348c0cd036 | 6a7e2db41319fc6a4470a06258bf487234055021 | /venv/Scripts/pip3.8-script.py | fa9e30e2c16968d373a28069a08af45f14f409f1 | [] | no_license | AthanasiosChaloudis/Data-Science-for-Web-Applications | 4cd448f4b9dd4d5df01be3ef12a4c00591a100e0 | a99bfd0fad92c6b6238ee48b10d7543fa32de92f | refs/heads/main | 2023-01-30T00:33:36.762972 | 2020-12-13T10:37:19 | 2020-12-13T10:37:19 | 308,966,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!C:\Users\Thanos\PycharmProjects\test1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
# setuptools-generated console-script shim for pip 19.0.3.
if __name__ == '__main__':
    # strip the Windows '-script.py(w)'/'.exe' suffix so pip reports its
    # canonical program name in usage/help output
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
    )
| [
"61148755+thanosuhh@users.noreply.github.com"
] | 61148755+thanosuhh@users.noreply.github.com |
ff6271a955e1cb503a3d11d4da07cb233f75e69b | 897e63cc3e19882e50e76fc4abb3b1e7c5408001 | /manage.py | 7ded26bba34bfae49570ed0facc1d25773c241ff | [] | no_license | sasirekha3/molecular-search-system | 277d67330532401bdb886845430f4dca3623fad4 | eccb7722034bcbe1fa1d173ae91bdf1a98dba83b | refs/heads/master | 2020-12-04T14:10:09.412422 | 2020-01-04T17:24:02 | 2020-01-04T17:24:02 | 231,795,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoProj.settings")
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        # Django itself is missing (or the virtualenv is not active) --
        # re-raise with the standard explanatory message.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)
| [
"ksasirekha@gmail.com"
] | ksasirekha@gmail.com |
339cf3d15e438d86c084c9aa0f60c0b938831e58 | 3a910ca48f778e57cb0ab9a42f3e57ba2f527e6b | /reduceDimension/plotMethods.py | 1d0c4ca6a23a62cb52ac8e19fcea7b522952998d | [] | no_license | DaveGeneral/tensorflow_manage_nets | d7daec9f6fbe466b5257ad2add5de8733df35ef0 | 1ed8e657a593ffd37eb15b1ec2dfb1c9e7dff4ea | refs/heads/master | 2021-06-19T11:41:14.760786 | 2017-07-18T01:14:38 | 2017-07-18T01:14:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,554 | py | import sys, os
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
switch_server = True
testdir = os.path.dirname('__file__')
srcdir = '..'
sys.path.insert(0, os.path.abspath(os.path.join(testdir, srcdir)))
if switch_server is True:
from tools import utils
from nets import net_aencoder as AE
from tools.dataset_csv import Dataset_csv
else:
from tensorflow_manage_nets.tools import utils
from tensorflow_manage_nets.nets import net_aencoder as AE
from tensorflow_manage_nets.tools.dataset_csv import Dataset_csv
def get_data_plot(opc):
    """Return hard-coded (dimension, fractal-dimension) measurements for one dataset.

    Parameters:
        opc (int): dataset selector -- 0: mnist, 1: cifar10, 2: svhn, 3: agnews.

    Returns:
        tuple (methods, name, original_dim) where ``methods`` maps each
        dimensionality-reduction method ('ae', 'pca', 'dct', ...) to a dict
        with parallel 'dim' and 'fractal' lists, ``name`` is the dataset name
        and ``original_dim`` its original dimensionality.

    Raises:
        ValueError: for an unrecognised ``opc`` (previously this fell through
        and crashed later with a NameError on ``dx``).
    """
    data = {}
    if opc == 0:
        # MNIST
        data['mnist'] = {}
        data['mnist']['ae'] = {}
        data['mnist']['ae']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474]
        data['mnist']['ae']['fractal'] = [3.1633,4.5932,6.2643,7.8174,0.1364,0.2681,0.4716,1.0265,1.8659,1.9534,25.5753,25.5753,25.5753]
        data['mnist']['dct'] = {}
        data['mnist']['dct']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474]
        data['mnist']['dct']['fractal'] = [3.5165,4.4513,4.7163,4.981,5.4558,5.2292,3.4751,19.0054,22.5753,25.5753,25.5753,25.5753,25.5753]
        data['mnist']['ipla'] = {}
        data['mnist']['ipla']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474]
        data['mnist']['ipla']['fractal'] = [2.0956,3.4231,3.613,4.5425,4.2206,4.4101,4.0596,16.7204,19.8748,23.2534,25.5753,25.5753,25.5753]
        # data['mnist']['sax'] = {}
        # data['mnist']['sax']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474]
        # data['mnist']['sax']['fractal'] = [2.5403,3.5612,4.7923,6.0755,6.1693,6.6266,13.7531,17.0834,21.3274,25.5753,25.5753,25.5753,25.5753]
        data['mnist']['pca'] = {}
        data['mnist']['pca']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474]
        data['mnist']['pca']['fractal'] = [3.7764,3.633,4.3696,4.7597,4.1067,4.5098,18.7939,1.7925,25.5753,25.5753,25.5753,25.5753,25.5753]
        data['mnist']['svd'] = {}
        data['mnist']['svd']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474]
        data['mnist']['svd']['fractal'] = [3.5072,2.9827,3.6737,4.8881,4.3186,4.8956,3.033,22.5753,25.5753,25.5753,25.5753,25.5753,25.5753]
        data['mnist']['paa'] = {}
        data['mnist']['paa']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474]
        data['mnist']['paa']['fractal'] = [3.7688,4.2507,3.8278,3.8522,5.2186,4.6841,4.8219,5.1059,4.2068,19.2354,23.5753,25.5753,25.5753]
        data['mnist']['dwt'] = {}
        data['mnist']['dwt']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474]
        data['mnist']['dwt']['fractal'] = [3.4016,4.5113,4.7749,4.4473,4.4599,4.5201,3.9403,4.6369,19.598,22.5753,25.5753,25.5753,25.5753]
        # data['mnist']['cp'] = {}
        # data['mnist']['cp']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474]
        # data['mnist']['cp']['fractal'] = [3.4246,4.0078,4.4739,4.8383,4.3009,5.4709,15.1808,17.9606,2.0437,24.5753,25.5753,25.5753,25.5753]
        dx = data['mnist']
        name = 'mnist'
        original_dim = 800
    elif opc == 1:
        # CIFAR10 - 1.6494
        data['cifar10'] = {}
        data['cifar10']['ae'] = {}
        data['cifar10']['ae']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474,711,1066,1599,2398,3597]
        data['cifar10']['ae']['fractal'] = [2.2365,2.6026,3.48,0.1136,0.119,0.1171,0.1697,0.2356,0.1557,0.2513,23.9903,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753]
        data['cifar10']['dct'] = {}
        data['cifar10']['dct']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['cifar10']['dct']['fractal'] = [3.9918,5.0943,6.243,2.8814,3.2774,4.645,6.1401,15.0665,3.3972,23.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753]
        data['cifar10']['ipla'] = {}
        data['cifar10']['ipla']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['cifar10']['ipla']['fractal'] = [1.9384,2.9256,3.8386,5.0608,5.8173,6.1881,8.9843,8.9674,9.8928,11.6136,12.7876,15.8204,20.5753,25.5753,25.5753,25.5753,25.5753,25.5753]
        # data['cifar10']['sax'] = {}
        # data['cifar10']['sax']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        # data['cifar10']['sax']['fractal'] = [0.4879,0.546,0.6239,0.7161,0.8361,0.9713,1.2115,1.5869,2.1696,2.9898,4.2449,6.7758,10.2027,16.8038,25.5753,25.5753,25.5753,25.5753]
        data['cifar10']['pca'] = {}
        data['cifar10']['pca']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['cifar10']['pca']['fractal'] = [3.3653,4.0906,3.9554,1.5782,3.9994,6.2009,5.3791,6.9906,18.8888,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753]
        data['cifar10']['svd'] = {}
        data['cifar10']['svd']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['cifar10']['svd']['fractal'] = [3.634,2.7057,4.6587,2.6081,5.9719,2.5707,6.5027,6.2395,21.8748,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753]
        data['cifar10']['paa'] = {}
        data['cifar10']['paa']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['cifar10']['paa']['fractal'] = [3.5931,4.3796,5.6623,8.6333,0.3525,10.1474,9.4041,0.1516,0.2149,10.5511,9.9956,9.3387,0.6065,0.7336,10.6043,10.1417,1.4088,1.5912]
        data['cifar10']['dwt'] = {}
        data['cifar10']['dwt']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['cifar10']['dwt']['fractal'] = [3.4213,4.8521,5.6459,2.9588,3.3272,5.485,6.7866,12.7766,14.5139,20.3658,23.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753]
        # data['cifar10']['cp'] = {}
        # data['cifar10']['cp']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        # data['cifar10']['cp']['fractal'] = [3.5199,4.6375,1.0774,6.3398,8.323,4.7599,8.2534,10.9358,14.926,20.9903,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753,25.5753]
        dx = data['cifar10']
        name = 'cifar10'
        original_dim = 4096
    elif opc == 2:
        # SVHN = 28.3359
        data['svhn'] = {}
        data['svhn']['pca'] = {}
        data['svhn']['pca']['dim'] = [4,6,9,13,19,28,42,63,94,141,211]
        data['svhn']['pca']['fractal'] = [3.6906,5.1793,6.4785,7.2137,12.435,5.422,2.4771,28.3359,28.3359,28.3359,28.3359]
        data['svhn']['paa'] = {}
        data['svhn']['paa']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211]
        data['svhn']['paa']['fractal'] = [3.6458,5.5113,4.277,5.9883,11.0742,8.8174,9.3821,10.8159,11.9396,14.168,14.168]
        data['svhn']['dwt'] = {}
        data['svhn']['dwt']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211]
        data['svhn']['dwt']['fractal'] = [2.9093,4.6779,5.5817,7.5835,8.0256,14.9638,3.4771,25.751,28.3359,28.3359,28.3359]
        data['svhn']['dct'] = {}
        data['svhn']['dct']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211]
        data['svhn']['dct']['fractal'] = [3.8896,5.515,6.5078,7.5054,15.8202,21.736,27.3359,28.3359,28.3359,28.3359,28.3359]
        data['svhn']['svd'] = {}
        data['svhn']['svd']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211]
        data['svhn']['svd']['fractal'] = [3.8101,5.0242,6.5251,7.9185,12.2805,5.5287,22.7814,28.3359,28.3359,28.3359,28.3359]
        data['svhn']['ipla'] = {}
        data['svhn']['ipla']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211]
        data['svhn']['ipla']['fractal'] = [1.8991,2.8153,3.7822,5.4587,7.7079,13.4563,24.751,28.3359,28.3359,28.3359,28.3359]
        dx = data['svhn']
        name = 'svhn'
        original_dim = 1152
    elif opc == 3:
        # AgNews - 30.4943
        data['agnews'] = {}
        data['agnews']['dwt'] = {}
        data['agnews']['dwt']['dim'] = [4,6,9,13,19,28,42,63,94,141,211,316,474,711,1066,1599,2398,3597]
        data['agnews']['dwt']['fractal'] = [0.9801,0.9747,1.7576,1.758,2.6681,2.5881,4.0368,4.0368,2.132,2.2803,3.9624,7.6197,7.6192,8.0396,7.4786,10.2216,12.3825,12.3825]
        data['agnews']['ipla'] = {}
        data['agnews']['ipla']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['agnews']['ipla']['fractal'] = [0.8858,0.8713,0.7968,1.5267,1.8855,1.94,2.6152,3.2711,2.5011,2.5171,3.1752,3.8608,6.2208,6.9321,10.0606,9.9789,9.9536,10.5323]
        data['agnews']['paa'] = {}
        data['agnews']['paa']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['agnews']['paa']['fractal'] = [0.8103,1.3728,1.7259,1.8405,2.7076,1.6259,3.195,5.4737,6.2383,6.8174,8.5794,9.333,0.4478,10.298,9.3606,8.1446,10.3471,11.1106]
        data['agnews']['svd'] = {}
        data['agnews']['svd']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['agnews']['svd']['fractal'] = [2.448,2.7537,2.9912,3.3966,3.6956,4.8341,4.5793,4.6637,17.3139,22.4432,24.7651,24.7651,24.7651,24.7651,24.7651,24.7651,24.7651,24.7651]
        data['agnews']['pca'] = {}
        data['agnews']['pca']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['agnews']['pca']['fractal'] = [2.5685,2.3968,2.8437,2.2094,3.805,4.4401,5.4467,5.8244,4.3857,24.7651,24.7651,24.7651,24.7651,24.7651,24.7651,24.7651,24.7651,24.7651]
        data['agnews']['dct'] = {}
        data['agnews']['dct']['dim'] = [4, 6, 9, 13, 19, 28, 42, 63, 94, 141, 211, 316, 474, 711, 1066, 1599, 2398, 3597]
        data['agnews']['dct']['fractal'] = [1.9761,2.43,2.5922,2.9844,3.9418,4.3012,4.111,2.1457,3.7315,4.676,5.0242,16.3515,1.5,24.7651,24.7651,24.7651,24.7651,24.7651]
        dx = data['agnews']
        name = 'agnews'
        original_dim = 8704
    else:
        # fail fast instead of falling through to a NameError below
        raise ValueError('Unknown dataset selector opc=%r (expected 0-3)' % (opc,))
    return dx, name, original_dim
# Plot dimension-vs-fractal curves for every dataset/method combination.
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal','red', 'yellow', 'magenta', 'gray'])
for dataset_index in range(4):
    data, name, originalD = get_data_plot(dataset_index)
    plt.figure(1)
    # note: the colour cycle is shared across datasets on purpose
    for met, color in zip(data, colors):
        plt.plot(data[met]['dim'], data[met]['fractal'], color=color, lw=1,
                 label='Method - {0}'.format(met))
    plt.xlabel('Dim')
    plt.ylabel('Fractal')
    plt.title('Dim-Fractal - Dataset ' + name + '-' + str(originalD))
    plt.legend(loc="lower right")
    plt.show()
| [
"rikardo.corp@gmail.com"
] | rikardo.corp@gmail.com |
cccbb148040f217b8a624f39a07f85f4fb552de4 | 433ada0b349e8a68dd85a5af047b90d23aee44c9 | /include/ClientCaches.py | f4a6eb45028de815aa1b2763dfac4061d03724d5 | [
"WTFPL"
] | permissive | 3wayHimself/hydrus | 7ddfe3507ad2b3e9dc4ab69cb9c6e25efc06c5aa | 804ffe8cecfe01bdb9518070d31dbf826b72e8ef | refs/heads/master | 2020-03-23T04:37:53.849078 | 2018-07-11T20:23:51 | 2018-07-11T20:23:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106,375 | py | import ClientDefaults
import ClientDownloading
import ClientParsing
import ClientPaths
import ClientRendering
import ClientSearch
import ClientServices
import ClientThreading
import HydrusConstants as HC
import HydrusExceptions
import HydrusFileHandling
import HydrusPaths
import HydrusSerialisable
import HydrusSessions
import HydrusThreading
import itertools
import json
import os
import random
import requests
import threading
import time
import urllib
import wx
import HydrusData
import ClientData
import ClientConstants as CC
import HydrusGlobals as HG
import collections
import HydrusTags
import traceback
# important thing here, and reason why it is recursive, is because we want to preserve the parent-grandparent interleaving
def BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents ):
    """Expand each service's child->parents map so every child also lists its
    grandparents (and further ancestors), inserted immediately after the
    parent that introduced them -- hence the recursion rather than a flat
    transitive-closure pass."""
    def AddParents( simple_children_to_parents, children_to_parents, child, parents ):
        # append each new parent, then immediately recurse into its own
        # parents so the ancestor ordering interleaves as intended
        for parent in parents:
            if parent not in children_to_parents[ child ]:
                children_to_parents[ child ].append( parent )
                if parent in simple_children_to_parents:
                    grandparents = simple_children_to_parents[ parent ]
                    AddParents( simple_children_to_parents, children_to_parents, child, grandparents )
    service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
    for ( service_key, simple_children_to_parents ) in service_keys_to_simple_children_to_parents.items():
        children_to_parents = service_keys_to_children_to_parents[ service_key ]
        for ( child, parents ) in simple_children_to_parents.items():
            AddParents( simple_children_to_parents, children_to_parents, child, parents )
    return service_keys_to_children_to_parents
def BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat ):
    """For each service key, convert its flat (child, parent) pairs into a
    loop-free child -> set-of-parents lookup."""
    result = collections.defaultdict( HydrusData.default_dict_set )
    for ( service_key, pairs ) in service_keys_to_pairs_flat.items():
        result[ service_key ] = BuildSimpleChildrenToParents( pairs )
    return result
def BuildSimpleChildrenToParents( pairs ):
    """Build a child -> set-of-parents map from (child, parent) pairs,
    discarding self-pairs and any pair that would introduce a cycle."""
    children_to_parents = HydrusData.default_dict_set()
    for ( child, parent ) in pairs:
        is_self_pair = child == parent
        makes_loop = not is_self_pair and LoopInSimpleChildrenToParents( children_to_parents, child, parent )
        if not ( is_self_pair or makes_loop ):
            children_to_parents[ child ].add( parent )
    return children_to_parents
def CollapseTagSiblingPairs( groups_of_pairs ):
    """Collapse sibling pairs into a flat bad_tag -> best_tag mapping.

    ``groups_of_pairs`` is an iterable of pair-groups in descending order of
    precedence (so e.g. local tag pairs can outrank remote ones).  A pair is
    discarded when its bad tag already has a relationship from a higher
    precedence group, or when accepting it would close a loop
    (a->b, b->c, c->a).  Chains a->b->...->f are then flattened so every
    member maps directly to the endpoint f.
    """
    replacement_of = {}
    for group in groups_of_pairs:
        for ( bad, good ) in sorted( group ):
            if bad == good:
                # a tag cannot be its own sibling
                continue
            if bad in replacement_of:
                # a higher-precedence group already decided this tag
                continue
            # walk forward from good; arriving back at bad means a loop
            cursor = good
            closes_loop = False
            while cursor in replacement_of:
                cursor = replacement_of[ cursor ]
                if cursor == bad:
                    closes_loop = True
                    break
            if not closes_loop:
                replacement_of[ bad ] = good
    # flatten: map every bad tag straight to its chain's endpoint,
    # memoising endpoints already computed for earlier tags
    collapsed = {}
    for ( bad, good ) in replacement_of.items():
        if good in collapsed:
            endpoint = collapsed[ good ]
        else:
            endpoint = good
            while endpoint in replacement_of:
                endpoint = replacement_of[ endpoint ]
        collapsed[ bad ] = endpoint
    return collapsed
def LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ):
    """Return True if adding child->parent would create a cycle, i.e. if
    ``child`` is already reachable as an ancestor of ``parent``."""
    frontier = { parent }
    known_children = set( simple_children_to_parents.keys() )
    while True:
        expandable = frontier & known_children
        if not expandable:
            return False
        # breadth-first step: replace the frontier with all next-level parents
        next_frontier = set()
        for node in expandable:
            next_frontier.update( simple_children_to_parents[ node ] )
        frontier = next_frontier
        if child in frontier:
            return True
class ClientFilesManager( object ):
    def __init__( self, controller ):
        """Initialise empty state, then load the prefix->location map and
        check the storage folders exist via _Reinit."""
        self._controller = controller
        self._lock = threading.Lock()
        # maps a storage prefix (e.g. 'f07', 't3a', 'r00') to the base
        # directory that holds that subfolder
        self._prefixes_to_locations = {}
        # set True when expected storage locations were missing on init
        self._bad_error_occured = False
        self._missing_locations = set()
        self._Reinit()
def _GenerateExpectedFilePath( self, hash, mime ):
hash_encoded = hash.encode( 'hex' )
prefix = 'f' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
path = os.path.join( location, prefix, hash_encoded + HC.mime_ext_lookup[ mime ] )
return path
def _GenerateExpectedFullSizeThumbnailPath( self, hash ):
hash_encoded = hash.encode( 'hex' )
prefix = 't' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
path = os.path.join( location, prefix, hash_encoded ) + '.thumbnail'
return path
def _GenerateExpectedResizedThumbnailPath( self, hash ):
hash_encoded = hash.encode( 'hex' )
prefix = 'r' + hash_encoded[:2]
location = self._prefixes_to_locations[ prefix ]
path = os.path.join( location, prefix, hash_encoded ) + '.thumbnail.resized'
return path
    def _GenerateFullSizeThumbnail( self, hash, mime = None ):
        """(Re)generate the full-size thumbnail for ``hash`` from the original
        file and write it to its expected location.

        When ``mime`` is None the original file is located by probing all
        allowed mimes and its mime sniffed from disk; otherwise the expected
        path for that mime is used directly.  Raises FileMissingException if
        the original is missing, cannot be thumbnailed, or the thumbnail
        cannot be written.
        """
        if mime is None:
            try:
                file_path = self._LookForFilePath( hash )
            except HydrusExceptions.FileMissingException:
                raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was missing. It could not be regenerated because the original file was also missing. This event could indicate hard drive corruption or an unplugged external drive. Please check everything is ok.' )
            mime = HydrusFileHandling.GetMime( file_path )
        else:
            file_path = self._GenerateExpectedFilePath( hash, mime )
        try:
            # for videos, sample the frame this percentage into the file
            percentage_in = self._controller.new_options.GetInteger( 'video_thumbnail_percentage_in' )
            thumbnail = HydrusFileHandling.GenerateThumbnail( file_path, mime, percentage_in = percentage_in )
        except Exception as e:
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was missing. It could not be regenerated from the original file for the above reason. This event could indicate hard drive corruption. Please check everything is ok.' )
        full_size_path = self._GenerateExpectedFullSizeThumbnailPath( hash )
        try:
            HydrusPaths.MakeFileWritable( full_size_path )
            with open( full_size_path, 'wb' ) as f:
                f.write( thumbnail )
        except Exception as e:
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was missing. It was regenerated from the original file, but hydrus could not write it to the location ' + full_size_path + ' for the above reason. This event could indicate hard drive corruption, and it also suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.' )
    def _GenerateResizedThumbnail( self, hash, mime ):
        """Produce the resized thumbnail for ``hash`` from its full-size
        thumbnail and write it to its expected location.

        If the full-size thumbnail will not render, it is deleted and
        regenerated from the original file before retrying.  Raises
        FileMissingException when deletion or the final write fails.
        """
        full_size_path = self._GenerateExpectedFullSizeThumbnailPath( hash )
        thumbnail_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
        # gifs/pngs get png thumbnails (preserves sharp edges/transparency
        # handling); everything else gets jpeg
        if mime in ( HC.IMAGE_GIF, HC.IMAGE_PNG ):
            fullsize_thumbnail_mime = HC.IMAGE_PNG
        else:
            fullsize_thumbnail_mime = HC.IMAGE_JPEG
        try:
            thumbnail_resized = HydrusFileHandling.GenerateThumbnailFromStaticImage( full_size_path, thumbnail_dimensions, fullsize_thumbnail_mime )
        except:
            # full-size thumb is unreadable: delete it, regenerate from the
            # original file, then retry the resize once
            try:
                ClientPaths.DeletePath( full_size_path, always_delete_fully = True )
            except:
                raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was found, but it would not render. An attempt to delete it was made, but that failed as well. This event could indicate hard drive corruption, and it also suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.' )
            self._GenerateFullSizeThumbnail( hash, mime )
            thumbnail_resized = HydrusFileHandling.GenerateThumbnailFromStaticImage( full_size_path, thumbnail_dimensions, fullsize_thumbnail_mime )
        resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
        try:
            HydrusPaths.MakeFileWritable( resized_path )
            with open( resized_path, 'wb' ) as f:
                f.write( thumbnail_resized )
        except Exception as e:
            HydrusData.ShowException( e )
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was found, but the resized version would not save to disk. This event suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.' )
def _GetRecoverTuple( self ):
all_locations = { location for location in self._prefixes_to_locations.values() }
all_prefixes = self._prefixes_to_locations.keys()
for possible_location in all_locations:
for prefix in all_prefixes:
correct_location = self._prefixes_to_locations[ prefix ]
if possible_location != correct_location and os.path.exists( os.path.join( possible_location, prefix ) ):
recoverable_location = possible_location
return ( prefix, recoverable_location, correct_location )
return None
    def _GetRebalanceTuple( self ):
        """Find the next single subfolder migration that moves storage toward
        the user's ideal weighting.

        Returns ( prefix, current_location, destination_location ), or None
        when everything is already where it should be.  File ('f') rebalances
        take priority; only when file weights are satisfied are the full-size
        ('t') and resized ('r') thumbnail folders pulled toward their file
        folders or their user-set override locations.
        """
        ( locations_to_ideal_weights, resized_thumbnail_override, full_size_thumbnail_override ) = self._controller.new_options.GetClientFilesLocationsToIdealWeights()
        total_weight = sum( locations_to_ideal_weights.values() )
        ideal_locations_to_normalised_weights = { location : weight / total_weight for ( location, weight ) in locations_to_ideal_weights.items() }
        current_locations_to_normalised_weights = collections.defaultdict( lambda: 0 )
        file_prefixes = [ prefix for prefix in self._prefixes_to_locations if prefix.startswith( 'f' ) ]
        # each of the 256 file subfolders contributes 1/256 of the weight
        for file_prefix in file_prefixes:
            location = self._prefixes_to_locations[ file_prefix ]
            current_locations_to_normalised_weights[ location ] += 1.0 / 256
        # locations currently in use but no longer wanted get ideal weight 0
        for location in current_locations_to_normalised_weights.keys():
            if location not in ideal_locations_to_normalised_weights:
                ideal_locations_to_normalised_weights[ location ] = 0.0
        #
        overweight_locations = []
        underweight_locations = []
        for ( location, ideal_weight ) in ideal_locations_to_normalised_weights.items():
            if location in current_locations_to_normalised_weights:
                current_weight = current_locations_to_normalised_weights[ location ]
                if current_weight < ideal_weight:
                    underweight_locations.append( location )
                elif current_weight >= ideal_weight + 1.0 / 256:
                    # only counts as overweight if a whole subfolder can move
                    overweight_locations.append( location )
            else:
                underweight_locations.append( location )
        #
        if len( underweight_locations ) > 0 and len( overweight_locations ) > 0:
            # move one random file subfolder from an overweight location to
            # an underweight one
            overweight_location = overweight_locations.pop( 0 )
            underweight_location = underweight_locations.pop( 0 )
            random.shuffle( file_prefixes )
            for file_prefix in file_prefixes:
                location = self._prefixes_to_locations[ file_prefix ]
                if location == overweight_location:
                    return ( file_prefix, overweight_location, underweight_location )
        else:
            # files are balanced -- pull thumbnail folders to where they
            # belong (beside their file folder, or to the user's override)
            if full_size_thumbnail_override is None:
                for hex_prefix in HydrusData.IterateHexPrefixes():
                    full_size_prefix = 't' + hex_prefix
                    file_prefix = 'f' + hex_prefix
                    full_size_location = self._prefixes_to_locations[ full_size_prefix ]
                    file_location = self._prefixes_to_locations[ file_prefix ]
                    if full_size_location != file_location:
                        return ( full_size_prefix, full_size_location, file_location )
            else:
                for hex_prefix in HydrusData.IterateHexPrefixes():
                    full_size_prefix = 't' + hex_prefix
                    full_size_location = self._prefixes_to_locations[ full_size_prefix ]
                    if full_size_location != full_size_thumbnail_override:
                        return ( full_size_prefix, full_size_location, full_size_thumbnail_override )
            if resized_thumbnail_override is None:
                for hex_prefix in HydrusData.IterateHexPrefixes():
                    resized_prefix = 'r' + hex_prefix
                    file_prefix = 'f' + hex_prefix
                    resized_location = self._prefixes_to_locations[ resized_prefix ]
                    file_location = self._prefixes_to_locations[ file_prefix ]
                    if resized_location != file_location:
                        return ( resized_prefix, resized_location, file_location )
            else:
                for hex_prefix in HydrusData.IterateHexPrefixes():
                    resized_prefix = 'r' + hex_prefix
                    resized_location = self._prefixes_to_locations[ resized_prefix ]
                    if resized_location != resized_thumbnail_override:
                        return ( resized_prefix, resized_location, resized_thumbnail_override )
        return None
def _IterateAllFilePaths( self ):
for ( prefix, location ) in self._prefixes_to_locations.items():
if prefix.startswith( 'f' ):
dir = os.path.join( location, prefix )
filenames = os.listdir( dir )
for filename in filenames:
yield os.path.join( dir, filename )
def _IterateAllThumbnailPaths( self ):
for ( prefix, location ) in self._prefixes_to_locations.items():
if prefix.startswith( 't' ) or prefix.startswith( 'r' ):
dir = os.path.join( location, prefix )
filenames = os.listdir( dir )
for filename in filenames:
yield os.path.join( dir, filename )
def _LookForFilePath( self, hash ):
for potential_mime in HC.ALLOWED_MIMES:
potential_path = self._GenerateExpectedFilePath( hash, potential_mime )
if os.path.exists( potential_path ):
return potential_path
raise HydrusExceptions.FileMissingException( 'File for ' + hash.encode( 'hex' ) + ' not found!' )
    def _Reinit( self ):
        # Refresh the prefix->directory map from the db and verify the folder structure.
        # On first start we create everything; on later boots we only record what is
        # missing so the ui can prompt the user to repair it manually.
        self._prefixes_to_locations = self._controller.Read( 'client_files_locations' )
        if HG.client_controller.IsFirstStart():
            try:
                for ( prefix, location ) in self._prefixes_to_locations.items():
                    HydrusPaths.MakeSureDirectoryExists( location )
                    subdir = os.path.join( location, prefix )
                    HydrusPaths.MakeSureDirectoryExists( subdir )
            except:
                text = 'Attempting to create the database\'s client_files folder structure failed!'
                wx.MessageBox( text )
                raise
        else:
            self._missing_locations = set()
            for ( prefix, location ) in self._prefixes_to_locations.items():
                if os.path.exists( location ):
                    subdir = os.path.join( location, prefix )
                    if not os.path.exists( subdir ):
                        self._missing_locations.add( ( location, prefix ) )
                else:
                    # whole location is gone, so its prefix is missing too
                    self._missing_locations.add( ( location, prefix ) )
            if len( self._missing_locations ) > 0:
                # something is missing: trip the session-wide error flag and build a
                # human-readable report grouped by location
                self._bad_error_occured = True
                #
                missing_dict = HydrusData.BuildKeyToListDict( self._missing_locations )
                missing_locations = list( missing_dict.keys() )
                missing_locations.sort()
                missing_string = ''
                for l in missing_locations:
                    missing_prefixes = list( missing_dict[ l ] )
                    missing_prefixes.sort()
                    missing_prefixes_string = ' ' + os.linesep.join( ( ', '.join( block ) for block in HydrusData.SplitListIntoChunks( missing_prefixes, 32 ) ) )
                    missing_string += os.linesep
                    missing_string += l
                    missing_string += os.linesep
                    missing_string += missing_prefixes_string
                #
                if len( self._missing_locations ) > 4:
                    # too many to show comfortably in a dialog, so send the detail to the log
                    text = 'When initialising the client files manager, some file locations did not exist! They have all been written to the log!'
                    text += os.linesep * 2
                    text += 'If this is happening on client boot, you should now be presented with a dialog to correct this manually!'
                    wx.MessageBox( text )
                    HydrusData.DebugPrint( text )
                    HydrusData.DebugPrint( 'Missing locations follow:' )
                    HydrusData.DebugPrint( missing_string )
                else:
                    text = 'When initialising the client files manager, these file locations did not exist:'
                    text += os.linesep * 2
                    text += missing_string
                    text += os.linesep * 2
                    text += 'If this is happening on client boot, you should now be presented with a dialog to correct this manually!'
                    wx.MessageBox( text )
                    HydrusData.DebugPrint( text )
    def GetMissing( self ):
        # Accessor for the ( location, prefix ) pairs found missing during _Reinit.
        return self._missing_locations
def LocklessAddFileFromString( self, hash, mime, data ):
dest_path = self._GenerateExpectedFilePath( hash, mime )
HydrusPaths.MakeFileWritable( dest_path )
with open( dest_path, 'wb' ) as f:
f.write( data )
def LocklessAddFile( self, hash, mime, source_path ):
dest_path = self._GenerateExpectedFilePath( hash, mime )
if not os.path.exists( dest_path ):
successful = HydrusPaths.MirrorFile( source_path, dest_path )
if not successful:
raise Exception( 'There was a problem copying the file from ' + source_path + ' to ' + dest_path + '!' )
    def AddFullSizeThumbnail( self, hash, thumbnail ):
        # Locked wrapper around LocklessAddFullSizeThumbnail.
        with self._lock:
            self.LocklessAddFullSizeThumbnail( hash, thumbnail )
def LocklessAddFullSizeThumbnail( self, hash, thumbnail ):
path = self._GenerateExpectedFullSizeThumbnailPath( hash )
HydrusPaths.MakeFileWritable( path )
with open( path, 'wb' ) as f:
f.write( thumbnail )
resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
if os.path.exists( resized_path ):
ClientPaths.DeletePath( resized_path, always_delete_fully = True )
self._controller.pub( 'clear_thumbnails', { hash } )
self._controller.pub( 'new_thumbnails', { hash } )
    def CheckFileIntegrity( self, *args, **kwargs ):
        # Locked pass-through to the db's synchronous file integrity check.
        with self._lock:
            self._controller.WriteSynchronous( 'file_integrity', *args, **kwargs )
def ClearOrphans( self, move_location = None ):
with self._lock:
job_key = ClientThreading.JobKey( cancellable = True )
job_key.SetVariable( 'popup_title', 'clearing orphans' )
job_key.SetVariable( 'popup_text_1', 'preparing' )
self._controller.pub( 'message', job_key )
orphan_paths = []
orphan_thumbnails = []
for ( i, path ) in enumerate( self._IterateAllFilePaths() ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
if i % 100 == 0:
status = 'reviewed ' + HydrusData.ToHumanInt( i ) + ' files, found ' + HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphans'
job_key.SetVariable( 'popup_text_1', status )
try:
is_an_orphan = False
( directory, filename ) = os.path.split( path )
should_be_a_hex_hash = filename[:64]
hash = should_be_a_hex_hash.decode( 'hex' )
is_an_orphan = HG.client_controller.Read( 'is_an_orphan', 'file', hash )
except:
is_an_orphan = True
if is_an_orphan:
if move_location is not None:
( source_dir, filename ) = os.path.split( path )
dest = os.path.join( move_location, filename )
dest = HydrusPaths.AppendPathUntilNoConflicts( dest )
HydrusData.Print( 'Moving the orphan ' + path + ' to ' + dest )
HydrusPaths.MergeFile( path, dest )
orphan_paths.append( path )
time.sleep( 2 )
for ( i, path ) in enumerate( self._IterateAllThumbnailPaths() ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
if i % 100 == 0:
status = 'reviewed ' + HydrusData.ToHumanInt( i ) + ' thumbnails, found ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphans'
job_key.SetVariable( 'popup_text_1', status )
try:
is_an_orphan = False
( directory, filename ) = os.path.split( path )
should_be_a_hex_hash = filename[:64]
hash = should_be_a_hex_hash.decode( 'hex' )
is_an_orphan = HG.client_controller.Read( 'is_an_orphan', 'thumbnail', hash )
except:
is_an_orphan = True
if is_an_orphan:
orphan_thumbnails.append( path )
time.sleep( 2 )
if move_location is None and len( orphan_paths ) > 0:
status = 'found ' + HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphans, now deleting'
job_key.SetVariable( 'popup_text_1', status )
time.sleep( 5 )
for path in orphan_paths:
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
HydrusData.Print( 'Deleting the orphan ' + path )
status = 'deleting orphan files: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_paths ) )
job_key.SetVariable( 'popup_text_1', status )
ClientPaths.DeletePath( path )
if len( orphan_thumbnails ) > 0:
status = 'found ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphan thumbnails, now deleting'
job_key.SetVariable( 'popup_text_1', status )
time.sleep( 5 )
for ( i, path ) in enumerate( orphan_thumbnails ):
( i_paused, should_quit ) = job_key.WaitIfNeeded()
if should_quit:
return
status = 'deleting orphan thumbnails: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_thumbnails ) )
job_key.SetVariable( 'popup_text_1', status )
HydrusData.Print( 'Deleting the orphan ' + path )
ClientPaths.DeletePath( path, always_delete_fully = True )
if len( orphan_paths ) == 0 and len( orphan_thumbnails ) == 0:
final_text = 'no orphans found!'
else:
final_text = HydrusData.ToHumanInt( len( orphan_paths ) ) + ' orphan files and ' + HydrusData.ToHumanInt( len( orphan_thumbnails ) ) + ' orphan thumbnails cleared!'
job_key.SetVariable( 'popup_text_1', final_text )
HydrusData.Print( job_key.ToString() )
job_key.Finish()
def DelayedDeleteFiles( self, hashes, time_to_delete ):
while not HydrusData.TimeHasPassed( time_to_delete ):
time.sleep( 0.5 )
big_pauser = HydrusData.BigJobPauser( period = 1 )
with self._lock:
for hash in hashes:
try:
path = self._LookForFilePath( hash )
except HydrusExceptions.FileMissingException:
continue
ClientPaths.DeletePath( path )
big_pauser.Pause()
def DelayedDeleteThumbnails( self, hashes, time_to_delete ):
while not HydrusData.TimeHasPassed( time_to_delete ):
time.sleep( 0.5 )
with self._lock:
big_pauser = HydrusData.BigJobPauser( period = 1 )
for hash in hashes:
path = self._GenerateExpectedFullSizeThumbnailPath( hash )
resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
ClientPaths.DeletePath( path, always_delete_fully = True )
ClientPaths.DeletePath( resized_path, always_delete_fully = True )
big_pauser.Pause()
    def GetFilePath( self, hash, mime = None ):
        # Locked wrapper around LocklessGetFilePath.
        with self._lock:
            return self.LocklessGetFilePath( hash, mime )
    def ImportFile( self, file_import_job ):
        # Run a file import job: hash the file and, if it is new to the db, validate it,
        # copy the file (and thumbnail, if any) into client storage, then record it in
        # the db synchronously. Returns ( import_status, hash, note ).
        ( pre_import_status, hash, note ) = file_import_job.GenerateHashAndStatus()
        if file_import_job.IsNewToDB():
            file_import_job.GenerateInfo()
            # raises if the file fails import checks
            file_import_job.CheckIsGoodToImport()
            ( temp_path, thumbnail ) = file_import_job.GetTempPathAndThumbnail()
            mime = file_import_job.GetMime()
            with self._lock:
                self.LocklessAddFile( hash, mime, temp_path )
                if thumbnail is not None:
                    self.LocklessAddFullSizeThumbnail( hash, thumbnail )
            # db write happens outside the file lock
            ( import_status, note ) = self._controller.WriteSynchronous( 'import_file', file_import_job )
        else:
            import_status = pre_import_status
        file_import_job.PubsubContentUpdates()
        return ( import_status, hash, note )
def LocklessGetFilePath( self, hash, mime = None ):
if mime is None:
path = self._LookForFilePath( hash )
else:
path = self._GenerateExpectedFilePath( hash, mime )
if not os.path.exists( path ):
raise HydrusExceptions.FileMissingException( 'No file found at path + ' + path + '!' )
return path
    def GetFullSizeThumbnailPath( self, hash, mime = None ):
        # Return the full-size thumbnail path, regenerating the thumb from the source
        # file if it is missing. A missing thumb also trips the session-wide error flag
        # (once per boot) since it can indicate hard drive corruption.
        with self._lock:
            path = self._GenerateExpectedFullSizeThumbnailPath( hash )
            if not os.path.exists( path ):
                self._GenerateFullSizeThumbnail( hash, mime )
                if not self._bad_error_occured:
                    self._bad_error_occured = True
                    HydrusData.ShowText( 'A thumbnail for a file, ' + hash.encode( 'hex' ) + ', was missing. It has been regenerated from the original file, but this event could indicate hard drive corruption. Please check everything is ok. This error may be occuring for many files, but this message will only display once per boot. If you are recovering from a fractured database, you may wish to run \'database->regenerate->all thumbnails\'.' )
            return path
def GetResizedThumbnailPath( self, hash, mime ):
with self._lock:
path = self._GenerateExpectedResizedThumbnailPath( hash )
if not os.path.exists( path ):
self._GenerateResizedThumbnail( hash, mime )
return path
def LocklessHasFullSizeThumbnail( self, hash ):
path = self._GenerateExpectedFullSizeThumbnailPath( hash )
return os.path.exists( path )
    def Rebalance( self, job_key ):
        # Move prefix folders between storage locations until the user's weighting is
        # satisfied, then recover any folders found sitting in the wrong location.
        # Refuses to run if a serious file error has already occurred this session.
        try:
            if self._bad_error_occured:
                wx.MessageBox( 'A serious file error has previously occured during this session, so further file moving will not be reattempted. Please restart the client before trying again.' )
                return
            with self._lock:
                rebalance_tuple = self._GetRebalanceTuple()
                while rebalance_tuple is not None:
                    if job_key.IsCancelled():
                        break
                    ( prefix, overweight_location, underweight_location ) = rebalance_tuple
                    text = 'Moving \'' + prefix + '\' from ' + overweight_location + ' to ' + underweight_location
                    HydrusData.Print( text )
                    job_key.SetVariable( 'popup_text_1', text )
                    # these two lines can cause a deadlock because the db sometimes calls stuff in here.
                    self._controller.Write( 'relocate_client_files', prefix, overweight_location, underweight_location )
                    self._Reinit()
                    rebalance_tuple = self._GetRebalanceTuple()
                recover_tuple = self._GetRecoverTuple()
                while recover_tuple is not None:
                    if job_key.IsCancelled():
                        break
                    ( prefix, recoverable_location, correct_location ) = recover_tuple
                    text = 'Recovering \'' + prefix + '\' from ' + recoverable_location + ' to ' + correct_location
                    HydrusData.Print( text )
                    job_key.SetVariable( 'popup_text_1', text )
                    recoverable_path = os.path.join( recoverable_location, prefix )
                    correct_path = os.path.join( correct_location, prefix )
                    HydrusPaths.MergeTree( recoverable_path, correct_path )
                    recover_tuple = self._GetRecoverTuple()
        finally:
            # always close out the popup, even on cancel or error
            job_key.SetVariable( 'popup_text_1', 'done!' )
            job_key.Finish()
            job_key.Delete()
def RebalanceWorkToDo( self ):
with self._lock:
return self._GetRebalanceTuple() is not None
    def RegenerateResizedThumbnail( self, hash, mime ):
        # Locked wrapper around LocklessRegenerateResizedThumbnail.
        with self._lock:
            self.LocklessRegenerateResizedThumbnail( hash, mime )
    def LocklessRegenerateResizedThumbnail( self, hash, mime ):
        # Rebuild the resized thumbnail from the full-size one. Caller holds the lock.
        self._GenerateResizedThumbnail( hash, mime )
    def RegenerateThumbnails( self, only_do_missing = False ):
        # Rebuild full-size thumbnails for every stored file (or only the missing ones
        # when only_do_missing is True), deleting stale resized copies so they
        # regenerate on next view. Progress is shown in a cancellable modal popup.
        with self._lock:
            job_key = ClientThreading.JobKey( cancellable = True )
            job_key.SetVariable( 'popup_title', 'regenerating thumbnails' )
            job_key.SetVariable( 'popup_text_1', 'creating directories' )
            self._controller.pub( 'modal_message', job_key )
            num_broken = 0
            for ( i, path ) in enumerate( self._IterateAllFilePaths() ):
                try:
                    # honour pause/cancel between files
                    while job_key.IsPaused() or job_key.IsCancelled():
                        time.sleep( 0.1 )
                        if job_key.IsCancelled():
                            job_key.SetVariable( 'popup_text_1', 'cancelled' )
                            HydrusData.Print( job_key.ToString() )
                            return
                    job_key.SetVariable( 'popup_text_1', HydrusData.ToHumanInt( i ) + ' done' )
                    ( base, filename ) = os.path.split( path )
                    if '.' in filename:
                        # filename is '<hex hash>.<ext>'
                        ( hash_encoded, ext ) = filename.split( '.', 1 )
                    else:
                        continue # it is an update file, so let's save us some ffmpeg lag and logspam
                    hash = hash_encoded.decode( 'hex' )
                    full_size_path = self._GenerateExpectedFullSizeThumbnailPath( hash )
                    if only_do_missing and os.path.exists( full_size_path ):
                        continue
                    mime = HydrusFileHandling.GetMime( path )
                    if mime in HC.MIMES_WITH_THUMBNAILS:
                        self._GenerateFullSizeThumbnail( hash, mime )
                        # drop the resized copy so it regenerates from the new full-size thumb
                        thumbnail_resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
                        if os.path.exists( thumbnail_resized_path ):
                            ClientPaths.DeletePath( thumbnail_resized_path, always_delete_fully = True )
                except:
                    # log and count the broken file, then keep going
                    HydrusData.Print( path )
                    HydrusData.Print( traceback.format_exc() )
                    num_broken += 1
            if num_broken > 0:
                job_key.SetVariable( 'popup_text_1', 'done! ' + HydrusData.ToHumanInt( num_broken ) + ' files caused errors, which have been written to the log.' )
            else:
                job_key.SetVariable( 'popup_text_1', 'done!' )
            HydrusData.Print( job_key.ToString() )
            job_key.Finish()
class DataCache( object ):
    # A generic size-bounded, timeout-expiring cache used for thumbnails, rendered
    # images and similar. Entries are evicted oldest-first when the estimated memory
    # footprint exceeds cache_size, and dropped on the maintenance pulse once unused
    # for longer than timeout seconds.
    def __init__( self, controller, cache_size, timeout = 1200 ):
        self._controller = controller
        self._cache_size = cache_size
        self._timeout = timeout
        self._keys_to_data = {}
        # key -> last access timestamp, maintained in access order (oldest first)
        self._keys_fifo = collections.OrderedDict()
        self._total_estimated_memory_footprint = 0
        self._lock = threading.Lock()
        self._controller.sub( self, 'MaintainCache', 'memory_maintenance_pulse' )
    def _Delete( self, key ):
        # Silently ignore keys we do not hold.
        if key not in self._keys_to_data:
            return
        # fix: previously fetched the data into an unused local before deleting
        del self._keys_to_data[ key ]
        self._RecalcMemoryUsage()
    def _DeleteItem( self ):
        # Evict the least recently touched entry.
        ( deletee_key, last_access_time ) = self._keys_fifo.popitem( last = False )
        self._Delete( deletee_key )
    def _RecalcMemoryUsage( self ):
        self._total_estimated_memory_footprint = sum( ( data.GetEstimatedMemoryFootprint() for data in self._keys_to_data.values() ) )
    def _TouchKey( self, key ):
        # have to delete first, rather than overwriting, so the ordereddict updates its internal order
        if key in self._keys_fifo:
            del self._keys_fifo[ key ]
        self._keys_fifo[ key ] = HydrusData.GetNow()
    def Clear( self ):
        with self._lock:
            self._keys_to_data = {}
            self._keys_fifo = collections.OrderedDict()
            self._total_estimated_memory_footprint = 0
    def AddData( self, key, data ):
        with self._lock:
            if key not in self._keys_to_data:
                # make room before adding the new entry
                while self._total_estimated_memory_footprint > self._cache_size:
                    self._DeleteItem()
                self._keys_to_data[ key ] = data
                self._TouchKey( key )
                self._RecalcMemoryUsage()
    def DeleteData( self, key ):
        with self._lock:
            self._Delete( key )
    def GetData( self, key ):
        # Raises if the key is missing; prefer GetIfHasData for optional lookups.
        with self._lock:
            if key not in self._keys_to_data:
                raise Exception( 'Cache error! Looking for ' + HydrusData.ToUnicode( key ) + ', but it was missing.' )
            self._TouchKey( key )
            return self._keys_to_data[ key ]
    def GetIfHasData( self, key ):
        # Returns the data or None, refreshing the entry's access time on a hit.
        with self._lock:
            if key in self._keys_to_data:
                self._TouchKey( key )
                return self._keys_to_data[ key ]
            else:
                return None
    def HasData( self, key ):
        with self._lock:
            return key in self._keys_to_data
    def MaintainCache( self ):
        # Called on the memory maintenance pulse: pop expired entries from the front
        # of the fifo until we hit one that is still fresh.
        with self._lock:
            while True:
                if len( self._keys_fifo ) == 0:
                    break
                else:
                    ( key, last_access_time ) = next( self._keys_fifo.iteritems() )
                    if HydrusData.TimeHasPassed( last_access_time + self._timeout ):
                        self._DeleteItem()
                    else:
                        break
class LocalBooruCache( object ):
    # Caches share info for the local booru service so incoming http requests do not
    # have to hit the db every time. Shares are lazily populated on first access.
    def __init__( self, controller ):
        self._controller = controller
        self._lock = threading.Lock()
        self._RefreshShares()
        self._controller.sub( self, 'RefreshShares', 'refresh_local_booru_shares' )
        self._controller.sub( self, 'RefreshShares', 'restart_booru' )
    def _CheckDataUsage( self ):
        # Raises ForbiddenException when the monthly bandwidth allowance is exhausted.
        if not self._local_booru_service.BandwidthOK():
            raise HydrusExceptions.ForbiddenException( 'This booru has used all its monthly data. Please try again next month.' )
    def _CheckFileAuthorised( self, share_key, hash ):
        self._CheckShareAuthorised( share_key )
        info = self._GetInfo( share_key )
        if hash not in info[ 'hashes_set' ]:
            raise HydrusExceptions.NotFoundException( 'That file was not found in that share.' )
    def _CheckShareAuthorised( self, share_key ):
        self._CheckDataUsage()
        info = self._GetInfo( share_key )
        timeout = info[ 'timeout' ]
        if timeout is not None and HydrusData.TimeHasPassed( timeout ):
            raise HydrusExceptions.ForbiddenException( 'This share has expired.' )
    def _GetInfo( self, share_key ):
        # fix: narrowed a bare 'except:' to KeyError -- a missing key is the only
        # expected failure for this dict lookup
        try:
            info = self._keys_to_infos[ share_key ]
        except KeyError:
            raise HydrusExceptions.NotFoundException( 'Did not find that share on this booru.' )
        if info is None:
            # lazily load the share's hashes and media results on first access
            info = self._controller.Read( 'local_booru_share', share_key )
            hashes = info[ 'hashes' ]
            info[ 'hashes_set' ] = set( hashes )
            media_results = self._controller.Read( 'media_results', hashes )
            info[ 'media_results' ] = media_results
            hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
            info[ 'hashes_to_media_results' ] = hashes_to_media_results
            self._keys_to_infos[ share_key ] = info
        return info
    def _RefreshShares( self ):
        # Reset to a lazy map of share_key -> None; info loads on demand in _GetInfo.
        self._local_booru_service = self._controller.services_manager.GetService( CC.LOCAL_BOORU_SERVICE_KEY )
        self._keys_to_infos = {}
        share_keys = self._controller.Read( 'local_booru_share_keys' )
        for share_key in share_keys: self._keys_to_infos[ share_key ] = None
    def CheckShareAuthorised( self, share_key ):
        with self._lock: self._CheckShareAuthorised( share_key )
    def CheckFileAuthorised( self, share_key, hash ):
        with self._lock: self._CheckFileAuthorised( share_key, hash )
    def GetGalleryInfo( self, share_key ):
        with self._lock:
            self._CheckShareAuthorised( share_key )
            info = self._GetInfo( share_key )
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_results = info[ 'media_results' ]
            return ( name, text, timeout, media_results )
    def GetMediaResult( self, share_key, hash ):
        with self._lock:
            info = self._GetInfo( share_key )
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            return media_result
    def GetPageInfo( self, share_key, hash ):
        with self._lock:
            self._CheckFileAuthorised( share_key, hash )
            info = self._GetInfo( share_key )
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            return ( name, text, timeout, media_result )
    def RefreshShares( self ):
        with self._lock:
            self._RefreshShares()
class MenuEventIdToActionCache( object ):
    # Maps wx menu event ids to ( command, data ) actions and back. Temporary ids are
    # recycled between menus so long sessions do not exhaust wx's id space.
    def __init__( self ):
        self._ids_to_actions = {}
        self._actions_to_ids = {}
        self._temporary_ids = set()
        self._free_temporary_ids = set()
    def _ClearTemporaries( self ):
        # Unbind the actions from all in-use temporary ids and return those ids to the free pool.
        for temporary_id in self._temporary_ids.difference( self._free_temporary_ids ):
            temporary_action = self._ids_to_actions[ temporary_id ]
            del self._ids_to_actions[ temporary_id ]
            del self._actions_to_ids[ temporary_action ]
        self._free_temporary_ids = set( self._temporary_ids )
    def _GetNewId( self, temporary ):
        if temporary:
            # grow the pool when empty, then hand out a free temporary id
            if len( self._free_temporary_ids ) == 0:
                new_id = wx.NewId()
                self._temporary_ids.add( new_id )
                self._free_temporary_ids.add( new_id )
            return self._free_temporary_ids.pop()
        else:
            return wx.NewId()
    def GetAction( self, event_id ):
        # Fetch the action for a menu event id. A hit on a temporary id means its menu
        # was activated, so all temporaries can now be recycled.
        action = None
        if event_id in self._ids_to_actions:
            action = self._ids_to_actions[ event_id ]
            if event_id in self._temporary_ids:
                self._ClearTemporaries()
        return action
    def GetId( self, command, data = None, temporary = False ):
        # Return the id bound to ( command, data ), allocating one on first sight.
        action = ( command, data )
        if action not in self._actions_to_ids:
            event_id = self._GetNewId( temporary )
            self._ids_to_actions[ event_id ] = action
            self._actions_to_ids[ action ] = event_id
        return self._actions_to_ids[ action ]
    def GetPermanentId( self, command, data = None ):
        return self.GetId( command, data, False )
    def GetTemporaryId( self, command, data = None ):
        # dataless commands are common and stable, so they get permanent ids instead
        temporary = True
        if data is None:
            temporary = False
        return self.GetId( command, data, temporary )
# module-level singleton so all menus share one id <-> action mapping
MENU_EVENT_ID_TO_ACTION_CACHE = MenuEventIdToActionCache()
class ParsingCache( object ):
    # Short-lived caches of parsed html soups and json objects, keyed by source text,
    # so repeated parses of the same page content are free.
    def __init__( self ):
        self._html_to_soups = {}
        self._json_to_jsons = {}
        self._lock = threading.Lock()
    def _CleanCache( self ):
        # Forget anything not touched in the last ten seconds.
        for cache in ( self._html_to_soups, self._json_to_jsons ):
            dead_datas = { data for ( data, ( last_accessed, parsed_object ) ) in cache.items() if HydrusData.TimeHasPassed( last_accessed + 10 ) }
            for dead_data in dead_datas:
                del cache[ dead_data ]
    def CleanCache( self ):
        with self._lock:
            self._CleanCache()
    def GetJSON( self, json_text ):
        # Parse json_text, reusing a recent parse when we have one.
        with self._lock:
            now = HydrusData.GetNow()
            if json_text in self._json_to_jsons:
                ( last_accessed, json_object ) = self._json_to_jsons[ json_text ]
                if last_accessed != now:
                    # refresh the access time
                    self._json_to_jsons[ json_text ] = ( now, json_object )
            else:
                json_object = json.loads( json_text )
                self._json_to_jsons[ json_text ] = ( now, json_object )
            if len( self._json_to_jsons ) > 10:
                self._CleanCache()
            return json_object
    def GetSoup( self, html ):
        # Same pattern as GetJSON, but for parsed html documents.
        with self._lock:
            now = HydrusData.GetNow()
            if html in self._html_to_soups:
                ( last_accessed, soup ) = self._html_to_soups[ html ]
                if last_accessed != now:
                    self._html_to_soups[ html ] = ( now, soup )
            else:
                soup = ClientParsing.GetSoup( html )
                self._html_to_soups[ html ] = ( now, soup )
            if len( self._html_to_soups ) > 10:
                self._CleanCache()
            return soup
class RenderedImageCache( object ):
    # Caches fully-rendered images for the media viewer, backed by a timed DataCache
    # sized from the user's fullscreen cache options.
    def __init__( self, controller ):
        self._controller = controller
        fullscreen_cache_size = self._controller.options[ 'fullscreen_cache_size' ]
        image_timeout = self._controller.new_options.GetInteger( 'image_cache_timeout' )
        self._data_cache = DataCache( self._controller, fullscreen_cache_size, timeout = image_timeout )
    def Clear( self ):
        self._data_cache.Clear()
    def GetImageRenderer( self, media ):
        # Serve a cached renderer when we have one, otherwise start a new render and cache it.
        key = media.GetHash()
        cached_renderer = self._data_cache.GetIfHasData( key )
        if cached_renderer is not None:
            return cached_renderer
        image_renderer = ClientRendering.ImageRenderer( media )
        self._data_cache.AddData( key, image_renderer )
        return image_renderer
    def HasImageRenderer( self, hash ):
        return self._data_cache.HasData( hash )
class ThumbnailCache( object ):
    # Timed cache of rendered thumbnail bitmaps, plus a background 'waterfall' thread
    # that loads thumbs for visible pages a few at a time.
    def __init__( self, controller ):
        self._controller = controller
        cache_size = self._controller.options[ 'thumbnail_cache_size' ]
        cache_timeout = self._controller.new_options.GetInteger( 'thumbnail_cache_timeout' )
        self._data_cache = DataCache( self._controller, cache_size, timeout = cache_timeout )
        self._lock = threading.Lock()
        # pending ( page_key, media ) pairs: a set for O(1) add/discard, and a
        # hash-sorted list the daemon pops from
        self._waterfall_queue_quick = set()
        self._waterfall_queue_random = []
        self._waterfall_event = threading.Event()
        # name -> bitmap for the placeholder thumbs (pdf, audio, etc.)
        self._special_thumbs = {}
        self.Clear()
        self._controller.CallToThreadLongRunning( self.DAEMONWaterfall )
        self._controller.sub( self, 'Clear', 'thumbnail_resize' )
        self._controller.sub( self, 'ClearThumbnails', 'clear_thumbnails' )
    def _GetResizedHydrusBitmapFromHardDrive( self, display_media ):
        # Load a thumbnail bitmap from disk, regenerating broken or wrongly-sized thumbs.
        thumbnail_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
        if tuple( thumbnail_dimensions ) == HC.UNSCALED_THUMBNAIL_DIMENSIONS:
            full_size = True
        else:
            full_size = False
        hash = display_media.GetHash()
        mime = display_media.GetMime()
        locations_manager = display_media.GetLocationsManager()
        try:
            if full_size:
                path = self._controller.client_files_manager.GetFullSizeThumbnailPath( hash, mime )
            else:
                path = self._controller.client_files_manager.GetResizedThumbnailPath( hash, mime )
        except HydrusExceptions.FileMissingException as e:
            # only worth complaining about if we actually hold the file locally
            if locations_manager.IsLocal():
                HydrusData.ShowException( e )
            return self._special_thumbs[ 'hydrus' ]
        mime = display_media.GetMime()
        try:
            hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path, mime )
        except Exception as e:
            # broken thumb: regenerate once and retry before falling back to the default
            try:
                self._controller.client_files_manager.RegenerateResizedThumbnail( hash, mime )
                try:
                    hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path, mime )
                except Exception as e:
                    HydrusData.ShowException( e )
                    raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was broken. It was regenerated, but the new file would not render for the above reason. Please inform the hydrus developer what has happened.' )
            except Exception as e:
                HydrusData.ShowException( e )
                return self._special_thumbs[ 'hydrus' ]
        ( media_x, media_y ) = display_media.GetResolution()
        ( actual_x, actual_y ) = hydrus_bitmap.GetSize()
        ( desired_x, desired_y ) = self._controller.options[ 'thumbnail_dimensions' ]
        too_large = actual_x > desired_x or actual_y > desired_y
        # an undersized thumb is fine if the original media is simply that small
        small_original_image = actual_x == media_x and actual_y == media_y
        too_small = actual_x < desired_x and actual_y < desired_y
        if too_large or ( too_small and not small_original_image ):
            # thumb does not match current settings, so rebuild and reload it
            self._controller.client_files_manager.RegenerateResizedThumbnail( hash, mime )
            hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path, mime )
        return hydrus_bitmap
    def _RecalcWaterfallQueueRandom( self ):
        # here we sort by the hash since this is both breddy random and more likely to access faster on a well defragged hard drive!
        def sort_by_hash_key( ( page_key, media ) ):
            return media.GetDisplayMedia().GetHash()
        self._waterfall_queue_random = list( self._waterfall_queue_quick )
        self._waterfall_queue_random.sort( key = sort_by_hash_key )
    def CancelWaterfall( self, page_key, medias ):
        # Remove these medias from the pending waterfall work.
        with self._lock:
            self._waterfall_queue_quick.difference_update( ( ( page_key, media ) for media in medias ) )
            self._RecalcWaterfallQueueRandom()
    def Clear( self ):
        # Wipe the cache and regenerate the special placeholder thumbs at the current size.
        with self._lock:
            self._data_cache.Clear()
            self._special_thumbs = {}
            names = [ 'hydrus', 'pdf', 'audio', 'video', 'zip' ]
            ( os_file_handle, temp_path ) = ClientPaths.GetTempPath()
            try:
                for name in names:
                    path = os.path.join( HC.STATIC_DIR, name + '.png' )
                    thumbnail_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
                    thumbnail = HydrusFileHandling.GenerateThumbnailFromStaticImage( path, thumbnail_dimensions, HC.IMAGE_PNG )
                    with open( temp_path, 'wb' ) as f:
                        f.write( thumbnail )
                    hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( temp_path, HC.IMAGE_PNG )
                    self._special_thumbs[ name ] = hydrus_bitmap
            finally:
                HydrusPaths.CleanUpTempPath( os_file_handle, temp_path )
    def ClearThumbnails( self, hashes ):
        with self._lock:
            for hash in hashes:
                self._data_cache.DeleteData( hash )
    def DoingWork( self ):
        with self._lock:
            return len( self._waterfall_queue_random ) > 0
    def GetThumbnail( self, media ):
        # Return the bitmap for this media, loading from disk (and caching) on a miss.
        # Non-image mimes get the shared special thumbs.
        try:
            display_media = media.GetDisplayMedia()
        except:
            # sometimes media can get switched around during a collect event, and if this happens during waterfall, we have a problem here
            # just return for now, we'll see how it goes
            return self._special_thumbs[ 'hydrus' ]
        locations_manager = display_media.GetLocationsManager()
        if locations_manager.ShouldIdeallyHaveThumbnail():
            mime = display_media.GetMime()
            if mime in HC.MIMES_WITH_THUMBNAILS:
                hash = display_media.GetHash()
                result = self._data_cache.GetIfHasData( hash )
                if result is None:
                    if locations_manager.ShouldDefinitelyHaveThumbnail():
                        # local file, should be able to regen if needed
                        hydrus_bitmap = self._GetResizedHydrusBitmapFromHardDrive( display_media )
                    else:
                        # repository file, maybe not actually available yet
                        try:
                            hydrus_bitmap = self._GetResizedHydrusBitmapFromHardDrive( display_media )
                        except:
                            hydrus_bitmap = self._special_thumbs[ 'hydrus' ]
                    self._data_cache.AddData( hash, hydrus_bitmap )
                else:
                    hydrus_bitmap = result
                return hydrus_bitmap
            elif mime in HC.AUDIO: return self._special_thumbs[ 'audio' ]
            elif mime in HC.VIDEO: return self._special_thumbs[ 'video' ]
            elif mime == HC.APPLICATION_PDF: return self._special_thumbs[ 'pdf' ]
            elif mime in HC.ARCHIVES: return self._special_thumbs[ 'zip' ]
            else: return self._special_thumbs[ 'hydrus' ]
        else:
            return self._special_thumbs[ 'hydrus' ]
    def HasThumbnailCached( self, media ):
        # Non-thumbnail mimes use the always-loaded special thumbs, so count as cached.
        display_media = media.GetDisplayMedia()
        mime = display_media.GetMime()
        if mime in HC.MIMES_WITH_THUMBNAILS:
            hash = display_media.GetHash()
            return self._data_cache.HasData( hash )
        else:
            return True
    def Waterfall( self, page_key, medias ):
        # Queue thumbnails for background loading and wake the daemon.
        with self._lock:
            self._waterfall_queue_quick.update( ( ( page_key, media ) for media in medias ) )
            self._RecalcWaterfallQueueRandom()
        self._waterfall_event.set()
    def DAEMONWaterfall( self ):
        # Background thread: render queued thumbs in ~5ms bursts, then publish them
        # per-page so the ui repaints in batches.
        last_paused = HydrusData.GetNowPrecise()
        while not HydrusThreading.IsThreadShuttingDown():
            with self._lock:
                do_wait = len( self._waterfall_queue_random ) == 0
            if do_wait:
                self._waterfall_event.wait( 1 )
                self._waterfall_event.clear()
                last_paused = HydrusData.GetNowPrecise()
            start_time = HydrusData.GetNowPrecise()
            stop_time = start_time + 0.005 # a bit of a typical frame
            page_keys_to_rendered_medias = collections.defaultdict( list )
            while not HydrusData.TimeHasPassedPrecise( stop_time ):
                with self._lock:
                    if len( self._waterfall_queue_random ) == 0:
                        break
                    result = self._waterfall_queue_random.pop()
                    self._waterfall_queue_quick.discard( result )
                ( page_key, media ) = result
                try:
                    self.GetThumbnail( media ) # to load it
                    page_keys_to_rendered_medias[ page_key ].append( media )
                except Exception as e:
                    HydrusData.ShowException( e )
            for ( page_key, rendered_medias ) in page_keys_to_rendered_medias.items():
                self._controller.pub( 'waterfall_thumbnails', page_key, rendered_medias )
            time.sleep( 0.00001 )
class ServicesManager( object ):
    # Client-side registry of all services, kept in sync with the db via pubsub.
    def __init__( self, controller ):
        self._controller = controller
        self._lock = threading.Lock()
        self._keys_to_services = {}
        self._services_sorted = []
        self.RefreshServices()
        self._controller.sub( self, 'RefreshServices', 'notify_new_services_data' )
    def _GetService( self, service_key ):
        try:
            return self._keys_to_services[ service_key ]
        except KeyError:
            raise HydrusExceptions.DataMissing( 'That service was not found!' )
    def _SetServices( self, services ):
        # note: the test service goes into the key lookup but not the sorted list
        self._keys_to_services = { service.GetServiceKey() : service for service in services }
        self._keys_to_services[ CC.TEST_SERVICE_KEY ] = ClientServices.GenerateService( CC.TEST_SERVICE_KEY, HC.TEST_SERVICE, CC.TEST_SERVICE_KEY )
        # py2 cmp-style sort by service name
        def compare_function( a, b ):
            return cmp( a.GetName(), b.GetName() )
        self._services_sorted = list( services )
        self._services_sorted.sort( cmp = compare_function )
    def Filter( self, service_keys, desired_types ):
        # Keep only the keys whose service is one of the desired types.
        with self._lock:
            def func( service_key ):
                return self._keys_to_services[ service_key ].GetServiceType() in desired_types
            filtered_service_keys = filter( func, service_keys )
            return filtered_service_keys
    def FilterValidServiceKeys( self, service_keys ):
        # Keep only the keys that currently map to a known service.
        with self._lock:
            def func( service_key ):
                return service_key in self._keys_to_services
            filtered_service_keys = filter( func, service_keys )
            return filtered_service_keys
    def GetName( self, service_key ):
        with self._lock:
            service = self._GetService( service_key )
            return service.GetName()
    def GetService( self, service_key ):
        with self._lock:
            return self._GetService( service_key )
    def GetServiceType( self, service_key ):
        with self._lock:
            return self._GetService( service_key ).GetServiceType()
    def GetServiceKeys( self, desired_types = HC.ALL_SERVICES ):
        with self._lock:
            filtered_service_keys = [ service_key for ( service_key, service ) in self._keys_to_services.items() if service.GetServiceType() in desired_types ]
            return filtered_service_keys
    def GetServices( self, desired_types = HC.ALL_SERVICES, randomised = True ):
        # Return services of the desired types, name-sorted unless randomised.
        # (py2: filter returns a list here, so shuffle is fine)
        with self._lock:
            def func( service ):
                return service.GetServiceType() in desired_types
            services = filter( func, self._services_sorted )
            if randomised:
                random.shuffle( services )
            return services
    def RefreshServices( self ):
        with self._lock:
            services = self._controller.Read( 'services' )
            self._SetServices( services )
    def ServiceExists( self, service_key ):
        with self._lock:
            return service_key in self._keys_to_services
class ShortcutsManager( object ):
    # Keeps every serialised shortcut set in memory, refreshed on the 'new_shortcuts' signal.
    def __init__( self, controller ):
        self._controller = controller
        self._shortcuts = {}
        self.RefreshShortcuts()
        self._controller.sub( self, 'RefreshShortcuts', 'new_shortcuts' )
    def GetCommand( self, shortcuts_names, shortcut ):
        # The first matching command wins, in the order the names were given.
        for name in shortcuts_names:
            if name not in self._shortcuts:
                continue
            command = self._shortcuts[ name ].GetCommand( shortcut )
            if command is None:
                continue
            if HG.gui_report_mode:
                HydrusData.ShowText( 'command matched: ' + repr( command ) )
            return command
        return None
    def RefreshShortcuts( self ):
        # Reload all named shortcut sets from the db.
        self._shortcuts = {}
        all_shortcuts = HG.client_controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUTS )
        for shortcuts in all_shortcuts:
            self._shortcuts[ shortcuts.GetName() ] = shortcuts
class TagCensorshipManager( object ):
    """Caches per-service tag censorship rules and filters tags, pairs and
    predicates against them.

    Each service maps to ( blacklist, censorships ): when blacklist is True,
    tags matching the rules are hidden; when False, only matching tags pass.
    """
    def __init__( self, controller ):
        self._controller = controller
        self.RefreshData()
        # re-pull the rules whenever the client reports a censorship change
        self._controller.sub( self, 'RefreshData', 'notify_new_tag_censorship' )
    def _CensorshipMatches( self, tag, blacklist, censorships ):
        # Returns True when the tag should be KEPT under these rules.
        if blacklist:
            return not HydrusTags.CensorshipMatch( tag, censorships )
        else:
            return HydrusTags.CensorshipMatch( tag, censorships )
    def GetInfo( self, service_key ):
        """Return ( blacklist, censorships ) for a service; default is an empty blacklist (everything passes)."""
        if service_key in self._service_keys_to_info: return self._service_keys_to_info[ service_key ]
        else: return ( True, set() )
    def RefreshData( self ):
        # rows come back as ( service_key, blacklist, censorships ) tuples
        rows = self._controller.Read( 'tag_censorship' )
        self._service_keys_to_info = { service_key : ( blacklist, censorships ) for ( service_key, blacklist, censorships ) in rows }
    def FilterPredicates( self, service_key, predicates ):
        """Drop tag predicates censored by the combined and per-service rules."""
        for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            if service_key_lookup in self._service_keys_to_info:
                ( blacklist, censorships ) = self._service_keys_to_info[ service_key_lookup ]
                predicates = [ predicate for predicate in predicates if predicate.GetType() != HC.PREDICATE_TYPE_TAG or self._CensorshipMatches( predicate.GetValue(), blacklist, censorships ) ]
        return predicates
    def FilterStatusesToPairs( self, service_key, statuses_to_pairs ):
        """Drop tag pairs in which either member is censored."""
        for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            if service_key_lookup in self._service_keys_to_info:
                ( blacklist, censorships ) = self._service_keys_to_info[ service_key_lookup ]
                new_statuses_to_pairs = HydrusData.default_dict_set()
                for ( status, pairs ) in statuses_to_pairs.items():
                    new_statuses_to_pairs[ status ] = { ( one, two ) for ( one, two ) in pairs if self._CensorshipMatches( one, blacklist, censorships ) and self._CensorshipMatches( two, blacklist, censorships ) }
                statuses_to_pairs = new_statuses_to_pairs
        return statuses_to_pairs
    def FilterServiceKeysToStatusesToTags( self, service_keys_to_statuses_to_tags ):
        """Censor the nested mapping IN PLACE and return it.

        NOTE(review): iterates .keys() while replacing values; no keys are
        added or removed, so this is safe, but the in-place mutation is by design.
        """
        if CC.COMBINED_TAG_SERVICE_KEY in self._service_keys_to_info:
            # the combined rules apply to every service
            ( blacklist, censorships ) = self._service_keys_to_info[ CC.COMBINED_TAG_SERVICE_KEY ]
            service_keys = service_keys_to_statuses_to_tags.keys()
            for service_key in service_keys:
                statuses_to_tags = service_keys_to_statuses_to_tags[ service_key ]
                statuses = statuses_to_tags.keys()
                for status in statuses:
                    tags = statuses_to_tags[ status ]
                    statuses_to_tags[ status ] = { tag for tag in tags if self._CensorshipMatches( tag, blacklist, censorships ) }
        for ( service_key, ( blacklist, censorships ) ) in self._service_keys_to_info.items():
            # per-service rules; the combined key was already applied above
            if service_key == CC.COMBINED_TAG_SERVICE_KEY:
                continue
            if service_key in service_keys_to_statuses_to_tags:
                statuses_to_tags = service_keys_to_statuses_to_tags[ service_key ]
                statuses = statuses_to_tags.keys()
                for status in statuses:
                    tags = statuses_to_tags[ status ]
                    statuses_to_tags[ status ] = { tag for tag in tags if self._CensorshipMatches( tag, blacklist, censorships ) }
        return service_keys_to_statuses_to_tags
    def FilterTags( self, service_key, tags ):
        """Return the subset of tags that survive combined and per-service rules."""
        for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            if service_key_lookup in self._service_keys_to_info:
                ( blacklist, censorships ) = self._service_keys_to_info[ service_key_lookup ]
                tags = { tag for tag in tags if self._CensorshipMatches( tag, blacklist, censorships ) }
        return tags
class TagParentsManager( object ):
    """Caches the collapsed child -> parents lookup for every tag service and
    expands tags/predicates with their implied parents."""
    def __init__( self, controller ):
        self._controller = controller
        self._dirty = False
        self._service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
        self._RefreshParents()
        self._lock = threading.Lock()
        # rebuilds are deferred via NotifyNewParents rather than done inline
        self._controller.sub( self, 'NotifyNewParents', 'notify_new_parents' )
    def _RefreshParents( self ):
        """Rebuild the child -> parents cache from the db."""
        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_parents' )
        # first collapse siblings
        sibling_manager = self._controller.GetManager( 'tag_siblings' )
        collapsed_service_keys_to_statuses_to_pairs = collections.defaultdict( HydrusData.default_dict_set )
        for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
            # the combined service is regenerated below, so skip it here
            if service_key == CC.COMBINED_TAG_SERVICE_KEY: continue
            for ( status, pairs ) in statuses_to_pairs.items():
                pairs = sibling_manager.CollapsePairs( service_key, pairs )
                collapsed_service_keys_to_statuses_to_pairs[ service_key ][ status ] = pairs
        # now collapse current and pending
        service_keys_to_pairs_flat = HydrusData.default_dict_set()
        for ( service_key, statuses_to_pairs ) in collapsed_service_keys_to_statuses_to_pairs.items():
            pairs_flat = statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] )
            service_keys_to_pairs_flat[ service_key ] = pairs_flat
        # now create the combined tag service
        combined_pairs_flat = set()
        for pairs_flat in service_keys_to_pairs_flat.values():
            combined_pairs_flat.update( pairs_flat )
        service_keys_to_pairs_flat[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_pairs_flat
        #
        service_keys_to_simple_children_to_parents = BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat )
        self._service_keys_to_children_to_parents = BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents )
    def ExpandPredicates( self, service_key, predicates ):
        """Return predicates with a parent predicate appended after each tag predicate."""
        if self._controller.new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        results = []
        with self._lock:
            for predicate in predicates:
                results.append( predicate )
                if predicate.GetType() == HC.PREDICATE_TYPE_TAG:
                    tag = predicate.GetValue()
                    parents = self._service_keys_to_children_to_parents[ service_key ][ tag ]
                    for parent in parents:
                        parent_predicate = ClientSearch.Predicate( HC.PREDICATE_TYPE_PARENT, parent )
                        results.append( parent_predicate )
            return results
    def ExpandTags( self, service_key, tags ):
        """Return the given tags plus every parent they imply."""
        if self._controller.new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            tags_results = set( tags )
            for tag in tags:
                tags_results.update( self._service_keys_to_children_to_parents[ service_key ][ tag ] )
            return tags_results
    def GetParents( self, service_key, tag ):
        """Return the cached parent list for a single tag."""
        if self._controller.new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            return self._service_keys_to_children_to_parents[ service_key ][ tag ]
    def NotifyNewParents( self ):
        # mark dirty and schedule a deferred rebuild off the notification path
        with self._lock:
            self._dirty = True
            self._controller.CallLater( 1.0, self.RefreshParentsIfDirty )
    def RefreshParentsIfDirty( self ):
        with self._lock:
            if self._dirty:
                self._RefreshParents()
                self._dirty = False
class TagSiblingsManager( object ):
    """Caches the collapsed 'bad tag -> preferred tag' sibling mapping per tag
    service (plus a synthetic combined service) and offers collapse/lookup helpers."""
    def __init__( self, controller ):
        self._controller = controller
        self._dirty = False
        self._service_keys_to_siblings = collections.defaultdict( dict )
        self._service_keys_to_reverse_lookup = collections.defaultdict( dict )
        self._RefreshSiblings()
        self._lock = threading.Lock()
        self._controller.sub( self, 'NotifyNewSiblings', 'notify_new_siblings_data' )
    def _CollapseTags( self, service_key, tags ):
        # map each tag to its preferred sibling, leaving unmapped tags alone
        siblings = self._service_keys_to_siblings[ service_key ]
        return { siblings[ tag ] if tag in siblings else tag for tag in tags }
    def _RefreshSiblings( self ):
        """Rebuild the forward (bad -> good) and reverse (good -> [bads]) lookups from the db."""
        self._service_keys_to_siblings = collections.defaultdict( dict )
        self._service_keys_to_reverse_lookup = collections.defaultdict( dict )
        local_tags_pairs = set()
        tag_repo_pairs = set()
        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_siblings' )
        for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
            # current and pending pairs both count as live siblings
            all_pairs = statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] )
            if service_key == CC.LOCAL_TAG_SERVICE_KEY:
                local_tags_pairs = set( all_pairs )
            else:
                tag_repo_pairs.update( all_pairs )
            siblings = CollapseTagSiblingPairs( [ all_pairs ] )
            self._service_keys_to_siblings[ service_key ] = siblings
            reverse_lookup = collections.defaultdict( list )
            for ( bad, good ) in siblings.items():
                reverse_lookup[ good ].append( bad )
            self._service_keys_to_reverse_lookup[ service_key ] = reverse_lookup
        # the combined service merges local pairs with all repository pairs
        combined_siblings = CollapseTagSiblingPairs( [ local_tags_pairs, tag_repo_pairs ] )
        self._service_keys_to_siblings[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_siblings
        combined_reverse_lookup = collections.defaultdict( list )
        for ( bad, good ) in combined_siblings.items():
            combined_reverse_lookup[ good ].append( bad )
        self._service_keys_to_reverse_lookup[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_reverse_lookup
        self._controller.pub( 'new_siblings_gui' )
    def CollapsePredicates( self, service_key, predicates ):
        """Collapse tag predicates onto their preferred siblings, merging counts."""
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            siblings = self._service_keys_to_siblings[ service_key ]
            # non-tag predicates pass through untouched
            results = [ predicate for predicate in predicates if predicate.GetType() != HC.PREDICATE_TYPE_TAG ]
            # NOTE(review): tag_predicates is assigned but never used below
            tag_predicates = [ predicate for predicate in predicates if predicate.GetType() == HC.PREDICATE_TYPE_TAG ]
            tags_to_predicates = { predicate.GetValue() : predicate for predicate in predicates if predicate.GetType() == HC.PREDICATE_TYPE_TAG }
            # NOTE(review): written for py2, where keys() is a list snapshot;
            # the dict is mutated during the loop below.
            tags = tags_to_predicates.keys()
            tags_to_include_in_results = set()
            for tag in tags:
                if tag in siblings:
                    old_tag = tag
                    old_predicate = tags_to_predicates[ old_tag ]
                    new_tag = siblings[ old_tag ]
                    if new_tag not in tags_to_predicates:
                        # first time we see this sibling: synthesise a predicate for it
                        ( old_pred_type, old_value, old_inclusive ) = old_predicate.GetInfo()
                        new_predicate = ClientSearch.Predicate( old_pred_type, new_tag, old_inclusive )
                        tags_to_predicates[ new_tag ] = new_predicate
                        tags_to_include_in_results.add( new_tag )
                    new_predicate = tags_to_predicates[ new_tag ]
                    new_predicate.AddCounts( old_predicate )
                else:
                    tags_to_include_in_results.add( tag )
            results.extend( [ tags_to_predicates[ tag ] for tag in tags_to_include_in_results ] )
            return results
    def CollapsePairs( self, service_key, pairs ):
        """Collapse both members of every pair onto their preferred siblings."""
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            siblings = self._service_keys_to_siblings[ service_key ]
            result = set()
            for ( a, b ) in pairs:
                if a in siblings:
                    a = siblings[ a ]
                if b in siblings:
                    b = siblings[ b ]
                result.add( ( a, b ) )
            return result
    def CollapseStatusesToTags( self, service_key, statuses_to_tags ):
        """Collapse every tag set in a status -> tags mapping."""
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            statuses = statuses_to_tags.keys()
            new_statuses_to_tags = HydrusData.default_dict_set()
            for status in statuses:
                new_statuses_to_tags[ status ] = self._CollapseTags( service_key, statuses_to_tags[ status ] )
            return new_statuses_to_tags
    def CollapseTag( self, service_key, tag ):
        """Return the preferred sibling of a tag, or the tag itself."""
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            siblings = self._service_keys_to_siblings[ service_key ]
            if tag in siblings:
                return siblings[ tag ]
            else:
                return tag
    def CollapseTags( self, service_key, tags ):
        """Collapse a collection of tags onto their preferred siblings."""
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            return self._CollapseTags( service_key, tags )
    def CollapseTagsToCount( self, service_key, tags_to_count ):
        """Collapse a tag -> count mapping, summing counts that merge onto one sibling."""
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            siblings = self._service_keys_to_siblings[ service_key ]
            results = collections.Counter()
            for ( tag, count ) in tags_to_count.items():
                if tag in siblings:
                    tag = siblings[ tag ]
                results[ tag ] += count
            return results
    def GetAutocompleteSiblings( self, service_key, search_text, exact_match = False ):
        """Return every tag in any sibling network whose key or value matches search_text."""
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            siblings = self._service_keys_to_siblings[ service_key ]
            reverse_lookup = self._service_keys_to_reverse_lookup[ service_key ]
            if exact_match:
                key_based_matching_values = set()
                if search_text in siblings:
                    key_based_matching_values = { siblings[ search_text ] }
                else:
                    key_based_matching_values = set()
                value_based_matching_values = { value for value in siblings.values() if value == search_text }
            else:
                matching_keys = ClientSearch.FilterTagsBySearchText( service_key, search_text, siblings.keys(), search_siblings = False )
                key_based_matching_values = { siblings[ key ] for key in matching_keys }
                value_based_matching_values = ClientSearch.FilterTagsBySearchText( service_key, search_text, siblings.values(), search_siblings = False )
            matching_values = key_based_matching_values.union( value_based_matching_values )
            # all the matching values have a matching sibling somewhere in their network
            # so now fetch the networks
            lists_of_matching_keys = [ reverse_lookup[ value ] for value in matching_values ]
            matching_keys = itertools.chain.from_iterable( lists_of_matching_keys )
            matches = matching_values.union( matching_keys )
            return matches
    def GetSibling( self, service_key, tag ):
        """Return the preferred sibling of a tag, or None when it has none."""
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            siblings = self._service_keys_to_siblings[ service_key ]
            if tag in siblings:
                return siblings[ tag ]
            else:
                return None
    def GetAllSiblings( self, service_key, tag ):
        """Return the whole sibling network of a tag (bads plus the preferred tag)."""
        if self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        with self._lock:
            siblings = self._service_keys_to_siblings[ service_key ]
            reverse_lookup = self._service_keys_to_reverse_lookup[ service_key ]
            if tag in siblings:
                best_tag = siblings[ tag ]
            elif tag in reverse_lookup:
                best_tag = tag
            else:
                # the tag is in no network at all
                return [ tag ]
            all_siblings = list( reverse_lookup[ best_tag ] )
            all_siblings.append( best_tag )
            return all_siblings
    def NotifyNewSiblings( self ):
        # mark dirty and schedule a deferred rebuild off the notification path
        with self._lock:
            self._dirty = True
            self._controller.CallLater( 1.0, self.RefreshSiblingsIfDirty )
    def RefreshSiblingsIfDirty( self ):
        with self._lock:
            if self._dirty:
                self._RefreshSiblings()
                self._dirty = False
class UndoManager( object ):
    """Keeps a linear history of undoable content-update commands plus their
    precomputed inversions, and applies them on undo/redo requests."""
    def __init__( self, controller ):
        self._controller = controller
        self._commands = []
        self._inverted_commands = []
        # index of the next redo slot; everything before it is undoable
        self._current_index = 0
        self._lock = threading.Lock()
        self._controller.sub( self, 'Undo', 'undo' )
        self._controller.sub( self, 'Redo', 'redo' )
    def _FilterServiceKeysToContentUpdates( self, service_keys_to_content_updates ):
        """Keep only the content updates this manager knows how to invert."""
        filtered_service_keys_to_content_updates = {}
        for ( service_key, content_updates ) in service_keys_to_content_updates.items():
            filtered_content_updates = []
            for content_update in content_updates:
                ( data_type, action, row ) = content_update.ToTuple()
                if data_type == HC.CONTENT_TYPE_FILES:
                    # these file actions have no sensible inversion, so drop them
                    if action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_UNDELETE, HC.CONTENT_UPDATE_RESCIND_PETITION, HC.CONTENT_UPDATE_ADVANCED ):
                        continue
                elif data_type == HC.CONTENT_TYPE_MAPPINGS:
                    if action in ( HC.CONTENT_UPDATE_RESCIND_PETITION, HC.CONTENT_UPDATE_ADVANCED ):
                        continue
                else:
                    # only files and mappings are undoable
                    continue
                filtered_content_update = HydrusData.ContentUpdate( data_type, action, row )
                filtered_content_updates.append( filtered_content_update )
            if len( filtered_content_updates ) > 0:
                filtered_service_keys_to_content_updates[ service_key ] = filtered_content_updates
        return filtered_service_keys_to_content_updates
    def _InvertServiceKeysToContentUpdates( self, service_keys_to_content_updates ):
        """Build the inverse update for every content update.

        NOTE(review): if an action reaches here without an inversion rule,
        inverted_action is unbound and this raises UnboundLocalError; callers
        are expected to have run _FilterServiceKeysToContentUpdates first.
        """
        inverted_service_keys_to_content_updates = {}
        for ( service_key, content_updates ) in service_keys_to_content_updates.items():
            inverted_content_updates = []
            for content_update in content_updates:
                ( data_type, action, row ) = content_update.ToTuple()
                inverted_row = row
                if data_type == HC.CONTENT_TYPE_FILES:
                    if action == HC.CONTENT_UPDATE_ARCHIVE: inverted_action = HC.CONTENT_UPDATE_INBOX
                    elif action == HC.CONTENT_UPDATE_INBOX: inverted_action = HC.CONTENT_UPDATE_ARCHIVE
                    elif action == HC.CONTENT_UPDATE_PEND: inverted_action = HC.CONTENT_UPDATE_RESCIND_PEND
                    elif action == HC.CONTENT_UPDATE_RESCIND_PEND: inverted_action = HC.CONTENT_UPDATE_PEND
                    elif action == HC.CONTENT_UPDATE_PETITION:
                        inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION
                        # a petition row carries a reason; its rescind does not
                        ( hashes, reason ) = row
                        inverted_row = hashes
                elif data_type == HC.CONTENT_TYPE_MAPPINGS:
                    if action == HC.CONTENT_UPDATE_ADD: inverted_action = HC.CONTENT_UPDATE_DELETE
                    elif action == HC.CONTENT_UPDATE_DELETE: inverted_action = HC.CONTENT_UPDATE_ADD
                    elif action == HC.CONTENT_UPDATE_PEND: inverted_action = HC.CONTENT_UPDATE_RESCIND_PEND
                    elif action == HC.CONTENT_UPDATE_RESCIND_PEND: inverted_action = HC.CONTENT_UPDATE_PEND
                    elif action == HC.CONTENT_UPDATE_PETITION:
                        inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION
                        ( tag, hashes, reason ) = row
                        inverted_row = ( tag, hashes )
                inverted_content_update = HydrusData.ContentUpdate( data_type, inverted_action, inverted_row )
                inverted_content_updates.append( inverted_content_update )
            inverted_service_keys_to_content_updates[ service_key ] = inverted_content_updates
        return inverted_service_keys_to_content_updates
    def AddCommand( self, action, *args, **kwargs ):
        """Record a new command (and its inversion), truncating any redo tail."""
        with self._lock:
            inverted_action = action
            inverted_args = args
            inverted_kwargs = kwargs
            if action == 'content_updates':
                ( service_keys_to_content_updates, ) = args
                service_keys_to_content_updates = self._FilterServiceKeysToContentUpdates( service_keys_to_content_updates )
                if len( service_keys_to_content_updates ) == 0: return
                inverted_service_keys_to_content_updates = self._InvertServiceKeysToContentUpdates( service_keys_to_content_updates )
                if len( inverted_service_keys_to_content_updates ) == 0: return
                inverted_args = ( inverted_service_keys_to_content_updates, )
            else: return
            # adding a command discards anything past the current position
            self._commands = self._commands[ : self._current_index ]
            self._inverted_commands = self._inverted_commands[ : self._current_index ]
            self._commands.append( ( action, args, kwargs ) )
            self._inverted_commands.append( ( inverted_action, inverted_args, inverted_kwargs ) )
            self._current_index += 1
            self._controller.pub( 'notify_new_undo' )
    def GetUndoRedoStrings( self ):
        """Return ( undo_string, redo_string ) menu labels, each possibly None."""
        with self._lock:
            ( undo_string, redo_string ) = ( None, None )
            if self._current_index > 0:
                undo_index = self._current_index - 1
                ( action, args, kwargs ) = self._commands[ undo_index ]
                if action == 'content_updates':
                    ( service_keys_to_content_updates, ) = args
                    undo_string = 'undo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )
            if len( self._commands ) > 0 and self._current_index < len( self._commands ):
                redo_index = self._current_index
                ( action, args, kwargs ) = self._commands[ redo_index ]
                if action == 'content_updates':
                    ( service_keys_to_content_updates, ) = args
                    redo_string = 'redo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )
            return ( undo_string, redo_string )
    def Undo( self ):
        action = None
        with self._lock:
            if self._current_index > 0:
                self._current_index -= 1
                ( action, args, kwargs ) = self._inverted_commands[ self._current_index ]
        # the db write happens outside the lock
        if action is not None:
            self._controller.WriteSynchronous( action, *args, **kwargs )
            self._controller.pub( 'notify_new_undo' )
    def Redo( self ):
        action = None
        with self._lock:
            if len( self._commands ) > 0 and self._current_index < len( self._commands ):
                ( action, args, kwargs ) = self._commands[ self._current_index ]
                self._current_index += 1
        # the db write happens outside the lock
        if action is not None:
            self._controller.WriteSynchronous( action, *args, **kwargs )
            self._controller.pub( 'notify_new_undo' )
| [
"hydrus.admin@gmail.com"
] | hydrus.admin@gmail.com |
468c319fce38acce24ace0e88fc5325e3bdc9b49 | 4b56e86b33a52d2d1808d6b80f13a169f3f6159a | /ImproveDeepNN/OptimizationMethods/Momentum.py | c93deca6fb6eb05c5f1e573354eb5b638d4451d0 | [] | no_license | vandeppce/DeepLearning | d922d4780afab5b733cc7a87b0c167bc4b2cfc81 | 5e2d4a34b8a5a1c81e132a1a70e2b503859b1f7d | refs/heads/master | 2021-04-03T04:13:07.250159 | 2018-05-24T04:49:51 | 2018-05-24T04:49:51 | 124,764,156 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from ImproveDeepNN.OptimizationMethods.opt_utils import *
from ImproveDeepNN.OptimizationMethods.testCases import *
def initialize_velocity(parameters):
    """
    Create zero-initialised momentum velocities matching each parameter's shape.

    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl

    Returns:
    v -- python dictionary of velocities, one zero array per gradient:
                    v['dW' + str(l)] = velocity of dWl
                    v['db' + str(l)] = velocity of dbl
    """
    num_layers = len(parameters) // 2
    v = {}
    # one velocity pair (dW, db) per layer, shaped like the parameters
    for layer in range(1, num_layers + 1):
        v["dW" + str(layer)] = np.zeros(parameters["W" + str(layer)].shape)
        v["db" + str(layer)] = np.zeros(parameters["b" + str(layer)].shape)
    return v
# Smoke test kept commented out by the original author; remove the quotes to run it.
'''
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
'''
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    One gradient-descent-with-momentum step over all layers.

    Arguments:
    parameters -- dict of parameters: parameters['W' + str(l)], parameters['b' + str(l)]
    grads -- dict of gradients: grads['dW' + str(l)], grads['db' + str(l)]
    v -- dict of current velocities: v['dW' + str(l)], v['db' + str(l)]
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar

    Returns:
    parameters -- dict of updated parameters (mutated in place)
    v -- dict of updated velocities (mutated in place)
    """
    num_layers = len(parameters) // 2
    for layer in range(1, num_layers + 1):
        dW_key = "dW" + str(layer)
        db_key = "db" + str(layer)
        W_key = "W" + str(layer)
        b_key = "b" + str(layer)
        # exponentially weighted average of the gradients
        v[dW_key] = beta * v[dW_key] + (1.0 - beta) * grads[dW_key]
        v[db_key] = beta * v[db_key] + (1.0 - beta) * grads[db_key]
        # step the parameters along the smoothed gradient
        parameters[W_key] = parameters[W_key] - learning_rate * v[dW_key]
        parameters[b_key] = parameters[b_key] - learning_rate * v[db_key]
    return parameters, v
# Smoke test kept commented out by the original author; remove the quotes to run it.
'''
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
'''
| [
"sdtczyj@163.com"
] | sdtczyj@163.com |
cba48a755863db9c8468e5ef1df1f3d89d1a79df | 06cb3c450c322f6b01ee44c1f4de0a883a5a785e | /Echoo/__init__.py | 09fa2ddd18cdb10f827e8356184c130e6e0f7acb | [
"MIT"
] | permissive | Lyken17/Echoo | 239077c1791e61745e3dda97ca6661a5e9eca962 | ae604ee42ee3cae9afe9d3583ede568567c7dbe8 | refs/heads/master | 2023-06-22T01:48:22.873260 | 2023-06-15T15:30:38 | 2023-06-15T15:30:38 | 205,288,960 | 5 | 2 | MIT | 2019-08-30T02:59:53 | 2019-08-30T02:26:11 | Python | UTF-8 | Python | false | false | 24 | py | from .echoo import Echoo | [
"ligeng@mit.edu"
] | ligeng@mit.edu |
def updates(f):
    """Mark *f* as a handler that updates data."""
    setattr(f, "updates", True)
    return f
def creates(f):
    """Mark *f* as a handler that creates data."""
    setattr(f, "creates", True)
    return f
def deletes(f):
    """Mark *f* as a handler that deletes data."""
    setattr(f, "deletes", True)
    return f
def annotate(annotations):
    """
    Attach *annotations* as the decorated function's __annotations__ mapping.

    Python 2 has no inline parameter annotations, i.e. no:

        def foo(a : str, b: int) -> bool: ...

    so this decorator provides the equivalent:

        @annotate({"a": str, "b": int, "return": bool})
        def foo(a, b): ...
    """
    def _apply(func):
        setattr(func, "__annotations__", annotations)
        return func
    return _apply
| [
"tsutsumi.yusuke@gmail.com"
] | tsutsumi.yusuke@gmail.com |
38ec4f2c3c29b218a061ec89a9850e93af9664f6 | c0057c0bfa216ec991894352700c8e7bca52ad4a | /test2.py | 9638768a9f5438f8d5af7319ec5bf987a0ec0be7 | [] | no_license | syurituu/exercise | f040150d2a734fbc458038de2834b84585c5adaa | c7d4f5e16b79305c513c6576d9edf3da4b051974 | refs/heads/master | 2020-12-13T18:45:07.937811 | 2020-01-17T07:48:50 | 2020-01-17T07:48:50 | 234,498,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | import jieba
def splitfile(inputFile, outputFile):
    """Segment each line of inputFile with jieba and write the space-joined
    tokens to outputFile, one output line per input line.

    Undecodable bytes are ignored on read and write (errors='ignore').
    """
    # 'with' guarantees both handles are closed even if segmentation fails
    # mid-file; the original leaked them on any exception in the loop.
    with open(inputFile, 'r', errors='ignore') as fin, \
         open(outputFile, 'w', errors='ignore') as fout:
        for line in fin:
            line = line.strip()
            tokens = jieba.cut(line)
            outstr = " ".join(tokens)
            fout.write(outstr + '\n')
splitfile('msr_test.txt','result2.txt')  # module-level side effect: segments msr_test.txt into result2.txt on import
"zhutom1996@gmail.com"
] | zhutom1996@gmail.com |
7c818c5e714842a6d73b7c92f35e9830888d1a26 | c6a37df4e40530dd87f89de5c8f6d98b10173c63 | /concolic.py | b182cfc1bac7d0fa89b9d437352d1a084534721a | [] | no_license | KevinBender/AutomatedConcolic | ed88fc7cbe122d76679093b6945214c0ecd4a0b6 | 7c5e50f434bff0d3e25c4f346790915a55397d7a | refs/heads/master | 2023-05-01T04:42:08.827665 | 2021-05-18T14:50:42 | 2021-05-18T14:50:42 | 361,894,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,125 | py | import z3
import traceback
from inspect import currentframe, signature
from dataclasses import dataclass
"""
This file implements a concolic execution engine. It assumes that
a program under test is instrumented and will use the API
concolic.guard() to collect path constraints, and the API
concolic.set() and concolic.set() to manipulate a symbolic store.
The main algorithm in this file is from the DART paper:
Patrice Godefroid, Nils Klarlund, and Koushik Sen. 2005.
DART: Directed Automated Random Testing. In Proceedings of
the 2005 ACM SIGPLAN conference on Programming Language
Design and Implementation (PLDI '05).
DOI: https://doi.org/10.1145/1065010.1065036
The entry point to the engine is concolic.run().
"""
@dataclass
class PathRecord:
    """Bookkeeping for one branch seen on the currently explored path."""
    # source line number of the guard() call for this branch
    line: int
    # True once the branch has been negated/explored, so the search skips it
    done: bool
class ConcolicException(Exception):
    """Raised when an execution diverges from the branch the engine predicted."""
    pass
# Globals
symbols = None       # var name -> z3 Int symbol, built once by init()
store = None         # var name -> current symbolic expression (the concolic store)
current_path = None  # list of z3 guards taken on the current execution
path_record = None   # list of PathRecord, one per branch seen on the path
solver = None        # z3.Solver holding the current path constraint
def init(vars):
    """Create fresh z3 Int symbols for *vars* and reset all per-run state."""
    global path_record, symbols
    symbols = {name: z3.Int(name) for name in vars}
    path_record = []
    reset()
def reset():
    """Reset the concolic store, current path and solver for a new execution."""
    global store, current_path, solver
    # fresh shallow copy: every variable starts mapped to its own symbol
    store = dict(symbols)
    current_path = []
    solver = z3.Solver()
def get(x):
    """Return the symbolic store entry for var *x*, or concrete 0 when concolic testing is inactive."""
    if store is None:
        return 0
    return store[x]
def set(x, a):
    """Bind var *x* to arithmetic expression *a* in the symbolic store (no-op when inactive)."""
    if store is None:
        return
    store[x] = a
def guard(g):
    """Add `g` to current path constraint"""
    if solver is None:
        return # Concolic testing is not running
    solver.append(g)
    # the path taken so far must remain satisfiable
    assert solver.check()
    # Get line number of guard
    line = int(currentframe().f_back.f_lineno)
    # We are just seeing the k-th branch in this execution
    k = len(current_path)
    # Append to current path
    current_path.append(g)
    # Check if we have an expected k-th branch in the path record else add to it
    if k < len(path_record):
        if k == len(path_record)-1:
            # We just got to the last negated guard
            if path_record[k].line == line:
                # We got to an unexpected branch
                raise ConcolicException(("Unsoundness! Current path is %s and I'm back on line %d, " +\
                "but I was expecting to have negated this branch") % (current_path, line))
            else:
                # the negation worked: record where we actually went and mark done
                path_record[k].line = line
                path_record[k].done = True
        elif path_record[k].line != line:
            # We got to an unexpected branch
            raise ConcolicException(("Unsoundness! Current path is %s and I'm on line %d, " +\
            "but I was expecting to go to line %d.") % (current_path, line, path_record[k].line))
        # else: do nothing, we saw an expected branch
    else:
        path_record.append(PathRecord(line, False)) # Set `done`=False initially
def dump_path():
    """Print the current Z3 path constraint in short-hand Python form."""
    if solver is None:
        return
    print(solver)
def dump_smt():
    """Print the current Z3 path constraint in SMT-LIB 2 format."""
    if solver is None:
        return
    print(solver.to_smt2())
# Top-level runner
def run(func, vars):
    """Concolically executes `func` with parameters `vars` and returns (total_paths:int, bug_found:bool)"""
    global store, current_path, path_record, solver
    # Initialize state
    inputs = {str(v): 0 for v in vars} # Could also be random
    init(vars)
    total_runs = 0
    bug_found = False
    while True:
        # Run concolically
        try :
            print("Running with inputs %s" % inputs)
            total_runs += 1
            func(**inputs)
        except AssertionError as e:
            # an assertion failure in the program under test is the bug we look for
            traceback.print_exc()
            print("*** Assertion violation found! Inputs are: %s" % inputs)
            bug_found = True
        finally:
            print("... Path collected: %s" % current_path)
        # print("Path Record: %s" % path_record)
        # Figure out the next guard to negate
        next = len(current_path)-1
        while True:
            # walk backwards past branches that are already explored
            while next >= 0 and path_record[next].done:
                next = next - 1
            if next == -1:
                # nothing left to negate: the whole path tree is explored
                print("Concolic execution complete! %d paths explored." % total_runs)
                # TODO: Actually do a random restart if there was any unsoundness observed
                return total_runs, bug_found
            else:
                # print("next idx=%d" % next)
                # Create a new path constraint up to `next` with the condition at index `next` negated
                current_path = current_path[:next] + [z3.Not(current_path[next])]
                path_record = path_record[:next+1]
                solver.reset()
                solver.insert(current_path)
                # print("Path Record: %s" % path_record)
                print("... Negating the condition at line %d...." % path_record[-1].line)
                print("...... New candidate path: %s" % current_path)
                is_sat = solver.check()
                if is_sat == z3.sat:
                    # solve for concrete inputs that drive execution down the new path
                    model = solver.model()
                    inputs = {var_name: model.eval(var_symbol, model_completion=True).as_long()
                    for var_name, var_symbol in symbols.items()}
                    print("...... SAT! New inputs are: %s" % inputs)
                    reset()
                    print()
                    break
                elif is_sat == z3.unsat:
                    print("...... UNSAT!")
                    next = next - 1
                    continue # Go look for the next branch to negate
                else:
                    raise Exception("You should not get a z3 result of %s." % is_sat)
    # unreachable: the search loop above always returns
    return
| [
"theguykevin@gmail.com"
] | theguykevin@gmail.com |
94cb36fc55af1eb504fcbf88f2c20c31038bd4dc | 917b85156ddfb653592b3b0994e7e7e9802a9eed | /ejerXML.py | c8789ca346bf35fd1f02bff24c1534fdec3609d4 | [] | no_license | antoniogomezvarela/XML | 3d2f2e8e1949b4a7f335a0b7c6ea229544d816a4 | c6dfeed3d782c4a28e56c7992414accf9fdcc660 | refs/heads/master | 2021-01-22T03:25:47.441160 | 2015-03-06T07:28:34 | 2015-03-06T07:28:34 | 31,011,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,135 | py | # -*- coding: utf-8 -*-
from lxml import etree
from datetime import date
# Parse the scholarships/prizes feed once; `documento` is the XML root element.
tree = etree.parse('becas_premios.xml')
documento = tree.getroot()
# MENU (user-facing text is Spanish and stays untouched)
print "1- Buscar beca o premio por teclado"
print "2- Mostrar becas y enlaces"
print "3- Buscar las becas y premios que su fecha de publicación este entre febrero y abril"
print "4- Contar cuantas becas y premios se han dado."
print "5- Mostrar las id de las becas y añadir cuantos dias ha estado abierta"
opcion= raw_input("Elige una opción: ")
# Exercise 1: look up an entry by its id and print its details
if opcion == '1':
    encontrado = False
    identificacion = raw_input("Introduce una id: ")
    for i in documento:
        # child 0 is the id, 1 the title, 2 the date, 3 the description, 5 the state
        if i[0].text==identificacion:
            encontrado = True
            print "ID: ",i[0].text
            print "Titulo: ",i[1].text
            print "Fecha: ",i[2].text
            print "Descripción: ",i[3].text
            print "Estado: ",i[5].text
    if encontrado == False:
        print "Esa ID no existe"
# Exercise 2: list every entry's id with its link (child index 4)
elif opcion == '2':
    for i in documento:
        print "ID: ",i[0].text,", Enlace: ",i[4].text
# Exercise 3: entries published between February and April (month field of YYYY-MM-DD)
elif opcion == '3':
    for i in documento:
        fecha1=i[2].text
        fecha2=fecha1.split("-")
        if fecha2[1] >= "02" and fecha2[1] <= "04":
            print "ID: ",i[0].text,", Fecha: ",i[2].text
# Exercise 4: count grants vs prizes from the first word of the title
elif opcion == '4':
    becas = 0
    premios = 0
    for i in documento:
        titulo = i[1].text
        titulo = titulo.split(" ")
        if titulo[0] == "Becas":
            becas += 1
        elif titulo[0] == "Premios":
            premios += 1
    print "Número de becas concedidas: ",becas
    print "Número de premios concedidos: ",premios
# Exercise 5: for each entry, print how many days its submission window was open
elif opcion == '5':
    date_format = "%Y/%m/%d"  # NOTE(review): defined but never used below
    for i in documento:
        incial = i.findall("plazopresentacion/plazopresentacion_item/incial")
        final = i.findall("plazopresentacion/plazopresentacion_item/final")
        inicial= str(incial[0].text)
        final= str(final[0].text)
        if inicial != "None" or final != "None":
            # timestamps look like YYYY-MM-DDThh:mm:ss; keep only the date part
            inicial = inicial.split("T")
            final = final.split("T")
            inicial = inicial[0].split("-")
            final = final[0].split("-")
            d0 = date(int(inicial[0]),int(inicial[1]),int(inicial[2]))
            d1 = date(int(final[0]),int(final[1]),int(final[2]))
            dias = d1-d0
            print "la beca ",i[0].text," estuvo abierta ",dias.days," dias"
else:
    print "Elige una opción correcta"
"root@debian"
] | root@debian |
a6cb5f2f4b102661f20b0783836f1b11b3805ee9 | 3ebe732756194704043bb353c768ebb26cfed22e | /enbuyuksayi.py | e0d3d117bb893497f93897df0bc0708ec1b5cef0 | [] | no_license | BarisKamis/CLARUSWAY | 68ddf303d992d91268e0b5ef472fab13d1bdd554 | 63b2e0b3e119fdcbc27c956b436226134bec57b7 | refs/heads/main | 2023-06-09T16:18:25.579005 | 2021-07-01T20:48:01 | 2021-07-01T20:48:01 | 382,150,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | sayilar=[]
for i in range(1,6):
print ("bir sayi girin")
x=input()
sayilar.append(int(x))
i=i+1
enbuyuk=sayilar[1]
for i in sayilar:
if i > enbuyuk:
enbuyuk=i
i=i+1
print ("en buyuk sayi:", enbuyuk) | [
"noreply@github.com"
] | BarisKamis.noreply@github.com |
5fc9f33ad9117e2363e5ed12f9e8c613c93c79bf | 3ac9007691cacf0620530baf04dda9dd85fee556 | /usr/bin/gooroom-update | db947198d61acf88ab08e6d71ed22d8f717e9f75 | [] | no_license | ozun215/gooroom-update | 406cec20414486113d04867ad8f5ec9fc455378d | 1b69e58b001c36569557f51767310f33fa1a2dc8 | refs/heads/master | 2022-12-01T11:36:19.672171 | 2020-07-08T11:10:11 | 2020-07-08T11:10:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | #!/usr/bin/python
import os
import commands
command="/usr/lib/gooroom/gooroomUpdate/gooroomUpdate.py show &"
os.system(command)
| [
"gooroom@gooroom.kr"
] | gooroom@gooroom.kr | |
321d28bf716d6a7c7adbd903db11e67dbdfd4a8b | 743be419d9af6be760a4c9754a9fb946b84827ec | /manage.py | 55a16d5a5082fbd03c4d4c1723d5af460278aaa9 | [] | no_license | mathbeal/videomembership-django | f76c9debaef1b00171d79e8fd1e9409e24705f68 | 3fa779458197a245aacb82d00ff0e7102a3cb831 | refs/heads/master | 2021-06-04T20:13:38.457946 | 2016-09-13T13:37:05 | 2016-09-13T13:37:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "videomembership.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"leo.maltrait@gmail.com"
] | leo.maltrait@gmail.com |
1262b3cda999d37cac7c4fdb37305365b0c404ab | 38df8849ad0b43594dafb94008fd0036951dde85 | /regexquerytool/regexquerytool/wsgi.py | 505c9396070827e73bb9d9157051f0d3b84ff44c | [] | no_license | yasminhillis/regex_query_tool | ffebdcfeb312e9ec164f4ca429aa23c14a58823d | 10e713755a9b35350ab467f515cbb541af891f0d | refs/heads/main | 2023-01-05T00:03:23.064281 | 2020-11-08T09:33:21 | 2020-11-08T09:33:21 | 306,550,614 | 1 | 0 | null | 2020-11-08T09:33:22 | 2020-10-23T06:40:27 | JavaScript | UTF-8 | Python | false | false | 683 | py | """
WSGI config for regexquerytool project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
import sys
# from pathlib import Path
# path = 'regexquerytool.settings'
from django.core.wsgi import get_wsgi_application
# if path not in sys.path:
# sys.path.append(path)
# project_home = u'/app/regexquerytool/regexquerytool/wsgi.py'
os.environ['DJANGO_SETTINGS_MODULE'] = 'regexquerytool.settings'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'regexquerytool.settings')
application = get_wsgi_application()
| [
"jasminehillis94@gmail.com"
] | jasminehillis94@gmail.com |
54375b15c50675fc6a4c8d7cd3c9ca6202d57faa | 07cd7d98765ffd3a11342155fb21fd1f209a4c9a | /examples/simple_user_interface.py | c25bfe062f4299ae0c71dd2229b4935b49fe2214 | [
"Apache-2.0"
] | permissive | fooelisa/network_traffic_modeler_py3 | 1ce439b938289c02bfb4c0950b8fee9fefda8fde | 9c5151c066331536b6864f7c5e152de3c352282f | refs/heads/master | 2020-05-07T09:46:29.567195 | 2019-07-24T15:41:31 | 2019-07-24T15:41:31 | 180,391,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,910 | py | """Simple, menu-driven UI for network_modeling module.
Allows users to interact with and relate between associated
demands, interfaces, and nodes."""
from pyNTM import Model
from pyNTM import ModelException
from pyNTM import Interface
from pyNTM import Demand
from pyNTM import Node
from graph_network import graph_network
from graph_network import graph_network_interactive
from tkinter import ttk as ttk
from tkinter import *
from tkinter import filedialog
import re
import pdb
background_color = 'tan'
def open_file():
"""Opens the file that describes the Model and allows user to save
a diagram of the network graph"""
if selected_model_file.get() == '':
selected_model_file.set(filedialog.askopenfilename(initialdir="/",
title="Select file",
filetypes=(("csv files", "*.csv"),
("all files", "*.*"))))
global model
selected_file_label = ttk.Label(label_frame,
text="Network Model file is:")
selected_file_label.grid(row=1, column=0, sticky='W')
selected_file_display = ttk.Label(label_frame, text=' ' * 30)
selected_file_display = ttk.Label(label_frame,
text=selected_model_file.get())
selected_file_display.grid(row=6, column=0)
if selected_model_file.get() != '':
model = Model.load_model_file(selected_model_file.get())
model.update_simulation()
model_status_label = ttk.Label(label_frame, text="Model is:")
model_status_label.grid(row=8, column=0, sticky='W')
model_file_display = ttk.Label(label_frame, text=model)
model_file_display.grid(row=9, column=0, sticky='W')
# Update the Node Explorer tab
examine_selected_node()
# Update the Demand Explorer tab
examine_selected_demand()
# Update the Interface Explorer tab
examine_selected_interface()
# Update the Path Explorer tab
examine_paths()
# Create a button to produce a network graph
graph_network_button = Button(label_frame)
graph_network_button.grid(row=12, column=0, padx=5, pady=5, sticky='W')
graph_network_button["text"] = "Push to create network graph"
# Don't add the trailing () or this will execute immediately/automatically
graph_network_button["command"] = create_interactive_network_graph_and_refresh
if network_graph_file.get() != '':
graph_label_text = "Graph file saved at: " + network_graph_file.get()
graph_file_label = Label(label_frame, text=graph_label_text)
graph_file_label.grid(row=13, column=0, sticky='W')
def create_network_graph():
"""Makes a network graph"""
network_graph_file.set(filedialog.asksaveasfilename(initialdir="/",
title="Select or Create file:"))
graph_network.make_utilization_graph_neat(model, network_graph_file.get(),
display_plot=False)
def create_network_graph_and_refresh():
"""Makes a network graph and refreshes open_file_tab"""
network_graph_file.set(filedialog.asksaveasfilename(initialdir="/",
title="Select or Create file:"))
graph_network.make_utilization_graph_neat(model, network_graph_file.get(),
display_plot=False)
open_file()
def create_interactive_network_graph_and_refresh():
"""Makes an interactive network graph and refreshes open_file_tab"""
graph_network_interactive.make_interactive_network_graph(model)
def set_active_interface_from_listbox(event):
"""Sets the selected interface value from a listbox to the
active_interface"""
w = event.widget
value = (w.curselection()) # TODO -- comment this out and test
value_position = (w.curselection())
# This next part takes the first value in case the listbox has
# extra info trailing the interface
value_in_position = w.get(value_position)
# Use interface repr, so splitting that using split() yields interface
# name in position 2
selected_interface_value = value_in_position
selected_interface.set(selected_interface_value)
# Refresh the tabs
# TODO - add the destroy() function?
examine_selected_node()
examine_selected_demand()
examine_selected_interface()
def set_active_demand_from_listbox(event):
"""Sets the selected demand value from a listbox to the active_demand"""
w = event.widget
value = (w.curselection()) # get the current selection
value_position = (w.curselection()) # get the position of the current selection
selected_demand.set(w.get(value_position)) # set selected_demand to the current selection
# Try to delete the Node Demand Info labelframe to clear the demand paths
for thing in demand_tab.grid_slaves():
thing.destroy()
for thing in node_tab.grid_slaves():
thing.destroy()
for thing in interface_tab.grid_slaves():
thing.destroy()
# Refresh the Node Info and Demand Info tabs
examine_selected_node()
examine_selected_demand()
examine_selected_interface()
def set_active_object_from_option_menu(event):
"""Refreshes the tabs with the new active object info and displays
the info based on the new active object"""
# Try to delete the Node Demand Info labelframe to clear the demand paths
for thing in demand_tab.grid_slaves():
thing.destroy()
for thing in node_tab.grid_slaves():
thing.destroy()
for thing in interface_tab.grid_slaves():
thing.destroy()
# for thing in path_tab.grid_slaves():
# thing.destroy()
# Refresh the Node Info and Demand Info tabs
examine_selected_node()
examine_selected_demand()
examine_selected_interface()
examine_paths()
def get_demand_object_from_repr(demand_repr):
"""Returns demand object with an input of the demand's repr"""
try:
demand_info = re.split(', | |\)', demand_repr)
demand_source = demand_info[2]
demand_dest = demand_info[5]
demand_name = demand_info[11][1:-1]
demand_object = model.get_demand_object(demand_source, demand_dest,
demand_name=demand_name)
return demand_object
except IndexError:
pass
def get_demands_on_interface(interface):
"""Returns a list of demands on the specified interface"""
# Display demands on interface
try:
interface_data = interface.split("'")
interface_name = interface_data[1]
node_name = interface_data[3]
interface_object = model.get_interface_object(interface_name,
node_name)
demands_on_interface = interface_object.demands(model)
except (ModelException, IndexError):
interface_object = None
demands_on_interface = []
return demands_on_interface
def display_selected_objects(canvas_object, row_, column_):
"""Displays the selected objects"""
node_status = 'Unknown'
interface_status = 'Unknown'
demand_status = 'Unknown'
interface_info = 'Unknown'
try:
node_failed = model.get_node_object(selected_node.get()).failed
if node_failed == True:
node_status = 'Failed'
else:
node_status = 'Not Failed'
except ModelException:
pass
try:
selected_interface_name = selected_interface.get().split("'")[1]
selected_interface_node = selected_interface.get().split("'")[3]
interface_object = model.get_interface_object(selected_interface_name,
selected_interface_node)
interface_failed = interface_object.failed
interface_util = str(round((interface_object.utilization * 100), 1))
interface_info = interface_object
if interface_failed == True:
interface_status = 'Failed'
else:
interface_status = interface_util + "% utilized"
except (ModelException, AttributeError, IndexError) as e:
pass
try:
demand_object = get_demand_object_from_repr(selected_demand.get())
demand_routed = demand_object.path
if demand_routed == 'Unrouted':
demand_status = 'Unrouted'
else:
demand_status = 'Routed'
except (ModelException, AttributeError):
pass
selected_object_frame = LabelFrame(canvas_object, background=background_color,
text="Selected Interface, Demand, and Node")
selected_object_frame.grid(column=column_, row=row_, columnspan=3, pady=10)
selected_object_frame.column_width = 40
selected_object_frame.columnconfigure(0, weight=1)
selected_object_frame.columnconfigure(1, weight=2)
selected_object_frame.columnconfigure(2, weight=1)
Label(selected_object_frame, text='Name',
background=background_color).grid(row=row_ + 1, column=1)
Label(selected_object_frame, text='Status',
background=background_color).grid(row=row_ + 1, column=2)
Label(selected_object_frame, text="Selected Node:",
background=background_color).grid(row=row_ + 2, column=0, sticky='W')
Label(selected_object_frame, text=selected_node.get(), width=52,
borderwidth=1, relief="solid").grid(row=row_ + 2, column=1)
Label(selected_object_frame, text=node_status,
background=background_color).grid(row=row_ + 2, column=2, sticky='E')
Label(selected_object_frame, text="Selected Interface:",
background=background_color).grid(row=row_ + 3, column=0, sticky='W')
Label(selected_object_frame, text=selected_interface.get(),
width=52, justify=LEFT, wraplength=450,
borderwidth=1, relief="solid").grid(row=row_ + 3, column=1)
Label(selected_object_frame, text=interface_status,
background=background_color).grid(row=row_ + 3, column=2, sticky='E')
Label(selected_object_frame, text="Selected Demand:",
background=background_color).grid(row=row_ + 4, column=0, sticky='W')
Label(selected_object_frame, text=selected_demand.get(), width=52,
borderwidth=1, wraplength=450, relief="solid").grid(row=row_ + 4, column=1)
Label(selected_object_frame, text=demand_status,
background=background_color).grid(row=row_ + 4, column=2, sticky='E')
def display_demands(label_info, canvas_object, list_of_demands, row_,
column_,):
"""Displays a label for demands and a single-select listbox of the
demands below the label_info on a given canvas_object. A horizontal
scrollbar is included """
demands_frame = LabelFrame(canvas_object)
demands_frame.grid(row=row_, column=column_, pady=10)
Label(demands_frame, text=label_info).grid(row=0,
column=0, sticky='W', padx=10)
# Horizontal scrollbar - TODO create decorator for the scrollbar?
horizontal_scrollbar = Scrollbar(demands_frame, orient=HORIZONTAL)
horizontal_scrollbar.grid(row=3, column=0, sticky=E + W)
# Vertical scrollbar
vertical_scrollbar = Scrollbar(demands_frame, orient=VERTICAL)
vertical_scrollbar.grid(row=1, column=1, sticky=N + S)
demand_listbox = Listbox(demands_frame, selectmode='single', height=10,
width=40, xscrollcommand=horizontal_scrollbar.set,
yscrollcommand=vertical_scrollbar.set)
demand_listbox.grid(row=1, column=0, sticky='W', padx=10)
vertical_scrollbar.config(command=demand_listbox.yview)
horizontal_scrollbar.config(command=demand_listbox.xview)
demand_counter = 1
for demand in list_of_demands:
demand_listbox.insert(demand_counter, demand)
demand_counter += 1
demand_listbox.bind("<<ListBoxSelect>>", set_active_demand_from_listbox)
demand_listbox.bind("<Double-Button-1>", set_active_demand_from_listbox)
def display_interfaces(label_info, canvas_object, list_of_interfaces,
row_, column_):
"""Displays interfaces from list of interfaces in single selectable listbox.
A label with label_info will appear above the listbox."""
# Display Node's Interfaces Label
Label(canvas_object, text=label_info).grid(row=row_, column=column_,
sticky='W', padx=5)
# Vertical scrollbar
vertical_scrollbar = Scrollbar(canvas_object, orient=VERTICAL)
vertical_scrollbar.grid(row=row_ + 1, column=column_ + 2, sticky=N + S)
# Horizontal scrollbar - TODO create decorator for the scrollbar?
horizontal_scrollbar = Scrollbar(canvas_object, orient=HORIZONTAL)
horizontal_scrollbar.grid(row=(row_ + 2), column=column_, sticky=E + W,
columnspan=2)
# Create a listbox with the available interfaces for the Node
interfaces_listbox = Listbox(canvas_object, selectmode='single',
height=8, width=40, xscrollcommand=horizontal_scrollbar.set,
yscrollcommand=vertical_scrollbar.set)
interfaces_listbox.grid(row=row_ + 1, column=column_, columnspan=2,
sticky='W', padx=5)
horizontal_scrollbar.config(command=interfaces_listbox.xview)
vertical_scrollbar.config(command=interfaces_listbox.yview)
intf_counter = 1
for intf_name in list_of_interfaces:
interfaces_listbox.insert(intf_counter, intf_name)
intf_counter += 1
interfaces_listbox.bind("<<ListBoxSelect>>", set_active_interface_from_listbox)
interfaces_listbox.bind("<Double-Button-1>", set_active_interface_from_listbox)
return interfaces_listbox
def examine_selected_node(*args):
"""Examine the selected_node"""
#### Frame to choose a node ####
choose_node_frame = LabelFrame(node_tab)
choose_node_frame.grid(row=0, column=0, padx=10, pady=10)
# Label for choosing node
Label(choose_node_frame, text="Choose a node:").grid(row=0, column=0, sticky='W',
pady=10)
# Dropdown menu to choose a node
node_choices_list = [node.name for node in model.node_objects]
node_choices_list.sort()
# Put the node selection button on the node_tab.
# This option menu will call examine_selected_node when the choice is made.
node_dropdown_select = OptionMenu(choose_node_frame, selected_node,
*node_choices_list,
command=set_active_object_from_option_menu)
node_dropdown_select.grid(row=0, column=1, sticky='E')
# Label to confirm selected Node
Label(choose_node_frame, text="Selected node is:").grid(row=1, column=0, sticky='W')
# Display the selected Node
Label(choose_node_frame, text='-----------------------------------').\
grid(row=1, column=1, sticky='E')
Label(choose_node_frame, text=selected_node.get()).grid(row=1, column=1, sticky='E')
# Get selected_nodes Interfaces and display them in a listbox
try:
interface_choices = (interface for interface in
model.get_node_object(selected_node.get()).interfaces(model))
except:
interface_choices = []
pass
#### Frame to display node's interfaces ####
node_intf_frame = LabelFrame(node_tab)
node_intf_frame.grid(row=0, column=1)
interface_info = [str(round((interface.utilization * 100), 1)) + '% ' + interface.__repr__() for
interface in interface_choices]
interface_listbox = display_interfaces("Node's Interfaces", node_intf_frame,
interface_info, 0, 2)
#### Create a frame to node show demand info ####
demands_frame = LabelFrame(node_tab, text="Node Demand Info")
demands_frame.grid(column=0, row=4, columnspan=4, sticky='W', pady=15)
# Display Demands Sourced From Node
source_demand_choices = \
model.get_demand_objects_source_node(selected_node.get())
display_demands("Demands sourced from node", demands_frame,
source_demand_choices, 0, 0)
# Display Demands Destined To Node
dest_demand_choices = model.get_demand_objects_dest_node(selected_node.get())
display_demands("Demands destined to node", demands_frame,
dest_demand_choices, 0, 1)
#### Create a frame to show interface demand info ####
intf_demands_frame = LabelFrame(node_tab, text="Interface Demand Info")
intf_demands_frame.grid(column=5, row=4, columnspan=2, sticky='W',
padx=15, pady=15)
# Display demands on interface
try:
demands_on_interface = get_demands_on_interface(selected_interface.get())
except (ModelException, IndexError):
interface_object = None
demands_on_interface = []
display_demands("Demands Egressing Selected Interface", intf_demands_frame,
demands_on_interface, 0, 1)
#### Create a frame to show selected object info ####
display_selected_objects(node_tab, 0, 4)
# TODO - fail selected interface or node
def examine_selected_demand(*args):
"""Examine selected_interface object"""
# Label for choosing interface
choose_demand_label = Label(demand_tab,
text="Choose a demand:").grid(row=0, column=0, sticky='W', pady=10)
# Dropdown menu to choose a demand
demand_choices_list = [demand for demand in model.demand_objects]
demand_choices_list_sorted = sorted(demand_choices_list,
key=lambda demand: demand.source_node_object.name)
demand_dropdown_select = OptionMenu(demand_tab, selected_demand,
*demand_choices_list_sorted,
command=set_active_object_from_option_menu)
demand_dropdown_select.grid(row=0, column=1, sticky='EW')
# Display the selected objects
display_selected_objects(demand_tab, 0, 3)
#### Display the selected demand's path(s) ####
demand_path_frame = LabelFrame(demand_tab,
text="Demand Path Info (Ordered hops from source to destination); Displays all paths for ECMP demands.")
demand_path_frame.grid(row=3, column=0, columnspan=10, sticky='W',
padx=10, pady=10)
try:
demand_object = get_demand_object_from_repr(selected_demand.get())
try:
dmd_paths = demand_object.path
except AttributeError:
pass
column_num = 0
for path in dmd_paths:
label_info = "Demand hops ordered from source to dest"
interface_info = [str(round((interface.utilization * 100), 1))
+ '% ' + interface.__repr__() for interface in path]
display_interfaces(label_info, demand_path_frame,
interface_info, 0, column_num)
column_num += 3
except (IndexError, UnboundLocalError):
pass
demands_on_interface = get_demands_on_interface(selected_interface.get())
demands_on_int = display_demands("Demands Egressing Selected Interface", demand_tab,
demands_on_interface, 4, 3)
def examine_selected_interface(*args):
"""Allows user to explore interfaces with different characteristics"""
#### Filter to interfaces above a certain utilization ####
utilization_frame = LabelFrame(interface_tab)
utilization_frame.grid(row=0, column=0)
utilization_pct = [x for x in range(0, 100)]
# Label for pct util selection
pct_label = Label(utilization_frame, text="Display interfaces with \
utilization % greater than:")
pct_label.grid(row=0, column=0, columnspan=2, sticky='W')
pct_label.config(width=50)
# Dropdown menu for pct util
pct_dropdown_select = OptionMenu(utilization_frame, min_pct,
*utilization_pct, command=set_active_object_from_option_menu)
pct_dropdown_select.grid(row=0, column=4, sticky='W')
msg = "Interfaces above " + str(min_pct.get()) + "% utilization"
interface_list = [str(round((interface.utilization * 100), 1)) + '% '
+ interface.__repr__() for interface in model.interface_objects if
((interface.utilization * 100) >= min_pct.get())]
interface_list.sort(key=lambda x: float(x.split('%')[0]))
int_util = display_interfaces(msg, utilization_frame, interface_list, 2, 1)
int_util.grid(sticky='W')
selected_objects_int_tab = LabelFrame(interface_tab)
selected_objects_int_tab.grid(row=0, column=6, padx=10, sticky='W')
display_selected_objects(selected_objects_int_tab, 0, 8)
demands_on_interface = get_demands_on_interface(selected_interface.get())
intf_demands = display_demands("Demands Egressing Selected Interface", interface_tab,
demands_on_interface, 6, 0)
def examine_paths(*args):
"""Allows user to examine shortest paths and all paths between the
selected source and destination nodes in the Model"""
#### Select source and dest nodes ####
node_choices = [node.name for node in model.node_objects]
node_choices.sort()
src_node_select_frame = node_dropdown_select("Select a source node",
node_choices, source_node, 0, 0)
src_node_select_frame.grid(sticky='W')
dest_node_select = node_dropdown_select("Select a dest node",
node_choices, dest_node, 1, 0)
dest_node_select.grid(sticky='W')
#### Display shortest path(s) ####
# Find shortest paths
try:
source_node_object = model.get_node_object(source_node.get())
dest_node_object = model.get_node_object(dest_node.get())
shortest_path = model.get_shortest_path(source_node.get(),
dest_node.get())
paths = shortest_path['path']
cost = shortest_path['cost']
# Create a frame to hold the shortest path(s)
shortest_path_frame = LabelFrame(path_tab, text="Shortest Paths")
shortest_path_frame.grid(row=2, column=0, sticky='W', padx=10)
column_counter = 0
path_counter = 0
for path in paths:
list_of_interfaces = path
label = "Shortest Path %s, cost = %s" % (str(path_counter),
str(cost))
display_interfaces(label, shortest_path_frame, list_of_interfaces,
1, column_counter)
column_counter += 2
path_counter += 1
except ModelException:
pass
#### Display all paths ####
# Note - python, wtf?! Getting the horizontal scrollbar to work with
# multiple listboxes was WAY more difficult than it should have been
try:
source_node_object = model.get_node_object(source_node.get())
dest_node_object = model.get_node_object(dest_node.get())
all_paths = model.get_feasible_paths(source_node.get(),
dest_node.get())
# Create label frame to hold the feasible path(s) # frame_canvas
feasible_path_frame = LabelFrame(path_tab, text="All Paths")
feasible_path_frame.grid(row=3, column=0, padx=10, pady=10)
feasible_path_frame.grid_rowconfigure(0, weight=1)
feasible_path_frame.grid_columnconfigure(0, weight=1)
feasible_path_frame.grid_propagate(False)
# canvas
feasible_path_canvas = Canvas(feasible_path_frame)
feasible_path_canvas.grid(row=0, column=0, sticky='news')
# Horizontal Scrollbar
horizontal_scrollbar = Scrollbar(feasible_path_frame, orient=HORIZONTAL,
command=feasible_path_canvas.xview)
horizontal_scrollbar.grid(row=4, column=0, sticky='ew')
feasible_path_canvas.configure(xscrollcommand=horizontal_scrollbar.set)
# Create a frame to house the path(s)
path_frame = Frame(feasible_path_canvas) # frame_buttons
feasible_path_canvas.create_window((0, 0), window=path_frame,
anchor='nw')
column_counter = 0
path_counter = 0
for path in all_paths:
list_of_interfaces = path
label = "Feasible Path %s" % (str(path_counter))
display_interfaces(label, path_frame, list_of_interfaces,
1, column_counter)
column_counter += 2
path_counter += 1
# These next 3 things need to be in this order or the horizontal
# scrollbar for the multiple listboxes doesn't work; holy cow, python,
# it shouldn't be this difficult
path_frame.update_idletasks()
feasible_path_frame.config(width=1200, height=300)
feasible_path_canvas.config(scrollregion=feasible_path_canvas.bbox("all"))
except ModelException:
pass
def node_dropdown_select(label, node_choices, target_variable, row_, column_):
""""Creates a labelframe with a node select option menu"""
#### Frame to choose a node ####
choose_node_frame = LabelFrame(path_tab)
choose_node_frame.grid(row=row_, column=column_, padx=10, pady=10)
# Label for choosing node
Label(choose_node_frame, text=label).grid(row=0, column=0, sticky='W',
pady=10)
# Dropdown menu to choose a node
node_choices_list = node_choices
# Put the node selection button on the node_tab.
# This option menu will call examine_selected_node when the choice is made.
node_dropdown_select = OptionMenu(choose_node_frame, target_variable,
*node_choices,
command=set_active_object_from_option_menu)
node_dropdown_select.grid(row=0, column=1, sticky='E')
# Label to confirm selected Node
Label(choose_node_frame, text="Selected node is:").grid(row=1, column=0, sticky='W')
# Display the selected Node
Label(choose_node_frame, text='-----------------------------------').\
grid(row=1, column=1, sticky='E')
Label(choose_node_frame, text=target_variable.get()).grid(row=1, column=1, sticky='E')
return choose_node_frame
# Establish the canvas
ui_window = Tk()
ui_window.title('Network modeler UI')
ui_window.geometry('1600x750')
ui_window.resizable(1, 1)
# Create a tabbed notebook in the canvas ui_window
nb = ttk.Notebook(ui_window) # Creates ttk notebook in ui window
# Establish names for selected demand, node, and interface in the notebook
selected_demand = StringVar(nb)
selected_node = StringVar(nb)
selected_interface = StringVar(nb)
selected_model_file = StringVar(nb)
source_node = StringVar(nb)
dest_node = StringVar(nb)
network_graph_file = StringVar(nb)
# selected_model_file.set(None)
model = None
min_pct = IntVar(nb) # Min percent utilization to search over interfaces for
# Notebook grid spans 70 columns and 69 rows and spreads out the notebook
# in all directions
nb.grid(row=1, column=0, columnspan=70, rowspan=69, sticky='NESW')
rows = 0
while rows < 70:
ui_window.rowconfigure(rows, weight=1)
ui_window.columnconfigure(rows, weight=1)
rows += 1
#### File Open Tab ####
# Open a model file
open_file_tab = ttk.Frame(nb)
nb.add(open_file_tab, text="Open Model File")
# Establish a frame label
label_frame = ttk.LabelFrame(open_file_tab, text="Select a Network Model File")
label_frame.grid(column=0, row=0, padx=8, pady=8, sticky='W')
# Make a button to load a file
load_file_button = ttk.Button(open_file_tab)
load_file_button["text"] = "Push button to load network model file"
load_file_button.grid(row=11, column=0, sticky='W')
load_file_button["command"] = open_file
#### Node Tab ####
# Create a new tab and add it to the notebook
node_tab = ttk.Frame(nb)
nb.add(node_tab, text="Node Explorer")
#### Demand Tab ####
# Create a new tab and add it to the notebook
demand_tab = ttk.Frame(nb)
nb.add(demand_tab, text="Demand Explorer")
# TODO - Interface Tab with list of top utilized interfaces
# and be able to set utilization % and see all ints that exceed it
#### Interface Tab ####
interface_tab = ttk.Frame(nb)
nb.add(interface_tab, text="Interface Explorer")
#### Create Paths Tab ####
path_tab = ttk.Frame(nb)
nb.add(path_tab, text="Path Explorer")
ui_window.mainloop()
| [
"elisa@jasinska.de"
] | elisa@jasinska.de |
a29090ef119e51b024e2fc4af969d65ecaef476a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/1805.py | f216188bcb5e778686fc1da1297901988727a426 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py |
def get_min_members(smax, audience):
standing = 0
friends = 0
i = 1
standing += audience[0]
while i <= smax:
if standing < i:
new_friends = i - standing
standing += new_friends
friends += new_friends
standing += audience[i]
i += 1
return friends
# cases = [(4, "11111"), (1, "09"), (5, "110011"), (0, "1")]
t = input()
for i in range(t):
smax, audience = raw_input().split()
result = get_min_members(int(smax), map(int, audience))
print "Case #%d: %d" % (i+1, result)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e0e32be403a6963887949ef4f1269a652f11e196 | 89e6c3548fbdd06178aae712de1ff19004bc2faa | /my_django/contrib/localflavor/sk/forms.py | f5428d879572000d4ed3f57df9882da6f007f378 | [] | no_license | bhgv/ublog_git.hg.repo-django.python-engine | a3f3cdcbacc95ec98f022f9719d3b300dd6541d4 | 74cdae100bff5e8ab8fb9c3e8ba95623333c2d43 | refs/heads/master | 2020-03-23T01:04:07.431749 | 2018-07-25T12:59:21 | 2018-07-25T12:59:21 | 140,899,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py | """
Slovak-specific form helpers
"""
from __future__ import absolute_import
from my_django.contrib.localflavor.sk.sk_districts import DISTRICT_CHOICES
from my_django.contrib.localflavor.sk.sk_regions import REGION_CHOICES
from my_django.forms.fields import Select, RegexField
from my_django.utils.translation import ugettext_lazy as _
class SKRegionSelect(Select):
"""
A select widget widget with list of Slovak regions as choices.
"""
def __init__(self, attrs=None):
super(SKRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class SKDistrictSelect(Select):
"""
A select widget with list of Slovak districts as choices.
"""
def __init__(self, attrs=None):
super(SKDistrictSelect, self).__init__(attrs, choices=DISTRICT_CHOICES)
class SKPostalCodeField(RegexField):
    """Form field that validates Slovak postal codes.

    Accepted formats are ``XXXXX`` and ``XXX XX`` (X = digit); the cleaned
    value always has the optional inner space removed.
    """
    default_error_messages = {
        'invalid': _(u'Enter a postal code in the format XXXXX or XXX XX.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        postal_code_re = r'^\d{5}$|^\d{3} \d{2}$'
        super(SKPostalCodeField, self).__init__(postal_code_re,
            max_length, min_length, *args, **kwargs)

    def clean(self, value):
        """Validate and return the code as a plain digit string.

        Empty values come back as an empty string.
        """
        cleaned = super(SKPostalCodeField, self).clean(value)
        return cleaned.replace(' ', '')
| [
"bhgv.empire@gmail.com"
] | bhgv.empire@gmail.com |
be7efb58da4aaf5477a500e8e99560403997a417 | f8200c79eeddbf64f39ca21ac0afcaa1c235703a | /debug.py | dc93da203b599c61812c22b41857c85bd4143eb9 | [] | no_license | glanton/robocritic | dbe98eb2ec7d8f79fa482f547f480e780b0688db | afd9545dfa158957e7ca9d40fcf59b410d192dcb | refs/heads/master | 2016-08-05T03:52:24.779438 | 2015-05-01T20:57:25 | 2015-05-01T20:57:25 | 34,097,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | # functions and data used for debugging purposes
# Debug helpers: named counters that report their progress on stdout.
# Maps counter name -> number of times run_counter() has been invoked for it.
_count_dict = {}
def run_counter(name, frequency):
    """Create the counter on first use; afterwards, bump it and print the
    current count whenever it is a multiple of `frequency`."""
    if name not in _count_dict:
        # First call: announce the new counter and start counting.
        print(name + ": 0")
        _count_dict[name] = 1
        return
    count = _count_dict[name]
    if count % frequency == 0:
        print(" - " + name + ": " + str(count))
    _count_dict[name] = count + 1
def reset_counter(name):
    """Reset an existing counter to zero so it can be reused; unknown
    names are ignored."""
    if name in _count_dict:
        _count_dict[name] = 0
| [
"classay@gmail.com"
] | classay@gmail.com |
e784cfeb07b1b4b44de67e5f78c4e17cfbf1338b | 1d717c797e93b451f7da7c810a0fb4075b1050d5 | /src/data/dataset/basic_dataset.py | bc875ea6516703ea40caa5028c2b7984ad5dd2fa | [] | no_license | jessie0624/nlp-task | 32338b08051a3ea192db2bf74c9c969bdff1f6ad | aaeeed86341356d9fd061664f6f7bccf2ac353d0 | refs/heads/master | 2023-01-24T12:06:13.323646 | 2020-12-10T08:38:23 | 2020-12-10T08:38:23 | 292,151,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py |
import numpy as np
from src.base import BaseDataset
class BasicDataset(BaseDataset):
    """Simple in-memory dataset over two parallel lists: samples ``x`` and
    labels ``y``."""

    def __init__(self, x: list, y: list, callbacks=None):
        super().__init__(callbacks=callbacks)
        self.x = x
        self.y = y
        # Build the initial pool of candidate indices right away.
        self.sample()

    def get_index_pool(self):
        """Return the index pool used by each sampling round.

        Defaults to one entry per element of ``x``, kept two-dimensional
        for uniformity, i.e. ``[[0], [1], [2], ...]``.
        """
        return [[idx] for idx in range(len(self.x))]

    def sort(self):
        """Reorder the index pool by the length of each sample in ``x``."""
        current_pool = self._index_pool
        sample_lengths = [len(sample) for sample in self.x]
        # np.argsort keeps the original implementation's tie ordering.
        order = np.argsort(sample_lengths)
        self._index_pool = [current_pool[position] for position in order]

    def __getitem__(self, item: int):
        x, y = self.x[item], self.y[item]
        self._handle_callback_on_batch(x, y)
        return x, y
"jessie_lijie@126.com"
] | jessie_lijie@126.com |
1c54602c168cdfc90d1c47fa582a445c1da70afa | 730d9b6251cfb911250626b21c8476c30f5729c0 | /day04/code/4-xpath-language.py | 87de8b71941e2a1cc8923144e991fd9861a31ac6 | [] | no_license | Wuhuaxing2017/spider_notes | 7b6d6e9e26e97d9b67dda85fd4833f82ef793f58 | 69cb18e38f54c839e4d0ebaa7199f984dfbcf5da | refs/heads/master | 2020-03-19T08:09:51.723802 | 2018-06-05T14:32:46 | 2018-06-05T14:32:46 | 136,180,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | from lxml import etree
html = '''<div>
<ul>
<li class="item-0"><a href="link1.html">first item</a></li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-inactive"><a href="link3.html" class="linkjfdlsfjls">third item</a></li>
<li class="shfs-inactive"><a href="link4.html">third item</a></li>
<li class="isjfls-inactive"><a href="link5.html">third item</a></li>
<li class="qwert-inactive"><a href="link6.html">third item</a></li>
<li class="item-1"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a>
</ul>
</div>'''
# 数据转换成标签树方式一
html_tree = etree.HTML(html)
# 方式二,可以将文件中的直接进行转换
html_tree2 = etree.parse('./data.html')
# print(html_tree,html_tree2)
# print(etree.tostring(html_tree).decode('utf-8'))
# 获取文件中所有的标签li
# xpath返回的数据是列表,标签<Element 内存地址>
li = html_tree.xpath('//li')
# print(li)
li = html_tree.xpath('//li[@class="item-1"]')
# print(li[0].xpath('..//a/text()'))
# 查询class属性不等于“item-1” 标签
li = html_tree.xpath('//li[@class!="item-1"]')
# print(li)
# 查询li标签,class 包含inactive 字符串
li = html_tree.xpath('//li[contains(@class,"inactive")]')
# print(li)
# print(li[0].xpath('./a/@*'))
# 查询li标签,class 不包含inactive字符串
li = html_tree.xpath('//li[not(contains(@class,"inactive"))]')
# print(li)
# print(etree.tostring(li[0]).decode('utf-8'))
# 查询li标签,class 不包含inactive字符串 同时包含class =item-1
li = html_tree.xpath('//li[not(contains(@class,"inactive"))][@class="item-1"]')
# print(li)
# print(etree.tostring(li[-1]).decode('utf-8'))
# 查询li标签,最后一个
# print(etree.tostring(html_tree).decode('utf-8'))
li = html_tree.xpath('/html/body/div/ul/li')
li = html_tree.xpath('//li[last()-1]')
# print(li,etree.tostring(li[0]))
# 查询位置小于4的标签
li = html_tree.xpath('//li[position()<4]')
print(li)
| [
"Wuhuaxing2017@qq.com"
] | Wuhuaxing2017@qq.com |
3baa1450535e83cf7f724f6e11344aa79973106c | 18f5ea1689c88eac0a8e68f1697d04b9f4028e19 | /manuel-ortiz-fortis-leonardo-Jimenez-miguel-rodriguez.py | 66b9c13a76ea9db5d44f53949579900476c2621e | [] | no_license | MiguelAngelR95/programas-automatas | 44dc69ff4fee2a807417de18e6cbdb946193ed2f | 60881b805c01b09850691c4741e3ea6456d866c2 | refs/heads/master | 2021-01-18T22:55:27.817884 | 2016-08-05T03:47:27 | 2016-08-05T03:47:27 | 62,854,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,063 | py |
mensaje = "ingrese los arreglos"
print mensaje
t = raw_input()
lista = []
for i in range(0, int(t)):
lista.append(raw_input("Ingrese elemento -> "))
print "A^0 = E"
#---------------------------------------------------------------
a = "{"
for i in range(0, len(lista)):
a = a + lista[i]+","
a = a + "}"
print "A^1 = "+a
#---------------------------------------------------------------
b = "{"
for i in range(0, len(lista)):
for j in range(0, len(lista)):
b = b + lista[i]+lista[j]+","
b = b + "}"
print "A^2 = "+b
#---------------------------------------------------------------
c = "{"
for i in range(0, len(lista)):
for j in range(0, len(lista)):
for k in range(0, len(lista)):
c = c + lista[i]+lista[j]+lista[k]+","
c = c + "}"
print "A^3 = "+c
#---------------------------------------------------------------
d = "{"
for i in range(0, len(lista)):
for j in range(0, len(lista)):
for k in range(0, len(lista)):
for l in range(0, len(lista)):
d = d + lista[i]+lista[j]+lista[k]+lista[l]+","
#------------------------------------------------------------------
e = "{"
for i in range(0, len(lista)):
for j in range(0, len(lista)):
for k in range(0, len(lista)):
for l in range(0, len(lista)):
for m in range(0, len(lista)):
e = e + lista[i]+lista[j]+lista[k]+lista[m]+","
e = e + "}"
print "A^5 = "+e
#---------------------------------------------------------------
f = "{"
for i in range(0, len(lista)):
for j in range(0, len(lista)):
for k in range(0, len(lista)):
for l in range(0, len(lista)):
for m in range(0, len(lista)):
for n in range(0, len(lista)):
e = e + lista[i]+lista[j]+lista[k]+lista[m]+lista[m]+","
f = f + "}"
print "A^ = "+f
#---------------------------------------------------------------
a = len(d)
d[a-2].replace(",","}")
#d = d + "}"
print "A^4 = "+d
#print "------------------------------"
#print len(cadena)
#for i in range(0, 10,2):
| [
"noreply@github.com"
] | MiguelAngelR95.noreply@github.com |
7ceee000b032290546e28faeaac57035bee55b29 | 9a65ca76a29102c3a74433a0d11a29e7d369b1b3 | /Assignment03/single_layer_nn.py | 1e93e03133650f348f4b4d9a593a02a3946dcb27 | [
"MIT"
] | permissive | Ericbrod10/Deep-Learning | bd8ffe7d17f2275c885f7550b3394a8969f5c705 | 5b0a01597ce19f2da5bf45b76023b898c494f46a | refs/heads/main | 2023-02-05T11:52:12.677261 | 2020-12-26T23:01:50 | 2020-12-26T23:01:50 | 324,647,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,253 | py | #Eric Broderick
#eab37
#Assignment 3
import numpy as np
import sys
# Command: python single_layer_nn.py <train> <test> <n>
# sys.argv[1] = <train> = training data
# sys.argv[2] = <test> = test data
# sys.argv[3] = <n> = number of nodes
## Read in data ##
f = open(sys.argv[1])
data = np.loadtxt(f)
train = data[:,1:]
trainlabels = data[:,0]
onearray = np.ones((train.shape[0],1))
train = np.append(train,onearray,axis = 1)
# print("train = ",train)
# print("train shape = ",train.shape)
f = open(sys.argv[2])
data = np.loadtxt(f)
test = data[:,1:]
testlabels = data[:,0]
# onearray = np.ones((test.shape[0],1))
# test = np.append(test,onearray,axis = 1)
rows = train.shape[0]
cols = train.shape[1]
hidden_nodes = int(sys.argv[3])
hidden_nodes = 3
## Initialize all weights ##
w = np.random.rand(hidden_nodes)
W = np.random.rand(hidden_nodes, cols)
s = np.random.rand(hidden_nodes)
S = np.random.rand(hidden_nodes, cols)
u = np.random.rand(hidden_nodes)
U = np.random.rand(hidden_nodes, cols)
v = np.random.rand(hidden_nodes)
V = np.random.rand(hidden_nodes, cols)
eta = 0.1
epochs = 1000
stop = 0
prevobj = np.inf
i = 0
## Calculate objective ##
hidden_layer = np.matmul(train, np.transpose(W))
# print("hidden_layer = ",hidden_layer)
# print("hidden_layer shape = ",hidden_layer.shape)
##sigmoid function ##
sigmoid = lambda x: 1/(1+np.exp(-x))
hidden_layer = np.array([sigmoid(xi) for xi in hidden_layer])
# print("hidden_layer = ",hidden_layer)
# print("hidden_layer shape = ",hidden_layer.shape)
output_layer = np.matmul(hidden_layer, np.transpose(w))
# print("output_layer = ",output_layer)
obj = np.sum(np.square(output_layer - trainlabels))
# print("obj = ",obj)
#obj = np.sum(np.square(np.matmul(train, np.transpose(w)) - trainlabels))
#print("Obj = ",obj)
## Begin gradient descent ##
## stop = 0
while(prevobj - obj > stop and i < epochs):
#while(prevobj - obj > 0):
#Update previous objective
prevobj = obj
#Calculate gradient update for final layer (w)
#dellw is the same dimension as w
# print(hidden_layer[0,:].shape, w.shape)
dellw = (np.dot(hidden_layer[0,:],w)-trainlabels[0])*hidden_layer[0,:]
for j in range(1, rows):
dellw += (np.dot(hidden_layer[j,:],np.transpose(w))-trainlabels[j])*hidden_layer[j,:]
#Update w
w = w - eta*dellw
# print("w = ",w)
# print("dellf = ",dellf)
#Calculate gradient update for hidden layer weights (W)
#dellW has to be of same dimension as W
#Let's first calculate dells. After that we do dellu and dellv.
#Here s, u, and v are the three hidden nodes
#dells = df/dz1 * (dz1/ds1, dz1,ds2)
dells = np.sum(np.dot(hidden_layer[0,:],w)-trainlabels[0])*w[0] * (hidden_layer[0,0])*(1-hidden_layer[0,0])*train[0]
for j in range(1, rows):
dells += np.sum(np.dot(hidden_layer[j,:],w)-trainlabels[j])*w[0] * (hidden_layer[j,0])*(1-hidden_layer[j,0])*train[j]
s = s - eta*dells
#TODO: dellu = ?
dellu = np.sum(np.dot(hidden_layer[0,:],w)-trainlabels[0])*w[1] * (hidden_layer[0,1])*(1-hidden_layer[0,1])*train[0]
for j in range(1, rows):
dellu += np.sum(np.dot(hidden_layer[j,:],w)-trainlabels[j])*w[1] * (hidden_layer[j,1])*(1-hidden_layer[j,1])*train[j]
u = u - eta*dellu
#TODO: dellv = ?
dellv = np.sum(np.dot(hidden_layer[0,:],w)-trainlabels[0])*w[2] * (hidden_layer[0,2])*(1-hidden_layer[0,2])*train[0]
for j in range(1, rows):
dellv += np.sum(np.dot(hidden_layer[j,:],w)-trainlabels[j])*w[2] * (hidden_layer[j,2])*(1-hidden_layer[j,2])*train[j]
v = v - eta*dellv
#TODO: Put dells, dellu, and dellv as rows of dellW
dellW = np.empty((0,cols),float)
dellW = np.vstack((dellW,dells,dellu,dellv))
W = W - eta*dellW
hidden_layer = np.matmul(train, np.transpose(W))
hidden_layer = np.array([sigmoid(xi) for xi in hidden_layer])
output_layer = np.matmul(hidden_layer, np.transpose(w))
obj = np.sum(np.square(output_layer - trainlabels))
i = i + 1
print("i = %s Objective = %s " % (i,obj))
# Do final predictions
final = np.matmul(train, np.transpose(W))
predictions = np.sign(np.matmul(sigmoid(final), np.transpose(w)))
error = (1 - (predictions == testlabels).mean()) * 100
print('Predictions: %s' % predictions)
print('Error: ', error)
print('w = %s' % w)
print('s = %s' % s)
print('u = %s' % u)
print('v = %s' % v) | [
"56181235+Ericbrod10@users.noreply.github.com"
] | 56181235+Ericbrod10@users.noreply.github.com |
3df5d2864b2526594da536cb97351fd21b615766 | f8ee3224c99732805a09bd2cb4af787c47d6bc5f | /src/map/Menu.py | 46c1d1c2cc713c69b77073a5d18d639b1ca0b9fc | [] | no_license | q3226257/FreeFantasy_v2 | b57b2e5b4431f8d730ef7d4e6dafb1bf535be138 | d1b5ba206b03a4b57a28a539618772c19f91169e | refs/heads/master | 2020-04-13T15:50:52.044653 | 2019-01-01T15:36:39 | 2019-01-01T15:36:39 | 163,303,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | import pygame
import thorpy
from info.App import AppInfo
from main.Constant import *
from info.Items import *
import main.Main
from interface.EventDeal import Update
a = [Big(), Big(), Big()]
class Menu(Update):
def __init__(self, screen: pygame.Surface):
self.screen = screen
rect = pygame.Rect(screen.get_width() / 2, 0, screen.get_width() / 2, screen.get_height())
menu_screen = self.screen.subsurface(rect)
self.table_list: thorpy.TableList = thorpy.TableList(
menu_screen, column_num=5, row_num=10)
def update(self, clock, *params):
# while AppInfo.current_stat == STAT.MENU_STAT:
fps = clock.tick(60)
self.table_list.update(fps, *a)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
main.Main.change_stat(STAT.MAP_STAT)
| [
"616784622@qq.com"
] | 616784622@qq.com |
fc286ee99bce63c3cf6dad314ec4c1925f581e0f | 7a98fb6d68f06f7926ed4a27e58ea0721d03e008 | /utils/utils.py | a81cc75cb506506a3f3fc833b1e91252bbd44802 | [] | no_license | d-c-a-i/DualVar | 6ae0515e1ec2a35f27aab33cea008a74bb614f0d | 244b19e29b9a8f67db94c41faeb50b91ec5f45f9 | refs/heads/master | 2023-08-31T13:02:07.917998 | 2021-10-22T17:09:44 | 2021-10-22T17:09:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,824 | py | import os
import glob
import math
import pickle
import numpy as np
import torch
from torchvision import transforms
import torch.distributed as dist
from datetime import datetime
from collections import deque
import time
from numba import jit
from shutil import copyfile
from dataloader import KVReader
def save_checkpoint(state, is_best=0, gap=1, filename='models/checkpoint.pth.tar', keep_all=False, is_save=True, save_latest=False):
last_epoch_path = os.path.join(os.path.dirname(filename), 'epoch%s.pth.tar' % str(state['epoch'] - gap))
if not keep_all:
try:
os.remove(last_epoch_path)
except:
pass
if is_save:
torch.save(state, filename)
if save_latest:
latest_filename = os.path.join(os.path.dirname(filename), 'latest.pth.tar')
if os.path.exists(latest_filename):
os.remove(latest_filename)
time.sleep(3)
torch.save(state, latest_filename)
if is_best:
past_best = glob.glob(os.path.join(os.path.dirname(filename), 'model_best_*.pth.tar'))
past_best = sorted(past_best, key=lambda x: int(''.join(filter(str.isdigit, x))))
if len(past_best) >= 5:
try:
os.remove(past_best[0])
except:
pass
torch.save(state, os.path.join(os.path.dirname(filename), 'model_best_epoch%s.pth.tar' % str(state['epoch'])))
def write_log(content, epoch, filename):
if not os.path.exists(filename):
log_file = open(filename, 'w')
else:
log_file = open(filename, 'a')
log_file.write('## Epoch %d:\n' % epoch)
log_file.write('time: %s\n' % str(datetime.now()))
log_file.write(content + '\n\n')
log_file.close()
def denorm(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
assert len(mean) == len(std) == 3
inv_mean = [-mean[i] / std[i] for i in range(3)]
inv_std = [1 / i for i in std]
return transforms.Normalize(mean=inv_mean, std=inv_std)
def batch_denorm(tensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], channel=1):
shape = [1] * tensor.dim();
shape[channel] = 3
dtype = tensor.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device).view(shape)
std = torch.as_tensor(std, dtype=dtype, device=tensor.device).view(shape)
output = tensor.mul(std).add(mean)
return output
def calc_topk_accuracy(output, target, topk=(1,)):
"""
Modified from: https://gist.github.com/agermanidis/275b23ad7a10ee89adccf021536bb97e
Given predicted and ground truth labels,
calculate top-k accuracies.
"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred)).contiguous()
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(1 / batch_size))
return res
def calc_mask_accuracy(output, target_mask, topk=(1,)):
maxk = max(topk)
_, pred = output.topk(maxk, 1, True, True)
zeros = torch.zeros_like(target_mask).long()
pred_mask = torch.zeros_like(target_mask).long()
res = []
for k in range(maxk):
pred_ = pred[:, k].unsqueeze(1)
onehot = zeros.scatter(1, pred_, 1)
pred_mask = onehot + pred_mask # accumulate
if k + 1 in topk:
res.append(((pred_mask * target_mask).sum(1) >= 1).float().mean(0))
return res
def neq_load_customized(model, pretrained_dict, verbose=True, args=None):
''' load pre-trained model in a not-equal way,
when new model has been partially modified '''
assert args is not None
model_dict = model.state_dict()
tmp = {}
if verbose:
content = "\n=======Check Weights Loading======\nWeights not used from pretrained file:"
args.logger.info(content)
for k, v in pretrained_dict.items():
if k in model_dict:
tmp[k] = v
else:
args.logger.info(k)
args.logger.info('---------------------------')
args.logger.info('Weights not loaded into new model:')
for k, v in model_dict.items():
if k not in pretrained_dict:
args.logger.info(k)
args.logger.info('===================================\n')
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
del pretrained_dict
model_dict.update(tmp)
del tmp
model.load_state_dict(model_dict)
return model
def strfdelta(tdelta, fmt):
d = {"d": tdelta.days}
d["h"], rem = divmod(tdelta.seconds, 3600)
d["m"], d["s"] = divmod(rem, 60)
return fmt.format(**d)
class Logger(object):
'''write something to txt file'''
def __init__(self, path):
self.birth_time = datetime.now()
filepath = os.path.join(path, self.birth_time.strftime('%Y-%m-%d-%H:%M:%S') + '.log')
self.filepath = filepath
with open(filepath, 'a') as f:
f.write(self.birth_time.strftime('%Y-%m-%d %H:%M:%S') + '\n')
def log(self, string):
with open(self.filepath, 'a') as f:
time_stamp = datetime.now() - self.birth_time
f.write(strfdelta(time_stamp, "{d}-{h:02d}:{m:02d}:{s:02d}") + '\t' + string + '\n')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name='null', fmt=':.4f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.local_history = deque([])
self.local_avg = 0
self.history = []
self.dict = {} # save all data values here
self.save_dict = {} # save mean and std here, for summary table
def update(self, val, n=1, history=0, step=5):
self.val = val
self.sum += val * n
self.count += n
if n == 0: return
self.avg = self.sum / self.count
if history:
self.history.append(val)
if step > 0:
self.local_history.append(val)
if len(self.local_history) > step:
self.local_history.popleft()
self.local_avg = np.average(self.local_history)
def dict_update(self, val, key):
if key in self.dict.keys():
self.dict[key].append(val)
else:
self.dict[key] = [val]
def print_dict(self, title='IoU', save_data=False):
"""Print summary, clear self.dict and save mean+std in self.save_dict"""
total = []
for key in self.dict.keys():
val = self.dict[key]
avg_val = np.average(val)
len_val = len(val)
std_val = np.std(val)
if key in self.save_dict.keys():
self.save_dict[key].append([avg_val, std_val])
else:
self.save_dict[key] = [[avg_val, std_val]]
print('Activity:%s, mean %s is %0.4f, std %s is %0.4f, length of data is %d' \
% (key, title, avg_val, title, std_val, len_val))
total.extend(val)
self.dict = {}
avg_total = np.average(total)
len_total = len(total)
std_total = np.std(total)
print('\nOverall: mean %s is %0.4f, std %s is %0.4f, length of data is %d \n' \
% (title, avg_total, title, std_total, len_total))
if save_data:
print('Save %s pickle file' % title)
with open('img/%s.pickle' % title, 'wb') as f:
pickle.dump(self.save_dict, f)
def __len__(self):
return self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def mini_str(self):
fmtstr = '{name} {val' + self.fmt + '}'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix="", logger=None):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
self.logger = logger
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [meter.mini_str() for meter in self.meters]
if self.logger:
self.logger.info(". ".join(entries))
else:
print('. '.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
### from: https://github.com/pytorch/pytorch/issues/15849#issuecomment-518126031
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class Timer():
def __init__(self):
self.o = time.time()
def measure(self, p=1):
x = (time.time() - self.o) / p
x = int(x)
if x >= 3600:
return '{:.1f}h'.format(x / 3600)
if x >= 60:
return '{}m'.format(round(x / 60))
return '{}s'.format(x)
# https://github.com/pytorch/pytorch/issues/15849#issuecomment-573921048
class FastDataLoader(torch.utils.data.dataloader.DataLoader):
'''for reusing cpu workers, to save time'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
# self.batch_sampler = _RepeatSampler(self.batch_sampler)
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
# Avoid "cannot pickle KVReader object" error
dataset.reader = None # KVReader(dataset.db_path, dataset.num_readers)
class GatherLayer(torch.autograd.Function):
"""Gather tensors from all process, supporting backward propagation.
"""
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
output = [torch.zeros_like(input) \
for _ in range(dist.get_world_size())]
dist.all_gather(output, input)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
input, = ctx.saved_tensors
grad_out = torch.zeros_like(input)
grad_out[:] = grads[dist.get_rank()]
return grad_out | [
"bytedance@c02dw62hml85.sso.bytedance.com"
] | bytedance@c02dw62hml85.sso.bytedance.com |
3ebc4805f20af93c3c9a5846bf9c7185e1c7a7c0 | f2852e75af2c15d59cb962d7d5c81fa1e5cbe374 | /home/migrations/0015_contactpage_map_address.py | b413c8f57fffec09fcecc85728e290124f5f5250 | [
"Unlicense"
] | permissive | jesuispaulbonnet/technic-alu | 27f4cb95a736894de7588d8d5a42efaa42a0ddb8 | 697b948108bdda3c2f538f88d747b5cd50e21254 | refs/heads/master | 2021-05-11T11:05:54.110018 | 2019-01-20T14:23:26 | 2019-01-20T14:23:26 | 118,121,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-23 12:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the required `map_address`
    # CharField to ContactPage; existing rows are backfilled with
    # 'technic-alu' (one-off default, hence preserve_default=False).
    dependencies = [
        ('home', '0014_auto_20180223_1254'),
    ]
    operations = [
        migrations.AddField(
            model_name='contactpage',
            name='map_address',
            field=models.CharField(default='technic-alu', max_length=250),
            preserve_default=False,
        ),
    ]
| [
"paul.bonnet@tuware.com"
] | paul.bonnet@tuware.com |
c3469d4c16126daa8fbccf787da1442ae647afdf | 450021885a28d498f309b656973f1afd2ae538ef | /Mnist_UseNN.py | c232b0a1668431936e280db93cbd467d5cdeb590 | [] | no_license | kimwoojoo/DeepLearningStudy | 5f1f2e859b4f926c461847fafab02c855646ed71 | 521816261a2538cb6cb51b9b1019d27ca7e9a0b8 | refs/heads/master | 2020-04-27T11:57:33.578479 | 2019-03-08T01:59:53 | 2019-03-08T01:59:53 | 174,315,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,160 | py | # Lab 7 Learning rate and Evaluation
import tensorflow as tf
import matplotlib.pyplot as plt
import random
from PIL import Image
tf.set_random_seed(777) # for reproducibility
from tensorflow.examples.tutorials.mnist import input_data
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
nb_classes = 10
# MNIST data image of shape 28 * 28 = 784
X = tf.placeholder(tf.float32, [None, 784])
# 0 - 9 digits recognition = 10 classes
Y = tf.placeholder(tf.float32, [None, nb_classes])
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
layer1 = tf.nn.relu(tf.matmul(X,W1) + b1)
W2 = tf.Variable(tf.random_normal([256, 128]))
b2 = tf.Variable(tf.random_normal([128]))
layer2 = tf.nn.relu(tf.matmul(layer1,W2) + b2)
W3 = tf.Variable(tf.random_normal([128, 10]))
b3 = tf.Variable(tf.random_normal([10]))
# Hypothesis (using softmax)
hypothesis = tf.matmul(layer2, W3) + b3
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=hypothesis, labels=Y))
train = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
# Test model
is_correct = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
# parameters
num_epochs = 15
batch_size = 100
num_iterations = int(mnist.train.num_examples / batch_size)
with tf.Session() as sess:
# Initialize TensorFlow variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(num_epochs):
avg_cost = 0
for i in range(num_iterations):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_, cost_val = sess.run([train, cost], feed_dict={X: batch_xs, Y: batch_ys})
avg_cost += cost_val / num_iterations
print("Epoch: {:04d}, Cost: {:.9f}".format(epoch + 1, avg_cost))
print("Learning finished")
# Test the model using test sets
print(
"Accuracy: ",
accuracy.eval(
session=sess, feed_dict={X: mnist.test.images, Y: mnist.test.labels}
),
)
# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r : r + 1], 1)))
print(
"Prediction: ",
sess.run(tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r : r + 1]}),
)
plt.imshow(
mnist.test.images[r : r + 1].reshape(28, 28),
cmap="Greys",
interpolation="nearest",
)
plt.show()
'''
Epoch: 0001, Cost: 2.826302672
Epoch: 0002, Cost: 1.061668952
Epoch: 0003, Cost: 0.838061315
Epoch: 0004, Cost: 0.733232745
Epoch: 0005, Cost: 0.669279885
Epoch: 0006, Cost: 0.624611836
Epoch: 0007, Cost: 0.591160344
Epoch: 0008, Cost: 0.563868987
Epoch: 0009, Cost: 0.541745171
Epoch: 0010, Cost: 0.522673578
Epoch: 0011, Cost: 0.506782325
Epoch: 0012, Cost: 0.492447643
Epoch: 0013, Cost: 0.479955837
Epoch: 0014, Cost: 0.468893674
Epoch: 0015, Cost: 0.458703488
Learning finished
Accuracy: 0.8951
''' | [
"kwj1217@gmail.com"
] | kwj1217@gmail.com |
1c8145007edb09d77a3b15de5c34d0bc86c0ba97 | f3f38df4c88ab9818bf9c8ef1fe4f7d2533d023c | /libwyag.py | b52610c487ba19ad0f1185c2d04c72475b8b4807 | [] | no_license | dipam7/own_git | 1c2c275f7873e2c09e04b5f8ca7f9ba12e82cd38 | 9c0598bd79cae02d3bb76c1d481593774f3ac1a1 | refs/heads/master | 2022-04-25T03:07:36.082607 | 2020-04-28T01:44:56 | 2020-04-28T01:44:56 | 258,418,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,967 | py | import argparse # for handling command line arguments
import collections # for container types like OrderedDict
import configparser
import hashlib # for SHA-1
import os
import re
import sys
import zlib # git compresses everything using zlib
argparser = argparse.ArgumentParser(description="The stupid content tracker")
# we don't just call git, we always call git command (init, add, clone)
# hence we need to add subparsers to our arg parser
# dest=command means the command we pass will be stored as a string
# in an attribute called command
argsubparsers = argparser.add_subparsers(title="Commands", dest="command")
argsubparsers.required = True
def main(args = sys.argv[1:]):
args = argparser.parse_args(argv)
if args.command == "add" : cmd_add(args)
elif args.command == "cat-file" : cmd_cat_file(args)
elif args.command == "checkout" : cmd_checkout(args)
elif args.command == "commit" : cmd_commit(args)
elif args.command == "hash-object" : cmd_hash_object(args)
elif args.command == "init" : cmd_init(args)
elif args.command == "log" : cmd_log(args)
elif args.command == "ls-tree" : cmd_ls-tree(args)
elif args.command == "merge" : cmd_merge(args)
elif args.command == "rebase" : cmd_rebase(args)
elif args.command == "rev-parse" : cmd_rev_parse(args)
elif args.command == "rm" : cmd_rm(args)
elif args.command == "show-ref" : cmd_show_ref(args)
elif args.command == "tag" : cmd_tag(args)
# abstraction for a git repository
class GitRepository(object):
"""A git repository"""
# a git repo contains 2 things, worktree which is the folder we want to apply version control on
# and a .git repo where git stores its own things
# the config file is stored in .git/config
worktree = None
gitdir = None
conf = None
# an additional force parameter to disable checks
def __init__(self, path, force=False):
self.worktree = path
self.gitdir = os.path.join(path, ".git")
if not (force or os.path.isdir(self.gitdir)):
raise Exception("Not a git repository %s" % path)
# Read configuration file in .git/config
self.conf = configparser.ConfigParser()
cf = repo_file(self, "config")
if cf and os.path.exists(cf):
self.conf.read([cf])
elif not force:
raise Exception("Configuration file missing")
if not force:
vers = int(self.conf.get("core", "repositoryformatversion"))
if vers != 0:
raise Exception("Unsupported repositoryformatversion %s " %vers)
# we will be doing a lot of path manipulations hence we will write some utility functions
def repo_path(repo, *path):
"""Compute path under repo's gitdir"""
return os.path.join(repo.gitdir, *path)
def repo_file(repo, *path, mkdir=False):
"""Same as repo_path, but creates dirname(*path) if absent. For example repo_file(r, "refs", "remotes", "origin")
will create .git/refs/remotes."""
if repo_dir(repo, *path[:-1], mkdir=mkdir):
return repo_path(repo, *path)
def repo_dir(repo, *path, mkdir=False):
"""Same as repo_path, but mkdir *path if absent if mkdir"""
path = repo_path(repo, *path)
if os.path.exists(path):
if (os.path.isdir(path)):
return path
else:
raise Exception("Not a directory %s" % path)
if mkdir:
os.makedirs(path)
return path
else:
return None
# to create a new git repo, we create the following paths
# .git is the git repository
# .git/objects: the object store
# .git/refs: the reference store, it contains 2 subdirs heads and tags
# .git/HEAD: a reference to the current head
# .git/config: repository's configuration file
# .git/description: repository's description file
def repo_create(path):
"""Create a new repository at path."""
repo = GitRepository(path, True)
if os.path.exists(repo.worktree):
if not os.path.isdir(repo.worktree):
raise Exception("%s is not a directory!" % path)
if os.listdir(repo.worktree):
raise Exception("%s is not empty!" % path)
else:
os.makedirs(repo.worktree)
assert(repo_dir(repo, "branches", mkdir=True)
assert(repo_dir(repo, "objects", mkdir=True)
assert(repo_dir(repo, "refs", "tags", mkdir=True)
assert(repo_dir(repo, "refs", "heads", mkdir=True)
# .git/description
with open(repo_file(repo, "description"), "w") as f:
f.write("Unnamed repository: edit this file 'description' to name the repository.\n")
# .git/HEAD
with open(repo_file(repo, "HEAD"), "w") as f:
f.write("ref: refs/heads/master\n")
with open(repo_file(repo, "config"), "w") as f:
config = repo_default_config()
config.write(f)
return repo
| [
"dipam44@gmail.com"
] | dipam44@gmail.com |
04047ac8d8a9f25d66d99cc2fac1fb7c0d56021c | f8054fae8e496cb9859a363d2571b4ac94c1d7a2 | /Python/LineGrapg.py | c23e0235ad9bd49ab90ff0f8844f609f92732e79 | [] | no_license | 22aishwaryagoyal/Python | 54f5d394a88c64496291de6de8dfa6c36d87b5f0 | 1daa508a664af423ce28d92dc7e837a68312fdf1 | refs/heads/main | 2023-07-03T20:18:00.729924 | 2021-08-19T12:15:45 | 2021-08-19T12:15:45 | 397,931,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import pandas as pd
data={'year':[1971,1981,1991,2001,2011],'pop':[50,73,81,98,111]}
df=pd.DataFrame(data,columns=['year','pop'])
df.plot(x='year',y='pop',kind='line')
| [
"noreply@github.com"
] | 22aishwaryagoyal.noreply@github.com |
971dd6b3cb304f9c7d87eacd5e07e92e1786bc2e | f8d181f293ce950f1a70bef1d023139d9e70a2c7 | /tests/contrib/operators/test_gcp_vision_operator_system.py | 2b75642d6f3a3c93aab282d82e823a4a09d01087 | [
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | Piboonsak/airflow | d242f79561d893111ad73b9e3481b9180adecfd4 | dce92a54190155898c75c0f3392d42fb28f1884a | refs/heads/master | 2020-04-29T15:16:06.779329 | 2019-03-18T05:16:14 | 2019-03-18T05:16:14 | 176,222,528 | 1 | 0 | Apache-2.0 | 2019-03-18T06:57:38 | 2019-03-18T06:57:38 | null | UTF-8 | Python | false | false | 1,397 | py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from tests.contrib.utils.base_gcp_system_test_case import DagGcpSystemTestCase, SKIP_TEST_WARNING
from tests.contrib.utils.gcp_authenticator import GCP_AI_KEY
@unittest.skipIf(DagGcpSystemTestCase.skip_check(GCP_AI_KEY), SKIP_TEST_WARNING)
class CloudVisionExampleDagsSystemTest(DagGcpSystemTestCase):
def __init__(self, method_name='runTest'):
super(CloudVisionExampleDagsSystemTest, self).__init__(
method_name, dag_id='example_gcp_vision', gcp_key=GCP_AI_KEY
)
def test_run_example_dag_function(self):
self._run_dag()
| [
"kaxilnaik@gmail.com"
] | kaxilnaik@gmail.com |
467dc99e87a1d24ba74fc20773404fbbbbab9966 | 828e541b8c218db557da35b9d9d7a66fae68485a | /answer5.py | e67977dadcd32fbf0c9906928ea5d8f8e7163257 | [] | no_license | arsh1807/Assignment-2 | 57b177e25dda5b41ce28382621c18c8731e71448 | 586c61138c4667ca651c53052170e9f2ea34edf8 | refs/heads/master | 2020-03-26T17:03:49.923704 | 2018-08-17T16:43:26 | 2018-08-17T16:43:26 | 145,139,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | #5
s="Acadview"
course="Python"
fees=5000
print ('%s' %(s))
print ('%s' %(course))
print ('%d' %(fees))
| [
"arshi.1807@gmail.com"
] | arshi.1807@gmail.com |
f00c3e6b4362f9f605b96e38965bde24e43788d5 | eb10fc180021d020b0bf1ef73a37b96661270a25 | /dbms_submissions/dbms_assignment_006/query.py | be7bc1956e3a11e80d86453f463bcfe51ec1559c | [] | no_license | Venkataramana228/dbms | efdffc66433476d69117aaf26143a87b851ac330 | 94f70053014c6f79170ae8c05b8a03f2ec315e0c | refs/heads/master | 2022-09-08T16:42:57.615502 | 2020-05-28T09:19:12 | 2020-05-28T09:19:12 | 263,019,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | #Q1="SELECT fname,lname FROM Actor a INNER JOIN Cast c on a.id==c.pid WHERE c.mid==12148;"
Q1="SELECT fname,lname FROM Actor INNER JOIN Cast on id==pid WHERE mid==12148;"
Q2="SELECT COUNT(mid) FROM Actor a INNER JOIN Cast c on a.id=c.pid WHERE fname='Harrison (I)' and lname='Ford';"
Q3="SELECT DISTINCT(pid) FROM Movie m INNER JOIN Cast c on m.id=c.mid where m.name LIKE 'Young Latin Girls%';"
Q4="SELECT COUNT(DISTINCT pid) FROM movie m INNER JOIN Cast a on m.id=a.mid where m.year BETWEEN 1990 and 2000;" | [
"ec2-user@ip-172-31-24-189.ap-southeast-1.compute.internal"
] | ec2-user@ip-172-31-24-189.ap-southeast-1.compute.internal |
21f87cfc4e94213f0943b398cd2ea89c8f3719cd | 268a5ff900334afe5a3201391857663cdda9b854 | /VectoElem.py | 5250eb381bfd722154c6b4d417290df90b45bda6 | [] | no_license | Albertillo/Mecanica-Orbital | b065f297bd84421f1e5b1471c53e45ef32060026 | 0185a2614805be0ad39a22aa19439798a4c8b3d1 | refs/heads/master | 2020-03-28T07:06:43.501474 | 2018-09-07T22:50:51 | 2018-09-07T22:50:51 | 147,881,512 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,524 | py | ###############################################################################################################################################################
# VectoElem.py: Esta función se ha desarrollado para uso académico. No se recomienda que se utilice con otros propósitos. #
# La función funciona de la siguiente manera: Se debe especificar un foco de la órbita y los vectores de posición y velocidad. La función calculará los #
# elementos orbitales: Semieje mayor "a" (km), inclinación "i" (rad), longitud del nodo ascendente "Omega" (rad), excentricidad "mode", argumento del #
# periastro "omega" (rad) y anomalía verdadera "theta" (rad). Los focos disponibles en este momento son "Sol", "Tierra" y "Jupiter", pero pueden añadirse #
# facilmente introduciendo las diferentes "mu" en el primer if. #
# Algoritmo de Howard D. Curtis, Orbital Mechanics for Engineering Students, First Edition, Elsevier Butterworth-Heinemann, Oxford (UK), 2005. #
###############################################################################################################################################################
import numpy as np
from math import *
#Elementos orbitales desde vectores de posición y velocidad.
def VectoElem(focpoint,rx,ry,rz,vx,vy,vz): #Posición en km y velocidad en km/s. focpoint solo admite "Sol", "Tierra" o "Jupiter".
if focpoint=="Sol":
mu=132712439935.5 #km^3/s^2
elif focpoint=="Tierra":
mu=398600.4 #km^3/s^2
elif focpoint=="Jupiter":
mu=126711995.4 #km^3/s^2
else:
print("ERROR, FOCO DE LA ÓRBITA NO VÁLIDO.")
r=np.array([rx,ry,rz])
v=np.array([vx,vy,vz])
modr=np.linalg.norm(r)
modv=np.linalg.norm(v)
a=mu/(2*mu/modr-modv*modv)
h=np.cross(r,v)
i=np.arccos(h[2]/np.linalg.norm(h))
N=np.cross([0,0,1],h)
if N[1]>=0:
Omega=np.arccos(N[0]/np.linalg.norm(N))
else:
Omega=2*pi-np.arccos(N[0]/np.linalg.norm(N))
vr=np.dot(r,v)/modr
e=1/mu*((modv*modv-mu/modr)*r-modr*vr*v)
mode=np.linalg.norm(e)
if mode>=0:
omega=np.arccos(np.dot(N,e)/(np.linalg.norm(N)*mode))
else:
omega=2*pi-np.arccos(np.dot(N,e)/(np.linalg.norm(N)*mode))
if vr>=0:
theta=np.arccos(np.dot(e,r)/(modr*mode))
else:
theta=2*pi-np.arccos(np.dot(e,r)/(modr*mode))
return a,i,Omega,mode,omega,theta
| [
"noreply@github.com"
] | Albertillo.noreply@github.com |
0b5713449027037d0ab2ad412af79684d0153c48 | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_vpn_site_links_operations.py | 5637893f1b1e6e52468412705bc4b471675b3407 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 8,712 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnSiteLinksOperations:
"""VpnSiteLinksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
vpn_site_name: str,
vpn_site_link_name: str,
**kwargs
) -> "_models.VpnSiteLink":
"""Retrieves the details of a VPN site link.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite.
:type vpn_site_name: str
:param vpn_site_link_name: The name of the VpnSiteLink being retrieved.
:type vpn_site_link_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnSiteLink, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.VpnSiteLink
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnSiteLink"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
'vpnSiteLinkName': self._serialize.url("vpn_site_link_name", vpn_site_link_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnSiteLink', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}/vpnSiteLinks/{vpnSiteLinkName}'} # type: ignore
def list_by_vpn_site(
self,
resource_group_name: str,
vpn_site_name: str,
**kwargs
) -> AsyncIterable["_models.ListVpnSiteLinksResult"]:
"""Lists all the vpnSiteLinks in a resource group for a vpn site.
:param resource_group_name: The resource group name of the VpnSite.
:type resource_group_name: str
:param vpn_site_name: The name of the VpnSite.
:type vpn_site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnSiteLinksResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.ListVpnSiteLinksResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSiteLinksResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_site.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnSiteName': self._serialize.url("vpn_site_name", vpn_site_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSiteLinksResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_vpn_site.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnSites/{vpnSiteName}/vpnSiteLinks'} # type: ignore
| [
"noreply@github.com"
] | manoj0806.noreply@github.com |
f1a64d7a84c54f957d4f22b4840665e842377b13 | 5c052790b46d7b8e22dcf980c52bb3044b55a18f | /CSC308 Examples/Examples/Exceptions/UniversalExcept.py | ef4798f933a46e0471c8c6f950c2d51d5bbf2896 | [] | no_license | raarnoldy23/pythonExamples | 84ae05a20d962ae6f9ef3f2048260ce654e2baaa | 3f50625198fdc51315810ffd7ff6647fbba9fd8a | refs/heads/master | 2022-06-20T04:27:11.293644 | 2020-05-11T18:08:06 | 2020-05-11T18:08:06 | 263,116,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
def main():
total = 0.0
try:
infile = open('text'.txt,'r')
for line in infile:
amount = float(line)
total += amount
infile.close()
print(format(total, ".2f"))
except:
print("AN ERROR OCCURED!!!")
main()
| [
"ARN0644@calu.edu"
] | ARN0644@calu.edu |
54a1edcce616c00b9f5f2d3535a386ba07406025 | 4b9d121c8df2d1a9985bec2315aa639976889b81 | /Test de la manette.py | 84a21e43250fb8d5b77ed31b4d06307649601b5f | [] | no_license | totowarx/proberXY | 5d5d833a81b0e96b2e732922b6ede7f7ebcb4f0a | d0b40121d2795d413893b31a39ab2879ae1af980 | refs/heads/main | 2023-06-19T18:04:12.988081 | 2021-07-20T09:30:10 | 2021-07-20T09:30:10 | 360,071,861 | 1 | 0 | null | 2021-06-25T13:21:02 | 2021-04-21T07:28:07 | G-code | UTF-8 | Python | false | false | 1,480 | py | from __future__ import print_function
import xbox
# from xbox import Joystick
# change le nombre à virgule flottante au format de chaîne -x.xxx
def fmtFloat(n):
return '{:6.3f}'.format(n)
def show(*args):
for arg in args:
print(arg, end="")
def showIf(boolean, ifTrue, ifFalse=" "):
if boolean:
show(ifTrue)
else:
show(ifFalse)
joy = xbox.Joystick()
# Boutons
print("Appuyez sur Back pour quitter")
while not joy.Back():
show("Connecté!!!")
showIf(joy.connected(), "Y", "N")
# Joystick droit
show(" Joystick droit:", fmtFloat(joy.rightX()), "/", fmtFloat(joy.rightY()))
# Joystick gauche
show(" Joystick gauche:", fmtFloat(joy.leftX()), "/", fmtFloat(joy.leftY()))
# Gachette droite
show(" Gachette droite:", fmtFloat(joy.rightTrigger()))
# Gauchette gauche
show(" Gauchette gauche:", fmtFloat(joy.leftTrigger()))
# A/B/X/Y
show(" Boutons:")
showIf(joy.A(), "A")
showIf(joy.B(), "B")
showIf(joy.X(), "X")
showIf(joy.Y(), "Y")
# Dpad U/D/L/R
show(" Croix directionnel:")
showIf(joy.dpadUp(), "U")
showIf(joy.dpadDown(), "D")
showIf(joy.dpadLeft(), "L")
showIf(joy.dpadRight(), "R")
# Bumper gauche
show(" Bumper gauche:")
showIf(joy.leftBumper(), "LB")
# Bumper gauche
show(" Bumper droit:")
showIf(joy.rightBumper(), "RB")
# Curseur en debut de ligne : affichage unique
show(chr(13))
# Fin
joy.close()
| [
"noreply@github.com"
] | totowarx.noreply@github.com |
13b67511127b138050e2016acbbbb75feb3e8ca5 | f153a36b5e211690ded1af00c0160eebd2add1ca | /PROGRAMMERS/Level 1/두 정수 사이의 합.py | 70d41e935d84b333409311cfd37d198781261bca | [] | no_license | KyungHoon0126/Algorithm | 47551bbe22c70eac04ed518c2c9c1f65d48ee5b9 | 8369f0e1103d282cdc138666add65dd0ca926e70 | refs/heads/master | 2021-08-17T08:32:09.970502 | 2021-06-22T12:52:22 | 2021-06-22T12:52:22 | 214,456,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | def solution(a, b):
answer = 0
first = 0
second = 0
first = a if a < b else b
second = b if b > a else a
print(first, second)
for i in range(first, second + 1):
answer += i
return answer | [
"kidi052812@gmail.com"
] | kidi052812@gmail.com |
063873b31a1e996e64f6e6dbfa4b16583a381395 | 50784449f366b224f9e652ab49789fe7c148b4f5 | /environments/Env_RoleChangingZeroSum.py | ccd1da607ae956d4e8fe7fe91d0421252e6a3270 | [
"BSD-2-Clause"
] | permissive | wbarfuss/POLD | 655c5e3a499ed2b338d8826c52314a749f2d3175 | 0c12d3f937831770efa83e20d72c37df60c96882 | refs/heads/main | 2023-04-16T22:37:04.126714 | 2022-04-11T08:52:38 | 2022-04-11T08:52:38 | 470,242,430 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,230 | py | """
The 2-state Matching Pennies according to HennesEtAl2010
"""
# import sys
# from pathlib import Path
# base_dir = Path(__file__).resolve().parent.parent.parent
# sys.path.append(str(base_dir))
# from LearningDynamics.Envs.Env_Null import NullEnv
from .Env_Null import NullEnv
import numpy as np
class RoleChangingZeroSum(NullEnv):
def __init__(self, obsnoise):
if not hasattr(obsnoise, "__iter__"):
self.noise = np.array([obsnoise, obsnoise])
else:
assert len(obsnoise) == 2
self.noise = np.array(obsnoise)
assert min(self.noise) >= 0.0
self.N = 2
self.M = len(self.actions())
self.Z = len(self.states())
self.Q = len(self.observations())
# --
self.T = self.TransitionTensor()
self.R = self.RewardTensor()
self.state = 1 # inital state
def actions(self):
acts = ['a', 'b']
return acts
def states(self):
states = ['X', 'Y']
return states
def observations(self):
if not np.all(self.noise > 0.5):
obs = ['x', 'y']
else:
obs = ['z']
return obs
def FinalStates(self):
return [0, 0]
def TransitionTensor(self):
"""Get the Transition Tensor."""
Tsas = np.ones((2, 2, 2, 2)) * (-1)
#investiagte
# T1 = np.array([[1.0, 1.0],
# [0.0, 0.0]])
# T2 = np.array([[0.0, 0.0],
# [1.0, 1.0]])
# T1 = np.array([[0.0, 1.0], # from state 0 to state 1
# [1.0, 0.0]])
# T2 = np.array([[1.0, 0.0], # from state 1 to state 0
# [0.0, 1.0]])
T1 = np.array([[1.0, 1.0], # from state 0 to state 1
[0.0, 0.0]])
T2 = np.array([[0.0, 0.0], # from state 1 to state 0
[1.0, 1.0]])
Tsas[0, :, :, 1] = T1
Tsas[0, :, :, 0] = 1-T1
Tsas[1, :, :, 0] = T2
Tsas[1, :, :, 1] = 1-T2
return Tsas
def RewardTensor(self):
"""Get the Reward Tensor R[i,s,a1,...,aN,s']."""
R = np.zeros((2, 2, 2, 2, 2))
R[0, 0, :, :, 0] = [[1 , 0 ],
[0 , 1 ]]
R[1, 0, :, :, 0] = [[0 , 1 ],
[1 , 0 ]]
R[:, 0, :, :, 1] = R[:, 0, :, :, 0]
R[0, 1, :, :, 1] = [[0 , 1 ],
[1 , 0 ]]
R[1, 1, :, :, 1] = [[1 , 0 ],
[0 , 1 ]]
R[:, 1, :, :, 0] = R[:, 1, :, :, 1]
return R
def ObservationTensor(self):
if np.all(self.noise > 0.5):
#self.Q = 1
Oiso = np.ones((self.N, self.Z, self.Q))
else:
#self.Q = self.Z
Oiso = np.zeros((self.N, self.Z, self.Q))
for i in range(self.N):
Oiso[i,0,0] = 1 - min(self.noise[i], 0.5)
Oiso[i,0,1] = 0 + min(self.noise[i], 0.5)
Oiso[i,1,0] = 0 + min(self.noise[i], 0.5)
Oiso[i,1,1] = 1 - min(self.noise[i], 0.5)
return Oiso | [
"noreply@github.com"
] | wbarfuss.noreply@github.com |
2e21fbc4566ec48ec6d3e36c44da1af16c81e5ea | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc101/A/4927211.py | 4e4246f50058560e91aa73a6173b4e550e2b0b90 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | s = list(input())
print(s.count('+')-s.count('-')) | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
cfb58a7a49bde127229470f43e7c101d5f9d7168 | ba1ddbc6b364dc2fd55f83ea807b50bf45ce3d1a | /PageObject/VivaVideo/home.py | 23b61b58c20490654f07d632cf8e5bfc9c4414a4 | [] | no_license | zlmone/ATX-UI | 81c58fa722586fe6fb20cd39e3a85afa6057db93 | 44bfa67ed2274c2eeb36f905d5bd482fd96a6707 | refs/heads/master | 2022-05-28T09:03:40.380824 | 2020-05-06T11:39:39 | 2020-05-06T11:39:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,770 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from Public.Decorator import *
from Public.Test_data import *
log = Log()
class home_Page(BasePage):
'''创作页首页'''
# @teststep
# def wait_page(self):
# try:
# if self.d(resourceId="com.quvideo.xiaoying:id/iv_vip_home8_cut").wait(timeout=10):
# pass
# else:
# raise Exception('Not in Creation_Page')
# except Exception:
# raise Exception('Not in Creation_Page')
# @teststep
# def close_float_imag(self):
# if self.d(resourceId="com.quvideo.xiaoying:id/float_imageview").wait(timeout=5):
# log.i('关闭创作页浮窗图片')
# self.d(resourceId="com.quvideo.xiaoying:id/float_imageview").child(className="android.widget.ImageView",
# instance=1).click_exists(timeout=3)
# else:
# log.i('没有创作页浮窗图片,跳过')
# pass
@teststep
def close_popup(self):
log.i('关闭首页家庭政策弹窗')
try:
self.d(resourceId="com.quvideo.xiaoying:id/iv_close").click(3)
except:
log.i('弹窗未弹出或者已消除')
pass
@teststep
def close_ad_popup(self,timeout = 3):
log.i('关闭广告弹窗 ')
self.d(resourceId="com.quvideo.xiaoying:id/tt_insert_dislike_icon_img").click_exists(timeout=timeout)
@teststep
def click_template_btn(self):
log.i('点击底部拍同款按钮')
self.d(resourceId="com.quvideo.xiaoying:id/tv_home_tab", text="拍同款").click()
@teststep
def click_home_btn(self):
log.i('点击底部剪辑按钮')
self.d(resourceId="com.quvideo.xiaoying:id/tv_home_tab", text="剪辑").click()
@teststep
def click_me_btn(self):
log.i('点击底部我按钮')
self.d(resourceId="com.quvideo.xiaoying:id/tv_home_tab", text="我").click()
@teststep
def click_vip_btn(self):
log.i('点击VIP按钮')
self.d(resourceId="com.quvideo.xiaoying:id/iv_vip_home8_cut").click()
@teststep
def click_edit_btn(self):
log.i('点击视频剪辑')
self.d(resourceId="com.quvideo.xiaoying:id/iv_edit_home8_cut").click()
try:
self.d(resourceId="com.quvideo.xiaoying:id/imgbtn_help_exit").implicitly_wait(3).click()
except:
log.i("立刻升级页面已消除")
pass
@teststep
def click_mv_btn(self):
log.i('点击相册MV')
self.d(resourceId="com.quvideo.xiaoying:id/iv_mv_home8_cut").click()
@teststep
def click_draft_btn(self):
log.i('点击草稿')
self.d(resourceId="com.quvideo.xiaoying:id/tv_draft_icon_home8_cut",text= '草稿').click()
@teststep
def click_home_more(self):
log.i('点击素材中心查看更多按钮')
self.d(text="查看更多").click()
@teststep
def click_camera_btn(self):
log.i('点击拍摄按钮')
self.watch_device('取消|允许|始终允许')
self.d(resourceId="com.quvideo.xiaoying:id/ll_eight4_home8_cut").click()
time.sleep(5) # 等待相机加载完成
self.d.click(0.5, 0.5) # 点击对焦,取消弹出的滤镜
@teststep
def click_sec_addText(self):
log.i('点击次要功能位加字幕')
self.d(resourceId="com.quvideo.xiaoying:id/ll_eight0_home8_cut").click()
@teststep
def click_sec_Mixer(self):
log.i('点击次要功能位画中画')
self.d(resourceId="com.quvideo.xiaoying:id/ll_eight1_home8_cut").click()
@teststep
def click_sec_Mosaic(self):
log.i('点击次要功能位马赛克')
self.d(resourceId="com.quvideo.xiaoying:id/ll_eight2_home8_cut").click()\
@teststep
def click_sec_FAQ(self):
log.i('点击次要功能位新手教程')
self.d(resourceId="com.quvideo.xiaoying:id/ll_eight3_home8_cut").click()
@teststep
def click_sec_Capture(self):
log.i('点击次要功能位拍摄')
self.d(resourceId="com.quvideo.xiaoying:id/ll_eight4_home8_cut").click()
@teststep
def click_sec_musicExtraction(self):
log.i('点击次要功能位音频提取')
self.d(resourceId="com.quvideo.xiaoying:id/ll_eight5_home8_cut").click()
# @teststep
# def click_view_pager_btn(self, text):
# '''
# 次要功能位置,各个按钮的点击操作
# :param text: 次要功能位置的text名称
# :return:
# '''
# log.i('查找次要功能位 %s 并进行点击操作'% text)
# if self.d(text=text).wait(timeout=1):
# self.d(text=text).click()
# return True
# else:
# try:
# self.d(resourceId="com.quvideo.xiaoying:id/view_pager", scrollable=True).scroll.horiz.to(text=text)
# self.d(text=text).click()
# return True
# except UiObjectNotFoundError:
# log.i("找不到控件-->%s" % text)
# return False
# @teststep
# def select_studio_view(self, inst=1):
# '''
# 点击我的工作室的view 默认第一个
# :param inst: 0为第一个view 以此类推 1、2、3--> 一二三
# '''
# log.i('点击我的工作室第%s个草稿' % inst)
# self.d(resourceId="com.quvideo.xiaoying:id/layout_draft_item").child(className='android.widget.ImageView')[inst-1].click()
if __name__ == '__main__':
from Public.Log import Log
Log().set_logger('udid', './log.log')
BasePage().set_driver(None)
home_Page().close_ad_popup()
| [
"lixin.zhu@quvideo.com"
] | lixin.zhu@quvideo.com |
aa2a3f50015d27ba5e13535865c82d7ddc196297 | 6a200b3c86329d501c9c8082fb69a512fa69228b | /tasksupervisor/endpoint/fiware_orion/orion_interface.py | de408a5dc5b0878f8f6a886bb523c3fd1e473f09 | [
"Apache-2.0"
] | permissive | iml130/mod.sw.tp.ts | 09792a33e4ec4e53ac89d59aa19cb4dae7762b90 | 4cda3ef0d3791eb204d5510631fdb9ec7ec57aab | refs/heads/develop | 2023-04-17T04:07:15.909996 | 2021-04-26T22:00:14 | 2021-04-26T22:00:14 | 335,564,272 | 0 | 1 | Apache-2.0 | 2021-04-26T22:00:15 | 2021-02-03T09:00:35 | Python | UTF-8 | Python | false | false | 6,795 | py | """ Contains OrionInterface class """
import threading
import logging
# import local libs
import tasksupervisor.my_globals as my_globals
from tasksupervisor.helpers import servercheck
from tasksupervisor.helpers.config_reader import ConfigReader
from tasksupervisor.endpoint.fiware_orion.flask.flask_setup import create_flask_app, FI_SUB_ID, FI_DATA
from tasksupervisor.endpoint.fiware_orion.entities.materialflow import Materialflow
from tasksupervisor.endpoint.fiware_orion.entities.sensor_agent_node import SensorAgent
from tasksupervisor.endpoint.fiware_orion.entities.materialflow_specification_state import MaterialflowSpecificationState
from tasksupervisor.endpoint.fiware_orion.entities.transport_order_update import TransportOrderUpdate
from tasksupervisor.endpoint.fiware_orion.entities.tasksupervisor_info import TaskSupervisorInfo
from tasksupervisor.endpoint.fiware_orion.entities.materialflow_update import MaterialflowUpdate
from tasksupervisor.endpoint.broker_interface import BrokerInterface
from tasksupervisor.endpoint.fiware_orion.contextbrokerhandler import ContextBrokerHandler
logger = logging.getLogger(__name__)
def callback_flask_server(flask_app):
""" Callback method to create for the flask server """
logger.info("Starting thread_flask_server")
flask_app.run(host=my_globals.parsed_config_file.FLASK_HOST,
port=my_globals.parsed_config_file.TASKPLANNER_PORT,
threaded=True, use_reloader=False, debug=True)
class OrionInterface(BrokerInterface):
""" Implements the BrokerInterface for the Orion Context Broker """
def __init__(self, broker_connector, broker_name = ""):
BrokerInterface.__init__(self, broker_connector)
self.subscription_dict = {}
self.flask_app = create_flask_app(self)
self.lock = threading.Lock()
config_file_path = "./tasksupervisor/config.ini"
try:
parsed_config_file = ConfigReader(config_file_path)
parsed_config_file.is_valid()
except Exception:
raise Exception("Error while parsing Fiware config file")
self.context_broker_handler = ContextBrokerHandler(parsed_config_file.get_fiware_server_address())
logger.info("Setting up thread_check_if_server_is_up")
self.thread_check_if_server_is_up = threading.Thread(name="checkServerRunning",
target=servercheck.webserver_is_running,
args=("localhost", my_globals.parsed_config_file.TASKPLANNER_PORT,))
logger.info("Setting up thread_flask_server")
self.thread_flask_server = threading.Thread(name="callback_flask_server", target=callback_flask_server,
args=(self.flask_app,))
def start_interface(self):
self.thread_check_if_server_is_up.start()
self.thread_flask_server.start()
logger.info("Starting Flask and wait")
self.thread_check_if_server_is_up.join()
logger.info("Flask is running")
def subscribe(self, topic, opt_data=None, generic=False):
with self.lock:
class_name = str(topic.__class__.__name__)
description = class_name + " subscription"
notification = my_globals.parsed_config_file.get_taskplanner_address() + "/" + class_name.lower()
if opt_data:
description = opt_data.description
if class_name == "SensorAgent":
notification = my_globals.parsed_config_file.get_taskplanner_address() + "/san/" + opt_data.to_id
entities = [{"id": topic.id, "type": class_name}]
sub_id = self.context_broker_handler.subscribe_to_entity(description, entities,
notification, generic=generic)
self.subscription_dict[sub_id] = class_name
return sub_id
def create(self, entity):
fiware_entity = self.create_fiware_entity(entity)
self.context_broker_handler.create_entity(fiware_entity)
def update(self, entity):
fiware_entity = self.create_fiware_entity(entity)
fiware_entity.update_time()
self.context_broker_handler.update_entity(fiware_entity)
def delete(self, id_, delete_entity=True):
with self.lock:
if delete_entity:
self.context_broker_handler.delete_entity(id_)
else:
self.context_broker_handler.delete_subscription_by_id(id_)
def create_fiware_entity(self, entity):
    """Convert an API object into its matching Fiware entity.

    Dispatches on the runtime class name of *entity*; raises ValueError
    for types that have no Fiware counterpart.
    """
    class_name = str(entity.__class__.__name__)
    if class_name == "MaterialflowSpecificationState":
        return MaterialflowSpecificationState.from_api_object(entity)
    if class_name == "MaterialflowUpdate":
        return MaterialflowUpdate.from_api_object(entity)
    if class_name == "TaskSupervisorInfo":
        return TaskSupervisorInfo.from_api_object(entity)
    if class_name == "TransportOrderUpdate":
        return TransportOrderUpdate.from_api_object(entity)
    raise ValueError("Creation of fiware entity for unknown class was requested: {}".format(class_name))
def retreive(self, json_requests):
    """Dispatch notification payloads to the broker connector.

    The subscription id inside *json_requests* selects the entity type;
    unknown subscription ids raise ValueError.  Entity types other than
    Materialflow/SensorAgent are silently ignored (matching the original
    behaviour).
    """
    with self.lock:
        subscription_id = json_requests[FI_SUB_ID]
        if subscription_id not in self.subscription_dict:
            raise ValueError("Data from an unknown subscription id was received: {}".format(subscription_id))
        entity_type = self.subscription_dict[subscription_id]
        if entity_type == "Materialflow":
            # A notification may carry several entities; handle each one.
            for payload in json_requests[FI_DATA]:
                api_obj = Materialflow.CreateObjectFromJson(payload).to_api_object()
                self.broker_connector.retreive(api_obj, self)
        elif entity_type == "SensorAgent":
            for payload in json_requests[FI_DATA]:
                api_obj = SensorAgent.create_object_from_json(payload).to_api_object()
                self.broker_connector.retreive(api_obj, self)
def shutdown(self):
    """Shut down the underlying context broker handler."""
    self.context_broker_handler.shutdown()
| [
"noreply@github.com"
] | iml130.noreply@github.com |
2d9c96d214896f6dc2b7daa6ac2d6b09a83c5fe6 | 00acd54857f007df1ab2cacfaf48954d7d8a1cd2 | /cp_2/sporysh_fb-95_cp2/lab2_encrypt.py | 1d270d92d444d9f042abce5c435ab6df7901af6e | [] | no_license | Zhek0nBek0n/fb-labs-2021 | 2cc4a20680148a6e6e296f3c37034da8d23d8d5e | 2c14c112ee23cc617227db41283799c3fb9271ad | refs/heads/master | 2023-09-05T20:27:27.289457 | 2021-11-03T15:25:26 | 2021-11-03T15:25:26 | 413,180,574 | 0 | 0 | null | 2021-10-03T19:46:06 | 2021-10-03T19:46:05 | null | UTF-8 | Python | false | false | 965 | py | import numpy as np
import time
import matplotlib.pyplot as plt
import os
from Decryptor import lab_2_Decryptor as L2D
from Cryptor import lab_2_Cryptor as L2C
if __name__ == '__main__':
    start_time = time.time()  # kept from the original script; never reported
    # Cipher built over the reference corpus.
    cryptor = L2C("./voina_i_mir_small.txt")
    # Test keys, space-separated inside one implicitly concatenated literal.
    key_blob = "да нет киви жалко " \
               "приветкиви " \
               "приветжалко " \
               "приветкивида " \
               "приветжалкода " \
               "приветжалконет " \
               "приветкивижалко " \
               "приветкивикивида " \
               "приветкивикивинет " \
               "приветкивижалконет " \
               "приветжалкожалконет " \
               "приветивижалкокивида"
    for key in key_blob.split(" "):
        encrypted = cryptor.encrypt(key)
        # Persist the ciphertext for this key, then echo a short preview.
        with open(f"./examples/{key}.txt", 'wb') as out_file:
            out_file.write(cryptor.toStr(encrypted).encode("utf-8"))
        print(cryptor.toStr(encrypted)[:10])
| [
"sporyshzhenya@gmail.com"
] | sporyshzhenya@gmail.com |
882b8ffb7a237ed542db1f3b35ec88de5b26db07 | 223a8859b801df7603f239a0ea3bdda0446591ce | /Corrector.py | 081999aa39ec3369043fa86d70f3a53a69e3a36b | [] | no_license | nthuepl/On-Pen-handwritten-Word-Recognition-Using-Long-Short-Term-Memory-Model | bc3db0cb254b818efbac543314f4e6175bf69766 | 7cb597aa4dc749c03115421570472e43865a1583 | refs/heads/master | 2020-03-27T13:27:53.992505 | 2018-09-10T10:30:54 | 2018-09-10T10:30:54 | 146,611,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,681 | py | from spell import correction
from Levenshtein import levenshtein, index
import numpy as np
class Lexicon():
    """Length-indexed vocabulary loaded from ``20k.txt``.

    Words are grouped by length (1..17) and ``page_table[n]`` holds the
    offset in ``word_list`` where words of length ``n`` begin.  The
    instance also carries the weighted-Levenshtein cost maps used by
    :meth:`leven_fit`.
    """

    def __init__(self):
        # Load the vocabulary, one word per line (strip the trailing newline).
        # Bug fix: use a context manager so the handle is closed even on error.
        vocab_list = []
        with open('20k.txt', 'r') as fp:
            for line in fp.readlines():
                vocab_list.append(line[:-1])
        self.word_list = []
        self.page_table = [0 for i in range(20)]
        self.cnt = -1  # iterator cursor; -1 means "before the first word"
        for length in range(1, 18):
            self.page_table[length] = len(self.word_list)
            # Words of this exact length, sorted case-insensitively.
            word_n = sorted((w for w in vocab_list if len(w) == length), key=str.upper)
            self.word_list += word_n
        # Hand-tuned costs for visually confusable handwriting pairs.
        au_w, bp_w, ce_w, fl_w, hn_w, rv_w = (0.136, 0.695, 0.628,
                                              0.501, 0.917, 0.139)
        sub_w, del_w, ins_w = (1.389, 1.925, 1.954)
        self.del_map = np.array([del_w for i in range(26)])
        self.ins_map = np.array([ins_w for i in range(26)])
        self.sub_map = np.array([[sub_w for i in range(26)] for j in range(26)])
        self.sub_map[index('a'), index('u')] = au_w
        self.sub_map[index('p'), index('b')] = bp_w
        self.sub_map[index('e'), index('c')] = ce_w
        self.sub_map[index('l'), index('f')] = fl_w
        self.sub_map[index('r'), index('v')] = rv_w
        self.sub_map[index('n'), index('h')] = hn_w

    def __iter__(self):
        # NOTE(review): the cursor is not reset here, so an instance can only
        # be iterated once — presumably intentional; confirm with callers.
        return self

    def __next__(self):
        # Bug fix: the original indexed with the bare name ``cnt`` (NameError)
        # and tested the bound before advancing, which would also run past the
        # last word.  Advance first, then check.
        self.cnt += 1
        if self.cnt >= len(self.word_list):
            raise StopIteration
        return str(self.word_list[self.cnt])

    def __str__(self):
        return "Lexicon:\n\t"+str(len(self.word_list))+" words\n\t"+"page table: "+str(self.page_table)

    def page(self, i):
        """Return the word stored at index *i* of the word list."""
        return str(self.word_list[i])

    def index(self, i):
        """Return the ``word_list`` offset where words of length *i* start."""
        return int(self.page_table[i])

    def tolist(self):
        """Return a shallow copy of the full word list."""
        return list(self.word_list)

    def leven_fit(self, word, area=None):
        """Return the word with the smallest weighted Levenshtein distance to *word*.

        *area* is an optional ``(head, tail)`` half-open slice of
        ``word_list`` restricting the search.
        """
        answer = ''
        MIN = 20  # any real distance in this lexicon is smaller than this
        # Bug fix: the default upper bound was ``len(...) - 1``, which silently
        # excluded the last word from the search.
        head, tail = 0, len(self.word_list)
        if area is not None:
            head, tail = area
        for w in self.word_list[head:tail]:
            d = levenshtein(word, w, insert_costs=self.ins_map, delete_costs=self.del_map, substitute_costs=self.sub_map)
            if d < MIN:
                MIN = d
                answer = w
                if d == 0:
                    break  # exact match; no better candidate exists
        return answer
class Corrector():
    """Spelling corrector: probabilistic model first, weighted-Levenshtein fallback."""

    def __init__(self, lexicon=None):
        # Idiom fix: compare against None with ``is`` (original used ``==``).
        if lexicon is None:
            lexicon = Lexicon()
        self.lex = lexicon
        print(self.lex)
        # Per-character mis-recognition rates fed to the spell model.
        self.MSR = {'r': 0.09, 'other': 0.514, 'n': 0.196, 'e': 0.139, 'l': 0.208, 'p': 0.227, 'a': 0.142}
        self.dw = 0.459  # distance weight for the spell model

    def correction(self, word):
        """Return the best correction for *word*.

        Tries the probabilistic ``spell.correction`` first; when it yields
        nothing, falls back to a weighted-Levenshtein search over lexicon
        words of length ``len(word)`` and ``len(word) + 1``.
        """
        tmp = str(word)
        word = correction(word, MSR=self.MSR, distance_weight=self.dw)
        if word is None:
            word = self.lex.leven_fit(tmp, area=(self.lex.index(len(tmp)), self.lex.index(len(tmp) + 2)))
        return word
if __name__ == '__main__':
    # Smoke test: building a Corrector with no lexicon loads 20k.txt from disk.
    corrector = Corrector()
    print(corrector.lex)
| [
"gn02336853@gmail.com"
] | gn02336853@gmail.com |
fe1cc4e8b6b8201c08c79ccc09f50d705606c468 | 69e7dca194ab7b190e1a72928e28aa3821b47cfb | /Concepts/Strings/49.py | 579955f18e9b68d977d8b50ba8f8ff8b211b3947 | [] | no_license | Dinesh94Singh/PythonArchivedSolutions | a392891b431d47de0d5f606f7342a11b3127df4d | 80cca595dc688ca67c1ebb45b339e724ec09c374 | refs/heads/master | 2023-06-14T14:56:44.470466 | 2021-07-11T06:07:38 | 2021-07-11T06:07:38 | 384,871,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | """
49. Group Anagrams
Given an array of strings, group anagrams together.
Example:
Input: ["eat", "tea", "tan", "ate", "nat", "bat"],
Output:
[
["ate","eat","tea"],
["nat","tan"],
["bat"]
]
Note:
All inputs will be in lowercase.
The order of your output does not matter.
"""
import collections
def group_anagrams(strs):
dic = collections.defaultdict(list)
ans = []
for each_word in strs:
dic[tuple(sorted(each_word))].append(each_word)
for key, values in dic.items():
ans.append(values)
return ans
group_anagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
| [
"dinesh94singh@gmail.com"
] | dinesh94singh@gmail.com |
31475d7e6cd976e2ad2ea6c3ecd3f56b4ae48fbc | 326a026bcc6bad962159677110d78d3d836532ed | /markote/api/notebook.py | e05023873ff40b79701ec2540061e8c2d53ca0e2 | [
"MIT"
] | permissive | Frederick-S/markote | f63a5007fd0a70ce4b3ae9d03425ae9f9c8b54f3 | 095dabe3da83b5d8809593758661eb78fa527f49 | refs/heads/master | 2023-03-04T16:50:30.541147 | 2022-08-12T01:24:43 | 2022-08-12T01:24:43 | 110,396,888 | 9 | 2 | MIT | 2023-03-04T13:11:38 | 2017-11-12T02:04:32 | Vue | UTF-8 | Python | false | false | 870 | py | from flask import jsonify, request
from markote.api.api_blueprint import api_blueprint
from markote.oauth import oauth
@api_blueprint.route('/notebooks', methods=['GET'])
def get_notebooks():
oauth_client = oauth.microsoft_graph
response = oauth_client.get(
'me/onenote/notebooks?$select=id,displayName')
return jsonify(response.json()), response.status_code
@api_blueprint.route('/notebooks/<notebook_id>/sections', methods=['GET'])
def get_sections(notebook_id):
name = request.args.get('name')
query_filter = '$filter=displayName eq \'{0}\''.format(name) \
if name else ''
oauth_client = oauth.microsoft_graph
response = oauth_client.get(
'me/onenote/notebooks/{0}/sections?$select=id,displayName&{1}'.format(
notebook_id, query_filter))
return jsonify(response.json()), response.status_code
| [
"mao_xiaodan@hotmail.com"
] | mao_xiaodan@hotmail.com |
85307e709e74ac64e17f146241b6cfd3567f4f85 | cf3f8baeb9e431ac9c070dbfa2cf28e748dc40a9 | /Database/JsonToIPC.py | d4f1b93c4a808463082bd56f4e9b0c74d72b6523 | [] | no_license | ericstoneking/42 | b2eb7cb0a2c8c87b092a0b64bd9204f79013a98c | d4547dae44270876657aec009fe59980082ed999 | refs/heads/master | 2023-09-03T17:34:48.025904 | 2023-05-26T12:58:46 | 2023-05-26T12:58:46 | 32,602,944 | 198 | 92 | null | 2023-03-30T07:25:21 | 2015-03-20T19:30:47 | C | UTF-8 | Python | false | false | 36,023 | py | import sys
import os
import re # Regular Expressions
import json
# Set up globals
Prog = ""
Verb = ""
Pipe = ""
outfile = 0
EchoString = ""
ParmPass = 0
########################################################################
def WriteProlog():
global Prog, Verb, Pipe, outfile, EchoString
outfile.write("/* Note: This file was autogenerated by Database/JsonToIPC.py */\n")
outfile.write("/* See Database/Readme.txt for details. */\n")
outfile.write("/* If you are hand-modifying this file, keep in mind that */\n")
outfile.write("/* your work will be lost if you run JsonToIPC.py again. */\n\n")
if Prog == "Sim":
outfile.write("#include \"42.h\"\n\n")
elif Prog == "App":
outfile.write("#include \"Ac.h\"\n\n")
#endif
if Pipe == "Gmsec":
outfile.write("#include \"gmseckit.h\"\n")
#endif
outfile.write("/**********************************************************************/\n")
if Pipe == "Socket":
if Prog == "Sim":
outfile.write("void WriteToSocket(SOCKET Socket, char **Prefix, long Nprefix, long EchoEnabled)\n")
else:
outfile.write("void WriteToSocket(SOCKET Socket, struct AcType *AC)\n")
#endif
elif Pipe == "Gmsec":
if Prog == "Sim":
outfile.write("void WriteToGmsec(GMSEC_ConnectionMgr ConnMgr,GMSEC_Status status, char **Prefix, long Nprefix, long EchoEnabled)\n")
else:
outfile.write("void WriteToGmsec(GMSEC_ConnectionMgr ConnMgr,GMSEC_Status status, struct AcType *AC)\n")
#endif
elif Pipe == "File":
if Prog == "Sim":
outfile.write("void WriteToFile(FILE *StateFile, char **Prefix, long Nprefix, long EchoEnabled)\n")
else:
outfile.write("void WriteToFile(FILE *StateFile, struct AcType *AC)\n")
#endif
#endif
outfile.write("{\n\n")
outfile.write(" long Isc,Iorb,Iw,Ipfx,i;\n")
if Pipe == "Socket":
outfile.write(" char AckMsg[5] = \"Ack\\n\";\n")
outfile.write(" char Msg[16384];\n")
outfile.write(" long MsgLen = 0;\n")
outfile.write(" long LineLen;\n")
elif Pipe == "Gmsec":
outfile.write(" char Header[40] = \"GMSEC.42.TX.MSG.LOG\";\n")
outfile.write(" GMSEC_Message AckMsg;\n")
outfile.write(" char Msg[16384];\n")
outfile.write(" long MsgLen = 0;\n")
outfile.write(" long LineLen;\n")
#endif
outfile.write(" long PfxLen;\n")
outfile.write(" char line[512];\n\n")
if Prog == "App":
outfile.write(" Isc = AC->ID;\n\n")
#endif
if Prog == "Sim":
outfile.write(" sprintf(line,\"TIME %ld-%03ld-%02ld:%02ld:%012.9lf\\n\",\n")
outfile.write(" UTC.Year,UTC.doy,UTC.Hour,UTC.Minute,UTC.Second);\n")
if Pipe == "Socket":
outfile.write(" LineLen = strlen(line);\n")
outfile.write(" memcpy(&Msg[MsgLen],line,LineLen);\n")
outfile.write(" MsgLen += LineLen;\n")
elif Pipe == "Gmsec":
outfile.write(" LineLen = strlen(line);\n")
outfile.write(" memcpy(&Msg[MsgLen],line,LineLen);\n")
outfile.write(" MsgLen += LineLen;\n")
elif Pipe == "File":
outfile.write(" fprintf(StateFile,\"%s\",line);\n")
#endif
outfile.write(" if ("+EchoString+") printf(\"%s\",line);\n\n")
outfile.write(" for(Ipfx=0;Ipfx<Nprefix;Ipfx++) {\n")
outfile.write(" PfxLen = strlen(Prefix[Ipfx]);\n\n")
#endif
########################################################################
def ReadProlog():
    """Emit the prolog of the generated ReadFrom<Pipe>() C function.

    Writes the autogeneration banner, the program/transport #includes, the
    function signature for the selected Pipe, the local C declarations, the
    per-transport message-receive/line-splitting loop opening, and the TIME
    line parser.
    """
    global Prog, Verb, Pipe, outfile, EchoString
    outfile.write("/* Note: This file was autogenerated by Database/JsonToIPC.py */\n")
    outfile.write("/* See Database/Readme.txt for details. */\n")
    outfile.write("/* If you are hand-modifying this file, keep in mind that */\n")
    outfile.write("/* your work will be lost if you run JsonToIPC.py again. */\n\n")
    if Prog == "Sim":
        outfile.write("#include \"42.h\"\n\n")
    elif Prog == "App":
        outfile.write("#include \"Ac.h\"\n\n")
    #endif
    if Pipe == "Gmsec":
        outfile.write("#include \"gmseckit.h\"\n")
    #endif
    outfile.write("/**********************************************************************/\n")
    # Function signature depends on both the transport and the program.
    if Pipe == "Socket":
        if Prog == "Sim":
            outfile.write("void ReadFromSocket(SOCKET Socket, long EchoEnabled)\n")
        else:
            outfile.write("void ReadFromSocket(SOCKET Socket, struct AcType *AC)\n")
        #endif
    elif Pipe == "Gmsec":
        if Prog == "Sim":
            outfile.write("void ReadFromGmsec(GMSEC_ConnectionMgr ConnMgr,GMSEC_Status status, long EchoEnabled)\n")
        else:
            outfile.write("void ReadFromGmsec(GMSEC_ConnectionMgr ConnMgr,GMSEC_Status status,struct AcType *AC)\n")
    elif Pipe == "File":
        if Prog == "Sim":
            outfile.write("void ReadFromFile(FILE *StateFile, long EchoEnabled)\n")
        else:
            outfile.write("void ReadFromFile(FILE *StateFile, struct AcType *AC)\n")
        #endif
    elif Pipe == "Cmd":
        outfile.write("void ReadFromCmd(void)\n");
    #endif
    outfile.write("{\n\n")
    # Local declarations used by the generated read code.
    outfile.write("      struct SCType *S;\n")
    outfile.write("      struct OrbitType *O;\n")
    outfile.write("      struct DynType *D;\n")
    outfile.write("      long Isc,Iorb,Iw,i;\n")
    outfile.write("      char line[512] = \"Blank\";\n")
    outfile.write("      long RequestTimeRefresh = 0;\n")
    outfile.write("      long Done;\n")
    if Pipe == "Gmsec":
        outfile.write("      char Msg[16384];\n")
        outfile.write("      GMSEC_Message GsMsg;\n")
        outfile.write("      GMSEC_Field Field;\n")
        outfile.write("      char AckMsg[5] = \"Ack\\n\";\n")
        outfile.write("      long Imsg,Iline;\n")
    elif Pipe == "Socket":
        outfile.write("      char Msg[16384];\n")
        outfile.write("      char AckMsg[5] = \"Ack\\n\";\n")
        outfile.write("      long Imsg,Iline;\n")
        outfile.write("      int NumBytes;\n")
    #endif
    outfile.write("      double DbleVal[30];\n")
    outfile.write("      long LongVal[30];\n\n")
    outfile.write("      long Year,doy,Hour,Minute;\n")
    outfile.write("      double Second;\n")
    if Prog == "App":
        outfile.write("      long Month,Day;\n")
    #endif
    # Transport-specific receive logic and line-splitting loop opening.
    if Pipe == "Socket":
        outfile.write("      \n")
        outfile.write("      memset(Msg,'\\0',16384);\n")
        outfile.write("      NumBytes = recv(Socket,Msg,16384,0);\n")
        outfile.write("      if (NumBytes <= 0) return; /* Bail out if no message */\n\n")
        outfile.write("      Done = 0;\n")
        outfile.write("      Imsg = 0;\n")
        outfile.write("      while(!Done) {\n")
        outfile.write("         /* Parse lines from Msg, newline-delimited */\n")
        outfile.write("         Iline = 0;\n")
        outfile.write("         memset(line,'\\0',512);\n")
        outfile.write("         while((Msg[Imsg] != '\\n') && (Iline < 511) && (Imsg < 16383)) {\n")
        outfile.write("            line[Iline++] = Msg[Imsg++];\n")
        outfile.write("         }\n")
        outfile.write("         line[Iline++] = Msg[Imsg++];\n")
    elif Pipe == "Gmsec":
        outfile.write("      GsMsg = connectionManagerReceive(ConnMgr,GMSEC_WAIT_FOREVER,status);\n")
        outfile.write("      CheckGmsecStatus(status);\n")
        outfile.write("      Field = messageGetField(GsMsg,\"MSG-TEXT\",status);\n")
        outfile.write("      CheckGmsecStatus(status);\n")
        outfile.write("      strcpy(Msg,stringFieldGetValue(Field,status));\n")
        outfile.write("      CheckGmsecStatus(status);\n\n")
        outfile.write("      Done = 0;\n")
        outfile.write("      Imsg = 0;\n")
        outfile.write("      while(!Done) {\n")
        outfile.write("         /* Parse lines from Msg, newline-delimited */\n")
        outfile.write("         Iline = 0;\n")
        outfile.write("         memset(line,'\\0',512);\n")
        outfile.write("         while(Msg[Imsg] != '\\n') {\n")
        outfile.write("            line[Iline++] = Msg[Imsg++];\n")
        outfile.write("         }\n")
        outfile.write("         line[Iline++] = Msg[Imsg++];\n")
    elif Pipe == "File":
        outfile.write("      Done = 0;\n")
        outfile.write("      while(!Done) {\n")
        outfile.write("         fgets(line,511,StateFile);\n")
    elif Pipe == "Cmd":
        outfile.write("\n\n")
    #endif
    outfile.write("         if ("+EchoString+") printf(\"%s\",line);\n\n")
    # Generated parser for the leading TIME line.
    outfile.write("         if (sscanf(line,\"TIME %ld-%ld-%ld:%ld:%lf\\n\",\n")
    outfile.write("            &Year,&doy,&Hour,&Minute,&Second) == 5) {\n")
    outfile.write("            RequestTimeRefresh = 1;\n")
    outfile.write("         }\n\n")
########################################################################
def WriteEpilog():
    """Emit the epilog of the generated WriteTo<Pipe>() C function.

    Closes the Sim prefix loop, writes the "[EOF]" terminator line, and
    emits the transport-specific send/flush plus the wait-for-Ack code.
    """
    global Prog, Verb, Pipe, outfile, EchoString
    if Prog == "Sim":
        # Close the prefix-filter loop opened in WriteProlog().
        outfile.write("      }\n\n")
    #endif
    outfile.write("      sprintf(line,\"[EOF]\\n\\n\");\n")
    outfile.write("      if ("+EchoString+") printf(\"%s\",line);\n\n")
    if Pipe == "Socket":
        outfile.write("      LineLen = strlen(line);\n")
        outfile.write("      memcpy(&Msg[MsgLen],line,LineLen);\n")
        outfile.write("      MsgLen += LineLen;\n")
        outfile.write("      send(Socket,Msg,MsgLen,0);\n\n")
        outfile.write("      /* Wait for Ack */\n");
        outfile.write("      recv(Socket,AckMsg,5,0);\n")
    elif Pipe == "Gmsec":
        outfile.write("      LineLen = strlen(line);\n")
        outfile.write("      memcpy(&Msg[MsgLen],line,LineLen);\n")
        outfile.write("      MsgLen += LineLen;\n")
        outfile.write("      GmsecSend(Header,Msg,ConnMgr,status);\n")
        outfile.write("      /* Wait for ack */\n")
        outfile.write("      AckMsg = connectionManagerReceive(ConnMgr,GMSEC_WAIT_FOREVER,status);\n")
        outfile.write("      CheckGmsecStatus(status);\n")
        outfile.write("      messageDestroy(&AckMsg);\n")
    elif Pipe == "File":
        outfile.write("      fprintf(StateFile,\"%s\",line);\n")
    #endif
    outfile.write("}\n")
########################################################################
def ReadEpilog():
    """Emit the epilog of the generated ReadFrom<Pipe>() C function.

    Writes the "[EOF]" detection, closes the receive loop, and emits the
    transport-specific acknowledgement code.
    """
    global Prog, Verb, Pipe, outfile
    outfile.write("\n")
    outfile.write("         if (!strncmp(line,\"[EOF]\",5)) {\n")
    outfile.write("            Done = 1;\n")
    outfile.write("            sprintf(line,\"[EOF] reached\\n\");\n")
    outfile.write("         }\n")
    if Pipe == "Socket":
        outfile.write("         if (Imsg >= 16383) {\n")
        outfile.write("            Done = 1;\n")
        outfile.write("            printf(\"Imsg limit exceeded\\n\");\n")
        outfile.write("         }\n")
        outfile.write("      }\n\n")
        outfile.write("      /* Acknowledge receipt */\n")
        outfile.write("      send(Socket,AckMsg,strlen(AckMsg),0);\n\n")
    elif Pipe == "Gmsec":
        outfile.write("      messageDestroy(&GsMsg);\n")
        outfile.write("      }\n\n")
        outfile.write("      /* Acknowledge receipt */\n")
        outfile.write("      GmsecSend(\"GMSEC.42.RX.MSG.LOG\",AckMsg,ConnMgr,status);\n\n")
    elif Pipe == "File":
        outfile.write("      }\n\n")
    elif Pipe == "Cmd":
        outfile.write("      \n\n")
        #outfile.write("      }\n\n")
    #endif
########################################################################
def TimeRefreshCode():
    """Emit the C block that refreshes time variables after a TIME line is parsed.

    Sim updates the full simulation time state (UTC/TT/GPS, SimTime); App
    only recomputes AC->Time from the parsed date.
    """
    global outfile,Prog
    if Prog == "Sim":
        outfile.write("      if (RequestTimeRefresh) {\n")
        outfile.write("         /* Update time variables */\n")
        outfile.write("         UTC.Year = Year;\n")
        outfile.write("         UTC.doy = doy;\n")
        outfile.write("         UTC.Hour = Hour;\n")
        outfile.write("         UTC.Minute = Minute;\n")
        outfile.write("         UTC.Second = Second;\n")
        outfile.write("         DOY2MD(UTC.Year,UTC.doy,&UTC.Month,&UTC.Day);\n")
        outfile.write("         CivilTime = DateToTime(UTC.Year,UTC.Month,UTC.Day,UTC.Hour,UTC.Minute,UTC.Second);\n")
        outfile.write("         AtomicTime = CivilTime + LeapSec;\n")
        outfile.write("         DynTime = AtomicTime + 32.184;\n")
        outfile.write("         TT.JulDay = TimeToJD(DynTime);\n")
        outfile.write("         TimeToDate(DynTime,&TT.Year,&TT.Month,&TT.Day,\n")
        outfile.write("            &TT.Hour,&TT.Minute,&TT.Second,DTSIM);\n")
        outfile.write("         TT.doy = MD2DOY(TT.Year,TT.Month,TT.Day);\n")
        outfile.write("         UTC.JulDay = TimeToJD(CivilTime);\n")
        outfile.write("         JDToGpsTime(TT.JulDay,&GpsRollover,&GpsWeek,&GpsSecond);\n")
        outfile.write("         SimTime = DynTime-DynTime0;\n")
        outfile.write("      }\n\n")
    else:
        outfile.write("      if (RequestTimeRefresh) {\n")
        outfile.write("         /* Update AC->Time */\n")
        outfile.write("         DOY2MD(Year,doy,&Month,&Day);\n")
        outfile.write("         AC->Time = DateToTime(Year,Month,Day,Hour,Minute,Second);\n")
        outfile.write("      }\n\n")
    #endif
########################################################################
def StateRefreshCode():
    """Emit the C block that refreshes derived spacecraft states after a read.

    For every SC flagged with RequestStateRefresh, the generated code
    re-derives the reference orbit elements and the dynamic body states
    from the freshly read inputs.
    """
    global outfile
    outfile.write("\n/* .. Refresh SC states that depend on inputs */\n\n")
    outfile.write("      for(Isc=0;Isc<Nsc;Isc++) {\n")
    outfile.write("         if (SC[Isc].RequestStateRefresh) {\n")
    outfile.write("            S = &SC[Isc];\n")
    outfile.write("            S->RequestStateRefresh = 0;\n")
    outfile.write("            if (S->Exists) {\n")
    outfile.write("               /* Update RefOrb */\n")
    outfile.write("               O = &Orb[S->RefOrb];\n")
    outfile.write("               O->Epoch = DynTime;\n")
    outfile.write("               for(i=0;i<3;i++) {\n")
    outfile.write("                  S->PosN[i] = O->PosN[i] + S->PosR[i];\n")
    outfile.write("                  S->VelN[i] = O->VelN[i] + S->VelR[i];\n")
    outfile.write("               }\n")
    outfile.write("               RV2Eph(O->Epoch,O->mu,O->PosN,O->VelN,\n")
    outfile.write("                  &O->SMA,&O->ecc,&O->inc,&O->RAAN,\n")
    outfile.write("                  &O->ArgP,&O->anom,&O->tp,\n")
    outfile.write("                  &O->SLR,&O->alpha,&O->rmin,\n")
    outfile.write("                  &O->MeanMotion,&O->Period);\n")
    outfile.write("               FindCLN(O->PosN,O->VelN,O->CLN,O->wln);\n\n")
    outfile.write("               /* Update Dyn */\n")
    outfile.write("               MapJointStatesToStateVector(S);\n")
    outfile.write("               D = &S->Dyn;\n")
    outfile.write("               MapStateVectorToBodyStates(D->u,D->x,D->h,D->a,D->uf,D->xf,S);\n")
    outfile.write("               MotionConstraints(S);\n")
    outfile.write("            }\n")
    outfile.write("         }\n")
    outfile.write("      }\n")
########################################################################
def WriteCodeBlock(Indent,FmtPrefix,ArrayIdx,ArgPrefix,VarString,IdxLen,Ni,Nj,StructIdxString,FormatString):
    """Emit the C code that sprintf's one variable into `line` and ships it.

    Builds a sprintf() over an Ni x Nj (possibly scalar) variable named
    *VarString*, then emits the transport-specific code that appends `line`
    to the outgoing message (Sim additionally filters by prefix).
    Note: *IdxLen* is accepted for signature parity with ReadCodeBlock but
    is not referenced in this body.
    """
    global Prog, Verb, Pipe, outfile, EchoString
    # Assemble the generated sprintf() call, one format token per element.
    line = Indent+"      sprintf(line,\""
    line += FmtPrefix
    line += VarString
    line += " ="
    for i in range (0,Ni):
        for j in range (0,Nj):
            line += " "+FormatString
        #next j
    #next i
    line += "\\n\",\n"+"         "+Indent+ArrayIdx+StructIdxString
    if Nj > 1:
        for i in range (0,Ni):
            for j in range (0,Nj):
                line += ",\n"+"         "+Indent+ArgPrefix+VarString+"["+str(i)+"]["+str(j)+"]"
            #next j
        #next i
    elif Ni > 1:
        for i in range (0,Ni):
            line += ",\n"+"         "+Indent+ArgPrefix+VarString+"["+str(i)+"]"
        #next i
    else:
        line += ",\n"+"         "+Indent+ArgPrefix+VarString
    #endif
    line += ");\n"
    outfile.write(line)
    # Transport-specific shipping code; Sim guards each line by prefix match.
    if Prog == "Sim":
        if Pipe == "Socket":
            outfile.write("      "+Indent+"if (!strncmp(line,Prefix[Ipfx],PfxLen)) {\n")
            outfile.write("      "+Indent+"   LineLen = strlen(line);\n")
            outfile.write("      "+Indent+"   memcpy(&Msg[MsgLen],line,LineLen);\n")
            outfile.write("      "+Indent+"   MsgLen += LineLen;\n")
            outfile.write("      "+Indent+"   if ("+EchoString+") printf(\"%s\",line);\n")
            outfile.write("      "+Indent+"}\n\n")
        elif Pipe == "Gmsec":
            outfile.write("      "+Indent+"if (!strncmp(line,Prefix[Ipfx],PfxLen)) {\n")
            outfile.write("      "+Indent+"   LineLen = strlen(line);\n")
            outfile.write("      "+Indent+"   memcpy(&Msg[MsgLen],line,LineLen);\n")
            outfile.write("      "+Indent+"   MsgLen += LineLen;\n\n")
            outfile.write("      "+Indent+"   if ("+EchoString+") printf(\"%s\",line);\n")
            outfile.write("      "+Indent+"}\n\n")
        elif Pipe == "File":
            outfile.write("      "+Indent+"if (!strncmp(line,Prefix[Ipfx],PfxLen)) {\n")
            outfile.write("      "+Indent+"   fprintf(StateFile,\"%s\",line);\n")
            outfile.write("      "+Indent+"   if ("+EchoString+") printf(\"%s\",line);\n")
            outfile.write("      "+Indent+"}\n\n")
        #endif
    else:
        if Pipe == "Socket":
            outfile.write("      "+Indent+"LineLen = strlen(line);\n")
            outfile.write("      "+Indent+"memcpy(&Msg[MsgLen],line,LineLen);\n")
            outfile.write("      "+Indent+"MsgLen += LineLen;\n")
            outfile.write("      "+Indent+"if ("+EchoString+") printf(\"%s\",line);\n\n")
        elif Pipe == "Gmsec":
            outfile.write("      "+Indent+"LineLen = strlen(line);\n")
            outfile.write("      "+Indent+"memcpy(&Msg[MsgLen],line,LineLen);\n")
            outfile.write("      "+Indent+"MsgLen += LineLen;\n\n")
            outfile.write("      "+Indent+"if ("+EchoString+") printf(\"%s\",line);\n\n")
        elif Pipe == "File":
            outfile.write("      "+Indent+"fprintf(StateFile,\"%s\",line);\n")
            outfile.write("      "+Indent+"if ("+EchoString+") printf(\"%s\",line);\n\n")
        #endif
    #endif
########################################################################
def ReadCodeBlock(Indent,FmtPrefix,ArrayIdx,ArgPrefix,ArgString,VarString,IdxLen,Ni,Nj,StructIdxString,Narg,FormatString):
    """Emit the C code that sscanf's one variable out of `line`.

    Builds an sscanf() into the temporary DbleVal/LongVal array named by
    *ArgString*, then, on a successful parse of *Narg* fields, copies the
    values into the destination variable element by element.  App code is
    additionally guarded by `Isc == AC->ID`; SC state reads also set
    RequestStateRefresh.  Note: *IdxLen* is accepted but not referenced in
    this body.
    """
    global Prog, outfile
    # Assemble the generated sscanf() call.
    line = Indent+"if (sscanf(line,\""
    line += FmtPrefix
    line += VarString
    line += " ="
    for i in range (0,Ni):
        for j in range (0,Nj):
            line += " "+FormatString
        #next j
    #next i
    line += "\","+"\n   "+Indent+"&"+ArrayIdx+StructIdxString
    if Nj > 1:
        for i in range (0,Ni):
            for j in range (0,Nj):
                line += ","+"\n   "+Indent+"&"+ArgString+"["+str(Nj*i+j)+"]"
            #next j
        #next i
    elif Ni > 1:
        for i in range (0,Ni):
            line += ","+"\n   "+Indent+"&"+ArgString+"["+str(i)+"]"
        #next i
    else:
        line += ","+"\n   "+Indent+"&"+ArgString+"[0]"
    #endif
    line += ") == "+str(Narg)+") {"
    if Prog == "App":
        # App only applies lines addressed to its own spacecraft.
        line += "\n   "+Indent+"if (Isc == AC->ID) {"
        Indent += "   "
    #endif
    # Copy the scanned temporaries into the destination variable.
    if Nj > 1:
        for i in range (0,Ni):
            for j in range (0,Nj):
                line += "\n   "+Indent+ArgPrefix+VarString+"["+str(i)+"]["+str(j)+"] = "+ArgString+"["+str(Nj*i+j)+"];"
            #next j
        #next i
    elif Ni > 1:
        for i in range (0,Ni):
            line += "\n   "+Indent+ArgPrefix+VarString+"["+str(i)+"] = "+ArgString+"["+str(i)+"];"
        #next i
    else:
        line += "\n   "+Indent+ArgPrefix+VarString+" = "+ArgString+"[0];"
    #endif
    if Prog == "App":
        Indent = Indent[0:-3]
        line += "\n   "+Indent+"}"
    #endif
    if ArgPrefix.startswith("SC") and ArgPrefix.count("AC") == 0:
        # Reads into Sim SC state require a downstream state refresh.
        line += "\n   "+Indent+"SC[Isc].RequestStateRefresh = 1;"
    #endif
    line += "\n"+Indent+"}\n\n"
    outfile.write(line)
########################################################################
def ParseStruct(StructList,Struct,Indent,FmtPrefix,ArrayIdx,ArgPrefix,StructIdxString,Narg):
    """Recursively walk one table definition and emit code for its members.

    Primitive (long/double) members are routed to WriteCodeBlock or
    ReadCodeBlock according to the current global Verb and the member's
    Read/Write and Packet Role flags; struct-typed members recurse with
    extended format/argument prefixes (handling scalar, 1-D, 2-D, and
    variable-length arrays).
    """
    global Prog, Verb, Pipe, outfile, ParmPass
    Primitives = {"long","double"}
    VarList = Struct["Table Data"]
    for Var in VarList:
        DataType = Var["Data Type"]
        if DataType in Primitives:
            VarString = Var["Variable Name"]
            # Decode the array size annotation, e.g. "[3]", "[3,3]", or "[N]".
            if "Array Size" in Var:
                IdxString = Var["Array Size"].strip(r"[]")
                IdxList = IdxString.split(",")
                IdxLen = len(IdxList)
                if IdxLen == 2:
                    Ni = int(IdxList[0])
                    Nj = int(IdxList[1])
                elif IdxList[0].isnumeric():
                    Ni = int(IdxList[0])
                    Nj = 1
                else:
                    Ni = 1
                    Nj = 1
                #endif
            else:
                IdxLen = 0
                Ni = 1
                Nj = 1
            #endif
            if DataType == "long":
                WriteFormatString = "%ld"
                ReadFormatString = "%ld"
                ArgString = "LongVal"
            else:
                WriteFormatString = "%18.12le"
                ReadFormatString = "%le"
                ArgString = "DbleVal"
            #endif
            if Prog == "Sim":
                ReadWrite = Var["Sim Read/Write"]
            elif Prog == "App":
                ReadWrite = Var["App Read/Write"]
            else:
                ReadWrite = ""
            #endif
            PktRole = Var["Packet Role"]
            # Pass 1 handles parameter-table (PRM) members; pass 0 handles state.
            if ParmPass == 1:
                if Verb == "WriteTo" and PktRole == "PRM" and ReadWrite == "":
                    WriteCodeBlock(Indent,FmtPrefix,ArrayIdx,ArgPrefix,VarString,IdxLen,Ni,Nj,StructIdxString,WriteFormatString)
                #endif
                if Verb == "ReadFrom" and PktRole == "PRM" and ReadWrite == "":
                    ReadCodeBlock(Indent,FmtPrefix,ArrayIdx,ArgPrefix,ArgString,VarString,IdxLen,Ni,Nj,StructIdxString,Narg+Ni*Nj,ReadFormatString)
                #endif
            else:
                if Verb == "WriteTo" and ReadWrite in ["WRITE","READ_WRITE"]:
                    WriteCodeBlock(Indent,FmtPrefix,ArrayIdx,ArgPrefix,VarString,IdxLen,Ni,Nj,StructIdxString,WriteFormatString)
                #endif
                if Verb == "ReadFrom" and ReadWrite in ["READ","READ_WRITE"]:
                    ReadCodeBlock(Indent,FmtPrefix,ArrayIdx,ArgPrefix,ArgString,VarString,IdxLen,Ni,Nj,StructIdxString,Narg+Ni*Nj,ReadFormatString)
                #endif
                if Prog == "Sim" and Verb == "ReadFrom" and Pipe == "Cmd" and Var["Cmd Read"] == "READ":
                    ReadCodeBlock(Indent,FmtPrefix,ArrayIdx,ArgPrefix,ArgString,VarString,IdxLen,Ni,Nj,StructIdxString,Narg+Ni*Nj,ReadFormatString)
                #endif
            #endif
        else: # struct
            # Recurse into the struct-typed member's table definition.
            for SubStruct in StructList:
                if SubStruct["Table Name"] == Var["Data Type"]:
                    LocalFmtPrefix = FmtPrefix + Var["Variable Name"]
                    LocalArgPrefix = ArgPrefix + Var["Variable Name"]
                    LocalStructIdxString = StructIdxString
                    if "Array Size" in Var:
                        IdxString = Var["Array Size"].strip(r"[]")
                        IdxList = IdxString.split(",")
                        IdxLen = len(IdxList)
                        if IdxString.isalpha():
                            # Variable-length array: size held in a sibling field.
                            if Verb == "WriteTo":
                                outfile.write(Indent+"      for(i=0;i<"+ArgPrefix+IdxString+";i++) {\n")
                                LocalIndent = Indent+"   "
                                if Prog == "Sim":
                                    LocalStructIdxString += ",i"
                                else:
                                    LocalStructIdxString += ",i"
                                #endif
                            else:
                                LocalIndent = Indent
                                LocalStructIdxString += ",&i"
                            #endif
                            LocalFmtPrefix += "[%ld]."
                            LocalArgPrefix += "[i]."
                            LocalNarg = Narg+1
                            ParseStruct(StructList,SubStruct,LocalIndent,LocalFmtPrefix,ArrayIdx,LocalArgPrefix,LocalStructIdxString,LocalNarg)
                            if Verb == "WriteTo":
                                outfile.write(Indent+"      }\n\n")
                            #endif
                        elif IdxLen == 2:
                            LocalFmtPrefix += "[%ld][%ld]."
                            LocalArgPrefix += "[i][j]."
                            if Verb == "WriteTo":
                                LocalStructIdxString += ",i,j"
                            else:
                                LocalStructIdxString += ",&i,&j"
                            #endif
                            LocalNarg = Narg+2
                            LocalIndent = Indent+""
                            ParseStruct(StructList,SubStruct,LocalIndent,LocalFmtPrefix,ArrayIdx,LocalArgPrefix,LocalStructIdxString,LocalNarg)
                        else:
                            LocalFmtPrefix += "[%ld]."
                            LocalArgPrefix += "[i]."
                            if Verb == "WriteTo":
                                LocalStructIdxString += ",i"
                            else:
                                LocalStructIdxString += ",&i"
                            #endif
                            LocalNarg = Narg+1
                            LocalIndent = Indent+""
                            ParseStruct(StructList,SubStruct,LocalIndent,LocalFmtPrefix,ArrayIdx,LocalArgPrefix,LocalStructIdxString,LocalNarg)
                        #endif
                    else:
                        LocalFmtPrefix += "."
                        LocalArgPrefix += "."
                        LocalIndent = Indent+""
                        ParseStruct(StructList,SubStruct,LocalIndent,LocalFmtPrefix,ArrayIdx,LocalArgPrefix,LocalStructIdxString,Narg)
                    #endif
                #endif
            #next SubStruct
        #endif
    #next Var
########################################################################
def StripEmptyLoops(infile,outfile):
    """Copy *infile* to *outfile*, dropping generated `for` loops with empty bodies.

    An "empty loop" is a line whose stripped text starts with ``for``,
    immediately followed by a line starting with ``}``; the loop header,
    the closing brace, and the blank line that follows are all discarded.
    Everything else is copied through unchanged.
    """
    current = infile.readline()
    while current != '':  # '' means EOF
        if current.strip().startswith('for'):
            nxt = infile.readline()
            if nxt.strip().startswith('}'):
                # Empty loop: also consume the blank separator line.
                infile.readline()
            else:
                outfile.write(current)
                outfile.write(nxt)
        else:
            outfile.write(current)
        current = infile.readline()
########################################################################
def main():
    """Generate all Read/Write IPC C source files from 42.json.

    For every (Prog, Verb, Pipe) combination (minus the unsupported
    WriteTo/Cmd and App/Gmsec pairs), emits prolog, per-table code for two
    passes (state then parameters), and epilog into TempIpc.c, then strips
    empty loops into ../Source/IPC/<Prog><Verb><Pipe>.c.
    """
    global Prog, Verb, Pipe, outfile, EchoString, ParmPass
    ProgList = {"Sim","App"}
    VerbList = {"WriteTo","ReadFrom"}
    PipeList = {"Socket","Gmsec","File","Cmd"}
    # Table definitions driving the generated code.
    infile = open('42.json')
    StructDict = json.load(infile)
    infile.close()
    for Prog in ProgList:
        if Prog == "Sim":
            EchoString = "EchoEnabled"
        else:
            EchoString = "AC->EchoEnabled"
        #endif
        for Verb in VerbList:
            for Pipe in PipeList:
                # Skip combinations that have no generated counterpart.
                if not(Verb == "WriteTo" and Pipe == "Cmd") and not (Prog == "App" and Pipe == "Gmsec"):
                    outfile = open("TempIpc.c","w")
                    if Verb == "WriteTo":
                        WriteProlog()
                    elif Verb == "ReadFrom":
                        ReadProlog()
                    #endif
                    # Pass 0 emits state variables; pass 1 emits parameter tables.
                    for ParmPass in [0,1]:
                        StructList = StructDict["Table Definition"]
                        for Struct in StructList:
                            Indent = "      "
                            if Prog == "Sim":
                                if Struct["Table Name"] == "SCType":
                                    if ParmPass == 0:
                                        if Verb == "WriteTo":
                                            outfile.write(Indent+"for(Isc=0;Isc<Nsc;Isc++) {\n")
                                            outfile.write(Indent+"   if (SC[Isc].Exists) {\n")
                                        #endif
                                        ParseStruct(StructList,Struct,Indent+"   ","SC[%ld].","Isc","SC[Isc].","",1)
                                        if Verb == "WriteTo":
                                            outfile.write(Indent+"   }\n")
                                            outfile.write(Indent+"}\n\n")
                                        #endif
                                    #endif
                                #endif
                                if Struct["Table Name"] == "OrbitType":
                                    if ParmPass == 0:
                                        if Verb == "WriteTo":
                                            outfile.write(Indent+"for(Iorb=0;Iorb<Norb;Iorb++) {\n")
                                            outfile.write(Indent+"   if (Orb[Iorb].Exists) {\n")
                                        #endif
                                        ParseStruct(StructList,Struct,Indent+"   ","Orb[%ld].","Iorb","Orb[Iorb].","",1)
                                        if Verb == "WriteTo":
                                            outfile.write(Indent+"   }\n")
                                            outfile.write(Indent+"}\n\n")
                                        #endif
                                    #endif
                                #endif
                                if Struct["Table Name"] == "WorldType":
                                    if ParmPass == 0:
                                        if Verb == "WriteTo":
                                            outfile.write(Indent+"for(Iw=1;Iw<NWORLD;Iw++) {\n")
                                            outfile.write(Indent+"   if (World[Iw].Exists) {\n")
                                        #endif
                                        ParseStruct(StructList,Struct,Indent+"   ","World[%ld].","Iw","World[Iw].","",1)
                                        if Verb == "WriteTo":
                                            outfile.write(Indent+"   }\n")
                                            outfile.write(Indent+"}\n\n")
                                        #endif
                                    #endif
                                #endif
                                if Struct["Table Name"] == "AcType":
                                    if ParmPass == 1:
                                        if Verb == "WriteTo":
                                            Indent = "      "
                                            outfile.write(Indent+"for(Isc=0;Isc<Nsc;Isc++) {\n")
                                            outfile.write(Indent+"   if (SC[Isc].Exists) {\n")
                                            outfile.write(Indent+"      if (SC[Isc].AC.ParmLoadEnabled) {\n")
                                        #endif
                                        else:
                                            Indent = "      "
                                            outfile.write(Indent+"for(Isc=0;Isc<Nsc;Isc++) {\n")
                                            outfile.write(Indent+"   if (SC[Isc].Exists) {\n")
                                            outfile.write(Indent+"      if (SC[Isc].AC.ParmDumpEnabled) {\n")
                                            Indent += "      "
                                        #endif
                                        ParseStruct(StructList,Struct,Indent+"   ","SC[%ld].AC.","Isc","SC[Isc].AC.","",1)
                                        if Verb == "WriteTo":
                                            outfile.write(Indent+"      }\n")
                                            outfile.write(Indent+"   }\n")
                                            outfile.write(Indent+"}\n\n")
                                        else:
                                            Indent = Indent[0:-3]
                                            outfile.write(Indent+"      }\n")
                                            outfile.write(Indent+"   }\n")
                                            outfile.write(Indent+"}\n\n")
                                        #endif
                                    #endif
                                #endif
                            else:
                                if Struct["Table Name"] == "AcType":
                                    if ParmPass == 1:
                                        if Verb == "WriteTo":
                                            Indent = "      "
                                            outfile.write(Indent+"   if (AC->ParmDumpEnabled) {\n")
                                        #endif
                                        else:
                                            Indent = "      "
                                            outfile.write(Indent+"if (AC->ParmLoadEnabled) {\n")
                                        #endif
                                        ParseStruct(StructList,Struct,Indent+"   ","SC[%ld].AC.","Isc","AC->","",1)
                                        if Verb == "WriteTo":
                                            outfile.write(Indent+"   }\n\n")
                                        else:
                                            outfile.write(Indent+"}\n\n")
                                        #endif
                                    else:
                                        if Verb == "WriteTo":
                                            Indent = "      "
                                        #endif
                                        else:
                                            Indent = "      "
                                        #endif
                                        ParseStruct(StructList,Struct,Indent,"SC[%ld].AC.","Isc","AC->","",1)
                                    #endif
                                #endif
                            #endif
                        #next Struct
                    #next ParmPass
                    if Verb == "WriteTo":
                        WriteEpilog()
                    elif Verb == "ReadFrom":
                        ReadEpilog()
                        TimeRefreshCode()
                        if Prog == "Sim":
                            StateRefreshCode()
                        #endif
                        outfile.write("}\n")
                    #endif
                    outfile.close()
                    # Post-process: remove loops that ended up with empty bodies.
                    infile = open("TempIpc.c")
                    outfile = open("../Source/IPC/"+Prog+Verb+Pipe+".c","w")
                    StripEmptyLoops(infile,outfile)
                    infile.close()
                    outfile.close()
                    os.remove("TempIpc.c")
                #endif
            #next Pipe
        #next Verb
    #next Prog
########################################################################
if __name__ == '__main__': main()
| [
"Eric.T.Stoneking@nasa.gov"
] | Eric.T.Stoneking@nasa.gov |
4b9a62611c764cd8d705fcf54fd46f2a5624deae | d9e26e516ab3863b6e7d00c4e3cdecf1af7028eb | /src/oaklib/io/rollup_report_writer.py | e4644c058309aeb0aeae82b0c4cc2fa52f2b5e04 | [
"Apache-2.0"
] | permissive | INCATools/ontology-access-kit | 2f08a64b7308e8307d1aaac2a81764e7d98b5928 | 8d2a124f7af66fe2e796f9e0ece55585438796a5 | refs/heads/main | 2023-08-30T14:28:57.201198 | 2023-08-29T17:40:19 | 2023-08-29T17:40:19 | 475,072,415 | 67 | 15 | Apache-2.0 | 2023-09-07T01:06:04 | 2022-03-28T15:50:45 | Jupyter Notebook | UTF-8 | Python | false | false | 3,444 | py | from typing import Dict, List, TextIO
from airium import Airium
from linkml_runtime.dumpers import json_dumper, yaml_dumper
def format_object(curie, label):
    """Render a term for display: "label [curie]" when a label is present, else just the curie."""
    return f"{label} [{curie}]" if label else curie
def add_association_group(doc: Airium, associations: List[Dict], subject: str, header_label: str):
    """Emit one labelled association group for *subject* into *doc*.

    Only associations whose "subject" field equals *subject* are rendered;
    when there are none, nothing at all is written to the document.
    """
    matching = [assoc for assoc in associations if assoc.get("subject") == subject]
    if not matching:
        return
    with doc.div(klass="association-group"):
        doc.div(_t=header_label, klass="association-group-header")
        with doc.ul(klass="association-group-list"):
            for assoc in matching:
                doc.li(_t=format_object(assoc.get("object"), assoc.get("object_label")))
def generate_html(subjects: List[str], groups: List[Dict]) -> str:
    """Render *groups* as a standalone HTML page containing a rollup table.

    The table has one column per entry in *subjects* plus a leading label
    column; each row is one group (with any nested sub-groups rendered via
    ``add_association_group``). Returns the complete HTML document as a string.

    NOTE(review): assumes each group dict may carry "group_object",
    "group_object_label", "sub_groups" and "associations" keys -- confirm
    against the caller that builds these dicts.
    """
    doc = Airium()
    doc("<!DOCTYPE html>")
    with doc.html(lang="en"):
        with doc.head():
            doc.meta(charset="utf-8")
            doc.title(_t="Rollup Table")
            # Inline stylesheet; the class names below must match those used
            # here and in add_association_group.
            doc.style(
                _t="""
            .rollup-table {
                border-collapse: collapse;
                width: 100%;
            }
            .rollup-table tr {
                vertical-align: top;
            }
            .rollup-table td {
                padding: 0.25rem;
                border-top: 1px solid black;
            }
            .primary-group-label {
                font-weight: bold;
            }
            .association-group {
                margin-bottom: 1rem;
            }
            .association-group-header {
                font-style: italic;
            }
            .association-group-list {
                margin: 0;
            }
            """
            )
        with doc.body():
            with doc.table(klass="rollup-table"):
                # Header row: "Subject" label followed by one cell per subject.
                with doc.tr():
                    doc.td(_t="Subject", klass="primary-group-label")
                    for subject in subjects:
                        doc.td(_t=subject)
                # One row per top-level group.
                for group in groups:
                    with doc.tr():
                        label = format_object(
                            group.get("group_object"), group.get("group_object_label")
                        )
                        doc.td(_t=label, klass="primary-group-label")
                        for subject in subjects:
                            with doc.td():
                                # Nested sub-groups first, each under its own header...
                                for sub_group in group.get("sub_groups", []):
                                    add_association_group(
                                        doc,
                                        sub_group.get("associations", []),
                                        subject,
                                        format_object(
                                            sub_group.get("group_object"),
                                            sub_group.get("group_object_label"),
                                        ),
                                    )
                                # ...then the group's direct associations under "Other".
                                add_association_group(
                                    doc, group.get("associations", []), subject, "Other"
                                )
    return str(doc)
def write_report(subjects: List[str], groups: List[Dict], output: TextIO, format: str):
    """Serialize *groups* to *output* as json, yaml, or html.

    The html form also needs *subjects* to lay out the table columns.
    Raises ValueError for any other format string.
    """
    if format == "json":
        rendered = json_dumper.dumps(groups, inject_type=False)
    elif format == "yaml":
        rendered = yaml_dumper.dumps(groups)
    elif format == "html":
        rendered = generate_html(subjects, groups)
    else:
        raise ValueError(f"Unsupported format: {format}")
    output.write(rendered)
| [
"noreply@github.com"
] | INCATools.noreply@github.com |
3a487eaa5b78fc12e70fe3c362a2bbce9ff5fb7c | fd7498ec78932500333d1c922bdbdebac94fbd7e | /leads/api.py | caeb33d61ba3b09108f7dee82ab41b9f0bdc6ace | [] | no_license | ajay-staruss/DjangoReactLEadManager | 303419f2108379eb8ac78ad1bb7c8e171e2e1ded | 83dadbb2d60657662f46a61aa8cf7fae2c0ee567 | refs/heads/master | 2022-07-02T08:25:32.669245 | 2020-05-16T09:39:04 | 2020-05-16T09:39:04 | 264,403,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from leads.models import Lead
from rest_framework import viewsets,permissions
from .serializers import LeadSerializer
class LeadViewSet(viewsets.ModelViewSet):
    # Full CRUD REST endpoint for Lead objects
    # (list / retrieve / create / update / destroy).
    queryset = Lead.objects.all()
    # Open access: any caller, authenticated or not, may use every action.
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = LeadSerializer
"ajaypratap9980@gmail.com"
] | ajaypratap9980@gmail.com |
00fb40dc657af4f71f40f6e500abe5ae5e629c29 | 66c8b9ee95b951a60847cfabad08250e65289812 | /src/stringMatching.py | 3d448a7161cd94aaf929827be219ed63195eda51 | [] | no_license | sampanayak/ccfl-elastic | 707206c2c08cc63d67bd9846fc38fce078b3b091 | c717cce1855de0dce6d4b9c3b709a94331db55c6 | refs/heads/master | 2021-07-24T15:14:29.855623 | 2020-04-28T19:10:01 | 2020-04-28T19:10:01 | 157,615,068 | 0 | 0 | null | 2018-11-14T21:44:36 | 2018-11-14T21:44:35 | null | UTF-8 | Python | false | false | 529 | py | #contents = []
import re

# Expected layout of gene.txt: line 0 is a header, then lines alternate
# (full string, substring) pairs -- TODO confirm against the actual file.
with open('gene.txt') as f:
    lines = f.readlines()
    i = 1
    size_of_lines = len(lines)
    # Require a complete pair each iteration so the second read below can
    # never run past the end of the file (the original could IndexError
    # on a trailing unpaired line).
    while i + 1 < size_of_lines:
        # strip() removes the trailing newline; with it left in place the
        # substring (ending in '\n') could never match inside the full string.
        full_string = lines[i].strip()
        i += 1
        sub = lines[i].strip()
        print(full_string)
        print(sub)
        i += 1
        # Bug fix: the original called re.finditer('sub', 'fullString'),
        # searching the literal text 'sub' inside the literal text
        # 'fullString' instead of using the variables. re.escape guards
        # against regex metacharacters in the data.
        for match in re.finditer(re.escape(sub), full_string):
            print(match.start())
| [
"31867379+sampanayak@users.noreply.github.com"
] | 31867379+sampanayak@users.noreply.github.com |
12360d4c69b79a4dda8833a2bc7d232357e2cee1 | d0a240a606f6de871197ab21ff911f94c6eb6e20 | /encoder.py | ba1215fdc88fed29ccbdbcc25a1cf565796be6cd | [] | no_license | PhucNguyen12038/RNN | e7bd2d8ed8fadab16af92257b9fc4b81b5e634e6 | e7c8fee7fca8e846ddc01e6c4e5a2adfb64fd65f | refs/heads/main | 2023-04-24T13:20:02.541340 | 2021-05-09T10:12:29 | 2021-05-09T10:12:29 | 349,739,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | import json
import numpy as np
class NumpyEncoder(json.JSONEncoder):
    """json.JSONEncoder that understands numpy scalars and arrays.

    Integers become Python int, floats become Python float, and ndarrays
    become (nested) lists; everything else falls back to the base encoder.
    """

    def default(self, obj):
        # Ordered table of (numpy type, converter to a native Python value).
        conversions = (
            (np.integer, int),
            (np.floating, float),
            (np.ndarray, lambda arr: arr.tolist()),
        )
        for numpy_type, convert in conversions:
            if isinstance(obj, numpy_type):
                return convert(obj)
        return json.JSONEncoder.default(self, obj)
"nguyenhuuphuc12038@gmail.com"
] | nguyenhuuphuc12038@gmail.com |
7acf6cb2e275ff6ff0ce9cbc15c9a5046904a862 | c8dee6f9c6885a8005765726673062f00cb18565 | /main.py | bb748e27c54729621348c5365dd91804f2f2b9b0 | [] | no_license | bhavanasrini/NFLFinal | 6e41aab7c667cb073b4964d7ea906baa8076393d | 5a877b336834b7ac341da3d8806481e9a0e6c0ab | refs/heads/master | 2021-08-24T13:29:13.029930 | 2017-12-10T01:07:05 | 2017-12-10T01:07:05 | 113,593,079 | 0 | 1 | null | 2017-12-08T18:52:06 | 2017-12-08T16:12:06 | Python | UTF-8 | Python | false | false | 31,166 | py |
def teamname1():
    """Ticker of the first team whose games should be analyzed."""
    ticker = "GB"
    return ticker
def teamname2():
    """Ticker of the second team whose games should be analyzed."""
    ticker = "DET"
    return ticker
def list(teamname):
    """Return the hard-coded ESPN matchup (box-score) URLs for *teamname*.

    teamname is an NFL team ticker such as "KC" or "DET". Falls through and
    implicitly returns None for any ticker with no entry below.

    NOTE(review): this function shadows the builtin ``list``; renaming it
    would require updating every caller, so only flagging it here.
    """
    # CHIEFS
    if teamname == "KC":
        return ["http://www.espn.com/nfl/matchup?gameId=400951566", "http://www.espn.com/nfl/matchup?gameId=400951636",
                "http://www.espn.com/nfl/matchup?gameId=400951634", "http://www.espn.com/nfl/matchup?gameId=400951752",
                "http://www.espn.com/nfl/matchup?gameId=400951664", "http://www.espn.com/nfl/matchup?gameId=400951776",
                "http://www.espn.com/nfl/matchup?gameId=400951571", "http://www.espn.com/nfl/matchup?gameId=400951737",
                "http://www.espn.com/nfl/matchup?gameId=400951786", "http://www.espn.com/nfl/matchup?gameId=400951595"]
    # LIONS
    if teamname == "DET":
        return ["http://www.espn.com/nfl/matchup?gameId=400951576", "http://www.espn.com/nfl/matchup?gameId=400951681",
                "http://www.espn.com/nfl/matchup?gameId=400951594", "http://www.espn.com/nfl/matchup?gameId=400951724",
                "http://www.espn.com/nfl/matchup?gameId=400951558", "http://www.espn.com/nfl/matchup?gameId=400951704",
                "http://www.espn.com/nfl/matchup?gameId=400951736", "http://www.espn.com/nfl/matchup?gameId=400951790",
                "http://www.espn.com/nfl/matchup?gameId=400951563", "http://www.espn.com/nfl/matchup?gameId=400951701",
                "http://www.espn.com/nfl/matchup?gameId=400951569"]
    # STEELERS
    if teamname == "PIT":
        return ["http://www.espn.com/nfl/matchup?gameId=400951574", "http://www.espn.com/nfl/matchup?gameId=400951643",
                "http://www.espn.com/nfl/matchup?gameId=400951708", "http://www.espn.com/nfl/matchup?gameId=400951655",
                "http://www.espn.com/nfl/matchup?gameId=400951776", "http://www.espn.com/nfl/matchup?gameId=400951609",
                "http://www.espn.com/nfl/matchup?gameId=400951736", "http://www.espn.com/nfl/matchup?gameId=400951565",
                "http://www.espn.com/nfl/matchup?gameId=400951698", "http://www.espn.com/nfl/matchup?gameId=400951633"]
    # RAIDERS
    if teamname == "OAK":
        return ["http://www.espn.com/nfl/matchup?gameId=400951584", "http://www.espn.com/nfl/matchup?gameId=400951669",
                "http://www.espn.com/nfl/matchup?gameId=400951644", "http://www.espn.com/nfl/matchup?gameId=400951744",
                "http://www.espn.com/nfl/matchup?gameId=400951659", "http://www.espn.com/nfl/matchup?gameId=400951779",
                "http://www.espn.com/nfl/matchup?gameId=400951571", "http://www.espn.com/nfl/matchup?gameId=400951706",
                "http://www.espn.com/nfl/matchup?gameId=400951787", "http://www.espn.com/nfl/matchup?gameId=400951815",
                "http://www.espn.com/nfl/matchup?gameId=400951629"]
    # BRONCOS
    if teamname == "DEN":
        return ["http://www.espn.com/nfl/matchup?gameId=400951615","http://www.espn.com/nfl/matchup?gameId=400951673",
                "http://www.espn.com/nfl/matchup?gameId=400951583", "http://www.espn.com/nfl/matchup?gameId=400951744",
                "http://www.espn.com/nfl/matchup?gameId=400951782", "http://www.espn.com/nfl/matchup?gameId=400951624",
                "http://www.espn.com/nfl/matchup?gameId=400951737", "http://www.espn.com/nfl/matchup?gameId=400951760",
                "http://www.espn.com/nfl/matchup?gameId=400951690", "http://www.espn.com/nfl/matchup?gameId=400951810",
                "http://www.espn.com/nfl/matchup?gameId=400951629"]
    # EAGLES
    if teamname == "PHI":
        return ["http://www.espn.com/nfl/matchup?gameId=400951592", "http://www.espn.com/nfl/matchup?gameId=400951636",
                "http://www.espn.com/nfl/matchup?gameId=400951616", "http://www.espn.com/nfl/matchup?gameId=400951738",
                "http://www.espn.com/nfl/matchup?gameId=400951695", "http://www.espn.com/nfl/matchup?gameId=400951641",
                "http://www.espn.com/nfl/matchup?gameId=400951723", "http://www.espn.com/nfl/matchup?gameId=400951651",
                "http://www.espn.com/nfl/matchup?gameId=400951760", "http://www.espn.com/nfl/matchup?gameId=400951817",
                "http://www.espn.com/nfl/matchup?gameId=400951610"]
    # BUCCANEERS
    if teamname == "TB":
        return["http://www.espn.com/nfl/matchup?gameId=400951552","http://www.espn.com/nfl/matchup?gameId=400951645",
               "http://www.espn.com/nfl/matchup?gameId=400951604","http://www.espn.com/nfl/matchup?gameId=400951742",
               "http://www.espn.com/nfl/matchup?gameId=400951770","http://www.espn.com/nfl/matchup?gameId=400951575",
               "http://www.espn.com/nfl/matchup?gameId=400951725", "http://www.espn.com/nfl/matchup?gameId=400951755",
               "http://www.espn.com/nfl/matchup?gameId=400951653", "http://www.espn.com/nfl/matchup?gameId=400981391",
               "http://www.espn.com/nfl/matchup?gameId=400951586"]
    # PANTHERS
    if teamname == "CAR":
        return ["http://www.espn.com/nfl/matchup?gameId=400951605","http://www.espn.com/nfl/matchup?gameId=400951630",
                "http://www.espn.com/nfl/matchup?gameId=400951587","http://www.espn.com/nfl/matchup?gameId=400951727",
                "http://www.espn.com/nfl/matchup?gameId=400951558","http://www.espn.com/nfl/matchup?gameId=400951695",
                "http://www.espn.com/nfl/matchup?gameId=400951578", "http://www.espn.com/nfl/matchup?gameId=400951725",
                "http://www.espn.com/nfl/matchup?gameId=400951749", "http://www.espn.com/nfl/matchup?gameId=400951693",
                "http://www.espn.com/nfl/matchup?gameId=400951606"]
    # BILLS
    if teamname == "BUF":
        return ["http://www.espn.com/nfl/matchup?gameId=400951567","http://www.espn.com/nfl/matchup?gameId=400951630",
                "http://www.espn.com/nfl/matchup?gameId=400951583", "http://www.espn.com/nfl/matchup?gameId=400951685",
                "http://www.espn.com/nfl/matchup?gameId=400951554", "http://www.espn.com/nfl/matchup?gameId=400951575",
                "http://www.espn.com/nfl/matchup?gameId=400951706", "http://www.espn.com/nfl/matchup?gameId=400951743",
                "http://www.espn.com/nfl/matchup?gameId=400951555", "http://www.espn.com/nfl/matchup?gameId=400951807",
                "http://www.espn.com/nfl/matchup?gameId=400951595"]
    # CHARGERS
    if teamname == "LAC":
        return ["http://www.espn.com/nfl/matchup?gameId=400951615", "http://www.espn.com/nfl/matchup?gameId=400951666",
                "http://www.espn.com/nfl/matchup?gameId=400951634", "http://www.espn.com/nfl/matchup?gameId=400951738",
                "http://www.espn.com/nfl/matchup?gameId=400951650", "http://www.espn.com/nfl/matchup?gameId=400951779",
                "http://www.espn.com/nfl/matchup?gameId=400951624", "http://www.espn.com/nfl/matchup?gameId=400951715",
                "http://www.espn.com/nfl/matchup?gameId=400951807", "http://www.espn.com/nfl/matchup?gameId=400951573"]
    # JETS
    if teamname == "NYJ":
        return ["http://www.espn.com/nfl/matchup?gameId=400951567","http://www.espn.com/nfl/matchup?gameId=400951669",
                "http://www.espn.com/nfl/matchup?gameId=400951611","http://www.espn.com/nfl/matchup?gameId=400951556",
                "http://www.espn.com/nfl/matchup?gameId=400951766", "http://www.espn.com/nfl/matchup?gameId=400951598",
                "http://www.espn.com/nfl/matchup?gameId=400951721", "http://www.espn.com/nfl/matchup?gameId=400951743",
                "http://www.espn.com/nfl/matchup?gameId=400951653", "http://www.espn.com/nfl/matchup?gameId=400951606"]
    # FALCONS
    if teamname == "ATL":
        return ["http://www.espn.com/nfl/matchup?gameId=400951570", "http://www.espn.com/nfl/matchup?gameId=400951679",
                "http://www.espn.com/nfl/matchup?gameId=400951594", "http://www.espn.com/nfl/matchup?gameId=400951685",
                "http://www.espn.com/nfl/matchup?gameId=400951697", "http://www.espn.com/nfl/matchup?gameId=400951638",
                "http://www.espn.com/nfl/matchup?gameId=400951721", "http://www.espn.com/nfl/matchup?gameId=400951749",
                "http://www.espn.com/nfl/matchup?gameId=400951686", "http://www.espn.com/nfl/matchup?gameId=400951818",
                "http://www.espn.com/nfl/matchup?gameId=400951586"]
    # SAINTS
    if teamname == "NO":
        # NOTE(review): first URL uses the espn.co.uk domain, unlike the rest.
        return ["http://www.espn.co.uk/nfl/matchup?gameId=400951612", "http://www.espn.com/nfl/matchup?gameId=400951639",
                "http://www.espn.com/nfl/matchup?gameId=400951587", "http://www.espn.com/nfl/matchup?gameId=400950241",
                "http://www.espn.com/nfl/matchup?gameId=400951704", "http://www.espn.com/nfl/matchup?gameId=400951585",
                "http://www.espn.com/nfl/matchup?gameId=400951717", "http://www.espn.com/nfl/matchup?gameId=400951755",
                "http://www.espn.com/nfl/matchup?gameId=400951555", "http://www.espn.com/nfl/matchup?gameId=400951614"]
    # VIKINGS
    if teamname == "MIN":
        # NOTE(review): first URL uses the espn.co.uk domain, unlike the rest.
        return ["http://www.espn.co.uk/nfl/matchup?gameId=400951612", "http://www.espn.com/nfl/matchup?gameId=400951643",
                "http://www.espn.com/nfl/matchup?gameId=400951604", "http://www.espn.com/nfl/matchup?gameId=400951724",
                "http://www.espn.com/nfl/matchup?gameId=400951691", "http://www.espn.com/nfl/matchup?gameId=400951702",
                "http://www.espn.com/nfl/matchup?gameId=400951603", "http://www.espn.com/nfl/matchup?gameId=400951683",
                "http://www.espn.com/nfl/matchup?gameId=400951658", "http://www.espn.com/nfl/matchup?gameId=400951775",
                "http://www.espn.com/nfl/matchup?gameId=400951569"]
    # BENGALS
    if teamname == "CIN":
        return ["http://www.espn.com/nfl/matchup?gameId=400951572", "http://www.espn.com/nfl/matchup?gameId=400951620",
                "http://www.espn.com/nfl/matchup?gameId=400951712", "http://www.espn.com/nfl/matchup?gameId=400951554",
                "http://www.espn.com/nfl/matchup?gameId=400951609", "http://www.espn.com/nfl/matchup?gameId=400951711",
                "http://www.espn.com/nfl/matchup?gameId=400951753", "http://www.espn.com/nfl/matchup?gameId=400951656",
                "http://www.espn.com/nfl/matchup?gameId=400951810", "http://www.espn.com/nfl/matchup?gameId=400951588"]
    # RAVENS
    if teamname == "BAL":
        return ["http://www.espn.com/nfl/matchup?gameId=400951572", "http://www.espn.com/nfl/matchup?gameId=400951626",
                "http://www.espn.com/nfl/matchup?gameId=400951579", "http://www.espn.com/nfl/matchup?gameId=400951708",
                "http://www.espn.com/nfl/matchup?gameId=400951659", "http://www.espn.com/nfl/matchup?gameId=400951603",
                "http://www.espn.com/nfl/matchup?gameId=400951670", "http://www.espn.com/nfl/matchup?gameId=400951761",
                "http://www.espn.com/nfl/matchup?gameId=400951703", "http://www.espn.com/nfl/matchup?gameId=400951640"]
    # COLTS
    if teamname == "IND":
        return ['http://www.espn.com/nfl/matchup?gameId=400951597', "http://www.espn.com/nfl/matchup?gameId=400951599",
                "http://www.espn.com/nfl/matchup?gameId=400951747", "http://www.espn.com/nfl/matchup?gameId=400951785",
                "http://www.espn.com/nfl/matchup?gameId=400951589", "http://www.espn.com/nfl/matchup?gameId=400951711",
                "http://www.espn.com/nfl/matchup?gameId=400951751", "http://www.espn.com/nfl/matchup?gameId=400951565",
                "http://www.espn.com/nfl/matchup?gameId=400951591"]
    # RAMS
    if teamname == "LAR":
        return ['http://www.espn.com/nfl/matchup?gameId=400951597', "http://www.espn.com/nfl/matchup?gameId=400951674",
                "http://www.espn.com/nfl/matchup?gameId=400951568", "http://www.espn.com/nfl/matchup?gameId=400951716",
                "http://www.espn.com/nfl/matchup?gameId=400951657", "http://www.espn.com/nfl/matchup?gameId=400951773",
                "http://www.espn.com/nfl/matchup?gameId=400951593", "http://www.espn.com/nfl/matchup?gameId=400951758",
                "http://www.espn.com/nfl/matchup?gameId=400951663", "http://www.espn.com/nfl/matchup?gameId=400951775",
                "http://www.espn.com/nfl/matchup?gameId=400951614"]
    # REDSKINS
    if teamname == "WAS":
        return ["http://www.espn.com/nfl/matchup?gameId=400951592", "http://www.espn.com/nfl/matchup?gameId=400951644",
                "http://www.espn.com/nfl/matchup?gameId=400951674", "http://www.espn.com/nfl/matchup?gameId=400951752",
                "http://www.espn.com/nfl/matchup?gameId=400951767", "http://www.espn.com/nfl/matchup?gameId=400951641",
                "http://www.espn.com/nfl/matchup?gameId=400951732", "http://www.espn.com/nfl/matchup?gameId=400951765",
                "http://www.espn.com/nfl/matchup?gameId=400951658", "http://www.espn.com/nfl/matchup?gameId=400951577"]
    # BROWNS
    if teamname == "CLE":
        return ["http://www.espn.com/nfl/matchup?gameId=400951574","http://www.espn.com/nfl/matchup?gameId=400951626",
                "http://www.espn.com/nfl/matchup?gameId=400951599", "http://www.espn.com/nfl/matchup?gameId=400951712",
                "http://www.espn.com/nfl/matchup?gameId=400951556", "http://www.espn.com/nfl/matchup?gameId=400951700",
                "http://www.espn.com/nfl/matchup?gameId=400951683", "http://www.espn.com/nfl/matchup?gameId=400951563",
                "http://www.espn.com/nfl/matchup?gameId=400951769", "http://www.espn.com/nfl/matchup?gameId=400951588"]
    # PATRIOTS
    if teamname == "NE":
        return ["http://www.espn.com/nfl/matchup?gameId=400951566", "http://www.espn.com/nfl/matchup?gameId=400951639",
                "http://www.espn.com/nfl/matchup?gameId=400951607", "http://www.espn.com/nfl/matchup?gameId=400951727",
                "http://www.espn.com/nfl/matchup?gameId=400951552", "http://www.espn.com/nfl/matchup?gameId=400951766",
                "http://www.espn.com/nfl/matchup?gameId=400951638", "http://www.espn.com/nfl/matchup?gameId=400951715",
                "http://www.espn.com/nfl/matchup?gameId=400951690", "http://www.espn.com/nfl/matchup?gameId=400951815",
                "http://www.espn.com/nfl/matchup?gameId=400951600"]
    # BEARS
    if teamname == "CHI":
        return ["http://www.espn.com/nfl/matchup?gameId=400951570", "http://www.espn.com/nfl/matchup?gameId=400951645",
                "http://www.espn.com/nfl/matchup?gameId=400951678", "http://www.espn.com/nfl/matchup?gameId=400951691",
                "http://www.espn.com/nfl/matchup?gameId=400951578", "http://www.espn.com/nfl/matchup?gameId=400951717",
                "http://www.espn.com/nfl/matchup?gameId=400951559", "http://www.espn.com/nfl/matchup?gameId=400951701",
                "http://www.espn.com/nfl/matchup?gameId=400951610"]
    # TITANS
    if teamname == "TEN":
        return ["http://www.espn.com/nfl/matchup?gameId=400951584", "http://www.espn.com/nfl/matchup?gameId=400951635",
                "http://www.espn.com/nfl/matchup?gameId=400951623", "http://www.espn.com/nfl/matchup?gameId=400951720",
                "http://www.espn.com/nfl/matchup?gameId=400951646", "http://www.espn.com/nfl/matchup?gameId=400951785",
                "http://www.espn.com/nfl/matchup?gameId=400951761", "http://www.espn.com/nfl/matchup?gameId=400951656",
                "http://www.espn.com/nfl/matchup?gameId=400951698", "http://www.espn.com/nfl/matchup?gameId=400951591"]
    # JAGUARS
    if teamname == "JAX":
        return ["http://www.espn.com/nfl/matchup?gameId=400951580", "http://www.espn.com/nfl/matchup?gameId=400951635",
                "http://www.espn.com/nfl/matchup?gameId=400951579", "http://www.espn.com/nfl/matchup?gameId=400951655",
                "http://www.espn.com/nfl/matchup?gameId=400951773", "http://www.espn.com/nfl/matchup?gameId=400951589",
                "http://www.espn.com/nfl/matchup?gameId=400951753", "http://www.espn.com/nfl/matchup?gameId=400951622",
                "http://www.espn.com/nfl/matchup?gameId=400951769" ]
    # TEXANS
    if teamname == "HOU":
        return ["http://www.espn.com/nfl/matchup?gameId=400951580", "http://www.espn.com/nfl/matchup?gameId=400951620",
                "http://www.espn.com/nfl/matchup?gameId=400951607", "http://www.espn.com/nfl/matchup?gameId=400951720",
                "http://www.espn.com/nfl/matchup?gameId=400951664", "http://www.espn.com/nfl/matchup?gameId=400951700",
                "http://www.espn.com/nfl/matchup?gameId=400951729", "http://www.espn.com/nfl/matchup?gameId=400951751",
                "http://www.espn.com/nfl/matchup?gameId=400951663", "http://www.espn.com/nfl/matchup?gameId=400951771",
                "http://www.espn.com/nfl/matchup?gameId=400951640"]
    # COWBOYS
    if teamname == "DAL":
        return ["http://www.espn.com/nfl/matchup?gameId=400951608","http://www.espn.com/nfl/matchup?gameId=400951673",
                "http://www.espn.com/nfl/matchup?gameId=400951668", "http://www.espn.com/nfl/matchup?gameId=400951716",
                "http://www.espn.com/nfl/matchup?gameId=400951661", "http://www.espn.com/nfl/matchup?gameId=400951619",
                "http://www.espn.com/nfl/matchup?gameId=400951732", "http://www.espn.com/nfl/matchup?gameId=400951786",
                "http://www.espn.com/nfl/matchup?gameId=400951686", "http://www.espn.com/nfl/matchup?gameId=400951817",
                "http://www.espn.com/nfl/matchup?gameId=400951573"]
    # CARDINALS
    if teamname == "ARI":
        return ["http://www.espn.com/nfl/matchup?gameId=400951576", "http://www.espn.com/nfl/matchup?gameId=400951668",
                "http://www.espn.com/nfl/matchup?gameId=400951651", "http://www.espn.com/nfl/matchup?gameId=400951770",
                "http://www.espn.com/nfl/matchup?gameId=400951593", "http://www.espn.com/nfl/matchup?gameId=400951763",
                "http://www.espn.com/nfl/matchup?gameId=400951553", "http://www.espn.com/nfl/matchup?gameId=400951771",
                "http://www.espn.com/nfl/matchup?gameId=400951622"]
    # 49ERS
    if teamname == "SFO":
        return ["http://www.espn.com/nfl/matchup?gameId=400951605", "http://www.espn.com/nfl/matchup?gameId=400951676",
                "http://www.espn.com/nfl/matchup?gameId=400951568", "http://www.espn.com/nfl/matchup?gameId=400951767",
                "http://www.espn.com/nfl/matchup?gameId=400951619", "http://www.espn.com/nfl/matchup?gameId=400951723",
                "http://www.espn.com/nfl/matchup?gameId=400951763", "http://www.espn.com/nfl/matchup?gameId=400951688",
                "http://www.espn.com/nfl/matchup?gameId=400951618"]
    # SEAHAWKS
    if teamname == "SEA":
        return ["http://www.espn.com/nfl/matchup?gameId=400951601", "http://www.espn.com/nfl/matchup?gameId=400951676",
                "http://www.espn.com/nfl/matchup?gameId=400951623", "http://www.espn.com/nfl/matchup?gameId=400951747",
                "http://www.espn.com/nfl/matchup?gameId=400951657", "http://www.espn.com/nfl/matchup?gameId=400951628",
                "http://www.espn.com/nfl/matchup?gameId=400951729", "http://www.espn.com/nfl/matchup?gameId=400951765",
                "http://www.espn.com/nfl/matchup?gameId=400951553", "http://www.espn.com/nfl/matchup?gameId=400951818",
                "http://www.espn.com/nfl/matchup?gameId=400951618"]
    # PACKERS
    if teamname == "GB":
        return ["http://www.espn.com/nfl/matchup?gameId=400951601", "http://www.espn.com/nfl/matchup?gameId=400951679",
                "http://www.espn.com/nfl/matchup?gameId=400951678", "http://www.espn.com/nfl/matchup?gameId=400951661",
                "http://www.espn.com/nfl/matchup?gameId=400951702", "http://www.espn.com/nfl/matchup?gameId=400951585",
                "http://www.espn.com/nfl/matchup?gameId=400951790", "http://www.espn.com/nfl/matchup?gameId=400951559",
                "http://www.espn.com/nfl/matchup?gameId=400951703", "http://www.espn.com/nfl/matchup?gameId=400951633"]
    # GIANTS
    if teamname == "NYG":
        return ["http://www.espn.com/nfl/matchup?gameId=400951608", "http://www.espn.com/nfl/matchup?gameId=400951681",
                "http://www.espn.com/nfl/matchup?gameId=400951616", "http://www.espn.com/nfl/matchup?gameId=400951742",
                "http://www.espn.com/nfl/matchup?gameId=400951650", "http://www.espn.com/nfl/matchup?gameId=400951782",
                "http://www.espn.com/nfl/matchup?gameId=400951628", "http://www.espn.com/nfl/matchup?gameId=400951758",
                "http://www.espn.com/nfl/matchup?gameId=400951688", "http://www.espn.com/nfl/matchup?gameId=400951577"]
    # DOLPHINS
    if teamname == "MIA":
        return ["http://www.espn.com/nfl/matchup?gameId=400951666", "http://www.espn.com/nfl/matchup?gameId=400951611",
                "http://www.espn.com/nfl/matchup?gameId=400950241", "http://www.espn.com/nfl/matchup?gameId=400951646",
                "http://www.espn.com/nfl/matchup?gameId=400951697", "http://www.espn.com/nfl/matchup?gameId=400951598",
                "http://www.espn.com/nfl/matchup?gameId=400951670", "http://www.espn.com/nfl/matchup?gameId=400951787",
                "http://www.espn.com/nfl/matchup?gameId=400951693", "http://www.espn.com/nfl/matchup?gameId=400981391",
                "http://www.espn.com/nfl/matchup?gameId=400951600"]
# HAVE FOUR WEEKS
def masterlist():
    """Return the combined, hard-coded list of ESPN matchup URLs across all teams."""
    ##if teamname() == "MASTER":
    return ["http://www.espn.com/nfl/matchup?gameId=400951574", "http://www.espn.com/nfl/matchup?gameId=400951566",
            "http://www.espn.com/nfl/matchup?gameId=400951570","http://www.espn.com/nfl/matchup?gameId=400951592",
            "http://www.espn.com/nfl/matchup?gameId=400951567","http://www.espn.com/nfl/matchup?gameId=400951584",
            "http://www.espn.com/nfl/matchup?gameId=400951580","http://www.espn.com/nfl/matchup?gameId=400951669",
            "http://www.espn.com/nfl/matchup?gameId=400951644","http://www.espn.com/nfl/matchup?gameId=400951615",
            "http://www.espn.com/nfl/matchup?gameId=400951673", "http://www.espn.com/nfl/matchup?gameId=400951583",
            "http://www.espn.com/nfl/matchup?gameId=400951576", "http://www.espn.com/nfl/matchup?gameId=400951572",
            "http://www.espn.com/nfl/matchup?gameId=400951597", "http://www.espn.com/nfl/matchup?gameId=400951605",
            "http://www.espn.com/nfl/matchup?gameId=400951601", "http://www.espn.com/nfl/matchup?gameId=400951608",
            "http://www.espn.com/nfl/matchup?gameId=400951612", "http://www.espn.com/nfl/matchup?gameId=400951620",
            "http://www.espn.com/nfl/matchup?gameId=400951636", "http://www.espn.com/nfl/matchup?gameId=400951635",
            "http://www.espn.com/nfl/matchup?gameId=400951645", "http://www.espn.com/nfl/matchup?gameId=400951630",
            "http://www.espn.com/nfl/matchup?gameId=400951626", "http://www.espn.com/nfl/matchup?gameId=400951643",
            "http://www.espn.com/nfl/matchup?gameId=400951639", "http://www.espn.com/nfl/matchup?gameId=400951666",
            "http://www.espn.com/nfl/matchup?gameId=400951676", "http://www.espn.com/nfl/matchup?gameId=400951674",
            "http://www.espn.com/nfl/matchup?gameId=400951679", "http://www.espn.com/nfl/matchup?gameId=400951681",
            "http://www.espn.com/nfl/matchup?gameId=400951568", "http://www.espn.com/nfl/matchup?gameId=400951579",
            "http://www.espn.com/nfl/matchup?gameId=400951594", "http://www.espn.com/nfl/matchup?gameId=400951599",
            "http://www.espn.com/nfl/matchup?gameId=400951587", "http://www.espn.com/nfl/matchup?gameId=400951616",
            "http://www.espn.com/nfl/matchup?gameId=400951611", "http://www.espn.com/nfl/matchup?gameId=400951607",
            "http://www.espn.com/nfl/matchup?gameId=400951604", "http://www.espn.com/nfl/matchup?gameId=400951623",
            "http://www.espn.com/nfl/matchup?gameId=400951634", "http://www.espn.com/nfl/matchup?gameId=400951668",
            "http://www.espn.com/nfl/matchup?gameId=400951678", "http://www.espn.com/nfl/matchup?gameId=400950241",
            "http://www.espn.com/nfl/matchup?gameId=400951720", "http://www.espn.com/nfl/matchup?gameId=400951716",
            "http://www.espn.com/nfl/matchup?gameId=400951712", "http://www.espn.com/nfl/matchup?gameId=400951685",
            "http://www.espn.com/nfl/matchup?gameId=400951708", "http://www.espn.com/nfl/matchup?gameId=400951727",
            "http://www.espn.com/nfl/matchup?gameId=400951724", "http://www.espn.com/nfl/matchup?gameId=400951742",
            "http://www.espn.com/nfl/matchup?gameId=400951738", "http://www.espn.com/nfl/matchup?gameId=400951744",
            "http://www.espn.com/nfl/matchup?gameId=400951747", "http://www.espn.com/nfl/matchup?gameId=400951752",
            "http://www.espn.com/nfl/matchup?gameId=400951552", "http://www.espn.com/nfl/matchup?gameId=400951558",
            "http://www.espn.com/nfl/matchup?gameId=400951556", "http://www.espn.com/nfl/matchup?gameId=400951554",
            "http://www.espn.com/nfl/matchup?gameId=400951655", "http://www.espn.com/nfl/matchup?gameId=400951651",
            "http://www.espn.com/nfl/matchup?gameId=400951650", "http://www.espn.com/nfl/matchup?gameId=400951646",
            "http://www.espn.com/nfl/matchup?gameId=400951657", "http://www.espn.com/nfl/matchup?gameId=400951659",
            "http://www.espn.com/nfl/matchup?gameId=400951661", "http://www.espn.com/nfl/matchup?gameId=400951664",
            "http://www.espn.com/nfl/matchup?gameId=400951691", "http://www.espn.com/nfl/matchup?gameId=400951695",
            "http://www.espn.com/nfl/matchup?gameId=400951700", "http://www.espn.com/nfl/matchup?gameId=400951767",
            "http://www.espn.com/nfl/matchup?gameId=400951697", "http://www.espn.com/nfl/matchup?gameId=400951766",
            "http://www.espn.com/nfl/matchup?gameId=400951704", "http://www.espn.com/nfl/matchup?gameId=400951702",
            "http://www.espn.com/nfl/matchup?gameId=400951773", "http://www.espn.com/nfl/matchup?gameId=400951770",
            "http://www.espn.com/nfl/matchup?gameId=400951776", "http://www.espn.com/nfl/matchup?gameId=400951779",
            "http://www.espn.com/nfl/matchup?gameId=400951782", "http://www.espn.com/nfl/matchup?gameId=400951785",
            "http://www.espn.com/nfl/matchup?gameId=400951571", "http://www.espn.com/nfl/matchup?gameId=400951578",
            "http://www.espn.com/nfl/matchup?gameId=400951575", "http://www.espn.com/nfl/matchup?gameId=400951593",
            "http://www.espn.com/nfl/matchup?gameId=400951603", "http://www.espn.com/nfl/matchup?gameId=400951585",
            "http://www.espn.com/nfl/matchup?gameId=400951589", "http://www.espn.com/nfl/matchup?gameId=400951619",
            "http://www.espn.com/nfl/matchup?gameId=400951624", "http://www.espn.com/nfl/matchup?gameId=400951609",
            "http://www.espn.com/nfl/matchup?gameId=400951598", "http://www.espn.com/nfl/matchup?gameId=400951628",
            "http://www.espn.com/nfl/matchup?gameId=400951638", "http://www.espn.com/nfl/matchup?gameId=400951641",
            "http://www.espn.com/nfl/matchup?gameId=400951670", "http://www.espn.com/nfl/matchup?gameId=400951683",
            "http://www.espn.com/nfl/matchup?gameId=400951711", "http://www.espn.com/nfl/matchup?gameId=400951706",
            "http://www.espn.com/nfl/matchup?gameId=400951725", "http://www.espn.com/nfl/matchup?gameId=400951723",
            "http://www.espn.com/nfl/matchup?gameId=400951721", "http://www.espn.com/nfl/matchup?gameId=400951715",
            "http://www.espn.com/nfl/matchup?gameId=400951717", "http://www.espn.com/nfl/matchup?gameId=400951729",
            "http://www.espn.com/nfl/matchup?gameId=400951732", "http://www.espn.com/nfl/matchup?gameId=400951736",
            "http://www.espn.com/nfl/matchup?gameId=400951737", "http://www.espn.com/nfl/matchup?gameId=400951743",
            "http://www.espn.com/nfl/matchup?gameId=400951749", "http://www.espn.com/nfl/matchup?gameId=400951760",
            "http://www.espn.com/nfl/matchup?gameId=400951761", "http://www.espn.com/nfl/matchup?gameId=400951758",
            "http://www.espn.com/nfl/matchup?gameId=400951755", "http://www.espn.com/nfl/matchup?gameId=400951753",
            "http://www.espn.com/nfl/matchup?gameId=400951751", "http://www.espn.com/nfl/matchup?gameId=400951763",
            "http://www.espn.com/nfl/matchup?gameId=400951765", "http://www.espn.com/nfl/matchup?gameId=400951786",
            "http://www.espn.com/nfl/matchup?gameId=400951787", "http://www.espn.com/nfl/matchup?gameId=400951790",
            "http://www.espn.com/nfl/matchup?gameId=400951553", "http://www.espn.com/nfl/matchup?gameId=400951656",
            "http://www.espn.com/nfl/matchup?gameId=400951563",
            "http://www.espn.com/nfl/matchup?gameId=400951565", "http://www.espn.com/nfl/matchup?gameId=400951559",
            "http://www.espn.com/nfl/matchup?gameId=400951555", "http://www.espn.com/nfl/matchup?gameId=400951658",
            "http://www.espn.com/nfl/matchup?gameId=400951653", "http://www.espn.com/nfl/matchup?gameId=400951663",
            "http://www.espn.com/nfl/matchup?gameId=400951686", "http://www.espn.com/nfl/matchup?gameId=400951688",
            "http://www.espn.com/nfl/matchup?gameId=400951690", "http://www.espn.com/nfl/matchup?gameId=400951693",
            "http://www.espn.com/nfl/matchup?gameId=400951698", "http://www.espn.com/nfl/matchup?gameId=400951775",
            "http://www.espn.com/nfl/matchup?gameId=400981391", "http://www.espn.com/nfl/matchup?gameId=400951771",
            "http://www.espn.com/nfl/matchup?gameId=400951703", "http://www.espn.com/nfl/matchup?gameId=400951769",
            "http://www.espn.com/nfl/matchup?gameId=400951701", "http://www.espn.com/nfl/matchup?gameId=400951807",
            "http://www.espn.com/nfl/matchup?gameId=400951810", "http://www.espn.com/nfl/matchup?gameId=400951815",
            "http://www.espn.com/nfl/matchup?gameId=400951817", "http://www.espn.com/nfl/matchup?gameId=400951818",
            "http://www.espn.com/nfl/matchup?gameId=400951569", "http://www.espn.com/nfl/matchup?gameId=400951573",
            "http://www.espn.com/nfl/matchup?gameId=400951577", "http://www.espn.com/nfl/matchup?gameId=400951600",
            "http://www.espn.com/nfl/matchup?gameId=400951595", "http://www.espn.com/nfl/matchup?gameId=400951591",
            "http://www.espn.com/nfl/matchup?gameId=400951588", "http://www.espn.com/nfl/matchup?gameId=400951586",
            "http://www.espn.com/nfl/matchup?gameId=400951610", "http://www.espn.com/nfl/matchup?gameId=400951606",
            "http://www.espn.com/nfl/matchup?gameId=400951618", "http://www.espn.com/nfl/matchup?gameId=400951622",
            "http://www.espn.com/nfl/matchup?gameId=400951614", "http://www.espn.com/nfl/matchup?gameId=400951629",
            "http://www.espn.com/nfl/matchup?gameId=400951633", "http://www.espn.com/nfl/matchup?gameId=400951640"]
| [
"crystinrodrick7@hotmail.com"
] | crystinrodrick7@hotmail.com |
0b6ed03178625556243a0a366ec1c4729202755e | 93a35763a4fe06d6d7daa17166f7a899420905c0 | /SRC/create_output.py | 43256507f3904e77134d8e6a0b5157adad20be70 | [] | no_license | uscoburgo/project-pipelines | 43308d392d4c515204065cb1dec114449c38feea | aba5808a4d93d20e582155610634cc80657c2055 | refs/heads/master | 2022-11-11T15:43:30.782726 | 2020-06-23T01:47:12 | 2020-06-23T01:47:12 | 273,896,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,792 | py | import pandas as pd
import numpy as np
import re
import requests
from bs4 import BeautifulSoup
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.techindicators import TechIndicators
from plots_stocks import plotSentiment
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import os
from dotenv import load_dotenv
load_dotenv()
def CreateReport(stock=None):
    """Build a sentiment/price summary string for one stock ticker.

    Reads ../INPUT/df_analysis.csv (columns: Ticker, positive count,
    negative count, low price, high price by position) and returns a
    human-readable report for *stock*, or an error message when the
    ticker has no row.

    NOTE(review): when *stock* is falsy the function falls through to
    plotSentiment(df_stock) below with df_stock undefined -> NameError;
    confirm the intended no-argument behaviour.
    """
    #Creates a report filtering the database according to the parameters given
    data = pd.read_csv("../INPUT/df_analysis.csv")
    df = pd.DataFrame(data)
    outputString=''
    if stock:
        try:
            df_stock = df[df['Ticker'] == stock]
            # Positional columns: 1 = positive tweets, 2 = negative tweets,
            # 3 = low price, 4 = high price.
            positive=df_stock.iloc[0,1]
            negative=df_stock.iloc[0,2]
            low=df_stock.iloc[0,3]
            high=df_stock.iloc[0,4]
            outputString += f'For {stock}, we found {positive} tweets with positive sentiment and {negative} with negative sentiment.\n'
            outputString += f'The high price for {stock} was {high} and the low price was {low}.\n'
            return outputString
        # NOTE(review): bare except hides the real failure (missing ticker vs
        # malformed CSV); catching IndexError/KeyError would be safer.
        except:
            return "The ticker you entered has no sentiment analysis available"
    # NOTE(review): both branches above return, so when stock is truthy the
    # three lines below are unreachable; when stock is falsy they crash
    # (df_stock was never assigned).
    #plot sentiment
    res=plotSentiment(df_stock)
    print(res)
    print("\n")
    """
    #-------------------------------------------------------------------------
    #basic statistics
    mean=df_analysis['DELAY'].mean()
    outputString += f'mean delay = {mean}\n'
    maxd=df_analysis['DELAY'].max()#show the maximum delay
    outputString += f'max delay = {maxd}\n'
    mind=df_analysis['DELAY'].min()#show the minimum delay
    outputString += f'min dealy = {mind}\n'
    stdd=df_analysis['DELAY'].std()
    outputString += f'std delay = {stdd}\n'
    #if the dataframe is empty it means the input parameters were not a valid input
    if df_analysis.shape[0]==0:
        outputString='There is no flight connection between these airports'
    print(outputString + '\n')
    """
    #------------------------------------------------------------------------------
    """
    #plotBestAirport
    if best:
        res=plotBestAirport(df_analysis)
        print(res)
        print('\n')
    else:
        res=plotWorstAirport(df_analysis)
        print(res)
        print('\n')
    ##################################################################################
    #PDF GENERATION
    generatePDF1()
    ##################################################################################
    #email
    filename = "OUTPUT/report.pdf"
    #address='tzvuccyseraf@gmail.com'
    address=input('insert your e-mail address: ')
    sendMail(address,filename,outputString)
    """
"uscoburgo@gmail.com"
] | uscoburgo@gmail.com |
bb926ed9470058408a9f838241a53266d6394661 | 3411c5b7b6821fb7ea5c836b47395cd6692cfc66 | /single.py | 9066023ec75cf1fab996795331007848d92db18d | [] | no_license | sparticlesteve/py-mt-example | 410dbd057e77bee83793ccf244a52ac7cbf5a5af | 349fb2e674388a0e50ad6dd881de0a57c0e72703 | refs/heads/master | 2020-04-28T23:18:10.610223 | 2015-04-04T22:19:44 | 2015-04-04T22:19:44 | 33,418,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | #!/usr/bin/env python3
import logging
import os
from time import time
from download import setup_download_dir, get_links, download_link
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.getLogger('requests').setLevel(logging.CRITICAL)
logger = logging.getLogger(__name__)
def main():
    """Download every .jpg Imgur link sequentially and report elapsed time.

    Requires the IMGUR_CLIENT_ID environment variable to be set.
    """
    start = time()
    client_id = os.getenv('IMGUR_CLIENT_ID')
    if not client_id:
        raise Exception('Couldn\'t find IMGUR_CLIENT_ID environment variable!')
    target_dir = setup_download_dir()
    jpg_links = (url for url in get_links(client_id) if url.endswith('.jpg'))
    for url in jpg_links:
        download_link(target_dir, url)
    print('Took {}s'.format(time() - start))
if __name__ == '__main__':
main()
| [
"sfarrell@cern.ch"
] | sfarrell@cern.ch |
5d531cf3d38f4598b918f4a682fe7d61279880ef | 2e4d1da92dbf5b3d8a88322c19fc700f3bd1ef4e | /FDLNet-master/latency/rfnet/config.py | 28d993a8b6d6bdf0d30d823a1e0305b76247a83a | [] | no_license | iamwangyabin/hardnetNas | e0ad756134556dacb152f1a326014baa48d6d010 | ed2b22031971b5de15aa40844cab350ec1a1b8aa | refs/heads/master | 2022-09-26T11:33:40.607810 | 2022-09-19T01:49:23 | 2022-09-19T01:49:23 | 215,681,633 | 1 | 0 | null | 2022-09-19T02:02:11 | 2019-10-17T01:59:48 | Python | UTF-8 | Python | false | false | 3,115 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
cfg = __C
"""
Project options
"""
__C.PROJ = edict()
# whether us gpu
__C.PROJ.USE_GPU = True
# seed for random
__C.PROJ.SEED = 0
# training, evaluate and test data
__C.PROJ.TRAIN = "view"
__C.PROJ.TRAIN_ALL = False
__C.PROJ.TRAIN_PPT = 0.8
__C.PROJ.EVAL = "view"
__C.PROJ.EVAL_ALL = False
__C.PROJ.EVAL_PPT = 0.1
__C.PROJ.TEST = "view"
__C.PROJ.TEST_ALL = False
__C.PROJ.TEST_PPT = 0.1
"""
Model options
"""
__C.MODEL = edict()
# gaussian kernel size
__C.MODEL.GAUSSIAN_KSIZE = 15
# gaussian kernel sigma
__C.MODEL.GAUSSIAN_SIGMA = 0.5
# Descriptor Threshold
__C.MODEL.DES_THRSH = 1.0
# Coordinate Threshold
__C.MODEL.COO_THRSH = 5.0
# Ksize
__C.MODEL.KSIZE = 3
# padding
__C.MODEL.padding = 1
# dilation
__C.MODEL.dilation = 1
# scale_list
__C.MODEL.scale_list = [3.0, 5.0, 7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0]
"""
Loss options
"""
__C.LOSS = edict()
# score loss wight
__C.LOSS.SCORE = 1000
# pair loss weight
__C.LOSS.PAIR = 1
"""
Training options
"""
__C.TRAIN = edict()
# batch size
__C.TRAIN.BATCH_SIZE = 1
# Train epoch
__C.TRAIN.EPOCH_NUM = 201
# Train log interval
__C.TRAIN.LOG_INTERVAL = 5
# weight decay
__C.TRAIN.WEIGHT_DECAY = 1e-4
# detector learning rate
__C.TRAIN.DET_LR = 0.1
# descriptor learning rate
__C.TRAIN.DES_LR = 10
# detection optimizer (adam/sgd)
__C.TRAIN.DET_OPTIMIZER = "adam"
# adjust detection lr (sgd/exp)
__C.TRAIN.DET_LR_SCHEDULE = "exp"
# detector weight decay
__C.TRAIN.DET_WD = 0
# descriptor optimizer (adam/sgd)
__C.TRAIN.DES_OPTIMIZER = "adam"
# adjust descriptor lr (sgd/exp)
# __C.TRAIN.DES_LR_SCHEDULE = 'exp'
__C.TRAIN.DES_LR_SCHEDULE = "sgd"
# descriptor weight decay
__C.TRAIN.DES_WD = 0
# learning rate decay epoch
__C.TRAIN.LR_DECAY_EPOCH = 5
# learning rate base line
__C.TRAIN.LR_BASE = 0.0001
# score strength weight
__C.TRAIN.score_com_strength = 100.0
# scale strength weight
__C.TRAIN.scale_com_strength = 100.0
# non maximum supression threshold
__C.TRAIN.NMS_THRESH = 0.0
# nms kernel size
__C.TRAIN.NMS_KSIZE = 5
# top k patch
__C.TRAIN.TOPK = 250
"""
Image data options
"""
# View train sequence Mean and Std
__C.view = edict()
__C.view.csv = "hpatch_view.csv"
__C.view.root = "../data/hpatch_v_sequence"
__C.view.MEAN = 0.4230204841414801
__C.view.STD = 0.25000138349993173
__C.view.NUM = 295
# illumination sequence Mean and Std
__C.illu = edict()
__C.illu.csv = "hpatch_illum.csv"
__C.illu.root = "../data/hpatch_i_sequence"
__C.illu.MEAN = 0.4337542740124942
__C.illu.STD = 0.2642307153894012
__C.illu.NUM = 285
# illumination sequence Mean and Std
__C.ef = edict()
__C.ef.csv = "EFDataset.csv"
__C.ef.root = "../data/EFDataset"
__C.ef.MEAN = 0.4630827743610772
__C.ef.STD = 0.24659232013004403
__C.ef.NUM = 293
"""
Patch options
"""
__C.PATCH = edict()
# patch size
__C.PATCH.SIZE = 32
"""
Hardnet options
"""
__C.HARDNET = edict()
# margin for hardnet loss
__C.HARDNET.MARGIN = 1.0
| [
"38123329+iamwangyabin@users.noreply.github.com"
] | 38123329+iamwangyabin@users.noreply.github.com |
3df81fa28702b06dbef1125fae7633efa3dc8cc4 | f81cb2e289ea1554c09164908c05dda1714005fc | /readSql_execute_in_DB/readScript.py | fe3ec84372868d68a7183ec7fccdd9f665b1a9ee | [] | no_license | priya100697/Python_ | 1a1997f409039c2406d3efea534e3e91e2cc91cc | 55c62b9b2999022b4407d0ac9ccea56a4535d3dd | refs/heads/master | 2022-12-18T03:05:31.651468 | 2020-09-20T18:21:11 | 2020-09-20T18:21:11 | 297,124,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | def readfile(path):
data = list()
buffer = str()
with open(path, "r") as file:
for line in file:
if not line.isspace():
if line.strip()[-1] == ";":
data.append(buffer + line.strip())
buffer = str()
else:
buffer = buffer + line.strip() + "\n"
return data
| [
"pandaypriya926@gmail.com"
] | pandaypriya926@gmail.com |
91cfc2c67452f91c2a9477b90f68a5ca951e0c4a | 71053f65cad20188ae5182fce4c77a074fde4309 | /background-backup/hardware.py | 71a5393a9447748f831f99a7812aa47e0a3a70b1 | [] | no_license | Chenzhiyong47/Hello-World | f446b6ae4429e976ecafb27dec4fe4de63bceab3 | 5f41681a1630487af21d01013dba0618f8aebac9 | refs/heads/master | 2021-01-13T01:17:38.448915 | 2020-08-07T08:14:09 | 2020-08-07T08:14:09 | 81,441,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,304 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Writing in newfile.py:
from hardware import Hardware
hardware = Hardware()
# Press "Ctrl+C" to end a progress.
hardware.ultrasound_A.test_distance()
hardware.ultrasound_B.test_distance()
hardware.ultrasound_C.test_distance()
hardware.ultrasound_D.test_distance()
Input in commanding script:
python3 newfile.py
"""
import RPi.GPIO as GPIO
from time import time, sleep
GPIO.setwarnings(False)
# To control motor to start or stop.
class Motor:
    # Drives one motor through a single GPIO control pin (BCM numbering).
    # The pin idles HIGH and is pulled LOW to run the motor, so the driver
    # appears to be active-low — NOTE(review): confirm against the wiring.
    def __init__(self, Control):
        """Remember the control pin number and configure it as an output."""
        self.Control = Control
        self.init()
    def init(self):
        # Output pin, initial level HIGH (motor off); short settle delay.
        GPIO.setup(self.Control, GPIO.OUT, initial=GPIO.HIGH)
        sleep(0.05)
    def start(self):
        # Drive the control line LOW to switch the motor on.
        GPIO.output(self.Control, GPIO.LOW)
        sleep(0.01)
    def stop(self):
        # Release the control line HIGH to switch the motor off.
        GPIO.output(self.Control, GPIO.HIGH)
        sleep(0.01)
# To use ultrasound to measure the distance
class Ultrasound:
    # Ultrasonic range finder on two BCM pins: Trig (output) and Echo (input).
    def __init__(self, Trig, Echo):
        """Store the trigger/echo pin numbers and configure both pins."""
        self.Trig = Trig
        self.Echo = Echo
        self.init()
    def init(self):
        # Trigger idles LOW; echo is an input. Short settle delay afterwards.
        GPIO.setup(self.Trig, GPIO.OUT, initial=GPIO.LOW)
        GPIO.setup(self.Echo, GPIO.IN)
        sleep(0.08)
    '''
    这是一个超声测距模块的测量转换函数,它的原理是先向TRIG脚输入至少10us的触发信号,
    该模块内部将发出 8 个 40kHz 周期电平并检测回波。一旦检测到有回波信号则ECHO输出
    高电平回响信号。回响信号的脉冲宽度与所测的距离成正比。由此通过发射信号到收到的回
    响信号时间间隔可以计算得到距离。公式: 距离=高电平时间*声速(34000cm/S)/2。返回一个
    测量值(单位是cm)
    其中:
    t1是发现Echo脚收到高电平时的瞬时时间
    t2是发现Echo脚由高电平变为低电平时的瞬时时间
    t2-t1 就是Echo检测到高电平的时间
    '''
    # Translation of the note above: pulse TRIG high for at least 10 us; the
    # module emits 8 cycles at 40 kHz and raises ECHO for the echo duration,
    # which is proportional to the distance. Formula:
    # distance = high-level time * speed of sound (34000 cm/s) / 2.
    # t1 = instant ECHO goes high, t2 = instant ECHO drops low again.
    def get_distance_cm(self):
        """Take one distance measurement and return it in cm (1 decimal).

        NOTE(review): the two busy-wait loops below spin forever if no echo
        ever arrives — consider adding a timeout.
        """
        # Give TRIG a 12 us high pulse to fire one trigger signal.
        GPIO.output(self.Trig, GPIO.HIGH)
        sleep(0.00012)
        GPIO.output(self.Trig, GPIO.LOW)
        while not GPIO.input(self.Echo):
            pass
        t1 = time()
        while GPIO.input(self.Echo):
            pass
        t2 = time()
        distance = (t2 - t1) * 34000 / 2
        return float('%.1f' % distance)
    def test(self):
        """Print a single distance reading."""
        print("distance: " + str(self.get_distance_cm()))
# Object the ultrasound and motor
class Hardware:
    # Aggregates the four ultrasound sensors and four motors on fixed BCM pins.
    def __init__(self):
        """Switch to BCM numbering and construct every attached device."""
        self.setmode_BCM()
        # Four ultrasound: A, B, C, D
        self.ultrasound_A = Ultrasound(Trig=2, Echo=3)
        self.ultrasound_B = Ultrasound(Trig=17, Echo=27)
        self.ultrasound_C = Ultrasound(Trig=10, Echo=9)
        self.ultrasound_D = Ultrasound(Trig=5, Echo=6)
        # Four motor: A, B, C, D
        self.motor_A = Motor(Control=14)
        self.motor_B = Motor(Control=15)
        self.motor_C = Motor(Control=18)
        self.motor_D = Motor(Control=23)
    def setmode_BCM(self):
        # Use Broadcom (BCM) GPIO numbering for all pin numbers above.
        GPIO.setmode(GPIO.BCM)
    def clearmode(self):
        # Release every GPIO channel claimed by this process.
        GPIO.cleanup()
| [
"chenzhiyong47@163.com"
] | chenzhiyong47@163.com |
83a1d8573fd98ab9baa0b2388b9256d3d27f2daf | 483d26722245774d0860f45157b0bc578dd2ff15 | /crudproject/crud/views.py | dee5b041dd944c71b1cab6032b56809eee6b90fe | [] | no_license | raianibnfaiz/crudproject | a44a0a9a49e83442bb0019c2141b6304ff10fb2d | 0a7ab9efb57aa57784b169ffb2eea8792daa1b86 | refs/heads/master | 2022-11-28T04:19:27.177597 | 2020-08-05T07:39:36 | 2020-08-05T07:39:36 | 285,215,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | from django.shortcuts import render,redirect
from .forms import MyUserForms
from .models import MyUser
def create_user(request):
    """Show the user form plus the existing users; create a user on valid POST."""
    form = MyUserForms(request.POST or None)
    users = MyUser.objects.all()
    if not form.is_valid():
        context = {'form': form, 'users': users}
        return render(request, 'index.html', context)
    form.save()
    return redirect('/')
def update_user(request, id):
    """Edit the user with primary key ``id`` via the shared index-page form.

    On a valid POST the instance is saved and the client is redirected home;
    otherwise the form is re-rendered with its validation errors.
    """
    # Bug fix: the original rebound the ``id`` parameter to the fetched model
    # instance, shadowing the builtin and making the code misleading.
    # NOTE(review): .get() raises MyUser.DoesNotExist (HTTP 500) for an
    # unknown id; consider django.shortcuts.get_object_or_404 instead.
    user = MyUser.objects.get(id=id)
    form = MyUserForms(request.POST or None, instance=user)
    users = MyUser.objects.all()
    if form.is_valid():
        form.save()
        return redirect('/')
    return render(request, 'index.html', {'form': form, 'users': users})
def delete_user(request, id):
    """Remove the user whose primary key is ``id`` and return to the index."""
    MyUser.objects.get(id=id).delete()
    return redirect('/')
| [
"raianibnfaiz@gmail.com"
] | raianibnfaiz@gmail.com |
6c04b7cbd139e06c8c9112a9e505b24f5d41fcbb | 3a9ed017ed45361811fee0980af2eaf1bd7e3624 | /homework3.py | 9b7384f0e0a60585a00be03fd7b61b7ea89d6117 | [] | no_license | harshsjani/State-space-search-Artificial-Intelligence | 058e97f1bda8ce2bc55f52aad9a8584f5a956944 | 5609dbaa92c44a8cc5c7df554f7edf0b69428cd0 | refs/heads/main | 2023-06-19T05:58:12.968683 | 2021-07-17T00:58:58 | 2021-07-17T00:58:58 | 337,557,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,604 | py | import os
from collections import deque
class Constants:
    # Static configuration: I/O file names, the FAIL sentinel, and the
    # 8-connected movement offsets (4 cardinal moves, then 4 diagonals).
    input_filename = "input.txt"
    output_filename = "output.txt"
    fail_output = "FAIL"
    directions = [(0, 1), (1, 0), (0, -1), (-1, 0), (1, 1), (1, -1), (-1, 1), (-1, -1)]


class Solver:
    """Grid path-search driver for the settling-sites assignment.

    Reads the problem from input.txt, dispatches on the algorithm tag
    (BFS / UCS / A*), and writes one result line per settling site to
    output.txt ("FAIL" for unreachable sites).
    """

    def __init__(self):
        """Parse input.txt and prepare one result slot per settling site."""
        self.parse_input()
        self.paths_to_settling_sites = [None] * self.num_settling_sites
        self.serialized_output = []
        # Dispatch table: algorithm tag from the input file -> solver method.
        self.calls = {"BFS": self.bfs, "UCS": self.ucs, "A*": self.a_star}

    def parse_input(self):
        """Load the problem definition from input.txt in the working directory."""
        cwd = os.getcwd()
        input_file_path = os.path.join(cwd, Constants.input_filename)
        with open(input_file_path, 'r') as file:
            self.algo = file.readline().strip("\n")
            self.num_cols, self.num_rows = map(int, file.readline().split())
            self.starting_x, self.starting_y = map(int, file.readline().split())
            self.max_rock_height = int(file.readline())
            self.num_settling_sites = int(file.readline())
            self.settling_sites = []
            for _ in range(self.num_settling_sites):
                # Bug fix: materialise each site as a tuple — the original
                # appended a single-use map iterator, so a site could only be
                # unpacked once and printed as an opaque object.
                self.settling_sites.append(tuple(map(int, file.readline().split())))
            self.grid = [[0] * self.num_cols for _ in range(self.num_rows)]
            for row_idx in range(self.num_rows):
                for col_idx, val in enumerate(map(int, file.readline().split())):
                    self.grid[row_idx][col_idx] = val

    def serialize_outputs(self):
        """Render each stored path as "x,y x,y ..." (or FAIL) into serialized_output."""
        for path in self.paths_to_settling_sites:
            if path is None:
                self.serialized_output.append(Constants.fail_output)
            else:
                self.serialized_output.append(" ".join(["%s,%s" % cell for cell in path]))

    def write_output(self):
        """Serialize all results and write them, newline-separated, to output.txt."""
        cwd = os.getcwd()
        output_file_path = os.path.join(cwd, Constants.output_filename)
        self.serialize_outputs()
        with open(output_file_path, "w") as file:
            file.writelines('\n'.join(self.serialized_output))

    def show_input(self):
        """Debug helper: print the parsed problem and the height grid."""
        print("Algorithm: %s\nW H: %d %d\nStarting Position: %d %d\nMaximum Rock Height Difference: %d\nNumber of Settling Sites: %d\nSettling Sites: %s" % (
            self.algo, self.num_cols, self.num_rows, self.starting_x, self.starting_y, self.max_rock_height, self.num_settling_sites, self.settling_sites
        ))
        print("\nGrid:")
        for row in self.grid:
            print(row)

    def get_valid_neighbors(self, x, y):
        """Return [(px, py, cost)] for the in-bounds 8-connected neighbours of
        (x, y) whose height change is allowed.

        Cost is 14 for a diagonal step and 10 for a straight one.  Negative
        grid values appear to encode rock height (non-negative = mud), since
        the height constraint only involves negative parts — NOTE(review):
        confirm against the assignment specification.
        """
        neighbors = []
        for i, j in Constants.directions:
            p = x + i
            q = y + j
            cost = 14 if abs(i) == abs(j) else 10
            if 0 <= p < self.num_cols and 0 <= q < self.num_rows:
                cur_height = self.grid[y][x]
                new_height = self.grid[q][p]
                height_dif = 0
                if cur_height < 0:
                    if new_height < 0:
                        height_dif = abs(cur_height - new_height)
                    else:
                        height_dif = abs(cur_height)
                elif new_height < 0:
                    height_dif = abs(new_height)
                if height_dif <= self.max_rock_height:
                    neighbors.append((p, q, cost))
        return neighbors

    def bfs(self):
        """Breadth-first search; fills fewest-moves paths for reachable sites."""
        sx, sy = self.starting_x, self.starting_y
        frontier = deque()  # renamed from `open`, which shadowed the builtin
        frontier.append((sx, sy))
        # Bug fix: the original used set([sx, sy]), which stored the two
        # coordinates as separate ints rather than the (sx, sy) cell tuple,
        # so the start cell was never actually marked visited.
        visited = {(sx, sy)}
        parentpointer = {}
        while frontier:
            next_level = deque()
            while frontier:
                nx, ny = frontier.popleft()
                for p, q, _ in self.get_valid_neighbors(nx, ny):
                    if (p, q) not in visited:
                        visited.add((p, q))
                        parentpointer[(p, q)] = (nx, ny)
                        next_level.append((p, q))
            frontier = next_level
        for idx, (x, y) in enumerate(self.settling_sites):
            # A site that is neither the start nor reached stays None -> FAIL.
            if (x, y) != (sx, sy) and (x, y) not in parentpointer:
                continue
            path = []
            while (x, y) != (sx, sy):
                path.append((x, y))
                x, y = parentpointer[(x, y)]
            path.append((sx, sy))
            # Store a real list — the original stored a one-shot reversed()
            # iterator, which could only be serialized once.
            self.paths_to_settling_sites[idx] = path[::-1]

    def ucs(self):
        """Uniform-cost search — not implemented yet."""
        print("SOLVING UCS")

    def a_star(self):
        """A* search — not implemented yet."""
        print("SOLVING A*!")

    def solve(self):
        """Run the solver selected by the input file's algorithm tag."""
        self.calls[self.algo]()
def main():
    # Entry point: parse the problem, run the configured search, write output.txt.
    solver = Solver()
    solver.solve()
    solver.write_output()
    # solver.show_input()
if __name__ == "__main__":
main()
| [
"whizkid@gmail.com"
] | whizkid@gmail.com |
9da3525a508b015180d6cba65f119f57588df51e | 608900d5be9f4d45fdfebda9822a314405754913 | /config/settings/test.py | 0e7b2de859fa698aba87ce0873244f80f8d82f1b | [
"MIT"
] | permissive | brahimchougrani/e-sanad | 2a204cd4198ab702638ff247850850379e2760ad | 0b3869a9cd07dd037421de7f562ffdc70ddb867c | refs/heads/master | 2022-12-05T16:57:02.707895 | 2020-08-23T12:43:55 | 2020-08-23T12:43:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | """
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="0hRchob8ySF9ncjpDqq4RdtY5WgRdRfGR8uAHx2r1IVMXSw0JM5KdBi1lvXDrL32",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Your stuff...
# ------------------------------------------------------------------------------
| [
"brahimchiugrani31@gmail.com"
] | brahimchiugrani31@gmail.com |
8a32ce25bfea1cadc9dbf675455e80e62c2842db | 54d81ffa9dc1faacb0a2cf0baa9a334bc99faa23 | /day00/ex02/TinyStatistician.py | 1e72ff0de7481fcb678d1b6099fd067cb94e55f7 | [] | no_license | AdrianWR/MachineLearningBootcamp | 94bee5cd1ec30c62390ccd3c4b679223dd011174 | b30f717aaceca02f9fbb273b607a0ec496e432a0 | refs/heads/master | 2022-12-22T17:38:16.883083 | 2020-09-26T15:40:20 | 2020-09-26T15:40:20 | 259,935,134 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,317 | py | import math
class TinyStatistician:
    """Minimal descriptive-statistics helpers for plain numeric sequences.

    Every method is a static method and returns ``None`` for empty input
    instead of raising (the original contract is preserved).
    """

    @staticmethod
    def mean(x):
        """Arithmetic mean of *x*, or None when *x* is empty."""
        if not len(x):
            return None
        return sum(x) / len(x)

    @staticmethod
    def median(x):
        """Median of *x* (the 50th percentile), or None when *x* is empty."""
        if not len(x):
            return None
        return TinyStatistician.quartile(x, 50)

    @staticmethod
    def quartile(x, percentile):
        """Value of *x* at *percentile* (0-100), or None when *x* is empty.

        Convention: index = percentile * n / 100.  When the index is an
        integer the two neighbouring order statistics are averaged, otherwise
        the element at floor(index) is returned.  NOTE: with percentile 0 the
        average wraps to data[-1] (the maximum) — kept for compatibility.
        """
        if not len(x):
            return None
        # Idiom fix: the hand-rolled insertion sort is replaced by the
        # built-in sorted(), which also copies rather than mutating *x*.
        data = sorted(x)
        n = len(data)
        index = percentile * n / 100
        floor = math.floor(index)
        if index == floor:
            return float((data[floor] + data[floor - 1]) / 2)
        return float(data[floor])

    @staticmethod
    def var(x):
        """Population variance of *x* (divides by n), or None when empty."""
        n = len(x)
        if not n:
            return None
        mu = TinyStatistician.mean(x)
        return sum((i - mu) ** 2 for i in x) / n

    @staticmethod
    def std(x):
        """Population standard deviation of *x*, or None when empty."""
        if not len(x):
            return None
        return math.sqrt(TinyStatistician.var(x))
| [
"adrian.w.roque@gmail.com"
] | adrian.w.roque@gmail.com |
eafde55b4128f1e79475991b307f8e91de418586 | 8a283e52a9180b81356e68dd35b1293c3d32aa5c | /task9.py | 505435e338a98373032a0ff4dc1235507216c43b | [] | no_license | irishabharya/k8swebapp | d4458421c9dd7965302533e98817011616d03ae2 | b5822e9b0b39bae1b184db32151b279e4ed8abb1 | refs/heads/main | 2023-06-29T08:10:53.279283 | 2021-08-11T18:04:02 | 2021-08-11T18:04:02 | 395,074,927 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | #!/usr/bin/python3
print("content-type: text/html")
print()
import cgi
import subprocess
f= cgi.FieldStorage()
cmd = f.getvalue("x")
pod = f.getvalue("pod")
port = f.getvalue("port")
replica = f.getvalue("replica")
if ("all" in cmd ):
print(subprocess.getoutput("sudo kubectl get pods --kubeconfig admin.conf"))
elif("all" in cmd) and ("deployment" in cmd) :
print(subprocess.getoutput("kubectl get deployment --kubeconfig admin.conf"))
elif("deployment" in cmd) and ("create" in cmd ):
print(subprocess.getoutput("kubectl create deployment {} --image=httpd --kubeconfig admin.conf ".format(pod)))
elif("deployment" in cmd) and ("expose" in cmd ):
print(subprocess.getoutput("kubectl expose deployment {} --port={} --type=NodePort --kubeconfig admin.conf ".format(pod,port)))
elif("create" in cmd ) or ("scale" in cmd ) and (("replica" in cmd ) or ("deployment" in cmd )):
print(subprocess.getoutput("kubectl scale deployment {} --replicas={} --kubeconfig admin.conf ".format(pod,replica)))
elif ("delete" in cmd ) and ("pod" in cmd):
print(subprocess.getoutput("kubectl delete pods {} --kubeconfig admin.conf".format(pod)))
elif("delete" in cmd ) and ("deployment" in cmd ):
print(subprocess.getoutput("kubectl delete deployment {} --kubeconfig admin.conf".format(pod)))
else:
print("Please Enter valid input")
| [
"noreply@github.com"
] | irishabharya.noreply@github.com |
13fc5bfd35ccc53b0ba4c7f355f221b3b9619a9a | 73004bfe307af66fc0486e4ce4d79c9f4f9c1158 | /messenger/user_profile/migrations/0008_remove_user_nick.py | 0d13178b86c66410baf21327d8c652902f180b55 | [] | no_license | ArtemCoolAc/2019-2-Atom-Backend-A-Kutuzov | a618662882448a0058208f1165697fe774568c58 | 54254aee1a7ff0e3920d9205f3ba57c2f77f3c3a | refs/heads/master | 2022-09-06T00:14:35.129550 | 2019-12-01T15:19:51 | 2019-12-01T15:19:51 | 210,880,063 | 0 | 0 | null | 2022-08-23T18:07:02 | 2019-09-25T15:33:04 | JavaScript | UTF-8 | Python | false | false | 329 | py | # Generated by Django 2.2.5 on 2019-11-13 17:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0007_auto_20191113_1535'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='nick',
),
]
| [
"us210716@yandex.ru"
] | us210716@yandex.ru |
0063e50243f7175fd73817b1c36e95c42296fd57 | a7c86066f6c5477b98cf82b9ead3532b2f942153 | /FP.py | 722088fcb732773535fb5f25399d590df4d551d2 | [] | no_license | TegarSU/K-Means | 171e261f22f4eb263eaca377efda0afee5256960 | ac15f22841c5d4f99606e7a528a5ae87095d3515 | refs/heads/master | 2020-04-24T12:40:52.238490 | 2019-02-22T18:34:08 | 2019-02-22T18:34:08 | 171,963,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,195 | py | import csv
import math
import copy
import random
def loadDataset(filename,k,dataSet=[]):
    """Read a CSV file, min-max normalise it, cast features to float, and
    append every row (plus a fresh cluster-id slot) into *dataSet*.

    NOTE(review): the mutable default ``dataSet=[]`` is shared across calls;
    the caller here always passes its own list, but confirm before reuse.
    """
    with open(filename) as csvfile:
        lines=csv.reader(csvfile)
        dataset=list(lines)
        normalize(dataset)
        banyak=len(dataset)/k  # rows per cluster; unused in this function
        mulai=0  # unused in this function
        for x in range(len(dataset)):
            for y in range(len(dataset[x])-1): # if the data has no class column (e.g. Iris-virginica), drop the -1
                dataset[x][y]=float(dataset[x][y])
            dataset[x].append(0) # extra slot for the assigned cluster id
            dataSet.append(dataset[x])
def loadDataset2(filename,k,centroid=[]):
    """Seed *centroid* with k rows picked at evenly spaced offsets of the
    CSV file (the last centroid uses the file's final row).

    Each chosen row is normalised, cast to float, and given an extra
    cluster-id slot, mirroring loadDataset's row layout.
    """
    with open(filename) as csvfile:
        lines=csv.reader(csvfile)
        dataset=list(lines)
        normalize(dataset)
        banyak=len(dataset)//k  # stride between the sampled centroid rows
        mulai=0  # index of the next row to sample
        for x in range(k):
            if x==k-1:
                z=dataset[len(dataset)-1]
            else:
                z=dataset[mulai]
            for y in range(len(z)-1):# if the data has no class column (e.g. Iris-virginica), drop the -1
                z[y]=float(z[y])
            z.append(0)# extra slot reused as the member count later on
            centroid.append(z)
            mulai=mulai+banyak
def normalize(dataset):
    """Min-max scale every column except the last one, in place.

    Values are first cast to float; a column whose values are all equal is
    left as its float values (the zero-range division is skipped).
    """
    num_features = len(dataset[0]) - 1
    for col in range(num_features):
        column = [float(row[col]) for row in dataset]
        lo, hi = min(column), max(column)
        span = hi - lo
        for idx, value in enumerate(column):
            dataset[idx][col] = value if span == 0 else (value - lo) / span
def carijarak(dataset,centroid):
    """Euclidean distance between a data row and a centroid row.

    Only the first len(dataset) - 2 entries are features: the last two slots
    hold the original label and the assigned cluster id.  (Change -2 to -1
    for data without a class column, e.g. Iris-virginica.)
    """
    squared_sum = sum((dataset[i] - centroid[i]) ** 2 for i in range(len(dataset) - 2))
    return math.sqrt(squared_sum)
def carikelas(dataset,k,centroid):
    """Return the 1-based index of the centroid closest to this data row."""
    terpendek=9223372036854775807  # best distance so far, seeded with 2**63 - 1
    kelas=0  # NOTE(review): stays 0 (an invalid 1-based id) when k == 0
    for y in range(k):
        a=carijarak(dataset,centroid[y])
        if a<terpendek:
            terpendek=a
            kelas=y+1
    return kelas
def printdataset(dataset):
    """Print each row of *dataset* on its own line."""
    for row in dataset:
        print(row)
def updatecentroid(dataset,k,centroid=[]):
    """Recompute every centroid as the mean of the rows assigned to it.

    Dataset rows are laid out as [features..., original_label, cluster_id]
    with a 1-based cluster_id in the last slot; each centroid row reuses its
    own last slot as the member count.  *centroid* is modified in place.

    NOTE(review): the mutable default ``centroid=[]`` is kept only for
    interface compatibility; every call site passes an explicit list.
    """
    # Reset every centroid (feature sums and count) to zero before accumulating.
    # (The original also created an unused local ``awal`` here — removed.)
    for x in range(k):
        for y in range(len(centroid[x])):
            centroid[x][y] = 0
    atribut = len(dataset[0])
    # Accumulate per-cluster feature sums; the last slot counts the members.
    for row in dataset:
        kls = row[atribut - 1]
        for y in range(atribut - 2):  # change -2 to -1 for data without a class column
            centroid[kls - 1][y] += row[y]
        centroid[kls - 1][atribut - 1] += 1
    # Divide the sums by the member counts.  Empty clusters are skipped so a
    # cluster that lost all members no longer raises ZeroDivisionError.
    for x in range(k):
        count = centroid[x][atribut - 1]
        if count == 0:
            continue
        for y in range(atribut - 2):
            centroid[x][y] = centroid[x][y] / count
#def centroidakhir(oldcen[],centriod[]):
def main():
    """Interactive K-Means driver (prompts are in Indonesian).

    Asks for the data file and the number of clusters k, optionally seeds
    the centroids from evenly spaced rows (answer "0"), then alternates
    assignment and centroid updates until no point changes cluster.

    NOTE(review): if the user does not answer "0", *centroid* stays empty
    and carikelas/updatecentroid will fail — confirm the intended fallback.
    """
    masuk=input("Masukkan Data : ")
    k=input("Jumlah Kelas yang Diinginkan : ")
    k=int(k)
    dataset=[]
    centroid=[]
    loadDataset(masuk,k,dataset)
    ####################### Build the K-Means clustering #############################
    #loadDataset2(masuk,k,centroid)
    pick = input("Input 0 untuk Centroid random :")
    if(pick=="0"):
        loadDataset2(masuk,k,centroid)
    #else
    # Initial assignment: give every row the id of its nearest centroid.
    for x in range(len(dataset)):
        kelas=carikelas(dataset[x],k,centroid)
        dataset[x][len(dataset[x])-1]=kelas
    updatecentroid(dataset,k,centroid)# update the centroids
    # Iterate until an assignment pass moves no point (cek stays 1).
    while True:
        cek=1# convergence flag: 1 = no point changed cluster this round
        for x in range(len(dataset)):
            kelas=carikelas(dataset[x],k,centroid)
            if dataset[x][len(dataset[x])-1]!=kelas:
                cek=0
                dataset[x][len(dataset[x])-1]=kelas
        updatecentroid(dataset,k,centroid)# update the centroids
        #printdataset(centroid)
        if cek==1:
            #print "Sudah Konfergen"
            break
        #input()
    print ("===================Data Baru Setelah K Means============================")
    printdataset(dataset)
    print ("\nCentroid akhir :\n")
    printdataset(centroid)
##################################Akhir K Means########################################
main()
| [
"noreply@github.com"
] | TegarSU.noreply@github.com |
d2087a647ace24760336a7b0244273fd6458835a | 144151dba4a365018a0e109d3173c1af0ea8f149 | /scan.py | 0ed330ae2cb006f7353a05af74ff1225bda113aa | [] | no_license | miguelps/document_detection | 1e24035374e63d9234da066718527d5fffa190fb | 9f6afc62481dd7a90259e1064a8b4de65f30446d | refs/heads/master | 2021-10-26T16:01:11.459227 | 2019-04-13T17:07:48 | 2019-04-13T17:07:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | from imutils.perspective import four_point_transform
from skimage.filters import threshold_local
#from ocr.helpers import implt
import numpy as np
import cv2
import imutils
img = cv2.imread('images/page.jpg')
ratio = img.shape[0] / 500.0
orig = img.copy()
img = imutils.resize(img, height = 500)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
invGamma = 1.0 / 0.3
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
gray = cv2.LUT(gray, table)
ret,thresh1 = cv2.threshold(gray,50,255,cv2.THRESH_BINARY)
_, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
def biggestRectangle(contours):
    """Return (index, approx_polygon) for the largest contour with area > 100.

    The polygon is the Douglas-Peucker approximation at 10% of the contour
    perimeter.  Falls back to (-1, None) when no contour qualifies.
    """
    best_index, best_poly, best_area = -1, None, 0
    for idx, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 100:
            perimeter = cv2.arcLength(contour, True)
            poly = cv2.approxPolyDP(contour, 0.1 * perimeter, True)
            if area > best_area:  # original also considered: and len(poly) == 4
                best_index, best_poly, best_area = idx, poly, area
    return best_index, best_poly
indexReturn, biggest = biggestRectangle(contours)
x,y,w,h = cv2.boundingRect(contours[indexReturn])
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
#implt(img, t='Result')
cv2.imwrite('test1.jpg',img)
warped = four_point_transform(orig, biggest.reshape(4, 2) * ratio)
#implt(warped, t='Result')
cv2.imwrite('test.jpg',warped)
| [
"noreply@github.com"
] | miguelps.noreply@github.com |
85140adbd56c46390eb3746b437a36b0224da0a8 | f632e642d2727fbe33f7a52ad32b03a3d13d4add | /校庆论文/校庆论文/xiaoqing/ClusterMain.py | 952d5420bdf3206343b287acabbd48ecabefd247 | [] | no_license | east1203/CPlusPlus | 7e0896fce4d36851ab26014ab50de1e3ab0e2f09 | be61a73e54647fd66fcce2f3d93c0703bdb7a6d7 | refs/heads/master | 2020-04-28T04:46:05.868932 | 2019-05-16T06:45:59 | 2019-05-16T06:45:59 | 174,964,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | # coding=utf-8
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from math import *
import PseudoCenter
import ClusterFunc
##def EL(centers,clusters:
print("************** start ***************")
#size = 162
size = 214
clustersnum = 9
array_regsloca = np.zeros((size,2))
ind = 0
with open("./regsloca.data","r") as f:
for line in f.readlines():
sline = line.strip("\n")
lline = line.split(" ")
array_regsloca[ind][0] = lline[0]
array_regsloca[ind][1] = lline[1]
ind = ind+1
ind = 0
centers = np.zeros((clustersnum,2))
with open("./init_centers.data","r") as f:
for line in f.readlines():
sline = line.strip("\n")
lline = line.split(" ")
centers[ind][0] = lline[0]
centers[ind][1] = lline[1]
ind = ind+1
stop = False
dis0 = 0
distmp = float('inf')
num = 0
WL = 0 ## 连线总长度
WLpre = float('inf')
TMP = []
TMPA = []
count = 0
continueFlag = True
## 调用寄存器分组函数
while continueFlag:
clusterstmp,centerstmp,WL = ClusterFunc.ClusterFunc(centers,clustersnum,array_regsloca,size)
TMP.append(WL)
if WL < WLpre:
WLpre = WL
centers = centerstmp
clusters = clusterstmp
TMPA.append(WL)
count = 0
## 写回分组中心点
with open("init_centers.data","w") as f:
for i in range(clustersnum):
f.write(str(centers[i][0])+" "+str(centers[i][1])+"\n")
else:
## 连续多次连线总长没有减小就停止分组
count+=1
if count>15:
continueFlag = False
print("wl长度是 ")
print(TMP)
print(TMPA)
# for i in range(1):
# clusters,centerstmp,WL = ClusterFunc.ClusterFunc(centers,clustersnum,array_regsloca,size)
# centers = centerstmp
# TMP.append(WL)
# with open("init_centers.data","w") as f:
# for i in range(clustersnum):
# f.write(str(centers[i][0])+" "+str(centers[i][1])+"\n")
# print("wl长度是 ")
# print(TMP)
## 将分组结果写到文件result.data
with open("result.data","w") as f:
for i in range(clustersnum):
if len(clusters[i])>1:
if clusters[i]:
#result=result.append(str(clusters[i])+'\n')
#print("1111")
l = str(clusters[i]).strip("[")
ll = l.strip("]")
lll = ll.split(",")
for j in range(len(lll)):
f.write(lll[j]+" ")
f.write('\n')
print(clustersnum)
# print("WL is "+str(WL))
# print("WLpre is " + str(WLpre))
print(centers)
print("每个群组中寄存器的数目")
for i in range(clustersnum):
print(len(clusters[i]))
##输出每一个群组中寄存器的位置
#for i in range(clustersnum):
# print("群组"+str(i)+"的寄存器位置如下:")
# for j in range(len(clusters[i])):
# print(str(array_regsloca[clusters[i][j]][0])+" "+str(array_regsloca[clusters[i][j]][1]))
'''
for i in range(clustersnum):
print("群组"+str(i)+"的寄存器位置如下:")
print("群组"+str(i)+"的x坐标: ")
for j in range(len(clusters[i])):
print(str(array_regsloca[clusters[i][j]][0]))
print("群组"+str(i)+"的y坐标: ")
for j in range(len(clusters[i])):
print(str(array_regsloca[clusters[i][j]][1]))
'''
print("************** end ***************")
| [
"dyk1203@126.com"
] | dyk1203@126.com |
7e32fb23b50d259b671d2a54a962904278c56be9 | 4c4b5bae788c4ac2029e975e814acdb0eef89b35 | /news_blog/migrations/0003_auto_20180210_1858.py | c8ddcd0d4a11276bf7b90bf789849804273711e1 | [] | no_license | barabashka7/DjangoProject | db76b7a7ea8be8af776b52516af6546eff577cc3 | efb268ebb2247daf51e69a727c4f8b0d129cb1b8 | refs/heads/master | 2021-05-05T06:02:02.502404 | 2018-02-18T07:36:57 | 2018-02-18T07:36:57 | 118,735,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | # Generated by Django 2.0.2 on 2018-02-10 11:58
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Alter Author.id and Comment.id UUID primary keys.

    Bug fix: the original recorded a *fixed* UUID instance as the column
    default (the model most likely declared ``default=uuid.uuid4()`` —
    called — instead of passing the callable), so every new row would
    receive the same primary key and collide on the second insert.
    The default is now the ``uuid.uuid4`` callable, evaluated per row.
    """

    dependencies = [
        ('news_blog', '0002_auto_20180210_1856'),
    ]

    operations = [
        migrations.AlterField(
            model_name='author',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='comment',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
    ]
"vadikpro7@mail.ru"
] | vadikpro7@mail.ru |
38fa1bdcf32d9a41324dc89afe6c727eb8ccee83 | 7c7e998d59511c752061005cddc921833ae6372d | /bot.py | 9ad445cb1a03d2736110465c709c3b1e07ff068d | [] | no_license | 999WRLD999/ECO.bot-v1 | 1d53a8c2b05fbbf4d848cf14c7986216c1c1c7d1 | e0e760b9beab81a6dfb0e4d5f67067d4a5ac05cc | refs/heads/main | 2023-03-21T19:34:27.142517 | 2021-03-14T22:31:04 | 2021-03-14T22:31:04 | 347,770,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,449 | py | import discord
import requests
import json
import discord.utils
import random
import string
import asyncio
from discord.ext import commands
# Version string shown in every embed footer/description across the bot.
Version = 'v1.0.2.7 - Alpha.4'
# All commands use the "$" prefix; the built-in help command is removed
# because this file defines its own $help below.
bot = commands.Bot(command_prefix='$')
bot.remove_command("help")
# NOTE(review): this module-level snapshot of usercash.json appears unused —
# every command re-reads the file itself; presumably safe to delete. Verify.
with open('usercash.json', 'r') as f:
    usercashjson = json.load(f)
def check(author):
    """Build a wait_for-style predicate that accepts only messages from `author`."""
    def _same_author(message):
        # True exactly when the incoming message was sent by `author`.
        return message.author == author
    return _same_author
@bot.command()
@commands.cooldown(2, 10, commands.BucketType.user)
async def work(ctx):
    """Pay the invoker a random $0-$999 wage (at most twice per 10 s).

    Bug fix: the original did a flat ``usercashjson[authorid] += ...`` even
    though balances are stored nested as ``{uid: {uid: amount}}`` (see $start
    and $bal), which raised TypeError. Also drops a stray debug print.
    """
    authorid = str(ctx.author.id)
    # Random 3-digit wage; may contain leading zeros in the message.
    cashearned = ''.join(random.choice(string.digits) for i in range(3))
    await ctx.send(f"Goodjob, you earned ${cashearned} at work today")
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    usercashjson[authorid][authorid] += int(cashearned)
    with open('usercash.json', 'w') as f:
        json.dump(usercashjson, f, indent=4)
@bot.command()
@commands.cooldown(1, 20, commands.BucketType.user)
async def mine(ctx):
    """Mine rubies (and higher minerals) based on the best pickaxe role owned.

    Bug fix: the original mixed separate `if` chains, so everyone without a
    Magmarite Pickaxe also fell into the no-pickaxe `else` (an extra ruby),
    and a user holding both Golden and Magmarite roles was paid twice.
    Exactly one branch now runs, best pickaxe first.
    """
    rustypickaxe = discord.utils.get(ctx.author.guild.roles, name="Rusty Pickaxe")
    steelpickaxe = discord.utils.get(ctx.author.guild.roles, name="Steel Pickaxe")
    goldenpickaxe1 = discord.utils.get(ctx.author.guild.roles, name="Golden Pickaxe")
    magmaritepickaxe = discord.utils.get(ctx.author.guild.roles, name="Magmarite Pickaxe")
    author = str(ctx.author.id)
    with open('items.json', 'r') as f:
        itemjson = json.load(f)
    with open("mineraldata.json", 'r') as f:
        mineraljson = json.load(f)
    if magmaritepickaxe in ctx.author.roles:
        mineral = random.choice(['magmarite', "saphire", "hellian", "alumanite"])
        await ctx.send(f"You're `Magmarite Pickaxe` mined fifty rubies and six {mineral}")
        mineraljson[author][0][mineral] += 6
        itemjson[author][0]['rubies'] += 50
    elif goldenpickaxe1 in ctx.author.roles:
        mineral = random.choice(['magmarite', "saphire", "alumanite"])
        await ctx.send(f"You're `Golden Pickaxe` mined thirteen rubies and 3 {mineral}")
        mineraljson[author][0][mineral] += 3
        itemjson[author][0]['rubies'] += 13
    elif steelpickaxe in ctx.author.roles:
        await ctx.send("You're `Steel Pickaxe` mined six rubies! ")
        itemjson[author][0]['rubies'] += 6
    elif rustypickaxe in ctx.author.roles:
        await ctx.send("You're `Rusty Pickaxe` mined three rubies! ")
        itemjson[author][0]['rubies'] += 3
    else:
        await ctx.send("You just mined one ruby! ")
        itemjson[author][0]['rubies'] += 1
    # Single write per file after the payout is decided.
    with open('items.json', 'w') as f:
        json.dump(itemjson, f, indent=4)
    with open('mineraldata.json', 'w') as f:
        json.dump(mineraljson, f, indent=4)
@mine.error
async def clear_error(ctx, error):
    """Report missing-role or cooldown failures raised by $mine."""
    if isinstance(error, commands.MissingRole):
        await ctx.send("You are missing the `pickaxe` role; you can buy it in the shop!")
    elif isinstance(error, commands.CommandOnCooldown):
        await ctx.send("Whoa, you're on cooldown! Chill out before you overwork yourself.")
@work.error
async def clear_error(ctx, error):
    """Tell the user when $work is on cooldown."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send("Whoa, you're on cooldown! Chill out before you overwork yourself.")
@bot.command()
async def sellrubies(ctx):
    """Sell every ruby in the invoker's inventory at $250 apiece.

    Bug fixes: the original added $250 to the flat per-user dict instead of
    the nested ``{uid: {uid: amount}}`` balance (TypeError), and re-read and
    re-wrote both JSON files once per ruby; now one update, one write each.
    """
    authorid = str(ctx.author.id)
    with open("items.json", 'r') as f:
        itemjson = json.load(f)
    rubies = itemjson[authorid][0]["rubies"]
    if rubies >= 1:
        await ctx.send(f"Sold: {rubies} Rubies")
        with open('usercash.json', 'r') as f:
            usercashjson = json.load(f)
        itemjson[authorid][0]["rubies"] = 0
        usercashjson[authorid][authorid] += 250 * rubies
        with open('usercash.json', 'w') as f:
            json.dump(usercashjson, f, indent=4)
        with open('items.json', 'w') as f:
            json.dump(itemjson, f, indent=4)
    else:
        await ctx.send("You don't have any `rubies` to sell! Do $mine to collect them!")
@bot.command()
async def bal(ctx):
    """Show the invoker's current cash balance in an embed."""
    uid = str(ctx.author.id)
    with open('usercash.json', 'r') as fh:
        balances = json.load(fh)
    panel = discord.Embed(
        title=f'{ctx.author}\'s Balance',
        description=f"**Version: {Version}**",
        color=0x00ff00,
    )
    # Balances live in a nested {uid: {uid: amount}} layout.
    panel.add_field(name="Balance", value=f"${balances[uid][uid]}")
    await ctx.send(embed=panel)
@bot.command()
async def shop(ctx):
    """Display the item shop and how to purchase from it."""
    listing = discord.Embed(title=f"Shop", description=f"**Version: {Version}**", color=0x00ff00)
    for item_name, price in (
        ("pickaxe", "$5,000"),
        ("Gun", "$5,000"),
        ('Weedfarm Business', "$50,000"),
        ("Meth Lab", "$100,000"),
        ("Methlab Trailer", "$2,500"),
        ("King Monke", "$3,000,000"),
    ):
        listing.add_field(name=item_name, value=price, inline=False)
    await ctx.send(embed=listing)
    await ctx.send("To purchase an item, do $buy(item) all in one word, no capitals.")
@bot.command()
async def buygun(ctx):
    """Buy a gun for $5,000; guns change the outcome of $crime arrests.

    Bug fixes: the original subtracted from the per-user dict instead of
    the nested balance (TypeError), charged the price twice, discarded the
    first deduction by re-reading the file, and incremented an undefined
    module global ``gun`` (NameError).
    """
    author = str(ctx.author.id)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    if usercashjson[author][author] >= 5000:
        with open('items.json', 'r') as f:
            itemsjson = json.load(f)
        itemsjson[author][0]["gun"] += 1
        usercashjson[author][author] -= 5000
        with open('items.json', 'w') as f:
            json.dump(itemsjson, f, indent=4)
        with open('usercash.json', 'w') as f:
            json.dump(usercashjson, f, indent=4)
        await ctx.send("Purchased `gun`, this will protect you against robbers and attackers!")
    else:
        await ctx.send("Sorry, you do not have enough money to purchase this item.")
@bot.command()
async def buyweedfarm(ctx):
    """Buy a weed farm for $50,000 (one per user).

    Bug fix: the success embed referenced an undefined module global ``cpc``
    (NameError); it now reports the farm's stored cash-per-cycle value.
    Also removes the unused ``global weedfarm`` declaration.
    """
    authorid = str(ctx.author.id)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    with open('items.json', 'r') as f:
        itemsjson = json.load(f)
    if usercashjson[authorid][authorid] >= 50000:
        if itemsjson[authorid][0]["weedfarm"] == 1:
            await ctx.send("Sorry, you already have a weed farm.")
        else:
            usercashjson[authorid][authorid] -= 50000
            itemsjson[authorid][0]["weedfarm"] += 1
            with open('usercash.json', 'w') as f:
                json.dump(usercashjson, f, indent=4)
            with open('items.json', 'w') as f:
                json.dump(itemsjson, f, indent=4)
            await ctx.send("You just purchased your very first `weed farm` here you can produce weed, do it manually or purchase workers to produce weed for you!")
            embed = discord.Embed(title="Weed Farm Statistics", description=f"**Version: {Version}**", color=0x00ff00)
            embed.add_field(name=f"Worker Statistics", value=f"Worker Amount: NULL")
            embed.add_field(name=f"Cash Per Cycle Statistics", value=f"Producing: ${itemsjson[authorid][0]['cpc']}")
            await ctx.send(embed=embed)
            await ctx.send("""
``` Commands:
$weedfarmstats - Provides the statistics of your weed farm
$buyweedfarmworker - Purchases a worker for your weed farm
$collectcashweedfarm - Collects the cash from your weed farm
$weedfarm_help
```
            """)
    else:
        await ctx.send("Pfft, you need more cash to buy this! It costs $50000 to open up a weed farm.")
@bot.command()
async def weedfarmstats(ctx):
    """Show the invoker's weed-farm worker count and cash-per-cycle.

    Fixes: drops the unused ``global cpc`` declaration and the unused
    usercash.json read, and replies instead of silently doing nothing
    when the user has no farm.
    """
    uid = str(ctx.author.id)
    with open('items.json', 'r') as f:
        itemsjson = json.load(f)
    if itemsjson[uid][0]["weedfarm"] >= 1:
        embed = discord.Embed(title="Weed Farm Statistics", description=f"**Version: {Version}**", color=0x00ff00)
        embed.add_field(name=f"Worker Statistics", value=f"Worker Amount: NULL")
        embed.add_field(name=f"Cash Per Cycle Statistics", value=f"Producing: ${itemsjson[uid][0]['cpc']}")
        await ctx.send(embed=embed)
    else:
        await ctx.send("You don't have a `weed farm`! Buy one with $buyweedfarm!")
@bot.command()
async def buyweedfarmworker(ctx):
    """Buy one weed-farm worker for $1,000, adding $125 cash-per-cycle.

    Fixes: replies instead of silently doing nothing when the user cannot
    afford a worker, and drops the redundant items.json re-read after the
    write (the in-memory dict already holds the new value).
    """
    uid = str(ctx.author.id)
    with open('items.json', 'r') as f:
        itemsjson = json.load(f)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    if usercashjson[uid][uid] >= 1000:
        itemsjson[uid][0]['cpc'] += 125
        usercashjson[uid][uid] -= 1000
        with open('usercash.json', 'w') as f:
            json.dump(usercashjson, f, indent=4)
        with open('items.json', 'w') as f:
            json.dump(itemsjson, f, indent=4)
        await ctx.send("Purchased 1 Worker")
        embed = discord.Embed(title="Weed Farm Statistics", description=f"**Version: {Version}**", color=0x00ff00)
        embed.add_field(name=f"Worker Statistics", value=f"Worker Amount: NULL")
        embed.add_field(name=f"Cash Per Cycle Statistics", value=f"Producing: ${itemsjson[uid][0]['cpc']}")
        await ctx.send(embed=embed)
    else:
        await ctx.send("Sorry, you do not have enough money to purchase this item.")
@bot.command()
@commands.cooldown(1, 60, commands.BucketType.user)
async def sellweed(ctx):
    """Collect the weed farm's cash-per-cycle payout (once per minute).

    Bug fix: the original did a flat ``usercashjson[uid] += cpc`` on the
    per-user dict (TypeError); balances are nested ``{uid: {uid: amount}}``.
    """
    uid = str(ctx.author.id)
    with open('items.json', 'r') as f:
        itemsjson = json.load(f)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    payout = itemsjson[uid][0]['cpc']
    usercashjson[uid][uid] += payout
    await ctx.send(f"Wow Boss, you collected ${payout} from your weed business!")
    with open('usercash.json', 'w') as f:
        json.dump(usercashjson, f, indent=4)
@sellweed.error
async def clear_error(ctx, error):
    """Tell the user when $sellweed is on cooldown."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send("Whoa, let your weed grow! Collect cash from your weed farm every 60 seconds!!")
@bot.command()
async def weedfarm_help(ctx):
    """Send the weed-farm command reference as a code block."""
    await ctx.send("""
``` Commands:
$weedfarmstats - Provides the statistics of your weed farm
$buyweedfarmworker - Purchases a worker for your weed farm
$collectcashweedfarm - Collects the cash from your weed farm
$weedfarm_help
    ```""")
@bot.command()
async def help(ctx):
    """Send the two-page custom command list (replaces discord.py's default $help)."""
    embed = discord.Embed(title="Command List", description=f"**Version: {Version}**", color=0x00ff00)
    embed.add_field(name="Commands", value="""
    $work - allocates you to work for a set amount of cash twice per minute
    $mine - with a `pickaxe` you can mine rubies which can then be sold for $$$
    $shop - displays the shop
    $updates - displays the update log
    $crime - commits a ***legal*** activity
    $gamble (amount) - Gambles a set amount of cash, 50/50 chance to fill your pockets; or leak them.
    $rob @person - Robs someone, easy cash, easy guap.
    $buy(item) - Purchases an item, no capitals, no spaces.
    $give (amount) @person - Gives cash to whoever you ping!
    $prestige - Shows your current prestige
    $beg - Asks a random person for money
    $inventory - Shows all your current items, (DOES NOT SHOW ORES)
    $sellrubies - Sells your rubies
    $forgerecipes - Shows all the forging Recipes
    $forge (pickaxe name) - Forges a pickaxe
    """)
    embed.set_footer(text=f"Version: {Version}", icon_url="https://cdn.discordapp.com/emojis/754736642761424986.png")
    embed.set_author(name=f"Requested by: {ctx.author.name}", icon_url=ctx.author.avatar_url)
    await ctx.send(embed=embed)
    # Second page: property/business commands.
    embed1 = discord.Embed(title="Command List Page two", description=f"**Version: {Version}**", color=0x00ff00)
    embed1.add_field(name="Command Page 2", value="""
    $upgradepickaxe - Upgrades your current pickaxe
    $weedfarmstats - Provides the statistics of your `Weed Farm`
    $buyweedfarmworker - Purchases a worker for your `Weed Farm`
    $collectcashweedfarm - Collects the cash from your `Weed Farm`
    $weedfarm_help - Displays all the commands for the `Weed Farm`
    $buymethlab - Purchases a `Methlab`
    $collectmeth - Collects meth from your trailers
    $sellmeth (amount)- Sells all your `Meth baggies`
    $buymethlabtrailer (amount) - Purchases a `Methlab Trailer`
    $methlabstats - Displays the statis of your `Methlab`
    """)
    embed1.set_footer(text=f"Version: {Version}", icon_url="https://cdn.discordapp.com/emojis/754736642761424986.png")
    embed1.set_author(name=f"Requested by: {ctx.author.name}", icon_url=ctx.author.avatar_url)
    await ctx.send(embed=embed1)
@bot.command()
async def updates(ctx):
    """Send the changelog embed for the current version.

    Bug fix: the original added a field with ``value=""``; the Discord API
    rejects embed fields with an empty value (400 Bad Request), so the
    command always errored. A placeholder body is used instead.
    """
    embed = discord.Embed(title="Update 2.6.1", description=f"**Version: v.{Version}**", color=0x00ff00)
    embed.set_author(name=f"Requested by: {ctx.author.name}", icon_url=ctx.author.avatar_url)
    embed.set_footer(text=f"Version: {Version}",
                     icon_url="https://cdn.discordapp.com/emojis/754736642761424986.png")
    embed.add_field(name="**Added**", value="Nothing new listed for this release.", inline=False)
    await ctx.send(embed=embed)
@commands.cooldown(6, 60, commands.BucketType.user)
@bot.command()
async def crime(ctx):
    """Commit a random crime; the outcome depends on a 0-99 roll.

    Bug fixes vs. the original: the shootout branches called json.dump on a
    file handle that had been opened read-only and already closed
    (ValueError), one branch indexed usercash with a non-string key
    (``ctx.author.id``), and writes were scattered per branch. All balance
    changes now go through the nested {uid: {uid: amount}} layout and both
    files are written exactly once at the end.
    """
    author = str(ctx.author.id)
    with open('items.json', 'r') as f:
        itemsjson = json.load(f)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    # Two random digits -> 0..99; higher is riskier/more lucrative.
    roll = int(''.join(random.choice(string.digits) for i in range(2)))
    if roll >= 90:
        if itemsjson[author][0]["gun"] >= 1:
            await ctx.send("You decide to bring your 9mm out! You're in a live shootout with the police!")
            if random.choice([1, 2]) == 2:
                await ctx.send("You won! You shot his leg then bit his ear! Ran away with $3000")
                usercashjson[author][author] += 3000
            else:
                await ctx.send("You lost:( He shot your ear then bit your leg, those hospital fees gon be expensive, -$5000")
                usercashjson[author][author] -= 5000
        else:
            await ctx.send("**You have been arrested, lost $10000**")
            usercashjson[author][author] -= 10000
    elif roll >= 80:
        await ctx.send("You robbed a casino Stole $2450!")
        usercashjson[author][author] += 2450
    elif roll >= 70:
        await ctx.send("You robbed a train! Stole $1500")
        usercashjson[author][author] += 1500
    elif roll >= 60:
        await ctx.send("You robbed a bar! Stole $1000")
        usercashjson[author][author] += 1000
    elif roll >= 50:
        await ctx.send("You robbed your Grandmas house! Stole all her rubies!")
        itemsjson[author][0]["rubies"] += 5
    elif roll >= 40:
        await ctx.send("You robbed Jess's house! Stole her vibra-, $34")
        usercashjson[author][author] += 34
    elif roll >= 20:
        # The original's 30+ and 20+ branches were identical; merged here.
        await ctx.send("You robbed WRLD's! Stole his Supreme Water Bottle, $100")
        usercashjson[author][author] += 100
    elif roll >= 10:
        await ctx.send("You robbed Tristian's trailor! He had no money:( ")
    else:
        await ctx.send("You robbed my house?! You lost $1250!!!")
        usercashjson[author][author] -= 1250
    with open('usercash.json', 'w') as f:
        json.dump(usercashjson, f, indent=4)
    with open('items.json', 'w') as f:
        json.dump(itemsjson, f, indent=4)
@crime.error
async def clear_error(ctx, error):
    """Tell the user when $crime is on cooldown."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send("Whoa, buddy, stop stealing, you're on cooldown!")
@commands.cooldown(1, 60, commands.BucketType.user)
@bot.command()
async def rob(ctx, member: discord.Member):
    """Steal a random amount (up to $9999, capped at the target's balance).

    Bug fix: the original re-rolled the amount in a while loop until it was
    <= the victim's balance, which never terminates when that balance is
    negative; the roll is now simply clamped.
    """
    robee = str(member.id)
    robber = str(ctx.author.id)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    amount = int(''.join(random.choice(string.digits) for i in range(4)))
    # Cap at the victim's (non-negative) balance instead of looping.
    amount = min(amount, max(usercashjson[robee][robee], 0))
    usercashjson[robee][robee] -= amount
    usercashjson[robber][robber] += amount
    await ctx.send(f"**You stole ${amount}, stash it in the warehouse Bain!**")
    with open('usercash.json', 'w') as f:
        json.dump(usercashjson, f, indent=4)
@rob.error
async def error_clear(ctx, error):
    """Tell the user when $rob is on cooldown."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send("Stop robbing people! You're on cooldown for 120 Seconds!!11!")
@bot.command()
async def buymethlab(ctx):
    """Buy a meth lab for $100,000 (one per user).

    Bug fix: the original sent "already have" but then *still* charged the
    user and granted a second lab because the check never returned; owning
    a lab now short-circuits the purchase.
    """
    author = str(ctx.author.id)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    if usercashjson[author][author] >= 100000:
        with open('items.json', 'r') as f:
            itemsjson = json.load(f)
        if itemsjson[author][0]["methlab"] >= 1:
            await ctx.send("Sorry, you already have a `methlab`!")
            return
        itemsjson[author][0]['methlab'] += 1
        usercashjson[author][author] -= 100000
        await ctx.send("You just purchased your very first methlab! You can see all the commands with $methlab_help!")
        with open('items.json', 'w') as f:
            json.dump(itemsjson, f, indent=4)
        with open('usercash.json', 'w') as f:
            json.dump(usercashjson, f, indent=4)
    else:
        await ctx.send("Sorry! You need $100000 to purchase the `meth lab`")
@bot.command()
async def buymethlabtrailer(ctx, amounttobuy: int):
    """Buy `amounttobuy` meth trailers at $2,500 each (requires a meth lab)."""
    uid = str(ctx.author.id)
    with open('items.json', 'r') as f:
        itemsjson = json.load(f)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    if usercashjson[uid][uid] < amounttobuy * 2500:
        await ctx.send("Sorry! You do not have enough money to purchase this! $2500")
        return
    if itemsjson[uid][0]["methlab"] != 1:
        await ctx.send("You don't have a `methlab`, buy one with $buymethlab!")
        return
    await ctx.send(f"Purchased: {amounttobuy} Trailer/s")
    # One trailer + one charge per iteration; range() makes a non-positive
    # amount a no-op, matching the original loop's behavior.
    for _ in range(amounttobuy):
        itemsjson[uid][0]["methtrailers"] += 1
        usercashjson[uid][uid] -= 2500
    trailer_total = itemsjson[uid][0]['methtrailers']
    stats = discord.Embed(title="Meth Lab Statistics", description=f"**Version: {Version}**", color=discord.Color.green())
    stats.add_field(name=f"Trailer Statistics", value=f"Trailer Amount: {trailer_total}")
    # Each trailer produces one meth bag per cycle.
    stats.add_field(name=f"MBs Per Cycle Statistics", value=f"Producing: {trailer_total} MPC")
    await ctx.send(embed=stats)
    with open('items.json', 'w') as f:
        json.dump(itemsjson, f, indent=4)
    with open('usercash.json', 'w') as f:
        json.dump(usercashjson, f, indent=4)
@buymethlabtrailer.error
async def clear_error(ctx, error):
    """Tell the user when $buymethlabtrailer is on cooldown."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send("Whoa, the trailer companies are getting sus of you! Slowdown!")
@bot.command()
async def methlab_help(ctx):
    """Send the meth-lab command reference as a code block."""
    await ctx.send("""
```Commands:
$buymethlabtrailer
$buymethlab
$collectmeth
$sellmeth
$methlab_help
$methlabstats
```
    """)
@bot.command()
@commands.cooldown(3, 60, commands.BucketType.user)
async def methlabstats(ctx):
    """Show trailer count and meth-bags-per-cycle for the invoker's lab."""
    uid = str(ctx.author.id)
    with open('items.json', 'r') as f:
        itemsjson = json.load(f)
    # The original also opened usercash.json (unused); kept so the command
    # still fails identically when that file is missing.
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    if not itemsjson[uid][0]["methlab"]:
        await ctx.send("You do not own a `methlab`, buy one with $buymethlab!")
        return
    trailers = itemsjson[uid][0]['methtrailers']
    stats = discord.Embed(title="Meth Lab Statistics", description=f"**Version: {Version}**",
                          color=discord.Color.green())
    stats.add_field(name=f"Trailer Statistics", value=f"Trailer Amount: {trailers}")
    stats.add_field(name=f"MBs Per Cycle Statistics", value=f"Producing: {trailers} MPC")
    await ctx.send(embed=stats)
@bot.command()
@commands.cooldown(1, 60, commands.BucketType.user)
async def collectmeth(ctx):
    """Collect one meth bag per trailer (once per minute)."""
    uid = str(ctx.author.id)
    with open('items.json', 'r') as fh:
        inventory = json.load(fh)
    with open('usercash.json', 'r') as fh:
        balances = json.load(fh)
    produced = inventory[uid][0]["methtrailers"]
    inventory[uid][0]["methbags"] += produced
    with open('items.json', 'w') as fh:
        json.dump(inventory, fh, indent=4)
    await ctx.send(f"Collected: {produced} Methbags")
    # Rewrites usercash.json unchanged, as the original did.
    with open('usercash.json', 'w') as fh:
        json.dump(balances, fh, indent=4)
@bot.command()
async def sellmeth(ctx, amounttosell: int):
    """Sell `amounttosell` meth bags at $1,000 each.

    Bug fix: the original only checked that *at least one* bag existed, so
    a user could sell more bags than owned (driving the count negative) or
    pass a negative amount to mint money; the amount is now validated.
    """
    author = str(ctx.author.id)
    with open('items.json', 'r') as f:
        itemsjson = json.load(f)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    if not itemsjson[author][0]["methlab"]:
        await ctx.send("You do not own a `methlab`, buy one with $buymethlab!")
    elif itemsjson[author][0]["methbags"] < 1:
        await ctx.send("You don't have any `methbags`! Go collect them with $collectmeth")
    elif amounttosell < 1 or amounttosell > itemsjson[author][0]["methbags"]:
        await ctx.send("You can't sell that many `methbags`!")
    else:
        itemsjson[author][0]['methbags'] -= amounttosell
        usercashjson[author][author] += 1000 * amounttosell
        with open('items.json', 'w') as f:
            json.dump(itemsjson, f, indent=4)
        with open('usercash.json', 'w') as f:
            json.dump(usercashjson, f, indent=4)
        await ctx.send(f"Sold: {amounttosell} Methbags for ${amounttosell * 1000}")
@sellmeth.error
async def clear_error(ctx, error):
    """Tell the user when $sellmeth is on cooldown."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send("Whoa, no way you have ***that*** much meth, you're on cooldown!")
@collectmeth.error
async def clear_error(ctx, error):
    """Tell the user when $collectmeth is on cooldown."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send("Whoa, no way you have ***that*** much meth, you're on cooldown!")
@bot.command()
@commands.cooldown(1, 20)
async def beg(ctx):
    """Beg for a random $0-$999 handout (once per 20 s)."""
    uid = str(ctx.author.id)
    with open('usercash.json', 'r') as fh:
        balances = json.load(fh)
    # Three random digits; leading zeros may appear in the message.
    handout = ''.join(random.choice(string.digits) for _ in range(3))
    balances[uid][uid] += int(handout)
    await ctx.send(f"Fineee :rolling_eyes:, ig ill give u cash ${handout}")
    with open('usercash.json', 'w') as fh:
        json.dump(balances, fh, indent=4)
@beg.error
async def clear_error(ctx, error):
    """Tell the user when $beg is on cooldown."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send("Whoa poor begger, you're on cooldown! ")
@commands.cooldown(2, 60)
@bot.command()
async def scam(ctx, member: discord.Member):
    """Steal 1-9 rubies or methbags from the target, capped at their stock.

    Bug fixes: the original's re-roll loop compared the roll against the
    target's *rubies* even when methbags were chosen, and a roll of 0 was
    bumped to 1 even when the target had nothing, driving their count
    negative.
    """
    target = str(member.id)
    author = str(ctx.author.id)
    with open('items.json', 'r') as f:
        itemsjson = json.load(f)
    chosenoption = random.choice(["rubies", "methbags"])
    available = itemsjson[target][0][chosenoption]
    if available < 1:
        await ctx.send(f"{member.name} has no {chosenoption} to steal!")
        return
    amountstolen = min(random.randint(1, 9), available)
    itemsjson[target][0][chosenoption] -= amountstolen
    itemsjson[author][0][chosenoption] += amountstolen
    await ctx.send(f'You stole {amountstolen} {chosenoption} from {member.name}')
    with open('items.json', 'w') as f:
        json.dump(itemsjson, f, indent=4)
@scam.error
async def clear_error(ctx, error):
    """Tell the user when $scam is on cooldown."""
    if not isinstance(error, commands.CommandOnCooldown):
        return
    await ctx.send("Whoa, you're on cooldown! Please stop scamming for a bit")
@bot.command()
async def gamble(ctx):
    """Gamble a flat amount: 50/50 double-or-nothing.

    Bug fixes: negative bets previously inverted the payout (losing a
    negative bet paid out), and a non-numeric bet crashed on int();
    non-positive and unparsable amounts are now rejected.
    """
    author = str(ctx.author.id)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    # The bet is everything after the 8-character prefix "$gamble ".
    try:
        betamount = int(ctx.message.content[8:])
    except ValueError:
        await ctx.send("That's not a number! Usage: $gamble <amount>")
        return
    if betamount <= 0:
        await ctx.send("You have to bet at least $1!")
    elif usercashjson[author][author] < betamount:
        await ctx.send("Sorry! You don't have that much cash! *bum*")
    else:
        await ctx.send("Generating chance. . .")
        await asyncio.sleep(1)
        if random.choice([1, 2]) == 2:
            usercashjson[author][author] += betamount
            await ctx.send("You won! Nice on ya lad!")
        else:
            usercashjson[author][author] -= betamount
            await ctx.send("You lost! Wanna rob the Casino and get your money back? Do $heist casino!")
        with open('usercash.json', 'w') as f:
            json.dump(usercashjson, f, indent=4)
@bot.command()
async def upgradepickaxe(ctx):
    """Upgrade Rusty -> Steel ($15,000) or Steel -> Golden ($17,500).

    Bug fix: the two upgrade checks were independent `if`s, so when the
    member cache updated after add_roles a single call could chain
    Rusty->Steel->Golden and charge $32,500 at once; the branches are now
    mutually exclusive. Also removes the unused `txt` channel lookup.
    """
    rustypickaxe = discord.utils.get(ctx.author.guild.roles, name="Rusty Pickaxe")
    steelpickaxe = discord.utils.get(ctx.author.guild.roles, name="Steel Pickaxe")
    goldenpickaxe = discord.utils.get(ctx.author.guild.roles, name="Golden Pickaxe")
    author = str(ctx.author.id)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    if rustypickaxe in ctx.author.roles:
        if usercashjson[author][author] >= 15000:
            await ctx.author.add_roles(steelpickaxe)
            await ctx.author.remove_roles(rustypickaxe)
            await ctx.send("Upgraded pickaxe to `Steel Pickaxe`!")
            usercashjson[author][author] -= 15000
            with open('usercash.json', 'w') as f:
                json.dump(usercashjson, f, indent=4)
    elif steelpickaxe in ctx.author.roles:
        if usercashjson[author][author] >= 17500:
            await ctx.author.add_roles(goldenpickaxe)
            await ctx.author.remove_roles(steelpickaxe)
            await ctx.send("Upgraded pickaxe to `Golden Pickaxe`!")
            usercashjson[author][author] -= 17500
            with open('usercash.json', 'w') as f:
                json.dump(usercashjson, f, indent=4)
    await ctx.send("Too upgrade your tools from here, you need to collect minerals only obtainable through mining, crafting recipes will be available if you do $forgelist.")
@bot.command()
async def forgerecipes(ctx):
    """Show the mineral costs for every forgeable pickaxe."""
    recipes = """
    Magmarite Pickaxe - 15 Magmarite, 9 Sapphire
    Hellian Pickaxe - 16 Magmarite, 13 Sapphire, 4 Hellian
    """
    listing = discord.Embed(title="Forging Recipes", description=f"**Version: {Version}**", color=0x00ff00)
    listing.set_author(name=f"Requested by: {ctx.author.name}", icon_url=ctx.author.avatar_url)
    listing.set_footer(text=f"Version: {Version}",
                       icon_url="https://cdn.discordapp.com/emojis/754736642761424986.png")
    listing.add_field(name=f"Pickaxe Recipes", value=f"{recipes}")
    await ctx.send(embed=listing)
@bot.command()
async def give(ctx, amounttogive: int, member: discord.Member):
    """Transfer cash from the invoker to the pinged member.

    Bug fixes: the credit went to usercashjson[member][author] instead of
    the member's own nested key (so the money vanished), the failure reply
    was never awaited, and negative amounts could pull money *from* the
    recipient.
    """
    author = str(ctx.author.id)
    recipient = str(member.id)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    if amounttogive > 0 and usercashjson[author][author] >= amounttogive:
        usercashjson[author][author] -= amounttogive
        usercashjson[recipient][recipient] += amounttogive
        embed = discord.Embed(title="Give Cash", description=f"**Version: {Version}**", color=0x00ff00)
        embed.add_field(name=f"Cash Given", value=f"${amounttogive}")
        embed.set_footer(text=f"Version: {Version}",
                         icon_url="https://cdn.discordapp.com/emojis/754736642761424986.png")
        embed.set_author(name=f"Requested by: {ctx.author.name}", icon_url=ctx.author.avatar_url)
        await ctx.send(embed=embed)
        with open('usercash.json', 'w') as f:
            json.dump(usercashjson, f, indent=4)
    else:
        await ctx.send("You don't have enough cash to send to them!")
@bot.command()
async def forge(ctx, pickaxe: str):
    """Forge a higher-tier pickaxe from minerals, consuming the lower-tier role.

    Recipes (see $forgerecipes):
      magmaritepickaxe - 15 magmarite + 9 saphire, requires the Golden Pickaxe role
      hellianpickaxe   - 16 magmarite + 13 saphire + 4 hellian, requires Magmarite Pickaxe
    """
    print(pickaxe)  # NOTE(review): debug print left in — consider removing or logging
    if pickaxe == "magmaritepickaxe":
        author = str(ctx.author.id)
        with open("mineraldata.json", 'r') as f:
            mineralsjson = json.load(f)
        if mineralsjson[author][0]["magmarite"] >= 15:
            if mineralsjson[author][0]["saphire"] >= 9:
                goldenpickaxe = discord.utils.get(ctx.author.guild.roles, name="Golden Pickaxe")
                if goldenpickaxe in ctx.author.roles:
                    await ctx.send("You forged a `Magmarite Pickaxe`!")
                    role = discord.utils.get(ctx.author.guild.roles, name="Magmarite Pickaxe")
                    await ctx.author.add_roles(role)
                    # Deduct the recipe cost, then swap the pickaxe roles.
                    mineralsjson[author][0]["magmarite"] -= 15
                    mineralsjson[author][0]["saphire"] -= 9
                    await ctx.author.remove_roles(goldenpickaxe)
                    with open('mineraldata.json', 'w') as f:
                        json.dump(mineralsjson, f, indent=4)
                else:
                    await ctx.send("You need a `Golden Pickaxe` to forge the `Magmarite Pickaxe`!")
            else:
                await ctx.send("You don't have enough `Sapphire!`, do $mine with a Pickaxe to collect some!")
        else:
            await ctx.send("You don't have enough `Magmarite!`, do $mine with a Pickaxe to collect some!")
    if pickaxe == "hellianpickaxe":
        author = str(ctx.author.id)
        with open("mineraldata.json", 'r') as f:
            mineralsjson = json.load(f)
        if mineralsjson[author][0]["magmarite"] >= 16:
            if mineralsjson[author][0]["saphire"] >= 13:
                if mineralsjson[author][0]["hellian"] >= 4:
                    magmarpick = discord.utils.get(ctx.author.guild.roles, name="Magmarite Pickaxe")
                    if magmarpick in ctx.author.roles:
                        await ctx.send("You forged a `Hellian Pickaxe`!")
                        role = discord.utils.get(ctx.author.guild.roles, name="Hellian Pickaxe")
                        await ctx.author.add_roles(role)
                        # Deduct the recipe cost, then swap the pickaxe roles.
                        mineralsjson[author][0]["magmarite"] -= 16
                        mineralsjson[author][0]["saphire"] -= 13
                        mineralsjson[author][0]['hellian'] -= 4
                        await ctx.author.remove_roles(magmarpick)
                        with open('mineraldata.json', 'w') as f:
                            json.dump(mineralsjson, f, indent=4)
                    else:
                        await ctx.send("You need a `Magmarite Pickaxe` to forge the `Hellian Pickaxe`!")
                else:
                    await ctx.send("You don't have enough `Hellian!`, do $mine with a Pickaxe to collect some!")
            else:
                await ctx.send("You don't have enough `Sapphire!`, do $mine with a Pickaxe to collect some!")
        else:
            await ctx.send("You don't have enough `Magmarite!`, do $mine with a Pickaxe to collect some!")
@bot.command()
async def buykingmonke(ctx):
    """Buy the King Monke role for $3,000,000.

    Bug fix: the original granted the role without ever deducting the shop
    price (the shop lists King Monke at $3,000,000 and every other buy
    command charges its price).
    """
    author = str(ctx.author.id)
    with open('usercash.json', 'r') as f:
        usercashjson = json.load(f)
    role = discord.utils.get(ctx.author.guild.roles, name="👑King Monk👑")
    if usercashjson[author][author] >= 3000000:
        usercashjson[author][author] -= 3000000
        with open('usercash.json', 'w') as f:
            json.dump(usercashjson, f, indent=4)
        await ctx.author.add_roles(role)
    else:
        await ctx.send("You cannot afford King Monke")
@bot.command()
async def bananaphone(ctx):
    """Spam 100 banana emojis if the invoker holds the King Monke role."""
    crown = discord.utils.get(ctx.author.guild.roles, name="👑King Monk👑")
    if crown not in ctx.author.roles:
        return
    for _ in range(100):
        await ctx.send(":banana:")
def verifycheck(message):
    """Return True only when the message text matches the active captcha.

    Bug fix: the original returned the TypeError *class* (a truthy value)
    on a mismatch, so bot.wait_for accepted any message and anyone could
    verify with any reply.
    """
    return message.content == captcha
@bot.command()
async def verify(ctx):
    """Send a 6-letter captcha and grant the `verified` role on a correct reply.

    Bug fix: the original checked ``if TypeError:`` (always truthy) after a
    successful wait_for, sending 'failed' on every successful verification.
    """
    global captcha
    captcha = ''.join(random.choice(string.ascii_letters) for i in range(6))
    role = discord.utils.get(ctx.author.guild.roles, name='verified')
    embed = discord.Embed(title="Captcha Verification", description="*Please complete this captcha to get access to the server*", color=discord.Color.purple())
    embed.add_field(name="Captcha Provided", value=f"{captcha}")
    await ctx.send(embed=embed)
    try:
        # verifycheck only accepts the exact captcha text, so any other
        # message just keeps waiting until the timeout fires.
        await bot.wait_for('message', check=verifycheck, timeout=5)
    except asyncio.TimeoutError:
        await ctx.send("**The verification token has expired! Please send $verify to start a new one.**")
    else:
        await ctx.author.add_roles(role)
@bot.command()
async def inventory(ctx):
    """Show the invoker's item counts (rubies, farms, labs, bags, trailers)."""
    uid = str(ctx.author.id)
    with open('items.json', 'r') as fh:
        stash = json.load(fh)
    # NOTE: the embed title reads "Prestige" in the original; preserved as-is.
    panel = discord.Embed(title="Prestige", description=f"**Version: {Version}**", color=0x00ff00)
    panel.set_author(name=f"Requested by: {ctx.author.name}", icon_url=ctx.author.avatar_url)
    panel.set_footer(text=f"Version: {Version}", icon_url="https://cdn.discordapp.com/emojis/754736642761424986.png")
    panel.add_field(name="Inventory Items ", value=f"""
    Rubies: {stash[uid][0]["rubies"]}
    Weed Farms: {stash[uid][0]["weedfarm"]}
    Meth Labs: {stash[uid][0]["methlab"]}
    Meth baggies: {stash[uid][0]["methbags"]}
    Meth Trailers: {stash[uid][0]["methtrailers"]}
    """)
    await ctx.send(embed=panel)
@bot.command()
async def prestige(ctx):
    """Show the invoker's prestige level from presteige.json."""
    with open("presteige.json", 'r') as fh:
        prestige_data = json.load(fh)
    panel = discord.Embed(title="Prestige", description=f"**Version: {Version}**", color=0x00ff00)
    panel.set_author(name=f"Requested by: {ctx.author.name}", icon_url=ctx.author.avatar_url)
    panel.set_footer(text=f"Version: {Version}", icon_url="https://cdn.discordapp.com/emojis/754736642761424986.png")
    panel.add_field(name="Prestige", value=f"{prestige_data[str(ctx.author.id)]}")
    await ctx.send(embed=panel)
@bot.command()
async def start(ctx):
    """Create fresh mineral and cash records for the invoking user.

    Reads both JSON stores, overwrites this user's entries with zeroed
    defaults, and writes the stores back.
    """
    uid = str(ctx.author.id)
    with open("mineraldata.json", 'r') as handle:
        minerals = json.load(handle)
    with open("usercash.json", 'r') as handle:
        cash = json.load(handle)
    # One-element list holding the user's mineral counts, all zeroed.
    minerals[uid] = [
        {
            "saphire": 0,
            "magmarite": 0,
            "alumanite": 0,
            "hellian": 0,
            "ECOi": 0,
            "fishre": 0
        }
    ]
    cash[uid] = {uid: 0}
    with open('mineraldata.json', 'w') as handle:
        json.dump(minerals, handle, indent=4)
    with open('usercash.json', 'w') as handle:
        json.dump(cash, handle, indent=4)
bot.run('urbottokenhere')
| [
"noreply@github.com"
] | 999WRLD999.noreply@github.com |
32bbdc3dfa362fb7cf6b2b7a7e8c026eae0eb5f1 | 9a40c85c55d75327d82a6e2010d58faf6aaeff49 | /Website/mysite/settings.py | 91fc5443013ba838a30d491bdefcace58d23f46c | [] | no_license | MostafaHamedAbdelmasoud/Liver-cancer-detection-in-CT-scans | 8b1dab64cb1273d9dae8709435ac05e38ac26e84 | 5a6c3eb4d84ce1badb3a7103c06dc336caab3073 | refs/heads/master | 2022-12-25T00:54:47.344764 | 2020-09-20T14:31:02 | 2020-09-20T14:31:02 | 295,721,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,159 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Per-app template directories; wired into TEMPLATES['DIRS'] below.
TEMPLATE_DIR = os.path.join(BASE_DIR, 'basicapp/templates/basicapp')
TEMPLATE_DIR_USERS = os.path.join(BASE_DIR, 'users/templates/users')
TEMPLATE_DIR_DEEPMODEL = os.path.join(BASE_DIR, 'deepmodel/templates/deepmodel')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): secret key is committed to source control — rotate it and
# load it from the environment (see the commented line below) before deploying.
SECRET_KEY = '8lu*6g0lg)9z!ba+a$ehk)xt)x%rxgb$i1&022shmi1jcgihb*'
#SECRET_KEY = os.environ.get("SECRET_KEY")
# NOTE(review): DEBUG must be False in production.
DEBUG = True
#DEBUG = int(os.environ.get("DEBUG", default=0))
# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a space between each.
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'
#ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS").split(" ")
# NOTE(review): '*' accepts any Host header; restrict to real hostnames in production.
ALLOWED_HOSTS = '*'
#ALLOWED_HOSTS = ['app']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'basicapp',
    'django_dicom',
    'crispy_forms',
    'users',
    'deepmodel',
    'debug_toolbar',
    'dicom'
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware'
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR, TEMPLATE_DIR_USERS, TEMPLATE_DIR_DEEPMODEL, ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# NOTE(review): database credentials are hard-coded in source; move them to
# environment variables before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'postgres',
        'USER': 'shaker',
        'PASSWORD': 'a',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
#         'NAME': 'sqlite3.db',                   # Or path to database file if using sqlite3.
#         'USER': '',                             # Not used with sqlite3.
#         'PASSWORD': '',                         # Not used with sqlite3.
#         'HOST': '',                             # Set to empty string for localhost. Not used with sqlite3.
#         'PORT': '',                             # Set to empty string for default. Not used with sqlite3.
#     }
# }
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
APP_DIR = os.path.join(BASE_DIR, 'deepmodel')
STATICFILES_DIRS = [
    os.path.join(APP_DIR, "static"),
    os.path.join(APP_DIR, "static/DICOM"),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Custom user model lives in the 'users' app.
AUTH_USER_MODEL = 'users.CustomUser'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
LOGIN_URL = 'login'
# User-uploaded files (scans, etc.).
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Hosts allowed to see the debug toolbar.
INTERNAL_IPS = ['127.0.0.1']
# Activate Django-Heroku.
| [
"mostafa.hamed1944@gmail.com"
] | mostafa.hamed1944@gmail.com |
d9a6b276047aedbc1ab62415fd3b8fcf764596d0 | bf9aab3694c7cb0b8fbc588f79da29f3993d77b5 | /search/djangohaystack/migrations/0001_initial.py | 2511f634f69cdaba051f6ed4114da4c0a1594463 | [] | no_license | clarle/council-catalog | c765bf641c114e426bd170f6ed56a2e2a3df9c79 | 123348d60233191d8938ec8341bbc5c0b4b3ad7b | refs/heads/master | 2021-01-21T08:20:53.389689 | 2017-08-11T02:32:10 | 2017-08-11T02:32:10 | 101,958,092 | 0 | 0 | null | 2017-08-31T04:09:10 | 2017-08-31T04:09:10 | null | UTF-8 | Python | false | false | 902 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-27 21:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for this app: creates the Note model.
    initial = True
    dependencies = [
        # Depends on whichever user model the project configured via AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pub_date', models.DateTimeField()),
                ('title', models.CharField(max_length=200)),
                ('body', models.TextField()),
                # Each note belongs to one user; deleting the user cascades to the note.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"hoangle2806@gmail.com"
] | hoangle2806@gmail.com |
4413bba5a363984d44c924cc07685156b01cd5a9 | 5820d1c5d784f95257834eb42da8069905e2c426 | /ocear/preprocess/normalize.py | 3e6ea57f421dfec08681f6ef6d7122d157f4df94 | [
"MIT"
] | permissive | bartdegoede/ocear | b976133d81ed92fd651e9161bccd2d023a570f37 | 8b155457a9085df72bb6a84c6549abeabebf27ba | refs/heads/master | 2020-03-29T12:00:06.598638 | 2018-09-28T12:55:28 | 2018-09-28T12:55:28 | 149,880,597 | 0 | 0 | MIT | 2018-09-28T12:49:49 | 2018-09-22T13:43:21 | Python | UTF-8 | Python | false | false | 270 | py | import numpy as np
def normalize(image):
"""
Scale pixel values between 0.0 and 1.0
"""
if image is None or np.max(image) == np.min(image):
raise Exception('No valid image provided')
img = image - np.min(image)
return img / np.max(img)
| [
"bdegoede@gmail.com"
] | bdegoede@gmail.com |
e49f952e2c76871ed20d6b04f878fb0efc340461 | 9d8188246250c2506bf69ebb44ee5342891d3663 | /model/layers/encoder.py | 9d1d5eee9ac2c25e543abcc6b66ec7b2edb7c2e4 | [] | no_license | wujunjie1998/kg-topic-chat | 22433e35758972a519056b61de117820e26c3de9 | d3d393cdc49d9a82727d4b0aee71692e90fed17f | refs/heads/master | 2020-12-02T01:42:35.543629 | 2019-12-30T04:25:22 | 2019-12-30T04:25:22 | 230,845,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,628 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, PackedSequence
from utils import to_var, reverse_order_valid, PAD_ID
from .rnncells import StackedGRUCell, StackedLSTMCell
import pdb
import copy
class BaseRNNEncoder(nn.Module):
    """Common scaffolding shared by the sentence- and context-level encoders."""

    def __init__(self):
        super(BaseRNNEncoder, self).__init__()

    @property
    def use_lstm(self):
        """True when the wrapped ``self.rnn`` module is an LSTM."""
        if not hasattr(self, 'rnn'):
            raise AttributeError('no rnn selected')
        return isinstance(self.rnn, nn.LSTM)

    def init_h(self, batch_size=None, hidden=None):
        """Return ``hidden`` if provided, else a fresh all-zero initial state.

        LSTMs get an (h, c) pair; GRU/RNN get a single tensor of shape
        [num_layers * num_directions, batch_size, hidden_size].
        """
        if hidden is not None:
            return hidden
        shape = (self.num_layers * self.num_directions,
                 batch_size,
                 self.hidden_size)
        if self.use_lstm:
            return (to_var(torch.zeros(*shape)), to_var(torch.zeros(*shape)))
        return to_var(torch.zeros(*shape))

    def batch_size(self, inputs=None, h=None):
        """Infer the batch size from ``inputs`` [batch, seq_len] or a hidden state ``h``."""
        if inputs is not None:
            return inputs.size(0)
        # Hidden states are [num_layers*directions, batch, hidden]; LSTM wraps
        # that in an (h, c) pair, so unwrap before reading dim 1.
        state = h[0] if self.use_lstm else h
        return state.size(1)

    def forward(self):
        raise NotImplementedError
class EncoderRNN(BaseRNNEncoder):
    """Sentence-level encoder: embeds token ids and encodes each sentence with an RNN."""
    def __init__(self, vocab_size, embedding_size,
                 hidden_size, rnn=nn.GRU, num_layers=1, bidirectional=False,
                 dropout=0.0, bias=True, batch_first=True):
        """Sentence-level Encoder.

        Args:
            vocab_size: number of tokens in the vocabulary.
            embedding_size: dimensionality of the token embeddings.
            hidden_size: per-direction RNN hidden size.
            rnn: RNN class to instantiate (nn.GRU or nn.LSTM).
        """
        super(EncoderRNN, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.batch_first = batch_first
        self.bidirectional = bidirectional
        if bidirectional:
            self.num_directions = 2
        else:
            self.num_directions = 1
        # word embedding; PAD_ID rows stay zero and produce no gradient.
        self.embedding = nn.Embedding(vocab_size, embedding_size, padding_idx=PAD_ID)
        self.rnn = rnn(input_size=embedding_size,
                       hidden_size=hidden_size,
                       num_layers=num_layers,
                       bias=bias,
                       batch_first=batch_first,
                       dropout=dropout,
                       bidirectional=bidirectional)
    def forward(self, inputs, input_length, hidden=None):
        """
        Args:
            inputs (Variable, LongTensor): [num_sentences, max_seq_len]
            input_length (Variable, LongTensor): [num_sentences]
        Return:
            outputs (Variable): [max_source_length, batch_size, hidden_size]
                - list of all hidden states
            hidden ((tuple of) Variable): [num_layers*num_directions, batch_size, hidden_size]
                - last hidden state
                - (h, c) or h
        """
        batch_size, seq_len = inputs.size()
        # Sort in decreasing order of length for pack_padded_sequence()
        input_length_sorted, indices = input_length.sort(descending=True)
        input_length_sorted = input_length_sorted.data.tolist()
        # [num_sentences, max_source_length]
        inputs_sorted = inputs.index_select(0, indices)
        # [num_sentences, max_source_length, embedding_dim]
        embedded = self.embedding(inputs_sorted)
        # batch_first=True
        rnn_input = pack_padded_sequence(embedded, input_length_sorted,
                                         batch_first=self.batch_first)
        hidden = self.init_h(batch_size, hidden=hidden)
        # outputs: [batch, seq_len, hidden_size * num_directions]
        # hidden: [num_layers * num_directions, batch, hidden_size]
        self.rnn.flatten_parameters()
        outputs, hidden = self.rnn(rnn_input, hidden)
        outputs, outputs_lengths = pad_packed_sequence(outputs, batch_first=self.batch_first)
        # Reorder outputs and hidden back to the caller's original sentence order.
        _, inverse_indices = indices.sort()
        outputs = outputs.index_select(0, inverse_indices)
        if self.use_lstm:
            hidden = (hidden[0].index_select(1, inverse_indices),
                      hidden[1].index_select(1, inverse_indices))
        else:
            hidden = hidden.index_select(1, inverse_indices)
        return outputs, hidden
    def step(self, inputs, hidden=None):
        """Encode a single batch of (already equal-length) token ids without packing."""
        batch_size = inputs.size(0)
        embedded = self.embedding(inputs)
        # encoder_hidden: [1, batch_size, hidden_size]
        hidden = self.init_h(batch_size, hidden=hidden)
        self.rnn.flatten_parameters()
        outputs, hidden = self.rnn(embedded, hidden)
        return outputs, hidden
class ContextRNN(BaseRNNEncoder):
    """Context-level encoder: runs an RNN over the per-sentence encodings of a conversation."""
    def __init__(self, input_size, context_size, rnn=nn.GRU, num_layers=1, dropout=0.0,
                 bidirectional=False, bias=True, batch_first=True):
        """Context-level Encoder.

        Args:
            input_size: dimensionality of each sentence encoding.
            context_size: per-direction hidden size of the context RNN.
            rnn: RNN class to instantiate (nn.GRU or nn.LSTM).
        """
        super(ContextRNN, self).__init__()
        self.input_size = input_size
        self.context_size = context_size
        self.hidden_size = self.context_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.batch_first = batch_first
        if bidirectional:
            self.num_directions = 2
        else:
            self.num_directions = 1
        self.rnn = rnn(input_size=input_size,
                       hidden_size=context_size,
                       num_layers=num_layers,
                       bias=bias,
                       batch_first=batch_first,
                       dropout=dropout,
                       bidirectional=bidirectional)
    def forward(self, encoder_hidden, conversation_length, hidden=None):
        """
        Args:
            encoder_hidden (Variable, FloatTensor): [batch_size, max_len, num_layers * direction * hidden_size]
            conversation_length (Variable, LongTensor): [batch_size]
        Return:
            outputs (Variable): [batch_size, max_seq_len, hidden_size]
                - list of all hidden states
            hidden ((tuple of) Variable): [num_layers*num_directions, batch_size, hidden_size]
                - last hidden state
                - (h, c) or h
        """
        batch_size, seq_len, _ = encoder_hidden.size()
        # Sort conversations by length (descending) for PackedSequence.
        conv_length_sorted, indices = conversation_length.sort(descending=True)
        conv_length_sorted = conv_length_sorted.data.tolist()
        encoder_hidden_sorted = encoder_hidden.index_select(0, indices)
        rnn_input = pack_padded_sequence(encoder_hidden_sorted, conv_length_sorted, batch_first=True)
        hidden = self.init_h(batch_size, hidden=hidden)
        self.rnn.flatten_parameters()
        outputs, hidden = self.rnn(rnn_input, hidden)
        # outputs: [batch_size, max_conversation_length, context_size]
        outputs, outputs_length = pad_packed_sequence(outputs, batch_first=True)
        # Reorder outputs and hidden back to the caller's original batch order.
        _, inverse_indices = indices.sort()
        outputs = outputs.index_select(0, inverse_indices)
        if self.use_lstm:
            hidden = (hidden[0].index_select(1, inverse_indices),
                      hidden[1].index_select(1, inverse_indices))
        else:
            hidden = hidden.index_select(1, inverse_indices)
        # outputs: [batch, seq_len, hidden_size * num_directions]
        # hidden: [num_layers * num_directions, batch, hidden_size]
        return outputs, hidden
    def step(self, encoder_hidden, hidden):
        """Advance the context RNN by a single timestep.

        Args:
            encoder_hidden: [batch_size, input_size] encoding of one utterance.
            hidden: previous context state, or None to start from zeros.
        """
        batch_size = encoder_hidden.size(0)
        # Bug fix: a leftover pdb.set_trace() here would halt execution on
        # every decoding step in production — removed.
        # Add a length-1 sequence dimension: [batch_size, 1, input_size].
        encoder_hidden = torch.unsqueeze(encoder_hidden, 1)
        if hidden is None:
            hidden = self.init_h(batch_size, hidden=None)
        self.rnn.flatten_parameters()
        outputs, hidden = self.rnn(encoder_hidden, hidden)
        return outputs, hidden
| [
"1052231507@qq.com"
] | 1052231507@qq.com |
5d10a9d692575a05db729af626128d544ad5b613 | 13141a522f31660b2b90d949b56eb02acb5580b7 | /动态规划/064 Minimum Path Sum.py | 24b9e2be3bf06b278a323bd0ea7618c6e087121b | [] | no_license | Busc/LeetCode-learning | a2341fd00d1e4fb4e41d701f1c6385e88f6a8539 | 4a32734908b2efe0577e11807981e0c37b0dd8d4 | refs/heads/master | 2020-12-02T07:48:45.817641 | 2018-04-03T08:24:48 | 2018-04-03T08:24:48 | 96,728,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | '''
Array -- Medium
Given a m x n grid filled with non-negative numbers, find a path from top left to bottom right which minimizes the sum of all numbers along its path.
Note: You can only move either down or right at any point in time.
'''
class Solution(object):
    def minPathSum(self, grid):
        """Return the minimum path sum from the top-left to the bottom-right
        of *grid*, moving only right or down.

        :param grid: List[List[int]] of non-negative numbers.
        :return: int minimal path sum.
        """
        # Rolling 1-D DP: acc[j] holds the best path sum reaching column j
        # of the row processed so far.
        acc = []
        running = 0
        for cell in grid[0]:
            running += cell
            acc.append(running)
        for row in grid[1:]:
            # Leftmost column can only be reached from above.
            acc[0] += row[0]
            for j in range(1, len(row)):
                acc[j] = row[j] + min(acc[j], acc[j - 1])
        return acc[-1]
if __name__ == "__main__":
    # Smoke check: minimal path for this grid is 1 -> 2 -> 4 -> 1 -> 1 = 9.
    # (Dead commented-out asserts removed.)
    print(Solution().minPathSum([[1, 2, 4], [2, 4, 1], [3, 2, 1]]))
"762521772@qq.com"
] | 762521772@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.