text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
import scrapy
import json
import random, time
from loguru import logger
from scrapy.utils.project import get_project_settings
from KuaiShou.items import KuxuanKolUserItem
class KuxuanKolUserSpider(scrapy.Spider):
    """Build a KOL (Key Opinion Leader) seed library.

    Pages through the Kuxuan KOL list API to obtain seeds and, using the
    Kuaishou user_id as the entry point, yields one item per author with
    their basic profile information.
    (Translated from the original Chinese docstring.)
    """
    name = 'kuxuan_kol_user'
    custom_settings = {'ITEM_PIPELINES': {
        'KuaiShou.pipelines.KuaishouKafkaPipeline': 701,
        'KuaiShou.pipelines.KuaishouUserSeedsMySQLPipeline': 700
    }}
    settings = get_project_settings()
    # allowed_domains = ['dataapi.kuxuan-inc.com']
    sort_type = settings.get('SPIDER_KUXUAN_SORT_TYPE')
    start_urls = ['http://dataapi.kuxuan-inc.com/api/kwaiUser/index?sort_type={}&page=6792'.format(sort_type)]

    def parse(self, response):
        """Parse one list page: schedule the next page and yield user items."""
        rsp_json = json.loads(response.text)
        logger.info(rsp_json)
        # BUG FIX: errno may arrive as int or str depending on the API;
        # compare on the string form so an integer 0 is not treated as error.
        if str(rsp_json['errno']) != '0':
            logger.error('API response error: %s' % response.text)
            return
        current_page_num = int(rsp_json['rst']['pageInfo']['page'])
        page_limit = self.settings.get('SPIDER_KUXUAN_PAGE_LIMIT')
        if page_limit <= 0:
            # A non-positive configured limit means "crawl every page".
            page_limit = int(rsp_json['rst']['pageInfo']['pages'])
        if current_page_num < page_limit:
            try:
                # Random delay to avoid hammering the API.
                time.sleep(random.randint(3, 7))
                page_url = 'http://dataapi.kuxuan-inc.com/api/kwaiUser/index?sort_type={}&page={}'.format(
                    self.sort_type,
                    current_page_num + 1)
                logger.info('Request page url: %s' % page_url)
                yield scrapy.Request(page_url, callback=self.parse, dont_filter=True)
            except Exception as e:
                logger.error('scrapy.Request.errback: %s' % e)
        data = rsp_json['rst']['data']
        for user_dict in data:
            kuxuan_kol_user_item = KuxuanKolUserItem()
            kuxuan_kol_user_item['spider_name'] = self.name
            for key, value in user_dict.items():
                # The API reports 'user_id'; the item field is 'userId'.
                kuxuan_kol_user_item['userId' if key == 'user_id' else key] = value
            yield kuxuan_kol_user_item
|
import json
import os
from os import path
from absl import app
from absl import flags
import jax
import numpy as np
from PIL import Image
FLAGS = flags.FLAGS
flags.DEFINE_string('blenderdir', None,
'Base directory for all Blender data.')
flags.DEFINE_string('outdir', None,
'Where to save multiscale data.')
flags.DEFINE_integer('n_down', 4,
'How many levels of downscaling to use.')
jax.config.parse_flags_with_absl()
def load_renderings(data_dir, split):
  """Load images and camera metadata for one split from disk."""
  meta_path = path.join(data_dir, 'transforms_{}.json'.format(split))
  with open(meta_path, 'r') as fp:
    meta = json.load(fp)

  print('Loading imgs')
  images, cams = [], []
  for frame in meta['frames']:
    img_path = os.path.join(data_dir, frame['file_path'] + '.png')
    with open(img_path, 'rb') as imgin:
      pixels = np.array(Image.open(imgin), dtype=np.float32) / 255.
    cams.append(frame['transform_matrix'])
    images.append(pixels)

  ret = {}
  ret['images'] = np.stack(images, axis=0)
  print('Loaded all images, shape is', ret['images'].shape)
  ret['camtoworlds'] = np.stack(cams, axis=0)
  width = ret['images'].shape[2]
  camera_angle_x = float(meta['camera_angle_x'])
  # Pinhole focal length derived from the horizontal field of view.
  ret['focal'] = .5 * width / np.tan(.5 * camera_angle_x)
  return ret
def down2(img):
  """Average-pool an image by a factor of 2 in both spatial dimensions."""
  h, w = img.shape[0], img.shape[1]
  blocks = np.reshape(img, [h // 2, 2, w // 2, 2, -1])
  return blocks.mean(axis=(1, 3))
def convert_to_nerfdata(basedir, newdir, n_down):
  """Convert Blender data to multiscale.

  For each split, saves every image at n_down progressively half-resolution
  scales and writes a single metadata.json describing all scales.
  """
  if not os.path.exists(newdir):
    os.makedirs(newdir)
  splits = ['train', 'val', 'test']
  bigmeta = {}
  # Foreach split in the dataset
  for split in splits:
    print('Split', split)
    # Load everything
    data = load_renderings(basedir, split)
    # Save out all the images
    imgdir = 'images_{}'.format(split)
    os.makedirs(os.path.join(newdir, imgdir), exist_ok=True)
    fnames = []
    widths = []
    heights = []
    focals = []
    cam2worlds = []
    lossmults = []
    labels = []
    nears, fars = [], []
    f = data['focal']
    print('Saving images')
    for i, img in enumerate(data['images']):
      for j in range(n_down):
        fname = '{}/{:03d}_d{}.png'.format(imgdir, i, j)
        fnames.append(fname)
        fname = os.path.join(newdir, fname)
        with open(fname, 'wb') as imgout:
          img8 = Image.fromarray(np.uint8(img * 255))
          img8.save(imgout)
        widths.append(img.shape[1])
        heights.append(img.shape[0])
        # Focal length halves with each 2x downscale.
        focals.append(f / 2**j)
        cam2worlds.append(data['camtoworlds'][i].tolist())
        # Loss weight compensating for 4x fewer pixels per level.
        lossmults.append(4.**j)
        labels.append(j)
        nears.append(2.)
        fars.append(6.)
        img = down2(img)
    # Create metadata
    meta = {}
    meta['file_path'] = fnames
    meta['cam2world'] = cam2worlds
    meta['width'] = widths
    meta['height'] = heights
    meta['focal'] = focals
    meta['label'] = labels
    meta['near'] = nears
    meta['far'] = fars
    meta['lossmult'] = lossmults
    fx = np.array(focals)
    fy = np.array(focals)
    cx = np.array(meta['width']) * .5
    cy = np.array(meta['height']) * .5
    arr0 = np.zeros_like(cx)
    arr1 = np.ones_like(cx)
    # Inverse intrinsics (pixel -> camera ray), one 3x3 matrix per image.
    k_inv = np.array([
        [arr1 / fx, arr0, -cx / fx],
        [arr0, -arr1 / fy, cy / fy],
        [arr0, arr0, -arr1],
    ])
    k_inv = np.moveaxis(k_inv, -1, 0)
    meta['pix2cam'] = k_inv.tolist()
    bigmeta[split] = meta

  # Log shape/type of every metadata entry for sanity checking.
  for k in bigmeta:
    for j in bigmeta[k]:
      print(k, j, type(bigmeta[k][j]), np.array(bigmeta[k][j]).shape)

  jsonfile = os.path.join(newdir, 'metadata.json')
  with open(jsonfile, 'w') as f:
    json.dump(bigmeta, f, ensure_ascii=False, indent=4)
def main(unused_argv):
  """Convert every Blender scene directory under --blenderdir."""
  blenderdir = FLAGS.blenderdir
  outdir = FLAGS.outdir
  n_down = FLAGS.n_down
  if not os.path.exists(outdir):
    os.makedirs(outdir)
  # Every subdirectory of blenderdir is treated as one scene.
  scene_dirs = [
      d for d in (os.path.join(blenderdir, f) for f in os.listdir(blenderdir))
      if os.path.isdir(d)
  ]
  print(scene_dirs)
  for basedir in scene_dirs:
    print()
    newdir = os.path.join(outdir, os.path.basename(basedir))
    print('Converting from', basedir, 'to', newdir)
    convert_to_nerfdata(basedir, newdir, n_down)


if __name__ == '__main__':
  app.run(main)
|
# K-means clustering of mall customers (annual income vs spending score),
# with the elbow method used to justify k=5.
import matplotlib.pyplot as plt
import pandas as pd

dataset = pd.read_csv('Mall_Customers.csv')
# Columns 3+ hold Annual Income and Spending Score.
X = dataset.iloc[:, 3:].values

# Elbow method: within-cluster sum of squares (WCSS) for k = 1..10.
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11, 1):
    kmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title('Plot WCSS for n clusters')
plt.xlabel('N Cluster')
plt.ylabel('WCSS')
plt.show()

# Final model with k=5 chosen from the elbow plot.
kmeans = KMeans(n_clusters=5, init='k-means++', random_state=42)
y_kmeans = kmeans.fit_predict(X)
print(y_kmeans)

# Visualise each cluster and the centroids.
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s=100, c='red', label='Cluster 0')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s=100, c='blue', label='Cluster 1')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s=100, c='green', label='Cluster 2')
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s=100, c='black', label='Cluster 3')
# BUG FIX: the magenta series is cluster 4 but was labelled 'Cluster 3'.
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s=100, c='magenta', label='Cluster 4')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='yellow', label='centroids')
plt.title('KMeans Clustering using Elbow Method')
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.show()
"""
MIT License
Copyright (c) 2018 Max Planck Institute of Molecular Physiology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import typing
import re
import numpy as np # type: ignore
import pandas as pd # type: ignore
from . import util
def get_ctffind_4_1_0_header_names() -> typing.List[str]:
    """
    Return the column names used for a ctffind 4.1.0 input file.

    Arguments:
    None

    Returns:
    List of column names, in file order.
    """
    column_names = [
        'DefocusU',
        'DefocusV',
        'DefocusAngle',
        'PhaseShift',
        'CtfFigureOfMerit',
        'CtfMaxResolution',
    ]
    return column_names
def get_ctffind_4_1_0_extract_dict() -> typing.Dict[str, str]:
    """
    Return the extraction patterns for ctffind 4.1.0 meta information.

    Arguments:
    None

    Returns:
    Dictionary mapping output key -> regular expression whose first group
    captures the value from the ctffind log line.
    """
    patterns = {
        'version': r'.*CTFFind version ([^, ]*).*',
        'MicrographNameNoDW': r'.*Input file: ([^ ]*).*',
        'PixelSize': r'.*Pixel size: ([^ ]*).*',
        'Voltage': r'.*acceleration voltage: ([^ ]*).*',
        'SphericalAberration': r'.*spherical aberration: ([^ ]*).*',
        'AmplitudeContrast': r'.*amplitude contrast: ([^ ]*).*',
    }
    return patterns
def get_ctffind_4_1_0_meta(file_name: str) -> pd.DataFrame:
    """
    Import the ctffind meta information.

    Arguments:
    file_name - Name of the file to extract the information from.

    Returns:
    Pandas data frame (single row) containing the information.
    """
    extract_dict: typing.Dict[str, str] = get_ctffind_4_1_0_extract_dict()
    meta_frame: pd.DataFrame = pd.DataFrame(index=[0], columns=extract_dict.keys())
    with open(file_name, 'r') as read:
        lines: typing.List[str] = read.readlines()

    # Keys whose values are legitimately non-numeric strings.
    string_keys: typing.Set[str] = {'MicrographNameNoDW', 'version'}

    for line in lines:
        for key, pattern in extract_dict.items():
            match: typing.Optional[typing.Match[str]] = re.match(pattern, line)
            if match is None:
                continue
            try:
                meta_frame[key] = float(match.group(1))
            except ValueError:
                # Only the whitelisted keys may hold string values.
                assert key in string_keys, f'{key}: {match.group(1)}'
                meta_frame[key] = match.group(1)
    return meta_frame
def load_ctffind_4_1_0(file_name: str) -> pd.DataFrame:
    """
    Load a ctffind 4.1.0 output file.

    Arguments:
    file_name - Path to the ctffind file

    Returns:
    Pandas dataframe containing the per-micrograph data joined with the
    file's meta information.
    """
    column_names: typing.List[str] = get_ctffind_4_1_0_header_names()
    ctffind_data: pd.DataFrame = util.load_file(
        file_name,
        names=column_names,
        skiprows=5,
        usecols=(1, 2, 3, 4, 5, 6)
    )
    # ctffind reports the phase shift in radians; convert to degrees.
    ctffind_data['PhaseShift'] = np.degrees(ctffind_data['PhaseShift'])
    meta_data: pd.DataFrame = get_ctffind_4_1_0_meta(file_name=file_name)
    return pd.concat([ctffind_data, meta_data], axis=1)
def load_ctffind(
        file_name: str,
        version: typing.Optional[str]=None
        ) -> pd.DataFrame:
    """
    Load a ctffind file, dispatching on the ctffind version.

    By default, the latest known version is assumed.

    Arguments:
    file_name - Path to the ctffind file.
    version - ctffind version string; defaults to the latest version.

    Returns:
    Pandas dataframe containing the ctffind file information.
    """
    # Map version string -> loader implementation for that version.
    loader_dict: typing.Dict[
        str,
        typing.Callable[[str], pd.DataFrame]
    ] = {
        '4.1.0': load_ctffind_4_1_0,
    }
    loader: typing.Callable[[str], pd.DataFrame]
    loader = util.extract_function_from_function_dict(loader_dict, version)
    return loader(file_name)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-12-19 13:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.3: introduces Instructor (one-to-one
    # with the user model) and ClassInstructor, the through table linking
    # instructors to course classes.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('course', '0004_ranking-size'),
    ]

    operations = [
        # Through table; its 'instructor' FK is added below once
        # Instructor itself exists.
        migrations.CreateModel(
            name='ClassInstructor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course_class', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.CourseClass')),
            ],
        ),
        migrations.CreateModel(
            name='Instructor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(blank=True, max_length=200)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='classinstructor',
            name='instructor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Instructor'),
        ),
        # An instructor can be linked to a given class at most once.
        migrations.AlterUniqueTogether(
            name='classinstructor',
            unique_together=set([('instructor', 'course_class')]),
        ),
    ]
|
"""
Proxy models
These models exist so that we can present
more than one view of the same DB table in /admin
"""
from datetime import datetime
from django.contrib.gis.db.models import Q
from django.db.models import Manager
from .divisions import OrganisationDivision
from .organisations import Organisation, OrganisationGeography
INVALID_SOURCES = ("unknown", "lgbce", "")
class DivisionProblemManager(Manager):
    """Manager selecting OrganisationDivisions with data problems."""

    def get_queryset(self):
        qs = super().get_queryset()
        # Some of these conditions are OK in forward-dated division(set)s;
        # they only become an issue once current/past, so DivisionSets with
        # a future start_date are excluded from this report.
        qs = qs.filter(divisionset__start_date__lte=datetime.today())

        # Divisions should always have an associated geography record.
        missing_geography = Q(geography=None)

        # If the division has a GSS code, the boundary source should be
        # BoundaryLine/OSNI (i.e. not one of the invalid sources).
        bad_source_with_gss = (
            Q(geography__source__in=INVALID_SOURCES)
            & Q(official_identifier__startswith="gss:")
        )

        # Once a division is current (or past) it should carry a GSS code
        # ... mostly (CED and NIE divisions are exempt).
        missing_gss = (
            ~Q(official_identifier__startswith="gss:")
            & ~Q(division_type="CED")
            & ~Q(division_type="NIE")
        )

        return qs.filter(missing_geography | bad_source_with_gss | missing_gss)
class DivisionProblem(OrganisationDivision):
    """Proxy model exposing problematic divisions as a separate admin view."""

    objects = DivisionProblemManager()

    @property
    def no_gss_code(self):
        # GSS-coded identifiers start with the literal prefix "gss:".
        return self.official_identifier[:4] != "gss:"

    @property
    def invalid_source(self):
        try:
            return self.geography.source in INVALID_SOURCES
        except OrganisationDivision.geography.RelatedObjectDoesNotExist:
            # No geography at all counts as an invalid source.
            return True

    @property
    def no_geography(self):
        try:
            return not self.geography
        except OrganisationDivision.geography.RelatedObjectDoesNotExist:
            return True

    @property
    def problem_text(self):
        """Describe the first detected problem, or '' when none apply."""
        checks = (
            (self.no_geography, "No associated DivisionGeography"),
            (self.no_gss_code, "No GSS code"),
            (self.invalid_source, "Boundary source is invalid"),
        )
        for failed, description in checks:
            if failed:
                return description
        return ""

    class Meta:
        verbose_name_plural = "⚠️ Division Geography Problems"
        proxy = True
class OrganisationProblemManager(Manager):
    """Manager selecting Organisations with missing related records."""

    def get_queryset(self):
        # Organisations should always have at least one related
        # OrganisationGeography record.
        no_geography = Q(geographies=None)

        # Usually a missing DivisionSet is a problem, although (as always)
        # there are some exceptions for organisation types that have none.
        no_divisionset = (
            Q(divisionset=None)
            & ~Q(organisation_type="combined-authority")
            & ~Q(organisation_type="police-area")
            & ~(
                Q(organisation_type="local-authority")
                & Q(official_name="Greater London Authority")
            )
        )

        # Organisations should always have at least one ElectedRole.
        no_electedrole = Q(electedrole=None)

        return super().get_queryset().filter(
            no_geography | no_divisionset | no_electedrole
        )
class OrganisationProblem(Organisation):
    """Proxy model exposing problematic Organisations as a separate admin view."""

    objects = OrganisationProblemManager()

    @property
    def no_geography(self):
        # exists() issues a cheap EXISTS query; the previous
        # len(...all()) == 0 materialised the whole queryset.
        return not self.geographies.exists()

    @property
    def no_divisionset(self):
        return not self.divisionset.exists()

    @property
    def no_electedrole(self):
        return not self.electedrole.exists()

    @property
    def problem_text(self):
        """Describe the first detected problem, or '' when none apply."""
        if self.no_geography:
            return "No associated OrganisationGeography"
        if self.no_divisionset:
            return "No associated DivisionSet"
        if self.no_electedrole:
            return "No associated ElectedRole"
        return ""

    class Meta:
        verbose_name_plural = "⚠️ Organisation Problems"
        proxy = True
class OrganisationGeographyProblemManager(Manager):
    """Manager selecting OrganisationGeographies with data problems."""

    def get_queryset(self):
        # OrganisationGeographies should have a GSS code... mostly
        # (police areas are exempt).
        missing_gss = (
            Q(gss="")
            & ~Q(organisation__organisation_type="police-area")
        )
        # A NULL geography field is always a problem.
        missing_geography = Q(geography=None)
        # So is a boundary source other than BoundaryLine/OSNI, etc.
        bad_source = Q(source__in=INVALID_SOURCES)

        return super().get_queryset().filter(
            missing_gss | missing_geography | bad_source
        )
class OrganisationGeographyProblem(OrganisationGeography):
    """Proxy model exposing problematic OrganisationGeographies in admin."""

    objects = OrganisationGeographyProblemManager()

    @property
    def no_gss_code(self):
        return self.gss == ""

    @property
    def no_geography(self):
        return not self.geography

    @property
    def invalid_source(self):
        return self.source in INVALID_SOURCES

    @property
    def problem_text(self):
        """Describe the first detected problem, or '' when none apply."""
        checks = (
            (self.no_geography, "Geography field is NULL"),
            (self.invalid_source, "Boundary source is invalid"),
            (self.no_gss_code, "No GSS code"),
        )
        for failed, description in checks:
            if failed:
                return description
        return ""

    class Meta:
        verbose_name_plural = "⚠️ Organisation Geography Problems"
        proxy = True
|
import matplotlib.pyplot as plt
import numpy as np
def plot_figures(fpr, tpr, history, auc, roc_fn, loss_fn, accuracy_fn):
    """Save three figures: the ROC curve, and the training/validation
    loss and accuracy curves from a Keras-style History object."""
    width = 2
    resolution = 150

    def save_history_plot(train_key, val_key, title, ylabel, out_fn):
        # One train-vs-validation curve per figure.
        plt.figure()
        plt.plot(history.history[train_key], lw=width)
        plt.plot(history.history[val_key], lw=width)
        plt.title(title)
        plt.ylim([0.0, 1.05])
        plt.ylabel(ylabel)
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.savefig(out_fn, dpi=resolution)

    # ROC curve with the diagonal chance line for reference.
    plt.figure()
    plt.plot(fpr, tpr, lw=width, label="ROC curve (area = {:0.2f})".format(auc), color='darkorange')
    plt.plot([0, 1], [0, 1], label='random guessing', lw=width, linestyle='--', color='navy')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC curve')
    plt.legend(loc="lower right")
    plt.savefig(roc_fn, dpi=resolution)

    save_history_plot('loss', 'val_loss', 'model loss', 'loss', loss_fn)
    save_history_plot('acc', 'val_acc', 'model accuracy', 'accuracy', accuracy_fn)
def load_norm_data():
    """Load the PCam train/valid/test npz archives, scaling images to [0, 1].

    Returns (x_train, x_valid, x_test, y_train, y_valid, y_test).
    """
    images = {}
    labels = {}
    for split in ('train', 'valid', 'test'):
        archive = np.load("pcam_{}_data.npz".format(split))
        # Normalize uint8 pixel values into [0, 1] floats.
        x = archive['x_' + split].astype('float32')
        x /= 255
        images[split] = x
        labels[split] = archive['y_' + split]
    return (images['train'], images['valid'], images['test'],
            labels['train'], labels['valid'], labels['test'])
|
import os
import sys
sys.path.insert(0, 'tools/families')
sys.path.insert(0, 'tools/trees')
import find_neighbors_to_fam
import fam
import read_tree
import rf_distance
import prune
def get_induced_gene_tree(datadir, family, method, subst_model, leaf_set):
    """Read the gene tree for a family and restrict it to the given leaves."""
    tree_path = fam.get_gene_tree(datadir, subst_model, family, method)
    full_tree = read_tree.read_tree(tree_path)
    # fast_prune is used instead of the (slower) ete3 prune.
    return prune.fast_prune(full_tree, leaf_set)
def get_translator(neighbors):
    """Build a mapping from each neighbor pair's first label to its second."""
    return {source: target for source, target in neighbors}
def get_filtered_neighbors(neighbors):
    """Keep only neighbor pairs whose second label occurs exactly once."""
    seen = set()
    duplicated = set()
    for pair in neighbors:
        label = pair[1]
        if label in seen:
            duplicated.add(label)
        seen.add(label)
    return {pair for pair in neighbors if pair[1] not in duplicated}
def assess_gene_tree(emf, datadir, method, subst_model, families):
    """Accumulate RF distances between families and their homolog neighbors.

    For every (family, neighbor_family) pair with at least 4 uniquely
    labelled neighbors, prunes both gene trees to the shared leaves,
    renames tree1's leaves through the neighbor mapping, and sums the
    ete3 RF distance over all pairs.
    """
    if (len(families) == 1 and families[0] == "all"):
        families = fam.get_families_list(datadir)
    per_family_hom_neighbors = find_neighbors_to_fam.get_per_family_homolog_neighbors(emf, datadir, families)
    total = 0
    for family in families:
        hom_neighbors = per_family_hom_neighbors[family]
        for neighbor_family in hom_neighbors:
            if (neighbor_family == family):
                continue
            neighbors = get_filtered_neighbors(hom_neighbors[neighbor_family])
            # Need at least 4 shared leaves for a meaningful RF comparison.
            if (len(neighbors) < 4):
                continue
            leaf_set1 = set()
            leaf_set2 = set()
            for neighbor in neighbors:
                leaf_set1.add(neighbor[0])
                leaf_set2.add(neighbor[1])
            tree1 = get_induced_gene_tree(datadir, family, method, subst_model, leaf_set1)
            tree2 = get_induced_gene_tree(datadir, neighbor_family, method, subst_model, leaf_set2)
            translator = get_translator(neighbors)
            try:
                for leaf in tree1.get_leaves():
                    leaf.name = translator[leaf.name]
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            except Exception:
                print("ERROR in family " + family + " and neighbor_family " + neighbor_family)
                print("Tree1 : " + tree1.write())
                print("Tree1 2 " + tree2.write())
                continue
            distance_cell = rf_distance.ete3_rf(tree1, tree2)
            total += distance_cell[0]
    print("Total: " + str(total))
if (__name__ == "__main__"):
    if (len(sys.argv) < 6):
        print("Syntax python " + os.path.basename(__file__) + " emf datadir method subst_model families")
        # BUG FIX: execution previously fell through after printing the
        # usage message and crashed with IndexError on sys.argv[1].
        sys.exit(1)
    emf = sys.argv[1]
    datadir = sys.argv[2]
    method = sys.argv[3]
    subst_model = sys.argv[4]
    families = sys.argv[5:]
    assess_gene_tree(emf, datadir, method, subst_model, families)
|
from _typeshed import Incomplete
from collections.abc import Generator
# Typeshed-style stub signatures for networkx triad-analysis functions;
# bodies are intentionally `...` (implementations live in the runtime package).
def triadic_census(G, nodelist: Incomplete | None = None): ...
def is_triad(G): ...
def all_triplets(G): ...
def all_triads(G) -> Generator[Incomplete, None, None]: ...
def triads_by_type(G): ...
def triad_type(G): ...
def random_triad(G, seed: Incomplete | None = None): ...
|
# import sys
# sys.path.insert(0,'..')
# sys.path.insert(1,'../n1_local_image_descriptors')
# import sift
import imtools
from n1_local_image_descriptors import sift
from numpy.ma import log
from scipy.cluster.vq import *
from numpy import *
import pickle
class Vocabulary(object):
    """Visual-word vocabulary built with k-means over SIFT descriptors."""

    def __init__(self, name):
        self.name = name
        self.voc = []            # word cluster centers
        self.idf = []            # inverse document frequency for each word
        self.trainingdata = []   # feature files the vocabulary was trained on
        self.nbr_words = 0       # number of visual words

    def train(self, featurefiles, k=100, subsampling=10):
        """ Train a vocabulary from features in files listed
            in featurefiles using k-means with k number of words.
            Subsampling of training data can be used for speedup. """
        nbr_images = len(featurefiles)
        # Read every descriptor file, stacking all descriptors for k-means.
        per_image_descr = [sift.read_features_from_file(featurefiles[0])[1]]
        stacked = per_image_descr[0]
        for i in arange(1, nbr_images):
            per_image_descr.append(sift.read_features_from_file(featurefiles[i])[1])
            stacked = vstack((stacked, per_image_descr[i]))
            print(i)
        # k-means; the last argument sets the number of runs.
        self.voc, distortion = kmeans(stacked[::subsampling, :], k, 1)
        self.nbr_words = self.voc.shape[0]
        # Project every training image onto the vocabulary.
        imwords = zeros((nbr_images, self.nbr_words))
        for i in range(nbr_images):
            imwords[i] = self.project(per_image_descr[i])
        nbr_occurences = sum((imwords > 0) * 1, axis=0)
        self.idf = log((1.0 * nbr_images) / (1.0 * nbr_occurences + 1))
        self.trainingdata = featurefiles

    def project(self, descriptors):
        """ Project descriptors on the vocabulary
            to create a histogram of words. """
        imhist = zeros((self.nbr_words))
        words, distance = vq(descriptors, self.voc)
        for word_index in words:
            imhist[word_index] += 1
        return imhist
def create_vocabulary():
    """Train a 1000-word vocabulary from the SIFT files of 'images' and pickle it."""
    imlist = imtools.get_imlist('images')
    # Each image has a matching .sift descriptor file alongside it.
    featlist = [image_name[:-3] + 'sift' for image_name in imlist]
    voc = Vocabulary('ukbenchtest')
    voc.train(featlist, 1000, 10)
    # Persist the trained vocabulary for later retrieval runs.
    with open('vocabulary.pkl', 'wb') as f:
        pickle.dump(voc, f)
    print('vocabulary is:', voc.name, voc.nbr_words)
# create_vocabulary()
|
import smtplib
import pandas as pd
import numpy as np
import pyrebase
import os
# Firebase download path kept for reference (currently disabled).
'''config = {
  "apiKey": "AIzaSyAXtE0fQeJSN8r1Omtyx5vTlsdyYrF9XpE",
  "authDomain": "tympass-32736.firebaseapp.com",
  "databaseURL" : "https://tympass-32736.firebaseio.com",
  "projectId": "tympass-32736",
  "storageBucket": "tympass-32736.appspot.com",
  "messagingSenderId": "990276104410",
  "appId": "1:990276104410:web:a6d956ded09fc3c958b5e3",
  "measurementId": "G-7HF9TQ5QC1"
}
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()
path_on_cloud = "data/demo.xlsx"
#path_local=r'D:\lol\demo.xlsx';
#storage.child(path_on_cloud).put(path_local)
d = os.getcwd()
os.chdir(d)
storage.child(path_on_cloud).download("new.xlsx")'''

d = r'D:\lol'
os.chdir(d)
df = pd.read_excel('demo.xlsx')

name = input("Enter your name - ")
# Look up the user's email address and attendance flag by name.
recipient = df[df['Name'] == name]['email'].tolist()[0]
print(recipient)
attendance = df[df['Name'] == name]['P/A'].tolist()[0]
p = 1 if attendance == 'Present' else 0
#os.remove("demo.xlsx")

# creates SMTP session
s = smtplib.SMTP_SSL('smtp.gmail.com', 465)
# SECURITY: credentials are hard-coded in source control — move them to
# environment variables or a secrets store and rotate this password.
s.login("tracksmartattendance@gmail.com", "12345678a@")

# message to be sent
message = "You have been marked as present today."
message1 = "You have been marked as absent today."

# sending the mail
print(p)
if p == 1:
    s.sendmail("tracksmartattendance@gmail.com", recipient, message)
else:
    s.sendmail("tracksmartattendance@gmail.com", recipient, message1)
# terminating the session
s.quit()
|
"""
Parse QoS statistics such as throughput and jitter from Spirent traffic measurement,
apply criteria, and report testing result
input: 1. a list of remotes: e.g. remote_list = ['e8350', 'x1', 'x3', 'x7', 'x5-A', 'x5-B']
2. a list of priority ("default" or "not_default"):
"default" -- all applications running in the remote are of equal priority
"not_default" -- applications running in the remote are not all of equal priority
e.g. priority_list = ['default', 'not_default', 'default', 'default', 'default', 'not_default']
3. statistics file: a csv file
4. criteria file: a csv file
5. test case: an integer indicating which test case
Output: a dictionary, e.g
(1) Test passes
{
'Grade' : 'Pass',
'Num_of_VRMT_Pass' : 18,
'Num_of_VRMT" : 18,
'Info_of_Fail" : []
}
(2) Test fails
{
'Grade' : 'Fail',
'Num_of_VRMT_Pass' : 17,
'Num_of_VRMT" : 18,
'Info_of_Fail" : [{'expected': 45000, 'actual': 65.4, 'Name': 'x3_Voip_Jitter'}]
}
which means the Voip application in remote x3 fails its jitter criterion
Usage:
1. Create an object : obj = processQosResult()
2. Do setup : obj.setup(remote_list, priority_list, "statistics.csv", "criteria.csv", 1)
3. Parse statistics : obj.parseQosStats()
4. Report test result: report = obj.getQosReport() where the output is a dictionary
For each test case, 2, 3, and 4 is repeated
"""
import os
import pandas as pd
import numpy as np
from robot.libraries.BuiltIn import BuiltIn
from robot.errors import ExecutionFailed
class QoS_Spirent_Parse:
"""
Configure testing parameters:
- Arguments
- remote_list : list of remotes
- priority_list : list of settings ("default" or "not_default") regarding priority equality
- qos_stats_file_name : file of Spirent traffic statistics
- qoa_criteria_file_name: file of pass/fail criteria
- test_case : integer number starting with 1 to indicate which test case
- Return
None
"""
def setup(self, remote_list, priority_list, qos_stats_file_name, qos_criteria_file_name, test_case):
self.remote_list = remote_list
self.priority_list = priority_list
self.qos_stats_file_name = qos_stats_file_name
self.qos_criteria_file_name = qos_criteria_file_name
self.test_case = int(test_case)
self.objerr = None
self.grade = dict()
self.stat_list = ["AvgLatency", "AvgJitter", "BitRate", "DroppedFramePercentRate"]
"""
Calculate total throughput in one remote
- Arguments
- remote : remote
- application: list of applications running in one remote
- Return
Throughput
"""
def sumThroughput(self, remote, applications):
total = 0
for app in applications:
column_heading = remote + "_" + app + "_" + "BitRate"
org = self.df_stat[column_heading].tolist()
data = ['0' if x == 'N/A' else x for x in org]
total += np.mean([float(i) for i in data])
return total
"""
Apply pass/fail criteria to QoS statistics
- Arguments
- measurement: QoS statistics value
- criteria: pass/fail criteria
- app: application
- stat: QoS statistics name
- test_case: test case integer
- priority: "default" or "not_default"
- Return
True : if the criteria is met
Fasle: otherwise
"""
    def applyCriteria(self, measurement, criteria, app, stat, test_case, priority,column_heading):
        """Apply the pass/fail criteria to one QoS statistic.

        Arguments:
            measurement: measured QoS statistic value.
            criteria: pass/fail threshold for the statistic.
            app: application name ("VOIP", "DATA" or "DEFAULT").
            stat: QoS statistic name (e.g. "BitRate", "AvgJitter").
            test_case: test case number (1-4); may arrive as a string.
            priority: "default" or "not_default".
            column_heading: statistics column name, used in failure messages.

        Returns:
            True if the criteria is met, False otherwise.
        """
        passCriteria = False;
        test_case = int(test_case)
        if test_case == 1 or test_case == 2:
            if stat == "BitRate":
                # Throughput must reach the criteria (MIR minus tolerance).
                self.verify_should_be_true(str(measurement) + ">=" + str(criteria), str(column_heading) + " value "+ str(measurement) + " is not greater than or equal to criteria " + str(criteria))
                if measurement >= criteria:
                    passCriteria = True
            elif stat == "AvgJitter":
                if self.MIR_is_capped: # capped with MIR
                    # Only VOIP jitter is gated when the remote is MIR-capped.
                    if app != "VOIP":
                        passCriteria = True
                    else:
                        self.verify_should_be_true(str(measurement) + "<=" + str(criteria), str(column_heading) + " value " +
                                                   str(measurement) + " is not lesser than or equal to criteria "
                                                   + str(criteria) + " when VOIP MIR is capped")
                        if measurement <= criteria:
                            passCriteria = True
                else:
                    # not capped with MIR
                    self.verify_should_be_true(str(measurement) + "<=" + str(criteria), str(column_heading) + " value " + str(measurement) + " is not lesser than or equal to criteria " + str(criteria) + " when VOIP MIR is not capped")
                    if measurement <= criteria:
                        passCriteria = True
            elif stat == "AvgLatency":
                # Latency is not gated in test cases 1 and 2.
                passCriteria = True
            elif stat == "DroppedFramePercentRate":
                if self.MIR_is_capped:
                    # Drops are expected when throughput is capped.
                    passCriteria = True
                else:
                    criteria = 0
                    self.verify_should_be_true(str(measurement) + "==" + str(criteria), str(column_heading) + " value " +
                                               str(measurement) + " is not equal to criteria " + str(criteria))
                    if measurement == criteria: # no packet loss
                        passCriteria = True
        elif test_case == 3 or test_case == 4:
            if stat == "BitRate":
                self.verify_should_be_true(str(measurement) + ">=" + str(criteria), str(column_heading) + " value " +
                                           str(measurement) + " is not greater than or equal to criteria" +
                                           str(criteria))
                if measurement >= criteria:
                    passCriteria = True
            elif stat == "AvgJitter":
                if priority == "default": # each appliction has the same priority
                    if self.MIR_is_capped: # remote capped with MIR
                        if app != "VOIP": # application: Data or Default
                            passCriteria = True
                        else: # application: Voip
                            self.verify_should_be_true(str(measurement) + "<=" + str(criteria), str(column_heading) + " value " + str(measurement)
                                                       + " is not lesser than or equal to criteria " + str(criteria)
                                                       + " when VOIP MIR is capped for " + str(priority) + " priority")
                            if measurement <= criteria:
                                passCriteria = True
                    else: # remote not capped with MIR
                        self.verify_should_be_true(str(measurement) + "<=" + str(criteria), str(column_heading) + " value " + str(measurement)
                                                   + " is not lesser than or equal to criteria " + str(criteria)
                                                   + " when VOIP MIR is not capped for " + str(priority) + " priority")
                        if measurement <= criteria:
                            passCriteria = True
                else:
                    # Distinct priorities: jitter must meet the criteria AND be
                    # ordered by priority (VOIP lowest jitter, DEFAULT highest),
                    # checked against self.priority_dict (per-app mean jitters).
                    self.verify_should_be_true(str(measurement) + "<=" + str(criteria), str(column_heading) + " value " + str(measurement)
                                               + " is not lesser than or equal to criteria " + str(criteria)
                                               + " for " + str(priority) + " priority")
                    if measurement <= criteria:
                        if app == "VOIP": # Voip with the highest priority
                            condition = np.allclose(measurement, np.amin(self.priority_dict))
                            self.verify_should_be_true(condition, "Voip with the highest priority")
                            if condition:
                                passCriteria = True
                        elif app == "DEFAULT": # Default with the lowest priority
                            condition = np.allclose(measurement, np.amax(self.priority_dict))
                            self.verify_should_be_true(condition, "Default with the lowest priority")
                            if condition:
                                passCriteria = True
                        elif app == "DATA":
                            # Data with the middel priority
                            condition = measurement >= np.amin(self.priority_dict) and measurement <= np.amax(self.priority_dict)
                            self.verify_should_be_true(condition, " Data with the middel priority")
                            if condition:
                                passCriteria = True
            else: # == "AvgLatency" or "DroppedFramePercentRate"
                passCriteria = True
        return passCriteria
"""
Parse Qos statistics
- Arguments
None
- Return
True : if parsing is successful
False: otherwise
"""
def parseQosStats(self):
    """
    Read the QoS statistics and pass/fail criteria CSVs and grade every
    remote/application/stat combination via applyCriteria.

    Results are stored in self.grade ("data": per-stat booleans, "apps":
    the matching column headings).  Returns True on success, False when
    remote_list and priority_list do not line up; re-raises the first
    soft-assert failure collected by verify_should_be_true, if any.
    """
    # read qos statistics, and qos pass/fail criteria
    BuiltIn().should_be_true(os.path.isfile(self.qos_stats_file_name) , "File not Found")
    BuiltIn().should_be_true(os.path.isfile(self.qos_criteria_file_name) , "criteria File not found")
    if len(self.remote_list) != len(self.priority_list):
        BuiltIn().log("list is not matching",level="ERROR",html=True,console=True)
        return False
    self.df_stat = pd.read_csv(self.qos_stats_file_name)
    self.df_criteria = pd.read_csv(self.qos_criteria_file_name)
    app_list = list(["VOIP", "DATA", "DEFAULT"])
    stat_list = list(["AvgLatency", "AvgJitter", "BitRate", "DroppedFramePercentRate"])
    result_lst = list()
    app_lst = list()
    # iterate through remote
    for i in range(len(self.remote_list)):
        remote = self.remote_list[i]
        priority = self.priority_list[i]
        column_heading = remote + "_" + "MIR"
        # Per-test-case MIR cap and its tolerance for this remote.
        MIR = self.df_criteria[column_heading].tolist()[self.test_case-1]
        MIR_tolerance = self.df_criteria[column_heading+"_"+"Tolerance"].tolist()[self.test_case-1]
        self.MIR_is_capped = False
        if MIR != "infinite":
            self.MIR_is_capped = True
        throughput_sum = self.sumThroughput(remote, app_list)
        # NOTE(review): despite the name, priority_dict is a *list* of the
        # per-app mean AvgJitter values, consumed by applyCriteria.
        self.priority_dict = list()
        if priority == "not_default":
            for app in app_list:
                index = remote + "_" + app + "_" + "AvgJitter"
                org = self.df_stat[index].tolist()
                # Treat 'N/A' samples as 0 before averaging.
                data = ['0' if x == 'N/A' else x for x in org]
                self.priority_dict.append(np.mean([float(i) for i in data]))
        # iterate through application per remote
        for app in app_list: #iterate through qos stats per application per remote
            for stat in stat_list:
                column_heading = remote + "_" + app + "_" + stat
                if stat == "BitRate" and MIR != "infinite":
                    # Capped link: compare the summed throughput against MIR.
                    measure = throughput_sum
                    criteria = MIR - MIR_tolerance
                else:
                    org = self.df_stat[column_heading].tolist()
                    # Treat 'N/A' samples as 0 before averaging.
                    data = ['0' if x == 'N/A' else x for x in org]
                    measure = np.mean([float(i) for i in data])
                    if stat == "AvgJitter":
                        criteria = 47000 # tolerance = 2000
                    elif stat == "DroppedFramePercentRate" or stat == "AvgLatency":
                        criteria = 0
                    else:
                        # BitRate with no MIR cap: use the per-test-case CIR.
                        CIR = self.df_criteria[column_heading].tolist()[self.test_case-1]
                        CIR_tolerance = self.df_criteria[column_heading+"_"+"Tolerance"].tolist()[self.test_case-1]
                        criteria = CIR - CIR_tolerance
                ret = self.applyCriteria(measure, criteria, app, stat, self.test_case, priority,column_heading)
                result_lst.append(ret)
                app_lst.append(column_heading)
    self.grade["data"] = result_lst
    self.grade["apps"] = app_lst
    if self.objerr:
        # Surface the first soft failure recorded by verify_should_be_true.
        err = self.objerr
        self.objerr = None
        raise err
    return True
"""
Report QoS test result as "Pass" or "Fail"
- Arguments
None
"""
def getQosResult(self):
    """
    Report the consolidated QoS test result as "Pass" or "Fail".

    Consumes self.grade (per-stat booleans from parseQosStats), grouped in
    runs of len(self.stat_list); an app passes only when every one of its
    stats passed.  Returns a report dict with pass counts, failure details
    and an overall grade, and raises a Robot Framework failure when the
    grade is "Fail".
    """
    grades = self.grade["data"]
    stat_number = len(self.stat_list)
    # BUG FIX: '/' yields a float on Python 3, which breaks range() below.
    # The grade list length is an exact multiple of stat_number, so integer
    # division is equivalent on Python 2 and correct on Python 3.
    apps_number = len(grades) // stat_number
    pass_number = 0
    for i in range(apps_number):
        true_number = 0
        for j in range(stat_number):
            true_number += grades[i * stat_number + j]
        if int(true_number) == int(stat_number):
            pass_number += 1
    report = dict()
    report["Num_of_VRMT_Pass"] = pass_number
    report["Num_of_VRMT"] = apps_number
    report["Info_of_Fail"] = list()
    # Indices of every individual failed stat.
    indice = [i for i, x in enumerate(self.grade['data']) if x == False]
    for idx in indice:
        column_heading = self.grade['apps'][idx]
        org = self.df_stat[column_heading].tolist()
        # Treat 'N/A' samples as 0 before averaging (same as parseQosStats).
        samples = ['0' if x == 'N/A' else x for x in org]
        actual_value = np.mean([float(i) for i in samples])
        # Expected value mirrors the criteria selection in parseQosStats.
        if column_heading.find("Jitter") != -1:
            expected_value = 47000
        elif column_heading.find("Dropped") != -1:
            expected_value = 0
        else:
            expected_value = self.df_criteria[column_heading].tolist()[self.test_case - 1]
        fail_detail = {"Expected": expected_value, "Actual": actual_value, "Name": column_heading}
        report["Info_of_Fail"].append(fail_detail)
    s = ""
    if pass_number == apps_number:
        report["Grade"] = "Pass"
    else:
        report["Grade"] = "Fail"
        for key in report['Info_of_Fail']:
            s += str(key) + "\n"
    BuiltIn().should_be_true(report["Grade"] == "Pass", "Consolidated failed - Output stats : \n" + str(s))
    return report
def verify_should_be_true(self, condition, msg):
    """
    Soft-assert 'condition' via Robot Framework: on failure, log 'msg' and
    remember the error in self.objerr (re-raised later by parseQosStats)
    instead of aborting the keyword immediately.

    NOTE(review): the message handed to should_be_true is the fixed string
    'Mismatch Stats Found'; 'msg' is only written to the log -- confirm
    that is intentional.
    """
    try:
        BuiltIn().run_keyword_and_continue_on_failure("should_be_true", condition, "Mismatch Stats Found")
    except ExecutionFailed as err:
        self.objerr=err
        BuiltIn().log(str(msg), level="ERROR", html=True)
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
class Account(models.Model):
    """A bank account belonging to a user; transactions are attached to it."""
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0 (where the argument became mandatory), so behaviour is
    # unchanged while the model becomes forward-compatible.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        """URL of this account's detail page."""
        return reverse('djofx_account', args=[self.pk, ])

    def get_unverified_url(self):
        """URL of the page listing this account's unverified transactions."""
        return reverse('djofx_account_unverified', args=[self.pk, ])

    def get_auto_categorisation_url(self):
        """URL that triggers auto-categorisation for this account."""
        return reverse('djofx_account_autocategorise', args=[self.pk, ])

    def earliest_transaction(self):
        """Date of the oldest transaction, or None if the account is empty."""
        try:
            return self.transaction_set.all().order_by('date')[0].date
        except IndexError:
            return None

    def latest_transaction(self):
        """Date of the newest transaction, or None if the account is empty."""
        try:
            return self.transaction_set.all().order_by('-date')[0].date
        except IndexError:
            return None

    def unverified_transactions(self):
        """Queryset of transactions whose category has not been verified."""
        return self.transaction_set.filter(category_verified=False)
class TransactionCategory(models.Model):
    """User-defined transaction category (outgoings, income or transfer)."""
    OUTGOINGS = 'out'
    INCOME = 'inc'
    INTERNAL_TRANSFER = 'int'
    TRANSACTION_TYPES = (
        (OUTGOINGS, 'Outgoings'),
        (INCOME, 'Income'),
        (INTERNAL_TRANSFER, 'Internal Transfer'),
    )
    # on_delete made explicit (CASCADE was already the implicit default;
    # mandatory from Django 2.0) -- behaviour unchanged.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    category_type = models.CharField(
        max_length=3,
        choices=TRANSACTION_TYPES,
        default=OUTGOINGS
    )

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        """URL of this category's detail page."""
        return reverse('djofx_category', args=[self.pk, ])

    class Meta:
        verbose_name_plural = 'Transaction categories'
        ordering = ('name', )
class Transaction(models.Model):
    """One imported bank transaction, unique per (account, transaction_key)."""
    # on_delete made explicit on both foreign keys (CASCADE was already the
    # implicit default; mandatory from Django 2.0) -- behaviour unchanged.
    account = models.ForeignKey(Account, on_delete=models.CASCADE)
    transaction_key = models.CharField(max_length=255)
    amount = models.DecimalField(max_digits=15, decimal_places=2)
    date = models.DateField()
    payee = models.CharField(max_length=255)
    transaction_type = models.CharField(max_length=255)
    transaction_category = models.ForeignKey(
        TransactionCategory,
        blank=True,
        null=True,
        on_delete=models.CASCADE
    )
    category_verified = models.BooleanField(default=False)

    def absolute_amount(self):
        """Absolute value of the Decimal amount (sign stripped)."""
        return self.amount.copy_abs()

    def get_categorisation_url(self):
        """URL of the page for (re-)categorising this transaction."""
        return reverse('djofx_categorise', args=[self.pk, ])

    class Meta:
        unique_together = ('account', 'transaction_key')
        ordering = ('date', )
|
from genetic_algorithm import GA
from experiments.plots import plot_param_evolution
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
import matplotlib
def main():
    """
    This function runs variation 3.
    In variation 3, the population of individuals is generated with randomly assigned strategies x (different for every
    agent). Furthermore, all players are equally non-egoistic, i.e. 1/2 <= alpha < 1.
    We let the strategies x evolve
    """
    # NOTE(review): 'normal' is not an actual font family name; matplotlib
    # warns and falls back to its default -- 'sans-serif' was likely intended.
    font = {'family': 'normal', 'size': 15}
    matplotlib.rc('font', **font)
    # Simulation parameters.
    population = 200
    iterations = 250
    k = 0.5
    m = 0.5
    mu = 0.1                 # mutation rate
    alpha = 0.8              # shared altruism coefficient (1/2 <= alpha < 1)
    mutable_params = ['x']   # only the strategies x evolve
    x = None                 # None -> GA draws random initial strategies
    genetic_algo = GA(n_population=population, m_iterations=iterations,
                      k=k, m=m, mu=mu,
                      x_init=x, alpha_init=alpha,
                      mutable_parameters=mutable_params)
    # NOTE(review): dead guard -- alpha is always 0.8 above, never None.
    alpha = 0.75 if alpha is None else alpha
    summary, _ = genetic_algo.run()
    # Mean evolved strategy in the final generation.
    print(np.mean(summary['x'][-1, :]))
    ax = plot_param_evolution(summary['x'])
    # Plot theoretical value (equation 12) -> Maximizes player success.
    xlim = ax.get_xlim()
    x_tilde = np.linspace(xlim[0], xlim[1], 100)
    y_tilde = (alpha * m * (2 * alpha + k)) / (4 * alpha * alpha - k ** 2) * np.ones_like(x_tilde)
    # Outlined line so it stays visible on top of the histogram.
    plt.semilogy(x_tilde, y_tilde, color='tab:blue', path_effects=[pe.Stroke(linewidth=5, foreground='w'), pe.Normal()],
                 lw=2, label='Theoretical value', alpha=0.7)
    ax.set_xlim(xlim)
    ax.set_ylabel('log(x) histogram')
    ax.legend()
    ax.set_title('Altruistic equilibrium')
    ax = plot_param_evolution(summary['alpha'], logy=False)
    plt.show()


if __name__ == '__main__':
    main()
|
# web address for exercise: https://repl.it/@appbrewery/day-4-3-exercise
# 🚨 Don't change the code below 👇
row1 = ["⬜️","⬜️","⬜️"]
row2 = ["⬜️","⬜️","⬜️"]
row3 = ["⬜️","⬜️","⬜️"]
map = [row1, row2, row3]
print(f"{row1}\n{row2}\n{row3}")
position = input("Where do you want to put the treasure?\n")
# 🚨 Don't change the code above 👆
#Write your code below this row 👇
# The input is "<column><row>", e.g. "23" = column 2, row 3 (see the
# reference solution below).
# BUG FIX: the original indexed the rows by the FIRST digit and the columns
# by the second, transposing the expected coordinates, and placed the mark
# with insert()+slicing instead of a plain assignment.
column = int(position[0]) - 1
row = int(position[1]) - 1
map[row][column] = "X"
#Write your code above this row 👆
# 🚨 Don't change the code below 👇
print(f"{row1}\n{row2}\n{row3}")
#####################
# Angela's Solution:
# row1 = ["⬜️","️⬜️","️⬜️"]
# row2 = ["⬜️","⬜️","️⬜️"]
# row3 = ["⬜️️","⬜️️","⬜️️"]
# map = [row1, row2, row3]
# print(f"{row1}\n{row2}\n{row3}")
# position = input("Where do you want to put the treasure? ")
# horizontal = int(position[0])
# vertical = int(position[1])
# map[vertical - 1][horizontal - 1] = "X"
# print(f"{row1}\n{row2}\n{row3}") |
default_app_config = 'gim.front.apps.FrontConfig'
|
from rest_framework.viewsets import ModelViewSet
from .mixins import HistoryModelMixin
class HistoryModelViewSet(HistoryModelMixin, ModelViewSet):
    """DRF ModelViewSet combined with HistoryModelMixin.

    Adds whatever history behaviour the mixin provides on top of the
    standard CRUD actions; defines nothing of its own.
    """
    pass
|
# -*- coding: utf-8 -*-
from datetime import date
from unittest import TestCase
import six
from .helpers import example_file
from popolo_data.importer import Popolo
class TestOrganizations(TestCase):
    """Tests for Popolo organization parsing and the Organization wrapper."""

    def test_empty_file_gives_no_organizations(self):
        """An empty JSON object yields an empty organizations collection."""
        with example_file(b'{}') as filename:
            popolo = Popolo.from_filename(filename)
            assert len(popolo.organizations) == 0

    def test_single_organization_name(self):
        """A single organization's name is exposed as .name."""
        with example_file(
                b'''
{
    "organizations": [{"name": "Starfleet"}]
}
''') as fname:
            popolo = Popolo.from_filename(fname)
            assert len(popolo.organizations) == 1
            o = popolo.organizations[0]
            assert o.name == 'Starfleet'

    def test_wikidata_property_and_id(self):
        """.wikidata resolves the identifier with scheme 'wikidata'."""
        with example_file(
                b'''
{
    "organizations": [
        {
            "id": "starfleet",
            "name": "Starfleet",
            "identifiers": [
                {
                    "identifier": "Q288523",
                    "scheme": "wikidata"
                }
            ]
        }
    ]
}
''') as fname:
            popolo = Popolo.from_filename(fname)
            assert len(popolo.organizations) == 1
            o = popolo.organizations[0]
            assert o.wikidata == 'Q288523'
            assert o.id == 'starfleet'

    def test_identifiers_list(self):
        """.identifiers returns the raw identifier dicts, order preserved."""
        with example_file(
                b'''
{
    "organizations": [
        {
            "id": "starfleet",
            "name": "Starfleet",
            "identifiers": [
                {
                    "identifier": "Q288523",
                    "scheme": "wikidata"
                },
                {
                    "identifier": "123456",
                    "scheme": "made-up-id"
                }
            ]
        }
    ]
}
''') as fname:
            popolo = Popolo.from_filename(fname)
            assert len(popolo.organizations) == 1
            o = popolo.organizations[0]
            assert o.identifiers == [
                {
                    'identifier': 'Q288523',
                    'scheme': 'wikidata',
                },
                {
                    'identifier': '123456',
                    'scheme': 'made-up-id',
                },
            ]

    def test_classification_property(self):
        """.classification is passed straight through."""
        with example_file(
                b'''
{
    "organizations": [
        {
            "id": "starfleet",
            "name": "Starfleet",
            "classification": "military"
        }
    ]
}
''') as fname:
            popolo = Popolo.from_filename(fname)
            assert len(popolo.organizations) == 1
            o = popolo.organizations[0]
            assert o.classification == 'military'

    def test_no_matching_identifier(self):
        """.wikidata is None when no identifier with that scheme exists."""
        with example_file(
                b'''
{
    "organizations": [
        {
            "id": "starfleet",
            "name": "Starfleet" }
    ]
}
''') as fname:
            popolo = Popolo.from_filename(fname)
            assert len(popolo.organizations) == 1
            o = popolo.organizations.first
            assert o.wikidata is None

    def test_organization_repr(self):
        """repr() handles non-ASCII names on both Python 2 and 3."""
        json = b'{"organizations": ' \
               b' [{"name": "M\u00e9decins Sans Fronti\u00e8res"}]}'
        with example_file(json) as fname:
            popolo = Popolo.from_filename(fname)
            assert len(popolo.organizations) == 1
            o = popolo.organizations[0]
            if six.PY2:
                assert repr(o) == \
                    b"<Organization: M\xc3\xa9decins Sans Fronti\xc3\xa8res>"
            else:
                assert repr(o) == u"<Organization: Médecins Sans Frontières>"

    def test_organization_image(self):
        """Popolo also accepts pre-parsed dicts; .image is passed through."""
        popolo = Popolo({
            'organizations': [
                {
                    'name': 'ACME corporation',
                    'image': 'http://example.org/acme.jpg',
                }
            ]})
        o = popolo.organizations.first
        assert o.image == 'http://example.org/acme.jpg'

    def test_organization_seats(self):
        """.seats is passed through as an int."""
        popolo = Popolo({
            'organizations': [
                {
                    'name': 'House of Commons',
                    'seats': 650,
                }
            ]})
        o = popolo.organizations.first
        assert o.seats == 650

    def test_organization_founding_and_dissolution_dates(self):
        """ISO date strings come back as datetime.date objects."""
        popolo = Popolo({
            'organizations': [
                {
                    'name': 'ACME corporation',
                    'founding_date': '1950-01-20',
                    'dissolution_date': '2000-11-15',
                }
            ]})
        o = popolo.organizations.first
        assert o.founding_date == date(1950, 1, 20)
        assert o.dissolution_date == date(2000, 11, 15)

    def test_organization_other_names(self):
        """.other_names returns the raw list, optional keys included only
        when present."""
        with example_file(b'''
{
    "organizations": [
        {
            "id": "abc-inc",
            "name": "ABC, Inc.",
            "other_names": [
                {
                    "name": "Bob's Diner",
                    "start_date": "1950-01-01",
                    "end_date": "1954-12-31"
                },
                {
                    "name": "Joe's Diner",
                    "start_date": "1955-01-01"
                },
                {
                    "name": "Famous Joe's"
                }
            ]
        }
    ]
}
''') as fname:
            popolo = Popolo.from_filename(fname)
            assert len(popolo.organizations) == 1
            o = popolo.organizations[0]
            assert o.other_names == [
                {
                    'name': "Bob's Diner",
                    'start_date': '1950-01-01',
                    'end_date': '1954-12-31'
                },
                {
                    'name': "Joe's Diner",
                    'start_date': '1955-01-01'
                },
                {
                    'name': "Famous Joe's"
                }
            ]

    def test_organization_links_list(self):
        """.links returns the raw link dicts, order preserved."""
        with example_file(
                b'''
{
    "organizations": [
        {
            "id": "starfleet",
            "name": "Starfleet",
            "links": [
                {
                    "url": "https://en.wikipedia.org/wiki/Starfleet",
                    "note": "Wikipedia"
                },
                {
                    "url": "http://memory-alpha.wikia.com/wiki/Starfleet",
                    "note": "Memory Alpha"
                }
            ]
        }
    ]
}
''') as fname:
            popolo = Popolo.from_filename(fname)
            assert len(popolo.organizations) == 1
            o = popolo.organizations[0]
            assert o.links == [
                {
                    'url': 'https://en.wikipedia.org/wiki/Starfleet',
                    'note': 'Wikipedia',
                },
                {
                    'url': 'http://memory-alpha.wikia.com/wiki/Starfleet',
                    'note': 'Memory Alpha',
                },
            ]

    def test_organisation_equality(self):
        """Two organizations parsed from identical data compare equal
        (and __ne__ agrees with __eq__)."""
        with example_file(
                b'''
{
    "organizations": [
        {
            "id": "starfleet",
            "name": "Starfleet",
            "identifiers": [
                {
                    "identifier": "Q288523",
                    "scheme": "wikidata"
                }
            ]
        }
    ]
}
''') as fname:
            o_a = Popolo.from_filename(fname).organizations[0]
            o_b = Popolo.from_filename(fname).organizations[0]
            assert o_a == o_b
            assert not (o_a != o_b)
|
#!/usr/bin/env python
"""Submit a generated cluster job to a Sun Grid Engine queue via qsub."""
from generate_cluster_job import generate_cluster_job
import sys
import subprocess

if __name__ == "__main__":
    # Usage: <script> <queue> [metagpuNN] [job args...]
    queue = sys.argv[1]
    assert queue in ['tf', 'rz', 'rzx', 'test'], ("only know "
        "rz, rzx, tf and test queues, not: " + queue)
    if queue != 'rzx':
        queue_name = "meta_gpu-{:s}.q".format(queue)
    else:
        queue_name = "meta_gpux-rz.q"
    # An optional second argument starting with 'metagpu' pins the job to
    # one specific host.
    if sys.argv[2].startswith('metagpu'):
        hostname = sys.argv[2]
        print("Running on {:s}".format(hostname))
        job_args = sys.argv[3:]
    else:
        job_args = sys.argv[2:]
        hostname = None
    job_filepath = generate_cluster_job(queue, job_args)
    # Build the qsub argv as a list and run it WITHOUT a shell: no word
    # splitting surprises if a path contains spaces, and no shell injection.
    # (The original passed a single formatted string with shell=True.)
    command = ["qsub"]
    if hostname is not None:
        command += ["-l", "hostname={:s}".format(hostname)]
    command += ["-q", queue_name, job_filepath]
    print("Running:\n" + " ".join(command))
    subprocess.call(command)
|
# BUG FIX: the prompt misspelled 'salario' as 'slario'.
salario = float(input('Digite seu salario:'))
# 15% raise on the typed salary.
aumento = (salario * 15)/100
# NOTE(review): the label says "o aumento" (the raise) but the value printed
# is salario + aumento (the new salary) -- original wording kept; confirm
# which one was intended.
print('O aumento de 15% do salário é:{:.2f}'.format(salario+aumento))
|
"""
Um programa simples.
Estava fazendo sem ao menos saber usar o while...
"""
valor = input("Digite um número: ")
caracters = len(str(valor))
algarismo = "Algarismo"
número_algarismo = 1
fatiamento = 0
while caracters > 0:
x = str(valor[fatiamento])
print("{} {}: {}.".format(algarismo, número_algarismo, x), end='\n')
caracters -=1
número_algarismo +=1
fatiamento +=1
|
from async_consumer import ReconnectingConsumer
from gene_dispatcher import process
import logging

# Verbose log format, used if the basicConfig line below is re-enabled.
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
              '-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)

# Local RabbitMQ with default guest credentials; '%2F' is the URL-encoded
# default vhost '/'.
amqp_url = 'amqp://guest:guest@localhost:5672/%2F?connection_attempts=3&heartbeat=3600'
# Consume forever, handing every message to gene_dispatcher.process;
# the consumer reconnects automatically if the connection drops.
consumer = ReconnectingConsumer(amqp_url, process)
consumer.run()
|
# _*_ coding:UTF-8 _*_
import win32con
import win32api
import random
import ctypes
import ctypes.wintypes
import threading
import time
import os
import sys
from winapi import window_capture
from other.cv2_t2 import get_can_cant_use
from other.cv2_t3 import read_img_p_count
from icevisual.Utils import Utils
RUN = False  # flag set by the hotkey thread: run one pass in the main loop
EXIT = False  # flag set by the hotkey thread: ask the main loop to quit
user32 = ctypes.windll.user32  # load user32.dll
id1 = 105  # unique ids identifying the registered global hotkeys
id2 = 106
class HotkeyThread(threading.Thread):  # extension of threading.Thread
    """Thread that registers the global hotkeys F9/F10 and pumps their
    Windows messages.

    F9 triggers one screenshot/analysis pass (run_id1), F10 requests exit.
    The module-level RUN/EXIT flags communicate with the main loop.
    """

    def RunSomething(self, trytime):
        """Replay a scripted key sequence 'trytime' times.

        List encoding: entries < 0xff are virtual-key codes to press;
        exactly 0xff means a random 28-32 ms pause; entries > 0xff encode
        a pause of (value - 0xff) milliseconds.
        """
        VK_E = 0x45
        VK_ESCAPE = win32con.VK_ESCAPE
        VK_UP = win32con.VK_UP
        VK_RIGHT = win32con.VK_RIGHT
        DefaultSleep = 100
        vks = [VK_ESCAPE, 0xff + 200, VK_E, 0xff + DefaultSleep, VK_UP, 0xff + DefaultSleep, VK_E, 0xff + DefaultSleep, \
               VK_E, 0xff, VK_ESCAPE, 0xff + DefaultSleep, VK_RIGHT,
               0xff + DefaultSleep, VK_E, 0xff + 200, VK_E, 0xff + 200, VK_E, 0xff + 300, VK_E, 0xff + 300, VK_E,
               0xff + 300, VK_E, 0xff + 300, VK_E, 0xff + 300, VK_E, ]
        for j in range(0, trytime):
            for i in range(0, len(vks)):
                if vks[i] == 0xff:
                    # BUG FIX: was random.randint(28.32) -- a single float
                    # argument raises TypeError; a 28..32 ms pause was meant.
                    r = random.randint(28, 32)
                    print("Sleep %d" % r)
                    time.sleep(r / 1000.0)
                elif vks[i] < 0xff:
                    # Press and release the key.
                    win32api.keybd_event(vks[i], 0, 0, 0)
                    win32api.keybd_event(vks[i], 0, win32con.KEYEVENTF_KEYUP, 0)
                else:
                    time.sleep((vks[i] - 0xff) / 1000.0)
            if j < trytime - 1:
                time.sleep(5)

    def RunMain(self):
        pass

    def run_id1(self):
        """F9 handler: capture the screen, crop/analyse it, log a timestamp."""
        filename = "storage/ScreenShot/%d.jpg" % time.time()
        window_capture(filename)
        get_can_cant_use(filename, 'storage/ScreenShot/UseDisableEnable.jpg')
        os.unlink(filename)
        # Sample output from earlier debug runs (kept for reference):
        #   w = 1600 h=900 / Counr = 1842 enable
        #   w = 1600 h=900 / Counr = 3284 disable
        read_img_p_count('storage/ScreenShot/UseDisableEnable.jpg')
        print(Utils.format_time_with_millisecond(), "Run ID1")

    def run(self):
        global EXIT  # shared with the main loop across threads
        global RUN   # shared with the main loop across threads
        if not user32.RegisterHotKey(None, id1, 0, win32con.VK_F9):
            # Register F9 (runs one analysis pass); report failure.
            print("Unable to register id", id1)
        if not user32.RegisterHotKey(None, id2, 0, win32con.VK_F10):
            # Register F10 (clean exit -- exiting this way matters so the
            # hotkeys get unregistered for the next run).
            print("Unable to register id", id2)
        # Pump Windows messages and dispatch our hotkey events.
        try:
            msg = ctypes.wintypes.MSG()
            while True:
                if user32.GetMessageA(ctypes.byref(msg), None, 0, 0) != 0:
                    if msg.message == win32con.WM_HOTKEY:
                        if msg.wParam == id1:
                            RUN = True
                            self.run_id1()
                        elif msg.wParam == id2:
                            EXIT = True
                            return
                    user32.TranslateMessage(ctypes.byref(msg))
                    user32.DispatchMessageA(ctypes.byref(msg))
        finally:
            # The hotkeys MUST be unregistered -- otherwise registering the
            # same ids fails on the next run; if that happens, try other ids.
            user32.UnregisterHotKey(None, id1)
            user32.UnregisterHotKey(None, id2)
if __name__ == "__main__":
print(time.time())
r = random.randint(28, 32)
print(r)
time.sleep(r / 1000.0)
print(time.time())
sys.exit();
hotkey = HotkeyThread()
hotkey.start()
while True:
if RUN:
# 这里放你要用热键启动执行的代码
print("RUNNING")
RUN = False
elif EXIT:
# 这里是用于退出循环的
break
|
def myfnc(x,z,y=10):
    """Demo of keyword arguments: 'y' has a default, so callers may omit it."""
    print("x =",x,"y = ",y,"z =", z)

# All three arguments passed by keyword, out of positional order.
myfnc(x = 1,y = 2,z = 5)
a = 5
b = 6
# 'y' omitted -> its default (10) is used.
myfnc(x = a,z = b)
a = 1
b = 2
c = 3
# Keyword order does not have to match the parameter order.
myfnc(y = a,z = b,x = c)
|
number = "+918155873903" |
import re
import jieba
import pandas as pd
def data_process(file='./data/message80W1.csv'):
    """
    Build a balanced, cleaned SMS dataset for spam classification.

    Label semantics in the raw file:
        spam: 0 (720000 rows)
        ham:  1 (80000 rows)

    Returns (space-joined messages, tokenised messages, labels).
    """
    # header=None: the file has no header row; index_col=0: column 0 is the index.
    data = pd.read_csv(file, header=None, index_col=0)
    data.columns = ['label', 'message']
    # NOTE(review): result discarded -- presumably left over from exploration.
    data['label'].value_counts()
    """
    ------------------------------1.数据抽样--------------------------------------
    """
    # 1. Sampling: draw n rows of each class to balance the dataset.
    n = 20000
    a = data[data['label'] == 0].sample(n)  # spam messages
    b = data[data['label'] == 1].sample(n)  # ham messages
    """
    数据拼接
    两个Series的拼接,默认是在列上(往下)拼接,axis = 0,如果要横向往右拼接,axis = 1
    """
    # Concatenate the two samples row-wise (axis=0; axis=1 would join columns).
    data_new = pd.concat([a, b], axis=0)
    """
    ------------------------------2.数据预处理-------------------------------------
    """
    # 2. Preprocessing.
    # Drop duplicate messages.
    data_dup = data_new['message'].drop_duplicates()
    # De-identification: the corpus masks sensitive digits with runs of 'x';
    # strip those placeholders.
    data_qumin = data_dup.apply(lambda x: re.sub('x', '', x))
    # Tokenise with jieba, extended by a custom user dictionary.
    jieba.load_userdict('./data/newdic1.txt')
    data_cut = data_qumin.apply(lambda x: jieba.lcut(x))
    # Stop words: csv defaults to ',' as separator, so use a separator that
    # cannot occur in the data ('hhhhh') to read one phrase per line.
    stop_words = pd.read_csv('./data/stopword.txt', encoding='GB18030', sep='hhhhh', header=None)
    # First column to a list (iloc[:, 0]), plus a few extra symbols to filter.
    stop_words = list(stop_words.iloc[:, 0]) + [' ', '→', '-', ':', ' ●']
    data_after_stop = data_cut.apply(lambda x: [i for i in x if i not in stop_words])
    # Labels aligned with the rows that survived deduplication.
    labels = data_new.loc[data_after_stop.index, 'label']
    # Space-join tokens back into strings (vectoriser-friendly form).
    data_str = data_after_stop.apply(lambda x: ' '.join(x))
    return data_str, data_after_stop, labels
|
message="""We present to you the final week of Aperture.
Theme: Hues Of Bliss
Deadline: 7th October
Send your entries with your Name, College and a caption to fmc@antaragni.in
#HuesOfBliss
#Antaragni16"""
message =""" """+ message+""" #"""
hash_find = message.split("""#""")
print hash_find
len_hash = len(hash_find)
h = []
for i in range(1,(len_hash-1)):
h.append(hash_find[i])
print h |
# Generated by Django 2.2.12 on 2020-09-11 07:46
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):
    """Creates the Alarm model: per-domain alarms, optionally tied to a
    mailbox (mailbox link survives mailbox deletion via SET_NULL)."""

    dependencies = [
        ('admin', '0016_auto_20200602_1201'),
    ]

    operations = [
        migrations.CreateModel(
            name='Alarm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('closed', models.DateTimeField(blank=True, null=True)),
                ('status', models.IntegerField(choices=[(1, 'Opened'), (2, 'Closed')], db_index=True, default=1)),
                ('title', models.CharField(max_length=150)),
                ('internal_name', models.CharField(max_length=120)),
                ('domain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='alarms', to='admin.Domain')),
                ('mailbox', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='alarms', to='admin.Mailbox')),
            ],
            options={
                'ordering': ['created'],
            },
        ),
    ]
|
# -*- coding:latin-1 -*-
import random
import math
# Pick and log a random seed so a run can be reproduced later.
seed = random.randint(1,100000000000)
print("Current Seed: " + str(seed))
random.seed(seed)
class Input: #takes in its value, most of the time a random number
    """Network input node: simply wraps a single value."""
    def __init__(self, value):
        self.value = value
class Neuron: #takes in its value from a synapse
    """Network node.

    value: running (later averaged) activation.
    connections: synapses feeding into this neuron.
    """
    def __init__(self, value=0, connections=None):
        self.value = value
        # BUG FIX: the original signature was 'connections=[]' -- a mutable
        # default shared by every Neuron built with the default, so appending
        # a synapse to one neuron showed up on all of them (and inflated the
        # fan-in averaging done by the AI classes).
        self.connections = [] if connections is None else connections
class Synapse:
    """Weighted edge of the toy network.

    Stores its weight, the value flowing through it, and the endpoints it
    connects (input node / neuron / raw value, depending on the caller).
    """
    def __init__(self, weight, value, connection_in, connection_out):
        self.weight, self.value = weight, value
        self.connections = [connection_in, connection_out]
class First_AI: #Needs target x and y position to operate
    """Randomly wired 2-4-2 network over the inputs (x_pos, y_pos).

    Layers: Input1 (2 nodes) -> Synapse1 (8 random synapses) -> Neuron1
    (4 nodes) -> Synapse2 (8 random synapses) -> Output (2 nodes).
    Each node's value is divided by (1 + recorded fan-in) after wiring.
    """
    def __init__(self, x_pos, y_pos, Syn_Values = []):
        # NOTE(review): Syn_Values is never used (and, as a mutable default,
        # would be shared between instances if it ever were).
        self.Input1 = []
        self.Synapse1 = []
        self.Neuron1 = []
        self.Synapse2 = []
        self.Output = []
        self.Input1.append(Input(x_pos)) #two neurons for directions, position of x and y
        self.Input1.append(Input(y_pos))
        for i in range(4): #Neuron1 (1st layer of Neurons)
            self.Neuron1.append(Neuron(0))
        for i in range(8): #Synapse1 (1st layer of Synapses(between In and L1))
            #Weight, Value, Connection_In, Connection_Out
            # Random source input, random target neuron, weight in [-1, 1].
            temp_int_1 = random.randint(0,1)
            temp_int_2 = random.randint(0,3)
            self.Synapse1.append(Synapse(random.randint(-100,100)/100, self.Input1[temp_int_1].value, temp_int_1, temp_int_2))
            self.Neuron1[temp_int_2].value += self.Synapse1[i].value * self.Synapse1[i].weight
            self.Neuron1[temp_int_2].connections.append(self.Synapse1[i]) #adds synapse to connections list of target neuron
        for each_neuron in self.Neuron1:
            # Divide each neuron's value by 1 + its recorded fan-in.
            temp_float = 1.0
            for i in range(len(each_neuron.connections)):
                temp_float += 1.0
            each_neuron.value/=temp_float
        for i in range(2): #Output
            self.Output.append(Neuron(0))
        for i in range(8): #Synapse2 (2nd layer of Synapses(between L1 and Out))
            #Weight, Value, Connection_In, Connection_Out
            temp_int_1 = random.randint(0,3)
            temp_int_2 = random.randint(0,1)
            self.Synapse2.append(Synapse(random.randint(-100,100)/100, self.Neuron1[temp_int_1].value, temp_int_1, temp_int_2))
            self.Output[temp_int_2].value += self.Synapse2[i].value * self.Synapse2[i].weight
            self.Output[temp_int_2].connections.append(self.Synapse2[i]) #adds synapse to connections list of target neuron
        for each_neuron in self.Output:
            # Same fan-in averaging for the output layer.
            temp_float = 1.0
            for i in range(len(each_neuron.connections)):
                temp_float += 1.0
            each_neuron.value/=temp_float
def Generate_First_AI(amount = 100, generations = 10, print_all = False):
    """Create 'amount' random networks aimed at a random target point,
    score each by distance to the target, then hand the scored population
    to Generation/Generate for breeding.

    BUG FIX: the original drove everything through exec("A%d = ...").  In
    Python 3, exec() cannot rebind the enclosing function's locals, so the
    'xd'/'yd' assignments silently did nothing and every AI scored 0.
    Plain lists and dict comprehensions replace the generated names.
    """
    x_pos = random.randint(-100,100)
    y_pos = random.randint(-100,100)
    ais = [First_AI(x_pos, y_pos) for _ in range(amount)]
    Points = {ai: ai.Output for ai in ais}
    Sorted = []
    for ai in ais:
        xd = ai.Output[0].value - x_pos
        # NOTE(review): the original also compared Output[1] against x_pos;
        # kept as written, but y_pos was probably intended.
        yd = ai.Output[1].value - x_pos
        Sorted.append([(xd*xd + yd*yd)**(1/2), ai])
    temp_list = Generation(Sorted, Points)
    Generate(temp_list[0], temp_list[1], x_pos, y_pos, generations, print_all)
def Generate(AI_Children, AI_Sorted, x_pos, y_pos, generations, print_all):
    """Instantiate a Gen_AI from every inherited synapse blueprint.

    NOTE(review): the exec-created instances are never stored, returned or
    scored, and 'AI_Sorted', 'generations' and 'print_all' are unused --
    the multi-generation evolution loop looks unfinished.
    """
    i=0
    for Synapses in AI_Children:
        # Synapses is a pair: [0] = layer-1 synapse data, [1] = layer-2 data
        # (see Generation for the exact layouts).
        #print(Synapses)
        #print(Synapses[0][2]) #- Syn1 Weight
        #print(Synapses[0][0]) #- Syn2 Connection in
        #print(Synapses[0][1]) #- Syn1 Connection out
        exec("A%d = Gen_AI(x_pos, y_pos, [], Synapses)" %i)
        i+=1
class Gen_AI:
    """Network rebuilt from an inherited synapse blueprint (see Generation).

    Synapses is a pair: Synapses[0] describes the input->layer-1 wiring and
    Synapses[1] the layer-1->output wiring.  For bred children each entry is
    [from, to, weight]; NOTE(review): for surviving parents, Generation
    stores a LIST of [weight, in, out] triples instead -- the indexing below
    only matches the child layout; confirm which was intended.
    """
    def __init__(self, x_pos, y_pos, Syn_Values, Synapses):
        self.Input1 = []
        self.Synapse1 = []
        self.Neuron1 = []
        self.Synapse2 = []
        self.Output = []
        self.Synapses = Synapses
        self.Input1.append(Input(x_pos)) #two neurons for directions, position of x and y
        self.Input1.append(Input(y_pos))
        for i in range(4): #Neuron1 (1st layer of Neurons)
            self.Neuron1.append(Neuron(0))
        # NOTE(review): both indices are taken from Synapses[0][0]; temp_int_2
        # was probably meant to use Synapses[0][1] (the 'to' endpoint).
        temp_int_1 = Synapses[0][0]
        temp_int_2 = Synapses[0][0]
        for i in range(8): #Synapse1 (1st layer of Synapses(between In and L1))
            #Weight, Value, Connection_In, Connection_Out
            # NOTE(review): every iteration re-reads Synapses[0], so all eight
            # layer-1 synapses are identical.
            self.Synapse1.append(Synapse(Synapses[0][2], self.Input1[Synapses[0][0]].value, self.Input1[Synapses[0][0]], Synapses[0][1]))
            self.Neuron1[temp_int_2].value += self.Input1[Synapses[0][0]].value * self.Synapse1[i].weight
            self.Neuron1[temp_int_2].connections.append(self.Synapse1[i]) #adds synapse to connections list of target neuron
        for each_neuron in self.Neuron1:
            # Divide each neuron's value by 1 + its recorded fan-in.
            temp_float = 1.0
            for i in range(len(each_neuron.connections)):
                temp_float += 1.0
            each_neuron.value/=temp_float
        for i in range(2): #Output
            self.Output.append(Neuron(0))
        for i in range(8): #Synapse2 (2nd layer of Synapses(between L1 and Out))
            #Weight, Value, Connection_In, Connection_Out
            temp_int_1 = Synapses[1][0]
            temp_int_2 = Synapses[1][1]
            # NOTE(review): debug print left enabled.
            print(temp_int_2)
            self.Synapse2.append(Synapse(Synapses[1][2], self.Input1[Synapses[1][1]].value, Synapses[1][1], Synapses[1][0]))
            self.Output[temp_int_2].value += self.Synapse2[i].value * self.Synapse2[i].weight
            self.Output[temp_int_2].connections.append(self.Synapse2[i]) #adds synapse to connections list of target neuron
        for each_neuron in self.Output:
            # Same fan-in averaging for the output layer.
            temp_float = 1.0
            for i in range(len(each_neuron.connections)):
                temp_float += 1.0
            each_neuron.value/=temp_float
def Generation(Sorted, Points): #Sorts AI's and creates children, returns all Surviving + Children
    """Sort the scored AIs and breed children.

    Each round pairs the current best AI with one random other AI, crosses
    their synapse data (endpoints picked from either parent, weights
    averaged plus a small mutation), and keeps both parents' blueprints
    alongside two children.  Returns (AI_Children, AI_Sorted).
    """
    # BUG FIX: the original called sorted(Sorted, ...) and discarded the
    # result, so the population was never actually sorted and "best AI"
    # really meant "first AI".  Sort in place instead.
    Sorted.sort(key=lambda x: x[0])
    # NOTE(review): the original also computed Sorted[:len(Sorted)//2]
    # without assigning it (a no-op).  It was dropped rather than enabled:
    # truncating to an odd length would break the pop-two-per-round loop
    # below (random.randint(1, 0) raises once one element remains).
    AI_Sorted = Sorted[:]
    AI_Children = []
    while Sorted != []:
        # Index of the random mate (never 0, the current best).
        Temporary_Int = random.randint(1, len(Sorted)-1)
        GAIs = [] #Good AIs
        RAIs = [] #Random AIs
        GAI_Synapses = []
        RAI_Synapses = []
        Surviving_AI1_Synapse1 = []
        Surviving_AI1_Synapse2 = []
        Surviving_AI2_Synapse1 = []
        Surviving_AI2_Synapse2 = []
        for Score in Sorted: #Finds best AI
            if Score == Sorted[0]:
                ListedAI = AI_Sorted[0][1]
                GAIs.append(ListedAI)
                print(ListedAI)
                break
        for Score in Sorted: #Finds random AI
            if Score == Sorted[Temporary_Int]:
                ListedAI = AI_Sorted[Temporary_Int][1]
                RAIs.append(ListedAI)
                break
        for each_syn in GAIs[0].Synapse1: #Stores all Syn1 Data from best AI
            GAI_Synapses.append([each_syn.weight, each_syn.connections[0], each_syn.connections[1]])
            Surviving_AI1_Synapse1.append([each_syn.weight, each_syn.connections[0], each_syn.connections[1]])
        for each_syn in RAIs[0].Synapse1: #Stores all Syn1 Data from random AI
            RAI_Synapses.append([each_syn.weight, each_syn.connections[0], each_syn.connections[1]])
            Surviving_AI2_Synapse1.append([each_syn.weight, each_syn.connections[0], each_syn.connections[1]])
        CHAI_Syn1 = []
        CHAI_Syn2 = []
        while GAI_Synapses != []: #Gives Children AI all Syn1 data
            for i in range(2):
                # Endpoints inherited from a random parent; weight is the
                # parents' average plus a small mutation.
                syn_from = 0
                syn_to = 0
                if random.randint(0,1):
                    syn_from = GAI_Synapses[0][1]
                else:
                    syn_from = RAI_Synapses[0][1]
                if random.randint(0,1):
                    syn_to = GAI_Synapses[0][2]
                else:
                    syn_to = RAI_Synapses[0][2]
                CHAI_Syn1.append([syn_from, syn_to, (GAI_Synapses[0][0]+RAI_Synapses[0][0])/2 + random.randint(-100,100)/1000])
            GAI_Synapses.pop()
            RAI_Synapses.pop()
        for each_syn in GAIs[0].Synapse2: #Stores all Syn2 Data from best AI
            GAI_Synapses.append([each_syn.weight, each_syn.connections[0], each_syn.connections[1]])
            Surviving_AI1_Synapse2.append([each_syn.weight, each_syn.connections[0], each_syn.connections[1]])
        for each_syn in RAIs[0].Synapse2: #Stores all Syn2 Data from random AI
            RAI_Synapses.append([each_syn.weight, each_syn.connections[0], each_syn.connections[1]])
            Surviving_AI2_Synapse2.append([each_syn.weight, each_syn.connections[0], each_syn.connections[1]])
        while GAI_Synapses != []: #Gives Children AI all Syn2 data
            for i in range(2):
                syn_from = 0
                syn_to = 0
                if random.randint(0,1):
                    syn_from = GAI_Synapses[0][1]
                else:
                    syn_from = RAI_Synapses[0][1]
                if random.randint(0,1):
                    syn_to = GAI_Synapses[0][2]
                else:
                    syn_to = RAI_Synapses[0][2]
                CHAI_Syn2.append([syn_from, syn_to, (GAI_Synapses[0][0]+RAI_Synapses[0][0])/2 + random.randint(-100,100)/1000])
            GAI_Synapses.pop()
            RAI_Synapses.pop()
        # Two children plus both parents' surviving blueprints.
        AI_Children.append([CHAI_Syn1[0], CHAI_Syn2[0]])
        AI_Children.append([CHAI_Syn1[1], CHAI_Syn2[1]])
        AI_Children.append([Surviving_AI1_Synapse1,
                            Surviving_AI1_Synapse2])
        AI_Children.append([Surviving_AI2_Synapse1,
                            Surviving_AI2_Synapse2])
        # Remove this round's pair (mate first -- its index is higher).
        Sorted.pop(Temporary_Int)
        Sorted.pop(0)
    return (AI_Children, AI_Sorted)
Generate_First_AI(amount=10, generations=10, print_all=True) |
#coding=utf-8
# 完成debug,创建Data_matrix实体类就可以获得datax 和datay
import numpy as np
import time
class User:
    """One tab-separated row of user_info.txt.

    Date fields are converted to Unix timestamps; an empty field becomes
    the sentinel -99.
    """
    def __init__(self, info):
        self.id = int(info[0])
        self.grade = int(info[1])
        self.sex = int(info[2])
        self.brith = self._to_timestamp(info[3])
        self.age = int(info[4])
        self.babyBrith = self._to_timestamp(info[5])
        self.babyAge = int(info[6])
        self.babySex = int(info[7])

    @staticmethod
    def _to_timestamp(timestring):
        """'%Y-%m-%d %H:%M:%S' -> int Unix timestamp; '' -> -99 sentinel."""
        if timestring == '':
            return -99
        return int(time.mktime(time.strptime(timestring, '%Y-%m-%d %H:%M:%S')))
class Product:
    """One tab-separated row of product_info.txt.

    'label' holds space-separated tags; dealLabel() splits them into
    'labels' and counts them into 'labelNum'.
    """
    def __init__(self, info):
        # Removed the unused local 'i = 5' left over from debugging.
        self.product = int(info[0])
        self.store = int(info[1])
        self.brand = int(info[2])
        self.cla = int(info[3])
        self.label = info[4]
        self.price = int(info[5])
        self.labelNum = 0
        self.labels = []
        self.dealLabel()

    def dealLabel(self):
        """Split the raw label string on spaces and record the tag count."""
        self.labels = self.label.split(' ')
        # len() instead of the non-idiomatic __len__() call.
        self.labelNum = len(self.labels)
class Behavior:
    """A single user-product interaction parsed from behavior_info.txt."""

    def __init__(self, info):
        user, product, when, cla = info[0], info[1], info[2], info[3]
        self.userId = int(user)
        self.productId = int(product)
        self.time = int(when)
        self.cla_be = int(cla)
def getUserInfo():
    """Read ./user_info.txt and return a list of User objects.

    Reading stops at the first empty line (or EOF), matching the original
    sentinel-based readline loop; the file is now closed deterministically
    via a context manager (the original leaked the handle).
    """
    users = []
    with open('./user_info.txt', 'r') as user_info:
        for raw in user_info:
            line = raw.split('\n')[0]
            if line == '':
                break
            users.append(User(line.split('\t')))
    return users
def getProductInfo():
    """Read ./product_info.txt and return a list of Product objects.

    Reading stops at the first empty line (or EOF), matching the original
    sentinel-based readline loop; the file is now closed deterministically
    via a context manager (the original leaked the handle).
    """
    products = []
    with open('./product_info.txt', 'r') as product_info:
        for raw in product_info:
            line = raw.split('\n')[0]
            if line == '':
                break
            products.append(Product(line.split('\t')))
    return products
def getBehaviorInfo():
    """Read ./behavior_info.txt and return a list of Behavior objects.

    Reading stops at the first empty line (or EOF), matching the original
    sentinel-based readline loop; the file is now closed deterministically
    via a context manager (the original leaked the handle).
    """
    behaviors = []
    with open('./behavior_info.txt', 'r') as behavior_info:
        for raw in behavior_info:
            line = raw.split('\n')[0]
            if line == '':
                break
            behaviors.append(Behavior(line.split('\t')))
    return behaviors
class Data_matrix:
    """Builds the input/output numpy matrices from the user and behavior files."""

    def __init__(self):
        self.users = getUserInfo()
        # self.products = getProductInfo()
        self.behaviors = getBehaviorInfo()
        self.inputMatrix = np.array   # placeholders, replaced by getMatrix()
        self.outputMatrix = np.array
        self.getMatrix()

    def getMatrix(self):
        """Assemble one input row (user features + time, length 9) and one
        output row (product id + behavior class, length 2) per behavior."""
        input_rows = []
        output_rows = []
        for b in self.behaviors:
            # NOTE(review): assumes user ids are 1-based and dense — confirm.
            u = self.users[b.userId - 1]
            input_rows.append([
                b.userId, int(u.grade), u.brith, u.age, u.sex,
                u.babyBrith, u.babyAge, u.babySex, b.time,
            ])
            output_rows.append([b.productId, b.cla_be])
        self.inputMatrix = np.array(input_rows)
        self.outputMatrix = np.array(output_rows)
#test code
# data = Data_matrix()
#
# data_x = data.inputMatrixdata = Data_matrix()
# data_x = data.inputMatrix
# data_y = data.outputMatrix
#
# print data.inputMatrix[1], data.outputMatrix[0]
# data_y = data.outputMatrix
#
# print data.inputMatrix[1], data.outputMatrix[0] |
# Print each letter of "fox" equal to "f" (i.e. just the first letter).
# Fixed: the original used the Python 2 print statement, which is a
# SyntaxError under Python 3.
for letter in "fox":
    if letter == "f":
        print(letter)
import cv2 as cv
import numpy as np
import math
def my_conv():
    """Manually convolve a grayscale image with a 13x13 Gaussian kernel and
    compare the result with cv.filter2D.

    Reads "2.jpg" from the working directory and displays the manual result,
    the filter2D result, and their absolute difference in GUI windows, so
    this function is interactive/side-effecting only.
    """
    cimg = cv.imread("2.jpg")
    img = cv.cvtColor(cimg, cv.COLOR_BGR2GRAY)
    img_height = len(img)
    img_width = len(img[1])
    img = cv.resize(img, (int(img_width * 0.8), int(img_height * 0.8)))

    # Build the 2-D Gaussian from the separable 1-D kernel, then flip it
    # (true convolution rather than correlation; a Gaussian is symmetric so
    # this is a no-op, but it keeps the implementation general).
    xKernal = cv.getGaussianKernel(ksize=13, sigma=2)
    kernal = np.matmul(xKernal, np.transpose(xKernal))
    newKernal = [[0] * len(row) for row in kernal]
    for i in range(len(kernal)):
        for j in range(len(kernal[i])):
            newKernal[len(kernal) - 1 - i][len(kernal[i]) - 1 - j] = kernal[i][j]

    # Zero-pad the image by half the kernel size on every side.
    padding = math.floor(len(kernal) / 2)
    newImg = np.zeros((2 * padding + len(img), 2 * padding + len(img[0])), np.uint8)
    for i in range(padding, len(img) + padding):
        for j in range(padding, len(img[0]) + padding):
            newImg[i][j] = img[i - padding][j - padding]

    newerImg = np.zeros((len(img), len(img[0])), np.uint8)
    for i in range(padding, len(newImg) - padding):
        for j in range(padding, len(newImg[i]) - padding):
            # BUG FIX: the window must cover the full kernel footprint.
            # The original slice [i - padding:i + padding] dropped the last
            # row and column of every 13x13 neighbourhood.
            arr = newImg[i - padding:i + padding + 1, j - padding:j + padding + 1]
            newerImg[i - padding][j - padding] = applyConv(arr, newKernal)

    filtImg = cv.filter2D(img, ddepth=-1, kernel=kernal)
    compared = abs(filtImg - newerImg)
    for window in (newerImg, filtImg, compared):
        cv.imshow("Image", window)
        cv.waitKey(0)
        cv.destroyAllWindows()
def applyConv(arr, kernal):
    """Return the sum of the element-wise product of `arr` and `kernal`
    (one convolution tap); `kernal` must be at least as large as `arr`."""
    return sum(
        arr[r][c] * kernal[r][c]
        for r in range(len(arr))
        for c in range(len(arr[r]))
    )
|
# -*- coding: utf-8 -*-
from gevent import monkey; monkey.patch_all()
from bottle import run, response, request, route
import time
import asyncio
import subprocess
import random
import uuid
def fire_and_forget(f):
    '''Decorator: run `f` on the event loop's default thread-pool executor
    and return the resulting future immediately (the caller does not wait).

    Raises TypeError when applied to a non-callable.
    '''
    from functools import wraps, partial

    @wraps(f)
    def wrapped(*args, **kwargs):
        loop = asyncio.get_event_loop()
        if callable(f):
            # BUG FIX: run_in_executor() forwards positional arguments only;
            # the original passed **kwargs straight through, which raised
            # TypeError for any keyword call. Bind them with partial instead.
            return loop.run_in_executor(None, partial(f, *args, **kwargs))
        else:
            raise TypeError('Task must be a callable')
    return wrapped
@route('/<_time>')
def test(_time):
    """HTTP handler: launch the background task chain and return immediately."""
    job_id = str(uuid.uuid4())
    x(_time, job_id)
    return response.status
@fire_and_forget
def x(_time, u):
    """Background step 1: wait `_time` seconds, list running python
    processes, then chain into y()."""
    print('time:', _time, u)
    time.sleep(int(_time))
    cmd = "ps -A | grep 'python'"
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = proc.communicate()[0]
    print('_time:', _time, u, output)
    y(u, _time)
def y(u, _time):
    """Background step 2: print a heartbeat four times, sleeping `_time`
    seconds after each of the first three, then chain into z()."""
    _time = int(_time)
    for _ in range(3):
        print('focus = ', _time, u)
        time.sleep(_time)
    print('focus = ', _time, u)
    z(u, _time)
def z(u, _time):
    """Final step of the background chain: log completion."""
    print('end', u, _time)
# Start the Bottle app under gunicorn with 10 workers (gevent-patched via
# monkey.patch_all above), listening on all interfaces, port 8080.
run(host='0.0.0.0', port=8080, debug=True, server='gunicorn', workers=10)
|
#!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
#
# Copyright 2016 Ramil Nugmanov <stsouko@live.ru>
# This file is part of predictor.
#
# predictor
# is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
from smtplib import SMTP
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from flask_misaka import markdown
from flask import render_template
from .config import LAB_NAME, SMTP_MAIL, SMPT_HOST, SMTP_LOGIN, SMTP_PASSWORD, SMTP_PORT
def send_mail(message, to_mail, to_name=None, subject=None, banner=None, title=None):
    """Send `message` to `to_mail` as a multipart (plain + HTML) e-mail.

    The HTML part renders the markdown message through the email.html
    template; sender identity and SMTP credentials come from .config.
    """
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject or ""
    msg['From'] = '%s <%s>' % (LAB_NAME, SMTP_MAIL)
    if to_name:
        msg['To'] = '%s <%s>' % (to_name, to_mail)
    else:
        msg['To'] = to_mail
    html = render_template('email.html', body=markdown(message), banner=banner, title=title)
    msg.attach(MIMEText(message, 'plain'))
    msg.attach(MIMEText(html, 'html'))
    with SMTP(SMPT_HOST, SMTP_PORT) as smtp:
        smtp.login(SMTP_LOGIN, SMTP_PASSWORD)
        smtp.sendmail(SMTP_MAIL, to_mail, msg.as_string())
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 20:35:12 2018
@author: Paul Charnay
"""
import numpy as np
records_array = np.array([1, 4, 3, 2, 2])
# Unique values, each element's index into `vals`, and per-value counts.
vals, inverse, count = np.unique(records_array, return_inverse=True, return_counts=True)
# Positions (in `vals`) of the values that occur more than once.
idx_vals_repeated = np.where(count > 1)[0]
vals_repeated = vals[idx_vals_repeated]
# For every repeated value, gather the indices in records_array where it occurs.
rows, cols = np.where(inverse == idx_vals_repeated[:, np.newaxis])
_, inverse_rows = np.unique(rows, return_index=True)
res = np.split(cols, inverse_rows[1:])
print(res)
|
from django.contrib import admin
from .models import Category, Product, ProductPicture, ProductDetailedDescription
from nested_admin.nested import NestedTabularInline
from nested_admin.polymorphic import NestedStackedPolymorphicInline, NestedPolymorphicModelAdmin
from ..common.models import Article
from ..common.admin import ArticleAdmin, SectionInlineAdmin
class ProductPictureAdmin(admin.ModelAdmin):
    # Standalone admin list for product pictures: show owning product, the
    # description/flags and an inline preview (image_tag), newest first.
    model = ProductPicture
    list_display = ('product', 'description', 'featured', 'cover', 'image_tag')
    # image_tag is rendered, not edited.
    readonly_fields = ['image_tag']
    ordering = ('-product__category', '-product', '-update_at', '-create_at')
class CategoryAdmin(admin.ModelAdmin):
    # Plain admin for Category with default options.
    model = Category
class ProductPictureInlineAdmin(NestedTabularInline):
    # Tabular inline so pictures can be edited directly on the Product page.
    model = ProductPicture
class ProductDetailedDescriptionAdminInline(NestedTabularInline):
    # Tabular inline so detailed descriptions can be edited on the Product page.
    model = ProductDetailedDescription
class ProductAdmin(NestedPolymorphicModelAdmin):
    # Polymorphic product admin with nested inlines for pictures and
    # detailed descriptions.
    inlines = [ProductPictureInlineAdmin, ProductDetailedDescriptionAdminInline]
# Register the models with their customized admin classes.
admin.site.register(Product, ProductAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProductPicture, ProductPictureAdmin)
|
"""Helper script to package wheels and relocate binaries."""
import glob
import hashlib
# Standard library imports
import os
import os.path as osp
import platform
import shutil
import subprocess
import sys
import zipfile
from base64 import urlsafe_b64encode
# Third party imports
# auditwheel is Linux-only; lddtree walks an ELF binary's dependency tree.
if sys.platform == "linux":
    from auditwheel.lddtree import lddtree
# Linux system libraries (glibc, gcc runtime, X11/GL, glib) that must NOT be
# bundled into the wheel; relocate_elf_library skips these.
ALLOWLIST = {
    "libgcc_s.so.1",
    "libstdc++.so.6",
    "libm.so.6",
    "libdl.so.2",
    "librt.so.1",
    "libc.so.6",
    "libnsl.so.1",
    "libutil.so.1",
    "libpthread.so.0",
    "libresolv.so.2",
    "libX11.so.6",
    "libXext.so.6",
    "libXrender.so.1",
    "libICE.so.6",
    "libSM.so.6",
    "libGL.so.1",
    "libgobject-2.0.so.0",
    "libgthread-2.0.so.0",
    "libglib-2.0.so.0",
    "ld-linux-x86-64.so.2",
    "ld-2.17.so",
}
# Windows system DLLs / UCRT forwarders that must NOT be bundled;
# relocate_dll_library skips these.
WINDOWS_ALLOWLIST = {
    "MSVCP140.dll",
    "KERNEL32.dll",
    "VCRUNTIME140_1.dll",
    "VCRUNTIME140.dll",
    "api-ms-win-crt-heap-l1-1-0.dll",
    "api-ms-win-crt-runtime-l1-1-0.dll",
    "api-ms-win-crt-stdio-l1-1-0.dll",
    "api-ms-win-crt-filesystem-l1-1-0.dll",
    "api-ms-win-crt-string-l1-1-0.dll",
    "api-ms-win-crt-environment-l1-1-0.dll",
    "api-ms-win-crt-math-l1-1-0.dll",
    "api-ms-win-crt-convert-l1-1-0.dll",
}
# Repository layout and build-environment constants.
HERE = osp.dirname(osp.abspath(__file__))
PACKAGE_ROOT = osp.dirname(osp.dirname(HERE))
PLATFORM_ARCH = platform.machine()
PYTHON_VERSION = sys.version_info
def rehash(path, blocksize=1 << 20):
    """Return (hash, length) for path using hashlib.sha256()"""
    hasher = hashlib.sha256()
    total = 0
    with open(path, "rb") as stream:
        # Read in fixed-size chunks so large files never load fully in memory.
        for chunk in iter(lambda: stream.read(blocksize), b""):
            total += len(chunk)
            hasher.update(chunk)
    digest = urlsafe_b64encode(hasher.digest()).decode("latin1").rstrip("=")
    return ("sha256=" + digest, str(total))
def unzip_file(file, dest):
    """Decompress zip `file` into directory `dest`."""
    archive = zipfile.ZipFile(file, "r")
    try:
        archive.extractall(dest)
    finally:
        archive.close()
def is_program_installed(basename):
    """
    Return program absolute path if installed in PATH.
    Otherwise, return None

    On macOS systems, a .app is considered installed if
    it exists.
    """
    if sys.platform == "darwin" and basename.endswith(".app") and osp.exists(basename):
        return basename
    candidates = (osp.join(entry, basename) for entry in os.environ["PATH"].split(os.pathsep))
    return next((candidate for candidate in candidates if osp.isfile(candidate)), None)
def find_program(basename):
    """
    Find program in PATH and return absolute path

    Try adding .exe or .bat to basename on Windows platforms
    (return None if not found)
    """
    candidates = [basename]
    if os.name == "nt":
        # On Windows, try the common executable extensions first.
        extensions = (".exe", ".bat", ".cmd", ".dll")
        if not basename.endswith(extensions):
            candidates = [basename + ext for ext in extensions] + [basename]
    for candidate in candidates:
        found = is_program_installed(candidate)
        if found:
            return found
def patch_new_path(library_path, new_dir):
    """Return a destination path in `new_dir` for `library_path`, with a short
    hash of the original path spliced between the stem and its extensions
    (avoids name clashes between different copies of the same library)."""
    basename = osp.basename(library_path)
    stem, *suffixes = basename.split(".")
    short_hash = hashlib.sha256(library_path.encode("utf-8")).hexdigest()[:8]
    renamed = ".".join([stem, short_hash, ".".join(suffixes)])
    return osp.join(new_dir, renamed)
def find_dll_dependencies(dumpbin, binary):
    """Run `dumpbin /dependents` on `binary` and return the listed DLL names."""
    result = subprocess.run([dumpbin, "/dependents", binary], stdout=subprocess.PIPE)
    text = result.stdout.strip().decode("utf-8")
    # The DLL names sit between the "dependencies:" header and the "Summary" footer.
    start_index = text.find("dependencies:") + len("dependencies:")
    end_index = text.find("Summary")
    section = text[start_index:end_index].strip()
    return [dll.strip() for dll in section.split(os.linesep)]
def relocate_elf_library(patchelf, output_dir, output_library, binary):
    """
    Relocate an ELF shared library to be packaged on a wheel.

    Given a shared library, find the transitive closure of its dependencies,
    rename and copy them into the wheel while updating their respective rpaths.

    patchelf       --- path to the patchelf executable
    output_dir     --- root of the unpacked wheel
    output_library --- directory containing `binary`
    binary         --- file name of the shared library to relocate
    """
    print(f"Relocating {binary}")
    binary_path = osp.join(output_library, binary)
    ld_tree = lddtree(binary_path)
    tree_libs = ld_tree["libs"]
    # Breadth-first walk over the dependency graph: (library, dependent) pairs.
    binary_queue = [(n, binary) for n in ld_tree["needed"]]
    binary_paths = {binary: binary_path}
    binary_dependencies = {}
    while binary_queue != []:
        library, parent = binary_queue.pop(0)
        library_info = tree_libs[library]
        print(library)
        if library_info["path"] is None:
            print(f"Omitting {library}")
            continue
        if library in ALLOWLIST:
            # Omit glibc/gcc/system libraries
            print(f"Omitting {library}")
            continue
        parent_dependencies = binary_dependencies.get(parent, [])
        parent_dependencies.append(library)
        binary_dependencies[parent] = parent_dependencies
        if library in binary_paths:
            continue
        binary_paths[library] = library_info["path"]
        binary_queue += [(n, library) for n in library_info["needed"]]
    print("Copying dependencies to wheel directory")
    new_libraries_path = osp.join(output_dir, "torchvision.libs")
    os.makedirs(new_libraries_path, exist_ok=True)
    new_names = {binary: binary_path}
    for library in binary_paths:
        if library != binary:
            library_path = binary_paths[library]
            # Hash-rename so bundled copies cannot shadow system libraries.
            new_library_path = patch_new_path(library_path, new_libraries_path)
            print(f"{library} -> {new_library_path}")
            shutil.copyfile(library_path, new_library_path)
            new_names[library] = new_library_path
    print("Updating dependency names by new files")
    for library in binary_paths:
        if library != binary:
            if library not in binary_dependencies:
                continue
            library_dependencies = binary_dependencies[library]
            new_library_name = new_names[library]
            for dep in library_dependencies:
                new_dep = osp.basename(new_names[dep])
                print(f"{library}: {dep} -> {new_dep}")
                subprocess.check_output(
                    [patchelf, "--replace-needed", dep, new_dep, new_library_name], cwd=new_libraries_path
                )
            print("Updating library rpath")
            # Bundled libraries resolve siblings from their own directory.
            subprocess.check_output([patchelf, "--set-rpath", "$ORIGIN", new_library_name], cwd=new_libraries_path)
            subprocess.check_output([patchelf, "--print-rpath", new_library_name], cwd=new_libraries_path)
    print("Update library dependencies")
    library_dependencies = binary_dependencies[binary]
    for dep in library_dependencies:
        new_dep = osp.basename(new_names[dep])
        print(f"{binary}: {dep} -> {new_dep}")
        subprocess.check_output([patchelf, "--replace-needed", dep, new_dep, binary], cwd=output_library)
    print("Update library rpath")
    # The entry binary looks in its own dir and the sibling torchvision.libs.
    subprocess.check_output(
        [patchelf, "--set-rpath", "$ORIGIN:$ORIGIN/../torchvision.libs", binary_path], cwd=output_library
    )
def relocate_dll_library(dumpbin, output_dir, output_library, binary):
    """
    Relocate a DLL/PE shared library to be packaged on a wheel.

    Given a shared library, find the transitive closure of its dependencies,
    rename and copy them into the wheel.

    dumpbin        --- path to the dumpbin.exe tool
    output_dir     --- root of the unpacked wheel
    output_library --- directory containing `binary`
    binary         --- file name of the DLL/PYD to relocate
    """
    print(f"Relocating {binary}")
    binary_path = osp.join(output_library, binary)
    library_dlls = find_dll_dependencies(dumpbin, binary_path)
    # Breadth-first walk over the dependency graph: (dll, dependent) pairs.
    binary_queue = [(dll, binary) for dll in library_dlls]
    binary_paths = {binary: binary_path}
    binary_dependencies = {}
    while binary_queue != []:
        library, parent = binary_queue.pop(0)
        if library in WINDOWS_ALLOWLIST or library.startswith("api-ms-win"):
            print(f"Omitting {library}")
            continue
        library_path = find_program(library)
        if library_path is None:
            print(f"{library} not found")
            continue
        # Never bundle DLLs that live in the system directory.
        if osp.basename(osp.dirname(library_path)) == "system32":
            continue
        print(f"{library}: {library_path}")
        parent_dependencies = binary_dependencies.get(parent, [])
        parent_dependencies.append(library)
        binary_dependencies[parent] = parent_dependencies
        if library in binary_paths:
            continue
        binary_paths[library] = library_path
        downstream_dlls = find_dll_dependencies(dumpbin, library_path)
        binary_queue += [(n, library) for n in downstream_dlls]
    print("Copying dependencies to wheel directory")
    # Windows has no rpath; DLLs are dropped next to the package binary.
    package_dir = osp.join(output_dir, "torchvision")
    for library in binary_paths:
        if library != binary:
            library_path = binary_paths[library]
            new_library_path = osp.join(package_dir, library)
            print(f"{library} -> {new_library_path}")
            shutil.copyfile(library_path, new_library_path)
def compress_wheel(output_dir, wheel, wheel_dir, wheel_name):
    """Create RECORD file and compress wheel distribution."""
    print("Update RECORD file in wheel")
    dist_info = glob.glob(osp.join(output_dir, "*.dist-info"))[0]
    record_file = osp.join(dist_info, "RECORD")
    with open(record_file, "w") as record:
        for root, _, files in os.walk(output_dir):
            for filename in files:
                absolute = osp.join(root, filename)
                relative = osp.relpath(absolute, output_dir)
                if absolute == record_file:
                    # RECORD lists itself without hash and size.
                    record.write(f"{relative},,\n")
                else:
                    digest, size = rehash(absolute)
                    record.write(f"{relative},{digest},{size}\n")
    print("Compressing wheel")
    archive_base = osp.join(wheel_dir, wheel_name)
    shutil.make_archive(archive_base, "zip", output_dir)
    os.remove(wheel)
    shutil.move(f"{archive_base}.zip", wheel)
    shutil.rmtree(output_dir)
def patch_linux():
    """Find built wheels in dist/, relocate their ELF dependencies with
    patchelf, and repack each wheel in place."""
    patchelf = find_program("patchelf")
    if patchelf is None:
        raise FileNotFoundError("Patchelf was not found in the system, please make sure that is available on the PATH.")
    print("Finding wheels...")
    wheels = glob.glob(osp.join(PACKAGE_ROOT, "dist", "*.whl"))
    output_dir = osp.join(PACKAGE_ROOT, "dist", ".wheel-process")
    torchvision_binaries = ["image.so", "video_reader.so"]
    for wheel in wheels:
        # Start from a clean scratch directory for every wheel.
        if osp.exists(output_dir):
            shutil.rmtree(output_dir)
        os.makedirs(output_dir)
        print("Unzipping wheel...")
        wheel_file = osp.basename(wheel)
        wheel_dir = osp.dirname(wheel)
        print(f"{wheel_file}")
        wheel_name, _ = osp.splitext(wheel_file)
        unzip_file(wheel, output_dir)
        print("Finding ELF dependencies...")
        output_library = osp.join(output_dir, "torchvision")
        for binary in torchvision_binaries:
            if osp.exists(osp.join(output_library, binary)):
                relocate_elf_library(patchelf, output_dir, output_library, binary)
        compress_wheel(output_dir, wheel, wheel_dir, wheel_name)
def patch_win():
    """Find built wheels in dist/, relocate their DLL dependencies using
    dumpbin, and repack each wheel in place."""
    dumpbin = find_program("dumpbin")
    if dumpbin is None:
        raise FileNotFoundError("Dumpbin was not found in the system, please make sure that is available on the PATH.")
    print("Finding wheels...")
    wheels = glob.glob(osp.join(PACKAGE_ROOT, "dist", "*.whl"))
    output_dir = osp.join(PACKAGE_ROOT, "dist", ".wheel-process")
    torchvision_binaries = ["image.pyd", "video_reader.pyd"]
    for wheel in wheels:
        # Start from a clean scratch directory for every wheel.
        if osp.exists(output_dir):
            shutil.rmtree(output_dir)
        os.makedirs(output_dir)
        print("Unzipping wheel...")
        wheel_file = osp.basename(wheel)
        wheel_dir = osp.dirname(wheel)
        print(f"{wheel_file}")
        wheel_name, _ = osp.splitext(wheel_file)
        unzip_file(wheel, output_dir)
        print("Finding DLL/PE dependencies...")
        output_library = osp.join(output_dir, "torchvision")
        for binary in torchvision_binaries:
            if osp.exists(osp.join(output_library, binary)):
                relocate_dll_library(dumpbin, output_dir, output_library, binary)
        compress_wheel(output_dir, wheel, wheel_dir, wheel_name)
if __name__ == "__main__":
    # Dispatch on the build platform; macOS wheels are not processed here.
    if sys.platform == "linux":
        patch_linux()
    elif sys.platform == "win32":
        patch_win()
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# create_limit_tables.py: create limit databases for msid trending #
# #
# author: t. isobe (tisobe@cfa.harvrad.edu) #
# #
# last update: Oct 26, 2021 #
# #
#########################################################################################
import os
import sys
import re
import copy
import string
import math
import sqlite3
import Ska.engarchive.fetch as fetch
from Ska.engarchive import fetch_eng
limit_dir = '/data/mta/Script/MSID_limit/Trend_limit_data/'
#------------------------------------------------------------------------------------------
#-- create_limit_tables: create limit databases for msid trending --
#------------------------------------------------------------------------------------------
def create_limit_tables():
    """
    create limit databases for msid trending
    input:  none but read from op_limits.db and glimmon data
    output: Limit_data/*_limit
    """
    #
    #--- build msid <--> unit / description / mta limit dictionaries
    #
    u_dict, d_dict, v_dict = create_data_dict()
    #
    #--- create the limit data table for every msid in the list
    #
    msid_file = limit_dir + 'house_keeping/msid_list'
    table = extract_limit(msid_file, u_dict, d_dict, v_dict)
    #
    #--- write out the result
    #
    with open(limit_dir + 'Limit_data/op_limits_new.db', 'w') as fo:
        fo.write(table)
#------------------------------------------------------------------------------------------
#-- create_data_dict: create msid <--> unit/description/limit data dictionaries
#------------------------------------------------------------------------------------------
def create_data_dict():
    """
    read mta_limits.db and create msid <--> unit/description/data dictionaries
    input:  none but read from op_limits.db
    output: u_dict  --- a dictionary of msid <---> unit
            d_dict  --- a dictionary of msid <---> description
            v_dict  --- a dictionary of msid <---> a list of lists of limit data
                        [<lower yellow>, <upper yellow>, <lower red>, <upper red>, <start time>]
    """
    #
    #--- read units and descriptions of msids and create dictionaries
    #
    ifile = limit_dir + 'house_keeping/msid_descriptions'
    with open(ifile, 'r') as f:
        data = [line.strip() for line in f.readlines()]
    u_dict = {}
    d_dict = {}
    for ent in data:
        atemp = re.split('#', ent)
        msid = atemp[0].strip().lower()
        u_dict[msid] = atemp[2].strip()
        d_dict[msid] = atemp[3].strip()
    #
    #--- read mta limit data
    #
    ifile = limit_dir + 'house_keeping/mta_op_limits.db'
    with open(ifile, 'r') as f:
        data = [line.strip() for line in f.readlines()]
    v_dict = {}
    for ent in data:
        # Skip blank lines (the original crashed with IndexError on ent[0])
        # and comment lines.
        if not ent or ent.startswith('#'):
            continue
        atemp = re.split('#', ent)
        btemp = re.split(r'\t+', atemp[0])
        msid = btemp[0].strip().lower()
        try:
            vlist = [btemp[1], btemp[2], btemp[3], btemp[4], btemp[7]]
        except IndexError:
            # Malformed / short line --- skip it, as the original did.
            continue
        v_dict.setdefault(msid, []).append(vlist)
    return [u_dict, d_dict, v_dict]
#------------------------------------------------------------------------------------------
#-- extract_limit: read glimmon limit database and create limit database ---
#------------------------------------------------------------------------------------------
def extract_limit(ifile, u_dict, d_dict, v_dict):
    """
    read glimmon limit database and create limit database. if glimmon does not have
    the data, mta's op_limit.db values are used.
    input:  ifile   --- a list of msids
            u_dict  --- a dictionary of msid <---> unit
            d_dict  --- a dictionary of msid <---> description
            v_dict  --- a dictionary of msid <---> a list of lists of limit data
    output: aline   --- a strings of data table
    """
    with open(ifile, 'r') as f:
        data = [line.strip() for line in f.readlines()]
    msid_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        msid_list.append(atemp[0].strip().lower())
    glimmon = '/data/mta/Script/MSID_limit/glimmondb.sqlite3'
    db = sqlite3.connect(glimmon)
    cursor = db.cursor()
    aline = '#\n'
    aline = aline + '# MSID Y Lower Y Upper R Lower R Upper '
    aline = aline + 'Cnd MSID Sate Time\t\t\t\t\t\t\t\t\t\tDescription'
    aline = aline + ' Uint Limit Group\n'
    aline = aline + '#\n'
    for msid in msid_list:
        # NOTE(review): result of strip() is discarded here --- a no-op,
        # but msid was already stripped when the list was built.
        msid.strip()
        print("MSID: " + msid)
        cmd = "SELECT a.setkey, a.default_set, a.mlmenable, a.switchstate, a.mlimsw, "
        cmd = cmd + "a.caution_low, a.caution_high, a.warning_low, a.warning_high, "
        cmd = cmd + "a.datesec FROM limits AS a WHERE a.msid ='" + msid + "'"
        cursor.execute(cmd)
        allrows = cursor.fetchall()
        try:
            unit = u_dict[msid]
        except:
            unit = 'na'
        try:
            desc = d_dict[msid]
        except:
            desc = ' NA '
#
#--- mta case
#
        if len(allrows) == 0:
            try:
                vlist = v_dict[msid]
            except:
                # No limits anywhere: emit open (never-triggering) limits.
                vlist = [['-9999998.0', '9999998.0', '-9999999.0', '9999999.0', '48815999']]
            for elist in vlist:
                if len(msid) < 8:
                    line = msid + '\t\t'
                else:
                    line = msid + '\t'
                elist[0] = "%3.2f" % (float(elist[0]))
                elist[1] = "%3.2f" % (float(elist[1]))
                elist[2] = "%3.2f" % (float(elist[2]))
                elist[3] = "%3.2f" % (float(elist[3]))
                line = line + adjust_length(elist[0])
                line = line + adjust_length(elist[1])
                line = line + adjust_length(elist[2])
                line = line + adjust_length(elist[3])
                line = line + 'none\t\tnone\t' + str(int(float(elist[4])))
                line = line + '\t#' + "%50s" % desc + '\t # \t' + unit + '\t # \tmta\n'
                aline = aline + line
#
#--- glimmon case
#
        else:
#
#--- first replace none state with others if they exist
#
            key_list = []
            st_list = []
            cnd_list = []
            for elist in allrows:
                key_list.append(elist[0])
                st_list.append(elist[3])
                cnd_list.append(elist[4])
#
#--- check whether there is switch
#
            switch = 0
            for ent in cnd_list:
                if ent != 'none':
                    switch = 1
                    break
#
#---find key <--> state correspondence
#
            k_set = list(set(key_list))
            k_len = len(k_set)
            key_dict = {}
#
#--- if there are two state with switch msid, assume that they are on and off states
#
            if (k_len == 2) and (switch > 0):
                key_dict[0] = 'on'
                key_dict[1] = 'off'
            else:
                for key in k_set:
                    key_dict[key] = 'none'
                for k in range(0, len(key_list)):
                    if st_list[k] != 'none':
                        key_dict[key_list[k]] = st_list[k]
#
#--- now replace 'none' state with appropriate state (if they exist)
#
            temp_list = []
            for elist in allrows:
                elist = list(elist)
                elist[3] = key_dict[elist[0]]
                temp_list.append(elist)
#
#--- a base data list updated. start farther checking
#
            allrows = temp_list
            temp_save = []
            state_list = []
            time_list = []
            chk_list = []
            cnd_msid = 'none'
            for elist in allrows:
                elist = list(elist)
#
#--- the starting time of glimmon is the end of year 2000; extend it to 1999
#
                if int(elist[-1]) == 83620796:
                    elist[-1] = 31536000
                else:
                    elist[-1] = int(elist[-1])
#
#--- limit checking is turned off if "mlmenable" is 0
#
                if elist[2] == 0:
                    elist[5] = '-9999998.0'
                    elist[6] = ' 9999998.0'
                    elist[7] = '-9999999.0'
                    elist[8] = ' 9999999.0'
#
#--- if limit checking is still on, format the limit values
#
                else:
                    if unit.upper() == 'K':
                        elist[5] = "%3.2f" % temp_to_k(elist[5], msid)
                        elist[6] = "%3.2f" % temp_to_k(elist[6], msid)
                        elist[7] = "%3.2f" % temp_to_k(elist[7], msid)
                        elist[8] = "%3.2f" % temp_to_k(elist[8], msid)
                    else:
                        # Use scientific notation for very small limit values.
                        if abs(float(elist[5])) < 0.01:
                            elist[5] = "%2.3e" % (float(elist[5]))
                            elist[6] = "%2.3e" % (float(elist[6]))
                            elist[7] = "%2.3e" % (float(elist[7]))
                            elist[8] = "%2.3e" % (float(elist[8]))
                        else:
                            elist[5] = "%3.2f" % (float(elist[5]))
                            elist[6] = "%3.2f" % (float(elist[6]))
                            elist[7] = "%3.2f" % (float(elist[7]))
                            elist[8] = "%3.2f" % (float(elist[8]))
                temp_save.append(elist)
                state_list.append(elist[3])
                time_list.append(elist[-1])
                chk_list.append(elist[2])
                if elist[3] != 'none':
                    cnd_msid = elist[4]
            s_list = list(set(state_list))
            if len(s_list) == 1:
                lim_list = temp_save
#
#--- if the limit lists start the state of "none" but added condition
#--- after that, assume that "none" limits are used for all states, until
#--- the other states are added to the list; so add the "none" state
#--- limits to other state cases
#
            else:
                slen = len(state_list)
                schk = 0
                lim_list = []
                for k in range(0, slen):
                    lim_list.append(temp_save[k])
#
#--- check whether the following state is also "none"
#
                    if (k < slen-1) and (schk == 0) and (state_list[k] == 'none'):
                        if state_list[k+1] == 'none':
                            for state in s_list:
                                if state == 'none':
                                    continue
                                atemp = copy.deepcopy(temp_save[k])
                                atemp[3] = state
                                atemp[4] = cnd_msid
                                lim_list.append(atemp)
#
#--- the following state is not "none", but there are a time gap
#
                        elif time_list[k] < time_list[k+1]:
                            for state in s_list:
                                if state == 'none':
                                    continue
                                atemp = copy.deepcopy(temp_save[k])
                                atemp[3] = state
                                atemp[4] = cnd_msid
                                lim_list.append(atemp)
#
#--- once the other than "none" state starts in the limit, stop the procedure
#
                        else:
                            schk = 1
                    else:
                        schk = 1
#
#--- removing the same time entry; choose wider limit range
#
            l_len = len(lim_list)
            if l_len > 1:
                temp_list = []
                for m in range(0, l_len-1):
                    prev_list = lim_list[m]
                    ptime = prev_list[-1]
                    pstate = prev_list[3]
                    chk = 0
                    for k in range(m+1, l_len):
                        this_list = lim_list[k]
                        ctime = this_list[-1]
                        cstate = this_list[3]
                        if (ptime == ctime) and (pstate == cstate):
                            # NOTE(review): limits are compared as formatted
                            # strings here, not as floats --- confirm intended.
                            if (this_list[5] < prev_list[5]) and (this_list[6] > prev_list[6]):
                                chk = 1
                                break
                            else:
                                this_list[5] = prev_list[5]
                                this_list[6] = prev_list[6]
                                this_list[7] = prev_list[7]
                                this_list[8] = prev_list[8]
                                lim_list[k] = this_list
                                chk = 1
                                break
                    if chk == 0:
                        temp_list.append(prev_list)
                temp_list.append(lim_list[-1])
                lim_list = temp_list
#
#--- write out all the limit lists
#
            for elist in lim_list:
                if len(msid) < 8:
                    line = msid + '\t\t'
                else:
                    line = msid + '\t'
                line = line + adjust_length(elist[5])
                line = line + adjust_length(elist[6])
                line = line + adjust_length(elist[7])
                line = line + adjust_length(elist[8])
                line = line + adjust_length(elist[4])
                if len(elist[3]) < 4:
                    line = line + str(elist[3]) + '\t'
                else:
                    line = line + str(elist[3])
                line = line + '\t' + str(elist[-1])
                line = line + '\t#' + "%50s" % desc + '\t # \t' + unit + '\t # \tglimmon\n'
                aline = aline + line
    return aline
#------------------------------------------------------------------------------------------
#-- find_unit: find engineering data unit --
#------------------------------------------------------------------------------------------
def find_unit(msid):
    """
    find engineering data unit
    input:  msid    --- msid
    output: unit    --- unit string reported by the ska engineering archive
    """
    # Fetch a single day of data just to read its unit attribute
    # (requires access to the ska engineering archive).
    data = fetch_eng.Msid(msid, '2019:001')
    return data.unit
#------------------------------------------------------------------------------------------
#-- temp_to_k: convert C and F temperature to K --
#------------------------------------------------------------------------------------------
def temp_to_k(val, msid):
    """
    convert C and F temperature to K
    input:  val     --- value
            msid    --- msid
    output: val     --- converted value if it is C or F
    """
    # If the unit cannot be determined, hand the value back unchanged.
    try:
        unit = find_unit(msid)
    except:
        return val
    # Non-numeric values are returned unchanged for every unit (this mirrors
    # the per-branch try/except of the original implementation).
    try:
        num = float(val)
    except:
        return val
    if unit == 'DEGC':
        return num + 273.15
    if unit == 'DEGF':
        return 5.0 * (num - 32.0) / 9.0 + 273.15
    # 'K' and every other unit: just the numeric value.
    return num
#------------------------------------------------------------------------------------------
#-- adjust_length: adjust length of the string --
#------------------------------------------------------------------------------------------
def adjust_length(val):
    """
    adjust length of the string
    input:  val --- value to be adjusted
    output: val --- length adjusted string of the value
    """
    # Pad with tabs so table columns line up: 3 tabs for very short values,
    # 2 for short, 1 for medium, none for 12+ characters.
    vlen = len(val)
    if vlen < 4:
        return val + '\t\t\t'
    if vlen < 8:
        return val + '\t\t'
    if vlen < 12:
        return val + '\t'
    return val
#------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Rebuild the limit database when run as a script.
    create_limit_tables()
|
from flask import Flask
from flask import render_template
app = Flask(__name__)
def get_data():
    """Return the chart series as a literal string of three number lists."""
    # The numbers to display are hard-coded here.
    return "[1,2,4,5,8,5,2,1,4,5,6,8],[9,8,7,2,8,5,2,1,4,5,6,8],[9,8,7,2,8,5,2,1,4,5,6,8]"
@app.route("/")
def index():
    """Render the line chart page with the hard-coded data series."""
    return render_template("line_chart.html", data=get_data())
if __name__ == "__main__":
    # Run the Flask development server (default host/port).
    app.run()
|
# My solution
# Codeforces "nearly lucky" check: a number is nearly lucky when the COUNT of
# lucky digits (4 and 7) in it is itself a lucky number, i.e. exactly 4 or 7.
n = input()
number_of_digit = n.count('4') + n.count('7')
if number_of_digit == 4 or number_of_digit == 7:
    print('YES')
else:
    print('NO')
# Alternate solution
# Golfed variant: the boolean indexes "NYOE S" with a start of 0 (every other
# char -> "NO ") or 1 (-> "YES"), then strip() drops the padding space.
print("NYOE S"[sum(i in '47' for i in input()) in (4, 7)::2].strip())
|
import pandas as pd
__author__ = 'obr214'
"""
DataReader Class
It reads a file, creates a dataframe and clean it according to the values needed.
"""
class DataReader:
    """Reads an inspection CSV, keeps the CAMIS/BORO/GRADE/GRADE DATE columns
    and cleans the resulting dataframe."""

    def __init__(self, file_name):
        """Load `file_name` and clean it.

        Raises IOError when the file cannot be read and LookupError when a
        required column is missing.
        """
        try:
            self.dataframe = pd.read_csv(file_name, usecols=['CAMIS', 'BORO', 'GRADE', 'GRADE DATE'])
            self.clean_dataframe()
        except IOError:
            # Fixed message typo ("Founded" -> "Found").
            raise IOError('File Not Found')
        except LookupError:
            raise LookupError('Columns Not Found in the File')

    def clean_dataframe(self):
        """
        Cleans the dataframe.
        Deletes the NA values, drops the duplicates.
        Deletes the invalid grades (Z, P and Not Yet Graded).
        Deletes the invalid BORO 'Missing'.
        """
        try:
            self.dataframe = self.dataframe.dropna()
            self.dataframe = self.dataframe.drop_duplicates()
            self.dataframe['GRADE DATE'] = pd.to_datetime(self.dataframe['GRADE DATE'])
            idx_grades = self.dataframe['GRADE'].isin(['Z', 'P', 'Not Yet Graded'])
            self.dataframe = self.dataframe[~idx_grades]
            idx_boro = self.dataframe['BORO'].isin(['Missing'])
            self.dataframe = self.dataframe[~idx_boro]
        except LookupError:
            raise LookupError('Column Not Found in the Dataframe')

    def get_dataframe(self):
        """Return the cleaned dataframe."""
        return self.dataframe
|
import mysql.connector
import datos_db

# Connect using the credentials defined in datos_db.dbConnect.
conexion = mysql.connector.connect(**datos_db.dbConnect)
cursor = conexion.cursor()

# Delete a single hard-coded row.
sql = "delete from usuarios where id = 22"
cursor.execute(sql)

# Delete a row whose id is typed in by the user (parameterized query).
n_id = int(input("Id: "))
sql = "delete from usuarios where id = %s"
cursor.execute(sql,(n_id,))

# Delete several rows in one round trip with executemany.
sql = "delete from usuarios where id = %s"
registros = [(20,), (21,)]
cursor.executemany(sql, registros)

# All deletes above belong to one transaction; commit before closing.
conexion.commit()
cursor.close()
conexion.close()
|
import eelbrain as e

# settings
n_samples = 1000

# Load data (auditory-modality subset of the MNE sample dataset)
ds = e.datasets.get_mne_sample(tmin=-0.1, tmax=0.2, src='ico', sub="modality=='A'")

# compute distribution of max t values through permutation
res = e.testnd.ttest_ind('src', 'side', 'L', 'R', ds=ds, samples=n_samples, tstart=0.05)

# generate parameter map thresholded at p=0.05
pmap = res.masked_parameter_map(pmin=0.05)

# the next line could be used to plot the result for inspection
# (any area that is significant at any time)
##e.plot.brain.cluster(pmap.sum('time'), surf='inflated')

# create an HTML report with the results from the test
report = e.Report("Permutation Test", author="Prof. Enid Gumby")

# add some information about the test
section = report.add_section("Introduction")
text = ("A comparison of auditory stimulation to the left vs. the right ear. "
        "A distribution of t values was calculated by shuffling condition "
        "labels %i times and for each test picking the largest absolute t-"
        "value across time and space. P-values were then calculated for "
        "every source and time point using this distribution." % n_samples)
section.append(text)

# image with significance maps in time bins
section = report.add_section("Result")
image = e.plot.brain.bin_table(pmap, tstep=0.05, surf='smoothwm', views=['lat', 'med'])
section.add_figure("Significant regions in time bins.", image)

# save the report
report.save_html("Source Permutation.html")
|
import imgpr.image as image
import imgpr.warp as warp
import imgpr.layers as layers
import imgpr.filtering as filtering
import imgpr.utils as utils
from imgpr.session import Session
from imgpr.layers import placeholder
from imgpr.consts import *
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# Infitweaker - Copyright 2012 Alex Kaplan
# FreeType high-level python API and rendering - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
import sys
try:
    import pygtk
    pygtk.require("2.0")
except:
    # pygtk is optional on some installs; the gtk import below is the
    # hard requirement.
    pass
try:
    import gtk
    import gtk.glade
except:
    # Without GTK the GUI cannot run at all.
    sys.exit(1)
from freetype import *
import numpy as np
import Image
import StringIO
import os
class pyInfitweaker:
    """This is an PyGTK application to create best settings for INFINALITY freetype"""
    # NOTE: legacy Python 2 code (print statement, `0x...L` long literals,
    # PyGTK/Glade, PIL's `Image` module).

    def __init__(self):
        #Set the Glade file
        self.gladefile = "infitweaker.glade"
        self.wTree = gtk.glade.XML(self.gladefile, "mainWindow")
        #Get the image to store the preview results
        self.previewImage = self.wTree.get_widget("previewImage")
        # Entry widgets: five LCD filter parameters plus the Chrome-style
        # sharpening strength.
        self.enFilter1 = self.wTree.get_widget("enFilter1")
        self.enFilter2 = self.wTree.get_widget("enFilter2")
        self.enFilter3 = self.wTree.get_widget("enFilter3")
        self.enFilter4 = self.wTree.get_widget("enFilter4")
        self.enFilter5 = self.wTree.get_widget("enFilter5")
        self.enChrome = self.wTree.get_widget("enChrome")
        # Pre-fill the entries from the environment, falling back to defaults.
        if "INFINALITY_FT_CHROMEOS_STYLE_SHARPENING_STRENGTH" in os.environ:
            self.enChrome.set_text(os.environ["INFINALITY_FT_CHROMEOS_STYLE_SHARPENING_STRENGTH"])
        else:
            self.enChrome.set_text("50")
        if "INFINALITY_FT_FILTER_PARAMS" in os.environ:
            filtersList = os.environ["INFINALITY_FT_FILTER_PARAMS"].split()
            self.enFilter1.set_text(filtersList[0])
            self.enFilter2.set_text(filtersList[1])
            self.enFilter3.set_text(filtersList[2])
            self.enFilter4.set_text(filtersList[3])
            self.enFilter5.set_text(filtersList[4])
        else:
            # let's populate to my defaults
            self.enFilter1.set_text("0")
            self.enFilter2.set_text("35")
            self.enFilter3.set_text("35")
            self.enFilter4.set_text("35")
            self.enFilter5.set_text("0")
        #Create our dictionay and connect it
        dic = {"on_mainWindow_destroy" : self.destroy,
               "on_previewButton_clicked" : self.OnPreview}
        self.wTree.signal_autoconnect(dic)

    def destroy(self, widget, data=None):
        # Window close handler: leave the GTK main loop.
        print "destroy signal occurred"
        gtk.main_quit()

    def main(self):
        # Enter the GTK main loop (blocks until destroy()).
        gtk.main()

    def OnPreview(self, widget):
        # Export the current entry values so the Infinality-patched freetype
        # picks them up, then re-render the sample image.
        varFilter = self.enFilter1.get_text() + " " + self.enFilter2.get_text() + " " + self.enFilter3.get_text() + " " + self.enFilter4.get_text() + " " + self.enFilter5.get_text()
        varChrome = self.enChrome.get_text()
        os.environ["INFINALITY_FT_CHROMEOS_STYLE_SHARPENING_STRENGTH"] = varChrome
        os.environ["INFINALITY_FT_FILTER_PARAMS"] = varFilter
        I = self.render('Vera.ttf', (1,1), 1.25, True)
        self.previewImage.set_from_pixbuf(gtk.gdk.pixbuf_new_from_array(np.array(I), gtk.gdk.COLORSPACE_RGB, 8))

    def render(self, filename = "Vera.ttf", hinting = (False,False), gamma = 1.5, lcd=True):
        # Render a sample string at sizes 9..22 into a numpy image via
        # freetype, optionally with LCD (subpixel) rendering.
        text = "A Quick Brown Fox Jumps Over The Lazy Dog 0123456789"
        W,H,D = 680, 280, 1
        Z = np.zeros( (H,W), dtype=np.ubyte )
        face = Face(filename)
        pen = Vector(5*64, (H-10)*64)
        flags = FT_LOAD_RENDER
        #if hinting[1]: flags |= FT_LOAD_FORCE_AUTOHINT
        #else: flags |= FT_LOAD_NO_HINTING
        # Horizontal "no hinting" is emulated by rendering at 10x horizontal
        # resolution and scaling back down through the transform matrix.
        if hinting[0]: hres, hscale = 72, 1.0
        else: hres, hscale = 72*10, 0.1
        if lcd:
            flags |= FT_LOAD_TARGET_LCD
            # LCD mode needs an RGB image (one channel per subpixel).
            Z = np.zeros( (H,W,3), dtype=np.ubyte )
            set_lcd_filter( FT_LCD_FILTER_DEFAULT )
        for size in range(9,23):
            face.set_char_size( size * 64, 0, hres, 72 )
            # 16.16 fixed-point transform matrix (hence the 0x10000 factor).
            matrix = Matrix( int((hscale) * 0x10000L), int((0.0) * 0x10000L),
                             int((0.0) * 0x10000L), int((1.0) * 0x10000L) )
            previous = 0
            pen.x = 5*64
            for current in text:
                face.set_transform( matrix, pen )
                face.load_char( current, flags)
                kerning = face.get_kerning( previous, current, FT_KERNING_UNSCALED )
                pen.x += kerning.x
                glyph = face.glyph
                bitmap = glyph.bitmap
                x, y = glyph.bitmap_left, glyph.bitmap_top
                w, h, p = bitmap.width, bitmap.rows, bitmap.pitch
                buff = np.array(bitmap.buffer, dtype=np.ubyte).reshape((h,p))
                if lcd:
                    # LCD bitmaps are 3x wider; RGB triplets fold into the
                    # last axis of Z.
                    Z[H-y:H-y+h,x:x+w/3].flat |= buff[:,:w].flatten()
                else:
                    Z[H-y:H-y+h,x:x+w].flat |= buff[:,:w].flatten()
                pen.x += glyph.advance.x
                previous = current
            # Move the pen down one line for the next point size.
            pen.y -= (size+4)*64
        # Gamma correction
        Z = (Z/255.0)**(gamma)
        Z = ((1-Z)*255).astype(np.ubyte)
        if lcd:
            I = Image.fromarray(Z, mode='RGB')
        else:
            I = Image.fromarray(Z, mode='L')
        return I
# Build the GUI and hand control to GTK.
if __name__ == "__main__":
    tweaker = pyInfitweaker()
    tweaker.main()
|
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf

from constants import nb_class
from tracking import get_dataframes

tf.compat.v1.enable_eager_execution() # Remove when switching to tf2
# Needed so pandas datetime values plot cleanly with matplotlib.
pd.plotting.register_matplotlib_converters()

###############################
# Methods for data formatting #
###############################
def get_n_probs_per_label(df):
    """Collect, for every true label, the softmax probability assigned to
    each of the 7 classes.

    Returns a 7x7 nested list: outputs[true_label][cls] is the list of
    probabilities given to class `cls` for boxes whose true label is
    `true_label`.  Empty cells are replaced by [-1.] so violinplot always
    has something to draw.
    """
    outputs = []
    for n in range(7):
        outputs.append([[], [], [], [], [], [], []])

    def handle_row(row):
        # Logits and labels are stored as stringified Python lists.
        classification_logits = eval(row["classification_logits"])
        right_labels = eval(row["label_boxes"])
        for i in range(len(classification_logits)):
            logits = classification_logits[i]
            right_label = right_labels[i]
            probs = tf.nn.softmax(logits).numpy().tolist()
            for n in range(7):
                outputs[right_label][n].append(probs[n])

    df.apply(handle_row, axis=1)
    for n in range(7):
        for i in range(len(outputs[n])):
            if outputs[n][i] == []:
                outputs[n][i] = [-1.]
    # BUG FIX: the original executed `outputs.append(outputs)` here, creating
    # an unused self-referential 8th entry; no caller reads beyond index 6,
    # so the line was removed.
    return outputs
def get_precision_distribution(df):
    """Extract precision data from the tracking dataframe.

    Returns a pair of pairs: [[no-regression ratios, final ratios],
    [no-regression raw counts, final raw counts]], one entry per row.
    """
    ratios = [[], []]
    raw_counts = [[], []]
    for _, row in df.iterrows():
        # Values are stored as stringified nested lists; take the first entry.
        before = eval(row["no_regr_surface_precision"])[0]
        after = eval(row["final_surface_precision"])[0]
        ratios[0].append(before[0] / before[1])
        ratios[1].append(after[0] / after[1])
        raw_counts[0].append(before[0])
        raw_counts[1].append(after[0])
    return [ratios, raw_counts]
#########################################
# Initializing dataframes and variables #
#########################################
df = get_dataframes()
nb_rows = df["index"].count()
print("Dataframe size: {}".format(nb_rows))
# Only the most recent 1000 rows are plotted.
df_tail = df.tail(1000)
all_probs_per_label = get_n_probs_per_label(df_tail)
precision_data = get_precision_distribution(df_tail)

############
# Plotting #
############
fig = plt.figure(figsize=(18, 12))
fig.canvas.set_window_title("Faster-RCNN graph - Last 1000 rows over {} total".format(nb_rows))

# Prob of label tail
plt.subplot(5, 2, 1)
probs_per_label = []
for k in range(7):
    # Probability assigned to the true label itself (diagonal entries).
    probs_per_label.append(all_probs_per_label[k][k])
parts = plt.violinplot(probs_per_label)
plt.xticks([])
plt.ylim(0., 1.)
plt.yticks([0., 1.])
for pc in parts["bodies"]:
    pc.set_alpha(1)
# Hide the min/max/center bars of the violins.
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
plt.title("Label Prob density")

# Prob of n label tail
for i in range(7):
    plt.subplot(5, 2, 2 + i)
    probs_per_label = all_probs_per_label[i]
    parts = plt.violinplot(probs_per_label)
    plt.xticks([])
    plt.ylim(0., 1.)
    plt.yticks([0., 1.])
    for pc in parts["bodies"]:
        pc.set_alpha(1)
        pc.set_facecolor("#D43F3A")
    parts["cmins"].set_alpha(0)
    parts["cmaxes"].set_alpha(0)
    parts["cbars"].set_alpha(0)
    plt.title("Prob density of {}".format(i))

# Precision distribution
plt.subplot(5, 2, 9)
parts = plt.violinplot(precision_data[0])
plt.xticks([1, 2], ["No Regr", "Final"])
plt.ylim(0., 1.)
plt.yticks([0., 1.])
for pc in parts["bodies"]:
    pc.set_alpha(1)
    pc.set_color("#F3C43A")
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
plt.title("Precision density")

# Coverage distribution
plt.subplot(5, 2, 10)
parts = plt.violinplot(precision_data[1])
plt.xticks([1, 2], ["No Regr", "Final"])
plt.yticks([144], ["Blob\nSurface"])
for pc in parts["bodies"]:
    pc.set_alpha(1)
    pc.set_color("#F3C43A")
parts["cmins"].set_alpha(0)
parts["cmaxes"].set_alpha(0)
parts["cbars"].set_alpha(0)
ax = plt.gca()
# Reference line at 144 (the full blob surface) for the coverage plot.
ax.axhline(y=144, color="black", lw=1., alpha=.2)
plt.title("Coverage density")
plt.show()
|
import os
import json
import pickle
import gc
import numpy as np
import pandas as pd
from spyro.utils import progress
from spyro.memory import ReplayBuffer
from spyro.policies import (
EpsilonGreedyPolicy, GreedyPolicy,
RandomPolicy, SoftmaxPolicy,
FixedActionPolicy
)
from spyro.agents import (
DQNAgent,
A3CAgent,
QuantileRegressionDQNAgent,
)
from spyro.value_estimation import NeuralValueEstimator
# Lookup tables translating class names stored in config files back to the
# actual classes they refer to.
POLICY_MAP = {
    "EpsilonGreedyPolicy": EpsilonGreedyPolicy,
    "GreedyPolicy": GreedyPolicy,
    "RandomPolicy": RandomPolicy,
    "SoftmaxPolicy": SoftmaxPolicy,
    "FixedActionPolicy": FixedActionPolicy
}

MEMORY_MAP = {
    "ReplayBuffer": ReplayBuffer
}

AGENT_MAP = {
    "DQN_Agent": DQNAgent,
    "A3C_Agent": A3CAgent,
    "QR_DQN_Agent": QuantileRegressionDQNAgent,
    "NeuralEstimator": NeuralValueEstimator
}

# Column layout of the simulation / test logs produced by the environments.
LOG_COLUMNS = ["t", "time", "incident_type", "location", "priority",
               "object_function", "vehicle_type", "vehicle_id", "dispatch_time",
               "turnout_time", "travel_time", "on_scene_time", "response_time",
               "target", "station", "base_station_of_vehicle", "crew_type"]
def init_agent_from_config(config_path, force_no_log=False):
    """Initialize an agent based on a config file from a previous run.

    Parameters
    ----------
    config_path: str
        The path to the config JSON file.
    force_no_log: bool, default=False
        If true, sets log=False in agent's init. Useful to prevent the new agent
        from logging in a subdirectory of the original logdir.

    Returns
    -------
    agent: spyro.agents.*
        A freshly constructed (untrained) agent matching the stored config.
    """
    # load config
    config = json.load(open(config_path, 'r'))
    # determine agent class
    agent_cls = AGENT_MAP[config["name"]]
    # set logging to False if specified
    if force_no_log:
        config["log"] = False
        del config["logdir"]
    # retrieve policy (optional section of the config)
    try:
        policy_config = config.pop("policy")
        has_policy = True
        policy_name = policy_config.pop("name")
        if policy_name == "EpsilonGreedyPolicy":
            # 'epsilon' is saved state, not a constructor argument.
            # NOTE(review): presumably the policy recomputes epsilon itself — confirm.
            del policy_config["epsilon"]
        policy = POLICY_MAP[policy_name](**policy_config)
    except KeyError:
        has_policy = False
    # retrieve memory (optional section of the config)
    try:
        memory_config = config.pop("memory")
        has_memory = True
        _ = memory_config.pop("name")
        memory = ReplayBuffer(**memory_config)
    except KeyError:
        has_memory = False
    # init agent; the positional layout depends on which components exist
    if has_policy and has_memory:
        agent = agent_cls(policy, memory, **config)
    elif has_policy:
        agent = agent_cls(policy, **config)
    elif has_memory:
        agent = agent_cls(memory, **config)
    else:
        agent = agent_cls(**config)
    progress("Agent reconstructed from config.")
    return agent
def load_trained_agent(dirpath, env_cls, env_params=None, **kwargs):
    """Load a pre-trained agent with its weights.

    Parameters
    ----------
    dirpath: str
        The path to the directory in which the model parameters and agent config
        file are stored.
    env_cls: class or str
        The environment to train on, if a string is provided, it must be the name of a gym env.
    env_params: dict, default=None
        Key-value pairings to pass to env_cls if a class is provided.
    **kwargs: any
        Forwarded to init_agent_from_config (e.g. force_no_log=True).

    Returns
    -------
    agent: spyro.agents.*
        An agent object with loaded / pre-trained weights.
    """
    config_path = os.path.join(dirpath, "agent_config.json")
    agent = init_agent_from_config(config_path, **kwargs)
    agent.load_weights(os.path.join(dirpath, "model.ckpt"), env_cls=env_cls, env_params=env_params)
    progress("Agent's weights loaded.")
    return agent
def evaluate_saved_agent(dirpath, env_cls, n_episodes=100000, tmax=1000, policy=None,
                         env_params=None, save=True, evaluator=None):
    """Load a trained and saved agent from disk and evaluate it on a test environment.

    Parameters
    ----------
    dirpath: str
        The path to the directory in which the model parameters and agent config
        file are stored.
    env_cls: class or str
        The environment to train on, if a string is provided, it must be the name of a gym env.
    n_episodes: int, default=100000
        The number of episodes to use for evaluation.
    tmax: int, default=1000
        The maximum number of steps per episode.
    policy: spyro policy, default=None
        Optional policy overriding the agent's own policy during evaluation.
    env_params: dict, default=None
        Key-value pairings to pass to env_cls if a class is provided.
    save: bool, default=True
        Whether to pickle the results and write the test log into dirpath.
    evaluator: fdsim.evaluation.Evaluator, default=None
        If given, used to summarize the test log into metrics.

    Returns
    -------
    results: any
        Output of agent.evaluate. Usually, this is a dictionary with 'mean_episode_reward',
        'total_episode_reward', and 'episode_length' as keys and numpy arrays as values.
    test_log: pd.DataFrame
        The simulation log of all tested episodes.
    summary: dict or None
        The evaluator's summary of the test log, or None when no evaluator was given.
    """
    # BUG FIX: env_params was hard-coded to None here, silently discarding
    # the caller's environment parameters when loading the weights.
    agent = load_trained_agent(dirpath, env_cls, env_params=env_params, force_no_log=True)
    progress("Start test run on {} episodes.".format(n_episodes))
    results = agent.evaluate(env_cls, n_episodes=n_episodes, tmax=tmax, policy=policy, env_params=env_params)
    test_log = agent.env.get_test_log()
    if save:
        progress("Saving results to {}.".format(dirpath))
        pickle.dump(results, open(os.path.join(dirpath, "test_results_dict.pkl"), "wb"))
        test_log.to_csv(os.path.join(dirpath, "test_log.csv"), index=False)
    if evaluator is not None:
        progress("Extracting metrics using the evaluator")
        summary = evaluator.evaluate(test_log)
    else:
        summary = None
    progress("Evaluation completed.")
    return results, test_log, summary
def print_test_summary(summary):
    """Print each measure's name followed by its transposed results table."""
    for name in summary:
        header = "\n{}\n-----------------".format(name)
        print(header)
        print(summary[name].T, end="\n\n")
def load_test_log(dirpath):
    """Load the test log from an agent's log directory.

    Parameters
    ----------
    dirpath: str
        The path to the agent's log directory.

    Returns
    -------
    log: pd.DataFrame
        The simulation log of the agent ran on the test episodes.
    """
    log_path = os.path.join(dirpath, "test_log.csv")
    return pd.read_csv(log_path)
def construct_test_log_from_episodes(test_episodes, log_columns=None):
    """Reconstruct a single simulation log from separate test episodes.

    Parameters
    ----------
    test_episodes: dict
        Maps episode number to a dict whose "log" entry is a 2d array with
        one row per deployment and one column per entry of log_columns.
    log_columns: list of str, default=None
        Column names of the log arrays; defaults to LOG_COLUMNS. The default
        is resolved lazily (backward-compatible with the old
        `log_columns=LOG_COLUMNS` signature).

    Returns
    -------
    df: pd.DataFrame
        The concatenated log with an added "episode" column.
    """
    if log_columns is None:
        log_columns = LOG_COLUMNS
    # Append the episode number as an extra column to every episode's log.
    concat_log = np.concatenate(
        [np.append(d["log"], np.ones((len(d["log"]), 1)) * key, axis=1)
         for key, d in test_episodes.items()]
    )
    df = pd.DataFrame(concat_log, columns=log_columns + ["episode"])
    for col in ["t", "dispatch_time", "turnout_time", "travel_time",
                "on_scene_time", "response_time", "target", "episode"]:
        # BUG FIX: np.float was removed in NumPy 1.24; builtin float is
        # the documented equivalent.
        df[col] = df[col].astype(float)
    return df
def evaluate_quantile_estimator(model, table_path):
    """Evaluate a Quantile Regression Value Estimator on a table of 'true' quantiles.

    Parameters
    ----------
    model: NeuralValueEstimator
        The trained model to evaluate.
    table_path: str
        The path to the pickled quantile table to evaluate on.

    Returns
    -------
    wasserstein: float
        The mean Wasserstein distances between the learned and provided distributions.
    """
    with open(table_path, "rb") as fh:
        quantile_table = pickle.load(fh)
    return model.evaluate_on_quantiles(quantile_table)
def evaluate_saved_quantile_estimator(model_dir, table_path=None, quantile_table=None):
    """Load a saved quantile estimator from disk and evaluate it on a quantile table.

    Parameters
    ----------
    model_dir: str
        The path to the directory in which the model parameters and agent config
        file are stored.
    table_path: str, default=None
        The path to the table to evaluate on. Ignored if quantile_table is provided
        directly.
    quantile_table: dict, default=None
        The table to evaluate on. If None, a path must be provided to load the table.

    Returns
    -------
    wasserstein: float
        The mean Wasserstein distances between the learned and provided distributions.
    """
    assert (table_path is not None) or (quantile_table is not None), \
        "One of 'table_path' and 'quantile_table' must be provided."
    # Load the table lazily when it was not handed in directly.
    if quantile_table is None:
        quantile_table = pickle.load(open(table_path, "rb"))
    # Reconstruct the model, then score it against the table.
    estimator = load_trained_agent(model_dir, None)
    return estimator.evaluate_on_quantiles(quantile_table)
def evaluate_log_no_external(log, evaluator):
    """Evaluate a test log disregarding the response times of external vehicles.

    Parameters
    ----------
    log: pd.DataFrame
        The test log / simulation log.
    evaluator: fdsim.evaluation.Evaluator
        The evaluator object used to extract performance metrics from the log.

    Returns
    -------
    summary: dict
        Summarized results by measure as a dictionary.
    """
    # Blank out external deployments (in place) so they do not count
    # towards the response-time metrics.
    external = log['station'] == 'EXTERNAL'
    log.loc[external, 'response_time'] = np.nan
    return evaluator.evaluate(log)
def get_all_log_summaries(evaluator, dirpath="./results/test2"):
    """Summarize every simulation log found in a directory.

    Every file in `dirpath` whose name contains 'log' is read as CSV and
    evaluated while disregarding external vehicles.

    Parameters
    ----------
    evaluator: fdsim.evaluation.Evaluator
        The evaluator object used to extract performance metrics from the log.
    dirpath: str
        The path in which the simulation logs / test logs reside.

    Returns
    -------
    summaries: dict
        Maps file name to its simulation summary.
    """
    summaries = {}
    for fname in os.listdir(dirpath):
        if 'log' not in fname:
            continue
        log = pd.read_csv(os.path.join(dirpath, fname))
        summaries[fname] = evaluate_log_no_external(log, evaluator)
    return summaries
def create_results_table(summaries=None, evaluator=None, dirpath=None):
    """Build one combined results table per measure from log summaries.

    Parameters
    ----------
    summaries: dict, default=None
        The summaries to create results tables from. If None, dirpath and evaluator
        must be provided and the summaries will be made first.
    evaluator: fdsim.evaluation.Evaluator
        The evaluator object used to extract performance metrics from the log. Ignored
        if summaries is not None.
    dirpath: str, default=None
        The path in which the simulation logs / test logs reside. Ignored
        if summaries is not None.

    Returns
    -------
    dfs: dict
        Dictionary of results tables, one per measure in the evaluator / summaries.
    """
    if summaries is None:
        assert dirpath is not None, "either summaries or dirpath must be provided"
        summaries = get_all_log_summaries(evaluator, dirpath=dirpath)
    # The first summary defines the set of measures.
    first_summary = next(iter(summaries.values()))
    dfs = {}
    for measure in first_summary.keys():
        frames = [tables[measure].assign(file=fname)
                  for fname, tables in summaries.items()]
        dfs[measure] = pd.concat(frames).reset_index(drop=True)
    return dfs
def create_results_table_differences(dirpath, evaluator):
    """Create results tables looking only at deployments for which the vehicle did
    not come from the base station for at least one of the logs.

    Parameters
    ----------
    dirpath: str
        The path with all simulation/test logs to consider. All relevant files are
        assumed to have 'log' in their name.
    evaluator: fdsim.evaluation.Evaluator
        Evaluator object used to evaluate the logs.

    Returns
    -------
    results_table: dict
        a pd.DataFrame for every measure in the Evaluator.
    """
    fs = [f for f in os.listdir(dirpath) if 'log' in f]
    logs = [pd.read_csv(os.path.join(dirpath, f)) for f in fs]
    # Keep a common prefix of episodes so the logs line up row-for-row.
    logs = [log.loc[log['episode'].astype(int) < 49999, :] for log in logs]
    from_home_by_log = [np.equal(log['station'].values, log['base_station_of_vehicle'].values)
                        .reshape(-1, 1) for log in logs]
    print([log.shape for log in logs])
    from_home = np.concatenate(from_home_by_log, axis=1)
    # A row is interesting when at least one log dispatched from elsewhere.
    not_all_home = np.not_equal(from_home.sum(axis=1), from_home.shape[1])
    print("{} of {} are not all from home station".format(not_all_home.sum(), len(not_all_home)))
    short_logs = [log.loc[not_all_home, :] for log in logs]
    # BUG FIX: the original called the undefined 'evaluate_log_filtered' and
    # passed `evaluator` positionally into create_results_table's `summaries`
    # parameter; evaluate the filtered logs and pass summaries by keyword.
    summaries = {fs[i]: evaluate_log_no_external(log, evaluator)
                 for i, log in enumerate(short_logs)}
    return create_results_table(summaries=summaries)
def evaluate_all_saved_agents(dirpath, resultspath, evaluator, env_cls,
                              env_params=None, n_episodes=50000, policy=None,
                              params_to_add=None):
    """Evaluate all agents in a directory. All subdirectories in dirpath
    are assumed to be agent folders containing weights and config. Test logs will
    be stored in resultspath. Only agents in dirpath for which there is no log
    yet in resultspath will be evaluated.
    """
    for f in os.listdir(dirpath):
        # Skip agents that already have a test log in resultspath.
        if f + 'test_log.csv' not in os.listdir(resultspath):
            results, log, _ = evaluate_saved_agent(
                os.path.join(dirpath, f),
                env_cls,
                env_params=env_params,
                policy=policy,
                n_episodes=n_episodes
            )
            log.to_csv(os.path.join(resultspath, f + "test_log.csv"))
            # Release the (potentially large) objects before the next agent.
            del results, log
            gc.collect()
    summaries = get_all_log_summaries(evaluator, dirpath=resultspath)
    table = create_results_table(summaries=summaries)
    for tab in table.values():
        # Strip the trailing "test_log.csv" (12 chars) to recover agent names.
        tab.index = pd.Index([s[:-12] for s in tab['file'].values])
    if params_to_add is not None:
        # Copy selected config values into each table; "a:b" addresses a
        # nested config key.
        for p in params_to_add:
            for tab in table.values():
                tab[p] = np.nan
        for f in os.listdir(dirpath):
            config = json.load(open(os.path.join(dirpath, f, 'agent_config.json'), 'rb'))
            for p in params_to_add:
                for tab in table.values():
                    if ':' in p:
                        tab.loc[f, p] = config[p.split(':')[0]][p.split(':')[1]]
                    else:
                        tab.loc[f, p] = config[p]
    return table
|
from string import punctuation
def nothing_special(s):
    """Return *s* with all ASCII punctuation removed.

    Non-string inputs (anything without a .translate method) yield the
    sentinel 'Not a string!'.
    """
    try:
        # BUG FIX: the two-argument str.translate(None, deletechars) form is
        # Python 2 only; on Python 3 it raised TypeError (uncaught). The
        # str.maketrans deletion table is the Python 3 equivalent.
        return s.translate(str.maketrans('', '', punctuation))
    except AttributeError:
        return 'Not a string!'
|
from hamcrest import assert_that, equal_to
from bromine.utils.geometry import Rectangle, RectSize
from bromine.utils.wait import Wait
from selenium.common.exceptions import TimeoutException
class SimpleVerticalLayout(object):
    """Splits a page taller than the viewport into a vertical sequence of
    non-overlapping tiles (for stitched full-page screenshots).

    Assumes there is no horizontal scrolling (total width == visible width).
    """

    def __init__(self, page):
        total_width, total_height = page.size
        visible_width, visible_height = page.visible_size
        window = page.browser.window
        # This layout only handles vertical scrolling.
        assert total_width == visible_width
        self._page = page
        self._window = window
        self._width = total_width
        self._total_height = total_height
        # A tile can never be taller than the viewport.
        self._max_tile_height = visible_height
        self._address_bar_height = window.address_bar_height
        # Height of the shadow cast by the browser bars; tiles are shrunk
        # so the shadow is never captured.
        self._bar_shadow_height = window.bar_shadow_height

    def tiles(self):
        """Generate PagePortion tiles covering the page top to bottom."""
        previous_tile = None
        remaining_scroll = self._total_height
        while remaining_scroll > 0:
            tile = self._get_next_tile(remaining_scroll, previous_tile)
            yield tile
            previous_tile = tile
            remaining_scroll -= previous_tile.height

    def _get_next_tile(self, remaining_scroll, previous_tile):
        # Dispatch on position: first, last, or intermediate tile.
        if not previous_tile:
            return self._get_first_tile()
        elif self._is_last_tile(remaining_scroll):
            return self._get_last_tile(remaining_scroll, previous_tile)
        else:
            return self._get_intermediate_tile(previous_tile)

    def _get_first_tile(self):
        if self._total_height <= self._max_tile_height:
            # Whole page fits in a single tile.
            # NOTE(review): uses the viewport height rather than the (smaller)
            # total page height here — confirm this is intended.
            tile_height = self._max_tile_height
        else:
            # Leave room at the bottom of the tile for the bar shadow.
            tile_height = self._max_tile_height - self._bar_shadow_height
        return self._build_tile(0, 0, tile_height)

    def _build_tile(self, top_margin, top, height):
        return PagePortion(self._page, (0, top), (self._width, height), (0, top_margin))

    def _is_last_tile(self, remaining_scroll):
        return remaining_scroll <= self._max_tile_height - self._bar_shadow_height

    def _get_last_tile(self, remaining_scroll, previous_tile):
        offset = self._get_next_tile_offset(previous_tile)
        height = remaining_scroll
        # Push the last tile to the bottom of the viewport, away from the bars.
        margin = self._max_tile_height - height
        return self._build_tile(margin, offset, height)

    def _get_next_tile_offset(self, previous_tile):
        # Tiles are adjacent: start one pixel below the previous one.
        return previous_tile.bottom +1

    def _get_intermediate_tile(self, previous_tile):
        offset = self._get_next_tile_offset(previous_tile)
        # Shadow can appear at both the top and the bottom of a middle tile.
        height = self._max_tile_height - 2*self._bar_shadow_height
        margin = self._bar_shadow_height
        return self._build_tile(margin, offset, height)
class PagePortion(object):
    """A rectangular slice of a page, plus the margin to keep between the
    slice and the viewport edge when scrolling it into view."""

    def __init__(self, page, page_offset, size, margin):
        self._page = page
        self._content = Rectangle.from_corner_and_size(page_offset, size)
        self._margin = RectSize(*margin)

    @property
    def page_offset(self):
        # Upper-left corner of the portion within the page.
        return self._content.upper_left_corner

    @property
    def size(self):
        return self._content.size

    @property
    def height(self):
        return self._content.height

    @property
    def bottom(self):
        return self._content.bottom

    @property
    def margin(self):
        return self._margin

    def scroll_into_view(self):
        """Scroll the page so this portion sits `margin` below the viewport edge."""
        in_view_position = self.page_offset - self._margin
        self._page.scroll.to(*in_view_position)
        has_scrolled = lambda: self._page.scroll.level == in_view_position
        try:
            # Scrolling is asynchronous in some browsers; poll briefly.
            Wait(2, poll_frequency=0.01).until(has_scrolled)
        except TimeoutException:
            pass
        # Fail loudly if the browser still has not reached the target.
        assert_that(self._page.scroll.level, equal_to(in_view_position))
|
import requests, random
class Unsplash:
    """Thin wrapper around the Unsplash REST API returning image URLs."""

    def __init__(self):
        # Local save path for wallpapers and the Unsplash API access key.
        self.path = "YourPath/unsplash.jpg"
        self.KEY = "YourKey"

    def get_random_image(self):
        """Return the full-size URL of a random photo."""
        response = requests.get("https://api.unsplash.com/photos/random/?client_id=" + self.KEY).json()
        return response["urls"]["full"]

    def get_photo(self, term):
        """Return the full-size URL of one of the first six search results for term."""
        random_image = random.randint(0, 5)
        response = requests.get("https://api.unsplash.com/search/photos/?page=1&query=" + term + "&client_id=" + self.KEY).json()
        return response["results"][random_image]["urls"]["full"]

    def get_photo_urls(self, term):
        """Return the urls dict (all available sizes) for a random search result."""
        random_image = random.randint(0, 5)
        response = requests.get("https://api.unsplash.com/search/photos/?page=1&query=" + term + "&client_id=" + self.KEY).json()
        return response["results"][random_image]["urls"]

    def get_collection(self, collection):
        """Return a full-size URL for a search on the collection name.

        NOTE(review): this duplicates get_photo and hits the photo-search
        endpoint, not the collections endpoint — confirm intent.
        """
        random_image = random.randint(0, 5)
        response = requests.get("https://api.unsplash.com/search/photos/?page=1&query=" + collection + "&client_id=" + self.KEY).json()
        return response["results"][random_image]["urls"]["full"]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 17 14:10:00 2020
@author: peter_goodridge
"""
from pymongo import MongoClient
import os
from flair.data import Sentence, build_spacy_tokenizer
from flair.models import SequenceTagger
from flair.embeddings import BertEmbeddings
import spacy
import json
import pandas as pd
from datetime import datetime
import re
from flair.tokenization import SpacyTokenizer
"""
connection = MongoClient('ds159204.mlab.com', 59204, retryWrites = False)
db = connection['oxford']
db.authenticate('oxford', 'datasci3nce')
listings_raw = []
docs = db.indeed_perm.find()
for item in docs:
listings_raw.append(item)
"""
# Load the custom-trained NER model and the scraped job listings.
tagger = SequenceTagger.load(r'/content/drive/My Drive/best-prodigy-model.pt')
listings_raw = pd.read_csv('indeed_listings3.csv')
#from flair.models import SequenceTagger
#test_tagger = SequenceTagger.load('ner')
import copy
# Blank English pipeline used only to split descriptions into sentences.
sent_nlp = spacy.blank("en")
sent_nlp.add_pipe(sent_nlp.create_pipe('sentencizer')) # updated
generic_re = r'We are a.+\n'
# Separate blank pipeline backing the flair tokenizer.
tokenizer_nlp = spacy.blank("en")
tokenizer = SpacyTokenizer(tokenizer_nlp)
rows = []
all_sentences = []
all_meta = []
# Split every job description into sentences; all_meta stays aligned with
# all_sentences by index so entities can be traced back to their listing.
for index, row in listings_raw.iterrows():
    company = row['company']
    job_title = row['job_title']
    location = row['location']
    job_id = row['job_id']
    job_desc = row['job_desc']
    doc = sent_nlp(job_desc)
    for sent in doc.sents:
        sent_string = sent.string.strip()
        all_meta.append({'company': company,
                         'job_title': job_title, 'location': location, 'job_id': job_id})
        # Renamed from `doc` to avoid shadowing the spaCy doc being iterated.
        flair_sentence = Sentence(sent_string, use_tokenizer=SpacyTokenizer(sent_nlp))
        all_sentences.append(flair_sentence)
# Tag all sentences in one batched call.
tagger.predict(all_sentences)
all_data = []
# One output row per recognized entity ("skill"), carrying its listing's metadata.
for i in range(len(all_sentences)):
    item = all_sentences[i]
    for entity in item.get_spans('ner'):
        meta = copy.copy(all_meta[i])
        meta['skill'] = entity.text
        all_data.append(meta)
df = pd.DataFrame(all_data)
# BUG FIX: all_data is a plain list and has no to_csv; write the DataFrame.
df.to_csv('indeed_output3.csv')
import numpy as np
def sample_LRRNN(N, params):
    """Sample a low-rank RNN connectivity matrix W = g*xi + m n^T / N.

    Parameters
    ----------
    N: int
        Number of units.
    params: dict
        Must contain "nettype" ("rank1_spont" or "rank1_input") plus the
        statistics of the rank-one vectors (g, Mm, Mn, Sm, Sn, and for
        "rank1_input" also MI, SmI, SnI, Sperp).

    Returns
    -------
    LRRNN: dict
        Contains "W", the rank-one vectors "m" and "n", the random bulk
        "xi" and, for "rank1_input", the input pattern "I".

    Raises
    ------
    NotImplementedError
        For unknown nettype values.
    """
    nettype = params["nettype"]
    if nettype == "rank1_spont":
        g = params["g"]
        Mm = params["Mm"]
        Mn = params["Mn"]
        Sm = params["Sm"]
        Sn = params["Sn"]
        x1 = np.random.normal(0.0, 1.0, (N, 1))
        x2 = np.random.normal(0.0, 1.0, (N, 1))
        m = Mm + Sm * x1
        n = Mn + Sn * x2
        xi = np.random.normal(0.0, 1.0 / np.sqrt(N), (N, N))
        P = np.dot(m, n.T) / N
        W = g * xi + P
        LRRNN = {"W": W, "m": m, "n": n, "xi": xi}
    elif nettype == "rank1_input":
        g = params["g"]
        Mm = params["Mm"]
        Mn = params["Mn"]
        MI = params["MI"]
        Sm = params["Sm"]
        Sn = params["Sn"]
        SmI = params["SmI"]
        SnI = params["SnI"]
        Sperp = params["Sperp"]
        x1 = np.random.normal(0.0, 1.0, (N, 1))
        x2 = np.random.normal(0.0, 1.0, (N, 1))
        h = np.random.normal(0.0, 1.0, (N, 1))
        m = Mm + Sm * x1
        n = Mn + Sn * x2
        # Input correlated with m and n (via x1, x2) plus an orthogonal part.
        I = MI + (SmI / Sm) * x1 + (SnI / Sn) * x2 + Sperp * h
        xi = np.random.normal(0.0, 1.0 / np.sqrt(N), (N, N))
        P = np.dot(m, n.T) / N
        W = g * xi + P
        LRRNN = {"W": W, "m": m, "n": n, "xi": xi, "I": I}
    else:
        # BUG FIX: `raise NotImplemented()` raised a TypeError because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError("unknown nettype: {}".format(nettype))
    return LRRNN
def sim_RNN(x0, W, I, dt, tau, T):
    """Euler-integrate the rate network tau * dx/dt = -x + W tanh(x) + I.

    Parameters: x0 initial state of shape (N,), W weight matrix, I input,
    dt integration step, tau time constant, T number of steps.
    Returns an (N, T + 1) array holding the state at every step.
    """
    N = x0.shape[0]
    step = dt / tau
    trajectory = np.zeros((N, T + 1))
    trajectory[:, 0] = x0
    state = x0
    for t in range(1, T + 1):
        delta = step * (np.dot(W, np.tanh(state)) - state + I)
        state = state + delta
        trajectory[:, t] = state
    return trajectory
def measure_mu(kappa, m, I, t_start):
    """Mean-field mean input: average of kappa*m + I over units.

    t_start is accepted for interface symmetry with the other measure_*
    helpers but is not used here.
    """
    total_input = kappa * m + I
    return np.mean(total_input)
def measure_kappa(x, n, t_start):
    """Overlap of the time-averaged rates with the vector n, normalized by N.

    x has shape (N, T); only time points from t_start onward are used.
    """
    N = len(n)
    time_means = np.mean(x[:, t_start:], axis=1)
    mean_rates = np.tanh(time_means)
    return np.dot(mean_rates, n) / N
def measure_vars(x, t_start):
    """Split the variability of x (from t_start onward) into two parts.

    Returns (delta_inf, delta_T): the variance across units of the per-unit
    time means (static part), and the mean across units of the per-unit
    temporal variances (temporal part).
    """
    window = x[:, t_start:]
    per_unit_mean = np.mean(window, axis=1)
    per_unit_var = np.var(window, axis=1)
    return np.var(per_unit_mean), np.mean(per_unit_var)
|
from __future__ import division
from sklearn.cluster import KMeans
from numbers import Number
#from pandas import DataFrame
import sys, codecs, numpy
import sklearn
from sklearn.manifold import TSNE
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
class autovivify_list(dict):
    '''A pickleable version of collections.defaultdict'''

    def __missing__(self, key):
        '''Given a missing key, set initial value to an empty list'''
        fresh = []
        self[key] = fresh
        return fresh

    def __add__(self, x):
        '''Override addition for numeric types when self is empty'''
        if self or not isinstance(x, Number):
            raise ValueError
        return x

    def __sub__(self, x):
        '''Also provide subtraction method'''
        if self or not isinstance(x, Number):
            raise ValueError
        return -1 * x
def build_word_vector_matrix(vector_file, n_words, datafile):
    '''Return the vectors and labels for the first n_words in vector file.

    Only words listed in the first comma-separated column of `datafile`
    are kept.  Returns (matrix, labels): a 2d numpy array of vectors and
    the matching list of words.
    '''
    # Words of interest: first field of each line in the data file.
    # PERF FIX: stored in a set (deduplicated) so the per-line membership
    # test below is O(1) instead of O(n) on a list.
    wanted = set()
    with codecs.open(datafile, 'r', 'utf-8') as f1:
        for line in f1:
            wanted.add(line.split(',')[0])
    print(len(wanted))
    numpy_arrays = []
    labels_array = []
    with codecs.open(vector_file, 'r', 'utf-8') as f:
        for c, r in enumerate(f):
            sr = r.split()
            if sr[0] in wanted:
                labels_array.append(sr[0])
                numpy_arrays.append(numpy.array([float(i) for i in sr[1:]]))
            # Stop early once n_words lines have been scanned.
            if c == n_words:
                return numpy.array(numpy_arrays), labels_array
    print(len(labels_array))
    return numpy.array(numpy_arrays), labels_array
def find_word_clusters(labels_array, cluster_labels):
    '''Group the words of *labels_array* by their k-means cluster id.'''
    cluster_to_words = autovivify_list()
    for idx, cluster_id in enumerate(cluster_labels):
        cluster_to_words[cluster_id].append(labels_array[idx])
    return cluster_to_words
def read_from_pickle(path):
    """Yield every object stored in the pickle file at *path*.

    Repeatedly unpickles from a single open handle until EOF, so files
    written by successive ``pickle.dump`` calls are fully drained.
    """
    import pickle  # BUG FIX: ``pickle`` was never imported in this script
    with open(path, 'rb') as file:
        try:
            while True:
                yield pickle.load(file)
        except EOFError:
            pass
if __name__ == "__main__":
input_vector_file = sys.argv[1] # Vector file input (e.g. glove.6B.300d.txt)
n_words = int(sys.argv[2]) # Number of words to analyze
reduction_factor = float(sys.argv[3]) # Amount of dimension reduction {0,1}
datafile = sys.argv[4]
df, labels_array = build_word_vector_matrix(input_vector_file, n_words,datafile)
tsne = TSNE(n_components =2,random_state = 0) # dimensions reduction
df_3d = tsne.fit_transform(df)
n_clusters =3 # Number of clusters to make
kmeans_model = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
kmeans_model.fit(df_3d)
cluster_labels = kmeans_model.labels_
cluster_inertia = kmeans_model.inertia_
cluster_to_words = find_word_clusters(labels_array, cluster_labels)
for c in cluster_to_words:
print(cluster_to_words[c])
print("\n")
tsne = TSNE(n_components = 2,random_state = 0)
df_2d = tsne.fit_transform(df)
plt.scatter(x=df_2d[:,0],y=df_2d[:,1],c=cluster_labels)
plt.show()
|
n = int(input('Enter:'))
if n > 0:
    # A "strong" number equals the sum of the factorials of its digits
    # (e.g. 145 = 1! + 4! + 5!).
    remaining = n
    digit_fact_sum = 0
    while remaining > 0:
        digit = remaining % 10
        fact = 1
        for k in range(2, digit + 1):  # 0! and 1! are both 1
            fact *= k
        digit_fact_sum += fact
        remaining //= 10
    print('Strong num' if digit_fact_sum == n else 'No')
else:
    print('No')
# def fact(k):
# if k==0:
# return 1
# else:
# return k*fact(k-1)
# n=int(input('Enter:'))
# if n>0:
# temp=n
# s=0
# while temp>0:
# dig=temp%10
# s+=fact(dig)
# temp//=10
# if s==n:
# print('Strong num')
# else:
# print('No')
# else:
# print('No')
|
import json
import requests
import websocket
import random,time
from websocket import create_connection
# Feed a local Django-channels endpoint with fake sensor readings.
ws = create_connection('ws://localhost:8000/ws/some_url/')
try:
    # 1000 samples, one every 3 seconds.
    for i in range(1000):
        time.sleep(3)
        ws.send(json.dumps({'Temperatura':random.randint(20,60), "Humedad":random.randint(20,60) }))
finally:
    # ROBUSTNESS FIX: close the socket even if send()/sleep raises
    # mid-loop; the original only closed it on the success path.
    ws.close()
import os
import sys
import ntpath
from os import listdir
from os.path import isfile, join
from base import SingleInstance
import settings
from doxieautomator.doxie import DoxieAutomator
import dropbox
class DoxieToDropbox(SingleInstance):
    """Polls a Doxie scanner for new scans and uploads them to Dropbox.

    Runs as a single OS-wide instance (see LOCK_PATH); every non-hidden
    file found under settings.DOXIE_FOLDER is uploaded to the Dropbox
    root and then deleted locally.
    """

    LOCK_PATH = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), "DoxieToDropbox-lock")
    doxie = None   # DoxieAutomator instance, created in initialize()
    client = None  # dropbox.Dropbox client, created in initialize()

    def initialize(self):
        """Connect to Dropbox and start watching the Doxie scanner."""
        self.log(u"Looking for Doxie on %s"%(settings.DOXIE_SERVER))
        self.client = dropbox.Dropbox(settings.DROPBOX_ACCESS_TOKEN)
        self.log(u"Dropbox connection: %s"%(self.client))
        self.doxie = DoxieAutomator()
        self.doxie.bind_to(self.notify_new_file)

    def loop(self):
        """One poll cycle: run the Doxie automator, then sweep the folder."""
        self.doxie.loop()
        self.check_dir_for_new_files()

    def check_dir_for_new_files(self):
        """Upload every non-hidden file currently in the Doxie folder."""
        files = []
        for root, directories, filenames in os.walk(settings.DOXIE_FOLDER):
            for filename in filenames:
                if not filename.startswith('.'):
                    files.append(os.path.join(root,filename))
        self.log("Found %s files in the doxie folder %s"%(len(files), settings.DOXIE_FOLDER))
        for file in files:
            self.upload_file(file)

    def notify_new_file(self, local_filename):
        """Callback from DoxieAutomator when a new scan has been downloaded."""
        self.log(u"New file downloaded from Doxie to: %s"%(local_filename))
        self.check_dir_for_new_files()

    def upload_file(self, local_filepath):
        """Upload one file to the Dropbox root; delete it locally on success."""
        self.log(u"Going to upload file %s"%(local_filepath))
        filename = ntpath.basename(local_filepath)
        abs_filename = u"/%s"%(filename)
        try:
            # BUG FIX: the original opened the file and never closed the
            # handle; a context manager releases it even if the upload raises.
            with open(local_filepath, 'rb') as f:
                response = self.client.files_upload(bytes(f.read()), abs_filename)
            if os.path.exists(local_filepath):
                os.remove(local_filepath)
                self.log('File uploaded to dropbox and removed from the local directory')
        except dropbox.exceptions.ApiError as e:
            self.log('Error uploading file to Dropbox API: %s'%e)
|
# Read a vector of 10 real numbers and print them in reverse order.
vetor = []
for _ in range(10):
    vetor.append(float(input("Digite um número: ")))
for valor in reversed(vetor):
    print("Vetor Lido: ", valor)
|
import commands
import math
def i2cGetWord(addr):
    """Read a 16-bit word from the device at I2C address 0x68 (the
    MPU6050) on bus 1 via the i2cget CLI and return it as a
    byte-swapped hex-digit string.

    i2cget -w prints e.g. '0xbbaa'; the two bytes are reordered here so
    the result reads high-byte-first ('aabb').  Assumes the output is
    always exactly that 6-character shape — anything shorter raises
    IndexError.
    """
    out = commands.getoutput("sudo i2cget -y 1 0x68 "+ addr + " w")
    return (out[4]+out[5]+out[2]+out[3])
def i2cGetWord_HMC5883L(addr):
    """Same byte-swapped word read, but for the HMC5883L compass at
    I2C address 0x1e."""
    out = commands.getoutput("sudo i2cget -y 1 0x1e "+ addr + " w")
    return (out[4]+out[5]+out[2]+out[3])
#MPU6050
# Thin wrappers naming the raw sensor registers; each returns the
# byte-swapped hex string from i2cGetWord.  Register numbers match the
# MPU6050 register map (ACCEL_*OUT_H at 0x3B/0x3D/0x3F, GYRO_*OUT_H at
# 0x43/0x45/0x47) — confirm against the datasheet if the wiring differs.
def accel_X():
    return i2cGetWord("0x3B")
def accel_Y():
    return i2cGetWord("0x3D")
def accel_Z():
    return i2cGetWord("0x3F")
def gyro_roll():
    return i2cGetWord("0x43")
def gyro_pitch():
    return i2cGetWord("0x45")
def gyro_yaw():
    return i2cGetWord("0x47")
def trans_accel(a):
    """Convert a raw accelerometer count to m/s^2 (2048 LSB per g at
    the ±16 g full-scale setting, times standard gravity)."""
    return (a / 2048.0) * 9.80665

def trans_gyro(g):
    """Convert a raw gyro count to deg/s (16.4 LSB per deg/s)."""
    return g / 16.4

def dist(a, b):
    """Euclidean length of the 2-D vector (a, b)."""
    return math.sqrt(a * a + b * b)
#def get_y_rotation(x,y,z):
#radians = math.atan2(x, dist(y,z))
#return -math.degrees(radians)
#def get_x_rotation(x,y,z):
#radians = math.atan2(y, dist(x,z))
#return math.degrees(radians)
#HMC5883
def mag_x():
    # Data-output X MSB register (0x03)
    return i2cGetWord_HMC5883L("0x03")
def mag_y():
    # 0x07 — on the HMC5883L the data registers are ordered X, Z, Y,
    # so 0x07 is the Y axis (presumably intentional; confirm vs datasheet)
    return i2cGetWord_HMC5883L("0x07")
def mag_z():
    # 0x05 — Z axis (sits between X and Y in this chip's register map)
    return i2cGetWord_HMC5883L("0x05")
def trans_magx(mgx):
    """Scale a raw X magnetometer count to milligauss (0.92 mG/LSB)."""
    return mgx * 0.92

def trans_magy(mgy):
    """Scale a raw Y magnetometer count to milligauss (0.92 mG/LSB)."""
    return mgy * 0.92

def trans_magz(mgz):
    """Scale a raw Z magnetometer count to milligauss (0.92 mG/LSB)."""
    return mgz * 0.92
def vectorH(mgx, mgy):
    """Magnitude of the horizontal magnetic-field component.

    BUG FIX: the original called ``math.sqrt(vec_H)`` but discarded the
    result and returned the *squared* sum.  The square root is now
    returned, matching vec_H's use as a length in
    trans_magneticvecterI (atan2 of z over the horizontal magnitude).
    """
    return math.sqrt((mgx ** 2) + (mgy ** 2))
def vectorR(mgx, mgy, mgz):
    """Magnitude of the full 3-D magnetic-field vector.

    BUG FIX: the original built the tuple ``(mgx, mgy, mgz)`` and
    passed it to math.sqrt — a guaranteed TypeError at runtime.  It now
    computes the Euclidean norm of the three components.
    """
    return math.sqrt(mgx ** 2 + mgy ** 2 + mgz ** 2)
def trans_magneticvecterD(mgy, mgx):
    """Magnetic declination: heading of (mgx, mgy) in degrees."""
    return math.degrees(math.atan2(mgy, mgx))

def trans_magneticvecterI(mgz, vec_H):
    """Magnetic inclination: dip of the field vs the horizontal
    magnitude *vec_H*, in degrees."""
    return math.degrees(math.atan2(mgz, vec_H))

def vector(radiansD):
    """Convert a declination angle (in degrees, despite the parameter
    name) into a bearing measured from 'up' (90 - angle)."""
    return 90.0 - radiansD
def Hex2Dec(str0):
    """Parse a lowercase hexadecimal string into an int.

    Only the characters 0-9 and a-f are accepted; on any other
    character (including uppercase hex) the function returns the
    sentinel string "baka" instead of raising.  An empty string
    parses to 0.
    """
    value = 0
    for ch in str0:
        if '0' <= ch <= '9':
            digit = int(ch)
        elif 'a' <= ch <= 'f':
            digit = ord(ch) - ord('a') + 10
        else:
            return "baka"
        value = value * 16 + digit
    return value
def sing(m):
    """Reinterpret an unsigned 16-bit value as signed two's complement.

    (Name kept for compatibility; presumably a typo for "sign".)
    The original had a third branch returning the string "continue",
    unreachable for any int since the two comparisons are exhaustive;
    it has been removed.
    """
    if m >= 32768:
        return m - 65536
    return m
def __init__():
    """One-time hardware setup: configure the HMC5883L compass via
    i2cset shell calls.  (Module-level function oddly named __init__;
    it is invoked from the __main__ block below, not by a class.)
    """
    #MPU6050
    #commands.getoutput("sudo i2cset -y 1 0x68 0x6B 0x00")
    #commands.getoutput("sudo i2cset -y 1 0x68 0x6C 0x00")
    #print "initialize_ok"
    #commands.getoutput("sudo i2cset -y 1 0x68 0x1B 0x18")
    #commands.getoutput("sudo i2cset -y 1 0x68 0x1C 0x18")
    #print "scale 16[G]_2500[deg/sec]"
    #HMC5883L
    # Mode register (0x02) = 0x00: continuous-measurement mode
    commands.getoutput("sudo i2cset -y 1 0x1e 0x02 0x00")
    # NOTE(review): this i2cset passes register 0x09 with no data byte,
    # and 0x09 is not a standard HMC5883L configuration register —
    # looks like a missing/incorrect operand; confirm against the
    # original wiring notes.
    commands.getoutput("sudo i2cset -y 1 0x1e 0x09")
    commands.getoutput("sudo i2cset -y 1 0x1e 0x20 0x40") #0.92 [mG/LSB]
    #print "X [m/sec^2],Y [m/sec^2],Z [m/sec^2],roll [deg/sec],pitch [deg/sec],yaw [deg/sec],x_revolution [deg],y_revolution [deg]"
if __name__ == '__main__':
    # Python 2 script (print statement below): initialize the compass,
    # then emit 10 CSV rows of x/y field strength [mG], declination
    # [deg] and bearing [deg] on stdout.
    __init__()
    for x in range(0,10):
        #print x
        #print "X_axics,",trans_accel(sing(Hex2Dec(accel_X()))), ",[m/sec^2]"
        #print "Y_axics,",trans_accel(sing(Hex2Dec(accel_Y()))), ",[m/sec^2]"
        #print "Z_axics,",trans_accel(sing(Hex2Dec(accel_Z()))), ",[m/sec^2]"
        #print "Roll,",trans_gyro(sing(Hex2Dec(gyro_roll()))), ",[deg/sec]"
        #print "Pitch,",trans_gyro(sing(Hex2Dec(gyro_pitch()))), ",[deg/sec]"
        #print "Yaw,",trans_gyro(sing(Hex2Dec(gyro_yaw()))), ",[deg/sec]"
        #print trans_accel(sing(Hex2Dec(accel_X()))),",",trans_accel(sing(Hex2Dec(accel_Y()))),",",trans_accel(sing(Hex2Dec(accel_Z()))),",",trans_gyro(sing(Hex2Dec(gyro_roll()))),",",trans_gyro(sing(Hex2Dec(gyro_pitch()))),",",trans_gyro(sing(Hex2Dec(gyro_yaw()))),",",get_x_rotation(trans_accel(sing(Hex2Dec(accel_X()))),trans_accel(sing(Hex2Dec(accel_Y()))),trans_accel(sing(Hex2Dec(accel_Z())))),",",get_y_rotation(trans_accel(sing(Hex2Dec(accel_X()))),trans_accel(sing(Hex2Dec(accel_Y()))),trans_accel(sing(Hex2Dec(accel_Z()))))
        #print sing(Hex2Dec(mag_x())),",",sing(Hex2Dec(mag_y())),",",sing(Hex2Dec(mag_z()))
        # Pipeline per axis: read hex word -> Hex2Dec -> sing (signed) ->
        # trans_mag* (mG); then declination from atan2 and bearing.
        print trans_magx(sing(Hex2Dec(mag_x()))),",",trans_magy(sing(Hex2Dec(mag_y()))),",",trans_magneticvecterD(trans_magy(sing(Hex2Dec(mag_y()))),trans_magx(sing(Hex2Dec(mag_x())))),",",vector(trans_magneticvecterD(trans_magy(sing(Hex2Dec(mag_y()))),trans_magx(sing(Hex2Dec(mag_x())))))
|
import requests
import csv
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
# Scrape politifact.com fact-check listings into a CSV of
# (headline, 1-if-true-else-0) rows.
start = 1
end = 501
headlines = []
news = []
target = []
user_agent = UserAgent()
for i in range(start, end):  # iterating through web pages
    r = requests.get(f'https://www.politifact.com/factchecks/list/?page={i}', headers={"user-agent": user_agent.chrome})
    soup = BeautifulSoup(r.content, 'lxml')
    headlines = soup.find_all('li', {"class": "o-listicle__item"})
    for j in range(len(headlines)):  # iterating through headlines in one page
        info = headlines[j].find('div', {"class": "m-statement__quote"})
        # BUG FIX: the original appended, then stripped ``news[j]`` —
        # but j restarts at 0 on every page while ``news`` keeps
        # growing, so every page after the first stripped the wrong
        # (already-clean) early entries and left its own entries padded
        # with newlines/quotes.  Clean the text before appending.
        text = info.find('a').text.strip('\n').strip('"')
        news.append(text)
        pic = headlines[j].find('div', {"class": "m-statement__meter"})
        tar = pic.find('img', {"class": "c-image__thumb"})  # truth-o-meter image
        target.append((tar.get('alt')))
    print(f'page {i} scraped')
for i in range(len(target)):  # converting real to 1 and fake to 0
    if target[i] == "true":
        target[i] = 1
    else:
        target[i] = 0
with open('Fake News Data.csv', 'a', encoding='utf-8') as csv_file:  # saving the scraped data
    writer = csv.writer(csv_file)
    for i in range(len(target)):
        writer.writerow([news[i], target[i]])
    print("Saved to the file")
print(len(news))
print(len(target))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
import requests
import json
url = "http://192.168.11.220:9900/getstr"
data = {
# "list1":[x for x in range(1000000)]
# "list1":[93,62,51,93,75,82,93,62,65,51,86,89,100]
"str1":12,
"str2":["你好","大"]
}
data=json.dumps(data,ensure_ascii=True)
headers = {'Content-Type': 'application/json'}
response = requests.post(url,data,headers=headers)
data1 = response.content.decode(encoding="unicode-escape")
# data1 = response.content.decode(encoding="utf-8")
# data = data.encode("unicode-escape").decode("utf-8")
# data = data.encode("unicode-escape").decode("gb18030")
print(data1) |
'''
Compose new poem using Markov model.
'''
import numpy as np
def read_data():
    """Read both poem corpora and return their lowercased lines."""
    text = open("../data/nguyen-binh.txt").read()
    text += open("../data/truyen_kieu.txt").read()
    return text.lower().split("\n")
def create_frequency_matrix(lines):
    """Count n-gram transitions over *lines* for a 2nd-order Markov model.

    Returns three frequency tables:
      initial                 -- {first_word: count}
      second                  -- {first_word: {second_word: count}}
      state_transition_matrix -- {(w_{t-2}, w_{t-1}): {w_t: count}}
    An artificial ' end' token is appended to every line so the model
    learns where lines terminate.
    """
    state_transition_matrix = dict()
    second = dict()
    initial = dict()
    for line in lines:
        tokens = (line + ' end').split(' ')
        for pos, word in enumerate(tokens):
            if pos == 0:
                # line-opening word
                initial[word] = initial.get(word, 0) + 1
            elif pos == 1:
                # second word, conditioned on the opener
                prev = tokens[0]
                bucket = second.setdefault(prev, dict())
                bucket[word] = bucket.get(word, 0) + 1
            else:
                # interior word, conditioned on the two before it
                key = (tokens[pos - 2], tokens[pos - 1])
                bucket = state_transition_matrix.setdefault(key, dict())
                bucket[word] = bucket.get(word, 0) + 1
    return initial, second, state_transition_matrix
def convert2prob(freq):
    """Normalize a frequency dict in place so its values sum to 1.

    Input:  {'lên': 2, 'tranh': 2}
    Output: {'lên': 0.5, 'tranh': 0.5}

    The dict is mutated and also returned for convenience.
    (The parameter was renamed from ``dict``, which shadowed the
    builtin; every caller in this file passes it positionally.)
    """
    total = sum(freq.values())
    for k, v in freq.items():
        freq[k] = v / total
    return freq
def sample(dict):
    """Draw one key from *dict*, treating its values as probabilities.

    Input:  {'lên': 0.5, 'tranh': 0.5}
    Output: 'lên' or 'tranh'
    (Values are assumed to already sum to 1 — see convert2prob.)
    """
    word_options = []
    pvals = []
    for word, prob in dict.items():
        word_options.append(word)
        pvals.append(prob)
    draw = np.random.multinomial(1, pvals, size=1)
    return word_options[np.argmax(draw)]
if __name__ == '__main__':
    # Build a 2nd-order Markov model from the corpora, then sample
    # num_of_lines new lines word by word.
    lines = read_data()
    initial, second, state_transition_matrix = create_frequency_matrix(lines)
    # convert frequency matrix to probability matrix (in place)
    convert2prob(initial)
    for _, v in second.items():
        convert2prob(v)
    for _, v in state_transition_matrix.items():
        convert2prob(v)
    # print for testing
    '''
    for k, v in bigram.items():
        print(k, v)
    for k, v in second.items():
        print(k, v)
    print(initial)
    '''
    # compose a poem
    num_of_lines = 10
    new_poem = []
    for i in range(num_of_lines):
        words = []
        # Seed each line with a sampled opener and a sampled second word.
        s_2 = sample(initial)
        words.append(s_2)
        s_1 = sample(second[s_2])
        words.append(s_1)
        # generate the remaining words until the artificial 'end' token
        # NOTE(review): if a sampled (s_2, s_1) pair never occurred in
        # the corpus this lookup raises KeyError — presumably every
        # reachable pair exists by construction; confirm.
        while (True):
            t = sample(state_transition_matrix[(s_2, s_1)])
            if t == 'end':
                break;
            else:
                words.append(t)
                s_2 = s_1
                s_1 = t
        # save line of new poem
        new_poem.append(' ' .join(words))
    print('\n'.join(new_poem))
|
# Create an empty set — set() is required, since {} builds a dict
showroom = set()
print(type(showroom))
# Register the initial inventory
showroom |= {'GMC', 'Honda', 'Toyota', 'Ford'}
print(showroom)
# How many cars are on display?
print(len(showroom))
# Two more arrivals
showroom |= {'Nissan', 'Lincoln'}
print(showroom)
# One car leaves (discard never raises, even if absent)
showroom.discard('Nissan')
print(showroom)
# A second collection of cars, overlapping the showroom
junkyard = set()
junkyard |= {'Nissan', 'Honda', 'Smart', 'Chevy'}
# Cars present in both collections
intersect = showroom & junkyard
print(intersect)
# Merge the junkyard into the showroom
new_showroom = showroom | junkyard
print(new_showroom)
# Drop one more car
new_showroom.discard('Smart')
print(new_showroom)
|
# -*- coding: utf-8 -*-
# -*- author: hechao -*-
|
# Given a set of objects with a value V, and a weight W, and having a bag that can carry,
# at most, a weight Max_W, find a way to fill the bag maximizing the value of the objects
# inside, in this version, it is assumed we can take a fraction of each object and carry only
# that, having, of course, its value multiplied by the fraction as well
# The solution is a list of the objects we take and the fraction of each of them
#An object contains [Name, [Weight, Value]]
objects = [["pera", [1, 0.5]], ["patata", [1, 0.5]], ["tomate", [1, 0.5]],
["arena", [5, 5]], ["pescado", [3, 5]]]
# I determine the value per weight unit of each object
weight_per_unit = [a / b for a, b in zip([item[1][1] for item in objects],
[item[1][0] for item in objects])]
#Adding the names
for i in zip(objects, weight_per_unit):
i[0][1].append(i[1])
# Sort the values in descending order
objects = list(reversed(sorted(objects, key = lambda a: a[1][2])))
print(objects)
max_weight = 5
total_weight = 0
selected_objects = []
while total_weight != max_weight and len(objects) != 0:
max_value_object = objects.pop(0)
if total_weight + max_value_object[1][0] < max_weight:
selected_objects.append((max_value_object, 1))
total_weight += max_value_object[1][0]
else:
portion_to_take = (max_weight - total_weight) / max_value_object[1][0]
selected_objects.append((max_value_object, portion_to_take))
total_weight += max_value_object[1][0] * portion_to_take
print(selected_objects)
|
import unittest
from iranlowo import corpus
class TestCoprusLoader(unittest.TestCase):
    """Tests for the iranlowo corpus loaders.

    NOTE(review): "Coprus" looks like a typo for "Corpus"; left as-is
    since the name itself carries no behavior.
    """
    def setUp(self):
        # Keep a reference to the class (not an instance) so the test
        # can assert on what construction raises.
        self.owe_loader = corpus.OweLoader
    def test_load_owe(self):
        # Constructing the loader with no directory must raise.
        with self.assertRaises(NotADirectoryError):
            self.owe_loader()
|
#!/usr/bin/env python
from distutils.core import setup, Extension
# distutils packaging metadata for the pyGtranslator GUI tool.
setup(name='pyGtranslator',
      version='0.6',
      description='GUI tool for Google translate',
      author='Radovan Lozej',
      author_email='radovan(dot)lozej(at)gmail(dot)com',
      url='http://xrado.hopto.org',
      classifiers=[
          'Environment :: X11 Applications',
          'Intended Audience :: End Users/Desktop',
          'License :: GNU General Public License (GPL)',
          'Operating System :: Linux',
          'Programming Language :: Python',
          'Topic :: Accessories'
      ],
      # executable installed onto PATH
      scripts = ['pygtranslator'],
      # docs, glade UI definition, desktop entry and icon — note the
      # absolute /usr paths assume a system-wide (non-virtualenv) install
      data_files=[
          ("/usr/doc/pygtranslator", ["README"]),
          ("/usr/share/pygtranslator", ["pygtranslator.glade"]),
          ('/usr/share/applications', ['pygtranslator.desktop']),
          ('/usr/share/pixmaps', ['pygtranslator.png'])
      ]
      )
|
from os import path
import pandas as pd
from glob import glob
from down_util import pr_from_pid
if __name__ == '__main__':
    # Path/row list covering China, and the directory of finished RGB tiles.
    pr_file = r"Z:\yinry\china.mosaic\china.pr.txt"
    # pr_file = r"Z:\yinry\global_mosaic\0.def\prwithrange.csv"
    check_dir = r'Z:\yinry\china.mosaic\1986\4.rgb'
    pr_table = pd.read_csv(pr_file, dtype={'PR': str})
    finished = glob(path.join(check_dir, '*.tif'))
    # The first 6 characters of each tile name encode the path/row id;
    # int() round-trip drops any leading zeros.
    donepr = [str(int(path.basename(name)[:6])) for name in finished]
    # donepr = [pr_from_pid(path.basename(name)) for name in finished]
    donedf = pd.DataFrame(data={'PR': donepr})
    # Rows whose PR has no finished tile yet.
    missing = pr_table.loc[~pr_table['PR'].isin(donedf['PR'])]
    missing.to_csv(r'Z:\yinry\china.mosaic\1986\4.rgb\missing1030.csv', index=False)
|
#Face rec using OpenCV
import cv2
import os
import numpy as np
from PIL import Image
from pathlib import Path
###
# For face DETECTION we will use the Haar Cascade provided by OpenCV.
# NOTE(review): hardcoded absolute user path — breaks on any other machine.
cascade_path = "/Users/jatinsethi/Downloads/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascade_path)
###
# For face RECOGNITION we use the LBPH Face Recognizer.
# NOTE(review): createLBPHFaceRecognizer is the legacy opencv-contrib
# API name; newer cv2.face builds expose LBPHFaceRecognizer_create —
# confirm against the installed OpenCV version.
recognizer = cv2.face.createLBPHFaceRecognizer()
###
images_folder_path = './yale_faces'         # training images
images_folder_for_testing = './yale_faces'  # held-out '.sad' images
Import_savedModel = False                   # True would skip rebuilding (unused path)
def learning_images_paths(path):
    """Training image paths: every file in *path* except '.sad' ones
    (the '.sad' expressions are held out for testing)."""
    return [os.path.join(path, name)
            for name in os.listdir(path)
            if not name.endswith('.sad')]
def image_to_numpy(image_paths):
    """Load each image as a grayscale uint8 numpy array.

    NOTE(review): when Import_savedModel is False (the module default),
    the *image_paths* argument is ignored and the training folder is
    re-listed instead — confirm whether the parameter is meant to be
    honored.
    """
    if not Import_savedModel:
        image_paths = learning_images_paths(images_folder_path)
    images_numpy = []
    for image_path in image_paths:
        # 'L' = 8-bit grayscale
        image_pil = Image.open(image_path).convert('L')
        #Find a better way than converting to uint8
        image = np.array(image_pil, 'uint8')
        images_numpy.append(image)
    return images_numpy
def detecting_faces(images_numpy):
    """Detect faces in each training image and collect (face crop, label).

    NOTE(review): this function cannot run as written —
      * image_to_numpy() is called without its required argument
        (TypeError);
      * the loop iterates ``images``, which is undefined here
        (NameError — presumably meant ``images_numpy``);
      * ``image_path`` is also undefined in this scope, so the subject
        label cannot be derived.  A correct fix needs the paths carried
        alongside the arrays — flagged rather than guessed at.
    """
    if not Import_savedModel:
        images_numpy = image_to_numpy()
    faces = []
    labels = []
    for image in images:
        face = faceCascade.detectMultiScale(image)
        # label = numeric part of 'subjectNN.*' in the file name
        nbr = int(os.path.split(image_path)[1].split(".")[0].replace("subject", ""))
        for (x, y, w, h) in face:
            faces.append(image[y: y + h, x: x + w])
            labels.append(nbr)
    return faces, labels
def training(faces, labels):
    """Train the global LBPH recognizer.

    NOTE(review): the parameters are immediately overwritten, and
    detecting_faces() is called without its required argument
    (TypeError as written) — confirm the intended data flow.
    """
    faces, labels = detecting_faces()
    recognizer.train(faces, np.array(labels))
def recognizing_image_paths(path):
    """Test image paths: only the held-out '.sad' files in *path*."""
    return [os.path.join(path, name)
            for name in os.listdir(path)
            if name.endswith('.sad')]
def recognizing(image_paths_for_testing):
    """Run face detection over the held-out test images.

    NOTE(review): the parameter is overwritten, and the detected
    ``faces`` are discarded each iteration — no recognizer.predict()
    call follows, so this looks truncated/unfinished.
    """
    image_paths_for_testing = recognizing_image_paths(images_folder_for_testing)
    for image_path in image_paths_for_testing:
        predict_image_pil = Image.open(image_path).convert('L')
        predict_image = np.array(predict_image_pil, 'uint8')
        faces = faceCascade.detectMultiScale(predict_image)
|
import paho.mqtt.client as mqtt
import time
from random import random, sample
import json
# Shared state mutated by on_message and the game loop below.
laumios = set()      # ids of Laumio lamps discovered over MQTT
addVol = 0           # volume delta queued until the player reports its volume
updatedVol = True    # True once musicVOL reflects the player's real volume
musicVOL = 50        # last known player volume
selec = -1           # lamp index currently pre-selected in the game (-1 = none)
selected = set()     # lamp indices already answered correctly this round
isPlaying = False    # True while the memory game is waiting for remote input
answer = None        # confirmed lamp index, set by on_message
def toVol(v):
    """Clamp a volume value into the player's valid range [0, 100]."""
    if v < 0:
        return 0
    if v > 100:
        return 100
    return v
def on_message(client, userdata, msg):
    """Central MQTT dispatcher: remote-control buttons, music-player
    volume feedback, Laumio lamp discovery, and input for the
    lamp-memory game driven by the loop below."""
    global tmin, tmax
    global musicVOL, addVol, updatedVol
    global selec, answer, isPlaying, lampes, selected
    # msg.payload is bytes; str() yields "b'...'" — strip the wrapper.
    s = str(msg.payload)[2:-1]
    if msg.topic == "laumio/status/advertise":
        # A lamp answered discovery: remember its id.
        if s != "discover" and (s not in laumios):
            laumios.add(s)
    elif msg.topic == "remote/playp/state":
        if s == "ON":
            client.publish("music/control/toggle")
    elif msg.topic == "remote/minus/state" or msg.topic == "remote/plus/state":
        # Volume buttons: on press, request the current volume and start
        # a timer; on release, apply a delta that grows with hold time.
        isPlus = msg.topic.count("plus") == 1
        if s == "ON":
            client.publish("music/control/getvol")
            if isPlus:
                tmax = time.time()
            else:
                tmin = time.time()
            updatedVol = False
        else:
            dt = time.time()- (tmax if isPlus else tmin)
            # +/-1 step, plus 3 per second held beyond the first second.
            dvol = (1 + max(0, dt - 1) * 3) * (1 if isPlus else -1)
            if updatedVol:
                client.publish("music/control/setvol", toVol(musicVOL + dvol))
            else:
                # Volume reply not seen yet: queue the delta for later.
                addVol += dvol
    elif msg.topic == "music/status":
        # Player status; the volume digits start at offset 8 of the payload.
        try:
            musicVOL = int(s[8:])
            if addVol != 0:
                # Flush any delta queued while the volume was unknown.
                client.publish("music/control/setvol", toVol(musicVOL + addVol))
                addVol = 0
            updatedVol = True
        except:
            pass
    elif msg.topic == "remote/next/state":
        if s == "ON":
            client.publish("music/control/next")
    elif msg.topic == "remote/prev/state":
        if s == "ON":
            client.publish("music/control/previous")
    elif msg.topic == "remote/mute/state":
        if s == "ON":
            client.publish("music/control/setvol", 0)
    elif isPlaying and msg.topic.startswith("remote/") and msg.topic.endswith("/state"):
        # Game mode: digit buttons 0-9 pick a lamp; the digit is the
        # 8th character of the topic ("remote/N/state").
        if s == "ON":
            i = int(msg.topic[7])
            if selec == i:
                # Second press on the same lamp confirms the answer.
                answer = i
            elif i not in selected:
                # Switch the preview: previous pre-selection off, new one blue.
                if selec != -1:
                    client.publish("laumio/{}/json".format(lampes[selec]), json.dumps(comBlack))
                selec = i
                client.publish("laumio/{}/json".format(lampes[i]), json.dumps(comBlue))
    else:
        print("Not traited:", msg.topic)
# ---- wiring: connect, subscribe to remote/music/laumio topics ----
client = mqtt.Client()
client.on_message = on_message
client.connect("mpd.lan")
client.subscribe("remote/playp/state")
client.subscribe("remote/minus/state")
client.subscribe("remote/plus/state")
client.subscribe("remote/next/state")
client.subscribe("remote/prev/state")
client.subscribe("remote/mute/state")
client.subscribe("music/status")
client.subscribe("laumio/status/advertise")
client.loop_start()
client.publish("laumio/all/discover")
for i in range(10):
    client.subscribe("remote/{}/state".format(i))
time.sleep(1.5)
# Solid-fill colour commands understood by the Laumio JSON endpoint.
comBlue = { 'command': 'fill', 'rgb': [0, 0, 255] }
comBlack = { 'command': 'fill', 'rgb': [0, 0, 0] }
comGreen = { 'command': 'fill', 'rgb': [0, 255, 0] }
comRed = { 'command': 'fill', 'rgb': [255, 0, 0] }
client.publish("laumio/all/json", json.dumps(comBlack))
# lampes = sample(laumios, 10)
# Fixed roster of 10 lamps (discovery-based sampling kept above, disabled).
lampes = ["Laumio_1D9486", "Laumio_104A13", "Laumio_0FBFBF",
          "Laumio_104F03", "Laumio_10508F", "Laumio_10805F",
          "Laumio_CD0522", "Laumio_0FC168", "Laumio_D454DB", "Laumio_107DA8"]
# ---- memory game: flash n lamps for t seconds, player repeats ----
t = 1.5
n = 2
while True:
    # Show the pattern: n random lamps in blue, then off.
    ls = sample(list(range(10)), n)
    for l in ls:
        client.publish("laumio/{}/json".format(lampes[l]), json.dumps(comBlue))
    time.sleep(t)
    for l in ls:
        client.publish("laumio/{}/json".format(lampes[l]), json.dumps(comBlack))
    m = 0
    selected = set()
    isPlaying = True
    # Collect n answers from the remote (set by on_message).
    while m < n:
        selec = -1
        answer = None
        while answer == None:
            time.sleep(0.1)
        if answer in ls:
            # Correct lamp: lock it in green.
            selec = -1
            selected.add(answer)
            client.publish("laumio/{}/json".format(lampes[answer]), json.dumps(comGreen))
            m += 1
        else:
            # Wrong lamp: show red and end the round.
            client.publish("laumio/{}/json".format(lampes[answer]), json.dumps(comRed))
            break
    isPlaying = False
    time.sleep(0.5)
    if m == n:
        # Round won: flash all green, speed up and add lamps as t shrinks.
        client.publish("laumio/all/json", json.dumps(comGreen))
        t *= 0.9
        if t < 0.6:
            n = 5
        elif t < 0.9:
            n = 4
        elif t < 1.2:
            n = 3
    else:
        client.publish("laumio/all/json", json.dumps(comRed))
    time.sleep(1)
    client.publish("laumio/all/json", json.dumps(comBlack))
# -*- coding: utf-8 -*-
"""
@author: Aayush Chaube
"""
from tkinter import *
from tkinter import messagebox
import re, pymysql
from PIL import *
def adjustWindow(window):
    """Size *window* to 600x600, centre it on the display and paint the
    background white.  Uses the module-level root window ``screen``
    (defined elsewhere in this file) to query the display size."""
    w = 600 # Width for the window size
    h = 600 # Height for the window size
    ws = screen.winfo_screenwidth() # Width of the screen
    hs = screen.winfo_screenheight() # Height of the screen
    x = (ws/2)-(w/2) # Calculate x and y coordinates for the Tk window
    y = (hs/2)-(h/2)
    window.geometry('%dx%d+%d+%d' %(w, h, x, y)) # Set the dimension of the screen and where it is placed
    window.resizable(True, True) # NOTE(review): original comment said "Disabling the resize option" but (True, True) *allows* resizing — confirm which is intended
    window.configure(background = 'white') # Making the background white of the window
def enter_new_record(entryField, semester):
    """Validate the marks form and insert one student_records row per subject.

    entryField -- list of rows, each a list of 4 tkinter Entry widgets
                  (subject name, marks scored, out of, credit points)
    semester   -- tkinter StringVar holding the chosen semester

    Reads the module globals ``screen4`` (form window), ``screen2``
    (welcome window) and ``studentID``.
    """
    # Reject the submission if any field on any row is empty.
    found = 0
    for student in entryField:
        for field in student:
            if field.get() == "":
                found = 1
                break
        if found:
            break
    if found == 0:
        if semester.get() == '--0--':
            messagebox.showerror("Error", "Please select your current semester", parent=screen4) # displaying message for invalid details
        else:
            connection = pymysql.connect(host="localhost", user="root", passwd="", database="edumate") # database connection
            try:
                cursor = connection.cursor()
                # SECURITY FIX: parameterized query instead of string
                # concatenation — the original interpolated raw Entry
                # text straight into the SQL (injection-prone and broken
                # by quotes in subject names).
                insert_query = ("INSERT INTO student_records "
                                "(subject_name, marks_scored, out_off, credit_point, semester, student_id) "
                                "VALUES (%s, %s, %s, %s, %s, %s)")
                for fields in entryField:
                    cursor.execute(insert_query, (fields[0].get(), fields[1].get(),
                                                  fields[2].get(), fields[3].get(),
                                                  semester.get(), studentID))
                connection.commit() # commiting the connection
            finally:
                connection.close() # always release the connection
            messagebox.showinfo("Congratulation", "Entry Succesfull",parent=screen2) # displaying message for successful entry
            screen4.destroy()
    else:
        messagebox.showerror("Error", "Please fill all the details", parent=screen4) # displaying message for invalid details
def student_new_record():
    """Open the "New Record" window: a 6x4 grid of Entry widgets
    (subject / marks / out-of / credits), a semester dropdown, and a
    Submit button wired to enter_new_record."""
    global screen4
    semester = StringVar()
    entryField = list()  # rows of Entry widgets, passed to enter_new_record
    screen4 = Toplevel(screen)
    screen4.title("New Record")
    adjustWindow(screen4) # configuring the window
    Label(screen4, text="Enter New Record", width='31', height="2", font=("Calibri", 22, 'bold'), fg='white', bg='#d9660a').grid(row=0, sticky=W, columnspan=4)
    Label(screen4, text="", bg='#174873', width='60', height='18').place(x=0, y=127)
    Label(screen4, text="", bg='white').grid(row=1,column=0)
    Label(screen4, text="Subject Name", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=2,column=0, pady=(5,10))
    Label(screen4, text="Your Marks", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=2,column=1, pady=(5,10))
    Label(screen4, text="Out of", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=2,column=2, pady=(5,10))
    Label(screen4, text="Credits Points", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=2,column=3, pady=(5,10))
    rowNo = 3
    for i in range(6): # this loop will generate all input field for taking input from the user
        temp = list()
        for j in range(4):
            e = Entry(screen4, width=14)
            e.grid(row=rowNo,column=j, padx=(3,0), pady=(0,25))
            temp.append(e)
        entryField.append(temp)
        rowNo += 2
    Label(screen4, text="Select Sem:", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=rowNo,column=0, pady=(15,0))
    list1 = ['1','2','3','4','5','6','7','8']
    droplist = OptionMenu(screen4, semester, *list1)
    semester.set('--0--')  # sentinel meaning "nothing selected yet"
    droplist.config(width=5)
    droplist.grid(row=rowNo, column=1, pady=(15,0))
    Button(screen4, text='Submit', width=20, font=("Open Sans", 13, 'bold'), bg='brown', fg='white', command=lambda: enter_new_record(entryField, semester)).grid(row=rowNo,columnspan=2,column=2, pady=(15,0))
def _grade_point(per):
    """Map a percentage to the 10-point grade scale used by fetch_record."""
    for cutoff, gp in ((80, 10), (75, 9), (70, 8), (60, 7),
                       (50, 6), (45, 5), (40, 4)):
        if per >= cutoff:
            return gp
    return 0

def fetch_record(semester):
    """Fetch the chosen semester's marks for the logged-in student,
    display them in ``screen3``, and show the computed CGPI and
    percentage.

    semester -- tkinter StringVar set by the OptionMenu in student_records.
    Reads the module globals ``screen3`` and ``studentID``.
    """
    # BUG FIX: the original compared the StringVar *object* with
    # '--0--' (always False, so the guard never fired) and pointed the
    # error dialog at screen4, a different window that may not exist.
    if semester.get() == '--0--':
        messagebox.showerror("Error", "Please select proper semester", parent=screen3) # displaying message for invalid details
    else:
        connection = pymysql.connect(host="localhost", user="root", passwd="", database="edumate") # database connection
        try:
            cursor = connection.cursor()
            # SECURITY FIX: parameterized query instead of string-built SQL.
            select_query = ("SELECT subject_name, marks_scored, out_off, credit_point "
                            "FROM student_records WHERE semester = %s AND student_id = %s")
            cursor.execute(select_query, (semester.get(), studentID))
            student_record = cursor.fetchall()
            connection.commit() # commiting the connection
        finally:
            connection.close() # always release the connection
        if len(student_record) > 0:
            # Show each (subject, marks, out-of, credits) row in the grid.
            for i in range(len(student_record)):
                for j in range(4):
                    Label(screen3, text=student_record[i][j], font=("Open Sans", 11, 'bold'), fg='white', bg='#174873').grid(row=i+4,column=j, pady=(5,10))
            # CGPA: percentage -> grade point per subject, weighted by credits.
            # (The stray no-op ``per`` expression from the original was removed.)
            output = list()
            for record in student_record:
                per = (record[1]/record[2]) * 100.0
                output.append([_grade_point(per), record[3]])
            credits_earned = total_credit_points = 0
            for gp, credit in output:
                credits_earned += gp * credit
                total_credit_points += credit
            cgpa = credits_earned/total_credit_points
            percentage = 7.1 * cgpa + 11 # cgpa-to-percentage conversion formula
            Label(screen3, text="Your CGPI", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=10,column=0, pady=(15,10))
            Label(screen3, text=cgpa, font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=10,column=1, pady=(15,10))
            Label(screen3, text="Percentage", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=10,column=2, pady=(15,10))
            Label(screen3, text=percentage, font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=10,column=3, pady=(15,10))
        else:
            messagebox.showerror("Error", "Entry not found", parent=screen3) # displaying message for invalid semester details
def student_records():
    """Open the "Your Record" window: a semester dropdown whose change
    callback (fetch_record) fills in the column grid below."""
    global screen3
    semester = StringVar()
    screen3 = Toplevel(screen)
    screen3.title("Student Records")
    adjustWindow(screen3) # configuring the window
    Label(screen3, text="Your Record", width='31', height="2", font=("Calibri", 22, 'bold'), fg='white', bg='#d9660a').grid(row=0, sticky=W, columnspan=4)
    Label(screen3, text="", bg='#174873', width='60', height='18').place(x=0, y=127)
    Label(screen3, text="", bg='white').grid(row=1,column=0)
    Label(screen3, text="Select Sem:", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=2,column=0, pady=(5,0))
    list1 = ['1','2','3','4','5','6','7','8']
    # Selecting a semester immediately triggers fetch_record.
    droplist = OptionMenu(screen3, semester, *list1, command=lambda x: fetch_record(semester))
    semester.set('--0--')  # sentinel meaning "nothing selected yet"
    droplist.config(width=5)
    droplist.grid(row=2, column=1, pady=(5,0))
    Label(screen3, text="Subject Name", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=3,column=0, pady=(15,10))
    Label(screen3, text="Your Marks", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=3,column=1, pady=(15,10))
    Label(screen3, text="Out of", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=3,column=2, pady=(15,10))
    Label(screen3, text="Credits Points", font=("Open Sans", 12, 'bold'), fg='white', bg='#174873').grid(row=3,column=3, pady=(15,10))
def welcome_page(student_info):
    """Open the post-login welcome window.

    student_info -- DB row(s) for the logged-in student;
                    student_info[0][1] is read as the display name.
    Shows two decorative images (paths relative to the working
    directory) and buttons into the grade-entry and result windows.
    """
    global screen2
    screen2 = Toplevel(screen)
    screen2.title("Welcome")
    adjustWindow(screen2) # configuring the window
    Label(screen2, text="Welcome " + student_info[0][1], width='32', height="2", font=("Calibri", 22, 'bold'), fg='white', bg='#d9660a').place(x=0, y=0)
    Label(screen2, text="", bg='#174873', width='20', height='20').place(x=0, y=96)
    Message(screen2, text='" Some people dream of accomplishing great things. Others stay awake and make it happen. "\n\n - By Some Night Owl', width='180', font=("Helvetica", 10, 'bold', 'italic'), fg='white', bg='#174873', anchor = CENTER).place(x=10, y=100)
    photo = PhotoImage(file="hires.png") # opening left side image - Note: If image is in same folder then no need to mention the full path
    label = Label(screen2, image=photo, text="") # attaching image to the label
    label.place(x=10, y=270)
    label.image = photo # it is necessary in Tkinter to keep a instance of image to display image in label
    photo1 = PhotoImage(file="Slide1.1.PNG") # opening right side image - Note: If image is in same folder then no need to mention the full path
    label1 = Label(screen2, image=photo1, text="") # attaching image to the label
    label1.place(x=200, y=96)
    label1.image = photo1 # it is necessary in Tkinter to keep a instance of image to display image in label
    Button(screen2, text='Enter your grades', width=20, font=("Open Sans", 13, 'bold'), bg='brown', fg='white', command=student_new_record).place(x=270, y=250)
    Button(screen2, text='Check your result', width=20, font=("Open Sans", 13, 'bold'), bg='brown', fg='white', command=student_records).place(x=270, y=350)
def register_user():
    """Validate the registration form and insert a new student row.

    Reads the module-level form StringVar/IntVar objects created by register()
    and shows an inline status Label on screen1 for every failure mode and on
    success.
    """
    # all fields must be non-empty
    if fullname.get() and email.get() and password.get() and repassword.get() and gender.get():
        if university.get() == "--select your university--":
            Label(screen1, text = "Please select your university", fg = "red", font = ("calibri", 11), width = '30', anchor = W, bg = 'white').place(x = 0, y = 570)
            return
        else:
            if tnc.get():  # terms-and-conditions checkbox must be ticked
                # Fixed: the dot before the TLD is now escaped; the previous
                # unescaped '.' accepted addresses such as 'a@bXcom'.
                if re.match(r"^.+@(\[?)[a-zA-Z0-9-.]+\.([a-zA-Z]{2,3}|[0-9]{1,3})(]?)$", email.get()):
                    if password.get() == repassword.get():
                        gender_value = 'male'
                        if gender.get() == 2:
                            gender_value = 'female'
                        connection = pymysql.connect(host = "localhost", user = "root", passwd = "", database = "edumate")  # database connection
                        cursor = connection.cursor()
                        # Parameterized query: the old string-concatenated SQL
                        # was vulnerable to SQL injection via any form field.
                        # NOTE(review): the password is still stored in plain
                        # text -- hash it before production use.
                        insert_query = "INSERT INTO student_details (fullname, email, password, gender, university) VALUES (%s, %s, %s, %s, %s)"
                        cursor.execute(insert_query, (fullname.get(), email.get(), password.get(), gender_value, university.get()))
                        connection.commit()
                        connection.close()
                        Label(screen1, text = "Registration Success", fg = "green", font = ("calibri", 11), width = '30', anchor = W, bg = 'white').place(x = 0, y = 570)  # fixed 'Sucess' typo
                        Button(screen1, text = 'Proceed to Login ->', width = 20, font = ("Open Sans", 9, 'bold'), bg = 'brown', fg = 'white', command = screen1.destroy).place(x = 170, y = 565)  # back to login
                    else:
                        Label(screen1, text = "Password does not match", fg = "red", font = ("calibri", 11), width = '30', anchor = W, bg = 'white').place(x = 0, y = 570)
                        return
                else:
                    Label(screen1, text = "Please enter valid email id", fg = "red", font = ("calibri", 11), width = '30', anchor = W, bg = 'white').place(x = 0, y = 570)
                    return
            else:
                Label(screen1, text = "Please accept the agreement", fg = "red", font = ("calibri", 11), width = '30', anchor = W, bg = 'white').place(x = 0, y = 570)
                return
    else:
        Label(screen1, text = "Please fill all the details", fg = "red", font = ("calibri", 11), width = '30', anchor = W, bg = 'white').place(x = 0, y = 570)
        return
def register():
    """Open the registration window and build the sign-up form.

    Creates the module-level Tk variables that register_user() reads, so the
    two functions must stay in sync.  Bug fix: window title typo
    'Registeration' -> 'Registration'.
    """
    global screen1, fullname, email, password, repassword, university, gender, tnc  # form state shared with register_user()
    fullname = StringVar()
    email = StringVar()
    password = StringVar()
    repassword = StringVar()
    university = StringVar()
    gender = IntVar()
    tnc = IntVar()
    screen1 = Toplevel(screen)
    screen1.title("Registration")  # fixed 'Registeration' typo
    adjustWindow(screen1)  # configuring the window
    Label(screen1, text = "Registration Form", width = '32', height = "2", font = ("Calibri", 22, 'bold'), fg = 'white', bg = '#d9660a').place(x = 0, y = 0)
    Label(screen1, text = "", bg = '#174873', width = '50', height = '17').place(x = 45, y = 120)  # blue backdrop panel
    Label(screen1, text = "Full Name:", font = ("Open Sans", 11, 'bold'), fg = 'white', bg = '#174873', anchor = W).place(x = 150, y = 160)
    Entry(screen1, textvar = fullname).place(x = 300, y = 160)
    Label(screen1, text = "Email ID:", font = ("Open Sans", 11, 'bold'), fg = 'white', bg = '#174873', anchor = W).place(x = 150, y = 210)
    Entry(screen1, textvar = email).place(x = 300, y = 210)
    Label(screen1, text = "Gender:", font = ("Open Sans", 11, 'bold'), fg = 'white', bg = '#174873', anchor = W).place(x = 150, y = 260)
    Radiobutton(screen1, text = "Male", variable = gender, value = 1, bg = '#174873').place(x = 300, y = 260)
    Radiobutton(screen1, text = "Female", variable = gender, value = 2, bg = '#174873').place(x = 370, y = 260)
    Label(screen1, text = "University:", font = ("Open Sans", 11, 'bold'), fg = 'white', bg = '#174873', anchor = W).place(x = 150, y = 310)
    # NOTE(review): 'Savitribai Phule Pune Univeristy' is misspelled, but the
    # string is stored in the DB as-is -- fixing it would orphan existing rows.
    list1 = ['Mumbai University', 'Savitribai Phule Pune Univeristy', 'Gujarat Technological University', 'JNTU Kakinada', 'University of Delhi', 'Anna University']
    droplist = OptionMenu(screen1, university, *list1)
    droplist.config(width = 17)
    university.set('--select your university--')
    droplist.place(x = 300, y = 305)
    Label(screen1, text = "Password:", font = ("Open Sans", 11, 'bold'), fg = 'white', bg = '#174873', anchor = W).place(x = 150, y = 360)
    Entry(screen1, textvar = password, show = "*").place(x = 300, y = 360)
    Label(screen1, text = "Re-Password:", font = ("Open Sans", 11, 'bold'), fg = 'white', bg = '#174873', anchor = W).place(x = 150, y = 410)
    entry_4 = Entry(screen1, textvar = repassword, show = "*")
    entry_4.place(x = 300, y = 410)
    Checkbutton(screen1, text = "I accept all terms and conditions", variable = tnc, bg = '#174873', font = ("Open Sans", 9, 'bold'), fg = 'brown').place(x = 175, y = 450)
    Button(screen1, text = 'Submit', width = 20, font = ("Open Sans", 13, 'bold'), bg = 'brown', fg = 'white', command = register_user).place(x = 170, y = 490)
def login_verify():
    """Check the login credentials against student_details.

    On success sets the module-level studentID and opens the welcome window;
    otherwise shows an error dialog.
    """
    global studentID
    connection = pymysql.connect(host="localhost", user="root", passwd="", database="edumate")  # database connection
    cursor = connection.cursor()
    # Parameterized query: the previous string-concatenated SQL allowed the
    # classic "' OR '1'='1" login bypass and broke on quotes in the input.
    select_query = "SELECT * FROM student_details where email = %s AND password = %s"
    cursor.execute(select_query, (username_verify.get(), password_verify.get()))
    student_info = cursor.fetchall()
    connection.commit()
    connection.close()
    if student_info:
        messagebox.showinfo("Congratulation", "Login Successful")  # fixed 'Succesfull' typo
        studentID = student_info[0][0]
        welcome_page(student_info)  # opening welcome window
    else:
        messagebox.showerror("Error", "Invalid Username or Password")
def main_screen():
    """Build the EDUMATE login window and start the Tk main loop."""
    global screen, username_verify, password_verify
    screen = Tk()
    username_verify = StringVar()
    password_verify = StringVar()
    screen.title("EDUMATE")
    adjustWindow(screen)

    def spacer():
        # blank label used purely for vertical spacing
        Label(screen, text="", bg='#174873').pack()

    Label(screen, text="EDUMATE - Student Manager", width="500", height="2", font=("Calibri", 22, 'bold'), fg='white', bg='#d9660a').pack()
    Label(screen, text="", bg='white').pack()
    Label(screen, text="", bg='#174873', width='50', height='17').place(x=45, y=120)  # blue backdrop panel
    Label(screen, text="Please enter details below to login", bg='#174873', fg='white').pack()
    spacer()
    Label(screen, text="Username * ", font=("Open Sans", 10, 'bold'), bg='#174873', fg='white').pack()
    Entry(screen, textvar=username_verify).pack()
    spacer()
    Label(screen, text="Password * ", font=("Open Sans", 10, 'bold'), bg='#174873', fg='white').pack()
    Entry(screen, textvar=password_verify, show="*").pack()
    spacer()
    Button(screen, text="LOGIN", bg="#e79700", width=15, height=1, font=("Open Sans", 13, 'bold'), fg='white', command=login_verify).pack()
    spacer()
    Button(screen, text="New User? Register Here", height="2", width="30", bg='#e79700', font=("Open Sans", 10, 'bold'), fg='white', command=register).pack()
    screen.mainloop()


main_screen()
|
"""A client for the CONSTELLATION external scripting API."""
import pandas as pd
# Add the directory containing the internal file to the import path.
#
cc_path = '../../../../../../../../../../../CoreUtilities/src/au/gov/asd/tac/constellation/utilities/webserver'
import sys
sys.path.append(cc_path)
import constellation_client
def _test_get(cc):
    """Fetch the selected graph elements through the client and dump the
    resulting dataframe, its columns/dtypes and the recorded attribute types."""
    frame = cc.get_dataframe(selected=True)
    if frame is None:
        return
    print(frame)
    print(frame.columns)
    print(frame.dtypes)
    print(cc.types)
def _test_post(cc):
    """Build a two-row transaction dataframe and push it through the client,
    then exercise a few plugins.

    Bug fix: the function took no arguments but was invoked as _test_post(cc)
    and referenced an undefined global `cc`; it now receives the Constellation
    client explicitly, matching the call site.
    """
    import datetime
    columns = ['source.Name', 'destination.Name', 'transaction.Type', 'transaction.DateTime']
    data = [
        ['1.2.3.4<IP Address>', '5.6.7.8<IP Address>', 'Online Location', datetime.datetime(2016, 1, 1, 2, 3, 4, 0)],
        ['4.3.2.1<IP Address>', '9.8.7.5<IP Address>', 'Online Location', datetime.datetime(2016, 1, 1, 3, 4, 5, 0)]
    ]
    df = pd.DataFrame(columns=columns, data=data)
    cc.put_dataframe(df, arrange=False)
    cc.run_plugin('ArrangeInTrees')
    cc.run_plugin('ResetView')
    cc.run_plugin('SelectAll')
    cc.run_plugin('DeleteSelection')
if __name__=='__main__':
    # Smoke test: requires a running CONSTELLATION instance with the external
    # scripting (REST) server enabled.
    cc = constellation_client.Constellation()
    id = cc.new_graph()  # NOTE(review): 'id' shadows the builtin; harmless here
    print('Created new graph {}'.format(id))
    _test_post(cc)
|
from landscapesim.async import tasks |
print("sum of list")


def sum_list(L):
    """Recursively sum the elements of list L.

    Bug fix: returns 0 for an empty list -- the original base case was
    len(L) == 1, so sum_list([]) crashed with an IndexError.
    """
    if not L:
        return 0
    return L[0] + sum_list(L[1:])


L = [2, 2, 2, 2, 2]
print(sum_list(L))
print("harmonic series")


def harmonic_sum(n):
    """Return the n-th partial sum of the harmonic series 1 + 1/2 + ... + 1/n,
    computed recursively (n must be >= 1)."""
    if n == 1:
        return 1
    return harmonic_sum(n - 1) + 1 / n


n = int(input("n = "))
print(harmonic_sum(n))
print("call function")


def func(x):
    """Return x * (x + 1) modulo 11."""
    return (x * (x + 1)) % 11


x = 1
# Iterate the map three times starting from x and collect the orbit.
a = func(x)
a1 = func(a)
a2 = func(a1)
L = [a, a1, a2]
print(L)
def fun(a, n):
    """Recursively return the maximum of the first n elements of a (n >= 1)."""
    if n == 1:
        return a[0]
    best = fun(a, n - 1)
    return best if best > a[n - 1] else a[n - 1]


arr = [12, 10, 30, 50, 100]
print(fun(arr, 5))
def decreasing(n):
    """Print n, recurse with n - 5 until reaching a value <= 0, then print each
    n again while the recursion unwinds -- producing a palindromic sequence."""
    print(n, end=", ")
    if n <= 0:
        return
    decreasing(n - 5)
    print(n, end=", ")


decreasing(16)
print("grid problem by recurssion")


def grid_path(n, m):
    """Count the monotone (right/down) lattice paths through an n x m grid."""
    return 1 if n == 1 or m == 1 else grid_path(n - 1, m) + grid_path(n, m - 1)


n = int(input("n = "))
m = int(input("m = "))
print(grid_path(n, m))
print("secret Origins")
print("General Onoroy Function")


def onoroy_value(num):
    """Return the number of '1' digits in num's binary representation
    (the population count for non-negative num)."""
    return bin(num).count('1')


num = int(input("num = "))
print("Onoroy Value = ", onoroy_value(num))
a = onoroy_value(num)
# Advance num until num + 1 has the same onoroy value, then report it.
while True:
    if onoroy_value(num + 1) == a:
        break
    num = num + 1
print(num + 1)
print(onoroy_value(num + 1))
print("Number recurssion")


def func(n):
    """Return floor(log2(n)) for n >= 1, by recursive halving."""
    return 0 if n == 1 else func(n // 2) + 1


n = int(input("n = "))
print(func(n))
print("String reverse by recurrsion")


def reverse(s):
    """Return string s reversed, built by recursion.

    Fix: the parameter and driver variable were both named 'str', shadowing
    the builtin str() for the rest of the module.
    """
    if not s:
        return s
    return reverse(s[1:]) + s[0]


word = 'abcde'
print(reverse(word))
print("Palindrome check")


def reverse(s):
    """Return s reversed, built recursively one character at a time."""
    if not s:
        return s
    return reverse(s[1:]) + s[0]


s = str(input())
# A string is a palindrome exactly when it equals its own reverse.
print("yes" if reverse(s) == s else "No")
print("Write a recursive function that, given a number n, returns the sum of the digits of the number")


def sum(n):  # NOTE: shadows the builtin sum() for the rest of this module
    """Return the sum of the decimal digits of non-negative integer n.

    Fix: uses floor division n // 10 -- the original int(n / 10) goes through
    a float and yields wrong digits for integers beyond 2**53.
    """
    if n == 0:
        return 0
    return n % 10 + sum(n // 10)


n = 123
print(sum(n))
print("Subsequence of string or not")


def stringtest(s1, s2, L):
    """Append to L one copy of each character of s1 per matching character in
    s2, and return the joined result.

    NOTE(review): this counts character matches only -- it does not verify
    subsequence order, so the 'yes'/'no' verdict below is unreliable.
    """
    for ch in s1:
        for other in s2:
            if ch == other:
                L.append(ch)
    return ''.join(L)


s1 = 'hac'
s2 = 'cathartic'
L = []
# `and` short-circuits: s is s1 when the match string is non-empty, else ''.
s = stringtest(s1, s2, L) and s1
print("no" if len(s) == 0 else "yes")
# Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'
print("Leonardo Number")


def leonum(n):
    """Return the n-th Leonardo number: L(0) = L(1) = 1,
    L(n) = L(n-1) + L(n-2) + 1."""
    return 1 if n in (0, 1) else leonum(n - 1) + leonum(n - 2) + 1


n = int(input("n = "))
print(leonum(n))
print("Maximum elementin list with recurrsion")


def fun(a, n):
    """Recursively return the maximum of the first n elements of a (n >= 1)."""
    if n == 1:
        return a[0]
    best = fun(a, n - 1)
    return best if best > a[n - 1] else a[n - 1]


# Driver code -- note only the first five of six elements are examined.
arr = [12, 10, 30, 200, 50, 100]
print(fun(arr, 5))
print("McCarthy 91 function.")


def func(n):
    """McCarthy 91 function: n - 10 for n > 100, otherwise 91 (reached through
    the nested recursion)."""
    return n - 10 if n > 100 else func(func(n + 11))


n = int(input("n = "))
print(func(n))
|
# Guess-the-age console game: compare the user's guess against the fixed age.
age =30
inputage = int(input("guess_age:"))  # raises ValueError on non-integer input
if(age == inputage):
    print("congratulations you")
elif(age > inputage):
    print("Think big")
else:
    print("Think small")
import json
import matplotlib.pyplot as plot
import csv
import os
import argparse
import pandas as pd
import numpy as np
from itertools import combinations
# Analysis locations and luminometer selection for the VdM scan-pair ratios.
folder = '/cmsnfsbrildata/brildata/vdmoutput/AutomationBackgroundCorrection/Analysed_Data/'
scanpair = '/cmsnfsbrildata/brildata/vdmoutput/AutomationBackgroundCorrection/Analysed_Data/6016_28Jul17_100134_28Jul17_102201/'
lumis = ['PLT', 'HFET']
lumi1bg = 2.3396306874090556e-06  # should be a dictionary with all backgrounds
def loadData(scanpair, lumis):
    """Compute background-corrected rate ratios between luminometer pairs.

    For every pair of luminometers, builds per-bunch-crossing ratio lists
    (rate1 - lumi1bg) / rate2 across all scan steps, plus the mean corrected
    rate of the first luminometer, and writes the result as JSON and CSV.

    Fixes: file handles are now closed via `with` (json.load(open(...)) leaked
    them), and dict.values() is wrapped in list() so np.mean works under
    Python 3 (it received a dict_values view before).
    """
    rates = {}
    for lumi in lumis:
        # NOTE(review): the fill number 6016 is hard-coded in the file name.
        with open(scanpair + '/LuminometerData/Rates_' + lumi + '_6016.json') as fh:
            rates[lumi] = json.load(fh)
    for nlumi1, nlumi2 in combinations(lumis, 2):
        lumi1 = rates[nlumi1]
        lumi2 = rates[nlumi2]
        ratios = {bx: [] for bx in lumi1['Scan_1'][0]['Rates'].keys()}
        ratios[nlumi1 + 'Rates'] = []
        for scan in lumi1.keys():
            if 'Scan' not in scan:
                continue
            for step1, step2 in zip(lumi1[scan], lumi2[scan]):
                # list() is required on Python 3: dict.values() is a view.
                ratios[nlumi1 + 'Rates'].append(np.mean(list(step1['Rates'].values())) - lumi1bg)
                for bx in step1['Rates'].keys():
                    ratios[bx].append(0 if not step2['Rates'][bx] else (step1['Rates'][bx] - lumi1bg) / step2['Rates'][bx])
        with open(nlumi1 + nlumi2 + '_RRratiosBG.json', 'w') as fh:
            json.dump(ratios, fh)
        pd.DataFrame.from_dict(ratios).to_csv(nlumi1 + nlumi2 + '_RRratiosBG.csv')
# Run the ratio extraction for the configured scan pair.
loadData(scanpair,lumis)
# def plot(ratios):
#     for ratio in ratios:
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/3/31 21:44
# @Author : Yunhao Cao
# @File : __init__.py
__author__ = 'Yunhao Cao'

# Public API of this module; intentionally empty for now.
__all__ = [
]


def _test():
    """Placeholder for module self-tests."""
    pass


def _main():
    """Placeholder entry point."""
    pass


if __name__ == '__main__':
    _main()
|
from django.db import models
class MostRecent(models.Model):
    """A recently added food item shown on the site."""
    # NOTE(review): 'Images' (capitalised) and 'prize' (likely meant 'price')
    # break Django naming conventions; renaming requires a migration.
    Images = models.ImageField(default="default.jpg", upload_to='pictures')
    foodname = models.CharField(max_length=200)
    prize = models.CharField(max_length=200)
class Feedback(models.Model):
    """Free-text feedback left by a named user."""
    name = models.CharField(max_length = 100)
    feed = models.CharField(max_length = 200)
#!/usr/bin/env python3
import sys
def main(filename):
    """Read one expression per line from filename and print both puzzle
    totals (part 1: flat precedence, part 2: '+' before '*')."""
    with open(filename) as handle:
        lines = handle.readlines()
    part1 = sum(evaluate(line) for line in lines)
    part2 = sum(evaluate(line, True) for line in lines)
    print("1: ", part1)
    print("2: ", part2)
def evaluate(fmath, sep=False):
    """Evaluate a space-separated arithmetic expression of '+', '*' and
    parentheses.

    With sep=False, operators apply strictly left to right with equal
    precedence.  With sep=True, every '+' is resolved before any '*'
    (Advent of Code 2020 day 18, parts 1 and 2).
    """
    expr = fmath
    stack = []
    # Collapse one innermost parenthesised group per pass until none remain.
    while '(' in expr:
        for idx, ch in enumerate(expr):
            if ch == '(':
                stack.append(idx)
            elif ch == ')':
                start = stack.pop()
                group = expr[start:idx + 1]
                stack.clear()
                # str.replace substitutes every occurrence of this group.
                expr = expr.replace(group, str(evaluate(group[1:-1], sep)))
                break
    tokens = expr.split(" ")
    result = 0
    if sep:
        # Part 2: fold each '+' (right to left so earlier indices stay valid),
        # then multiply whatever factors are left.
        plus_spots = [i for i, tok in enumerate(tokens) if tok == '+']
        for spot in reversed(plus_spots):
            folded = int(tokens[spot - 1]) + int(tokens[spot + 1])
            tokens = tokens[:spot - 1] + [str(folded)] + tokens[spot + 2:]
        result = 1
        for factor in (int(tok) for tok in tokens if tok != '*'):
            result *= factor
    else:
        # Part 1: strict left-to-right evaluation.
        for spot, op in ((i, tok) for i, tok in enumerate(tokens) if tok in ('+', '*')):
            left = int(tokens[spot - 1]) if spot == 1 else result
            if op == '+':
                result = left + int(tokens[spot + 1])
            elif op == '*':
                result = left * int(tokens[spot + 1])
    return result
if __name__ == "__main__":
    # Usage: script.py <input-file>
    if len(sys.argv) < 2:
        print("nee")
        sys.exit(1)
    # Self-checks using the puzzle's published examples (part 2, then part 1).
    assert evaluate("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2", True) == 23340  # noqa
    assert evaluate("2 * 3 + (4 * 5)", True) == 46
    assert evaluate("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2") == 13632
    assert evaluate("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))") == 12240
    assert evaluate("5 + (8 * 3 + 9 + 3 * 4 * 3)") == 437
    assert evaluate("2 * 3 + (4 * 5)") == 26
    main(sys.argv[1])
|
from . import palette_png
|
import discord
import asyncio
import logging
from discord.ext import commands
from msgstats import GuildStatistics
import config
STATS_FOLDER = 'DiscordStats'
class StatsCog(commands.Cog):
    """Cog that scrapes message history from the configured guild and feeds it
    into GuildStatistics, then uploads the aggregated stats as JSON."""

    def __init__(self, bot):
        self.bot = bot
        self.scraped_messages = 0  # count of non-bot messages processed

    @commands.Cog.listener()
    async def on_ready(self):
        """On connect: build statistics from up to config.limit non-bot
        messages, then upload the result."""
        self.bot.guildstats = GuildStatistics(self.bot.get_guild(config.primary_guild))
        # NOTE(review): only the first channel in chanlist is scanned --
        # confirm whether all channels were intended.
        async for message in self.bot.get_channel(self.bot.guildstats.chanlist[0].id).history(limit=config.limit).filter(lambda msg: not msg.author.bot):
            self.bot.guildstats.feed(message)
            self.scraped_messages += 1
        self.bot.guildstats.json_upload()
def setup(bot):
    """discord.py extension entry point: register the stats cog on the bot."""
    bot.add_cog(StatsCog(bot))
|
from transforms import *
from vector import *
from tkColorChooser import *
##
# Rotate object in center of screen by offset amount along x axis.
# Translates to origin and back so the object rotates around its own origin.
##
def rotate_x(val):
    """Slider callback: rotate the scene about the x axis to absolute angle val."""
    global scene, origin, invorigin
    val = float(val)
    delta = val - scene.x_rot  # apply only the change since the last callback
    scene.x_rot = val
    snap_to_origin()
    scene.update_transform(Transform.x_rotate(delta))
    snap_to_position()
##
# Rotate object in center of screen by offset amount along y axis.
# Translates to origin and back so the object rotates around its own origin.
##
def rotate_y(val):
    """Slider callback: rotate the scene about the y axis to absolute angle val."""
    global scene, origin, invorigin
    val = float(val)
    delta = val - scene.y_rot  # apply only the change since the last callback
    scene.y_rot = val
    snap_to_origin()
    scene.update_transform(Transform.y_rotate(delta))
    snap_to_position()
##
# Rotate object in center of screen by offset amount along z axis.
# Translates to origin and back so the object rotates around its own origin.
##
def rotate_z(val):
    """Slider callback: rotate the scene about the z axis to absolute angle val."""
    global scene, origin, invorigin
    val = float(val)
    delta = val - scene.z_rot  # apply only the change since the last callback
    scene.z_rot = val
    snap_to_origin()
    scene.update_transform(Transform.z_rotate(delta))
    snap_to_position()
##
# Tell scene to toggle between polygon and interpolated mode
##
def switch_render_mode():
    """Toggle the scene between polygon and interpolated rendering."""
    global scene
    scene.switch_render_mode()
##
# Sets the current ambient light level
##
def set_ambient_light(value):
    """Slider callback: set the ambient light intensity, equal per RGB channel."""
    global canvas, scene
    level = float(value) + 0.29  # 0.29 slider offset -- presumably a floor to keep the scene visible; TODO confirm
    canvas.ambient_intens = Vector3D(level, level, level)
    scene.changed = True
##
# Sets the current directed light strength
##
def set_light_strength(value):
    """Slider callback: set the directed light intensity."""
    global canvas, scene
    strength = float(value) + 0.69  # 0.69 slider offset -- presumably a floor value; TODO confirm
    canvas.light_intens = strength
    scene.changed = True
##
# Adjusts the directed light color
##
def choose_light_col():
    """Open a color chooser and set the directed (specular) light color.

    Fix: askcolor() returns (None, None) when the dialog is cancelled; the
    original crashed with a TypeError in that case.  Now a no-op on cancel.
    """
    global canvas
    rgb = askcolor()[0]
    if rgb is None:  # dialog cancelled
        return
    canvas.specular_color = Vector3D(*rgb)
    scene.changed = True
##
# Adjusts the ambient light color.
##
def choose_amb_col():
    """Open a color chooser and set the ambient light color, normalised to 0-1.

    Fix: askcolor() returns (None, None) when the dialog is cancelled; the
    original crashed with a TypeError in that case.  Now a no-op on cancel.
    """
    global canvas
    rgb = askcolor()[0]
    if rgb is None:  # dialog cancelled
        return
    canvas.ambient_intens = Vector3D(*rgb) / 255
    scene.changed = True
##
# Adjusts object shinyness
##
def set_shinyness(value):
    """Slider callback: set the specular shininess exponent."""
    global canvas, scene
    shin = float(value) + 52  # +52 slider offset -- presumably a minimum exponent; TODO confirm
    canvas.shinyness = shin
    scene.changed = True
##
# Toggles wireframe mode.
##
def switch_wireframe():
    """Toggle wireframe rendering on/off."""
    global scene
    scene.switch_wireframe_mode()
##
# Toggles perspective mode
##
def switch_perspective():
    """Toggle perspective projection on/off."""
    global scene
    scene.switch_perspective_mode()
##
# Toggles gourad shading
##
def toggle_gshading():
    """Toggle Gouraud shading on/off."""
    global scene
    scene.switch_shading_mode()
##
# toggles toon shading
##
def toggle_tshading():
    """Toggle toon (cel) shading on/off."""
    global scene
    scene.switch_toon_shading()
##
# Toggles specular shading
##
def toggle_specular():
    """Toggle specular highlights on/off."""
    global scene
    scene.switch_specular()
##
# Toggles interpolated coloring
##
def toggle_gcolor():
    """Toggle interpolated (per-vertex) coloring on/off."""
    global scene
    scene.switch_color_mode()
##
# toggles guidelines
##
def toggle_guides():
    """Show/hide the guide lines and mark the scene dirty for redraw."""
    global scene
    scene.draw_guides = not scene.draw_guides
    scene.changed = True
##
# Generates hi-res image of current scene
##
def generate_image():
    """Render and save a high-resolution image of the current scene."""
    global scene
    scene.save_image()
##
# Snaps scene to origin (0, 0)
##
def snap_to_origin():
    """Translate the scene by scene.origin (no `global` needed: scene is only read)."""
    scene.update_transform(Transform.translation(scene.origin))
##
# Returns object to its original offset position.
##
def snap_to_position():
    """Translate the scene back by scene.invorigin, undoing snap_to_origin()."""
    scene.update_transform(Transform.translation(scene.invorigin))
##
# Moves scene along the x-axis
##
def mov_x(val):
    """Slider callback: translate the scene to absolute x offset val."""
    global scene, origin, invorigin
    val = float(val)
    delta = float(val - scene.x_mov)  # apply only the incremental move
    scene.x_mov = val
    scene.update_transform(Transform.translation(Vector3D(delta, 0, 0)))
##
# Moves scene along the y-axis
##
def mov_y(val):
    """Slider callback: translate the scene to absolute y offset val."""
    global scene, origin, invorigin
    val = float(val)
    delta = float(val - scene.y_mov)  # apply only the incremental move
    scene.y_mov = val
    scene.update_transform(Transform.translation(Vector3D(0, delta, 0)))
##
# Moves scene along the z-axis
##
def mov_z(val):
    """Slider callback: translate the scene to absolute z offset val."""
    global scene, origin, invorigin
    val = float(val)
    delta = float(val - scene.z_mov)  # apply only the incremental move
    scene.z_mov = val
    scene.update_transform(Transform.translation(Vector3D(0, 0, delta)))
|
import game_framework
import logo_state
from pico2d import *
# Open the pico2d canvas, run the game state machine starting from the logo
# state, then tear the canvas down.
open_canvas()
game_framework.run(logo_state)
close_canvas()
|
# Runs only when this module is loaded under the name "filters" (mirrors the
# usual __main__ guard pattern); currently a no-op placeholder.
if __name__ == "filters":
    pass
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 12:41:42 2013
@author: bejar
"""
import scipy.io
from numpy import mean, std
import matplotlib.pyplot as plt
from pylab import *
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.svm import SVC
from sklearn.cross_validation import cross_val_score
from scipy import corrcoef
from sklearn.decomposition import PCA,KernelPCA
def correlationMatrix(mdata,linit,lend,nstep):
    """Mean correlation matrix between the rows (channels) of mdata.

    Splits columns [linit, lend) into nstep equal windows, computes the
    channel-correlation matrix over each window, and returns the element-wise
    average.  Python 2 code (print statements; integer division of lstep).
    """
    lstep=(lend-linit)/nstep
    corr=np.zeros((mdata.shape[0],mdata.shape[0]))
    liter= [linit+(i*lstep) for i in range(nstep)]
    print liter, len(liter),lend
    zz=0
    for length in liter:
        corrs=corrcoef(mdata[:,length:length+lstep])
        corr+=corrs
        zz+=1
        print length, length+lstep,
    print zz
    corr/=nstep
    return corr
# Data locations and recording selection (Python 2 analysis script).
cpath='/home/bejar/MEG/Data/'
#cres='/home/bejar/Documentos/Investigacion/MAG/res/'
cres='/home/bejar/Copy/MEG/Correlation/'
# Recording names: controls / compensated / decompensated subjects, each for
# both the MEG and MMN conditions.
lnames=['control1-MEG','control2-MEG','control3-MEG','control4-MEG','control5-MEG','control6-MEG','control7-MEG'
,'comp1-MEG','comp3-MEG','comp4-MEG' ,'comp5-MEG','comp6-MEG','comp7-MEG','comp13-MEG'
,'descomp1-MEG','descomp3-MEG','descomp4-MEG','descomp5-MEG','descomp6-MEG','descomp7-MEG'
,'control1-MMN','control2-MMN','control3-MMN','control4-MMN','control5-MMN','control6-MMN','control7-MMN'
,'comp1-MMN','comp3-MMN','comp4-MMN' ,'comp5-MMN','comp6-MMN','comp7-MMN','comp13-MMN'
,'descomp1-MMN','descomp3-MMN','descomp4-MMN','descomp5-MMN','descomp6-MMN','descomp7-MMN']
# Class label per recording: 0=control, 1=compensated, 2=decompensated.
lcol=[0,0,0,0,0,0,0,1,1,1,1,1,1,1,2,2,2,2,2,2,0,0,0,0,0,0,0,1,1,1,1,1,1,1,2,2,2,2,2,2]
#lnames=['comp10-MEG','comp12-MEG','comp10-MMN','comp12-MMN']
#lcol=[1,1,1,1]
#lband=['alpha','beta','gamma-l','gamma-h','theta','delta','gamma-h']
lband=['gamma-h']
badchannels=['A53','A31','A94']  # channels excluded from the analysis
# For each frequency band, build one feature vector per recording -- the upper
# triangle of the mean channel-correlation matrix -- then save the stacked
# data matrix together with the class labels as a MATLAB file.  (Python 2.)
for band in lband:
    examps=None
    print band,
    for name in lnames:
        print name,
        mats=scipy.io.loadmat( cpath+band+'/'+name+'-'+band+'.mat')
        # mats=scipy.io.loadmat( cpath+'/'+name+'.mat')
        data= mats['data']
        chann= mats['names']
        j=0
        mdata=None
        lsnames=[]
        # Keep only the 'A' channels that are not flagged as bad.
        for i in range(chann.shape[0]):
            # cname=chann[i][0][0]
            cname=chann[i]
            if cname[0]=='A' and not cname in badchannels:
                j+=1
                lsnames.append(cname)
                if mdata==None:
                    mdata=data[i]
                else:
                    mdata=np.vstack((mdata,data[i]))
            else:
                print cname,i
        # print lsnames
        cmatrix=correlationMatrix(mdata,0,mdata.shape[1],10)
        #print j,mdata.shape
        # Flatten the strict upper triangle of the correlation matrix into a
        # feature vector for this recording (j*(j-1)/2 entries).
        examp=np.zeros((j*(j-1)/2))
        p=0
        for i in range(cmatrix.shape[0]-1):
            for j in range(i+1,cmatrix.shape[0]):
                #if np.isnan(corr[i,j]) or corr[i,j]<0.7:
                examp[p]=cmatrix[i,j]
                # if p in [1, 5367, 5353, 5668, 4971, 9634, 7867, 3366, 7278, 604, 6881, 2217, 8349, 9401, 5708, 9590, 7460, 4519, 664]:
                #     print lsnames[i],lsnames[j]
                p+=1
        if examps==None:
            examps=examp
        else:
            examps=np.vstack((examps,examp))
    # Stack all recordings and save with their class labels.
    X=examps
    Y=np.array(lcol)
    patdata={}
    patdata['data']=X
    patdata['classes']=Y
    scipy.io.savemat(cres+'patcorr-'+band,patdata,do_compression=True)
    # scipy.io.savemat(cres+'patcorr-new',patdata,do_compression=True)
    print
|
"""Случайное блуждание"""
from random import choice
class RandomWalk:
    """Generate the points of a random walk on the integer plane."""

    def __init__(self, num_points=5000):
        """Set up the walk's attributes; every walk starts at (0, 0)."""
        self.num_points = num_points
        self.x_values = [0]
        self.y_values = [0]

    def get_step(self):
        """Return a random non-zero step in [-4, 4] (sign times distance,
        re-drawing whenever the distance comes out zero)."""
        while True:
            candidate = choice([1, -1]) * choice([0, 1, 2, 3, 4])
            if candidate:
                return candidate

    def fill_walk(self):
        """Compute walk points until num_points have been generated."""
        while len(self.x_values) < self.num_points:
            dx = self.get_step()
            dy = self.get_step()
            self.x_values.append(self.x_values[-1] + dx)
            self.y_values.append(self.y_values[-1] + dy)
import numpy as np
import pandas as pd
def IsSpell(arr):
    """Return 1 if every entry of the 1-D array equals its first entry, else 0."""
    return 1 if np.all(arr == arr[0], axis=0) else 0
def Merge2RainAverage(arr, spell_num):
    '''
    takes in 1) numpy array of size (years, days) AND 2) the number of days to create a spell,\
    and returns a new array of shape (years, days - spell_num + 1) where each cell is the mean
    over the spell-length sliding window starting at that day
    '''
    n_rows = arr.shape[0]
    n_windows = arr.shape[1] - spell_num + 1
    rain_arr = np.zeros((n_rows, n_windows), dtype=float)
    for row in range(n_rows):
        for start in range(n_windows):
            rain_arr[row, start] = arr[row, start:start + spell_num].mean()
    return rain_arr
def main():
    """Load the normalized daily JJAS rainfall matrix, average it over 3-day
    sliding windows and write the result to CSV.  Paths are machine-specific."""
    classes_raw = np.loadtxt("C:/Users/user/Documents/Personal/Research/MachineLearningClimate19/original_dataset/normalized_daily_JJAS_rainfall_central_India_1948_2014.csv", delimiter = ",")
    print(classes_raw.shape)
    spell_num = 3  # window length in days
    test = Merge2RainAverage(classes_raw, spell_num)
    print(test.shape, test)
    np.savetxt("C:/Users/user/Desktop/RainAvgOutputFile.csv", test, fmt='%f', delimiter=",")


if __name__ == "__main__":
    main()
class Solution:
    def findLUSlength(self, a: str, b: str) -> int:
        """Length of the longest uncommon subsequence of a and b: -1 when the
        strings are identical, otherwise the longer string's length (it cannot
        be a subsequence of the shorter one)."""
        if a == b:
            return -1
        return max(len(a), len(b))
#! /usr/bin/env python
import sys

# Convert a table whose first column is "chrom:pos" into BED-like output with
# a 50 bp window (pos - 25, pos + 25) around each position; the remaining
# columns pass through unchanged.  Usage: script.py input.txt > output.bed
with open(sys.argv[1], 'r') as infile:
    header = infile.readline().rsplit()
    print("chr\tstart\tend\t" + "\t".join(header[1:]))
    for line in infile:
        line = line.rsplit()  # whitespace-split the row
        coords = line[0].split(":")
        chromosome = "chr" + coords[0]
        position = int(coords[1]) - 25
        end = int(coords[1]) + 25
        data = [chromosome, str(position), str(end)]
        print("\t".join(data) + "\t" + "\t".join(line[1:]))
|
#!/usr/bin/env python2.7.12
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 13:30:12 2019
@author: thomas
"""
#This script will generate images of the spherobots and fluid flow up until
#the last time step that was recorded
#Specify 1) Dist bw spherobots (R) 2) Angle 3) Anti or Para 4) SSL or LSL
#Import databases (fluid, mesh) and Open them
#Add Omega pseudocolor
#Add Nbot many botlow, botup, and skeleton
#Window size based off of box boundaries
#Save Window Attibutes
#Save in specific directory '../Images/'+RLength+'/PI'+str(Theta)+'/'+str(ANTIoPARA)+'/'+str(SSLoLSL)+'/'
import os
import sys
import pathlib

# Geometry selection for this run: spherobot separation (in radii), angle,
# phase relation (Anti/Para) and stroke type (SSL/LSL) from the command line.
RLength = "5"
Theta = sys.argv[1]
ANTIoPARA = sys.argv[2]
SSLoLSL = sys.argv[3]
#Theta = "0"
#ANTIoPARA = "Anti"
#SSLoLSL = "SSL"
dir_path = os.path.dirname(os.path.realpath(__file__)) + '/'
#Parameters to be specified
Nbots = 2
plotNumber = 0
#Database paths
collectivePath = dir_path+"../Structures/"+RLength+"/PI"+Theta+"/"+ANTIoPARA+"/"+SSLoLSL+"/"
mesh = collectivePath+"/viz2D/lag_data.visit"
fluid = collectivePath+"/viz2D/dumps.visit"
db = [mesh,fluid]  # [0] = Lagrangian mesh, [1] = fluid dumps
def SetMeshAttributes(meshName,plotNumber):
    """Apply VisIt mesh display attributes to plot number plotNumber.

    Skeleton meshes are drawn cyan with larger points; sphere surface meshes
    are drawn white.  Runs inside VisIt's CLI, whose interpreter is Python 2
    (see the 'MeshAtts.None' line, which is invalid Python 3 syntax).
    """
    SetActivePlots(plotNumber)
    #SetMeshAttributes
    MeshAtts = MeshAttributes()
    MeshAtts.legendFlag = 0
    MeshAtts.lineStyle = MeshAtts.SOLID  # SOLID, DASH, DOT, DOTDASH
    if("skeleton" in meshName):
        MeshAtts.lineWidth = 1
        MeshAtts.meshColor = (0,255,255,255)  # cyan
        MeshAtts.pointSizePixels = 4
    else:
        MeshAtts.lineWidth = 1
        MeshAtts.meshColor = (255, 255, 255, 255)  # white
        MeshAtts.pointSizePixels = 2
    MeshAtts.meshColorSource = MeshAtts.MeshCustom  # Foreground, MeshCustom
    MeshAtts.opaqueColorSource = MeshAtts.Background  # Background, OpaqueCustom
    MeshAtts.opaqueMode = MeshAtts.Auto  # Auto, On, Off
    MeshAtts.pointSize = 0.05
    MeshAtts.opaqueColor = (255, 255, 255, 255)
    MeshAtts.smoothingLevel = MeshAtts.None  # None, Fast, High (Python 2 only)
    MeshAtts.pointSizeVarEnabled = 0
    MeshAtts.pointSizeVar = "default"
    MeshAtts.pointType = MeshAtts.Sphere  # Box, Axis, Icosahedron, Octahedron, Tetrahedron, SphereGeometry, Point, Sphere
    MeshAtts.showInternal = 0
    #MeshAtts.pointSizePixels = 2
    MeshAtts.opacity = 1
    SetPlotOptions(MeshAtts)
    return
if __name__ == '__main__':
    # Render one PNG per time state: vorticity pseudocolor plus the bot meshes.
    # All plotting calls below come from VisIt's CLI namespace.
    OpenDatabase("localhost:" + db[0])
    OpenDatabase("localhost:" + db[1])
    #Delete sublevel plot
    SetActivePlots(0)
    DeleteActivePlots()
    #Correlation between fluid and mesh
    CreateDatabaseCorrelation("Correlation1",('localhost:' + db[0], 'localhost:' + db[1]), 0)
    #Save Window Attributes (For images)
    s = SaveWindowAttributes()
    s.fileName = "Image"
    s.family = 1
    s.outputToCurrentDirectory = 0
    #pathlib.Path("../Images/"+RLength+"/PI"+Theta+"/"+ANTIoPARA+"/"+SSLoLSL).mkdir(parents=True, exist_ok=True)
    s.outputDirectory = dir_path+"../Images/"+RLength+"/PI"+Theta+"/"+ANTIoPARA+"/"+SSLoLSL+"/"
    s.format = s.PNG
    s.width, s.height = 1080, 1080
    s.saveTiled = 0
    SetSaveWindowAttributes(s)
    #SetActiveWindow(1)
    #Activate Fluid database
    ActivateDatabase("localhost:"+db[1])
    #Add Pseudocolor of Vorticity, clamped to [-10, 10]
    AddPlot("Pseudocolor", "Omega", 1, 0)
    plotNumber += 1
    SetActivePlots(0)
    PseudocolorAtts = PseudocolorAttributes()
    PseudocolorAtts.minFlag = 1
    PseudocolorAtts.min = -10
    PseudocolorAtts.maxFlag = 1
    PseudocolorAtts.max = 10
    PseudocolorAtts.centering = PseudocolorAtts.Nodal  # Natural, Nodal, Zonal
    PseudocolorAtts.colorTableName = "difference"
    PseudocolorAtts.invertColorTable = 0
    PseudocolorAtts.legendFlag = 0
    SetPlotOptions(PseudocolorAtts)
    print('Vorticity Plot Created!')
    #Activate Mesh database
    ActivateDatabase("localhost:"+db[0])
    '''Sphere Meshes'''
    # One lower-sphere, upper-sphere and skeleton mesh plot per bot.
    for idx in range(1,Nbots+1):
        meshNameSS = "botlow"+str(idx)+"_vertices"
        meshNameLS = "botup"+str(idx)+"_vertices"
        meshNameSk = "skeleton"+str(idx)+"_mesh"
        AddPlot("Mesh",meshNameLS, 1, 0)
        SetMeshAttributes(meshNameLS,plotNumber)
        plotNumber += 1
        AddPlot("Mesh",meshNameSS, 1, 0)
        SetMeshAttributes(meshNameSS,plotNumber)
        plotNumber += 1
        AddPlot("Mesh",meshNameSk, 1, 0)
        SetMeshAttributes(meshNameSk,plotNumber)
        #HideActivePlots()
        plotNumber += 1
    print('Sphere Meshes have been created!')
    DrawPlots()
    #Window Size/Position
    # Begin spontaneous state
    View2DAtts = View2DAttributes()
    View2DAtts.windowCoords = (-0.05, 0.05, -0.05, 0.05)
    View2DAtts.viewportCoords = (0.1, 0.9, 0.1, 0.9)
    View2DAtts.fullFrameActivationMode = View2DAtts.Auto  # On, Off, Auto
    View2DAtts.fullFrameAutoThreshold = 100
    View2DAtts.xScale = View2DAtts.LINEAR  # LINEAR, LOG
    View2DAtts.yScale = View2DAtts.LINEAR  # LINEAR, LOG
    View2DAtts.windowValid = 1
    SetView2D(View2DAtts)
    # End spontaneous state
    #Create Annotation Objects!
    #Title
    title = CreateAnnotationObject("Text2D")
    title.text = RLength+"R: PI"+Theta+": "+ANTIoPARA+": "+SSLoLSL
    title.position = (0.25, 0.95)
    title.fontBold = 1
    # Add a time slider in the lower left corner
    slider = CreateAnnotationObject("TimeSlider")
    slider.height = 0.07
    SaveSession(collectivePath+"Images.session")
    SetActiveTimeSlider("Correlation1")
    # Step through every correlated time state and save one image per state.
    for state in range(TimeSliderGetNStates()):
        SetTimeSliderState(state)
        AnnotationAtts = AnnotationAttributes()
        AnnotationAtts.axes2D.visible = 0
        SetAnnotationAttributes(AnnotationAtts)
        SaveWindow()
    SaveSession(collectivePath+"Images.session")
    exit()
|
import glob
import pdb
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib.colors import BoundaryNorm
def sparseFull(a):
    """Expand a WDSS-II sparse-grid netCDF dataset into a dense 2-D array.

    Each (value, pixel_x, pixel_y, pixel_count) tuple encodes a run of
    pixel_count cells starting at row pixel_x, column pixel_y, wrapping to the
    following rows at the right edge of the domain.  Unfilled cells stay NaN.
    SLOWWWWWWWWW -- pure Python loop.
    """
    # Create empty shape for filling
    b = np.full([len(a.dimensions['Lat']),len(a.dimensions['Lon'])], np.nan)
    xlen = len(a.dimensions['Lon'])
    ylen = len(a.dimensions['Lat'])
    # Gather into four arrays
    varname = a.TypeName
    var = a.variables[varname][:]
    pixel_x = a.variables['pixel_x'][:]
    pixel_y = a.variables['pixel_y'][:]
    pixel_count = a.variables['pixel_count'][:]
    # Loop through the four arrays simultaneously, populating 2D array 'b'
    for w,x,y,z in zip(var, pixel_x, pixel_y, pixel_count):
        distToEdge = xlen - y
        # check if pixel count exceeds distance to right edge of domain
        if z > distToEdge:
            b[x,y:xlen] = w
            pixelsLeft = z - distToEdge
            # check if pixels remaining are less than grid width
            if pixelsLeft <= xlen:
                # NOTE(review): the slice end (pixelsLeft-1) fills one cell
                # fewer than pixelsLeft -- looks like an off-by-one; confirm
                # against the WDSS-II sparse format spec before changing.
                b[x+1,0:(pixelsLeft-1)] = w
            else:
                rowsLeft, pixelCount_RemainingRow = divmod(pixelsLeft, xlen)
                # b[x:(x+rowsLeft+1),0:7001] = w
                b[x:(x+rowsLeft+1),0:int(b.shape[1])] = w
                # check if pixels are remaining
                if pixelCount_RemainingRow > 0:
                    b[(x+rowsLeft+1),0:pixelCount_RemainingRow] = w
        else:
            b[x,y:(y + z)] = w
    # pdb.set_trace()
    return b
def readwdssii(fin):
    """Load a WDSS-II netCDF file (path or already-open Dataset).

    Returns (xlen, lat, ylen, lon, varname, var) on success.  On a read
    failure it returns the legacy 4-tuple (xlen, ylen, varname, 0); the
    differing arity is kept for backward compatibility with existing callers.

    Fixes: bare `except` narrowed to Exception (KeyboardInterrupt etc. now
    propagate); `varname` is initialised so the failure path no longer raises
    NameError when a.TypeName itself fails; a Dataset opened here is closed on
    the failure path instead of being leaked.
    """
    if isinstance(fin, str):
        a = nc.Dataset(fin,)  # format="NETCDF3_64BIT_OFFSET"
        opened_here = True
    else:
        a = fin
        opened_here = False
    xlen = len(a.dimensions['Lon'])
    ylen = len(a.dimensions['Lat'])
    lat = a.Latitude
    lon = a.Longitude
    varname = None  # stays None if a.TypeName cannot be read
    try:
        varname = a.TypeName
        var = a.variables[varname][:]
        if a.DataType[0:6] == 'Sparse':
            var = sparseFull(a)
    except Exception:
        print('except')
        if opened_here:
            a.close()  # don't leak the handle we opened ourselves
        return xlen, ylen, varname, 0
    # pdb.set_trace()
    a.close()
    return xlen, lat, ylen, lon, varname, var
|
# coding: utf-8
"""Parser for Specification section of an MDN raw page."""
from .html import HnElement, HTMLElement, HTMLText
from .kumascript import (
KumaScript, KumaVisitor, SpecName, Spec2, kumascript_grammar)
from .utils import join_content
from .visitor import Extractor
class SpecSectionExtractor(Extractor):
    """Extracts data from elements representing a Specifications section.

    A specification section looks like:

    <h2 name="Specifications" id="Specifications">Specifications</h2>
    <table class="standard-table">
      <thead>
        <tr>
          <th scope="col">Specification</th>
          <th scope="col">Status</th>
          <th scope="col">Comment</th>
        </tr>
      </thead>
      <tbody>
        <tr>
          <td>{{SpecName('CSS3 Backgrounds', '#the-background-size',
            'background-size')}}</td>
          <td>{{Spec2('CSS3 Backgrounds')}}</td>
          <td>A note about this specification.</td>
        </tr>
      </tbody>
    </table>

    Non-table content raises an issue, unless it is wrapped in a
    {{WhyNoSpecStart}}/{{WhyNoSpecEnd}} pair.

    Extraction runs as a state machine over entering/leaving element
    events: begin -> extracted_header -> in_table -> in_first_row ->
    in_table_data -> (in_data_row -> extracted_name -> extracted_spec2
    -> extracted_desc)* per row -> extracted_table.
    """
    extractor_name = 'Specifications Extractor'

    def __init__(self, **kwargs):
        self.initialize_extractor(**kwargs)

    def setup_extract(self):
        """Reset the accumulated specs and the per-row scratch fields."""
        self.specs = []
        self.key = None
        self.spec_id = None
        self.path = None
        self.name = None
        self.spec2_key = None
        self.desc = None
        self.section_id = None

    def entering_element(self, state, element):
        """State-machine step on entering *element*.

        Returns (new_state, descend_into_children).
        """
        if state == 'begin':
            # The section always opens with its <h2> header.
            assert isinstance(element, HnElement)
            self.extract_header(element)
            return 'extracted_header', False
        elif state == 'extracted_header':
            if self.is_tag(element, 'table'):
                return 'in_table', True
            elif isinstance(element, HTMLElement):
                # Anything before the table is unexpected content.
                self.extract_non_table(element)
                return 'extracted_header', False
        elif state == 'in_table':
            if self.is_tag(element, 'tr'):
                # Confirm header columns?
                return 'in_first_row', False
        elif state == 'in_table_data':
            if self.is_tag(element, 'tr'):
                # New data row: clear the per-row scratch fields.
                self.key = None
                self.spec_id = None
                self.path = None
                self.name = None
                self.spec2_key = None
                self.section_id = None
                self.desc = None
                return 'in_data_row', True
        elif state == 'in_data_row':
            if self.is_tag(element, 'td'):
                # Column 1: {{SpecName}} -> key / path / name.
                self.extract_specname(element)
                return 'extracted_name', False
        elif state == 'extracted_name':
            if self.is_tag(element, 'td'):
                # Column 2: {{Spec2}} maturity, must match column 1's key.
                self.extract_spec2(element)
                return 'extracted_spec2', False
        elif state == 'extracted_spec2':
            if self.is_tag(element, 'td'):
                # Column 3: free-form note; the row is complete here.
                self.extract_specdesc(element)
                self.specs.append({
                    'specification.mdn_key': self.key,
                    'specification.id': self.spec_id,
                    'section.subpath': self.path,
                    'section.name': self.name,
                    'section.note': self.desc,
                    'section.id': self.section_id})
                return 'extracted_desc', False
        elif state == 'extracted_desc':
            # Warn on extra columns?
            pass
        elif state == 'extracted_table':
            if isinstance(element, HTMLElement):
                # Content after the table is unexpected as well.
                self.extract_non_table(element)
                return 'extracted_table', False
        else:  # pragma: no cover
            raise Exception('Unexpected state "{}"'.format(state))
        return state, True

    def leaving_element(self, state, element):
        """State-machine step on leaving *element*; returns the new state."""
        if state == 'begin':  # pragma: no cover
            pass
        elif state == 'extracted_header':
            pass
        elif state == 'in_table':
            # Warn when exiting with no data found?
            pass
        elif state == 'in_first_row':
            assert self.is_tag(element, 'tr')
            return 'in_table_data'
        elif state == 'in_table_data':
            if self.is_tag(element, 'table'):
                return 'extracted_table'
        elif state in ('in_data_row', 'extracted_name', 'extracted_spec2'):
            # Error on not enough columns?
            pass
        elif state == 'extracted_desc':
            if self.is_tag(element, 'tr'):
                return 'in_table_data'
        elif state == 'extracted_table':
            pass
        else:  # pragma: no cover
            raise Exception('Unexpected state "{}"'.format(state))
        return state

    def extracted_data(self):
        """Return the accumulated rows plus any issues raised on the way."""
        return {
            'specs': self.specs,
            'issues': self.issues
        }

    def extract_header(self, header_element):
        """Validate the <h2> id/name attributes against the expected values."""
        expected = ('Specifications', 'Specification')
        attributes = header_element.open_tag.attributes.attrs
        for name, attribute in attributes.items():
            if name == 'id':
                h2_id = attribute.value
                if h2_id not in expected:
                    self.add_issue('spec_h2_id', attribute, h2_id=h2_id)
            if name == 'name':
                h2_name = attribute.value
                if h2_name not in expected:
                    self.add_issue('spec_h2_name', attribute, h2_name=h2_name)

    def extract_non_table(self, element):
        """Flag non-table content, unless it renders to nothing."""
        if element.to_html(drop_tag=True):
            self.add_issue('skipped_content', element)

    def extract_specname(self, td_element):
        """Parse column 1 ({{SpecName}}) into key / spec id / path / name."""
        reparsed = kumascript_grammar.parse(td_element.raw)
        visitor = SpecNameVisitor(data=self.data, offset=td_element.start)
        visitor.visit(reparsed)
        self.issues.extend(visitor.issues)
        self.key = visitor.mdn_key or ''
        if not (self.key or visitor.issues):
            self.add_issue('specname_omitted', td_element)
        if visitor.spec:
            self.spec_id = visitor.spec.id
            self.section_id = visitor.section_id or None
        self.path = visitor.subpath or ''
        self.name = visitor.section_name or ''

    def extract_spec2(self, td_element):
        """Parse column 2 ({{Spec2}}) and check its key matches column 1."""
        reparsed = kumascript_grammar.parse(td_element.raw)
        visitor = Spec2Visitor(data=self.data, offset=td_element.start)
        visitor.visit(reparsed)
        self.issues.extend(visitor.issues)
        if visitor.mdn_key:
            # Standard Spec2 KumaScript - check for match
            spec2_key = visitor.mdn_key
            if spec2_key != self.key:
                self.add_issue(
                    'spec_mismatch', td_element, spec2_key=spec2_key,
                    specname_key=self.key)
        elif visitor.spec2_item:
            # Text like 'Standard' or non-standard KumaScript
            item = visitor.spec2_item
            if isinstance(item, HTMLText) and not isinstance(item, KumaScript):
                self.add_issue(
                    'spec2_converted', item, key=self.key,
                    original=item.cleaned)
        else:
            self.add_issue('spec2_omitted', td_element)

    def extract_specdesc(self, td_element):
        """Parse column 3 (free-form note) into joined HTML."""
        reparsed = kumascript_grammar.parse(td_element.raw)
        visitor = SpecDescVisitor(data=self.data, offset=td_element.start)
        visitor.visit(reparsed)
        self.issues.extend(visitor.issues)
        html = [item.to_html() for item in visitor.desc_items]
        self.desc = join_content(html)
class SpecNameVisitor(KumaVisitor):
    """
    Visitor for a SpecName HTML fragment.

    This is the first column of the Specifications table.  It records the
    specification key, subpath and section name found in the cell.
    """
    scope = 'specification name'
    _allowed_tags = ['td']

    def __init__(self, **kwargs):
        super(SpecNameVisitor, self).__init__(**kwargs)
        self.mdn_key = None
        self.subpath = None
        self.section_id = None
        self.section_name = None
        self.spec_item = None
        self.spec = None

    def process(self, cls, node, **kwargs):
        """Look for SpecName nodes."""
        result = super(SpecNameVisitor, self).process(cls, node, **kwargs)
        if isinstance(result, SpecName):
            # Exactly one SpecName is expected per cell; record its data.
            assert not self.spec_item
            assert not self.mdn_key
            assert not self.subpath
            assert not self.section_name
            self.spec_item = result
            self.mdn_key = result.mdn_key
            self.subpath = result.subpath
            self.section_name = result.section_name
            self.spec = result.spec
            self.section_id = result.section_id
            return result
        if isinstance(result, KumaScript):
            return result  # Issues added by KS
        if isinstance(result, HTMLText) and result.cleaned:
            # Plain text instead of KumaScript: recognize two legacy
            # ECMAScript spellings, otherwise flag the cell.
            text = result.cleaned
            legacy_specs = {
                'ECMAScript 1st Edition.': 'ES1',
                'ECMAScript 3rd Edition.': 'ES3'}
            key = legacy_specs.get(text, '')
            if key:
                self.mdn_key = key
                self.spec_item = result
                self.add_issue(
                    'specname_converted', result, original=text, key=key)
                self.spec = self.data.lookup_specification(key)
                if self.spec:
                    self.section_id = self.data.lookup_section_id(
                        self.spec.id, self.subpath)
                else:
                    self.add_issue('unknown_spec', self.spec_item, key=key)
            else:
                self.add_issue(
                    'specname_not_kumascript', result, original=text)
        return result
class Spec2Visitor(KumaVisitor):
    """
    Visitor for a Spec2 HTML fragment.

    This is the second column of the Specifications table.  It records the
    specification key and maturity found in the cell.
    """
    scope = 'specification maturity'
    _allowed_tags = ['td']

    def __init__(self, **kwargs):
        super(Spec2Visitor, self).__init__(**kwargs)
        self.mdn_key = None
        self.spec2_item = None
        self.spec = None
        self.maturity = None

    def process(self, cls, node, **kwargs):
        """Look for Spec2 nodes."""
        result = super(Spec2Visitor, self).process(cls, node, **kwargs)
        if isinstance(result, Spec2):
            # Exactly one Spec2 is expected per cell; record its data.
            assert not self.spec2_item
            assert not self.mdn_key
            self.spec2_item = result
            self.mdn_key = result.mdn_key
            self.spec = result.spec
            if self.spec:
                self.maturity = self.spec.maturity
            return result
        if isinstance(result, KumaScript):
            # Other KumaScript: issues already recorded by the KS handler.
            return result
        if (isinstance(result, HTMLText) and result.cleaned and
                not self.mdn_key and not self.spec2_item):
            # Plain text such as 'Standard' stands in for the maturity.
            self.spec2_item = result
        return result
class SpecDescVisitor(KumaVisitor):
    """
    Visitor for a Specification description fragment.

    This is the third column of the Specifications table.  It captures the
    children of the <td> cell as the description items.
    """
    scope = 'specification description'
    _allowed_tags = ['a', 'br', 'code', 'td']

    def __init__(self, **kwargs):
        super(SpecDescVisitor, self).__init__(**kwargs)
        self.desc_items = None

    def process(self, cls, node, **kwargs):
        """Look for description nodes."""
        result = super(SpecDescVisitor, self).process(cls, node, **kwargs)
        is_td = isinstance(result, HTMLElement) and result.tag == 'td'
        if is_td:
            # Only one <td> is expected per fragment.
            assert self.desc_items is None
            self.desc_items = result.children
        return result
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
# Read the iris dataset (no header row; column names assigned explicitly).
colnames = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Name']
iris_data = pd.read_csv("./data/iris.csv", names= colnames, encoding='utf-8')
# Separate the iris data into the label (species name) and the input features.
y = iris_data.loc[:, "Name"]
x = iris_data.loc[:, ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth"]]
print(x)
# Split into training and test sets (80/20, shuffled).
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, train_size = 0.8, shuffle=True)
# Train the classifier (observed accuracies noted per model).
# clf = SVC() #96.6
clf = LinearSVC() # 83.3
# clf = KNeighborsClassifier(n_neighbors=1) # random
clf.fit(x_train, y_train)
# Evaluate on the held-out test set ("정답률" = accuracy).
y_pred = clf.predict(x_test)
print("정답률: ", accuracy_score(y_test, y_pred))
class Solution:
    """LeetCode 452 -- minimum number of arrows to burst all balloons."""

    # Determine one running overlap interval, then keep shrinking it;
    # start a new arrow whenever an interval falls outside it.
    def findMinArrowShots(self, points: "List[List[int]]") -> int:
        """Return the minimum number of arrows so that every [start, end]
        interval in *points* is hit by at least one arrow.

        Greedy over intervals sorted by (start, end): maintain the
        intersection of the current group; a disjoint interval costs a
        new arrow.  Sorts *points* in place (as before) but no longer
        mutates its inner lists.

        Note: the annotation is quoted because `List` is not imported in
        this file -- the unquoted form raised NameError at class-definition
        time.
        """
        if not points:
            return 0
        points.sort(key=lambda x: (x[0], x[1]))
        count = 1  # at least one arrow is needed
        overlap = points[0][:]  # copy: never alias the caller's data
        for i in range(1, len(points)):
            if points[i][0] <= overlap[1] and overlap[1] <= points[i][1]:
                # Partial overlap: shrink to the intersection.
                overlap[0] = max(overlap[0], points[i][0])
                overlap[1] = min(overlap[1], points[i][1])
            elif overlap[0] <= points[i][0] and points[i][1] <= overlap[1]:
                # Nested interval: it becomes the new intersection.
                overlap = points[i][:]
            else:
                # Disjoint: the previous arrow is spent, fire a new one.
                count += 1
                overlap = points[i][:]
        return count
|
from perlin_noise import PerlinNoise
from PIL import Image
from random import randint
import numpy as np
# One shared seed so all octave layers line up spatially.
SEED=randint(0,999999)
print(f"Generating perlin noise generators (SEED={SEED})...")
# Four layers; each doubles the spatial frequency of the previous one.
noise1 = PerlinNoise(octaves=3 , seed=SEED)
noise2 = PerlinNoise(octaves=6 , seed=SEED)
noise3 = PerlinNoise(octaves=12, seed=SEED)
noise4 = PerlinNoise(octaves=24, seed=SEED)
xpix, ypix = 500, 500
print(f"Generating matrix {xpix}x{ypix}...")
pic = np.zeros((xpix, ypix))
for i in range(xpix):
    for j in range(ypix):
        # (previous plain fractal sum, kept for reference)
        # noise_val = noise1([i/xpix, j/ypix])
        # noise_val += 0.5 * noise2([i/xpix, j/ypix])
        # noise_val += 0.25 * noise3([i/xpix, j/ypix])
        # noise_val += 0.125* noise4([i/xpix, j/ypix])
        # Folded ("ridged") octaves: 2*(0.5-|0.5-n|) creases the noise,
        # and each layer is modulated by the sum of the coarser layers.
        e0 = 2*(0.5-abs(0.5-noise1([i/xpix, j/ypix])))
        e1 = 0.5  *2*(0.5-abs(0.5-noise2([i/xpix, j/ypix])))*e0
        e2 = 0.25 *2*(0.5-abs(0.5-noise3([i/xpix, j/ypix])))*(e0+e1)
        e3 = 0.125*2*(0.5-abs(0.5-noise4([i/xpix, j/ypix])))*(e0+e1+e2)
        noise_val = e3
        # noise_val = pow(noise_val, 2)
        pic[i,j] = noise_val
print("Normalizing matrix [0,255]...")
# Min-max scale into byte range for an 8-bit grayscale image.
pic = (255*(pic - np.min(pic))/np.ptp(pic)).astype(np.uint8)
print("Saving matrix as hmap.png...")
im = Image.fromarray(pic)
im.save("hmap.png")
|
import support_lib as bnw
import add_player as addp
import parse_config as parser
import email_poller as email
import login_player as login
import options as options
import player_status as status
import trade_route as trade
import retrieve_settings as settings
import port_handler as port
import time
import random
from tinydb import TinyDB, Query
# a rudimentary testing framework for bot creation
# Compute the cost of an upgrade
def upgradeCost(desiredvalue, currentvalue):
    """Credits needed to raise a tech level from currentvalue to desiredvalue.

    Each level step costs 2**level * 1000 credits, so the total is the sum
    of per-level costs for levels currentvalue .. desiredvalue - 1.
    Returns 0 when desiredvalue <= currentvalue.
    """
    per_level = (2 ** level for level in range(currentvalue, desiredvalue))
    return 1000 * sum(per_level)
# This routine attempts to move the player to Zero within the allotted move limit
# returns the updated sector db
# This routine attempts to move the player to Zero within the allotted move limit
# returns the updated sector db
def gotoSectorZero(maxMoves, warpDB):
    """Walk toward sector 0, one warp at a time, recording warps seen.

    Returns [True, warpDB] on arrival, [False, warpDB] when maxMoves runs
    out.  Calls exit(1) on a dead end or a failed move.
    """
    print('gotoSectorZero called with maxMoves: {}'.format(maxMoves))
    # Get player status - pull out current sector
    # if current sector == 0, return True
    # if not, move to the lowest warp
    # return to top
    while maxMoves > 0:
        playerStatus = status.getStatus()
        inSector = playerStatus['currentSector']
        warps = playerStatus['warps']
        intWarps = [int(i) for i in warps]  # NOTE(review): computed but unused
        warpDB[inSector] = warps
        # NOTE(review): compares to int 0, while other call sites use the
        # string '0' for sectors -- confirm getStatus()'s type.
        if inSector == 0:
            return [True, warpDB]
        # attempt to move to the first available warp, which will be the closest to zero (theoretically)
        if len(warps) == 0:
            print('We are in a dead end!')
            exit(1)
        newSector = warps[0]
        print("gotoSectorZero is attempting to move to sector: {}".format(newSector))
        if not bnw.moveTo(newSector):
            print("Was unable to move to the desired sector: {}".format(newSector))
            exit(1)
        maxMoves = maxMoves - 1
    print('Ran out of moves in gotoSectorZero')
    return [False, warpDB]
# This routine attempts to navigate the supplied sectors
# returns True if it succeeds, False if it is unable to comply
def moveVia(path):
    """Walk the ship through each sector in *path*, in order.

    Returns True when every hop succeeds, False on the first failed move.
    """
    print('moveVia called with path: {}'.format(path))
    for sector in path:
        print("moveVia is attempting to move to sector: {}".format(sector))
        moved = bnw.moveTo(sector)
        if not moved:
            print("Was unable to move to the desired sector: {}".format(sector))
            return False
    # assume we made it
    return True
# a wrapper for the shortest path finder which strips off the first element (unless None)
# since it is always the current sector the ship is in
def find_shortest_path(graph, start, end):
    """Shortest path from *start* to *end* with the first hop stripped.

    The first element is always the sector the ship is already in, so it is
    removed before returning.  Returns None when no path exists.
    """
    route = find_path_shortest(graph, start, end)
    if route is not None:
        del route[0]
    return route
# A rudimentary shortest path calculator
# found here: https://www.python.org/doc/essays/graphs/
# A rudimentary shortest path calculator
# found here: https://www.python.org/doc/essays/graphs/
def find_path_shortest(graph, start, end, path=None):
    """Return the shortest path (list of nodes) from *start* to *end*.

    Depth-first search over all acyclic paths, keeping the shortest.
    *graph* maps each node to a list of neighbour nodes.  Returns None
    when no path exists.

    *path* holds the nodes already visited during recursion; callers
    normally omit it.  (Bug fix: the default was a mutable `path=[]` --
    the classic shared-default trap; it is now None.)
    """
    if path is None:
        path = []
    path = path + [start]  # new list each call: recursion never shares state
    if start == end:
        return path
    if start not in graph:
        return None
    shortest = None
    for node in graph[start]:
        if node in path:
            continue  # skip cycles
        newpath = find_path_shortest(graph, node, end, path)
        if newpath and (shortest is None or len(newpath) < len(shortest)):
            shortest = newpath
    return shortest
# search for and create an initial trade route
# this route will NOT verify it is unique!
# search for and create an initial trade route
# this route will NOT verify it is unique!
def initialRoute(warpDB):
    """Find the bot's first trade route.

    Tries to start from sector 0, random-walks ~10 sectors away (recording
    warps into warpDB along the way), then asks trade.tradeRouteSearch for
    a route.  Returns [warpDB, tradeRoutes]; calls exit(1) on failure.
    """
    # NOTE(review): oreSector/goodsSector/portSector are never used below.
    oreSector = -1
    goodsSector = -1
    portSector = {}
    distanceBeforeSearching = 10
    zeroMove = gotoSectorZero(100, warpDB)
    inZero = False
    if zeroMove[0]:
        print("Made it to sector Zero")
        inZero = True
    else:
        print("Starting Route search from where ever we are...")
    warpDB = zeroMove[1]
    if inZero:
        beenThere = []
        print("#######\n##########\nAttempting to move {} sectors away from Zero before looking for a trade route".format(distanceBeforeSearching))
        while True:
            playerStatus = status.getStatus()
            print("Player Status:")
            print(playerStatus)
            availableWarps = playerStatus['warps']
            # Pick a random warp we have not visited yet.
            while True:
                # select a random warp
                randomIndex = random.randint(0, len(availableWarps) - 1)
                randomWarp = availableWarps[randomIndex]
                print('Considering jumping to: {}'.format(randomWarp))
                if randomWarp in beenThere:
                    print("Already been to {}, picking another".format(randomWarp))
                    continue
                else:
                    break
            print("Performing a random jump to: {}".format(randomWarp))
            if not bnw.moveTo(randomWarp):
                print("Was unable to move to the desired sector: {}".format(randomWarp))
                exit(1)
            playerStatus = status.getStatus()
            availableWarps = playerStatus['warps']
            warpDB[randomWarp] = availableWarps
            beenThere.append(randomWarp)
            # if we ended up back in port 0, reset beenThere!
            if randomWarp == "0":
                print('Ended up back in 0, resetting beenThere')
                beenThere = []
            currentPath = find_shortest_path(warpDB, randomWarp, '0')
            if currentPath == None:
                print("Went through a one way warp - we're lost!".format(randomWarp))
                break
            howFar = len(currentPath)
            print("We are now {} jumps from zero".format(howFar))
            # NOTE(review): hard-coded 10 here presumably should be
            # distanceBeforeSearching -- confirm before changing.
            if howFar >= 10:
                print("That's far enough!")
                break
            else:
                print("Still need to move further away!")
    print("Starting search for a trade route")
    searchResults = trade.tradeRouteSearch(warpDB, 100)
    print("searchResults: {}".format(searchResults))
    searchStatus = searchResults[0]
    if searchStatus != "SUCCESS":
        print("Failed to find a trade route")
        exit(1)
    else:
        # ['SUCCESS', tradeRoutes, warpDB]
        tradeRoutes = searchResults[1]
        warpDB = searchResults[2]
    return [warpDB, tradeRoutes]
###########################################################
## MAIN ENTRY
##
## Parse config, start the browser, and get the player logged in --
## creating the account and rotating the emailed server password
## when necessary.
###########################################################
parseResults = parser.readConfig('blacknova.config')
parseStatus = parseResults[0]
if not parseStatus == 'SUCCESS':
    print("Error parsing the config file: {}".format(parseResults[1]))
    exit(1)
parseDict = parseResults[1]
print('parseDict: {}'.format(parseDict))
# fire up the browser
bnw.startBrowser("chrome")
print("You have 4 seconds to move the browser before it is used.")
time.sleep(4)
# createPlayer(playerEmail, playerName, playerPassword, shipName, gameURL)
playerEmail = parseDict['UserEmailAddress']
playerName = parseDict['PlayerName']
playerPassword = parseDict['PlayerPassword']
shipName = parseDict['ShipName']
gameURL = parseDict['baseURL']
print("playerEmail: {}".format(playerEmail))
print("playerName: {}".format(playerName))
print("playerPassword: {}".format(playerPassword))
print("Ship Name: {}".format(shipName))
print("Base Game URL: {}".format(gameURL))
print('Retrieving the game settings')
gameSettings = settings.getSettings(gameURL)
print("Retrieved game settings")
print(gameSettings)
print("Trying to login with the supplied credentials")
loginResults = login.login(playerEmail, playerPassword, gameURL)
if not loginResults[0] == "SUCCESS":
    errorMsg = loginResults[1]
    print("Error logging in with the supplied credentials: {}".format(errorMsg))
    if errorMsg == "No Such Player":
        # Account does not exist yet: create it, then wait for the
        # server-generated password to arrive by email.
        print("Need to create player...")
        addResults = addp.createPlayer(playerEmail, playerName, shipName, gameURL)
        print('addResults: {}'.format(addResults))
        tries = 5
        while True:
            print("Checking for password, tries remaining: {}".format(tries))
            passwordDict = email.pollEmail('blacknova.config')
            if playerEmail in passwordDict:
                serverPass = passwordDict[playerEmail]
                print("We received a server password: {}".format(serverPass))
                break
            else:
                tries = tries - 1
                if tries > 0:
                    print('No password yet, waiting 30 seconds before trying again')
                else:
                    print("Email problem? Need to use the password resend link")
                    print("not implemented - yet!")
                    exit(1)
        print("Now attempting to login the newly created player")
        loginResults = login.login(playerEmail, serverPass, gameURL)
        if not loginResults[0] == "SUCCESS":
            print("Ran into a credentials issue attempting to login as: {} with password: {}".format(playerEmail, serverPass))
            exit(1)
        print("Logged in - now to change the password...")
        # Swap the emailed server password for the configured one.
        chgPassResults = options.changePasswd(serverPass, playerPassword,)
        if not chgPassResults[0] == "SUCCESS":
            print("Was unable to change the player password")
            exit(1)
        print("Player password has been changed!")
        # end up at the normal main page
        mainPage = 'http://{}/main.php'.format(gameURL)
        bnw.loadPage(mainPage)
    elif errorMsg == 'Incorrect Password':
        # The server may have emailed us a replacement password.
        print("Checking for server password")
        passwordDict = email.pollEmail('blacknova.config')
        if playerEmail in passwordDict:
            serverPass = passwordDict[playerEmail]
            print("We received a server password: {}".format(serverPass))
            loginResults = login.login(playerEmail, serverPass, gameURL)
            if not loginResults[0] == "SUCCESS":
                print("Ran into a credentials issue attempting to login as: {} with password: {}".format(playerEmail, serverPass))
                exit(1)
            print("Logged in - now to change the password...")
            chgPassResults = options.changePasswd(serverPass, playerPassword)
            if not chgPassResults[0] == "SUCCESS":
                print("Was unable to change the player password")
                exit(1)
            print("Player password has been changed!")
            # end up at the normal main page
            mainPage = 'http://{}/main.php'.format(gameURL)
            bnw.loadPage(mainPage)
        else:
            print("Supplied password is incorrect, need to request a resend")
            print("not implemented - yet!")
            exit(1)
    else:
        print("Unsupported error...")
        exit(1)
print("Player should now be logged in!")
# The above needs to be refactored and cleaned up.
# a player object with login credentials, and it logs itself in?
# create a player DB reference based on the name of the bot
db = TinyDB("{}.db".format(playerName))
warpsTable = db.table('warps')
warpDB = warpsTable.all()
print("length of warpDB: {} ".format(len(warpDB)))
# retrieve the warpDB if present
if len(warpDB):
    print("Retrieving saved Warp DB")
    warpDB = warpDB[0]
else:
    print("Initializing a new Warp DB")
    warpDB = {}
    numberOfSectors = int(gameSettings['Number of Sectors'])
    print("Creating an empty sector warp lookup table, {} sectors worth".format(numberOfSectors))
    # Keys are sector numbers as strings; values are that sector's warps.
    for sectorNumber in range(1, numberOfSectors + 1):
        warpDB[str(sectorNumber)] = []
# Main game loop starts here
# Long-term targets; each corresponding *Goal flag is set when met.
goals = {"planets": 10, "credits": 10000000000, "techLevel": 20, "traderoutes": 10 }
planetGoal = False
creditsGoal = False
techGoal = False
tradeGoal = False
tradeRoutes = trade.retrieveRoutes()
print('Established trade routes: {}'.format(tradeRoutes))
numRoutes = len(tradeRoutes)
if numRoutes >= goals["traderoutes"]:
    tradeGoal = True
print("Trying to retrieve player status")
playerStatus = status.getStatus()
print("Player Status:")
print(playerStatus)
turnsLeft = playerStatus["turnsLeft"]
currentSector = playerStatus['currentSector']
print("Player is currently in sector {}".format(currentSector))
availableWarps = playerStatus['warps']
warpDB[currentSector] = availableWarps
planetStatus = status.getPlanetStatus()
numPlanets = len(planetStatus)
if numPlanets >= goals["planets"]:
    planetGoal = True
playerCredits = playerStatus["credits"]
if playerCredits >= goals["credits"]:
    creditsGoal = True
shipStatus = status.getShipStatus()
print(shipStatus)
playerTech = shipStatus["Average tech level"]
if playerTech >= goals["techLevel"]:
    techGoal = True
print("Player has {} planets, desired number of planets: {}, goal met: {}".format(numPlanets, goals["planets"], planetGoal))
print("Player has {} credits, desired number of credits: {}, goal met: {}".format(playerCredits, goals["credits"], creditsGoal))
print("Player has {} average tech level, desired average tech level: {}, goal met: {}".format(playerTech, goals["techLevel"], techGoal))
print("Player has {} trade routes established, target number of routes: {}, goal met: {}".format(numRoutes, goals["traderoutes"], tradeGoal))
print("ship status")
print(shipStatus)
if not tradeGoal:
    needRoutes = goals["traderoutes"] - numRoutes
    print("Attempting to establish {} trade routes".format(needRoutes))
    if numRoutes == 0:
        print("Working on initial trade route")
        #return [warpDB, tradeRoutes]
        createResults = initialRoute(warpDB)
        warpDB = createResults[0]
        tradeRoutes = createResults[1]
        numRoutes = len(tradeRoutes)
    # Keep searching until we hold 10 routes (the traderoutes goal).
    for routeNumber in range(numRoutes+1, 11):
        print("Working on trade route #{}".format(routeNumber))
        searchResults = trade.tradeRouteSearch(warpDB, 100)
        print("searchResults: {}".format(searchResults))
        searchStatus = searchResults[0]
        if searchStatus != "SUCCESS":
            print("Failed to find a trade route")
            exit(1)
        else:
            print("Successfully established a new trade route!")
            # ['SUCCESS', tradeRoutes, warpDB]
            tradeRoutes = searchResults[1]
            warpDB = searchResults[2]
# Refresh routes after any creation above.
tradeRoutes = trade.retrieveRoutes()
numRoutes = len(tradeRoutes)
turnsLeft = playerStatus["turnsLeft"]
# Examine our established trade routes and pick the one whose start port
# is cheapest to reach (directly or via a realspace jump), then travel
# there and run it.
shortestRoute = 9999
startRoute = 9999
directPath = False
routeId = -1
currentSector = playerStatus['currentSector']
print("Our current sector is: {}".format(currentSector))
for routeId in tradeRoutes:
    currentRoute = tradeRoutes[routeId]
    port1 = currentRoute["port1"]
    port2 = currentRoute["port2"]
    print("Current Sector: {}, Examining Trade Route Id {}, {} <=> {}".format(currentSector, routeId, port1, port2))
    # handle the case where we are sitting on a trade route
    if currentSector == port1 or currentSector == port2:
        shortestRoute = 0
        directPath = True
        bestRouteId = routeId
        break
    else:
        shortestPath = find_shortest_path(warpDB, currentSector, port1)
        if not shortestPath == None:
            # NOTE(review): this message looks inverted -- it prints when a
            # direct path WAS found.  Left as-is; confirm intent.
            print("There is no direct path to the start of the Trade Route")
            distance = len(shortestPath)
            print("Trade Route via direct path is {} turns away".format(distance))
            print("direct path: {}".format(shortestPath))
            if distance < shortestRoute:
                shortestRoute = distance
                directPath = True
                bestRouteId = routeId
        else:
            print("Querying indirect path to trade route start")
            indirectDistance = int(trade.queryIndirectPath(currentSector, port1)[0])
            print("Trade Route via indirect path is {} turns away".format(indirectDistance))
            if indirectDistance < shortestRoute:
                shortestRoute = indirectDistance
                directPath = False
                bestRouteId = routeId
theRoute = tradeRoutes[bestRouteId]
print("Selected Trade Route Id {}, starting at port {}, which is {} moves away".format(bestRouteId, theRoute["port1"], shortestRoute))
if shortestRoute == 0:
    print("Ship is already there")
else:
    if shortestRoute > turnsLeft:
        print("Not enough turns left to travel to start of trade route.")
        print("This is where we would log out for a while")
        exit(1)
    if directPath:
        print("Traveling to the start of the trade route")
        shortestPath = find_shortest_path(warpDB, currentSector, theRoute["port1"])
        if moveVia(shortestPath):
            print('Successfully moved to start of trade route, sector: {}'.format(theRoute["port1"]))
        else:
            print('Something happened attempting to move to start of trade route, sector: {}'.format(theRoute["port1"]))
            exit(1)
    else:
        print("Using a realspace jump to the start of the trade route")
        if trade.queryIndirectPath(currentSector, theRoute["port1"], True):
            print("Jump succeeded!")
        else:
            print("Something unhandled happended during the Real Space jump...")
            exit(1)
print("At this point, we should be ready to run the trade route and make some credits!")
playerStatus = status.getStatus()
print("Player Status:")
print(playerStatus)
currentSector = playerStatus['currentSector']
print("Player is currently in sector {}".format(currentSector))
turnsLeft = playerStatus["turnsLeft"]
print("Warp Database")
print(warpDB)
print("trade routes: {}".format(tradeRoutes))
print("Saving the warpDB")
warpsTable.insert(warpDB)
print("Saving the trade route DB")
# NOTE(review): trade routes are inserted into the 'warps' table -- confirm
# this is intentional rather than a separate table.
warpsTable.insert(tradeRoutes)
print("current trade routes")
print(tradeRoutes)
# determine how many times we can execute the trade - if less than 25
if turnsLeft < 25 * theRoute["distance"]:
    maxTrades = int(turnsLeft / theRoute["distance"])
    print("Can perform a maximum of {} trades".format(maxTrades))
else:
    maxTrades = 25
print("Need to start executing trade route Id: {}".format(bestRouteId))
trade.executeTrade(bestRouteId, maxTrades, True)
# After trading: price a one-level upgrade of every ship system, and if we
# can afford the lot, return to sector 0 and buy them.
shipStatus = status.getShipStatus()
print("ship status")
print(shipStatus)
playerStatus = status.getStatus()
turnsLeft = playerStatus["turnsLeft"]
# Calculate the upgrade cost prior to attempting to buy them
currentHull = shipStatus["Hull"]
currentEngines = shipStatus["Engines"]
currentPower = shipStatus["Power"]
currentComputer = shipStatus["Computer"]
currentSensors = shipStatus["Sensors"]
currentArmor = shipStatus["Armor"]
currentShields = shipStatus["Shields"]
currentBeams = shipStatus["Beam Weapons"]
currentTorpedo = shipStatus["Torpedo launchers"]
currentCloak = shipStatus["Cloak"]
hullCost = upgradeCost(currentHull + 1, currentHull)
engineCost = upgradeCost(currentEngines + 1, currentEngines)
powerCost = upgradeCost(currentPower + 1, currentPower)
computerCost = upgradeCost(currentComputer + 1, currentComputer)
sensorsCost = upgradeCost(currentSensors + 1, currentSensors)
armorCost = upgradeCost(currentArmor + 1, currentArmor)
shieldsCost = upgradeCost(currentShields + 1, currentShields)
beamsCost = upgradeCost(currentBeams + 1, currentBeams)
torpedoCost = upgradeCost(currentTorpedo + 1, currentTorpedo)
cloakCost = upgradeCost(currentCloak + 1, currentCloak)
print("Current Hull tech: {}, cost to upgrade: {}".format(currentHull, hullCost))
print("Current Engine tech: {}, cost to upgrade: {}".format(currentEngines, engineCost))
print("Current Power tech: {}, cost to upgrade: {}".format(currentPower, powerCost))
print("Current Computer tech: {}, cost to upgrade: {}".format(currentComputer, computerCost))
print("Current Sensors tech: {}, cost to upgrade: {}".format(currentSensors, sensorsCost))
print("Current Armor tech: {}, cost to upgrade: {}".format(currentArmor, armorCost))
print("Current Shields tech: {}, cost to upgrade: {}".format(currentShields, shieldsCost))
print("Current Beams tech: {}, cost to upgrade: {}".format(currentBeams, beamsCost))
print("Current Torpedo tech: {}, cost to upgrade: {}".format(currentTorpedo, torpedoCost))
print("Current Cloak tech: {}, cost to upgrade: {}".format(currentCloak, cloakCost))
# Desired level per system, as strings (the port form expects text values).
shoppingList = {}
shoppingList["Hull"] = str(currentHull + 1)
shoppingList["Engines"] = str(currentEngines + 1)
shoppingList["Power"] = str(currentPower + 1)
shoppingList["Computer"] = str(currentComputer + 1)
shoppingList["Sensors"] = str(currentSensors + 1)
shoppingList["Armor"] = str(currentArmor + 1)
shoppingList["Shields"] = str(currentShields + 1)
shoppingList["Beam Weapons"] = str(currentBeams + 1)
shoppingList["Torpedo launchers"] = str(currentTorpedo + 1)
shoppingList["Cloak"] = str(currentCloak + 1)
totalCost = hullCost + engineCost + powerCost + computerCost + sensorsCost + armorCost + shieldsCost + beamsCost + torpedoCost + cloakCost
if totalCost > playerStatus['credits']:
    print("Cost for upgrades: {}, Credits available: {}".format(totalCost, playerStatus['credits']))
    print("Not enough credits for the upgrades")
    exit(1)
else:
    print("Returning to Sector Zero to make some purchases")
    shortestPath = find_shortest_path(warpDB, currentSector, '0')
    if shortestPath != None:
        print("Best Path to sector 0")
        print(shortestPath)
        print('Using the new "moveVia" function')
        # remove the first sector, which is where we are currently
        if moveVia(shortestPath):
            print('Successfully moved to sector 0!')
        else:
            print('Something happened attempting to move to sector 0')
            exit(1)
    else:
        # No known path: fall back to the step-by-step walker.
        print('There is no known path from {} to sector 0'.format(currentSector))
        print('Attempting to move player to sector 0 starting position')
        returnStatus = gotoSectorZero(100, warpDB)
        passOrFail = returnStatus[0]
        warpDB = returnStatus[1]
        print("Found Sector Zero: {}".format(passOrFail))
        if not passOrFail:
            print('Was unable to find sector zero - aborting')
            exit(1)
print("Now in sector Zero - attempting to make a purchase")
print("shopping list: {}".format(shoppingList))
purchaseResults = port.specialPort(shoppingList)
howMuch = purchaseResults[1]
if purchaseResults[0] == "SUCCESS":
    print("Purchase was successful, cost was: {}".format(howMuch))
else:
    print("Could not afford the purchase")
    creditsAvailable = purchaseResults[2]
    print("Purchase cost: {}, Credits available: {}, Difference: {}".format(howMuch, creditsAvailable, howMuch - creditsAvailable))
    exit(1)
exit(1)
# Need to evaluate turns remaining prior to attempting jumps
|
#!/usr/bin/env python
""" Example for detect IP Fragmentation attacks on the network """
import sys
import os
import pyaiengine
delta = 100
previous_fragments = 0
previous_ip_packets = 0
def timer_5seconds():
    """Periodic callback: sample IP counters and alert on a fragment spike.

    Reads the "IP" counter table from the module-level stack ``st``,
    prints the raw stats, and raises an alert when the fragmented-packet
    count grew by more than ``delta`` since the previous tick.
    """
    global delta, previous_fragments, previous_ip_packets
    counters = st.get_counters("IP")
    packets_now = counters["packets"]
    fragments_now = counters["fragmented packets"]
    print("\033[34m" + "INFO: " + str(counters) + "\033[0m")
    spike = fragments_now > previous_fragments + delta
    if spike:
        print("\033[31m" + "ALERT: IP Fragment attack on the network" + "\033[0m")
    # Remember this sample for the next tick's comparison.
    previous_ip_packets = packets_now
    previous_fragments = fragments_now
if __name__ == '__main__':
    # Build a LAN protocol stack and size its flow tables up front.
    st = pyaiengine.StackLan()
    st.tcp_flows = 327680
    st.udp_flows = 163840
    # Capture interface name — hard-coded; adjust to the local NIC.
    source = "enp0s31f6"
    with pyaiengine.PacketDispatcher(source) as pd:
        pd.stack = st
        # Run timer_5seconds every 5 seconds to watch for fragment spikes.
        pd.add_timer(timer_5seconds, 5)
        # Blocks, processing packets until interrupted.
        pd.run()
    sys.exit(0)
|
import pytest
from convert_chars import convert_pybites_chars
# Each tuple is (input sentence, expected output) for convert_pybites_chars,
# which apparently swaps the case of every character appearing in "pybites"
# — TODO confirm against convert_chars.py.
@pytest.mark.parametrize("arg, expected", [
    ("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do",
     "LorEm IPSum dolor SIT amET, conSEcTETur adIPIScIng ElIT, SEd do"),
    ("Vestibulum morbi blandit cursus risus at ultrices",
     "VESTIBulum morBI BlandIT curSuS rISuS aT ulTrIcES"),
    ("Aliquet nibh praesent tristique magna sit amet purus gravida quis",
     "AlIquET nIBh PraESEnT TrISTIquE magna SIT amET PuruS gravIda quIS"),
    ("Fames ac turpis egestas maecenas pharetra",
     "FamES ac TurPIS EgESTaS maEcEnaS PharETra"),
    ("Vitae purus faucibus ornare suspendisse sed nisi lacus",
     "VITaE PuruS faucIBuS ornarE SuSPEndISSE SEd nISI lacuS"),
    ("Pharetra massa massa ultricies mi quis",
     "pharETra maSSa maSSa ulTrIcIES mI quIS"),
    ("Senectus et netus et malesuada fames",
     "sEnEcTuS ET nETuS ET malESuada famES"),
    ("Arcu non sodales neque sodales ut etiam sit",
     "Arcu non SodalES nEquE SodalES uT ETIam SIT"),
    ("Natoque penatibus et magnis dis parturient montes nascetur",
     "NaToquE PEnaTIBuS ET magnIS dIS ParTurIEnT monTES naScETur"),
    ("Urna cursus eget nunc scelerisque viverra mauris in aliquam",
     "Urna curSuS EgET nunc ScElErISquE vIvErra maurIS In alIquam"),
    ("Vestibulum mattis ullamcorper velit sed ullamcorper morbi tincidunt",
     "VESTIBulum maTTIS ullamcorPEr vElIT SEd ullamcorPEr morBI TIncIdunT"),
    ("Tempus urna et pharetra pharetra",
     "tEmPuS urna ET PharETra PharETra"),
    ("Ullamcorper a lacus vestibulum sed",
     "UllamcorPEr a lacuS vESTIBulum SEd"),
    ("Cursus risus at ultrices mi",
     "CurSuS rISuS aT ulTrIcES mI"),
    ("Egestas congue quisque egestas diam in arcu",
     "egESTaS conguE quISquE EgESTaS dIam In arcu"),
    ("Sit amet tellus cras adipiscing enim eu",
     "sIT amET TElluS craS adIPIScIng EnIm Eu"),
    ("Imperdiet sed euismod nisi porta lorem mollis aliquam",
     "imPErdIET SEd EuISmod nISI PorTa lorEm mollIS alIquam"),
    ("Adipiscing tristique risus nec feugiat in fermentum posuere urna",
     "AdIPIScIng TrISTIquE rISuS nEc fEugIaT In fErmEnTum PoSuErE urna"),
    ("Et magnis dis parturient montes",
     "eT magnIS dIS ParTurIEnT monTES"),
    ("Elementum curabitur vitae nunc sed velit dignissim sodales ut.",
     "elEmEnTum curaBITur vITaE nunc SEd vElIT dIgnISSIm SodalES uT."),
])
def test_convert_pybites_chars(arg, expected):
    """convert_pybites_chars must transform each sample exactly as listed."""
    assert convert_pybites_chars(arg) == expected
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import bibtexparser
from bibtexparser.bparser import BibTexParser
import re
from pypinyin import pinyin, Style
def to_pinyin(string):
    """Return *string* rendered as space-separated pinyin (numeric tones)."""
    # pypinyin returns one list of candidates per character; flatten them
    # before joining with spaces.
    syllable_groups = pinyin(string, style=Style.TONE3)
    flat = [syllable for group in syllable_groups for syllable in group]
    return ' '.join(flat)
def if_containing_chinese(test_string):
    """Return True if *test_string* contains any CJK unified ideograph.

    Uses the U+4E00..U+9FFF range, which covers common Han characters.
    """
    # re.search stops at the first hit; findall would build a full match
    # list only to have it truth-tested and discarded.
    return re.search(r'[\u4e00-\u9fff]', test_string) is not None
def add_pinyin_key_to_bib_database(db):
    """Add a pinyin sort ``key`` to entries whose author field is Chinese.

    For each entry in *db* whose ``author`` contains Chinese characters,
    sets ``entry["key"]`` to the pinyin transliteration of the author so
    BibTeX sorting works. Entries without an author are skipped with a
    message. Mutates and returns *db*.
    """
    for entry in db.entries:
        try:
            if if_containing_chinese(entry["author"]):
                entry["key"] = to_pinyin(entry["author"])
        except KeyError:
            # Fixed typo in the message ("dose" -> "does").
            print("Entry does not contain author info. Skip.")
            print(entry)
    return db
def add_pinyin_key_to_bib_file(input_bib, output_bib, using_common_strings):
    """Read *input_bib*, add pinyin keys for Chinese authors, write *output_bib*."""
    with open(input_bib, encoding="utf-8") as src:
        raw_bibtex = src.read()
    bib_parser = BibTexParser(common_strings=using_common_strings,
                              ignore_nonstandard_types=False)
    database = bibtexparser.loads(raw_bibtex, bib_parser)
    updated = add_pinyin_key_to_bib_database(database)
    rendered = bibtexparser.bwriter.BibTexWriter().write(updated)
    with open(output_bib, encoding="utf-8", mode="w") as dst:
        dst.write(rendered)
|
import requests
import copy
import multiprocessing
import argparse
# 1 ZIL = 10^12 Qa; node balances are reported in Qa.
DENOMINATOR = 1000000000000.0
# JSON-RPC request template; get_balance deep-copies it and appends the
# address to "params" so the shared template is never mutated.
body = {
    "id": "1",
    "jsonrpc": "2.0",
    "method": "GetBalance",
    "params": []
}
# CoinGecko endpoint used to price the total in USD.
zil_api = "https://api.coingecko.com/api/v3/coins/zilliqa?community_data=false&developer_data=false&sparkline=false"
def get_zilliqa_price():
    """Return the current ZIL/USD price fetched from CoinGecko."""
    response = requests.get(zil_api)
    payload = response.json()
    return payload['market_data']['current_price']['usd']
def main():
    """Parse command-line options and report balances for the listed addresses."""
    arg_parser = argparse.ArgumentParser(description='Check the balance of multiple Zilliqa addresses.')
    arg_parser.add_argument(
        '--file',
        default="addresses",
        help='Specify the file containing a list of addresses to read from.')
    options = arg_parser.parse_args()
    print_balances(options.file)
def print_balances(filename):
    """Print each address's ZIL balance and a USD-priced total.

    *filename* contains one address per line. Balances are fetched
    concurrently via a process pool, then the grand total is converted
    from Qa to ZIL and priced with the CoinGecko rate.
    """
    # Use a context manager so the file handle is closed promptly
    # (a bare open() here would leak the descriptor).
    with open(filename) as handle:
        addresses = handle.read().strip().split("\n")
    total = 0
    # Context manager terminates and reaps the worker processes on exit.
    with multiprocessing.Pool(40) as pool:
        results = pool.map(get_balance, addresses)
    for address, current_bal in zip(addresses, results):
        print("%s: %f" % (address, current_bal/DENOMINATOR))
        total += current_bal
    current_price = get_zilliqa_price()
    zils = total/DENOMINATOR
    print("Total: %f ($%.2f USD)" % (zils, zils * current_price))
def get_balance(address):
    """Return the balance of *address* in Qa, or 0 when the node has no record."""
    # Deep-copy the shared request template so "params" isn't mutated globally.
    request_body = copy.deepcopy(body)
    request_body['params'].append(address)
    response = requests.post("https://api.zilliqa.com/", json=request_body)
    payload = response.json()
    # Unknown or empty accounts come back without a "result" member.
    if "result" not in payload:
        return 0
    return int(payload['result']['balance'])
# Script entry point.
if __name__ == '__main__':
    main()
|
import os
import argparse
import torch
import asyncio
import pandas as pd
from json import load
from core.CryptoCompare import *
from core.neuralnet import *
from core.preprocessing import *
from core.tools import db_to_csv, update_from_env
# Hyperparameters for the BTC/USD price model.
SEQUENCE_LENGTH = 25 # days
SPLIT_PERCENTAGE = 0.75 # x100%
DROP_RATE = 0.2
EPOCHS = 1000
# Historical daily aggregate stats; path is hard-coded relative to this script.
df = pd.read_csv('../data/btcusd/AgrStats[2010-7-20][2017-12-27].csv', sep = ',')
# window_transform / create_model / fit_model / test_model are project helpers
# from core.* — presumably a sliding-window train/test split over the
# 'Bitcoin Price' column feeding a torch model; confirm in core/preprocessing.py
# and core/neuralnet.py.
x_train, y_train, x_test, y_test, y_prior, bases, window_size = window_transform(df, SEQUENCE_LENGTH, SPLIT_PERCENTAGE, ['Bitcoin Price'])
model = create_model(window_size, x_train.shape[-1], DROP_RATE, 1)
trained_model, training_time = fit_model(model, 'Adam', 'MSELoss', x_train, y_train, EPOCHS)
print(training_time)
y_pred, real_y_test, real_y_pred, fig = test_model(trained_model, x_test, y_test, bases, 'BTC/USD')
# Display the prediction-vs-actual plot produced by test_model.
fig.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.