max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config

# -- Path setup --------------------------------------------------------------
import os
import sys

# Make this directory importable so local Sphinx extensions can be found.
sys.path.insert(0, os.path.abspath(os.path.split(__file__)[0]))

# -- Project information -----------------------------------------------------
project = 'dtmm'
copyright = '2018, <NAME>'
author = '<NAME>'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.7.0'
# Number figures/tables so they can be cross-referenced with :numref:.
numfig = True

# custom matplotlib plot_template
#
# NOTE(review): the original file defined this identical if/else block twice
# and indexed sys.argv[2] unguarded (IndexError when sphinx is invoked with
# fewer CLI arguments); deduplicated and guarded here.
if len(sys.argv) > 2 and sys.argv[2] in ('latex', 'latexpdf'):
    plot_template = """
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.pdf
{%- for option in options %}
{{ option }}
{% endfor %}
\t{{caption}}
{% endfor %}
"""
else:
    plot_template = """
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
\t{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }} {% if source_link or (html_show_formats and not multi_image) %} (
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% endfor %}
"""

# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Sphinx extension modules, as strings.
# NOTE(review): the original assigned ``extensions`` twice; only the second
# assignment (this one) took effect, so the dead first list was removed.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.githubpages',
    'sphinx.ext.imgmath',
    'sphinx.ext.napoleon',
    'sphinx.ext.doctest',
    'sphinx.ext.inheritance_diagram',
    'autoapi.extension',
    'matplotlib.sphinxext.plot_directive',
]

autoapi_keep_files = False
napoleon_numpy_docstring = True
autoapi_dirs = ['../dtmm']
# NOTE(review): effective value of the original duplicate assignment.
autoapi_options = ['members', 'show-inheritance']
autoapi_ignore = ["*/test/*.py", "*/test"]

# Names made available to every doctest block; imports are best effort so
# building the docs does not fail when the package itself cannot be imported.
doctest_global_setup = '''
try:
    import numpy as np
    import dtmm
    from dtmm.fft import *
    from dtmm.color import *
    from dtmm.data import *
    from dtmm.tmm import *
    from dtmm.jones4 import *
    from dtmm.jones import *
    from dtmm.window import *
    from dtmm.linalg import *
    from dtmm.rotation import *
except ImportError:
    pass

field_in = (np.ones((1,4,6,6))+0j, np.array((3.,)), 100)
field_data_in = field_in
field_data_out = (np.ones((1,4,6,6))+0j, np.array((3.,)), 100)
field_bulk_data = (np.ones((1,2,1,4,6,6))+0j, np.array((3.,)), 100)
field = field_in
optical_data = np.array((1.,)), np.ones((1,6,6,3))*2+0j, np.zeros((1,6,6,3))
data = optical_data
NLAYERS, HEIGHT, WIDTH = 1,6,6
WAVELENGTHS = [500]
PIXELSIZE = 200
'''

plot_working_directory = "examples"  # os.path.abspath("../examples")
imgmath_image_format = "svg"

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'default'
# html_theme = 'alabaster'

# Paths that contain custom static files (copied after builtin static files).
html_static_path = []
| 1.859375 | 2 |
city_scrapers/spiders/summ_developmental_disabilities.py | City-Bureau/city-scrapers-akr | 0 | 12765952 | <reponame>City-Bureau/city-scrapers-akr<filename>city_scrapers/spiders/summ_developmental_disabilities.py
import json
import re
from datetime import datetime
import scrapy
from city_scrapers_core.constants import BOARD
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
from dateutil.relativedelta import relativedelta
class SummDevelopmentalDisabilitiesSpider(CityScrapersSpider):
    """Scrapes board meetings for the Summit County Developmental
    Disabilities Board from the agency's WordPress events calendar."""

    name = "summ_developmental_disabilities"
    agency = "Summit County Developmental Disabilities Board"
    timezone = "America/Detroit"
    custom_settings = {"ROBOTSTXT_OBEY": False, "HTTPERROR_ALLOW_ALL": True}

    def __init__(self, *args, **kwargs):
        # Maps (year, month) -> Meeting so that the meeting-documents page
        # scraped in _parse_links can be matched back to its meeting.
        self.month_meeting_map = {}
        super().__init__(*args, **kwargs)

    def start_requests(self):
        """Request the calendar AJAX feed for a six-month window centered on
        the current month (three months back, three ahead)."""
        this_month = datetime.now().replace(day=1)
        for m in range(-3, 3):
            yield scrapy.Request(
                (
                    "http://www.summitdd.org/wp-admin/admin-ajax.php?action=WP_FullCalendar&type=event&event-categories=30&start={}"  # noqa
                ).format((this_month + relativedelta(months=m)).strftime("%Y-%m-%d")),
                callback=self.parse,
                dont_filter=True,
            )

    def parse(self, response):
        """Follow each event in the calendar JSON feed to its detail page.

        `parse` should always `yield` Meeting items (here indirectly, via
        `_parse_detail`).
        """
        data = json.loads(response.text)
        for item in data:
            yield response.follow(
                item["url"], callback=self._parse_detail, dont_filter=True
            )

    def _parse_detail(self, response):
        """Build a Meeting from an event detail page, then follow the
        matching board-meeting-documents page to collect its links."""
        start = self._parse_start(response)
        if not start:
            return
        meeting = Meeting(
            title="Developmental Disabilities Board",
            description="",
            classification=BOARD,
            start=start,
            end=None,
            all_day=False,
            time_notes="",
            location=self._parse_location(response),
            links=[],
            source=response.url,
        )
        meeting["status"] = self._get_status(meeting)
        meeting["id"] = self._get_id(meeting)
        self.month_meeting_map[(start.year, start.month)] = meeting
        yield response.follow(
            (
                "/about/summit-dd-board/board-meetings/{}-meeting-documents/{}-board-meeting-documents/"  # noqa
            ).format(start.year, start.strftime("%B").lower()),
            callback=self._parse_links,
            dont_filter=True,
        )

    def _parse_start(self, response):
        """Parse start datetime as a naive datetime object.

        Dates appear as e.g. "1.28.2020"; the time ("7 pm" / "7:30 pm") is
        optional and defaults to midnight when absent.
        """
        dt_str = " ".join(response.css(".large-6 h3 + p")[:1].css("*::text").extract())
        date_match = re.search(r"\d{1,2}\.\d{1,2}\.\d{4}", dt_str)
        if not date_match:
            return
        time_str = "12:00 am"
        time_match = re.search(r"\d{1,2}(:\d{2})? [apm]{2}", dt_str)
        if time_match:
            time_str = time_match.group()
        dt_fmt = "%m.%d.%Y%I:%M %p"
        if ":" not in time_str:
            dt_fmt = "%m.%d.%Y%I %p"
        return datetime.strptime(date_match.group() + time_str, dt_fmt)

    def _parse_location(self, response):
        """Parse or generate location."""
        loc_parts = [
            p.strip()
            for p in response.css(".location-address::text").extract()
            if p.strip()
        ]
        # Likely remote
        if len(loc_parts) == 0:
            return {"name": "", "address": ""}
        # BUG FIX: the original indexed loc_parts[1] after only checking for
        # an empty list, raising IndexError for single-part addresses; only
        # split into name + street address when a second, numeric-leading
        # part exists.
        if len(loc_parts) > 1 and loc_parts[1][0].isdigit():
            return {
                "name": loc_parts[0].replace("\u2013", "-"),
                "address": " ".join(loc_parts[1:]).strip(),
            }
        return {"name": "", "address": " ".join(loc_parts)}

    def _parse_links(self, response):
        """Attach the document links found on a meeting-documents page to the
        meeting for that (year, month), then yield the completed meeting."""
        month_year_match = re.search(
            r"(?P<year>\d{4}).*(?<=/)(?P<month>[a-z]+)", response.url
        )
        year_str = month_year_match.group("year")
        month_str = month_year_match.group("month").title()
        date_obj = datetime.strptime(year_str + month_str, "%Y%B").date()
        meeting = self.month_meeting_map.get((date_obj.year, date_obj.month))
        if not meeting:
            return
        for link in response.css(".entry-content a"):
            meeting["links"].append(
                {
                    "title": " ".join(link.css("*::text").extract())
                    .strip()
                    .replace("\u2013", "-"),
                    "href": response.urljoin(link.attrib["href"]),
                }
            )
        yield meeting
| 2.4375 | 2 |
abc/ABC162/python/c.py | yokotani92/atcoder | 0 | 12765953 | import math
K = int(input())
ans = 0
for i in range(1, K+1):
for j in range(i, K+1):
for k in range(j, K+1):
if (i == j) and (j == k):
ans += math.gcd(i, math.gcd(j, k))
elif (i == j) or (j == k):
ans += 3 * math.gcd(i, math.gcd(j, k))
else:
ans += 6 * math.gcd(i, math.gcd(j, k))
print(ans)
| 3.59375 | 4 |
deprecated.py | jp-richter/formelbaer-rnn | 0 | 12765954 | <reponame>jp-richter/formelbaer-rnn<filename>deprecated.py<gh_stars>0
import sys
import os
import torch
import numpy
import tokens
import config
import generator
import re
import matplotlib.pyplot as plt
class PolicyEvaluator:
    """Loads saved generator policy checkpoints and visualises the average
    token distribution the policy produces over a full rollout."""

    def _plot(self, x, y, legend):
        # Overlay one translucent bar chart per row of ``y``.
        # NOTE(review): the loop variable deliberately shadows the ``y``
        # argument; each element of the original ``y`` is one distribution.
        figure, axis = plt.subplots()
        for y, label in zip(y, legend):
            axis.bar(x, y, alpha=0.5, label=label)
        plt.legend(loc='best')
        return figure

    def _policy_step(self, nn_policy, batch, hidden):
        # Feed only the most recently generated token (kept as a length-1
        # sequence dimension) to the policy network.
        state = batch[:, -1, :][:, None, :]
        policies, hidden = nn_policy(state, hidden)
        # Sample the next token from the predicted distribution and append
        # its one-hot encoding to the running batch of sequences.
        distributions = torch.distributions.Categorical(policies)
        actions = distributions.sample()
        encodings = torch.tensor([tokens.onehot(id) for id in actions])
        encodings = encodings[:, None, :].float()
        batch = torch.cat((batch, encodings), dim=1)
        return policies, batch, hidden

    def _policy_average(self, filepath):
        # Load the checkpoint at ``filepath``, roll it out for
        # config.sequence_length steps and return the policy distribution
        # averaged over steps and batch, as a plain list of probabilities.
        nn_policy = generator.Policy()
        nn_policy.load(filepath)
        batch, hidden = nn_policy.initial()
        results = torch.empty((0, batch.shape[0], batch.shape[2]))
        for _ in range(config.sequence_length):
            policies, batch, hidden = self._policy_step(nn_policy, batch, hidden)
            policies = policies.unsqueeze(dim=0)
            results = torch.cat((results, policies), dim=0)
        results = torch.mean(results, dim=0)  # average over steps
        results = torch.mean(results, dim=0)  # average over batch
        results = results.tolist()
        return results

    def evaluate(self, filepath):
        # Evaluate every ``stepsize``-th checkpoint (*.pt) found directly in
        # ``filepath``: dump the per-token averages next to the checkpoint
        # and save bar plots (one per checkpoint plus one combined).
        policy_paths = []  # (path)
        policies = []  # (average policy)
        stepsize = 10
        iter = os.scandir(filepath)  # NOTE(review): shadows builtin ``iter``
        for entry in iter:
            if entry.name.endswith('.pt'):
                policy_paths.append(filepath + '/' + entry.name)
        names = [tokens.get(id).name for id in tokens.possibilities()]
        for i, path in enumerate(policy_paths):
            if i % stepsize == 0:
                policy = self._policy_average(path)
                # NOTE(review): path[:-2] keeps the trailing '.' of the
                # '.pt' suffix, producing names like "ckpt..txt" -- likely
                # intended [:-3]; verify the output filenames.
                with open('{}.txt'.format(path[:-2]), 'w', encoding="utf-8") as file:
                    string = ''
                    for name, value in zip(names, policy):
                        string += '{}: {}\n'.format(name, value)
                    file.write(string)
                policies.append(policy)
        x = numpy.array(tokens.possibilities())  # token ids for all tokens
        y = numpy.array([p for p in policies])
        # save single plots
        for i, policy in enumerate(y):
            figure = self._plot(x, policy, [''])
            epoch = i * stepsize
            figure.savefig('{}/{}.png'.format(filepath, epoch))
        # save multiplots every tenth policy
        legend = [str(epoch * stepsize) for epoch in range(len(policies))]
        figure = self._plot(x, y, legend)
        figure.savefig('{}/all_distributions.png'.format(filepath))
class LogEvaluator():
    """Parses scalar training metrics ("Generator Reward/Loss/Prediction as
    Sequence: ..." lines) out of a run's log file and saves matplotlib plots
    of them next to the log."""

    def _plot(self, values, xlabel, ylabel, legend, title, fontsize, path):
        """Plot one curve per (x, y) pair and save the figure to ``path``.

        values: list of (x, y) pairs of numpy arrays, e.g. [(x1, y1), ...].
        legend: one label per pair, e.g. ['reward', 'loss', ...].
        """
        figure, axis = plt.subplots()
        lines = ['b', 'r', 'y']  # at most three curves are supported
        for (x, y), line, label in zip(values, lines, legend):
            axis.plot(x, y, line, label=label, linewidth=0.3)
        plt.title(title, fontsize=fontsize)
        plt.xlabel(xlabel, fontsize=fontsize)
        plt.ylabel(ylabel, fontsize=fontsize)
        leg = plt.legend()
        for line in leg.get_lines():
            line.set_linewidth(1)
        for text in leg.get_texts():
            text.set_fontsize('x-large')
        axis.grid()
        figure.savefig(path)

    def _parse(self, filepath, target):
        """Extract the metric sequence named by ``target`` from the log file.

        Returns the comma separated values following the metric's header
        line as a list of floats.

        Raises:
            ValueError: if the metric header does not occur exactly once.
        """
        with open(filepath, 'r') as file:
            string = file.read()

        # Regex per metric; the matching lambda strips the fixed-width
        # header prefix from a match.
        targets = {
            'greward': r'Generator\sReward\sas\sSequence:\s.*',
            'gloss': r'Generator\sLoss\sas\sSequence:\s.*',
            'gprediction': r'Generator\sPrediction\sas\sSequence:\s.*',
            'dloss': r'Discriminator\sLoss\sas\sSequence:\s.*'
        }
        targets_substrings = {
            'greward': lambda s: s[30:],
            'gloss': lambda s: s[28:],
            'gprediction': lambda s: s[34:],
            'dloss': lambda s: s[32:]
        }

        pattern = re.compile(targets[target])
        result = [match.group() for match in re.finditer(pattern, string)]

        # The original used ``assert len(result) == 1``; asserts are stripped
        # under ``python -O``, so validate explicitly instead.
        if len(result) != 1:
            raise ValueError(
                'expected exactly one "{}" entry in {}, found {}'.format(
                    target, filepath, len(result)))

        numbers = targets_substrings[target](result[0])
        return [float(n) for n in numbers.split(',')]

    def evaluate(self, filepath):
        """Parse all generator metrics plus the discriminator loss from the
        log at ``filepath`` and save the corresponding plots.

        NOTE(review): ``filepath[:-11]`` assumes a fixed-length log file name
        at the end of the path -- verify against the caller.
        """
        targets = ['greward', 'gloss', 'gprediction']
        targets_labels = {
            'greward': 'Generator Reward',
            'gloss': 'Generator Loss',
            'gprediction': 'Generator Prediction'
        }
        results = []

        # single plots
        for t in targets:
            numbers = self._parse(filepath, t)
            x = numpy.arange(0, len(numbers), 1)
            y = numpy.array(numbers)
            # save single plot
            self._plot([(x, y)], 'Step', targets_labels[t], [''], '', 12,
                       '{}/{}_plot.png'.format(filepath[:-11], t))
            if t in ('gloss', 'greward'):
                # min-max normalize so the curves share one scale below
                y = (y - y.min()) / (y.max() - y.min())
            results.append((x, y))

        # plot all on same surface
        legend = ['Generator Reward', 'Generator Loss', 'Discriminator Prediction']
        self._plot(results, 'Step', '', legend, '', 12,
                   '{}/gen_plot.png'.format(filepath[:-11]))

        numbers = self._parse(filepath, 'dloss')
        x = numpy.arange(0, len(numbers), 1)
        y = numpy.array(numbers)
        self._plot([(x, y)], 'Epoch', 'Discriminator Loss', [''], '', 12,
                   '{}/dis_plot.png'.format(filepath[:-11]))
if __name__ == '__main__':
    # Usage: python deprecated.py <run-directory>
    assert len(sys.argv) == 2
    _, filepath = sys.argv

    # Run the two evaluators independently and best-effort: a failure in one
    # (e.g. missing checkpoints or log) should not stop the other, but it
    # must no longer be swallowed silently by a bare ``except`` (which also
    # caught KeyboardInterrupt/SystemExit).
    try:
        evaluator = PolicyEvaluator()
        evaluator.evaluate(filepath)
    except Exception as error:
        print('policy evaluation failed: {}'.format(error))

    try:
        evaluator = LogEvaluator()
        evaluator.evaluate(filepath)
    except Exception as error:
        print('log evaluation failed: {}'.format(error))
| 1.960938 | 2 |
eventos/views.py | aromero45/proyecto0 | 0 | 12765955 |
from . import models
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
import json
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
# Home page endpoint.
def index(request):
    """Render the home page listing the session user's events, newest first.

    A session value of 0 (the default) marks an anonymous visitor; the
    ``sesion`` flag tells the template whether someone is logged in.
    """
    user_id = request.session.get("user", 0)
    logged_in = user_id != 0
    events = (
        models.Event.objects
        .filter(person__id=user_id)
        .order_by("-creation_date")
    )
    return render(request, "index.html", {"sesion": logged_in, "events": events})
@csrf_exempt
# User registration endpoint.
def register(request):
    """Render the (empty) user registration form."""
    return render(request, "registro.html", {})
# Event modification endpoint.
@csrf_exempt
def modifyEvent(request, id_ev):
    """Render the modification form for the event with primary key ``id_ev``.

    Leftover debug ``print`` statements were removed. NOTE(review):
    ``objects.get`` raises DoesNotExist (HTTP 500) for unknown ids -- a 404
    would be friendlier, but that change is left to the caller to confirm.
    """
    event = models.Event.objects.get(pk=id_ev)
    categories = models.Category.objects.all()
    types = models.Type.objects.all()
    context = {"event": event, "categories": categories, "types": types}
    return render(request, "modify.html", context)
# Event creation endpoint.
@csrf_exempt
def createEvent(request):
    """Render the event creation form with all categories and types."""
    context = {
        "categories": models.Category.objects.all(),
        "types": models.Type.objects.all(),
    }
    return render(request, "createEvent.html", context)
@csrf_exempt
def eventDetails(request, id_ev):
    """Render the detail page for the event with primary key ``id_ev``."""
    event = models.Event.objects.get(pk=id_ev)
    return render(request, "details.html", {"evento": event})
| 2.109375 | 2 |
src/scantree/_scan.py | tomasaschan/scantree | 8 | 12765956 | <filename>src/scantree/_scan.py
from __future__ import print_function, division
import os
from multiprocessing.pool import Pool
from pathspec import RecursionError as _RecursionError
from .compat import fspath
from ._node import (
DirNode,
LinkedDir,
CyclicLinkedDir,
identity,
is_empty_dir_node
)
from ._path import RecursionPath
def scantree(
    directory,
    recursion_filter=identity,
    file_apply=identity,
    dir_apply=identity,
    follow_links=True,
    allow_cyclic_links=True,
    cache_file_apply=False,
    include_empty=False,
    jobs=1
):
    """Recursively scan the file tree under the given directory.

    The files and subdirectories in each directory will be used to initialize
    the object: `DirNode(path=..., files=[...], directories=[...])`, where
    `path` is the `RecursionPath` to the directory (relative to the root
    directory of the recursion), `files` is a list of the results of
    `file_apply` called on the recursion path of each file, and `directories`
    is a list of the results of `dir_apply` called on each `DirNode` obtained
    (recursively) for each subdirectory.

    Hence, with the default value (identity function) for `file_apply` and
    `dir_apply`, a tree-like data structure is returned representing the file
    tree of the scanned directory, with all relevant metadata *cached in
    memory*. This example illustrates the core concepts:

    ```
    >>> tree = scantree('/path/to/dir')
    >>> tree.directories[0].directories[0].path.absolute
    '/path/to/dir/sub_dir_0/sub_sub_dir_0'
    >>> tree.directories[0].directories[0].path.relative
    'sub_dir_0/sub_sub_dir_0'
    >>> tree.directories[0].files[0].relative
    'sub_dir_0/file_0'
    >>> tree.directories[0].path.real
    '/path/to/linked_dir/'
    >>> tree.directories[0].path.is_symlink()  # already cached, no OS call needed
    True
    ```

    By providing a different `dir_apply` and `file_apply` function, you can
    operate on the paths and/or data of files while scanning the directory
    recursively. If `dir_apply` returns some aggregate or nothing (i.e.
    `None`) the full tree will never be stored in memory. The same result can
    be obtained by calling `tree.apply(dir_apply=..., file_apply=...)` but
    this can be done repeatedly without having to rerun expensive OS calls.

    # Arguments:
        directory (str | os.PathLike): The directory to scan.
        recursion_filter (f: f([RecursionPath]) -> [RecursionPath]): A filter
            function, defining which files to include and which
            subdirectories to scan, e.g. an instance of
            `scantree.RecursionFilter`. The `RecursionPath` implements the
            `DirEntry` interface. It caches metadata efficiently and, in
            addition to DirEntry, provides real path and path relative to the
            root directory of the recursion as properties, see
            `scantree.RecursionPath` for further details.
        file_apply (f: f(RecursionPath) -> object): The function to apply to
            the `RecursionPath` for each file. Default "identity", i.e.
            `lambda x: x`.
        dir_apply (f: f(DirNode) -> object): The function to apply to the
            `DirNode` for each (sub) directory. Default "identity", i.e.
            `lambda x: x`.
        follow_links (bool): Whether to follow symbolic links or not, i.e. to
            continue the recursive scanning in linked directories. If False,
            linked directories are represented by the `LinkedDir` object
            which does e.g. not have the `files` and `directories` properties
            (as these cannot be known without following the link). Default
            `True`.
        allow_cyclic_links (bool): If set to `False`, a
            `SymlinkRecursionError` is raised on detection of cyclic symbolic
            links, if `True` (default), the cyclic link is represented by a
            `CyclicLinkedDir` object. See "Cyclic Links Handling" below.
        cache_file_apply: If set to `True`, the `file_apply` result will be
            cached by *real* path. Default `False`.
        include_empty (bool): If set to `True`, empty directories are
            included in the result of the recursive scanning, represented by
            an empty directory node: `DirNode(directories=[], files=[])`. If
            `False` (default), empty directories are not included in the
            parent directory node (and subsequently never passed to
            `dir_apply`).
        jobs (int | None): If `1` (default), no multiprocessing is used. If
            jobs > 1, the number of processes to use for parallelizing
            `file_apply` over included files. If `None`, `os.cpu_count()`
            number of processes are used. NOTE: if jobs is `None` or > 1, the
            entire file tree will first be stored in memory before applying
            `file_apply` and `dir_apply`.

    # Returns:
        The `object` returned by `dir_apply` on the `DirNode` for the top
        level `directory`. If the default value ("identity" function:
        `lambda x: x`) is used for `dir_apply`, it will be the `DirNode`
        representing the root node of the file tree.

    # Raises:
        SymlinkRecursionError: if `allow_cyclic_links=False` and any cyclic
            symbolic links are detected.

    # Cyclic Links Handling:
        Symbolically linked directories can create cycles in the, otherwise
        acyclic, graph representing the file tree. If not handled properly,
        this leads to infinite recursion when traversing the file tree (this
        is e.g. the case for Python's built-in
        `os.walk(directory, followlinks=True)`).

        Sometimes multiple links form cycles together, therefore - without
        loss of generality - cyclic links are defined as:

            The first occurrence of a link to a directory that has already
            been visited on the current branch of recursion.

        With `allow_cyclic_links=True` any link to such a directory is
        represented by the object `CyclicLinkedDir(path=..., target_path=...)`
        where `path` is the `RecursionPath` of the link and `target_path` the
        `RecursionPath` of the parent directory that is the target of the
        link.

        In the example below there are cycles on all branches A/B, A/C and D:

            root/
            |__A/
            |  |__B/
            |  |  |__toA@ -> ..
            |  |__C/
            |     |__toA@ -> ..
            |__D/
               |__toB@ -> ../A/B

        In this case, the symlinks with relative paths A/B/toA, A/C/toA and
        D/toB/toA/B/toA will be represented by a `CyclicLinkedDir` object.
        Note that for the third branch, the presence of cyclic links can be
        *detected* already at D/toB/toA/B (since B is already visited) but it
        is D/toB/toA/B/toA which is considered a cyclic link (and gets
        represented by a `CyclicLinkedDir`). This reflects the fact that it
        is the toA that's "causing" the cycle, not D/toB or D/toB/toA/B
        (which is not even a link), and at D/toB/toA/ the cycle can not yet
        be detected.

        Below is another example where multiple links are involved in forming
        cycles as well as links which absolute path is external to the root
        directory of the recursion. Here the symlinks with relative paths
        A/toB/toA, B/toA/toB and C/toD/toC are considered cyclic links for
        `scandir('/path/to/root')`:

            /path/to/root/
            |__A/
            |  |__toB@ -> ../B
            |__B/
            |  |__toA@ -> /path/to/root/A
            |__C/
               |__toD@ -> /path/to/D

            /path/to/D/
            |__toC@ -> /path/to/root/C
    """
    _verify_is_directory(directory)

    if jobs is None or jobs > 1:
        # ``vars()`` is evaluated before any new local name is bound, so it
        # contains exactly this function's (keyword) arguments.
        return _scantree_multiprocess(**vars())

    path = RecursionPath.from_root(directory)

    if cache_file_apply:
        file_apply = _cached_by_realpath(file_apply)

    root_dir_node = _scantree_recursive(
        path=path,
        recursion_filter=recursion_filter,
        file_apply=file_apply,
        dir_apply=dir_apply,
        follow_links=follow_links,
        allow_cyclic_links=allow_cyclic_links,
        include_empty=include_empty,
        # Seed with the root itself so a link pointing back at the root is
        # detected as cyclic.
        parents={path.real: path},
    )
    result = dir_apply(root_dir_node)

    return result
def _scantree_multiprocess(**kwargs):
    """Multiprocess implementation of scantree.

    Only the ``file_apply`` step is parallelized: the tree is scanned first
    in this process while recording the path of every included file, the
    recorded files are then mapped over a worker pool, and the cached tree
    is finally re-traversed to splice the results back in.
    """
    file_apply = kwargs.pop('file_apply')
    dir_apply = kwargs.pop('dir_apply')
    num_workers = kwargs.pop('jobs')

    paths_by_index = []

    def remember_path(path):
        # Stand-in for ``file_apply`` during the scan: record the path and
        # leave its index in the tree as a placeholder.
        paths_by_index.append(path)
        return len(paths_by_index) - 1

    tree = scantree(file_apply=remember_path, dir_apply=identity, **kwargs)

    pool = Pool(num_workers)
    try:
        results = pool.map(file_apply, paths_by_index)
    finally:
        pool.close()

    def fetch_result(placeholder_index):
        return results[placeholder_index]

    return tree.apply(dir_apply=dir_apply, file_apply=fetch_result)
def _verify_is_directory(directory):
    """Verify that the path exists and is a directory.

    Raises:
        ValueError: with a message indicating whether the path is missing
            or exists but is not a directory.
    """
    directory = fspath(directory)
    # Existence is checked first so a missing path reports "No such
    # directory" rather than "Is not a directory".
    for predicate, problem in (
        (os.path.exists, 'No such directory'),
        (os.path.isdir, 'Is not a directory'),
    ):
        if not predicate(directory):
            raise ValueError('{}: {}'.format(directory, problem))
def _cached_by_realpath(file_apply):
"""Wrapps the `file_apply` function with a cache, if `path.real` is already in
the cache, the cached value is returned"""
cache = {}
def file_apply_cached(path):
if path.real not in cache:
cache[path.real] = file_apply(path)
return cache[path.real]
return file_apply_cached
def _scantree_recursive(
    path,
    recursion_filter,
    file_apply,
    dir_apply,
    follow_links,
    allow_cyclic_links,
    include_empty,
    parents,
):
    """The underlying recursive implementation of scantree.

    Arguments are the same as for `scantree` except:

    # Arguments:
        path (RecursionPath): the recursion path relative to the directory
            where recursion was initialized.
        parents ({str: RecursionPath}): Mapping from real path (`str`) to
            `RecursionPath` for the directories on the *current branch* of
            the recursion; used to detect cyclic symlinks.

    # Returns:
        `DirNode` for the directory at `path`.

    # Raises:
        SymlinkRecursionError: if `allow_cyclic_links=False` and any cyclic
            symbolic links are detected.
    """
    # Capture all arguments for forwarding to the recursive call; must run
    # before any other local is bound.
    fwd_kwargs = vars()
    del fwd_kwargs['path']

    if path.is_symlink():
        if not follow_links:
            # Do not descend into the link; represent it opaquely.
            return LinkedDir(path)
        previous_path = parents.get(path.real, None)
        if previous_path is not None:
            # The link target was already visited on this branch, i.e. this
            # link closes a cycle.
            if allow_cyclic_links:
                return CyclicLinkedDir(path, previous_path)
            else:
                raise SymlinkRecursionError(path, previous_path)

    if follow_links:
        parents[path.real] = path

    dirs = []
    files = []
    for subpath in sorted(recursion_filter(path.scandir())):
        if subpath.is_dir():
            dir_node = _scantree_recursive(subpath, **fwd_kwargs)
            if include_empty or not is_empty_dir_node(dir_node):
                dirs.append(dir_apply(dir_node))
        if subpath.is_file():
            files.append(file_apply(subpath))

    if follow_links:
        # Leaving this directory: remove it from the active branch so that
        # links elsewhere in the tree may still legitimately point here.
        del parents[path.real]

    return DirNode(path=path, directories=dirs, files=files)
class SymlinkRecursionError(_RecursionError):
    """Raised when symlinks cause a cyclic graph of directories.

    Extends the `pathspec.util.RecursionError` but with a different name
    (avoid overriding the built-in error!) and with a more informative
    string representation (used in `dirhash.cli`).
    """
    def __init__(self, path, target_path):
        # ``path`` is the cyclic link itself; ``target_path`` is the
        # previously visited directory it points back to.
        super(SymlinkRecursionError, self).__init__(
            real_path=path.real,
            first_path=os.path.join(target_path.root, target_path.relative),
            second_path=os.path.join(path.root, path.relative)
        )

    def __str__(self):
        # _RecursionError.__str__ prints args without context
        return 'Symlink recursion: {}'.format(self.message)
| 3.515625 | 4 |
airbyte_dto_factory.py | eSchwander/airbyte-tentacle | 0 | 12765957 | class SourceDto:
"""Data transfer object class for Source-type Airbyte abstractions"""
def __init__(self):
self.source_definition_id = None
self.source_id = None
self.workspace_id = None
self.connection_configuration = {}
self.name = None
self.source_name = None
self.tag = None
def to_payload(self):
"""sends this dto object to a dict formatted as a payload"""
r = {}
r['sourceDefinitionId'] = self.source_definition_id
r['sourceId'] = self.source_id
r['workspaceId'] = self.workspace_id
r['connectionConfiguration'] = self.connection_configuration
r['name'] = self.name
r['sourceName'] = self.source_name
return r
class DestinationDto:
    """Data transfer object for Destination-type Airbyte abstractions."""

    def __init__(self):
        # Identifiers assigned by the Airbyte API.
        self.destination_definition_id = None
        self.destination_id = None
        self.workspace_id = None
        # Connector-specific configuration values.
        self.connection_configuration = {}
        self.name = None
        self.destination_name = None
        # Local bookkeeping only; deliberately excluded from the payload.
        self.tag = None

    def to_payload(self):
        """Serialize this DTO into a dict shaped as an Airbyte API payload."""
        return {
            'destinationDefinitionId': self.destination_definition_id,
            'destinationId': self.destination_id,
            'workspaceId': self.workspace_id,
            'connectionConfiguration': self.connection_configuration,
            'name': self.name,
            'destinationName': self.destination_name,
        }
class ConnectionDto:
    """Data transfer object for Connection-type Airbyte abstractions."""

    def __init__(self):
        self.connection_id = None
        self.name = None
        self.prefix = None
        self.source_id = None
        self.destination_id = None
        # sync_catalog['streams'] is a list of {'stream': ..., 'config': ...}
        self.sync_catalog = {}
        self.schedule = {}
        self.status = None

    def to_payload(self):
        """Not implemented yet; currently returns None."""
        pass  # TODO: implement the to_payload method
class StreamDto:
    """Data transfer object class for the stream, belongs to the connection
    abstraction."""
    def __init__(self):
        # Stream name -- presumably as reported by the source connector.
        self.name = None
        # JSON schema describing the stream's records.
        self.json_schema = {}
        # Sync modes the source supports for this stream.
        self.supported_sync_modes = []
        # Whether the source itself defines the incremental cursor.
        self.source_defined_cursor = None
        # Default cursor field path -- assumed a list of keys; TODO confirm.
        self.default_cursor_field = []
        # Source-defined primary key -- assumed a list of key paths.
        self.source_defined_primary_key = []
        self.namespace = None
class StreamConfigDto:
    """Data transfer object class for the stream configuration, belongs to
    the connection abstraction."""
    def __init__(self):
        # Chosen sync mode for reading from the source.
        self.sync_mode = None
        # Cursor field path -- assumed a list of keys; TODO confirm.
        self.cursor_field = []
        # Chosen sync mode for writing to the destination.
        self.destination_sync_mode = None
        # Primary key -- assumed a list of key paths.
        self.primary_key = []
        self.alias_name = None
        # Whether this stream is enabled for syncing.
        self.selected = None
class WorkspaaceDto:
    """Data transfer object class for Workspace-type Airbyte abstractions"""
    # NOTE(review): the class name is misspelled ("Workspaace"); left as-is
    # because renaming would break any external references. Currently an
    # empty placeholder with no attributes.
    def __init__(self):
        pass
class AirbyteDtoFactory:
    """
    Builds data transfer objects, each representing an abstraction inside the Airbyte architecture
    """
    def __init__(self, source_definitions, destination_definitions):
        # Definition catalogs (dicts as returned by the Airbyte API) used to
        # resolve definition IDs from human-readable names.
        self.source_definitions = source_definitions
        self.destination_definitions = destination_definitions
    def populate_secrets(self, secrets, new_dtos):
        """Inject secret values from ``secrets`` into matching DTOs in ``new_dtos``.

        ``secrets`` is expected to map 'sources'/'destinations' -> name ->
        credential dict; DTO connection configurations are mutated in place.
        Only 'access_token'/'token' (sources) and 'password' (destinations)
        are handled.
        """
        # TODO: Find a better way to deal with unpredictably named secrets
        if 'sources' in new_dtos:
            for source in new_dtos['sources']:
                if source.source_name in secrets['sources']:
                    if 'access_token' in source.connection_configuration:
                        source.connection_configuration['access_token'] = secrets['sources'][source.source_name]['access_token']
                    elif 'token' in source.connection_configuration:
                        source.connection_configuration['token'] = secrets['sources'][source.source_name]['token']
        if 'destinations' in new_dtos:
            for destination in new_dtos['destinations']:
                if destination.destination_name in secrets['destinations']:
                    if 'password' in destination.connection_configuration:
                        destination.connection_configuration['password'] = secrets['destinations'][destination.destination_name]['password']
    def build_source_dto(self, source: dict) -> SourceDto:
        """
        Builds a SourceDto object from a dict representing a source
        """
        r = SourceDto()
        r.connection_configuration = source['connectionConfiguration']
        r.name = source['name']
        r.source_name = source['sourceName']
        if 'sourceDefinitionId' in source:
            r.source_definition_id = source['sourceDefinitionId']
        else:
            # No explicit ID: resolve it by name from the definitions catalog.
            # NOTE(review): no break, so with duplicate names the last match wins.
            for definition in self.source_definitions['sourceDefinitions']:
                if r.source_name == definition['name']:
                    r.source_definition_id = definition['sourceDefinitionId']
        # Optional fields -- only present on sources that already exist server-side.
        if 'sourceId' in source:
            r.source_id = source['sourceId']
        if 'workspaceId' in source:
            r.workspace_id = source['workspaceId']
        if 'tag' in source:
            r.tag = source['tag']
        # TODO: check for validity?
        return r
    def build_destination_dto(self, destination: dict) -> DestinationDto:
        """Builds a DestinationDto object from a dict representing a destination."""
        r = DestinationDto()
        r.connection_configuration = destination['connectionConfiguration']
        r.destination_name = destination['destinationName']
        r.name = destination['name']
        if 'destinationDefinitionId' in destination:
            r.destination_definition_id = destination['destinationDefinitionId']
        else:
            # Resolve the definition ID by name (last match wins, as above).
            for definition in self.destination_definitions['destinationDefinitions']:
                if r.destination_name == definition['name']:
                    r.destination_definition_id = definition['destinationDefinitionId']
        if 'destinationId' in destination:
            r.destination_id = destination['destinationId']
        if 'workspaceId' in destination:
            r.workspace_id = destination['workspaceId']
        if 'tag' in destination:
            r.tag = destination['tag']
        # TODO: check for validity?
        return r
    def build_connection_dto(self, connection: dict) -> ConnectionDto:
        """Builds a ConnectionDto from a dict representing a connection.

        Unlike the source/destination builders, every key here is required.
        """
        r = ConnectionDto()
        r.connection_id = connection['connectionId']
        r.name = connection['name']
        r.prefix = connection['prefix']
        r.source_id = connection['sourceId']
        r.destination_id = connection['destinationId']
        r.sync_catalog = connection['syncCatalog']
        r.schedule = connection['schedule']
        r.status = connection['status']
        # TODO: check for validity?
        return r
return r | 2.734375 | 3 |
page/scrape.py | johnb30/atlas | 13 | 12765958 | <filename>page/scrape.py
from __future__ import unicode_literals
import requests
import timeout_decorator
from bs4 import BeautifulSoup
from selenium import webdriver
def scrape(url, extractor, proxy_choice, proxy_login, raw_html=''):
    """
    Function to request and parse a given URL. Returns only the "relevant"
    text.
    Parameters
    ----------
    url : String.
        URL to request and parse.
    extractor : Goose class instance.
        An instance of Goose that allows for parsing of content.
    proxy_choice : dict or None.
        Proxy mapping passed to requests (used only when proxy_login is set).
    proxy_login : auth object or None.
        Proxy credentials; falsy means request without a proxy.
    raw_html : String, optional.
        Pre-fetched HTML; when non-empty, no HTTP request is made.
    Returns
    -------
    text : String.
        Parsed text from the specified website. Empty on any failure or when
        the article has fewer than 30 words.
    meta : String.
        Parsed meta description of an article. Usually equivalent to the
        lede. Empty on failure.
    """
    headers = {'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36"}
    try:
        if not raw_html:
            if proxy_login:
                page = requests.get(url, headers=headers, proxies=proxy_choice,
                                    auth=proxy_login)
                html = page.content
            else:
                page = requests.get(url, headers=headers)
                html = page.content
        else:
            html = raw_html
    except timeout_decorator.TimeoutError:
        print('Goose scrape timed out on URL: {}'.format(url))
        html = ''
    except Exception as e:
        # Fix: log *before* returning -- the print previously followed the
        # return and was unreachable. Also replaced the Python-2-only
        # "except Exception, e" syntax with "as e" (valid on py2.6+ and py3).
        print('\tProblem requesting url: {}. {}'.format(url, e))
        return '', ''
    if html:
        try:
            article = extractor.extract(raw_html=html)
        except UnicodeDecodeError:
            article = extractor.extract(raw_html=html.decode('utf-8',
                                                             errors='replace'))
        except timeout_decorator.TimeoutError:
            print('Goose scrape timed out on URL: {}'.format(url))
            article = ''
        except Exception as e:
            print('\tProblem pulling cleaned content from {}. {}'.format(url,
                                                                         e))
            return '', ''
    else:
        print('\tNo HTML for page {}.'.format(url))
        return '', ''
    try:
        text = article.cleaned_text
        meta = article.meta_description
        # Throw out those bad articles.
        if len(text.split()) < 30:
            return '', ''
        else:
            return text, meta
    # Generic error catching is bad
    except Exception as e:
        # Fix: same unreachable-print defect as above.
        print('\tProblem scraping URL: {}. {}.'.format(url, e))
        return '', ''
def bnn_scrape(base_url, extractor):
    """
    Function specifically scoped to the BNN news sources. Uses PhantomJS to
    request the page, finds the "Read More" link, passes this secondary link
    to the standard `scrape()` function, and returns the text and associated
    metadata.
    Parameters
    ----------
    base_url : String.
        URL to request and parse.
    extractor : Goose class instance.
        An instance of Goose that allows for parsing of content.
    Returns
    -------
    text : String.
        Parsed text from the specified website.
    meta : String.
        Parsed meta description of an article. Usually equivalent to the
        lede.
    follow_url : String.
        URL extracted from the original bnn source. This URL
        contains the actual content and is one that is stored
        in the database.
    """
    browser = webdriver.PhantomJS()
    browser.get(base_url)
    html_source = browser.page_source
    soup = BeautifulSoup(html_source)
    # Get the real link to follow
    all_links = soup.findAll("a")
    follow_url = ''
    for li in all_links:
        if li.text == "Read more":
            follow_url = li['href']
            print('\tFollow URL found: {}'.format(follow_url))
    # Get the right date
    # TODO: Implement this moar better
    # sp = soup.findAll("span")
    # for i in sp:
    #     if i.find("em"):
    #         info['date'] = i.em.text
    if follow_url:
        try:
            # Fix: scrape() requires proxy_choice and proxy_login; the
            # original call omitted both, so every call raised TypeError
            # (silently swallowed below) and never returned any text.
            # None/None means "no proxy".
            text, meta = scrape(follow_url, extractor, None, None)
        except TypeError:
            text = ''
            meta = ''
    else:
        follow_url = base_url
        print('\tScraping...')
        # Fix: html_source was being passed positionally as proxy_choice;
        # pass it as raw_html so the already-fetched page is reused instead
        # of being treated as a proxy mapping.
        text, meta = scrape(base_url, extractor, None, None,
                            raw_html=html_source)
    return text, meta, follow_url
| 3.453125 | 3 |
4_in_1_Python_Games_for_Beginers.py | M-O-N-Q/4_in_1_Python_Games_for_Beginners | 0 | 12765959 | <gh_stars>0
# Interactive 4-in-1 console game menu: guess-the-number, mad libs,
# roll-a-die, and a random-arithmetic quiz. Loops until the player stops.
import random as rand
# Loop-control flags ('y' keeps the corresponding loop running).
# NOTE(review): play_again4 is initialised once, *outside* the menu loop, so
# after answering 'n' in Roll-a-Die that game can never be replayed.
play_again4 = 'y'
play_again2 = 'y'
while play_again2 == 'y':
    # --- main menu ---
    print("Which Game would you like to play: ")
    print("1. Guess the number")
    print("2. Mad libs")
    print("3. Roll a Die")
    print("4. Math Game")
    which_game = input("\nEnter your Choice: ")
    which_game = str(which_game)
    if which_game.isdigit() is False:
        print("Incorrect input")
        play_again2 = 'y'
    if which_game == '1':
        # --- Game 1: guess the number (1-9) ---
        play_again = 'y'
        while play_again == 'y':
            print("Guessing Game!!!")
            rand_num = rand.randint(1, 9)
            num = input("Guess the number from 1 to 9: ")
            num = str(num)
            # Compare as strings so non-numeric input falls through to "wrong".
            rand_num = str(rand_num)
            if num.isdigit() is False:
                print("Incorrect input")
                play_again = 'y'
            if num == rand_num:
                print("\nYou are correct!!!")
                rand_num = str(rand_num)
                print("The answer is: " + rand_num)
                play_again = input("\nDo you want to play again? y/n: ")
                play_again = str(play_again)
            elif num != rand_num:
                print("\nYou are wrong!!!")
                rand_num = str(rand_num)
                print("The answer is: " + rand_num)
                play_again = input("\nDo you want to play again? y/n: ")
                play_again = str(play_again)
        # play_again2 = input("Do you want to choose game? y/n: ")
        # play_again2 = str(play_again2)
    elif which_game == '2':
        # --- Game 2: mad libs (three words slotted into a rhyme) ---
        play_again3 = 'y'
        while play_again3 == 'y':
            print("MAD LIBS")
            word1 = input("\nFirst word: ")
            word1 = str(word1)
            word2 = input("Second word: ")
            word2 = str(word2)
            word3 = input("Third word: ")
            word3 = str(word3)
            print("\nRoses are " + word1)
            print("Violets are " + word2)
            print("Everytime I thought of you I see " + word3)
            play_again3 = input("\nDo you want to play again? y/n: ")
            play_again3 = str(play_again3)
    elif which_game == '3':
        # --- Game 3: roll a six-sided die ---
        while play_again4 == 'y':
            dice = rand.randint(1, 6)
            dice = str(dice)
            print("\nRoll a Die")
            print("\n" + dice)
            play_again4 = input("\nDo you want to roll again? y/n: ")
            play_again4 = str(play_again4)
            if play_again4.isdigit() is True:
                print("Incorrect input!!!")
                play_again4 = input("\nDo you want to roll again? y/n: ")
                play_again4 = str(play_again4)
    elif which_game == '4':
        # --- Game 4: arithmetic quiz with a random operator and operands ---
        play_again5 = 'y'
        print("Math Game!")
        while play_again5 == 'y':
            # operators: 1=+, 2=-, 3=*, 4=/
            operators = rand.randint(1, 4)
            num_a = rand.randint(0, 99)
            num_b = rand.randint(0, 99)
            # NOTE(review): num_b can be 0, so the division branch below can
            # raise ZeroDivisionError.
            num_a = str(num_a)
            num_b = str(num_b)
            if operators == 1:
                sum_1 = int(num_a) + int(num_b)
                print(num_a + "+" + num_b + "=")
                sum_2 = input("Enter the answer: ")
                sum_2 = str(sum_2)
                # isalpha() guards letters only; mixed input like "1a" would
                # still reach int() and raise ValueError.
                sum_valid = sum_2.isalpha()
                if sum_valid is True:
                    print("\nIncorrect Input!")
                    play_again5 = 'y'
                else:
                    sum_2 = int(sum_2)
                    if sum_1 == sum_2:
                        print("\nYou are Correct!!")
                        play_again5 = input("\nDo you want to play again? y/n: ")
                        play_again5 = str(play_again5)
                    elif sum_1 != sum_2:
                        print("\nYou are Wrong!!!")
                        print("\nThe correct answer is: " + str(sum_1))
                        play_again5 = input("\nDo you want to play again? y/n: ")
                        play_again5 = str(play_again5)
            if operators == 2:
                sub_1 = int(num_a) - int(num_b)
                print(num_a + "-" + num_b + "=")
                sub_2 = input("Enter the answer: ")
                sub_2 = str(sub_2)
                sub_valid = sub_2.isalpha()
                if sub_valid is True:
                    print("\nIncorrect Input!")
                    play_again5 = 'y'
                else:
                    sub_2 = int(sub_2)
                    if sub_1 == sub_2:
                        print("\nYou are Correct!!")
                        play_again5 = input("\nDo you want to play again? y/n: ")
                        play_again5 = str(play_again5)
                    elif sub_1 != sub_2:
                        print("\nYou are Wrong!!!")
                        print("\nThe correct answer is: " + str(sub_1))
                        play_again5 = input("\nDo you want to play again? y/n: ")
                        play_again5 = str(play_again5)
            if operators == 3:
                mul_1 = int(num_a) * int(num_b)
                print(num_a + "*" + num_b + "=")
                mul_2 = input("Enter the answer: ")
                mul_2 = str(mul_2)
                mul_valid = mul_2.isalpha()
                if mul_valid is True:
                    print("\nIncorrect Input!")
                    play_again5 = 'y'
                else:
                    mul_2 = int(mul_2)
                    if mul_1 == mul_2:
                        print("\nYou are Correct!!")
                        play_again5 = input("\nDo you want to play again? y/n: ")
                        play_again5 = str(play_again5)
                    elif mul_1 != mul_2:
                        print("\nYou are Wrong!!!")
                        print("\nThe correct answer is: " + str(mul_1))
                        play_again5 = input("\nDo you want to play again? y/n: ")
                        play_again5 = str(play_again5)
            if operators == 4:
                # True division, rounded to 2 decimals; the player must match
                # the rounded value exactly.
                div_1 = int(num_a) / int(num_b)
                div_1 = round(div_1, 2)
                print(num_a + "/" + num_b + "=")
                div_2 = input("Enter the answer: ")
                div_2 = str(div_2)
                div_valid = div_2.isalpha()
                if div_valid is True:
                    print("\nIncorrect Input!")
                    play_again5 = 'y'
                else:
                    div_2 = float(div_2)
                    if div_1 == div_2:
                        print("\nYou are Correct!!")
                        play_again5 = input("\nDo you want to play again? y/n: ")
                        play_again5 = str(play_again5)
                    elif div_1 != div_2:
                        print("\nYou are Wrong!!!")
                        print("\nThe correct answer is: " + str(div_1))
                        play_again5 = input("\nDo you want to play again? y/n: ")
                        play_again5 = str(play_again5)
print("\nThank you for playing :) ")
| 4.03125 | 4 |
regex_builder/errors.py | Zomatree/regex-builder | 3 | 12765960 | <reponame>Zomatree/regex-builder<filename>regex_builder/errors.py
class NotSection(Exception):
    """regex_builder error raised when a value is not a section.

    NOTE(review): semantics inferred from the name only -- confirm at the
    raise sites elsewhere in the package.
    """
    pass
| 1.0625 | 1 |
models/amenity.py | kemboy-254/AirBnB_clone | 0 | 12765961 | #!/usr/bin/python3
"""New class inherit from BaseModel"""
from models.base_model import BaseModel
class Amenity(BaseModel):
    """Amenity model for the AirBnB clone, extending BaseModel."""
    # Class-level default for the amenity's name (empty string).
    name = ""
| 3.15625 | 3 |
django_librest/yasg.py | FRA1T/django_librest | 5 | 12765962 | <gh_stars>1-10
from django.urls import path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
# drf-yasg schema view: describes the "Library API" and is publicly readable
# (no authentication required to view the docs).
schema_view = get_schema_view(
    openapi.Info(
        title="Library API",
        default_version='v1',
        description="Books for everyone!",
        contact=openapi.Contact(email="<EMAIL>"),
        license=openapi.License(name="MIT License"),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,)
)
# Documentation endpoints: Swagger UI and ReDoc, both uncached
# (cache_timeout=0).
urlpatterns = [
    path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
| 1.8125 | 2 |
Parsers/Parser.py | maoyuwang/ChangeLogger | 0 | 12765963 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from threading import Thread
from Parsers.Common import *
class Parser(Thread):
    """Base class for parser threads.

    Subclasses override :meth:`parse`; :meth:`run` stores its return value,
    which callers retrieve with :meth:`getResult` after joining the thread.
    """
    def __init__(self):
        """
        Initialize a Parser thread.
        """
        Thread.__init__(self)
        # Fix: the original set ``self.deamon`` (a typo), which only created
        # an unused attribute and left the thread non-daemonic. ``daemon`` is
        # the attribute the threading module actually honours.
        self.daemon = True
        self.result = None
    def run(self):
        """
        Thread entry point: run parse() and store its return value.
        """
        self.result = self.parse()
    def parse(self):
        """
        Specific parse method to be override.
        Base implementation returns None.
        """
        pass
    def getResult(self):
        """
        Return the result after the Parser finnish running.
        None until run() has completed.
        :return:
        """
        return self.result
    def __str__(self):
        # Formats the parse result via the shared helper from Parsers.Common.
        return getFormattedString(self.result)
def __str__(self):
return getFormattedString(self.result)
if __name__ == '__main__':
    # Smoke test: the base Parser's parse() returns None, so this prints None.
    p = Parser()
    p.start()
    p.join()
    print(p.getResult())
| 3.265625 | 3 |
_includes/die.py | alexj136/alexj136.github.io | 0 | 12765964 | import random
targetNumber = 6
def throwDie():
    """Announce a roll, draw a uniform value in [1, 6], print and return it."""
    print("rolling...")
    outcome = random.randint(1, 6)
    print("{0}!".format(outcome))
    return outcome
def rollDieUntilTarget(target):
    """Roll a 6-sided die until the given target number comes up.

    Returns the total number of throws (always >= 1).
    """
    print("Rolling until a " + str(target) + " comes up...")
    currentResult = throwDie()
    throws = 1
    # Fix: compare with != rather than ``is not`` -- identity comparison on
    # ints only works by accident of CPython's small-int caching and breaks
    # for values outside that cache or on other interpreters.
    while currentResult != target:
        currentResult = throwDie()
        throws += 1
    return throws
# Demo: roll until targetNumber appears and report how many throws it took.
totalThrows = rollDieUntilTarget(targetNumber)
print("Took " + str(totalThrows) + " throws to roll a " \
    + str(targetNumber) + ".")
| 3.9375 | 4 |
retsinfo_app/documents/migrations/0002_rename_documentembeddings_documentembedding.py | IIMunchII/retsinformation | 1 | 12765965 | # Generated by Django 3.2 on 2021-05-30 18:47
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2: renames DocumentEmbeddings ->
    # DocumentEmbedding (singular); no schema change beyond the model/table
    # rename.
    dependencies = [
        ('scrapers', '0011_alter_retsinfosentences_document'),
        ('documents', '0001_initial'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='DocumentEmbeddings',
            new_name='DocumentEmbedding',
        ),
    ]
| 1.679688 | 2 |
Model_setup/NEISO_data_setup.py | keremakdemir/ISONE_UCED | 0 | 12765966 | # -*- coding: utf-8 -*-
"""
Created on Wed May 03 15:01:31 2017
@author: jdkern
"""
import pandas as pd
import numpy as np
#read generator parameters into DataFrame
df_gen = pd.read_excel('NEISO_data_file/generators.xlsx',header=0)
#read transmission path parameters into DataFrame
df_paths = pd.read_csv('NEISO_data_file/paths.csv',header=0)
#list zones (the eight NEISO load zones used throughout this module)
zones = ['CT', 'ME', 'NH', 'NEMA', 'RI', 'SEMA', 'VT', 'WCMA']
##time series of load for each zone
df_load_all = pd.read_csv('../Time_series_data/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load_all = df_load_all[zones]
##daily hydropower availability
df_hydro = pd.read_csv('Hydro_setup/NEISO_dispatchable_hydro.csv',header=0)
#must run resources (LFG,ag_waste,nuclear)
df_must = pd.read_excel('NEISO_data_file/must_run.xlsx',header=0)
# must run generation: constant hourly profile (8760 hours) per zone, taken
# from row 0 of the must-run workbook.
# NOTE(review): the eight "= []" initialisations below are dead code -- each
# variable is immediately reassigned to an np.ones array.
must_run_CT = []
must_run_ME = []
must_run_NEMA = []
must_run_NH = []
must_run_RI = []
must_run_SEMA = []
must_run_VT = []
must_run_WCMA = []
must_run_CT = np.ones((8760,1))*df_must.loc[0,'CT']
must_run_ME = np.ones((8760,1))*df_must.loc[0,'ME']
must_run_NEMA = np.ones((8760,1))*df_must.loc[0,'NEMA']
must_run_NH = np.ones((8760,1))*df_must.loc[0,'NH']
must_run_RI = np.ones((8760,1))*df_must.loc[0,'RI']
must_run_SEMA = np.ones((8760,1))*df_must.loc[0,'SEMA']
must_run_VT = np.ones((8760,1))*df_must.loc[0,'VT']
must_run_WCMA = np.ones((8760,1))*df_must.loc[0,'WCMA']
must_run = np.column_stack((must_run_CT,must_run_ME,must_run_NEMA,must_run_NH,must_run_RI,must_run_SEMA,must_run_VT,must_run_WCMA))
df_total_must_run = pd.DataFrame(must_run,columns=('CT','ME','NEMA','NH','RI','SEMA','VT','WCMA'))
df_total_must_run.to_csv('NEISO_data_file/must_run_hourly.csv')
#natural gas prices
df_ng_all = pd.read_excel('../Time_series_data/Gas_prices/NG.xlsx', header=0)
df_ng_all = df_ng_all[zones]
#oil prices
df_oil_all = pd.read_excel('../Time_series_data/Oil_prices/Oil_prices.xlsx', header=0)
df_oil_all = df_oil_all[zones]
# time series of offshore wind generation for each zone
df_offshore_wind_all = pd.read_excel('../Time_series_data/Synthetic_wind_power/offshore_wind_power_sim.xlsx',header=0)
# time series of solar generation
df_solar = pd.read_excel('NEISO_data_file/hourly_solar_gen.xlsx',header=0)
solar_caps = pd.read_excel('NEISO_data_file/solar_caps.xlsx',header=0)
# time series of onshore wind generation
df_onshore_wind = pd.read_excel('NEISO_data_file/hourly_onshore_wind_gen.xlsx',header=0)
onshore_wind_caps = pd.read_excel('NEISO_data_file/wind_onshore_caps.xlsx',header=0)
def setup(year, Hub_height, Offshore_capacity):
    """Build a per-scenario UCED case directory and write its AMPL data.dat.

    Slices one simulation year out of the module-level time series, creates
    the directory ``../UCED/LR/NEISO_<Hub_height>_<Offshore_capacity>_<year>``,
    copies the dispatch/wrapper/simulation scripts plus the generators
    workbook into it, and writes a ``data.dat`` file containing all sets and
    parameters for the NEISO unit-commitment model.

    Parameters
    ----------
    year : int
        Zero-based simulation year; selects hours [year*8760, year*8760+8759].
    Hub_height : column label
        Column of the offshore-wind workbook to use (also part of the
        scenario directory name).
    Offshore_capacity :
        Used only in the scenario directory name; the per-zone capacities
        actually applied come from wind_offshore_caps.xlsx.

    Returns
    -------
    None
    """
    ##time series of natural gas prices for each zone
    df_ng = globals()['df_ng_all'].copy()
    df_ng = df_ng.reset_index()
    ##time series of oil prices for each zone
    df_oil = globals()['df_oil_all'].copy()
    df_oil = df_oil.reset_index()
    ##time series of load for each zone
    df_load = globals()['df_load_all'].loc[year*8760:year*8760+8759].copy()
    df_load = df_load.reset_index(drop=True)
    ##time series of operational reserves for each zone
    # Reserves are 4% of total system load in each hour.
    rv= df_load.values
    reserves = np.zeros((len(rv),1))
    for i in range(0,len(rv)):
        reserves[i] = np.sum(rv[i,:])*.04
    df_reserves = pd.DataFrame(reserves)
    df_reserves.columns = ['reserves']
    ##daily time series of dispatchable imports by path
    df_imports = pd.read_csv('Path_setup/NEISO_dispatchable_imports.csv',header=0)
    ##hourly time series of exports by zone
    df_exports = pd.read_csv('Path_setup/NEISO_exports.csv',header=0)
    # time series of offshore wind generation for each zone
    df_offshore_wind = globals()['df_offshore_wind_all'].loc[:, Hub_height].copy()
    df_offshore_wind = df_offshore_wind.loc[year*8760:year*8760+8759]
    df_offshore_wind = df_offshore_wind.reset_index()
    offshore_wind_caps = pd.read_excel('NEISO_data_file/wind_offshore_caps.xlsx')
    ############
    # sets #
    ############
    #write data.dat file
    import os
    from shutil import copy
    from pathlib import Path
    path = str(Path.cwd().parent) + str(Path('/UCED/LR/NEISO' +'_'+ str(Hub_height) +'_'+ str(Offshore_capacity) +'_'+ str(year)))
    os.makedirs(path,exist_ok=True)
    # Copy the model scripts and generator workbook into the scenario folder.
    generators_file='NEISO_data_file/generators.xlsx'
    dispatch_file='../UCED/NEISO_dispatch.py'
    dispatchLP_file='../UCED/NEISO_dispatchLP.py'
    wrapper_file='../UCED/NEISO_wrapper.py'
    simulation_file='../UCED/NEISO_simulation.py'
    copy(dispatch_file,path)
    copy(wrapper_file,path)
    copy(simulation_file,path)
    copy(dispatchLP_file,path)
    copy(generators_file,path)
    filename = path + '/data.dat'
    #write data.dat file
    # filename = 'NEISO_data_file/data.dat'
    with open(filename, 'w') as f:
        # generator sets by zone
        for z in zones:
            # zone string
            z_int = zones.index(z)
            f.write('set Zone%dGenerators :=\n' % (z_int+1))
            # pull relevant generators
            for gen in range(0,len(df_gen)):
                if df_gen.loc[gen,'zone'] == z:
                    unit_name = df_gen.loc[gen,'name']
                    unit_name = unit_name.replace(' ','_')
                    f.write(unit_name + ' ')
            f.write(';\n\n')
        # NY imports
        f.write('set NY_Imports_CT :=\n')
        # pull relevant generators
        for gen in range(0,len(df_gen)):
            if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYCT_I':
                unit_name = df_gen.loc[gen,'name']
                unit_name = unit_name.replace(' ','_')
                f.write(unit_name + ' ')
        f.write(';\n\n')
        # NY imports
        f.write('set NY_Imports_WCMA :=\n')
        # pull relevant generators
        for gen in range(0,len(df_gen)):
            if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYWCMA_I':
                unit_name = df_gen.loc[gen,'name']
                unit_name = unit_name.replace(' ','_')
                f.write(unit_name + ' ')
        f.write(';\n\n')
        # NY imports
        f.write('set NY_Imports_VT :=\n')
        # pull relevant generators
        for gen in range(0,len(df_gen)):
            if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NYVT_I':
                unit_name = df_gen.loc[gen,'name']
                unit_name = unit_name.replace(' ','_')
                f.write(unit_name + ' ')
        f.write(';\n\n')
        # HQ imports
        f.write('set HQ_Imports_VT :=\n')
        # pull relevant generators
        for gen in range(0,len(df_gen)):
            if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'HQVT_I':
                unit_name = df_gen.loc[gen,'name']
                unit_name = unit_name.replace(' ','_')
                f.write(unit_name + ' ')
        f.write(';\n\n')
        # NB imports
        f.write('set NB_Imports_ME :=\n')
        # pull relevant generators
        for gen in range(0,len(df_gen)):
            if df_gen.loc[gen,'typ'] == 'imports' and df_gen.loc[gen,'zone'] == 'NBME_I':
                unit_name = df_gen.loc[gen,'name']
                unit_name = unit_name.replace(' ','_')
                f.write(unit_name + ' ')
        f.write(';\n\n')
        # generator sets by type
        # coal
        f.write('set Coal :=\n')
        # pull relevant generators
        for gen in range(0,len(df_gen)):
            if df_gen.loc[gen,'typ'] == 'coal':
                unit_name = df_gen.loc[gen,'name']
                unit_name = unit_name.replace(' ','_')
                f.write(unit_name + ' ')
        f.write(';\n\n')
        # # oil
        # f.write('set Oil :=\n')
        # # pull relevant generators
        # for gen in range(0,len(df_gen)):
        #     if df_gen.loc[gen,'typ'] == 'oil':
        #         unit_name = df_gen.loc[gen,'name']
        #         unit_name = unit_name.replace(' ','_')
        #         f.write(unit_name + ' ')
        # f.write(';\n\n')
        # Slack
        f.write('set Slack :=\n')
        # pull relevant generators
        for gen in range(0,len(df_gen)):
            if df_gen.loc[gen,'typ'] == 'slack':
                unit_name = df_gen.loc[gen,'name']
                unit_name = unit_name.replace(' ','_')
                f.write(unit_name + ' ')
        f.write(';\n\n')
        # Hydro
        f.write('set Hydro :=\n')
        # pull relevant generators
        for gen in range(0,len(df_gen)):
            if df_gen.loc[gen,'typ'] == 'hydro':
                unit_name = df_gen.loc[gen,'name']
                unit_name = unit_name.replace(' ','_')
                f.write(unit_name + ' ')
        f.write(';\n\n')
        # Ramping
        f.write('set Ramping :=\n')
        # pull relevant generators
        for gen in range(0,len(df_gen)):
            if df_gen.loc[gen,'typ'] == 'hydro' or df_gen.loc[gen,'typ'] == 'imports':
                unit_name = df_gen.loc[gen,'name']
                unit_name = unit_name.replace(' ','_')
                f.write(unit_name + ' ')
        f.write(';\n\n')
        # gas generator sets by zone and type
        for z in zones:
            # zone string
            z_int = zones.index(z)
            # Natural Gas
            # find relevant generators
            trigger = 0
            for gen in range(0,len(df_gen)):
                if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
                    trigger = 1
            if trigger > 0:
                # pull relevant generators
                f.write('set Zone%dGas :=\n' % (z_int+1))
                for gen in range(0,len(df_gen)):
                    if df_gen.loc[gen,'zone'] == z and (df_gen.loc[gen,'typ'] == 'ngcc' or df_gen.loc[gen,'typ'] == 'ngct' or df_gen.loc[gen,'typ'] == 'ngst'):
                        unit_name = df_gen.loc[gen,'name']
                        unit_name = unit_name.replace(' ','_')
                        f.write(unit_name + ' ')
                f.write(';\n\n')
        # oil generator sets by zone and type
        for z in zones:
            # zone string
            z_int = zones.index(z)
            # find relevant generators
            trigger = 0
            for gen in range(0,len(df_gen)):
                if (df_gen.loc[gen,'zone'] == z) and (df_gen.loc[gen,'typ'] == 'oil'):
                    trigger = 1
            if trigger > 0:
                # pull relevant generators
                f.write('set Zone%dOil :=\n' % (z_int+1))
                for gen in range(0,len(df_gen)):
                    if (df_gen.loc[gen,'zone'] == z) and (df_gen.loc[gen,'typ'] == 'oil'):
                        unit_name = df_gen.loc[gen,'name']
                        unit_name = unit_name.replace(' ','_')
                        f.write(unit_name + ' ')
                f.write(';\n\n')
        # zones
        f.write('set zones :=\n')
        for z in zones:
            f.write(z + ' ')
        f.write(';\n\n')
        # sources
        f.write('set sources :=\n')
        for z in zones:
            f.write(z + ' ')
        f.write(';\n\n')
        # sinks
        f.write('set sinks :=\n')
        for z in zones:
            f.write(z + ' ')
        f.write(';\n\n')
        ################
        # parameters #
        ################
        # simulation details
        SimHours = 8760
        f.write('param SimHours := %d;' % SimHours)
        f.write('\n')
        f.write('param SimDays:= %d;' % int(SimHours/24))
        f.write('\n\n')
        HorizonHours = 48
        f.write('param HorizonHours := %d;' % HorizonHours)
        f.write('\n\n')
        HorizonDays = int(HorizonHours/24)
        f.write('param HorizonDays := %d;' % HorizonDays)
        f.write('\n\n')
        # create parameter matrix for transmission paths (source and sink connections)
        # Writes every zone pair; pairs with no defined path get 0/0.
        f.write('param:' + '\t' + 'limit' + '\t' +'hurdle :=' + '\n')
        for z in zones:
            for x in zones:
                f.write(z + '\t' + x + '\t')
                match = 0
                for p in range(0,len(df_paths)):
                    source = df_paths.loc[p,'start_zone']
                    sink = df_paths.loc[p,'end_zone']
                    if source == z and sink == x:
                        match = 1
                        p_match = p
                if match > 0:
                    f.write(str(round(df_paths.loc[p_match,'limit'],3)) + '\t' + str(round(df_paths.loc[p_match,'hurdle'],3)) + '\n')
                else:
                    f.write('0' + '\t' + '0' + '\n')
        f.write(';\n\n')
        # create parameter matrix for generators
        f.write('param:' + '\t')
        for c in df_gen.columns:
            if c != 'name':
                f.write(c + '\t')
        f.write(':=\n\n')
        for i in range(0,len(df_gen)):
            for c in df_gen.columns:
                if c == 'name':
                    unit_name = df_gen.loc[i,'name']
                    unit_name = unit_name.replace(' ','_')
                    f.write(unit_name + '\t')
                elif c == 'typ' or c == 'zone':
                    f.write(str(df_gen.loc[i,c]) + '\t')
                else:
                    f.write(str(round(df_gen.loc[i,c],3)) + '\t')
            f.write('\n')
        f.write(';\n\n')
        # times series data
        # zonal (hourly): renewables are scaled by each zone's installed capacity.
        f.write('param:' + '\t' + 'SimDemand' + '\t' + 'SimOffshoreWind' \
                + '\t' + 'SimSolar' + '\t' + 'SimOnshoreWind' + '\t' + 'SimMustRun:=' + '\n')
        for z in zones:
            wz = offshore_wind_caps.loc[0,z]
            sz = solar_caps.loc[0,z]
            owz = onshore_wind_caps.loc[0,z]
            for h in range(0,len(df_load)):
                f.write(z + '\t' + str(h+1) + '\t' + str(round(df_load.loc[h,z],3))\
                        + '\t' + str(round(df_offshore_wind.loc[h,Hub_height]*wz,3))\
                        + '\t' + str(round(df_solar.loc[h,'Solar_Output_MWh']*sz,3))\
                        + '\t' + str(round(df_onshore_wind.loc[h,'Onshore_Output_MWh']*owz,3))\
                        + '\t' + str(round(df_total_must_run.loc[h,z],3)) + '\n')
        f.write(';\n\n')
        # zonal (daily)
        f.write('param:' + '\t' + 'SimGasPrice' + '\t' + 'SimOilPrice:=' + '\n')
        for z in zones:
            for d in range(0,int(SimHours/24)):
                f.write(z + '\t' + str(d+1) + '\t' + str(round(df_ng.loc[d,z], 3)) + '\t' + str(round(df_oil.loc[d,z], 3)) + '\n')
        f.write(';\n\n')
        #system wide (daily)
        f.write('param:' + '\t' + 'SimNY_imports_CT' + '\t' + 'SimNY_imports_VT' + '\t' + 'SimNY_imports_WCMA' + '\t' + 'SimNB_imports_ME' + '\t' + 'SimHQ_imports_VT' + '\t' + 'SimCT_hydro' + '\t' + 'SimME_hydro' + '\t' + 'SimNH_hydro' + '\t' + 'SimNEMA_hydro' + '\t' + 'SimRI_hydro' + '\t' + 'SimVT_hydro' + '\t' + 'SimWCMA_hydro:=' + '\n')
        for d in range(0,len(df_imports)):
            f.write(str(d+1) + '\t' + str(round(df_imports.loc[d,'NY_imports_CT'],3)) + '\t' + str(round(df_imports.loc[d,'NY_imports_VT'],3)) + '\t' + str(round(df_imports.loc[d,'NY_imports_WCMA'],3)) + '\t' + str(round(df_imports.loc[d,'NB_imports_ME'],3)) + '\t' + str(round(df_imports.loc[d,'HQ_imports_VT'],3)) + '\t' + str(round(df_hydro.loc[d,'CT'],3)) + '\t' + str(round(df_hydro.loc[d,'ME'],3)) + '\t' + str(round(df_hydro.loc[d,'NH'],3)) + '\t' + str(round(df_hydro.loc[d,'NEMA'],3)) + '\t' + str(round(df_hydro.loc[d,'RI'],3)) + '\t' + str(round(df_hydro.loc[d,'VT'],3)) + '\t' + str(round(df_hydro.loc[d,'WCMA'],3)) + '\n')
        f.write(';\n\n')
        #system wide (hourly)
        f.write('param:' + '\t' + 'SimCT_exports_NY' + '\t' + 'SimWCMA_exports_NY' + '\t' + 'SimVT_exports_NY' + '\t' + 'SimVT_exports_HQ' + '\t' + 'SimME_exports_NB' + '\t' + 'SimReserves:=' + '\n')
        for h in range(0,len(df_load)):
            f.write(str(h+1) + '\t' + str(round(df_exports.loc[h,'CT_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'WCMA_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'VT_exports_NY'],3)) + '\t' + str(round(df_exports.loc[h,'VT_exports_HQ'],3)) + '\t' + str(round(df_exports.loc[h,'ME_exports_NB'],3)) + '\t' + str(round(df_reserves.loc[h,'reserves'],3)) + '\n')
        f.write(';\n\n')
    return None
| 2.359375 | 2 |
tests/bootstrap3/test_section.py | angonyfox/djangocms-cascade | 0 | 12765967 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from bs4 import BeautifulSoup
from django.http import QueryDict
from cms.api import add_plugin
from cms.utils.plugins import build_plugin_tree
from cmsplugin_cascade.models import CascadeElement
from cmsplugin_cascade.bootstrap3.container import (BootstrapContainerPlugin, BootstrapRowPlugin,
BootstrapRowForm, BootstrapColumnPlugin, BS3_BREAKPOINT_KEYS)
from cmsplugin_cascade.generic.cms_plugins import HeadingPlugin
from tests.test_base import CascadeTestCase
class SectionPluginTest(CascadeTestCase):
    """Tests the HeadingPlugin inside a Bootstrap3 container/row/column tree,
    including the page-unique ``element_id`` validation."""

    def setUp(self):
        """Build a Container > Row > Column plugin tree to host the heading."""
        super(SectionPluginTest, self).setUp()
        # add a Bootstrap Container Plugin
        container_model = add_plugin(self.placeholder, BootstrapContainerPlugin, 'en',
                                     glossary={'breakpoints': BS3_BREAKPOINT_KEYS})
        self.assertIsInstance(container_model, CascadeElement)
        container_plugin = container_model.get_plugin_class_instance(self.admin_site)
        self.assertIsInstance(container_plugin, BootstrapContainerPlugin)
        ModelForm = container_plugin.get_form(self.request, container_model)
        post_data = QueryDict('', mutable=True)
        post_data.setlist('breakpoints', ['sm', 'md'])
        form = ModelForm(post_data, None, instance=container_model)
        soup = BeautifulSoup(form.as_p(), features='lxml')
        input_element = soup.find(id="id_glossary_breakpoints_0")
        self.assertDictContainsSubset({'type': 'checkbox', 'name': 'breakpoints', 'value': 'xs'},
                                      input_element.attrs)
        input_element = soup.find(id="id_glossary_breakpoints_2")
        self.assertDictContainsSubset({'type': 'checkbox', 'name': 'breakpoints', 'value': 'md', 'checked': ''},
                                      input_element.attrs)
        input_element = soup.find(id="id_glossary_fluid")
        self.assertDictContainsSubset({'type': 'checkbox', 'name': 'fluid'},
                                      input_element.attrs)
        container_plugin.save_model(self.request, container_model, form, False)
        self.assertListEqual(container_model.glossary['breakpoints'], ['sm', 'md'])
        self.assertTrue('fluid' in container_model.glossary)
        self.assertEqual(str(container_model), 'for tablets, laptops')
        # add a RowPlugin with 1 ColumnPlugin
        row_model = add_plugin(self.placeholder, BootstrapRowPlugin, 'en', target=container_model)
        row_plugin = row_model.get_plugin_class_instance()
        row_change_form = BootstrapRowForm({'num_children': 1})
        row_change_form.full_clean()
        row_plugin.save_model(self.request, row_model, row_change_form, False)
        self.assertDictEqual(row_model.glossary, {})
        self.assertIsInstance(row_model, CascadeElement)
        column_models = CascadeElement.objects.filter(parent_id=row_model.id)
        self.assertEqual(column_models.count(), 1)
        # work with the ColumnPlugin
        self.column_model = column_models.first()
        self.assertIsInstance(self.column_model, CascadeElement)
        self.column_plugin = self.column_model.get_plugin_class_instance()
        self.assertIsInstance(self.column_plugin, BootstrapColumnPlugin)
        self.assertEqual(self.column_model.parent.id, row_model.id)
        self.plugin_list = [container_model, row_model, self.column_model]

    def test_section(self):
        """A heading renders with its element_id, and a duplicate id on the
        same page is rejected by form validation."""
        heading_model = add_plugin(self.placeholder, HeadingPlugin, 'en', target=self.column_model)
        self.assertIsInstance(heading_model, CascadeElement)
        heading_plugin = heading_model.get_plugin_class_instance(self.admin_site)
        self.assertIsInstance(heading_plugin, HeadingPlugin)
        ModelForm = heading_plugin.get_form(self.request, heading_model)
        post_data = QueryDict('', mutable=True)
        post_data.update(tag_type='h2', content="Hello", element_id='foo')
        form = ModelForm(post_data, None, instance=heading_model)
        html = form.as_p()
        needle = '<input id="id_glossary_element_id" name="element_id" type="text" value="foo" />'
        self.assertInHTML(needle, html)
        self.assertTrue(form.is_valid())
        heading_plugin.save_model(self.request, heading_model, form, False)
        # check identifier
        html = heading_plugin.get_identifier(heading_model)
        expected = '<code>h2</code>: Hello <code>id="foo"</code>'
        self.assertHTMLEqual(html, expected)
        # render the Container Plugin with the Heading Plgin as a child
        self.plugin_list.append(heading_model)
        build_plugin_tree(self.plugin_list)
        # context = get_request_context(self.request)
        # html = heading_model.render_plugin(context)
        html = self.get_html(heading_model, self.get_request_context())
        expected = '<h2 id="foo">Hello</h2>'
        self.assertHTMLEqual(html, expected)
        # add another heading model with the same id
        heading_model = add_plugin(self.placeholder, HeadingPlugin, 'en', target=self.column_model)
        form = ModelForm(post_data, None, instance=heading_model)
        self.assertFalse(form.is_valid())
        # Fix: the expected error markup was corrupted (its HTML entities had
        # been unescaped, nesting raw single quotes inside a single-quoted
        # literal). Django escapes quotes in rendered error lists as &#39;.
        expected = '<ul class="errorlist"><li>glossary<ul class="errorlist"><li>The element ID &#39;foo&#39; is not unique for this page.</li></ul></li></ul>'
        self.assertHTMLEqual(str(form.errors), expected)
| 1.921875 | 2 |
specter/__main__.py | breekristensen/Specter | 18 | 12765968 | from specter.runner import activate
activate()
| 1.015625 | 1 |
L1Trigger/L1THGCal/python/customTriggerCellSelect.py | Purva-Chaudhari/cmssw | 852 | 12765969 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
import SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi as digiparam
from L1Trigger.L1THGCal.hgcalConcentratorProducer_cfi import threshold_conc_proc, best_conc_proc, supertc_conc_proc, coarsetc_onebitfraction_proc, coarsetc_equalshare_proc, bestchoice_ndata_decentralized, custom_conc_proc, autoEncoder_conc_proc
def custom_triggercellselect_supertriggercell(process,
        stcSize=supertc_conc_proc.stcSize,
        type_energy_division=supertc_conc_proc.type_energy_division,
        fixedDataSizePerHGCROC=supertc_conc_proc.fixedDataSizePerHGCROC
        ):
    """Configure the HGCAL concentrator producer to use super-trigger-cell selection."""
    process.hgcalConcentratorProducer.ProcessorParameters = supertc_conc_proc.clone(
        stcSize=stcSize,
        type_energy_division=type_energy_division,
        fixedDataSizePerHGCROC=fixedDataSizePerHGCROC,
    )
    return process
def custom_triggercellselect_threshold(process,
        threshold_silicon=threshold_conc_proc.threshold_silicon,  # in mipT
        threshold_scintillator=threshold_conc_proc.threshold_scintillator,  # in mipT
        coarsenTriggerCells=threshold_conc_proc.coarsenTriggerCells
        ):
    """Configure the HGCAL concentrator producer to apply per-cell energy thresholds."""
    process.hgcalConcentratorProducer.ProcessorParameters = threshold_conc_proc.clone(
        threshold_silicon=threshold_silicon,
        threshold_scintillator=threshold_scintillator,
        coarsenTriggerCells=coarsenTriggerCells,
    )
    return process
def custom_triggercellselect_bestchoice(process,
        triggercells=best_conc_proc.NData
        ):
    """Configure the HGCAL concentrator producer to keep the `NData` best trigger cells."""
    process.hgcalConcentratorProducer.ProcessorParameters = best_conc_proc.clone(
        NData=triggercells)
    return process
def custom_triggercellselect_bestchoice_decentralized(process):
    """Best-choice selection using the decentralised `NData` working point."""
    return custom_triggercellselect_bestchoice(
        process, triggercells=bestchoice_ndata_decentralized)
def custom_coarsetc_onebitfraction(process,
        stcSize=coarsetc_onebitfraction_proc.stcSize,
        fixedDataSizePerHGCROC=coarsetc_onebitfraction_proc.fixedDataSizePerHGCROC,
        oneBitFractionThreshold=coarsetc_onebitfraction_proc.oneBitFractionThreshold,
        oneBitFractionLowValue=coarsetc_onebitfraction_proc.oneBitFractionLowValue,
        oneBitFractionHighValue=coarsetc_onebitfraction_proc.oneBitFractionHighValue,
        ):
    """Configure the coarse-TC one-bit-fraction algorithm on the concentrator producer."""
    process.hgcalConcentratorProducer.ProcessorParameters = coarsetc_onebitfraction_proc.clone(
        stcSize=stcSize,
        fixedDataSizePerHGCROC=fixedDataSizePerHGCROC,
        oneBitFractionThreshold=oneBitFractionThreshold,
        oneBitFractionLowValue=oneBitFractionLowValue,
        oneBitFractionHighValue=oneBitFractionHighValue,
    )
    return process
def custom_coarsetc_equalshare(process,
        stcSize=coarsetc_equalshare_proc.stcSize,
        fixedDataSizePerHGCROC=coarsetc_equalshare_proc.fixedDataSizePerHGCROC,
        ):
    """Configure the coarse-TC equal-share algorithm on the concentrator producer."""
    process.hgcalConcentratorProducer.ProcessorParameters = coarsetc_equalshare_proc.clone(
        stcSize=stcSize,
        fixedDataSizePerHGCROC=fixedDataSizePerHGCROC,
    )
    return process
def custom_triggercellselect_mixedBestChoiceSuperTriggerCell(process,
        stcSize=custom_conc_proc.stcSize,
        type_energy_division=custom_conc_proc.type_energy_division,
        fixedDataSizePerHGCROC=custom_conc_proc.fixedDataSizePerHGCROC,
        triggercells=custom_conc_proc.NData
        ):
    """Mixed selection: per-section Method list of one best-choice entry
    followed by two super-trigger-cell entries."""
    process.hgcalConcentratorProducer.ProcessorParameters = custom_conc_proc.clone(
        stcSize=stcSize,
        type_energy_division=type_energy_division,
        fixedDataSizePerHGCROC=fixedDataSizePerHGCROC,
        NData=triggercells,
        Method=cms.vstring('bestChoiceSelect', 'superTriggerCellSelect', 'superTriggerCellSelect'),
    )
    return process
def custom_triggercellselect_mixedBestChoiceSuperTriggerCell_decentralized(process):
    """Mixed best-choice/super-trigger-cell selection with the decentralised `NData`."""
    return custom_triggercellselect_mixedBestChoiceSuperTriggerCell(
        process, triggercells=bestchoice_ndata_decentralized)
def custom_triggercellselect_autoencoder(process,
        cellRemap=autoEncoder_conc_proc.cellRemap,
        nBitsPerInput=autoEncoder_conc_proc.nBitsPerInput,
        maxBitsPerOutput=autoEncoder_conc_proc.maxBitsPerOutput,
        bitsPerLink=autoEncoder_conc_proc.bitsPerLink,
        modelFiles=autoEncoder_conc_proc.modelFiles,
        linkToGraphMap=autoEncoder_conc_proc.linkToGraphMap,
        zeroSuppresionThreshold=autoEncoder_conc_proc.zeroSuppresionThreshold,
        saveEncodedValues=autoEncoder_conc_proc.saveEncodedValues,
        preserveModuleSum=autoEncoder_conc_proc.preserveModuleSum,
        scintillatorMethod='thresholdSelect',
        ):
    """Run the autoencoder algorithm on the silicon sections, with
    `scintillatorMethod` handling the scintillator section."""
    process.hgcalConcentratorProducer.ProcessorParameters = autoEncoder_conc_proc.clone(
        cellRemap=cellRemap,
        nBitsPerInput=nBitsPerInput,
        maxBitsPerOutput=maxBitsPerOutput,
        bitsPerLink=bitsPerLink,
        modelFiles=modelFiles,
        linkToGraphMap=linkToGraphMap,
        zeroSuppresionThreshold=zeroSuppresionThreshold,
        saveEncodedValues=saveEncodedValues,
        preserveModuleSum=preserveModuleSum,
        Method=cms.vstring(['autoEncoder', 'autoEncoder', scintillatorMethod]),
    )
    return process
| 1.625 | 2 |
deepchem/feat/molecule_featurizers/maccs_keys_fingerprint.py | deloragaskins/deepchem | 3,782 | 12765970 | <gh_stars>1000+
import warnings

import numpy as np

from deepchem.utils.typing import RDKitMol
from deepchem.feat.base_classes import MolecularFeaturizer
class MACCSKeysFingerprint(MolecularFeaturizer):
  """MACCS Keys Fingerprint.

  The MACCS (Molecular ACCess System) keys are one of the most commonly used structural keys.
  Please confirm the details in [1]_, [2]_.

  Examples
  --------
  >>> import deepchem as dc
  >>> smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
  >>> featurizer = dc.feat.MACCSKeysFingerprint()
  >>> features = featurizer.featurize([smiles])
  >>> type(features[0])
  <class 'numpy.ndarray'>
  >>> features[0].shape
  (167,)

  References
  ----------
  .. [1] <NAME>., et al. "Reoptimization of MDL keys for use in drug discovery."
     Journal of chemical information and computer sciences 42.6 (2002): 1273-1280.
  .. [2] https://github.com/rdkit/rdkit/blob/master/rdkit/Chem/MACCSkeys.py

  Note
  ----
  This class requires RDKit to be installed.
  """

  def __init__(self):
    """Initialize this featurizer."""
    # Resolved lazily to rdkit's GetMACCSKeysFingerprint on first use so that
    # constructing the featurizer does not require RDKit.
    self.calculator = None

  def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
    """
    Calculate MACCS keys fingerprint.

    Parameters
    ----------
    datapoint: rdkit.Chem.rdchem.Mol
      RDKit Mol object

    Returns
    -------
    np.ndarray
      1D array of RDKit descriptors for `mol`. The length is 167.
    """
    if 'mol' in kwargs:
      datapoint = kwargs.get("mol")
      # Fix: this used to `raise DeprecationWarning(...)`, which aborted the
      # call and made the deprecated `mol=` keyword unusable despite the
      # message saying it is merely being phased out. Warn and continue.
      warnings.warn(
          'Mol is being phased out as a parameter, please pass "datapoint" instead.',
          DeprecationWarning)

    if self.calculator is None:
      try:
        from rdkit.Chem.AllChem import GetMACCSKeysFingerprint
        self.calculator = GetMACCSKeysFingerprint
      except ModuleNotFoundError:
        raise ImportError("This class requires RDKit to be installed.")
    return self.calculator(datapoint)
| 2.6875 | 3 |
fastai_xla_extensions/core.py | hixio-mh/fastai_xla_extensions | 33 | 12765971 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_core.ipynb (unless otherwise specified).
__all__ = ['XLAOptimProxy', 'DeviceMoverTransform', 'isAffineCoordTfm', 'isDeviceMoverTransform', 'has_affinecoord_tfm',
'has_devicemover_tfm', 'get_last_affinecoord_tfm_idx', 'insert_batch_tfm', 'XLAOptCallback']
# Internal Cell
from .utils import xla_imported
# Internal Cell
try:
import torch_xla
except ImportError:
pass
# Internal Cell
if xla_imported():
import torch_xla.core.xla_model as xm
from fastcore.foundation import GetAttr, patch
from fastcore.transform import Transform,DisplayedTransform
from fastcore.basics import store_attr
from torch import Tensor
import torch
from fastai.vision.augment import AffineCoordTfm, RandomResizedCropGPU
from fastai.data.core import DataLoaders
from fastai.data.load import DataLoader
from fastai.learner import Learner
from fastai.callback.core import Callback, TrainEvalCallback
from fastai.learner import Recorder
# Cell
class XLAOptimProxy(GetAttr):
    "Proxy optimizer to override `opt.step` with Pytorch XLA sync method `xm.optimizer_step` "
    # GetAttr delegation: any attribute not found here is forwarded to `self.opt`,
    # so the proxy behaves like the wrapped optimizer everywhere else.
    _default='opt'
    def __init__(self,opt, barrier):
        self.opt = opt
        self._barrier = barrier

    def step(self):
        # xm.optimizer_step applies the gradients and marks an XLA step;
        # `barrier` controls whether a step barrier is inserted.
        xm.optimizer_step(self.opt,barrier=self._barrier)
    @property
    def barrier(self): return self._barrier
    @barrier.setter
    def barrier(self,v): self._barrier = v
# Cell
class DeviceMoverTransform(DisplayedTransform):
    "Transform to move input to new device and reverse to cpu"
    def __init__(self, device_to, device_from=torch.device('cpu')):
        # fastcore store_attr: saves device_to/device_from as instance attributes
        store_attr('device_to,device_from')

    def encodes(self, o:Tensor):
        # forward direction: move the batch tensor onto the target device
        return o.to(self.device_to)

    def decodes(self, o:Tensor):
        # reverse direction: bring the tensor back (CPU by default)
        return o.to(self.device_from)
# Cell
def isAffineCoordTfm(o:Transform):
    "True when `o` is a GPU affine-coordinate transform (`AffineCoordTfm` or `RandomResizedCropGPU`)"
    affine_types = (AffineCoordTfm, RandomResizedCropGPU)
    return isinstance(o, affine_types)
def isDeviceMoverTransform(o:Transform):
    "True when `o` is a `DeviceMoverTransform`"
    is_mover = isinstance(o, DeviceMoverTransform)
    return is_mover
def has_affinecoord_tfm(dls: DataLoaders) -> bool:
    "True when the train dataloader's batch tfms contain an AffineCoordTfm"
    pipeline = getattr(dls.train, 'after_batch', None)
    tfms = getattr(pipeline, 'fs', None)
    if tfms is None: return False
    return len(tfms.argwhere(isAffineCoordTfm)) > 0
def has_devicemover_tfm(dl: DataLoader) -> bool:
    "True when `dl`'s batch tfms contain a DeviceMoverTransform"
    pipeline = getattr(dl, 'after_batch', None)
    tfms = getattr(pipeline, 'fs', None)
    if tfms is None: return False
    return len(tfms.argwhere(isDeviceMoverTransform)) > 0
def get_last_affinecoord_tfm_idx(dl:DataLoader)-> int: # -1 if none
    "index of the last AffineCoordTfm in `dl`'s batch tfms; -1 when absent"
    matches = dl.after_batch.fs.argwhere(isAffineCoordTfm)
    if len(matches) == 0: return -1
    return matches[-1]
# Cell
def insert_batch_tfm(dl:DataLoader, batch_tfm:Transform, idx:int):
    "splice `batch_tfm` into `dl`'s batch-transform pipeline at position `idx`"
    pipeline = dl.after_batch.fs
    pipeline.insert(idx, batch_tfm)
# Cell
@patch
def setup_input_device_mover(self: Learner, new_device):
    "setup batch_tfms to use cpu if dataloader batch_tfms has AffineCoordTfms"
    # No affine GPU tfms: the whole batch can be moved straight to new_device.
    if not has_affinecoord_tfm(self.dls):
        self.dls.device = new_device
        return
    # Affine tfms present: keep batches on CPU (device=None) and let a
    # DeviceMoverTransform move them to new_device after the affine tfms ran.
    self.dls.device = None

    if has_devicemover_tfm(self.dls.train):
        return # skip adding device mover if already added

    # one shared mover instance is inserted into every loader
    dm_tfm = DeviceMoverTransform(new_device)

    for dl in self.dls.loaders:
        if not has_devicemover_tfm(dl):
            idx = get_last_affinecoord_tfm_idx(dl)
            if idx != -1:
                # place the mover immediately after the last affine tfm
                insert_batch_tfm(dl, dm_tfm, idx+1)
# Cell
class XLAOptCallback(Callback):
    'Callback to replace `opt.step` with `xm.optimizer_step(opt)` as required to run on TPU'
    # ordering: run after TrainEvalCallback so the optimizer exists, before Recorder
    run_after,run_before = TrainEvalCallback,Recorder
    def __init__(self, barrier=True):
        # whether xm.optimizer_step should insert a step barrier
        self._barrier = barrier

    def before_fit(self):
        'replace opt with proxy which calls `xm.optimizer_step` instead of `opt.step` and set `dls.device` and model to `xla_device`'
        # set dls device to none so prevent trigger of moving to batch input to XLA device
        # as this move will be done by the DeviceMoverTransform which has been added to the dls after_batch tfms
        if has_affinecoord_tfm(self.dls):
            self.dls.device = None

        if self.learn.opt is not None:
            # wrap only once -- re-wrapping an XLAOptimProxy would double-proxy
            if not isinstance(self.learn.opt,XLAOptimProxy):
                # force opt to reinitialize its parameters and make sure its parameters
                opt = self.learn.opt
                self.learn.opt = XLAOptimProxy(opt, barrier=self._barrier)

    def after_fit(self):
        'restore original opt '
        if isinstance(self.learn.opt, XLAOptimProxy):
            # unwrap the proxy and restore the raw optimizer
            opt = self.learn.opt.opt
            self.learn.opt = opt
    @property
    def barrier(self): return self._barrier
    @barrier.setter
    def barrier(self,v): self._barrier = v
# Cell
@patch
def to_xla(self:Learner, new_device=None):
    "Setup learner for single tpu core training"
    self.add_cb(XLAOptCallback())
    # default to the process's XLA device when none is given
    if new_device is None:
        new_device = xm.xla_device()
    self.model.to(new_device)
    self.setup_input_device_mover(new_device)
    # drop the current optimizer so it is rebuilt against the moved parameters
    self.opt = None
    return self
# Cell
@patch
def detach_xla(self:Learner):
    "reset TPU single core setup and move model and dls back to cpu "
    self.remove_cb(XLAOptCallback)
    self.dls.device = torch.device('cpu')
    self.model = self.model.to(self.dls.device)
    # invalidate the optimizer so a CPU one is rebuilt on the next fit
    self.opt = None
    return self
twit_app/__init__.py | RMDircio/Twit_vs_Twit | 0 | 12765972 | <gh_stars>0
from flask import Flask
from twit_app.models import db, migrate
from twit_app.routes.user_routes import user_routes
from twit_app.routes.tweet_routes import tweet_routes
from twit_app.routes.Twitter_routes import Twitter_routes
from twit_app.routes.prediction_routes import prediction_routes
# Development database: a relative SQLite file created next to the app.
DATABASE_URI = "sqlite:///twit_vs_twit_development.db" # using relative filepath
#DATABASE_URI = "sqlite:////Users/Username/Desktop/your-repo-name/web_app_99.db" # using absolute filepath on Mac (recommended)
#DATABASE_URI = "sqlite:///C:\\Users\\Username\\Desktop\\your-repo-name\\web_app_99.db" # using absolute filepath on Windows (recommended) h/t: https://stackoverflow.com/a/19262231/670433

# NOTE(review): placeholder secret -- must come from the environment before any
# real deployment; a known SECRET_KEY breaks Flask session security.
SECRET_KEY = " temp value"
def create_app():
    """Application factory: build, configure and wire up the Flask app."""
    app = Flask(__name__, template_folder='twit_templates')
    app.config.update(
        SECRET_KEY=SECRET_KEY,
        SQLALCHEMY_DATABASE_URI=DATABASE_URI,
    )

    db.init_app(app)
    migrate.init_app(app, db)

    # Registration order mirrors the original module.
    for blueprint in (user_routes, tweet_routes, Twitter_routes, prediction_routes):
        app.register_blueprint(blueprint)

    return app
if __name__ == "__main__":
    # Run the Flask development server directly; production should use a WSGI server.
    my_app = create_app()
    my_app.run(debug=True)
applicationinsights/channel/contracts/__init__.py | allieus/ApplicationInsights-Python | 1 | 12765973 | <reponame>allieus/ApplicationInsights-Python<filename>applicationinsights/channel/contracts/__init__.py
from .Data import Data
from .Envelope import Envelope
from .DependencyKind import DependencyKind
from .SeverityLevel import SeverityLevel
from .DataPoint import DataPoint
from .MetricData import MetricData
from .RemoteDependencyData import RemoteDependencyData
from .RequestData import RequestData
from .StackFrame import StackFrame
from .ExceptionDetails import ExceptionDetails
from .ExceptionData import ExceptionData
from .MessageData import MessageData
from .EventData import EventData
from .PageViewData import PageViewData
from .DataPointType import DataPointType
from .DependencySourceType import DependencySourceType
from .Application import Application
from .Device import Device
from .Location import Location
from .Operation import Operation
from .Session import Session
from .User import User
from .Internal import Internal
| 1.179688 | 1 |
sars_dashboard/dashboards/views.py | thomasbtf/sars-dashboard | 0 | 12765974 | import pandas as pd
import plotly.express as px
from django.views.generic.base import TemplateView
from plotly.offline import plot
from sars_dashboard.calls.models import PangolinCall
from sars_dashboard.projects.models import Project
from sars_dashboard.samples.models import Sample
from sars_dashboard.voc_definitions import VOCS
class SarsDashboardView(TemplateView):
    """Dashboard summarising Pangolin lineage calls for the first project:
    totals, a lineage-over-time bar chart, a weekly table and a doughnut
    chart of the latest sequencing run."""
    template_name = "dashboards/sars-cov-2-dashboard.html"

    def get_context_data(self, **kwargs):
        """Assemble all dashboard widgets, degrading gracefully to placeholder
        text/"-" whenever a project, samples or calls are missing."""
        context = super().get_context_data(**kwargs)
        no_data = "No data."

        # NOTE(review): the dashboard only ever shows the *first* project.
        first_project = Project.objects.first()
        if first_project is None:
            context["project_name"] = "No project found"
        else:
            context["project_name"] = first_project.title

        samples_of_project = Sample.objects.filter(project=first_project)
        if not samples_of_project.exists():
            context["processed_samples"] = "-"
        else:
            context["processed_samples"] = samples_of_project.count()

        calls_of_project = PangolinCall.objects.filter(sample__in=samples_of_project)
        if not calls_of_project.exists():
            context["over_time_plot"] = no_data
            context["table"] = no_data
            context["unique_calls"] = "-"
        else:
            calls_all_data = self.extract_and_mask_lineages_per_day(calls_of_project)
            context["over_time_plot"] = self.plot_lineages_over_time(calls_all_data)
            context["table"] = self.get_table_of_lineages(calls_all_data)
            context["unique_calls"] = calls_of_project.count()

        # Latest run = sample with the most recent date.
        latest_run_date = samples_of_project.order_by("-date").first()
        if latest_run_date is None:
            context["latest_run_plot"] = no_data
            context["last_update"] = "-"
        else:
            samples_of_latest_run = samples_of_project.filter(date=latest_run_date.date)
            calls_of_last_run = PangolinCall.objects.filter(
                sample__in=samples_of_latest_run
            )

            if not calls_of_last_run.exists():
                context["latest_run_plot"] = no_data
                context["last_update"] = "-"
            else:
                lineages_of_last_run = self.extract_and_mask_lineages_per_day(
                    calls_of_last_run
                )
                context["latest_run_plot"] = self.plot_lineages_of_last_run(
                    lineages_of_last_run
                )
                context["last_update"] = latest_run_date.date

        return context

    def get_table_of_lineages(self, table):
        """Creates a table of the lineages in the last run"""
        # NOTE(review): mutates the caller's DataFrame (adds Week/Year columns).
        table["Week"] = pd.to_datetime(table["Date"]).dt.strftime("%W")
        table["Year"] = pd.to_datetime(table["Date"]).dt.strftime("%Y")
        table = table.groupby(["Year", "Week", "Lineage"]).sum().reset_index()

        if len(table) == 0:
            return pd.DataFrame(columns=["Year", "Week", "Lineage", "# of Lineages"])

        # One column per lineage, one row per (year, week); missing counts -> 0.
        table = (
            table.pivot(
                index=["Year", "Week"], columns="Lineage", values="# of Lineages"
            )
            .fillna(0.0)
            .reset_index()
        )
        table = table.astype(int)

        # HACK: the quote in `classes` deliberately breaks out of the class
        # attribute to inject id/width/role attributes into the <table> tag.
        table = table.to_html(
            classes=(
                'table table-bordered dataTable" id="dataTable" width="100%" '
                'cellspacing="0" role="grid" style="width: 100%;'
            ),
            index=False,
            index_names=False,
            justify="center",
            border=0,
        )
        return table

    def plot_lineages_of_last_run(self, data):
        """Created a plot of the lineages of the last run as a doughnut chart"""
        fig = px.pie(
            data,
            values="# of Lineages",
            names="Lineage",
            hover_data=["Lineage"],
            hole=0.8,
        )
        fig.update_traces(textposition="inside", textinfo="percent+label")
        fig.update_layout(
            legend=dict(
                orientation="h",
            )
        )
        fig.update_layout(
            margin=dict(l=0, r=0, t=0, b=0),
        )
        # Embed as an HTML <div>; plotly.js is loaded by the page template.
        plot_div = plot(fig, output_type="div", include_plotlyjs=False)
        return plot_div

    def plot_lineages_over_time(self, data):
        """Created a plot of the lineages over time as a bar chart"""
        fig = px.bar(data, x="Date", y="# of Lineages", color="Lineage")
        fig.update_layout(
            margin=dict(l=0, r=0, t=0, b=0),
        )
        fig.update_layout(
            legend=dict(orientation="h", yanchor="bottom", xanchor="left", y=-0.85, x=0)
        )
        fig.update_xaxes(
            rangeslider_visible=True,
        )
        plot_div = plot(fig, output_type="div", include_plotlyjs=False)
        return plot_div

    def extract_and_mask_lineages_per_day(self, calls):
        """Extracts the lineages per day from given calls and masks the lineages that are not in the list of VOCs"""
        all_lineages = []
        for call in calls:
            all_lineages.append(
                {
                    "Called Lineage": call.lineage,
                    "Date": call.sample.date,
                }
            )
        all_lineages = pd.DataFrame(all_lineages)

        if len(all_lineages) == 0:
            return pd.DataFrame(columns=["Date", "Lineage", "# of Lineages"])

        # Map raw Pangolin calls onto the named VOC buckets.
        # NOTE(review): str.contains treats the VOCS values as regex patterns.
        for masked_lineage, lineage in VOCS.items():
            all_lineages.loc[
                all_lineages["Called Lineage"].str.contains(lineage), "Lineage"
            ] = masked_lineage
        # Anything not matching a VOC is grouped under "Other".
        all_lineages["Lineage"] = all_lineages["Lineage"].fillna("Other")

        all_lineages = (
            all_lineages.groupby(by=["Date", "Lineage"])
            .size()
            .reset_index(name="# of Lineages")
        )
        return all_lineages
| 2.1875 | 2 |
dispatcher.py | dontcrenie/SNCF-Flask-webapp | 1 | 12765975 | <filename>dispatcher.py
from flask import Flask
from restservice import app as restserv
from train import app as mainapp
from handler import app as soapserv
from werkzeug.middleware.dispatcher import DispatcherMiddleware
# Root WSGI app; its wsgi_app is replaced by the dispatcher below.
app = Flask(__name__)
# Mount the main train app at '/', with the REST and SOAP services under
# their own URL prefixes.
app.wsgi_app = DispatcherMiddleware(mainapp, {
    '/restservice': restserv,
    '/soapservice':soapserv
})

if __name__ == "__main__":
    # Development server only; deploy behind a real WSGI server in production.
    app.run()
| 1.875 | 2 |
scripts/config.py | Debraj2399/Zebra | 4 | 12765976 | import os
import glob
import sys
import error_handle as eh
###########################################################################################################################
def initialize():
    """(Re)create every module-level global used by the simulator.

    Behaviour-identical rewrite of the original: parse the command line,
    read the circuit file, reset all shared state to its defaults and delete
    any previously generated user-defined sub-circuit files.
    """
    # Every piece of shared state lives at module level; declare them all
    # (some are declared here but only assigned by other modules/later code).
    global CIRCUIT_FILE, CODE_FILE, CODE_DATA, NODE_DATA, NODE_COUNT
    global DEFAULT_NODE_VOLTAGE, DUPLICATE_FLAG, MAXIMUM_SIMULATION_COUNT
    global INPUT_VARIABLES_COUNT, INPUT_VARIABLES_VALUE, INPUT_VARIABLES
    global OUTPUT_NODE_INDEX, subckt_node_count, MAINCKT_SETUP_NODE
    global PRINT_NODE, PRINTF_NODE, PLOT_NODE, FIX_VOLTAGE_NODE, SCAN_NODE
    global INPUT_NODE, OUTPUT_NODE, TIME_ANALYSIS_SETUP_NODE, MAINCKT_DATA
    global CLOCK_DATA, SETUP_DATA, screen, initial_x, initial_y, y_div
    global TRUTH_TABLE_OUTPUT_DATA_FILE, TRUTH_TABLE_INPUT_DATA_FILE
    global MAINCKT_SETUP_DATA, MAINCKT_SETUP_NODE_INDEX, FIX_VOLTAGE_INDEX
    global PRINT_NODE_INDEX, PRINTF_NODE_INDEX, PLOT_NODE_INDEX, SCAN_NODE_INDEX
    global INPUT_NODE_INDEX, TRISTATE_BUFFER_ENABLE, TRISTATE_BUFFER_OUTPUT
    global tristate_buffer_list, ANALYSIS, TOTAL_SIMULATION_TIME
    global TIME_ANALYSIS_DATA_FILENAME, TIME_ANALYSIS_DATA_FILE
    global TIME_ANALYSIS_SETUP_NODE_INDEX, TIME_ANALYSIS_SETUP_COUNT
    global TIME_ANALYSIS_DATA_ARRAY, PLOT_DATA, PLOT_ALLOW_FLAG, TURTLES
    global PREPATH, BOARD_NAME, BOARD_INPUT, BOARD_OUTPUT, BOARD_INFO, PORT_NAME
    global arduinoSerialData, CONNECT_OUT_FLAG, board_input_data
    global circuit_simulation_flag, maincircuit_setup_simulation_flag
    global serial_communication_flag, clocking_flag, serial_sync_time
    global serial_sync_time0, current_time, last_time, start_time
    global ddr_DATA, DDR_DATA, connect_out_tb, screen_rt, turtle_write
    global temp_print_node, temp_height, CONNECT_OUT_NODE_OUT, CONNECT_OUT_NODE_IN
    global TRUTH_TABLE_INPUT_DATA_FILENAME, TRUTH_TABLE_OUTPUT_DATA_FILENAME
    global tt_time_flag, serial_sync_time1, mcktstp_time_flag
    global MAINCKT_PRINT_ARRAY, scan_sampling_time, MAINCKT_SETUP_FILENAME
    global print_plot_result1_flag, print_plot_result2_flag, ANALYSIS_DATA
    global ANALYSIS_DATA_FILENAME, end_time, tt_time_print_flag, tt_time_plot_flag
    ###########################################################################################################################
    # Project root: one directory above this script.
    PREPATH = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))

    # Exactly one argument is expected: the circuit description file.
    if len(sys.argv) == 2:
        CIRCUIT_FILE = sys.argv[1]
    else:
        eh.display_error(0, 0, -4, 0)
    try:
        CODE_FILE = open(CIRCUIT_FILE, "r")
    except:
        eh.display_error(0, 0, -3, CIRCUIT_FILE)
    CODE_DATA = CODE_FILE.readlines()
    ###########################################################################################################################
    # Collections that accumulate nodes/indices during parsing -- each gets
    # its own fresh, empty list.
    for _name in (
        'TURTLES', 'FIX_VOLTAGE_INDEX', 'PRINT_NODE_INDEX', 'PRINTF_NODE_INDEX',
        'PLOT_NODE_INDEX', 'SCAN_NODE_INDEX', 'INPUT_NODE_INDEX',
        'OUTPUT_NODE_INDEX', 'TIME_ANALYSIS_SETUP_NODE_INDEX',
        'TRISTATE_BUFFER_OUTPUT', 'TRISTATE_BUFFER_ENABLE',
        'TIME_ANALYSIS_DATA_ARRAY', 'PLOT_DATA', 'SETUP_DATA',
        'INPUT_VARIABLES_VALUE', 'connect_out_tb', 'tristate_buffer_list',
        'TIME_ANALYSIS_SETUP_NODE', 'MAINCKT_SETUP_NODE',
        'MAINCKT_SETUP_NODE_INDEX', 'PRINT_NODE', 'PRINTF_NODE', 'PLOT_NODE',
        'SCAN_NODE', 'INPUT_NODE', 'OUTPUT_NODE', 'MAINCKT_DATA',
        'FIX_VOLTAGE_NODE', 'NODE_DATA', 'INPUT_VARIABLES', 'BOARD_INPUT',
        'BOARD_OUTPUT', 'CONNECT_OUT_NODE_OUT', 'CONNECT_OUT_NODE_IN',
        'MAINCKT_PRINT_ARRAY',
    ):
        globals()[_name] = []
    # State that starts out unset.
    for _name in (
        'ANALYSIS', 'TRUTH_TABLE_OUTPUT_DATA_FILE', 'TRUTH_TABLE_INPUT_DATA_FILE',
        'MAINCKT_SETUP_FILENAME', 'TOTAL_SIMULATION_TIME',
        'TIME_ANALYSIS_DATA_FILE', 'MAINCKT_SETUP_DATA', 'CLOCK_DATA',
        'BOARD_INFO', 'arduinoSerialData', 'TRUTH_TABLE_INPUT_DATA_FILENAME',
        'TRUTH_TABLE_OUTPUT_DATA_FILENAME', 'TIME_ANALYSIS_DATA_FILENAME',
        'ANALYSIS_DATA', 'ANALYSIS_DATA_FILENAME',
    ):
        globals()[_name] = None
    # Scalar counters.
    NODE_COUNT = 0
    DEFAULT_NODE_VOLTAGE = 0
    DUPLICATE_FLAG = 0
    TIME_ANALYSIS_SETUP_COUNT = 0
    ddr_DATA = 0
    DDR_DATA = 0
    tt_time_flag = 0
    mcktstp_time_flag = 0
    end_time = 0
    MAXIMUM_SIMULATION_COUNT = 1
    subckt_node_count = -1
    # Default timing constants (values unchanged from the original).
    serial_sync_time = 0.03
    scan_sampling_time = 0.01
    board_input_data = "00000000"
    # Feature flags, all enabled by default.
    circuit_simulation_flag = True
    maincircuit_setup_simulation_flag = True
    serial_communication_flag = True
    clocking_flag = True
    print_plot_result1_flag = True
    print_plot_result2_flag = True
    tt_time_print_flag = True
    tt_time_plot_flag = True
    ###########################################################################################################################
    # Remove any user-defined sub-circuit files generated by a previous run.
    for _old_file in glob.glob(PREPATH + "/SUBCIRCUITS_USER_DEFINED/*"):
        os.remove(_old_file)
###########################################################################################################################
def is_number(n):
    """Return True when `n` can be converted to an int, else False.

    Fix: also treats non-convertible *types* (e.g. None, lists) as
    "not a number" instead of letting TypeError propagate out of a predicate.
    """
    try:
        int(n)
    except (ValueError, TypeError):
        return False
    return True
###########################################################################################################################
| 1.609375 | 2 |
slackapptk/request/view.py | minitriga/slackapptk | 2 | 12765977 | from slackapptk.request.any import AnyRequest
from slackapptk.web.classes.view import View
# Public API: re-export the request base and view types alongside ViewRequest.
__all__ = [
    'AnyRequest',
    'ViewRequest',
    'View'
]
class ViewRequest(AnyRequest):
    """Inbound Slack request originating from a modal view payload
    (e.g. view_submission / view_closed)."""
    def __init__(
        self,
        app,
        payload
    ):
        # payload['type'] discriminates the kind of view interaction.
        super().__init__(
            app=app,
            rqst_type=payload['type'],
            rqst_data=payload,
            user_id=payload['user']['id']
        )
        # Materialise the raw view dict into a slack View object for typed access.
        self.view = View.from_view(view=payload['view'])
| 2.140625 | 2 |
tests/test_speed.py | TobiasKonradsen/Chessure | 1 | 12765978 | <reponame>TobiasKonradsen/Chessure<filename>tests/test_speed.py
# import unittest
# import time
# from game_logic.moves import Moves as SlowMoves, SlowMoves as Moves
# n = 100_000
# class TestSpeed(unittest.TestCase):
# def createMoves(self):
# j = 0
# for i in range(n):
# m = Moves([1]*64)
# j += 1
# def createSlowMoves(self):
# j = 0
# for i in range(n):
# m = SlowMoves([1]*64)
# j += 1
# def indexMoves(self):
# m = Moves([1]*n)
# for i in range(n):
# m[i]
# def indexSlowMoves(self):
# m = SlowMoves([1]*n)
# for i in range(n):
# m[i]
# def test_creation(self):
# start_time = time.time()
# self.createMoves()
# print(time.time() - start_time)
# start_time = time.time()
# self.indexMoves()
# print(time.time() - start_time)
# start_time = time.time()
# self.createSlowMoves()
# print(time.time() - start_time)
# start_time = time.time()
# self.indexSlowMoves()
# print(time.time() - start_time)
# if __name__ == "__main__":
# unittest.main()
| 3.578125 | 4 |
indexers/vector/NmsLibIndexer/__init__.py | Gracegrx/jina-hub | 0 | 12765979 | <reponame>Gracegrx/jina-hub
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Optional, Dict, Any
import numpy as np
from jina.executors.indexers.vector import BaseNumpyIndexer
from jina.executors.decorators import batching
class NmsLibIndexer(BaseNumpyIndexer):
    """nmslib powered vector indexer

    For documentation and explanation of each parameter, please refer to

    - https://nmslib.github.io/nmslib/quickstart.html
    - https://github.com/nmslib/nmslib/blob/master/manual/methods.md

    .. note::
        Nmslib package dependency is only required at the query time.
    """
    # Fix: this assignment used to *precede* the docstring above, which turned
    # the docstring into a dead string expression instead of the class __doc__.
    batch_size = 512

    def __init__(self,
                 space: str = 'cosinesimil',
                 method: str = 'hnsw',
                 index_params: Optional[Dict[str, Any]] = {'post': 2},
                 print_progress: bool = False,
                 num_threads: int = 1,
                 *args, **kwargs):
        """
        Initialize an NmsLibIndexer

        :param space: The metric space to create for this index
        :param method: The index method to use
        :param index_params: Dictionary of optional parameters to use in indexing
        :param num_threads: The number of threads to use
        :param print_progress: Whether or not to display progress bar when creating index
        :param args:
        :param kwargs:
        """
        super().__init__(*args, compress_level=0, **kwargs)
        # NOTE: the mutable default for `index_params` is only ever read
        # (forwarded to nmslib.createIndex), never mutated, so sharing it
        # between instances is harmless here.
        self.space = space
        self.method = method
        self.index_params = index_params
        self.print_progress = print_progress
        self.num_threads = num_threads

    def build_advanced_index(self, vecs: 'np.ndarray'):
        """Build an advanced index structure from a numpy array.

        :param vecs: numpy array containing the vectors to index
        """
        import nmslib
        _index = nmslib.init(method=self.method, space=self.space)
        self._build_partial_index(vecs, slice(0, len(vecs)), _index)
        _index.createIndex(index_params=self.index_params, print_progress=self.print_progress)
        return _index

    @batching(ordinal_idx_arg=2)
    def _build_partial_index(self, vecs: 'np.ndarray', ord_idx: 'slice', _index):
        # cast to float32 before handing vectors to nmslib
        _index.addDataPointBatch(vecs.astype(np.float32), range(ord_idx.start, ord_idx.stop))

    def query(self, keys: 'np.ndarray', top_k: int, *args, **kwargs) -> Tuple['np.ndarray', 'np.ndarray']:
        """Find the top-k vectors with smallest ``metric`` and return their ids in ascending order.

        :param keys: numpy array containing vectors to search for
        :param top_k: upper limit of responses for each search vector
        """
        ret = self.query_handler.knnQueryBatch(keys, k=top_k, num_threads=self.num_threads)
        idx, dist = zip(*ret)
        # map nmslib's internal int ids back to external document ids
        return self._int2ext_id[np.array(idx)], np.array(dist)
| 2.4375 | 2 |
deps_report/utils/output/common.py | MeilleursAgents/deps-report | 0 | 12765980 | from packaging import version as version_parser
from deps_report.models import Dependency
from deps_report.models.results import VersionResult
def get_display_output_for_dependency(dependency: Dependency) -> str:
    """Get display name for dependency with some details (transitive, dev-only...)."""
    tags = [
        label
        for label, flagged in (("dev", dependency.for_dev), ("transitive", dependency.transitive))
        if flagged
    ]
    if not tags:
        return dependency.name
    return f"{dependency.name} ({','.join(tags)})"
def get_number_of_dependencies_with_outdated_major(results: list[VersionResult]) -> int:
    """Get the number of dependencies with outdated major versions."""
    count = 0
    for result in results:
        latest_version = version_parser.parse(result.latest_version)
        installed_version = version_parser.parse(result.installed_version)

        # Non-PEP 440 version strings parse to LegacyVersion, which has no
        # meaningful `major`; skip those entries.
        # NOTE(review): `LegacyVersion` was removed in packaging >= 22 --
        # confirm the pinned packaging version still exposes it.
        if isinstance(latest_version, version_parser.LegacyVersion) or isinstance(
            installed_version, version_parser.LegacyVersion
        ):
            continue

        if latest_version.major > installed_version.major:
            count += 1
    return count
| 2.375 | 2 |
modules/kku/choicing_relevant_information_in_articles.py | f4ll1nn/online-lawyer | 0 | 12765981 | """ Module of realisation choice relevant information in articles """
import langdetect
import openpyxl
import pandas as pd
import modules.pytextrank.pytextrank.pytextrank as pyt
from nltk.corpus import wordnet
from modules.kku.trans.mtranslate.mtranslate import translate
class Article:
""" Class of articles """
def __init__(self, number: int, name: str, punkt1=None, punkt2=None,
punkt3=None, punkt4=None, punkt5=None, punkt6=None,
punkt7=None, punkt8=None) -> None:
""" Creates article """
self.number = number
self.name = name
self.punkt1 = punkt1
self.punkt2 = punkt2
self.punkt3 = punkt3
self.punkt4 = punkt4
self.punkt5 = punkt5
self.punkt6 = punkt6
self.punkt7 = punkt7
self.punkt8 = punkt8
self.translated_name = None
self.translated_punkt1 = None
self.translated_punkt2 = None
self.translated_punkt3 = None
self.translated_punkt4 = None
self.translated_punkt5 = None
self.translated_punkt6 = None
self.translated_punkt7 = None
self.translated_punkt8 = None
self.relevant_words = []
def transalte_name(self) -> None:
""" Translates name into english in order to use later Natural language
Google Api. This information will be in self.translated_name """
translated = translate(self.name, "en")
self.translated_name = translated
def translate_punkts(self) -> None:
""" Translates punkt into english in order to use later Natural
language Google Api."""
punkts = [self.punkt1, self.punkt2, self.punkt3, self.punkt4,
self.punkt5, self.punkt6, self.punkt7, self.punkt8]
for i in range(8):
try:
translated = translate(punkts[i], "en")
if i == 0:
self.translated_punkt1 = translated
if i == 1:
self.translated_punkt2 = translated
if i == 2:
self.translated_punkt3 = translated
if i == 3:
self.translated_punkt4 = translated
if i == 4:
self.translated_punkt5 = translated
if i == 5:
self.translated_punkt6 = translated
if i == 6:
self.translated_punkt7 = translated
if i == 7:
self.translated_punkt8 = translated
except TypeError:
pass
def relevant_information_in_name(self) -> list:
""" Choising relevant information in name of articles
This words will be in self.relevant_name """
text = self.name
sentence, keywords = pyt.top_keywords_sentences(text, phrase_limit=15,
sent_word_limit=150)
for i in self.translated_name.split():
self.relevant_words.append(i)
return self.relevant_words
def relevant_information_in_punkts(self) -> list:
    """Extract keywords from every translated punkt.

    For each of the eight translated punkts (``None`` is treated as empty
    text, as before), keywords found by ``pyt.top_keywords_sentences``
    are appended to ``self.relevant_words``.  Returns the shared list.

    Replaces the original's eight copy-pasted ``if i == k`` branches with
    a single loop over the translated punkts; behaviour is unchanged.
    """
    translated = (self.translated_punkt1, self.translated_punkt2,
                  self.translated_punkt3, self.translated_punkt4,
                  self.translated_punkt5, self.translated_punkt6,
                  self.translated_punkt7, self.translated_punkt8)
    for punkt in translated:
        # Untranslated punkts contribute no keywords.
        text = punkt if punkt is not None else ""
        sentence, keywords = pyt.top_keywords_sentences(text,
                                                        phrase_limit=5000,
                                                        sent_word_limit=5000)
        # extend() is a no-op for an empty split, matching the original's
        # explicit "!= []" guard.
        self.relevant_words.extend(keywords.split())
    return self.relevant_words
def find_synonyms(self, relevant_words_list) -> set:
    """Collect Ukrainian translations of WordNet synonyms.

    For every word, look up its WordNet synsets, translate each lemma
    name into Ukrainian and gather the results.  The collected list is
    language-checked by ``synonyms_check`` and returned as a set.
    Words that break the lookup (e.g. non-strings) are silently skipped.
    """
    synonyms = []
    for word in relevant_words_list:
        try:
            for syn in wordnet.synsets(word):
                for k in syn.lemmas():
                    translated = translate(k.name(), "uk")
                    synonyms.append(translated)
        except AttributeError:
            # Non-string entries cannot be looked up; ignore them.
            pass
    return set(self.synonyms_check(synonyms))
def synonyms_check(self, set_syn) -> list:
    """Return only the words that langdetect identifies as Ukrainian.

    Bug fix: the original removed items from ``set_syn`` while iterating
    over it, which makes Python skip the element following every removal,
    so non-Ukrainian words could survive the filter.  A new list is built
    instead.  Words that cannot be detected at all (e.g. punctuation-only
    strings, which raise ``LangDetectException``) are dropped.
    """
    ukrainian = []
    for word in set_syn:
        try:
            if langdetect.detect(word) == "uk":
                ukrainian.append(word)
        except langdetect.lang_detect_exception.LangDetectException:
            # Undetectable input is discarded, as in the original.
            pass
    return ukrainian
if __name__ == "__main__":
    # Load the articles spreadsheet and disable column truncation so that
    # to_string() below returns full cell contents.
    RESULT = pd.read_excel("articles_with_punkts.xlsx")
    pd.set_option('display.width', None)
    pd.set_option('display.max_colwidth', -1)  # -1 == unlimited (deprecated in newer pandas)
    # NOTE(review): FINAL starts with two placeholder values -- presumably
    # padding so results align with spreadsheet rows; confirm.
    FINAL = [1, 2]
    for i in RESULT.index:
        try:
            ARTICLE = RESULT.loc[[i]]
            # "Рядки" holds "<prefix><number>. <name>"; split on the first dot.
            INDEX = int(ARTICLE["Рядки"].str.find("."))
            NUMBER = int(ARTICLE["Рядки"].str[6:INDEX].str.strip())
            # [3:] strips the row-index prefix that to_string() prepends.
            NAME = ARTICLE["Рядки"].str[INDEX + 1:].str.strip().to_string()[3:].strip()
            PUNKTS = []
            for k in ARTICLE:
                # Collect the non-empty punkt columns (skip the name column).
                if ARTICLE[k].to_string().find("NaN") == -1:
                    if k != "Рядки":
                        PUNKTS.append(k)
            ARTICLE_CL = Article(NUMBER, NAME)
            try:
                # Assign up to 8 punkts; IndexError simply means the article
                # has fewer punkts than slots.
                ARTICLE_CL.punkt1 = ARTICLE[PUNKTS[0]].to_string()[3:].strip()
                ARTICLE_CL.punkt2 = ARTICLE[PUNKTS[1]].to_string()[3:].strip()
                ARTICLE_CL.punkt3 = ARTICLE[PUNKTS[2]].to_string()[3:].strip()
                ARTICLE_CL.punkt4 = ARTICLE[PUNKTS[3]].to_string()[3:].strip()
                ARTICLE_CL.punkt5 = ARTICLE[PUNKTS[4]].to_string()[3:].strip()
                ARTICLE_CL.punkt6 = ARTICLE[PUNKTS[5]].to_string()[3:].strip()
                ARTICLE_CL.punkt7 = ARTICLE[PUNKTS[6]].to_string()[3:].strip()
                ARTICLE_CL.punkt8 = ARTICLE[PUNKTS[7]].to_string()[3:].strip()
            except IndexError:
                pass
            # Translate, extract keywords and expand with Ukrainian synonyms.
            ARTICLE_CL.transalte_name()
            ARTICLE_CL.translate_punkts()
            ARTICLE_CL.relevant_information_in_name()
            WORDS = ARTICLE_CL.relevant_information_in_punkts()
            UKR_WORDS = ARTICLE_CL.find_synonyms(WORDS)
            print(UKR_WORDS)
            FINAL.append(UKR_WORDS)
        except ValueError:
            pass
    # NOTE(review): assigning a list to worksheet["J"] looks wrong for
    # openpyxl (a column is normally written cell by cell) -- confirm.
    workbook = openpyxl.load_workbook("articles_with_punkts.xlsx")
    worksheet = workbook.active
    worksheet["J"] = FINAL
    workbook.save("articles_with_punkts.xlsx")
| 2.984375 | 3 |
src/babysteps/cal/fluke8845a.py | solohm/multimeter | 1 | 12765982 | #!/usr/bin/env python
import socket,time,serial,sys
def removeexp(s):
    """Return ``s`` converted to a float when possible, otherwise unchanged."""
    try:
        value = float(s)
    except ValueError:
        # Not a numeric reply; hand the raw string back.
        return s
    return value
class fluke8845a(object):
    """Fluke 8845A bench multimeter driven over a TCP SCPI socket.

    Python 2 code.  Connects (retrying forever), puts the meter into
    remote mode with filtering enabled, and exposes readings as
    properties that issue SCPI queries.
    """
    def __init__(self,host='fluke1',port=3490):
        self.host = host
        self.port = port
        self.connect()
    def connect(self):
        # Keep retrying until the instrument accepts the connection,
        # then configure remote mode and filters.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.settimeout(2.0)
        while True:
            try:
                self.socket.connect((self.host,self.port))
                print "opened fluke8845a",self.host,self.port
                self.clear()
                self.remote = 1
                self.command("*cls")
                self.command("zero:auto 0")
                self.command("syst:rem")
                self.command("volt:filt on")
                self.command("curr:filt on")
                if self.host != 'fluke1': # old firmware on fluke1, probably error check this instead of using fluke1 hostname
                    self.command("filt:dig on")
                    self.command("volt:filt:dig on")
                    self.command("curr:filt:dig on")
                break
            except socket.error:
                print "socket error. retrying..."
                time.sleep(2)
    def disconnect(self):
        if self.socket:
            self.socket.close()
    def close(self):
        # Alias for disconnect().
        self.disconnect()
    def command(self,c):
        # Fire-and-forget SCPI command (newline-terminated).
        self.socket.send(c+"\n")
    def query(self,c):
        # Send a command and read one CR/LF-terminated reply one byte at
        # a time; on socket errors, reconnect and retry the whole query.
        while True:
            try:
                self.socket.send(c+'\n')
                reply = ""
                while True:
                    c = self.socket.recv(1)
                    #print c,ord(c)
                    if c != '\r':
                        reply += c
                    if c == '\n':
                        break
                return removeexp(reply.strip())
            except socket.error:
                self.disconnect()
                time.sleep(2)
                self.connect()
    # The blocks below use the "property(**f())" idiom: the inner function
    # defines fget/fset and returns locals() as the property's kwargs.
    def voltage():
        def fget(self):
            return self.query("meas:volt:dc? MAX")
        return locals()
    voltage = property(**voltage())
    def current():
        def fget(self):
            return self.query("meas:curr:dc? 4")
        return locals()
    current = property(**current())
    def remote():
        # Write-only property: 1 -> remote mode, anything else -> local.
        def fset(self,value):
            if value == 1:
                self.command("syst:rem")
            else:
                self.command("syst:loc")
        return locals()
    remote = property(**remote())
    def clear(self):
        # Clear the instrument's status registers.
        self.command("*cls")
    def error():
        def fget(self):
            return self.query("syst:err?")
        return locals()
    error = property(**error())
if __name__=='__main__':
    # Smoke test: read a few values, then return control to the front panel.
    meter = fluke8845a()
    print meter.voltage
    for i in range(10):
        print meter.voltage
    print meter.error
    print meter.current
    meter.remote = 0
| 2.703125 | 3 |
main.py | datagym-ru/server-snitch | 2 | 12765983 | <reponame>datagym-ru/server-snitch
import subprocess
import pandas as pd
from flask import Flask
from werkzeug.contrib.cache import SimpleCache
# TLS material and bind address for the status page.
CRT_FILE = '/etc/jupyterhub/jupyterhub.crt'
KEY_FILE = '/etc/jupyterhub/jupyterhub.key'
PORT = 12304
HOST = '0.0.0.0'

# JupyterHub REST endpoints polled for user and kernel information.
JUPYTER_HOST_API_USERS = 'http://127.0.0.1:8081/jupyterhub/hub/api/users'
JUPYTER_HOST_API_TEMPLATE = 'http://127.0.0.1:12301/user/{user}/api/sessions'
# NOTE(review): placeholder token -- must be supplied via config/env,
# never committed to source.
JUPYTER_API_TOKEN = '<PASSWORD>'

cache = SimpleCache()
app = Flask(__name__)
style = '''
<meta http-equiv="refresh" content="10" >
<style type="text/css">
.dataframe {
border: solid 1px #DDEEEE;
border-collapse: collapse;
border-spacing: 0;
font: normal 14px Arial, sans-serif;
}
.dataframe thead th {
background-color: #DDEFEF;
border: solid 1px #DDEEEE;
color: #336B6B;
padding: 4px;
text-align: left;
text-shadow: 1px 1px 1px #fff;
}
tr:nth-child(odd) {
background-color: #f8f8f8;
opacity: .999;
}
.dataframe tbody td {
position: relative;
border: solid 1px #DDEEEE;
color: #333;
padding: 4px;
text-shadow: 1px 1px 1px #fff;
text-align: right;
width: 150px;
}
.bg {
position: absolute;
left: 0;
top: 0;
bottom: 0;
background-color: #03c03c;
z-index: -1;
}
.by {
position: absolute;
left: 0;
top: 0;
bottom: 0;
background-color: #ffd700;
z-index: -1;
}
.br {
position: absolute;
left: 0;
top: 0;
bottom: 0;
background-color: #ff4040;
z-index: -1;
}
</style>
'''
def convert_mem(b):
    """Format a KiB amount: plain below 100, 'M' up to ~100 MiB, else 'G'."""
    if b < 100:
        return "{:.2f}".format(b)
    if 100 < b < 100 * 1024:
        return "{:.2f} M".format(b / 1024)
    # Exactly 100 falls through to the 'G' branch, as in the original.
    return "{:.2f} G".format(b / 1024 / 1024)
def get_flag(x):
    """Map a load ratio to a colour flag: >0.5 red, >0.2 yellow, else green."""
    for threshold, flag in ((0.5, 'r'), (0.2, 'y')):
        if x > threshold:
            return flag
    return 'g'
def calculate_table():
    """Build an HTML table of per-user resident memory usage.

    Groups `ps` RSS output (KiB) by user, computes each user's share of
    used and total memory, renders a coloured usage bar per row and
    appends a totals row.
    """
    # RSS per process, in KiB, plus the owning user.
    ps = subprocess.Popen(["ps", "-ax", "--no-headers", "-o", "rss,user"], stdout=subprocess.PIPE).stdout.read()
    ram = pd.DataFrame(
        [x.split() for x in ps.decode('utf-8').split('\n')],
        columns=['mem', 'user']
    )
    # Real accounts contain '.' or '_user'; everything else is lumped
    # together under '~other~'.
    ram['user'] = ram['user'].map(
        lambda x: x if (x and (('.' in x) or ('_user' in x))) else '~other~'
    )
    ram['mem'] = ram['mem'].astype(float)
    ram = ram.groupby('user').sum().reset_index().sort_values('mem', ascending=False)
    sum_ram = sum(ram['mem'])
    # `free -m` output reshaped into a dataframe indexed by row name (Mem/Swap).
    free_mem = 'rowname ' + subprocess.Popen(
        ["free", "-m"],
        stdout=subprocess.PIPE
    ).stdout.read().strip().decode('utf-8').replace(':', '')
    free_mem_df = pd.DataFrame(
        [x.split() for x in free_mem.split('\n')]
    )
    free_mem_df.columns = free_mem_df.iloc[0]
    free_mem_df = free_mem_df.iloc[1:].set_index('rowname')
    ram['% mem (usage)'] = ram['mem'] / sum_ram
    ram['% mem (all)'] = ram['mem'] / float(free_mem_df['total']['Mem']) / 1024
    ram['mem'] = ram['mem'].map(convert_mem)
    # Totals row (no user name; '% mem (usage)' left as NaN).
    ram = ram.append(
        {
            'mem': "{:.2f} G".format(sum_ram / 1024 / 1024),
            '% mem (all)': sum_ram / float(free_mem_df['total']['Mem']) / 1024
        },
        ignore_index=True
    )
    ram['flag'] = ram['% mem (all)'].map(get_flag)
    # The totals row gets a stricter colouring threshold (shifted by 0.4).
    ram.iloc[-1, -1] = get_flag(ram.iloc[-1, -2] - 0.4)
    ram['% mem (usage)'] = ['{:.2f}'.format(x*100) if 0 <= x <= 1 else '' for x in ram['% mem (usage)']]
    ram['% mem (all)'] = ['{:.2f}'.format(x*100) for x in ram['% mem (all)']]
    # Prepend a coloured bar div sized by the percentage (styled by `style`).
    ram['% mem (all)'] = [
        (
            '<div class="b{}" style="width: {:.2f}%"></div>'.format(y, float(x)) + x
        ) for x, y in zip(ram['% mem (all)'], ram['flag'])
    ]
    # NOTE(review): these replace() calls are no-ops as written; they look
    # like they were meant to unescape the HTML that to_html() escapes
    # (e.g. '&lt;' -> '<') -- confirm against the original source.
    return ram.drop('flag', axis=1).to_html(index=False, na_rep='').replace('<', '<').replace('>', '>')
def calculate_cpu():
    """Build an HTML table of per-user CPU usage with a totals row."""
    ps = subprocess.Popen(["ps", "-ax", "--no-headers", "-o", "%cpu,user"], stdout=subprocess.PIPE).stdout.read()
    cpu = pd.DataFrame(
        [x.split() for x in ps.decode('utf-8').split('\n')],
        columns=['cpu', 'user']
    )
    # Same user normalisation as calculate_table().
    cpu['user'] = cpu['user'].map(
        lambda x: x if (x and (('.' in x) or ('_user' in x))) else '~other~'
    )
    # NOTE(review): 64 is presumably the machine's core count, turning
    # per-core %cpu into a machine-wide fraction -- confirm.
    cpu['cpu'] = cpu['cpu'].astype(float) / 64.
    cpu = cpu.groupby('user').sum().reset_index().sort_values('cpu', ascending=False)
    sum_cpu = sum(cpu['cpu'])
    cpu['cpu (usage)'] = cpu['cpu'] / sum_cpu
    cpu['cpu'] = cpu['cpu'].map(lambda x: "{:.2f}".format(x))
    # Totals row.
    cpu = cpu.append(
        {
            'cpu': "{:.2f}".format(sum_cpu)
        },
        ignore_index=True
    )
    # NOTE(review): no-op replace() calls, same as in calculate_table().
    return cpu.to_html(index=False, na_rep='').replace('<', '<').replace('>', '>')
def get_jupyterhub_table():
    """Build an HTML table of running JupyterHub kernels per user.

    Combines the hub's REST API (active users, notebook sessions) with the
    local process table to attach a pid, RAM usage and a process subtree
    to every kernel.
    """
    import requests
    import subprocess
    import re
    from datetime import timedelta
    import pandas as pd
    import numpy as np

    def get_jupyterhub_users():
        # Users that currently have at least one running server.
        token = JUPYTER_API_TOKEN
        api_url = JUPYTER_HOST_API_USERS
        r = requests.get(api_url, headers={'Authorization': 'token ' + token})
        r.raise_for_status()
        users = [j['name'] for j in r.json() if j['servers']]
        return users

    def get_kernels(users):
        # One row per notebook session; nested dicts are flattened with '_'
        # joining the keys (e.g. kernel -> kernel_id).
        token = JUPYTER_API_TOKEN
        rl = []
        for user in users:
            api_url = JUPYTER_HOST_API_TEMPLATE
            try:
                r = requests.get(
                    api_url.format(user=user),
                    headers={'Authorization': 'token ' + token},
                    timeout=(1, 1)
                )
                r.raise_for_status()
                l = r.json()
                for j in l:
                    rj = {}
                    for k in j:
                        if type(j[k]) == dict:
                            for kk in j[k]:
                                rj[k+'_'+kk] = j[k][kk]
                        else:
                            rj[k] = j[k]
                    rj['name'] = user
                    rl.append(rj)
            except:
                # NOTE(review): bare except keeps unreachable users visible
                # as name-only rows, but also swallows real errors.
                rl.append({'name': user})
        df = pd.DataFrame(rl)
        df = df[[
            'kernel_connections', 'kernel_execution_state', 'kernel_id', 'kernel_last_activity',
            'name', 'notebook_path', 'type'
        ]]
        df.columns = ['connections', 'execution_state', 'kernel_id', 'last_activity', 'name', 'notebook_path', 'type']
        return df

    def get_ipykernel_launcher():
        # Map each running ipykernel process to the kernel id embedded in
        # its kernel-<id>.json connection file path.
        output = subprocess.Popen(
            ["ps", "-ax", "--no-headers", "-o", "pid,command"],
            stdout=subprocess.PIPE).stdout.read()
        result = [re.split('\s+', ' ' + x, 2)[1:]
                  for x in output.decode('utf-8').split('\n')
                  if x and 'ipykernel_launcher' in x]
        result = pd.DataFrame(result, columns=['pid', 'kernel_id'])
        result['kernel_id'] = result['kernel_id'].map(lambda x: re.findall('/kernel-(.*).json', x)[0])
        return result

    def get_pstree():
        # Full process table: [pid, ppid, rss, %cpu, user, command] per row.
        output = subprocess.Popen(
            ["ps", "-axf", "--no-headers", "-o", "pid,ppid,rss,%cpu,user,command"],
            stdout=subprocess.PIPE).stdout.read()
        return [re.split('\s+', ' ' + x, 6)[1:] for x in output.decode('utf-8').split('\n') if x]

    def get_by_parent(parent, data):
        # Recursively build the process subtree rooted at `parent`:
        # {pid: [children...]} or just the pid if it has no children.
        result = [get_by_parent(x[0], data) for x in data if (x[1] == parent)]
        return {parent: result} if len(result) > 0 else parent

    def convert_mem(b):
        # Local KiB formatter (shadows the module-level convert_mem;
        # accepts string input from `ps`).
        b = int(b)
        if 100 > b:
            return "{:.2f}".format(b)
        elif 100*1024 > b > 100:
            return "{:.2f} M".format(b/1024)
        else:
            return "{:.2f} G".format(b/1024/1024)

    # Outer merge keeps kernels without processes and processes without
    # kernels; those show up as "потеряшка" (lost) rows below.
    board = pd.merge(
        get_kernels(get_jupyterhub_users()),
        get_ipykernel_launcher(),
        on='kernel_id',
        how='outer'
    )
    pstree = get_pstree()
    dict_pstree = {x[0]: x[1:] for x in pstree}
    board['tree_pid'] = board['pid'].map(lambda x: get_by_parent(x, pstree))
    # NOTE(review): +3h looks like a fixed UTC->local offset -- confirm.
    board['last_activity'] = pd.to_datetime(board['last_activity']).map(lambda x: x + timedelta(hours=3) if x else None)
    # A missing pid is NaN (float); skip RAM lookup for those rows.
    board['ram'] = board['pid'].map(lambda x: convert_mem(dict_pstree[x][1]) if (type(x) != float) or (not np.isnan(x)) else None)
    board['connections'] = board['connections'].fillna('*** потеряшка ***')
    board['notebook_path'] = board['notebook_path'].fillna('*** потеряшка ***')
    board = board.sort_values('name')
    # NOTE(review): no-op replace() calls, same as in calculate_table().
    return board.to_html(index=False, na_rep='').replace('<', '<').replace('>', '>')
@app.route('/')
def how():
    """Render the status page: memory, CPU and JupyterHub kernel tables.

    Each table is expensive to compute (subprocess + REST calls), so it is
    cached for 20 seconds.  The original repeated the get-or-compute
    pattern three times; it is factored into a local helper here.
    """
    def _cached(key, compute):
        # Look up the cached table, recomputing it when it has expired.
        table = cache.get(key)
        if table is None:
            table = compute()
            cache.set(key, table, timeout=20)
        return table

    table_usage_memory = _cached('usage_memory', calculate_table)
    table_usage_cpu = _cached('usage_cpu', calculate_cpu)
    table_usage_jupyterhub = _cached('usage_jupyterhub', get_jupyterhub_table)
    return style + table_usage_memory + '<br>' + table_usage_cpu + '<br>' + table_usage_jupyterhub
# Serve over HTTPS when both certificate files are configured,
# plain HTTP otherwise.
ssl_context = (CRT_FILE, KEY_FILE) if (CRT_FILE and KEY_FILE) else None
app.run(
    host=HOST,
    port=PORT,
    ssl_context=ssl_context
)
| 2.0625 | 2 |
fee_calculator/settings/docker.py | ministryofjustice/laa-fee-calcualtor | 1 | 12765984 | from .base import * # noqa
# Django secret key must be injected via the environment; never hard-coded.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')

# Host names this deployment may be addressed as.
ALLOWED_HOSTS = [
    'localhost',
    '.dsd.io',
    '.service.justice.gov.uk'
]
| 1.25 | 1 |
Chapter04/Exercise4.01/Exercise4.01_Unit_test.py | PacktWorkshops/Applied-Deep-Learning-with-Keras | 1 | 12765985 | <filename>Chapter04/Exercise4.01/Exercise4.01_Unit_test.py
import unittest
import numpy as np
import pandas as pd
import numpy.testing as np_testing
import pandas.testing as pd_testing
import os
import import_ipynb
class Test(unittest.TestCase):
    """Verify that Exercise4_01 loaded the fish-toxicity dataset correctly."""

    def _dirname_if_file(self, filename):
        # Accept either the data directory itself or a path to a file in it.
        if os.path.isdir(filename):
            return filename
        else:
            return os.path.dirname(os.path.abspath(filename))

    def setUp(self):
        # Importing the notebook module (via import_ipynb) executes the
        # exercise, exposing its X/y as module attributes.
        import Exercise4_01
        self.exercise = Exercise4_01
        dirname = self._dirname_if_file('../data/qsar_fish_toxicity.csv')
        self.data_loc = os.path.join(dirname, 'qsar_fish_toxicity.csv')
        # Column names of the semicolon-separated dataset; LC50 is the target.
        colnames = ['CIC0', 'SM1_Dz(Z)', 'GATS1i', 'NdsCH', 'NdssC','MLOGP', 'LC50']
        self.data = pd.read_csv(self.data_loc, sep=';', names=colnames)
        self.X = self.data.drop('LC50', axis=1)
        self.y = self.data['LC50']

    def test_input_frames(self):
        # The exercise's X/y must match the independently loaded data.
        pd_testing.assert_frame_equal(self.exercise.X, self.X)
        pd_testing.assert_series_equal(self.exercise.y, self.y)
if __name__ == '__main__':
unittest.main()
| 2.84375 | 3 |
fuzzinator/job/update_job.py | pmatos/fuzzinator | 0 | 12765986 | <reponame>pmatos/fuzzinator
# Copyright (c) 2016-2021 <NAME>, <NAME>.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from ..config import config_get_callable
class UpdateJob(object):
    """
    Class for running SUT update jobs.
    """

    def __init__(self, id, config, sut_name, db, listener):
        self.id = id
        self.config = config
        self.sut_name = sut_name
        # Job cost: the SUT's update_cost, defaulting to the global cost
        # budget when unset, and never exceeding that budget.
        capacity = int(config.get('fuzzinator', 'cost_budget'))
        self.cost = min(int(config.get('sut.' + sut_name, 'update_cost', fallback=config.get('fuzzinator', 'cost_budget'))), capacity)

    def run(self):
        # Resolve the SUT's update callable from config and invoke it
        # inside its own context manager; update jobs produce no issues.
        update, update_kwargs = config_get_callable(self.config, 'sut.' + self.sut_name, 'update')
        with update:
            update(**update_kwargs)
        return []
| 1.929688 | 2 |
Absolute_Integrator/peak_finding/template_match.py | msarahan/Absolute_Integrator | 0 | 12765987 | import numpy as np
# Dictionary describing the options available to tune this algorithm.
# Each entry records the option's purpose, default value, expected type,
# and (where applicable) whether the special value 'auto' is accepted.
options = {
    "peak_size": {"purpose": "Estimate of the peak size, in pixels. If 'auto', attempts to determine automatically. Otherwise, this should be an integer.",
                  "default": "auto",
                  "type": "int",
                  "has_auto": True},
    "refine_positions": {"purpose": "TODO",
                         "default": False,
                         "type": "bool"},
    "progress_object": {"purpose": "Object used to present a progress bar to the user. For definition, see UI_interface folder.",
                        "default": None},
}
def run(data):
    """Placeholder peak finder; returns a fixed 4x2 array of zero positions."""
    # TODO: need to actually implement this peak finder.
    peaks = np.zeros((4, 2))
    return peaks
TSClusteringLayer.py | JingWENZHA/DTC | 0 | 12765988 | <gh_stars>0
"""
Implementation of the Deep Temporal Clustering model
Time Series Clustering layer
@author <NAME> (FlorentF9)
"""
# from keras.engine.topology import Layer, InputSpec
from tensorflow.keras.layers import Layer, InputSpec
import keras.backend as K
class TSClusteringLayer(Layer):
    """
    Clustering layer converts input sample (feature) to soft label, i.e. a vector that represents the probability of the
    sample belonging to each cluster. The probability is calculated with student's t-distribution.

    # Arguments
        n_clusters: number of clusters.
        weights: list of Numpy array with shape `(n_clusters, timesteps, n_features)` witch represents the initial cluster centers.
        alpha: parameter in Student's t-distribution. Default to 1.0.
        dist_metric: distance metric between sequences used in similarity kernel ('eucl', 'cir', 'cor' or 'acf').

    # Input shape
        3D tensor with shape: `(n_samples, timesteps, n_features)`.
    # Output shape
        2D tensor with shape: `(n_samples, n_clusters)`.
    """
    def __init__(self, n_clusters, weights=None, alpha=1.0, dist_metric='eucl', **kwargs):
        # Accept the legacy `input_dim` kwarg by converting it to `input_shape`.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(TSClusteringLayer, self).__init__(**kwargs)
        self.n_clusters = n_clusters
        self.alpha = alpha
        self.dist_metric = dist_metric
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=3)
        self.clusters = None
        self.built = False

    def build(self, input_shape):
        # Create the trainable cluster centers, one (timesteps, n_features)
        # sequence per cluster; optionally initialise from `weights`.
        assert len(input_shape) == 3
        input_dim = input_shape[2]
        input_steps = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_steps, input_dim))
        self.clusters = self.add_weight(shape=(self.n_clusters, input_steps, input_dim), initializer='glorot_uniform', name='cluster_centers')
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def call(self, inputs, **kwargs):
        """
        Student t-distribution kernel, probability of assigning encoded sequence i to cluster k.
            q_{ik} = (1 + dist(z_i, m_k)^2)^{-1} / normalization.

        Arguments:
            inputs: encoded input sequences, shape=(n_samples, timesteps, n_features)
        Return:
            q: soft labels for each sample. shape=(n_samples, n_clusters)
        """
        if self.dist_metric == 'eucl':
            # Euclidean distance summed over timesteps and features.
            distance = K.sum(K.sqrt(K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2)), axis=-1)
        elif self.dist_metric == 'cid':
            # Complexity-invariant distance: Euclidean distance scaled by a
            # complexity-estimate ratio (first differences of the series).
            ce_x = K.sqrt(K.sum(K.square(inputs[:, 1:, :] - inputs[:, :-1, :]), axis=1))  # shape (n_samples, n_features)
            ce_w = K.sqrt(K.sum(K.square(self.clusters[:, 1:, :] - self.clusters[:, :-1, :]), axis=1))  # shape (n_clusters, n_features)
            ce = K.maximum(K.expand_dims(ce_x, axis=1), ce_w) / K.minimum(K.expand_dims(ce_x, axis=1), ce_w)  # shape (n_samples, n_clusters, n_features)
            ed = K.sqrt(K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2))  # shape (n_samples, n_clusters, n_features)
            distance = K.sum(ed * ce, axis=-1)  # shape (n_samples, n_clusters)
        elif self.dist_metric == 'cor':
            # Correlation-based distance: z-normalise along time, compute
            # Pearson coefficients, then map to sqrt(2 * (1 - pcc)).
            inputs_norm = (inputs - K.expand_dims(K.mean(inputs, axis=1), axis=1)) / K.expand_dims(K.std(inputs, axis=1), axis=1)  # shape (n_samples, timesteps, n_features)
            clusters_norm = (self.clusters - K.expand_dims(K.mean(self.clusters, axis=1), axis=1)) / K.expand_dims(K.std(self.clusters, axis=1), axis=1)  # shape (n_clusters, timesteps, n_features)
            pcc = K.mean(K.expand_dims(inputs_norm, axis=1) * clusters_norm, axis=2)  # Pearson correlation coefficients
            distance = K.sum(K.sqrt(2.0 * (1.0 - pcc)), axis=-1)  # correlation-based similarities, shape (n_samples, n_clusters)
        elif self.dist_metric == 'acf':
            raise NotImplementedError
        else:
            raise ValueError('Available distances are eucl, cid, cor and acf!')
        # Student's t kernel, normalised so each row of q sums to 1.
        q = 1.0 / (1.0 + K.square(distance) / self.alpha)
        q **= (self.alpha + 1.0) / 2.0
        q = K.transpose(K.transpose(q) / K.sum(q, axis=1))
        return q

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape) == 3
        return input_shape[0], self.n_clusters

    def get_config(self):
        # Serialise constructor arguments for layer reconstruction.
        config = {'n_clusters': self.n_clusters, 'dist_metric': self.dist_metric}
        base_config = super(TSClusteringLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 2.640625 | 3 |
Python Advanced/2. Tuples And Sets/Lab/02. Students' Grades.py | a-shiro/SoftUni-Courses | 0 | 12765989 | <reponame>a-shiro/SoftUni-Courses<gh_stars>0
def get_average(grades):
    """Return the arithmetic mean of a non-empty list of grades."""
    total = sum(grades)
    return total / len(grades)
# Number of "name grade" lines to read.
n = int(input())

# Maps student name -> list of grades received so far.
students_record = {}

for _ in range(n):
    student, grade = input().split()
    if student not in students_record:
        students_record[student] = []
    students_record[student].append(float(grade))

# Print every student with all grades and their average, 2 decimal places.
for s, g in students_record.items():
    average = get_average(g)
    grades = ' '.join(f"{x:.2f}" for x in g)
    print(f"{s} -> {grades} (avg: {average:.2f})")
| 3.703125 | 4 |
wg-manager-backend/script/wireguard_startup.py | SH-Daemon/wg-manager | 417 | 12765990 | import os
import typing
from sqlalchemy.orm import Session
import const
from database import models
from database.database import SessionLocal
from db.api_key import add_initial_api_key_for_admin
from db.wireguard import server_add_on_init
from script.wireguard import is_installed, start_interface, is_running, load_environment_clients
def setup_on_start():
    """Restore WireGuard state when the service boots.

    Restarts interfaces that were running before the last shutdown, then
    seeds environment-defined clients, the initial server interface and
    the startup API key, depending on configuration.
    """
    _db: Session = SessionLocal()
    servers: typing.List[models.WGServer] = _db.query(models.WGServer).all()
    for s in servers:
        try:
            last_state = s.is_running
            if is_installed() and last_state and is_running(s):
                start_interface(s)
        except Exception as e:
            # A single broken interface must not abort service startup.
            print(e)

    if const.CLIENT:
        load_environment_clients(_db)

    if const.SERVER_INIT_INTERFACE is not None:
        server_add_on_init(_db)

    if const.SERVER_STARTUP_API_KEY is not None:
        ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
        add_initial_api_key_for_admin(_db, const.SERVER_STARTUP_API_KEY, ADMIN_USERNAME)

    _db.close()
| 2.03125 | 2 |
tests/test_players.py | dmayilyan/dominoes | 38 | 12765991 | <filename>tests/test_players.py<gh_stars>10-100
import copy
import dominoes
import time
import unittest
def _new_game_with_fixed_moves(fixed_moves):
    """Return a fresh game in which `fixed_moves` moves have been played.

    Retries with new random games until one survives the fixed moves
    without ending, so callers always get a game still in progress.
    """
    while True:
        g = dominoes.Game.new()

        for _ in range(fixed_moves):
            g.make_move(*g.valid_moves[0])
            if g.result is not None:
                break

        if g.result is None:
            return g
class TestPlayers(unittest.TestCase):
    """Tests for the dominoes.players move-ordering functions."""

    def _test_player_interface(self, player, fixed_moves=0):
        # Every player must only reorder valid_moves: same moves as a
        # tuple, and no other mutation of the game state.
        g = _new_game_with_fixed_moves(fixed_moves)
        g_copy = copy.deepcopy(g)

        player(g)

        self.assertEqual(type(g.valid_moves), tuple)
        self.assertEqual(len(g.valid_moves), len(g_copy.valid_moves))
        self.assertEqual(set(g.valid_moves), set(g_copy.valid_moves))

        g_copy.valid_moves = g.valid_moves
        self.assertEqual(g, g_copy)

    def test_identity(self):
        # identity must leave valid_moves exactly as it found them.
        self._test_player_interface(dominoes.players.identity)

        g = dominoes.Game.new()

        d1 = dominoes.Domino(1, 1)
        d2 = dominoes.Domino(2, 2)
        d3 = dominoes.Domino(3, 3)

        valid_moves_before = [
            ((d1, True),),
            ((d1, True), (d2, False)),
            ((d1, True), (d2, False), (d3, False))
        ]
        valid_moves_after = [
            ((d1, True),),
            ((d1, True), (d2, False)),
            ((d1, True), (d2, False), (d3, False))
        ]

        for vmb, vma in zip(valid_moves_before, valid_moves_after):
            g.valid_moves = vmb
            dominoes.players.identity(g)
            self.assertEqual(g.valid_moves, vma)

    def test_counter(self):
        # counter wraps another player and counts its invocations.
        self._test_player_interface(dominoes.players.counter())

        self.assertEqual(dominoes.players.counter(name='test').__name__, 'test')

        bgc = dominoes.players.counter(dominoes.players.bota_gorda)
        self.assertEqual(bgc.__name__, 'counter')
        self.assertEqual(bgc.count, 0)

        # there is a small chance that the valid moves are already
        # sorted in bota gorda order, in which case this won't
        # test anything interesting. this test suite gets run
        # often enough that the danger is negligible.
        g = dominoes.Game.new()
        g_copy = copy.deepcopy(g)

        bgc(g)
        dominoes.players.bota_gorda(g_copy)

        self.assertEqual(g, g_copy)
        self.assertEqual(bgc.count, 1)

    def test_random(self):
        # random shuffles valid_moves.
        self._test_player_interface(dominoes.players.random)

        gs = [dominoes.Game.new(starting_player=0) for _ in range(3)]
        valid_moves_before = tuple(g.valid_moves for g in gs)
        for g in gs:
            dominoes.players.random(g)
        valid_moves_after = tuple(g.valid_moves for g in gs)

        # this has a tiny, but nonzero, chance of failing
        self.assertNotEqual(valid_moves_before, valid_moves_after)

    def test_reverse(self):
        # reverse flips the order of valid_moves.
        self._test_player_interface(dominoes.players.reverse)

        g = dominoes.Game.new()

        d1 = dominoes.Domino(1, 1)
        d2 = dominoes.Domino(2, 2)
        d3 = dominoes.Domino(3, 3)

        vms = [
            ((d1, True),),
            ((d1, True), (d2, False)),
            ((d1, True), (d2, False), (d3, False))
        ]
        rvms = [
            ((d1, True),),
            ((d2, False), (d1, True)),
            ((d3, False), (d2, False), (d1, True))
        ]

        for vm, rvm in zip(vms, rvms):
            g.valid_moves = vm
            dominoes.players.reverse(g)
            self.assertEqual(g.valid_moves, rvm)

    def test_bota_gorda(self):
        # bota_gorda prefers dominoes with the most total pips.
        self._test_player_interface(dominoes.players.bota_gorda)

        g = dominoes.Game.new()

        d1 = dominoes.Domino(1, 1)
        d2 = dominoes.Domino(2, 0)
        d3 = dominoes.Domino(1, 0)

        valid_moves_before = [
            ((d1, True),),
            ((d1, True), (d1, False)),
            ((d1, True), (d2, True)),
            ((d1, True), (d3, True)),
            ((d3, True), (d1, True))
        ]
        valid_moves_after = [
            ((d1, True),),
            ((d1, True), (d1, False)),
            ((d1, True), (d2, True)),
            ((d1, True), (d3, True)),
            ((d1, True), (d3, True))
        ]

        for vmb, vma in zip(valid_moves_before, valid_moves_after):
            g.valid_moves = vmb
            dominoes.players.bota_gorda(g)
            self.assertEqual(g.valid_moves, vma)

    def test_double(self):
        # double prefers doubles over non-doubles.
        self._test_player_interface(dominoes.players.double)

        g = dominoes.Game.new()

        d1 = dominoes.Domino(1, 1)
        d2 = dominoes.Domino(2, 2)
        d3 = dominoes.Domino(1, 0)

        valid_moves_before = [
            ((d1, True),),
            ((d1, True), (d1, False)),
            ((d1, True), (d2, True)),
            ((d1, True), (d3, True)),
            ((d3, True), (d1, True))
        ]
        valid_moves_after = [
            ((d1, True),),
            ((d1, True), (d1, False)),
            ((d1, True), (d2, True)),
            ((d1, True), (d3, True)),
            ((d1, True), (d3, True))
        ]

        for vmb, vma in zip(valid_moves_before, valid_moves_after):
            g.valid_moves = vmb
            dominoes.players.double(g)
            self.assertEqual(g.valid_moves, vma)

    def test_omniscient(self):
        # game cannot have ended after 6 fixed moves.
        self._test_player_interface(dominoes.players.omniscient(), 6)

        self.assertEqual(dominoes.players.omniscient(name='test').__name__, 'test')
        self.assertEqual(dominoes.players.omniscient().__name__, 'omniscient')

        # start_move prevents the wrapped player from running too early.
        cp1 = dominoes.players.counter()
        op1 = dominoes.players.omniscient(start_move=1, player=cp1)
        g1 = dominoes.Game.new()
        op1(g1)
        self.assertEqual(cp1.count, 0)

        # due to passes, the amount of total moves will be greater
        # than or equal to 6 after playing 6 fixed moves. therefore,
        # the following will not test the boundary condition every time.
        # this test suite gets run often enough that the danger is negligible.
        cp2 = dominoes.players.counter()
        op2 = dominoes.players.omniscient(start_move=6, player=cp2)
        while True:
            g2 = dominoes.Game.new()
            for _ in range(6):
                g2.make_move(*g2.valid_moves[0])

            # the omniscient player is smart enough not
            # to run when there is only one valid move.
            if len(g2.valid_moves) > 1:
                break
        op2(g2)
        self.assertNotEqual(cp2.count, 0)

        # hand-crafted endgame where the best move ordering is known.
        d1 = dominoes.Domino(7, 0)
        d2 = dominoes.Domino(0, 0)
        d3 = dominoes.Domino(0, 1)
        d4 = dominoes.Domino(0, 8)
        d5 = dominoes.Domino(1, 9)

        h1 = dominoes.Hand([d1, d2])
        h2 = dominoes.Hand([d3, d2])
        h3 = dominoes.Hand([d3, d4, d5])
        h4 = dominoes.Hand([d2])

        g3 = dominoes.Game.new(starting_player=0)
        g3.hands = [h1, h2, h3, h4]
        g3.make_move(d1, True)

        op3 = dominoes.players.omniscient()

        self.assertEqual(g3.valid_moves, ((d3, False), (d2, False)))
        op3(g3)
        self.assertEqual(g3.valid_moves, ((d2, False), (d3, False)))

    def test_probabilistic_alphabeta(self):
        # test player interface
        self._test_player_interface(dominoes.players.probabilistic_alphabeta(), 15)

        # test name
        self.assertEqual(dominoes.players.probabilistic_alphabeta(name='test').__name__, 'test')
        self.assertEqual(dominoes.players.probabilistic_alphabeta().__name__, 'probabilistic_alphabeta')

        # test that start move can prevent running of player
        cp1 = dominoes.players.counter()
        pap1 = dominoes.players.probabilistic_alphabeta(start_move=1, player=cp1)
        g1 = dominoes.Game.new()
        pap1(g1)
        self.assertEqual(cp1.count, 0)

        # test that player can still run even with a start move.
        # due to passes, the amount of total moves will be greater
        # than or equal to 15 after playing 15 fixed moves. therefore,
        # the following will not test the boundary condition every time.
        # this test suite gets run often enough that the danger is negligible.
        cp2 = dominoes.players.counter()
        pap2 = dominoes.players.probabilistic_alphabeta(start_move=15, player=cp2)
        while True:
            g2 = _new_game_with_fixed_moves(15)

            # the probabilistic_alphabeta player is smart enough
            # not to run when there is only one valid move.
            if len(g2.valid_moves) > 1:
                break
        pap2(g2)
        self.assertNotEqual(cp2.count, 0)

        # testing that a small sample size greatly limits the amount of work done
        g3 = _new_game_with_fixed_moves(10)
        pap3 = dominoes.players.probabilistic_alphabeta(sample_size=1)
        start = time.time()
        pap3(g3)
        elapsed = time.time() - start
        self.assertTrue(elapsed < 1)

        # test for correct results on a simple example
        d1 = dominoes.Domino(2, 0)
        d2 = dominoes.Domino(3, 0)
        d3 = dominoes.Domino(4, 0)
        d4 = dominoes.Domino(5, 0)
        d5 = dominoes.Domino(5, 1)
        d6 = dominoes.Domino(6, 0)

        h1 = dominoes.Hand([d1, d2])
        h2 = dominoes.Hand([d3, d4])
        h3 = dominoes.Hand([d5])
        h4 = dominoes.Hand([d6])

        g4 = dominoes.Game.new(starting_player=0)
        g4.hands = [h1, h2, h3, h4]
        g4.make_move(d1, True)

        pap4 = dominoes.players.probabilistic_alphabeta()

        self.assertEqual(g4.valid_moves, ((d3, False), (d4, False)))
        pap4(g4)
        self.assertEqual(g4.valid_moves, ((d4, False), (d3, False)))
if __name__ == '__main__':
unittest.main()
| 2.875 | 3 |
services/datalad/datalad_service/handlers/annex.py | tommydino93/openneuro | 57 | 12765992 | import hashlib
import logging
import os
import shutil
import struct
import tempfile
import falcon
from datalad_service.common.stream import update_file
from datalad_service.handlers.git import _check_git_access, _handle_failed_access
def hashdirmixed(key):
"""Python implementation of git-annex hashing for non-bare git repos
https://git-annex.branchable.com/internals/hashing/"""
digest = hashlib.md5(key.encode()).digest()
first_word = struct.unpack('<I', digest[:4])[0]
nums = [first_word >> (6 * x) & 31 for x in range(4)]
letters = ["0123456789zqjxkmvwgpfZQJXKMVWGPF"[i] for i in nums]
return ("{0:s}{1:s}".format(letters[1], letters[0]), "{0:s}{1:s}".format(letters[3], letters[2]))
def key_to_path(key):
    """Relative path of the annex object file for ``key`` inside .git."""
    first, second = hashdirmixed(key)
    return os.path.join('.git', 'annex', 'objects', first, second, key, key)
class GitAnnexResource(object):
    """{worker}/{dataset}/annex/{key} serves git-annex object requests

    This allows OpenNeuro to act as a special remote, adding or removing objects from .git/annex/objects/
    """

    def __init__(self, store):
        self.store = store
        self.logger = logging.getLogger('datalad_service.' + __name__)

    def on_head(self, req, resp, worker, dataset, key):
        """HEAD requests check if objects exist already"""
        resp.set_header('WWW-Authenticate', 'Basic realm="dataset git repo"')
        if not _check_git_access(req, dataset):
            return _handle_failed_access(req, resp)
        dataset_path = self.store.get_dataset_path(dataset)
        annex_object_path = os.path.join(dataset_path, key_to_path(key))
        if os.path.exists(annex_object_path):
            resp.status = falcon.HTTP_OK
        else:
            resp.status = falcon.HTTP_NOT_FOUND

    def on_get(self, req, resp, worker, dataset, key):
        # Stream the annex object's bytes back to the client.
        resp.set_header('WWW-Authenticate', 'Basic realm="dataset git repo"')
        if not _check_git_access(req, dataset):
            return _handle_failed_access(req, resp)
        dataset_path = self.store.get_dataset_path(dataset)
        annex_object_path = os.path.join(dataset_path, key_to_path(key))
        if os.path.exists(annex_object_path):
            resp.status = falcon.HTTP_OK
            # Falcon closes the stream after the response is sent.
            fd = open(annex_object_path, 'rb')
            resp.stream = fd
            resp.stream_len = os.fstat(fd.fileno()).st_size
        else:
            resp.status = falcon.HTTP_NOT_FOUND

    def on_post(self, req, resp, worker, dataset, key):
        # Store a new annex object from the request body.
        resp.set_header('WWW-Authenticate', 'Basic realm="dataset git repo"')
        if not _check_git_access(req, dataset):
            return _handle_failed_access(req, resp)
        dataset_path = self.store.get_dataset_path(dataset)
        annex_object_path = os.path.join(dataset_path, key_to_path(key))
        if os.path.exists(annex_object_path):
            # Don't allow objects to be replaced
            resp.status = falcon.HTTP_CONFLICT
        else:
            os.makedirs(os.path.dirname(annex_object_path), exist_ok=True)
            # Begin writing stream to temp file and hard link once done
            # It should not be written unless the full request completes
            update_file(annex_object_path, req.stream)
            resp.status = falcon.HTTP_OK

    def on_delete(self, req, resp, worker, dataset, key):
        # Remove the annex object if it exists; 204 either way.
        resp.set_header('WWW-Authenticate', 'Basic realm="dataset git repo"')
        if not _check_git_access(req, dataset):
            return _handle_failed_access(req, resp)
        dataset_path = self.store.get_dataset_path(dataset)
        annex_object_path = os.path.join(dataset_path, key_to_path(key))
        if os.path.exists(annex_object_path):
            os.remove(annex_object_path)
        resp.status = falcon.HTTP_NO_CONTENT
| 2.59375 | 3 |
# codes/course2/demo1.py
names = ["Alan", "Bruce", "Carlos", "David", "Emma"]
scores = [90, 80, 85, 92, 81]
# Greet every student in the roster.
for student in names:
    print("hello, %s!" % student)
| 3.359375 | 3 |
# cages/.shared/reverse_call.py
#!/usr/bin/env python3
#-*- coding: iso-8859-1 -*-
################################################################################
#
# This module handles reverse RPC calls, in which target cage actively polls
# for incoming calls, rather that passively waiting for them.
#
# On source cage, a reverse RPC call is simply this:
# result = pmnc("target_cage:reverse").module.method(*args, **kwargs)
#
# On target cage, an instance of an "revrpc" interface must be created and
# configured to poll source cage specifically.
#
# The reason for such calls to exist is DMZ scenario where source cage may
# be unable to establish connection to the target cage, but only the other
# way around.
#
# Pythomnic3k project
# (c) 2005-2014, <NAME> <<EMAIL>>
# Distributed under BSD license
#
################################################################################
__all__ = [ "execute_reverse", "poll", "post" ]  # public reverse-RPC API
__reloadable__ = False  # module keeps cross-request state (queues below), so it must not be reloaded
################################################################################
import threading; from threading import Lock
if __name__ == "__main__": # add pythomnic/lib to sys.path
import os; import sys
main_module_dir = os.path.dirname(sys.modules["__main__"].__file__) or os.getcwd()
sys.path.insert(0, os.path.normpath(os.path.join(main_module_dir, "..", "..", "lib")))
import exc_string; from exc_string import exc_string
import typecheck; from typecheck import by_regex
import interlocked_queue; from interlocked_queue import InterlockedQueue
import pmnc.resource_pool; from pmnc.resource_pool import ResourceError, RPCError
###############################################################################
# Validators constraining the cage/module/method names accepted by this module;
# cage names additionally allow "-".
valid_cage_name = by_regex("^[A-Za-z0-9_-]{1,32}$")
valid_module_name = by_regex("^[A-Za-z0-9_]{1,64}$")
valid_method_name = by_regex("^[A-Za-z0-9_]{1,64}$")
###############################################################################
# module-level state => not reloadable
_rq_queues = {} # one queue per target cage, permanent (see _get_rq_queue)
_rq_queues_lock = Lock()
_rs_queues = {} # one queue per active request, transient (request id -> response queue)
_rs_queues_lock = Lock()
###############################################################################
def _get_rq_queue(cage):
    """Return the permanent request queue for *cage*, creating it on first use."""
    with _rq_queues_lock:
        queue = _rq_queues.get(cage)
        if queue is None:
            queue = InterlockedQueue()
            _rq_queues[cage] = queue
        return queue
###############################################################################
# this method is called from module_loader.py to handle reverse RPC call
# pmnc("target_cage:reverse").module.method(*args, **kwargs)
def execute_reverse(target_cage: valid_cage_name, module: valid_module_name,
                    method: valid_method_name, args: tuple, kwargs: dict):
    """Enqueue a reverse RPC call for *target_cage* and wait for its response.

    The request sits in the per-cage queue until the target cage fetches it
    via poll(); the response comes back through post() on a one-time queue
    registered under this request's unique id. Returns the remote result,
    or raises RPCError / ResourceError on failure or deadline expiry.
    """
    # wrap up an RPC call identical to how it's done in protocol_rpc.py
    request_dict = pmnc.request.to_dict()
    # remove request parameters that must not cross the RPC border
    request_dict["parameters"].pop("retry", None)
    # wrap all the call parameters in a plain dict
    request = dict(source_cage = __cage__,
                   target_cage = target_cage,
                   module = module, method = method,
                   args = args, kwargs = kwargs,
                   request = request_dict)
    request_description = "reverse RPC request {0:s}.{1:s} to {2:s}".\
                          format(module, method, target_cage)
    # create a one-time response queue just for this request
    rs_queue = InterlockedQueue()
    request_id = pmnc.request.unique_id
    with _rs_queues_lock:
        _rs_queues[request_id] = rs_queue # register the call as being active
    try:
        pmnc.log.info("sending {0:s}".format(request_description))
        try:
            # enqueue the call and wait for response
            rq_queue = _get_rq_queue(target_cage)
            rq_queue.push((request_id, request))
            response = pmnc.request.pop(rs_queue)
            if response is None:
                raise Exception("request deadline waiting for response")
            try:
                result = response["result"]
            except KeyError:
                # remote side reported a failure instead of a result
                raise RPCError(description = response["exception"], terminal = False)
        except RPCError as e:
            pmnc.log.warning("{0:s} returned error: {1:s}".\
                             format(request_description, e.description))
            raise
        except:
            pmnc.log.warning("{0:s} failed: {1:s}".\
                             format(request_description, exc_string()))
            ResourceError.rethrow(recoverable = False)
        else:
            pmnc.log.info("reverse RPC request returned successfully")
            return result
    finally:
        with _rs_queues_lock:
            del _rs_queues[request_id] # unregister the call
###############################################################################
# other cages call this method to fetch calls enqueued to them in execute_reverse()
_poll_response_threshold = 3.0 # this accounts for time differences and for the
                               # response delivery time as well, so beware
def poll():
    """Block until a reverse RPC request addressed to the polling cage arrives.

    The polling cage is identified by the "source_cage" auth token on the
    incoming request. Returns the request dict, or None if nothing arrives
    before the request deadline (minus the response-delivery threshold).
    Requests whose original caller already gave up are silently discarded.
    """
    poll_timeout = pmnc.request.remain - _poll_response_threshold
    while poll_timeout > 0.0:
        rq_queue = _get_rq_queue(pmnc.request.parameters["auth_tokens"]["source_cage"])
        request_id_request = rq_queue.pop(poll_timeout)
        if request_id_request is None:
            return None
        request_id, request = request_id_request
        # see if the original caller is still waiting for the request,
        # otherwise the request is silently discarded
        with _rs_queues_lock:
            if request_id in _rs_queues:
                return request
        poll_timeout = pmnc.request.remain - _poll_response_threshold
###############################################################################
# other cages call this method to submit response for a request they processed,
# request id's are unique and random, anyone who knows the id of the request
# can submit the response for it
def post(request_id: str, response):
    """Deliver *response* to the caller waiting on *request_id*.

    Request ids are unique and random, so knowing one is sufficient to post
    its response. Raises if the original caller is no longer waiting, so the
    posting cage at least learns there was a problem.
    """
    with _rs_queues_lock:
        pending_queue = _rs_queues.get(request_id)
    if pending_queue is None:
        raise Exception("the request is no longer pending for response")
    pending_queue.push(response)
###############################################################################
def self_test():
    """Offline self-test of the reverse-RPC plumbing (run via pmnc.self_test)."""
    from pmnc.request import fake_request
    from expected import expected
    from pmnc.threads import HeavyThread
    from time import time, sleep
    ###################################
    # queues are cached per cage and independent of each other
    def test_get_rq_queue():
        q1 = _get_rq_queue("foo")
        assert q1 is _get_rq_queue("foo")
        q2 = _get_rq_queue("bar")
        assert q2 is _get_rq_queue("bar")
        assert q1 is not q2
        q1.push("foo")
        assert q2.pop(0.1) is None
        assert q1.pop(0.1) == "foo"
        q2.push("bar")
        assert q1.pop(0.1) is None
        assert q2.pop(0.1) == "bar"
    test_get_rq_queue()
    ###################################
    # a call that nobody polls for must fail with the deadline error
    def test_execute_timeout():
        fake_request(1.0)
        with expected(Exception("request deadline waiting for response")):
            pmnc.__getattr__(__name__).execute_reverse("bad_cage", "module", "method", (), {})
    test_execute_timeout()
    ###################################
    # full round trip: request enqueued, response pushed, caller unblocked
    def test_execute_success():
        fake_request(10.0)
        request_id = None
        response = None
        def caller(*args, **kwargs):
            fake_request(6.0)
            nonlocal request_id, response
            request_id = pmnc.request.unique_id
            pmnc.request.parameters["AAA"] = "BBB"
            pmnc.request.describe("my request")
            response = pmnc.__getattr__(__name__).execute_reverse("good_cage", "module", "method", args, kwargs)
        assert "good_cage" not in _rq_queues
        th = HeavyThread(target = caller, args = (1, "foo"), kwargs = { "biz": "baz" })
        th.start()
        try:
            sleep(2.0)
            assert "good_cage" in _rq_queues
            assert request_id in _rs_queues
            req_id, req = _rq_queues["good_cage"].pop()
            assert req_id == request_id
            assert abs(req["request"].pop("deadline") - time() - 4.0) < 1.0
            assert req == dict \
            (
                source_cage = __cage__,
                target_cage = "good_cage",
                module = "module",
                method = "method",
                args = (1, "foo"),
                kwargs = { "biz": "baz" },
                request = dict(protocol = pmnc.request.protocol,
                               interface = pmnc.request.interface,
                               unique_id = request_id,
                               description = "my request",
                               parameters = dict(auth_tokens = {}, AAA = "BBB"),
                               log_levels = []),
            )
            _rs_queues[request_id].push({ "result": "RESULT" })
            sleep(2.0)
            assert "good_cage" in _rq_queues
            assert request_id not in _rs_queues
        finally:
            th.stop()
        assert response == "RESULT"
        assert "good_cage" in _rq_queues
        assert request_id not in _rs_queues
    test_execute_success()
    ###################################
    # poll() discards requests nobody waits for and honors the deadline margin
    def test_poll():
        fake_request(5.0)
        pmnc.request.parameters["auth_tokens"]["source_cage"] = "source_cage"
        assert pmnc.__getattr__(__name__).poll() is None
        assert abs(pmnc.request.remain - _poll_response_threshold) < 1.0
        fake_request(5.0)
        pmnc.request.parameters["auth_tokens"]["source_cage"] = "source_cage"
        _get_rq_queue("source_cage").push(("RQ-123", "request1"))
        assert pmnc.__getattr__(__name__).poll() is None
        assert abs(pmnc.request.remain - _poll_response_threshold) < 1.0
        fake_request(5.0)
        pmnc.request.parameters["auth_tokens"]["source_cage"] = "source_cage"
        _get_rq_queue("source_cage").push(("RQ-123", "request1"))
        _get_rq_queue("source_cage").push(("RQ-456", "request2"))
        _rs_queues["RQ-456"] = "whatever"
        assert pmnc.__getattr__(__name__).poll() == "request2"
        assert abs(pmnc.request.remain - 5.0) < 1.0
    test_poll()
    ###################################
    # post() raises for unknown ids and delivers for registered ones
    def test_post():
        with expected(Exception("the request is no longer pending for response")):
            pmnc.__getattr__(__name__).post("RQ-ABC", "RESULT")
        rs_queue = InterlockedQueue()
        _rs_queues["RQ-ABC"] = rs_queue
        pmnc.__getattr__(__name__).post("RQ-ABC", "RESULT")
        assert rs_queue.pop() == "RESULT"
    test_post()
    ###################################
###################################
if __name__ == "__main__": import pmnc.self_test; pmnc.self_test.run()  # run the module self-test when executed directly
###############################################################################
# EOF | 2.25 | 2 |
# flora_tools/utilities.py
import random
import string
import numpy as np
def get_random_text(length=None, max_length=254):
    """Return a random ASCII string, or None.

    If *length* is None, a length is drawn uniformly from [-1, max_length]
    (np.random.randint's upper bound is exclusive). A negative length —
    drawn or passed — yields None. The returned string has length - 1
    characters; presumably one slot is reserved for a terminator or length
    byte on the wire — TODO confirm against callers.
    NOTE(review): ascii_uppercase is already contained in ascii_letters, so
    uppercase letters are sampled twice as often — possibly unintended, but
    preserved here.
    """
    if length is None:
        length = np.random.randint(-1, max_length + 1)
    if length < 0:
        return None
    else:
        text = ''.join(random.choices(string.ascii_letters + string.ascii_uppercase + string.digits, k=(length - 1)))
        return text
def get_edges(wave):
    """Return the indices where *wave* crosses the midpoint of its range.

    The wave is binarized against (min + max) / 2; each returned index i
    marks a transition between sample i and sample i + 1, in the shape
    np.argwhere produces (one index per row).
    """
    threshold = (np.amin(wave) + np.amax(wave)) / 2.0
    # Binarize, then flag every position whose successor differs.
    binary = np.digitize(wave, [threshold])
    return np.argwhere(np.diff(binary))
| 2.796875 | 3 |
# carmesi/nucleo/tests/test_services_token.py
# Standar Library
from datetime import datetime, timezone
# Django
from django.test import TestCase
from django.core.exceptions import ValidationError
# Services & Builder
from nucleo.services.token_builders import(
TokenFactory,
TOKEN_TIPO_CLIENTE,
TOKEN_TIPO_USUARIO,
TOKEN_TIPO_USUARIO_LOGIN,
)
#Constantes
from .constant import (
TOKEN_PREALTA_CLIENTE,
TOKEN_PREALTA_CLIENTE_CADUCO,
TOKEN_PREALTA_USUARIO,
TOKEN_USUARIO_LOGIN,
)
def fecha_expiracion_2120():
    """Epoch timestamp (UTC, whole seconds) for 2120-01-01 — a far-future expiry."""
    jan_first_2120 = datetime(2120, 1, 1, tzinfo=timezone.utc)
    return int(jan_first_2120.timestamp())
# Module-level factory wired to the fixed far-future expiry so that generated
# tokens are deterministic across test runs.
tokenFactory = TokenFactory(fun_fecha_exp=fecha_expiracion_2120)
class TokenBuilderTest(TestCase):
    """Tests for TokenFactory: payload contents, JWT round-trips, and the
    error messages raised for expired, malformed, or wrong-type tokens.
    All expected 'exp' values equal fecha_expiracion_2120()."""
    def test_crear_payload_cliente(self):
        # client pre-registration token: email + owner name + exp + type
        data = {'email':'<EMAIL>', 'owner_name':'<NAME>'}
        token_cliente = tokenFactory.crear_token_cliente(data)
        payload = token_cliente.get_payload()
        self.assertEqual(len(payload), 4)
        self.assertEqual(payload['email'], '<EMAIL>')
        self.assertEqual(payload['owner_name'], '<NAME>')
        self.assertEqual(payload['exp'], 4733510400)
        self.assertEqual(payload['type'], TOKEN_TIPO_CLIENTE)
        self.assertEqual(token_cliente.get_jwt(), TOKEN_PREALTA_CLIENTE)
    def test_crear_payload_usuario(self):
        # user pre-registration token additionally carries the tenant schema name
        data = {'email': '<EMAIL>','name':'<NAME>',
                'schema_name': 'mitiendita'}
        token_usuario = tokenFactory.crear_token_usuario(data)
        payload = token_usuario.get_payload()
        self.assertEqual(len(payload), 5)
        self.assertEqual(payload['email'], '<EMAIL>')
        self.assertEqual(payload['name'], '<NAME>')
        self.assertEqual(payload['exp'], 4733510400)
        self.assertEqual(payload['type'], TOKEN_TIPO_USUARIO)
        self.assertEqual(payload['schema_name'], 'mitiendita')
        self.assertEqual(token_usuario.get_jwt(), TOKEN_PREALTA_USUARIO)
    def test_crear_payload_usuario_login(self):
        # login token: email + schema + exp + type (no display name)
        data = {'email':'<EMAIL>','schema_name': 'mitiendita'}
        token_login = tokenFactory.crear_token_usuario_login(data)
        payload = token_login.get_payload()
        self.assertEqual(len(payload), 4)
        self.assertEqual(payload['email'], '<EMAIL>')
        self.assertEqual(payload['schema_name'], 'mitiendita')
        self.assertEqual(payload['exp'], 4733510400)
        self.assertEqual(payload['type'], TOKEN_TIPO_USUARIO_LOGIN)
        self.assertEqual(token_login.get_jwt(), TOKEN_USUARIO_LOGIN)
    def test_validar_token_prealta_cliente(self):
        # decoding a valid client JWT reproduces the original payload
        token_cliente = tokenFactory.crear_token_cliente()
        data ={'email': '<EMAIL>', 'owner_name': '<NAME>', 'exp': 4733510400, 'type': 'email_confirmation_new_client'}
        payload = token_cliente.decodificar_token(TOKEN_PREALTA_CLIENTE)
        self.assertDictEqual(payload, data)
    def test_validar_token_prealta_usuario(self):
        token_usuario = tokenFactory.crear_token_usuario()
        data ={'email': '<EMAIL>', 'name': '<NAME>', 'exp': 4733510400, 'type': 'email_confirmation_new_user',
               'schema_name': 'mitiendita'}
        payload = token_usuario.decodificar_token(TOKEN_PREALTA_USUARIO)
        self.assertDictEqual(payload, data)
    def test_validar_token_prealta_usuario_login(self):
        token_usuario_login = tokenFactory.crear_token_usuario_login()
        data ={'email': '<EMAIL>', 'exp': 4733510400, 'type': 'user_login', 'schema_name': 'mitiendita'}
        payload = token_usuario_login.decodificar_token(TOKEN_USUARIO_LOGIN)
        self.assertDictEqual(payload, data)
    def test_validar_mensaje_token_caduco(self):
        # expired token raises ValidationError with the expiry message
        token_cliente = tokenFactory.crear_token_cliente()
        with self.assertRaises(ValidationError) as cm:
            token_cliente.decodificar_token(TOKEN_PREALTA_CLIENTE_CADUCO)
        error = cm.exception
        self.assertEqual(error.message, 'El token de verificacion ha expirado')
    def test_validar_mensaje_token_invalido(self):
        # corrupting the JWT (dropping its first character) must be rejected
        token_cliente = tokenFactory.crear_token_cliente()
        with self.assertRaises(ValidationError) as cm:
            token_invalido = TOKEN_PREALTA_CLIENTE[1:]
            token_cliente.decodificar_token(token_invalido)
        error = cm.exception
        self.assertEqual(error.message, 'El token no es valido')
    def test_validar_mensaje_token_tipo_cliente_difente(self):
        # a user token must not decode as a client token
        token_cliente = tokenFactory.crear_token_cliente()
        with self.assertRaises(ValidationError) as cm:
            token_cliente.decodificar_token(TOKEN_PREALTA_USUARIO)
        error = cm.exception
        self.assertEqual(error.message, 'El token no es valido para el registro de cliente')
    def test_validar_mensaje_token_tipo_usuario_difente(self):
        # a client token must not decode as a user token
        token_usuario = tokenFactory.crear_token_usuario()
        with self.assertRaises(ValidationError) as cm:
            token_usuario.decodificar_token(TOKEN_PREALTA_CLIENTE)
        error = cm.exception
        self.assertEqual(error.message, 'El token no es valido para el registro de usuario')
    def test_validar_mensaje_token_tipo_usuario__login_difente(self):
        # a registration token must not decode as a login token
        token_usuario_login = tokenFactory.crear_token_usuario_login()
        with self.assertRaises(ValidationError) as cm:
            token_usuario_login.decodificar_token(TOKEN_PREALTA_USUARIO)
        error = cm.exception
        self.assertEqual(error.message, 'El token no es valido para el ingreso al sistema')
# tests/test_perceptron.py
from plume.perceptron import PerceptronClassifier
import numpy as np
# Toy linearly separable dataset: three 2-D points with labels +1/+1/-1.
x_train = np.array([[3, 3], [4, 3], [1, 1]])
y_train = np.array([1, 1, -1])
# Fit with dual=False — presumably the primal form of the perceptron; TODO confirm
# against plume's PerceptronClassifier documentation.
clf = PerceptronClassifier(dual=False)
clf.fit(x_train, y_train)
print(clf.get_model())
print(clf.predict(x_train))
# Fit with the default constructor for comparison (dual form, presumably).
clf1 = PerceptronClassifier()
clf1.fit(x_train, y_train)
print(clf1.get_model())
print(clf1.predict(x_train))
| 3.171875 | 3 |
# ChasingTrainFramework_GeneralOneClassDetection/data_provider_base/base_provider.py
"""
This module takes an adapter as data supplier, pack data and provide data for data iterators
"""
class ProviderBaseclass(object):
    """Base class for data packers.

    Concrete packers must inherit from this class and override
    :meth:`write` and :meth:`read_by_index`.
    """
    def __init__(self):
        # No state of its own; subclasses set up their storage.
        pass
    def __str__(self):
        # Display the concrete subclass name rather than a default repr.
        return self.__class__.__name__
    def __del__(self):
        # Cleanup hook for subclasses; nothing to release at this level.
        pass
    def write(self):
        """Write a single sample to the files. Must be overridden."""
        raise NotImplementedError()
    def read_by_index(self, index):
        """Read the single sample identified by *index*. Must be overridden."""
        raise NotImplementedError()
if __name__ == '__main__':
    # Smoke test: instantiating and printing shows the class name.
    provider = ProviderBaseclass()
    print(provider)
| 2.875 | 3 |
def insertShiftArray(arr, num):
    """Insert *num* at the middle of *arr* and return the result as a new list.

    For an even-length list the insert point is exactly the middle; for an
    odd-length list it falls just after the middle element — matching the
    original loop-based behavior. The input list is never mutated.

    Fix: the previous implementation silently dropped *num* when *arr* was
    empty (the loop body never ran); an empty input now yields ``[num]``.
    """
    # ceil(len/2): index 2 for both [a, b, c, d] and [a, b, c]; 0 for [].
    insert_at = (len(arr) + 1) // 2
    return arr[:insert_at] + [num] + arr[insert_at:]
| 3.78125 | 4 |
# ner_plugins/NER_BERT.py
from transformers import BertForTokenClassification
import torch
from transformers import BertTokenizer
import numpy as np
import nltk.data
nltk.download('punkt')  # fetch the sentence-tokenizer models at import time (network side effect)
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizer, BertConfig, AutoModelForTokenClassification, AutoConfig
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from transformers import BertForTokenClassification, AdamW
from transformers import get_linear_schedule_with_warmup
from seqeval.metrics import accuracy_score
from sklearn.metrics import f1_score, classification_report, precision_score, recall_score
import torch.nn as nn
from tqdm import trange
import numpy as np
import matplotlib.pyplot as plt
from nltk.tokenize import sent_tokenize
import os
##########################################################
# import wandb
# from transformers import TrainingArguments, Trainer
# wandb.init(project="project", entity="3rd_year_project")
##########################################################
class NER_BERT(object):
device = torch.device("cuda")
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tag2idx = {'O':0, 'ID':1, 'PHI':2, 'NAME':3, 'CONTACT':4, 'DATE':5, 'AGE':6, 'PROFESSION':7, 'LOCATION':8, 'PAD': 9}
tag_values = ["O","ID", "PHI", "NAME", "CONTACT", "DATE", "AGE", "PROFESSION", "LOCATION", "PAD"]
tokenizer = BertTokenizer.from_pretrained('bert-base-cased', num_labels=len(tag2idx), do_lower_case=False)
MAX_LEN = 75 # max length of sequence, needs for padding
bs = 32 # batch size
"""Abstract class that other NER plugins should implement"""
def __init__(self):
#config = AutoConfig.from_pretrained('ArishkaBelovishka/bert-i2b2')
#self.model = AutoModelForTokenClassification.from_pretrained('ArishkaBelovishka/bert-i2b2', config = config)
# Uncomment the following if you want to load your fine-tuned model from Models folder.
# If you just want to run NER use hugging-face repository where fine-tuned on half of i2b2 data model lives.
if os.path.exists("Models/BERT_epoch-10.pt"):
print("Loading model")
state_dict = torch.load("Models/BERT_epoch-10.pt", map_location=torch.device('cuda'))
print("Loaded model")
self.model = BertForTokenClassification.from_pretrained(
"bert-base-cased",
state_dict = state_dict,
num_labels=len(NER_BERT.tag2idx),
output_attentions = True,
output_hidden_states = True
)
else:
self.model = BertForTokenClassification.from_pretrained(
"bert-base-cased",
num_labels=len(NER_BERT.tag2idx),
output_attentions = True,
output_hidden_states = True
)
def perform_NER(self,text):
"""Implementation of the method that should perform named entity recognition"""
# tokenizer to divide data into sentences (thanks, nltk)
list_of_sents = sent_tokenize(text)
list_of_tuples_by_sent = []
for sent in list_of_sents:
# , truncation=True
tokenized_sentence = self.tokenizer.encode(sent, truncation=True) # BERT tokenizer is clever, it will internally divide the sentence by words, so all we need to provide there is sentence and it will return an array where each token is either special token/word/subword, refer to BERT WordPiece tokenizer approach
# truncation=True to comply with 512 length of the sentence
input_ids = torch.tensor([tokenized_sentence])
with torch.no_grad():
# Run inference/classification
output = self.model(input_ids)
label_indices = np.argmax(output[0].to("cpu").numpy(), axis=2)
tokens = self.tokenizer.convert_ids_to_tokens(input_ids.to('cpu').numpy()[0])
new_tokens, new_labels = [], []
for token, label_idx in zip(tokens, label_indices[0]):
if token.startswith("##"):
new_tokens[-1] = new_tokens[-1] + token[2:]
else:
new_labels.append(self.tag_values[label_idx])
new_tokens.append(token)
list_of_tuples = []
for token, label in zip(new_tokens, new_labels):
list_of_tuples.append((token, label))
#print("{}\t{}".format(label, token))
list_of_tuples_by_sent.append(list_of_tuples)
# remove [CLS] and [SEP] tokens to comply wth xml structure
for i in range(len(list_of_tuples_by_sent)):
for tag in self.tag_values:
if ('[CLS]', tag) in list_of_tuples_by_sent[i]:
list_of_tuples_by_sent[i].remove(('[CLS]', tag))
if ('[SEP]', tag) in list_of_tuples_by_sent[i]:
list_of_tuples_by_sent[i].remove(('[SEP]', tag))
return list_of_tuples_by_sent
# Needed for transform_sequences
def tokenize_and_preserve_labels(self, sentence, text_labels):
tokenized_sentence = []
labels = []
for word, label in zip(sentence, text_labels):
# Tokenize the word and count # of subwords the word is broken into
tokenized_word = NER_BERT.tokenizer.tokenize(word)
n_subwords = len(tokenized_word)
# Add the tokenized word to the final tokenized word list
tokenized_sentence.extend(tokenized_word)
# Add the same label to the new list of labels `n_subwords` times
labels.extend([label] * n_subwords)
return tokenized_sentence, labels
def transform_sequences(self,tokens_labels):
"""method that transforms sequences of (token,label) into feature sequences. Returns two sequence lists for X and Y"""
print("I am in transform seq")
# result - one document, result[i] is sentence in document, result [i][i] is word in sentence
tokenized_sentences = []
labels = []
for index, sentence in enumerate(tokens_labels):
text_labels = []
sentence_to_feed = []
for word_label in sentence:
text_labels.append(word_label[1])
sentence_to_feed.append(word_label[0])
a, b = self.tokenize_and_preserve_labels(sentence_to_feed, text_labels)
tokenized_sentences.append(a)
labels.append(b)
# Now need to split long tokenized sequences into subsequences of length less than 512 tokens
# not to loose valuable information in NER, basically not to cut sentences
# i2b2 docs are very ugly and sentences in them are usually way too long as doctors forget to put full stops...
# tokenized_sentences AND labels are the same strucutre of 2d arrays
# I need to take care of the issue if I am going to split beginning of the word and its end, like
# Arina is tokenized as "Ari" and "##na", thus I cannot separate the two, otherwise it will not make sense
distributed_tokenized_sentences, distributed_labels = [], []
for sent, label in zip(tokenized_sentences, labels):
if len(sent) > NER_BERT.MAX_LEN:
while len(sent) > NER_BERT.MAX_LEN:
#print("I am in while loop to truncate sequence")
index = NER_BERT.MAX_LEN - 2
for i in range(NER_BERT.MAX_LEN - 2, 0, -1):
if sent[i][:2] == "##":
index = index - 1
else:
break
new_sent = sent[:index] # 511 because we want to append [SEP] token in the end
new_label = label[:index]
sent = sent[index:] # update given sent
label = label[index:]
distributed_tokenized_sentences.append(new_sent)
distributed_labels.append(new_label)
distributed_tokenized_sentences.append(sent)
distributed_labels.append(label)
#print(sent)
else:
distributed_tokenized_sentences.append(sent)
distributed_labels.append(label)
input_ids = pad_sequences([NER_BERT.tokenizer.convert_tokens_to_ids(txt) for txt in distributed_tokenized_sentences],
maxlen=NER_BERT.MAX_LEN, dtype="long", value=0.0,
truncating="post", padding="post")
tags = pad_sequences([[NER_BERT.tag2idx.get(l) for l in lab] for lab in distributed_labels],
maxlen=NER_BERT.MAX_LEN, value=NER_BERT.tag2idx["PAD"], padding="post",
dtype="long", truncating="post")
# Result is pair X (array of sentences, where each sentence is an array of words) and Y (array of labels)
return input_ids, tags
def learn(self, X_train,Y_train, epochs=1):
"""Function that actually train the algorithm"""
# if torch.cuda.is_available():
# self.model.cuda()
tr_masks = [[float(i != 0.0) for i in ii] for ii in X_train]
print("READY TO CREATE SOME TENZORS!!!!!!!!!!!!!!!!!!!!!!!!!!")
tr_inputs = torch.tensor(X_train).type(torch.long)
tr_tags = torch.tensor(Y_train).type(torch.long)
tr_masks = torch.tensor(tr_masks).type(torch.long)
train_data = TensorDataset(tr_inputs, tr_masks, tr_tags)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=NER_BERT.bs)
print("READY TO PREPARE OPTIMIZER!!!!!!!!!!!!!!!!!!!!!!!!!!")
# Weight decay in Adam optimiser (adaptive gradient algorithm) is a regularisation technique which is extensively disucssed in this paper:
# https://arxiv.org/abs/1711.05101
# (Like L2 for SGD but different)
# resularisation of the model objective function in order to prevent overfitting of the model.
FULL_FINETUNING = True
if FULL_FINETUNING:
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01}, # in AdamW implementation (default: 1e-2)
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
else:
param_optimizer = list(self.model.classifier.named_parameters())
optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]
# TODO: change to new implementation of AdamW: torch.optim.AdamW(...)
optimizer = AdamW(
optimizer_grouped_parameters,
lr=3e-5,
eps=1e-8
)
max_grad_norm = 1.0
# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
# We need it to adjust learning rate if the accuracy does not change between epochs much,
# basically pushing the model to learn.
# https://sajjjadayobi.github.io/blog/markdown/2021/05/23/adamw-warmup.html
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
print("START TRAINING!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
## Store the average loss after each epoch so we can plot them.
loss_values, validation_loss_values = [], []
# just for intermediate model save naming
epoch_num = 3
for _ in trange(epochs, desc="Epoch"):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
# clean the cache not to fail with video memory
# if torch.cuda.is_available():
# torch.cuda.empty_cache()
# just for intermediate model save naming
epoch_num += 1
# Put the model into training mode.
self.model.train()
# Reset the total loss for this epoch.
total_loss = 0
print("Start backprop and optimisation!!! Epoch has passed!!!!!!!!!!!!!!!!!!!!!!!")
# Training loop
for step, batch in enumerate(train_dataloader):
print("We are in the batch!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# add batch to gpu
batch = tuple(b.to(NER_BERT.device) for b in batch)
b_input_ids, b_input_mask, b_labels = batch
# Always clear any previously calculated gradients before performing a backward pass.
self.model.zero_grad()
# forward pass
# This will return the loss (rather than the model output)
# because we have provided the `labels`.
outputs = self.model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# get the loss
loss = outputs[0]
# Perform a backward pass to calculate the gradients.
loss.backward()
# track train loss
total_loss += loss.item()
# Clip the norm of the gradient
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(parameters=self.model.parameters(), max_norm=max_grad_norm)
# update parameters
optimizer.step()
# Update the learning rate.
scheduler.step()
print("We processed one batch!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# Calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_dataloader)
print("Average train loss: {}".format(avg_train_loss))
# Store the loss value for plotting the learning curve.
loss_values.append(avg_train_loss)
# Save intermediate weights of the model, i.e. if computer goes crazy and drops the training or you
# want to test the performance from different epochs
torch.save(self.model.state_dict(), os.path.join("Models_intermediate/", 'BERT_epoch-{}.pt'.format(epoch_num)))
#Plot the learning curve.
plt.figure()
plt.plot(loss_values, 'b-o', label="training loss")
# Label the plot.
plt.title("Learning curve")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
def evaluate(self, X_test,Y_test):
if torch.cuda.is_available():
self.model.cuda()
"""Function to evaluate algorithm"""
val_masks = [[float(i != 0.0) for i in ii] for ii in X_test]
val_inputs = torch.tensor(X_test).type(torch.long)
val_tags = torch.tensor(Y_test).type(torch.long)
val_masks = torch.tensor(val_masks).type(torch.long)
valid_data = TensorDataset(val_inputs, val_masks, val_tags)
valid_sampler = SequentialSampler(valid_data)
valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=NER_BERT.bs)
# seed
# for _ in range(2):
#valid_dataloader = DataLoader(valid_data, shuffle=True, batch_size=NER_BERT.bs)
# for one random seed of valid_dataloader:
# ...
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
# Put the model into evaluation mode to set dropout and batch normalization layers to evaluation mode to have consistent results
self.model.eval()
# Reset the validation loss for this epoch.
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
predictions , true_labels = [], []
for batch in valid_dataloader:
batch = tuple(t.to(self.device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients,
# saving memory and speeding up validation
with torch.no_grad():
# Forward pass, calculate logit predictions.
# This will return the logits rather than the loss because we have not provided labels.
outputs = self.model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# Move logits and labels to CPU
logits = outputs[1].detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences.
eval_loss += outputs[0].mean().item()
predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
true_labels.extend(label_ids)
eval_loss = eval_loss / len(valid_dataloader)
print("Validation loss: {}".format(eval_loss))
pred_tags = [NER_BERT.tag_values[p_i] for p, l in zip(predictions, true_labels)
for p_i, l_i in zip(p, l) if NER_BERT.tag_values[l_i] != "PAD"]
###############################################################################
# reconstruct given text for purposes of algorithms' performance comparison
# our X_test is again a list of sentences, i.e. 2d array
tokens = [self.tokenizer.convert_ids_to_tokens(sent) for sent in X_test]
# Unpack tokens into 1d array to be able to go through it with labels
# [PAD] and not just PAD because that is what BERT actually puts
tokens_flat = [item for sublist in tokens for item in sublist if item != "[PAD]"]
#for sentence in tokens:
new_tokens, new_labels = [], []
for token, pred in zip(tokens_flat, pred_tags):
#print("{}\t{}".format(token, pred))
if token.startswith("##"):
new_tokens[-1] = new_tokens[-1] + token[2:]
else:
new_labels.append(pred)
new_tokens.append(token)
###############################################################################
valid_tags = [NER_BERT.tag_values[l_i] for l in true_labels
for l_i in l if NER_BERT.tag_values[l_i] != "PAD"]
print("Validation Accuracy: {}".format(accuracy_score(valid_tags, pred_tags))) # was other way around, why?
print("Validation F1-Score: {}".format(f1_score(valid_tags, pred_tags, average='weighted'))) # correct
print("Validation precision: {}".format(precision_score(valid_tags, pred_tags, average='weighted')))
print("Validation recall: {}".format(recall_score(valid_tags, pred_tags, average='weighted')))
labels = ["ID", "PHI", "NAME", "CONTACT", "DATE", "AGE",
"PROFESSION", "LOCATION"]
print(classification_report(valid_tags, pred_tags, digits=4, labels=labels))
print()
###############################################################################
# to evaluate union/intersection of algorithms
# for t, l in zip(new_tokens, new_labels):
# print("{}\t{}".format(t, l))
return new_labels
# # Use plot styling from seaborn.
# sns.set(style='darkgrid')
# # Increase the plot size and font size.
# sns.set(font_scale=1.5)
# plt.rcParams["figure.figsize"] = (12,6)
# # Plot the learning curve.
# plt.plot(loss_values, 'b-o', label="training loss")
# plt.plot(validation_loss_values, 'r-o', label="validation loss")
# # Label the plot.
# plt.title("Learning curve")
# plt.xlabel("Epoch")
# plt.ylabel("Loss")
# plt.legend()
# plt.show()
def save(self, model_path):
    """
    Persist the trained model's weights to disk.

    The state dict is written as a PyTorch ``.pt`` file inside the
    ``Models`` directory, named after the supplied argument.

    :param model_path: Name of the model file (without extension)
    :return: Doesn't return anything
    """
    destination = "Models/{}.pt".format(model_path)
    torch.save(self.model.state_dict(), destination)
    print("Saved model to disk")
| 2.265625 | 2 |
omics/feature_selection/MRMR.py | choyichen/pybcb | 3 | 12766001 | """MRMR method.
Implementation of Ding (2005) Minimum redundancy-maximum relevance (MRMR) feature selection method.
Minimum redundancy feature selection from microarray gene expression data.
<NAME>, <NAME>
J Bioinform Comput Biol. 2005 Apr; 3(2) 185-205
DOI: 10.1142/S0219720005001004, PMID: 15852500
"""
import pandas as pd
from itertools import product
__author__ = "<NAME>"
__version__ = "20170205"
def _redundancy(df, S):
"""W_I from equation (2).
"""
return 1. * sum([df[i][j] for (i, j) in product(S, repeat=2)]) / len(S) ** 2
def _relevance(df, S, y):
"""V_I from equation (3).
"""
return 1. * sum([df[y][i] for i in S]) / len(S)
def _MID_score(df, S, y):
"""Equation (4).
"""
return _relevance(df, S, y) - _redundancy(df, S)
def _MIQ_score(df, S, y):
"""Equation (5).
"""
return _relevance(df, S, y) / _redundancy(df, S)
def MRMR(df, y, n=10, criterion='MID', to_exclude=[]):
"""Heuristic algorithm described on page 188 (p.4).
df: MI matrix derived from omics.stats.MI
y: target variable name
n: how many features to select
criterion: scoring criterion, either 'MID' or 'MIQ'
to_exclude: do not search for these feature names
Return a dataframe of selected features and their MRMR scores (MID or MIQ) indexed by their selection order (starts from 0).
"""
# Setup
F = set(df.index) - set([y]) - set(to_exclude) # initial feature-to-select set
S = [] # selected feature list
n = n if n < len(F) else len(F) # to select n features
scores = [] # selected feature scores
maxlen = sorted([len(i) for i in df.index])[-1] # max feature name length
# Select first feature
f = df[y][F].idxmax() # 1st feature: largest MI(f, y)
S.append(f)
scores.append(df[y][f])
F.remove(f)
print "Selected feature 1 / %d: %*s (MRMR Score = %.2g)" % (n, maxlen, f, df[y][f])
# Select criterion
if criterion == "MID":
get_score = lambda f: _MID_score(df, S + [f], y)
elif criterion == "MIQ":
get_score = lambda f: _MIQ_score(df, S + [f], y)
else:
raise Exception("Unsupported criterion. Must be either MID or MIQ.")
# Select next feature
for i in range(1, n):
f = sorted(F, key=get_score, reverse=True)[0] # feature w/ largest MRMR score
s = get_score(f)
S.append(f)
scores.append(s)
F.remove(f)
print "Selected feature %2d / %2d: %*s (MRMR Score = %.2g)" % (i+1, n, maxlen, f, s)
return pd.DataFrame({'Name': S, 'Score': scores})
| 3.328125 | 3 |
P0-Tutorial/tutorial/shop.py | jorcus/CS188-Intro-to-AI | 6 | 12766002 | <reponame>jorcus/CS188-Intro-to-AI
# shop.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
class FruitShop:
    """A simple shop holding a name and a fruit -> price-per-pound table."""

    def __init__(self, name, fruitPrices):
        """
        name: Name of the fruit shop

        fruitPrices: Dictionary with keys as fruit
        strings and prices for values e.g.
        {'apples':2.00, 'oranges': 1.50, 'pears': 1.75}
        """
        self.fruitPrices = fruitPrices
        self.name = name
        # Single-argument print() call is valid under both Python 2 and 3,
        # unlike the original `print '...'` statement form.
        print('Welcome to %s fruit shop' % (name))

    def getCostPerPound(self, fruit):
        """
        fruit: Fruit string

        Returns cost of 'fruit', assuming 'fruit'
        is in our inventory or None otherwise
        """
        if fruit not in self.fruitPrices:
            return None
        return self.fruitPrices[fruit]

    def getPriceOfOrder(self, orderList):
        """
        orderList: List of (fruit, numPounds) tuples

        Returns cost of orderList. If any of the fruit are
        not in the inventory, they are skipped (contribute nothing).
        """
        totalCost = 0.0
        for fruit, numPounds in orderList:
            costPerPound = self.getCostPerPound(fruit)
            # Identity comparison with None instead of `!=`
            if costPerPound is not None:
                totalCost += numPounds * costPerPound
        return totalCost

    def getName(self):
        return self.name

    def __str__(self):
        return "<FruitShop: %s>" % self.getName()

    def __repr__(self):
        return str(self)
| 3.265625 | 3 |
tests/test_api_refund.py | obytes/tap-python | 3 | 12766003 | <reponame>obytes/tap-python<filename>tests/test_api_refund.py
from . import tap_vcr
import pytest
import tap
from tap.api_resources.refund import Refund
@tap_vcr.use_cassette('refund/create_refund.yaml')
def test_create_refund(create_customer, create_charge):
    # Arrange: a customer with a charge we can refund in full.
    customer = create_customer()
    charge = create_charge(customer.id)

    refund = tap.Refund.create(
        charge_id=charge.id,
        amount=charge.amount,
        currency="KWD",
        description="Test Description",
        reason="requested_by_customer",
    )

    # The API should hand back a fully-formed Refund resource.
    assert isinstance(refund, Refund)
    assert refund.object == 'refund'
| 2.40625 | 2 |
patch_rt_container_registry_repos/python/list_docker_repos.py | jfrog/log4j_tools | 170 | 12766004 | import sys
import requests
from urllib.parse import urljoin
JFROG_API_KEY_HEADER_NAME = 'X-JFrog-Art-Api'
class DockerRegistryPagination:
    """Callable that walks a paginated docker-registry endpoint by following
    the `Link: rel="next"` headers, concatenating the values stored under a
    given JSON key across all pages."""

    def __init__(self, concatenating_key):
        self.concatenating_key = concatenating_key

    def __call__(self, url, *args, **kwargs):
        collected = []
        while True:
            response = requests.get(url, *args, **kwargs)
            response.raise_for_status()
            collected.extend(response.json().get(self.concatenating_key, []))
            if 'next' not in response.links:
                return collected
            # Relative "next" links are resolved against the current page URL.
            url = urljoin(url, response.links['next']['url'])
class ArtifactoryIntegrationLogic:
    """Minimal client for the parts of the JFrog Artifactory REST API needed
    to enumerate docker repositories, their images and tags."""

    def __init__(self, base_url, api_key, default_repo=None, username=None):
        self.username = username
        self.base_url = base_url
        # Only prepend a scheme when none is present.  The original check
        # matched only 'https://', so an explicit 'http://...' base URL was
        # mangled into 'https://http://...'.
        if not self.base_url.startswith(('http://', 'https://')):
            self.base_url = 'https://' + base_url
        # Normalize away a trailing slash so endpoint paths append cleanly.
        if self.base_url.endswith('/'):
            self.base_url = self.base_url[:-1]
        self.api_key = api_key
        self.default_repo = default_repo

    def get_artifactory_headers(self):
        """Return the API-key authentication header expected by Artifactory."""
        return {
            JFROG_API_KEY_HEADER_NAME: self.api_key,
        }

    def _get_all_repos_data(self):
        """Fetch the raw repository list; raises on auth or HTTP errors."""
        res = requests.get(
            self.base_url + '/artifactory/api/repositories',
            headers=self.get_artifactory_headers(),
        )
        if res.status_code != 200:
            if res.status_code == 403:
                raise Exception(
                    'Artifactory token is not valid or has been revoked.'
                )

            raise Exception(
                f'Failed to get repositories. '
                f'Error: {res.text}. Code {res.status_code}'
            )
        return res.json()

    def list_repos(self, search=''):
        """Sorted repo keys whose name contains `search` (case-insensitive)."""
        all_repos_data = self._get_all_repos_data()
        return sorted([i['key'] for i in all_repos_data if search.lower() in i['key'].lower()])

    def get_repo_type(self, repo_name):
        """Return the package type of `repo_name`, or raise if unknown."""
        all_repos_data = self._get_all_repos_data()
        for i in all_repos_data:
            if i['key'] == repo_name:
                return i['packageType']

        raise Exception(
            f'Repository {repo_name} does not exist or user does not have permissions for it.'
        )

    def _list_docker_folders(self, repo, search=''):
        """Image names in `repo` via the paginated docker catalog API."""
        request_func = DockerRegistryPagination('repositories')
        try:
            repos = request_func(
                self.base_url + '/artifactory/api/docker/%s/v2/_catalog' % repo,
                headers=self.get_artifactory_headers(),
            )
            return [i for i in repos if search.lower() in i.lower()]
        except requests.exceptions.HTTPError as exc:
            raise Exception(
                f'Failed to get images list using docker catalog. '
                f'Error: {exc.response.text}. Code {exc.response.status_code}'
            ) from exc

    def list_folders(self, repo=None, search=''):
        """Sorted image (folder) names; falls back to `default_repo`."""
        if not repo:
            repo = self.default_repo
        if not repo:
            raise ValueError('Either send a repo or set the default repo for this to work.')
        folders = self._list_docker_folders(repo, search)
        return sorted(folders)

    def _list_docker_images(self, folder, repo, search=''):
        """Tag names for image `folder` in `repo` via the paginated tags API."""
        request_func = DockerRegistryPagination('tags')
        try:
            tags = request_func(
                self.base_url + '/artifactory/api/docker/%s/v2/%s/tags/list' % (repo, folder),
                headers=self.get_artifactory_headers()
            )
            return [i for i in tags if search.lower() in i.lower()]
        except requests.exceptions.HTTPError as exc:
            raise Exception(
                f'Failed to get tag list using docker catalog. '
                f'Error: {exc.response.text}. Code {exc.response.status_code}'
            ) from exc

    def list_images(self, folder='', repo=None, search=''):
        """Sorted tag names for `folder`; falls back to `default_repo`."""
        if not repo:
            repo = self.default_repo
        if not repo:
            raise ValueError('Either send a repo or set the default repo for this to work.')
        images = self._list_docker_images(folder, repo, search)
        return sorted(images)
# CLI: <domain> <api-key> <username>; dumps every docker repo/image/tag triple.
rt_domain, api_key, user = sys.argv[1], sys.argv[2], sys.argv[3]

with open("images.csv", "w") as outfile:
    client = ArtifactoryIntegrationLogic(f"https://{rt_domain}", api_key, username=user)
    for repo_key in client.list_repos():
        # Only docker-type repositories expose the catalog/tags endpoints.
        if client.get_repo_type(repo_key).lower() != "docker":
            continue
        for image_name in client.list_folders(repo=repo_key):
            for tag in client.list_images(repo=repo_key, folder=image_name):
                outfile.write(f"{repo_key}, {image_name}, {tag}\r\n")
| 2.5625 | 3 |
coffea/lumi_tools/__init__.py | dnoonan08/coffea | 1 | 12766005 | <gh_stars>1-10
"""Tools to parse CMS luminosity non-event data
These tools are currently tailored to the CMS experiment
data formats, however they could be generalized and/or compartmentalized
into a standalone package.
"""
from .lumi_tools import (
LumiData,
LumiList,
LumiMask,
)
__all__ = [
'LumiData',
'LumiList',
'LumiMask',
]
| 1.203125 | 1 |
IceSpringMusicPlayer/plugins/IceSpringPlaylistPlugin/playlistWidgetConfig.py | baijifeilong/rawsteelp | 0 | 12766006 | <filename>IceSpringMusicPlayer/plugins/IceSpringPlaylistPlugin/playlistWidgetConfig.py
# Created by <EMAIL> at 2022/2/14 14:52
from __future__ import annotations
import dataclasses
from IceSpringMusicPlayer.common.jsonSupport import JsonSupport
@dataclasses.dataclass
class PlaylistWidgetConfig(JsonSupport):
    """Serializable view settings for the playlist widget."""

    rowHeight: int
    horizontalScrollBarPolicy: str
    verticalScrollBarPolicy: str
    showTabBar: bool

    @classmethod
    def getDefaultObject(cls) -> PlaylistWidgetConfig:
        """Build a config instance populated with the factory defaults."""
        return cls(
            rowHeight=30,
            horizontalScrollBarPolicy="AUTO",
            verticalScrollBarPolicy="AUTO",
            showTabBar=False,
        )
| 1.828125 | 2 |
hostname_resolver/custom_datatypes/gatherunique/__init__.py | dpazavalos/bulk-hostname-scanner | 0 | 12766007 | <filename>hostname_resolver/custom_datatypes/gatherunique/__init__.py
"""Reusable unique item gatherer. Returns unique values with optional list_in and blacklists"""
class GatherUnique:
    """
    Reusable unique item gatherer. Returns unique values with optional list_in and blacklists.
    """

    def __init__(self):
        # Optional header displayed to the user when gathering entries from stdin
        self._header = ''
        # Optional input list, used in lieu of stdin prompting
        self._list_in = []
        # Unique items reconciled against potential blacklist(s)
        self._gathered = []
        # Optional list(s) of items to verify against
        self._black_lists = []

    def _reset(self):
        """
        Reset attributes so the instance can be reused.

        Re-binds fresh lists instead of clearing in place: the original
        `self._list_in.clear()` raised AttributeError when run() was called
        without list_in (leaving it None), and it also emptied the *caller's*
        list when one had been passed in.
        """
        self._header = ''
        self._list_in = []
        self._black_lists = []
        self._gathered = []

    def _not_in_args(self, item) -> bool:
        """
        Return True if `item` appears in none of the blacklists.
        """
        for black_list in self._black_lists:
            if item in black_list:
                return False
        return True

    def _gather_from_stdin(self) -> None:
        """
        Gathers input from user, split by newline. Runs until blank line submitted.
        Sorts against blacklists into _gathered
        """
        print("\n" + self._header + "\n")
        prompt = None
        while prompt != '':
            prompt = input('> ').lower().strip()
            if prompt != '' and prompt not in self._gathered and self._not_in_args(prompt):
                self._gathered.append(prompt)

    def _gather_from_list_in(self):
        """
        Gathers items from given list_in, sorts against blacklists into _gathered
        """
        for item in self._list_in:
            if item not in self._gathered and self._not_in_args(item):
                self._gathered.append(item)

    def _return_gathered(self) -> list:
        """
        Returns gathered list, resets working attributes
        """
        # Save gathered, clear attributes, return
        to_return = self._gathered.copy()
        self._reset()
        return to_return

    def run(self,
            header: str = '', list_in: list = None,
            **blacklists) -> list:
        """
        Gathers a list of unique entries from user, with optional blacklists and header prompt
        Note: Item type depends on gathering method. If using stdin, anticipate string returns.
        Otherwise, use list_in

        Args:
            header: Header prompt to display to user on startup.
            list_in: An optional input parameter. Feeding a list bypasses stdin, and straight to
                unique list generation. If black lists are provided, reconciles against them.
            blacklists: Optional *kwargs for one or more blacklists to check entires against.
                Returned list will not contain any blacklisted entries. (Note: kwargs used only for
                flexible positioning. Arg names literally do not matter)

        Returns:
            Unique list of values, either from stdin or list_in arg
        """
        # Assign running attributes; guard against None so _reset() and
        # iteration always operate on a real list.
        self._header = header
        self._list_in = list_in if list_in is not None else []
        for blist in blacklists.values():
            self._black_lists.append(blist)

        if list_in:
            self._gather_from_list_in()
        else:
            self._gather_from_stdin()

        return self._return_gathered()
| 3.0625 | 3 |
tests/test_utils_mem.py | PSandeepSandy/fastai | 0 | 12766008 | import pytest, fastai
from fastai.utils.mem import *
from math import isclose
# Important: When modifying this test module, make sure to validate that it runs w/o
# GPU, by running: CUDA_VISIBLE_DEVICES="" pytest
# most tests are run regardless of cuda available or not, we just get zeros when gpu is not available
# Detect whether a usable GPU is present.  `have_cuda` defaults to 0 so the
# flag is always bound: the original left it undefined on CPU-only machines
# (torch.cuda.is_available() False), crashing at the `if have_cuda:` below.
have_cuda = 0
if torch.cuda.is_available():
    have_cuda = 1
    if "CUDA_VISIBLE_DEVICES" in os.environ and not len(os.environ["CUDA_VISIBLE_DEVICES"]):
        print('detected no gpu env emulation with CUDA_VISIBLE_DEVICES=""')
        have_cuda = 0

# This must run before any tests:
# force pytorch to load cuDNN and its kernels to claim unreclaimable memory (~0.5GB) if it hasn't done so already, so that we get correct measurements
if have_cuda: torch.ones((1, 1)).cuda()
def gpu_mem_consume_some(n): return torch.ones((n, n)).cuda()  # allocate an n*n float32 tensor on the GPU
def gpu_mem_consume_16mb(): return gpu_mem_consume_some(2000)  # 2000*2000*4 bytes ~= 16MB
def gpu_cache_clear(): torch.cuda.empty_cache()  # hand cached CUDA blocks back to the driver
def gpu_mem_reclaim(): gc.collect(); gpu_cache_clear()  # collect python garbage first so tensors are freed, then clear the cache
def check_gpu_mem_zeros(total, used, free):
    """Assert that all three GPU RAM readings are zero (CPU-only case)."""
    readings = (("total", total), ("used", used), ("free", free))
    for label, value in readings:
        assert value == 0, "have %s GPU RAM" % label
def check_gpu_mem_non_zeros(total, used, free):
    """Assert that all three GPU RAM readings are positive (GPU present)."""
    readings = (("total", total), ("used", used), ("free", free))
    for label, value in readings:
        assert value > 0, "have %s GPU RAM" % label
def test_gpu_mem_by_id():
    """Stats for the currently selected device, plus a bogus device id."""
    stats = get_gpu_mem()
    checker = check_gpu_mem_non_zeros if have_cuda else check_gpu_mem_zeros
    checker(*stats)

    # wrong id that can't exist
    check_gpu_mem_zeros(*get_gpu_mem(99))
def test_gpu_mem_all():
    """One non-zero (total, used, free) triple per GPU, or an empty list."""
    mem_per_id = get_gpu_mem_all()
    if have_cuda:
        for triple in mem_per_id:
            check_gpu_mem_non_zeros(*triple)
    else:
        assert len(mem_per_id) == 0
def test_gpu_with_max_free_mem():
    """The (id, free) pair is populated iff a GPU is available."""
    gpu_id, free_mem = get_gpu_with_max_free_mem()
    if have_cuda:
        assert gpu_id is not None, "have gpu id"
        assert free_mem > 0, "have gpu free ram"
    else:
        assert gpu_id is None, "have no gpu id"
        assert free_mem == 0, "have no gpu free ram"
@pytest.mark.skipif(not have_cuda, reason="requires cuda")
def test_gpu_mem_measure_consumed_reclaimed():
    """Allocating ~16MB must register as used memory, and deleting the
    tensor plus reclaiming must bring usage back to the starting point."""
    gpu_mem_reclaim()
    used_before = get_gpu_mem()[1]

    # 1. measure memory consumption
    tensor = gpu_mem_consume_16mb()
    diff_real = get_gpu_mem()[1] - used_before
    diff_expected_min = 15  # could be slightly different
    assert diff_real >= diff_expected_min, f"check gpu consumption, expected at least {diff_expected_min}, got {diff_real} diff"

    # 2. measure memory reclamation
    del tensor          # this may or may not trigger automatic gc.collect - can't rely on that
    gpu_mem_reclaim()   # force gc.collect and cache clearing
    used_after_reclaimed = get_gpu_mem()[1]
    # allow 2mb tolerance for rounding of 1 mb on each side
    assert isclose(used_before, used_after_reclaimed, abs_tol=2), f"reclaim all consumed memory, started with {used_before}, now {used_after_reclaimed} used"
| 2.703125 | 3 |
savecode/pythonpackages/commonbaby/mslog/filelogprovider.py | Octoberr/swm0920 | 2 | 12766009 | <filename>savecode/pythonpackages/commonbaby/mslog/filelogprovider.py
"""File log provider that provides separated log file write"""
# -*- coding:utf-8 -*-
from .filelogwriter import MsFileLogWriter
from .mslogconfig import MsFileLogConfig, MsLogConfig
from .msloglevel import MsLogLevels
from .mslogprovider import MsLogProvider
class FileLogProvider(MsLogProvider):
"""File log provider that provides separated log file write"""
def __init__(self, ficfg: MsFileLogConfig = None):
if ficfg is None:
ficfg = MsFileLogConfig(MsLogLevels.DEBUG, True)
MsLogProvider.__init__(self, ficfg)
def get_writer(self, name: str,
cfg: MsLogConfig = None) -> MsFileLogWriter:
"""Returns a MsLogWriter with specific name
name: the writer's name"""
if name is None:
name = 'default'
wtr = MsFileLogWriter(name, cfg if cfg is not None else self._config)
return wtr
| 3.015625 | 3 |
client/verta/tests/test_versioning/test_code.py | stefan-petrov-toptal/modeldb | 1 | 12766010 | <gh_stars>1-10
import pytest
import hypothesis
import hypothesis.strategies as st
import itertools
from google.protobuf import json_format
import verta.code
from verta._internal_utils import _git_utils
# Detect once at import time whether we are running inside a git repo.
try:
    _git_utils.get_git_repo_root_dir()
    IN_GIT_REPO = True
except OSError:
    IN_GIT_REPO = False
def get_git_test_autocapture_cases():
    """
    Arguments to `Git()` with autocapture on (default) must satisfy these conditions:
        1) `repo_url` can be None or str
        2) `branch`, `tag`, and `commit_hash` can each be None or str
            a) but only one of the three can be non-None
        3) `is_dirty` can be None, True, or False
    """
    if not IN_GIT_REPO:
        return []

    valid_values = [
        [None, _git_utils.get_git_remote_url(), "foo"],  # repo_url
        [None, _git_utils.get_git_branch_name("HEAD")],  # branch
        [None, _git_utils.get_git_commit_tag("HEAD") or None],  # tag (None if HEAD is not at a tag)
        [None, _git_utils.get_git_commit_hash("HEAD")],  # commit_hash
        [None, True, False],  # is_dirty
    ]

    # Cartesian product of all candidate values, deduplicated, keeping only
    # the combinations that satisfy (2a): at most one ref argument set.
    return [
        case
        for case in set(itertools.product(*valid_values))
        if sum(val is not None for val in case[1:4]) <= 1
    ]
class TestGit:
    """Tests for `verta.code.Git` metadata capture."""

    def test_no_autocapture(self):
        # With autocapture explicitly disabled, no git metadata may be set.
        code_ver = verta.code.Git(_autocapture=False)

        # protobuf message is empty
        assert not json_format.MessageToDict(
            code_ver._msg,
            including_default_value_fields=False,
        ).get('git')  # may be {'git': {}} if fields are manually set to empty

    def test_repr(self):
        """Tests that __repr__() executes without error"""
        try:
            _git_utils.get_git_repo_root_dir()
        except OSError:
            pytest.skip("not in git repo")

        code_ver = verta.code.Git()

        assert code_ver.__repr__()

    @pytest.mark.skipif(not IN_GIT_REPO, reason="not in git repo")
    @pytest.mark.parametrize(
        ("repo_url", "branch", "tag", "commit_hash", "is_dirty"),
        get_git_test_autocapture_cases(),
    )
    def test_autocapture(self, repo_url, branch, tag, commit_hash, is_dirty):
        # Each captured field must equal the explicit argument when one was
        # passed, otherwise fall back to what git reports for the chosen ref.
        code_ver = verta.code.Git(
            repo_url=repo_url,
            branch=branch, tag=tag, commit_hash=commit_hash, is_dirty=is_dirty,
        )

        # At most one of branch/tag/commit_hash is non-None (enforced by the
        # case generator); use it as the git ref, defaulting to HEAD's hash.
        refs = [arg for arg in (branch, tag, commit_hash) if arg]
        ref = refs[0] if refs else _git_utils.get_git_commit_hash("HEAD")

        assert code_ver.repo_url == (repo_url or _git_utils.get_git_remote_url())
        assert code_ver.branch == (branch or _git_utils.get_git_branch_name(ref))
        assert code_ver.tag == (tag or _git_utils.get_git_commit_tag(ref) or None)  # None if HEAD is not at a tag
        assert code_ver.commit_hash == (commit_hash or _git_utils.get_git_commit_hash(ref))
        assert code_ver.is_dirty == (is_dirty if is_dirty is not None else _git_utils.get_git_commit_dirtiness(ref))

    @hypothesis.given(
        repo_url=st.one_of(st.none(), st.emails()),
        branch=st.one_of(st.none(), st.text()),
        tag=st.one_of(st.none(), st.text()),
        commit_hash=st.one_of(st.none(), st.text()),
        is_dirty=st.one_of(st.none(), st.booleans()),
    )
    def test_user_no_autocapture(self, repo_url, branch, tag, commit_hash, is_dirty):
        """Like test_no_autocapture, but with the public `autocapture` param."""
        code_ver = verta.code.Git(
            repo_url=repo_url,
            branch=branch, tag=tag, commit_hash=commit_hash, is_dirty=is_dirty,
            autocapture=False,
        )

        # Nothing is captured from git: every field is exactly what was passed.
        assert code_ver.repo_url == (repo_url or None)
        assert code_ver.branch == (branch or None)
        assert code_ver.tag == (tag or None)
        assert code_ver.commit_hash == (commit_hash or None)
        assert code_ver.is_dirty == (is_dirty or False)
class TestNotebook:
    def test_no_autocapture(self):
        """With autocapture disabled, the protobuf message must stay empty."""
        code_ver = verta.code.Notebook(_autocapture=False)

        as_dict = json_format.MessageToDict(
            code_ver._msg,
            including_default_value_fields=False,
        )
        # protobuf message is empty
        assert not as_dict
| 2.171875 | 2 |
Data Science With Python/15-statistical-thinking-in-python-(part1)/4-thinking-probabilistically--continuous-variables/the-normal-cdf.py | aimanahmedmoin1997/DataCamp | 3 | 12766011 | '''
The Normal CDF
100xp
Now that you have a feel for how the Normal PDF looks, let's consider its CDF. Using the
samples you generated in the last exercise (in your namespace as samples_std1, samples_std3,
and samples_std10), generate and plot the CDFs.
Instructions
-Use your ecdf() function to generate x and y values for CDFs: x_std1, y_std1, x_std3, y_std3
and x_std10, y_std10, respectively.
-Plot all three CDFs as dots (do not forget the marker and linestyle keyword arguments!).
-Make a 2% margin in your plot.
-Hit submit to make a legend, showing which standard deviations you used, and to show your plot.
There is no need to label the axes because we have not defined what is being described by the Normal distribution; we are just looking at shapes of CDFs.
'''
import numpy as np
import matplotlib.pyplot as plt
def ecdf(data):
    """Compute ECDF for a one-dimensional array of measurements.

    Returns (x, y): the sorted data values and, for each, the cumulative
    fraction of points less than or equal to it.
    """
    size = len(data)
    sorted_vals = np.sort(data)
    cum_fracs = (np.arange(size) + 1) / size
    return sorted_vals, cum_fracs
# Seed random number generator so the draws are reproducible
np.random.seed(42)

# Draw 100000 samples from a Normal distribution for each std of interest
# and plot its ECDF (draw order matches the original: 1, then 3, then 10,
# so the RNG stream and the plot colors are unchanged).
for std in (1, 3, 10):
    sample = np.random.normal(20, std, size=100000)
    xs, ys = ecdf(sample)
    _ = plt.plot(xs, ys, marker='.', linestyle='none')

# Make 2% margin
plt.margins(0.02)

# Make a legend and show the plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'), loc='lower right')
plt.show()
| 4.28125 | 4 |
firstcode.py | congnb/mypython | 1 | 12766012 | import sys
a="hello"
def myfunc():
print("xxxx")
a="du"
print (a)
myfunc()
print(a)
x="xxxxx\""
print(x, x[3:9])
print (str.format("abc {}",a))
for x in range(10):
if x%2 == 0:
print(x)
else:
pass
# while True:
# print(a)
powOf = lambda a : a*a
print(powOf(4))
def lbdInside(n):
    """Return a closure that adds the captured `n` to its argument."""
    def _add_n(a):
        return a + n
    return _add_n


print(lbdInside(3)(5))
class Xxx:
    """Demo class: the class attribute `Name` is shadowed per instance."""

    Name = ""

    def __init__(self):
        self.Name = "hello"


x = Xxx()
print(x.Name)
import helloworld
print(helloworld)
print(dir(helloworld))

import datetime
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

import json
print(json.dumps({"A":"a"}))
print(json.dumps(("a","b")))
print(json.dumps([1,"a"]))
# json cannot serialize a set: the original `json.dumps({"a","b"})` raised
# TypeError at runtime.  Convert to a sorted list for a deterministic result.
print(json.dumps(sorted({"a","b"})))

import threading, queue
ParaMol/Tasks/parametrization.py | mnagaku/ParaMol | 15 | 12766013 | # -*- coding: utf-8 -*-
"""
Description
-----------
This module defines the :obj:`ParaMol.Tasks.parametrization.Parametrization` class, which is a ParaMol task that performs force field parametrization.
"""
import numpy as np
import logging
# ParaMol libraries
from .task import *
from ..Optimizers.optimizer import *
from ..Parameter_space.parameter_space import *
from ..Objective_function.objective_function import *
from ..Utils.interface import *
# ------------------------------------------------------------
# #
# PARAMETRIZATION TASK #
# #
# ------------------------------------------------------------
class Parametrization(Task):
"""
ParaMol parametrization task.
"""
def __init__(self):
    # The task is stateless: all working objects (parameter space, objective
    # function, optimizer) are created or received in run_task.
    pass
# ---------------------------------------------------------- #
# #
# PUBLIC METHODS #
# #
# ---------------------------------------------------------- #
def run_task(self, settings, systems, parameter_space=None, objective_function=None, optimizer=None, interface=None, adaptive_parametrization=False, apply_charge_correction=False, restart=False):
    """
    Method that performs the standard ParaMol parametrization.

    Parameters
    ----------
    settings : dict
        Dictionary containing global ParaMol settings.
    systems : list of :obj:`ParaMol.System.system.ParaMolSystem`
        List containing instances of ParaMol systems.
    parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
        Instances of ParameterSpace.
    objective_function : :obj:`ParaMol.Objective_function.objective_function.ObjectiveFunction`
        Instance of the objective function.
    optimizer : one of the optimizers defined in the subpackage :obj:`ParaMol.Optimizers`
        Instance of the optimizer.
    interface: :obj:`ParaMol.Utils.interface.ParaMolInterface`
        ParaMol system instance.
    adaptive_parametrization: bool
        Flag that signals if this parametrization is being done inside an adaptive parametrization loop. If `False` the system xml file is not written in this method (default is `False`).
    apply_charge_correction : bool
        Whether or not to apply charge correction. Important if charges are being optimized.
    restart : bool
        Flag that controls whether or not to perform a restart.

    Returns
    -------
    systems, parameter_space, objective_function, optimizer
    """
    print("!=================================================================================!")
    print("!                                 PARAMETRIZATION                                 !")
    print("!=================================================================================!")
    for system in systems:
        # Perform basic assertions
        self._perform_assertions(settings, system)
        # Create force field optimizable for every system
        system.force_field.create_force_field_optimizable()

    # Create IO Interface (any object passed in must already be of the right
    # type — NOTE(review): these `assert type(...)` checks disappear under
    # `python -O`; explicit raises would be more robust)
    if interface is None:
        interface = ParaMolInterface()
    else:
        assert type(interface) is ParaMolInterface

    # Create ParameterSpace
    if parameter_space is None:
        parameter_space = self.create_parameter_space(settings, systems, interface, restart=restart)
    else:
        assert type(parameter_space) is ParameterSpace

    # Create properties and objective function
    if objective_function is None:
        properties = self.create_properties(settings.properties, settings.parameter_space, systems, parameter_space)
        objective_function = self.create_objective_function(settings.objective_function, settings.restart, parameter_space, properties, systems)
    else:
        assert type(objective_function) is ObjectiveFunction
        if settings.objective_function["parallel"]:
            # Number of structures might have been changed and therefore it is necessary to re-initialize the parallel objective function
            objective_function.init_parallel()

        # Recalculate variance in case reference data has changed.
        # NOTE(review): the loop variable `property` shadows the builtin of
        # the same name; harmless here, but worth renaming.
        if objective_function.properties is not None:
            for property in objective_function.properties:
                property.calculate_variance()

    '''
    for prop in objective_function.properties:
        if prop.name == "REGULARIZATION":
            # TODO: if commented, reg in adaptive parametrization is done w.r.t. to the initial parameters at iter 0
            #prop.set_initial_parameters_values(parameter_space.initial_optimizable_parameters_values_scaled)
            pass
    '''

    # Print Initial Info of Objective Function (opt_mode=False => report only)
    objective_function.f(parameter_space.optimizable_parameters_values_scaled, opt_mode=False)

    # Create optimizer
    if optimizer is None:
        optimizer = self.create_optimizer(settings.optimizer["method"],
                                          settings.optimizer[settings.optimizer["method"].lower()])
    else:
        assert type(optimizer) is Optimizer

    # ================================================================================= #
    #                                APPLY CHARGE CORRECTION                            #
    # ================================================================================= #
    if apply_charge_correction:
        for system in systems:
            # Apply charge correction
            self._apply_charge_correction(system)
            # Create optimizable force field
            system.force_field.create_force_field_optimizable()

        # Get optimizable parameters
        parameter_space.get_optimizable_parameters(systems)

        # Calculate prior widths, scaling constants and apply jacobi preconditioning (they may have changes if charges changed).
        # Otherwise, we may assume that the change is so small that this has no effect... quite good approximation, hence these lines may be commented
        # parameter_space.calculate_scaling_constants()
        # parameter_space.calculate_prior_widths()
        parameter_space.jacobi_preconditioning()

        # Update the OpenMM context
        parameter_space.update_systems(systems, parameter_space.optimizable_parameters_values_scaled)
    # ================================================================================= #
    #                              END APPLY CHARGE CORRECTION                          #
    # ================================================================================= #

    # ================================================================================= #
    #                                PARAMETERS OPTIMIZATION                            #
    # ================================================================================= #
    # Perform Optimization
    print("Using {} structures in the optimization.".format(np.sum([system.n_structures for system in systems])))
    parameters_values = self._perform_optimization(settings, optimizer, objective_function, parameter_space)

    # Update the parameters in the force field
    parameter_space.update_systems(systems, parameters_values)

    # Print Final Info of Objective Function
    objective_function.f(parameter_space.optimizable_parameters_values_scaled, opt_mode=False)

    # Write ParameterSpace restart file
    self.write_restart_pickle(settings.restart, interface, "restart_parameter_space_file", parameter_space.__dict__)

    # Write final system to xml file
    if not adaptive_parametrization:
        for system in systems:
            system.engine.write_system_xml("{}_reparametrized.xml".format(system.name))

    print("!=================================================================================!")
    print("!                PARAMETRIZATION TERMINATED SUCCESSFULLY :)                       !")
    print("!=================================================================================!")
    return systems, parameter_space, objective_function, optimizer
# -----------------------------------------------------------#
# #
# PRIVATE METHODS #
# #
# -----------------------------------------------------------#
def _perform_optimization(self, settings, optimizer, objective_function, parameter_space):
"""
Method that wraps the functions used to perform the optimization of the parameters.
Parameters
----------
settings : dict
Dictionary containing global ParaMol settings.
parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
Instance of parameter space.
objective_function : :obj:`ParaMol.Objective_function.objective_function.ObjectiveFunction`
Instance of objective function.
optimizer : :obj:`ParaMol.Optimizers.optimizer.Optimizer`
Instance of optimizer.
Returns
-------
parameters_values: list
List of optimized parameters
"""
# Determine whether to perform constrained or unconstrained optimization
constrained = False
for parameter in parameter_space.optimizable_parameters:
if parameter.param_key == "charge":
# If charges are present in the optimizable parameters, perform constrained optimization
constrained = True
break
print("Number of parameters to be optimized: {}.".format(len(parameter_space.optimizable_parameters_values_scaled)))
if constrained:
print("ParaMol will perform constrained optimization.")
constraints = self._get_constraints(scipy_method=settings.optimizer["scipy"]["method"],
parameter_space=parameter_space)
parameters_values = optimizer.run_optimization(f=objective_function.f,
parameters_values=parameter_space.optimizable_parameters_values_scaled,
constraints=constraints)
else:
print("ParaMol will perform unconstrained optimization.")
parameters_values = optimizer.run_optimization(f=objective_function.f,
parameters_values=parameter_space.optimizable_parameters_values_scaled)
return parameters_values
    def _apply_charge_correction(self, system):
        """
        Method that applies charge correction to the system.

        Parameters
        ----------
        system : :obj:`ParaMol.System.system.ParaMolSystem`
            Instance of ParaMol System.

        Notes
        ----
        Due to numerical errors, the numerical total charge of the system may not be equal to the real total charge of the system.
        Hence, in order to overcome this problem, which causes unexpected behaviour specially when constraints are being applied, the excess or deficiency of charge is shared equally amongst all atoms.
        This usually changes the charge in each atom by a very small (negligible) amount.
        Note that this method only changes the charges in the ParaMol ForceField of the ParaMolSystem. Therefore, it is required to update the OpenMM systems after this method is called.

        Returns
        -------
        total_charge : float
            Final total charge of the system (the int ``1`` is returned when
            the system has no "NonbondedForce" and no correction is applied).
        """
        if "NonbondedForce" in system.force_field.force_field:
            # Get total charge and calculate charge correction
            total_charge = self._get_total_charge(system)
            logging.info("Applying charge correction.")
            logging.info("Total charge before correction: {}e .".format(total_charge))
            # Share the excess/deficit of charge evenly over all atoms.
            charge_correction = total_charge / system.n_atoms
            logging.info("Charge correction {}e per atom.".format(charge_correction))
            # Add charge correction to all atoms
            for sub_force in system.force_field.force_field["NonbondedForce"]:
                for nonbonded_term in sub_force:
                    nonbonded_term.parameters["charge"].value -= charge_correction
            # Re-compute the total charge after the per-atom shift; it should
            # now be numerically close to the real total charge.
            total_charge = self._get_total_charge(system)
            logging.info("Total charge after correction: {}e .\n".format(total_charge))
            return total_charge
        else:
            logging.info("Not applying charge correction.")
            # NOTE(review): returns the int 1 (not a charge) in this branch —
            # confirm callers do not rely on the return value being a float
            # total charge.
            return 1
# -----------------------------------------------------------#
# #
# STATIC METHODS #
# #
# -----------------------------------------------------------#
@staticmethod
def _get_total_charge(system):
"""
Method that gets the system's total charge as in the ParaMol ForceField of the ParaMolSystem.
Parameters
----------
system : :obj:`ParaMol.System.system.ParaMolSystem`
Instance of ParaMol System.
Returns
-------
total_charge : float
Final total charge of the system.
"""
total_charge = 0.0
if "NonbondedForce" in system.force_field.force_field:
for sub_force in system.force_field.force_field["NonbondedForce"]:
for nonbonded_term in sub_force:
total_charge += nonbonded_term.parameters["charge"].value
return total_charge
@staticmethod
def _perform_assertions(settings, system):
"""
Method that asserts if the parametrization asked by the user contains the necessary data (coordinates, forces, energies, esp).
Parameters
----------
settings : dict
Dictionary containing global ParaMol settings.
system : :obj:`ParaMol.System.system.ParaMolSystem`
Instance of ParaMol System.
Returns
-------
True
"""
assert system.ref_coordinates is not None, "Conformations data was not set."
if settings.properties["include_energies"]:
assert system.ref_energies is not None, "Energies were not set."
if settings.properties["include_forces"]:
assert system.ref_forces is not None, "Forces were not set."
if settings.properties["include_esp"]:
assert system.ref_esp is not None, "ESP was not set."
assert system.ref_esp_grid is not None, "ESP was not set."
return True
@staticmethod
def _get_constraints(scipy_method, parameter_space, total_charge=0.0, threshold=1e-8):
"""
Method that gets the constraints to be passed into the SciPy optimizer.
Parameters
----------
scipy_method : str
SciPy method. Should be "COBYLA", SLSQP" or "trust-consr".
parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
Instance of parameter space.
total_charge : float
System's total charge
threshold : float
Constraint's threshold.
Returns
-------
list
List with constraints.
"""
if scipy_method == "COBYLA":
# Constraint functions must all be >=0 (a single function if only 1 constraint).
# Each function takes the parameters x as its first argument, and it can return either a single number or an array or list of numbers.
constraint_vector_charges = [param.multiplicity if param.param_key == "charge" else 0 for param in parameter_space.optimizable_parameters]
constraints = [
{'type': 'ineq', 'fun': lambda x, b: x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) - total_charge + threshold, 'args': (constraint_vector_charges,)},
{'type': 'ineq', 'fun': lambda x, b: -x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) + total_charge + threshold, 'args': (constraint_vector_charges,)}]
return constraints
elif scipy_method == "SLSQP":
# Total charge constraint defined as an equality
constraint_vector_charges = [param.multiplicity if param.param_key == "charge" else 0 for param in parameter_space.optimizable_parameters]
constraints = [
{'type': 'ineq', 'fun': lambda x, b: x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) - total_charge + threshold, 'args': (constraint_vector_charges,)},
{'type': 'ineq', 'fun': lambda x, b: -x.dot(np.asarray(b) * parameter_space.scaling_constants_dict["charge"]) + total_charge + threshold, 'args': (constraint_vector_charges,)}]
return constraints
elif scipy_method == "trust-constr":
from scipy.optimize import LinearConstraint
constraint_vector = [param.multiplicity if param.param_key == "charge" else 0 for param in parameter_space.optimizable_parameters]
return LinearConstraint(constraint_vector, [total_charge - threshold], [total_charge + threshold])
else:
raise NotImplementedError("SciPy method {} does not support constraints.".format(scipy_method))
| 2.40625 | 2 |
import sys
from flask import Flask
import telegram
import spotipy
import spotipy.util as util
from spotipy.oauth2 import SpotifyClientCredentials
from config import config
bot = None
spotify = None
def create_app(config_name):
    """
    Application factory: build and configure the Flask app.

    As side effects, initialises the module-level Telegram ``bot``
    (registering its webhook) and the module-level ``spotify`` client.

    :param config_name: key into the ``config`` mapping selecting the
        configuration object to use.
    :return: the configured Flask application.
    """
    global bot
    global spotify

    cfg = config[config_name]

    app = Flask(__name__)
    app.config.from_object(cfg)

    # Telegram bot: create it and point its webhook at this deployment.
    bot = telegram.Bot(cfg.TELEGRAM_API_TOKEN)
    webhook_url = cfg.WEBHOOK_URL
    if not bot.set_webhook(webhook_url):
        print('Webhook setup failed')
        sys.exit(1)
    print('Your webhook URL has been set to "{}"'.format(webhook_url))

    # Spotify client shared by the rest of the application.
    credentials = SpotifyClientCredentials(
        cfg.SPOTIFY_CLIENT_ID,
        cfg.SPOTIFY_CLIENT_SECRET
    )
    spotify = spotipy.Spotify(
        client_credentials_manager=credentials,
        auth=cfg.SPOTIFY_TOKEN
    )

    # Imported locally, as in the original — presumably to avoid a circular
    # import at module load time.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    return app
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from typing import Optional
from typing import List
from libcloud.common.base import ConnectionUserAndKey, BaseDriver
from libcloud.container.types import ContainerState
__all__ = [
"Container",
"ContainerImage",
"ContainerCluster",
"ClusterLocation",
"ContainerDriver",
]
class Container(object):
    """
    A single container deployed through a container driver.
    """

    def __init__(
        self,
        id,  # type: str
        name,  # type: str
        image,  # type: ContainerImage
        state,  # type: ContainerState
        ip_addresses,  # type: List[str]
        driver,  # type: ContainerDriver
        extra=None,  # type: dict
        created_at=None,  # type: str
    ):
        """
        :param id: Container id.
        :type id: ``str``

        :param name: The name of the container.
        :type name: ``str``

        :param image: The image this container was deployed using.
        :type image: :class:`.ContainerImage`

        :param state: The state of the container, e.g. running
        :type state: :class:`libcloud.container.types.ContainerState`

        :param ip_addresses: A list of IP addresses for this container
        :type ip_addresses: ``list`` of ``str``

        :param driver: ContainerDriver instance.
        :type driver: :class:`.ContainerDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        # IDs are normalised to strings; a falsy id becomes None.
        self.id = str(id) if id else None
        self.name = name
        self.image = image
        self.state = state
        self.ip_addresses = ip_addresses
        self.driver = driver
        self.extra = extra or {}
        self.created_at = created_at

    def start(self):
        # type: () -> Container
        """Ask this container's driver to start it."""
        return self.driver.start_container(container=self)

    def stop(self):
        # type: () -> Container
        """Ask this container's driver to stop it."""
        return self.driver.stop_container(container=self)

    def restart(self):
        # type: () -> Container
        """Ask this container's driver to restart it."""
        return self.driver.restart_container(container=self)

    def destroy(self):
        # type: () -> bool
        """Ask this container's driver to destroy it."""
        return self.driver.destroy_container(container=self)

    def __repr__(self):
        return "<Container: id={}, name={},state={}, provider={} ...>".format(
            self.id, self.name, self.state, self.driver.name
        )
class ContainerImage(object):
    """
    An image from which containers can be deployed.
    """

    def __init__(
        self,
        id,  # type: str
        name,  # type: str
        path,  # type: str
        version,  # type: str
        driver,  # type: ContainerDriver
        extra=None,  # type: dict
    ):
        """
        :param id: Container Image id.
        :type id: ``str``

        :param name: The name of the image.
        :type name: ``str``

        :param path: The path to the image
        :type path: ``str``

        :param version: The version of the image
        :type version: ``str``

        :param driver: ContainerDriver instance.
        :type driver: :class:`.ContainerDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        # IDs are normalised to strings; a falsy id becomes None.
        self.id = str(id) if id else None
        self.name = name
        self.path = path
        self.version = version
        self.driver = driver
        self.extra = extra or {}

    def deploy(self, name, parameters, cluster=None, start=True):
        # type: (str, str, Optional[ContainerCluster], bool) -> Container
        """Deploy a container from this image via the image's driver."""
        return self.driver.deploy_container(
            name=name, image=self, parameters=parameters, cluster=cluster, start=start
        )

    def __repr__(self):
        return "<ContainerImage: id={}, name={}, path={} ...>".format(
            self.id, self.name, self.path
        )
class ContainerCluster(object):
    """
    A cluster grouping deployed containers.
    """

    def __init__(
        self,
        id,  # type: str
        name,  # type: str
        driver,  # type: ContainerDriver
        extra=None,  # type: dict
    ):
        """
        :param id: Container Image id.
        :type id: ``str``

        :param name: The name of the image.
        :type name: ``str``

        :param driver: ContainerDriver instance.
        :type driver: :class:`.ContainerDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        # IDs are normalised to strings; a falsy id becomes None.
        self.id = str(id) if id else None
        self.name = name
        self.driver = driver
        self.extra = extra or {}

    def list_containers(self):
        # type: () -> List[Container]
        """List the containers deployed into this cluster."""
        return self.driver.list_containers(cluster=self)

    def destroy(self):
        # type: () -> bool
        """Destroy this cluster via its driver."""
        return self.driver.destroy_cluster(cluster=self)

    def __repr__(self):
        return "<ContainerCluster: id={}, name={}, provider={} ...>".format(
            self.id, self.name, self.driver.name
        )
class ClusterLocation(object):
    """
    A physical location where clusters can live (e.g. a region or
    data-center of the provider).
    """

    def __init__(
        self,
        id,  # type: str
        name,  # type: str
        country,  # type: str
        driver,  # type: ContainerDriver
    ):
        """
        :param id: Location ID.
        :type id: ``str``

        :param name: Location name.
        :type name: ``str``

        :param country: Location country.
        :type country: ``str``

        :param driver: Driver this location belongs to.
        :type driver: :class:`.ContainerDriver`
        """
        # Unlike the other container resources, the ID is always stringified,
        # even when falsy.
        self.id = str(id)
        self.name = name
        self.country = country
        self.driver = driver

    def __repr__(self):
        return "<ClusterLocation: id={}, name={}, country={}, driver={}>".format(
            self.id, self.name, self.country, self.driver.name
        )
class ContainerDriver(BaseDriver):
    """
    A base ContainerDriver class to derive from

    This class is always subclassed by a specific driver.
    """

    connectionCls = ConnectionUserAndKey
    name = None
    website = None

    supports_clusters = False
    """
    Whether the driver supports containers being deployed into clusters
    """

    def __init__(self, key, secret=None, secure=True, host=None, port=None, **kwargs):
        """
        :param key: API key or username to used (required)
        :type key: ``str``

        :param secret: Secret password to be used (required)
        :type secret: ``str``

        :param secure: Whether to use HTTPS or HTTP. Note: Some providers
                only support HTTPS, and it is on by default.
        :type secure: ``bool``

        :param host: Override hostname used for connections.
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :return: ``None``
        """
        super(ContainerDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port, **kwargs
        )

    def install_image(self, path):
        # type: (str) -> ContainerImage
        """
        Install a container image from a remote path.

        :param path: Path to the container image
        :type path: ``str``

        :rtype: :class:`.ContainerImage`
        """
        raise NotImplementedError("install_image not implemented for this driver")

    def list_images(self):
        # type: () -> List[ContainerImage]
        """
        List the installed container images

        :rtype: ``list`` of :class:`.ContainerImage`
        """
        raise NotImplementedError("list_images not implemented for this driver")

    def list_containers(
        self,
        image=None,  # type: Optional[ContainerImage]
        cluster=None,  # type: Optional[ContainerCluster]
    ):
        # type: (...) -> List[Container]
        """
        List the deployed container images

        :param image: Filter to containers with a certain image
        :type image: :class:`.ContainerImage`

        :param cluster: Filter to containers in a cluster
        :type cluster: :class:`.ContainerCluster`

        :rtype: ``list`` of :class:`.Container`
        """
        raise NotImplementedError("list_containers not implemented for this driver")

    def deploy_container(
        self,
        name,  # type: str
        image,  # type: ContainerImage
        cluster=None,  # type: Optional[ContainerCluster]
        parameters=None,  # type: Optional[str]
        start=True,  # type: bool
    ):
        # type: (...) -> Container
        """
        Deploy an installed container image

        :param name: The name of the new container
        :type name: ``str``

        :param image: The container image to deploy
        :type image: :class:`.ContainerImage`

        :param cluster: The cluster to deploy to, None is default
        :type cluster: :class:`.ContainerCluster`

        :param parameters: Container Image parameters
        :type parameters: ``str``

        :param start: Start the container on deployment
        :type start: ``bool``

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("deploy_container not implemented for this driver")

    def get_container(self, id):
        # type: (str) -> Container
        """
        Get a container by ID

        :param id: The ID of the container to get
        :type id: ``str``

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("get_container not implemented for this driver")

    def start_container(self, container):
        # type: (Container) -> Container
        """
        Start a deployed container

        :param container: The container to start
        :type container: :class:`.Container`

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("start_container not implemented for this driver")

    def stop_container(self, container):
        # type: (Container) -> Container
        """
        Stop a deployed container

        :param container: The container to stop
        :type container: :class:`.Container`

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("stop_container not implemented for this driver")

    def restart_container(self, container):
        # type: (Container) -> Container
        """
        Restart a deployed container

        :param container: The container to restart
        :type container: :class:`.Container`

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("restart_container not implemented for this driver")

    def destroy_container(self, container):
        # type: (Container) -> bool
        """
        Destroy a deployed container

        :param container: The container to destroy
        :type container: :class:`.Container`

        :rtype: ``bool``
        """
        raise NotImplementedError("destroy_container not implemented for this driver")

    def list_locations(self):
        # type: () -> List[ClusterLocation]
        """
        Get a list of potential locations to deploy clusters into

        :rtype: ``list`` of :class:`.ClusterLocation`
        """
        raise NotImplementedError("list_locations not implemented for this driver")

    def create_cluster(self, name, location=None):
        # type: (str, Optional[ClusterLocation]) -> ContainerCluster
        """
        Create a container cluster

        :param name: The name of the cluster
        :type name: ``str``

        :param location: The location to create the cluster in
        :type location: :class:`.ClusterLocation`

        :rtype: :class:`.ContainerCluster`
        """
        raise NotImplementedError("create_cluster not implemented for this driver")

    def destroy_cluster(self, cluster):
        # type: (ContainerCluster) -> bool
        """
        Delete a cluster

        :param cluster: The cluster to destroy
        :type cluster: :class:`.ContainerCluster`

        :return: ``True`` if the destroy was successful, otherwise ``False``.
        :rtype: ``bool``
        """
        raise NotImplementedError("destroy_cluster not implemented for this driver")

    def list_clusters(self, location=None):
        # type: (Optional[ClusterLocation]) -> List[ContainerCluster]
        """
        Get a list of potential locations to deploy clusters into

        :param location: The location to search in
        :type location: :class:`.ClusterLocation`

        :rtype: ``list`` of :class:`.ContainerCluster`
        """
        raise NotImplementedError("list_clusters not implemented for this driver")

    def get_cluster(self, id):
        # type: (str) -> ContainerCluster
        """
        Get a cluster by ID

        :param id: The ID of the cluster to get
        :type id: ``str``

        :rtype: :class:`.ContainerCluster`
        """
        # Bug fix: the message previously said "list_clusters", which made
        # the resulting NotImplementedError misleading for this method.
        raise NotImplementedError("get_cluster not implemented for this driver")
| 1.765625 | 2 |
alipay/aop/api/domain/AlipayEcoMycarMaintainShopModifyModel.py | articuly/alipay-sdk-python-all | 0 | 12766016 | <filename>alipay/aop/api/domain/AlipayEcoMycarMaintainShopModifyModel.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoMycarMaintainShopModifyModel(object):
    """
    Request model for the ``alipay.eco.mycar.maintain.shop.modify`` API.

    Each field is exposed as a property backed by an underscore-prefixed
    attribute.  List-valued fields copy their input and silently ignore
    non-list assignments, matching the behaviour of the generated SDK code
    that this declarative implementation replaces (the original repeated
    the same getter/setter and serialization boilerplate 26 times).
    """

    # All serializable fields, in their original declaration order (which is
    # also the insertion order of the dict built by ``to_alipay_dict``).
    _FIELDS = (
        "address", "alipay_account", "brand_ids", "city_code", "close_time",
        "contact_email", "contact_mobile_phone", "contact_name",
        "district_code", "ext_param", "industry_app_category_id",
        "industry_category_id", "lat", "lon", "main_image",
        "merchant_branch_id", "open_time", "other_images", "out_shop_id",
        "province_code", "shop_branch_name", "shop_id", "shop_name",
        "shop_tel", "shop_type", "status",
    )

    # Fields whose values are lists of items.
    _LIST_FIELDS = frozenset((
        "brand_ids", "industry_app_category_id", "industry_category_id",
        "other_images",
    ))

    def __init__(self):
        # Every backing attribute starts out unset (None).
        for field in self._FIELDS:
            setattr(self, "_" + field, None)

    def to_alipay_dict(self):
        """
        Serialize all set (truthy) fields into a plain dict.

        Values (and elements of list fields) that expose a
        ``to_alipay_dict`` method are serialized recursively.  Note that,
        like the generated code it replaces, list elements are converted
        *in place* on this instance.
        """
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                # Unset (None) and falsy values are omitted from the payload.
                continue
            if field in self._LIST_FIELDS and isinstance(value, list):
                for i in range(0, len(value)):
                    element = value[i]
                    if hasattr(element, 'to_alipay_dict'):
                        value[i] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """
        Build a model instance from a dict *d*; returns None for falsy input.
        """
        if not d:
            return None
        o = AlipayEcoMycarMaintainShopModifyModel()
        for field in AlipayEcoMycarMaintainShopModifyModel._FIELDS:
            if field in d:
                # Assignment goes through the property setter, so list
                # fields are copied exactly as before.
                setattr(o, field, d[field])
        return o


def _make_shop_modify_property(field):
    """Build the property object for one field of the model above."""
    attr = "_" + field

    def _getter(self):
        return getattr(self, attr)

    if field in AlipayEcoMycarMaintainShopModifyModel._LIST_FIELDS:
        def _setter(self, value):
            # List fields take a shallow copy and ignore non-list input,
            # exactly like the generated setters they replace.
            if isinstance(value, list):
                setattr(self, attr, list(value))
    else:
        def _setter(self, value):
            setattr(self, attr, value)

    return property(_getter, _setter)


# Attach one property per field to the model class.
for _field in AlipayEcoMycarMaintainShopModifyModel._FIELDS:
    setattr(AlipayEcoMycarMaintainShopModifyModel, _field,
            _make_shop_modify_property(_field))
del _field
| 2.125 | 2 |
frameworks/Python/falcon/app_waitress.py | http4k/FrameworkBenchmarks | 2 | 12766017 | <filename>frameworks/Python/falcon/app_waitress.py
#!/usr/bin/env python
from waitress import serve
from app import wsgi
from gunicorn_conf import bind, keepalive, workers
import logging
if __name__ == "__main__":
logging.basicConfig()
logging.getLogger().setLevel(logging.CRITICAL)
logging.disable(True)
serve(
app=wsgi,
listen=bind,
log_socket_errors=False,
threads=workers,
expose_tracebacks=False,
connection_limit=128,
channel_timeout=keepalive,
_quiet=True)
| 1.632813 | 2 |
server/runtime/cleanData/removeTrashData.py | Hackorama2019/donde-comprar | 0 | 12766018 | <filename>server/runtime/cleanData/removeTrashData.py<gh_stars>0
import json
import pandas as pd
import numpy as np
# Load the scraped records from disk; the payload of interest lives under
# the top-level "data" key.
with open("./totalData.json", 'r') as json_file:
    data = json.load(json_file)
data = np.array(data["data"])
futureDataFrame = []
columns = []
# Column names come from the first record's keys, minus "price".
for keys in data[0].keys():
    if(keys != "price"):
        columns.append(keys)
# Strip "price" from every record and collect the remaining values as rows.
# NOTE: this relies on dict insertion order matching the first record's key
# order for every row -- TODO confirm all records share the same key order.
for row in data:
    del row["price"]
    futureDataFrame.append([])
    for value in row.values():
        futureDataFrame[-1].append(value)
scrappedData = pd.DataFrame(data=futureDataFrame,columns=columns)
# NOTE(review): the next call's result is discarded; likely a leftover from
# interactive/notebook use.
scrappedData.head(10)
#scrappedData.to_csv("temp.csv")
print(scrappedData.head())
# NOTE(review): constructs and discards an empty DataFrame -- dead code.
pd.DataFrame()
print(scrappedData[scrappedData["categoria"] != "NoData" ])
#scrappedData[scrappedData["content"] != ""].assign(content="NoData")
# print(scrappedData[scrappedData["content"] != "NoData"])
# print(scrappedData.head(20))
# The triple-quoted block below is an inert string literal (commented-out
# draft loop), not executed code.
'''
while i < len(data):
    if(data.item(i))
    i = i+1
'''
# NOTE(review): redundant -- the `with` block above already closed the file.
json_file.close()
app/main.py | jjrpayne/starter-snake-python | 0 | 12766019 | import json
import os
import random
import bottle
from api import ping_response, start_response, move_response, end_response
@bottle.route('/')
def index():
    """Serve a minimal landing page pointing at the Battlesnake docs."""
    docs_note = '''
    Battlesnake documentation can be found at
       <a href="https://docs.battlesnake.com">https://docs.battlesnake.com</a>.
    '''
    return docs_note
@bottle.route('/static/<path:path>')
def static(path):
    """Serve a file located under ./static/.

    Used, for example, to expose the snake-head image URL returned in
    API responses.
    """
    return bottle.static_file(path, root='static/')
@bottle.post('/ping')
def ping():
    """Liveness probe.

    Keeps cloud platforms such as Heroku from putting the application
    instance to sleep.
    """
    return ping_response()
@bottle.post('/start')
def start():
    """Handle the game-start request: log the payload and declare our look."""
    data = bottle.request.json
    print(json.dumps(data))
    # Our snake's appearance, passed along as (color, head, tail).
    appearance = ("#0F74F4", "pixel", "pixel")
    return start_response(appearance)
@bottle.post('/move')
def move():
    """Score the four directions and return the best legal move."""
    data = bottle.request.json
    """
    TODO: Using the data from the endpoint request object, your
            snake AI must choose a direction to move in.
    """
    game_data = json.dumps(data)
    print(game_data)
    # Unpack the board state we need from the request payload.
    you = data["you"]
    body = you["body"]
    head = body[0]
    health = you["health"]
    width = data["board"]["width"]
    height = data["board"]["height"]
    # Per-direction score: higher is better.  Distance to enemy snakes is
    # added (prefer moves away from snakes); distance to food is subtracted
    # (prefer moves toward food).
    directions = {"up": 0, "down": 0, "left": 0, "right": 0}
    snakes = data["board"]["snakes"]
    foods = data["board"]["food"]
    # get avg distance of closest snake to each direction
    min_distances = [0, 0, 0, 0]
    # order: up, down, left, right
    for snake in snakes:
        # make sure snake is not you
        if(snake["id"] != you["id"]):
            distances = [0, 0, 0, 0]
            # NOTE(review): each iteration *overwrites* distances, so after
            # the loop only the snake's last body segment is measured.
            # Presumably a minimum (or sum) over all segments was intended
            # -- TODO confirm before changing gameplay behavior.
            for coords in snake["body"]:
                distances[0] = abs(coords["x"] - head["x"])
                distances[0] += abs(coords["y"] - (head["y"]-1))
                distances[1] = abs(coords["x"] - head["x"])
                distances[1] += abs(coords["y"] - (head["y"]+1))
                distances[2] = abs(coords["x"] - (head["x"]-1))
                distances[2] += abs(coords["y"] - head["y"])
                distances[3] = abs(coords["x"] - (head["x"]+1))
                distances[3] += abs(coords["y"] - head["y"])
            if (health < 50):
                # if health is low, use avg distance (focus on getting food)
                # if high, use total distance (focus on avoiding snakes)
                distances = [distance/len(snake["body"]) for distance in distances]
            # NOTE(review): 0 is used as the "unset" sentinel, so a genuine
            # Manhattan distance of 0 cannot win this comparison.
            for i in range(4):
                if(min_distances[i] == 0 or distances[i] < min_distances[i]):
                    min_distances[i] = distances[i]
    directions["up"] += min_distances[0]
    directions["down"] += min_distances[1]
    directions["left"] += min_distances[2]
    directions["right"] += min_distances[3]
    # -1 sentinel here avoids the zero-distance ambiguity noted above.
    min_distances = [-1, -1, -1, -1]
    # get distance of closest food
    for food in foods:
        distances = [0, 0, 0, 0]
        distances[0] = abs(food["x"] - head["x"])
        distances[0] += abs(food["y"] - (head["y"]-1))
        distances[1] = abs(food["x"] - head["x"])
        distances[1] += abs(food["y"] - (head["y"]+1))
        distances[2] = abs(food["x"] - (head["x"]-1))
        distances[2] += abs(food["y"] - head["y"])
        distances[3] = abs(food["x"] - (head["x"]+1))
        distances[3] += abs(food["y"] - head["y"])
        for i in range(4):
            if(min_distances[i] == -1 or distances[i] < min_distances[i]):
                min_distances[i] = distances[i]
    directions["up"] -= min_distances[0]
    directions["down"] -= min_distances[1]
    directions["left"] -= min_distances[2]
    directions["right"] -= min_distances[3]
    # stop snake from eating itself
    for seg in body:
        if("up" in directions and seg["x"] == head["x"] and seg["y"] == head["y"]-1):
            del directions["up"]
        if("down" in directions and seg["x"] == head["x"] and seg["y"] == head["y"]+1):
            del directions["down"]
        if("left" in directions and seg["x"] == head["x"]-1 and seg["y"] == head["y"]):
            del directions["left"]
        if("right" in directions and seg["x"] == head["x"]+1 and seg["y"] == head["y"]):
            del directions["right"]
        if len(directions) <= 1:
            # either snake is trapped, or there is only one viable direction
            # in either case, there is no point in checking any more segments
            break
    # avoid wall collisions
    if("up" in directions and head["y"] == 0):
        del directions["up"]
    if("down" in directions and head["y"] == height-1):
        del directions["down"]
    if ("left" in directions and head["x"] == 0):
        del directions["left"]
    if ("right" in directions and head["x"] == width-1):
        del directions["right"]
    print(directions)
    # Pick the highest-scoring surviving direction.
    if len(directions) >= 1:
        current_movement = max(directions, key=directions.get)
    else:
        # snake is trapped, just return any direction
        current_movement = "up"
    return move_response(current_movement)
@bottle.post('/end')
def end():
    """Handle the game-over request; this snake is stateless, so just log."""
    data = bottle.request.json
    print(json.dumps(data))
    return end_response()
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True)
)
| 3.5625 | 4 |
.history/blog/urls_20200416031229.py | abhinavmarwaha/demo-django-blog | 0 | 12766020 | <gh_stars>0
from django.contrib import admin
# BUGFIX: the original file contained the truncated statement
# `from djangoo.u`, a syntax error, and never imported `path` even though
# urlpatterns used it.
from django.urls import path

urlpatterns = [
    # BUGFIX: path() takes a plain route prefix, not a regex -- the original
    # r'^admin/' would have been matched literally.
    path('admin/', admin.site.urls),
]
| 1.375 | 1 |
src/_set_import_paths.py | gordonsilvera/project-implicit | 0 | 12766021 | #!/usr/bin/env python3
import site
import configs
SOURCE_CODE_FILEPATH = '/home/jovyan/work/src'
def set_import_path(import_path=configs.SOURCE_CODE_FILEPATH):
    """Register *import_path* as an extra site directory so its modules
    become importable, then report what was added."""
    site.addsitedir(import_path)
    message = "Added the following path to the import paths " \
              "list:\n{}".format(import_path)
    print(message)
if __name__ == '__main__':
set_import_path() | 2.234375 | 2 |
setup.py | libffcv/tf-ffcv | 7 | 12766022 | from setuptools import setup
# Package metadata for tf-ffcv: utilities bridging TensorFlow and FFCV.
setup(
    name="tf-ffcv",
    version="0.0.2",
    packages=["tf_ffcv"],
    description='Utilitaries to integrate tensorflow to FFCV',
    author='MadryLab',
    author_email='<EMAIL>',
)
| 1.078125 | 1 |
service/paged-rest-proxy.py | andebor/paged-rest-proxy | 0 | 12766023 | <gh_stars>0
import json
from flask import Flask, request, Response
import os
import requests
import logging
import dotdictify
from time import sleep
app = Flask(__name__)
logger = None
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logger = logging.getLogger('paged-datasource-service')
# Log to stdout
stdout_handler = logging.StreamHandler()
stdout_handler.setFormatter(logging.Formatter(format_string))
logger.addHandler(stdout_handler)
logger.setLevel(logging.DEBUG)
prop = os.environ.get('response_property', 'response')
response = None
headers = {}
if os.environ.get('headers') is not None:
headers = json.loads(os.environ.get('headers').replace("'","\""))
class DataAccess:
    # Generator that walks every page of a paged REST resource and yields
    # the entities found under each page's "results" key.
    def __get_all_paged_entities(self, path, url_parameters):
        logger.info("Fetching data from paged url: %s", path)
        url = os.environ.get("baseurl") + path
        # First request uses the caller-supplied value of the start-page
        # query parameter (name taken from the `startpage` env var).
        url = call_url(url, url_parameters, url_parameters.get(os.environ.get('startpage')))
        has_more_results = True
        page_counter = 1
        while has_more_results:
            # Optional throttle between page fetches.
            # NOTE(review): the log says milliseconds, but time.sleep()
            # takes seconds -- confirm the intended unit of `sleep`.
            if os.environ.get('sleep') is not None:
                logger.info("sleeping for %s milliseconds", os.environ.get('sleep') )
                sleep(float(os.environ.get('sleep')))
            logger.info("Fetching data from url: %s", url)
            req = requests.get(url, headers=headers)
            if req.status_code != 200:
                logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
                raise AssertionError ("Unexpected response status code: %d with response text %s"%(req.status_code, req.text))
            # NOTE(review): `dict` shadows the builtin within this scope.
            dict = dotdictify.dotdictify(json.loads(req.text))
            for entity in dict.results:
                yield entity
            # Continue while the flag at `next_page_path` is truthy ("true").
            if str_to_bool(dict.get(os.environ.get('next_page_path'))):
                page_counter += 1
                url = os.environ.get("baseurl") + path
                url = call_url(url, url_parameters, str(page_counter))
            else:
                has_more_results = False
        logger.info('Returning entities from %i pages', page_counter)
    # Public wrapper returning the lazy generator above.
    def get_paged_entities(self, path, url_parameters):
        print("getting all paged")
        return self.__get_all_paged_entities(path, url_parameters)
    # NOTE(review): __get_all_users is not defined anywhere in this class,
    # so calling get() raises AttributeError -- dead or unfinished code.
    def get(self, path):
        print('getting all users')
        return self.__get_all_users(path)
data_access_layer = DataAccess()
def stream_json(clean):
    """Lazily serialise the iterable *clean* as a JSON array.

    Yields the array piecewise ('[', each element as JSON, separating
    commas, ']') so large result sets can be streamed to the client.
    """
    items = iter(clean)
    yield '['
    try:
        yield json.dumps(next(items))
    except StopIteration:
        pass
    else:
        for entry in items:
            yield ','
            yield json.dumps(entry)
    yield ']'
def call_url(base_url, url_parameters, page):
    """Append *url_parameters* to *base_url* as a query string.

    The parameter whose name equals the `startpage` environment variable
    has its value replaced by *page*; all other parameters pass through
    unchanged.  Returns *base_url* untouched when there are no parameters.
    """
    page_param_name = os.environ.get('startpage')
    query_parts = []
    for key, value in url_parameters.items():
        if key == page_param_name:
            value = page
        query_parts.append(key + '=' + value)
    if not query_parts:
        return base_url
    return base_url + '?' + '&'.join(query_parts)
def str_to_bool(string_input):
    """Return True iff the textual form of *string_input* is 'true'
    (case-insensitive); any other value, including None, yields False."""
    text = str(string_input)
    return text.casefold() == "true"
@app.route("/<path:path>", methods=["GET"])
def get(path):
    """Fetch every page behind *path* upstream and stream the combined
    entities back to the caller as a single JSON array."""
    fetched = data_access_layer.get_paged_entities(path, request.args)
    return Response(stream_json(fetched), mimetype='application/json')
@app.route("/", methods=[ "POST"])
def postreceiver():
    """Resolve each posted entity's `post_url` via this app's own GET route
    and return the concatenated results."""
    entities = request.get_json()
    # Headers come from the env var as a quasi-JSON string with single quotes.
    header = json.loads(os.environ.get('headers').replace("'","\""))
    logger.info("Receiving entities")
    response_list = []
    # Requests are replayed against this very Flask app via a test client.
    client = app.test_client()
    # NOTE(review): stays None (-> Flask default 200) if no entity carries
    # a post_url key.
    statuscode = None
    if not isinstance(entities,list):
        entities = [entities]
    for entity in entities:
        for k,v in entity.items():
            # The key holding the relative URL is configurable via env.
            if k == os.environ.get('post_url', 'post_url'):
                logger.info("processing: " + v)
                response = client.get(v, headers=header)
                data = response.data
                statuscode = response.status_code
                # NOTE(review): the magic length-4 check presumably skips
                # the empty-ish payload '[{}]' -- TODO confirm intent.
                if not len(data.decode("utf-8")) == 4:
                    response_list += json.loads(data.decode("utf-8"))
    if len(response_list)== 0:
        response_list.append({})
    return Response(json.dumps(response_list),status=statuscode, mimetype='application/json')
@app.route("/notpaged/", methods=["POST"])
def notpaged():
    """Resolve each posted entity's `post_url` directly against the
    upstream base URL (no paging) and annotate the entity with the result.

    Returns the annotated entities, or the upstream error payload on the
    first non-200 response.
    """
    baseurl = os.environ.get('baseurl')
    entities = request.get_json()
    header = json.loads(os.environ.get('headers').replace("'","\""))
    logger.info("Receiving entities")
    if not isinstance(entities, list):
        entities = [entities]
    # BUGFIX: track the status of the last successful upstream call.  The
    # original read `response.status_code` after the loop, which raised
    # NameError whenever no entity carried a post_url key.
    last_status = 200
    for entity in entities:
        for k, v in entity.items():
            if k == os.environ.get('post_url', 'post_url'):
                url = baseurl+v
                logger.info("Fetching entity with url: " + str(url))
                response = requests.get(url, headers=header)
                # BUGFIX: the original used `is not 200`, an identity
                # comparison that is not guaranteed to behave like `!= 200`
                # for integers.
                if response.status_code != 200:
                    logger.error("Got Error Code: " + str(response.status_code) + " with text: " + response.text)
                    return Response(response.text, status=response.status_code, mimetype='application/json')
                entity[prop] = {
                    "status_code": response.status_code,
                    "response_text": json.loads(response.text)
                }
                last_status = response.status_code
    logger.info("Prosessed " + str(len(entities)) + " entities")
    return Response(json.dumps(entities), status=last_status, mimetype='application/json')
if __name__ == '__main__':
app.run(threaded=True, debug=True, host='0.0.0.0', port=os.environ.get('port',5002))
| 2.390625 | 2 |
inference/grpc_inference_generator.py | nncrystals/maskrcnn-benchmark | 1 | 12766024 | <gh_stars>1-10
import logging
import threading
import grpc
import io
import random
import torch
import torchvision
from PIL import Image
from inference.configuration import cfg
from inference.service import InferenceService
import inference.proto.inference_service_pb2_grpc as grpc_service
import inference.proto.inference_service_pb2 as grpc_def
import torch.multiprocessing as mp
# Consume client uploading stream, yield process results.
class GRPCInferenceGenerator(object):
    """Generator that consumes a client's image-upload stream and yields
    inference results back over gRPC."""

    def __init__(self, request_iterator, context: grpc.ServicerContext):
        super(GRPCInferenceGenerator, self).__init__()
        self.context = context
        self.request_iterator = request_iterator
        # Options from the first request (image shape, number of images to
        # echo back); None until that request has been seen.
        self.meta = None
        self.logger = logging.getLogger("GRPCInferenceGenerator")
        self.raw_image_queue = mp.Queue()
        self.inference_result_queue = mp.Queue()
        # NOTE(review): this thread is created but never started anywhere in
        # this class -- confirm whether the caller is expected to start it.
        self.request_grabber_thread = threading.Thread(target=self.request_iterator_grabber_task)

    def __iter__(self):
        return self

    def __next__(self):
        # Block until a batch of inference results is available.
        queue_result = self.inference_result_queue.get(True)
        for image_batch, result, image_name in queue_result:
            InferenceService.extract_information_one(result, self.meta["image_shape"], image_name)
        response = grpc_def.InferenceResult()
        return_images = []
        # How many input images the client asked to have echoed back:
        # 0 = none, -1 = all, otherwise a random sample of that size.
        n = self.meta["num_image_returned"]
        if n == 0:
            pass
        elif n == -1:
            return_images = queue_result[0]
        else:
            return_images = random.choices(queue_result[0], k=n)
        for img, img_name in return_images:
            image_in_response = grpc_def.Image()
            image_in_response.name = img_name
            image_in_response.images_data = img
            response.returned_images.append(image_in_response)
        # BUGFIX: the original fell off the end here, so iteration always
        # produced None instead of the populated InferenceResult.
        return response

    def request_iterator_grabber_task(self):
        """Drain the client's request stream into raw_image_queue."""
        to_tensor = torchvision.transforms.ToTensor()
        while True:
            req: grpc_def.ImageBatchRequest = next(self.request_iterator)
            if self.meta is None:
                if req.opt is None:
                    # BUGFIX: the original called self.abort(), which does
                    # not exist; abort the RPC through the gRPC context.
                    self.context.abort(grpc.StatusCode.INVALID_ARGUMENT,
                                       "First request must contain inference options")
                    return
                opt = req.opt
                # BUGFIX: self.meta was still None here in the original, so
                # the item assignments below raised TypeError.  Build the
                # dict first, then publish it.
                meta = {}
                meta["num_image_returned"] = opt.num_image_returned
                image_shape = (opt.image_width, opt.image_height)
                if opt.color_channel != 1:
                    image_shape += (opt.color_channel,)
                meta["image_shape"] = image_shape
                self.meta = meta
                self.logger.info(f"Request initialized {self.meta}")
            num_images = len(req.images)
            if num_images == 0:
                continue
            # Decode the request's images into (tensor, name) pairs.
            images = []
            img: grpc_def.Image
            for img in req.images:
                try:
                    image_io = Image.open(io.BytesIO(img.images_data))
                    image_tensor = to_tensor(image_io)
                    images.append((image_tensor, img.name))
                except Exception:
                    self.logger.warning("Failed to process image, skip")
            # BUGFIX: the original did put(*images), which mis-bound the
            # second and third tuples to Queue.put's block/timeout
            # parameters; enqueue each image individually instead.
            for item in images:
                self.raw_image_queue.put(item)
| 2.171875 | 2 |
src/jk_simpleexec/invoke_utils.py | jkpubsrc/python-module-jk-simpleexec | 0 | 12766025 | <reponame>jkpubsrc/python-module-jk-simpleexec
import os
import subprocess
import invoke
from . import _common as _common
from .CommandResult import CommandResult
from .TextDataProcessingPolicy import TextDataProcessingPolicy
try:
from fabric import Connection
except ImportError as ee:
pass
#
# Run a command locally or remotely.
# If a "cat <file>" is to be invoked *and* this is to be invoked locally, this method will detect this. In that case instead of running "cat" it will fall back to a regular file read
# for efficiency. Therefore you can access data on local and remote systems in a uniform way without spending too much thoughts on efficiency.
#
# @param fabric.Connection c (optional) Provide a fabric connection here if you want to run a command remotely.
#		If you specify <c>None</c> here the command will be run locally.
# @param str command (required) The command to run. Please note that this command will be interpreted by a shell.
# @param TextDataProcessingPolicy stdOutProcessing (optional) Overrides for the default stdout post-processing policy.
# @param TextDataProcessingPolicy stdErrProcessing (optional) Overrides for the default stderr post-processing policy.
# @param bool failOnNonZeroExitCode (optional) Raises an exception if the last command executed returned with a non-zero exit code.
# @return CommandResult The command's processed stdout/stderr and exit status.
#
def runCmd(
	c,
	command:str,
	stdOutProcessing:TextDataProcessingPolicy = None,
	stdErrProcessing:TextDataProcessingPolicy = None,
	failOnNonZeroExitCode:bool = True,
	) -> CommandResult:

	stdOutProcessing = _common.DEFAULT_STDOUT_PROCESSING.override(stdOutProcessing)
	stdErrProcessing = _common.DEFAULT_STDERR_PROCESSING.override(stdErrProcessing)

	# execute command locally

	if c is None:
		if command.startswith("cat "):
			# Local "cat <file>": read the file directly instead of spawning a shell.
			filePath = command[4:]
			if _common.debugValve:
				_common.debugValve("Using standard file reading for command: " + repr(command))
			if os.path.isfile(filePath):
				with open(filePath, "r") as f:
					rawStdOut = f.read()
				# BUGFIX: return a CommandResult like every other code path.
				# The original returned a bare (stdout, "", 0) tuple here,
				# violating the declared -> CommandResult contract.  The
				# output is also run through the same processing policies
				# for consistency with the subprocess path.
				stdOut = _common.processCmdOutput(rawStdOut, stdOutProcessing)
				stdErr = _common.processCmdOutput("", stdErrProcessing)
				return CommandResult(command, None, stdOut, stdErr, 0)
			else:
				raise Exception("No such file: " + repr(filePath))

		if _common.debugValve:
			_common.debugValve("Invoking via subprocess: " + repr(command))

		p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
		binStdOut, binStdErr = p.communicate()
		stdOut = binStdOut.decode("utf-8")
		stdErr = binStdErr.decode("utf-8")

		if _common.debugValve:
			_common.debugValve("exit status:", p.returncode)
			_common.debugValve("stdout:")
			for line in stdOut.split("\n"):
				_common.debugValve("\t" + repr(line))
			_common.debugValve("stderr:")
			for line in stdErr.split("\n"):
				_common.debugValve("\t" + repr(line))

		# BUGFIX: treat *any* non-zero exit status as failure; the original
		# checked "> 0", which missed negative return codes (process killed
		# by a signal on POSIX).
		if failOnNonZeroExitCode and p.returncode != 0:
			raise Exception("Command failed with exit code " + str(p.returncode) + ": " + repr(command))

		stdOut = _common.processCmdOutput(stdOut, stdOutProcessing)
		stdErr = _common.processCmdOutput(stdErr, stdErrProcessing)
		return CommandResult(command, None, stdOut, stdErr, p.returncode)

	# execute command remotely with fabric

	if (c.__class__.__name__ == "Connection") and (c.__class__.__module__ in [ "fabric", "fabric.connection" ]):
		if _common.debugValve:
			_common.debugValve("Invoking via fabric: " + repr(command))

		try:
			r = c.run(command, hide=True)
		except invoke.exceptions.UnexpectedExit as ee:
			# fabric raises on non-zero exit; recover the result object so
			# failOnNonZeroExitCode controls the failure policy uniformly.
			r = ee.result

		if _common.debugValve:
			_common.debugValve("exit status:", r.exited)
			_common.debugValve("stdout:")
			for line in r.stdout.split("\n"):
				_common.debugValve("\t" + repr(line))
			_common.debugValve("stderr:")
			for line in r.stderr.split("\n"):
				_common.debugValve("\t" + repr(line))

		# BUGFIX: same non-zero (rather than "> 0") policy as the local path.
		if failOnNonZeroExitCode and r.exited != 0:
			raise Exception("Command failed with exit code " + str(r.exited) + ": " + repr(command))

		stdOut = _common.processCmdOutput(r.stdout, stdOutProcessing)
		stdErr = _common.processCmdOutput(r.stderr, stdErrProcessing)
		return CommandResult(command, None, stdOut, stdErr, r.exited)

	# error

	raise Exception("Sorry, I don't know about " + repr(c.__class__) + " objects for parameter c.")
#
| 2.328125 | 2 |
huxley/core/tests/admin/test_registration.py | srisainachuri/huxley | 18 | 12766026 | # Copyright (c) 2011-2017 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
from django.urls import reverse
from django.test import TestCase
from huxley.utils.test import models
class RegistrationAdminTest(TestCase):
    # Conference data required by the registration model.
    fixtures = ['conference']
    def test_preference_export(self):
        '''Tests that the admin panel can export registration data.'''
        registration = models.new_registration()
        models.new_superuser(username='superuser', password='<PASSWORD>')
        self.client.login(username='superuser', password='<PASSWORD>')
        response = self.client.get(reverse('admin:core_registration_info'))
        # Expected CSV header row, in export column order.
        header = [
            "Registration Time", "School Name", "Total Number of Delegates",
            "Beginners", "Intermediates", "Advanced", "Spanish Speakers",
            "Chinese Speakers", "Assignments Finalized", "Waivers Complete",
            "Delegate Fees Paid", "Delegate Fees Owed", "Paid Registration Fee?",
            "Country 1", "Country 2", "Country 3", "Country 4", "Country 5",
            "Country 6", "Country 7", "Country 8", "Country 9", "Country 10",
            "Committee Preferences", "Registration Comments"
        ]
        fields_csv = ",".join(map(str, header)) + "\r\n"
        # Country preferences padded with blanks out to the 10 export columns.
        country_preferences = [cp
                               for cp in registration.country_preferences.all(
                               ).order_by('countrypreference')]
        country_preferences += [''] * (10 - len(country_preferences))
        committee_preferences = [', '.join(
            cp.name for cp in registration.committee_preferences.all())]
        # Expected data row, mirroring the export column order above.
        fields = [
            registration.registered_at,
            registration.school.name,
            registration.num_beginner_delegates +
            registration.num_intermediate_delegates +
            registration.num_advanced_delegates,
            registration.num_beginner_delegates,
            registration.num_intermediate_delegates,
            registration.num_advanced_delegates,
            registration.num_spanish_speaking_delegates,
            registration.num_chinese_speaking_delegates,
            registration.assignments_finalized,
            registration.waivers_completed,
            registration.delegate_fees_paid,
            registration.delegate_fees_owed,
            registration.registration_fee_paid
        ]
        fields.extend(country_preferences)
        fields.extend(committee_preferences)
        # NOTE(review): extend() with a string appends one element per
        # *character*; presumably append() was intended -- if this test
        # passes, the export endpoint does the same thing.  TODO confirm.
        fields.extend(registration.registration_comments)
        fields_csv += ','.join(map(str, fields))
        # NOTE(review): [:-3] strips the trailing 3 bytes of the response
        # body; assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(fields_csv, response.content[:-3].decode('utf-8'))
| 2.046875 | 2 |
ilscvr/step2/compression_conv_fc.py | chatzikon/DNN-COMPRESSION | 9 | 12766027 | import sys
sys.path.append('../')
import torchnet as tnt
from torch.autograd import Variable
import torch.nn.functional as F
from model_utils.load_utils import load_model, SAVE_ROOT
from model_utils.model_utils import get_layer_names
MODEL_NAME='mobilenetv2_imagenet'
model_init,model = load_model(MODEL_NAME)
layer_names, conv_layer_mask = get_layer_names(model,'conv')
layer_names_bn, bn_layer_mask = get_layer_names(model,'batchnorm')
fc_layer_mask = (1 - conv_layer_mask).astype(bool)
print(model)
bs = 64
from tensor_compression import get_compressed_model
import copy
import torch
import os
import numpy as np
CONV_SPLIT = 3
n_layers = len(layer_names)
n_layers_bn = len(layer_names_bn)
#decomposition_conv = 'cp3'
decomposition_conv = 'tucker2'
#X_FACTOR used (how much each layer will be compressed):
WEAKEN_FACTOR = None
X_FACTOR = 1.71
rank_selection_suffix = "{}x".format(X_FACTOR)
#specify rank of each layer
ranks_conv = [None if not (name.endswith('conv.2') or name.endswith('0.0') ) else -X_FACTOR
for name in layer_names[conv_layer_mask]]
ranks_fc = [-X_FACTOR] * (len(layer_names[fc_layer_mask]))
ranks_conv[0] = None
ranks_conv[1] = None
ranks_conv[2] = -X_FACTOR
ranks = np.array([None] * len(layer_names))
ranks[conv_layer_mask] = ranks_conv
decompositions = np.array([None] * len(layer_names))
decompositions[conv_layer_mask] = decomposition_conv
SPLIT_FACTOR = CONV_SPLIT
save_dir = "{}/models_finetuned/{}/{}/{}/layer_groups:{}".format(SAVE_ROOT,MODEL_NAME,
decomposition_conv,
rank_selection_suffix,
SPLIT_FACTOR)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
device = 'cuda'
split_tuples = np.array_split(np.arange(n_layers)[conv_layer_mask], CONV_SPLIT)[::-1]
split_tuples.reverse()
compressed_model = copy.deepcopy(model)
print(ranks)
for local_iter, tupl in enumerate(split_tuples):
lname,lname_bn, rank, decomposition = layer_names[tupl], layer_names_bn[tupl],ranks[tupl], decompositions[tupl]
if isinstance(tupl[0], np.ndarray):
print(lname, tupl[0])
compressed_model = get_compressed_model(MODEL_NAME,compressed_model,
ranks=rank, layer_names = lname, layer_names_bn = lname_bn,
decompositions = decomposition,
vbmf_weaken_factor = WEAKEN_FACTOR,return_ranks=True)
print(compressed_model)
#
filename = "{}/mobilenetv2_hooi.pth.tar".format(save_dir)
torch.save(compressed_model,filename)
print(filename)
def test(model,test_loader):
    """Evaluate *model* on *test_loader*; print stats and return accuracy."""
    model.eval()
    test_loss = tnt.meter.AverageValueMeter()
    correct = 0
    with torch.no_grad():
        # Loader is expected to yield (data, target, index) triples.
        for data, target,index in test_loader:
            data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            output = model(data)
            loss=F.cross_entropy(output, target)
            test_loss.add(loss.item()) # sum up batch loss
            pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    # NOTE(review): the value printed as "Average loss" is loss.item() of
    # the *last* batch only; the test_loss meter accumulated above is never
    # read -- presumably test_loss's mean was intended here.
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        loss.item(), correct, len(test_loader.sampler),
        100. * float(correct) / len(test_loader.sampler)))
    return float(correct) / float(len(test_loader.sampler))
from collections import defaultdict
def count_params(model):
    """Map each named parameter of *model* to its element count."""
    return defaultdict(None, {name: tensor.numel()
                              for name, tensor in model.named_parameters()})
def count_params_by_layers(params_count_dict):
    """Aggregate per-parameter counts into per-layer counts.

    Keys such as 'layer.weight' / 'layer.bias' are summed under 'layer'.
    Keys containing '-' (decomposed layers) are grouped under the portion
    of the name before the '-', minus its final dotted component.
    """
    params_count_dict_modif = defaultdict(int)
    for name, count in params_count_dict.items():
        if '-' not in name:
            # BUGFIX: the original used name.strip('.weight').strip('.bias'),
            # which strips *characters* from both ends, corrupting layer
            # names that merely start/end with those letters (e.g. 'white').
            # Remove the exact suffix instead.
            head = name
            for suffix in ('.weight', '.bias'):
                if head.endswith(suffix):
                    head = head[:-len(suffix)]
                    break
        else:
            head = '.'.join(name.split('-')[0].split('.')[:-1])
        params_count_dict_modif[head] += count
    return params_count_dict_modif
params_count_dict_m = count_params(model)
params_count_dict_cm = count_params(compressed_model)
params_count_dict_m_init = count_params(model_init)
num_parameters = sum([param.nelement() for param in compressed_model.parameters()])
num_parameters1 = sum([param.nelement() for param in model.parameters()])
num_parameters2 = sum([param.nelement() for param in model_init.parameters()])
print('Params, a:initial, b:pruned, c:decomposed ')
x1=sum(params_count_dict_m.values())/sum(params_count_dict_cm.values())
x11=sum(params_count_dict_m_init.values())/sum(params_count_dict_cm.values())
print('a: '+str(sum(params_count_dict_m_init.values())))
print('a: '+str(num_parameters2))
print('b: '+str(sum(params_count_dict_m.values())))
print('b: '+str(num_parameters1))
print('c: '+str(sum(params_count_dict_cm.values())))
print('c: '+str(num_parameters))
print('Params ratio, a:initial/decomposed, b:pruned/decomposed')
print('a: '+str(x11))
print('b: '+str(x1))
print('a: '+str(num_parameters2/num_parameters))
print('b: '+str(num_parameters1/num_parameters))
print('Params pruned, a:decomposed to initial, b:decomposed to pruned')
print('a: '+str(1-num_parameters/num_parameters2))
print('b: '+str(1-num_parameters/num_parameters1))
#
import sys
sys.path.append("../")
from flopco import FlopCo
model.cpu()
model_init.cpu()
compressed_model.cpu()
flopco_m = FlopCo(model, img_size=(1, 3, 224, 224), device='cpu')
flopco_m_init = FlopCo(model_init, img_size=(1, 3, 224, 224), device='cpu')
flopco_cm = FlopCo(compressed_model, img_size=(1, 3, 224, 224), device='cpu')
print('FLOPs a:init/decomposed, b:pruned/decomposed')
print('a: '+str(flopco_m_init.total_flops / flopco_cm.total_flops))
print('b: '+str(flopco_m.total_flops / flopco_cm.total_flops))
print('FLOPs pruned, a:decomposed to initial, b:decomposed to pruned')
print('a: '+str(1-flopco_cm.total_flops/flopco_m_init.total_flops) )
print('b: '+str(1-flopco_cm.total_flops/flopco_m.total_flops) )
| 2.375 | 2 |
student/migrations/0002_institute_names.py | YarinBou/SJMaster | 1 | 12766028 | <reponame>YarinBou/SJMaster
from django.db import migrations, transaction
class Migration(migrations.Migration):

    dependencies = [
        ('student', '0001_initial'),
    ]

    def generate_data(apps, schema_editor):
        """Seed the EducationalInstitution table with known institutions."""
        # BUGFIX: use the historical model via apps.get_model() instead of
        # importing student.models directly -- importing the live model is
        # the documented anti-pattern for data migrations and breaks once
        # the model's schema diverges from this migration's state.
        EducationalInstitution = apps.get_model('student', 'EducationalInstitution')
        institution_names = [
            "Technion - institute of Technology",
            "Hebrew University of Jerusalem",
            "Weizmann Institute of science ",
            "Bar Ilan University",
            "Tel Aviv University",
            "University of Haifa",
            "Ben Gurion University",
            "Open university",
            "Ariel University",
            "Reichman University",
            "College of law and business - Ramat Gan",
            "Academic college of Tel Aviv Yafo",
            "Afeka College of engineering, Tel Aviv",
            "Ashkelon Academic College",
            "Bezalel Academy of Art and Deisgn",
            "Center for Academic studies, Or Yehuda",
            "College of Management Academic Studies, Rishon Leziyon",
            "Dan Academic Studies",
            "Hadassah Academic College, Jerusalem",
            "Holon Institute of Technology",
            "Jerusalem Academy of Music and Dance",
            "Jerusalem College of Engineering",
            "Jerusalem College of Techonlogy",
            "Kinneret Academic College",
            "Lander Institute, Jerusalem",
            "Max Stern Academic College of Emek Yezreel",
            "Mivhar College, Bnei Brak",
            "Netanya Academic College",
            "Netanya Academic College of Law",
            "Ono Academic of College, Kiryat Ono",
            "ORT Braude college of Engineering, Karmiel",
            "Peres Academic Center, Rehovot",
            "Ruppin Academic Center",
            "Sapir Academic College",
            "Sami Shmaoon College of Engineering",
            "<NAME>, <NAME>",
            "Shalem College, Jerusalem",
            "Shenkar College of Engineering and Design",
            "Tel Hai Academic College",
            "Westren Galilee College, Acre",
            "Yehuda Regional College, Kiryat Araba",
            "Zefat Academic College"
        ]
        # Insert all rows atomically so a partial seed never persists.
        with transaction.atomic():
            for name in institution_names:
                EducationalInstitution(name=name).save()

    operations = [
        migrations.RunPython(generate_data),
    ]
| 2.15625 | 2 |
consensys_utils/flask/config.py | ConsenSys/python-utils | 4 | 12766029 | """
consensys_utils.flask.config
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
App configuration
:copyright: Copyright 2017 by ConsenSys France.
:license: BSD, see :ref:`license` for more details.
"""
def set_app_config(app, config=None):
    """Merge an optional configuration mapping into the application.

    :param app: Flask application
    :type app: :class:`flask.Flask`
    :param config: Optional Application configuration
    :type config: dict
    """
    if not config:
        return
    app.config.update(config)
| 1.820313 | 2 |
pylsl/motor_imagery_app.py | aume1/liblsl-Python | 0 | 12766030 | """Example program to show how to read a multi-channel time series from LSL."""
import math
import threading
# import pygame
from random import random
from sklearn.preprocessing import OneHotEncoder
from pylsl import StreamInlet, resolve_stream
import numpy as np
import pandas as pd
import time
from sklearn import model_selection
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import warnings
from statistics import mode
from datetime import datetime
import sys
import os
import models
import pywt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings('error')
def handle_keyboard_chunk(chunk, keys):
    """Return the ctrl-key statuses encoded in an LSL keyboard chunk.

    *chunk* is a (samples, timestamps) pair where each sample's first
    element is an action string such as 'LCONTROL pressed'.  *keys* is the
    previous state (rows of [left, right, time]) or None.  Returns
    (state, changed): when control-key events were found, `state` is a
    numpy array of [left, right, timestamp] rows and `changed` is True;
    otherwise the previous state (or the [[0, 0, 0]] default) and False.
    """
    ks, times = chunk
    # Keep only the control-key events; everything else is ignored.
    new_chunk = [[], []]
    for i in range(len(ks)):
        if ks[i][0] in ('LCONTROL pressed', 'LCONTROL released', 'RCONTROL pressed', 'RCONTROL released'):
            new_chunk[0].append(ks[i])
            new_chunk[1].append(times[i])
    chunk = tuple(new_chunk)
    if not chunk[0]:
        # No relevant events: report the previous state unchanged.
        if keys is None:
            return [[0, 0, 0]], False
        return keys, False
    if keys is None:
        # BUGFIX: start from a flat [left, right] state.  The original used
        # [[0, 0, 0]] here, so `keys[0] = 1` replaced the inner list and
        # produced ragged rows that crashed np.append below.
        keys = [0, 0]
    else:
        keys = list(keys[-1][:2])
    out = np.zeros((0, 3))  # rows appended as [LCTRL, RCTRL, timestamp]
    for i in range(len(chunk[0])):
        action = chunk[0][i][0]
        timestamp = chunk[1][i]
        if action == 'LCONTROL pressed':
            keys[0] = 1
        elif action == 'LCONTROL released':
            keys[0] = 0
        elif action == 'RCONTROL pressed':
            keys[1] = 1
        elif action == 'RCONTROL released':
            keys[1] = 0
        else:
            continue
        out = np.append(out, [keys + [timestamp]], axis=0)
    if len(out) == 0:
        return keys, False
    return out, True
def normalise_list(x):
    """Min-max normalise *x* to [0, 1]; on a constant input, fall back to
    zeros."""
    x = np.array(x)
    try:
        out = ((x -x.min()) / (x.max() - x.min())).tolist()
    # NOTE(review): catching Warning only works because the module calls
    # warnings.filterwarnings('error') at import time, turning the 0/0
    # RuntimeWarning of a constant input into an exception -- this function
    # silently misbehaves (returns NaNs) without that global setting.
    except Warning:
        # NOTE(review): the fallback indexes x[0], assuming a 2-D input;
        # a constant 1-D input would raise here -- TODO confirm inputs.
        out = [np.zeros(len(x[0])).tolist()]
    return out
def normalise_eeg(eeg):
    """De-interleave the flat sample stream into 8 channels and min-max
    normalise each channel independently."""
    channels = []
    for ch in range(8):
        channels.append(normalise_list(eeg[ch::8]))
    return channels
def my_filter(x, y, a=None, b=None):
    """Apply one IIR filter step in place to the newest row of *y*.

    *x* holds raw samples and *y* filtered samples; rows carry 3 trailing
    metadata fields (timestamp, key states) that are left untouched.  The
    default coefficients implement a 40 Hz low-pass filter.  Returns *y*.
    """
    if a is None:
        a = [-1, 0.331]
    if b is None:
        b = [0.3345, 0.3345]
    # Need enough history rows before the recursion can be evaluated.
    if len(y) > len(a):
        n_channels = len(y[-1]) - 3
        for ch in range(n_channels):
            acc = 0
            for tap in range(len(a)):
                acc += a[tap] * y[-1 - tap][ch] + b[tap] * x[-1 - tap][ch]
            y[-1][ch] = acc
    return y
def fir_filter(x, y, a=None):
    """Apply one FIR filter step in place to the newest row of *y*.

    *x* holds raw samples; rows carry 3 trailing metadata fields that are
    left untouched.  The default taps implement a 50 Hz notch filter.
    Returns *y*.
    """
    if a is None:
        a = [1.4, -0.8, 1.4]  # 50 Hz notch filter
    # Only filter once enough input history exists for every tap.
    if len(x) >= len(a):
        for ch in range(len(y[-1]) - 3):
            y[-1][ch] = sum(a[tap] * x[-1 - tap][ch] for tap in range(len(a)))
    return y
class EEG:
def __init__(self, user_id, game, data_length=100, ignore_lsl=False, ignore_BCI=False):
# first resolve an EEG stream on the lab network
self.user_id = user_id
self.game = game
self.data_length = data_length
if not ignore_lsl:
print("looking for an Keyboard stream...")
self.keyboard = resolve_stream('name', 'Keyboard')
print(self.keyboard)
self.keyboard_inlet = StreamInlet(self.keyboard[0])
if not ignore_lsl and not ignore_BCI:
print("looking for an EEG stream...")
self.eeg = resolve_stream('type', 'EEG')
print(self.eeg)
self.eeg_inlet = StreamInlet(self.eeg[0])
self.eeg_dataset = [] # of the format [channel0, c1, ..., timestamp, left_shift, right_shift]
self.filtered = []
self.fft = []
self.keys = None
self.running = False
self.clf = None
self.acc = 0
@property
def prev_dl(self):
return np.array([item[:-3] for item in self.filtered[-1:-1-self.data_length:-1]]).T.tolist()
    def eeg_sample(self, data=None):
        """Ingest one EEG sample: append raw data, run the IIR filter, and
        (once enough history exists) append an FFT feature row.

        data: optional pre-recorded row (offline replay); when None, a live
        sample is pulled from the LSL inlet and tagged with the current keys.
        """
        if data is None:
            sample, timestamp = self.eeg_inlet.pull_sample()
            data = [sample + [timestamp] + list(self.keys[-1][:2])]
        self.eeg_dataset += data
        # Seed the new filtered row with zeros + the metadata columns;
        # my_filter() then fills the channel values in place.
        self.filtered += [[0]*8 + list(data[0][-3:])]
        self.filtered = my_filter(self.eeg_dataset, self.filtered,
                                  b=[float(i) for i in '0.3749 -0.2339 0 0.2339 -0.3749'.split()],
                                  a=[-1*float(i) for i in '1.0000 -1.8173 1.9290 -1.3011 0.2154'.split()]) # this one also works well!
        # self.filtered = fir_filter(self.eeg_dataset, self.filtered) # alternative that also works well
        if len(self.filtered) > self.data_length:
            norm = normalise_eeg(self.prev_dl)
            fft = np.array([np.abs(np.fft.fft(n)) for n in norm]).flatten().tolist()
            # Append FFT features plus the [left, right] key labels.
            self.fft += [fft + self.filtered[-1][-2:]]
    def mi_to_fft(self):
        """Convert this user's recorded raw sessions (mi_*.npy) that do not yet
        have a matching fft_*.npy into FFT feature files by replaying each row
        through eeg_sample()."""
        hist_mi = [f for f in os.listdir('users/data') if 'mi_' + self.user_id == f[:5]]
        hist_fft = [f for f in os.listdir('users/data') if 'fft_' + self.user_id == f[:6]]
        needed_hist_fft = []
        for fmi in hist_mi:
            # fmi[3:] strips the 'mi_' prefix so names can be compared.
            if 'fft_' + fmi[3:] not in hist_fft:
                needed_hist_fft.append(fmi)
        print('need to convert to fft:', needed_hist_fft)
        print('loading {}'.format(needed_hist_fft))
        for mi_file in needed_hist_fft:
            loaded_data = np.load('users/data/' + mi_file)
            # Reset state so each file is converted in isolation.
            self.eeg_dataset = []
            self.filtered = []
            self.fft = []
            t0 = time.time()
            for row in range(len(loaded_data)):
                data = [loaded_data[row]]
                self.eeg_sample(data)
                # Periodic progress estimate (every 1000 rows, offset 500).
                if row % 1000 == 500:
                    tr = (time.time() - t0) * (len(loaded_data) - row) / row
                    print('time remaining: {}'.format(tr))
            print()
            fft_name = 'users/data/fft_' + mi_file[3:]
            print('outputting to', fft_name)
            np.save(fft_name, self.fft)
def gather_data(self):
thread = threading.Thread(target=self.__gather)
thread.start()
return thread
    def __gather(self):
        """Worker loop: pull keyboard state and EEG samples until
        self.running is cleared, then persist the session to disk."""
        self.running = True
        self.eeg_dataset = []
        self.filtered = []
        self.fft = []
        while self.running:
            # get a new sample (you can also omit the timestamp part if you're not interested in it)
            chunk = self.keyboard_inlet.pull_chunk()
            self.keys, is_new = handle_keyboard_chunk(chunk, self.keys)
            self.eeg_sample()  # get and process the latest sample from the EEG headset
        self.save_training()
def train(self, classifier='KNN', include_historical=False, **kwargs):
thread = threading.Thread(target=self.__train, args=(classifier, include_historical), kwargs=kwargs)
thread.start()
return thread
    def __train(self, classifier='KNN', include_historical=False, **kwargs):
        """Build a balanced dataset from self.filtered, one-hot encode the key
        labels, and fit/evaluate self.clf (which must already exist, e.g. via
        _build_model()).

        classifier / include_historical / kwargs are currently unused here —
        historical-data loading and in-place classifier construction were
        disabled (see git history); self.clf is assumed pre-built.
        Feature rows are 100-sample sliding windows of 8 filtered channels
        (800 values) plus the [left, right] labels (columns c800/c801).
        """
        print('data recording complete. building model... (this may take a few moments)')
        def flatten(t):
            # Flatten one level of nesting.
            return [item for sublist in t for item in sublist]
        def get_fmi_dl(index, data, length=100):
            # One training row: `length` samples of channel data flattened,
            # labelled with the final sample's [left, right] keys.
            np_fmi = np.array(data[index:index + length])
            x = flatten(np_fmi[:, :-3].tolist())
            y = np_fmi[-1, -2:].tolist()
            return [x + y]
        data = self.filtered
        data_o = []
        for line in range(len(data)-100):
            data_o += get_fmi_dl(line, data)
        print('balancing data')
        print('data shape:', np.array(data_o).shape)
        fft_df = pd.DataFrame(data_o, columns=['c' + str(i) for i in range(802)])
        # Class label: 0=none, 1=left, 2=right, 3=both (both is discarded).
        fft_df['y'] = fft_df.apply(lambda row: row.c800 + 2 * row.c801, axis=1)
        fft_df = fft_df.loc[fft_df['y'] != 3].reset_index(drop=True)
        m = min(fft_df.y.value_counts())  # grab the count of the least common y value (left, right, or none)
        y_vals = fft_df.y.unique()
        print('got min={}, unique={}'.format(m, y_vals))
        randomized_df = fft_df.sample(frac=1).reset_index(drop=True)
        # Undersample every class to m rows so the dataset is balanced.
        out = np.zeros((m*3, 803))
        for i, y in enumerate(y_vals):
            arr = randomized_df.loc[randomized_df['y'] == y].head(m).to_numpy()
            out[i*m:i*m + m] = arr
        print('consolidated data')
        randomized_df = pd.DataFrame(out)
        randomized_df = randomized_df.sample(frac=1).reset_index(drop=True)
        print('reordered data')
        Y = randomized_df[[800, 801]].to_numpy()
        del randomized_df[800], randomized_df[801], randomized_df[802]
        X = randomized_df.to_numpy()
        print('created X and Y. X.shape={}, Y.shape={}'.format(X.shape, Y.shape))
        # One-hot encode; the four [[0]..[3]] prefix rows guarantee the encoder
        # sees all classes, and are dropped again via [4:] below.
        Y_i = [[0], [1], [2], [3]] + [[2*Y[i][-2] + Y[i][-1]] for i in range(len(Y))]
        enc = OneHotEncoder()
        print('fitting one hot encoder')
        enc.fit(Y_i)
        Y = enc.transform(Y_i).toarray()[4:]
        if len(X) == 0 or len(Y) == 0:
            print('no training data provided')
            return
        def train_test_split(X, Y, test_size):
            # NOTE(review): with test_size=0.3 the *first* 30% is returned as
            # the training set and the remaining 70% as test — this looks
            # inverted relative to the parameter name; confirm intent.
            stop_idx = int(len(Y) * test_size)
            return X[:stop_idx], X[stop_idx:], Y[:stop_idx], Y[stop_idx:]
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
        print('training model ({} classifier)...'.format(self.clf))
        self.clf.fit(X_train, Y_train)
        print('analysing model...')
        preds = self.clf.predict(X_test)
        acc = accuracy_score(Y_test, preds)
        print('combined acc:', acc)
        self.acc = round(acc, 4)
        print('combined acc:', self.acc)
        print()
        print('model complete.')
def build_model(self, classifier, **kwargs):
thread = threading.Thread(target=self._build_model, args=(classifier, ), kwargs=kwargs)
thread.start()
return thread
def _build_model(self, classifier, **kwargs):
if classifier == 'KNN':
self.clf = models.KNN(n_neighbors=3, **kwargs)
elif classifier == "LDA":
self.clf = models.LDA()
elif classifier == "SVM":
self.clf = models.SVM(**kwargs)
elif classifier == "ANN":
self.clf = models.ANN(**kwargs)
elif classifier == "RNN":
self.clf = models.RNN(**kwargs)
elif classifier == "CNN":
self.clf = models.CNN2(transfer=True, **kwargs)
else:
print(f'no valid classifier provided ({classifier}). Using KNN')
self.clf = models.KNN(n_neighbors=3)
def save_training(self):
suffix = '_' + datetime.today().strftime('%d%m%y_%H%M%S') + '.npy'
print('saving eeg data:', np.array(self.eeg_dataset).shape)
eeg_file = './users/data/mi_' + self.user_id + suffix
np.save(eeg_file, self.eeg_dataset)
print('saving filtered eeg data:', np.array(self.filtered).shape)
filt_eeg_file = './users/data/fmi_' + self.user_id + suffix
np.save(filt_eeg_file, self.filtered)
print('saving filtered fft data:', np.array(self.fft).shape)
fft_eeg_file = './users/data/fft_' + self.user_id + suffix
np.save(fft_eeg_file, self.fft)
def test(self, send_to=None):
thread = threading.Thread(target=self.__test, args=(send_to, ))
thread.start()
return thread
    def __test(self, send_to=None):
        """Worker loop: classify each 100-sample window with self.clf and
        forward (left, right) booleans to `send_to` until self.running clears.

        Before enough samples accumulate, (0, 0) is sent so the receiver
        still gets a steady stream of updates.
        """
        assert self.clf
        self.running = True
        self.eeg_dataset = []
        self.filtered = []
        self.fft = []
        last_preds = []  # reserved for (currently disabled) prediction smoothing
        def flatten(t):
            # Flatten one level of nesting.
            return [item for sublist in t for item in sublist]
        def get_fmi_dl(index, data, length=100):
            # One feature row: `length` samples of channel data, flattened.
            np_fmi = np.array(data[index:index + length])
            x = flatten(np_fmi[:, :-3].tolist())
            return [x]
        while self.running:
            self.eeg_sample()
            if len(self.filtered) > self.data_length:
                pred = self.clf.predict(get_fmi_dl(-101, self.filtered))
                # One-hot prediction columns: [none, left, right(, both)];
                # index 2 maps to both directions active.
                left = pred[0][0] or pred[0][2]
                right = pred[0][1] or pred[0][2]
                if send_to:
                    send_to((left, right))
            elif send_to:
                send_to((0, 0))
def close(self):
print('closing eeg and keyboard streams')
if hasattr(self, 'eeg_inlet'):
self.eeg_inlet.close_stream()
if hasattr(self, 'keyboard_inlet'):
self.keyboard_inlet.close_stream()
def main(user_id, train_time=30, test_time=30, classifier='CNN', model=''):
    """Full experiment for the motor-imagery game: record a keyboard-driven
    training session, train the classifier, then run an EEG-controlled test
    session and report scores/accuracy.  Exits the process when done.
    """
    import motor_bci_game
    # Require a 2-character user id (re-prompt until valid).
    while len(user_id) != 2:
        user_id = str(int(input('please input the user ID provided by the project investigator (Cameron)')))
        if len(user_id) == 2:
            print('user_id={}'.format(user_id))
            break
        print('user ID must be 2 digits, you put', len(user_id))
    game = motor_bci_game.Game()
    eeg = EEG(user_id, game)
    gathering = eeg.gather_data()  # runs in background
    eeg.build_model(classifier=classifier, model=model)  # runs in background
    game.run_keyboard(run_time=train_time)  # runs in foreground
    eeg.running = False
    while gathering.is_alive(): pass  # busy-wait for recording to flush/save
    print(game.e.scores)
    game.e.scores = [0]
    training = eeg.train(classifier=classifier, include_historical=False)
    while training.is_alive(): pass
    eeg.running = False  # stop eeg gathering once game completes
    time.sleep(5)
    print('testing')
    testing = eeg.test(send_to=game.p1.handle_keys)
    game.run_eeg(test_time)
    eeg.running = False
    while testing.is_alive():
        pass
    eeg.close()
    print('scores:', game.e.scores)
    print('acc:', eeg.acc)
    game.quit()
    sys.exit()
def main_game_2(user_id, train_time=30, test_time=30, classifier='CNN'):
    """Same record/train/test pipeline as main(), but for the block-catching
    game (game_2); reports the fraction of blocks caught.  Exits when done.
    """
    import game_2
    # Require a 2-character user id (re-prompt until valid).
    while len(user_id) != 2:
        user_id = str(int(input('please input the user ID provided by the project investigator (Cameron)')))
        if len(user_id) == 2:
            print('user_id={}'.format(user_id))
            break
        print('user ID must be 2 digits, you put', len(user_id))
    game = game_2.Game()
    eeg = EEG(user_id, game)
    gathering = eeg.gather_data()  # runs in background
    eeg.build_model(classifier=classifier)  # runs in background
    game.run_keyboard(run_time=train_time)  # runs in foreground
    eeg.running = False
    while gathering.is_alive(): pass
    training = eeg.train(classifier=classifier, include_historical=False)
    while training.is_alive(): pass
    eeg.running = False  # stop eeg gathering once game completes
    print('scores:', game.block.scores)
    game.block.scores = [0]
    print('testing')
    testing = eeg.test(send_to=game.block.handle_keys)
    game.run_eeg(test_time)
    eeg.running = False
    while testing.is_alive():
        pass
    eeg.close()
    print('scores:', game.block.scores)
    # Each score entry is blocks caught in a row; the gaps between entries
    # are the missed blocks, hence the +len(scores)-1.
    total = sum(game.block.scores) + len(game.block.scores) - 1
    print('total blocks:', total)
    print('percent caught:', sum(game.block.scores) / total)
    game.quit()
    sys.exit()
def train_test(user_id):
    """Offline training run: build and evaluate a CNN model from previously
    recorded data (no LSL streams).  Exits the process when done."""
    import motor_bci_game
    game = motor_bci_game.Game()
    eeg = EEG(user_id, game, ignore_lsl=True)
    training = eeg.train(classifier='CNN', include_historical=False, model='new_test')
    while training.is_alive(): pass  # busy-wait for the training thread
    eeg.close()
    print('scores:', game.e.scores)
    game.quit()
    sys.exit()
def convert_mi_to_fft(user_id):
    """Offline utility: convert this user's recorded raw sessions into FFT
    feature files (see EEG.mi_to_fft).  Exits the process when done."""
    import motor_bci_game
    print('user_id={}'.format(user_id))
    game = motor_bci_game.Game()
    eeg = EEG(user_id, game, ignore_lsl=True)
    eeg.mi_to_fft()
    eeg.close()
    game.quit()
    sys.exit()
if __name__ == '__main__':
    user_id = '-5'  # -9 is cameron post-training recordings, -8 is same for kevin
    # mode selects which entry point runs: 1=inspect a saved file,
    # 2=full game-1 experiment, 3=mi->fft conversion, 4=offline training,
    # 5=full game-2 experiment.
    mode = 2
    if mode == 1:
        good = np.load('users/data/fmi_01_300921_211231.npy')
        print(pd.DataFrame(good))
    elif mode == 2:
        main(user_id=user_id,
             train_time=30,
             test_time=30,
             model='models/p00_models/cnn_model_2_200',
             classifier='LDA'
             )
    elif mode == 3:
        convert_mi_to_fft(user_id)
    elif mode == 4:
        train_test(user_id)
    elif mode == 5:
        main_game_2(user_id=user_id,
                    train_time=30,
                    test_time=30)
    print('done?')
| 2.578125 | 3 |
at_seq.py | AlejandraWilson/learning_python | 0 | 12766031 | <filename>at_seq.py<gh_stars>0
#!/usr/bin/env python3
import random
#random.seed(1) # comment-out this line to change sequence each time
# Write a program that stores random DNA sequence in a string
# The sequence should be 30 nt long
# On average, the sequence should be 60% AT
# Calculate the actual AT fraction while generating the sequence
# Report the length, AT fraction, and sequence
# Generate a random 30 nt DNA sequence that is ~60% A/T on average, then
# report its length, observed AT fraction, and the sequence itself.
length = 30
seq = ''
at = 0.6  # target AT fraction (previously duplicated as a hard-coded 0.6)
count = 0
for _ in range(length):
    # With probability `at` pick an A/T, otherwise a C/G.
    if random.random() < at:
        seq += random.choice('AT')
    else:
        seq += random.choice('CG')
print(seq)
# Count the A/T bases actually generated and derive the observed fraction.
count = sum(1 for nt in seq if nt in 'AT')
fraction = count / length
print('%d %.3f %s' % (len(seq), fraction, seq))
"""
30 0.6666666666666666 ATTACCGTAATCTACTATTAAGTCACAACC
"""
| 4.0625 | 4 |
mongomock/connection.py | thedrow/mongomock | 0 | 12766032 | import itertools
from .database import Database
class Connection(object):
    """In-memory stand-in for pymongo's Connection.

    Databases are created lazily on first access (by item or attribute) and
    held in a per-connection dict; no network I/O occurs.
    """
    # Process-wide counter giving each connection a unique id.
    _CONNECTION_ID = itertools.count()
    def __init__(self, host = None, port = None, max_pool_size = 10,
                 network_timeout = None, document_class = dict,
                 tz_aware = False, _connect = True, **kwargs):
        # Most pymongo-compatible arguments are accepted but ignored.
        super(Connection, self).__init__()
        self.host = host
        self.port = port
        self._databases = {}
        self._id = next(self._CONNECTION_ID)
        self.document_class = document_class
    def __getitem__(self, db_name):
        """Return the named database, creating it on first access."""
        db = self._databases.get(db_name, None)
        if db is None:
            db = self._databases[db_name] = Database(self, db_name)
        return db
    def __getattr__(self, attr):
        # Attribute access falls through to database lookup (conn.mydb).
        return self[attr]
    def __repr__(self):
        identifier = []
        host = getattr(self,'host','')
        port = getattr(self,'port',None)
        if host is not None:
            identifier = ["'{0}'".format(host)]
        if port is not None:
            identifier.append(str(port))
        return "mongomock.Connection({0})".format(', '.join(identifier))
    def server_info(self):
        """Return a canned server-info document mimicking MongoDB 2.0.6."""
        return {
            "version" : "2.0.6",
            "sysInfo" : "Mock",
            "versionArray" : [
                2,
                0,
                6,
                0
            ],
            "bits" : 64,
            "debug" : False,
            "maxBsonObjectSize" : 16777216,
            "ok" : 1
        }
    def database_names(self):
        """Return the names of all databases created so far."""
        return list(self._databases.keys())
# Connection is now deprecated; it's called MongoClient instead
class MongoClient(Connection):
    """Modern-named alias for the deprecated ``Connection`` class."""
    def stub(self):
        # Placeholder method; the subclass adds no behaviour of its own.
        pass
| 2.359375 | 2 |
tests/brusselator/test_brusselator.py | masumis/pymgrit | 6 | 12766033 | """
Tests brusselator
"""
import numpy as np
from pymgrit.brusselator.brusselator import Brusselator
from pymgrit.brusselator.brusselator import VectorBrusselator
def test_brusselator_constructor():
    """Constructor sets defaults a=1, b=3 and the initial vector (0, 1)."""
    brusselator = Brusselator(t_start=0, t_stop=1, nt=11)
    np.testing.assert_equal(brusselator.a, 1)
    np.testing.assert_equal(brusselator.b, 3)
    np.testing.assert_equal(True, isinstance(brusselator.vector_template, VectorBrusselator))
    np.testing.assert_equal(True, isinstance(brusselator.vector_t_start, VectorBrusselator))
    np.testing.assert_equal(brusselator.vector_t_start.get_values(), np.array([0, 1]))
def test_brusselator_step():
    """One time step from the zero vector matches known reference values."""
    brusselator = Brusselator(t_start=0, t_stop=1, nt=11)
    brusselator_res = brusselator.step(u_start=VectorBrusselator(), t_start=0, t_stop=0.1)
    np.testing.assert_almost_equal(brusselator_res.get_values(), np.array([0.08240173, 0.01319825]))
def test_vector_brusselator_constructor():
    """A new vector is initialised to (0, 0)."""
    vector_brusselator = VectorBrusselator()
    np.testing.assert_equal(vector_brusselator.value[0], 0)
    np.testing.assert_equal(vector_brusselator.value[1], 0)
def test_vector_brusselator_add():
    """__add__ and in-place += combine vectors element-wise."""
    vector_brusselator_1 = VectorBrusselator()
    vector_brusselator_1.value = np.ones(2)
    vector_brusselator_2 = VectorBrusselator()
    vector_brusselator_2.value = 2 * np.ones(2)
    vector_brusselator_res = vector_brusselator_1 + vector_brusselator_2
    np.testing.assert_equal(vector_brusselator_res.value, 3 * np.ones(2))
    vector_brusselator_res += vector_brusselator_1
    np.testing.assert_equal(vector_brusselator_res.value, 4 * np.ones(2))
def test_vector_brusselator_sub():
    """__sub__ and in-place -= subtract vectors element-wise."""
    vector_brusselator_1 = VectorBrusselator()
    vector_brusselator_1.value = np.ones(2)
    vector_brusselator_2 = VectorBrusselator()
    vector_brusselator_2.value = 2 * np.ones(2)
    vector_brusselator_res = vector_brusselator_2 - vector_brusselator_1
    np.testing.assert_equal(vector_brusselator_res.value, np.ones(2))
    vector_brusselator_res -= vector_brusselator_2
    np.testing.assert_equal(vector_brusselator_res.value, -np.ones(2))
def test_vector_brusselator_mul():
    """Scalar multiplication works from either side and in place (*=)."""
    vector_brusselator_1 = VectorBrusselator()
    vector_brusselator_1.value = np.ones(2)
    vector_brusselator_res = vector_brusselator_1 * 2
    np.testing.assert_equal(vector_brusselator_res.value, np.ones(2)*2)
    vector_brusselator_res = 3 * vector_brusselator_1
    np.testing.assert_equal(vector_brusselator_res.value, np.ones(2)*3)
    vector_brusselator_res *= 2
    np.testing.assert_equal(vector_brusselator_res.value, np.ones(2)*6)
def test_vector_brusselator_norm():
    """norm() returns the Euclidean norm of the stored values."""
    vector_brusselator = VectorBrusselator()
    vector_brusselator.value = np.array([1, 2])
    np.testing.assert_equal(np.linalg.norm(np.array([1, 2])), vector_brusselator.norm())
def test_vector_brusselator_clone_zero():
    """clone_zero() returns a new zero-valued VectorBrusselator."""
    vector_brusselator = VectorBrusselator()
    vector_brusselator_clone = vector_brusselator.clone_zero()
    np.testing.assert_equal(True, isinstance(vector_brusselator_clone, VectorBrusselator))
    np.testing.assert_equal(vector_brusselator_clone.value, np.zeros(2))
def test_vector_brusselator_clone_rand():
    """clone_rand() returns a new VectorBrusselator (values are random)."""
    vector_brusselator = VectorBrusselator()
    vector_brusselator_clone = vector_brusselator.clone_rand()
    np.testing.assert_equal(True, isinstance(vector_brusselator_clone, VectorBrusselator))
def test_vector_brusselator_set_values():
    """set_values() stores the given array in .value."""
    vector_brusselator = VectorBrusselator()
    vector_brusselator.set_values(np.array([1, 2]))
    np.testing.assert_equal(vector_brusselator.value, np.array([1, 2]))
def test_vector_brusselator_get_values():
    """get_values() of a fresh vector returns zeros."""
    vector_brusselator = VectorBrusselator()
    np.testing.assert_equal(vector_brusselator.get_values(), np.zeros(2))
def test_vector_brusselator_plot_solution():
    """plot_solution() runs without error and returns None."""
    vector_brusselator = VectorBrusselator()
    np.testing.assert_equal(vector_brusselator.plot_solution(), None)
| 2.765625 | 3 |
ref/signature.py | bbbbbr/porklike.gb | 14 | 12766034 | # Carve
# sigs = [255,214,124,179,233]
# msks = [0,9,3,12,6]
# Door
# sigs = [192,48]
# msks = [15,15]
# Freestanding
# sigs=[0,0,0,0,16,64,32,128,161,104,84,146]
# msks=[8,4,2,1,6,12,9,3,10,5,10,5]
# Walls
# Wall-tile signatures: each entry pairs a 9-bit neighbourhood signature
# (sigs) with a don't-care mask (msks).  The script prints each pair as a
# 3x3 grid (columns laid out per `rows`), then ORs the masks into the
# signatures and prints the result.
sigs=[251,233,253,84,146,80,16,144,112,208,241,248,210,177,225,120,179,0,124,104,161,64,240,128,224,176,242,244,116,232,178,212,247,214,254,192,48,96,32,160,245,250,243,249,246,252]
msks=[0,9,0,11,13,11,15,13,3,9,0,0,9,12,6,3,12,15,3,7,14,15,0,15,6,12,0,0,3,6,12,9,0,9,0,15,15,7,15,14,0,0,0,0,0,0]
l = len(sigs)
# Bit indices of each printed grid row (maps bit position -> 3x3 cell).
rows = [[0, 5, 3], [7, 8, 6], [1, 4, 2]]
# Header: "sig/msk" label per column, padded to 8 characters.
for i in range(len(sigs)):
    s = f'{sigs[i]}/{msks[i]}'
    s += ' ' * (8 - len(s))
    print(s, end='')
print('\n' + '-----' * l + '---' * (l-1))
# Body: '*' = masked (don't care), '1'/'0' = signature bit value.
for row in rows:
    s = ''
    for i in range(len(sigs)):
        for col in row:
            sig = sigs[i]
            msk = msks[i]
            if msk & (1<<col):
                s += '* '
            elif sig & (1<<col):
                s += '1 '
            else:
                s += '0 '
        s += '  '
    print(s)
# Fold the don't-care bits into the signatures and dump the combined list.
for i in range(len(sigs)):
    sigs[i] |= msks[i]
print(sigs)
| 2.390625 | 2 |
scfmsp/controlflowanalysis/instructions/InstructionCmp.py | sepidehpouyan/SCF-MSP430 | 1 | 12766035 | from scfmsp.controlflowanalysis.instructions.AbstractInstructionTwoRegisters import AbstractInstructionTwoRegisters
class InstructionCmp(AbstractInstructionTwoRegisters):
    """MSP430 ``cmp`` instruction: compares two registers and updates the
    status flags without storing a result."""
    name = 'cmp'
    def get_execution_time(self):
        """Execution time is the instruction's base clock count."""
        return self.clock
    def execute_judgment(self, ac):
        # cmp affects all four status flags: C, Z, N and V.
        self._execute_judgment_carry(ac)
        self._execute_judgment_zero(ac)
        self._execute_judgment_negative(ac)
        self._execute_judgment_overflow(ac)
stock_image_clf/somemodels.py | ernest222/- | 10 | 12766036 | from keras import layers
from keras import models
def cnn_model(shape=(80,80,3),dropout=0.5,last_activation='softmax'):
    """Build a small 3-conv-layer CNN for 4-class image classification.

    shape: input image shape (H, W, C).
    dropout: dropout rate applied before the dense head.
    last_activation: activation of the final 4-unit layer.
    Returns a ('cnn', model) name/model pair.
    """
    model=models.Sequential()
    model.add(layers.Conv2D(64,(3,3),activation='relu',input_shape=shape))
    model.add(layers.MaxPool2D((2,2)))
    model.add(layers.Conv2D(64,(3,3),activation='relu'))
    model.add(layers.MaxPool2D((2,2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(128,activation='relu'))
    model.add(layers.Dense(4,activation=last_activation))
    model.summary()
    return 'cnn', model
def dense_model(shape=(80*80,),last_activation='softmax'):
    """Build a single-hidden-layer dense baseline for 4-class classification
    on flattened inputs.  Returns a ('simple_dense', model) pair."""
    model = models.Sequential()
    model.add(layers.Dense(128, activation='relu',input_shape=shape))
    model.add(layers.Dense(4, activation=last_activation))
    model.summary()
    return 'simple_dense',model
def inception_model():
    """Placeholder for an Inception-style model; not implemented yet."""
    pass
| 3.296875 | 3 |
python/213_2.py | kylekanos/project-euler-1 | 0 | 12766037 | <filename>python/213_2.py
#!/usr/bin/env python
from gmpy import mpf
# Project Euler 213 (flea circus), Python 2: 900 cells on a 30x30 grid.
# paths[parity][i][j] (i >= j canonical ordering) tracks path counts between
# cells over 50 random jumps, double-buffered by round parity.
off = [(0,1), (1,0), (0,-1), (-1,0)]
paths = [ [[0]*900 for j in xrange(900)] for i in xrange(2) ]
# Round 0: each flea starts on its own cell.
for i in xrange(900):
    paths[0][i][i] = 1
for r in xrange(50):
    print r
    c = (r+1)%2  # buffer being written this round
    p = r%2      # buffer from the previous round
    for i in xrange(900):
        y, x = i//30, i%30
        # Collect in-bounds orthogonal neighbours of cell i.
        adj = []
        for o in off:
            xo, yo = x+o[0], y+o[1]
            if xo<0 or xo>=30 or yo<0 or yo>=30: continue
            k = yo*30 + xo
            adj.append(k)
        for j in xrange(i+1):
            # NOTE(review): the reset writes [j][i] but the accumulation
            # writes [i][j] — asymmetric indexing; confirm intent.
            paths[c][j][i] = 0
            for a in adj: paths[c][i][j] += paths[p][max(j,a)][min(j,a)]
total_paths = [0]*900 # total paths of length 50 starting from pos i
for i in xrange(900):
    total_paths[i] = sum(paths[0][max(i,j)][min(i,j)] for j in xrange(900))
# Expected number of empty cells: sum over cells of the probability that
# every flea's path avoids that cell.
ans = 0.0
for i in xrange(900):
    e = 1.0
    for j in xrange(900):
        p = float(total_paths[j] - paths[0][max(i,j)][min(i,j)])/total_paths[j]
        e *= p
    ans += e
print ans
| 2.25 | 2 |
main.py | mystic-wizord/apex-framework | 0 | 12766038 | <filename>main.py
import pathlib
import os
import pwd
import click
import chevron
import json
import yaml
@click.group()
def cli():
    # Root click group; subcommands (init/create/build/purge) attach to it.
    # The docstring below is user-visible --help text, so it stays as-is.
    """
    Thank you for using Apex Framework!
    """
    pass
@cli.command()
def init():
    """Create the apex-framework settings file and install it into the
    current user's home directory."""
    click.echo(f"Initializing Apex!")
    pathlib.Path(f'./apex-framework').mkdir(parents=True, exist_ok=True)
    # Check the *installed* location (the folder is moved into the user's
    # home below).  The original tested '/apex-framework/settings.json' — a
    # path at the filesystem root that never exists — so re-initialisation
    # was never detected.
    installed = pathlib.Path(f'/home/{get_username()}/apex-framework/settings.json')
    if installed.is_file():
        click.echo("It seems that apex has already been initialized!")
    else:
        work_dir = click.prompt("Please state where you would like your main working directory to be")
        if not work_dir or not work_dir.strip() or len(work_dir) <= 0:
            work_dir = '..'
        with open('./mustache/settings.mustache') as mustache:
            settings_mustache = chevron.render(mustache, { 'work_dir': work_dir } )
        # Context manager ensures the handle is closed (was a manual close).
        with open('./apex-framework/settings.json', 'x') as settings_file:
            settings_file.write(settings_mustache)
        os.system(f'sudo mv "./apex-framework" "/home/{get_username()}/apex-framework" -i')
@cli.command()
@click.argument('name')
def create(name: str = None):
    """Create a new application: prompt for database / OpenAPI details,
    render the project manifest template, and write it into the working
    directory under <name>/."""
    if name and name.strip() and len(name) > 0:
        click.echo(f"Creating new application: {name}!")
    else:
        name = click.prompt("Please give your project a name")
        click.echo(f"Creating new application: {name}!")
    # This hasn't been implemented yet :(
    database_required = click.prompt("Do you have a database instance? [Y/N]") == 'Y'
    click.echo(f"Database setup needed? {database_required}")
    with open('./mustache/project-data-file.mustache') as data_file:
        if database_required:
            mongodb_uri = click.prompt("Please provide your MongoDB URI string")
            rendered_mustache = chevron.render(data_file, { 'name': name, 'db_required': database_required, 'mongodb_uri': mongodb_uri } )
            click.echo(rendered_mustache)
        else:
            open_api_dir = click.prompt("Please provide the location of your OpenApi files")
            rendered_mustache = chevron.render(data_file, { 'name': name, 'db_required': database_required, 'open_api_dir': open_api_dir } )
            click.echo(rendered_mustache)
    working_dir = get_working_dir()
    pathlib.Path(f'{working_dir}/{name}/api-definitions').mkdir(parents=True, exist_ok=True)
    # 'x' mode: fail loudly if the manifest already exists.
    app_definition_file = open(f'{working_dir}/{name}/application-manifest.json', 'x')
    app_definition_file.write(rendered_mustache)
    app_definition_file.close()
@cli.command()
@click.argument('name')
def build(name: str = None):
    # TODO: build step is not implemented yet — this only echoes.
    click.echo(f"Building {name}!")
@cli.command()
@click.argument('name')
def purge(name: str = None):
    """Remove an application's directory tree from the working directory."""
    if name and name.strip() and len(name) > 0:
        click.echo(f"Purging {name}!")
    else:
        name = click.prompt("Please enter the name of the application to purge")
        click.echo(f"Purging {name}!")
    working_dir = get_working_dir()
    import shutil
    # Path.rmdir() only removes *empty* directories, so the original always
    # failed here (create() puts a manifest and an api-definitions folder
    # inside every app dir).  Remove the whole tree instead.
    shutil.rmtree(pathlib.Path(f'{working_dir}/{name}'))
def get_working_dir():
    """Return the 'working-dir' entry from the user's installed settings file
    (~/apex-framework/settings.json, created by ``init``)."""
    settings_path = f'/home/{get_username()}/apex-framework/settings.json'
    # Context manager closes the handle (the original leaked it).
    with open(settings_path) as settings:
        # returns JSON object as a dictionary
        data = json.load(settings)
    return data['working-dir']
def get_username():
    """Return the login name of the user running this process."""
    return pwd.getpwuid(os.getuid()).pw_name
# Script entry point: click dispatches to the registered subcommands.
if __name__ == "__main__":
    cli()

# Commands for setup:
# - virtualenv venv
# - . venv/bin/activate
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/circuitplayground_tapdetect.py | jacoblb64/pico_rgb_keypad_hid | 75 | 12766039 | <filename>adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/circuitplayground_tapdetect.py
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""This example prints to the serial console when the board is double-tapped."""
import time
from adafruit_circuitplayground import cp
# Change to 1 for single-tap detection.
cp.detect_taps = 2
# Poll the tap flag forever; the short sleep keeps the loop responsive
# without busy-spinning the CPU.
while True:
    if cp.tapped:
        print("Tapped!")
    time.sleep(0.05)
| 2.671875 | 3 |
photos/serializers.py | christianwgd/photos | 0 | 12766040 | <filename>photos/serializers.py
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Photo, Event, Import, Tag
class EventSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing an Event's url, id and name."""
    class Meta:
        model = Event
        fields = ['url', 'id', 'name']
class TagSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing a Tag's url, id and name."""
    class Meta:
        model = Tag
        fields = ['url', 'id', 'name', ]
class ImportSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for Import batches (name + timestamp)."""
    class Meta:
        model = Import
        fields = ['url', 'id', 'name', 'timestamp']
class PhotoSerializer(serializers.HyperlinkedModelSerializer):
    """Full Photo serializer: metadata, relations and image/thumbnail files
    (EXIF data is served separately by PhotoEXIFSerializer)."""
    class Meta:
        model = Photo
        fields = [
            'url', 'id', 'name', 'timestamp', 'uploaded',
            'uploaded_by', 'address', 'event', 'upload',
            'tags', 'imagefile', 'thumb'
        ]
class PhotoEXIFSerializer(serializers.HyperlinkedModelSerializer):
    """Slim Photo serializer exposing only the EXIF payload."""
    class Meta:
        model = Photo
        fields = ['url', 'id', 'exif']
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """User serializer that adds a computed full_name field."""
    full_name = serializers.SerializerMethodField('get_fullname')
    @staticmethod
    def get_fullname(obj):
        # "First Last" built from the standard auth.User name fields.
        return obj.first_name + ' ' + obj.last_name
    class Meta:
        model = User
        fields = ('id', 'url', 'username', 'email', 'full_name')
| 2.09375 | 2 |
govee_api/device_factory.py | filipealvesdef/govee_api | 15 | 12766041 | import govee_api.device as dev
import abc
class _AbstractGoveeDeviceFactory(abc.ABC):
    """ Declare an interface for operations that create abstract Govee devices """
    @abc.abstractmethod
    def build(self, govee, identifier, topic, sku, name, connected):
        """ Build and return a Govee device instance for the given metadata """
        pass
class _GoveeBulbFactory(_AbstractGoveeDeviceFactory):
    """ Implement the operations to build Govee bulb devices """
    def build(self, govee, identifier, topic, sku, name, connected):
        # H6085 is a white-only bulb; everything else gets the RGB bulb class.
        if sku == 'H6085':
            return dev.GoveeWhiteBulb(govee, identifier, topic, sku, name, connected)
        else:
            return dev.GoveeBulb(govee, identifier, topic, sku, name, connected)
class _GoveeLedStripFactory(_AbstractGoveeDeviceFactory):
    """ Implement the operations to build Govee LED strip devices """
    def build(self, govee, identifier, topic, sku, name, connected):
        return dev.GoveeLedStrip(govee, identifier, topic, sku, name, connected)
#class _GoveeStringLightFactory(_AbstractGoveeDeviceFactory):
# """ Implement the operations to build Govee string light devices """
# def build(self, govee, identifier, topic, sku, name, connected):
# if sku == 'H7022':
# return dev.H7022GoveeStringLight(govee, identifier, topic, sku, name, connected)
# return None | 3.046875 | 3 |
test/test_cli.py | pratyakshajha/pytest-check-links | 7 | 12766042 | import subprocess
import pytest
def test_cli_meta():
    """--version and --help both exit successfully."""
    assert subprocess.call(["pytest-check-links", "--version"]) == 0
    assert subprocess.call(["pytest-check-links", "--help"]) == 0
@pytest.mark.parametrize("example,rc,expected,unexpected", [
    ["httpbin.md", 0, [" 6 passed"], [" failed"]],
    ["rst.rst", 1, [" 2 failed", " 7 passed"], [" warning"]]
])
def test_cli_pass(testdir, example, rc, expected, unexpected):
    """Run pytest-check-links on a copied example project and assert its
    return code plus (absence of) summary-line fragments."""
    testdir.copy_example(example)
    testdir.copy_example("setup.cfg")
    proc = subprocess.Popen(["pytest-check-links"], stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    # Last stdout line is pytest's "=== N passed, M failed ===" summary.
    summary = stdout.decode('utf-8').strip().splitlines()[-1]
    assert rc == proc.returncode
    for ex in expected:
        assert ex in summary, stdout.decode('utf-8')
    for unex in unexpected:
        assert unex not in summary, stdout.decode('utf-8')
| 2.21875 | 2 |
src/torforce/activations.py | leaprovenzano/torforce | 0 | 12766043 | <reponame>leaprovenzano/torforce
import torch
from torch import nn
class SoftPlusOne(nn.Softplus):
    """Softplus shifted up by one: ``softplus(x) + 1``.

    Output is strictly greater than 1 for all inputs.

    Example:
        >>> import torch
        >>> from torforce.activations import SoftPlusOne
        >>>
        >>> act = SoftPlusOne()
        >>> act(torch.tensor(.5))
        tensor(1.9741)
    """

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shifted = super().forward(x)
        return shifted + 1
# Lookup table mapping lowercase activation names to their nn.Module classes;
# used by get_named_activation().  'softplus1' and 'softplusone' are aliases.
_registry = {
    'elu': nn.ELU,
    'hardshrink': nn.Hardshrink,
    'hardtanh': nn.Hardtanh,
    'leakyrelu': nn.LeakyReLU,
    'logsigmoid': nn.LogSigmoid,
    'prelu': nn.PReLU,
    'relu': nn.ReLU,
    'relu6': nn.ReLU6,
    'rrelu': nn.RReLU,
    'selu': nn.SELU,
    'celu': nn.CELU,
    'gelu': nn.GELU,
    'sigmoid': nn.Sigmoid,
    'softplus': nn.Softplus,
    'softshrink': nn.Softshrink,
    'softsign': nn.Softsign,
    'tanh': nn.Tanh,
    'tanhshrink': nn.Tanhshrink,
    'softplus1': SoftPlusOne,
    'softplusone': SoftPlusOne,
}
def get_named_activation(name: str) -> nn.Module:
    """Return an instance of an activation module looked up by name
    (case-insensitive).

    Note:
        If the activation accepts init params it is always constructed
        with its defaults.

    Raises:
        ValueError: if ``name`` is not in the registry.

    Example:
        >>> from torforce.activations import get_named_activation
        >>>
        >>> get_named_activation('relu')
        ReLU()
    """
    key = name.lower()
    if key not in _registry:
        raise ValueError(f'unknown activation {name}: activation could not be found in registry')
    return _registry[key]()
| 2.703125 | 3 |
conf.py | DawudH/ImageColorizer | 0 | 12766044 | # -*- coding: utf-8 -*-
"""
authors: <NAME>, <NAME>, <NAME> and <NAME>
written for the NN course IN4015 of the TUDelft
"""
##### SETTINGS: #####
# Number of epochs to train the network over (0 = no training, e.g. for
# evaluation-only runs with a pre-trained parameter file)
n_epoch = 0
# Folder where the training superbatches are stored
training_folder= 'selection'
# Folder where the validation superbatches are stored
validation_folder= 'selection'
# The colorspace to run the NN in
colorspace= 'CIELab'
# Parameter folder where the parameter files are stored
param_folder = 'params_final_final'
# Parameter file to initialize the network with (do not add .npy), None for no file
param_file = 'params_fruit_Compact_more_end_fmaps_dilation_k10_T02_sigma5_nbins20_labda_03_gridsize10_epoch_25.0'
# Parameter file to save the trained parameters to every epoch (do not add .npy), None for no file
param_save_file = 'params_fruit_Compact_more_end_fmaps_dilation_k10_T02_sigma5_nbins20_labda_03_gridsize10_epoch_25.0'
# Error folder where the error files are stored
error_folder = 'errors_final'
# Error file to append with the new training and validation errors (do not add .npy), None to not save
error_file = 'errors_fruit_Compact_more_end_fmaps_classifier_k10_T02_sigma5_nbins20_labda_03_gridsize10_epoch22'
# The architecture to use, can be 'Dahl' or 'Compact' or 'Compact_more_end_fmaps' or 'Dahl_classifier' or 'Dahl_Zhang' or 'Dahl_Zhang_NO_VGG16' or 'Compact_more_end_fmaps_classifier'
architecture= 'Compact_more_end_fmaps_dilation'
# Blur radius
sigma = 3;
# Turn on classification
classification=True
# Colorbin settings:
colorbins_k = 10 # k nearest neighbours
colorbins_T = 0.4 # Temperature
colorbins_sigma = 5 # K nearest neighbour sigma (blur the distance to the bin)
colorbins_nbins = 20 # Number of colorbins
colorbins_labda = 0.5 # Uniform distribution mix factor
colorbins_gridsize=10
| 1.953125 | 2 |
mixEM/examples/demo_multinormal.py | mjafin/LongQC | 61 | 12766045 | #!/usr/bin/env python
import numpy as np
import mixem
from mixem.distribution import MultivariateNormalDistribution
def generate_data(n_data=5000, weights=(0.3, 0.7), dist_params=None):
    """Sample data from a Gaussian mixture model.

    Generalized from the original hard-coded demo: all mixture settings are
    now parameters, with defaults reproducing the original behavior.

    Parameters
    ----------
    n_data : int, optional
        Number of samples to draw (default 5000, as before).
    weights : sequence of float, optional
        Mixture weights; must sum to 1 and have one entry per component.
    dist_params : list of (mean, cov) tuples, optional
        Multivariate-normal parameters per component. Defaults to the
        original two 1-D components N(4, 1) and N(1, 0.5).

    Returns
    -------
    numpy.ndarray
        Array of shape (n_data, dim) where dim is the component dimension.
    """
    if dist_params is None:
        dist_params = [
            (np.array([4]), np.diag([1])),
            (np.array([1]), np.diag([0.5])),
        ]

    dim = len(dist_params[0][0])
    data = np.zeros((n_data, dim))
    for i in range(n_data):
        # Pick a mixture component according to the weights, then sample it.
        dpi = np.random.choice(len(dist_params), p=weights)
        mean, cov = dist_params[dpi]
        data[i] = np.random.multivariate_normal(mean, cov)

    return data
def recover(data):
    """Fit a two-component Gaussian mixture to *data* via EM and print the
    recovered weights, distributions, and final log-likelihood.

    The two components are initialized slightly above and slightly below the
    sample mean, both with the sample variance, to break symmetry.
    """
    center = np.mean(data)
    spread = np.var(data)

    # Initial (mean, covariance) pairs: +0.1 then -0.1 around the sample mean.
    initial_params = [(np.array([center + offset]), np.diag([spread]))
                      for offset in (0.1, -0.1)]
    components = [MultivariateNormalDistribution(mu, sigma)
                  for mu, sigma in initial_params]

    weight, distributions, ll = mixem.em(data, components)
    print(weight, distributions, ll)
# Demo entry point: draw synthetic mixture data, then recover its parameters.
if __name__ == '__main__':
    data = generate_data()
    recover(data)
| 2.46875 | 2 |
engineer/unittests/__init__.py | pridkett/engineer | 1 | 12766046 | <filename>engineer/unittests/__init__.py
# coding=utf-8
from tempfile import mkdtemp
from unittest.case import TestCase
from path import path
from engineer.log import bootstrap
__author__ = '<NAME> <<EMAIL>>'
class SettingsTestCase(TestCase):
    """Base TestCase that loads an Engineer settings file for its tests.

    Subclasses set ``settings_file``; an EngineerConfiguration is constructed
    from it when the case is created and rebuilt after every test so changes
    made during a test don't leak into the next one.
    """
    # Path to the settings file to load; subclasses override this.
    settings_file = None

    def __init__(self, *args, **kwargs):
        # Imported locally so engineer.conf's import-time behavior is
        # deferred until a test case is actually instantiated.
        from engineer.conf import EngineerConfiguration
        super(SettingsTestCase, self).__init__(*args, **kwargs)
        self._source_settings_file = self.settings_file
        # NOTE(review): the constructed configuration is discarded --
        # presumably constructing it (re)applies the settings globally;
        # confirm in engineer.conf.
        EngineerConfiguration(self._source_settings_file)

    def tearDown(self):
        from engineer.conf import EngineerConfiguration
        # Re-apply the original settings after each test.
        EngineerConfiguration(self._source_settings_file)
class CopyDataTestCase(TestCase):
    """TestCase that copies a source data folder to a temp location so tests
    can mutate it freely; copies are deleted when the class finishes.
    """
    @classmethod
    def setUpClass(cls):
        # Temp folders queued for deletion in tearDownClass.
        cls.tmp_dirs = []
    def __init__(self, *args, **kwargs):
        # Initialize logging before the unittest machinery runs.
        bootstrap()
        super(CopyDataTestCase, self).__init__(*args, **kwargs)
        self.copied_data_path = None
    @property
    def source_path(self):
        # Path of the original test data; assigning it triggers a fresh copy.
        return self._source_path
    @source_path.setter
    def source_path(self, value):
        # Discard the copy from any previous assignment first.
        if self.copied_data_path is not None:
            self.copied_data_path.rmtree()
        temp = mkdtemp()
        self.copied_data_path = (path(temp) / '__in_progress_test_data').abspath()
        self._source_path = value
        self.source_path.copytree(self.copied_data_path)
        print "Copied temp test data to: %s" % self.copied_data_path
    def tearDown(self):
        # Deletion is deferred to tearDownClass; here we only queue the
        # containing temp dir.
        print "Marking temp folder for deletion: %s" % self.copied_data_path.dirname()
        self.tmp_dirs.append(self.copied_data_path.dirname())
    @classmethod
    def tearDownClass(cls):
        print "Teardownclass running... %s" % cls.tmp_dirs
        for dir in cls.tmp_dirs:
            print "Deleting temp folder: %s" % dir
            dir.rmtree(ignore_errors=True)
        del cls.tmp_dirs
    @staticmethod
    def removal_error(func, path, exc_info):
        # Shaped like an onerror callback for tree removal; swallows errors.
        # NOTE(review): appears unused within this class -- confirm callers.
        pass
| 2.515625 | 3 |
app/models.py | agailloty/api | 0 | 12766047 | from sqlalchemy.sql.expression import null, text
from sqlalchemy.sql.sqltypes import TIMESTAMP
from .database import Base
from sqlalchemy import Column, Integer, String, Boolean
class Post(Base):
    """SQLAlchemy ORM model for a user-authored post (table ``user_posts``)."""
    __tablename__ = "user_posts"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(String, nullable=False)
    content = Column(String, nullable=False)
    # Database-side default TRUE; column itself is nullable, so an explicit
    # NULL is still allowed.
    published = Column(Boolean, nullable=True, server_default="TRUE")
    # Creation timestamp assigned by the database via now(), timezone-aware.
    created_at = Column(TIMESTAMP(timezone=True),
                        nullable=False, server_default=text("now()"))
| 2.703125 | 3 |
pymaginopolis/chunkyfile/common.py | benstone/pymaginopolis | 9 | 12766048 | import struct
from pymaginopolis.chunkyfile import model as model
from pymaginopolis.chunkyfile.model import Endianness, CharacterSet
# Size in bytes of the fixed GRPB chunk header parsed by parse_grpb_list.
GRPB_HEADER_SIZE = 20
# Maps chunky-file character sets to Python codec names.
# NOTE(review): CharacterSet.UTF16BE has no entry -- big-endian strings are
# not yet supported (see the FUTURE comment in get_string_size_format).
CHARACTER_SETS = {
    model.CharacterSet.ANSI: "latin1",
    model.CharacterSet.UTF16LE: "utf-16le"
}
def get_string_size_format(characterset):
    """Return (struct format char, width of the size field in bytes,
    bytes per character) for length-prefixed strings in *characterset*.
    """
    # FUTURE: big endian
    if characterset in (model.CharacterSet.UTF16LE, model.CharacterSet.UTF16BE):
        # UTF-16: 16-bit length prefix counting 2-byte code units.
        return "H", 2, 2
    # Single-byte character sets use a one-byte length prefix.
    return "B", 1, 1
def parse_pascal_string_with_encoding(data):
    """Read a character-set marker followed by a Pascal string.

    :param data: buffer starting at the character-set field
    :return: (string, total bytes consumed, character set)
    """
    # The first two bytes identify the character set of the string that follows.
    character_set = model.CharacterSet(struct.unpack("<H", data[:2])[0])
    decoded, consumed = parse_pascal_string(character_set, data[2:])
    # Include the two character-set bytes in the consumed count.
    return decoded, consumed + 2, character_set
def parse_pascal_string(characterset, data):
    """Read a length-prefixed (Pascal) string from *data*.

    The prefix counts characters (code units); the byte length read is the
    prefix multiplied by the per-character width of *characterset*.

    :param characterset: character set used to decode the string
    :param data: binary data starting at the length prefix
    :return: (decoded string, total bytes consumed)
    """
    size_fmt, size_width, char_width = get_string_size_format(characterset)
    if len(data) < size_width:
        raise FileParseException("String size truncated")
    char_count = struct.unpack("<" + size_fmt, data[:size_width])[0]
    byte_count = char_count * char_width
    raw = data[size_width:size_width + byte_count]
    decoded = raw.decode(CHARACTER_SETS[characterset])
    return decoded, size_width + byte_count
def generate_pascal_string(characterset, value):
    """Encode *value* as a length-prefixed (Pascal) string in *characterset*.

    The length prefix counts code units (bytes for single-byte sets, 16-bit
    units for UTF-16), mirroring parse_pascal_string, which multiplies the
    prefix by the per-character width when reading.
    """
    string_size_format, _size_width, character_size = get_string_size_format(characterset)
    encoded_string = value.encode(CHARACTER_SETS[characterset])
    # Pack the encoded length in code units rather than len(value): for
    # UTF-16 strings containing non-BMP characters the two differ (surrogate
    # pairs take two code units), and the parser reads prefix * character_size
    # bytes -- packing len(value) would truncate such strings on read.
    code_units = len(encoded_string) // character_size
    return struct.pack("<" + string_size_format, code_units) + encoded_string
class FileParseException(Exception):
    """Raised when the chunky file contains malformed or truncated data."""
def check_size(expected, actual, desc):
    """Raise FileParseException if *actual* bytes is less than *expected*."""
    if actual >= expected:
        return
    raise FileParseException("%s truncated: expected 0x%x, got 0x%x" % (desc, expected, actual))
def parse_u24le(data):
    """Parse a 24-bit little-endian unsigned integer from the first 3 bytes."""
    low, mid, high = data[0], data[1], data[2]
    return (high << 16) | (mid << 8) | low
def parse_endianness_and_characterset(data):
    """Parse the 4-byte endianness/character-set header.

    :param data: exactly 4 bytes (two little-endian u16 fields)
    :return: (model.Endianness, model.CharacterSet)
    """
    check_size(4, len(data), "Endianness/characterset")
    raw_endianness, raw_charset = struct.unpack("<2H", data)
    return model.Endianness(raw_endianness), model.CharacterSet(raw_charset)
def tag_bytes_to_string(tag):
    """
    Convert the raw bytes for a tag into a string
    :param tag: bytes (eg. b'\x50\x4d\x42\x4d')
    :return: tag (eg. "MBMP")
    """
    # Tags are stored byte-reversed with optional NUL padding. Decode with
    # latin1: "ansi" is not a registered Python codec, so the previous
    # .decode("ansi") raised LookupError at runtime; latin1 is a 1:1
    # byte-to-char mapping and matches CHARACTER_SETS[CharacterSet.ANSI].
    return tag[::-1].decode("latin1").rstrip("\x00")
def parse_grpb_list(data):
    """
    Parse a GRPB chunk
    :param data: GRPB chunk
    :return: tuple containing endianness, characterset, index entry size, item index and item heap
    """
    # Header layout (20 bytes, little-endian): endianness (u16),
    # character set (u16), index entry size (u32), number of entries (u32),
    # heap size (u32), unknown u32.
    endianness, characterset, index_entry_size, number_of_entries, heap_size, unk1 = struct.unpack("<2H4I", data[
        0:GRPB_HEADER_SIZE])
    endianness = Endianness(endianness)
    characterset = CharacterSet(characterset)
    # TODO: figure out what this is
    # NOTE(review): only 0xFFFFFFFF is handled; other values are rejected
    # explicitly rather than parsed incorrectly.
    if unk1 != 0xFFFFFFFF:
        raise NotImplementedError("can't parse this GRPB because unknown1 isn't 0xFFFFFFFF")
    # Read heap
    # Variable-length item data immediately follows the header.
    heap = data[GRPB_HEADER_SIZE:GRPB_HEADER_SIZE + heap_size]
    # Read index
    # Fixed-size index entries come after the heap; split into one bytes
    # object per entry.
    index_size = index_entry_size * number_of_entries
    index_data = data[GRPB_HEADER_SIZE + heap_size:GRPB_HEADER_SIZE + heap_size + index_size]
    index_items = [index_data[i * index_entry_size:(i + 1) * index_entry_size] for i in range(0, number_of_entries)]
    return endianness, characterset, index_entry_size, index_items, heap
| 2.84375 | 3 |
pyasn1_alt_modules/rfc6010.py | CBonnell/pyasn1-alt-modules | 2 | 12766049 | #
# This file is part of pyasn1-alt-modules software.
#
# Created by <NAME> with assistance from asn1ate v.0.6.0.
# Modified by <NAME> to add maps for use with opentypes.
# Modified by <NAME> to include the opentypemap manager.
#
# Copyright (c) 2019-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
# Certificate Extension for CMS Content Constraints (CCC)
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc6010.txt
#
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import univ
from pyasn1_alt_modules import rfc5280
from pyasn1_alt_modules import opentypemap
# Shared registry of certificate-extension decoders, keyed by extension OID.
certificateExtensionsMap = opentypemap.get('certificateExtensionsMap')
# Stand-in for an unbounded ASN.1 upper size limit (1..MAX).
MAX = float('inf')
# Re-exported from RFC 5280 for use in the definitions below.
AttributeType = rfc5280.AttributeType
AttributeValue = rfc5280.AttributeValue
# id-ct-anyContentType: wildcard content type used by CCC processing.
id_ct_anyContentType = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.0')
class AttrConstraint(univ.Sequence):
    """AttrConstraint ::= SEQUENCE { attrType, attrValues SIZE (1..MAX) }"""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('attrType', AttributeType()),
        namedtype.NamedType(
            'attrValues',
            univ.SetOf(componentType=AttributeValue()).subtype(
                subtypeSpec=constraint.ValueSizeConstraint(1, MAX))))
class AttrConstraintList(univ.SequenceOf):
    """AttrConstraintList ::= SEQUENCE SIZE (1..MAX) OF AttrConstraint"""
    componentType = AttrConstraint()
    subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
class ContentTypeGeneration(univ.Enumerated):
    """ContentTypeGeneration ::= ENUMERATED { canSource(0), cannotSource(1) }"""
    namedValues = namedval.NamedValues(
        ('canSource', 0),
        ('cannotSource', 1))
class ContentTypeConstraint(univ.Sequence):
    """ContentTypeConstraint ::= SEQUENCE {
        contentType, canSource DEFAULT canSource, attrConstraints OPTIONAL }"""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('contentType', univ.ObjectIdentifier()),
        namedtype.DefaultedNamedType(
            'canSource', ContentTypeGeneration().subtype(value='canSource')),
        namedtype.OptionalNamedType('attrConstraints', AttrConstraintList()))
# CMS Content Constraints (CCC) Extension and Object Identifier
id_pe_cmsContentConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.18')


class CMSContentConstraints(univ.SequenceOf):
    """CMSContentConstraints ::= SEQUENCE SIZE (1..MAX) OF ContentTypeConstraint"""
    componentType = ContentTypeConstraint()
    subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
# Register this module's extension decoder in the shared map of certificate
# extension OIDs (augmenting the entries seeded by rfc5280.py).
certificateExtensionsMap.update({
    id_pe_cmsContentConstraints: CMSContentConstraints(),
})
| 1.828125 | 2 |
quasimodo/tests/test_gaussian_nb_with_missing_values.py | Aunsiels/CSK | 16 | 12766050 | import unittest
import numpy as np
from quasimodo.assertion_fusion.gaussian_nb_with_missing_values import GaussianNBWithMissingValues
class TestFilterObject(unittest.TestCase):
    """Unit tests for GaussianNBWithMissingValues.

    NOTE(review): the class name suggests filter tests, but everything here
    exercises the naive-Bayes classifier -- consider renaming. The shared
    fixture (self.gaussian_nb plus a NaN-bearing data set) is built in setUp,
    which unittest runs before every test regardless of where the method
    appears in the class body.
    """
    def test_gaussian2(self):
        # A point hundreds of standard deviations from the mean must yield a
        # density that is numerically zero (no overflow or NaN).
        # NOTE(review): std is negative here -- presumably get_gaussian only
        # uses std squared; confirm.
        std = -0.1339048038303071
        mean = -0.1339048038303071
        x = 150.10086283379565
        temp = self.gaussian_nb.get_gaussian(x, mean, std)
        self.assertAlmostEqual(temp, 0)
    def test_prior(self):
        # 10 positives and 5 negatives -> class priors of 1/3 and 2/3.
        y = np.array([1] * 10 + [0] * 5)
        self.gaussian_nb.set_unique_y(y)
        self.gaussian_nb.set_prior(y)
        prior = self.gaussian_nb.prior
        self.assertAlmostEqual(prior[0], 0.33, places=2)
        self.assertAlmostEqual(prior[1], 0.67, places=2)
    def test_means_standard_deviations(self):
        # Fully observed data: per-class, per-feature means and variances
        # should match the hand-computed values asserted below.
        x = [[0, 0],
             [0, 0],
             [1, -1],
             [1, 0],
             [1, 0],
             [2, 3]]
        y = [0, 0, 0, 1, 1, 1]
        x = np.array(x)
        y = np.array(y)
        self.gaussian_nb.fit(x, y)
        means = self.gaussian_nb.means
        standard_deviations = self.gaussian_nb.standard_deviations
        self.assertAlmostEqual(means[0, 0], 0.33, places=2)
        self.assertAlmostEqual(means[0, 1], -0.33, places=2)
        self.assertAlmostEqual(means[1, 0], 1.33, places=2)
        self.assertAlmostEqual(means[1, 1], 1, places=2)
        self.assertAlmostEqual(standard_deviations[0, 0] ** 2, 0.33, places=2)
        self.assertAlmostEqual(standard_deviations[0, 1] ** 2, 0.33, places=2)
        self.assertAlmostEqual(standard_deviations[1, 0] ** 2, 0.33, places=2)
        self.assertAlmostEqual(standard_deviations[1, 1] ** 2, 3, places=2)
    def test_means_standard_deviations_with_nan(self):
        # With NaNs present, statistics must ignore missing entries and thus
        # equal those of the fully observed case above.
        self.gaussian_nb.fit(self.x, self.y)
        means = self.gaussian_nb.means
        standard_deviations = self.gaussian_nb.standard_deviations
        self.assertAlmostEqual(means[0, 0], 0.33, places=2)
        self.assertAlmostEqual(means[0, 1], -0.33, places=2)
        self.assertAlmostEqual(means[1, 0], 1.33, places=2)
        self.assertAlmostEqual(means[1, 1], 1, places=2)
        self.assertAlmostEqual(standard_deviations[0, 0] ** 2, 0.33, places=2)
        self.assertAlmostEqual(standard_deviations[0, 1] ** 2, 0.33, places=2)
        self.assertAlmostEqual(standard_deviations[1, 0] ** 2, 0.33, places=2)
        self.assertAlmostEqual(standard_deviations[1, 1] ** 2, 3, places=2)
    def test_likelihoods(self):
        # Likelihoods for a plausible point should be non-degenerate
        # (neither exactly 0 nor exactly 1) for both classes.
        self.gaussian_nb.fit(self.x, self.y)
        x_in = np.array([1, 0])
        likelihoods = self.gaussian_nb.get_all_likelihoods(x_in)
        self.assertNotAlmostEqual(likelihoods[0], 0, places=2)
        self.assertNotAlmostEqual(likelihoods[0], 1, places=2)
        self.assertNotAlmostEqual(likelihoods[1], 0, places=2)
        self.assertNotAlmostEqual(likelihoods[1], 1, places=2)
    def setUp(self):
        # Shared fixture: 8 samples, 2 features, with np.nan marking missing
        # values; the first four rows are class 0, the last four class 1.
        self.x = [[0, np.nan], # 0
                  [np.nan, 0], # 0
                  [0, 0], # 0
                  [1, -1], # 0
                  [1, np.nan], # 1
                  [np.nan, 0], # 1
                  [1, 0], # 1
                  [2, 3]] # 1
        self.y = [0, 0, 0, 0, 1, 1, 1, 1]
        self.y_uniq = [0, 1]
        self.x = np.array(self.x)
        self.y = np.array(self.y)
        self.y_uniq = np.array(self.y_uniq)
        self.gaussian_nb = GaussianNBWithMissingValues()
    def test_predict_proba(self):
        # End-to-end: probabilities are non-degenerate and [1, 0] is judged
        # more likely to be class 1 than class 0.
        clf = GaussianNBWithMissingValues()
        clf.fit(self.x, self.y)
        x_in = np.array([[1, 0]])
        proba = clf.predict_proba(x_in)
        self.assertNotAlmostEqual(proba[0][0], 0, places=2)
        self.assertNotAlmostEqual(proba[0][0], 1, places=2)
        self.assertNotAlmostEqual(proba[0][1], 0, places=2)
        self.assertNotAlmostEqual(proba[0][1], 1, places=2)
        self.assertGreater(proba[0][1], proba[0][0])
    def test_gaussian(self):
        # Known density value: N(x=0.441; mean=1, std~=0.4472) ~= 0.40842.
        gaussian = self.gaussian_nb.get_gaussian(0.441, 1, 0.447213595)
        self.assertAlmostEqual(gaussian, 0.40842, places=2)
if __name__ == '__main__':
unittest.main() | 2.53125 | 3 |