| max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
|---|---|---|---|---|---|---|
lookout/migrations/0001_initial.py
|
rspeed/Django-HTTP-Reporting-API
| 5
|
12774651
|
<reponame>rspeed/Django-HTTP-Reporting-API
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name='Report',
fields=[
('created', models.DateTimeField(auto_now_add=True, primary_key=True, serialize=False)),
('type', models.CharField(help_text="The report's category.", max_length=120)),
('generated', models.DateTimeField(help_text='The time at which the report was generated.')),
('url', models.URLField(help_text='The address of the document or worker from which the report was generated.')),
('body', models.TextField(help_text='The contents of the report.'))
],
options={
'ordering': ['-created']
}
)
]
| 2.21875
| 2
|
pandoc-starter/MarkTex/marktex/rawrender/toRaw.py
|
riciche/SimpleCVReproduction
| 923
|
12774652
|
<gh_stars>100-1000
import os
from marktex.markast.utils import ImageTool,CleanTool
from marktex.markast.parser import Scanner
from marktex import config
from marktex.markast.document import Document
from marktex.markast.environment import *
from marktex.markast.line import *
from marktex.markast.token import *
from marktex.markast.xmls import *
class MarkRaw():
def __init__(self, doc: Document, input_dir, output_dir=None, texconfig=None, subdoc=False):
self.subdoc = subdoc
if texconfig is None:
texconfig = config
self.config = texconfig
self.input_dir = input_dir
if output_dir is None:
output_dir = "./"
image_dir = os.path.join(output_dir, "images")
self.output_dir = output_dir
self.image_dir = os.path.abspath(image_dir)
self.doc = doc
self.has_toc = False
self.contents = []
def append(self,item):
self.contents.append(item)
@staticmethod
def convert_file(fpath, output_dir=None):
'''
:param fpath: path to the markdown file
:param image_dir: during conversion, both remote and local images referenced in the markdown
    are renamed with a uniform hash and written to a single directory, which defaults to
    "./images" under the directory containing the markdown file
:return:
'''
fpre, _ = os.path.split(fpath)
if output_dir is None:
output_dir = fpre
os.makedirs(output_dir, exist_ok=True)
doc = Scanner.analyse_file(fpath)
input_dir, _ = os.path.split(fpath)
mark = MarkRaw(doc, input_dir=input_dir, output_dir=output_dir)
mark.convert()
return mark
def convert(self):
doc = self.doc
if doc.has_toc and not self.subdoc:
pass
for i, envi in enumerate(doc.content):
print(f"\rConverting...{i * 100 / len(doc.content):.3f}%.", end="\0", flush=True)
if isinstance(envi, Quote):
envi = self.fromQuote(envi)
elif isinstance(envi, Paragraph):
envi = self.fromParagraph(envi)
elif isinstance(envi, Itemize):
envi = self.fromItemize(envi)
elif isinstance(envi, Enumerate):
envi = self.fromEnumerate(envi)
elif isinstance(envi, Formula):
envi = self.fromFormula(envi)
elif isinstance(envi, Code):
envi = self.fromCode(envi)
elif isinstance(envi, Table):
envi = self.fromTable(envi)
elif isinstance(envi, MultiBox):
envi = self.fromMultiBox(envi)
else:
raise Exception(f"Doc error {envi},{envi.__class__.__name__}")
self.append(envi)
print(f"\rConverting...100%.")
def fromToken(self, s: Token):
return s.string
def fromBold(self, s: Bold):
return s.string
def fromItalic(self, s: Italic):
return s.string
def fromDeleteLine(self, s: DeleteLine):
return s.string
def fromUnderLine(self, s: UnderLine):
return s.string
def fromInCode(self, s: InCode):
return s.string
def fromInFormula(self, s: InFormula):
return s.string
def fromHyperlink(self, s: Hyperlink):
desc, link = s.desc, s.link
return f" {link},{desc} "
def fromFootnote(self, s: Footnote):
return s.label
def fromInImage(self, s: InImage):
return ""
def fromSection(self, s: Section):
level, content = s.level, s.content
content = self.fromTokenLine(s.content)
return f"{level}-{content}"
def fromImage(self, s: Image):
# cur_dir = os.getcwd()  # relative paths in the markdown are always resolved against that markdown file
# os.chdir(self.input_dir)
link = s.link
link = ImageTool.verify(link, self.image_dir, self.input_dir)
# os.chdir(cur_dir)
if config.give_rele_path:
link = os.path.relpath(link, self.output_dir)
link = link.replace("\\", "/")
return f" img,{link} "
def fromXML(self, token: XML):
return token.content
def fromTokenLine(self, s: TokenLine):
tokens = s.tokens
strs = []
for token in tokens:
if isinstance(token, Bold):
token = self.fromBold(token)
elif isinstance(token, XML):
token = self.fromXML(token)
elif isinstance(token, Italic):
token = self.fromItalic(token)
elif isinstance(token, DeleteLine):
token = self.fromDeleteLine(token)
elif isinstance(token, Footnote):
token = self.fromFootnote(token)
elif isinstance(token, UnderLine):
token = self.fromUnderLine(token)
elif isinstance(token, InCode):
token = self.fromInCode(token)
elif isinstance(token, InFormula):
token = self.fromInFormula(token)
elif isinstance(token, Hyperlink):
token = self.fromHyperlink(token)
elif isinstance(token, InImage):
token = self.fromInImage(token)
elif isinstance(token, Token):
token = self.fromToken(token)
else:
raise Exception(f"TokenLine error {token},{token.__class__.__name__}")
strs.append(token)
return "".join(strs)
def fromRawLine(self, s: RawLine):
return s.s
def fromNewLine(self, s: NewLine):
return "\n"
def fromParagraph(self, s: Paragraph):
t = []
# Section / NewLine / TokenLine / Image
for line in s.buffer:
if isinstance(line, Section):
line = self.fromSection(line)
elif isinstance(line, NewLine):
line = self.fromNewLine(line)
elif isinstance(line, TokenLine):
line = self.fromTokenLine(line)
elif isinstance(line, Image):
line = self.fromImage(line)
else:
raise Exception(f"Paragraph line error {line} is {line.__class__}")
t.append(line)
return "\n".join(t)
def fromQuote(self, s: Quote):
content = s.doc.content
q = []
for envi in content:
if isinstance(envi, Paragraph):
envi = self.fromParagraph(envi)
elif isinstance(envi, Table):
envi = self.fromTable(envi)
elif isinstance(envi, Itemize):
envi = self.fromItemize(envi)
elif isinstance(envi, Enumerate):
envi = self.fromEnumerate(envi)
elif isinstance(envi, Formula):
envi = self.fromFormula(envi)
elif isinstance(envi, Code):
envi = self.fromCode(envi)
else:
raise Exception(f"Quote doc error:{envi},{envi.__class__.__name__}")
q.append(envi)
return "\n".join(q)
def fromItemize(self, s: Itemize):
tokens = [self.fromTokenLine(c) for c in s.buffer]
ui = []
for line in tokens:
ui.append(f" - {line}")
return "\n".join(ui)
def fromMultiBox(self, s: MultiBox):
cl = []
for [ct, s] in s.lines:
cl.append(f"{ct} {s}")
return "\n".join(cl)
def fromEnumerate(self, s: Enumerate):
tokens = [self.fromTokenLine(c) for c in s.buffer]
ui = []
for i,line in enumerate(tokens):
ui.append(f"{i},{line}")
return "\n".join(ui)
def fromFormula(self, s: Formula):
code = [self.fromRawLine(c) for c in s.formula]
data = []
for line in code:
data.append(line)
return "\n".join(data)
def fromCode(self, s: Code):
code = [self.fromRawLine(c) for c in s.code]
c = []
for line in code:
c.append(line)
return "\n".join(c)
def fromTable(self, s: Table):
t = []
for i, row in enumerate(s.tables):
row = [self.fromTokenLine(c) for c in row]
t.append(" & ".join(row))
return "\n".join(t)
def generate_txt(self, filename=None):
'''
Just pass the file name; the output directory was already fixed when this object was created.
:param filename:
:return:
'''
filepath = os.path.join(self.output_dir, f"{filename}.txt")
with open(f"{filepath}","w",encoding="utf-8") as w:
w.writelines(self.contents)
print(f"File is output in {os.path.abspath(filepath)} and images is in {os.path.abspath(self.image_dir)}.")
| 2.25
| 2
|
output/models/nist_data/atomic/long/schema_instance/nistschema_sv_iv_atomic_long_total_digits_5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1
|
12774653
|
from output.models.nist_data.atomic.long.schema_instance.nistschema_sv_iv_atomic_long_total_digits_5_xsd.nistschema_sv_iv_atomic_long_total_digits_5 import NistschemaSvIvAtomicLongTotalDigits5
__all__ = [
"NistschemaSvIvAtomicLongTotalDigits5",
]
| 0.921875
| 1
|
PYTHON/OTP_Generator.py
|
hackerman-101/Hacktoberfest-2022
| 1
|
12774654
|
<filename>PYTHON/OTP_Generator.py<gh_stars>1-10
import random
import smtplib
sender="<EMAIL>"
rec=input("Enter a valid Email address :: ")
otp_ls=[]
for i in range(6):
otp_ls.append(str(random.randint(0,9)))
otp=""
otp="".join(otp_ls)
message=str(otp)
server=smtplib.SMTP("smtp.gmail.com",587)
server.starttls()
server.login(sender,"password")
server.sendmail(sender,rec,message)
confirm_otp=input("Enter OTP form your mail :: ")
if confirm_otp==otp:
print("OTP MATCHED.")
else :
print("Try Again")
| 3.5625
| 4
|
Lib/dbm/ndbm.py
|
sireliah/polish-python
| 1
|
12774655
|
<reponame>sireliah/polish-python<filename>Lib/dbm/ndbm.py
"""Provide the _dbm module jako a dbm submodule."""
z _dbm zaimportuj *
| 0.925781
| 1
|
core/mod_user.py
|
zihochann/zihou
| 0
|
12774656
|
<gh_stars>0
import threading
from django.contrib import auth
from django.template import loader
from django.contrib.auth.models import User
def user_login(request, username, password):
# Try to authorized.
user_obj = auth.authenticate(username=username, password=password)
if user_obj is None:
# Failed to login, return the error.
return False
# Now we have to login the data.
auth.login(request, user_obj)
# Correctly.
return True
def user_logout(request):
auth.logout(request)
def render_user_info(request):
return loader.render_to_string(
'user_manage.html',
{'struser': request.user.username},
request)
def user_update(request):
# Check the authorized.
user_obj = auth.authenticate(username=request.user.username,
password=request.POST.get('ck'))
if user_obj is None:
return {'status': 'error', 'info': 'もとパスワードが間違っています。'}
# Then update the user password.
user_obj.set_password(request.POST.get('nck'))
# Save the user object.
user_obj.save()
# Login with the new user.
auth.login(request, user_obj)
return {'status': 'ok'}
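# Hedged usage sketch (not part of the original module): wiring the helpers into a
# Django view; the view name, POST field names and import are illustrative only.
#     from django.http import HttpResponse
#     def login_view(request):
#         if user_login(request, request.POST.get('username'), request.POST.get('password')):
#             return HttpResponse(render_user_info(request))
#         return HttpResponse(status=401)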
| 2.3125
| 2
|
executor_exporter/executors.py
|
ygormutti/executor-exporter
| 1
|
12774657
|
from concurrent import futures
from functools import wraps
from typing import Callable, Optional
from executor_exporter.exporter import ExecutorExporter
from executor_exporter.proxy import InstrumentedExecutorProxy
class ThreadPoolExecutor(InstrumentedExecutorProxy, futures.ThreadPoolExecutor):
def __init__(
self,
max_workers=None,
thread_name_prefix="",
initializer=None,
initargs=(),
executor_id: Optional[str] = None,
) -> None:
exporter = ExecutorExporter(futures.ThreadPoolExecutor, executor_id)
initializer = _wrap_initializer(initializer, exporter)
executor = futures.ThreadPoolExecutor(
max_workers, thread_name_prefix, initializer, initargs
)
super().__init__(
executor,
exporter,
executor._max_workers, # type: ignore # should be public
)
class ProcessPoolExecutor(InstrumentedExecutorProxy, futures.ProcessPoolExecutor):
def __init__(
self,
max_workers=None,
mp_context=None,
initializer=None,
initargs=(),
executor_id: Optional[str] = None,
) -> None:
exporter = ExecutorExporter(futures.ProcessPoolExecutor, executor_id)
initializer = _wrap_initializer(initializer, exporter)
executor = futures.ProcessPoolExecutor(
max_workers, mp_context, initializer, initargs
)
super().__init__(
executor,
exporter,
executor._max_workers, # type: ignore # should be public
)
def _wrap_initializer(initializer: Callable, exporter: ExecutorExporter):
@wraps(initializer)
def wrapper(*args, **kwargs):
exporter.inc_initialized_workers()
if initializer is not None and callable(initializer):
initializer(*args, **kwargs)
return wrapper
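# Hedged usage sketch (not part of the original module): the classes above are intended
# as drop-in replacements for the concurrent.futures pools; "example-pool" is an
# illustrative executor_id and the proxy is assumed to forward submit()/map() calls.
#     with ThreadPoolExecutor(max_workers=4, executor_id="example-pool") as pool:
#         results = list(pool.map(str.upper, ["a", "b", "c"]))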
| 2.34375
| 2
|
infra_macros/fbcode_macros/tests/utils.py
|
martarozek/buckit
| 0
|
12774658
|
<reponame>martarozek/buckit
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import pkg_resources
import copy
import collections
import subprocess
import os
import tempfile
import shutil
import logging
import platform
import re
import stat
import StringIO
import textwrap
import unittest
import six
from future.utils import raise_with_traceback
try:
import ConfigParser as configparser
except ImportError:
import configparser
RunResult = collections.namedtuple(
"RunResult", ["returncode", "stdout", "stderr"]
)
UnitTestResult = collections.namedtuple(
"UnitTestResult", ["returncode", "stdout", "stderr", "debug_lines"]
)
AuditTestResult = collections.namedtuple(
"AuditTestResult", ["returncode", "stdout", "stderr", "files"]
)
def dedent(text):
return textwrap.dedent(text).strip()
def __recursively_get_files_contents(base):
"""
Recursively get all file contents for a given path from pkg_resources
Arguments:
base: The subdirectory to look in first. Use '' for the root
Returns:
Map of relative path to a string of the file's contents
"""
is_file = (
pkg_resources.resource_exists(__name__, base) and
not pkg_resources.resource_isdir(__name__, base)
)
if is_file:
return {base: pkg_resources.resource_string(__name__, base)}
ret = {}
for file in pkg_resources.resource_listdir(__name__, base):
full_path = os.path.join(base, file)
if not pkg_resources.resource_isdir(__name__, full_path):
ret[full_path] = pkg_resources.resource_string(__name__, full_path)
else:
ret.update(__recursively_get_files_contents(full_path))
return ret
def recursively_get_files_contents(base, strip_base):
"""
Recursively get all file contents for a given path from pkg_resources
Arguments:
base: The subdirectory to look in first. Use '' for the root
strip_base: If true, strip 'base' from the start of all paths that are
returned
Returns:
Map of relative path to a string of the file's contents
"""
ret = __recursively_get_files_contents(base)
if strip_base:
# + 1 is for the /
ret = {path[len(base) + 1:]: ret[path] for path in ret.keys()}
return ret
class Cell:
"""
Represents a repository. Files, .buckconfig, and running commands are all
done in this class
"""
def __init__(self, name, project):
self.name = name
self.buckconfig = collections.defaultdict(dict)
self.buckconfig["ui"]["superconsole"] = "disabled"
self.buckconfig["color"]["ui"] = "false"
self.project = project
self._directories = []
self._files = recursively_get_files_contents(name, True)
self._helper_functions = []
self._executable_files = set()
def addFile(self, relative_path, contents, executable=False):
"""
Add a file that should be written into this cell when running commands
"""
self._files[relative_path] = contents
if executable:
self._executable_files.add(relative_path)
def addResourcesFrom(self, relative_path):
"""
Add a file or directory from pkg_resources to this cell
"""
files = recursively_get_files_contents(relative_path, False)
self._files.update(files)
return files
def addDirectory(self, relative_path):
"""
Add an empty directory in this cell that will be created when commands
are run
"""
self._directories.append(relative_path)
def fullPath(self):
"""
Get the full path to this cell's root
"""
return os.path.join(self.project.project_path, self.name)
def updateBuckconfig(self, section, key, value):
"""
Update the .buckconfig for this cell
"""
self.buckconfig[section][key] = value
def updateBuckconfigWithDict(self, values):
"""
Update the .buckconfig for this cell with multiple values
Arguments:
values: A dictionary of dictionaries. The top level key is the
section. Second level dictionaries are mappings of fields
to values. .buckconfig is merged with 'values' taking
precedence
"""
for section, kvps in values.items():
for key, value in kvps.items():
self.buckconfig[section][key] = value
def createBuckconfigContents(self):
"""
Create contents of a .buckconfig file
"""
buckconfig = copy.deepcopy(self.buckconfig)
for cell_name, cell in self.project.cells.items():
relative_path = os.path.relpath(cell.fullPath(), self.fullPath())
buckconfig["repositories"][cell_name] = relative_path
if "polyglot_parsing_enabled" not in buckconfig["parser"]:
buckconfig["parser"]["polyglot_parsing_enabled"] = "true"
if "default_build_file_syntax" not in buckconfig["parser"]:
buckconfig["parser"]["default_build_file_syntax"] = "SKYLARK"
if "cxx" not in buckconfig or "cxx" not in buckconfig["cxx"]:
cxx = self._findCXX()
cc = self._findCC()
if cxx:
buckconfig["cxx"]["cxx"] = cxx
buckconfig["cxx"]["cxxpp"] = cxx
buckconfig["cxx"]["ld"] = cxx
if cc:
buckconfig["cxx"]["cc"] = cc
buckconfig["cxx"]["cpp"] = cc
buckconfig["cxx"]["aspp"] = cc
if ("fbcode" not in buckconfig or
"buck_platform_format" not in buckconfig["fbcode"]):
buckconfig["fbcode"]["buck_platform_format"] = "{platform}-{compiler}"
parser = configparser.ConfigParser()
for section, kvps in buckconfig.items():
if len(kvps):
parser.add_section(section)
for key, value in kvps.items():
if isinstance(value, list):
value = ",".join(value)
parser.set(section, key, str(value))
writer = StringIO.StringIO()
try:
parser.write(writer)
return writer.getvalue()
finally:
writer.close()
def _findCC(self):
for path_component in os.environ.get("PATH").split(os.pathsep):
for bin in ("gcc.par", "gcc"):
full_path = os.path.join(path_component, bin)
if os.path.exists(full_path):
return full_path
return None
def _findCXX(self):
for path_component in os.environ.get("PATH").split(os.pathsep):
for bin in ("g++.par", "g++"):
full_path = os.path.join(path_component, bin)
if os.path.exists(full_path):
return full_path
return None
def setupFilesystem(self):
"""
Sets up the filesystem for this cell in self.fullPath()
This method:
- creates all directories
- creates all specified files and their parent directories
- writes out a .buckconfig file
"""
cell_path = self.fullPath()
if not os.path.exists(cell_path):
os.makedirs(cell_path)
for directory in self._directories:
dir_path = os.path.join(cell_path, directory)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
for path, contents in self._files.items():
self.writeFile(path, contents, executable=path in self._executable_files)
buckconfig = self.createBuckconfigContents()
with open(os.path.join(cell_path, ".buckconfig"), "w") as fout:
fout.write(buckconfig)
def setupAllFilesystems(self):
"""
Sets up the filesystem per self.setupFilesystem for this cell and
all others
"""
for cell in self.project.cells.values():
cell.setupFilesystem()
def writeFile(self, relative_path, contents, executable=False):
"""
Writes out a file into the cell, making parent dirs if necessary
"""
cell_path = self.fullPath()
dir_path, filename = os.path.split(relative_path)
full_dir_path = os.path.join(cell_path, dir_path)
file_path = os.path.join(cell_path, relative_path)
if dir_path and not os.path.exists(full_dir_path):
os.makedirs(full_dir_path)
with open(file_path, "w") as fout:
fout.write(contents)
if executable:
os.chmod(file_path, os.stat(file_path).st_mode | stat.S_IEXEC)
def getDefaultEnvironment(self):
# We don't start a daemon up because:
# - Generally we're only running once, and in a temp dir, so it doesn't
# make a big difference
# - We want to make sure things clean up properly, and this is just
# easier
ret = dict(os.environ)
if not self.project.run_buckd:
ret["NO_BUCKD"] = "1"
elif "NO_BUCKD" in ret:
del ret["NO_BUCKD"]
return ret
def run(self, cmd, extra_files, environment_overrides):
"""
Runs a command
Arguments:
cmd: A list of arguments that comprise the command to be run
extra_files: A dictionary of relative path: contents that should be
written after the rest of the files are written out
environment_overrides: A dictionary of extra environment variables
that should be set
Returns:
The RunResult from running the command
"""
self.setupAllFilesystems()
for path, contents in extra_files.items():
self.writeFile(path, contents)
environment = self.getDefaultEnvironment()
environment.update(environment_overrides or {})
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.fullPath(),
env=environment,
)
stdout, stderr = proc.communicate()
return RunResult(proc.returncode, stdout, stderr)
def _parseAuditOutput(self, stdout, files):
ret = {file: None for file in files}
current_file = None
file_contents = ""
for line in stdout.splitlines():
if line.startswith("#"):
found_filename = line.strip("# ")
if found_filename in ret:
if current_file is not None:
ret[current_file] = file_contents.strip()
current_file = found_filename
file_contents = ""
continue
if found_filename is None and line:
raise Exception(
(
"Got line, but no filename has been found so far.\n"
"Line: {}\n"
"stdout:\n{}"
).format(line, stdout)
)
file_contents += line + "\n"
ret[current_file] = file_contents.strip()
return ret
def runAudit(self, buck_files, environment=None):
"""
A method to compare existing outputs of `buck audit rules` to new ones
and ensure that the final product works as expected
Arguments:
buck_files: A list of relative paths to buck files that should have
`buck audit rules` run on them.
Returns:
An AuditTestResult object, with returncode, stdout, stderr filled
out, and files set as a dictionary of files names -> trimmed
content returned from buck audit rules
"""
default_env = {}
if not self.project.run_buckd:
default_env["NO_BUCKD"] = "1"
default_env.update(environment or {})
environment = default_env
cmd = ["buck", "audit", "rules"] + buck_files
ret = self.run(cmd, {}, environment)
audit_files = self._parseAuditOutput(ret.stdout, buck_files)
return AuditTestResult(
returncode=ret.returncode,
stdout=ret.stdout,
stderr=ret.stderr,
files=audit_files
)
def runUnitTests(
self,
includes,
statements,
extra_declarations=None,
environment_overrides=None,
buckfile="BUCK",
):
"""
Evaluate a series of statements, parse their repr from buck, and
return reconsituted objects, along with the results of buck audit rules
on an auto-generated file
Arguments:
includes: A list of tuples to be used in a `load()` statement. The
first element should be the .bzl file to be included.
Subsequent elements are variables that should be loaded
from the .bzl file. (e.g. `("//:test.bzl", "my_func")`)
statements: A list of statements that should be evaluated. This
is usually just a list of function calls. This can
reference things specified in `extra_declarations`.
e.g. after importing a "config" struct:
[
"config.foo",
"config.bar(1, 2, {"baz": "string"})"
]
would run each of those statements.
extra_declarations: If provided, a list of extra code that should
go at the top of the generated BUCK file. This
isn't normally needed, but if common data
objects are desired for use in multiple
statements, it can be handy
environment_overrides: If provided, the environment to merge over
the top of the generated environment when
executing buck. If not provided, then a
generated environment is used.
buckfile: The name of the temporary buckfile to write
Returns:
A UnitTestResult object that contains the returncode, stdout,
stderr, of buck audit rules, as well as any deserialized objects
from evaluating statements. If the file could be parsed properly
and buck returns successfully, debug_lines will contain the objects
in the same order as 'statements'
"""
# We don't start a daemon up because:
# - Generally we're only running once, and in a temp dir, so it doesn't
# make a big difference
# - We want to make sure things clean up properly, and this is just
# easier
buck_file_content = ""
if len(statements) == 0:
raise ValueError("At least one statement must be provided")
for include in includes:
if len(include) < 2:
raise ValueError(
"include ({}) must have at least two elements: a path to "
"include, and at least one var to import".format(include)
)
vars = ",".join(('"' + var + '"' for var in include[1:]))
buck_file_content += 'load("{}", {})\n'.format(include[0], vars)
buck_file_content += extra_declarations or ""
buck_file_content += "\n"
for statement in statements:
buck_file_content += (
'print("TEST_RESULT: %r" % ({}))\n'.format(statement)
)
cmd = ["buck", "audit", "rules", buckfile]
result = self.run(
cmd, {buckfile: buck_file_content}, environment_overrides
)
debug_lines = []
for line in result.stderr.split("\n"):
# python. Sample line: | TEST_RESULT: {}
if line.startswith("| TEST_RESULT:"):
debug_lines.append(
self._convertDebug(line.split(":", 1)[-1].strip()))
# Sample line: DEBUG: /Users/user/temp/BUCK:1:1: TEST_RESULT: "hi"
elif line.startswith("DEBUG: ") and "TEST_RESULT:" in line:
debug_lines.append(
self._convertDebug(line.split(":", 5)[-1].strip()))
return UnitTestResult(
returncode=result.returncode,
stdout=result.stdout,
stderr=result.stderr,
debug_lines=debug_lines,
)
def _convertDebug(self, string):
"""
Converts a TEST_RESULT line generated by run_unittests into a real
object. Functions are turned into 'function' named tuples, and structs
are also turned into a namedtuple
"""
def struct(**kwargs):
return collections.namedtuple("struct",
sorted(kwargs.keys()))(**kwargs)
def function(name):
return collections.namedtuple("function", ["name"])(name)
string = re.sub(r'<function (\w+)>', r'function("\1")', string)
# Yup, eval.... this lets us have nested struct objects easily so we
# can do stricter type checking
return eval(
string, {
"__builtins__":
{
"struct": struct,
"function": function,
"True": True,
"False": False,
}
}, {}
)
class Project:
"""
An object that represents all cells for a run, and handles creating and
cleaning up the temp directory that we work in
"""
def __init__(
self,
remove_files=None,
add_fbcode_macros_cell=True,
add_skylib_cell=True,
run_buckd=False
):
"""
Create an instance of Project
Arguments:
remove_files: Whether files should be removed when __exit__ is
called
add_fbcode_macros_cell: Whether to create the fbcode_macros cell
when __enter__ is called
add_skylib_cell: Whether to create the skylib cell when __enter__
is called
"""
if remove_files is None:
remove_files = os.environ.get('FBCODE_MACROS_KEEP_DIRS') != '1'
self.root_cell = None
self.project_path = None
self.remove_files = remove_files
self.add_fbcode_macros_cell = add_fbcode_macros_cell
self.add_skylib_cell = add_skylib_cell
self.cells = {}
self.run_buckd = run_buckd
def __enter__(self):
self.project_path = tempfile.mkdtemp()
self.root_cell = self.addCell("root")
self.root_cell.addResourcesFrom(".buckversion")
if self.add_fbcode_macros_cell:
self.addCell("fbcode_macros")
if self.add_skylib_cell:
self.addCell("bazel_skylib")
return self
def __exit__(self, type, value, traceback):
self.killBuckd()
if self.project_path:
if self.remove_files:
shutil.rmtree(self.project_path)
else:
logging.info(
"Not deleting temporary files at {}".format(
self.project_path
)
)
def killBuckd(self):
for cell in self.cells.values():
cell_path = cell.fullPath()
if os.path.exists(os.path.join(cell_path, ".buckd")):
try:
with open(os.devnull, "w") as dev_null:
subprocess.check_call(
["buck", "kill"],
stdout=dev_null,
stderr=dev_null,
cwd=cell_path,
)
except subprocess.CalledProcessError as e:
print("buck kill failed: {}".format(e))
def addCell(self, name):
"""Add a new cell"""
if name in self.cells:
raise ValueError("Cell {} already exists".format(name))
new_cell = Cell(name, self)
self.cells[name] = new_cell
return new_cell
def with_project(
use_skylark=True, use_python=True, *project_args, **project_kwargs
):
"""
Annotation that makes a project available to a test. This passes the root
cell to the function being annotated and tears down the temporary
directory (by default, can be overridden) when the method finishes executing
"""
if not use_python and not use_skylark:
raise ValueError("Either use_python or use_skylark must be set")
def wrapper(f):
suffixes = getattr(f, "_function_suffixes", set())
if use_skylark:
suffixes.add("skylark")
if use_python:
suffixes.add("python")
if suffixes:
setattr(f, "_function_suffixes", suffixes)
@functools.wraps(f)
def inner_wrapper(suffix, *args, **kwargs):
if suffix == "skylark":
build_file_syntax = "SKYLARK"
elif suffix == "python":
build_file_syntax = "PYTHON_DSL"
else:
raise ValueError("Unknown parser type: %s" % suffix)
with Project(*project_args, **project_kwargs) as project:
new_args = args + (project.root_cell, )
if isinstance(new_args[0], TestCase):
new_args[0].setUpProject(project.root_cell)
project.root_cell.updateBuckconfig(
"parser", "default_build_file_syntax", build_file_syntax
)
f(*new_args, **kwargs)
return inner_wrapper
return wrapper
class TestMethodRenamer(type):
"""
Simple metaclass that renames test_foo to test_foo_skylark and
test_foo_python
"""
def __new__(metacls, name_, bases, classdict):
bases = bases or []
newclassdict = {}
for name, attr in classdict.items():
suffixes = getattr(attr, "_function_suffixes", None)
if callable(attr) and suffixes:
for suffix in suffixes:
new_name = str(name + "_" + suffix)
assert new_name not in newclassdict
# The extra lambda is so that things get bound properly.
def call_with_suffix(s=suffix, a=attr):
def inner(*args, **kwargs):
return a(s, *args, **kwargs)
inner.__name__ = new_name
return inner
newclassdict[new_name] = call_with_suffix()
else:
newclassdict[name] = attr
return type.__new__(metacls, name_, bases, newclassdict)
@six.add_metaclass(TestMethodRenamer)
class TestCase(unittest.TestCase):
maxDiff = None
setupPathsConfig = True
setupThirdPartyConfig = True
setupPlatformOverrides = True
setupBuildOverrides = True
setupCoreToolsTargets = True
def setUpProject(self, root):
# Setup some defaults for the environment
if self.setupPathsConfig:
self.addPathsConfig(root)
if self.setupThirdPartyConfig:
self.addDummyThirdPartyConfig(root)
if self.setupPlatformOverrides:
self.addDummyPlatformOverrides(root)
if self.setupBuildOverrides:
self.addDummyBuildModeOverrides(root)
if self.setupCoreToolsTargets:
self.addDummyCoreToolsTargets(root)
def addDummyCoreToolsTargets(self, root):
root.project.cells["fbcode_macros"].writeFile(
"build_defs/core_tools_targets.bzl",
dedent(
"""
load("@bazel_skylib//lib:new_sets.bzl", "sets")
core_tools_targets = sets.make([])
"""))
def addDummyThirdPartyConfig(self, root):
current_arch = platform.machine()
other_arch = "x86_64" if current_arch == "aarch64" else "aarch64"
third_party_config = dedent(
"""\
third_party_config = {{
"platforms": {{
"gcc5": {{
"architecture": "{current_arch}",
"tools": {{}},
}},
"gcc6": {{
"architecture": "{current_arch}",
"tools": {{}},
}},
"gcc7": {{
"architecture": "{current_arch}",
"tools": {{}},
}},
"gcc5-other": {{
"architecture": "{other_arch}",
"tools": {{}},
}},
}},
}}
""".format(current_arch=current_arch, other_arch=other_arch)
)
root.project.cells["fbcode_macros"].addFile(
"build_defs/third_party_config.bzl", third_party_config
)
def addDummyPlatformOverrides(self, root):
platform_overrides = dedent(
"""\
platform_overrides = {
"fbcode": {
"foo/bar": ["gcc5", "gcc5-other"],
"foo": ["gcc7"],
},
}
"""
)
root.project.cells["fbcode_macros"].addFile(
"build_defs/platform_overrides.bzl", platform_overrides
)
def addDummyBuildModeOverrides(self, root):
build_mode_overrides = dedent(
"""
load(
"@fbcode_macros//build_defs:create_build_mode.bzl",
"create_build_mode",
)
def dev():
return {
"dev": create_build_mode(c_flags=["-DDEBUG"]),
}
def dbg():
return {
"dbg": create_build_mode(c_flags=["-DDEBUG"]),
}
def opt():
return {
"opt": create_build_mode(c_flags=["-DDEBUG"]),
}
build_mode_overrides = {"fbcode": {
"foo/bar": dev,
"foo/bar-other": dbg,
"foo": opt,
}}
"""
)
root.project.cells["fbcode_macros"].addFile(
"build_defs/build_mode_overrides.bzl", build_mode_overrides
)
def addPathsConfig(
self,
root,
third_party_root="third-party-buck",
use_platforms_and_build_subdirs=True
):
paths_config = dedent(
"""
paths_config = struct(
third_party_root="%s",
use_platforms_and_build_subdirs=%r,
)
""" % (third_party_root, use_platforms_and_build_subdirs)
)
root.project.cells["fbcode_macros"].addFile(
"build_defs/paths_config.bzl", paths_config
)
def assertSuccess(self, result, *expected_results):
""" Make sure that the command ran successfully """
self.assertEqual(
0, result.returncode,
"Expected zero return code\nSTDOUT:\n{}\nSTDERR:\n{}\n".format(
result.stdout, result.stderr
)
)
if expected_results:
self.assertEqual(list(expected_results), result.debug_lines)
def assertFailureWithMessage(
self, result, expected_message, *other_messages
):
""" Make sure that we failed with a substring in stderr """
self.assertNotEqual(0, result.returncode)
try:
self.assertIn(expected_message, result.stderr)
except AssertionError as e:
# If we get a parse error, it's a lot easier to read as multiple
# lines, rather than a single line with \n in it
e.args = (e.args[0].replace(r'\n', '\n'),) + e.args[1:]
raise e
for message in other_messages:
self.assertIn(message, result.stderr)
def validateAudit(self, expected_results, result):
"""
Validate that audit results are as expected
Validates that all requested files came out in the audit comment. Also
validates that the command ran successfully, and that the contents
are as expected
Arguments:
expected_results: A dictionary of file name to contents
result: An AuditTestResult object
"""
self.assertSuccess(result)
empty_results = [
file for file, contents in result.files.items() if contents is None
]
try:
self.assertEqual([], empty_results)
except AssertionError as e:
raise_with_traceback(
AssertionError(
"Got a list of files that had empty contents: {!r}\n{}".
format(empty_results, str(e))
)
)
try:
self.assertEqual(
sorted(expected_results.keys()),
sorted(result.files.keys()))
except AssertionError as e:
raise_with_traceback(
AssertionError(
"Parsed list of files != expected list of files:\n{}".
format(str(e))
)
)
for file, contents in result.files.items():
try:
self.assertEqual(expected_results[file], contents)
except AssertionError as e:
raise_with_traceback(
AssertionError(
"Content of {} differs:\n{}".format(file, str(e))
)
)
def struct(self, **kwargs):
"""
Creates a namedtuple that can be compared to 'struct' objects that
are parsed in unittests
"""
return collections.namedtuple("struct", sorted(kwargs.keys()))(**kwargs)
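# Hedged usage sketch (not part of the original module): a minimal test built on the
# helpers above; the .bzl path and expected value mirror addPathsConfig's defaults but
# are illustrative only.
#     class ExamplePathsTest(TestCase):
#         @with_project()
#         def test_third_party_root(self, root):
#             result = root.runUnitTests(
#                 [("@fbcode_macros//build_defs:paths_config.bzl", "paths_config")],
#                 ["paths_config.third_party_root"],
#             )
#             self.assertSuccess(result, "third-party-buck")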
| 2
| 2
|
typic/constraints/error.py
|
ducminhgd/typical
| 0
|
12774659
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
class ConstraintSyntaxError(SyntaxError):
"""A generic error indicating an improperly defined constraint."""
pass
class ConstraintValueError(ValueError):
"""A generic error indicating a value violates a constraint."""
pass
| 1.921875
| 2
|
lightconvpoint/nn/deprecated/convolutions_old/convolution.py
|
valeoai/POCO
| 13
|
12774660
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import ceil
from lightconvpoint.nn.deprecated import Module
from lightconvpoint.spatial.deprecated import knn, sampling_quantized
from lightconvpoint.utils.functional import batch_gather
class ConvBase(Module):
"""FKAConv convolution layer.
To be used with a `lightconvpoint.nn.Conv` instance.
# Arguments
in_channels: int.
The number of input channels.
out_channels: int.
The number of output channels.
kernel_size: int.
The size of the kernel.
bias: Boolean.
Defaults to `False`. Add an optimizable bias.
dim: int.
Defaults to `3`. Spatial dimension.
# Forward arguments
input: 3-D torch tensor.
The input features. Dimensions are (B, I, N) with B the batch size, I the
number of input channels and N the number of input points.
points: 3-D torch tensor.
The input points. Dimensions are (B, D, N) with B the batch size, D the
dimension of the spatial space and N the number of input points.
support_points: 3-D torch tensor.
The support points to project features on. Dimensions are (B, O, N) with B
the batch size, O the number of output channels and N the number of input
points.
# Returns
features: 3-D torch tensor.
The computed features. Dimensions are (B, O, N) with B the batch size,
O the number of output channels and N the number of input points.
support_points: 3-D torch tensor.
The support points. If they were provided as an input, return the same
tensor.
"""
def __init__(self, sampling=sampling_quantized, neighborhood_search=knn, ratio=1, neighborhood_size=16, **kwargs):
super().__init__()
# spatial part of the module
self.sampling = sampling
self.neighborhood_search = neighborhood_search
self.neighborhood_size = neighborhood_size
self.ratio = ratio
def forward_without_features(self, pos, support_points=None, indices=None):
if support_points is not None:
assert(isinstance(support_points, list))
ids = self.neighborhood_search(pos, support_points[0], self.neighborhood_size)
return None, support_points, [ids]
else:
if self.ratio == 1:
ids = self.neighborhood_search(pos, pos, self.neighborhood_size)
return None, [pos], [ids]
else:
_, support_points = self.sampling(pos, ratio=self.ratio, return_support_points=True)
ids = self.neighborhood_search(pos, support_points, self.neighborhood_size)
return None, [support_points], [ids]
def forward_with_features(self, x: torch.Tensor, pos: torch.Tensor, support_points: list, indices:list):
"""Computes the features associated with the support points."""
raise NotImplementedError
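# Hedged usage sketch (not part of the original module): running only the spatial part of
# the layer on a random point cloud; the tensor shapes and ratio are illustrative only.
#     pos = torch.rand(2, 3, 1024)  # (batch, spatial dim, points)
#     conv = ConvBase(ratio=0.5, neighborhood_size=16)
#     _, support_points, indices = conv.forward_without_features(pos)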
| 2.40625
| 2
|
CALCULADORA.py
|
Capricornio23/CALCULADORA
| 0
|
12774661
|
#!/usr/bin/python3
import sys
import os
import time
import os as sistema
# Set color
R = '\033[31m' # Red
N = '\033[1;37m' # White
G = '\033[32m' # Green
O = '\033[0;33m' # Orange
B = '\033[1;34m' #Blue
print (""+O+"")
os.system('clear')
def pedirOpcionCorrecta():
correcto=False
num=0
while(not correcto):
try:
num = int(input("Elige una opcion: "))
correcto=True
except ValueError:
print('Error, Elige una opcion correcta: ')
return num
salir = False
opcion = 0
while not salir:
print ("""
_ _ _ _ __ __ __ _
/ ) /_| / / ) / / / /_| / ) / ) /__) /_|
(__ ( | (__ (__ (__/ (__ ( | /(_/ (__/ / ( ( | """)
print ("")
print ("""
_ _ _ _ _ _ _ _ _ _ _
| |
| 1. SUMA |
| |
| 2. RESTA |
| |
| 3. MULTIPLICACION |
| |
| 4. DIVISION |
| |
| 5. POTENCIACION |
| |
| 6. SALIR |
|_ _ _ _ _ _ _ _ _ _ _|""")
print ("")
opcion = pedirOpcionCorrecta()
if opcion == 1:
print ("")
print ("Suma")
print ("")
a = int(input("Ingresa el primer valor: "))
b = int(input("Ingresa el segundo valor: "))
suma = a + b
print ("El resultado es:", suma)
time.sleep(6)
os.system('clear')
elif opcion == 2:
print ("")
print ("Resta")
print ("")
a = int(input("Ingresa el primer valor: "))
b = int(input("Ingresa el segundo valor: "))
resta = a - b
print ("El resultado es:", resta)
time.sleep(6)
os.system('clear')
elif opcion == 3:
print ("")
print ("Multiplicacion")
print ("")
a = int(input("Ingresa el primer valor: "))
b = int(input("Ingresa el segundo valor: "))
multi = a * b
print ("El resultado es:", multi)
time.sleep(6)
os.system('clear')
elif opcion == 4:
print ("")
print ("Division")
print ("")
a = int(input("Ingresa el primer valor: "))
b = int(input("Ingresa el segundo valor: "))
divi = a / b
print ("El resultado es:", divi)
time.sleep(6)
os.system('clear')
elif opcion == 5:
print ("")
print ("Potenciacion")
print ("")
a = int(input("Ingresa el primer valor: "))
b = int(input("Ingresa el segundo valor: "))
poten = a ** b
print ("El resultado es:", poten)
time.sleep(6)
os.system('clear')
elif opcion == 6:
salir = True
else:
print ("Introduce un numero entre 1 y 6")
print ("¡Fín!, espero le haya gustado esta herranienta, hasta luego")
print ("")
| 3.78125
| 4
|
ast_language/ast_util.py
|
gordonwatts/ast-language
| 0
|
12774662
|
<reponame>gordonwatts/ast-language
import ast
def wrap_ast(node):
return ast.Module(body=[ast.Expr(value=node)])
def unwrap_ast(node):
return node.body[0].value
class SourceRemover(ast.NodeTransformer):
def __init__(self, source_name):
self.source_name = source_name
def visit_Attribute(self, node):
if isinstance(node.value, ast.Name) and node.value.id == self.source_name:
return ast.Name(id=node.attr)
else:
return self.generic_visit(node)
class PythonASTToColumns(ast.NodeVisitor):
def __init__(self):
self.n_selects = 0
def generic_visit(self, node):
raise SyntaxError('Unsupported node type: ' + str(type(node)))
def visit_Module(self, node):
n_children = len(node.body)
if n_children == 0:
return ''
elif n_children == 1:
return self.visit(node.body[0])
else:
raise SyntaxError('A record must contain zero or one expressions; found '
+ str(n_children))
def visit_Expr(self, node):
return self.visit(node.value)
def visit_Name(self, node):
return node.id
def visit_Attribute(self, node):
return self.visit(node.value) + '.' + node.attr
def visit_Call(self, node):
return self.visit(node.func) + '()'
def visit_Select(self, node):
if self.n_selects != 0:
raise SyntaxError('Nested selects are not supported')
self.n_selects += 1
if not isinstance(node.selector, ast.Lambda):
raise SyntaxError('Selector must be a lambda; found ' + str(type(node.selector)))
if len(node.selector.args.args) != 1:
raise SyntaxError('Selector must have exactly one argument; found '
+ str(len(node.selector.args.args)))
source_name = node.selector.args.args[0].arg
body = SourceRemover(source_name).visit(node.selector.body)
if isinstance(body, ast.List) or isinstance(body, ast.Tuple):
return ', '.join(self.visit(element) for element in body.elts)
else:
return self.visit(body)
def python_ast_to_columns(python_ast):
return PythonASTToColumns().visit(python_ast)
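# Hedged usage sketch (not part of the original module): the expression below is
# illustrative only and exercises the Name/Attribute/Call visitors defined above.
if __name__ == "__main__":
    example = wrap_ast(ast.parse("evt.jets.pt()", mode="eval").body)
    # Prints the column-style string rebuilt from the AST, e.g. "evt.jets.pt()".
    print(python_ast_to_columns(example))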
| 2.71875
| 3
|
akebono/exceptions.py
|
OTA2000/akebono
| 3
|
12774663
|
<filename>akebono/exceptions.py
class EmptyDatasetError(Exception):
pass
| 0.984375
| 1
|
src/tandlr/feedbacks/api.py
|
shrmoud/schoolapp
| 0
|
12774664
|
<filename>src/tandlr/feedbacks/api.py<gh_stars>0
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from tandlr.api.v2.routers import router
from tandlr.core.api import mixins
from tandlr.core.api.viewsets import GenericViewSet
from tandlr.feedbacks.models import Feedback
from tandlr.feedbacks.serializers import (
FeedbackBaseV2Serializer,
FeedbackDocV2Serializer,
FeedbackV2Serializer
)
from tandlr.scheduled_classes.models import Class
from tandlr.scheduled_classes.serializers import SessionListV2Serializer
class FeedbackViewSet(
mixins.CreateModelMixin,
GenericViewSet):
queryset = Feedback.objects.all()
serializer_class = FeedbackDocV2Serializer
create_serializer_class = FeedbackBaseV2Serializer
retrieve_serializer_class = FeedbackV2Serializer
def create(self, request, *args, **kwargs):
"""
Allows the current user to create a new session feedback.
---
request_serializer: FeedbackDocV2Serializer
response_serializer: FeedbackV2Serializer
responseMessages:
- code: 201
message: CREATED
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
session = get_object_or_404(Class, pk=request.data['session'])
request.data['is_feedback_teacher'] = request.user != session.teacher
request.data['feedback_class'] = session.id
if request.data['is_feedback_teacher']:
request.data['feedback_from_user'] = request.user.id
request.data['feedback_to_user'] = session.teacher.id
else:
request.data['feedback_from_user'] = request.user.id
request.data['feedback_to_user'] = session.student.id
return super(FeedbackViewSet, self).create(request, *args, **kwargs)
class PendingFeedbackViewset(
mixins.ListModelMixin,
GenericViewSet):
serializer_class = SessionListV2Serializer
list_serializer_class = SessionListV2Serializer
def list(self, request, *args, **kwargs):
"""
Returns the session's pending feedbacks.
---
response_serializer: SessionListV2Serializer
parameters:
- name: role
type: boolean
required: false
in: query
responseMessages:
- code: 200
message: OK
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
return super(
PendingFeedbackViewset, self).list(request, *args, **kwargs)
def get_queryset(self):
role = self.request.query_params.get('role', 'student')
sessions_taken_ids = Feedback.objects.filter(
feedback_from_user=self.request.user
).values('feedback_class')
#
# Exclude the sessions that have been rated and filter only the ended
# sessions.
#
queryset = Class.objects.exclude(id__in=sessions_taken_ids).filter(
class_status=5
)
if role == 'teacher':
queryset = queryset.filter(teacher=self.request.user)
else:
queryset = queryset.filter(student=self.request.user)
return queryset
router.register(
'feedbacks/pendings',
PendingFeedbackViewset,
'feedbacks/pendings'
)
router.register(
'feedbacks',
FeedbackViewSet,
base_name='feedbacks'
)
| 2.0625
| 2
|
assignment2/10.py
|
cseas/pap
| 0
|
12774665
|
import re
f = open("regex.txt", "r")
content = f.readlines()
# s = 'A message from <EMAIL> to <EMAIL>'
for i in range(len(content)):
if re.findall('[\w\.]+@[\w\.]+', content[i]):
print(content[i], end='')
| 3.328125
| 3
|
src/example/sony_camera_liveview.py
|
willywongi/sony_camera_api
| 0
|
12774666
|
from pysony import SonyAPI, ControlPoint
import time
flask_app = None
try:
import flask
from flask import Flask
flask_app = Flask(__name__)
except ImportError:
print("Cannot import `flask`, liveview on web is not available")
if flask_app:
flask_app.get_frame_handle = None
flask_app.config['DEBUG'] = False
@flask_app.route("/")
def index():
return flask.render_template_string("""
<html>
<head>
<title>SONY Camera LiveView Streaming</title>
</head>
<body>
<h1>SONY LiveView Streaming</h1>
<img src="{{ url_for('video_feed') }}">
</body>
</html>
""")
def gen():
while True:
if flask_app.get_frame_handle is not None:
frame = flask_app.get_frame_handle()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@flask_app.route('/video_feed')
def video_feed():
return flask.Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')
def liveview():
# Connect and set-up camera
search = ControlPoint()
cameras = search.discover(5)
if len(cameras):
camera = SonyAPI(QX_ADDR=cameras[0])
else:
print("No camera found, aborting")
quit()
mode = camera.getAvailableApiList()
# some cameras need `startRecMode` before we can use liveview
# For cameras that don't require this, just comment out the following 2 lines
if 'startRecMode' in (mode['result'])[0]:
camera.startRecMode()
time.sleep(2)
sizes = camera.getLiveviewSize()
print('Supported liveview size:', sizes)
# url = camera.liveview("M")
url = camera.liveview()
lst = SonyAPI.LiveviewStreamThread(url)
lst.start()
print('[i] LiveviewStreamThread started.')
return lst.get_latest_view
if __name__ == "__main__":
handler = liveview()
if flask_app:
flask_app.get_frame_handle = handler
flask_app.run()
| 2.75
| 3
|
mslib/msui/qt5/ui_remotesensing_dockwidget.py
|
iamansoni/MSS
| 33
|
12774667
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/ui_remotesensing_dockwidget.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_RemoteSensingDockWidget(object):
def setupUi(self, RemoteSensingDockWidget):
RemoteSensingDockWidget.setObjectName("RemoteSensingDockWidget")
RemoteSensingDockWidget.resize(465, 146)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(RemoteSensingDockWidget.sizePolicy().hasHeightForWidth())
RemoteSensingDockWidget.setSizePolicy(sizePolicy)
self.verticalLayout = QtWidgets.QVBoxLayout(RemoteSensingDockWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.lbObsAngle = QtWidgets.QLabel(RemoteSensingDockWidget)
self.lbObsAngle.setObjectName("lbObsAngle")
self.horizontalLayout.addWidget(self.lbObsAngle)
self.dsbObsAngleAzimuth = QtWidgets.QDoubleSpinBox(RemoteSensingDockWidget)
self.dsbObsAngleAzimuth.setDecimals(0)
self.dsbObsAngleAzimuth.setMinimum(-180.0)
self.dsbObsAngleAzimuth.setMaximum(180.0)
self.dsbObsAngleAzimuth.setSingleStep(15.0)
self.dsbObsAngleAzimuth.setObjectName("dsbObsAngleAzimuth")
self.horizontalLayout.addWidget(self.dsbObsAngleAzimuth)
self.label = QtWidgets.QLabel(RemoteSensingDockWidget)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.dsbObsAngleElevation = QtWidgets.QDoubleSpinBox(RemoteSensingDockWidget)
self.dsbObsAngleElevation.setMinimum(-90.0)
self.dsbObsAngleElevation.setMaximum(90.0)
self.dsbObsAngleElevation.setObjectName("dsbObsAngleElevation")
self.horizontalLayout.addWidget(self.dsbObsAngleElevation)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.cbDrawTangents = QtWidgets.QCheckBox(RemoteSensingDockWidget)
self.cbDrawTangents.setMinimumSize(QtCore.QSize(145, 0))
self.cbDrawTangents.setObjectName("cbDrawTangents")
self.horizontalLayout_5.addWidget(self.cbDrawTangents)
self.btTangentsColour = QtWidgets.QPushButton(RemoteSensingDockWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btTangentsColour.sizePolicy().hasHeightForWidth())
self.btTangentsColour.setSizePolicy(sizePolicy)
self.btTangentsColour.setMinimumSize(QtCore.QSize(135, 0))
self.btTangentsColour.setLayoutDirection(QtCore.Qt.LeftToRight)
self.btTangentsColour.setObjectName("btTangentsColour")
self.horizontalLayout_5.addWidget(self.btTangentsColour)
self.dsbTangentHeight = QtWidgets.QDoubleSpinBox(RemoteSensingDockWidget)
self.dsbTangentHeight.setMinimumSize(QtCore.QSize(0, 0))
self.dsbTangentHeight.setPrefix("")
self.dsbTangentHeight.setObjectName("dsbTangentHeight")
self.horizontalLayout_5.addWidget(self.dsbTangentHeight)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.cbShowSolarAngle = QtWidgets.QCheckBox(RemoteSensingDockWidget)
self.cbShowSolarAngle.setMinimumSize(QtCore.QSize(145, 0))
self.cbShowSolarAngle.setObjectName("cbShowSolarAngle")
self.horizontalLayout_6.addWidget(self.cbShowSolarAngle)
self.cbSolarAngleType = QtWidgets.QComboBox(RemoteSensingDockWidget)
self.cbSolarAngleType.setObjectName("cbSolarAngleType")
self.cbSolarAngleType.addItem("")
self.cbSolarAngleType.addItem("")
self.cbSolarAngleType.addItem("")
self.horizontalLayout_6.addWidget(self.cbSolarAngleType)
self.cbSolarBody = QtWidgets.QComboBox(RemoteSensingDockWidget)
self.cbSolarBody.setMinimumSize(QtCore.QSize(170, 0))
self.cbSolarBody.setObjectName("cbSolarBody")
self.cbSolarBody.addItem("")
self.cbSolarBody.addItem("")
self.cbSolarBody.addItem("")
self.cbSolarBody.addItem("")
self.horizontalLayout_6.addWidget(self.cbSolarBody)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem2)
self.verticalLayout.addLayout(self.horizontalLayout_6)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.lbSolarCmap = QtWidgets.QLabel(RemoteSensingDockWidget)
self.lbSolarCmap.setObjectName("lbSolarCmap")
self.horizontalLayout_2.addWidget(self.lbSolarCmap)
self.verticalLayout.addLayout(self.horizontalLayout_2)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem3)
self.retranslateUi(RemoteSensingDockWidget)
QtCore.QMetaObject.connectSlotsByName(RemoteSensingDockWidget)
def retranslateUi(self, RemoteSensingDockWidget):
_translate = QtCore.QCoreApplication.translate
RemoteSensingDockWidget.setWindowTitle(_translate("RemoteSensingDockWidget", "Remote Sensing"))
self.lbObsAngle.setToolTip(_translate("RemoteSensingDockWidget", "View direction of the remote sensing instrument.\n"
"0 degree is towards flight direction."))
self.lbObsAngle.setText(_translate("RemoteSensingDockWidget", "Viewing direction: azimuth"))
self.label.setText(_translate("RemoteSensingDockWidget", "elevation"))
self.cbDrawTangents.setToolTip(_translate("RemoteSensingDockWidget", "Tangent points in viewing direction at the specified altitude.\n"
"Aircraft altitude is taken from the flight path."))
self.cbDrawTangents.setText(_translate("RemoteSensingDockWidget", "draw tangent points"))
self.btTangentsColour.setText(_translate("RemoteSensingDockWidget", "colour"))
self.dsbTangentHeight.setSuffix(_translate("RemoteSensingDockWidget", " km"))
self.cbShowSolarAngle.setToolTip(_translate("RemoteSensingDockWidget", "dark green if below horizon; otherwise reds: 0,5,10,15, purples: 15,25,35,45,60, greens: 60,90,135,180"))
self.cbShowSolarAngle.setText(_translate("RemoteSensingDockWidget", "show angle (degree)"))
self.cbSolarAngleType.setItemText(0, _translate("RemoteSensingDockWidget", "sun"))
self.cbSolarAngleType.setItemText(1, _translate("RemoteSensingDockWidget", "moon"))
self.cbSolarAngleType.setItemText(2, _translate("RemoteSensingDockWidget", "venus"))
self.cbSolarBody.setItemText(0, _translate("RemoteSensingDockWidget", "total (horizon)"))
self.cbSolarBody.setItemText(1, _translate("RemoteSensingDockWidget", "total"))
self.cbSolarBody.setItemText(2, _translate("RemoteSensingDockWidget", "azimuth"))
self.cbSolarBody.setItemText(3, _translate("RemoteSensingDockWidget", "elevation"))
self.lbSolarCmap.setText(_translate("RemoteSensingDockWidget", "fill me"))
| 1.5625
| 2
|
src/pandas_profiling/report/structure/variables/render_count.py
|
briangrahamww/pandas-profiling
| 0
|
12774668
|
<reponame>briangrahamww/pandas-profiling<filename>src/pandas_profiling/report/structure/variables/render_count.py
from pandas_profiling.config import Settings
from pandas_profiling.report.formatters import (
fmt,
fmt_bytesize,
fmt_numeric,
fmt_percent,
)
from pandas_profiling.report.presentation.core import (
Container,
FrequencyTable,
Image,
Table,
VariableInfo,
)
from pandas_profiling.report.structure.variables.render_common import render_common
from pandas_profiling.visualisation.plot import histogram, mini_histogram
def render_count(config: Settings, summary: dict) -> dict:
template_variables = render_common(config, summary)
image_format = config.plot.image_format
# Top
info = VariableInfo(
summary["varid"],
summary["varname"],
"Real number (ℝ / ℝ<sub>≥0</sub>)",
summary["warnings"],
summary["description"],
)
table1 = Table(
[
{
"name": "Distinct",
"value": fmt(summary["n_distinct"]),
"alert": False,
},
{
"name": "Distinct (%)",
"value": fmt_percent(summary["p_distinct"]),
"alert": False,
},
{
"name": "Missing",
"value": fmt(summary["n_missing"]),
"alert": False,
},
{
"name": "Missing (%)",
"value": fmt_percent(summary["p_missing"]),
"alert": False,
},
]
)
table2 = Table(
[
{
"name": "Mean",
"value": fmt_numeric(
summary["mean"], precision=config.report.precision
),
"alert": False,
},
{
"name": "Minimum",
"value": fmt_numeric(summary["min"], precision=config.report.precision),
"alert": False,
},
{
"name": "Maximum",
"value": fmt_numeric(summary["max"], precision=config.report.precision),
"alert": False,
},
{
"name": "Zeros",
"value": fmt(summary["n_zeros"]),
"alert": False,
},
{
"name": "Zeros (%)",
"value": fmt_percent(summary["p_zeros"]),
"alert": False,
},
{
"name": "Memory size",
"value": fmt_bytesize(summary["memory_size"]),
"alert": False,
},
]
)
mini_histo = Image(
mini_histogram(config, *summary["histogram"]),
image_format=image_format,
alt="Mini histogram",
)
template_variables["top"] = Container(
[info, table1, table2, mini_histo], sequence_type="grid"
)
seqs = [
Image(
histogram(config, *summary["histogram"]),
image_format=image_format,
alt="Histogram",
caption=f"<strong>Histogram with fixed size bins</strong> (bins={len(summary['histogram'][1]) - 1})",
name="Histogram",
anchor_id="histogram",
)
]
fq = FrequencyTable(
template_variables["freq_table_rows"],
name="Common values",
anchor_id="common_values",
redact=False,
)
evs = Container(
[
FrequencyTable(
template_variables["firstn_expanded"],
name="Minimum 5 values",
anchor_id="firstn",
redact=False,
),
FrequencyTable(
template_variables["lastn_expanded"],
name="Maximum 5 values",
anchor_id="lastn",
redact=False,
),
],
sequence_type="tabs",
name="Extreme values",
anchor_id="extreme_values",
)
template_variables["bottom"] = Container(
[
Container(
seqs, sequence_type="tabs", name="Histogram(s)", anchor_id="histograms"
),
fq,
evs,
],
sequence_type="tabs",
anchor_id=summary["varid"],
)
return template_variables
| 2.359375
| 2
|
websauna/blog/tests/model/conftest.py
|
ooduor/websauna.blog
| 0
|
12774669
|
"""py.test testing fixtures"""
import pytest
# Websauna
from websauna.blog.models import Post
from websauna.utils.time import now
@pytest.fixture
def unpublished_post(dbsession):
post = Post()
post.title = "Hello world"
post.body = "All roads lead to Toholampi"
post.tags = "mytag,mytag2"
post.ensure_slug(dbsession)
dbsession.add(post)
dbsession.flush()
return post
@pytest.fixture
def published_post(unpublished_post):
unpublished_post.published_at = now()
return unpublished_post
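# Illustrative sketch (not part of the original fixtures): how a test module
# would consume the fixtures defined above. The assertion only restates what
# the published_post fixture itself sets; the underscore prefix keeps pytest
# from collecting this example as a test.
def _example_usage(published_post):
    assert published_post.published_at is not None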
| 2.03125
| 2
|
python/2936.py
|
josevictorp81/Uri-questions-solutions
| 3
|
12774670
|
<reponame>josevictorp81/Uri-questions-solutions
curupira = int(input())
boitata = int(input())
boto = int(input())
mapinguari = int(input())
lara = int(input())
total = 225 + (curupira * 300) + (boitata * 1500) + (boto * 600) + (mapinguari * 1000) + (lara * 150)
print(total)
| 3.15625
| 3
|
src/phidget_spatial/launch/phidget_spatial_launch.py
|
tiiuae/phidget_spatial
| 0
|
12774671
|
from ament_index_python.packages import get_package_prefix
from launch import LaunchDescription
from launch_ros.actions import Node
from os import environ as env
def generate_launch_description():
pkg_name = "phidget_spatial"
pkg_share_path = get_package_prefix(pkg_name)
return LaunchDescription([
Node(
package='phidget_spatial',
executable='phidget_spatial_node',
name='phidget_spatial_node',
namespace=(env.get("DRONE_DEVICE_ID", env.get("USER"))),
parameters=[
pkg_share_path + "/config/phidget_spatial/phidget_spatial.yaml"
]
)
])
| 2.09375
| 2
|
Python/P3 - ADT/Q4.py
|
mrbinx/mrbinx_python
| 0
|
12774672
|
<filename>Python/P3 - ADT/Q4.py
__author__ = 'HaoBin'
from Q8_1 import List
import queue
class Tree():
def __init__(self, root=None, left=None, right=None):
self.root = root
self.left = left
self.right = right
if root is not None:
if left is None:
self.left = Tree()
if right is None:
self.right = Tree()
def empty(self):
if self.root is None:
return True
else:
return False
def leaf(self):
if self.left is None and self.right is None:
return True
else:
return False
def fork(self):
pass
def get_left(self):
if self.left is None:
return None
else:
return self.left
def get_right(self):
if self.right is None:
return None
else:
return self.right
def contents(self):
if self.root is None:
return None
else:
return self.root
def height(self):
if self.empty() is True:
return 0
else:
return 1 + max(self.left.height(), self.right.height())
def weight(self):
if self.empty() is True:
return 0
else:
return 1 + self.left.weight() + self.right.weight()
def breadth_first_draw(self):
d = []
q = queue.Queue()
q.put(self)
while q.empty() is False:
v = q.get()
if v.contents() is not None:
print(v.contents(), end=" ")
if v.leaf() is False:
if v.get_left() is not None:
q.put(v.get_left())
if v.get_right() is not None:
q.put(v.get_right())
def flatten_infix(self):
if self.empty() is True:
return List()
else:
return self.left.flatten_infix().append(List(self.root, self.right.flatten_infix()))
def flatten_prefix(self):
if self.empty() is True:
return List()
else:
return List(self.root, self.left.flatten_prefix().append(self.right.flatten_prefix()))
def flatten_postfix(self):
if self.empty() is True:
return List()
else:
return self.left.flatten_postfix().append(self.right.flatten_postfix().append(List(self.root)))
def fast_flatten_infix(self, l = None):
if l is None:
l = List()
if self.empty() is True:
return l
else:
return self.left.fast_flatten_infix(List(self.root, self.right.fast_flatten_infix(l)))
if __name__ == "__main__":
tree = Tree(3, Tree(4, Tree(5)), Tree(8, Tree(7, Tree(9, Tree(4, Tree(11)), Tree(2)))))
#print(tree.contents())
print("Height: " + str(tree.height()))
print("Weight: " + str(tree.weight()))
print("Breadth first search:")
tree.breadth_first_draw()
x = tree.flatten_infix()
print("\nInfix Flatten: ")
for i in range(len(x)):
print(x[i], end=" ")
x = tree.flatten_prefix()
print("\nPrefix Flatten: ")
for i in range(len(x)):
print(x[i], end=" ")
x = tree.flatten_postfix()
print("\nPostfix Flatten: ")
for i in range(len(x)):
print(x[i], end=" ")
x = tree.fast_flatten_infix()
print("\nFast Infix Flatten: ")
for i in range(len(x)):
print(x[i], end=" ")
| 3.421875
| 3
|
pytest/models/aws_cloud/volume_resource.py
|
Annapooraniqxf2/codacy
| 0
|
12774673
|
<reponame>Annapooraniqxf2/codacy
"""
This module reads EC2 volume information from AWS using boto3.
"""
import boto3
from abc import ABC, abstractmethod
class VolumeResource(ABC):
"""This class is used to fetch details of ec2 instance using boto3"""
def __init__(self) -> None:
self.ec2_volume_service = boto3.resource('ec2')
@abstractmethod
    def fetch_volume_info(self):
pass
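# Illustrative sketch (not part of the original file): a minimal concrete
# subclass showing one way fetch_volume_info could be implemented with the
# boto3 EC2 resource created in __init__. The class name is hypothetical.
class AllVolumesResource(VolumeResource):
    def fetch_volume_info(self):
        # Return (volume id, size in GiB) pairs for every volume visible to the account.
        return [(vol.volume_id, vol.size) for vol in self.ec2_volume_service.volumes.all()]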
| 3.171875
| 3
|
pythonproject/engine/display.py
|
emielhman/py-engine
| 0
|
12774674
|
<filename>pythonproject/engine/display.py
import sys, pygame
class Display:
def __init__(self, game):
"""Display(game)"""
self._game = game
self._screen = None
def _init(self):
"""_init()"""
self._screen = pygame.display.set_mode(self._game.settings.get_screen_size(), self._game.settings.get_screen_flags())
def _get_screen(self):
"""_get_screen()"""
return self._screen
def _reset_screen(self):
"""_reset_screen()"""
        if self._screen is not None:
self._screen = pygame.display.set_mode(self._game.settings.get_screen_size(), self._game.settings.get_screen_flags())
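# Usage sketch (assumption, not part of the original engine code): the owning
# game object is expected to call _init() once pygame itself is initialised,
# after which _get_screen() returns the pygame Surface used for rendering:
#   display = Display(game)
#   display._init()
#   surface = display._get_screen()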
| 3
| 3
|
ether_py/eth/send.py
|
davedittrich/ether-py
| 0
|
12774675
|
<gh_stars>0
# -*- coding: utf-8 -*-
import argparse
import logging
import secrets
import textwrap
from cliff.command import Command
class EthSend(Command):
"""Send Ethereum"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super().get_parser(prog_name)
parser.formatter_class = argparse.RawDescriptionHelpFormatter
# parser.add_argument(
# 'from',
# metavar='FROM',
# nargs=1,
# default=None,
# help="Sending account",
# )
# parser.add_argument(
# 'to',
# metavar='TO',
# nargs=1,
# default=None,
# help="Receiving account",
# )
parser.add_argument(
'eth',
metavar='ETH',
nargs=1,
default=None,
help="Transaction amount in eth",
)
parser.epilog = textwrap.dedent("""\
Send Ethereum from one address to another.
::
$ ether_py eth send FROM TO ETH
""")
return parser
def take_action(self, parsed_args):
self.log.debug('[+] sending Ethereum')
w3 = self.app.w3
from_account_address = '0xBe50e2b648e9A0e7E1e2B1b517C53cDAB6424355'
from_account_key = 'c931988d78b75bd3add16e52e432603e7a762f6364d7d780355d8f0955cda364' # noqa
accounts = w3.eth.get_accounts()
# Randomly chose one account (other than the chosen from account).
to_account_address = next(
account for account in secrets.SystemRandom().sample(accounts, 2)
if account != from_account_address
)
nonce = w3.eth.getTransactionCount(from_account_address)
tx = {
'nonce': nonce,
'to': to_account_address,
'value': w3.toWei(float(parsed_args.eth[0]), 'ether'),
'gas': 2000000,
'gasPrice': w3.toWei('50', 'gwei')
}
signed_tx = w3.eth.account.signTransaction(tx, from_account_key)
tx_hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)
if self.app_args.verbose_level == 1:
print(w3.toHex(tx_hash))
elif self.app_args.verbose_level > 1:
print(f"[+] transaction {w3.toHex(tx_hash)} sent")
# vim: set ts=4 sw=4 tw=0 et :
| 2.53125
| 3
|
aiopoke/objects/resources/pokemon/ability.py
|
beastmatser/aiopokeapi
| 3
|
12774676
|
from typing import TYPE_CHECKING, Any, Dict, List
from aiopoke.objects.utility import Effect, NamedResource, VerboseEffect
from aiopoke.objects.utility.common_models import Name
from aiopoke.utils.minimal_resources import MinimalResource
from aiopoke.utils.resource import Resource
if TYPE_CHECKING:
from aiopoke.objects.resources import Generation, VersionGroup
from aiopoke.objects.resources.pokemon import Pokemon
from aiopoke.objects.utility import Language
class Ability(NamedResource):
effect_changes: List["AbilityEffectChange"]
effect_entries: List["VerboseEffect"]
flavor_text_entries: List["AbilityFlavorText"]
generation: MinimalResource["Generation"]
is_main_series: bool
names: List["Name"]
pokemon: List["AbilityPokemon"]
def __init__(
self,
*,
id: int,
name: str,
effect_changes: List[Dict[str, Any]],
effect_entries: List[Dict[str, Any]],
flavor_text_entries: List[Dict[str, Any]],
generation: Dict[str, Any],
is_main_series: bool,
names: List[Dict[str, Any]],
pokemon: List[Dict[str, Any]],
) -> None:
super().__init__(id=id, name=name)
self.effect_changes = [
AbilityEffectChange(**effect_change) for effect_change in effect_changes
]
self.effect_entries = [
VerboseEffect(**effect_entry) for effect_entry in effect_entries
]
self.flavor_text_entries = [
AbilityFlavorText(**flavor_text_entry)
for flavor_text_entry in flavor_text_entries
]
self.generation = MinimalResource(**generation)
self.is_main_series = is_main_series
self.names = [Name(**name) for name in names]
        self.pokemon = [AbilityPokemon(**p) for p in pokemon]
class AbilityPokemon(Resource):
is_hidden: bool
slot: int
pokemon: MinimalResource["Pokemon"]
def __init__(self, *, is_hidden: bool, slot: int, pokemon: Dict[str, Any]) -> None:
self.is_hidden = is_hidden
self.slot = slot
self.pokemon = MinimalResource(**pokemon)
class AbilityEffectChange(Resource):
effect_entries: List["Effect"]
version_group: MinimalResource["VersionGroup"]
def __init__(
self, *, effect_entries: List[Dict[str, Any]], version_group: Dict[str, Any]
) -> None:
self.effect_entries = [
Effect(**effect_entry) for effect_entry in effect_entries
]
self.version_group = MinimalResource(**version_group)
class AbilityFlavorText(Resource):
flavor_text: str
language: MinimalResource["Language"]
version_group: MinimalResource["VersionGroup"]
def __init__(
self,
*,
flavor_text: str,
language: Dict[str, Any],
version_group: Dict[str, Any],
) -> None:
self.flavor_text = flavor_text
self.language = MinimalResource(**language)
self.version_group = MinimalResource(**version_group)
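# Illustrative sketch (not part of the original module): constructing an
# AbilityFlavorText from the kind of dictionaries the PokeAPI returns for the
# language and version_group fields. The literal values are placeholders.
if __name__ == "__main__":
    example = AbilityFlavorText(
        flavor_text="Boosts the Pokemon's Speed in sunshine.",
        language={"name": "en", "url": "https://pokeapi.co/api/v2/language/9/"},
        version_group={"name": "sword-shield", "url": "https://pokeapi.co/api/v2/version-group/20/"},
    )
    print(example.flavor_text, example.language, example.version_group)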
| 2.125
| 2
|
utils/logger.py
|
gylli251/PlexDoctor
| 1
|
12774677
|
import coloredlogs
import logging
import os
logging.basicConfig(
filename="plex_doctor.log",
level=logging.DEBUG,
format='%(levelname)s: "%(asctime)s - %(message)s',
)
log = logging.getLogger("PLEX-DOCTOR")
log.setLevel(logging.DEBUG)
LOGLEVEL = os.environ.get("LOGLEVEL", "INFO").upper()
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(
logging.Formatter('%(levelname)s: "%(asctime)s - %(message)s')
)
log.addHandler(stream_handler)
coloredlogs.install(LOGLEVEL, logger=log)
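# Usage sketch (assumption, not part of the original module): importers are
# expected to reuse the configured logger rather than creating their own:
#   from utils.logger import log
#   log.info("Starting library scan")
#   log.debug("Connection details: %s", details)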
| 2.234375
| 2
|
tests/test_convert.py
|
agbrooks/tohil
| 0
|
12774678
|
import unittest
import tohil
class TestMethods(unittest.TestCase):
def test_convert1(self):
"""exercise tohil.convert with no to= and with to=str"""
self.assertEqual(tohil.convert(10), "10")
self.assertEqual(tohil.convert(10, to=str), "10")
self.assertEqual(tohil.convert("10"), "10")
self.assertEqual(tohil.convert("10", to=str), "10")
def test_convert2(self):
"""exercise tohil.convert and to=int and to=float"""
self.assertEqual(tohil.convert("10", to=int), 10)
self.assertEqual(tohil.convert("10", to=float), 10.0)
def test_convert3(self):
"""exercise tohil.convert to=bool"""
self.assertEqual(tohil.convert(True, to=bool), True)
self.assertEqual(tohil.convert("t", to=bool), True)
self.assertEqual(tohil.convert("1", to=bool), True)
self.assertEqual(tohil.convert(1, to=bool), True)
self.assertEqual(tohil.convert(False, to=bool), False)
self.assertEqual(tohil.convert("f", to=bool), False)
self.assertEqual(tohil.convert("0", to=bool), False)
self.assertEqual(tohil.convert(0, to=bool), False)
def test_convert4(self):
"""exercise tohil.convert to=list"""
self.assertEqual(tohil.convert("1 2 3 4 5", to=list), ["1", "2", "3", "4", "5"])
def test_convert5(self):
"""exercise tohil.convert and to=dict"""
self.assertEqual(
tohil.convert("a 1 b 2 c 3 d 4", to=dict),
{"a": "1", "b": "2", "c": "3", "d": "4"},
)
def test_convert6(self):
"""exercise tohil.convert and to=tuple"""
self.assertEqual(
tohil.convert("a 1 b 2 c 3 d 4", to=tuple),
("a", "1", "b", "2", "c", "3", "d", "4"),
)
def test_convert7(self):
"""exercise tohil.convert and to=set"""
self.assertEqual(
sorted(tohil.convert("1 2 3 4 5 6 6", to=set)),
["1", "2", "3", "4", "5", "6"],
)
def test_convert8(self):
"""exercise tohil.convert and to=tohil.tclobj"""
self.assertEqual(
repr(tohil.convert("1 2 3", to=tohil.tclobj)), "<tohil.tclobj: '1 2 3'>"
)
if __name__ == "__main__":
unittest.main()
| 3.703125
| 4
|
host-software/led/led_vm.py
|
dpejcha/keyplus
| 226
|
12774679
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 <EMAIL>
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from sexpr import sexp
import pprint
import copy
import hexdump
DEBUG = 0
def u8(x):
return x & 0xff
def i16(x):
return x & 0xffff
class LEDVMError(Exception):
pass
class OpCodeInfo(object):
def __init__(self, name, data_len, arg_type):
self.name = name
self.data_len = data_len
self.arg_type = arg_type
ARG_NONE = 0
ARG_REFRENCES = 1
class OpCode(object):
SHOW_HSV = 0x00
SHOW_RGB = 0x01
LOAD_PIXEL = 0x02
ADD_VEC3 = 0x03
SUB_VEC3 = 0x04
IF_EQ = 0x05
OP_CODE_TABLE = {
        # CODE : OpCodeInfo(MNEMONIC, DATA_SIZE, ARG_TYPE)
SHOW_HSV : OpCodeInfo("SHOW_HSV" , 0 , OpCodeInfo.ARG_NONE) ,
SHOW_RGB : OpCodeInfo("SHOW_RGB" , 0 , OpCodeInfo.ARG_NONE) ,
LOAD_PIXEL : OpCodeInfo("LOAD_PIXEL" , 3 , OpCodeInfo.ARG_REFRENCES) ,
ADD_VEC3 : OpCodeInfo("ADD_VEC3" , 3 , OpCodeInfo.ARG_REFRENCES) ,
SUB_VEC3 : OpCodeInfo("SUB_VEC3" , 3 , OpCodeInfo.ARG_REFRENCES) ,
IF_EQ : OpCodeInfo("IF_EQ" , 3 , OpCodeInfo.ARG_REFRENCES) ,
}
@staticmethod
def to_string(code):
if code in OpCode.OP_CODE_TABLE:
name = OpCode.OP_CODE_TABLE[code].name
return "{}<{}>".format(name, code)
else:
return "{}<{}>".format("UnknownOpCode", code)
def __init__(self, name, data_len=0):
self.name = name
self.data_len = data_len
class Register(object):
# Register codes
PIXEL_NUM = 0
OUTPUT_TYPE = 1
KEY_STATE = 2
MOUSE_X = 3
MOUSE_Y = 4
OUTPUT_TYPE_RGB = 0
OUTPUT_TYPE_HSV = 1
def __init__(self, name, default_value=0):
self.name = name
self.value = default_value
self.default_value = default_value
class LEDEffectVM(object):
REGISTER_TABLE = {
Register.PIXEL_NUM : Register("PIXEL_NUM", 0),
Register.OUTPUT_TYPE : Register("OUTPUT_TYPE", 0),
Register.KEY_STATE : Register("KEY_STATE", 0),
Register.MOUSE_X : Register("MOUSE_X", 0),
Register.MOUSE_Y : Register("MOUSE_Y", 0),
}
def __init__(self, led_program_table={'main': []}, num_pixels=None):
self.pixels = [(0, 0, 0)] * num_pixels
self.led_program_table = led_program_table
        self.set_active_program('main')
self.instr_ptr = 0
self.registers = {}
for reg in self.REGISTER_TABLE:
self.registers[reg] = self.REGISTER_TABLE[reg].default_value
    def set_active_program(self, name):
self._current_program_name = name
self.current_program = self.led_program_table[name]
def goto_start(self):
self.instr_ptr = 0
def rel_jump(self, offset):
self.instr_ptr += (offset)
def get_next_word(self):
if self.instr_ptr >= len(self.current_program):
return None
result = self.current_program[self.instr_ptr]
self.instr_ptr += 1
return result
def read_op_code(self):
code = self.get_next_word()
        if code is None:
return None, None
self.vm_assert(code in OpCode.OP_CODE_TABLE, "Invalid OpCode: {}".format(code))
op_code = OpCode.OP_CODE_TABLE[code]
data = []
for i in range(op_code.data_len):
data.append(self.get_next_word())
# if DEBUG >= 1
if DEBUG >= 5:
print("Instruction: {}".format(self.instr_ptr))
print("Current code: {}, data:{}".format(
OpCode.to_string(code), data
)
)
return code, data
REFERENCE_TYPE_IMMEDIATE = 0
REFERENCE_TYPE_REGISTER = 1
REFERENCE_TYPE_PIXEL = 2
def lookup_refrence(self, ref):
        # References either an immediate value, a register, or a component of
        # the current pixel. Format of reference values (in hex):
        # * 00xx -> single byte immediate value
        # * 01xx -> register reference (see REGISTER_TABLE)
        # * 02xx -> component of the current pixel (0=r/h, 1=g/s, 2=b/v)
value = (ref >> 0) & 0xff
ref_type = (ref >> 8) & 0xff
if ref_type == self.REFERENCE_TYPE_IMMEDIATE:
return value
elif ref_type == self.REFERENCE_TYPE_PIXEL:
assert(value < 3)
return self.get_current_pixel()[value]
elif ref_type == self.REFERENCE_TYPE_REGISTER:
assert(value in self.REGISTER_TABLE)
return self.registers[value]
def get_pixel(self, pixel_num):
return self.pixels[pixel_num]
def get_pixel_type(self, pixel_num):
return self.registers[Register.OUTPUT_TYPE]
def get_current_pixel(self):
return self.pixels[self.registers[Register.PIXEL_NUM]]
def set_current_pixel(self, x, y, z):
self.pixels[self.registers[Register.PIXEL_NUM]] = (x, y, z)
def execute_op_code(self, code, data):
"""
Return True if the program has finished executing
"""
if code == OpCode.SHOW_HSV:
self.registers[Register.OUTPUT_TYPE] = Register.OUTPUT_TYPE_HSV
return True
elif code == OpCode.SHOW_RGB:
self.registers[Register.OUTPUT_TYPE] = Register.OUTPUT_TYPE_RGB
return True
elif code == OpCode.LOAD_PIXEL:
self.set_current_pixel(
self.lookup_refrence(data[0]),
self.lookup_refrence(data[1]),
self.lookup_refrence(data[2])
)
elif code == OpCode.ADD_VEC3:
old_value = self.get_current_pixel()
self.set_current_pixel(
u8(old_value[0] + self.lookup_refrence(data[0])),
u8(old_value[1] + self.lookup_refrence(data[1])),
u8(old_value[2] + self.lookup_refrence(data[2]))
)
elif code == OpCode.SUB_VEC3:
old_value = self.get_current_pixel()
self.set_current_pixel(
u8(old_value[0] - self.lookup_refrence(data[0])),
u8(old_value[1] - self.lookup_refrence(data[1])),
u8(old_value[2] - self.lookup_refrence(data[2]))
)
elif code == OpCode.IF_EQ:
lhs = self.lookup_refrence(data[0])
rhs = self.lookup_refrence(data[1])
jmp_pos = self.lookup_refrence(data[2])
if DEBUG >= 5:
print("lhs, rhs, == :", lhs, rhs, lhs == rhs)
if lhs != rhs:
self.rel_jump(jmp_pos)
else:
raise LEDVMError("Unknown opcode {}".format(code))
return False
def execute_program(self, program_name):
        self.set_active_program(program_name)
for (pixel_i, _) in enumerate(self.pixels):
self.execute_program_pixel(pixel_i)
def execute_program_pixel(self, pixel_number):
self.goto_start()
self.registers[Register.PIXEL_NUM] = pixel_number
is_running = True
if DEBUG:
print("Starting program for pixel: {}".format(pixel_number))
while is_running:
(code, data) = self.read_op_code()
            if code is None:
                break
if DEBUG:
print("(OpCode {}, Data {})".format(code, data))
is_running = not self.execute_op_code(code, data)
def vm_assert(self, exp, msg=""):
if exp != True:
self.print_core_dump(msg)
if msg == "":
LEDVMError("LEDVMError: unspecified error")
else:
LEDVMError("LEDVMError: {}".format(msg))
def print_core_dump(self, error_msg):
print(
"\n"
"Core dump while executing program '{}':\n"
"Error message: {}\n"
"instr_ptr: {}\n"
"program: {}\n"
.format(
self._current_program_name,
error_msg,
self.instr_ptr,
self.current_program
)
)
class LEDEffectVMParser(object):
def __init__(self):
# The Parser needs the inverse mappings of the op_code/register lookup
# tables, so generate them here
self.op_code_lookup_table = {}
for code in OpCode.OP_CODE_TABLE:
name = OpCode.OP_CODE_TABLE[code].name
self.op_code_lookup_table[name] = code
self.register_lookup_table = {}
for reg in LEDEffectVM.REGISTER_TABLE:
name = LEDEffectVM.REGISTER_TABLE[reg].name
self.register_lookup_table[name] = reg
# def exp_as_arrays(self, exp):
# print(exp)
# arr = exp[0]
# result = []
# for child in arr:
# result.append(self.exp_as_arrays(child))
# return result
def parse_asm(self, program_str):
sexpression = sexp.parseString(program_str, parseAll=True)
if DEBUG:
print(sexpression)
pprint.pprint(sexpression)
# sexpression = self.exp_as_arrays(sexpression)
byte_code = []
byte_code += self.parse_program(sexpression)
return byte_code
def generate_ref(self, ref):
if isinstance(ref, int):
assert(ref <= 255)
ref_type = LEDEffectVM.REFERENCE_TYPE_IMMEDIATE
value = ref
elif isinstance(ref, str):
if ref in self.register_lookup_table:
ref_type = LEDEffectVM.REFERENCE_TYPE_REGISTER
value = self.register_lookup_table[ref]
elif ref in ('r', 'g', 'b', 'h', 's', 'v'):
ref_type = LEDEffectVM.REFERENCE_TYPE_PIXEL
value = {
'r': 0,
'h': 0,
'g': 1,
's': 1,
'b': 2,
'v': 2,
}[ref]
else:
raise LEDVMError("Unknown reference: {}".format(ref))
else:
return None
lo_byte = (value << 0)
hi_byte = (ref_type << 8)
return [lo_byte | hi_byte]
def parse_instruction(self, exp):
if DEBUG:
print("Parse Instruction: ", exp)
name = exp[0]
result = []
        if name not in self.op_code_lookup_table:
            raise LEDVMError("Unknown opcode mnemonic: {}".format(name))
op_code = self.op_code_lookup_table[name]
op_info = OpCode.OP_CODE_TABLE[op_code]
# Add the op_code to the result
result += [op_code]
OP_CODE_POS = 1
data = exp[OP_CODE_POS:]
if len(data) != op_info.data_len:
raise LEDVMError("Expected {} arguments to opcode {}, got {}".format(
op_info.data_len,
name,
len(data)
)
)
if op_code == OpCode.IF_EQ:
print(data)
print(data[0], data[1], data[2])
LHS_POS = 0
RHS_POS = 1
JUMP_POS = 2
result += self.generate_ref(data[LHS_POS])
result += self.generate_ref(data[RHS_POS])
if_block_exp = data[JUMP_POS]
ref_data = self.generate_ref(if_block_exp)
            if ref_data is not None:
result += ref_data
else:
print('ifblock:', if_block_exp)
if_block = self.parse_instruction_list(if_block_exp)
jmp_offset = i16(len(if_block))
result += [jmp_offset]
result += if_block
print('ifBlockResult:', result)
elif op_info.arg_type == OpCodeInfo.ARG_NONE:
pass # Don't need to add data
elif op_info.arg_type == OpCodeInfo.ARG_REFRENCES:
for ref in data:
result += self.generate_ref(ref)
return result
def parse_instruction_list(self, instruction_list):
result = []
for instruction in instruction_list:
result += self.parse_instruction(instruction)
return result
def parse_program(self, exp):
if DEBUG:
print("Parse program: ", exp)
exp = exp[0]
# pprint.pprint(exp)
return self.parse_instruction_list(exp)
if __name__ == "__main__":
init_prog = """
(
(LOAD_PIXEL PIXEL_NUM 255 200)
)
"""
# main_prog = """
# (
# (LOAD_PIXEL r 255 200)
# (ADD_VEC3 1 0 0)
# (IF_EQ v 199
# (
# (ADD_VEC3 1 0 0)
# )
# )
# (IF_EQ v 200
# (
# (SUB_VEC3 1 0 0)
# )
# )
# (SHOW_HSV)
# )
# """
main_prog = """
(
(IF_EQ h 0
(
(LOAD_PIXEL h 255 199)
)
)
(IF_EQ h 255
(
(LOAD_PIXEL h 255 200)
)
)
(IF_EQ v 200
(
(SUB_VEC3 1 0 0)
)
)
(IF_EQ v 199
(
(ADD_VEC3 1 0 0)
)
)
(SHOW_HSV)
)
"""
vm_parser = LEDEffectVMParser()
led_programs = {
"init": vm_parser.parse_asm(init_prog),
"main": vm_parser.parse_asm(main_prog),
}
vm = LEDEffectVM(led_programs, num_pixels=64)
for prog in led_programs:
print(prog, led_programs[prog])
byte_code_as_bytes = bytes([])
for word in led_programs[prog]:
byte_code_as_bytes += bytes([word & 0xff, word>>8 & 0xff])
hexdump.hexdump(byte_code_as_bytes)
vm.execute_program('init')
for i in range(300):
vm.execute_program('main')
print(vm.pixels)
| 2.390625
| 2
|
src/codeplag/algorithms/tests/test_featurebased.py
|
Artanias/code-plagiarism
| 2
|
12774680
|
<gh_stars>1-10
import unittest
import numpy as np
from codeplag.algorithms.featurebased import (
op_shift_metric, counter_metric,
get_children_indexes, struct_compare,
find_max_index, matrix_value,
add_not_counted
)
class TestFeaturebased(unittest.TestCase):
def test_counter_metric_normal(self):
example1 = {'a': 2, 'b': 1, 'c': 5, 'd': 7}
example2 = {'a': 10, 'c': 8, 'e': 2, 'f': 12}
example3 = {'USub': 3, 'Mor': 3, 'Der': 5}
example4 = {'USub': 5, 'Mor': 5, 'Ker': 5}
res1 = counter_metric(example1, example2)
res2 = counter_metric(example3, example4)
res3 = counter_metric({}, example4)
res4 = counter_metric({}, {})
self.assertEqual(res1, 0.175)
self.assertEqual(res2, 0.3)
self.assertEqual(res3, 0.0)
self.assertEqual(res4, 1.0)
'''
    Numba forbids bad arguments
def test_counter_metric_bad_args(self):
res1 = counter_metric("", [])
res2 = counter_metric([], [])
'''
# self.assertEqual(TypeError, res1)
# self.assertEqual(TypeError, res2)
def test_op_shift_metric_normal(self):
empty_list = []
example1 = ['+', '-', '=']
example2 = ['+', '+=', '/', '%']
example3 = ['+', '-=', '/', '%']
example4 = ['-', '+', '%', '*', '+=']
example5 = ['%', '*', '+=']
res3 = op_shift_metric(empty_list, empty_list)
res4 = op_shift_metric(example1, empty_list)
res5 = op_shift_metric(empty_list, example1)
res6 = op_shift_metric(example2, example3)
res7 = op_shift_metric(example4, example5)
self.assertEqual(res3, (0, 1.0))
self.assertEqual(res4, (0, 0.0))
self.assertEqual(res5, (0, 0.0))
self.assertEqual(res6[0], 0)
self.assertAlmostEqual(res6[1], 0.6, 2)
self.assertEqual(res7[0], 2)
self.assertAlmostEqual(res7[1], 0.6, 2)
def test_get_children_indexes_normal(self):
example1 = [(1, 2), (2, 3), (3, 5), (2, 4), (2, 5), (1, 6)]
example2 = [(3, 4), (3, 2), (4, 5), (3, 1), (4, 8), (3, 8)]
example3 = [(2, 1), (3, 4), (3, 10), (4, 1), (2, 5), (2, 9)]
ind1, c_ch1 = get_children_indexes(example1, len(example1))
ind2, c_ch2 = get_children_indexes(example2, len(example2))
ind3, c_ch3 = get_children_indexes(example3, len(example3))
self.assertEqual(c_ch1, 2)
self.assertEqual(ind1[0], 0)
self.assertEqual(ind1[1], 5)
self.assertEqual(c_ch2, 4)
self.assertEqual(ind2[0], 0)
self.assertEqual(ind2[1], 1)
self.assertEqual(ind2[2], 3)
self.assertEqual(ind2[3], 5)
self.assertEqual(c_ch3, 3)
self.assertEqual(ind3[0], 0)
self.assertEqual(ind3[1], 4)
self.assertEqual(ind3[2], 5)
def test_find_max_index(self):
arr1 = np.array([[[1, 2], [2, 3]],
[[3, 4], [5, 10]]])
res1 = find_max_index(arr1)
arr2 = np.array([[[8, 2], [100, 15]],
[[3, 14], [1, 13]]])
res2 = find_max_index(arr2)
res3 = find_max_index(np.array([[]]))
self.assertEqual(res1[0], 1)
self.assertEqual(res1[1], 0)
self.assertEqual(res2[0], 0)
self.assertEqual(res2[1], 1)
self.assertEqual(res3[0], 0)
self.assertEqual(res3[1], 0)
def test_matrix_value(self):
arr1 = np.array([[[1, 2], [2, 3]],
[[3, 4], [5, 10]]])
metric1, indexes1 = matrix_value(arr1)
arr2 = np.array([[[8, 2], [100, 15]],
[[3, 14], [1, 13]]])
metric2, indexes2 = matrix_value(arr2)
metric3, indexes3 = matrix_value(np.array([[]]))
self.assertEqual(metric1[0], 6)
self.assertEqual(metric1[1], 8)
self.assertEqual(indexes1[0][0], 1)
self.assertEqual(indexes1[0][1], 0)
self.assertEqual(indexes1[1][0], 0)
self.assertEqual(indexes1[1][1], 1)
self.assertEqual(metric2[0], 104)
self.assertEqual(metric2[1], 30)
self.assertEqual(indexes2[0][0], 0)
self.assertEqual(indexes2[0][1], 1)
self.assertEqual(indexes2[1][0], 1)
self.assertEqual(indexes2[1][1], 0)
self.assertEqual(metric3[0], 1)
self.assertEqual(metric3[1], 1)
self.assertEqual(indexes3, [])
def test_add_not_counted(self):
structure = ((1, 2), (2, 1), (1, 3), (2, 4),
(3, 5), (1, 4), (2, 2), (2, 5))
res1 = add_not_counted(structure, 3, [0, 2, 5, len(structure)],
[[0, 0], [1, 1]], axis=0)
self.assertEqual(res1, 3)
    # It would be good to rewrite this for the general case rather than specifically for codeplag
def test_struct_compare_normal(self):
structure1 = [(1, 0), (2, 1), (3, 2),
(3, 2), (2, 3), (3, 4),
(4, 5), (3, 6), (3, 4),
(4, 7), (2, 8)]
structure2 = [(1, 0), (2, 1), (2, 2),
(3, 3), (4, 4), (5, 5),
(4, 1), (4, 1), (4, 1),
(1, 6), (2, 7), (3, 8),
(3, 8), (3, 8), (2, 9)]
count_ch1 = (get_children_indexes(structure1, len(structure1)))[1]
count_ch2 = (get_children_indexes(structure2, len(structure2)))[1]
compliance_matrix = np.zeros((count_ch1, count_ch2, 2),
dtype=np.int64)
res = struct_compare(structure1, structure2,
compliance_matrix)
self.assertEqual(res, [6, 22])
self.assertEqual(list(compliance_matrix[0][0]), [5, 15])
self.assertEqual(list(compliance_matrix[0][1]), [5, 12])
structure1 = [(1, 0), (2, 1), (3, 2),
(2, 3), (3, 4), (4, 5),
(3, 6), (4, 7), (2, 3),
(3, 4), (4, 5), (3, 6),
(4, 7), (2, 3), (3, 4)]
structure2 = [(1, 0), (2, 1), (3, 2), (2, 3),
(3, 4), (4, 5), (3, 6), (4, 7),
(5, 4), (6, 8), (5, 8), (4, 9),
(2, 3), (3, 4), (4, 5), (3, 6),
(4, 4), (5, 8), (4, 10), (5, 4)]
count_ch1 = (get_children_indexes(structure1, len(structure1)))[1]
count_ch2 = (get_children_indexes(structure2, len(structure2)))[1]
compliance_matrix = np.zeros((count_ch1, count_ch2, 2),
dtype=np.int64)
res = struct_compare(structure1, structure2,
compliance_matrix)
self.assertEqual(res, [14, 23])
self.assertEqual(compliance_matrix[0][0][0], 13)
self.assertEqual(compliance_matrix[0][0][1], 22)
def test_struct_compare_file_empty(self):
structure1 = [(1, 2)]
structure1.clear()
structure2 = [(1, 0), (2, 1), (2, 2), (3, 3),
(4, 4), (5, 5), (4, 1), (4, 1),
(4, 1), (1, 6), (2, 7), (3, 8),
(3, 8), (3, 8), (2, 9), (3, 4),
(4, 10), (3, 11), (3, 4), (4, 5),
(2, 2), (3, 3), (4, 4), (5, 5), (4, 12),
(5, 4), (6, 5), (5, 13), (5, 4), (6, 5),
(2, 14), (3, 4), (4, 5)]
res = struct_compare(structure1, structure2)
self.assertEqual(res, [1, 34])
structure3 = [(1, 0), (2, 1), (3, 2), (3, 2),
(2, 3), (3, 4), (4, 5), (3, 6),
(3, 4), (4, 7), (2, 8), (3, 9),
(4, 4), (5, 7), (4, 4), (5, 7),
(2, 10), (3, 4), (4, 7), (1, 11),
(2, 12), (2, 8), (3, 9), (4, 4),
(5, 7), (4, 12), (4, 12)]
res = struct_compare(structure1, structure3)
self.assertEqual(res, [1, 28])
    # Numba forbids bad arguments
# def test_struct_compare_bad_args(self):
# tree, tree2 = self.init('empty.py', 'test2.py')
# res1 = struct_compare("", "")
# res2 = struct_compare(tree, tree2, "")
# self.assertEqual(TypeError, res1)
# self.assertEqual(TypeError, res2)
    # Numba forbids bad arguments
# def test_op_shift_metric_bad_args(self):
# res1 = op_shift_metric([], 34)
# res2 = op_shift_metric(56, [])
# self.assertEqual(TypeError, res1)
# self.assertEqual(TypeError, res2)
| 2.359375
| 2
|
koila/interfaces.py
|
ousou/koila
| 0
|
12774681
|
<reponame>ousou/koila
from __future__ import annotations
import functools
import operator
from abc import abstractmethod
from typing import (
Callable,
Dict,
NamedTuple,
Protocol,
Tuple,
TypeVar,
Union,
overload,
runtime_checkable,
)
from torch import Tensor
from torch import device as Device
from torch import dtype as DType
from . import constants
T = TypeVar("T", covariant=True)
V = TypeVar("V", contravariant=True)
@runtime_checkable
class Runnable(Protocol[T]):
@abstractmethod
def run(self) -> T:
...
@runtime_checkable
class TensorMixin(Protocol):
@overload
@abstractmethod
def size(self) -> Tuple[int, ...]:
...
@overload
@abstractmethod
def size(self, dim: int) -> int:
...
@abstractmethod
def size(self, dim: int | None = None) -> int | Tuple[int, ...]:
...
def numel(self) -> int:
return functools.reduce(operator.mul, self.size(), 1)
def dim(self) -> int:
return len(self.size())
@abstractmethod
def dtype(self) -> DType:
...
@abstractmethod
def device(self) -> str | Device:
...
class BatchNoBatch(NamedTuple):
batch: int
no_batch: int
class BatchInfo(NamedTuple):
index: int
value: int
def map(self, func: Callable[[int], int]) -> BatchInfo:
index = func(self.index)
return BatchInfo(index, self.value)
@runtime_checkable
class RunnableTensor(Runnable[Tensor], TensorMixin, Protocol):
@abstractmethod
def batch(self) -> BatchInfo | None:
...
@abstractmethod
def take_batch(self, low: int, high: int) -> Tensor:
...
@abstractmethod
def visit(self, nodes: Dict[int, TensorLike]) -> None:
...
def buffer(self) -> Dict[int, TensorLike]:
nodes = {}
self.visit(nodes)
return nodes
def buffer_numel(self) -> BatchNoBatch:
buffer = self.buffer().values()
return BatchNoBatch(
sum(t.numel() for t in buffer if bat(t) is not None),
sum(t.numel() for t in buffer if bat(t) is None),
)
def buffer_memory(self) -> BatchNoBatch:
buffer = self.buffer().values()
return BatchNoBatch(
sum(mem(t) for t in buffer if bat(t) is not None),
sum(mem(t) for t in buffer if bat(t) is None),
)
def memory(self) -> int:
return mem(self)
def dtyp(tensor: TensorLike) -> DType:
if isinstance(tensor, Tensor):
return tensor.dtype
return tensor.dtype()
def dev(tensor: TensorLike) -> str | Device:
if isinstance(tensor, Tensor):
return tensor.device
return tensor.device()
def mem(tensor: TensorLike) -> int:
dt = dtyp(tensor)
numel = tensor.numel()
return constants.MEMORY_BYTES[dt] * numel
def bat(tensor: TensorLike) -> BatchInfo | None:
if isinstance(tensor, RunnableTensor):
return tensor.batch()
return None
TensorLike = Union[Tensor, RunnableTensor]
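# Illustrative sketch (not part of the original module): a trivial class that
# satisfies the Runnable protocol structurally. Because Runnable is decorated
# with @runtime_checkable, isinstance(EagerValue(t), Runnable) is True for any
# tensor t even though EagerValue never subclasses Runnable explicitly. The
# class name is hypothetical.
class EagerValue:
    def __init__(self, value: Tensor) -> None:
        self.value = value
    def run(self) -> Tensor:
        # Nothing is deferred; the wrapped tensor is returned as-is.
        return self.value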
| 2.296875
| 2
|
geotrek/trekking/tests/__init__.py
|
camillemonchicourt/Geotrek
| 0
|
12774682
|
# pylint: disable=W0401
from .base import *
from .test_views import *
from .test_filters import *
from .test_translation import *
from .test_trek_relationship import *
from .test_models import *
from .test_admin import *
| 1.007813
| 1
|
manage_battery.py
|
clean-code-craft-tcq-1/function-ext-python-Anjana-MU
| 0
|
12774683
|
from report_vitals import report_battery_vitals
from filter_values import filterOut_safe_vitals
from process_battery_data import process_data
from controller_actions import get_actions
def is_battery_ok(bms_attributes):
data = process_data(bms_attributes)
report_battery_vitals(data)
get_actions(data)
value = list(filter(filterOut_safe_vitals,data))
return len(value) == 0
if __name__ == '__main__':
assert(is_battery_ok({'temperature': 25,'Soc': 70, 'Charge_rate': 0.7}) is True) #all values in limit
assert(is_battery_ok({'Temperature': 46,'soc': 23, 'Charge_rate': 0.77}) is False) #high temp warning,low soc warning,charge_rate high warnings
| 2.125
| 2
|
AutoEncoder/autoencoder.py
|
wondervictor/DeepLearningWithPaddle
| 5
|
12774684
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import paddle.v2 as paddle
import gzip
import sys
import data_provider
import numpy as np
def param():
return paddle.attr.Param(
initial_std=0.01,
initial_mean=0
)
def encoder(x_):
x_ = paddle.layer.fc(
input=x_,
size=512,
act=paddle.activation.Sigmoid(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=256,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=128,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
return x_
def decoder(x_):
x_ = paddle.layer.fc(
input=x_,
size=128,
act=paddle.activation.Sigmoid(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=256,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
x_ = paddle.layer.fc(
input=x_,
size=512,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
return x_
def output(x_):
return paddle.layer.fc(
input=x_,
size=784,
act=paddle.activation.Relu(),
param_attr=param(),
bias_attr=param()
)
paddle.init(use_gpu=False, trainer_count=1)
x = paddle.layer.data(
name='x',
type=paddle.data_type.dense_vector(784)
)
y = encoder(x)
y = decoder(y)
y = output(y)
def train():
optimizer = paddle.optimizer.RMSProp(
learning_rate=1e-3,
regularization=paddle.optimizer.L2Regularization(rate=8e-4)
)
loss = paddle.layer.mse_cost(label=x, input=y)
parameters = paddle.parameters.create(loss)
trainer = paddle.trainer.SGD(
cost=loss,
parameters=parameters,
update_equation=optimizer
)
feeding = {'x': 0}
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 50 == 0:
print ("\n pass %d, Batch: %d cost: %f"
% (event.pass_id, event.batch_id, event.cost))
else:
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, paddle.event.EndPass):
with gzip.open('output/params_pass_%d.tar.gz' % event.pass_id, 'w') as f:
parameters.to_tar(f)
reader = data_provider.create_reader('train', 60000)
trainer.train(
paddle.batch(
reader=reader,
batch_size=128
),
feeding=feeding,
num_passes=20,
event_handler=event_handler
)
def test(model_path):
with gzip.open(model_path, 'r') as openFile:
parameters = paddle.parameters.Parameters.from_tar(openFile)
testset = [[x] for x in data_provider.fetch_testingset()['images'][:10]]
    # Use infer to run the prediction
result = paddle.infer(
input=testset,
parameters=parameters,
output_layer=y,
feeding={'x': 0}
)
return result, np.array(testset)
if __name__ == '__main__':
    result, origin = test('output/params_pass_19.tar.gz')
np.save('origin.dat', origin)
np.save('result.dat', result)
| 2.140625
| 2
|
neatbook/neatbook.py
|
Peter-32/neatbook
| 1
|
12774685
|
<filename>neatbook/neatbook.py
import sys
import os
import nbformat as nbf
import re
class Neatbook:
def __init__(self, ):
PROJECT_FILE = os.path.realpath(os.path.basename(sys.argv[0]))
PROJECT_PATH = re.match("(.*[/\\\])", PROJECT_FILE).group(1)
PROJECT_NAME = re.match(".*[/\\\]+([^/\\\]+)[/\\\]+$", PROJECT_PATH).group(1)
nb = nbf.v4.new_notebook()
header1 = """\
# {} Neatbook
#### Get Data""".format(PROJECT_NAME.capitalize())
code1 = """\
import pandas as pd
import numpy as np
# Get data here
df = pd.read_csv("train.csv") # Edit: Your dataset
# classDF = pd.read_csv("train_labels.csv", header=None)
# df = pd.concat([df, classDF], axis=1)
print(df.shape)
print(df.describe(include = [np.number]))
print(df.dtypes)
print(df.describe(include = ['O']))
df.head()
"""
header2 = """\
#### Initialize variables"""
code2 = """\
from sklearn.model_selection import train_test_split
className = 'class' # Edit: Replace class with the Y column name
trainX, testX, trainY, testY = train_test_split(df.drop([className], axis=1),
df[className], train_size=0.75, test_size=0.25)
indexColumns = [] # Edit: Optionally add column names
iWillManuallyCleanColumns = [] # Edit: Optionally add column names
print("trainX.shape = ", trainX.shape)
print("testX.shape = ", testX.shape)
print("trainY.shape = ", trainY.shape)
print("testY.shape = ", testY.shape)
print("\ntrainY\n")
print(trainY.head())
print("trainX\n")
print(trainX.head())
"""
header3 = """\
#### Clean Data"""
code3 = """\
from neatdata.neatdata import *
neatdata = NeatData()
cleanTrainX, cleanTrainY = neatdata.cleanTrainingDataset(trainX, trainY, indexColumns, iWillManuallyCleanColumns)
cleanTestX = neatdata.cleanTestDataset(testX)
cleanTestY = neatdata.convertYToNumbersForModeling(testY)
print("Cleaning done")
"""
header4 = """\
#### Review Cleaned Data"""
code4 = """\
print(cleanTrainX.describe(include = [np.number]))
print(cleanTrainX.head())
print(cleanTrainY)
print(cleanTestX.describe(include = [np.number]))
print(cleanTestX.head())
print(cleanTestY)
"""
header5 = """\
#### Run TPOT"""
code5 = """\
from tpot import TPOTClassifier
tpot = TPOTClassifier(max_time_mins=5, # Edit: Set to 480 to train for 8 hours
population_size=100, max_eval_time_mins=5, verbosity=2)
tpot.fit(cleanTrainX, cleanTrainY)
print(tpot.score(cleanTestX, cleanTestY))
tpot.export('tpot_pipeline.py')
print("\\n\\nTPOT is done.")
"""
header6 = """\
## Run this after TPOT is done
Creates the modelpipeline.py file. That file also creates the trainedmodelpipeline.py.
"""
code6 = """\
with open('modelpipeline.py', 'w') as fileOut:
with open('tpot_pipeline.py', 'r') as fileIn:
for line in fileIn:
if line.startswith("import") or line.startswith("from "):
fileOut.write(line)
fileOut.write(\"\"\"from sklearn.metrics import accuracy_score
from neatdata.neatdata import *
from sklearn.metrics import confusion_matrix
import pickle
class ModelPipeline:
def __init__(self):
self.indexColumns, self.iWillManuallyCleanColumns = None, None
self.neatData = NeatData()
self.className = 'class' # Edit: Replace class with the Y column name
self.indexColumns = [] # Edit: Optionally add column names
self.iWillManuallyCleanColumns = [] # Edit: Optionally add column names
self.cleanTrainX, self.cleanTrainY, self.cleanTestX, self.cleanTestY = None, None, None, None
self.results = None
def execute(self):
trainX, testX, trainY, testY = self._getDatasetFrom________() # Edit: choose one of two functions
self._cleanDatasets(trainX, testX, trainY, testY)
self._modelFit()
self._printModelScores()
self._createTrainedModelPipelineFile()
self._saveObjectsToDisk()
self._createTrainedModelPipelineFile()
def _getDatasetFromOneFile(self):
df = pd.read_csv('train.csv') # Edit: Your dataset
# df = pd.read_csv('train.csv', header=None)
# classDF = pd.read_csv("train_labels.csv", header=None, names=["class"])
# df = pd.concat([df, classDF], axis=1)
trainX, testX, trainY, testY = train_test_split(df.drop([self.className], axis=1),
df[self.className], train_size=0.75, test_size=0.25)
return trainX, testX, trainY, testY
def _getDatasetFromTwoFiles(self):
trainingDf = pd.read_csv('train.csv') # Edit: Your training dataset
testDf = pd.read_csv('test.csv') # Edit: Your test dataset
trainX = trainingDf.drop([self.className], axis=1)
trainY = trainingDf[self.className]
testX = testDf.drop([self.className], axis=1)
testY = testDf[self.className]
return trainX, testX, trainY, testY
def _cleanDatasets(self, trainX, testX, trainY, testY):
self.cleanTrainX, self.cleanTrainY = self.neatData.cleanTrainingDataset(trainX, trainY, self.indexColumns, self.iWillManuallyCleanColumns)
self.cleanTestX = self.neatData.cleanTestDataset(testX)
self.cleanTestY = self.neatData.convertYToNumbersForModeling(testY)
def _modelFit(self):
\"\"\")
showNextLines = False
with open('modelpipeline.py', 'a') as fileOut:
with open('tpot_pipeline.py', 'r') as fileIn:
for line in fileIn:
if line.startswith("# Score"):
showNextLines = True
elif showNextLines and not line.startswith("exported_pipeline.fit") and not line.startswith("results"):
fileOut.write(" " + line)
with open('modelpipeline.py', 'a') as fileOut:
fileOut.write(\"\"\" self.exported_pipeline = exported_pipeline
self.exported_pipeline.fit(self.cleanTrainX, self.cleanTrainY)
self.results = self.exported_pipeline.predict(self.cleanTestX)
def _printModelScores(self):
print("Confusion Matrix:")
print(confusion_matrix(self.cleanTestY, self.results))
print(accuracy_score(self.cleanTestY, self.results))
def _saveObjectsToDisk(self):
def save_object(obj, filename):
with open(filename, 'wb') as output:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
save_object(self.exported_pipeline, 'exportedPipeline.pkl')
save_object(self.neatData, 'NeatData.pkl')
def _createTrainedModelPipelineFile(self):
with open('trainedmodelpipeline.py', 'w') as fileOut:
fileOut.write(\\\"\\\"\\\"
import pandas as pd
import pickle
class TrainedModelPipeline:
def __init__(self):
self.exportedPipeline = None
self.neatData = None
self.testX = None
self.cleanTestX = None
self.results = None
self.resultsDf = None
def execute(self):
self._loadObjects()
self._getDataset()
self._cleanDataset()
self._predict()
self._concatenatePredictionsToDataframe()
self._saveResultsAsCSV()
print("Done. Created results.csv")
def _loadObjects(self):
with open('exportedPipeline.pkl', 'rb') as input:
self.exportedPipeline = pickle.load(input)
with open('NeatData.pkl', 'rb') as input:
self.neatData = pickle.load(input)
def _getDataset(self):
self.testX = pd.read_csv('test.csv') # Edit: Your dataset
# self.testX = pd.read_csv('test.csv', header=None)
def _cleanDataset(self):
self.cleanTestX = self.neatData.cleanTestDataset(self.testX)
def _predict(self):
self.results = self.exportedPipeline.predict(self.cleanTestX)
self.results = self.neatData.convertYToStringsOrNumbersForPresentation(self.results)
def _concatenatePredictionsToDataframe(self):
self.resultsDf = pd.DataFrame(self.results)
self.resultsDf = pd.concat([self.testX, self.resultsDf], axis=1)
def _saveResultsAsCSV(self):
self.resultsDf.to_csv('./results.csv')
trainedModelPipeline = TrainedModelPipeline()
trainedModelPipeline.execute()
\\\"\\\"\\\")
modelPipeline = ModelPipeline()
modelPipeline.execute()
\"\"\")
print("Done creating modelpipeline.py")
"""
nb['cells'] = [nbf.v4.new_markdown_cell(header1),
nbf.v4.new_code_cell(code1),
nbf.v4.new_markdown_cell(header2),
nbf.v4.new_code_cell(code2),
nbf.v4.new_markdown_cell(header3),
nbf.v4.new_code_cell(code3),
nbf.v4.new_markdown_cell(header4),
nbf.v4.new_code_cell(code4),
nbf.v4.new_markdown_cell(header5),
nbf.v4.new_code_cell(code5),
nbf.v4.new_markdown_cell(header6),
nbf.v4.new_code_cell(code6) ]
fname = '{}.ipynb'.format(PROJECT_PATH + PROJECT_NAME.capitalize() + "_Neatbook")
if not os.path.isfile(fname):
with open(fname, 'w') as f:
nbf.write(nb, f)
| 3.0625
| 3
|
training/my_models.py
|
bu-cisl/Illumination-Coding-Meets-Uncertainty-Learning
| 15
|
12774686
|
<reponame>bu-cisl/Illumination-Coding-Meets-Uncertainty-Learning
from __future__ import print_function
import keras
from keras.layers import AveragePooling2D, Lambda
import keras.backend as K
from keras.layers import Input, MaxPooling2D, UpSampling2D, Dropout, Conv2D, Concatenate, Activation, Cropping2D, \
Flatten, Dense, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras.regularizers import l2, l1
from keras.activations import sigmoid, relu
img_rows = 512
img_cols = 512
save_path = 'save/'
num_epochs = 1
save_period = 10
show_groundtruth_flag = False
def _bn_relu(input):
norm = BatchNormalization(axis=-1)(input)
return Activation("relu")(norm)
def _bn_relu_conv(**conv_params):
"""Helper to build a BN -> relu -> conv block.
This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
"""
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
activation = _bn_relu(input)
return Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(activation)
return f
def _double_bn_relu_conv(**conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
after_first_bn_relu_conv = _bn_relu_conv(filters=filters, kernel_size=kernel_size,
strides=strides, kernel_initializer=kernel_initializer,
padding=padding, kernel_regularizer=kernel_regularizer)(input)
return _bn_relu_conv(filters=filters, kernel_size=kernel_size,
strides=strides, kernel_initializer=kernel_initializer,
padding=padding, kernel_regularizer=kernel_regularizer)(after_first_bn_relu_conv)
return f
def res_block(**conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
after_double_bn_relu_conv = _double_bn_relu_conv(filters=filters, kernel_size=kernel_size,
strides=strides, kernel_initializer=kernel_initializer,
padding=padding, kernel_regularizer=kernel_regularizer)(input)
return add([input, after_double_bn_relu_conv])
return f
def conv_factory(x, concat_axis, nb_filter,
dropout_rate=None, weight_decay=1E-4):
"""Apply BatchNorm, Relu 3x3Conv2D, optional dropout
:param x: Input keras network
    :param concat_axis: int -- index of the concatenation axis
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras network with b_norm, relu and Conv2D added
:rtype: keras network
"""
x = BatchNormalization(axis=concat_axis,
gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter, (3, 3),
kernel_initializer="he_uniform",
padding="same",
kernel_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def conv_factory_DO(x, concat_axis, nb_filter,
dropout_rate=None, weight_decay=1E-4):
"""Apply BatchNorm, Relu 3x3Conv2D, optional dropout
:param x: Input keras network
    :param concat_axis: int -- index of the concatenation axis
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras network with b_norm, relu and Conv2D added
:rtype: keras network
"""
x = BatchNormalization(axis=concat_axis,
gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = Conv2D(nb_filter, (3, 3),
kernel_initializer="he_uniform",
padding="same",
kernel_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x, training=True)
return x
def conv_factory_leaky(x, concat_axis, nb_filter,
dropout_rate=None, weight_decay=1E-4):
"""Apply BatchNorm, Relu 3x3Conv2D, optional dropout
:param x: Input keras network
    :param concat_axis: int -- index of the concatenation axis
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras network with b_norm, relu and Conv2D added
:rtype: keras network
"""
x = BatchNormalization(axis=concat_axis,
gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = LeakyReLU(0.2)(x)
x = Conv2D(nb_filter, (3, 3),
kernel_initializer="he_uniform",
padding="same",
kernel_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def denseblock(x, concat_axis, nb_layers, growth_rate,
dropout_rate=None, weight_decay=1E-4):
"""Build a denseblock where the output of each
conv_factory is fed to subsequent ones
:param x: keras model
    :param concat_axis: int -- index of the concatenation axis
:param nb_layers: int -- the number of layers of conv_
factory to append to the model.
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras model with nb_layers of conv_factory appended
:rtype: keras model
"""
list_feat = [x]
for i in range(nb_layers):
x = conv_factory(x, concat_axis, growth_rate,
dropout_rate, weight_decay)
list_feat.append(x)
x = Concatenate(axis=concat_axis)(list_feat)
return x
def denseblock_DO(x, concat_axis, nb_layers, growth_rate,
dropout_rate=None, weight_decay=1E-4):
"""Build a denseblock where the output of each
conv_factory is fed to subsequent ones
:param x: keras model
    :param concat_axis: int -- index of the concatenation axis
:param nb_layers: int -- the number of layers of conv_
factory to append to the model.
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras model with nb_layers of conv_factory appended
:rtype: keras model
"""
list_feat = [x]
for i in range(nb_layers):
x = conv_factory_DO(x, concat_axis, growth_rate,
dropout_rate, weight_decay)
list_feat.append(x)
x = Concatenate(axis=concat_axis)(list_feat)
return x
def denseblock_leaky(x, concat_axis, nb_layers, growth_rate,
dropout_rate=None, weight_decay=1E-4):
"""Build a denseblock where the output of each
conv_factory is fed to subsequent ones
:param x: keras model
    :param concat_axis: int -- index of the concatenation axis
:param nb_layers: int -- the number of layers of conv_
factory to append to the model.
:param nb_filter: int -- number of filters
:param dropout_rate: int -- dropout rate
:param weight_decay: int -- weight decay factor
:returns: keras model with nb_layers of conv_factory appended
:rtype: keras model
"""
list_feat = [x]
for i in range(nb_layers):
x = conv_factory_leaky(x, concat_axis, growth_rate,
dropout_rate, weight_decay)
list_feat.append(x)
x = Concatenate(axis=concat_axis)(list_feat)
return x
def discriminator_96(input_shape):
img_shape = input_shape
model = Sequential()
model.add(Conv2D(64, kernel_size=5, strides=2, input_shape=img_shape, padding='valid',
kernel_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02, seed=None)))
model.add(LeakyReLU(alpha=0.2))
model.add(Conv2D(64, kernel_size=5, strides=2, padding="valid",
kernel_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.02, seed=None)))
model.add(BatchNormalization(momentum=0.99))
model.add(LeakyReLU(alpha=0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(rate=0.4))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(rate=0.4))
model.add(Dense(1, activation='sigmoid'))
model.summary()
img = Input(shape=img_shape)
validity = model(img)
return Model(img, validity)
def get_model_sigmoid_2out(input_shape, l2_weight_decay):
regularizer_func = l2(l2_weight_decay)
inputs = Input(input_shape)
print("inputs shape:", inputs.shape)
conv1 = Conv2D(64, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(inputs)
print("conv1 shape:", conv1.shape)
# conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
# print("conv1 shape:", conv1.shape)
# res1 = res_block(filters=64, kernel_size=3)(conv1)
# print("res1 shape:", res1.shape)
db1 = denseblock(x=conv1, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db1 shape:", db1.shape)
pool1 = MaxPooling2D(pool_size=(2, 2))(db1)
print("pool1 shape:", pool1.shape)
conv2 = Conv2D(128, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(pool1)
print("conv2 shape:", conv2.shape)
# conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
# print("conv2 shape:", conv2.shape)
# res2 = res_block(filters=128, kernel_size=3)(conv2)
# print("res2 shape:", res2.shape)
db2 = denseblock(x=conv2, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db2 shape:", db2.shape)
pool2 = MaxPooling2D(pool_size=(2, 2))(db2)
print("pool2 shape:", pool2.shape)
conv3 = Conv2D(256, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(pool2)
print("conv3 shape:", conv3.shape)
# conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
# print("conv3 shape:", conv3.shape)
# res3 = res_block(filters=256,kernel_size=3)(conv3)
# print("res3 shape:", res3.shape)
db3 = denseblock(x=conv3, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db3 shape:", db3.shape)
pool3 = MaxPooling2D(pool_size=(2, 2))(db3)
print("pool3 shape:", pool3.shape)
conv4 = Conv2D(512, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(pool3)
print("conv4 shape:", conv4.shape)
# conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
# print("conv4 shape:", conv4.shape)
# res4 = res_block(filters=512, kernel_size=3)(conv4)
# print("res4 shape:", res4.shape)
db4 = denseblock(x=conv4, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db4 shape:", db4.shape)
drop4 = Dropout(0.5)(db4)
print("drop4 shape:", drop4.shape)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
print("pool4 shape:", pool4.shape)
conv5 = Conv2D(1024, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(pool4)
print("conv5 shape:", conv5.shape)
# conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
# print("conv5 shape:", conv5.shape)
# res5 = res_block(filters=1024,kernel_size=3)(conv5)
# print("res5 shape:", res5.shape)
db5 = denseblock(x=conv5, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db5 shape:", db5.shape)
drop5 = Dropout(0.5)(db5)
print("drop5 shape:", drop5.shape)
up6 = Conv2D(512, 2, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(
UpSampling2D(size=(2, 2))(drop5))
print("up6 shape:", up6.shape)
merge6 = Concatenate(axis=3)([drop4, up6])
print("merge6 shape:", merge6.shape)
conv6 = Conv2D(512, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(merge6)
print("conv6 shape:", conv6.shape)
# conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
# print("conv6 shape:", conv6.shape)
# res5 = res_block(filters=512, kernel_size=3)(conv6)
# print("res5 shape:", res5.shape)
db6 = denseblock(x=conv6, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db6 shape:", db6.shape)
up7 = Conv2D(256, 2, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(
UpSampling2D(size=(2, 2))(db6))
print("up7 shape:", up7.shape)
merge7 = Concatenate(axis=3)([db3, up7])
print("merge7 shape:", merge7.shape)
conv7 = Conv2D(256, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(merge7)
print("conv7 shape:", conv7.shape)
# conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
# print("conv7 shape:", conv7.shape)
# res6 = res_block(filters=256, kernel_size=3)(conv7)
# print("res6 shape:", res6.shape)
db7 = denseblock(x=conv7, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db7 shape:", db7.shape)
up8 = Conv2D(128, 2, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(
UpSampling2D(size=(2, 2))(db7))
print("up8 shape:", up8.shape)
merge8 = Concatenate(axis=3)([db2, up8])
print("merge8 shape:", merge8.shape)
conv8 = Conv2D(128, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(merge8)
print("conv8 shape:", conv8.shape)
# conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
# print("conv8 shape:", conv8.shape)
# res7 = res_block(filters=128, kernel_size=3)(conv8)
# print("res7 shape:", res7.shape)
db8 = denseblock(x=conv8, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db8 shape:", db8.shape)
up9 = Conv2D(64, 2, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(
UpSampling2D(size=(2, 2))(db8))
print("up9 shape:", up9.shape)
merge9 = Concatenate(axis=3)([db1, up9]) ##res1 up9
print("merge9 shape:", merge9.shape)
conv9 = Conv2D(64, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(merge9)
print("conv9 shape:", conv9.shape)
# conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
# print("conv9 shape:", conv9.shape)
# res8 = res_block(filters=64, kernel_size=3)(conv9)
# print("res8 shape:", res8.shape)
db9 = denseblock(x=conv9, concat_axis=3, nb_layers=3, growth_rate=16, dropout_rate=0.5,
weight_decay=l2_weight_decay)
print("db9 shape:", db9.shape)
conv10 = Conv2D(16, 3, activation=LeakyReLU(0.2), padding='same', kernel_initializer='he_normal',
kernel_regularizer=regularizer_func)(db9)
print("conv10 shape:", conv9.shape)
conv11 = Conv2D(2, 1, activation='sigmoid', kernel_regularizer=regularizer_func)(conv10)
print("conv11 shape:", conv11.shape)
model = Model(inputs=inputs, outputs=conv11)
return model
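# --- Illustrative usage (added sketch, not part of the original file) ---
# Assuming the builder above returns `model` for a binary segmentation task, it
# could be compiled and trained roughly as follows; `train_imgs`/`train_masks`
# and the validation arrays are placeholder names, not defined in this file:
#
#   model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
#   model.fit(train_imgs, train_masks, batch_size=2, epochs=20,
#             validation_data=(val_imgs, val_masks))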
| 2.78125
| 3
|
settings.py
|
felix19350/Nature-Trails
| 0
|
12774687
|
<reponame>felix19350/Nature-Trails
from djangoappengine.settings_base import *
import os
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates/default/'),)
| 1.46875
| 1
|
tests/sibling_classes.py
|
mnicolas94/pyrulo
| 0
|
12774688
|
<filename>tests/sibling_classes.py
class Sibling:
pass
| 0.785156
| 1
|
src/App/tests/test_class_init.py
|
tseaver/Zope-RFA
| 2
|
12774689
|
<filename>src/App/tests/test_class_init.py<gh_stars>1-10
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests class initialization.
"""
def test_InitializeClass():
"""Test that InitializeClass (default__class_init__)
works in specific corner cases.
Check when the class has an ExtensionClass as attribute.
>>> import ExtensionClass
>>> from AccessControl.class_init import InitializeClass
>>> class AnotherClass(ExtensionClass.Base):
... _need__name__ = 1
>>> class C:
... foo = AnotherClass
>>> InitializeClass(C)
"""
from doctest import DocTestSuite
import unittest
def test_suite():
return unittest.TestSuite((
DocTestSuite(),
))
| 2.359375
| 2
|
model_compiler/src/model_compiler/tensorflow_util.py
|
yuanliya/Adlik
| 548
|
12774690
|
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Mapping, NamedTuple, Optional, Sequence
from itertools import zip_longest
from . import utilities
from .models.data_format import DataFormat
def get_tensor_by_fuzzy_name(graph, name):
if ':' in name:
tensor = graph.get_tensor_by_name(name)
else:
tensor = graph.get_operation_by_name(name).outputs[0]
return tensor
class Config(NamedTuple):
input_names: Optional[Sequence[str]]
data_formats: Sequence[Optional[DataFormat]]
output_names: Optional[Sequence[str]]
@staticmethod
def from_json(value: Mapping[str, Any]) -> 'Config':
return Config(input_names=value.get('input_names'),
data_formats=utilities.get_data_formats(value.get('input_formats')),
output_names=value.get('output_names'))
@staticmethod
def from_env(env: Mapping[str, str]) -> 'Config':
return Config(input_names=utilities.split_by(env.get('INPUT_NAMES'), ','),
data_formats=utilities.get_data_formats(utilities.split_by(env.get('INPUT_FORMATS'), ',')),
output_names=utilities.split_by(env.get('OUTPUT_NAMES'), ','))
def get_input_tensors_from_graph(self, graph):
if self.input_names is None:
input_tensors = [operation.outputs[0]
for operation in graph.get_operations()
if operation.type == 'Placeholder']
else:
input_tensors = [get_tensor_by_fuzzy_name(graph, name) for name in self.input_names]
return input_tensors
def get_output_tensors_from_graph(self, graph):
if self.output_names is None:
output_tensors = [output_tensor for operation in graph.get_operations()
if operation.type not in
['Assign', 'Const', 'Identity', 'IsVariableInitialized', 'NoOp', 'Placeholder', 'SaveV2',
'VarIsInitializedOp']
for output_tensor in operation.outputs
if not output_tensor.consumers()]
else:
output_tensors = [get_tensor_by_fuzzy_name(graph, name) for name in self.output_names]
return output_tensors
def get_inputs(graph, config):
return zip_longest(config.get_input_tensors_from_graph(graph), config.data_formats)
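# Minimal usage sketch (added for illustration; not part of the original module).
# The mapping keys mirror Config.from_json above; the tensor names are placeholders:
#
#   config = Config.from_json({'input_names': ['input:0'],
#                              'input_formats': ['channels_last'],
#                              'output_names': ['output']})
#   input_tensors = config.get_input_tensors_from_graph(graph)
#   output_tensors = config.get_output_tensors_from_graph(graph)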
| 2.078125
| 2
|
wsgi.py
|
Ajuajmal/heroku
| 0
|
12774691
|
<filename>wsgi.py
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bootcamp.settings")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| 1.515625
| 2
|
admin_demo_scripts/delete_subscriptions_of_departed_users.py
|
MicroStrategy/mstrio-py
| 60
|
12774692
|
from mstrio.users_and_groups import list_users
from mstrio.api.projects import get_projects
from mstrio.distribution_services.subscription.subscription_manager import SubscriptionManager
from mstrio.connection import Connection
def delete_subscriptions_of_departed_users(connection: "Connection") -> None:
"""Delete all subscription in all projects which owners are departed users.
Args:
Args:
connection: MicroStrategy connection object returned by
`connection.Connection()`
"""
# get all projects that the authenticated user has access to
response = get_projects(connection, whitelist=[('ERR014', 403)])
prjcts = response.json() if response.ok else []
# get all disabled users
all_usrs = list_users(connection=connection)
dsbld_usrs = [u for u in all_usrs if not u.enabled]
for prjct in prjcts:
project_id = prjct['id']
sub_mngr = SubscriptionManager(connection=connection, project_id=project_id)
for usr in dsbld_usrs:
subs = sub_mngr.list_subscriptions(owner={'id': usr.id})
msg = f"subscriptions of user with ID: {usr.id}"
msg += f" in project {prjct.name} with ID: {prjct.id}"
# call of the function below returns True if all passed
# subscriptions were deleted
if sub_mngr.delete(subscriptions=subs, force=True):
print("All " + msg + " were deleted.")
else:
print("Not all " + msg + " were deleted or there was no subsscriptions.")
| 2.453125
| 2
|
setup.py
|
raulguajardo/PacaPy
| 0
|
12774693
|
<reponame>raulguajardo/PacaPy
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="PacaPy-raul-guajardo",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="A package designed as a wrapper over Alpaca API for my general use.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/raulguajardo/PacaPy",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
)
| 1.46875
| 1
|
backend/tests/run.py
|
Itisfilipe/feature-requets-flask-app
| 2
|
12774694
|
from coverage import coverage
import unittest
cov = coverage(branch=True, include=['app/*'])
cov.set_option('report:show_missing', True)
cov.erase()
cov.start()
from .client_test import ClientTestCase
from .features_test import FeatureTestCase
from .product_area_test import ProductAreaTestCase
if __name__ == '__main__':
tests = unittest.TestLoader().discover('./tests', pattern='*test.py')
unittest.TextTestRunner(verbosity=1).run(tests)
cov.stop()
cov.save()
print("\n\nCoverage Report:\n")
cov.report()
| 2.34375
| 2
|
web/log.py
|
BennyJane/career-planning-info
| 1
|
12774695
|
# -*- coding: utf-8 -*-
# @Time : 2020/9/26
# @Author : <NAME>
# @Email : N/A
# @File : command.py
# @Project : Flask-Demo
import os
import logging
from logging.handlers import RotatingFileHandler
from flask import request
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_name = os.path.split(os.path.dirname(__file__))[1]
def register_logging(app):
class RequestFormatter(logging.Formatter):
# Customize the log output via inheritance: add the request URL and the remote address
def format(self, record):
record.url = request.url
record.remote_addr = request.remote_addr
return super(RequestFormatter, self).format(record)
request_formatter = RequestFormatter(
'[%(asctime)s] %(remote_addr)s requested %(url)s\n'
'%(levelname)s in %(module)s: %(message)s'
)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log_path = os.path.join(basedir, f'logs/{project_name}')
if not os.path.exists(log_path):
os.mkdir(log_path)
file_handler = RotatingFileHandler("{}/career_plan.log".format(log_path),
maxBytes=10 * 1024 * 1024, backupCount=10)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
# The overall logger level must be set; in development/debug mode it defaults to DEBUG, and without it no logs are written
app.logger.setLevel(logging.INFO)
if not app.debug:
# In production mode, set an appropriate log level
# app.logger.setLevel(logging.ERROR)
app.logger.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
| 2.6875
| 3
|
todo/main/helpers.py
|
Romansth/todo
| 8
|
12774696
|
import csv
from django.http import HttpResponse
class ExportCsvMixin:
def export_as_csv(self, request, queryset):
meta = self.model._meta
field_names = [field.name for field in meta.fields]
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = "attachment; filename={}.csv".format(meta)
writer = csv.writer(response)
writer.writerow(field_names)
# for obj in queryset:
# row = writer.writerow([getattr(obj, field) for field in field_names])
return response
export_as_csv.short_description = "Export to csv"
def all_complete(self, request, queryset):
self.model.objects.all().update(completed=True)
self.message_user(request, "All task are set as completed now")
def all_not_complete(self, request, queryset):
self.model.objects.all().update(completed=False)
self.message_user(request, "All task are set as uncompleted now")
| 2.515625
| 3
|
autoit_ripper/utils.py
|
nazywam/AutoIt-Ripper
| 112
|
12774697
|
from datetime import datetime, timezone
from itertools import cycle
from .lame import LAME
from .mt import MT
def filetime_to_dt(timestamp: int) -> datetime:
return datetime.fromtimestamp(timestamp // 100000000, timezone.utc)
def bytes_to_bitstring(data: bytes) -> str:
return "".join(bin(x)[2:].zfill(8) for x in data)
class BitStream:
def __init__(self, data: bytes) -> None:
self.data = bytes_to_bitstring(data)
def get_bits(self, num: int) -> int:
out = int(self.data[:num], 2)
self.data = self.data[num:]
return out
def xor(data: bytes, key: bytes) -> bytes:
return bytes(a ^ b for a, b in zip(data, cycle(key)))
def decrypt_lame(data: bytes, seed: int) -> bytes:
lame = LAME()
lame.srand(seed)
return bytes([x ^ lame.get_next() for x in data])
def decrypt_mt(data: bytes, seed: int) -> bytes:
key = MT(seed).get_bytes(len(data))
return xor(data, key)
def crc_data(data: bytes) -> int:
if len(data) == 0:
return 0
dwKey_ECX = 0
dwKey_ESI = 1
for b in data:
dwKey_ESI = (b + dwKey_ESI) % 0xFFF1
dwKey_ECX = (dwKey_ECX + dwKey_ESI) % 0xFFF1
return (dwKey_ECX << 0x10) + dwKey_ESI
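# Rough usage sketch (added for illustration, not in the original module); the
# variable names below are placeholders:
#
#   plaintext = decrypt_mt(ciphertext, seed)          # MT19937 keystream XOR
#   assert crc_data(plaintext) == stored_checksum     # Adler-32-style integrity check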
| 2.828125
| 3
|
meta/bin/merge_rs.py
|
bioinformatics-lab/h3agwas
| 0
|
12774698
|
#!/usr/bin/env python3
import sys
import os
import argparse
def parseArguments():
parser = argparse.ArgumentParser(description='transform file and header')
parser.add_argument("--list_file", help="", type=str,required=True)
parser.add_argument('--use_rs',type=int,help="set to 1 to limit the output to rs information", default=0)
parser.add_argument("--out", help="output format ldsc, default none", type=str,required=True)
args = parser.parse_args()
return args
args=parseArguments()
splfile=args.list_file.split(',')
DicByRs={}
listRs=list([])
listChrBp={}
rsissue=''
listrsissue=list([])
listchrissue=list([])
for File in splfile :
print(File)
Fread=open(File)
FreadL=Fread.readline().split()
Fread.close()
Fread=open(File)
if len(FreadL)==3 :
for line in Fread :
splt=line.replace('\n', '').split()
if splt[0] not in listRs :
DicByRs[splt[0]]=[None,None,splt[1],splt[2],None]
else :
RsInfo=DicByRs[splt[0]]
##
print(RsInfo)
balisegood= (splt[1]==RsInfo[2] and splt[2]==RsInfo[3]) or (splt[1]==RsInfo[3] and splt[2]==RsInfo[2])
if balisegood ==False:
listrsissue.append(splt[0])
elif len(FreadL)==6:
# writenew.write('rsID\tChro\tPos\tA1\tA2\tnewRs\n')
for line in Fread :
splt=line.replace('\n', '').split()
NewRs=splt[5]
if splt[0] not in listRs :
DicByRs[splt[0]]=[splt[1],splt[2],splt[3],splt[4], splt[5]]
else :
RsInfo=DicByRs[splt[0]]
balisegood= (splt[3]==RsInfo[2] and splt[4]==RsInfo[3]) or (splt[3]==RsInfo[3] and splt[4]==RsInfo[2])
if balisegood ==False:
listrsissue.append(splt[0])
listchrissue.append(splt[1])
# check pos and chr
if RsInfo[0] :
if RsInfo[0] != splt[1] and RsInfo[1] != splt[2] :
listrsissue.add(splt[0])
else :
RsInfo[0]=splt[1]
RsInfo[1]=splt[2]
RsInfo[4]=splt[5]
else :
print("colomn error number :"+str(len(FreadL)))
sys.exit(3)
writeRs=open(args.out, 'w')
writeRs2=open(args.out+'_allinfo', 'w')
for rs in DicByRs:
RsInfo=DicByRs[rs]
if rs not in listrsissue :
if args.use_rs==1 :
writeRs.write(rs+'\t'+RsInfo[3]+'\t'+RsInfo[4]+'\n')
else :
writeRs.write(rs+'\t'+'\t'.join(RsInfo)+'\n')
writeRs2.write(rs+'\t'+'\t'.join(RsInfo)+'\n')
writeRsError=open(args.out+'_issue', 'w')
for rs in listrsissue :
RsInfo=DicByRs[rs]
writeRsError.write(rs+'\t'+'\t'.join(RsInfo)+'\n')
| 2.9375
| 3
|
data/GTSDB/ImageSets/train_test_split.py
|
aivsol/aivsol-TFFRCNN
| 1
|
12774699
|
import sys
f = open('all.txt')
data = {}
for l in f:
t = l.split(";")[0]
if t in data:
data[t] += [l]
else:
data[t] = [l]
f.close()
train = dict(data.items()[0:380])
test = dict(data.items()[380:506])
'''
print ('\n'*10)
for k in sorted(train):
print k, ':', train[k]
print ('\n'*10)
for k in sorted(test):
print k, ':', test[k]
'''
# Sanity Check
for k in test:
if k in train:
print 'Not a valid train/test split'
sys.exit()
print 'Success'
f = open("train.txt", "w")
for k in sorted(train):
for l in train[k]:
f.write(l)
f.close()
f = open('test.txt', "w")
for k in sorted(test):
f.write(k.split(".")[0] + "\n")
f.close()
| 2.6875
| 3
|
src/schmetterling/log/log.py
|
bjuvensjo/schmetterling
| 0
|
12774700
|
from schmetterling.core.log import log_config, log_params_return
from schmetterling.log.state import LogState
@log_params_return('info')
def execute(state, log_dir, name, level):
log_handlers = log_config(log_dir, name, level)
return LogState(__name__, log_handlers['file_handler'].baseFilename)
| 2.03125
| 2
|
src/training/tensorflow/convert_tfjs.py
|
klawr/deepmech
| 1
|
12774701
|
import tensorflowjs as tfjs
import tensorflow as tf
model = tf.keras.models.load_model("model.h5")
tfjs.converters.save_keras_model(model, "tfjs")
| 1.875
| 2
|
python_api/tests/test_settings.py
|
berkerdemoglu/My3DEngine
| 1
|
12774702
|
import unittest
from src.api import Settings
class SettingsTestCase(unittest.TestCase):
"""Tests the Settings class."""
def setUp(self):
self.settings = Settings(800, 600, 60, "3D Engine", use_antialiasing=False)
def test_keyword_arguments(self):
"""Check that the keyword arguments are being parsed correctly."""
self.assertTrue(hasattr(self.settings, 'use_antialiasing'))
def test_as_dict(self):
"""Check that the as_dict() method is working correctly."""
self.assertEqual(self.settings.as_dict(), self.settings.__dict__)
def tearDown(self):
del self.settings
if __name__ == '__main__':
unittest.main()
| 3.296875
| 3
|
proficiencies.py
|
DennisMerkus/Aether
| 0
|
12774703
|
# Describing possession
# Describing things by color
# Describing kinship
# Describing movement to/from
# Describing locations
# Greetings and farewells
# Face-changing speech (Thanking, apologizing)
# Asking questions about where, what, how, when, who, etc
# Describing tastes
# A set of words/skills/structures that are known
# A set of words/skills/structures that are new
# A set of words/skills/structures that are next in the 'skill tree'
# Generate sentences/scenarios that use the known words, include some 'to study' words, and ignore as much as possible other new words.
| 1.640625
| 2
|
tools/exercises_LoadDataset.py
|
vicyangworld/WaterDispenserEye
| 0
|
12774704
|
# -*- coding: utf-8 -*-
import os
import sys
import cv2
import numpy as np
IMAGE_SIZE = 64
# Resize the image to the specified size
def resize_image(image, height = IMAGE_SIZE, width = IMAGE_SIZE):
top, bottom, left, right = (0, 0, 0, 0)
# Get the image dimensions
h, w, _ = image.shape
# For images whose height and width differ, find the longest edge
longest_edge = max(h, w)
# Compute how many pixels must be added to the short edge to match the long edge
if h < longest_edge:
dh = longest_edge - h
top = dh // 2
bottom = dh - top
elif w < longest_edge:
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
BLACK = [0, 0, 0]
# Pad the image border so height and width are equal; cv2.BORDER_CONSTANT fills the border with the color given by value
constant = cv2.copyMakeBorder(image, top , bottom, left, right, cv2.BORDER_CONSTANT, value = BLACK)
# Resize the image and return it
return cv2.resize(constant, (height, width))
# Read the training data
images = []
labels = []
def read_images(path_name):
for dir_item in os.listdir(path_name):
full_path = os.path.abspath(os.path.join(path_name, dir_item))
if os.path.isdir(full_path):
read_images(full_path)
else:
if dir_item.endswith('.jpg'):
print(full_path)
image = cv2.imread(full_path)
image = resize_image(image, IMAGE_SIZE, IMAGE_SIZE)
images.append(image)
labels.append(path_name)
return images,labels
# Load the training data from the given path
def load_dataset(path_name):
images,labels = read_images(path_name)
# Convert all input images into a 4-D array of shape (number of images, IMAGE_SIZE, IMAGE_SIZE, 3)
# Each image is 64 x 64 pixels with 3 color values per pixel (RGB)
images = np.array(images)
labels = np.array([0 if label.endswith('yangwk') else 1 for label in labels])
return images, labels
if __name__ == '__main__':
path_name = './data/'
images, labels = load_dataset(path_name)
print(images.shape)
print(labels.shape)
| 3.125
| 3
|
code/chapter_6_cnn/mcts_go_cnn.py
|
hirasaki1985/hirasar_go
| 1
|
12774705
|
<reponame>hirasaki1985/hirasar_go
# -*- coding: utf-8 -*-
from __future__ import print_function
# tag::mcts_go_cnn_preprocessing[]
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
np.random.seed(123)
X = np.load('../generated_games/features-200.npy')
Y = np.load('../generated_games/labels-200.npy')
samples = X.shape[0]
size = 9
input_shape = (size, size, 1)
X = X.reshape(samples, size, size, 1)
train_samples = 10000
X_train, X_test = X[:train_samples], X[train_samples:]
Y_train, Y_test = Y[:train_samples], Y[train_samples:]
# end::mcts_go_cnn_preprocessing[]
# tag::mcts_go_cnn_model[]
model = Sequential()
"""
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
"""
# filter = 48
# Use a 3x3 convolution kernel
# Normally the output of a convolution is smaller than its input.
# Adding padding='same' tells Keras to zero-pad the matrix around the edges, so the output keeps the same dimensions as the input.
model.add(Conv2D(filters=48, # <1>
kernel_size=(3, 3), # <2>
activation='sigmoid',
padding='same',
input_shape=input_shape))
model.add(Dropout(rate=0.6))
model.add(Conv2D(64, (3, 3), activation='relu'))
# Max pooling
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.6))
# Flatten
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(rate=0.6))
# Softmax
model.add(Dense(size * size, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# end::mcts_go_cnn_model[]
# tag::mcts_go_cnn_eval[]
model.fit(X_train, Y_train,
batch_size=64,
epochs=5,
verbose=1,
validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# end::mcts_go_cnn_eval[]
| 3.109375
| 3
|
lisa/sut_orchestrator/qemu/console_logger.py
|
srveniga/lisa
| 0
|
12774706
|
<reponame>srveniga/lisa<gh_stars>0
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from threading import Event
from typing import IO, Any, Optional, Union
import libvirt # type: ignore
from . import libvirt_events_thread
# Reads serial console log from libvirt VM and writes it to a file.
class QemuConsoleLogger:
def __init__(self) -> None:
self._stream_completed = Event()
self._console_stream: Optional[libvirt.virStream] = None
self._console_stream_callback_started = False
self._console_stream_callback_added = False
self._log_file: Optional[IO[Any]] = None
# Attach logger to a libvirt VM.
def attach(
self,
qemu_conn: libvirt.virConnect,
domain: libvirt.virDomain,
log_file_path: str,
) -> None:
# Open the log file.
self._log_file = open(log_file_path, "ab")
# Open the libvirt console stream.
console_stream = qemu_conn.newStream(libvirt.VIR_STREAM_NONBLOCK)
domain.openConsole(
None,
console_stream,
libvirt.VIR_DOMAIN_CONSOLE_FORCE | libvirt.VIR_DOMAIN_CONSOLE_SAFE,
)
self._console_stream = console_stream
libvirt_events_thread.run_callback(self._register_console_callbacks)
self._console_stream_callback_started = True
# Close the logger.
def close(self) -> None:
# Check if attach() run successfully.
if self._console_stream_callback_started:
# Close the stream on libvirt callbacks thread.
libvirt_events_thread.run_callback(self._close_stream, True)
self._stream_completed.wait()
else:
if self._console_stream:
self._console_stream.abort()
if self._log_file:
self._log_file.close()
# Wait until the stream closes.
# Typically used when gracefully shutting down a VM.
def wait_for_close(self) -> None:
if self._console_stream_callback_started:
self._stream_completed.wait()
# Register the console stream events.
# Threading: Must only be called on libvirt events thread.
def _register_console_callbacks(self) -> None:
# Attach callback for stream events.
assert self._console_stream
self._console_stream.eventAddCallback(
libvirt.VIR_STREAM_EVENT_READABLE
| libvirt.VIR_STREAM_EVENT_ERROR
| libvirt.VIR_STREAM_EVENT_HANGUP,
self._stream_event,
None,
)
self._console_stream_callback_added = True
# Handles events for the console stream.
# Threading: Must only be called on libvirt events thread.
def _stream_event(
self, stream: libvirt.virStream, events: Union[int, bytes], context: Any
) -> None:
if events & libvirt.VIR_STREAM_EVENT_READABLE:
# Data is available to be read.
while True:
data = stream.recv(libvirt.virStorageVol.streamBufSize)
if data == -2:
# No more data available at the moment.
break
if len(data) == 0:
# EOF reached.
self._close_stream(False)
break
assert self._log_file
self._log_file.write(data)
if (
events & libvirt.VIR_STREAM_EVENT_ERROR
or events & libvirt.VIR_STREAM_EVENT_HANGUP
):
# Stream is shutting down. So, close it.
self._close_stream(True)
# Close the stream resource.
# Threading: Must only be called on libvirt events thread.
def _close_stream(self, abort: bool) -> None:
if self._stream_completed.is_set():
# Already closed. Nothing to do.
return
try:
# Close the log file
assert self._log_file
self._log_file.close()
# Close the stream
assert self._console_stream
if self._console_stream_callback_added:
self._console_stream.eventRemoveCallback()
if abort:
self._console_stream.abort()
else:
self._console_stream.finish()
finally:
# Signal that the stream has closed.
self._stream_completed.set()
| 2.171875
| 2
|
migration/versions/6dd556a95d2b_expand_content_column.py
|
floresmatthew/sahasrahbot
| 0
|
12774707
|
<reponame>floresmatthew/sahasrahbot
"""expand content column
Revision ID: <PASSWORD>
Revises: <PASSWORD>
Create Date: 2020-10-19 18:21:14.384304
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('twitch_command_text', 'content',
existing_type=mysql.VARCHAR(collation='utf8_bin', length=200),
type_=sa.String(length=4000, collation='utf8_bin'),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('twitch_command_text', 'content',
existing_type=sa.String(length=4000, collation='utf8_bin'),
type_=mysql.VARCHAR(collation='utf8_bin', length=200),
existing_nullable=True)
# ### end Alembic commands ###
| 1.398438
| 1
|
backend/src/hatchling/licenses/parse.py
|
daobook/hatch
| 0
|
12774708
|
<reponame>daobook/hatch
from .supported import EXCEPTIONS, LICENSES
def normalize_license_expression(license_expression):
if not license_expression:
return license_expression
# First normalize to lower case so we can look up licenses/exceptions
# and so boolean operators are Python-compatible
license_expression = license_expression.lower()
# Then pad parentheses so tokenization can be achieved by merely splitting on white space
license_expression = license_expression.replace('(', ' ( ').replace(')', ' ) ')
# Now we begin parsing
tokens = license_expression.split()
# Rather than implementing boolean logic we create an expression that Python can parse.
# Everything that is not involved with the grammar itself is treated as `False` and the
# expression should evaluate as such.
python_tokens = []
for token in tokens:
if token not in ('or', 'and', 'with', '(', ')'):
python_tokens.append('False')
elif token == 'with':
python_tokens.append('or')
elif token == '(' and python_tokens and python_tokens[-1] not in ('or', 'and'):
raise ValueError('Invalid license expression')
else:
python_tokens.append(token)
python_expression = ' '.join(python_tokens)
try:
assert eval(python_expression) is False
except Exception:
raise ValueError('Invalid license expression')
# Take a final pass to check for unknown licenses/exceptions
normalized_tokens = []
for token in tokens:
if token in ('or', 'and', 'with', '(', ')'):
normalized_tokens.append(token.upper())
continue
if normalized_tokens and normalized_tokens[-1] == 'WITH':
if token not in EXCEPTIONS:
raise ValueError('Unknown license exception: {}'.format(token))
normalized_tokens.append(EXCEPTIONS[token]['id'])
else:
if token.endswith('+'):
token = token[:-1]
suffix = '+'
else:
suffix = ''
if token not in LICENSES:
raise ValueError('Unknown license: {}'.format(token))
normalized_tokens.append(LICENSES[token]['id'] + suffix)
# Construct the normalized expression
normalized_expression = ' '.join(normalized_tokens)
# Fix internal padding for parentheses
normalized_expression = normalized_expression.replace('( ', '(').replace(' )', ')')
return normalized_expression
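# Illustrative example (added; not part of the original module). Exact casing comes
# from the LICENSES/EXCEPTIONS tables, but for SPDX-style input one would expect
# roughly:
#
#   normalize_license_expression('mit or (apache-2.0+)')
#   # -> 'MIT OR (Apache-2.0+)'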
| 3.09375
| 3
|
code/tomography_gpu/opticaltomography/regularizers.py
|
yhren1993/3DPhaseContrastAET
| 5
|
12774709
|
"""
Regularizer class for that also supports GPU code
<NAME> <EMAIL>
<NAME> <EMAIL>
March 04, 2018
"""
import arrayfire as af
import numpy as np
from opticaltomography import settings
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class Regularizer:
"""
Highest-level Regularizer class that is responsible for parsing user arguments to create proximal operators
All proximal operators operate on complex variables (real & imaginary part separately)
Pure Real:
pure_real: boolean, whether or not to enforce object to be purely real
Pure imaginary:
pure_imag: boolean, whether or not to enforce object to be purely imaginary
Positivity:
positivity_real(positivity_imag): boolean, whether or not to enforce positivity for real(imaginary) part
Negativity:
negativity_real(negativity_imag): boolean, whether or not to enforce negativity for real(imaginary) part
LASSO (L1 regularizer):
lasso: boolean, whether or not to use LASSO proximal operator
lasso_parameter: threshold for LASSO
Total variation (3D only):
total_variation: boolean, whether or not to use total variation regularization
total_variation_gpu: boolean, whether or not to use GPU implementation
total_variation_parameter: scalar, regularization parameter (lambda)
total_variation_maxitr: integer, number of each iteration for total variation
"""
def __init__(self, configs = None, verbose = True, **kwargs):
#Given all parameters, construct all proximal operators
self.prox_list = []
reg_params = kwargs
if configs != None:
reg_params = self._parseConfigs(configs)
#Purely real
if reg_params.get("pure_real", False):
self.prox_list.append(PureReal())
#Purely imaginary
if reg_params.get("pure_imag", False):
self.prox_list.append(Pureimag())
#Total Variation
if reg_params.get("total_variation", False):
if reg_params.get("total_variation_gpu", False):
self.prox_list.append(TotalVariationGPU(**reg_params))
else:
self.prox_list.append(TotalVariationCPU(**reg_params))
#L1 Regularizer (LASSO)
elif reg_params.get("lasso", False):
self.prox_list.append(Lasso(reg_params.get("lasso_parameter", 1.0)))
#Others
else:
#Positivity
positivity_real = reg_params.get("positivity_real", False)
positivity_imag = reg_params.get("positivity_imag", False)
if positivity_real or positivity_imag:
self.prox_list.append(Positivity(positivity_real, positivity_imag))
#Negativity
negativity_real = reg_params.get("negativity_real", False)
negativity_imag = reg_params.get("negativity_imag", False)
if negativity_real or negativity_imag:
self.prox_list.append(Negativity(negativity_real, negativity_imag))
if verbose:
for prox_op in self.prox_list:
print("Regularizer -", prox_op.proximal_name)
def _parseConfigs(self, configs):
params = {}
params["pure_real"] = configs.pure_real
params["pure_imag"] = configs.pure_imag
#Total variation
params["total_variation"] = configs.total_variation
params["total_variation_gpu"] = configs.total_variation_gpu
params["total_variation_maxitr"] = configs.max_iter_tv
params["total_variation_order"] = configs.order_tv
params["total_variation_parameter"] = configs.reg_tv
#LASSO
params["lasso"] = configs.lasso
params["lasso_parameter"] = configs.reg_lasso
#Positivity/Negativity
if configs.positivity_real[0]:
if configs.positivity_real[1] == "larger":
params["positivity_real"] = True
else:
params["negativity_real"] = True
if configs.positivity_imag[0]:
if configs.positivity_imag[1] == "larger":
params["positivity_imag"] = True
else:
params["negativity_imag"] = True
return params
def computeCost(self, x):
cost = 0.0
for prox_op in self.prox_list:
cost_temp = prox_op.computeCost(x)
if cost_temp != None:
cost += cost_temp
return cost
def applyRegularizer(self, x):
for prox_op in self.prox_list:
x = prox_op.computeProx(x)
return x
class ProximalOperator():
def __init__(self, proximal_name):
self.proximal_name = proximal_name
def computeCost(self):
pass
def computeProx(self):
pass
def setParameter(self):
pass
def _boundRealValue(self, x, value = 0, flag_project = True):
"""If flag is true, only values that are greater than 'value' are preserved"""
if flag_project:
x[x < value] = 0
return x
class TotalVariationGPU(ProximalOperator):
def __init__(self, **kwargs):
proximal_name = "Total Variation"
parameter = kwargs.get("total_variation_parameter", 1.0)
maxitr = kwargs.get("total_variation_maxitr", 15)
self.order = kwargs.get("total_variation_order", 1)
self.pure_real = kwargs.get("pure_real", False)
self.pure_imag = kwargs.get("pure_imag", False)
#real part
if kwargs.get("positivity_real", False):
self.realProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_real")
elif kwargs.get("negativity_real", False):
self.realProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_real")
else:
self.realProjector = lambda x: x
#imaginary part
if kwargs.get("positivity_imag", False):
self.imagProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_imag")
elif kwargs.get("negativity_imag", False):
self.imagProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_imag")
else:
self.imagProjector = lambda x: x
self.setParameter(parameter, maxitr)
super().__init__(proximal_name)
def setParameter(self, parameter, maxitr):
self.parameter = parameter
self.maxitr = maxitr
def computeCost(self, x):
return None
def _computeTVNorm(self, x):
x_norm = x**2
x_norm = af.sum(x_norm, dim = 3)**0.5
x_norm[x_norm<1.0] = 1.0
return x_norm
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(af.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(af.imag(x), self.imagProjector)
else:
x = self._computeProxReal(af.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(af.imag(x), self.imagProjector)
return x
def _filterD(self, x, axis):
assert axis<3, "This function only supports matrix up to 3 dimension!"
if self.order == 1:
if axis == 0:
Dx = x - af.shift(x, 1, 0, 0)
elif axis == 1:
Dx = x - af.shift(x, 0, 1, 0)
else:
Dx = x - af.shift(x, 0, 0, 1)
elif self.order == 2:
if axis == 0:
Dx = x - 2*af.shift(x, 1, 0, 0) + af.shift(x, 2, 0, 0)
elif axis == 1:
Dx = x - 2*af.shift(x, 0, 1, 0) + af.shift(x, 0, 2, 0)
else:
Dx = x - 2*af.shift(x, 0, 0, 1) + af.shift(x, 0, 0, 2)
elif self.order == 3:
if axis == 0:
Dx = x - 3*af.shift(x, 1, 0, 0) + 3*af.shift(x, 2, 0, 0) - af.shift(x, 3, 0, 0)
elif axis == 1:
Dx = x - 3*af.shift(x, 0, 1, 0) + 3*af.shift(x, 0, 2, 0) - af.shift(x, 0, 3, 0)
else:
Dx = x - 3*af.shift(x, 0, 0, 1) + 3*af.shift(x, 0, 0, 2) - af.shift(x, 0, 0, 3)
else:
raise NotImplementedError("filter orders larger than 1 are not implemented!")
return Dx
def _filterDT(self, x):
if self.order == 1:
DTx = x[:, :, :, 0] - af.shift(x[ :, :, :, 0], -1, 0, 0) + \
x[:, :, :, 1] - af.shift(x[ :, :, :, 1], 0, -1, 0) + \
x[:, :, :, 2] - af.shift(x[ :, :, :, 2], 0, 0, -1)
elif self.order == 2:
DTx = x[:, :, :, 0] - 2*af.shift(x[ :, :, :, 0], -1, 0, 0) + af.shift(x[ :, :, :, 0], -2, 0, 0) + \
x[:, :, :, 1] - 2*af.shift(x[ :, :, :, 1], 0, -1, 0) + af.shift(x[ :, :, :, 1], 0, -2, 0) + \
x[:, :, :, 2] - 2*af.shift(x[ :, :, :, 2], 0, 0, -1) + af.shift(x[ :, :, :, 2], 0, 0, -2)
elif self.order == 3:
DTx = x[:, :, :, 0] - 3*af.shift(x[ :, :, :, 0], -1, 0, 0) + 3*af.shift(x[ :, :, :, 0], -2, 0, 0) - af.shift(x[ :, :, :, 0], -3, 0, 0) + \
x[:, :, :, 1] - 3*af.shift(x[ :, :, :, 1], 0, -1, 0) + 3*af.shift(x[ :, :, :, 1], 0, -2, 0) - af.shift(x[ :, :, :, 1], 0, -3, 0) + \
x[:, :, :, 2] - 3*af.shift(x[ :, :, :, 2], 0, 0, -1) + 3*af.shift(x[ :, :, :, 2], 0, 0, -2) - af.shift(x[ :, :, :, 2], 0, 0, -3)
else:
raise NotImplementedError("filter orders larger than 1 are not implemented!")
return DTx
def _computeProxReal(self, x, projector):
t_k = 1.0
u_k = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_k1 = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
grad_u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], dtype = af_float_datatype)
def _gradUpdate():
grad_u_hat = x - self.parameter * self._filterDT(u_hat)
return grad_u_hat
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat[:, :, :] = x
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=0)
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=1)
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=2)
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :, 0] /= u_k1_norm
u_k1[ :, :, :, 1] /= u_k1_norm
u_k1[ :, :, :, 2] /= u_k1_norm
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
class TotalVariationCPU(TotalVariationGPU):
def _computeTVNorm(self, x):
u_k1_norm = af.to_array(x)
u_k1_norm[:, :, :, :] *= u_k1_norm
u_k1_norm = af.sum(u_k1_norm, dim = 3)**0.5
u_k1_norm[u_k1_norm<1.0] = 1.0
return np.array(u_k1_norm)
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(np.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(np.imag(x), self.imagProjector)
else:
x = self._computeProxReal(np.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(np.imag(x), self.imagProjector)
return af.to_array(x)
def _computeProxReal(self, x, projector):
t_k = 1.0
u_k = np.zeros(x.shape + (3,), dtype = np_float_datatype);
u_k1 = u_k.copy()
u_hat = u_k.copy()
def _gradUpdate():
u_hat_af = af.to_array(u_hat)
DTu_hat = u_hat_af[:, :, :, 0] - af.shift(u_hat_af[ :, :, :, 0], -1, 0, 0) + \
u_hat_af[:, :, :, 1] - af.shift(u_hat_af[ :, :, :, 1], 0, -1, 0) + \
u_hat_af[:, :, :, 2] - af.shift(u_hat_af[ :, :, :, 2], 0, 0, -1)
grad_u_hat = x - np.array(self.parameter * DTu_hat)
return grad_u_hat
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat = x.copy()
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 0))
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 1))
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 2))
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :] /= u_k1_norm[:, :, :, np.newaxis]
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
class Positivity(ProximalOperator):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, positivity_real, positivity_imag, proximal_name = "Positivity"):
super().__init__(proximal_name)
self.real = positivity_real
self.imag = positivity_imag
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._boundRealValue(af.real(x), 0, self.real) +\
1.0j * self._boundRealValue(af.imag(x), 0, self.imag)
else:
x = self._boundRealValue(np.real(x), 0, self.real) +\
1.0j * self._boundRealValue(np.imag(x), 0, self.imag)
return x
class Negativity(Positivity):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, negativity_real, negativity_imag):
super().__init__(negativity_real, negativity_imag, "Negativity")
def computeProx(self, x):
return (-1.) * super().computeProx((-1.) * x)
class PureReal(ProximalOperator):
"""Enforce real constraint on a complex, imaginary part will be cleared"""
def __init__(self):
super().__init__("Pure real")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = af.real(x) + 1j*0.0
else:
x = np.real(x) + 1j*0.0
return x
class Pureimag(ProximalOperator):
"""Enforce imaginary constraint on a complex, real part will be cleared"""
def __init__(self):
super().__init__("Pure imaginary")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = 1j*af.imag(x)
else:
x = 1j*x.imag
return x
class Lasso(ProximalOperator):
"""||x||_1 regularizer, soft thresholding with certain parameter"""
def __init__(self, parameter):
super().__init__("LASSO")
self.setParameter(parameter)
def _softThreshold(self, x):
if type(x).__module__ == "arrayfire.array":
#POTENTIAL BUG: af.sign implementation does not agree with documentation
x = (af.sign(x)-0.5)*(-2.0) * (af.abs(x) - self.parameter) * (af.abs(x) > self.parameter)
else:
x = np.sign(x) * (np.abs(x) - self.parameter) * (np.abs(x) > self.parameter)
return x
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
return af.norm(af.moddims(x, np.prod(x.shape)), norm_type = af.NORM.VECTOR_1)
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._softThreshold(af.real(x)) + 1.0j * self._softThreshold(af.imag(x))
else:
x = self._softThreshold(np.real(x)) + 1.0j * self._softThreshold(np.imag(x))
return x
#TODO: implement Tikhonov
class Tikhonov(ProximalOperator):
def __init__(self):
pass
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
pass
def computeProx(self, x):
return x
#TODO: implement pure amplitude constraint
class PureAmplitude(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
#TODO: implement pure phase constraint
class PurePhase(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
| 2.34375
| 2
|
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/AnnotationImportTypingOptional/lib.py
|
jnthn/intellij-community
| 2
|
12774710
|
if True:
foo = 42
else:
foo = None
| 1.78125
| 2
|
Hip/SourceModule.py
|
EmilPi/PuzzleLib
| 52
|
12774711
|
import os, tempfile, subprocess
from string import Template
from PuzzleLib import Config
from PuzzleLib.Compiler.JIT import getCacheDir, computeHash, FileLock
from PuzzleLib.Cuda.SourceModule import SourceModule, ElementwiseKernel, ElementHalf2Kernel, ReductionKernel
from PuzzleLib.Cuda.SourceModule import eltwiseTest, reductionTest
from PuzzleLib.Hip import Driver as HipDriver
hipWarpBit, hipBlockBit = 6, 8
hipWarpSize, hipBlockSize = 1 << hipWarpBit, 1 << hipBlockBit
class HipSourceModule(SourceModule):
Driver = HipDriver
runtimeHeader = """
#include <hip/hip_runtime.h>
#define __shfl_xor_sync(mask, value, laneMask, ...) __shfl_xor(value, laneMask, __VA_ARGS__)
#define __shfl_up_sync(mask, value, delta, ...) __shfl_up(value, delta, __VA_ARGS__)
"""
def __init__(self, source, options=None, includes=None, externC=False, verbose=True, debug=False, recompile=False,
name=None):
super().__init__(source, options, includes, externC, verbose, debug, name)
self.recompile = recompile
self.includes = [] if self.includes is None else self.includes
def build(self):
source = self.source.replace("cuda_fp16.h", "hip/hip_fp16.h")
source = ("%sextern \"C\"\n{\n%s\n}\n" if self.externC else "%s%s") % (self.runtimeHeader, source)
cachedir = getCacheDir(os.path.join(Config.libname, Config.Backend.hip.name))
with FileLock(cachedir):
try:
codename = self.tryBuild(source, cachedir)
except subprocess.CalledProcessError as e:
log = e.output.decode()
text = log if self.debug else "%s\nSource:\n%s" % (
log,
"\n".join("%-4s %s" % (i + 1, line) for i, line in enumerate(source.splitlines(keepends=False)))
)
raise self.Driver.RtcError(text)
with open(codename, mode="rb") as f:
hsaco = f.read()
self.cumod = self.Driver.Module(hsaco)
def tryBuild(self, source, cachedir):
options, includes = self.options, self.includes
hashsum = computeHash(source, *options, *includes)
codepath = os.path.join(cachedir, hashsum)
name, srcext = "module" if self.name is None else self.name, ".hip.cpp"
codename = os.path.join(codepath, "%s.code" % name)
sourcename = os.path.join(codepath, "%s%s" % (name, srcext))
if not os.path.exists(codename) or self.recompile:
os.makedirs(codepath, exist_ok=True)
args = ["hipcc", "--genco"] + options + ["-o", codename]
stderr = subprocess.STDOUT if self.verbose else subprocess.DEVNULL
Config.getLogger().debug("No cache found for HIP extension '%s', performing compilation ...", name)
if not self.debug:
f = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", suffix=srcext, delete=False)
try:
with f:
f.write(source)
subprocess.check_output(args + [f.name], stderr=stderr)
finally:
os.remove(f.name)
else:
with open(sourcename, mode="w", encoding="utf-8") as f:
f.write(source)
subprocess.check_output(args + [sourcename], stderr=stderr)
else:
Config.getLogger().debug("Found cached compilation for HIP extension '%s', skipping compilation ...", name)
return codename
@classmethod
def getDefaultOptions(cls):
deviceIdx = cls.Driver.Device.getCurrent()
return ["--targets gfx%s" % cls.Driver.Device(deviceIdx).getArch()]
class HipEltwiseKernel(ElementwiseKernel):
Driver = HipDriver
SourceModule = HipSourceModule
warpBit, warpSize = hipWarpBit, hipWarpSize
blockBit, blockSize = hipBlockBit, hipBlockSize
class HipEltHalf2Kernel(ElementHalf2Kernel):
Driver = HipDriver
SourceModule = HipSourceModule
warpBit, warpSize = hipWarpBit, hipWarpSize
blockBit, blockSize = hipBlockBit, hipBlockSize
class HipReductionKernel(ReductionKernel):
Driver = HipDriver
SourceModule = HipSourceModule
warpBit, warpSize = hipWarpBit, hipWarpSize
blockBit, blockSize = hipBlockBit, hipBlockSize
reduceTmpl = Template("""
#undef READ_AND_MAP
#undef REDUCE
#define READ_AND_MAP(i) ($mapExpr)
#define REDUCE(a, b) ($reduceExpr)
extern "C" __global__ void $name($arguments, $T *partials, int size)
{
__shared__ $T sdata[$warpSize];
int tid = threadIdx.x;
int gid = tid + blockIdx.x * $NT;
$T acc = $neutral;
for (int i = gid; i < size; i += $NT * gridDim.x)
acc = REDUCE(acc, READ_AND_MAP(i));
for (int mask = $warpSize / 2; mask > 0; mask /= 2)
{
$T upval = __shfl_xor(acc, mask, $warpSize);
acc = REDUCE(acc, upval);
}
if (tid % $warpSize == 0)
sdata[tid / $warpSize] = acc;
__syncthreads();
int nwarps = $NT / $warpSize;
if (tid < $warpSize)
{
acc = (tid < nwarps) ? sdata[tid] : $neutral;
for (int mask = $warpSize / 2; mask > 0; mask /= 2)
{
$T upval = __shfl_xor(acc, mask, $warpSize);
acc = REDUCE(acc, upval);
}
}
if (tid == 0)
partials[blockIdx.x] = acc;
}
""")
def unittest():
from PuzzleLib.Hip import Backend
for deviceIdx in range(Backend.getDeviceCount()):
bnd = Backend.getBackend(deviceIdx)
eltwiseTest(bnd)
reductionTest(bnd)
if __name__ == "__main__":
unittest()
| 2.03125
| 2
|
distributed/diagnostics/tests/test_progressbar.py
|
ogrisel/distributed
| 0
|
12774712
|
<gh_stars>0
import pytest
from tornado import gen
from distributed import Executor, Scheduler
from distributed.diagnostics.progressbar import TextProgressBar, progress
from distributed.utils_test import (cluster, _test_cluster, loop, inc,
div, dec, cluster_center)
from time import time, sleep
def test_text_progressbar(capsys, loop):
with cluster(nanny=True) as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
futures = e.map(inc, range(10))
p = TextProgressBar(futures, interval=0.01, complete=True)
e.gather(futures)
start = time()
while p.status != 'finished':
sleep(0.01)
assert time() - start < 5
check_bar_completed(capsys)
assert p._last_response == {'all': 10,
'remaining': 0,
'status': 'finished'}
assert p.stream.closed()
def test_TextProgressBar_error(loop, capsys):
@gen.coroutine
def f(c, a, b):
s = Scheduler((c.ip, c.port), loop=loop)
yield s.sync_center()
done = s.start(0)
s.update_graph(tasks={'x': (div, 1, 0)},
keys=['x'],
dependencies={})
progress = TextProgressBar(['x'], scheduler=(s.ip, s.port),
start=False, interval=0.01)
yield progress.listen()
assert progress.status == 'error'
assert progress.stream.closed()
progress = TextProgressBar(['x'], scheduler=(s.ip, s.port),
start=False, interval=0.01)
yield progress.listen()
assert progress.status == 'error'
assert progress.stream.closed()
s.close()
yield done
_test_cluster(f, loop)
def test_TextProgressBar_empty(loop, capsys):
@gen.coroutine
def f(c, a, b):
s = Scheduler((c.ip, c.port), loop=loop)
yield s.sync_center()
done = s.start(0)
progress = TextProgressBar([], scheduler=(s.ip, s.port), start=False,
interval=0.01)
yield progress.listen()
assert progress.status == 'finished'
check_bar_completed(capsys)
s.close()
yield done
_test_cluster(f, loop)
def check_bar_completed(capsys, width=40):
out, err = capsys.readouterr()
bar, percent, time = [i.strip() for i in out.split('\r')[-1].split('|')]
assert bar == '[' + '#'*width + ']'
assert percent == '100% Completed'
def test_progress_function(loop, capsys):
with cluster() as (s, [a, b]):
with Executor(('127.0.0.1', s['port']), loop=loop) as e:
f = e.submit(lambda: 1)
g = e.submit(lambda: 2)
progress([[f], [[g]]], notebook=False)
check_bar_completed(capsys)
| 2.09375
| 2
|
code/slash_commands/slash_help.py
|
Fiji05/AquaBot
| 2
|
12774713
|
<reponame>Fiji05/AquaBot
import discord
from discord.ext import commands
import re
from discord import app_commands
color = 0xc48aff
class HelpDropdown(discord.ui.Select):
def __init__(self):
options = [
discord.SelectOption(label='Economy', description='add, profile, shop, blackjack, slots, coinflip, leaderboard', emoji="💰"),
discord.SelectOption(label='Moderation', description='mute, tempmute, unmute, kick, ban, softban, purge', emoji="<:moderation:893273273385754686>"),
discord.SelectOption(label='Info', description='prices, crypto, covid, invite, track, userinfo, botinfo, vote, bug, feedback', emoji="ℹ️"),
discord.SelectOption(label='Music (BETA)', description='play, skip, queue, remove, stop, clear, repeat, shuffle, nowplaying, pause, resume', emoji='🎵'),
discord.SelectOption(label='Admin', description='setprefix, setlevel, levelreset, dellevel, levelchannel, setmute, muterole, delmute, setjoin', emoji="⚙️"),
discord.SelectOption(label='Fun', description='level, levelboard, ping, new', emoji='🎉'),
]
super().__init__(placeholder='Choose a category...', min_values=1, max_values=1, options=options)
async def callback(self, interaction: discord.Interaction):
if self.values[0] == 'Economy':
embed = discord.Embed(
title = "💰 - Economy Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Add**", value = f"**Usage: `/add`**\nGives you $2,500. Can be run every 2 hours", inline=False)
embed.add_field(name = "**Shop**", value = f"**Usage: `/shop`**\nGives you the shop menus so that you can buy items", inline=False)
embed.add_field(name = "**🃏 - Blackjack**", value = f"**Usage: `/blackjack <bet>`**\nIf no bet is given, the deafult bet of $125 will be placed", inline=False)
embed.add_field(name = "**🎰 - Slots**", value = f"**Usage: `/slots <bet>`**\nIf no bet is given, the default bet of $125 will be placed.", inline=False)
embed.add_field(name = "**🪙 - Coinflip**", value = f"**Usage: `/coinflip <bet>`**\nHeads means you win, tails means you lose. If no bet is given, the default bet of $125 will be placed.", inline=False)
embed.add_field(name = "**💵 - Profile**", value = f"**Usage: `/profile <member>`**\nShows the amount of money and ranks that a user has", inline=False),
embed.add_field(name = "**🏅 - Leaderboard**", value = f"**Usage: `/leaderboard` **\nShows the top 5 players with the most money. This is a global leaderboard and not per server.", inline=False)
await interaction.response.edit_message(embed=embed)
if self.values[0] == 'Moderation':
embed = discord.Embed(
title = "<:moderation:893273273385754686> - Moderation Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Warn**", value = f"**Usage: `/warn <member> <reason>`** \nWarn a member for doing something against the rules.", inline=True)
embed.add_field(name = "**Delwarn**", value = f"**Usage: `/delwarn <warn ID>`** \nDelete a warning from a member so that it is no longer on their record.", inline=True)
embed.add_field(name = "**Warnings**", value = f"**Usage: `/warnings <member>`** \nSee all of the warnings for a member. Also includes when they were warned, and who warned them.", inline=True)
embed.add_field(name = "**Mute**", value = f"**Usage: `/mute <member> <time>`** \nMute a member so they can't send anymore messages.", inline=True)
embed.add_field(name = "**Tempmute**", value = f"**Usage: `/tempmute <member> <time - in hours>` \nExample: `/tempmute @bob 2`** \nMutes the member temporarily, they will be unmuted once the specified time has passed.", inline=True)
embed.add_field(name = "**Unmute**", value = f"**Usage: `/unmute <member>`** \nUnmute a member so they are able to send messages again.", inline=True)
embed.add_field(name = "**Purge**", value = f"**Usage: `/purge <amount>`** \nDelete messages from your server. Max amount that can be deleted at one time is `100` messages.")
embed.add_field(name = "**Kick**", value = f"**Usage: `/kick <member> <reason>`** \nKick a member from your server. They will be able to join back with a new invite.", inline=True)
embed.add_field(name = "**Ban**", value = f"**Usage: `/ban <member> <reason>`** \nBan a member from your server. They will not be able to join back until they are unbanned.", inline=True)
embed.add_field(name = "**Softban**", value = f"**Usage: `/softban <member> <reason>`** \nThis command will ban and then immediately unban the member in order to get rid of their message history.", inline=True)
await interaction.response.edit_message(embed=embed)
if self.values[0] == "Info":
embed = discord.Embed(
title = "ℹ️ - Info Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Prices**", value = f"**Usage: `/prices`** \nShows the prices for the 20 cryptocurrencies that we currently list", inline=True)
embed.add_field(name = "**Crypto**", value = f"**Usage: `/crypto <ticker>`** \nShows expanded information on the specific currency given its ticker.", inline=True)
embed.add_field(name = "**Covid**", value = f"**Usage: `/covid` **\nSends the current global COVID-19 data.", inline=True)
embed.add_field(name = "**Invite**", value = f"**Usage: `/invite` **\nSends the invite for the bot and the official support server.", inline=True)
embed.add_field(name = "**Track**", value = f"**Usage: `/track`** \nSends the amount of servers that the bot is in, as well as the cumulative amount of members.", inline=True)
embed.add_field(name = "**User Info**", value = f"**Usage: `/userinfo <member>`** \nGives information on a member in your server. Information includes account creation date, when they joined your server, and some more.", inline=True)
embed.add_field(name = "**Bot Info**", value = f"**Usage: `/botinfo`** \nGives information on the bot.", inline=True)
embed.add_field(name = "**Vote**", value = f"**Usage: `/vote`** \nSends the link for you to vote for our bot on top.gg", inline=True)
embed.add_field(name = "**Bug**", value = f"**Usage: `/bug`** \nShows a form to be filled out to notify the developer of a bug", inline=True)
embed.add_field(name = "**Feedback**", value = f"**Usage: `/feedback`** \nShows a form to be filled out to show the developer feedback on the both", inline=True)
await interaction.response.edit_message(embed=embed)
if self.values[0] == "Music (BETA)":
embed = discord.Embed(
title = f"🎵 - Music Help \n*NOTE - These commands are still in beta. Please report bugs using `/contact`",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Play**", value = f"**Usage: `/play <name of song / URL>` **\nSearches YouTube, and then plays the top song.", inline=True)
embed.add_field(name = "**Skip**", value = f"**Usage: `/skip` **\nSkips the song that is currently playing.", inline=True)
embed.add_field(name = "**Queue**", value = f"**Usage: `/queue`** \nSends all of the songs that are in the queue.", inline=True)
embed.add_field(name = "**Remove**", value = f"**Usage: `/remove <song #>` **\nRemoves the specified song from the queue.", inline=True)
embed.add_field(name = "**Stop**", value = f"**Usage: `/stop`** \nStops music, clears queue, and leaves VC.", inline=True),
embed.add_field(name = "**Clear**", value = f"**Usage: `/clear` **\nRemoves ALL songs in the queue.", inline=True)
embed.add_field(name = "**Repeat**", value = f"**Usage: `/remove`** \nRepeats the song that is playing. Run the command again to stop repeating.", inline=True)
embed.add_field(name = "**Shuffle**", value = f"**Usage: `/shuffle`** \nWill play a random song in the queue. Run the command again to stop shuffling.", inline=True)
embed.add_field(name = "**Np**", value = f"**Usage: `/np` **\nSends the song that is currently playing.", inline=True)
embed.add_field(name = "**Pause**", value = f"**Usage: `/pause`** \nPauses the currently playing song.", inline=True)
embed.add_field(name = "**Resume**", value = f"**Usage: `/resume` **\nResumes the paused song.", inline=True)
await interaction.response.edit_message(embed=embed)
if self.values[0] == "Admin":
embed = discord.Embed(
title = "⚙️ - Admin Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Setprefix**", value = f"**Usage: `/setprefix <new prefix>` **\nSets the prefix for the bot in your specific server.", inline=True)
embed.add_field(name = "**Setlevel**", value = f"**Usage: `/setlevel <name of channel>` **\nSets the channel for level up messages to be sent to.", inline=True)
embed.add_field(name = "**Levelreset**", value = f"**Usage: `/levelreset` **\nResets all of the levels for everyone in the server.", inline=True)
embed.add_field(name = "**Dellevel**", value = f"**Usage: `/dellevel` **\nDeletes the channel from our database, and stops sending new level up messages.", inline=True)
embed.add_field(name = "**Levelchannel**", value = f"**Usage: `/levelchannel` ** \nShows the current channel for leveling messages.", inline=True)
embed.add_field(name = "**Setmute**", value = f"**Usage: `/setmute <name of role>` **\nSets the role that will be given to users whenever you use the `/mute` command.", inline=True)
embed.add_field(name = "**Delmute**", value = f"**Usage: `/delmute` **\nDeletes the muted role from our database.", inline=True)
embed.add_field(name = "**Muterole**", value = f"**Usage: `/muterole`** \nSends the current role that is assigned as the muted role for your server.", inline=True)
embed.add_field(name = "**Setjoin**", value = f"**Usage: `/setjoin <name of channel>` **\nSets the channel for messages to be sent whenever a new user joins your server.", inline=True)
embed.add_field(name = "**Deljoin**", value = f"**Usage: `/deljoin`** \nDeletes the channel from our database, and stops sending new user messages.", inline=True),
embed.add_field(name = "**Joinchannel**", value = f"**Usage: `/joinchannel`** \nSends the current channel that is assigned as the new user messages channel.", inline=True)
await interaction.response.edit_message(embed=embed)
if self.values[0] == 'Fun':
embed = discord.Embed(
title = "🎉 - Fun Help",
description = "**Options in `<>` are mandatory**",
colour = discord.Colour.random()
)
embed.add_field(name = "**Level**", value = f"**Usage: `/level`** \nSends your current level in the server.", inline=False)
embed.add_field(name = "**Levelboard**", value = f"**Usage: `/levelboard`** \nSends the current leaderboard for your servers leveling system.", inline=False)
embed.add_field(name = "**Ping**", value = f"**Usage: `/ping` **\nGives the current ping of the bot.", inline=True)
embed.add_field(name = "**New**", value = f"**Usage: `/new`** \nSends all of the changes to the bot.", inline=False)
await interaction.response.edit_message(embed=embed)
else:
return
class HelpView(discord.ui.View):
def __init__(self, timeout = 180.0):
super().__init__(timeout=timeout)
self.value = None
self.add_item(HelpDropdown())
url = "https://discord.com/api/oauth2/authorize?client_id=889027125275922462&permissions=8&scope=bot%20applications.commands"
self.add_item(discord.ui.Button(label="Invite Me", url=url, row=2))
@discord.ui.button(label='Main Page', style=discord.ButtonStyle.blurple, row=2)
async def main_page(self, interaction: discord.Interaction, button: discord.ui.Button):
embed = discord.Embed(
title = "Help",
description = f"**IMPORTANT - A lot of stuff changed, please use the `new` command to see all of the changes** \n\nFor extended information on commands and categories, please choose an option from the dropdown menu below.",
colour = discord.Colour.random()
)
await interaction.response.edit_message(embed=embed)
class slash_help(commands.Cog):
def __init__(self, bot):
self.bot = bot
@app_commands.command()
async def help(
self,
interaction: discord.Interaction
):
"Sends the bots commands and features"
embed = discord.Embed(
title = "Help",
description = f"**IMPORTANT - All commands are now slash commands, and a few changes have been made. Please use `/new` to see any alterations.",
colour = discord.Colour.random()
)
view = HelpView()
await interaction.response.send_message(embed=embed, view=view, ephemeral=True)
@commands.Cog.listener()
async def on_message(self, message: discord.Message) -> None:
if re.fullmatch(rf"<@!?{self.bot.user.id}>", message.content):
embed = discord.Embed(
title = f"All commands are now slash commands!",
description = f"**Use `/help` in order to get help on what commands are available.**",
colour = discord.Colour.blurple()
)
await message.reply(embed=embed)
async def setup(bot):
await bot.add_cog(slash_help(bot))
| 2.40625
| 2
|
fartor/apps/accounting/users/models/__init__.py
|
verkatech/fartor-django
| 6
|
12774714
|
<reponame>verkatech/fartor-django
from .login_histories import LoginHistory
from .users import User
| 1.085938
| 1
|
trustMonitor/trust_monitor_driver/parsingOAT.py
|
shield-h2020/trust-monitor
| 2
|
12774715
|
<reponame>shield-h2020/trust-monitor
from trust_monitor.verifier.structs import *
from trust_monitor.verifier.statistics import *
from suds.client import Client
from trust_monitor.verifier.parser import IRParser, IMAMeasureHandler
from trust_monitor.verifier.parser import ContainerCheckAnalysis
import logging
import gc
import xmltodict
import ssl
# use logging system of django.
logger = logging.getLogger('driver')
class ParsingOAT():
def __init__(self):
logger.info('Parsing OAT Set structures')
Digest.digests_dict = {}
Digest.digests_query_done = False
Digest.packages_query_done = False
Digest.packages_query = set()
Package.pkg_dict = {}
IMARecord.records = []
Subject.subj_label_dict = {}
Object.obj_label_dict = {}
ssl._create_default_https_context = ssl._create_unverified_context
def parsing(self, analysis, checked_containers,
report_url, report_id, infoDigest):
doCheckContAnalysis = False
containers = {}
if 'cont-check' in analysis:
doCheckContAnalysis = True
logger.info('Understand what kind of analysis to do')
for item in analysis.split(','):
if item.startswith('cont-list'):
logger.info('Analysis includes containers')
checked_containers = item.split('=')[1]
break
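# (Illustrative note, not in the original source: based on the loop above, an
#  analysis string such as "cont-check,cont-list=web" would enable container
#  checking and take the value after "cont-list=" as the container list.)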
try:
if report_url is not None and report_id != 0:
client = Client(report_url)
logger.info('report url ' + str(report_url))
logger.info('report id ' + str(report_id))
report_str = client.service.fetchReport(report_id)
logger.info('Start to parse IR %s', str(report_id))
IRParser(report_str, ContainerCheckAnalysis(doCheckContAnalysis,
containers,
checked_containers,
infoDigest))
logger.info('Parsing of IR done.')
try:
data_xml = xmltodict.parse(report_str)
host_name = (data_xml['ns3:Report']['ns3:QuoteData']
['ns3:TpmSignature']['ns3:KeyInfo']['KeyName'])
except Exception:
host_name = (data_xml['ns3:Report']['ns3:QuoteData']
['ns3:TpmSignature']['ns3:KeyInfo']
['ns2:KeyName'])
logger.info(host_name)
infoDigest.host = host_name
gc.collect()
except Exception as e:
logger.error('Error opening IR, %s', e)
if 'report_str' in locals():
    del report_str
gc.collect()
return 2
return 0
| 2.203125
| 2
|
minv/mongo_4_0.py
|
kevinadi/invoke-mongodb
| 0
|
12774716
|
<reponame>kevinadi/invoke-mongodb
# MongoDB 4.0
import os
from mongo_basic import BasicMongo
class Mongo(BasicMongo):
def __init__(self):
pass
def version(self):
return '4.0'
| 1.953125
| 2
|
manage.py
|
arctelix/pinimatic
| 12
|
12774717
|
<filename>manage.py<gh_stars>10-100
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
if 'RACK_ENV' in os.environ:
RACK_ENV = os.environ.get("RACK_ENV")
print 'RACK_ENV: ', RACK_ENV
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pinry.settings."+RACK_ENV)
else:
print 'RACK_ENV not detected using development settings'
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pinry.settings.development")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 1.875
| 2
|
modules/steps/find_docker_stack_files.py
|
KTH/aspen
| 0
|
12774718
|
"""FindDockerStackFiles
Crawls the fetched application registry directory (from FetchAppRegistry)
and locates all docker-stack.yml files"""
__author__ = '<EMAIL>'
import os
from modules.steps.base_pipeline_step import BasePipelineStep
from modules.util import environment, data_defs
class FindDockerStackFiles(BasePipelineStep):
def __init__(self):
BasePipelineStep.__init__(self)
self.registry_root = None
def get_required_env_variables(self):
return [environment.REGISTRY_SUB_DIRECTORY]
def get_required_data_keys(self):
return []
def run_step(self, pipeline_data):
self.registry_root = environment.get_registry_path()
pipeline_data[data_defs.STACK_FILES] = self.walk_repository()
return pipeline_data
def walk_repository(self):
stack_files = []
for dirpath, _, files in os.walk(self.registry_root):
for file in files:
if file == 'docker-stack.yml':
stack_files.append(os.path.join(dirpath, file))
self.log.debug('Found %s docker stack files', len(stack_files))
return stack_files
| 2.453125
| 2
|
__init__.py
|
Vladimir37/finam_stock_data
| 6
|
12774719
|
from .finam_stock_data import get_data
| 1.046875
| 1
|
deep_learn/dataset/sampler/__init__.py
|
ImbesatRizvi/Accio
| 2
|
12774720
|
from .BinaryPairedWindowSampler import BinaryPairedWindowSampler
| 1.085938
| 1
|
1-10/p2.py
|
smith-erik/project-euler
| 0
|
12774721
|
#!/usr/bin/python3
print("Sum of even-valued terms less than four million in the Fibonacci sequence:")
a, b, sum = 1, 1, 0
while b < 4000000:
sum += b if b % 2 == 0 else 0
a, b = b, a + b
print(sum)
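# Illustrative note (not in the original file): for the 4,000,000 limit above,
# the printed sum is 4613732.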
| 3.578125
| 4
|
code/report/inspectParameters.py
|
matthijsvk/multimodalSR
| 53
|
12774722
|
import logging
import formatting
logger_inspectParameters = logging.getLogger('inspectParameters')
logger_inspectParameters.setLevel(logging.DEBUG)
FORMAT = '[$BOLD%(filename)s$RESET:%(lineno)d][%(levelname)-5s]: %(message)s '
formatter = logging.Formatter(formatting.formatter_message(FORMAT, False))
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger_inspectParameters.addHandler(ch)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib
matplotlib.rcParams.update({'font.size': 22})
def load_model(model_path, logger=logger_inspectParameters):
logger.info("Loading stored model...")
# restore network weights
with np.load(model_path) as f:
all_params = [f['arr_%d' % i] for i in range(len(f.files))][0]
logger.info("number of layers: %s", len(all_params))
for i in range(len(all_params)):
layer_params = all_params[i]
logger.info("layer %s.shape: %s", i, layer_params.shape)
return all_params
# model_path = '/home/matthijs/TCDTIMIT/audioSR/combined/results/BEST/2_LSTMLayer64_64_nbMFCC39_bidirectional_combined.npz'
# load_model(model_path=model_path)
# lipreading dense, audio raw features
model_path = '/home/matthijs/TCDTIMIT/combinedSR/TCDTIMIT/results/CNN_LSTM/lipspeakers/' \
'RNN__2_LSTMLayer256_256_nbMFCC39_bidirectional' \
'__CNN_google_dense_lipRNN_256_256_RNNfeaturesDense' \
'__FC_512_512_512__TCDTIMIT_lipspeakers.npz'
# lipreading
model_path ='/home/matthijs/TCDTIMIT/combinedSR/TCDTIMIT/results/CNN_LSTM/lipspeakers/' \
'RNN__2_LSTMLayer256_256_nbMFCC39_bidirectional' \
'__CNN_google_dense_lipRNN_256_256' \
'__FC_512_512_512__TCDTIMIT_lipspeakers.npz'
paramsArray = load_model(model_path=model_path)
# CNN features : layer 18
# CNN-LSTM features: layer 29
# audio features:
# combined features weights: layers 122 (768x 512)
combined_features = paramsArray[122]
lip_features = combined_features[:256]
lip_features = np.abs(lip_features.flatten())
mean_lip = np.mean(lip_features)
median_lip = np.median(lip_features)
rms_lip = np.sqrt(np.mean(np.square(lip_features)))
print("lipreading mean: ", mean_lip)
print("lipreading median: ", median_lip)
print("lipreading rms: ", rms_lip)
audio_features = combined_features[256:]
audio_features = np.abs(audio_features.flatten())
mean_audio = np.mean(audio_features)
median_audio = np.median(audio_features)
rms_audio = np.sqrt(np.mean(np.square(audio_features)))
print("audio mean: ", mean_audio)
print("audio median: ", median_audio)
print("audio rms: ", rms_audio)
# Recorded output from the run above (kept as comments so the script stays runnable):
# lipreading mean: 0.0469951
# lipreading median: 0.041638106
# lipreading rms: 0.057422262
# audio mean: 0.04470453
# audio median: 0.039826244
# audio rms: 0.054539148
showFigs = True
if showFigs:
fig = figure()
ax = fig.add_subplot(111)
ax.hist(combined_features.flatten(), bins='auto') # plt.hist passes its arguments to np.histogram
#ax.boxplot(combined_features.flatten()) # , cmap='binary')
ax.set_xlabel("weight size")
ax.set_ylabel("number of weights")
plt.show()
fig = figure()
ax = fig.add_subplot(111)
ax.hist(lip_features.flatten(), bins='auto') # , cmap='binary')
ax.set_title("FC weight size for Lipreading features ")
ax.set_xlabel("weight size")
ax.set_ylabel("number of weights")
plt.show()
fig = figure()
ax = fig.add_subplot(111)
ax.hist(audio_features.flatten(), bins='auto') # , cmap='binary')
ax.set_title("FC weight size for Audio features ")
ax.set_xlabel("weight size")
ax.set_ylabel("number of weights")
plt.show()
| 2.25
| 2
|
projects/ABD_Net/ABD_components/args.py
|
Yogurt2019/abd-deep-person-reid
| 0
|
12774723
|
<reponame>Yogurt2019/abd-deep-person-reid
import os
import argparse
def argument_parser():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# ************************************************************
# Branches Related
# ************************************************************
parser.add_argument('--compatibility', type=bool, default=False)
parser.add_argument('--branches', nargs='+', type=str, default=['global', 'abd']) # global abd
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--global-max-pooling', type=bool, default=False)
parser.add_argument('--global-dim', type=int, default=1024)
parser.add_argument('--abd-dim', type=int, default=1024)
parser.add_argument('--abd-np', type=int, default=2)
parser.add_argument('--abd-dan', nargs='+', type=str, default=[]) # cam pam
parser.add_argument('--abd-dan-no-head', type=bool, default=True)
parser.add_argument('--shallow-cam', type=bool, default=True)
# ************************************************************
# OF Related
# ************************************************************
parser.add_argument('--use-of', type=bool, default=False)
parser.add_argument('--of-beta', type=float, default=1e-6)
parser.add_argument('--of-start-epoch', type=int, default=23)
parser.add_argument('--of-position', nargs='+', type=str, default=['before', 'after', 'cam', 'pam', 'intermediate'])
# ************************************************************
# OW Related
# ************************************************************
parser.add_argument('--use-ow', type=bool, default=False)
parser.add_argument('--ow-beta', type=float, default=1e-3)
return parser
def model_kwargs(parsed_args):
return {
'branches': parsed_args.branches,
'global_max_pooling': parsed_args.global_max_pooling,
'global_dim': parsed_args.global_dim,
'dropout': parsed_args.dropout,
'abd_dim': parsed_args.abd_dim,
'abd_np': parsed_args.abd_np,
'abd_dan': parsed_args.abd_dan,
'abd_dan_no_head': parsed_args.abd_dan_no_head,
'shallow_cam': parsed_args.shallow_cam,
'compatibility': parsed_args.compatibility
}
def of_kwargs(parsed_args):
return {
'of_position': parsed_args.of_position,
'of_beta': parsed_args.of_beta,
'of_start_epoch': parsed_args.of_start_epoch
}
def ow_kwargs(parsed_args):
return {
'use_ow':parsed_args.use_ow,
'ow_beta': parsed_args.ow_beta
}
| 2.28125
| 2
|
analysis.py
|
colm-o-caoimh/IrisDataset
| 0
|
12774724
|
# <NAME>
# PandS project 2020
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Import data as pandas dataframe
iris_data = pd.read_csv('iris.data', header=None)
# assign column headers
iris_data.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
# A. Output a summary of each variable to a single txt file.
# Isolate columns according to data type
float_values = iris_data.iloc[:,0:4]
str_values = iris_data.iloc[:,4]
# Use describe function to summarise data
float_summary = float_values.describe()
str_summary = str_values.describe()
# Establish 3 unique values in str_summary.
# This creates an array of each value.
str_summary = str_values.unique()
# Transpose str_summary array and convert to dataframe
str_summary = str_summary[:, None]
str_summary = pd.DataFrame({"Species": str_summary[:, 0]})
# Format string variable summary
# Add column containing quantity of unique values
quantity = ['50', '50', '50']
str_summary['Count'] = quantity
# Rename rows in str_summary
str_summary.index = ['Species_A', 'Species_B', 'Species_C']
# Format summary output and write to text file
with open("iris_summary.txt", "w") as f:
heading = "SUMMARY OF VARIABLES IN IRIS DATASET"
f.write(heading + "\n")
f.write("=" * len(heading) + "\n\n\n\n")
heading2 = "NUMERIC VARIABLE SUMMARY"
f.write(heading2 + "\n")
f.write("=" * len(heading2) + "\n")
f.write(float_summary.to_string() + "\n\n\n\n")
heading3 = "DEPENDENT VARIABLE SUMMARY"
f.write(heading3 + "\n")
f.write("=" * len(heading3) + "\n")
f.write(str_summary.to_string() + "\n\n\n\n\n\n\n")
# B. Save a histogram of each variable to png files
# Assign each column to a variable for easier manipulation
sep_len = iris_data['sepal_length']
sep_width = iris_data['sepal_width']
pet_len = iris_data['petal_length']
pet_width = iris_data['petal_width']
species = iris_data['species']
# Write a function which outputs a histogram for each dataset variable and saves
# it as a png file.
# First for numeric variables
def var_hist(var_data, fig_num, x_label, y_label, title, filepath):
plt.figure(fig_num)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.hist(var_data, rwidth=0.9,)
plt.savefig(filepath)
plt.close() # Close figure so plot won't be displayed later
# Then for string variable
def var_hist2(var_data, fig_num, x_label, y_label, title, filepath):
plt.figure(fig_num)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.hist(var_data, bins=3, rwidth=0.9)
plt.xticks(np.arange(0,3))
plt.savefig(filepath)
plt.close()
# Call function for each variable
var_hist(sep_len, 1, 'sepal_length_cm', 'Frequency', 'Sepal Length', 'sepal_length.png')
var_hist(sep_width, 2, 'sepal_width_cm', 'Frequency', 'Sepal Width', 'sepal_width.png')
var_hist(pet_len, 3, 'petal_length_cm', 'Frequency', 'Petal Length', 'petal_length.png')
var_hist(pet_width, 4, 'petal_width_cm', 'Frequency', 'Petal Width', 'petal_width.png')
var_hist2(species, 5, 'species', 'Frequency', 'Iris Species', 'species.png')
# 4 axes on one figure for better visual comparison
fig, axs = plt.subplots(2, 2)
axs1 = axs[0, 0]
axs1.hist(sep_len, rwidth=0.9)
axs1.set_title('Sepal_Length_Cm')
axs1.set(ylabel='frequency')
axs2 = axs[0, 1]
axs2.hist(sep_width, rwidth=0.9)
axs2.set_title('Sepal_Width_Cm',)
axs2.set(ylabel='frequency')
axs3 = axs[1, 0]
axs3.hist(pet_len, rwidth=0.9)
axs3.set_title('Petal_Length_Cm')
axs3.set(ylabel='frequency')
axs4 = axs[1, 1]
axs4.hist(pet_width, rwidth=0.9)
axs4.set_title('Petal_Width_Cm')
axs4.set(ylabel='frequency')
#plt.show()
plt.close()
# C. Output a scatter plot of each pair of variables
# Scatter plot with matplotlib (no colour separation)
plt.scatter(sep_len, sep_width)
plt.xlabel('sepal_length')
plt.ylabel('sepal_width')
#plt.show()
plt.close()
# Write a function which outputs a scatter plot of each pair of variables.
# Each categorical variable (species of iris flower) is categorized by colour
def scatter(x, y):
sns.set(style="darkgrid", font_scale=1.25)
sns.lmplot(x=x, y=y, data=iris_data, fit_reg=False, hue='species')
plt.show()
plt.close()
# Call function for each pair of variables
scatter('sepal_length', 'sepal_width')
scatter('sepal_length', 'petal_length')
scatter('sepal_length', 'petal_width')
scatter('sepal_width', 'petal_length')
scatter('sepal_width', 'petal_width')
scatter('petal_length', 'petal_width')
# Output pairplot using kde to represent marginal distribution
sns.set(style='ticks', font_scale=1.25, color_codes=True)
sns.pairplot(iris_data, hue='species', diag_kind='kde')
plt.show()
| 3.296875
| 3
|
test/test_utilities.py
|
2b-t/stereo-matching
| 1
|
12774725
|
# <NAME> - github.com/2b-t (2022)
# @file utilities_test.py
# @brief Different testing routines for utility functions for accuracy calculation and file import and export
import numpy as np
from parameterized import parameterized
from typing import Tuple
import unittest
from src.utilities import AccX, IO
class TestAccX(unittest.TestCase):
_shape = (10,20)
_disparities = [ ["disparity = 1", 1],
["disparity = 2", 2],
["disparity = 3", 3]
]
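# (Inferred from the tests below, as an illustrative note: AccX.compute appears to
#  return the fraction of pixels, within the optional mask, whose disparity error
#  does not exceed the given threshold.)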
@parameterized.expand(_disparities)
def test_same_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two identical images result in an accuracy measure of unity
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = mag*np.ones(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 1.0, places=7)
return
@parameterized.expand(_disparities)
def test_slightly_shifted_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if an image and its slightly shifted counterpart result in an accuracy measure of unity
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = (mag+threshold_disparity-1)*np.ones(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 1.0, places=7)
return
@parameterized.expand(_disparities)
def test_no_mask(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two identical images with no given mask result in an accuracy measure of unity
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = mag*np.ones(groundtruth_image.shape)
mask_image = None
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 1.0, places=7)
return
@parameterized.expand(_disparities)
def test_inverse_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two inverse images result in an accuracy measure of zero
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = np.zeros(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 0.0, places=7)
return
@parameterized.expand(_disparities)
def test_significantly_shifted_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if an image and its significantly shifted counterpart result in an accuracy measure of zero
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = (mag+threshold_disparity+1)*np.ones(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 0.0, places=7)
return
@parameterized.expand(_disparities)
def test_zero_mask(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two equal images with a mask of zero results in an accuracy measure of zero
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = groundtruth_image
mask_image = np.zeros(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 0.0, places=7)
return
class TestIO(unittest.TestCase):
_resolutions = [ ["resolution = (10, 20)", (10, 20)],
["resolution = (30, 4)", (30, 4)],
["resolution = (65, 24)", (65, 24)]
]
def test_import_image(self) -> None:
# TODO(tobit): Implement
pass
def test_export_image(self) -> None:
# TODO(tobit): Implement
pass
def test_str_comma(self) -> None:
# Function for testing conversion of numbers to comma-separated numbers
self.assertEqual(IO._str_comma(10, 2), "10")
self.assertEqual(IO._str_comma(9.3, 2), "9,3")
self.assertEqual(IO._str_comma(1.234, 2), "1,23")
return
@parameterized.expand(_resolutions)
def test_normalise_positive_image_no_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
# Function for testing normalising a positive image with a no ground-truth should result in a positive image
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
image = mag*np.ones(shape)
groundtruth_image = None
result = IO.normalise_image(image, groundtruth_image)
self.assertGreaterEqual(np.min(result), 0.0)
self.assertLessEqual(np.max(result), 1.0)
return
@parameterized.expand(_resolutions)
def test_normalise_positive_image_positive_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
# Function for testing normalising a regular image with a regular ground-truth should result in a positive image
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
image = mag*np.ones(shape)
groundtruth_image = 2*image
result = IO.normalise_image(image, groundtruth_image)
self.assertGreaterEqual(np.min(result), 0.0)
self.assertLessEqual(np.max(result), 1.0)
return
@parameterized.expand(_resolutions)
def test_normalise_negative_image_positive_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
# Function for testing normalising a negative image which should result in a ValueError
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
groundtruth_image = mag*np.ones(shape)
image = -2*groundtruth_image
self.assertRaises(ValueError, IO.normalise_image, image, groundtruth_image)
return
@parameterized.expand(_resolutions)
def test_normalise_positive_image_negative_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
# Function for testing normalising a negative ground-truth which should result in a ValueError
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
image = mag*np.ones(shape)
groundtruth_image = -2*image
self.assertRaises(ValueError, IO.normalise_image, image, groundtruth_image)
return
if __name__ == '__main__':
unittest.main()
| 2.90625
| 3
|
config.py
|
dgg32/graphql_genbank
| 0
|
12774726
|
api_key = ""
endpoint_url = ""
| 1.085938
| 1
|
solutions/python3/823.py
|
sm2774us/amazon_interview_prep_2021
| 42
|
12774727
|
import collections

class Solution:
def numFactoredBinaryTrees(self, A):
"""
:type A: List[int]
:rtype: int
"""
A.sort()
nums, res, trees, factors = set(A), 0, {}, collections.defaultdict(set)
for i, num in enumerate(A):
for n in A[:i]:
if num % n == 0 and num // n in nums: factors[num].add(n)
for root in A:
trees[root] = 1
for fac in factors[root]: trees[root] += trees[fac] * trees[root // fac]
return sum(trees.values()) % ((10 ** 9) + 7)
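
# Illustrative sanity check (not part of the original submission), assuming
# `collections` is imported at module level (added above): for A = [2, 4] the
# valid trees are [2], [4], and [4 with children 2 and 2], so the expected count is 3.
if __name__ == "__main__":
    assert Solution().numFactoredBinaryTrees([2, 4]) == 3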
| 3.015625
| 3
|
src/grass/functor.py
|
running-grass/grass-python
| 0
|
12774728
|
<gh_stars>0
from abc import ABC, abstractmethod
from typing import TypeVar, Callable, Generic
# from collections.abc import Callable
from grass.function import flip
A = TypeVar('A')
B = TypeVar('B')
C = TypeVar('C')
class Functor(ABC, Generic[A]):
'''函子'''
@abstractmethod
def fmap(self, f: Callable[[A], B]) -> "Functor[B]":  # quoted: Functor is still being defined here
pass
def map(f: Callable[[A], B], fa: Functor[A]) -> Functor[B]:
if isinstance(fa, Functor):
return fa.fmap(f)
else:
raise ValueError
mapFlipped = flip(map)
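
# --- Illustrative sketch (not part of the original module) ---
# A minimal concrete Functor, assuming only the Functor ABC defined above,
# to show how fmap/map are intended to be used.
class Box(Functor[A]):
    def __init__(self, value: A) -> None:
        self.value = value

    def fmap(self, f: Callable[[A], B]) -> "Box[B]":
        # Apply f to the wrapped value and re-wrap the result.
        return Box(f(self.value))

# Example: map(len, Box("grass")).value == 5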
| 2.578125
| 3
|
package/tests/test_bootstrap_dashboard.py
|
philippjfr/awesome-panel
| 0
|
12774729
|
<filename>package/tests/test_bootstrap_dashboard.py<gh_stars>0
"""Tests of the BootStrapDashboardTemplate"""
import importlib
import pytest
from selenium import webdriver
import awesome_panel.express as pnx
import panel as pn
importlib.reload(pnx)
@pytest.fixture
def chrome_driver() -> webdriver.Chrome:
r"""The Chrome Web Driver Configured for Download
You can download the Chrome driver from https://chromedriver.chromium.org/ and move
chromedriver.exe to c:\Windows\System32 or an alternative location in the PATH
Returns:
webdriver.Chrome -- The Chrome Web Driver
"""
options = webdriver.ChromeOptions()
# Maybe add this later
# options.add_argument('headless')
options.add_experimental_option("useAutomationExtension", False)
return webdriver.Chrome(options=options)
def test_app():
"""Test of the attributes of the Template"""
app = pnx.templates.BootstrapDashboardTemplate()
assert hasattr(app, "main")
assert hasattr(app, "sidebar")
assert isinstance(app.main, pn.layout.Panel)
assert isinstance(app.sidebar, pn.layout.Panel)
def test_markdown_image_width_max_100_percent():
"""We test that the markdown image width cannot be more than 100%.
This is useful in order to reduce the friction of using the template and Panel in general"""
| 2.453125
| 2
|
testOneNN.py
|
agollapudi2019/ShallowMind
| 1
|
12774730
|
<reponame>agollapudi2019/ShallowMind
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.utils import to_categorical
from db import createDatasetsDocument, createNeuralNetsDocument, createExperimentsDocument
import GaussianBoundary as gb
import numpy as np
import keras
from testNN import test
from Utils import iterate,plotData
from generateNN import make
import matplotlib.pyplot as plt
# Continuously runs epochs on neural net with given data points until error is minimized
# nn: compiled neural net
# tdata = training data
# vdata = validation data
coVec = [1,0]
# later - explicitly create 10 datasets, for each dataset, create & test all neural nets
# coVec = [0.125, 0, -4] # 0.125x^2 + 0x -4
# peak - max probability of miscategorizing a point, sigma - band of miscategorized points
noiseDist = [0, 0]
tdata = np.array(gb.getPoints(coVec, 2000, noiseDist[0], noiseDist[1], -10, 10, -10, 10))
vdata = np.array(gb.getPoints(coVec, 2000, noiseDist[0], noiseDist[1], -10, 10, -10, 10))
datasetID = createDatasetsDocument(coVec, noiseDist, [-10, 10, -10, 10], tdata, vdata)
# in the first list is peak & sigma, second list is the bounds for the data generation piece
| 2.71875
| 3
|
unit_tests/test_instance.py
|
vonsago/service_platform
| 6
|
12774731
|
<filename>unit_tests/test_instance.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-09 14:28
# @Author : Vassago
# @File : test_instance.py
# @Software: PyCharm
import logging
from unit_tests.common import BaseTestCase
LOG = logging.getLogger(__name__)
class TestInstance(BaseTestCase):
def setUp(self):
self.short_id = None
self.create_test_app()
def test_create_instance(self):
data = {
"image": "mysql:5.7.19",
"ports": "",
"type": "mysql",
"volumes": "",
"environment": ""
}
status, testcreateresponse = self.post('/v1/instances', data=data)
self.assertEqual(status, 200)
self.short_id = testcreateresponse.get("short_id")
#self.assertxDictContainsEqual(test_user_openapi_key, 'name', "test_user_apikey_test1")
def test_get_instance_list(self):
status, testgetinstance = self.get('/v1/instances')
self.assertEqual(status, 200)
def test_restart_instance(self):
status, testrestartinstance = self.post('/v1/instances/{}/restart'.format(self.short_id))
self.assertEqual(status, 200)
def test_stop_instance(self):
status, teststopinstnace = self.delete('/v1/instances/{}/stop'.format(self.short_id))
self.assertEqual(status, 200)
| 2.53125
| 3
|
200.py
|
geethakamath18/Leetcode
| 0
|
12774732
|
#LeetCode problem 200: Number of Islands
from typing import List

class Solution:
def check(self,grid,nodesVisited,row,col,m,n):
return (row>=0 and row<m and col>=0 and col<n and grid[row][col]=="1" and nodesVisited[row][col]==0)
def dfs(self,grid,nodesVisited,row,col,m,n):
a=[-1,1,0,0]
b=[0,0,1,-1]
nodesVisited[row][col]=1
for k in range(4):
if self.check(grid,nodesVisited,row+a[k],col+b[k],m,n):
self.dfs(grid,nodesVisited,row+a[k],col+b[k],m,n)
def numIslands(self, grid: List[List[str]]) -> int:
nodesVisited=[[0 for i in range(len(grid[0]))] for i in range(len(grid))]
count=0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j]=="0":
continue
elif grid[i][j]=="1" and nodesVisited[i][j]==0:
count+=1
self.dfs(grid,nodesVisited,i,j,len(grid),len(grid[0]))
return count
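
# Illustrative check (not part of the original solution): the grid below contains
# two islands, one formed by the connected "1"s in the top-left corner and one
# single cell at (2, 2), so numIslands should return 2.
if __name__ == "__main__":
    demo_grid = [["1", "1", "0"],
                 ["0", "1", "0"],
                 ["0", "0", "1"]]
    assert Solution().numIslands(demo_grid) == 2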
| 3.234375
| 3
|
src/extensions/COMMANDS/ListCommand.py
|
DMTF/python-redfish-utility
| 15
|
12774733
|
###
# Copyright Notice:
# Copyright 2016 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/python-redfish-utility/blob/master/LICENSE.md
###
""" List Command for RDMC """
import redfish.ris
from optparse import OptionParser
from rdmc_base_classes import RdmcCommandBase
from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS,\
NoContentsFoundForOperationError
class ListCommand(RdmcCommandBase):
""" Constructor """
def __init__(self, rdmcObj):
RdmcCommandBase.__init__(self,\
name='list',\
usage='list [OPTIONS]\n\n\tDisplays the current values of the ' \
'properties within\n\ta selected type including'\
' reserved properties\n\texample: list\n\n\tNOTE: If ' \
'you wish not to get all the reserved properties\n\t ' \
' run the get command instead',\
summary='Displays the current value(s) of a' \
' property(ies) within a selected type including'\
' reserved properties.',\
aliases=['ls'],\
optparser=OptionParser())
self.definearguments(self.parser)
self._rdmc = rdmcObj
self.lobobj = rdmcObj.commandsDict["LoginCommand"](rdmcObj)
self.selobj = rdmcObj.commandsDict["SelectCommand"](rdmcObj)
self.getobj = rdmcObj.commandsDict["GetCommand"](rdmcObj)
def run(self, line):
""" Wrapper function for main list function
:param line: command line input
:type line: string.
"""
try:
(options, args) = self._parse_arglist(line)
except:
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.listvalidation(options)
if args:
for arg in args:
newargs = list()
if "/" in arg:
newargs = arg.split("/")
arg = newargs[0]
if not self.getobj.getworkerfunction(arg, options, line,\
newargs=newargs, uselist=True):
raise NoContentsFoundForOperationError('No contents found '\
'for entry: %s\n' % arg)
else:
if not self.getobj.getworkerfunction(args, options, line, \
uselist=True):
raise NoContentsFoundForOperationError('No contents found.')
#Return code
return ReturnCodes.SUCCESS
def listvalidation(self, options):
""" List data validation function
:param options: command line options
:type options: list.
"""
inputline = list()
if self._rdmc.app.config._ac__format.lower() == 'json':
options.json = True
try:
self._rdmc.app.get_current_client()
except:
if options.user or options.password or options.url:
if options.url:
inputline.extend([options.url])
if options.user:
inputline.extend(["-u", options.user])
if options.password:
inputline.extend(["-p", options.password])
else:
if self._rdmc.app.config.get_url():
inputline.extend([self._rdmc.app.config.get_url()])
if self._rdmc.app.config.get_username():
inputline.extend(["-u", \
self._rdmc.app.config.get_username()])
if self._rdmc.app.config.get_password():
inputline.extend(["-p", \
self._rdmc.app.config.get_password()])
if len(inputline) and options.selector:
if options.filter:
inputline.extend(["--filter", options.filter])
if options.includelogs:
inputline.extend(["--includelogs"])
if options.path:
inputline.extend(["--path", options.path])
inputline.extend(["--selector", options.selector])
self.lobobj.loginfunction(inputline)
elif options.selector:
if options.filter:
inputline.extend(["--filter", options.filter])
if options.includelogs:
inputline.extend(["--includelogs"])
if options.path:
inputline.extend(["--path", options.path])
inputline.extend([options.selector])
self.selobj.selectfunction(inputline)
else:
try:
inputline = list()
selector = self._rdmc.app.get_selector()
if options.filter:
inputline.extend(["--filter", options.filter])
if options.includelogs:
inputline.extend(["--includelogs"])
if options.path:
inputline.extend(["--path", options.path])
inputline.extend([selector])
self.selobj.selectfunction(inputline)
except:
raise redfish.ris.NothingSelectedError
def definearguments(self, customparser):
""" Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
customparser.add_option(
'--url',
dest='url',
help="Use the provided URL to login.",
default=None,
)
customparser.add_option(
'-u',
'--user',
dest='user',
help="If you are not logged in yet, including this flag along"\
" with the password and URL flags can be used to log into a"\
" server in the same command.""",
default=None,
)
customparser.add_option(
'-p',
'--password',
dest='password',
help="""Use the provided password to log in.""",
default=None,
)
customparser.add_option(
'--includelogs',
dest='includelogs',
action="store_true",
help="Optionally include logs in the data retrieval process.",
default=False,
)
customparser.add_option(
'--selector',
dest='selector',
help="Optionally include this flag to select a type to run"\
" the current command on. Use this flag when you wish to"\
" select a type without entering another command, or if you"\
" wish to work with a type that is different from the one"\
" you currently have selected.",
default=None,
)
customparser.add_option(
'--filter',
dest='filter',
help="Optionally set a filter value for a filter attribute."\
" This uses the provided filter for the currently selected"\
" type. Note: Use this flag to narrow down your results. For"\
" example, selecting a common type might return multiple"\
" objects that are all of that type. If you want to modify"\
" the properties of only one of those objects, use the filter"\
" flag to narrow down results based on properties."\
"\t\t\t\t\t Usage: --filter [ATTRIBUTE]=[VALUE]",
default=None,
)
customparser.add_option(
'-j',
'--json',
dest='json',
action="store_true",
help="Optionally include this flag if you wish to change the"\
" displayed output to JSON format. Preserving the JSON data"\
" structure makes the information easier to parse.",
default=False
)
customparser.add_option(
'--logout',
dest='logout',
action="store_true",
help="Optionally include the logout flag to log out of the"\
" server after this command is completed. Using this flag when"\
" not logged in will have no effect",
default=None,
)
customparser.add_option(
'--path',
dest='path',
help="Optionally set a starting point for data collection."\
" If you do not specify a starting point, the default path"\
" will be /redfish/v1/. Note: The path flag can only be specified"\
" at the time of login, so if you are already logged into the"\
" server, the path flag will not change the path. If you are"\
" entering a command that isn't the login command, but include"\
" your login information, you can still specify the path flag"\
" there. ",
default=None,
)
| 1.828125
| 2
|
app/answer/apps.py
|
Ravishrks/examin
| 1
|
12774734
|
from django.apps import AppConfig
class AnswerConfig(AppConfig):
name = 'answer'
| 1.210938
| 1
|
scraper.py
|
Vasile2k/OlxScraper
| 4
|
12774735
|
<filename>scraper.py
__author__ = "Vasile2k"
import requests
from html.parser import HTMLParser
queries = [
"Corsair K95",
"Gigabyte Aorus Z390 Pro"
]
url = "https://www.olx.ro/oferte/"
user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko)" \
" Chrome/35.0.1916.47 Safari/537.36"
headers = {"User-Agent": user_agent}
products = {}
last_product = []
def clean_string(input):
# clean the data from tabs, spaces, slashes and/or newlines
# because as I see, the parser gives a lot of them
# and '\n', because fuck python
return str(input).replace("\n", "").replace("\\n", "").replace("\t", "").replace(" ", "").replace("\\", "")
def clean_shitty_decoding(input):
# clean the unescaped string
# encode all symbols to unicode, escape them, keep only ascii, decode
# now you have a clean string
# fuck python
return str(input).encode("utf-8").decode("unicode_escape").encode("ascii", errors="ignore").decode()
def add_product(product):
if not last_product == []:
raise Exception("Add the price of the previous product before adding a new one!")
if not isinstance(product, str):
raise TypeError("\'product\' should be a string!")
last_product.append(product)
def add_price(price):
if last_product == []:
raise Exception("Add a product before adding a price!")
if not isinstance(price, str):
raise TypeError("\'price\' should be a string!")
products[last_product[0]] = price
last_product.clear()
def list_all_products():
max_len = max(len(p) for p in products)
for k in products:
print(k.ljust(max_len + 4), " -> ", products[k])
class OlxResponseParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.__inside_table_count = 0
self.__has_data = False
self.__has_price_data = False
def handle_starttag(self, tag, attrs):
# convert attrs to dict
attrs = dict(attrs)
# clean the tag attribute because the parser seems to add a lot of shit
tag = clean_string(tag)
if tag == "table" and "id" in attrs and attrs["id"] == "offers_table":
# start the table with listings
self.__inside_table_count = 1
if self.__inside_table_count:
if tag == "table":
# increment the table counter because there are tables inside the table with listings
self.__inside_table_count += 1
elif tag == "a" and "data-cy" in attrs and attrs["data-cy"] == "listing-ad-title":
self.__has_data = True
elif tag == "p" and "class" in attrs and attrs["class"] == "price":
self.__has_price_data = True
def handle_endtag(self, tag):
if tag == "table" and self.__inside_table_count:
self.__inside_table_count -= 1
def handle_data(self, data):
if not clean_string(data) == "":
if self.__has_data:
add_product(clean_shitty_decoding(data))
self.__has_data = False
elif self.__has_price_data:
add_price(clean_shitty_decoding(data))
self.__has_price_data = False
def create_query_url(query_text):
return url + "q-" + query_text.replace(" ", "-") + "/"
if __name__ == "__main__":
for query in queries:
response = requests.get(create_query_url(query), headers=headers)
parser = OlxResponseParser()
parser.feed(str(response.content))
parser.close()
list_all_products()
| 3.203125
| 3
|
app/language_features/pools/pool_proc.py
|
andykmiles/code-boutique
| 0
|
12774736
|
<filename>app/language_features/pools/pool_proc.py<gh_stars>0
"""
Pool distributes the tasks to the available processors using a FIFO
scheduling. It works like a map reduce architecture. It maps the input to the
different processors and collects the output from all the processors. After the
execution of code, it returns the output in form of a list or array. It waits
for all the tasks to finish and then returns the output. The processes in
execution are stored in memory and other non-executing processes are stored out
of memory.
Process puts all the processes in memory and schedules execution
using FIFO policy. When the process is suspended, it pre-empts and schedules
new process for execution.
"""
from multiprocessing import Pool
from multiprocessing import Process
import os
def f1(x): # for Pool
return x*x
def info(title):
print(title)
print('module name:', __name__)
print('parent process:', os.getppid())
print('process id:', os.getpid())
def f2(name): # for Process
info('function f2')
print('hello', name)
if __name__ == '__main__':
print("part 1 Pool")
with Pool(5) as p1:
print(p1.map(f1, [10, 11, 12]))
print()
print("part 2 Process")
info('main line')
p2 = Process(target=f2, args=('bob',))
p2.start()
p2.join()
"""
part 1 Pool
[100, 121, 144]
part 2 Process
main line
module name: __main__
parent process: 5
process id: 2706
function f2
module name: __main__
parent process: 2706
process id: 2715
hello bob
"""
| 3.84375
| 4
|
setup.py
|
mab262/covid19_dashboard_max
| 0
|
12774737
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="covid19_dashboard",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="A personalized dashboard which maps up to date covid data to a web template",
long_description="Using a webpage template this package creates a dashboard displaying up to date covid data from "
"an api, it also contains news articles obtained from a news api and you can remove articles and "
"schedule updates for yourself",
long_description_content_type="text/markdown",
url="",
classifiers=[
"Programming Language :: Python :: 3",
"License :: Freeware",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
)
| 1.835938
| 2
|
src/HistEqualizer/HistogramEqualization.py
|
victormmp/processamento-digital-imagens
| 0
|
12774738
|
<filename>src/HistEqualizer/HistogramEqualization.py
"""
Histogram Equalization Class
"""
import numpy
import math
import copy
import matplotlib.pyplot as plt
class HistogramEqualization:
"""Implements Histogram Equalization"""
imgName = "IMG" #ImageName
colorDepth = 8 #Intensity represented by 8 bits
def __init__(self, pixels, colorDepth=8, imgName="IMG"):
self.pixels = pixels
self.colorDepth = colorDepth
self.imgName = imgName
def evaluate(self):
# Assert pixels is a matrix
assert(type(self.pixels) == numpy.ndarray)
height, width, _ = self.pixels.shape
img = self.pixels.reshape(height*width)
L = 2**self.colorDepth
# Assert color depth is coherent
assert(L > numpy.amax(img))
# Calculation of intensity frequencies
frequency = numpy.zeros(L)
for pixel in img:
frequency[pixel] += 1/(width*height)
# Print histogram of original image
fig_name = self.imgName + "_hist"
self.printHistogram(frequency,fig_name)
# Creation of intensity transformation function
eq_transformation_func = numpy.zeros(L)
cumulative = 0.0
for intensity in range(L):
    # Standard equalization transform: s_k = (L-1) * sum of p(j) for j <= k
    cumulative += (L - 1) * frequency[intensity]
    eq_transformation_func[intensity] = cumulative
eq_transformation_func = numpy.around(eq_transformation_func) # Round new intensity values
eq_transformation_func = eq_transformation_func.astype(int) # Transform to integer
# Generation of equalized image from the transformation function
eq_img = eq_transformation_func[img]
# Calculation of equalized intensity frequencies
frequency_eq = numpy.zeros(L)
for pixel in eq_img:
frequency_eq[pixel] += 1/(width*height)
# Print histogram of equalized image
fig_name = self.imgName + "_hist_eq"
self.printHistogram(frequency_eq,fig_name)
result = numpy.array(eq_img).reshape((height, width, 1))
return result
@staticmethod
def printHistogram(frequency, figName):
f = plt.figure()
plt.bar(range(len(frequency)),frequency)
plt.xlabel("Intensity")
plt.ylabel("Frequency")
figName = figName + ".pdf"
f.savefig(figName, bbox_inches='tight')
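
# --- Usage sketch (illustrative, not part of the original file) ---
# Assumes an 8-bit grayscale image stored as a (height, width, 1) numpy array,
# which is the shape evaluate() expects; a synthetic random image is used here.
if __name__ == "__main__":
    demo_pixels = numpy.random.randint(0, 256, size=(64, 64, 1))
    equalizer = HistogramEqualization(demo_pixels, colorDepth=8, imgName="demo")
    equalized = equalizer.evaluate()  # also writes demo_hist.pdf and demo_hist_eq.pdf
    print(equalized.shape)            # (64, 64, 1)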
| 3.59375
| 4
|
ASAP/S_SequenceInRegion.py
|
HassounLab/ASAP
| 5
|
12774739
|
import Bio.SeqUtils.ProtParam
import os
import ASAP.FeatureExtraction as extract
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Chothia numbering definition for CDR regions
CHOTHIA_CDR = {'L': {'1': [24, 34], '2': [50, 56], '3': [89, 97]}, 'H':{'1': [26, 32], '2': [52, 56], '3': [95, 102]}}
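# Example (illustrative, not in the original file): under the Chothia boundaries above,
# heavy-chain position 30 falls in CDR-H1 (26-32), while position 40 lies in framework FW2.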
canonical_direct = '../data/pigs_canonical.txt'
SET_NAME = 'IGHV'
IF_ONLY_HEAVY = True
CNT_DB = 1
CNT_TARGET = 1
REFERENCE_PATH_TESTCASE = '../testCase/IGHV/reference-IGHV/'
TARGETING_PATH_TESTCASE = '../testCase/IGHV/targeting-MMP-IGHV/'
TARGET_DESIRE_SIZE = 134 #44 #IGHV
targeting_direct = TARGETING_PATH_TESTCASE
reference_direct = REFERENCE_PATH_TESTCASE
Amino, Num, Germ, DatasetName, DatasetSize = extract.ReadAminoNumGerm(targeting_direct, reference_direct)
seq_id = []
for i, name in enumerate(DatasetName):
# if i<2:
# continue
tmp= [[] for j in range(int(DatasetSize[i]))]
# for every seq in that dataset
for j in range(int(DatasetSize[i])):
seq_name = name + '_' + str(j)
seq_id.append(seq_name)
# raw sequence
def sequence_raw():
def getSequenceHL(sname):
SH = ''.join(Amino['H'][sname])
SL = ''
if not IF_ONLY_HEAVY:
SL = ''.join(Amino['L'][sname])
return SL, SH
else:
return [SH]
with open('../results/'+SET_NAME +'_Sequence.csv','w') as fi:
fi.write('sequence name, ')
if not IF_ONLY_HEAVY:
fi.write('light chain, ')
fi.write('heavy chain\n')
for sname in seq_id:
fi.write(sname + ',' + ','.join(getSequenceHL(sname))+ '\n')
# sequence with numbering
def sequence_num():
def getSequenceHL_num(sname):
NH = ','.join(Num['H'][sname])
SH = ','.join(Amino['H'][sname])
NL = ','.join(Num['L'][sname])
SL = ','.join(Amino['L'][sname])
return NH, SH, NL, SL
with open('./Sequence_numbered.csv','w') as fi:
for sname in seq_id:
NH, SH, NL, SL = getSequenceHL_num(sname)
fi.write(sname + ' light num,' + NL + '\n')
fi.write(sname + ' light seq,' + SL + '\n')
fi.write(sname + ' heavy num,' + NH + '\n')
fi.write(sname + ' heavy seq,' + SH + '\n')
# sequence with region
def sequence_region():
def getSequenceHL_region(sname):
NH = Num['H'][sname]
HFW1, HCDR1, HFW2, HCDR2, HFW3, HCDR3, HFW4 = '', '', '', '', '', '', ''
for i, number in enumerate(NH):
if number[-1] >= 'A' and number[-1] <= 'Z':
num_i = int(number[:-1])
else:
num_i = int(number)
if num_i < CHOTHIA_CDR['H']['1'][0]:
HFW1 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['1'][1]:
HCDR1+= Amino['H'][sname][i]
elif num_i < CHOTHIA_CDR['H']['2'][0]:
HFW2 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['2'][1]:
HCDR2 += Amino['H'][sname][i]
elif num_i < CHOTHIA_CDR['H']['3'][0]:
HFW3 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['3'][1]:
HCDR3 += Amino['H'][sname][i]
else:
HFW4 += Amino['H'][sname][i]
if IF_ONLY_HEAVY:
return ''.join(HFW1), ''.join(HCDR1), ''.join(HFW2), ''.join(HCDR2), ''.join(HFW3), ''.join(HCDR3), ''.join(
HFW4)
else:
NL = Num['L'][sname]
LFW1, LCDR1, LFW2, LCDR2, LFW3, LCDR3, LFW4 = '', '', '', '', '', '', ''
for i, number in enumerate(NL):
if number[-1] >= 'A' and number[-1] <= 'Z':
num_i = int(number[:-1])
else:
num_i = int(number)
if num_i < CHOTHIA_CDR['L']['1'][0]:
LFW1 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['1'][1]:
LCDR1 += Amino['L'][sname][i]
elif num_i < CHOTHIA_CDR['L']['2'][0]:
LFW2 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['2'][1]:
LCDR2 += Amino['L'][sname][i]
elif num_i < CHOTHIA_CDR['L']['3'][0]:
LFW3 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['3'][1]:
LCDR3 += Amino['L'][sname][i]
else:
LFW4 += Amino['L'][sname][i]
return ''.join(LFW1), ''.join(LCDR1), ''.join(LFW2), ''.join(LCDR2), ''.join(LFW3), ''.join(LCDR3), ''.join(LFW4),\
''.join(HFW1), ''.join(HCDR1), ''.join(HFW2), ''.join(HCDR2), ''.join(HFW3), ''.join(HCDR3), ''.join(HFW4)
with open('../results/'+SET_NAME +'_Sequence_region.csv','w') as fi:
if IF_ONLY_HEAVY:
fi.write(
'sequence id, heavy chain FW1, heavy chain CDR1, heavy chain FW2, heavy chain CDR2, heavy chain FW3, heavy chain CDR3, heavy chain FW4\n')
else:
fi.write('sequence id, light chain FW1, light chain CDR1, light chain FW2, light chain CDR2, light chain FW3, light chain CDR3, light chain FW4, '+
'heavy chain FW1, heavy chain CDR1, heavy chain FW2, heavy chain CDR2, heavy chain FW3, heavy chain CDR3, heavy chain FW4\n')
for sname in seq_id:
fi.write(sname + ',' + ','.join(getSequenceHL_region(sname)) + '\n')
def feature_distribution():
from collections import Counter
write_out = [[] for i in range(len(seq_id))]
for fi in range(1,12):
feat = []
for item in write_out:
feat.append(item[fi])
feat_count = Counter(feat)
sorted_count = sorted(feat_count.items(), key=lambda kv: kv[1], reverse=True)
if fi==11:
feat_type = sorted_count[0][0].split('_')[0]
else:
feat_type = sorted_count[0][0].split('_')[0] + sorted_count[0][0].split('_')[1]
with open('./Features_distribution_'+feat_type+'.csv','w') as fi:
for i in range(len(sorted_count)):
fi.write(sorted_count[i][0]+','+str(sorted_count[i][1])+'\n')
def feature():
write_out = [[] for i in range(len(seq_id))]
for i in range(len(seq_id)):
write_out[i].append(seq_id[i])
for idx, f in enumerate(AllFeatureVectors[i]):
if f == 1:
write_out[i].append(AllFeatureNames[idx])
with open('../results/'+SET_NAME +'_Features.csv', 'w') as fi:
fi.write('sequence id, ')
if not IF_ONLY_HEAVY:
fi.write('light chain V region, light chain J region, ')
fi.write('heavy chain V region, heavy chain J region, ')
if not IF_ONLY_HEAVY:
fi.write('Canonical L1, Canonical L2, Canonical L3, ')
fi.write('Canonical H1, Canonical H2, Canonical H3, ' )
fi.write('PI, frequent positional motif\n')
for i in range(len(write_out)):
fi.write(','.join(write_out[i]) + '\n')
def correlation_feature():
###### plot correlation matrix
data = pd.DataFrame(AllFeatureVectors, columns=AllFeatureNames)
# print(AllFeatureVectors.shape)
corr = data.corr()
import numpy as np
corr = np.array(corr)
with open('../results/Pearson_feature_correlation.csv', 'w') as fi:
fi.write('Feature value 1, Feature value 2, Pearson coefficient\n')
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
# if str(corr[i][j])=='nan':
# print('nan', AllFeatureNames[i], AllFeatureNames[j])
fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(corr[i][j])+'\n')
# data.to_csv(r'../results/Feature_test.csv', header=True)
# fig = plt.figure(figsize=(100, 70))
# ax = fig.add_subplot(111)
# cax = ax.matshow(corr, cmap='seismic', vmin=-1, vmax =1)
# fig.colorbar(cax)
# ticks = np.arange(0, len(data.columns),1)
# ax.set_xticks(ticks)
# plt.xticks(rotation=90)
# ax.set_yticks(ticks)
# ax.set_xticklabels(data.columns)
# ax.set_yticklabels(data.columns)
# plt.savefig('../results/feature_correlation.png')
# corr = pd.DataFrame(corr, index=AllFeatureNames, columns=AllFeatureNames)
###### display pairwise correlation value
# au_corr = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool))
# au_corr = au_corr.stack().sort_values(ascending=False)
# au_corr = corr.unstack()
# au_corr.columns = [' 1', 'Feature 2', 'Pearson Correlation Value']
# au_corr = pd.DataFrame(au_corr.values, columns = ['Feature 1, Feature 2, Pearson Correlation Value'])
# au_corr.to_csv(r'../results/Pearson_feature_correlation.csv', header=True)
# print(len(au_corr))
# print(AllFeatureVectors[:, AllFeatureNames.index('Germ_LJ_IGKJ3*01')])
# print(AllFeatureVectors[:, AllFeatureNames.index('Canonical_L2_0')])
# def JaccardCoefficientAnalysis():
# df = pd.DataFrame(AllFeatureVectors, columns=AllFeatureNames)
#
# interest_feature=['Germ_HV_IGHV3-23*01', 'Canonical_H2_6', 'Germ_HJ_IGHJ4*02', 'Germ_HJ_IGHJ6*01', 'Germ_LV_IGKV1D-39*01',
# 'Canonical_H2_5', 'Germ_HJ_IGHJ4*01']
# jac_sim = np.eye(len(AllFeatureNames))
# for i in range(len(AllFeatureNames)):
# for j in range(i+1, len(AllFeatureNames)):
# if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
# continue
# a = AllFeatureVectors[:, i]
# b = AllFeatureVectors[:, j]
# aandb =0
# aorb = 0
# for k in range(len(a)):
# if a[k]==b[k] and a[k]==1:
# aandb +=1
# if a[k]==1 or b[k]==1:
# aorb +=1
# if aorb==0:
# jac_tmp=0
# else:
# jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
#
# jac_sim[i][j]=jac_tmp
# jac_sim[j][i]=jac_tmp
#
#
# with open('../results/Jaccard_feature_coefficient.csv', 'w') as fi:
# fi.write('Feature value 1, Feature value 2, Jaccard coefficient\n')
# for i in range(len(AllFeatureNames)):
# for j in range(i+1, len(AllFeatureNames)):
# if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
# continue
# fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(jac_sim[i][j])+'\n')
#
#
# fig = plt.figure(figsize=(100, 70))
# ax = fig.add_subplot(111)
# cax = ax.matshow(jac_sim, cmap='Blues', vmin=0, vmax =1)
# fig.colorbar(cax)
# ticks = np.arange(0, len(df.columns),1)
# ax.set_xticks(ticks)
# plt.xticks(rotation=90)
# ax.set_yticks(ticks)
# ax.set_xticklabels(df.columns)
# ax.set_yticklabels(df.columns)
# plt.savefig('../results/feature_coefficient.png')
#
# # print(AllFeatureVectors[:,AllFeatureNames.index('Germ_LJ_IGKJ3*01')])
# # print(AllFeatureVectors[:,AllFeatureNames.index('Canonical_L2_0*01')])
# # where(np.triu(np.ones(jac_sim.shape), k=1).astype(np.bool))
# # au_jac = jac_sim.where(np.triu(np.ones(jac_sim.shape), k=0).astype(np.bool))
# # au_jac = au_jac.stack().sort_values(ascending=False)
# # au_jac = jac_sim.unstack()
# # print(len(au_jac))
# # au_jac.to_csv(r'../results/Jaccard_feature_coefficient.csv', header=True)
def JaccardCoefficientAnalysis():
    # Pairwise Jaccard coefficient |A and B| / |A or B| over the binary feature
    # columns, computed separately for the reference (PDB) subset and the
    # MMP-targeting subset; motif features are skipped.
    PDB_size = DatasetSize[0]
    jac_sim_PDB = np.eye(len(AllFeatureNames))
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
a = AllFeatureVectors[:PDB_size, i]
b = AllFeatureVectors[:PDB_size, j]
aandb =0
aorb = 0
for k in range(len(a)):
if a[k]==b[k] and a[k]==1:
aandb +=1
if a[k]==1 or b[k]==1:
aorb +=1
if aorb==0:
jac_tmp=0
else:
jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] == 'Germ_HV_IGHV3-23*01' and AllFeatureNames[j] =='Canonical_H2_6':
# print(a, b, jac_tmp)
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
jac_sim_PDB[i][j]=jac_tmp
jac_sim_PDB[j][i]=jac_tmp
jac_sim_MMP = np.eye(len(AllFeatureNames))
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
a = AllFeatureVectors[PDB_size:, i]
b = AllFeatureVectors[PDB_size:, j]
aandb =0
aorb = 0
for k in range(len(a)):
if a[k]==b[k] and a[k]==1:
aandb +=1
if a[k]==1 or b[k]==1:
aorb +=1
if aorb==0:
jac_tmp=0
else:
jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
jac_sim_MMP[i][j]=jac_tmp
jac_sim_MMP[j][i]=jac_tmp
with open('../results/'+SET_NAME+'_Jaccard Feature Coefficient.csv', 'w') as fi:
fi.write('Feature value 1, Feature value 2, Jaccard coefficient for reference set, Jaccard coefficient for MMP-targeting set\n')
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(jac_sim_PDB[i][j])+','+ str(jac_sim_MMP[i][j])+'\n')
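# A more compact equivalent of the per-subset loops above, kept only as a sketch:
# it assumes the feature columns are strictly 0/1, which appears to hold for the
# one-hot/multi-hot features assembled in __main__. It is not called anywhere.
def jaccard_matrix(X):
    """Pairwise Jaccard coefficients between the columns of a binary matrix."""
    X = np.asarray(X, dtype=float)
    intersection = X.T @ X                      # counts where both columns are 1
    column_sums = X.sum(axis=0)
    union = column_sums[:, None] + column_sums[None, :] - intersection
    jac = np.where(union > 0, intersection / np.where(union > 0, union, 1), 0.0)
    np.fill_diagonal(jac, 1.0)                  # match the np.eye initialisation above
    return jac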
if __name__=='__main__':
sequence_raw()
sequence_region()
OneHotGerm, GermFeatureNames = extract.GetOneHotGerm(Germ, DatasetSize, DatasetName)
OneHotCanon, CanonFeatureNames = extract.GetOneHotCanon(canonical_direct, Amino, Num, DatasetSize, DatasetName)
CDRH3 = extract.GetCDRH3(Amino, Num)
OneHotPI, PIFeatureNames = extract.GetOneHotPI(CDRH3, DatasetSize, DatasetName)
MultiHotMotif, MotifFeatureNames = extract.MultiHotMotif(CDRH3, DatasetSize, DatasetName)
AllFeatureVectors, AllFeatureNames, _, _ = extract.GetFeatureVectors(OneHotGerm, GermFeatureNames, OneHotCanon, CanonFeatureNames, OneHotPI, PIFeatureNames, MultiHotMotif, MotifFeatureNames)
feature()
# correlation_feature()
JaccardCoefficientAnalysis()
| 2.265625
| 2
|
actions/utils.py
|
kabirivan/Ecommerce-Assistant-Jasmine
| 0
|
12774740
|
<filename>actions/utils.py
import logging
import os
import json
import smtplib
import traceback
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
logger = logging.getLogger(__name__)
email_username = os.getenv('EMAIL_USERNAME')
print('email_username', email_username)
email_password = os.getenv('EMAIL_PASSWORD')
print('email_password', email_password)
base_id = os.getenv('BASE_ID')
table_name = os.getenv('TABLE_NAME')
api_key_airtable = os.getenv('API_KEY_AIRTABLE')
def send_email(subject: str, recipient_email: str, content):
try:
username = email_username
password = <PASSWORD>
message_data = MIMEMultipart()
message_data["From"] = username
message_data["To"] = recipient_email
message_data["Subject"] = subject
message_data.attach(MIMEText(content, "html"))
msgBody = message_data.as_string()
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp_server:
smtp_server.login(username, password)
smtp_server.sendmail(username, recipient_email, msgBody)
return True
except Exception as error:
logger.error(f"Error: {error}")
        logger.info(traceback.format_exc())  # format_exc() returns the traceback string; print_exc() would log None
return False
def get_html_data(filepath: str):
with open(filepath, "r") as html_data:
return html_data.read()
new_record = {
"name": "Gabriela",
"email": "<EMAIL>",
"feedback_value": "3",
"feedback_message": "Me gusta el diseno",
"created_at": "2022-01-10"
}
this_path = Path(os.path.realpath(__file__))
email_content = get_html_data(f"{this_path.parent}/user_email.html")
# works = send_email("Gracias por tu aporte al desarrollo tecnológico", '<EMAIL>', email_content)
# print('works', works)
| 2.625
| 3
|
kardioml/segmentation/teijeiro/utils/__init__.py
|
Seb-Good/physionet-challenge-2020
| 13
|
12774741
|
# -*- coding: utf-8 -*-
"""
This package will hold various utility classes for the rest of the project.
"""
__author__ = "<NAME>"
__date__ = "$30-nov-2011 17:50:53$"
| 1.296875
| 1
|
E#01/main.py
|
vads5/-Python-Prog
| 2
|
12774742
|
'''
name: E#01
author: <NAME>
email: <EMAIL>
link: https://www.youtube.com/channel/UCNN3bpPlWWUkUMB7gjcUFlw
MIT License https://github.com/repen/E-parsers/blob/master/License
'''
import requests
from bs4 import BeautifulSoup
url = "http://light-science.ru/kosmos/vselennaya/top-10-samyh-bolshih-zvezd-vo-vselennoj.html"
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; \
x64; rv:47.0) Gecko/20100101 Firefox/48.0'}
response = requests.get(url, headers=header)
html = response.text
soup = BeautifulSoup(html, "html.parser")
container = soup.find("div", {"class": "td-post-content"})
elements = container.find_all("p")
string = "топ звезд самых больших: \n"  # "top of the biggest stars:"
for element in elements:
if element.find("strong"):
string += "\t" + element.strong.text + "\n"
with open("data.txt", "w", encoding="utf8") as f:
f.write(string)
| 2.703125
| 3
|
structure/greibach_path.py
|
vnszero/interpretadorGLC
| 0
|
12774743
|
from typing import Dict
from structure.GLC import GLC
class Path:
'''
ex of a Path:
a. G | UGU
'''
def __init__(self, alpha : str, top : str, stack : str):
self.alpha = alpha
self.top = top
self.stack = stack
    def __repr__(self) -> str:
        return f'{self.alpha}. {self.top} | {self.stack}'
    def __str__(self) -> str:
        return f'{self.alpha}. {self.top} | {self.stack}'
def get_stack(self) -> str:
return self.stack
class GreibachPaths:
'''
ex of a GreibachPath:
aG : [a. G | UGU, a. G | GU, a. G | UG]
bU : [b. U | ]
'''
def __init__(self, language : GLC):
self.paths_dict = dict()
for transition in language.get_transitions_list():
alpha = transition[1][0]
top = transition[0]
stack = transition[1][1:]
key = alpha + top
if key in self.paths_dict.keys():
self.paths_dict[key].append(Path(alpha, top, stack))
else:
self.paths_dict[key] = [Path(alpha, top, stack)]
def __repr__(self) -> str:
string = ''
for key in self.paths_dict:
string += f'{self.paths_dict[key]}'+'\n'
return string
def __str__(self) -> str:
string = ''
for key in self.paths_dict:
string += f'{self.paths_dict[key]}'+'\n'
return string
def get_paths_dict(self) -> Dict:
return self.paths_dict
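# A minimal usage sketch (an assumption, not part of the original module): the real
# GLC object is expected to expose get_transitions_list() returning
# (nonterminal, production) pairs with each production in Greibach normal form,
# i.e. a terminal followed by the stack symbols, exactly as __init__ slices it.
if __name__ == '__main__':
    class _DemoGrammar:
        def get_transitions_list(self):
            return [('G', 'aUGU'), ('G', 'aGU'), ('G', 'aUG'), ('U', 'b')]
    demo = GreibachPaths(_DemoGrammar())
    # prints: {'aG': [a. G | UGU, a. G | GU, a. G | UG], 'bU': [b. U | ]}
    print(demo.get_paths_dict())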
| 2.6875
| 3
|
ontobio/bin/timeit.py
|
alliance-genome/ontobio
| 101
|
12774744
|
#!/usr/bin/env python3
from ontobio.sparql2ontology import *
from networkx.algorithms.dag import ancestors
import time
def r():
t1 = time.process_time()
get_edges('pato')
t2 = time.process_time()
print(t2-t1)
r()
r()
r()
"""
LRU is much faster, but does not persist. However, should be fast enough
# percache
## ENVO
$ python ./obographs/bin/timeit.py
QUERYING:envo
1.103934
0.0032450000000001644
0.003185999999999911
$ python ./obographs/bin/timeit.py
0.018115000000000048
0.00362800000000002
0.003180000000000016
## GO
$ python ./obographs/bin/timeit.py
QUERYING:go
13.218031
0.04876699999999978
0.04904600000000059
$ python ./obographs/bin/timeit.py
0.05928599999999995
0.045568
0.045347000000000026
# lru
$ python ./obographs/bin/timeit.py
QUERYING:envo
1.0635080000000001
2.0000000000575113e-06
1.000000000139778e-06
$ python ./obographs/bin/timeit.py
QUERYING:go
13.225105000000001
2.000000000279556e-06
0.0
"""
| 2.390625
| 2
|
objects/CSCG/_3d/ADF/trace/base/cochain/local.py
|
mathischeap/mifem
| 1
|
12774745
|
# -*- coding: utf-8 -*-
from screws.freeze.main import FrozenOnly
class ____3dCSCG_ADTF_Cochain_Local____(FrozenOnly):
""""""
def __init__(self, dt_CO):
""""""
self._PC_ = dt_CO._dt_.prime.cochain
self._MM_ = dt_CO._dt_.mass_matrix
self._freeze_self_()
def __getitem__(self, i):
""""""
return self._MM_[i] @ self._PC_.local[i]
def __contains__(self, i):
""""""
return i in self._PC_.local
def __iter__(self):
"""Go through all mesh element numbers in this core."""
for i in self._PC_.local:
yield i
def __len__(self):
"""Actually return how many mesh elements in this core."""
return len(self._PC_.local)
| 2.5
| 2
|
13_commandline/code/hello_world_optparse.py
|
lluxury/P_U_S_A
| 0
|
12774746
|
#!/usr/bin/env python
import optparse
def main():
p = optparse.OptionParser()
p.add_option('--sysadmin', '-s', default="BOFH")
options, arguments = p.parse_args()
    print('Hello, %s' % options.sysadmin)
if __name__ == '__main__':
main()
| 2.421875
| 2
|
pyvis/PIMCPy/TestGaussianSingleSlicePotential.py
|
b3sigma/fourd
| 20
|
12774747
|
#!/bin/env python
import numpy
#import pylab
import CalcStatistics
import random
from PIMC import *
numParticles=2
numTimeSlices=5
tau=0.1
lam=0.5
Path=PathClass(numpy.zeros((numTimeSlices,numParticles,3),float),tau,lam)
Path.SetPotential(HarmonicOscillator)
Path.SetCouplingConstant(0.0)
print(PIMC(100000, Path, GaussianSingleSliceMove))
| 2.0625
| 2
|
app/segmentation/utils.py
|
zhiva-ai/Lung-Segmentation-API
| 0
|
12774748
|
<gh_stars>0
from pydicom import FileDataset
from typing import Tuple
def get_pixel_spacing_and_slice_thickness_in_centimeters(
instance: FileDataset,
) -> Tuple[float, float, float]:
"""
:param instance: example pydicom instance, one from the
:return:
"""
pixel_spacing_x, pixel_spacing_y = instance.PixelSpacing
pixel_spacing_x_cm, pixel_spacing_y_cm = pixel_spacing_x / 10, pixel_spacing_y / 10
slice_thickness = instance.SliceThickness
slice_thickness_cm = slice_thickness / 10
return pixel_spacing_x_cm, pixel_spacing_y_cm, slice_thickness_cm
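# Hypothetical usage sketch; the file path below is a placeholder and not part of
# this module. pydicom.dcmread returns the FileDataset this helper expects.
if __name__ == "__main__":
    import pydicom
    instance = pydicom.dcmread("/path/to/one_slice.dcm")  # placeholder path
    dx_cm, dy_cm, dz_cm = get_pixel_spacing_and_slice_thickness_in_centimeters(instance)
    print(f"voxel volume: {dx_cm * dy_cm * dz_cm:.4f} cm^3")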
| 2.59375
| 3
|
Week 10/E19.py
|
aash7871/PHYS-3210
| 0
|
12774749
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 09:57:43 2019
@author: amandaash
"""
import numpy as np
import matplotlib.pyplot as plt
p = 2
v = 1
x = 0
m = 10
time_step = 0.0001
k = 3
t0 = 0
tf = 10
"""
x_val = []
v_val = []
time_array = np.arange(t0,tf, time_step)
for n in time_array:
v1 = v + (time_step/m)*(-k*x**(p-1))
x1 = x + time_step*v
x_val.append(x1)
v_val.append(v1)
v1 = v
x1 = x
plt.plot(time_array, x_val)
plt.show()
plt.plot(time_array, v_val)
plt.show()
"""
def harmonic_oscillator(p,k,v0,x0,m,time_step,t0,tf):
v = v0
x = x0
x_val = []
v_val = []
time_array = np.arange(t0,tf, time_step)
for n in time_array:
vf = v + (time_step/m)*(-k*x**(p-1))
xf = x + time_step*v
x_val.append(xf)
v_val.append(vf)
x = xf
v = vf
return x_val, v_val, time_array
#P_val = np.arange(2,8,2)
P_val = np.array([2,6,10])
fig1 = plt.figure()
ax1 = fig1.add_subplot(1, 1, 1)
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
for P_value in P_val:
x_P, v_P, t_P= harmonic_oscillator(P_value, 10, 0, 1, 1, 0.0001, 0, 10)
ax1.plot(x_P, t_P, label = "P = {0}".format(P_value))
ax2.plot(v_P, t_P, label = "P = {0}".format(P_value))
ax1.set_xlabel('distance')
ax1.set_ylabel('time')
ax1.legend()
fig1.savefig('spring_dt.pdf')
fig1.show()
ax2.set_xlabel('velocity')
ax2.set_ylabel('time')
ax2.legend()
fig2.savefig('spring_vt.pdf')
fig2.show()
#amplitude - frequency things:
fig3 = plt.figure()
ax3 = fig3.add_subplot(1,1,1)
x_ic = np.arange(0.5,2.0,0.5)
for amplitude in x_ic:
x_a, v_a, t_a = harmonic_oscillator(6, 10, 0, amplitude, 1, 0.0001, 0, 10)
ax3.plot(x_a, t_a, label = '$x_0$ = {0}'.format(amplitude))
ax3.set_title('P = 6, non-harmonic oscillator varying $x_0$')
ax3.set_xlabel('x')
ax3.set_ylabel('t')
ax3.legend()
fig3.savefig('non_harmonic_amplitude.pdf')
fig3.show()
#Going between the RK2 method and the Euler method from exercise 19, I see no
#difference between the two methods in either the position vs. time or velocity vs. time for the oscillator.
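# For reference, a minimal RK2 (midpoint) step for the same oscillator — a sketch
# added for the comparison mentioned above, not part of the original exercise.
def harmonic_oscillator_rk2(p, k, v0, x0, m, time_step, t0, tf):
    v, x = v0, x0
    x_val, v_val = [], []
    time_array = np.arange(t0, tf, time_step)
    for n in time_array:
        # half-step (midpoint) estimates
        a = (-k * x**(p - 1)) / m
        x_mid = x + 0.5 * time_step * v
        v_mid = v + 0.5 * time_step * a
        a_mid = (-k * x_mid**(p - 1)) / m
        # full step using the midpoint derivatives
        x = x + time_step * v_mid
        v = v + time_step * a_mid
        x_val.append(x)
        v_val.append(v)
    return x_val, v_val, time_array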
| 3.15625
| 3
|
make.py
|
ASquirrelsTail/serve-up
| 0
|
12774750
|
import os
from distutils.dir_util import copy_tree
# import PyInstaller.__main__
pyinst_args = [
'-c',
'serve_up.py',
'--name=ServeUp',
'--onefile',
'--hidden-import=whitenoise',
'--hidden-import=whitenoise.middleware',
'--hidden-import=visitors.admin',
'--hidden-import=tables.admin',
'--hidden-import=orders.admin',
'--hidden-import=menu.admin',
'--clean',
]
# PyInstaller.__main__.run(pyinst_args) # running pyinstaller via this script in windows is super brittle
os.system('pyinstaller {}'.format(' '.join(pyinst_args))) # Just use the command line instead
dist_static_path = os.path.join('dist', 'static')
if not os.path.exists(dist_static_path):
os.mkdir(dist_static_path)
copy_tree('static', dist_static_path)
dist_templates_path = os.path.join('dist', 'templates')
if not os.path.exists(dist_templates_path):
os.mkdir(dist_templates_path)
copy_tree('templates', dist_templates_path)
| 2.28125
| 2
|